path (stringlengths 7–265) | concatenated_notebook (stringlengths 46–17M) |
---|---|
notebooks/e_extra/pytorch_image_filtering_ml/Chapter 20 -- Coding Example.ipynb | ###Markdown
(Coding_Example)= Chapter 20 -- Coding Example
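The `Network` class below implements the standard sigmoid feedforward rule and the mini-batch gradient-descent update. As a reading aid for the code (notation follows the code: `eta` is the learning rate $\eta$, and $m$ is the mini-batch size):

$$a^{l} = \sigma\!\left(w^{l} a^{l-1} + b^{l}\right), \qquad \sigma(z) = \frac{1}{1+e^{-z}}, \qquad \sigma'(z) = \sigma(z)\bigl(1-\sigma(z)\bigr)$$

$$w \rightarrow w - \frac{\eta}{m} \sum_{x} \frac{\partial C_x}{\partial w}, \qquad b \rightarrow b - \frac{\eta}{m} \sum_{x} \frac{\partial C_x}{\partial b}$$

The per-example partial derivatives $\partial C_x/\partial w$ and $\partial C_x/\partial b$ are computed by `backprop`, and the sums over the mini-batch are accumulated in `update_mini_batch`.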
###Code
import numpy as np
import random
class Network(object):
def __init__(self, sizes):
self.num_layers = len(sizes) # sizes = [2,3,1] --> it has 2 inputs, 3 neurons in the second layer, and 1 neuron in the last layer
self.sizes = sizes # len(sizes) is the number of layers; in this case, 3 layers
self.biases = [np.random.randn(y, 1) for y in sizes[1:]] # y = 3 and 1
self.weights = [np.random.randn(y, x) # np.random.randn(y, x) creates a y-by-x array of Gaussian samples
for x, y in zip(sizes[:-1], sizes[1:])] # samples have mean 0 and variance 1
# zip pairs for sizes = [2, 3, 1]: (2, 3) and (3, 1), giving weight shapes (3, 2) and (1, 3)
def feedforward(self, a):
"""Return the output of the network if "a" is input."""
for b, w in zip(self.biases, self.weights):
a = sigmoid(np.dot(w, a)+b)
return a # a is the activation vector of the output layer
def SGD(self, training_data, epochs, mini_batch_size, eta,
test_data=None):
"""Train the neural network using mini-batch stochastic
gradient descent. The "training_data" is a list of tuples
"(x, y)" representing the training inputs and the desired
outputs. The other non-optional parameters are
self-explanatory. If "test_data" is provided then the
network will be evaluated against the test data after each
epoch, and partial progress printed out. This is useful for
tracking progress, but slows things down substantially."""
if test_data: n_test = len(test_data)
n = len(training_data)
for j in range(epochs):
random.shuffle(training_data)
mini_batches = [
training_data[k:k+mini_batch_size]
for k in range(0, n, mini_batch_size)]
for mini_batch in mini_batches:
self.update_mini_batch(mini_batch, eta)
if test_data:
print("Epoch {0}: {1} / {2}".format(
j, self.evaluate(test_data), n_test))
else:
print("Epoch {0} complete".format(j))
def update_mini_batch(self, mini_batch, eta):
"""Update the network's weights and biases by applying
gradient descent using backpropagation to a single mini batch.
The "mini_batch" is a list of tuples "(x, y)", and "eta"
is the learning rate."""
nabla_b = [np.zeros(b.shape) for b in self.biases]
nabla_w = [np.zeros(w.shape) for w in self.weights]
for x, y in mini_batch:
delta_nabla_b, delta_nabla_w = self.backprop(x, y)
nabla_b = [nb+dnb for nb, dnb in zip(nabla_b, delta_nabla_b)]
nabla_w = [nw+dnw for nw, dnw in zip(nabla_w, delta_nabla_w)]
self.weights = [w-(eta/len(mini_batch))*nw
for w, nw in zip(self.weights, nabla_w)]
self.biases = [b-(eta/len(mini_batch))*nb
for b, nb in zip(self.biases, nabla_b)]
def backprop(self, x, y):
"""Return a tuple ``(nabla_b, nabla_w)`` representing the
gradient for the cost function C_x. ``nabla_b`` and
``nabla_w`` are layer-by-layer lists of numpy arrays, similar
to ``self.biases`` and ``self.weights``."""
nabla_b = [np.zeros(b.shape) for b in self.biases]
nabla_w = [np.zeros(w.shape) for w in self.weights]
# feedforward
activation = x
activations = [x] # list to store all the activations, layer by layer
zs = [] # list to store all the z vectors, layer by layer
for b, w in zip(self.biases, self.weights):
z = np.dot(w, activation)+b
zs.append(z)
activation = sigmoid(z)
activations.append(activation)
# backward pass
delta = self.cost_derivative(activations[-1], y) * sigmoid_prime(zs[-1])
nabla_b[-1] = delta
nabla_w[-1] = np.dot(delta, activations[-2].transpose())
# Note that the variable l in the loop below is used a little
# differently to the notation in Chapter 2 of the book. Here,
# l = 1 means the last layer of neurons, l = 2 is the
# second-last layer, and so on. It's a renumbering of the
# scheme in the book, used here to take advantage of the fact
# that Python can use negative indices in lists.
for l in range(2, self.num_layers):
z = zs[-l]
sp = sigmoid_prime(z)
delta = np.dot(self.weights[-l+1].transpose(), delta) * sp
nabla_b[-l] = delta
nabla_w[-l] = np.dot(delta, activations[-l-1].transpose())
return (nabla_b, nabla_w)
def evaluate(self, test_data):
"""Return the number of test inputs for which the neural
network outputs the correct result. Note that the neural
network's output is assumed to be the index of whichever
neuron in the final layer has the highest activation."""
test_results = [(np.argmax(self.feedforward(x)), y)
for (x, y) in test_data]
return sum(int(x == y) for (x, y) in test_results)
def cost_derivative(self, output_activations, y):
"""Return the vector of partial derivatives \partial C_x /
\partial a for the output activations."""
return (output_activations-y)
def sigmoid(z):
return 1.0/(1.0+np.exp(-z))
def sigmoid_prime(z):
"""Derivative of the sigmoid function."""
return sigmoid(z)*(1-sigmoid(z))
sizes = [2, 3, 1]
for y in sizes[1:]:
#print(y)
pass
np.random.randn(3, 1)
net = Network([2, 3, 1])
"""
mnist_loader
~~~~~~~~~~~~
A library to load the MNIST image data. For details of the data
structures that are returned, see the doc strings for ``load_data``
and ``load_data_wrapper``. In practice, ``load_data_wrapper`` is the
function usually called by our neural network code.
"""
#### Libraries
# Standard library
import pickle
import gzip
# Third-party libraries
import numpy as np
def load_data():
"""Return the MNIST data as a tuple containing the training data,
the validation data, and the test data.
The ``training_data`` is returned as a tuple with two entries.
The first entry contains the actual training images. This is a
numpy ndarray with 50,000 entries. Each entry is, in turn, a
numpy ndarray with 784 values, representing the 28 * 28 = 784
pixels in a single MNIST image.
The second entry in the ``training_data`` tuple is a numpy ndarray
containing 50,000 entries. Those entries are just the digit
values (0...9) for the corresponding images contained in the first
entry of the tuple.
The ``validation_data`` and ``test_data`` are similar, except
each contains only 10,000 images.
This is a nice data format, but for use in neural networks it's
helpful to modify the format of the ``training_data`` a little.
That's done in the wrapper function ``load_data_wrapper()``, see
below.
"""
f = gzip.open('mnist.pkl.gz', 'rb')
training_data, validation_data, test_data = pickle.load(f, encoding='latin1')
f.close()
return (training_data, validation_data, test_data)
def load_data_wrapper():
"""Return a tuple containing ``(training_data, validation_data,
test_data)``. Based on ``load_data``, but the format is more
convenient for use in our implementation of neural networks.
In particular, ``training_data`` is a list containing 50,000
2-tuples ``(x, y)``. ``x`` is a 784-dimensional numpy.ndarray
containing the input image. ``y`` is a 10-dimensional
numpy.ndarray representing the unit vector corresponding to the
correct digit for ``x``.
``validation_data`` and ``test_data`` are lists containing 10,000
2-tuples ``(x, y)``. In each case, ``x`` is a 784-dimensional
numpy.ndarry containing the input image, and ``y`` is the
corresponding classification, i.e., the digit values (integers)
corresponding to ``x``.
Obviously, this means we're using slightly different formats for
the training data and the validation / test data. These formats
turn out to be the most convenient for use in our neural network
code."""
tr_d, va_d, te_d = load_data()
training_inputs = [np.reshape(x, (784, 1)) for x in tr_d[0]]
training_results = [vectorized_result(y) for y in tr_d[1]]
training_data = list(zip(training_inputs, training_results))
validation_inputs = [np.reshape(x, (784, 1)) for x in va_d[0]]
validation_data = list(zip(validation_inputs, va_d[1]))
test_inputs = [np.reshape(x, (784, 1)) for x in te_d[0]]
test_data = list(zip(test_inputs, te_d[1]))
return (training_data, validation_data, test_data)
def vectorized_result(j):
"""Return a 10-dimensional unit vector with a 1.0 in the jth
position and zeroes elsewhere. This is used to convert a digit
(0...9) into a corresponding desired output from the neural
network."""
e = np.zeros((10, 1))
e[j] = 1.0
return e
training_data, validation_data, test_data = load_data_wrapper()
net = Network([784, 30, 10])
net.SGD(training_data, 30, 10, 3.0, test_data=test_data)
###Output
Epoch 0: 8281 / 10000
Epoch 1: 8372 / 10000
Epoch 2: 9320 / 10000
Epoch 3: 9359 / 10000
Epoch 4: 9395 / 10000
Epoch 5: 9395 / 10000
Epoch 6: 9416 / 10000
Epoch 7: 9437 / 10000
Epoch 8: 9469 / 10000
Epoch 9: 9444 / 10000
Epoch 10: 9464 / 10000
Epoch 11: 9471 / 10000
Epoch 12: 9450 / 10000
Epoch 13: 9488 / 10000
Epoch 14: 9478 / 10000
Epoch 15: 9465 / 10000
Epoch 16: 9476 / 10000
Epoch 17: 9482 / 10000
Epoch 18: 9490 / 10000
Epoch 19: 9486 / 10000
Epoch 20: 9505 / 10000
Epoch 21: 9497 / 10000
Epoch 22: 9496 / 10000
Epoch 23: 9491 / 10000
Epoch 24: 9494 / 10000
Epoch 25: 9508 / 10000
Epoch 26: 9506 / 10000
Epoch 27: 9496 / 10000
Epoch 28: 9493 / 10000
Epoch 29: 9518 / 10000
|
lectures/lecture_8/neural_network.ipynb | ###Markdown
Forward propagation of a 2-layer NN
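As a compact summary of the code in the next cell (notation matches the variable names; $X$ is the standardized data matrix with one sample per row, and $\sigma$ is the sigmoid):

$$Z_1 = X W_1^{\top}, \qquad A_1 = \sigma\!\left(Z_1 + b_1^{\top}\right), \qquad Z_2 = A_1 W_2^{\top}, \qquad A_2 = \hat{Y} = \sigma\!\left(Z_2 + b_2^{\top}\right)$$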
###Code
# Imports needed by the code in this notebook (a minimal set, assuming scikit-learn is available).
# NOTE: `data` is assumed to be a pandas DataFrame loaded in an earlier cell that is not shown here.
from typing import Tuple
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import roc_auc_score

features = [
"radius_mean",
"texture_mean",
"perimeter_mean",
"area_mean",
"smoothness_mean",
"compactness_mean",
"concavity_mean",
"concave_mean",
"symmetry_mean",
"fractal_mean",
]
label = "label"
# train test split
X_raw, X_raw_test, Y, Y_test = train_test_split(data[features].values, data[label].values, test_size=0.2, random_state=42)
# Standardize the input
scaler = StandardScaler()
scaler.fit(X_raw)
X = scaler.transform(X_raw)
X_test = scaler.transform(X_raw_test)
# formatting
Y = Y.reshape((-1, 1))
Y_test = Y_test.reshape((-1, 1))
# forward pass for a simple 2-layer NN, with 3 hidden units
np.random.seed(10)
def sigmoid(x):
"""Calculates sigmoid function."""
return 1. / (1 + np.exp(-x))
# parameters for the first layer
W_1 = np.random.normal(size=(3, X.shape[1]))
print(f"Shape of W_1 is {W_1.shape}")
b_1 = np.random.normal(size=(3, 1))
print(f"Shape of b_1 is {b_1.shape}")
# parameters for the second layer
W_2 = np.random.normal(size=(1, 3))
print(f"Shape of W_2 is {W_2.shape}")
b_2 = np.random.normal(size=(1, 1))
print(f"Shape of b_1 is {b_2.shape}")
# calculate the forward propagation
Z_1 = X @ W_1.T
print(f"\nShape of Z_1 is {Z_1.shape}")
print("Samples for Z_1:")
print(Z_1[:5])
A_1 = sigmoid(Z_1 + b_1.T)
print(f"Shape of A_1 is {A_1.shape}")
print("Samples for A_1:")
print(A_1[:5])
Z_2 = A_1 @ W_2.T
print(f"\nShape of Z_2 is {Z_2.shape}")
print("Samples for Z_2:")
print(Z_2[:5])
A_2 = Y_hat = sigmoid(Z_2 + b_2.T)
print(f"Shape of A_2 is {A_2.shape}")
print("Samples for A_2:")
print(A_2[:5])
###Output
Shape of W_1 is (3, 10)
Shape of b_1 is (3, 1)
Shape of W_2 is (1, 3)
Shape of b_2 is (1, 1)
Shape of Z_1 is (455, 3)
Samples for Z_1:
[[ 0.16410112 -4.76306361 3.93309998]
[-0.46604358 4.1992739 9.5658238 ]
[-1.60754809 -0.23753874 -1.01727238]
[ 1.37695245 2.28649564 -5.09016965]
[ 0.12721277 3.49293739 0.32441791]]
Shape of A_1 is (455, 3)
Samples for A_1:
[[0.47421887 0.00490603 0.98314001]
[0.32445766 0.97466643 0.99993863]
[0.13297977 0.31284592 0.29223288]
[0.75206111 0.85032936 0.00698167]
[0.46503108 0.94996148 0.61233221]]
Shape of Z_2 is (455, 1)
Samples for Z_2:
[[ 0.16410112 -4.76306361 3.93309998]
[-0.46604358 4.1992739 9.5658238 ]
[-1.60754809 -0.23753874 -1.01727238]
[ 1.37695245 2.28649564 -5.09016965]
[ 0.12721277 3.49293739 0.32441791]]
Shape of A_2 is (455, 1)
Samples for A_2:
[[0.59207723]
[0.84761911]
[0.69066552]
[0.76062638]
[0.82363926]]
###Markdown
Training a NN with backward propagation
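The gradients computed in `derivatives_by_backprop` below follow from the binary cross-entropy loss with sigmoid activations. In the notation of the code, with $n$ samples and $\odot$ denoting element-wise multiplication:

$$L = -\frac{1}{n}\sum_{i}\Bigl[y_i \log \hat{y}_i + (1-y_i)\log\bigl(1-\hat{y}_i\bigr)\Bigr]$$

$$dZ_2 = \hat{Y} - Y, \qquad dW_2 = \tfrac{1}{n}\, dZ_2^{\top} A_1, \qquad db_2 = \operatorname{mean}(dZ_2)$$

$$dZ_1 = \bigl(dZ_2\, W_2\bigr) \odot A_1 \odot (1 - A_1), \qquad dW_1 = \tfrac{1}{n}\, dZ_1^{\top} X, \qquad db_1 = \operatorname{mean}(dZ_1)$$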
###Code
def forward_prop(
X: np.array,
W_1: np.array,
b_1: np.array,
W_2: np.array,
b_2: np.array,
) -> Tuple:
"""Performs the forward propagation of the given NN."""
# Note the NN structure is passed in from outside.
Z_1 = X @ W_1.T
A_1 = sigmoid(Z_1 + b_1.T)
Z_2 = A_1 @ W_2.T
A_2 = Y = sigmoid(Z_2 + b_2.T)
return A_2, Z_2, A_1, Z_1
Y_hat, _, _, _ = forward_prop(X=X, W_1=W_1, b_1=b_1, W_2=W_2, b_2=b_2)
def derivatives_by_backprop(
X: np.array,
Y: np.array,
W_1: np.array,
b_1: np.array,
W_2: np.array,
b_2: np.array,
) -> Tuple:
"""Calculates the derivatives of the parameters by backforward propagation.
Here we assume it is a binary classification problem, with sigmoid activation functions.
"""
# forward propagation
dW_2, db_2, dW_1, db_1 = 0, 0, 0, 0
Y_hat, Z_2, A_1, Z_1 = forward_prop(X=X, W_1=W_1, b_1=b_1, W_2=W_2, b_2=b_2)
n = len(Y_hat)
loss = -np.mean(np.multiply(Y, np.log(Y_hat)) + np.multiply(1 - Y, np.log(1 - Y_hat)))
dZ_2 = Y_hat - Y
dW_2 = dZ_2.T @ A_1 / n
db_2 = np.mean(dZ_2.T, axis=1, keepdims=True)
dZ_1 = np.multiply(dZ_2 @ W_2, np.multiply(A_1, 1 - A_1))
dW_1 = (dZ_1.T @ X) / n
db_1 = np.mean(dZ_1.T, axis=1, keepdims=True)
return dW_2, db_2, dW_1, db_1, loss
dW_2, db_2, dW_1, db_1, loss = derivatives_by_backprop(X=X, Y=Y, W_1=W_1, b_1=b_1, W_2=W_2, b_2=b_2)
def gradient_descent(
X: np.array,
Y: np.array,
W_1_init: np.array,
b_1_init: np.array,
W_2_init: np.array,
b_2_init: np.array,
learning_rate: float = 0.01,
epsilon: float = 1e-6,
verbose: bool = False,
) -> Tuple:
"""Runs gradient descent to fit the NN via backprop."""
W_1 = W_1_init
b_1 = b_1_init
W_2 = W_2_init
b_2 = b_2_init
losses = [float("inf"), ]
roc_auc_scores = [0.5, ]
diff_in_loss = float("inf")
iteration = 0
while abs(diff_in_loss) > epsilon:
iteration += 1
dW_2, db_2, dW_1, db_1, loss = derivatives_by_backprop(
X=X, Y=Y, W_1=W_1, b_1=b_1, W_2=W_2, b_2=b_2
)
W_1 -= learning_rate * dW_1
b_1 -= learning_rate * db_1
W_2 -= learning_rate * dW_2
b_2 -= learning_rate * db_2
losses.append(loss)
diff_in_loss = losses[-1] - losses[-2]
Y_hat, _, _, _ = forward_prop(X=X, W_1=W_1, b_1=b_1, W_2=W_2, b_2=b_2)
roc_auc = roc_auc_score(y_true=Y, y_score=Y_hat)
roc_auc_scores.append(roc_auc)
if verbose and iteration % 10 == 0:
print(loss, roc_auc)
return W_1, b_1, W_2, b_2, losses
# parameters for the first layer
np.random.seed(42)
W_1_init = np.random.normal(size=(3, X.shape[1]))
b_1_init = np.random.normal(size=(3, 1))
# parameters for the second layer
W_2_init = np.random.normal(size=(1, 3))
b_2_init = np.random.normal(size=(1, 1))
W_1, b_1, W_2, b_2, losses = gradient_descent(
X=X,
Y=Y,
W_1_init=W_1_init,
b_1_init=b_1_init,
W_2_init=W_2_init,
b_2_init=b_2_init,
learning_rate=0.1,
epsilon=1e-3,
verbose=True,
)
# evaluate the model on the test set
Y_test_hat, _, _, _ = forward_prop(X=X_test, W_1=W_1, b_1=b_1, W_2=W_2, b_2=b_2)
roc_auc_score(y_true=Y_test, y_score=Y_test_hat)
# train a NN with Keras
from tensorflow import keras
from tensorflow.keras import layers
def keras_model(nn_size: int, num_features: int, num_layers: int):
"""Creates a simple Keras model."""
inputs = keras.Input(
shape=(num_features, ), name="inputs")
x = inputs
for i in range(num_layers):
x = layers.Dense(
nn_size, activation="sigmoid", name=f"desnse_layer_{i}")(x)
outputs = layers.Dense(
1, activation="sigmoid", name="output")(x)
model = keras.Model(
inputs=inputs, outputs=outputs, name="simple_model")
model.compile(
optimizer="adam",
loss="binary_crossentropy",
metrics=["AUC"])
return model
model = keras_model(nn_size=3, num_features=X.shape[1], num_layers=1)
history = model.fit(
x=X,
y=Y,
batch_size=32,
epochs=20,
validation_data=(X_test, Y_test),
verbose=1,
shuffle=True,
)
# evaluate the model on the test set
roc_auc_score(y_true=Y_test, y_score=model.predict(X_test))
###Output
_____no_output_____ |
Notebooks/01_ObjDetect.ipynb | ###Markdown
1.0 Object Detection ApplicationThe DeepStream SDK offers a complete set of sample reference applications and pre-trained neural networks to jump-start development. In this lab, you'll work with the `deepstream-test1` reference application to find objects in a video stream, annotate them with bounding boxes, and output the annotated stream along with a count of the objects found. You'll follow the steps below to build your own applications based on the reference app:1.1 **[Build a Basic DeepStream Pipeline](01_overview)** 1.1.1 [Sample Application `deepstream-test1`](test1) 1.1.2 [Sample Application plus RTSP - `deepstream-test1-rtsp-out`](rtsp) 1.1.3 [Exercise: Build and Run the Base Application](01_ex_base)1.2 **[Configure an Object Detection Model](01_change_objects)** 1.2.1 [Gst-nvinfer Configuration File](01_config) 1.2.2 [Exercise: Detect Only Two Object Types](01_ex_change)1.3 **[Modify Metadata to Perform Analysis](01_count_objects)** 1.3.1 [Extracting Metadata with a GStreamer Probe](01_probe) 1.3.2 [Exercise: Count Vehicles and Bikes](01_ex_count)1.4 **[Put It All Together](01_final)** 1.4.1 [Exercise: Detect and Count three Object Types](01_ex_challenge) 1.1 Build a Basic DeepStream PipelineThe framework used to build a DeepStream application is a GStreamer **pipeline** consisting of a video input stream, a series of **elements** or **plugins** to process the stream, and an insightful output stream. Each plugin has a defined input, also called its **sink**, and defined output, known as its **source**. In the pipeline, the source pad of one plugin connects to the sink pad of the next in line. The source includes information extracted from the processing, the **metadata**, which can be used for annotation of the video and other insights about the input stream. 1.1.1 Sample Application - `deepstream-test1`The DeepStream SDK includes plugins for building a pipeline, and some reference test applications. For example, the `deepstream_test1` application can take a street scene video file as input, use object detection to find vehicles, people, bicycles, and road signs within the video, and output a video stream with bounding boxes around the objects found. The reference test applications are in the `deepstream_sdk_v4.0.2_jetson/sources/apps/sample_apps/` directory. You can take a look at the C code for the `deepstream-test1` app at [deepstream_sdk_v4.0.2_jetson/sources/apps/sample_apps/deepstream-test1/deepstream_test1_app.c](../deepstream_sdk_v4.0.2_jetson/sources/apps/sample_apps/deepstream-test1/deepstream_test1_app.c)Looking at the code, we can find where all the plugins are instantiated in `main` using the `gst_element_factory_make` method. This is a good way to see exactly which plugins are in the pipeline *(Note: the sample snippets below are abbreviated code for clarity purposes)*: ```c... /* Create gstreamer elements */ /* Create Pipeline element that will form a connection of other elements */ pipeline = gst_pipeline_new ("dstest1-pipeline"); /* Source element for reading from the file */ source = gst_element_factory_make ("filesrc", "file-source"); /* Since the data format in the input file is elementary h264 stream, * we need a h264parser */ h264parser = gst_element_factory_make ("h264parse", "h264-parser"); /* Use nvdec_h264 for hardware accelerated decode on GPU */ decoder = gst_element_factory_make ("nvv4l2decoder", "nvv4l2-decoder"); /* Create nvstreammux instance to form batches from one or more sources. 
*/ streammux = gst_element_factory_make ("nvstreammux", "stream-muxer"); /* Use nvinfer to run inferencing on decoder's output, * behaviour of inferencing is set through config file */ pgie = gst_element_factory_make ("nvinfer", "primary-nvinference-engine"); /* Use convertor to convert from NV12 to RGBA as required by nvosd */ nvvidconv = gst_element_factory_make ("nvvideoconvert", "nvvideo-converter"); /* Create OSD to draw on the converted RGBA buffer */ nvosd = gst_element_factory_make ("nvdsosd", "nv-onscreendisplay"); /* Finally render the osd output */ transform = gst_element_factory_make ("nvegltransform", "nvegl-transform"); sink = gst_element_factory_make ("nveglglessink", "nvvideo-renderer");...``` We see that the input is a file source, `filesrc`, in H.264 video format, which is decoded and then run through the `nvinfer` inference engine to detect objects. A buffer is created with `nvvideoconvert` so that bounding boxes can be overlaid on the video images with the `nvdsosd` plugin. Finally, the output is rendered. 1.1.2 Sample Application plus RTSP - `deepstream-test1-rtsp-out`For the purposes of this lab, which runs headless on a Jetson Nano connected to a laptop, the video stream must be converted to a format that can be transferred to the laptop media player. This is accomplished by customizing the sample app with additional plugins and some logic. Some specific customized apps are included in this lab in the `dli_apps` directory. Take a look at the C code in [/home/dlinano/deepstream_sdk_v4.0.2_jetson/sources/apps/dli_apps/deepstream-test1-rtsp_out/deepstream_test1_app.c](../deepstream_sdk_v4.0.2_jetson/sources/apps/dli_apps/deepstream-test1-rtsp_out/deepstream_test1_app.c).Scrolling down to `main`, we can see that there are a few differences in the rendering plugins used for the RTSP protocol transfer of the video stream *(Note: the sample snippets below are abbreviated code for clarity purposes)*: ```c... /* Finally render the osd output */ transform = gst_element_factory_make ("nvvideoconvert", "transform"); cap_filter = gst_element_factory_make ("capsfilter", "filter"); caps = gst_caps_from_string ("video/x-raw(memory:NVMM), format=I420"); g_object_set (G_OBJECT (cap_filter), "caps", caps, NULL); encoder = gst_element_factory_make ("nvv4l2h264enc", "h264-encoder"); rtppay = gst_element_factory_make ("rtph264pay", "rtppay-h264"); g_object_set (G_OBJECT (encoder), "bitrate", 4000000, NULL);ifdef PLATFORM_TEGRA g_object_set (G_OBJECT (encoder), "preset-level", 1, NULL); g_object_set (G_OBJECT (encoder), "insert-sps-pps", 1, NULL); g_object_set (G_OBJECT (encoder), "bufapi-version", 1, NULL);endif sink = gst_element_factory_make ("udpsink", "udpsink");...``` The plugins are put in a pipeline bin with the `gst_bin_add_many()` methods :```c... /* Set up the pipeline */ /* we add all elements into the pipeline */ gst_bin_add_many (GST_BIN (pipeline), source, h264parser, decoder, streammux, pgie, nvvidconv, nvosd, transform, cap_filter, encoder, rtppay, sink, NULL);...```Next, a sink pad (input) for the `streammux` element is created and linked to the `decoder` source pad (output):```c... GstPad *sinkpad, *srcpad; gchar pad_name_sink[16] = "sink_0"; gchar pad_name_src[16] = "src"; sinkpad = gst_element_get_request_pad (streammux, pad_name_sink); if (!sinkpad) { g_printerr ("Streammux request sink pad failed. Exiting.\n"); return -1; } srcpad = gst_element_get_static_pad (decoder, pad_name_src); if (!srcpad) { g_printerr ("Decoder request src pad failed. 
Exiting.\n"); return -1; } if (gst_pad_link (srcpad, sinkpad) != GST_PAD_LINK_OK) { g_printerr ("Failed to link decoder to stream muxer. Exiting.\n"); return -1; }...```Finally, the elements are linked together using the `gst_element_link_many()` method. The start of the pipeline through the `decoder` are linked together, and the `streammux` and beyond are linked together, to form the entire pipeline.```c... /* we link the elements together */ /* file-source -> h264-parser -> nvh264-decoder -> * nvinfer -> nvvidconv -> nvosd -> video-renderer */ if (!gst_element_link_many (source, h264parser, decoder, NULL)) { g_printerr ("Elements could not be linked: 1. Exiting.\n"); return -1; } if (!gst_element_link_many (streammux, pgie, nvvidconv, nvosd, transform, cap_filter, encoder, rtppay, sink, NULL)) { g_printerr ("Elements could not be linked: 2. Exiting.\n"); return -1; }...``` In summary, the pipeline for this app consists of the following plugins (ordered):- `GstFileSrc` - reads the video data from file- `GstH264Parse` - parses the incoming H264 stream- `Gst-nvv4l2decoder` - hardware accelerated decoder; decodes video streams using NVDEC- `Gst-nvstreammux` - batch video streams before sending for AI inference- `Gst-nvinfer` - runs inference using TensorRT- `Gst-nvvideoconvert` - performs video color format conversion (I420 to RGBA)- `Gst-nvdsosd` - draw bounding boxes, text and region of interest (ROI) polygons- `Gst-nvvideoconvert` - performs video color format conversion (RGBA to I420)- `GstCapsFilter` - enforces limitations on data (no data modification)- `Gst-nvv4l2h264enc` - encodes RAW data in I420 format to H264- `GstRtpH264Pay` - converts H264 encoded Payload to RTP packets (RFC 3984)- `GstUDPSink` - sends UDP packets to the network. When paired with RTP payloader (`Gst-rtph264pay`) it can implement RTP streaming 1.1.3 Exercise: Build and Run the Base Application In the `deepstream-test1` example, object detection is performed on a per-frame-basis. Counts for `Vehicle` and `Person` objects are also tracked. Bounding boxes are drawn around the objects identified, and a counter display is overlayed in the upper left corner of the video. Build the DeepStream appExecute the following cell to build the application:- Click on the cell to select it- Press [SHIFT][ENTER] or [CONTROL][ENTER] on your keyboard to execute the instructions in the code cell. Alternatively, you can click the run button at the top of the notebook.
###Code
# Build the app
%cd /home/dlinano/deepstream_sdk_v4.0.2_jetson/sources/apps/dli_apps/deepstream-test1-rtsp_out
!make clean
!make
###Output
/home/dlinano/deepstream_sdk_v4.0.2_jetson/sources/apps/dli_apps/deepstream-test1-rtsp_out
rm -rf deepstream_test1_app.o deepstream-test1-app
cc -c -o deepstream_test1_app.o -DPLATFORM_TEGRA -I../../../includes `pkg-config --cflags gstreamer-1.0` deepstream_test1_app.c
cc -o deepstream-test1-app deepstream_test1_app.o `pkg-config --libs gstreamer-1.0` -L/opt/nvidia/deepstream/deepstream-4.0/lib/ -lnvdsgst_meta -lnvds_meta -lgstrtspserver-1.0 -Wl,-rpath,/opt/nvidia/deepstream/deepstream-4.0/lib/
###Markdown
Run the DeepStream appOpen the VLC media player on your laptop:- Click "Media" and open the "Open Network Stream" dialog- Set the URL to `rtsp://192.168.55.1:8554/ds-test`- Start execution of the cell below- Click "Play" on your VLC media player right after you start executing the cell. The stream will start shortly from the Jetson Nano and display in the media player. If you find you've missed it due to a time out in the media player, try the process again, this time waiting a little longer before starting the media player.
###Code
# Run the app
%cd /home/dlinano/deepstream_sdk_v4.0.2_jetson/sources/apps/dli_apps/deepstream-test1-rtsp_out
!./deepstream-test1-app /home/dlinano/deepstream_sdk_v4.0.2_jetson/samples/streams/sample_720p.h264
###Output
/home/dlinano/deepstream_sdk_v4.0.2_jetson/sources/apps/dli_apps/deepstream-test1-rtsp_out
*** DeepStream: Launched RTSP Streaming at rtsp://localhost:8554/ds-test ***
Now playing: /home/dlinano/deepstream_sdk_v4.0.2_jetson/samples/streams/sample_720p.h264
Opening in BLOCKING MODE
Opening in BLOCKING MODE
Creating LL OSD context new
Running...
NvMMLiteOpen : Block : BlockType = 261
NVMEDIA: Reading vendor.tegra.display-size : status: 6
NvMMLiteBlockCreate : Block : BlockType = 261
Creating LL OSD context new
NvMMLiteOpen : Block : BlockType = 4
===== NVMEDIA: NVENC =====
NvMMLiteBlockCreate : Block : BlockType = 4
H264: Profile = 66, Level = 0 bjects = 5 Vehicle Count = 3 Person Count = 2
Frame Number = 183 Number of objects = 5 Vehicle Count = 1 Person Count = 4
(deepstream-test1-app:13839): GLib-GObject-WARNING **: 04:47:52.292: g_object_get_is_valid_property: object class 'GstUDPSrc' has no property named 'pt'
Frame Number = 1441 Number of objects = 0 Vehicle Count = 0 Person Count = 0
End of stream
Returned, stopping playback
Deleting pipeline
###Markdown
1.2 Configure an Object Detection Model The sample application shows counts for two types of objects: `Vehicle` and `Person`. However, the model that is used can actually detect four types of objects, as revealed in the application C code (line 46):```cgchar pgie_classes_str[4][32] = { "Vehicle", "TwoWheeler", "Person", "Roadsign"};``` 1.2.1 `Gst-nvinfer` Configuration FileThis information is specific to the model used for the inference, which in this case is a sample model provided with the DeepStream SDK. The `Gst-nvinfer` plugin employs a configuration file to specify the model and various properties. Open the configuration file for the app we are using at [/home/dlinano/deepstream_sdk_v4.0.2_jetson/sources/apps/dli_apps/deepstream-test1-rtsp_out/dstest1_pgie_config.txt](../deepstream_sdk_v4.0.2_jetson/sources/apps/dli_apps/deepstream-test1-rtsp_out/dstest1_pgie_config.txt). The `Gst-nvinfer` configuration file uses a “Key File” format, with details on key names found in the DeepStream Plugin Manual (use the link provided in the class pages for more details). - The **\[property\]** group configures the general behavior of the plugin. It is the only mandatory group.- The **\[class-attrs-all\]** group configures detection parameters for all classes.- The **\[class-attrs-\\]** group configures detection parameters for a class specified by \. For example, the \[class-attrs-2\] group configures detection parameters for class ID 2\. This type of group has the same keys as \[class-attrs-all\]. Note that the number of classes and the ordered `labels.txt` file are specified in the \[property\] group along with the model engine. For this exercise, we are more interested in configuring the \[class-attrs-all\] and \[class-attrs-\\] groups. In the sample, we see the following: ```c[class-attrs-all]threshold=0.2eps=0.2group-threshold=1``` The `threshold=0.2` key sets the detection confidence score. This tells us that all objects with a 20% confidence score or better will be marked as detected. If the threshold were greater than 1.0, then no objects could ever be detected. This "all" grouping is not granular enough if we only want to detect a subset of the objects possible, or if we want to use a different confidence level with different objects. For example, we might want to detect only vehicles, or we might want to identify people with a different confidence level than road signs. To specify a threshold for the four individual objects available in this model, add a specific group to the config file for each class: * \[class-attrs-0\] for vehicles- \[class-attrs-1\] for bicycles- \[class-attrs-2\] for persons- \[class-attrs-3\] for road signsIn each group, we can now specify the threshold value. This will be used to determine object detection for each of the four object categories individually. 1.2.2 Exercise: Detect Only Two Object TypesCreate a new app based on `deepstream-test1-rtsp_out` that detects **only** cars and bicycles. Start by copying the existing app to a new workspace.
###Code
# Create a new app located at /home/dlinano/deepstream_sdk_v4.0.2_jetson/sources/apps/my_apps/dst1-two-objects
# based on deepstream-test1-rtsp_out
%cd /home/dlinano/deepstream_sdk_v4.0.2_jetson/sources/apps
!mkdir -p my_apps/dst1-two-objects
!cp -rfv dli_apps/deepstream-test1-rtsp_out/* my_apps/dst1-two-objects
###Output
/home/dlinano/deepstream_sdk_v4.0.2_jetson/sources/apps
'dli_apps/deepstream-test1-rtsp_out/Makefile' -> 'my_apps/dst1-two-objects/Makefile'
'dli_apps/deepstream-test1-rtsp_out/README' -> 'my_apps/dst1-two-objects/README'
'dli_apps/deepstream-test1-rtsp_out/deepstream-test1-app' -> 'my_apps/dst1-two-objects/deepstream-test1-app'
'dli_apps/deepstream-test1-rtsp_out/deepstream_test1_app.c' -> 'my_apps/dst1-two-objects/deepstream_test1_app.c'
'dli_apps/deepstream-test1-rtsp_out/deepstream_test1_app.o' -> 'my_apps/dst1-two-objects/deepstream_test1_app.o'
'dli_apps/deepstream-test1-rtsp_out/dstest1_pgie_config.txt' -> 'my_apps/dst1-two-objects/dstest1_pgie_config.txt'
###Markdown
Using what you just learned, modify the [configuration file](../deepstream_sdk_v4.0.2_jetson/sources/apps/my_apps/dst1-two-objects/dstest1_pgie_config.txt) in your new app to only detect cars and bicycles. You will need to add *class-specific groups* for each of the four classes to the end of your configuration file.Class-specific example: ``` Per class configuration car [class-attrs-0] threshold=0.2 ```Then, build and run the app to see if it worked!
###Code
# Build the app
%cd /home/dlinano/deepstream_sdk_v4.0.2_jetson/sources/apps/my_apps/dst1-two-objects
!make clean
!make
# Run the app
%cd /home/dlinano/deepstream_sdk_v4.0.2_jetson/sources/apps/my_apps/dst1-two-objects
!./deepstream-test1-app /home/dlinano/deepstream_sdk_v4.0.2_jetson/samples/streams/sample_720p.h264
###Output
/home/dlinano/deepstream_sdk_v4.0.2_jetson/sources/apps/my_apps/dst1-two-objects
*** DeepStream: Launched RTSP Streaming at rtsp://localhost:8554/ds-test ***
Now playing: /home/dlinano/deepstream_sdk_v4.0.2_jetson/samples/streams/sample_720p.h264
Opening in BLOCKING MODE
Opening in BLOCKING MODE
Creating LL OSD context new
Running...
NvMMLiteOpen : Block : BlockType = 261
NVMEDIA: Reading vendor.tegra.display-size : status: 6
NvMMLiteBlockCreate : Block : BlockType = 261
(deepstream-test1-app:15542): GLib-GObject-WARNING **: 06:01:08.483: g_object_get_is_valid_property: object class 'GstUDPSrc' has no property named 'pt'
Creating LL OSD context new
NvMMLiteOpen : Block : BlockType = 4
===== NVMEDIA: NVENC =====
NvMMLiteBlockCreate : Block : BlockType = 4
H264: Profile = 66, Level = 0 bjects = 3 Vehicle Count = 3 Person Count = 0
Frame Number = 1441 Number of objects = 0 Vehicle Count = 0 Person Count = 0
End of stream
Returned, stopping playback
Deleting pipeline
###Markdown
How did you do?If you see something like this image, you did it! If not, keep trying or take a peek at the solution code in the solutions directory. If you aren't satisfied with the detection of the bicycle, you can experiment with the confidence threshold value. 1.3 Modify Metadata to Perform Analysis The object detection is working well, but we are only counting the `Person` and `Vehicle` objects detected. We would like to show the counts for the bicycles instead of people. The `Gst-nvinfer` plugin finds objects and provides metadata about them as an output on its source pad, which is passed along through the pipeline. Using a GStreamer **probe**, we can take a look at the metadata and count the objects detected downstream. This extraction of the information occurs at the input, or "sink pad", of the `Gst-nvdsosd` plugin. 1.3.1 Extracting Metadata with a GStreamer ProbeThe `osd_sink_pad_buffer_probe` code in [the deepstream-test1 app](../deepstream_sdk_v4.0.2_jetson/sources/apps/dli_apps/deepstream-test1-rtsp_out/deepstream_test1_app.c) is a callback that is run each time there is new frame data. With this probe, we can snapshot the metadata coming into the `Gst-nvdsosd` plugin, and count the current objects. The metadata collected that we want to look at will be collected in `obj_meta`: ```cNvDsObjectMeta *obj_meta = NULL;```The `NvDsObjectMeta` data structure includes an element for the `class_id`. This is the same class number used in the config file to identify object types: * 0 for vehicles* 1 for bicycles* 2 for persons* 3 for road signs The _for_ loop in the probe checks the `obj_meta->class_id` value for every object in the frame and counts them as needed. ```cdefine PGIE_CLASS_ID_VEHICLE 0define PGIE_CLASS_ID_PERSON 2...static GstPadProbeReturnosd_sink_pad_buffer_probe (GstPad * pad, GstPadProbeInfo * info, gpointer u_data){ GstBuffer *buf = (GstBuffer *) info->data; guint num_rects = 0; NvDsObjectMeta *obj_meta = NULL; guint vehicle_count = 0; guint person_count = 0; NvDsMetaList * l_frame = NULL; NvDsMetaList * l_obj = NULL; NvDsDisplayMeta *display_meta = NULL; NvDsBatchMeta *batch_meta = gst_buffer_get_nvds_batch_meta (buf); for (l_frame = batch_meta->frame_meta_list; l_frame != NULL; l_frame = l_frame->next) { NvDsFrameMeta *frame_meta = (NvDsFrameMeta *) (l_frame->data); int offset = 0; for (l_obj = frame_meta->obj_meta_list; l_obj != NULL; l_obj = l_obj->next) { obj_meta = (NvDsObjectMeta *) (l_obj->data); if (obj_meta->class_id == PGIE_CLASS_ID_VEHICLE) { vehicle_count++; num_rects++; } if (obj_meta->class_id == PGIE_CLASS_ID_PERSON) { person_count++; num_rects++; } }...``` The count for each is then added to a display buffer, which is then added to the frame metadata. ```c... display_meta = nvds_acquire_display_meta_from_pool(batch_meta); NvOSD_TextParams *txt_params = &display_meta->text_params[0]; display_meta->num_labels = 1; txt_params->display_text = g_malloc0 (MAX_DISPLAY_LEN); offset = snprintf(txt_params->display_text, MAX_DISPLAY_LEN, "Person = %d ", person_count); offset = snprintf(txt_params->display_text + offset , MAX_DISPLAY_LEN, "Vehicle = %d ", vehicle_count);... 
nvds_add_display_meta_to_frame(frame_meta, display_meta); } ...``` In summary, there are four places that require changes if we want to modify the counts:* Constants for the class ID values (similar to `PGIE_CLASS_ID_VEHICLE`)* Variables to track the counts (similar to `vehicle_count`* _if_ statements to check the objects and count them* `snprintf` statements to fill the buffer for displaying the counts 1.3.2 Exercise: Count Vehicles and BikesCreate a new app based on `deepstream-test1-rtsp_out` that shows counts for vehicles and bicycles. Fill in the following cells with appropriate commands to create, build, and run your app. To edit your files, use the JupyterLab file browser at left to navigate to the correct folder; then, double click on the file you wish to open and edit.
###Code
# TODO
# Create a new app located at /home/dlinano/deepstream_sdk_v4.0.2_jetson/sources/apps/my_apps/dst1-counts
# based on deepstream-test1-rtsp_out
%cd /home/dlinano/deepstream_sdk_v4.0.2_jetson/sources/apps
!mkdir -p my_apps/dst1-counts
!cp dli_apps/deepstream-test1-rtsp_out/* my_apps/dst1-counts/
%cd /home/dlinano/deepstream_sdk_v4.0.2_jetson/sources/apps
!pwd
# TODO
# Edit the C-code to count vehicles and bicycles
# Build the app
!make clean
!make
# TODO
# Run the app
# %cd my_apps/dst1-counts/
!./deepstream-test1-app /home/dlinano/deepstream_sdk_v4.0.2_jetson/samples/streams/sample_720p.h264
###Output
*** DeepStream: Launched RTSP Streaming at rtsp://localhost:8554/ds-test ***
Now playing: /home/dlinano/deepstream_sdk_v4.0.2_jetson/samples/streams/sample_720p.h264
Opening in BLOCKING MODE
Opening in BLOCKING MODE
Creating LL OSD context new
Running...
NvMMLiteOpen : Block : BlockType = 261
NVMEDIA: Reading vendor.tegra.display-size : status: 6
NvMMLiteBlockCreate : Block : BlockType = 261
Creating LL OSD context new
NvMMLiteOpen : Block : BlockType = 4
===== NVMEDIA: NVENC =====
NvMMLiteBlockCreate : Block : BlockType = 4
H264: Profile = 66, Level = 0 bjects = 3 Vehicle Count = 3 Bicycle Count = 0
Frame Number = 277 Number of objects = 6 Vehicle Count = 6 Bicycle Count = 0
(deepstream-test1-app:16888): GLib-GObject-WARNING **: 06:41:25.145: g_object_get_is_valid_property: object class 'GstUDPSrc' has no property named 'pt'
Frame Number = 1441 Number of objects = 0 Vehicle Count = 0 Bicycle Count = 0
End of stream
Returned, stopping playback
Deleting pipeline
###Markdown
How did you do?If you see something like this image, you did it! If not, keep trying or take a peek at the solution code in the solutions directory. You can also modify the `g_print` lines to provide bicycle count feedback while the stream is running. 1.4 Putting It All Together Great job! You've learned how to build a pipeline, detect various objects, and probe/modify the metadata to count the objects. It's time to put what you've learned about objects and metadata into one new app. 1.4.1 Exercise: Detect and Count Three Object TypesCreate a new app based on `deepstream-test1-rtsp_out` that detects and shows counts for only three kinds of objects: persons, vehicles, and bicycles. Adjust the confidence values if needed for each. Fill in the following cells with appropriate commands to create, build, and run your app.
###Code
# TODO
# Create a new app located at /home/dlinano/deepstream_sdk_v4.0.2_jetson/sources/apps/my_apps/dst1-three-things
%cd /home/dlinano/deepstream_sdk_v4.0.2_jetson/sources/apps
!mkdir -p my_apps/dst1-three-things
!cp -r dli_apps/deepstream-test1-rtsp_out/* my_apps/dst1-three-things/
# TODO
# Edit the C-code to include counts for Persons, Vehicles, and Bikes
# Hint: For the offset in the display, you will need to account for two different offsets to properly place the third value.
# Build the app
# %cd my_apps/dst1-three-things
!make clean
!make
!pwd
%cd /home/dlinano/deepstream_sdk_v4.0.2_jetson/sources/apps/my_apps/dst1-counts
# TODO
# Run the app
!./deepstream-test1-app /home/dlinano/deepstream_sdk_v4.0.2_jetson/samples/streams/sample_720p.h264
###Output
*** DeepStream: Launched RTSP Streaming at rtsp://localhost:8554/ds-test ***
Now playing: /home/dlinano/deepstream_sdk_v4.0.2_jetson/samples/streams/sample_720p.h264
Opening in BLOCKING MODE
Opening in BLOCKING MODE
Creating LL OSD context new
Running...
NvMMLiteOpen : Block : BlockType = 261
NVMEDIA: Reading vendor.tegra.display-size : status: 6
NvMMLiteBlockCreate : Block : BlockType = 261
Creating LL OSD context new
NvMMLiteOpen : Block : BlockType = 4
===== NVMEDIA: NVENC =====
NvMMLiteBlockCreate : Block : BlockType = 4
H264: Profile = 66, Level = 0 bjects = 3 Vehicle Count = 3 Bicycle Count = 0
Frame Number = 233 Number of objects = 7 Vehicle Count = 6 Bicycle Count = 0
(deepstream-test1-app:11949): GLib-GObject-WARNING **: 21:14:08.325: g_object_get_is_valid_property: object class 'GstUDPSrc' has no property named 'pt'
Frame Number = 1441 Number of objects = 0 Vehicle Count = 0 Bicycle Count = 0
End of stream
Returned, stopping playback
Deleting pipeline
|
notebooks/Exploring Boston Weather Data.ipynb | ###Markdown
Exploring Boston Weather Data
We are presented with a messy, real-world dataset containing an entire year's worth of weather data from Boston, USA. Among other things, we'll be presented with variables that contain column names, column names that should be values, numbers coded as character strings, and values that are missing, extreme, and downright erroneous!
Get a feel for the data
Before diving into our data cleaning routine, we must first understand the basic structure of the data. This involves looking at things like the `class()` of the data object to make sure it's what we expect (generally a `data.frame`) in addition to checking its dimensions with `dim()` and the column names with `names()`.
###Code
weather = readRDS(gzcon(url('https://assets.datacamp.com/production/repositories/34/datasets/b3c1036d9a60a9dfe0f99051d2474a54f76055ea/weather.rds')))
###Output
_____no_output_____
###Markdown
Libraries
###Code
library(readr)
library(dplyr)
library(lubridate)
library(stringr)
library(installr)
library(tidyr)
# Verify that weather is a data.frame
class(weather)
# Check the dimensions
dim(weather)
# View the column names
names(weather)
###Output
_____no_output_____
###Markdown
We've confirmed that the object is a data frame with 286 rows and 35 columns.
Summarize the data
Next up is to look at some summaries of the data. This is where functions like `str()`, `glimpse()` from dplyr, and `summary()` come in handy.
###Code
# View the structure of the data
str(weather)
# Look at the structure using dplyr's glimpse()
glimpse(weather)
# View a summary of the data
summary(weather)
###Output
'data.frame': 286 obs. of 35 variables:
$ X : int 1 2 3 4 5 6 7 8 9 10 ...
$ year : int 2014 2014 2014 2014 2014 2014 2014 2014 2014 2014 ...
$ month : int 12 12 12 12 12 12 12 12 12 12 ...
$ measure: chr "Max.TemperatureF" "Mean.TemperatureF" "Min.TemperatureF" "Max.Dew.PointF" ...
$ X1 : chr "64" "52" "39" "46" ...
$ X2 : chr "42" "38" "33" "40" ...
$ X3 : chr "51" "44" "37" "49" ...
$ X4 : chr "43" "37" "30" "24" ...
$ X5 : chr "42" "34" "26" "37" ...
$ X6 : chr "45" "42" "38" "45" ...
$ X7 : chr "38" "30" "21" "36" ...
$ X8 : chr "29" "24" "18" "28" ...
$ X9 : chr "49" "39" "29" "49" ...
$ X10 : chr "48" "43" "38" "45" ...
$ X11 : chr "39" "36" "32" "37" ...
$ X12 : chr "39" "35" "31" "28" ...
$ X13 : chr "42" "37" "32" "28" ...
$ X14 : chr "45" "39" "33" "29" ...
$ X15 : chr "42" "37" "32" "33" ...
$ X16 : chr "44" "40" "35" "42" ...
$ X17 : chr "49" "45" "41" "46" ...
$ X18 : chr "44" "40" "36" "34" ...
$ X19 : chr "37" "33" "29" "25" ...
$ X20 : chr "36" "32" "27" "30" ...
$ X21 : chr "36" "33" "30" "30" ...
$ X22 : chr "44" "39" "33" "39" ...
$ X23 : chr "47" "45" "42" "45" ...
$ X24 : chr "46" "44" "41" "46" ...
$ X25 : chr "59" "52" "44" "58" ...
$ X26 : chr "50" "44" "37" "31" ...
$ X27 : chr "52" "45" "38" "34" ...
$ X28 : chr "52" "46" "40" "42" ...
$ X29 : chr "41" "36" "30" "26" ...
$ X30 : chr "30" "26" "22" "10" ...
$ X31 : chr "30" "25" "20" "8" ...
Rows: 286
Columns: 35
$ X <int> 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, ...
$ year <int> 2014, 2014, 2014, 2014, 2014, 2014, 2014, 2014, 2014, 2014,...
$ month <int> 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,...
$ measure <chr> "Max.TemperatureF", "Mean.TemperatureF", "Min.TemperatureF"...
$ X1 <chr> "64", "52", "39", "46", "40", "26", "74", "63", "52", "30.4...
$ X2 <chr> "42", "38", "33", "40", "27", "17", "92", "72", "51", "30.7...
$ X3 <chr> "51", "44", "37", "49", "42", "24", "100", "79", "57", "30....
$ X4 <chr> "43", "37", "30", "24", "21", "13", "69", "54", "39", "30.5...
$ X5 <chr> "42", "34", "26", "37", "25", "12", "85", "66", "47", "30.6...
$ X6 <chr> "45", "42", "38", "45", "40", "36", "100", "93", "85", "30....
$ X7 <chr> "38", "30", "21", "36", "20", "-3", "92", "61", "29", "30.6...
$ X8 <chr> "29", "24", "18", "28", "16", "3", "92", "70", "47", "30.77...
$ X9 <chr> "49", "39", "29", "49", "41", "28", "100", "93", "86", "30....
$ X10 <chr> "48", "43", "38", "45", "39", "37", "100", "95", "89", "29....
$ X11 <chr> "39", "36", "32", "37", "31", "27", "92", "87", "82", "29.8...
$ X12 <chr> "39", "35", "31", "28", "27", "25", "85", "75", "64", "29.8...
$ X13 <chr> "42", "37", "32", "28", "26", "24", "75", "65", "55", "29.8...
$ X14 <chr> "45", "39", "33", "29", "27", "25", "82", "68", "53", "29.9...
$ X15 <chr> "42", "37", "32", "33", "29", "27", "89", "75", "60", "30.1...
$ X16 <chr> "44", "40", "35", "42", "36", "30", "96", "85", "73", "30.1...
$ X17 <chr> "49", "45", "41", "46", "41", "32", "100", "85", "70", "29....
$ X18 <chr> "44", "40", "36", "34", "30", "26", "89", "73", "57", "29.8...
$ X19 <chr> "37", "33", "29", "25", "22", "20", "69", "63", "56", "30.1...
$ X20 <chr> "36", "32", "27", "30", "24", "20", "89", "79", "69", "30.3...
$ X21 <chr> "36", "33", "30", "30", "27", "25", "85", "77", "69", "30.3...
$ X22 <chr> "44", "39", "33", "39", "34", "25", "89", "79", "69", "30.4...
$ X23 <chr> "47", "45", "42", "45", "42", "37", "100", "91", "82", "30....
$ X24 <chr> "46", "44", "41", "46", "44", "41", "100", "98", "96", "30....
$ X25 <chr> "59", "52", "44", "58", "43", "29", "100", "75", "49", "29....
$ X26 <chr> "50", "44", "37", "31", "29", "28", "70", "60", "49", "30.1...
$ X27 <chr> "52", "45", "38", "34", "31", "29", "70", "60", "50", "30.2...
$ X28 <chr> "52", "46", "40", "42", "35", "27", "76", "65", "53", "29.9...
$ X29 <chr> "41", "36", "30", "26", "20", "10", "64", "51", "37", "30.2...
$ X30 <chr> "30", "26", "22", "10", "4", "-6", "50", "38", "26", "30.36...
$ X31 <chr> "30", "25", "20", "8", "5", "1", "57", "44", "31", "30.32",...
###Markdown
Now that we have a pretty good feel for how the table is structured, we'll take a look at some real observations!
Take a closer look
After understanding the structure of the data and looking at some brief summaries, it often helps to preview the actual data. The functions `head()` and `tail()` allow us to view the top and bottom rows of the data, respectively.
###Code
# View first 6 rows
head(weather)
# View first 15 rows
head(weather, n=15)
# View the last 6 rows
tail(weather)
# View the last 10 rows
tail(weather, n=10)
###Output
_____no_output_____
###Markdown
Let's tidy the data
Column names are values
The `weather` dataset suffers from one of the five most common symptoms of messy data: column names are values. In particular, the column names `X1-X31` represent days of the month, which should really be values of a new variable called `day`. The tidyr package provides the `gather()` function for exactly this scenario.
```r
gather(df, time, val, t1:t3)
```
> `gather()` allows us to select multiple columns to be gathered by using the `:` operator.
###Code
# Gather the columns
weather2 <- gather(weather, day, value, X1:X31, na.rm = TRUE)
# View the head
head(weather2)
###Output
_____no_output_____
###Markdown
Values are variable names
Our data suffer from a second common symptom of messy data: values are variable names. Specifically, values in the `measure` column should be variables (i.e. column names) in our dataset. The `spread()` function from tidyr is designed to help with this.
```r
spread(df2, time, val)
```
###Code
# First remove column of row names
without_x <- weather2[, -1]
# Spread the data
weather3 <- spread(without_x, measure, value)
# View the head
head(weather3)
###Output
_____no_output_____
###Markdown
This dataset is looking much better already!
Prepare the data for analysis
Clean up dates
Now that the weather dataset adheres to tidy data principles, the next step is to prepare it for analysis. We'll start by combining the `year`, `month`, and `day` columns and recoding the resulting character column as a `date`. We can use a combination of base R, stringr, and lubridate to accomplish this task.
###Code
# Remove X's from day column
weather3$day <- str_replace(weather3$day, 'X', '')
# Unite the year, month, and day columns
weather4 <- unite(weather3, date, year, month, day, sep = "-")
# Convert date column to proper date format using lubridates's ymd()
weather4$date <- ymd(weather4$date)
# Rearrange columns using dplyr's select()
weather5 <- select(weather4, date, Events, CloudCover:WindDirDegrees)
# View the head of weather5
head(weather5)
###Output
_____no_output_____
###Markdown
A closer look at column types
It's important for analysis that variables are coded appropriately. This is not yet the case with our weather data.
###Code
# View the structure of weather5
str(weather5)
# Examine the first 20 rows of weather5. Are most of the characters numeric?
head(weather5, 20)
# See what happens if we try to convert PrecipitationIn to numeric
as.numeric(weather5$PrecipitationIn)
###Output
'data.frame': 366 obs. of 23 variables:
$ date : Date, format: "2014-12-01" "2014-12-10" ...
$ Events : chr "Rain" "Rain" "Rain-Snow" "Snow" ...
$ CloudCover : chr "6" "8" "8" "7" ...
$ Max.Dew.PointF : chr "46" "45" "37" "28" ...
$ Max.Gust.SpeedMPH : chr "29" "29" "28" "21" ...
$ Max.Humidity : chr "74" "100" "92" "85" ...
$ Max.Sea.Level.PressureIn : chr "30.45" "29.58" "29.81" "29.88" ...
$ Max.TemperatureF : chr "64" "48" "39" "39" ...
$ Max.VisibilityMiles : chr "10" "10" "10" "10" ...
$ Max.Wind.SpeedMPH : chr "22" "23" "21" "16" ...
$ Mean.Humidity : chr "63" "95" "87" "75" ...
$ Mean.Sea.Level.PressureIn: chr "30.13" "29.5" "29.61" "29.85" ...
$ Mean.TemperatureF : chr "52" "43" "36" "35" ...
$ Mean.VisibilityMiles : chr "10" "3" "7" "10" ...
$ Mean.Wind.SpeedMPH : chr "13" "13" "13" "11" ...
$ MeanDew.PointF : chr "40" "39" "31" "27" ...
$ Min.DewpointF : chr "26" "37" "27" "25" ...
$ Min.Humidity : chr "52" "89" "82" "64" ...
$ Min.Sea.Level.PressureIn : chr "30.01" "29.43" "29.44" "29.81" ...
$ Min.TemperatureF : chr "39" "38" "32" "31" ...
$ Min.VisibilityMiles : chr "10" "1" "1" "7" ...
$ PrecipitationIn : chr "0.01" "0.28" "0.02" "T" ...
$ WindDirDegrees : chr "268" "357" "230" "286" ...
###Markdown
Column type conversions
`"T"` was used to denote a trace amount (i.e. too small to be accurately measured) of precipitation in the `PrecipitationIn` column. In order to coerce this column to numeric, we'll need to deal with this somehow. To keep things simple, we will just replace `"T"` with zero, as a string (`"0"`).
###Code
# Replace "T" with "0" (T = trace)
weather5$PrecipitationIn <- str_replace(weather5$PrecipitationIn, "T", "0")
# Convert characters to numerics
weather6 <- mutate_at(weather5, vars(CloudCover:WindDirDegrees), funs(as.numeric))
# Look at result
str(weather6)
###Output
Warning message:
"`funs()` is deprecated as of dplyr 0.8.0.
Please use a list of either functions or lambdas:
# Simple named list:
list(mean = mean, median = median)
# Auto named with `tibble::lst()`:
tibble::lst(mean, median)
# Using lambdas
list(~ mean(., trim = .2), ~ median(., na.rm = TRUE))
This warning is displayed once every 8 hours.
Call `lifecycle::last_warnings()` to see where this warning was generated."
###Markdown
It looks like our data are finally in the correct formats and organized in a logical manner! Now that our data are in the right form, we can begin the analysis.
Missing, extreme, and unexpected values
Find missing values
Before dealing with missing values in the data, it's important to find them and figure out why they exist in the first place.
> If the dataset is too big to look at all at once, like it is here, we will use `sum()` and `is.na()` to quickly size up the situation by counting the number of NA values.
The `summary()` function also comes in handy for identifying which variables contain the missing values. Finally, the `which()` function is useful for locating the missing values within a particular column.
###Code
# Count missing values
sum(is.na(weather6))
# Find missing values
summary(weather6)
# Find indices of NAs in Max.Gust.SpeedMPH
ind <- which(is.na(weather6$Max.Gust.SpeedMPH))
# Look at the full rows for records missing Max.Gust.SpeedMPH
weather6[ind, ]
###Output
_____no_output_____
###Markdown
In this situation it's unclear why these values are missing and there doesn't appear to be any obvious pattern to their missingness, so we'll leave them alone for now.
An obvious error
Besides missing values, we want to know if there are values in the data that are too extreme or bizarre to be plausible. A great way to start the search for these values is with `summary()`. Once implausible values are identified, they must be dealt with in an intelligent and informed way.
> Sometimes the best way forward is obvious and other times it may require some research and/or discussions with the original collectors of the data.
###Code
# Review distributions for all variables
summary(weather6)
# Find row with Max.Humidity of 1000
ind <- which(weather6$Max.Humidity==1000)
# Look at the data for that day
weather6[ind, ]
# Change 1000 to 100
weather6$Max.Humidity[ind] <- 100
###Output
_____no_output_____
###Markdown
Once you find obvious errors, it's not too hard to fix them if you know which values they should take.
Another obvious error
We've discovered and repaired one obvious error in the data, but it appears that there's another. Sometimes we get lucky and can infer the correct or intended value from the other data. For example, if you know the minimum and maximum values of a particular metric on a given day...
###Code
# Look at summary of Mean.VisibilityMiles
summary(weather6$Mean.VisibilityMiles)
# Get index of row with -1 value
ind <- which(weather6$Mean.VisibilityMiles == -1)
# Look at full row
weather6[ind,]
# Set Mean.VisibilityMiles to the appropriate value
weather6$Mean.VisibilityMiles[ind] <- 10
###Output
_____no_output_____
###Markdown
Our data are looking tidy. Just a quick sanity check left!
Check other extreme values
In addition to dealing with obvious errors in the data, we want to see if there are other extreme values. Besides the trusty `summary()` function, `hist()` is useful for quickly getting a feel for how different variables are distributed.
###Code
# Review summary of full data once more
summary(weather6)
# Look at histogram for MeanDew.PointF
hist(weather6$MeanDew.PointF)
# Look at histogram for Min.TemperatureF
hist(weather6$Min.TemperatureF)
# Compare to histogram for Mean.TemperatureF
hist(weather6$Mean.TemperatureF)
###Output
_____no_output_____
###Markdown
It looks like you have sufficiently tidied your data!
Finishing touches
Before officially calling our weather data clean, we want to put a couple of finishing touches on the data. These are a bit more subjective and may not be necessary for analysis, but they will make the data easier for others to interpret, which is generally a good thing.
There are a number of stylistic conventions in the R language. Depending on who you ask, these conventions may vary. Because the period (`.`) has special meaning in certain situations, we will be using underscores (`_`) to separate words in variable names. We also prefer all lowercase letters so that no one has to remember which letters are uppercase or lowercase.
Finally, the `events` column (renamed to be all lowercase in the first instruction) contains an empty string ("") for any day on which there was no significant weather event such as rain, fog, a thunderstorm, etc. However, if it's the first time you're seeing these data, it may not be obvious that this is the case, so it's best for us to be explicit and replace the empty strings with something more meaningful.
###Code
new_colnames = c("date", "events",
"cloud_cover", "max_dew_point_f",
"max_gust_speed_mph", "max_humidity",
"max_sea_level_pressure_in", "max_temperature_f",
"max_visibility_miles", "max_wind_speed_mph",
"mean_humidity", "mean_sea_level_pressure_in",
"mean_temperature_f", "mean_visibility_miles",
"mean_wind_speed_mph", "mean_dew_point_f",
"min_dew_point_f", "min_humidity",
"min_sea_level_pressure_in", "min_temperature_f",
"min_visibility_miles", "precipitation_in","wind_dir_degrees")
# Clean up column names
names(weather6) <- new_colnames
# Replace empty cells in events column
weather6$events[weather6$events == ""] <- "None"
# Print the first 6 rows of weather6
head(weather6)
tail(weather6)
str(weather6)
glimpse(weather6)
summary(weather6)
###Output
_____no_output_____ |
Try_DeepWalk.ipynb | ###Markdown
###Code
from google.colab import drive
drive.mount('/content/drive')
###Output
Go to this URL in a browser: https://accounts.google.com/o/oauth2/auth?client_id=947318989803-6bn6qk8qdgf4n4g3pfee6491hc0brc4i.apps.googleusercontent.com&redirect_uri=urn%3aietf%3awg%3aoauth%3a2.0%3aoob&response_type=code&scope=email%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdocs.test%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive.photos.readonly%20https%3a%2f%2fwww.googleapis.com%2fauth%2fpeopleapi.readonly
Enter your authorization code:
··········
Mounted at /content/drive
###Markdown
Data Parsing: I generated the graph using `seealsology`* https://densitydesign.github.io/strumentalia-seealsology/* It crawls any wiki page you type in and creates a graph for you to download.* I used the "environment protection" and "global warming" wiki pages
###Code
import networkx as nx
import pandas as pd
import numpy as np
import random
from tqdm import tqdm
import matplotlib.pyplot as plt
%matplotlib inline
###Output
_____no_output_____
###Markdown
Extract RandomWalk Sequences from the Graph
###Code
graph_df = pd.read_csv("environment_wiki_graph.tsv", sep = "\t")
graph_df.head()
graph_df['source'].value_counts()
# construct the graph
G = nx.from_pandas_edgelist(graph_df, "source", "target",
edge_attr=True, create_using=nx.Graph())
G
# The number of nodes
len(G)
def random_walk(source_node, sequence_len):
"""
    For a source node, get its neighbours that are not yet in the random-walk sequence,
    randomly select one of them as the next source node, and
    repeat until the maximum length is reached or there is no node left to choose.
    NOTE: No need to set a seed for random, because we want to generate
    a different sequence each time this function is called.
"""
random_walk_sequence = [source_node]
for i in range(sequence_len - 1):
target_nodes = list(G.neighbors(source_node))
target_nodes = list(set(target_nodes) - set(random_walk_sequence))
if len(target_nodes) == 0:
break
selected_node = random.choice(target_nodes)
random_walk_sequence.append(selected_node)
source_node = selected_node
return random_walk_sequence
random_walk('sustainable development', 7)
# get random_walk sequences for all the nodes
all_nodes = list(G.nodes)
random_walks = []
for n in tqdm(all_nodes): # tqdm is to show the progress
for i in range(4):
random_walks.append(random_walk(n, 10))
print(len(random_walks))
for seq in random_walks[4:10]:
print(seq)
###Output
['biodiversity', 'environmental protection', 'renewable resource', 'scarcity']
['biodiversity', 'natural resource management', 'environmentalism', 'religion and environmentalism']
['biodiversity', 'defaunation', 'anthropocene', 'anthropocentrism', 'holocene extinction', 'extinction rebellion']
['biodiversity', 'deforestation and climate change']
['carbon offset', 'gold standard (carbon offset standard)']
['carbon offset', 'environmental protection', 'renewable resource', 'exploitation of natural resources']
###Markdown
Train Skip-Gram* [Relevant Params for Word2Vec here](https://radimrehurek.com/gensim/models/word2vec.html#gensim.models.word2vec.Word2Vec)* Each node in the sequences gets converted to a vector
###Code
from gensim.models import Word2Vec
# window: maximum distance between the current node and the predicted node
# negative sampling with 10 random noise words
# learning rate decreases linearly from alpha to min_alpha
model = Word2Vec(window = 4, sg = 1, hs = 0,
negative = 10,
alpha=0.03, min_alpha=0.0007,
seed = 10)
model.build_vocab(random_walks, progress_per=2) # process 2 nodes before updating progress
# By default in Word2Vec, each node is represented by a fixed length 100
model.train(random_walks, total_examples = model.corpus_count, epochs=20, report_delay=1)
import warnings
warnings.filterwarnings('ignore')
# with trained model, find similar sequence
## But input here has to be in the vocabulary
model.similar_by_word('earth day')
model.similarity('carbon footprint', 'earth day')
###Output
_____no_output_____
###Markdown
Plot Sequences Similarity* Each node in the selected sequences is represented by a vector here; PCA converts these vectors into 2D points that are plotted to show the similarity between them.
###Code
from sklearn.decomposition import PCA
sequence_lst = []
for lst in random_walks[4:10]:
sequence_lst.extend(lst)
sequence_set = list(set(sequence_lst))
print(len(sequence_set))
sequence_set
X = model[sequence_set]
print(X.shape)
X
pca = PCA(n_components=2)
output_2d = pca.fit_transform(X)
output_2d
plt.figure(figsize=(9, 7))
plt.scatter(output_2d[:, 0], output_2d[:, 1], color='g')
for i, sequence in enumerate(sequence_set):
plt.annotate(sequence, xy=(output_2d[i, 0], output_2d[i, 1]), color='purple')
plt.title('Plot with PCA')
plt.show()
###Output
_____no_output_____
###Markdown
Replace PCA with t-SNE. Wondering whether there will be any structural difference.
###Code
from sklearn.manifold import TSNE
output_2d = TSNE(n_components=2).fit_transform(X)
output_2d
plt.figure(figsize=(9, 7))
plt.scatter(output_2d[:, 0], output_2d[:, 1], color='g')
for i, sequence in enumerate(sequence_set):
plt.annotate(sequence, xy=(output_2d[i, 0], output_2d[i, 1]), color='purple')
plt.title('Plot with t-SNE')
plt.show()
###Output
_____no_output_____ |
Chapter09/Recipe1-Add-Multiply-Features.ipynb | ###Markdown
Addition
###Code
# add the features
df['added_features'] = df[features].sum(axis=1)
df['added_features'].head()
# violin plot with added features
sns.violinplot(x="target", y="added_features", data=df)
plt.title('Added Features')
###Output
_____no_output_____
###Markdown
Product
###Code
# multiply the features
df['prod_features'] = df[features].prod(axis=1)
df['prod_features'].head()
# violin plot with product of features
sns.violinplot(x="target", y="prod_features", data=df)
plt.title('Product of Features')
###Output
_____no_output_____
###Markdown
Average
###Code
# mean of features
df['mean_features'] = df[features].mean(axis=1)
df['mean_features'].head()
# violin plot with mean of features
sns.violinplot(x="target", y="mean_features", data=df)
plt.title('Mean of Features')
###Output
_____no_output_____
###Markdown
Standard deviation
###Code
# standard deviation of features
df['std_features'] = df[features].std(axis=1)
df['std_features'].head()
# violin plot with std of features
sns.violinplot(x="target", y="std_features", data=df)
plt.title('Standard Deviation of Features')
###Output
_____no_output_____
###Markdown
Maximum
###Code
# maximum of features
df['max_features'] = df[features].max(axis=1)
df['max_features'].head()
# violin plot with max of features
sns.violinplot(x="target", y="max_features", data=df)
plt.title('Maximum of Features')
###Output
_____no_output_____
###Markdown
Minimum
###Code
# minimum of the features
df['min_features'] = df[features].min(axis=1)
df['min_features'].head()
# violin plot with min of features
sns.violinplot(x="target", y="min_features", data=df)
plt.title('Minimum of Features')
# Perform all the operations in one line
df_t = df[features].agg(['sum', 'prod','mean','std', 'max', 'min'], axis='columns')
df_t.head()
###Output
_____no_output_____ |
ml/pycryptobot.ipynb | ###Markdown
Python Crypto Bot (pycryptobot)
###Code
MARKET = 'BCH-GBP'
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
plt.rc('xtick', labelsize=15)
plt.rc('ytick', labelsize=15)
plt.rc('axes', titlesize=16)
sns.set_style('darkgrid')
###Output
_____no_output_____
###Markdown
**Load trading data CSV into a Pandas dataframe**
###Code
df = pd.read_csv('./data/' + MARKET + '_3600.csv')
###Output
_____no_output_____
###Markdown
**Display Pandas dataframe**
###Code
df
###Output
_____no_output_____
###Markdown
**Display Pandas dataframe column types**
###Code
df.dtypes
###Output
_____no_output_____
###Markdown
**We don't want null values, sum up the nulls per column**
###Code
df.isnull().sum()
###Output
_____no_output_____
###Markdown
**Fill nulls with a default value if required (which we don't).** **Drop null values (we don't have any).** **Convert all bool columns to int.** **Create dummy values if required, one hot encoding (not needed here).**
###Code
#df.rsi14.fillna(50, inplace = True)
#df.dropna(inplace = True)
def convert_bool(x):
if x == True:
return 1
elif x == False:
return 0
group_column_dtypes = df.columns.to_series().groupby(df.dtypes).groups
for k, v in group_column_dtypes.items():
if k == 'bool':
for column_name in v:
df[column_name] = df[column_name].map(convert_bool)
#df[column_name] = df[column_name].astype(int)
# one hot encoding for objects
#df = pd.get_dummies(df,columns= ['market'])
###Output
_____no_output_____
###Markdown
**Display Pandas dataframe**
###Code
df.dtypes
###Output
_____no_output_____
###Markdown
**Save processed dataframe**
###Code
df.to_csv('./data/' + MARKET + '_3600_processed.csv')
df_processed = pd.read_csv('./data/' + MARKET + '_3600_processed.csv')
###Output
_____no_output_____
###Markdown
**Inspect processed dataset**
###Code
df.head()
###Output
_____no_output_____
###Markdown
Feature Engineering Example: `df['NewColumn'] = df['ExistingColumn'].map(lambda x: ...)`; a small sketch of this pattern is shown below, followed by **Additional Examples**
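A minimal sketch of the `.map` pattern on this dataframe; `close_pc` is the close-price percent-change column already present in the data, while `close_up` is just an illustrative name:

```python
# Derive a simple engineered feature: 1 if the close price went up, else 0
df['close_up'] = df['close_pc'].map(lambda x: 1 if x > 0 else 0)
df[['close_pc', 'close_up']].head()
```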
###Code
df["goldencross"].value_counts()
df.groupby("goldencross").agg({"close_pc" : 'count', "close" : "mean"}).sort_values(by = "goldencross")
df.groupby("goldencross").agg({"close_pc" : 'count', "close" : "mean"}).sort_index()
df.groupby('goldencross').agg({"close_pc" : "mean"}).plot(kind = 'bar')
plt.figure(figsize = (12,10))
sns.heatmap(df.loc[:, [i for i in df.columns if "market" not in i]].corr(), annot=True)
plt.figure(figsize = (12,10))
sns.heatmap(df.loc[:, [
'close','volume','close_pc',
'sma20','sma200',
'ema12','ema26','ema12gtema26co','ema12ltema26co','ema12gtema26','ema12ltema26',
'macd','signal','macdgtsignal','macdltsignal',
'obv_pc',
'goldencross','deathcross']].corr(), annot=True)
sns.scatterplot(data=df, x="ema12", y="close")
sns.scatterplot(data=df, x="ema26", y="close")
df.plot()
df[['close','ema12','ema26']].plot()
plt.show()
###Output
_____no_output_____
###Markdown
Splitting the Data into Train and Test sets
###Code
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
df.columns
###Output
_____no_output_____
###Markdown
Seasonal ARIMA Model
###Code
from datetime import datetime
def parser(x):
return datetime.strptime(x, '%Y-%m-%d %H:%M:%S')
ts = pd.read_csv('./data/' + MARKET + '_3600_processed.csv', header=0, parse_dates=[1], index_col=1, squeeze=True, date_parser=parser)
ts = ts.drop("Unnamed: 0", axis=1)
ts = ts[['close']]
ts
from statsmodels.tsa.statespace.sarimax import SARIMAX
model = SARIMAX(ts['close'], trend='n', order=(0,1,0), seasonal_order=(1,1,1,12))
results_ARIMA = model.fit(disp=-1)
fitted_values = results_ARIMA.fittedvalues
plt.plot(ts['close'], label='original')
plt.plot(fitted_values, color='red', label='fitted')
plt.ylim(bottom=np.amin(ts['close']))
plt.title('RSS: %.4f' % sum((fitted_values-ts['close'])**2))
plt.legend()
plt.ylabel('Price')
plt.xticks(rotation=90)
plt.tight_layout()
from datetime import datetime, timedelta
start_date = ts.last_valid_index()
end_date = start_date + timedelta(days=1)
print (start_date, end_date)
pred = results_ARIMA.predict(start=str(start_date), end=str(end_date), dynamic=True)
pred
#plt.plot(pred, label='prediction')
#plt.ylabel('Price')
#plt.xlabel('Days')
#plt.xticks(rotation=90)
#plt.tight_layout()
###Output
2021-02-15 14:00:00 2021-02-16 14:00:00
###Markdown
Logistic Regression
###Code
df = pd.read_csv('./data/' + MARKET + '_3600_processed.csv')
df = df.drop("Unnamed: 0", axis=1)
df.head(5)
# Split into train and test sets
y = df['goldencross'] # must be a classification like a 1 or 0
X = df.loc[:, ['close']]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, shuffle=False)
df.shape, X_train.shape, X_test.shape
logreg = LogisticRegression()
type(logreg)
logreg.fit(X_train, y_train)
logreg.score(X_train, y_train), logreg.score(X_test, y_test)
y_hat = logreg.predict(X_test)
X_test_display = X_test.copy()
X_test_display['goldencross_predicted'] = y_hat
X_test_display['goldencross_actual'] = y_test
X_test_display.head()
from sklearn.metrics import confusion_matrix, classification_report
confusion_matrix(y_test, y_hat)
print(classification_report(y_test, y_hat))
df['goldencross'].value_counts(normalize=True)
###Output
_____no_output_____
###Markdown
Decision Tree Model
###Code
tree_model = DecisionTreeClassifier(max_depth = 3)
tree_model.fit(X_train, y_train)
tree_model.score(X_train, y_train), tree_model.score(X_test, y_test)
print(classification_report(y_test, y_hat))
###Output
_____no_output_____
###Markdown
Persistence Algorithm
###Code
from datetime import datetime
def parser(x):
return datetime.strptime(x, '%Y-%m-%d %H:%M:%S')
ts = pd.read_csv('./data/' + MARKET + '_3600_processed.csv', header=0, parse_dates=[1], index_col=1, squeeze=True, date_parser=parser)
ts = ts.drop("Unnamed: 0", axis=1)
ts = ts[['close']]
ts
###Output
_____no_output_____
###Markdown
Step 1: Define the Supervised Learning Problem
###Code
# Create lagged dataset
values = pd.DataFrame(ts.values)
df = pd.concat([values.shift(1), values], axis=1)
df.columns = ['t-1', 't+1']
print(df.head(5))
print(df.tail(5))
###Output
_____no_output_____
###Markdown
Step 2: Train and Test Sets
###Code
# Split into train and test sets
X = df.values
train_size = int(len(X) * 0.66)
train, test = X[1:train_size], X[train_size:]
train_X, train_y = train[:,0], train[:,1]
test_X, test_y = test[:,0], test[:,1]
X.shape, train.shape, test.shape
train_X
test_X
###Output
_____no_output_____
###Markdown
Step 3: Persistence Algorithm
###Code
def model_persistence(x):
    # Persistence (naive) model: the forecast for the next step is simply the last observed value
    return x
###Output
_____no_output_____
###Markdown
Step 4: Make and Evaluate Forecast
###Code
from sklearn.metrics import mean_squared_error
predictions = list()
for x in test_X:
yhat = model_persistence(x)
predictions.append(yhat)
test_score = mean_squared_error(test_y, predictions)
print('Test MSE: %.3f' % test_score)
###Output
_____no_output_____
###Markdown
Step 5: Plot Predictions and Expected Results
###Code
plt.plot(train_y)
plt.plot([None for i in train_y] + [x for x in test_y])
plt.plot([None for i in train_y] + [x for x in predictions])
plt.show()
predictions
df
###Output
_____no_output_____ |
jupyter_notebooks/good_bad_trols(1-4).ipynb | ###Markdown
Good and Bad Controls, Identifiability Checker applied to graphs G1 to G4Main Reference:* A Crash Course in Good and Bad Controls,by Carlos Cinelli, Andrew Forney and Judea PearlIn this notebook, we apply our Identifiability Checker to the 4 graphs G1 to G4 in the paper cited above.
###Code
# this makes sure it starts looking for things from the JudeasRx folder down.
import os
import sys
os.chdir('../')
sys.path.insert(0,os.getcwd())
print(os.getcwd())
import good_bad_trols as gb
# pots of in_bnet will be selected at random
import random
random.seed(871)
gb.run(gb.all_gnames[0:4],
num_1world_samples=1000,
num_worlds=100)
###Output
_____no_output_____ |
Analisis_de_lazo2.ipynb | ###Markdown
Mesh Analysis Example 10.4: Determine $V_0$ in the circuit of the figure, applying mesh analysis. Solution: As indicated in the figure, meshes 3 and 4 form a supermesh due to the current source between them. For mesh 1, KVL gives $$ -10 + (8 - j2) I_1 - (-j2) I_2 - 8 I_3 = 0 $$ that is, $$ (8 - j2) I_1 + j2 I_2 - 8I_3 = 10 \tag{1} $$ For mesh 2, $$ I_2 = -3 \tag{2} $$ For the supermesh, $$ (8 - j4) I_3 - 8 I_1 +(6 + j5) I_4 - j5 I_2 = 0 \tag{3} $$ Due to the current source between meshes 3 and 4, at node A, $$ I_4 = I_3 + 4 \tag{4} $$ Method 1: Instead of solving the four equations above, we reduce them to two by elimination. Combining equations (1) and (2), $$ (8 - j2) I_1 - 8 I_3 = 10 + j6 \tag{5} $$ Combining equations (2) to (4), $$ -8 I_1 + (14 + j) I_3 = -24 - j35 \tag{6} $$ From equations (5) and (6) we obtain the matrix equation $$\left[\begin{array}{cc}8 - j2 & -8 \\-8 & 14 + j\end{array}\right]\left[\begin{array}{c}I_1 \\I_3\end{array}\right]=\left[\begin{array}{c}10 + j6 \\-24 - j35\end{array}\right]$$ The following determinants are obtained: $$ \Delta = \left|\begin{array}{cc}8 - j2 & -8 \\-8 & 14 + j\end{array}\right| $$
###Code
# Import the numpy library
import numpy as np
M = np.array([ [8 - 2j , -8],[-8 , 14 + 1j] ])
Delta = np.linalg.det(M)
print('Delta = {:.1f}'.format(Delta))
###Output
Delta = 50.0-20.0j
###Markdown
$$ \Delta_1 =\left|\begin{array}{cc}10 + j6 & -8 \\-24 - j35 & 14 + j\end{array}\right|$$
###Code
M1 = np.array([ [10 + 6j , -8],[-24 - 35j , 14 + 1j] ])
Delta1 = np.linalg.det(M1)
print('Delta1 = {:.1f}'.format(Delta1))
###Output
Delta1 = -58.0-186.0j
###Markdown
The current $I_1$ is obtained as $$ I_1 = \frac{\Delta_1}{\Delta} $$
###Code
I1 = Delta1/Delta
print('I1 = {:.3f} A'.format(I1))
###Output
I1 = 0.283-3.607j A
###Markdown
The required voltage $V_0$ is
###Code
I2 = -3
V0 = -2j*(I1 - I2)
print('V0 = {:.3f} V'.format(V0))
import cmath, math
V0_pol = cmath.polar(V0)
print('V0 = (%.3f<%.3f rad) V'%V0_pol)
print('V0 = (%.3f<%.2f°) V'%(V0_pol[0],V0_pol[1]*180/math.pi))
###Output
V0 = (9.754<-2.403 rad) V
V0 = (9.754<-137.69°) V
###Markdown
Method 2: Python and the numpy library can be used to solve equations (1) to (4). First they are stated as $$\left[\begin{array}{cccc}8 - j2 & j2 & -8 & 0 \\0 & 1 & 0 & 0 \\-8 & -j5 & 8 - j4 & 6 + j5 \\0 & 0 & -1 & 1\end{array}\right]\left[\begin{array}{c}I_1 \\I_2 \\I_3 \\I_4\end{array}\right]=\left[\begin{array}{c}10 \\-3 \\0 \\4\end{array}\right] \tag{7a}$$ that is, $$ AI=B $$ Inverting $A$, we can obtain $I$ as $$ I = A^{-1} B \tag{7b} $$ Python is now applied as follows (using the numpy library):
###Code
A = np.array([ [8-2j , 2j , -8 , 0],
[0 , 1 , 0 , 0],
[-8 , -5j , 8 - 4j , 6 + 5j],
[0 , 0 , -1 , 1] ])
B = np.array([ [10] , [-3] , [0] , [4] ])
I = np.dot( np.linalg.inv(A) , B )
np.set_printoptions(precision=4 , suppress=True)
print(I)
I1 = I[0] ; I2 = I[1] ; I3 = I[2] ; I4 = I[3]
print('I1 = %s'%I1)
print('I2 = %s'%I2)
print('I3 = %s'%I3)
print('I4 = %s'%I4)
###Output
I1 = [0.2828-3.6069j]
I2 = [-3.+0.j]
I3 = [-1.869-4.4276j]
I4 = [2.131-4.4276j]
###Markdown
$$ V_0 = -2j \, (I_1 - I_2) $$
###Code
V0 = -2j*(I1 - I2)
print('V0 = %s V'%V0)
###Output
V0 = [-7.2138-6.5655j] V
###Markdown
As obtained previously.
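As a side note, the same system can be solved without forming $A^{-1}$ explicitly; a minimal sketch with `np.linalg.solve`, using the same $A$ and $B$ as above ($B$ written as a 1-D array here):

```python
import numpy as np

A = np.array([[8 - 2j, 2j, -8, 0],
              [0, 1, 0, 0],
              [-8, -5j, 8 - 4j, 6 + 5j],
              [0, 0, -1, 1]])
B = np.array([10, -3, 0, 4])

# Solves A I = B directly; numerically preferable to computing inv(A) first
I = np.linalg.solve(A, B)
V0 = -2j * (I[0] - I[1])  # V0 = -2j (I1 - I2)
print('V0 = {:.3f} V'.format(V0))
```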
###Code
# This cell sets the notebook style
from IPython.core.display import HTML
css_file = 'styles/aeropython.css'
HTML(open(css_file, "r").read())
###Output
_____no_output_____ |
reactions/search_neb.ipynb | ###Markdown
Nudged Elastic Band calculations
###Code
from tempfile import NamedTemporaryFile
from base64 import b64encode
# Imports used by the cells below; the AiiDA import path assumes aiida-core >= 1.0
import numpy as np
import matplotlib.pyplot as plt
import ipywidgets as ipw
import nglview
import ase.io
from collections import OrderedDict
from IPython.display import display, clear_output
from aiida.orm import QueryBuilder, WorkChainNode, StructureData
def render_thumbnail(atoms):
tmp = NamedTemporaryFile()
ase.io.write(tmp.name, atoms, format='png')
raw = open(tmp.name, 'rb').read()
tmp.close()
return b64encode(raw).decode()
def display_thumbnail(th):
return '<img width="400px" src="data:image/png;base64,{}" title="">'.format(th)
def html_thumbnail(th):
return ipw.HTML('<img width="400px" src="data:image/png;base64,{}" title="">'.format(th))
viewer = nglview.NGLWidget()
style = {'description_width': '120px'}
layout = {'width': '70%'}
slider_image_nr = ipw.IntSlider(description='image nr.:',
value=1, step=1,
min=1, max=2,
style=style, layout=layout)
all_ase=[]
def on_image_nr_change(c):
visualized_ase = all_ase[slider_image_nr.value-1]
refresh_structure_view(visualized_ase)
slider_image_nr.observe(on_image_nr_change, 'value')
clear_output()
display(ipw.VBox([slider_image_nr, viewer]))
def initialize_structure_view():
global viewer
if hasattr(viewer, "component_0"):
viewer.component_0.clear_representations()
viewer.component_0.remove_unitcell()
cid = viewer.component_0.id
viewer.remove_component(cid)
if len(all_ase)==0:
return
atoms = all_ase[0]
atoms.pbc = [1, 1, 1]
#From Kristjan
viewer.add_component(nglview.ASEStructure(atoms), default_representation=False) # adds ball+stick
#viewer.add_component(nglview.ASEStructure(slab_atoms)) # adds ball+stick
viewer.add_unitcell()
viewer.center()
viewer.component_0.add_ball_and_stick(aspectRatio=10.0, opacity=1.0)
# Orient camera to look from positive z
cell_z = atoms.cell[2, 2]
com = atoms.get_center_of_mass()
def_orientation = viewer._camera_orientation
top_z_orientation = [1.0, 0.0, 0.0, 0,
0.0, 1.0, 0.0, 0,
0.0, 0.0, -np.max([cell_z, 30.0]) , 0,
-com[0], -com[1], -com[2], 1]
viewer._set_camera_orientation(top_z_orientation)
def refresh_structure_view(viz_atoms):
global viewer
old_camera_orientation = viewer._camera_orientation
if hasattr(viewer, "component_0"):
viewer.component_0.clear_representations()
viewer.component_0.remove_unitcell()
cid = viewer.component_0.id
viewer.remove_component(cid)
# if len(all_ase)==0:
# return
#atoms = all_ase[im]
viz_atoms.pbc = [1, 1, 1]
#From Kristjan
viewer.add_component(nglview.ASEStructure(viz_atoms), default_representation=False) # adds ball+stick
#viewer.add_component(nglview.ASEStructure(slab_atoms)) # adds ball+stick
viewer.add_unitcell()
#viewer.center()
viewer.component_0.add_ball_and_stick(aspectRatio=10.0, opacity=1.0)
viewer._set_camera_orientation(old_camera_orientation)
def make_replica_html(structure_data_list, energies, distances):
html = '<table>'
n_col = 4
for i, (rep, en, dist) in enumerate(zip(structure_data_list, energies, distances)):
thumbnail = rep.get_extra('thumbnail')
# The table cell
if i%n_col == 0:
html += '<tr>'
html += '<td><img width="400px" src="data:image/png;base64,{}" title="">'.format(thumbnail)
# Output some information about the replica...
html += '<p><b>Nr: </b>{} <br> <b>Energy:</b> {:.6f} eV <br> <b>Dist. to prev:</b> {:.4f} ang</p>'\
.format(i, en, dist)
html += '<p>pk: {}</p>'.format(rep.pk)
# ... and the download link.
html += '<p><a target="_blank" href="../export_structure.ipynb?uuid={}">View & export</a></p><td>'\
.format(rep.uuid)
if i%n_col == n_col-1:
html += '</tr>'
html += '</tr>'
html += '</table>'
return html
def sorted_opt_rep_keys(keys):
return sorted([ (int(key.split('_')[2]), key) for key in keys if 'opt_replica' in key])
def process_and_show_neb(c):
global all_ase
structure_data_list = []
btn_show.disabled = True
with main_out:
clear_output()
wc = drop_nebs.value
for i_rep in range(wc.inputs['nreplicas'].value):
label = "opt_replica_%d" % i_rep
structure_data_list.append(wc.outputs[label])
energies_array = wc.outputs['replica_energies'].get_array('energies') * 27.211386245
distances_array = wc.outputs['replica_distances'].get_array('distances') * 0.529177
energies_array = np.array([e_arr - e_arr[0] for e_arr in energies_array])
#### --------------------------------------------------------------
## Add thumbnails to replicas if they are not already added
## ans store list of ASE structures for the viz
for rep in structure_data_list:
the_ase=rep.get_ase()
all_ase.append(the_ase)
if not "thumbnail" in rep.extras:
rep.set_extra("thumbnail", render_thumbnail(the_ase))
#### --------------------------------------------------------------
replica_html = make_replica_html(structure_data_list, energies_array[-1], distances_array[-1])
barrier_list = [np.max(e_arr) for e_arr in energies_array]
with main_out:
f, axarr = plt.subplots(1, 2, figsize=(14, 4))
axarr[0].plot(energies_array[-1], 'o-')
axarr[0].set_ylabel("Energy (eV)")
axarr[0].set_xlabel("Replica nr")
axarr[0].set_title("NEB energy profile")
axarr[1].plot(barrier_list, 'o-')
axarr[1].axhline(barrier_list[-1], linestyle='--', color='lightgray')
axarr[1].set_ylabel("Barrier (eV)")
axarr[1].set_xlabel("Iteration nr")
axarr[1].set_title("NEB convergence")
plt.show()
display(ipw.HTML(replica_html))
print("List of all replica PKs:")
rep_pk_str = "["
for struct in structure_data_list:
rep_pk_str += "%d " % struct.pk
print(rep_pk_str[:-1] + "]")
btn_show.disabled = False
slider_image_nr.max=len(all_ase)
initialize_structure_view()
qb = QueryBuilder()
qb.append(WorkChainNode,
filters={
'attributes.process_label': {'==': 'NEBWorkChain'},
'attributes.process_state': {'==': 'finished'},
},
tag='wc',
project='*'
)
qb.append(StructureData,
with_incoming='wc'
)
qb.order_by({WorkChainNode: {'id': 'desc'}})
neb_wcs = qb.all()
sel_options = OrderedDict([("PK %d: " % (neb_wc[0].pk) + neb_wc[0].description, neb_wc[0]) for neb_wc in neb_wcs])
style = {'description_width': '120px'}
layout = {'width': '70%'}
drop_nebs = ipw.Dropdown(options = sel_options,
description = 'NEB: ', layout=layout, style=style)
btn_show = ipw.Button(description="Show")
btn_show.on_click(process_and_show_neb)
main_out = ipw.Output()
display(drop_nebs, btn_show, main_out)
###Output
_____no_output_____ |
4.Handwriting recognition.ipynb | ###Markdown
Handwriting recognition
###Code
#Import tensorflow
import tensorflow as tf
#import dataset
mnist=tf.keras.datasets.mnist
#(x_train, y_train),(x_test,y_test) = mnist.load_data()
#Load data
(training_images, training_labels), (test_images, test_labels) = mnist.load_data()
import matplotlib.pyplot as plt
plt.imshow(training_images[32])
plt.subplot(221)
plt.imshow(training_images[0], cmap=plt.get_cmap('gray'))
plt.subplot(222)
plt.imshow(training_images[1], cmap=plt.get_cmap('gray'))
plt.subplot(223)
plt.imshow(training_images[2], cmap=plt.get_cmap('gray'))
plt.subplot(224)
plt.imshow(training_images[3], cmap=plt.get_cmap('gray'))
#print(training_labels[0])
#print(training_images[0])
plt.show()
training_images=training_images/255.0
test_images=test_images/255.0
# Plot ad hoc mnist instances
from keras.datasets import mnist
import matplotlib.pyplot as plt
plt.subplot(221)
plt.imshow(training_images[0], cmap=plt.get_cmap('gray'))
plt.subplot(222)
plt.imshow(training_images[1], cmap=plt.get_cmap('gray'))
plt.subplot(223)
plt.imshow(training_images[2], cmap=plt.get_cmap('gray'))
plt.subplot(224)
plt.imshow(training_images[3], cmap=plt.get_cmap('gray'))
# show the plot
plt.show()
#print(training_labels[0])
#print(training_images[0])
#all of the values in the number are between 0 and 255. If we are training a
#neural network, for various reasons it's easier if we treat all values as between 0 and 1, a process called 'normalizing'
training_images = training_images / 255.0
test_images = test_images / 255.0
model = tf.keras.models.Sequential([
tf.keras.layers.Flatten(input_shape=(28,28)),
tf.keras.layers.Dense(512, activation=tf.nn.relu),
tf.keras.layers.Dense(10, activation=tf.nn.softmax)
])
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy']
)
model.fit(training_images,training_labels, epochs=5)
model.evaluate(test_images, test_labels)
classifications = model.predict(test_images)
print(classifications[5])
print(test_labels[4],test_labels[6],test_labels[7],test_labels[25])
import matplotlib.pyplot as plt
#plt.imshow(test_images[4],cmap=plt.get_cmap('gray') )
plt.subplot(221)
plt.imshow(test_images[4], cmap=plt.get_cmap('gray'))
plt.subplot(222)
plt.imshow(test_images[6], cmap=plt.get_cmap('gray'))
plt.subplot(223)
plt.imshow(test_images[7], cmap=plt.get_cmap('gray'))
plt.subplot(224)
plt.imshow(test_images[25], cmap=plt.get_cmap('gray'))
# show the plot
#print(training_labels[0])
#print(training_images[0])
plt.show()
###Output
_____no_output_____ |
coding/notebook-customisation.ipynb | ###Markdown
28 Jupyter Notebook tips, tricks, and shortcuts: https://www.dataquest.io/blog/jupyter-notebook-tips-tricks-shortcuts/ Edit IPython cell in an external editor: https://stackoverflow.com/questions/28309430/edit-ipython-cell-in-an-external-editor This is what I came up with. I added 2 shortcuts:- 'g' to launch gvim with the content of the current cell (you can replace gvim with whatever text editor you like).- 'u' to update the content of the current cell with what was saved by gvim. So, when you want to edit the cell with your preferred editor, hit 'g', make the changes you want to the cell, save the file in your editor (and quit), then hit 'u'. Just execute this cell to enable these features, or put it in `~/.jupyter/custom/custom.js`
###Code
%%javascript
IPython.keyboard_manager.command_shortcuts.add_shortcut('g', {
help : 'Edit cell in Visual Studio Code',
handler : function (event) {
var input = IPython.notebook.get_selected_cell().get_text();
var cmd = "f = open('.toto.py', 'w');f.close()";
if (input != "") {
cmd = '%%writefile .toto.py\n' + input;
}
IPython.notebook.kernel.execute(cmd);
//cmd = "import os;os.system('open -a /Applications/MacVim.app .toto.py')";
//cmd = "!open -a /Applications/MacVim.app .toto.py";
cmd = "!code .toto.py";
IPython.notebook.kernel.execute(cmd);
return false;
}}
);
IPython.keyboard_manager.command_shortcuts.add_shortcut('u', {
help : 'Update cell from externally edited file',
handler : function (event) {
function handle_output(msg) {
var ret = msg.content.text;
IPython.notebook.get_selected_cell().set_text(ret);
}
var callback = {'output': handle_output};
var cmd = "f = open('.toto.py', 'r');print(f.read())";
IPython.notebook.kernel.execute(cmd, {iopub: callback}, {silent: false});
return false;
}}
);
n = int (input("Enter a number:"))
print("this is the number:{:10.3f}".format(n))
###Output
Enter a number:90
this is the number: 90.000
###Markdown
Debugging: use `from IPython.core.debugger import set_trace` and call `set_trace()` at the point where execution should pause, or use `import pdb; pdb.set_trace()`. A small sketch follows below.
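A minimal, self-contained example of this workflow (the function and values are arbitrary):

```python
from IPython.core.debugger import set_trace

def divide(a, b):
    set_trace()  # execution pauses here; inspect a and b, then type 'c' to continue
    return a / b

divide(10, 2)
```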
###Code
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
###Output
_____no_output_____ |
Lending_Club_5_Clustering.ipynb | ###Markdown
We can observe 3 different clusters in the funded amount feature
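For context, a minimal sketch of how a `K-Means_Cluster_ID` column like the one used below could be produced; the feature list, the scaling step, and `n_clusters=3` are assumptions for illustration, not this notebook's actual pipeline:

```python
# Hypothetical sketch: assign each loan to one of 3 K-Means clusters
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler

cluster_features = ['funded_amnt', 'annual_inc', 'emp_length', 'dti', 'FICO_mean']  # assumed columns
X_scaled = StandardScaler().fit_transform(dummies_loan_status[cluster_features])
dummies_loan_status['K-Means_Cluster_ID'] = KMeans(n_clusters=3, random_state=0).fit_predict(X_scaled)
```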
###Code
sns.set(font_scale= 1)
plt.figure(figsize=(20,200))
sns.boxplot(x='K-Means_Cluster_ID', y='annual_inc', data=dummies_loan_status)
###Output
_____no_output_____
###Markdown
Concerning the annual income feature, the differences are not so visible, but we have an enormous spread of data here.
###Code
sns.set(font_scale= 1)
plt.figure(figsize=(20,10))
sns.boxplot(x='K-Means_Cluster_ID', y='emp_length', data=dummies_loan_status)
###Output
_____no_output_____
###Markdown
There are also three different clusters related to employment length.
###Code
sns.set(font_scale= 1)
plt.figure(figsize=(20,10))
sns.boxplot(x='K-Means_Cluster_ID', y='dti', data=dummies_loan_status)
sns.set(font_scale= 1)
plt.figure(figsize=(20,10))
sns.boxplot(x='K-Means_Cluster_ID', y='FICO_mean', data=dummies_loan_status)
###Output
_____no_output_____
###Markdown
Here we have a very clear division between clusters.
###Code
sns.set(font_scale= 1)
plt.figure(figsize=(20,10))
sns.boxplot(x='K-Means_Cluster_ID', y='grade', data=dummies_loan_status)
###Output
_____no_output_____ |
wk-5/SourcePoint/W5_PersonAttrubutes.ipynb | ###Markdown
###Code
# mount gdrive and unzip data
from google.colab import drive
drive.mount('/content/gdrive')
!unzip -q "/content/gdrive/My Drive/hvc_data.zip"
# look for `hvc_annotations.csv` file and `resized` dir
%ls
%tensorflow_version 1.x
import cv2
import json
import numpy as np
import pandas as pd
from functools import partial
from pathlib import Path
from tqdm import tqdm
from google.colab.patches import cv2_imshow
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from keras.applications import VGG16
from keras.layers.core import Dropout
from keras.layers.core import Flatten
from keras.layers.core import Dense
from keras.layers import Input
from keras.models import Model
from keras.optimizers import SGD
from keras.preprocessing.image import ImageDataGenerator
# load annotations
df = pd.read_csv("hvc_annotations.csv")
del df["filename"] # remove unwanted column
df.head()
# one hot encoding of labels
one_hot_df = pd.concat([
df[["image_path"]],
pd.get_dummies(df.gender, prefix="gender"),
pd.get_dummies(df.imagequality, prefix="imagequality"),
pd.get_dummies(df.age, prefix="age"),
pd.get_dummies(df.weight, prefix="weight"),
pd.get_dummies(df.carryingbag, prefix="carryingbag"),
pd.get_dummies(df.footwear, prefix="footwear"),
pd.get_dummies(df.emotion, prefix="emotion"),
pd.get_dummies(df.bodypose, prefix="bodypose"),
], axis = 1)
one_hot_df.head().T
import keras
import numpy as np
# Label columns per attribute
_gender_cols_ = [col for col in one_hot_df.columns if col.startswith("gender")]
_imagequality_cols_ = [col for col in one_hot_df.columns if col.startswith("imagequality")]
_age_cols_ = [col for col in one_hot_df.columns if col.startswith("age")]
_weight_cols_ = [col for col in one_hot_df.columns if col.startswith("weight")]
_carryingbag_cols_ = [col for col in one_hot_df.columns if col.startswith("carryingbag")]
_footwear_cols_ = [col for col in one_hot_df.columns if col.startswith("footwear")]
_emotion_cols_ = [col for col in one_hot_df.columns if col.startswith("emotion")]
_bodypose_cols_ = [col for col in one_hot_df.columns if col.startswith("bodypose")]
class PersonDataGenerator(keras.utils.Sequence):
"""Ground truth data generator"""
def __init__(self, df, batch_size=32, shuffle=True):
self.df = df
self.batch_size=batch_size
self.shuffle = shuffle
self.on_epoch_end()
def __len__(self):
return int(np.floor(self.df.shape[0] / self.batch_size))
def __getitem__(self, index):
"""fetch batched images and targets"""
batch_slice = slice(index * self.batch_size, (index + 1) * self.batch_size)
items = self.df.iloc[batch_slice]
image = np.stack([cv2.imread(item["image_path"]) for _, item in items.iterrows()])
target = {
"gender_output": items[_gender_cols_].values,
"image_quality_output": items[_imagequality_cols_].values,
"age_output": items[_age_cols_].values,
"weight_output": items[_weight_cols_].values,
"bag_output": items[_carryingbag_cols_].values,
"pose_output": items[_bodypose_cols_].values,
"footwear_output": items[_footwear_cols_].values,
"emotion_output": items[_emotion_cols_].values,
}
return image, target
def on_epoch_end(self):
"""Updates indexes after each epoch"""
if self.shuffle == True:
self.df = self.df.sample(frac=1).reset_index(drop=True)
from sklearn.model_selection import train_test_split
train_df, val_df = train_test_split(one_hot_df, test_size=0.15)
train_df.shape, val_df.shape
train_df.head()
train_df.info()
# create train and validation data generators
train_gen = PersonDataGenerator(train_df, batch_size=32)
valid_gen = PersonDataGenerator(train_df, batch_size=64, shuffle=False)
# get number of output units from data
images, targets = next(iter(train_gen))
num_units = { k.split("_output")[0]:v.shape[1] for k, v in targets.items()}
num_units
backbone = VGG16(
weights="imagenet",
include_top=False,
input_tensor=Input(shape=(224, 224, 3))
)
neck = backbone.output
neck = Flatten(name="flatten")(neck)
neck = Dense(512, activation="relu")(neck)
def build_tower(in_layer):
neck = Dropout(0.2)(in_layer)
neck = Dense(128, activation="relu")(neck)
neck = Dropout(0.3)(in_layer)
neck = Dense(128, activation="relu")(neck)
return neck
def build_head(name, in_layer):
return Dense(
num_units[name], activation="softmax", name=f"{name}_output"
)(in_layer)
# heads
gender = build_head("gender", build_tower(neck))
image_quality = build_head("image_quality", build_tower(neck))
age = build_head("age", build_tower(neck))
weight = build_head("weight", build_tower(neck))
bag = build_head("bag", build_tower(neck))
footwear = build_head("footwear", build_tower(neck))
emotion = build_head("emotion", build_tower(neck))
pose = build_head("pose", build_tower(neck))
model = Model(
inputs=backbone.input,
outputs=[gender, image_quality, age, weight, bag, footwear, pose, emotion]
)
# freeze backbone
for layer in backbone.layers:
layer.trainable = False
# losses = {
# "gender_output": "binary_crossentropy",
# "image_quality_output": "categorical_crossentropy",
# "age_output": "categorical_crossentropy",
# "weight_output": "categorical_crossentropy",
# }
# loss_weights = {"gender_output": 1.0, "image_quality_output": 1.0, "age_output": 1.0}
opt = SGD(lr=0.001, momentum=0.9)
model.compile(
optimizer=opt,
loss="categorical_crossentropy",
# loss_weights=loss_weights,
metrics=["accuracy"]
)
# model.fit(X_train, y_train, validation_data=(X_valid, y_valid), batch_size=32, epochs=10)
model.fit_generator(
generator=train_gen,
validation_data=valid_gen,
use_multiprocessing=True,
workers=6,
epochs=10,
verbose=1
)
model
###Output
_____no_output_____ |
notebooks/Peekaboo.ipynb | ###Markdown
Testing Phase
###Code
%%bash
rm /App/logs/client.logs
echo RR > /App/output/random.csv
for i in $(seq 1 30)
do
cd /App/mininettest/ && python /App/mininettest/demo.py --scheduler random --rtt 0
time=$(tail -1 /App/logs/client.logs | awk '{print $3}')
echo $time >> /App/output/random.csv
done
%%bash
rm /App/logs/client.logs
echo minRTT > /App/output/rtt.csv
for i in $(seq 1 30)
do
cd /App/mininettest/ && python /App/mininettest/demo.py --scheduler rtt --rtt 0
time=$(tail -1 /App/logs/client.logs | awk '{print $3}')
echo $time >> /App/output/rtt.csv
done
%%bash
rm /App/logs/client.logs
echo ECF > /App/output/ecf.csv
for i in $(seq 1 30)
do
cd /App/mininettest/ && python /App/mininettest/demo.py --scheduler ecf --rtt 0
time=$(tail -1 /App/logs/client.logs | awk '{print $3}')
echo $time >> /App/output/ecf.csv
done
%%bash
rm /App/logs/client.logs
echo BLEST > /App/output/blest.csv
for i in $(seq 1 30)
do
cd /App/mininettest/ && python /App/mininettest/demo.py --scheduler blest --rtt 0
time=$(tail -1 /App/logs/client.logs | awk '{print $3}')
echo $time >> /App/output/blest.csv
done
%%bash
rm /App/logs/client.logs
echo Peekaboo > /App/output/peekaboo.csv
for i in $(seq 1 30)
do
cd /App/mininettest/ && python /App/mininettest/demo.py --scheduler peek --rtt 0
time=$(tail -1 /App/logs/client.logs | awk '{print $3}')
echo $time >> /App/output/peekaboo.csv
done
import pandas
random = pandas.read_csv("/App/output/random.csv")
rtt = pandas.read_csv("/App/output/rtt.csv")
ecf = pandas.read_csv("/App/output/ecf.csv")
blest = pandas.read_csv("/App/output/blest.csv")
peekaboo = pandas.read_csv("/App/output/peekaboo.csv")
result = pandas.concat([(random), (rtt), (ecf), (blest), (peekaboo)], axis=1)
import matplotlib.pyplot as plt
from IPython.core.pylabtools import figsize
figsize(40, 7)
result.plot.box(sym='+')
plt.ylabel("Completion Time (ms)")
plt.ylim(top=4000)
plt.xlabel("Schedulers")
plt.title("Completion time of different schedulers")
###Output
1091 51671 0
0 434.271874 453.238516 488.150308
1 471.380898 410.833193 446.967829
2 471.589647 438.075983 483.812754
3 438.839682 435.952490 447.315560
4 439.324781 429.407502 468.740917
5 449.379670 417.444521 485.506205
6 409.198922 440.014064 427.197541
7 474.287381 415.110489 506.308822
8 428.236823 428.926592 427.116910
9 424.495530 435.809361 488.519849
10 432.037969 415.471403 435.957200
11 437.200076 444.430972 488.945293
12 433.142973 444.516063 450.548196
13 438.236781 450.495536 459.208608
14 420.086741 466.045296 503.012617
15 448.544177 453.561176 441.515699
16 418.770316 428.972216 491.440193
17 426.645355 429.159216 442.437941
18 489.837413 440.542029 439.528978
19 435.609987 425.897913 453.364277
20 442.384437 468.396638 431.669119
21 422.554074 425.876471 467.143138
22 434.222844 457.862645 448.973382
23 437.511493 435.754004 481.910711
24 443.367810 453.743125 451.808883
25 437.271374 411.826581 443.970018
26 442.208253 409.175939 455.248081
27 454.623312 440.579200 441.094412
28 430.006889 430.625747 435.795769
29 474.468795 434.907413 457.755058
|
python/Sequential_bow.ipynb | ###Markdown
Optimization and Evaluation of the Sequential bow Model
###Code
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sn
import h5py
import pydot
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_curve, auc, confusion_matrix, classification_report, log_loss
from sklearn.preprocessing import LabelEncoder
from sklearn.ensemble import RandomForestClassifier
from wordcloud import WordCloud
from PIL import Image
from PIL import ImageFilter
from hyperopt import Trials, STATUS_OK, tpe
from hyperas import optim
from hyperas.distributions import choice, uniform, normal, qlognormal, randint
from keras.models import Sequential
from keras.layers import Dense, Dropout, LeakyReLU, Activation
from keras.regularizers import l2, l1
from keras.callbacks import TensorBoard, ModelCheckpoint, EarlyStopping
from keras.models import load_model
from keras.utils import np_utils, plot_model
from keras.optimizers import Adagrad
from keras.losses import binary_crossentropy
def data_bow():
x_test = pd.read_hdf("../build/preprocessed/bow_data_500.hdf5",key="test")
x_train = pd.read_hdf("../build/preprocessed/bow_data_500.hdf5",key="train")
y_test = x_test.label
y_train = x_train.label
x_test= x_test.drop('label',axis=1)
x_train = x_train.drop('label',axis=1)
return x_train, y_train, x_test, y_test
###Output
_____no_output_____
###Markdown
First Test
###Code
x_test = pd.read_hdf("../build/preprocessed/bow_data_500.hdf5",key="test")
x_train = pd.read_hdf("../build/preprocessed/bow_data_500.hdf5",key="train")
y_test = x_test.label
y_train = x_train.label
x_test= x_test.drop('label',axis=1)
x_train = x_train.drop('label',axis=1)
dim = x_train.shape[1]
model = Sequential()
model.add(Dense(1770, kernel_regularizer=l1(2.0036577552673407e-06), input_dim=dim))
model.add(Activation('relu'))
model.add(Dense(9,kernel_regularizer=l2(0.05407632514834404)))
model.add(Activation('relu'))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy', metrics=['accuracy'],
optimizer='Adagrad')
model.summary()
result = model.fit(x_train.values, y_train.values,
batch_size=64,
epochs=100,
verbose=2,
validation_split=0.3)
def plot_history(network_history):
plt.figure()
plt.xlabel('Epochen')
plt.ylabel('Verlust')
plt.plot(network_history.history['loss'])
plt.plot(network_history.history['val_loss'])
plt.legend(['Training', 'Validierung'])
plt.show()
plt.close()
plot_history(result)
###Output
_____no_output_____
###Markdown
Analysis of the Input
###Code
x_test = pd.read_hdf("../build/preprocessed/bow_data_500.hdf5",key="test")
x_train = pd.read_hdf("../build/preprocessed/bow_data_500.hdf5",key="train")
X = x_test.append(x_train)
###Output
_____no_output_____
###Markdown
$$max\left(\frac{(\bar{w_1}-\bar{w_2})^2}{s_1^2+s_2^2}\right)$$
###Code
mu_real = X[X.label==1].mean()
mu_fake = X[X.label==0].mean()
sorted_words = ((mu_real-mu_fake)**2/(np.var(X[X.label==1])**2 + np.var(X[X.label==0])**2)).sort_values(ascending=False).index
words_plot = pd.DataFrame({'Fake':X[X.label==0][sorted_words[1:11]].mean(),'Real':X[X.label==1][sorted_words[1:11]].mean()})
words_plot.plot(kind='bar')
plt.ylabel(r"$\overline{w}$")
plt.legend()
plt.tight_layout()
plt.savefig("../build/plots/data_visualisation.pdf")
plt.show()
news = pd.read_csv('../data/mixed_news/news_dataset.csv')
news = news.dropna(subset=['title','content'])
news = news[news.content != ' ']
news = news[news.title != ' ']
text_len_real = [len(c) for c in news[news['label']=='real'].content]
text_len_fake = [len(c) for c in news[news['label']=='fake'].content]
print("Mittlere textlänge Real: ",np.mean(text_len_real))
print("Mittlere Textlänge Fake: ",np.mean(text_len_fake))
print("Real ist %d länger wie Fake: " % (np.mean(text_len_real)-np.mean(text_len_fake)))
###Output
_____no_output_____
###Markdown
How could the hyperparameters be distributed? Structure: size of the first and second hidden layers
###Code
x = np.round(np.random.lognormal(6,0.5,10000)/10)*10
plt.hist(x,bins=100)
plt.xlim(0,5000)
plt.show()
###Output
_____no_output_____
###Markdown
Size of the third hidden layer
###Code
x = np.round(np.random.lognormal(4,0.5,10000)/1)*1
plt.hist(x,bins=100)
plt.show()
###Output
_____no_output_____
###Markdown
Regularization
###Code
x =np.random.uniform(0,0.1,10000)
plt.hist(x,bins=100)
plt.show()
###Output
_____no_output_____
###Markdown
Model creation
###Code
def model_structure(x_train, y_train, x_test, y_test):
dim = x_train.shape[1]
model = Sequential()
model.add(Dense(int({{qlognormal(6,0.5,10)}}), input_dim=dim))
model.add(Activation('relu'))
if {{choice(['three', 'four'])}} == 'four':
model.add(Dense(int({{qlognormal(6,0.5,10)}})))
model.add(Activation('relu'))
model.add(Dense(int({{qlognormal(4,0.5,1)}})))
model.add(Activation('relu'))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy', metrics=['accuracy'],
optimizer='adam')
result = model.fit(x_train.values, y_train.values,
batch_size=64,
epochs=30,
verbose=2,
validation_split=0.3)
validation_acc = np.amax(result.history['val_acc'])
print('Best validation acc of epoch:', validation_acc)
return {'loss': -validation_acc, 'status': STATUS_OK, 'model': model}
def model_training(x_train, y_train, x_test, y_test):
dim = x_train.shape[1]
model = Sequential()
model.add(Dense(1770, input_dim=dim))
model.add(Activation('relu'))
model.add(Dense(9))
model.add(Activation('relu'))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss={{choice(['hinge','binary_crossentropy','squared_hinge'])}}, metrics=['accuracy'],
optimizer={{choice(['adam','AdaDelta','Adagrad'])}})
result = model.fit(x_train.values, y_train.values,
batch_size=64,
epochs=30,
verbose=2,
validation_split=0.3)
validation_acc = np.amax(result.history['val_acc'])
print('Best validation acc of epoch:', validation_acc)
return {'loss': -validation_acc, 'status': STATUS_OK, 'model': model}
def model_regularization(x_train, y_train, x_test, y_test):
dim = x_train.shape[1]
model = Sequential()
model.add(Dense(1770, kernel_regularizer=l1({{uniform(0,0.1)}}), input_dim=dim))
model.add(Activation('relu'))
model.add(Dense(9,kernel_regularizer=l2({{uniform(0,0.1)}})))
model.add(Activation('relu'))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy', metrics=['accuracy'],
optimizer='Adagrad')
result = model.fit(x_train.values, y_train.values,
batch_size=64,
epochs=30,
verbose=2,
validation_split=0.3)
validation_acc = np.amax(result.history['val_acc'])
print('Best validation acc of epoch:', validation_acc)
return {'loss': -validation_acc, 'status': STATUS_OK, 'model': model}
def model_optimizer(x_train, y_train, x_test, y_test):
dim = x_train.shape[1]
model = Sequential()
model.add(Dense(1770, kernel_regularizer=l1(2.0*10**(-6)), input_dim=dim))
model.add(Activation('relu'))
model.add(Dense(9,kernel_regularizer=l2(0.05407632514834404)))
model.add(Activation('relu'))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy', metrics=['accuracy'],
optimizer=Adagrad(lr={{uniform(0,1)}}, epsilon=None, decay={{uniform(0,1)}}))
result = model.fit(x_train.values, y_train.values,
batch_size=64,
epochs=30,
verbose=2,
validation_split=0.3)
validation_acc = np.amax(result.history['val_acc'])
print('Best validation acc of epoch:', validation_acc)
return {'loss': -validation_acc, 'status': STATUS_OK, 'model': model}
###Output
_____no_output_____
###Markdown
Optimization with hyperopt. Algorithm: Tree of Parzen Estimators. Optimization in 3 steps: - Structure (depth (2 or 3 hidden layers) and width) - Training (loss function and optimizer) - Regularization (L1 for the first layer and L2 for layers 2 and 3)
###Code
trials = Trials()
best_run, best_model = optim.minimize(model=model_structure,
data=data_bow,
algo=tpe.suggest,
max_evals=50,
trials=trials,
notebook_name='Sequential_bow')
print("Best performing model chosen hyper-parameters:")
print(best_run)
best_model.save('../model/best_Hyperopt_NN_bow_struct_500.hdf5')
trials = Trials()
best_run, best_model = optim.minimize(model=model_training,
data=data_bow,
algo=tpe.suggest,
max_evals=15,
trials=trials,
notebook_name='Sequential_bow')
print("Best performing model chosen hyper-parameters:")
print(best_run)
best_model.save('../model/best_Hyperopt_NN_bow_training_500.hdf5')
trials = Trials()
best_run, best_model = optim.minimize(model=model_regularization,
data=data_bow,
algo=tpe.suggest,
max_evals=80,
trials=trials,
notebook_name='Sequential_bow')
print("Best performing model chosen hyper-parameters:")
print(best_run)
best_model.save('../model/best_Hyperopt_NN_bow_regularization2_500.hdf5')
trials = Trials()
best_run, best_model = optim.minimize(model=model_optimizer,
data=data_bow,
algo=tpe.suggest,
max_evals=100,
trials=trials,
notebook_name='Sequential_bow')
print("Best performing model chosen hyper-parameters:")
print(best_run)
best_model.save('../model/best_Hyperopt_NN_bow_optimizer_500.hdf5')
###Output
_____no_output_____
###Markdown
Best regularization: L1 in the first layer = 0.00010911483516010123, L2 in the second layer = 0.027248712155710758, lr = 0.12056175158012145, epsilon = 0.1999709211230266. However, optimizing the learning rate leads to an optimizer that easily gets stuck in local optima (here the local optimum of predicting everything as real). Therefore the standard optimizer is used. Evaluation of best model Train best model: retrain the best model, which is optimized with respect to the hyperparameters.
###Code
def plot_history(network_history):
plt.figure()
plt.xlabel('Epochen')
plt.ylabel('Verlust')
plt.plot(network_history.history['loss'])
plt.plot(network_history.history['val_loss'])
plt.legend(['Training', 'Validierung'])
#plt.show()
plt.savefig("../build/plots/bow/500/history_bow_best.pdf")
plt.close()
X_train, Y_train, X_test, Y_test = data_bow()
best_model = load_model('../model/best_Hyperopt_NN_bow_regularization2_500.hdf5')
model = Sequential.from_config(best_model.get_config())
print(model.summary())
model.get_config()
filepath = '../model/best_Hyperopt_NN_bow_trained_500.hdf5'
checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True)
#stopping = EarlyStopping(monitor='val_loss', min_delta=0, restore_best_weights=True)
model.compile(loss='binary_crossentropy',
optimizer='Adagrad',
metrics=['accuracy'])
history = model.fit(X_train.values, Y_train.values, validation_split=0.3,
epochs=100,batch_size=64, callbacks=[checkpoint])#, stopping])
plot_history(history)
###Output
_____no_output_____
###Markdown
Evaluation of best model: examine the trained model and display the confusion matrix, overtraining plot, and ROC curve.
###Code
best_model = load_model('../model/best_Hyperopt_NN_bow_trained_500.hdf5')
y_pred = best_model.predict(X_test.values, batch_size=64, verbose=1)
y_pred_train = best_model.predict(X_train.values, batch_size=64, verbose=1)
y_pred_bool = np.round(y_pred[:,0])
Y_test = pd.DataFrame({"label":Y_test,"prediction":y_pred[:,0],"prediction_bool":y_pred_bool})
Y_train = pd.DataFrame({"label":Y_train,"prediction":y_pred_train[:,0]})
print(classification_report(Y_test['label'], Y_test['prediction_bool']))
print("Binary Cross Entropie: ",log_loss(Y_test.label, Y_test.prediction))
#Confusion Matrix
cnfn_matrix = pd.crosstab(Y_test['label'], Y_test['prediction_bool'], rownames=['Wahrheit'], colnames=['Schätzung'])
print(cnfn_matrix)
cnfn_matrix.columns = ['Fake','Real']
cnfn_matrix = cnfn_matrix.rename_axis("Schätzung", axis="columns")
cnfn_matrix.rename(index = {0.0: "Fake", 1.0:'Real'}, inplace = True)
cnfn_matrix = cnfn_matrix/Y_test.shape[0]
sn.heatmap(cnfn_matrix, annot=True , cmap='viridis')
#plt.show()
plt.savefig("../build/plots/bow/500/cnfsn_mtx_bow_best_nn.pdf")
plt.close()
#Overtraining test
plt.hist(Y_test.prediction[Y_test.label == 0],label="fake test", alpha = 0.4, color = "r",density=True)
plt.hist(Y_train.prediction[Y_train.label == 0],label='fake train', alpha = 0.4, color = 'r', histtype='step',density=True)
plt.hist(Y_test.prediction[Y_test.label == 1],label = "real test",alpha = 0.4, color = "b",density=True)
plt.hist(Y_train.prediction[Y_train.label == 1],label='real train', alpha = 0.4, color = 'b', histtype='step',density=True)
plt.xlabel("Geschätzte Likelihood")
plt.ylabel("Dichte")
plt.legend(loc='upper center')
#plt.show()
plt.savefig("../build/plots/bow/500/prob_bow_best_nn.pdf")
plt.close()
fpr = dict()
tpr = dict()
roc_auc = dict()
fpr, tpr, _ = roc_curve(Y_test.label, Y_test.prediction)
roc_auc = auc(fpr, tpr)
plt.figure()
lw = 2
plt.plot(fpr, tpr, color='darkorange',
lw=lw, label='ROC Kurve (AUC = %0.2f)' % roc_auc)
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.legend(loc="lower right")
#plt.show()
plt.savefig("../build/plots/bow/500/roc_Hyperopt_bow_best_nn.pdf")
plt.close()
###Output
precision recall f1-score support
0 0.86 0.87 0.87 3658
1 0.90 0.89 0.89 4713
micro avg 0.88 0.88 0.88 8371
macro avg 0.88 0.88 0.88 8371
weighted avg 0.88 0.88 0.88 8371
Binary Cross Entropie: 0.316260521772017
Schätzung 0.0 1.0
Wahrheit
0 3184 474
1 515 4198
###Markdown
Interpretation: word cloud confusion matrix. Word frequencies are shown as word clouds separately for FP, FN, TP, and TN.
###Code
FP = Y_test[(Y_test.prediction_bool== 1) & (Y_test.label == 0)]
FN = Y_test[(Y_test.prediction_bool== 0) & (Y_test.label == 1)]
TP = Y_test[(Y_test.prediction_bool== 1) & (Y_test.label == 1)]
TN = Y_test[(Y_test.prediction_bool== 0) & (Y_test.label == 0)]
X_FP = X_test.loc[FP.index]
X_FN = X_test.loc[FN.index]
X_TP = X_test.loc[TP.index]
X_TN = X_test.loc[TN.index]
def plotWordcloud_cnfn(TN,FN,FP,TP):
TN = TN.sum().to_dict()
FN = FN.sum().to_dict()
FP = FP.sum().to_dict()
TP = TP.sum().to_dict()
pad = 5
fig = plt.figure(figsize=(15,10),dpi=100)
ax = plt.subplot(2, 2, 1)
wordcloud = WordCloud(background_color='black',
width=1920,
height=1080,
mask=np.array(Image.open('../data/pictures/trump_silhouette.png'))
).generate_from_frequencies(TN)
plt.imshow(wordcloud, interpolation='bilinear')
plt.axis('off')
plt.margins(x=0, y=0)
ax = plt.subplot(2, 2, 2)
wordcloud = WordCloud(background_color='black',
width=1920,
height=1080,
mask= np.array(Image.open('../data/pictures/trump_silhouette.png'))
).generate_from_frequencies(FP)
plt.imshow(wordcloud, interpolation='bilinear')
plt.axis('off')
plt.margins(x=0, y=0)
ax = plt.subplot(2, 2, 3)
wordcloud = WordCloud(background_color='black',
width=1920,
height=1080,
mask=np.array(Image.open('../data/pictures/USA.jpg'))
).generate_from_frequencies(FN)
plt.imshow(wordcloud, interpolation='bilinear')
plt.axis('off')
plt.margins(x=0, y=0)
plt.subplot(2, 2, 4)
wordcloud = WordCloud(background_color='black',
width=1920,
height=1080,
mask=np.array(Image.open('../data/pictures/USA.jpg'))
).generate_from_frequencies(TP)
plt.imshow(wordcloud, interpolation='bilinear')
plt.axis('off')
plt.figtext(0.5, 1.09, r"Prediction", {'fontsize': 30},
horizontalalignment='center',
verticalalignment='top')
plt.figtext(0.25, 1.02, r"fake", {'fontsize': 20},
horizontalalignment='center',
verticalalignment='bottom',)
plt.figtext(0.75, 1.02, r"real", {'fontsize': 20},
horizontalalignment='center',
verticalalignment='bottom',)
plt.figtext(-0.07, 0.5, r"Actual", {'fontsize': 30},
horizontalalignment='left',
verticalalignment='center',
rotation=90)
plt.figtext(0.00, 0.75, r"fake", {'fontsize': 20},
horizontalalignment='right',
verticalalignment='center',)
plt.figtext(0.00, 0.25, r"real", {'fontsize': 20},
horizontalalignment='right',
verticalalignment='center',)
plt.margins(x=0, y=0)
plt.tight_layout()
#plt.show()
plt.savefig("../build/plots/bow/500/cnfn_wordcloud.pdf", bbox_inches = 'tight')
plt.close()
plotWordcloud_cnfn(X_TN,X_FN,X_FP,X_TP)
###Output
_____no_output_____
###Markdown
Word clouds for fake and real news
###Code
def plotWordcloud(content,t):
if(t!=""):
mask = np.array(Image.open('../data/pictures/'+t))
else:
mask=None
content = content.sum().to_dict()
wordcloud = WordCloud(background_color='black',
width=1920,
height=1080,
mask=mask
).generate_from_frequencies(content)
plt.imshow(wordcloud, interpolation='bilinear')
plt.axis('off')
plt.margins(x=0, y=0)
X = X_test.append(X_train)
y = Y_test.append(Y_train)
plt.figure(dpi=200)
plotWordcloud(X[y.label==0],"trump_silhouette.png")
#plt.show()
plt.savefig("../build/plots/fake_wordcloud.pdf",bbox_inches='tight',pad_inches = 0)
plt.close()
plt.figure(dpi=200)
plotWordcloud(X[y.label==1],"USA.jpg")
#plt.show()
plt.savefig("../build/plots/real_wordcloud.pdf",bbox_inches='tight',pad_inches = 0)
plt.close()
###Output
_____no_output_____
###Markdown
Examining the first layer: the absolute values of all first-layer weights attached to each input word are summed (the bias is ignored) and the result is displayed as a WordCloud.
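A minimal sketch of the same aggregation (assuming a Keras `Dense` first layer in `best_model` and a bag-of-words `X_test` whose columns are the vocabulary): the kernel has shape `(n_words, n_units)`, so summing the absolute values over the units yields one importance score per word, which is what the transpose/`sum(axis=0)` in the cell below computes.
```python
import numpy as np
import pandas as pd

W, b = best_model.layers[0].get_weights()      # kernel (n_words, n_units), bias (n_units,)
importance = pd.Series(np.abs(W).sum(axis=1),  # sum |w| over all units, per word
                       index=X_test.columns)
print(importance.sort_values(ascending=False).head(10))
```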
###Code
words = X_test.columns
first_weights = best_model.layers[0].get_weights()[0]
first_weights = pd.DataFrame(first_weights.transpose())
first_weights.columns = words
first_weightabs = np.abs(first_weights)
first_weightsum = first_weightabs.sum(axis=0)
content = np.abs(first_weightsum).to_dict()
wordcloud = WordCloud(background_color='black',
width=1920,
height=1080
).generate_from_frequencies(content)
plt.figure(dpi=100)
plt.imshow(wordcloud, interpolation='bilinear')
plt.axis('off')
plt.margins(x=0, y=0)
#plt.show()
plt.savefig("../build/plots/bow/500/weights_wordcloud.pdf")
plt.close()
###Output
_____no_output_____
###Markdown
Examining the confusion matrix using the 10 most important words
###Code
sorted_weights = first_weightsum.sort_values(ascending=False)
best_words = sorted_weights[:10].index
fig = plt.figure(figsize=(15,10),dpi=100)
ax = plt.subplot(2, 2, 1)
(X_TN[best_words].sum()/X_TN.shape[0]).plot(kind='bar',label="TN",color='r')
plt.margins(x=0, y=0)
plt.ylabel("mittlere Worthäufigkeit")
plt.ylim(0.01,60)
plt.yscale("log")
ax = plt.subplot(2, 2, 2)
(X_FP[best_words].sum()/X_FP.shape[0]).plot(kind='bar',label="FP",color='g')
plt.margins(x=0, y=0)
plt.ylabel("mittlere Worthäufigkeit")
plt.ylim(0.01,60)
plt.yscale("log")
ax = plt.subplot(2, 2, 3)
(X_FN[best_words].sum()/X_FN.shape[0]).plot(kind='bar',label="FN",color='k')
plt.margins(x=0, y=0)
plt.ylabel("mittlere Worthäufigkeit")
plt.ylim(0.01,60)
plt.yscale("log")
plt.subplot(2, 2, 4)
(X_TP[best_words].sum()/X_TP.shape[0]).plot(kind='bar',label="TP",color='b')
plt.margins(x=0, y=0)
plt.ylabel("mittlere Worthäufigkeit")
plt.ylim(0.01,60)
plt.yscale("log")
plt.figtext(0.5, 1.09, r"Schätzung", {'fontsize': 30},
horizontalalignment='center',
verticalalignment='top')
plt.figtext(0.25, 1.02, r"Fake", {'fontsize': 20},
horizontalalignment='center',
verticalalignment='bottom',)
plt.figtext(0.75, 1.02, r"Real", {'fontsize': 20},
horizontalalignment='center',
verticalalignment='bottom',)
plt.figtext(-0.07, 0.5, r"Wahrheit", {'fontsize': 30},
horizontalalignment='left',
verticalalignment='center',
rotation=90)
plt.figtext(0.00, 0.75, r"Fake", {'fontsize': 20},
horizontalalignment='right',
verticalalignment='center',)
plt.figtext(0.00, 0.25, r"Real", {'fontsize': 20},
horizontalalignment='right',
verticalalignment='center',)
plt.margins(x=0, y=0)
plt.tight_layout()
#plt.show()
plt.savefig("../build/plots/bow/500/cnfn_hist.pdf", bbox_inches = 'tight')
plt.close()
###Output
_____no_output_____
###Markdown
RF comparison model: training a random forest on the bag-of-words input and evaluating it
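The cell below keeps column 1 of `predict_proba`, i.e. the probability of label 1 (real). As a self-contained illustration of why that slice is used (hypothetical toy data, not the news dataset):
```python
import numpy as np
from sklearn.ensemble import RandomForestClassifier

X_toy = np.array([[0, 1], [1, 0], [1, 1], [0, 0]])
y_toy = np.array([0, 1, 1, 0])
clf = RandomForestClassifier(n_estimators=10, random_state=0).fit(X_toy, y_toy)

proba = clf.predict_proba(X_toy)  # shape (n_samples, 2); columns follow clf.classes_
print(clf.classes_)               # [0 1] -> column 1 is P(label == 1)
print(proba[:, 1])                # the slice used below as the predicted likelihood
```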
###Code
RF = RandomForestClassifier(n_estimators=100, max_depth=10,random_state=0,criterion='entropy')
RF.fit(X_train.values,Y_train.label.values)
y_pred_bool_RF = RF.predict(X_test.values)
y_pred_RF = RF.predict_proba(X_test.values)
y_pred_RF = y_pred_RF[:,1]
y_pred_train_RF = RF.predict_proba(X_train.values)
y_pred_train_RF = y_pred_train_RF[:,1]
Y_test['prediction_RF'] = y_pred_RF
Y_test['prediction_bool_RF'] = y_pred_bool_RF
Y_train['prediction_RF'] = y_pred_train_RF
print(classification_report(Y_test.label, Y_test.prediction_bool_RF))
#Confusion Matrix
cnfn_matrix = pd.crosstab(Y_test.label, Y_test.prediction_bool_RF, rownames=['Wahrheit'], colnames=['Schätzung'])
print(cnfn_matrix)
cnfn_matrix.columns = ['Fake','Real']
cnfn_matrix = cnfn_matrix.rename_axis("Schätzung", axis="columns")
cnfn_matrix.rename(index = {0.0: "Fake", 1.0:'Real'}, inplace = True)
cnfn_matrix = cnfn_matrix/Y_test.shape[0]
sn.heatmap(cnfn_matrix, annot=True , cmap='viridis')
#plt.show()
plt.savefig("../build/plots/bow/500/RF/cnfsn_mtx_bow_best_nn.pdf")
plt.close()
#Overtraining test
bin_edges = np.linspace(0,1,11)
plt.hist(Y_test.prediction_RF[Y_test.label == 0],label="fake test", alpha = 0.4, color = "r",density=True,bins=bin_edges)
plt.hist(Y_train.prediction_RF[Y_train.label == 0],label='fake train', alpha = 0.4, color = 'r', histtype='step',density=True,bins=bin_edges)
plt.hist(Y_test.prediction_RF[Y_test.label == 1],label = "real test",alpha = 0.4, color = "b",density=True,bins=bin_edges)
plt.hist(Y_train.prediction_RF[Y_train.label == 1],label='real train', alpha = 0.4, color = 'b', histtype='step',density=True,bins=bin_edges)
plt.xlabel("Geschätzte Likelihood")
plt.ylabel("Dichte")
plt.legend(loc='upper center')
#plt.show()
plt.savefig("../build/plots/bow/500/RF/prob_bow_best_nn.pdf")
plt.close()
fpr_RF = dict()
tpr_RF = dict()
roc_auc_RF = dict()
fpr_RF, tpr_RF, _ = roc_curve(Y_test.label, Y_test.prediction_RF)
roc_auc_RF = auc(fpr_RF, tpr_RF)
plt.figure()
lw = 2
plt.plot(fpr_RF, tpr_RF, color='darkorange',
lw=lw, label='ROC Kurve (AUC = %0.2f)' % roc_auc_RF)
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.legend(loc="lower right")
#plt.show()
plt.savefig("../build/plots/bow/500/RF/roc_Hyperopt_bow_best_nn.pdf")
plt.close()
###Output
precision recall f1-score support
0 0.87 0.73 0.79 3658
1 0.81 0.91 0.86 4713
micro avg 0.83 0.83 0.83 8371
macro avg 0.84 0.82 0.83 8371
weighted avg 0.84 0.83 0.83 8371
Schätzung 0 1
Wahrheit
0 2679 979
1 405 4308
###Markdown
Comparison: the Sequential model versus the RF in the ROC curve
###Code
plt.figure()
lw = 2
plt.plot(fpr, tpr, color='darkorange',
lw=lw, label='Sequential (AUC = %0.2f)' % roc_auc)
plt.plot(fpr_RF, tpr_RF, color='darkred',
lw=lw, label='RandomForest (AUC = %0.2f)' % roc_auc_RF)
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.legend(loc="lower right")
#plt.show()
plt.savefig("../build/plots/bow/500/roc_comparison.pdf")
plt.close()
###Output
_____no_output_____
###Markdown
Drawing the model: the network architecture is rendered as a graphviz diagram
###Code
from graphviz import Digraph
m = Digraph(name='Modell', node_attr={'shape': 'record'})
m.attr(rankdir='LR')
m.node("input", r"𝐄𝐢𝐧𝐠𝐚𝐧𝐠𝐬 𝐋𝐚𝐠𝐞 | 500 Wörter ")
m.node("first", r"𝐄𝐫𝐬𝐭𝐞 𝐋𝐚𝐠𝐞|Neuronen: 1770|Aktivierung: ReLu|L1: 0.00011")
m.node("second", r"𝐙𝐰𝐞𝐢𝐭𝐞 𝐋𝐚𝐠𝐞|Neuronen: 9|Aktivierung: ReLu|L2: 0.0272")
m.node("output", r"𝐀𝐮𝐬𝐠𝐚𝐧𝐠𝐬 𝐋𝐚𝐠𝐞|Neuronen: 1 |Aktivierung: Sigmoid")
m.edge('input', 'first')
m.edge('first', 'second')
m.edge('second', 'output')
m.render('../build/plots/bow/500/modell_scheme', view=True)
###Output
_____no_output_____ |
docs/memo/pipeline/filters.ipynb | ###Markdown
Filters. A Filter is a function from an asset and a moment in time to a boolean: ```F(asset, timestamp) -> boolean``` In Pipeline, [Filters](https://www.quantopian.com/helpquantopian_pipeline_filters_Filter) are used for narrowing down the set of securities included in a computation or in the final output of a pipeline. There are two common ways to create a `Filter`: comparison operators and `Factor`/`Classifier` methods. Comparison operators: a comparison on a `Factor` or `Classifier` produces a `Filter`. In the example below, the filter returns `True` whenever the latest close price is above 100.
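Besides comparing a factor with a constant (as in the cell below), two factors can also be compared with each other; in both cases the result is a `Filter`. A minimal construction-only sketch (no backtest is run here), reusing the imports already seen in this notebook:
```python
from zipline.pipeline.data import USEquityPricing
from zipline.pipeline.factors import SimpleMovingAverage

latest_close = USEquityPricing.close.latest
mean_close_10 = SimpleMovingAverage(inputs=[USEquityPricing.close], window_length=10)

above_100 = latest_close > 100                    # Factor vs. scalar  -> Filter
above_short_mean = latest_close > mean_close_10   # Factor vs. Factor -> Filter
```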
###Code
%%zipline --start 2016-5-2 --end 2016-5-10 --capital-base 100000 --bm_symbol 399001
from zipline.pipeline import Pipeline
from zipline.pipeline import Fundamentals
from zipline.pipeline.data import USEquityPricing
from zipline.pipeline.factors import SimpleMovingAverage, AverageDollarVolume
from zipline.api import attach_pipeline, pipeline_output
from zipline.api import symbol, sid, get_datetime
import pandas as pd
def make_pipeline():
last_close_price = USEquityPricing.close.latest
close_price_filter = last_close_price > 100
return Pipeline(
columns={
'last_close_price': last_close_price,
},
screen=close_price_filter
)
def initialize(context):
attach_pipeline(make_pipeline(), 'example')
def handle_data(context, data):
today = get_datetime('Asia/Shanghai')
output = pipeline_output('example')
print('日期 {} 结果:\n {}'.format(today, output))
###Output
日期 2016-05-03 15:00:00+08:00 结果:
last_close_price
Equity(002466 [天齐锂业]) 158.370
Equity(002558 [巨人网络]) 117.010
Equity(002709 [天赐材料]) 111.590
Equity(002712 [思美传媒]) 114.200
Equity(002777 [久远银海]) 103.360
Equity(I000001 [上证指数]) 2938.323
Equity(I000002 [A股指数]) 3074.873
Equity(I000003 [B股指数]) 368.314
Equity(I000016 [上证50]) 2135.508
Equity(I000300 [沪深300]) 3156.745
Equity(300438 [鹏辉能源]) 110.800
Equity(300449 [汉邦高科]) 100.480
Equity(300451 [创业软件]) 131.390
Equity(300469 [信息发展]) 111.170
Equity(300474 [景嘉微]) 123.750
Equity(300484 [蓝海华腾]) 172.050
Equity(300496 [中科创达]) 211.600
Equity(I399001 [深证成指]) 10141.541
Equity(I399002 [深成指R]) 11868.595
Equity(I399003 [成份B指]) 6325.164
Equity(I399006 [创业板指]) 2138.738
Equity(I399102 [创业板综]) 2572.799
Equity(I399106 [深证综指]) 1873.993
Equity(600519 [贵州茅台]) 251.200
日期 2016-05-04 15:00:00+08:00 结果:
last_close_price
Equity(002466 [天齐锂业]) 168.210
Equity(002558 [巨人网络]) 119.100
Equity(002709 [天赐材料]) 115.920
Equity(002712 [思美传媒]) 114.200
Equity(002729 [好利来]) 104.300
Equity(002777 [久远银海]) 109.200
Equity(I000001 [上证指数]) 2992.643
Equity(I000002 [A股指数]) 3131.787
Equity(I000003 [B股指数]) 373.189
Equity(I000016 [上证50]) 2158.271
Equity(I000300 [沪深300]) 3213.539
Equity(300438 [鹏辉能源]) 116.080
Equity(300449 [汉邦高科]) 103.000
Equity(300451 [创业软件]) 137.600
Equity(300469 [信息发展]) 117.830
Equity(300474 [景嘉微]) 114.600
Equity(300484 [蓝海华腾]) 180.320
Equity(300496 [中科创达]) 227.820
Equity(I399001 [深证成指]) 10441.920
Equity(I399002 [深成指R]) 12220.850
Equity(I399003 [成份B指]) 6296.675
Equity(I399006 [创业板指]) 2217.230
Equity(I399102 [创业板综]) 2666.870
Equity(I399106 [深证综指]) 1929.029
Equity(600519 [贵州茅台]) 260.000
日期 2016-05-05 15:00:00+08:00 结果:
last_close_price
Equity(002407 [多氟多]) 100.100
Equity(002466 [天齐锂业]) 175.990
Equity(002558 [巨人网络]) 119.110
Equity(002709 [天赐材料]) 116.000
Equity(002712 [思美传媒]) 114.200
Equity(002729 [好利来]) 105.910
Equity(002777 [久远银海]) 107.900
Equity(I000001 [上证指数]) 2991.272
Equity(I000002 [A股指数]) 3130.355
Equity(I000003 [B股指数]) 372.919
Equity(I000016 [上证50]) 2152.933
Equity(I000300 [沪深300]) 3209.461
Equity(300438 [鹏辉能源]) 113.300
Equity(300449 [汉邦高科]) 101.500
Equity(300451 [创业软件]) 136.990
Equity(300469 [信息发展]) 115.950
Equity(300474 [景嘉微]) 115.300
Equity(300484 [蓝海华腾]) 183.500
Equity(300496 [中科创达]) 223.810
Equity(I399001 [深证成指]) 10422.802
Equity(I399002 [深成指R]) 12199.205
Equity(I399003 [成份B指]) 6237.359
Equity(I399006 [创业板指]) 2211.018
Equity(I399102 [创业板综]) 2667.932
Equity(I399106 [深证综指]) 1928.632
Equity(600519 [贵州茅台]) 256.209
日期 2016-05-06 15:00:00+08:00 结果:
last_close_price
Equity(002407 [多氟多]) 105.610
Equity(002466 [天齐锂业]) 182.560
Equity(002558 [巨人网络]) 130.000
Equity(002709 [天赐材料]) 127.600
Equity(002712 [思美传媒]) 114.200
Equity(002729 [好利来]) 101.620
Equity(002777 [久远银海]) 114.300
Equity(I000001 [上证指数]) 2997.841
Equity(I000002 [A股指数]) 3137.263
Equity(I000003 [B股指数]) 372.837
Equity(I000016 [上证50]) 2152.882
Equity(I000300 [沪深300]) 3213.919
Equity(300113 [顺网科技]) 100.810
Equity(300438 [鹏辉能源]) 123.790
Equity(300449 [汉邦高科]) 100.100
Equity(300451 [创业软件]) 143.880
Equity(300469 [信息发展]) 121.460
Equity(300474 [景嘉微]) 117.960
Equity(300484 [蓝海华腾]) 195.750
Equity(300496 [中科创达]) 237.900
Equity(I399001 [深证成指]) 10474.013
Equity(I399002 [深成指R]) 12259.181
Equity(I399003 [成份B指]) 6132.175
Equity(I399006 [创业板指]) 2224.095
Equity(I399102 [创业板综]) 2690.993
Equity(I399106 [深证综指]) 1942.543
Equity(600519 [贵州茅台]) 257.769
日期 2016-05-09 15:00:00+08:00 结果:
last_close_price
Equity(002407 [多氟多]) 106.150
Equity(002466 [天齐锂业]) 175.700
Equity(002558 [巨人网络]) 125.960
Equity(002709 [天赐材料]) 123.900
Equity(002712 [思美传媒]) 114.200
Equity(002777 [久远银海]) 104.600
Equity(I000001 [上证指数]) 2913.247
Equity(I000002 [A股指数]) 3048.577
Equity(I000003 [B股指数]) 366.679
Equity(I000016 [上证50]) 2107.008
Equity(I000300 [沪深300]) 3130.354
Equity(300438 [鹏辉能源]) 117.000
Equity(300451 [创业软件]) 134.980
Equity(300469 [信息发展]) 112.800
Equity(300474 [景嘉微]) 111.780
Equity(300484 [蓝海华腾]) 180.000
Equity(300496 [中科创达]) 236.070
Equity(I399001 [深证成指]) 10100.535
Equity(I399002 [深成指R]) 11828.582
Equity(I399003 [成份B指]) 5968.173
Equity(I399006 [创业板指]) 2129.194
Equity(I399102 [创业板综]) 2580.735
Equity(I399106 [深证综指]) 1871.609
Equity(600519 [贵州茅台]) 252.220
日期 2016-05-10 15:00:00+08:00 结果:
last_close_price
Equity(002407 [多氟多]) 105.720
Equity(002466 [天齐锂业]) 174.580
Equity(002558 [巨人网络]) 128.889
Equity(002709 [天赐材料]) 134.740
Equity(002712 [思美传媒]) 114.200
Equity(I000001 [上证指数]) 2832.112
Equity(I000002 [A股指数]) 2963.542
Equity(I000003 [B股指数]) 360.041
Equity(I000016 [上证50]) 2072.467
Equity(I000300 [沪深300]) 3065.615
Equity(300438 [鹏辉能源]) 122.330
Equity(300451 [创业软件]) 125.800
Equity(300469 [信息发展]) 104.760
Equity(300474 [景嘉微]) 102.430
Equity(300484 [蓝海华腾]) 179.200
Equity(300496 [中科创达]) 227.000
Equity(I399001 [深证成指]) 9790.476
Equity(I399002 [深成指R]) 11465.800
Equity(I399003 [成份B指]) 5822.897
Equity(I399006 [创业板指]) 2053.597
Equity(I399102 [创业板综]) 2479.196
Equity(I399106 [深证综指]) 1804.344
Equity(600519 [贵州茅台]) 250.820
[2018-01-09 01:10:18.139825] INFO: Performance: Simulated 6 trading days out of 6.
[2018-01-09 01:10:18.149831] INFO: Performance: first open: 2016-05-03 01:31:00+00:00
[2018-01-09 01:10:18.150837] INFO: Performance: last close: 2016-05-10 07:00:00+00:00
###Markdown
Some `Factor`/`Classifier` methods produce filters, for example `Factor.top(n)`:
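Filters obtained from factor methods can also be combined with filters built from comparisons using `&` (and), `|` (or) and `~` (not). A construction-only sketch (the combined filter is an illustrative assumption, not used in the cell below):
```python
from zipline.pipeline.data import USEquityPricing
from zipline.pipeline.factors import AverageDollarVolume

dollar_volume = AverageDollarVolume(window_length=30)
liquid_200 = dollar_volume.top(200)             # Filter: 200 highest dollar-volume assets
expensive = USEquityPricing.close.latest > 100  # Filter: latest close above 100

liquid_and_expensive = liquid_200 & expensive   # filters combine with & (and), | (or)
```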
###Code
%%zipline --start 2016-5-2 --end 2016-5-10 --capital-base 100000
from zipline.pipeline import Pipeline
from zipline.pipeline import Fundamentals
from zipline.pipeline.data import USEquityPricing
from zipline.pipeline.factors import SimpleMovingAverage, AverageDollarVolume
from zipline.api import attach_pipeline, pipeline_output
from zipline.api import symbol, sid, get_datetime
import pandas as pd
def make_pipeline():
last_close_price = USEquityPricing.close.latest
top_close_price_filter = last_close_price.top(200)
return Pipeline(
columns={
'last_close_price': last_close_price,
},
screen=top_close_price_filter
)
def initialize(context):
attach_pipeline(make_pipeline(), 'example')
def handle_data(context, data):
today = get_datetime('Asia/Shanghai')
output = pipeline_output('example')
# Note: the result itself is not sorted by value (the index is in default ascending order)
print('日期 {} 结果:\n {}'.format(today, output.sort_values('last_close_price').tail()))
###Output
日期 2016-05-03 15:00:00+08:00 结果:
last_close_price
Equity(I000002 [A股指数]) 3074.873
Equity(I000300 [沪深300]) 3156.745
Equity(I399003 [成份B指]) 6325.164
Equity(I399001 [深证成指]) 10141.541
Equity(I399002 [深成指R]) 11868.595
日期 2016-05-04 15:00:00+08:00 结果:
last_close_price
Equity(I000002 [A股指数]) 3131.787
Equity(I000300 [沪深300]) 3213.539
Equity(I399003 [成份B指]) 6296.675
Equity(I399001 [深证成指]) 10441.920
Equity(I399002 [深成指R]) 12220.850
日期 2016-05-05 15:00:00+08:00 结果:
last_close_price
Equity(I000002 [A股指数]) 3130.355
Equity(I000300 [沪深300]) 3209.461
Equity(I399003 [成份B指]) 6237.359
Equity(I399001 [深证成指]) 10422.802
Equity(I399002 [深成指R]) 12199.205
日期 2016-05-06 15:00:00+08:00 结果:
last_close_price
Equity(I000002 [A股指数]) 3137.263
Equity(I000300 [沪深300]) 3213.919
Equity(I399003 [成份B指]) 6132.175
Equity(I399001 [深证成指]) 10474.013
Equity(I399002 [深成指R]) 12259.181
日期 2016-05-09 15:00:00+08:00 结果:
last_close_price
Equity(I000002 [A股指数]) 3048.577
Equity(I000300 [沪深300]) 3130.354
Equity(I399003 [成份B指]) 5968.173
Equity(I399001 [深证成指]) 10100.535
Equity(I399002 [深成指R]) 11828.582
日期 2016-05-10 15:00:00+08:00 结果:
last_close_price
Equity(I000002 [A股指数]) 2963.542
Equity(I000300 [沪深300]) 3065.615
Equity(I399003 [成份B指]) 5822.897
Equity(I399001 [深证成指]) 9790.476
Equity(I399002 [深成指R]) 11465.800
[2018-01-09 01:10:19.731890] INFO: Performance: Simulated 6 trading days out of 6.
[2018-01-09 01:10:19.731890] INFO: Performance: first open: 2016-05-03 01:31:00+00:00
[2018-01-09 01:10:19.734896] INFO: Performance: last close: 2016-05-10 07:00:00+00:00
###Markdown
`AverageDollarVolume` computes the average daily dollar volume over a trailing window; here it is used to build a liquidity filter:
###Code
%%zipline --start 2016-5-2 --end 2016-5-10 --capital-base 100000
from zipline.pipeline import Pipeline
from zipline.pipeline import Fundamentals
from zipline.pipeline.data import USEquityPricing
from zipline.pipeline.factors import SimpleMovingAverage, AverageDollarVolume
from zipline.api import attach_pipeline, pipeline_output
from zipline.api import symbol, sid, get_datetime
def make_pipeline():
mean_close_10 = SimpleMovingAverage(inputs=[USEquityPricing.close], window_length=10)
mean_close_30 = SimpleMovingAverage(inputs=[USEquityPricing.close], window_length=30)
percent_difference = (mean_close_10 - mean_close_30) / mean_close_30
dollar_volume = AverageDollarVolume(window_length=30)
# average dollar volume greater than 1 billion
high_dollar_volume = (dollar_volume > 1000000000)
return Pipeline(
columns={
'amount': USEquityPricing.amount.latest,
'percent_difference': percent_difference,
'high_dollar_volume': high_dollar_volume
}
)
def initialize(context):
attach_pipeline(make_pipeline(), 'example')
def handle_data(context, data):
today = get_datetime('Asia/Shanghai')
output = pipeline_output('example')
# Note: the result itself is not sorted (the index is in default ascending order)
print('日期 {} 结果:\n {}'.format(today, output))
###Output
日期 2016-05-03 15:00:00+08:00 结果:
amount high_dollar_volume percent_difference
Equity(000001 [平安银行]) 4.285800e+08 False -4.197338e-03
Equity(000002 [万 科A]) 0.000000e+00 False 5.816969e-16
Equity(000004 [国农科技]) 0.000000e+00 False 9.613671e-03
Equity(000005 [世纪星源]) 4.420000e+07 False -4.377113e-02
Equity(000006 [深振业A]) 6.893000e+07 False -1.368466e-02
Equity(000007 [全新好]) 0.000000e+00 False 0.000000e+00
Equity(000008 [神州高铁]) 6.639000e+07 False -2.203059e-04
Equity(000009 [中国宝安]) 8.160100e+08 True 1.058023e-03
Equity(000010 [美丽生态]) 8.388000e+07 False 2.893666e-02
Equity(000011 [深物业A]) 9.628000e+07 False 8.060946e-03
Equity(000012 [南 玻A]) 2.115400e+08 False 4.579174e-03
Equity(000014 [沙河股份]) 7.950000e+07 False 7.410898e-02
Equity(000016 [深康佳A]) 7.446000e+07 False -1.278142e-02
Equity(000017 [深中华A]) 8.759000e+07 False 7.775183e-03
Equity(000018 [神州长城]) 1.351000e+08 False -4.289365e-02
Equity(000019 [深深宝A]) 3.325500e+08 False 6.568053e-02
Equity(000020 [深华发A]) 7.913000e+07 False -1.837093e-03
Equity(000021 [深科技]) 1.335100e+08 False -1.321033e-02
Equity(000022 [深赤湾A]) 2.585000e+07 False -1.528243e-02
Equity(000023 [深天地A]) 1.645400e+08 False 6.367730e-02
Equity(000025 [特 力A]) 1.402900e+08 False -3.505807e-02
Equity(000026 [飞亚达A]) 4.023000e+07 False -3.581436e-02
Equity(000027 [深圳能源]) 4.414000e+07 False -3.171198e-02
Equity(000028 [国药一致]) 1.238400e+08 False 2.760812e-02
Equity(000029 [深深房A]) 8.413000e+07 False -1.235490e-02
Equity(000030 [富奥股份]) 2.721000e+07 False -2.284726e-02
Equity(000031 [中粮地产]) 1.274900e+08 False -4.662847e-02
Equity(000032 [深桑达A]) 4.152000e+07 False -3.485386e-02
Equity(000033 [新都退]) 0.000000e+00 False NaN
Equity(000034 [神州数码]) 2.282400e+08 False -1.907231e-01
... ... ... ...
Equity(603806 [福斯特]) 7.201000e+07 False -2.570832e-03
Equity(603808 [歌力思]) 0.000000e+00 False 4.738626e-02
Equity(603818 [曲美家居]) 3.507000e+07 False -5.176335e-02
Equity(603822 [嘉澳环保]) 4.000000e+04 False 0.000000e+00
Equity(603828 [柯利达]) 4.298000e+07 False -3.757429e-02
Equity(603838 [四通股份]) 0.000000e+00 False -6.750258e-04
Equity(603861 [白云电器]) 1.348000e+08 False 1.482979e-01
Equity(603866 [桃李面包]) 1.582600e+08 False 3.660669e-02
Equity(603868 [飞科电器]) 1.423320e+09 False 0.000000e+00
Equity(603869 [北部湾旅]) 4.300000e+07 False -3.364202e-02
Equity(603883 [老百姓]) 6.152000e+07 False 1.151010e-02
Equity(603885 [吉祥航空]) 6.718000e+07 False 1.719471e-02
Equity(603889 [新澳股份]) 0.000000e+00 False -5.693451e-16
Equity(603898 [好莱客]) 6.387000e+07 False 4.456178e-02
Equity(603899 [晨光文具]) 1.237100e+08 False -2.222321e-03
Equity(603901 [永创智能]) 9.081000e+07 False -1.641140e-02
Equity(603918 [金桥信息]) 0.000000e+00 False 2.985075e-03
Equity(603919 [金徽酒]) 1.856200e+08 False -2.562604e-02
Equity(603936 [博敏电子]) 9.797000e+07 False -2.045646e-02
Equity(603939 [益丰药房]) 4.365000e+07 False 2.419415e-02
Equity(603968 [醋化股份]) 9.353000e+07 False 9.159574e-02
Equity(603969 [银龙股份]) 3.603000e+07 False -1.169915e-02
Equity(603979 [金诚信]) 4.234000e+07 False -3.424991e-02
Equity(603988 [中电电机]) 2.582500e+08 False 1.081822e-01
Equity(603989 [艾华集团]) 1.177100e+08 False 1.830384e-02
Equity(603993 [洛阳钼业]) 0.000000e+00 False -2.615694e-02
Equity(603996 [中新科技]) 8.860000e+07 False 1.266364e-02
Equity(603997 [继峰股份]) 5.378000e+07 False -6.025882e-02
Equity(603998 [方盛制药]) 1.735800e+08 False -9.163708e-03
Equity(603999 [读者传媒]) 8.001000e+07 False -4.237137e-02
[2858 rows x 3 columns]
日期 2016-05-04 15:00:00+08:00 结果:
amount high_dollar_volume percent_difference
Equity(000001 [平安银行]) 5.211300e+08 False -5.103479e-03
Equity(000002 [万 科A]) 0.000000e+00 False 5.816969e-16
Equity(000004 [国农科技]) 0.000000e+00 False 5.643033e-03
Equity(000005 [世纪星源]) 9.187000e+07 False -4.707313e-02
Equity(000006 [深振业A]) 1.069000e+08 False -1.522361e-02
Equity(000007 [全新好]) 0.000000e+00 False 0.000000e+00
Equity(000008 [神州高铁]) 1.430800e+08 False -2.762518e-03
Equity(000009 [中国宝安]) 1.035840e+09 True -6.473889e-04
Equity(000010 [美丽生态]) 1.577000e+08 False 3.556243e-02
Equity(000011 [深物业A]) 1.449100e+08 False 8.471421e-03
Equity(000012 [南 玻A]) 3.025100e+08 False 7.642653e-03
Equity(000014 [沙河股份]) 1.127200e+08 False 6.771589e-02
Equity(000016 [深康佳A]) 1.863300e+08 False -1.810043e-02
Equity(000017 [深中华A]) 9.283000e+07 False 3.393155e-03
Equity(000018 [神州长城]) 3.147400e+08 False -5.256490e-02
Equity(000019 [深深宝A]) 4.158900e+08 False 7.120504e-02
Equity(000020 [深华发A]) 1.296900e+08 False -4.841409e-04
Equity(000021 [深科技]) 2.519700e+08 False -1.480663e-02
Equity(000022 [深赤湾A]) 4.381000e+07 False -2.020500e-02
Equity(000023 [深天地A]) 1.489700e+08 False 6.800683e-02
Equity(000025 [特 力A]) 3.036300e+08 False -3.975018e-02
Equity(000026 [飞亚达A]) 5.362000e+07 False -3.958623e-02
Equity(000027 [深圳能源]) 8.613000e+07 False -3.505905e-02
Equity(000028 [国药一致]) 2.190600e+08 False 2.766267e-02
Equity(000029 [深深房A]) 9.527000e+07 False -1.041877e-02
Equity(000030 [富奥股份]) 5.772000e+07 False -2.519260e-02
Equity(000031 [中粮地产]) 1.937800e+08 False -5.233576e-02
Equity(000032 [深桑达A]) 6.057000e+07 False -4.442586e-02
Equity(000033 [新都退]) 0.000000e+00 False NaN
Equity(000034 [神州数码]) 1.868300e+08 False -1.926659e-01
... ... ... ...
Equity(603806 [福斯特]) 1.017200e+08 False -3.731406e-03
Equity(603808 [歌力思]) 0.000000e+00 False 4.611040e-02
Equity(603818 [曲美家居]) 7.810000e+07 False -5.743578e-02
Equity(603822 [嘉澳环保]) 4.000000e+04 False 0.000000e+00
Equity(603828 [柯利达]) 0.000000e+00 False -4.385730e-02
Equity(603838 [四通股份]) 0.000000e+00 False -7.671437e-04
Equity(603861 [白云电器]) 2.026200e+08 False 1.397064e-01
Equity(603866 [桃李面包]) 4.691600e+08 False 3.872320e-02
Equity(603868 [飞科电器]) 9.595100e+08 False 3.948974e-02
Equity(603869 [北部湾旅]) 6.435000e+07 False -4.059409e-02
Equity(603883 [老百姓]) 1.080500e+08 False 6.693127e-03
Equity(603885 [吉祥航空]) 1.365600e+08 False 1.506589e-02
Equity(603889 [新澳股份]) 0.000000e+00 False -5.693451e-16
Equity(603898 [好莱客]) 8.711000e+07 False 4.984511e-02
Equity(603899 [晨光文具]) 1.173337e+08 False -9.931631e-04
Equity(603901 [永创智能]) 1.128400e+08 False -1.961950e-02
Equity(603918 [金桥信息]) 0.000000e+00 False 1.622456e-03
Equity(603919 [金徽酒]) 5.135700e+08 False -1.826844e-02
Equity(603936 [博敏电子]) 1.517200e+08 False -2.698047e-02
Equity(603939 [益丰药房]) 6.310000e+07 False 1.898376e-02
Equity(603968 [醋化股份]) 1.252100e+08 False 9.270667e-02
Equity(603969 [银龙股份]) 7.447000e+07 False -1.785327e-02
Equity(603979 [金诚信]) 8.302000e+07 False -4.117806e-02
Equity(603988 [中电电机]) 2.073800e+08 False 9.719844e-02
Equity(603989 [艾华集团]) 1.407300e+08 False 1.554582e-02
Equity(603993 [洛阳钼业]) 0.000000e+00 False -2.830279e-02
Equity(603996 [中新科技]) 1.454000e+08 False 3.796150e-03
Equity(603997 [继峰股份]) 8.577000e+07 False -6.489910e-02
Equity(603998 [方盛制药]) 2.157100e+08 False -1.750944e-02
Equity(603999 [读者传媒]) 1.536500e+08 False -4.998479e-02
[2859 rows x 3 columns]
日期 2016-05-05 15:00:00+08:00 结果:
amount high_dollar_volume percent_difference
Equity(000001 [平安银行]) 4.418400e+08 False -5.323980e-03
Equity(000002 [万 科A]) 0.000000e+00 False 5.816969e-16
Equity(000004 [国农科技]) 0.000000e+00 False 3.041169e-03
Equity(000005 [世纪星源]) 9.830000e+07 False -4.988653e-02
Equity(000006 [深振业A]) 1.070200e+08 False -1.859780e-02
Equity(000007 [全新好]) 0.000000e+00 False 0.000000e+00
Equity(000008 [神州高铁]) 1.792700e+08 False -3.694890e-03
Equity(000009 [中国宝安]) 7.163100e+08 True -6.847019e-03
Equity(000010 [美丽生态]) 1.439700e+08 False 4.020392e-02
Equity(000011 [深物业A]) 1.840800e+08 False 5.969175e-03
Equity(000012 [南 玻A]) 2.517800e+08 False 9.765447e-03
Equity(000014 [沙河股份]) 1.581600e+08 False 5.816863e-02
Equity(000016 [深康佳A]) 2.576900e+08 False -2.224240e-02
Equity(000017 [深中华A]) 1.105600e+08 False -2.625089e-03
Equity(000018 [神州长城]) 3.373600e+08 False -5.601966e-02
Equity(000019 [深深宝A]) 5.728700e+08 False 6.970726e-02
Equity(000020 [深华发A]) 1.189600e+08 False -1.110340e-03
Equity(000021 [深科技]) 2.391600e+08 False -1.637715e-02
Equity(000022 [深赤湾A]) 4.453000e+07 False -2.625256e-02
Equity(000023 [深天地A]) 2.083900e+08 False 6.433806e-02
Equity(000025 [特 力A]) 4.769100e+08 False -4.285755e-02
Equity(000026 [飞亚达A]) 7.256000e+07 False -4.398167e-02
Equity(000027 [深圳能源]) 6.164000e+07 False -3.747894e-02
Equity(000028 [国药一致]) 1.163100e+08 False 2.568259e-02
Equity(000029 [深深房A]) 1.043300e+08 False -1.019936e-02
Equity(000030 [富奥股份]) 3.996000e+07 False -2.622037e-02
Equity(000031 [中粮地产]) 2.790700e+08 False -5.558812e-02
Equity(000032 [深桑达A]) 7.070000e+07 False -5.318746e-02
Equity(000033 [新都退]) 0.000000e+00 False NaN
Equity(000034 [神州数码]) 2.364200e+08 False -1.862329e-01
... ... ... ...
Equity(603806 [福斯特]) 1.149500e+08 False -4.564769e-03
Equity(603808 [歌力思]) 0.000000e+00 False 4.102366e-02
Equity(603818 [曲美家居]) 8.171000e+07 False -6.329309e-02
Equity(603822 [嘉澳环保]) 8.000000e+04 False 0.000000e+00
Equity(603828 [柯利达]) 0.000000e+00 False -4.993221e-02
Equity(603838 [四通股份]) 0.000000e+00 False -1.014657e-04
Equity(603861 [白云电器]) 2.189000e+08 False 1.266448e-01
Equity(603866 [桃李面包]) 3.647500e+08 False 3.917222e-02
Equity(603868 [飞科电器]) 6.251900e+08 False 7.643259e-02
Equity(603869 [北部湾旅]) 6.741000e+07 False -4.682107e-02
Equity(603883 [老百姓]) 1.779200e+08 False -7.405731e-04
Equity(603885 [吉祥航空]) 3.762000e+08 False 1.939030e-02
Equity(603889 [新澳股份]) 0.000000e+00 False -5.693451e-16
Equity(603898 [好莱客]) 6.283000e+07 False 4.926979e-02
Equity(603899 [晨光文具]) 1.224400e+08 False 3.777729e-03
Equity(603901 [永创智能]) 9.136000e+07 False -2.052725e-02
Equity(603918 [金桥信息]) 0.000000e+00 False 1.627351e-03
Equity(603919 [金徽酒]) 7.219000e+08 False -8.573537e-03
Equity(603936 [博敏电子]) 3.001300e+08 False -3.041395e-02
Equity(603939 [益丰药房]) 8.817000e+07 False 1.339123e-02
Equity(603968 [醋化股份]) 1.895559e+08 False 1.000176e-01
Equity(603969 [银龙股份]) 1.014500e+08 False -2.141240e-02
Equity(603979 [金诚信]) 8.282000e+07 False -4.460412e-02
Equity(603988 [中电电机]) 1.920300e+08 False 9.112956e-02
Equity(603989 [艾华集团]) 1.609500e+08 False 1.222321e-02
Equity(603993 [洛阳钼业]) 0.000000e+00 False -2.998847e-02
Equity(603996 [中新科技]) 2.614400e+08 False -2.240912e-03
Equity(603997 [继峰股份]) 1.475000e+08 False -6.888818e-02
Equity(603998 [方盛制药]) 1.124523e+08 False -2.285702e-02
Equity(603999 [读者传媒]) 2.135800e+08 False -5.635433e-02
[2859 rows x 3 columns]
日期 2016-05-06 15:00:00+08:00 结果:
amount high_dollar_volume percent_difference
Equity(000001 [平安银行]) 256000000.0 False -3.444928e-03
Equity(000002 [万 科A]) 0.0 False 5.816969e-16
Equity(000004 [国农科技]) 0.0 False -5.787750e-16
Equity(000005 [世纪星源]) 70290000.0 False -4.733310e-02
Equity(000006 [深振业A]) 79880000.0 False -2.168038e-02
Equity(000007 [全新好]) 0.0 False 0.000000e+00
Equity(000008 [神州高铁]) 136440000.0 False -1.030542e-03
Equity(000009 [中国宝安]) 543820000.0 True -7.716566e-03
Equity(000010 [美丽生态]) 222880000.0 False 4.722105e-02
Equity(000011 [深物业A]) 123920000.0 False -7.703081e-04
Equity(000012 [南 玻A]) 188290000.0 False 1.421671e-02
Equity(000014 [沙河股份]) 119150000.0 False 4.859110e-02
Equity(000016 [深康佳A]) 139670000.0 False -2.119798e-02
Equity(000017 [深中华A]) 111800000.0 False -9.066327e-03
Equity(000018 [神州长城]) 425980000.0 False -4.825382e-02
Equity(000019 [深深宝A]) 381220000.0 False 5.760244e-02
Equity(000020 [深华发A]) 94440000.0 False -1.064318e-02
Equity(000021 [深科技]) 154010000.0 False -1.297937e-02
Equity(000022 [深赤湾A]) 30760000.0 False -2.660141e-02
Equity(000023 [深天地A]) 239170000.0 False 4.705512e-02
Equity(000025 [特 力A]) 335430000.0 False -4.268304e-02
Equity(000026 [飞亚达A]) 57100000.0 False -4.196198e-02
Equity(000027 [深圳能源]) 45280000.0 False -3.599044e-02
Equity(000028 [国药一致]) 96170000.0 False 2.746071e-02
Equity(000029 [深深房A]) 79850000.0 False -1.862580e-02
Equity(000030 [富奥股份]) 25880000.0 False -1.952901e-02
Equity(000031 [中粮地产]) 153850000.0 False -5.487465e-02
Equity(000032 [深桑达A]) 54360000.0 False -5.499901e-02
Equity(000033 [新都退]) 0.0 False NaN
Equity(000034 [神州数码]) 190020000.0 False -1.841329e-01
... ... ... ...
Equity(603806 [福斯特]) 130990000.0 False -1.140016e-03
Equity(603808 [歌力思]) 0.0 False 3.624663e-02
Equity(603818 [曲美家居]) 63530000.0 False -6.090801e-02
Equity(603822 [嘉澳环保]) 130000.0 False 0.000000e+00
Equity(603828 [柯利达]) 0.0 False -4.966191e-02
Equity(603838 [四通股份]) 0.0 False 2.184941e-16
Equity(603861 [白云电器]) 147760000.0 False 9.161121e-02
Equity(603866 [桃李面包]) 382310000.0 False 4.835959e-02
Equity(603868 [飞科电器]) 616060000.0 False 1.091592e-01
Equity(603869 [北部湾旅]) 0.0 False -4.598266e-02
Equity(603883 [老百姓]) 121900000.0 False -9.161516e-04
Equity(603885 [吉祥航空]) 507740000.0 False 2.925512e-02
Equity(603889 [新澳股份]) 0.0 False -5.693451e-16
Equity(603898 [好莱客]) 51360000.0 False 5.406307e-02
Equity(603899 [晨光文具]) 238810000.0 False 4.504269e-03
Equity(603901 [永创智能]) 178520000.0 False -1.775090e-02
Equity(603918 [金桥信息]) 0.0 False 8.007969e-04
Equity(603919 [金徽酒]) 489360000.0 False 5.816686e-03
Equity(603936 [博敏电子]) 209240000.0 False -2.542388e-02
Equity(603939 [益丰药房]) 48200000.0 False 1.379733e-02
Equity(603968 [醋化股份]) 123560000.0 False 9.473151e-02
Equity(603969 [银龙股份]) 55170000.0 False -2.065280e-02
Equity(603979 [金诚信]) 64910000.0 False -4.206571e-02
Equity(603988 [中电电机]) 149900000.0 False 8.570162e-02
Equity(603989 [艾华集团]) 97320000.0 False 1.446118e-02
Equity(603993 [洛阳钼业]) 0.0 False -2.819748e-02
Equity(603996 [中新科技]) 192110000.0 False -1.862261e-03
Equity(603997 [继峰股份]) 88470000.0 False -6.614341e-02
Equity(603998 [方盛制药]) 128290000.0 False -1.171204e-02
Equity(603999 [读者传媒]) 140420000.0 False -5.389970e-02
[2859 rows x 3 columns]
日期 2016-05-09 15:00:00+08:00 结果:
amount high_dollar_volume percent_difference
Equity(000001 [平安银行]) 3.674000e+08 False -2.788832e-03
Equity(000002 [万 科A]) 0.000000e+00 False 5.816969e-16
Equity(000004 [国农科技]) 0.000000e+00 False -5.787750e-16
Equity(000005 [世纪星源]) 1.256400e+08 False -4.575635e-02
Equity(000006 [深振业A]) 1.587900e+08 False -2.662896e-02
Equity(000007 [全新好]) 0.000000e+00 False 0.000000e+00
Equity(000008 [神州高铁]) 1.316800e+08 False 1.277696e-03
Equity(000009 [中国宝安]) 1.039640e+09 True -1.123005e-02
Equity(000010 [美丽生态]) 2.323300e+08 False 4.986150e-02
Equity(000011 [深物业A]) 2.048300e+08 False -9.203963e-03
Equity(000012 [南 玻A]) 2.284900e+08 False 1.646416e-02
Equity(000014 [沙河股份]) 1.818400e+08 False 3.894983e-02
Equity(000016 [深康佳A]) 1.756200e+08 False -2.070219e-02
Equity(000017 [深中华A]) 1.776700e+08 False -1.746272e-02
Equity(000018 [神州长城]) 4.630400e+08 False -4.196685e-02
Equity(000019 [深深宝A]) 5.075900e+08 False 3.533270e-02
Equity(000020 [深华发A]) 1.125400e+08 False -1.978289e-02
Equity(000021 [深科技]) 4.643500e+08 False -7.202969e-03
Equity(000022 [深赤湾A]) 4.782000e+07 False -2.778328e-02
Equity(000023 [深天地A]) 1.920000e+08 False 3.543526e-02
Equity(000025 [特 力A]) 3.451300e+08 False -4.342136e-02
Equity(000026 [飞亚达A]) 6.445000e+07 False -4.027478e-02
Equity(000027 [深圳能源]) 7.969000e+07 False -3.514047e-02
Equity(000028 [国药一致]) 9.269000e+07 False 2.751426e-02
Equity(000029 [深深房A]) 1.247000e+08 False -2.896022e-02
Equity(000030 [富奥股份]) 5.598000e+07 False -1.618252e-02
Equity(000031 [中粮地产]) 2.341400e+08 False -5.337271e-02
Equity(000032 [深桑达A]) 7.673000e+07 False -5.583529e-02
Equity(000033 [新都退]) 0.000000e+00 False NaN
Equity(000034 [神州数码]) 2.539700e+08 False -1.782382e-01
... ... ... ...
Equity(603806 [福斯特]) 1.168700e+08 False 1.119823e-03
Equity(603808 [歌力思]) 0.000000e+00 False 3.221565e-02
Equity(603818 [曲美家居]) 1.053600e+08 False -5.712016e-02
Equity(603822 [嘉澳环保]) 2.000000e+05 False 0.000000e+00
Equity(603828 [柯利达]) 0.000000e+00 False -4.548044e-02
Equity(603838 [四通股份]) 0.000000e+00 False 2.184941e-16
Equity(603861 [白云电器]) 2.824100e+08 False 6.223925e-02
Equity(603866 [桃李面包]) 4.592400e+08 False 6.056135e-02
Equity(603868 [飞科电器]) 5.722300e+08 False 1.357829e-01
Equity(603869 [北部湾旅]) 0.000000e+00 False -4.426856e-02
Equity(603883 [老百姓]) 1.460200e+08 False -7.315689e-04
Equity(603885 [吉祥航空]) 4.289500e+08 False 3.949704e-02
Equity(603889 [新澳股份]) 0.000000e+00 False -5.693451e-16
Equity(603898 [好莱客]) 9.382000e+07 False 4.812647e-02
Equity(603899 [晨光文具]) 2.411800e+08 False 3.642961e-03
Equity(603901 [永创智能]) 2.090300e+08 False -1.816810e-02
Equity(603918 [金桥信息]) 0.000000e+00 False 1.045197e-03
Equity(603919 [金徽酒]) 4.083300e+08 False 1.678325e-02
Equity(603936 [博敏电子]) 2.654800e+08 False -1.973572e-02
Equity(603939 [益丰药房]) 7.732000e+07 False 1.188977e-02
Equity(603968 [醋化股份]) 1.570800e+08 False 8.493080e-02
Equity(603969 [银龙股份]) 7.151000e+07 False -2.072389e-02
Equity(603979 [金诚信]) 2.338300e+08 False -3.758663e-02
Equity(603988 [中电电机]) 2.111800e+08 False 7.945134e-02
Equity(603989 [艾华集团]) 1.395500e+08 False 1.540473e-02
Equity(603993 [洛阳钼业]) 0.000000e+00 False -2.671424e-02
Equity(603996 [中新科技]) 4.276900e+08 False 3.078846e-03
Equity(603997 [继峰股份]) 1.205600e+08 False -6.241031e-02
Equity(603998 [方盛制药]) 7.521900e+08 False -3.624075e-03
Equity(603999 [读者传媒]) 3.359500e+08 False -4.883662e-02
[2860 rows x 3 columns]
日期 2016-05-10 15:00:00+08:00 结果:
amount high_dollar_volume percent_difference
Equity(000001 [平安银行]) 428350000.0 False -4.138450e-03
Equity(000002 [万 科A]) 0.0 False 5.816969e-16
Equity(000004 [国农科技]) 0.0 False -5.787750e-16
Equity(000005 [世纪星源]) 108280000.0 False -4.888626e-02
Equity(000006 [深振业A]) 119010000.0 False -3.248184e-02
Equity(000007 [全新好]) 0.0 False 0.000000e+00
Equity(000008 [神州高铁]) 94580000.0 False 2.582693e-03
Equity(000009 [中国宝安]) 661740000.0 True -2.052828e-02
Equity(000010 [美丽生态]) 464750000.0 False 4.932632e-02
Equity(000011 [深物业A]) 117400000.0 False -1.588646e-02
Equity(000012 [南 玻A]) 171880000.0 False 1.563544e-02
Equity(000014 [沙河股份]) 112950000.0 False 2.711899e-02
Equity(000016 [深康佳A]) 132160000.0 False -2.419040e-02
Equity(000017 [深中华A]) 107190000.0 False -2.927051e-02
Equity(000018 [神州长城]) 389260000.0 False -3.763530e-02
Equity(000019 [深深宝A]) 376020000.0 False 1.938055e-02
Equity(000020 [深华发A]) 89700000.0 False -2.778519e-02
Equity(000021 [深科技]) 293590000.0 False -5.655112e-03
Equity(000022 [深赤湾A]) 43070000.0 False -3.226191e-02
Equity(000023 [深天地A]) 128400000.0 False 2.113659e-02
Equity(000025 [特 力A]) 245410000.0 False -4.704963e-02
Equity(000026 [飞亚达A]) 49750000.0 False -4.394267e-02
Equity(000027 [深圳能源]) 75850000.0 False -3.854127e-02
Equity(000028 [国药一致]) 97140000.0 False 2.575182e-02
Equity(000029 [深深房A]) 97980000.0 False -3.494970e-02
Equity(000030 [富奥股份]) 31780000.0 False -1.351651e-02
Equity(000031 [中粮地产]) 197480000.0 False -5.507310e-02
Equity(000032 [深桑达A]) 59880000.0 False -6.002701e-02
Equity(000033 [新都退]) 0.0 False NaN
Equity(000034 [神州数码]) 224680000.0 False -1.767275e-01
... ... ... ...
Equity(603806 [福斯特]) 98110000.0 False 4.943029e-04
Equity(603808 [歌力思]) 0.0 False 2.794341e-02
Equity(603818 [曲美家居]) 60140000.0 False -5.503239e-02
Equity(603822 [嘉澳环保]) 1360000.0 False 0.000000e+00
Equity(603828 [柯利达]) 55280000.0 False -4.840683e-02
Equity(603838 [四通股份]) 0.0 False 2.184941e-16
Equity(603861 [白云电器]) 181910000.0 False 3.375343e-02
Equity(603866 [桃李面包]) 405090000.0 False 7.243487e-02
Equity(603868 [飞科电器]) 429600000.0 False 1.539973e-01
Equity(603869 [北部湾旅]) 0.0 False -4.467604e-02
Equity(603883 [老百姓]) 99650000.0 False -2.368906e-03
Equity(603885 [吉祥航空]) 236650000.0 False 4.607435e-02
Equity(603889 [新澳股份]) 12280000.0 False -6.688963e-03
Equity(603898 [好莱客]) 50820000.0 False 4.177064e-02
Equity(603899 [晨光文具]) 137700000.0 False 2.656412e-03
Equity(603901 [永创智能]) 112270000.0 False -1.825349e-02
Equity(603918 [金桥信息]) 0.0 False 6.240129e-16
Equity(603919 [金徽酒]) 392020000.0 False 2.752503e-02
Equity(603936 [博敏电子]) 171550000.0 False -1.957497e-02
Equity(603939 [益丰药房]) 46550000.0 False 4.433192e-03
Equity(603968 [醋化股份]) 137220000.0 False 7.104044e-02
Equity(603969 [银龙股份]) 67050000.0 False -2.740719e-02
Equity(603979 [金诚信]) 110920000.0 False -3.896033e-02
Equity(603988 [中电电机]) 177970000.0 False 7.497257e-02
Equity(603989 [艾华集团]) 78140000.0 False 1.109854e-02
Equity(603993 [洛阳钼业]) 0.0 False -2.519548e-02
Equity(603996 [中新科技]) 782240000.0 False 9.642423e-03
Equity(603997 [继峰股份]) 78550000.0 False -6.125468e-02
Equity(603998 [方盛制药]) 264410000.0 False -4.478660e-03
Equity(603999 [读者传媒]) 187430000.0 False -5.030591e-02
[2860 rows x 3 columns]
[2018-01-09 01:10:22.107479] INFO: Performance: Simulated 6 trading days out of 6.
###Markdown
Using `screen` to select a subset: the `Pipeline` ignores securities for which the filter evaluates to `False`. Same pipeline as above, but with `high_dollar_volume` passed as the `screen` argument.
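Conceptually this is the same as keeping only the rows where the boolean column is `True`, except that with `screen` the excluded rows never appear in the output at all. A sketch of the equivalent post-hoc filtering (here `output` stands for the DataFrame returned by `pipeline_output` in the previous cell, which included the boolean `high_dollar_volume` column; that availability is an assumption):
```python
# Post-hoc filtering of the previous pipeline output, for comparison only;
# passing the filter as `screen` yields the same rows directly.
filtered = output[output['high_dollar_volume']]
print(filtered.head())
```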
###Code
%%zipline --start 2016-5-2 --end 2016-5-10 --capital-base 100000
from zipline.pipeline import Pipeline
from zipline.pipeline import Fundamentals
from zipline.pipeline.data import USEquityPricing
from zipline.pipeline.factors import SimpleMovingAverage, AverageDollarVolume
from zipline.api import attach_pipeline, pipeline_output
from zipline.api import symbol, sid, get_datetime
def make_pipeline():
mean_close_10 = SimpleMovingAverage(inputs=[USEquityPricing.close], window_length=10)
mean_close_30 = SimpleMovingAverage(inputs=[USEquityPricing.close], window_length=30)
percent_difference = (mean_close_10 - mean_close_30) / mean_close_30
dollar_volume = AverageDollarVolume(window_length=30)
high_dollar_volume = (dollar_volume > 1000000000)
return Pipeline(
columns={
'percent_difference': percent_difference,
},
screen = high_dollar_volume
)
def initialize(context):
attach_pipeline(make_pipeline(), 'example')
def handle_data(context, data):
today = get_datetime('Asia/Shanghai')
output = pipeline_output('example')
# Note: the result itself is not sorted (the index is in default ascending order)
print('日期 {} 结果:\n {}'.format(today, output))
###Output
日期 2016-05-03 15:00:00+08:00 结果:
percent_difference
Equity(000009 [中国宝安]) 0.001058
Equity(000750 [国海证券]) -0.003309
Equity(000776 [广发证券]) -0.017161
Equity(000838 [财信发展]) -0.109842
Equity(000839 [中信国安]) 0.052221
Equity(002024 [苏宁云商]) -0.023862
Equity(002183 [怡 亚 通]) -0.051765
Equity(002276 [万马股份]) -0.032473
Equity(002280 [联络互动]) -0.085799
Equity(002284 [亚太股份]) 0.041020
Equity(002292 [奥飞娱乐]) -0.083087
Equity(002407 [多氟多]) 0.122763
Equity(002416 [爱施德]) 0.062311
Equity(002431 [棕榈股份]) -0.031307
Equity(002460 [赣锋锂业]) 0.094549
Equity(002466 [天齐锂业]) 0.029300
Equity(002488 [金固股份]) 0.029590
Equity(002601 [龙蟒佰利]) -0.064635
Equity(002673 [西部证券]) -0.046067
Equity(I000001 [上证指数]) -0.008641
Equity(I000002 [A股指数]) -0.008643
Equity(I000016 [上证50]) -0.001538
Equity(I000300 [沪深300]) -0.008148
Equity(300024 [机器人]) 0.018882
Equity(300033 [同花顺]) 0.009155
Equity(300059 [东方财富]) -0.065191
Equity(300168 [万达信息]) -0.070966
Equity(300315 [掌趣科技]) -0.043297
Equity(300418 [昆仑万维]) -0.061288
Equity(300431 [暴风集团]) -0.136319
Equity(I399001 [深证成指]) -0.014449
Equity(I399002 [深成指R]) -0.014185
Equity(I399003 [成份B指]) -0.013553
Equity(I399006 [创业板指]) -0.023885
Equity(I399102 [创业板综]) -0.013087
Equity(I399106 [深证综指]) -0.009503
Equity(600030 [中信证券]) -0.027633
Equity(600109 [国金证券]) -0.038397
Equity(600111 [北方稀土]) 0.040168
Equity(600116 [三峡水利]) -0.097697
Equity(600446 [金证股份]) -0.100412
Equity(600547 [山东黄金]) 0.045557
Equity(600570 [恒生电子]) -0.049338
Equity(600837 [海通证券]) 0.035000
Equity(600958 [东方证券]) -0.058310
Equity(600988 [赤峰黄金]) 0.027889
Equity(601198 [东兴证券]) -0.035591
Equity(601318 [中国平安]) 0.006977
Equity(601377 [兴业证券]) -0.033810
Equity(601688 [华泰证券]) 0.021098
Equity(601788 [光大证券]) -0.032081
Equity(601989 [中国重工]) -0.056948
日期 2016-05-04 15:00:00+08:00 结果:
percent_difference
Equity(000009 [中国宝安]) -0.000647
Equity(000750 [国海证券]) -0.009254
Equity(000776 [广发证券]) -0.021257
Equity(000838 [财信发展]) -0.116364
Equity(000839 [中信国安]) 0.038732
Equity(002024 [苏宁云商]) -0.026937
Equity(002276 [万马股份]) -0.038131
Equity(002280 [联络互动]) -0.095948
Equity(002284 [亚太股份]) 0.022613
Equity(002292 [奥飞娱乐]) -0.086278
Equity(002407 [多氟多]) 0.110297
Equity(002416 [爱施德]) 0.041369
Equity(002431 [棕榈股份]) -0.037966
Equity(002460 [赣锋锂业]) 0.095467
Equity(002466 [天齐锂业]) 0.021626
Equity(002488 [金固股份]) 0.005553
Equity(002601 [龙蟒佰利]) -0.076092
Equity(002673 [西部证券]) -0.052053
Equity(I000001 [上证指数]) -0.010422
Equity(I000002 [A股指数]) -0.010425
Equity(I000016 [上证50]) -0.001800
Equity(I000300 [沪深300]) -0.009041
Equity(300024 [机器人]) 0.011283
Equity(300028 [金亚科技]) -0.140935
Equity(300033 [同花顺]) 0.006143
Equity(300059 [东方财富]) -0.068088
Equity(300168 [万达信息]) -0.075683
Equity(300315 [掌趣科技]) -0.048689
Equity(300431 [暴风集团]) -0.141029
Equity(I399001 [深证成指]) -0.016667
Equity(I399002 [深成指R]) -0.016344
Equity(I399003 [成份B指]) -0.014297
Equity(I399006 [创业板指]) -0.026641
Equity(I399102 [创业板综]) -0.016294
Equity(I399106 [深证综指]) -0.012317
Equity(600030 [中信证券]) -0.030496
Equity(600109 [国金证券]) -0.041084
Equity(600111 [北方稀土]) 0.042735
Equity(600116 [三峡水利]) -0.117161
Equity(600446 [金证股份]) -0.107842
Equity(600547 [山东黄金]) 0.048596
Equity(600570 [恒生电子]) -0.052761
Equity(600837 [海通证券]) 0.033400
Equity(600958 [东方证券]) -0.057562
Equity(600988 [赤峰黄金]) 0.028783
Equity(601198 [东兴证券]) -0.040817
Equity(601318 [中国平安]) 0.007572
Equity(601377 [兴业证券]) -0.036471
Equity(601688 [华泰证券]) 0.019068
Equity(601788 [光大证券]) -0.037250
Equity(601989 [中国重工]) -0.062536
日期 2016-05-05 15:00:00+08:00 结果:
percent_difference
Equity(000009 [中国宝安]) -0.006847
Equity(000750 [国海证券]) -0.014035
Equity(000776 [广发证券]) -0.023397
Equity(000838 [财信发展]) -0.122387
Equity(000839 [中信国安]) 0.033322
Equity(002276 [万马股份]) -0.045066
Equity(002280 [联络互动]) -0.105687
Equity(002284 [亚太股份]) 0.009340
Equity(002292 [奥飞娱乐]) -0.090522
Equity(002407 [多氟多]) 0.098956
Equity(002416 [爱施德]) 0.025267
Equity(002431 [棕榈股份]) -0.043836
Equity(002460 [赣锋锂业]) 0.095674
Equity(002466 [天齐锂业]) 0.016594
Equity(002488 [金固股份]) -0.015254
Equity(002601 [龙蟒佰利]) -0.084899
Equity(002673 [西部证券]) -0.055651
Equity(I000001 [上证指数]) -0.011839
Equity(I000002 [A股指数]) -0.011843
Equity(I000016 [上证50]) -0.001670
Equity(I000300 [沪深300]) -0.009529
Equity(300024 [机器人]) 0.001450
Equity(300028 [金亚科技]) -0.142942
Equity(300033 [同花顺]) 0.005793
Equity(300059 [东方财富]) -0.070556
Equity(300168 [万达信息]) -0.080874
Equity(300315 [掌趣科技]) -0.054763
Equity(300431 [暴风集团]) -0.143173
Equity(I399001 [深证成指]) -0.018486
Equity(I399002 [深成指R]) -0.018102
Equity(I399003 [成份B指]) -0.014895
Equity(I399006 [创业板指]) -0.029178
Equity(I399102 [创业板综]) -0.019065
Equity(I399106 [深证综指]) -0.014595
Equity(600030 [中信证券]) -0.031091
Equity(600111 [北方稀土]) 0.041669
Equity(600116 [三峡水利]) -0.127542
Equity(600446 [金证股份]) -0.113245
Equity(600547 [山东黄金]) 0.048287
Equity(600570 [恒生电子]) -0.054239
Equity(600837 [海通证券]) 0.034032
Equity(600958 [东方证券]) -0.054769
Equity(600988 [赤峰黄金]) 0.024826
Equity(601198 [东兴证券]) -0.044635
Equity(601318 [中国平安]) 0.008519
Equity(601377 [兴业证券]) -0.036565
Equity(601788 [光大证券]) -0.040439
Equity(601989 [中国重工]) -0.066401
日期 2016-05-06 15:00:00+08:00 结果:
percent_difference
Equity(000009 [中国宝安]) -0.007717
Equity(000750 [国海证券]) -0.014420
Equity(000762 [西藏矿业]) -0.011417
Equity(000776 [广发证券]) -0.022698
Equity(000839 [中信国安]) 0.033930
Equity(002276 [万马股份]) -0.043520
Equity(002280 [联络互动]) -0.105531
Equity(002284 [亚太股份]) -0.003876
Equity(002292 [奥飞娱乐]) -0.086945
Equity(002407 [多氟多]) 0.095451
Equity(002416 [爱施德]) 0.016727
Equity(002431 [棕榈股份]) -0.035596
Equity(002460 [赣锋锂业]) 0.095578
Equity(002466 [天齐锂业]) 0.015210
Equity(002488 [金固股份]) -0.034188
Equity(002601 [龙蟒佰利]) -0.078584
Equity(002673 [西部证券]) -0.056722
Equity(I000001 [上证指数]) -0.010980
Equity(I000002 [A股指数]) -0.010985
Equity(I000016 [上证50]) -0.001449
Equity(I000300 [沪深300]) -0.008381
Equity(300024 [机器人]) -0.002106
Equity(300028 [金亚科技]) -0.138429
Equity(300033 [同花顺]) 0.009919
Equity(300059 [东方财富]) -0.065290
Equity(300168 [万达信息]) -0.076558
Equity(300315 [掌趣科技]) -0.051569
Equity(300431 [暴风集团]) -0.134884
Equity(I399001 [深证成指]) -0.015921
Equity(I399002 [深成指R]) -0.015476
Equity(I399003 [成份B指]) -0.015386
Equity(I399006 [创业板指]) -0.025660
Equity(I399102 [创业板综]) -0.015622
Equity(I399106 [深证综指]) -0.011940
Equity(600030 [中信证券]) -0.031571
Equity(600111 [北方稀土]) 0.035662
Equity(600116 [三峡水利]) -0.127431
Equity(600446 [金证股份]) -0.109033
Equity(600547 [山东黄金]) 0.050539
Equity(600570 [恒生电子]) -0.048944
Equity(600837 [海通证券]) 0.031614
Equity(600958 [东方证券]) -0.052858
Equity(601198 [东兴证券]) -0.043977
Equity(601318 [中国平安]) 0.007862
Equity(601788 [光大证券]) -0.041726
日期 2016-05-09 15:00:00+08:00 结果:
percent_difference
Equity(000009 [中国宝安]) -0.011230
Equity(000750 [国海证券]) -0.014882
Equity(000762 [西藏矿业]) -0.002773
Equity(000776 [广发证券]) -0.022530
Equity(000839 [中信国安]) 0.032849
Equity(002276 [万马股份]) -0.041623
Equity(002280 [联络互动]) -0.098416
Equity(002284 [亚太股份]) -0.012585
Equity(002292 [奥飞娱乐]) -0.081171
Equity(002407 [多氟多]) 0.102246
Equity(002416 [爱施德]) 0.007382
Equity(002431 [棕榈股份]) -0.017485
Equity(002460 [赣锋锂业]) 0.102349
Equity(002466 [天齐锂业]) 0.020485
Equity(002488 [金固股份]) -0.048597
Equity(002601 [龙蟒佰利]) -0.074532
Equity(002673 [西部证券]) -0.053824
Equity(I000001 [上证指数]) -0.011239
Equity(I000002 [A股指数]) -0.011247
Equity(I000016 [上证50]) -0.002285
Equity(I000300 [沪深300]) -0.008234
Equity(300024 [机器人]) -0.005628
Equity(300028 [金亚科技]) -0.130198
Equity(300033 [同花顺]) 0.016476
Equity(300059 [东方财富]) -0.058106
Equity(300168 [万达信息]) -0.071938
Equity(300315 [掌趣科技]) -0.050588
Equity(300431 [暴风集团]) -0.122067
Equity(I399001 [深证成指]) -0.014439
Equity(I399002 [深成指R]) -0.013900
Equity(I399003 [成份B指]) -0.017938
Equity(I399006 [创业板指]) -0.022896
Equity(I399102 [创业板综]) -0.012692
Equity(I399106 [深证综指]) -0.010198
Equity(600030 [中信证券]) -0.031941
Equity(600111 [北方稀土]) 0.026183
Equity(600116 [三峡水利]) -0.124316
Equity(600446 [金证股份]) -0.102764
Equity(600547 [山东黄金]) 0.046841
Equity(600570 [恒生电子]) -0.039914
Equity(600837 [海通证券]) 0.027417
Equity(601198 [东兴证券]) -0.041407
Equity(601318 [中国平安]) 0.006196
Equity(601788 [光大证券]) -0.044171
日期 2016-05-10 15:00:00+08:00 结果:
percent_difference
Equity(000009 [中国宝安]) -0.020528
Equity(000762 [西藏矿业]) 0.006234
Equity(000839 [中信国安]) 0.022403
Equity(002276 [万马股份]) -0.046856
Equity(002284 [亚太股份]) -0.023849
Equity(002292 [奥飞娱乐]) -0.083122
Equity(002407 [多氟多]) 0.101666
Equity(002416 [爱施德]) -0.008046
Equity(002431 [棕榈股份]) -0.010640
Equity(002460 [赣锋锂业]) 0.101502
Equity(002466 [天齐锂业]) 0.016543
Equity(002488 [金固股份]) -0.063278
Equity(002601 [龙蟒佰利]) -0.078759
Equity(002673 [西部证券]) -0.056263
Equity(I000001 [上证指数]) -0.014071
Equity(I000002 [A股指数]) -0.014085
Equity(I000016 [上证50]) -0.005245
Equity(I000300 [沪深300]) -0.010449
Equity(300024 [机器人]) -0.012958
Equity(300028 [金亚科技]) -0.125749
Equity(300033 [同花顺]) 0.018037
Equity(300059 [东方财富]) -0.058708
Equity(300168 [万达信息]) -0.074402
Equity(300431 [暴风集团]) -0.117850
Equity(I399001 [深证成指]) -0.016361
Equity(I399002 [深成指R]) -0.015736
Equity(I399003 [成份B指]) -0.023321
Equity(I399006 [创业板指]) -0.024217
Equity(I399102 [创业板综]) -0.014124
Equity(I399106 [深证综指]) -0.012271
Equity(600030 [中信证券]) -0.036735
Equity(600111 [北方稀土]) 0.014800
Equity(600116 [三峡水利]) -0.129685
Equity(600446 [金证股份]) -0.104318
Equity(600547 [山东黄金]) 0.043079
Equity(600570 [恒生电子]) -0.035683
Equity(600837 [海通证券]) 0.020897
Equity(601198 [东兴证券]) -0.046456
Equity(601318 [中国平安]) 0.002915
[2018-01-09 01:10:23.364320] INFO: Performance: Simulated 6 trading days out of 6.
###Markdown
The `~` (negation) operator is used to invert a filter, swapping all `True` values with `False` and vice versa. For example, we can write the following to filter for low-dollar-volume securities:
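A minimal construction-only sketch of the inversion; the `<=` comparison variant is an illustrative assumption and can treat missing data differently than `~`:
```python
from zipline.pipeline.factors import AverageDollarVolume

dollar_volume = AverageDollarVolume(window_length=30)
high_dollar_volume = dollar_volume > 1000000000

low_dollar_volume = ~high_dollar_volume               # True exactly where the original is False
low_dollar_volume_alt = dollar_volume <= 1000000000   # similar, but NaNs are handled differently
```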
###Code
%%zipline --start 2016-5-2 --end 2016-5-10 --capital-base 100000
from zipline.pipeline import Pipeline
from zipline.pipeline import Fundamentals
from zipline.pipeline.data import USEquityPricing
from zipline.pipeline.factors import SimpleMovingAverage, AverageDollarVolume
from zipline.api import attach_pipeline, pipeline_output
from zipline.api import symbol, sid, get_datetime
def make_pipeline():
mean_close_10 = SimpleMovingAverage(inputs=[USEquityPricing.close], window_length=10)
mean_close_30 = SimpleMovingAverage(inputs=[USEquityPricing.close], window_length=30)
percent_difference = (mean_close_10 - mean_close_30) / mean_close_30
dollar_volume = AverageDollarVolume(window_length=30)
high_dollar_volume = (dollar_volume > 1000000000)
return Pipeline(
columns={
'percent_difference': percent_difference,
},
screen = ~high_dollar_volume
)
def initialize(context):
attach_pipeline(make_pipeline(), 'example')
def handle_data(context, data):
today = get_datetime('Asia/Shanghai')
output = pipeline_output('example')
# Note: the result itself is not sorted (the index is in default ascending order)
print('日期 {} 结果:\n {}'.format(today, output))
###Output
日期 2016-05-03 15:00:00+08:00 结果:
percent_difference
Equity(000001 [平安银行]) -4.197338e-03
Equity(000002 [万 科A]) 5.816969e-16
Equity(000004 [国农科技]) 9.613671e-03
Equity(000005 [世纪星源]) -4.377113e-02
Equity(000006 [深振业A]) -1.368466e-02
Equity(000007 [全新好]) 0.000000e+00
Equity(000008 [神州高铁]) -2.203059e-04
Equity(000010 [美丽生态]) 2.893666e-02
Equity(000011 [深物业A]) 8.060946e-03
Equity(000012 [南 玻A]) 4.579174e-03
Equity(000014 [沙河股份]) 7.410898e-02
Equity(000016 [深康佳A]) -1.278142e-02
Equity(000017 [深中华A]) 7.775183e-03
Equity(000018 [神州长城]) -4.289365e-02
Equity(000019 [深深宝A]) 6.568053e-02
Equity(000020 [深华发A]) -1.837093e-03
Equity(000021 [深科技]) -1.321033e-02
Equity(000022 [深赤湾A]) -1.528243e-02
Equity(000023 [深天地A]) 6.367730e-02
Equity(000025 [特 力A]) -3.505807e-02
Equity(000026 [飞亚达A]) -3.581436e-02
Equity(000027 [深圳能源]) -3.171198e-02
Equity(000028 [国药一致]) 2.760812e-02
Equity(000029 [深深房A]) -1.235490e-02
Equity(000030 [富奥股份]) -2.284726e-02
Equity(000031 [中粮地产]) -4.662847e-02
Equity(000032 [深桑达A]) -3.485386e-02
Equity(000033 [新都退]) NaN
Equity(000034 [神州数码]) -1.907231e-01
Equity(000035 [中国天楹]) -7.597630e-05
... ...
Equity(603806 [福斯特]) -2.570832e-03
Equity(603808 [歌力思]) 4.738626e-02
Equity(603818 [曲美家居]) -5.176335e-02
Equity(603822 [嘉澳环保]) 0.000000e+00
Equity(603828 [柯利达]) -3.757429e-02
Equity(603838 [四通股份]) -6.750258e-04
Equity(603861 [白云电器]) 1.482979e-01
Equity(603866 [桃李面包]) 3.660669e-02
Equity(603868 [飞科电器]) 0.000000e+00
Equity(603869 [北部湾旅]) -3.364202e-02
Equity(603883 [老百姓]) 1.151010e-02
Equity(603885 [吉祥航空]) 1.719471e-02
Equity(603889 [新澳股份]) -5.693451e-16
Equity(603898 [好莱客]) 4.456178e-02
Equity(603899 [晨光文具]) -2.222321e-03
Equity(603901 [永创智能]) -1.641140e-02
Equity(603918 [金桥信息]) 2.985075e-03
Equity(603919 [金徽酒]) -2.562604e-02
Equity(603936 [博敏电子]) -2.045646e-02
Equity(603939 [益丰药房]) 2.419415e-02
Equity(603968 [醋化股份]) 9.159574e-02
Equity(603969 [银龙股份]) -1.169915e-02
Equity(603979 [金诚信]) -3.424991e-02
Equity(603988 [中电电机]) 1.081822e-01
Equity(603989 [艾华集团]) 1.830384e-02
Equity(603993 [洛阳钼业]) -2.615694e-02
Equity(603996 [中新科技]) 1.266364e-02
Equity(603997 [继峰股份]) -6.025882e-02
Equity(603998 [方盛制药]) -9.163708e-03
Equity(603999 [读者传媒]) -4.237137e-02
[2806 rows x 1 columns]
日期 2016-05-04 15:00:00+08:00 结果:
percent_difference
Equity(000001 [平安银行]) -5.103479e-03
Equity(000002 [万 科A]) 5.816969e-16
Equity(000004 [国农科技]) 5.643033e-03
Equity(000005 [世纪星源]) -4.707313e-02
Equity(000006 [深振业A]) -1.522361e-02
Equity(000007 [全新好]) 0.000000e+00
Equity(000008 [神州高铁]) -2.762518e-03
Equity(000010 [美丽生态]) 3.556243e-02
Equity(000011 [深物业A]) 8.471421e-03
Equity(000012 [南 玻A]) 7.642653e-03
Equity(000014 [沙河股份]) 6.771589e-02
Equity(000016 [深康佳A]) -1.810043e-02
Equity(000017 [深中华A]) 3.393155e-03
Equity(000018 [神州长城]) -5.256490e-02
Equity(000019 [深深宝A]) 7.120504e-02
Equity(000020 [深华发A]) -4.841409e-04
Equity(000021 [深科技]) -1.480663e-02
Equity(000022 [深赤湾A]) -2.020500e-02
Equity(000023 [深天地A]) 6.800683e-02
Equity(000025 [特 力A]) -3.975018e-02
Equity(000026 [飞亚达A]) -3.958623e-02
Equity(000027 [深圳能源]) -3.505905e-02
Equity(000028 [国药一致]) 2.766267e-02
Equity(000029 [深深房A]) -1.041877e-02
Equity(000030 [富奥股份]) -2.519260e-02
Equity(000031 [中粮地产]) -5.233576e-02
Equity(000032 [深桑达A]) -4.442586e-02
Equity(000033 [新都退]) NaN
Equity(000034 [神州数码]) -1.926659e-01
Equity(000035 [中国天楹]) -8.273218e-03
... ...
Equity(603806 [福斯特]) -3.731406e-03
Equity(603808 [歌力思]) 4.611040e-02
Equity(603818 [曲美家居]) -5.743578e-02
Equity(603822 [嘉澳环保]) 0.000000e+00
Equity(603828 [柯利达]) -4.385730e-02
Equity(603838 [四通股份]) -7.671437e-04
Equity(603861 [白云电器]) 1.397064e-01
Equity(603866 [桃李面包]) 3.872320e-02
Equity(603868 [飞科电器]) 3.948974e-02
Equity(603869 [北部湾旅]) -4.059409e-02
Equity(603883 [老百姓]) 6.693127e-03
Equity(603885 [吉祥航空]) 1.506589e-02
Equity(603889 [新澳股份]) -5.693451e-16
Equity(603898 [好莱客]) 4.984511e-02
Equity(603899 [晨光文具]) -9.931631e-04
Equity(603901 [永创智能]) -1.961950e-02
Equity(603918 [金桥信息]) 1.622456e-03
Equity(603919 [金徽酒]) -1.826844e-02
Equity(603936 [博敏电子]) -2.698047e-02
Equity(603939 [益丰药房]) 1.898376e-02
Equity(603968 [醋化股份]) 9.270667e-02
Equity(603969 [银龙股份]) -1.785327e-02
Equity(603979 [金诚信]) -4.117806e-02
Equity(603988 [中电电机]) 9.719844e-02
Equity(603989 [艾华集团]) 1.554582e-02
Equity(603993 [洛阳钼业]) -2.830279e-02
Equity(603996 [中新科技]) 3.796150e-03
Equity(603997 [继峰股份]) -6.489910e-02
Equity(603998 [方盛制药]) -1.750944e-02
Equity(603999 [读者传媒]) -4.998479e-02
[2808 rows x 1 columns]
日期 2016-05-05 15:00:00+08:00 结果:
percent_difference
Equity(000001 [平安银行]) -5.323980e-03
Equity(000002 [万 科A]) 5.816969e-16
Equity(000004 [国农科技]) 3.041169e-03
Equity(000005 [世纪星源]) -4.988653e-02
Equity(000006 [深振业A]) -1.859780e-02
Equity(000007 [全新好]) 0.000000e+00
Equity(000008 [神州高铁]) -3.694890e-03
Equity(000010 [美丽生态]) 4.020392e-02
Equity(000011 [深物业A]) 5.969175e-03
Equity(000012 [南 玻A]) 9.765447e-03
Equity(000014 [沙河股份]) 5.816863e-02
Equity(000016 [深康佳A]) -2.224240e-02
Equity(000017 [深中华A]) -2.625089e-03
Equity(000018 [神州长城]) -5.601966e-02
Equity(000019 [深深宝A]) 6.970726e-02
Equity(000020 [深华发A]) -1.110340e-03
Equity(000021 [深科技]) -1.637715e-02
Equity(000022 [深赤湾A]) -2.625256e-02
Equity(000023 [深天地A]) 6.433806e-02
Equity(000025 [特 力A]) -4.285755e-02
Equity(000026 [飞亚达A]) -4.398167e-02
Equity(000027 [深圳能源]) -3.747894e-02
Equity(000028 [国药一致]) 2.568259e-02
Equity(000029 [深深房A]) -1.019936e-02
Equity(000030 [富奥股份]) -2.622037e-02
Equity(000031 [中粮地产]) -5.558812e-02
Equity(000032 [深桑达A]) -5.318746e-02
Equity(000033 [新都退]) NaN
Equity(000034 [神州数码]) -1.862329e-01
Equity(000035 [中国天楹]) -1.437836e-02
... ...
Equity(603806 [福斯特]) -4.564769e-03
Equity(603808 [歌力思]) 4.102366e-02
Equity(603818 [曲美家居]) -6.329309e-02
Equity(603822 [嘉澳环保]) 0.000000e+00
Equity(603828 [柯利达]) -4.993221e-02
Equity(603838 [四通股份]) -1.014657e-04
Equity(603861 [白云电器]) 1.266448e-01
Equity(603866 [桃李面包]) 3.917222e-02
Equity(603868 [飞科电器]) 7.643259e-02
Equity(603869 [北部湾旅]) -4.682107e-02
Equity(603883 [老百姓]) -7.405731e-04
Equity(603885 [吉祥航空]) 1.939030e-02
Equity(603889 [新澳股份]) -5.693451e-16
Equity(603898 [好莱客]) 4.926979e-02
Equity(603899 [晨光文具]) 3.777729e-03
Equity(603901 [永创智能]) -2.052725e-02
Equity(603918 [金桥信息]) 1.627351e-03
Equity(603919 [金徽酒]) -8.573537e-03
Equity(603936 [博敏电子]) -3.041395e-02
Equity(603939 [益丰药房]) 1.339123e-02
Equity(603968 [醋化股份]) 1.000176e-01
Equity(603969 [银龙股份]) -2.141240e-02
Equity(603979 [金诚信]) -4.460412e-02
Equity(603988 [中电电机]) 9.112956e-02
Equity(603989 [艾华集团]) 1.222321e-02
Equity(603993 [洛阳钼业]) -2.998847e-02
Equity(603996 [中新科技]) -2.240912e-03
Equity(603997 [继峰股份]) -6.888818e-02
Equity(603998 [方盛制药]) -2.285702e-02
Equity(603999 [读者传媒]) -5.635433e-02
[2811 rows x 1 columns]
日期 2016-05-06 15:00:00+08:00 结果:
percent_difference
Equity(000001 [平安银行]) -3.444928e-03
Equity(000002 [万 科A]) 5.816969e-16
Equity(000004 [国农科技]) -5.787750e-16
Equity(000005 [世纪星源]) -4.733310e-02
Equity(000006 [深振业A]) -2.168038e-02
Equity(000007 [全新好]) 0.000000e+00
Equity(000008 [神州高铁]) -1.030542e-03
Equity(000010 [美丽生态]) 4.722105e-02
Equity(000011 [深物业A]) -7.703081e-04
Equity(000012 [南 玻A]) 1.421671e-02
Equity(000014 [沙河股份]) 4.859110e-02
Equity(000016 [深康佳A]) -2.119798e-02
Equity(000017 [深中华A]) -9.066327e-03
Equity(000018 [神州长城]) -4.825382e-02
Equity(000019 [深深宝A]) 5.760244e-02
Equity(000020 [深华发A]) -1.064318e-02
Equity(000021 [深科技]) -1.297937e-02
Equity(000022 [深赤湾A]) -2.660141e-02
Equity(000023 [深天地A]) 4.705512e-02
Equity(000025 [特 力A]) -4.268304e-02
Equity(000026 [飞亚达A]) -4.196198e-02
Equity(000027 [深圳能源]) -3.599044e-02
Equity(000028 [国药一致]) 2.746071e-02
Equity(000029 [深深房A]) -1.862580e-02
Equity(000030 [富奥股份]) -1.952901e-02
Equity(000031 [中粮地产]) -5.487465e-02
Equity(000032 [深桑达A]) -5.499901e-02
Equity(000033 [新都退]) NaN
Equity(000034 [神州数码]) -1.841329e-01
Equity(000035 [中国天楹]) -1.300752e-02
... ...
Equity(603806 [福斯特]) -1.140016e-03
Equity(603808 [歌力思]) 3.624663e-02
Equity(603818 [曲美家居]) -6.090801e-02
Equity(603822 [嘉澳环保]) 0.000000e+00
Equity(603828 [柯利达]) -4.966191e-02
Equity(603838 [四通股份]) 2.184941e-16
Equity(603861 [白云电器]) 9.161121e-02
Equity(603866 [桃李面包]) 4.835959e-02
Equity(603868 [飞科电器]) 1.091592e-01
Equity(603869 [北部湾旅]) -4.598266e-02
Equity(603883 [老百姓]) -9.161516e-04
Equity(603885 [吉祥航空]) 2.925512e-02
Equity(603889 [新澳股份]) -5.693451e-16
Equity(603898 [好莱客]) 5.406307e-02
Equity(603899 [晨光文具]) 4.504269e-03
Equity(603901 [永创智能]) -1.775090e-02
Equity(603918 [金桥信息]) 8.007969e-04
Equity(603919 [金徽酒]) 5.816686e-03
Equity(603936 [博敏电子]) -2.542388e-02
Equity(603939 [益丰药房]) 1.379733e-02
Equity(603968 [醋化股份]) 9.473151e-02
Equity(603969 [银龙股份]) -2.065280e-02
Equity(603979 [金诚信]) -4.206571e-02
Equity(603988 [中电电机]) 8.570162e-02
Equity(603989 [艾华集团]) 1.446118e-02
Equity(603993 [洛阳钼业]) -2.819748e-02
Equity(603996 [中新科技]) -1.862261e-03
Equity(603997 [继峰股份]) -6.614341e-02
Equity(603998 [方盛制药]) -1.171204e-02
Equity(603999 [读者传媒]) -5.389970e-02
[2814 rows x 1 columns]
日期 2016-05-09 15:00:00+08:00 结果:
percent_difference
Equity(000001 [平安银行]) -2.788832e-03
Equity(000002 [万 科A]) 5.816969e-16
Equity(000004 [国农科技]) -5.787750e-16
Equity(000005 [世纪星源]) -4.575635e-02
Equity(000006 [深振业A]) -2.662896e-02
Equity(000007 [全新好]) 0.000000e+00
Equity(000008 [神州高铁]) 1.277696e-03
Equity(000010 [美丽生态]) 4.986150e-02
Equity(000011 [深物业A]) -9.203963e-03
Equity(000012 [南 玻A]) 1.646416e-02
Equity(000014 [沙河股份]) 3.894983e-02
Equity(000016 [深康佳A]) -2.070219e-02
Equity(000017 [深中华A]) -1.746272e-02
Equity(000018 [神州长城]) -4.196685e-02
Equity(000019 [深深宝A]) 3.533270e-02
Equity(000020 [深华发A]) -1.978289e-02
Equity(000021 [深科技]) -7.202969e-03
Equity(000022 [深赤湾A]) -2.778328e-02
Equity(000023 [深天地A]) 3.543526e-02
Equity(000025 [特 力A]) -4.342136e-02
Equity(000026 [飞亚达A]) -4.027478e-02
Equity(000027 [深圳能源]) -3.514047e-02
Equity(000028 [国药一致]) 2.751426e-02
Equity(000029 [深深房A]) -2.896022e-02
Equity(000030 [富奥股份]) -1.618252e-02
Equity(000031 [中粮地产]) -5.337271e-02
Equity(000032 [深桑达A]) -5.583529e-02
Equity(000033 [新都退]) NaN
Equity(000034 [神州数码]) -1.782382e-01
Equity(000035 [中国天楹]) -1.354735e-02
... ...
Equity(603806 [福斯特]) 1.119823e-03
Equity(603808 [歌力思]) 3.221565e-02
Equity(603818 [曲美家居]) -5.712016e-02
Equity(603822 [嘉澳环保]) 0.000000e+00
Equity(603828 [柯利达]) -4.548044e-02
Equity(603838 [四通股份]) 2.184941e-16
Equity(603861 [白云电器]) 6.223925e-02
Equity(603866 [桃李面包]) 6.056135e-02
Equity(603868 [飞科电器]) 1.357829e-01
Equity(603869 [北部湾旅]) -4.426856e-02
Equity(603883 [老百姓]) -7.315689e-04
Equity(603885 [吉祥航空]) 3.949704e-02
Equity(603889 [新澳股份]) -5.693451e-16
Equity(603898 [好莱客]) 4.812647e-02
Equity(603899 [晨光文具]) 3.642961e-03
Equity(603901 [永创智能]) -1.816810e-02
Equity(603918 [金桥信息]) 1.045197e-03
Equity(603919 [金徽酒]) 1.678325e-02
Equity(603936 [博敏电子]) -1.973572e-02
Equity(603939 [益丰药房]) 1.188977e-02
Equity(603968 [醋化股份]) 8.493080e-02
Equity(603969 [银龙股份]) -2.072389e-02
Equity(603979 [金诚信]) -3.758663e-02
Equity(603988 [中电电机]) 7.945134e-02
Equity(603989 [艾华集团]) 1.540473e-02
Equity(603993 [洛阳钼业]) -2.671424e-02
Equity(603996 [中新科技]) 3.078846e-03
Equity(603997 [继峰股份]) -6.241031e-02
Equity(603998 [方盛制药]) -3.624075e-03
Equity(603999 [读者传媒]) -4.883662e-02
[2816 rows x 1 columns]
日期 2016-05-10 15:00:00+08:00 结果:
percent_difference
Equity(000001 [平安银行]) -4.138450e-03
Equity(000002 [万 科A]) 5.816969e-16
Equity(000004 [国农科技]) -5.787750e-16
Equity(000005 [世纪星源]) -4.888626e-02
Equity(000006 [深振业A]) -3.248184e-02
Equity(000007 [全新好]) 0.000000e+00
Equity(000008 [神州高铁]) 2.582693e-03
Equity(000010 [美丽生态]) 4.932632e-02
Equity(000011 [深物业A]) -1.588646e-02
Equity(000012 [南 玻A]) 1.563544e-02
Equity(000014 [沙河股份]) 2.711899e-02
Equity(000016 [深康佳A]) -2.419040e-02
Equity(000017 [深中华A]) -2.927051e-02
Equity(000018 [神州长城]) -3.763530e-02
Equity(000019 [深深宝A]) 1.938055e-02
Equity(000020 [深华发A]) -2.778519e-02
Equity(000021 [深科技]) -5.655112e-03
Equity(000022 [深赤湾A]) -3.226191e-02
Equity(000023 [深天地A]) 2.113659e-02
Equity(000025 [特 力A]) -4.704963e-02
Equity(000026 [飞亚达A]) -4.394267e-02
Equity(000027 [深圳能源]) -3.854127e-02
Equity(000028 [国药一致]) 2.575182e-02
Equity(000029 [深深房A]) -3.494970e-02
Equity(000030 [富奥股份]) -1.351651e-02
Equity(000031 [中粮地产]) -5.507310e-02
Equity(000032 [深桑达A]) -6.002701e-02
Equity(000033 [新都退]) NaN
Equity(000034 [神州数码]) -1.767275e-01
Equity(000035 [中国天楹]) -1.990624e-02
... ...
Equity(603806 [福斯特]) 4.943029e-04
Equity(603808 [歌力思]) 2.794341e-02
Equity(603818 [曲美家居]) -5.503239e-02
Equity(603822 [嘉澳环保]) 0.000000e+00
Equity(603828 [柯利达]) -4.840683e-02
Equity(603838 [四通股份]) 2.184941e-16
Equity(603861 [白云电器]) 3.375343e-02
Equity(603866 [桃李面包]) 7.243487e-02
Equity(603868 [飞科电器]) 1.539973e-01
Equity(603869 [北部湾旅]) -4.467604e-02
Equity(603883 [老百姓]) -2.368906e-03
Equity(603885 [吉祥航空]) 4.607435e-02
Equity(603889 [新澳股份]) -6.688963e-03
Equity(603898 [好莱客]) 4.177064e-02
Equity(603899 [晨光文具]) 2.656412e-03
Equity(603901 [永创智能]) -1.825349e-02
Equity(603918 [金桥信息]) 6.240129e-16
Equity(603919 [金徽酒]) 2.752503e-02
Equity(603936 [博敏电子]) -1.957497e-02
Equity(603939 [益丰药房]) 4.433192e-03
Equity(603968 [醋化股份]) 7.104044e-02
Equity(603969 [银龙股份]) -2.740719e-02
Equity(603979 [金诚信]) -3.896033e-02
Equity(603988 [中电电机]) 7.497257e-02
Equity(603989 [艾华集团]) 1.109854e-02
Equity(603993 [洛阳钼业]) -2.519548e-02
Equity(603996 [中新科技]) 9.642423e-03
Equity(603997 [继峰股份]) -6.125468e-02
Equity(603998 [方盛制药]) -4.478660e-03
Equity(603999 [读者传媒]) -5.030591e-02
[2821 rows x 1 columns]
[2018-01-09 01:10:24.543111] INFO: Performance: Simulated 6 trading days out of 6.
###Markdown
`& (and) and | (or)` Filters can be combined with the `&` (and) and `|` (or) operators. For example, let's say we want to screen for securities that are in the top 10% of average dollar volume and that have a latest close price above 20. To start, let's make a high dollar volume filter using an `AverageDollarVolume` factor and `percentile_between`, then combine it with a latest-close filter using `&`:
###Code
%%zipline --start 2016-5-2 --end 2016-5-10 --capital-base 100000
from zipline.pipeline import Pipeline
from zipline.pipeline import Fundamentals
from zipline.pipeline.data import USEquityPricing
from zipline.pipeline.factors import SimpleMovingAverage, AverageDollarVolume
from zipline.api import attach_pipeline, pipeline_output
from zipline.api import symbol, sid, get_datetime
def make_pipeline():
mean_close_10 = SimpleMovingAverage(inputs=[USEquityPricing.close], window_length=10)
mean_close_30 = SimpleMovingAverage(inputs=[USEquityPricing.close], window_length=30)
percent_difference = (mean_close_10 - mean_close_30) / mean_close_30
dollar_volume = AverageDollarVolume(window_length=30)
high_dollar_volume = dollar_volume.percentile_between(90, 100)
latest_close = USEquityPricing.close.latest
above_20 = latest_close > 20
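# Combine the two filters with &: a security must pass both the dollar-volume screen and the price screen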
tradeable_filter = high_dollar_volume & above_20
return Pipeline(
columns={
'percent_difference': percent_difference
},
screen=tradeable_filter
)
def initialize(context):
attach_pipeline(make_pipeline(), 'example')
def handle_data(context, data):
today = get_datetime('Asia/Shanghai')
output = pipeline_output('example')
# Note: the results themselves are not specially sorted (index in ascending order by default)
print('日期 {} 结果:\n {}'.format(today, output))
###Output
日期 2016-05-03 15:00:00+08:00 结果:
percent_difference
Equity(000025 [特 力A]) -0.035058
Equity(000049 [德赛电池]) -0.013503
Equity(000333 [美的集团]) 0.005841
Equity(000503 [海虹控股]) 0.064210
Equity(000623 [吉林敖东]) -0.028149
Equity(000626 [远大控股]) -0.085858
Equity(000697 [炼石有色]) -0.037609
Equity(000738 [航发控制]) -0.047974
Equity(000762 [西藏矿业]) -0.011096
Equity(000777 [中核科技]) -0.050172
Equity(000839 [中信国安]) 0.052221
Equity(000858 [五 粮 液]) -0.007725
Equity(000887 [中鼎股份]) 0.022356
Equity(000901 [航天科技]) -0.037928
Equity(000977 [浪潮信息]) -0.011664
Equity(002001 [新 和 成]) 0.005080
Equity(002019 [亿帆医药]) -0.061808
Equity(002030 [达安基因]) 0.007252
Equity(002049 [紫光国芯]) -0.052075
Equity(002055 [得润电子]) 0.001039
Equity(002074 [国轩高科]) -0.047752
Equity(002095 [生 意 宝]) -0.063008
Equity(002114 [罗平锌电]) 0.033148
Equity(002174 [游族网络]) -0.044594
Equity(002175 [东方网络]) -0.033433
Equity(002183 [怡 亚 通]) -0.051765
Equity(002192 [融捷股份]) 0.078486
Equity(002195 [二三四五]) -0.055824
Equity(002229 [鸿博股份]) 0.043725
Equity(002241 [歌尔股份]) 0.052768
... ...
Equity(300459 [金科文化]) -0.026236
Equity(300496 [中科创达]) 0.008315
Equity(I399001 [深证成指]) -0.014449
Equity(I399002 [深成指R]) -0.014185
Equity(I399003 [成份B指]) -0.013553
Equity(I399006 [创业板指]) -0.023885
Equity(I399102 [创业板综]) -0.013087
Equity(I399106 [深证综指]) -0.009503
Equity(600118 [中国卫星]) -0.037277
Equity(600259 [广晟有色]) 0.142932
Equity(600343 [航天动力]) 0.073858
Equity(600435 [北方导航]) -0.024893
Equity(600446 [金证股份]) -0.100412
Equity(600519 [贵州茅台]) 0.001166
Equity(600547 [山东黄金]) 0.045557
Equity(600549 [厦门钨业]) 0.096959
Equity(600570 [恒生电子]) -0.049338
Equity(600645 [中源协和]) -0.020181
Equity(600650 [锦江投资]) -0.062409
Equity(600654 [*ST中安]) -0.091127
Equity(600680 [*ST上普]) -0.064628
Equity(600699 [均胜电子]) -0.015637
Equity(600756 [浪潮软件]) -0.060105
Equity(600884 [杉杉股份]) -0.003932
Equity(600893 [航发动力]) -0.037707
Equity(600895 [张江高科]) 0.002471
Equity(601069 [西部黄金]) -0.018133
Equity(601198 [东兴证券]) -0.035591
Equity(601318 [中国平安]) 0.006977
Equity(601336 [新华保险]) -0.002538
[121 rows x 1 columns]
日期 2016-05-04 15:00:00+08:00 结果:
percent_difference
Equity(000025 [特 力A]) -0.039750
Equity(000049 [德赛电池]) -0.021191
Equity(000333 [美的集团]) 0.012164
Equity(000503 [海虹控股]) 0.070423
Equity(000623 [吉林敖东]) -0.029086
Equity(000626 [远大控股]) -0.097815
Equity(000697 [炼石有色]) -0.047729
Equity(000738 [航发控制]) -0.047824
Equity(000748 [长城信息]) -0.100988
Equity(000762 [西藏矿业]) -0.015372
Equity(000839 [中信国安]) 0.038732
Equity(000858 [五 粮 液]) 0.000970
Equity(000887 [中鼎股份]) 0.010385
Equity(000901 [航天科技]) -0.043799
Equity(000909 [数源科技]) -0.045866
Equity(000977 [浪潮信息]) -0.018221
Equity(002001 [新 和 成]) 0.004483
Equity(002019 [亿帆医药]) -0.067395
Equity(002030 [达安基因]) -0.000536
Equity(002049 [紫光国芯]) -0.051049
Equity(002055 [得润电子]) -0.002317
Equity(002074 [国轩高科]) -0.049912
Equity(002095 [生 意 宝]) -0.067059
Equity(002108 [沧州明珠]) -0.010939
Equity(002114 [罗平锌电]) 0.018861
Equity(002145 [中核钛白]) -0.019449
Equity(002174 [游族网络]) -0.064007
Equity(002175 [东方网络]) -0.037429
Equity(002183 [怡 亚 通]) -0.052880
Equity(002192 [融捷股份]) 0.073052
... ...
Equity(I399001 [深证成指]) -0.016667
Equity(I399002 [深成指R]) -0.016344
Equity(I399003 [成份B指]) -0.014297
Equity(I399006 [创业板指]) -0.026641
Equity(I399102 [创业板综]) -0.016294
Equity(I399106 [深证综指]) -0.012317
Equity(600118 [中国卫星]) -0.040605
Equity(600259 [广晟有色]) 0.139438
Equity(600343 [航天动力]) 0.078721
Equity(600366 [宁波韵升]) -0.001568
Equity(600435 [北方导航]) -0.020039
Equity(600446 [金证股份]) -0.107842
Equity(600519 [贵州茅台]) 0.003194
Equity(600547 [山东黄金]) 0.048596
Equity(600549 [厦门钨业]) 0.100489
Equity(600570 [恒生电子]) -0.052761
Equity(600645 [中源协和]) -0.028293
Equity(600650 [锦江投资]) -0.063572
Equity(600654 [*ST中安]) -0.089778
Equity(600680 [*ST上普]) -0.068230
Equity(600699 [均胜电子]) -0.025513
Equity(600756 [浪潮软件]) -0.063624
Equity(600884 [杉杉股份]) -0.008446
Equity(600893 [航发动力]) -0.037914
Equity(600895 [张江高科]) -0.000791
Equity(601069 [西部黄金]) -0.017365
Equity(601198 [东兴证券]) -0.040817
Equity(601318 [中国平安]) 0.007572
Equity(601336 [新华保险]) -0.002577
Equity(601601 [中国太保]) 0.035197
[131 rows x 1 columns]
日期 2016-05-05 15:00:00+08:00 结果:
percent_difference
Equity(000025 [特 力A]) -0.042858
Equity(000049 [德赛电池]) -0.030020
Equity(000333 [美的集团]) 0.016408
Equity(000503 [海虹控股]) 0.074437
Equity(000626 [远大控股]) -0.100959
Equity(000687 [华讯方舟]) 0.046599
Equity(000697 [炼石有色]) -0.050793
Equity(000738 [航发控制]) -0.047042
Equity(000762 [西藏矿业]) -0.018352
Equity(000839 [中信国安]) 0.033322
Equity(000858 [五 粮 液]) 0.010234
Equity(000887 [中鼎股份]) 0.001965
Equity(000901 [航天科技]) -0.047829
Equity(000977 [浪潮信息]) -0.025226
Equity(002001 [新 和 成]) 0.003505
Equity(002019 [亿帆医药]) -0.070273
Equity(002030 [达安基因]) -0.008768
Equity(002048 [宁波华翔]) 0.003182
Equity(002049 [紫光国芯]) -0.053731
Equity(002055 [得润电子]) -0.011713
Equity(002074 [国轩高科]) -0.051427
Equity(002095 [生 意 宝]) -0.069842
Equity(002108 [沧州明珠]) -0.001833
Equity(002114 [罗平锌电]) 0.003570
Equity(002145 [中核钛白]) -0.022088
Equity(002174 [游族网络]) -0.082049
Equity(002175 [东方网络]) -0.044055
Equity(002183 [怡 亚 通]) -0.054504
Equity(002192 [融捷股份]) 0.067142
Equity(002195 [二三四五]) -0.061856
... ...
Equity(300496 [中科创达]) 0.032685
Equity(I399001 [深证成指]) -0.018486
Equity(I399002 [深成指R]) -0.018102
Equity(I399003 [成份B指]) -0.014895
Equity(I399006 [创业板指]) -0.029178
Equity(I399102 [创业板综]) -0.019065
Equity(I399106 [深证综指]) -0.014595
Equity(600118 [中国卫星]) -0.044158
Equity(600259 [广晟有色]) 0.128273
Equity(600343 [航天动力]) 0.082617
Equity(600366 [宁波韵升]) 0.006033
Equity(600446 [金证股份]) -0.113245
Equity(600519 [贵州茅台]) 0.005197
Equity(600547 [山东黄金]) 0.048287
Equity(600549 [厦门钨业]) 0.094953
Equity(600570 [恒生电子]) -0.054239
Equity(600630 [龙头股份]) -0.048579
Equity(600645 [中源协和]) -0.036501
Equity(600650 [锦江投资]) -0.061220
Equity(600654 [*ST中安]) -0.094218
Equity(600680 [*ST上普]) -0.072036
Equity(600699 [均胜电子]) -0.033419
Equity(600756 [浪潮软件]) -0.067595
Equity(600884 [杉杉股份]) -0.011741
Equity(600893 [航发动力]) -0.037938
Equity(600895 [张江高科]) -0.009793
Equity(601069 [西部黄金]) -0.019182
Equity(601198 [东兴证券]) -0.044635
Equity(601318 [中国平安]) 0.008519
Equity(601336 [新华保险]) -0.000720
[130 rows x 1 columns]
日期 2016-05-06 15:00:00+08:00 结果:
percent_difference
Equity(000018 [神州长城]) -0.048254
Equity(000025 [特 力A]) -0.042683
Equity(000049 [德赛电池]) -0.029369
Equity(000333 [美的集团]) 0.023776
Equity(000503 [海虹控股]) 0.088084
Equity(000626 [远大控股]) -0.098635
Equity(000687 [华讯方舟]) 0.041368
Equity(000697 [炼石有色]) -0.049609
Equity(000738 [航发控制]) -0.042480
Equity(000762 [西藏矿业]) -0.011417
Equity(000839 [中信国安]) 0.033930
Equity(000858 [五 粮 液]) 0.020600
Equity(000887 [中鼎股份]) 0.002198
Equity(000901 [航天科技]) -0.047929
Equity(000977 [浪潮信息]) -0.027944
Equity(002001 [新 和 成]) 0.006372
Equity(002019 [亿帆医药]) -0.070193
Equity(002030 [达安基因]) -0.008670
Equity(002049 [紫光国芯]) -0.050993
Equity(002055 [得润电子]) -0.018192
Equity(002074 [国轩高科]) -0.045182
Equity(002095 [生 意 宝]) -0.065243
Equity(002108 [沧州明珠]) 0.014166
Equity(002114 [罗平锌电]) -0.000946
Equity(002174 [游族网络]) -0.085641
Equity(002175 [东方网络]) -0.042719
Equity(002183 [怡 亚 通]) -0.048927
Equity(002192 [融捷股份]) 0.070389
Equity(002195 [二三四五]) -0.057832
Equity(002229 [鸿博股份]) 0.046092
... ...
Equity(I399002 [深成指R]) -0.015476
Equity(I399003 [成份B指]) -0.015386
Equity(I399006 [创业板指]) -0.025660
Equity(I399102 [创业板综]) -0.015622
Equity(I399106 [深证综指]) -0.011940
Equity(600066 [宇通客车]) 0.012013
Equity(600118 [中国卫星]) -0.041840
Equity(600158 [中体产业]) -0.032597
Equity(600259 [广晟有色]) 0.119362
Equity(600343 [航天动力]) 0.077689
Equity(600366 [宁波韵升]) 0.013621
Equity(600446 [金证股份]) -0.109033
Equity(600519 [贵州茅台]) 0.009867
Equity(600547 [山东黄金]) 0.050539
Equity(600549 [厦门钨业]) 0.089546
Equity(600570 [恒生电子]) -0.048944
Equity(600630 [龙头股份]) -0.037891
Equity(600645 [中源协和]) -0.037445
Equity(600650 [锦江投资]) -0.050921
Equity(600654 [*ST中安]) -0.095305
Equity(600680 [*ST上普]) -0.069294
Equity(600699 [均胜电子]) -0.030380
Equity(600756 [浪潮软件]) -0.066893
Equity(600884 [杉杉股份]) -0.007806
Equity(600893 [航发动力]) -0.035189
Equity(600895 [张江高科]) -0.018972
Equity(601069 [西部黄金]) -0.014570
Equity(601198 [东兴证券]) -0.043977
Equity(601318 [中国平安]) 0.007862
Equity(601336 [新华保险]) 0.000661
[129 rows x 1 columns]
日期 2016-05-09 15:00:00+08:00 结果:
percent_difference
Equity(000025 [特 力A]) -0.043421
Equity(000049 [德赛电池]) -0.028297
Equity(000333 [美的集团]) 0.028489
Equity(000503 [海虹控股]) 0.105030
Equity(000626 [远大控股]) -0.098675
Equity(000687 [华讯方舟]) 0.032544
Equity(000697 [炼石有色]) -0.049490
Equity(000738 [航发控制]) -0.038886
Equity(000762 [西藏矿业]) -0.002773
Equity(000858 [五 粮 液]) 0.027852
Equity(000887 [中鼎股份]) 0.001684
Equity(000901 [航天科技]) -0.050223
Equity(000977 [浪潮信息]) -0.030522
Equity(002019 [亿帆医药]) -0.074789
Equity(002030 [达安基因]) -0.011981
Equity(002049 [紫光国芯]) -0.047397
Equity(002055 [得润电子]) -0.025428
Equity(002074 [国轩高科]) -0.039654
Equity(002095 [生 意 宝]) -0.061929
Equity(002114 [罗平锌电]) -0.006900
Equity(002174 [游族网络]) -0.086752
Equity(002175 [东方网络]) -0.040374
Equity(002183 [怡 亚 通]) -0.044003
Equity(002192 [融捷股份]) 0.080209
Equity(002195 [二三四五]) -0.051748
Equity(002229 [鸿博股份]) 0.054533
Equity(002241 [歌尔股份]) 0.068617
Equity(002253 [川大智胜]) -0.003798
Equity(002268 [卫 士 通]) -0.073447
Equity(002273 [水晶光电]) -0.060193
... ...
Equity(I399001 [深证成指]) -0.014439
Equity(I399002 [深成指R]) -0.013900
Equity(I399003 [成份B指]) -0.017938
Equity(I399006 [创业板指]) -0.022896
Equity(I399102 [创业板综]) -0.012692
Equity(I399106 [深证综指]) -0.010198
Equity(600066 [宇通客车]) 0.016855
Equity(600118 [中国卫星]) -0.040286
Equity(600259 [广晟有色]) 0.113868
Equity(600343 [航天动力]) 0.065937
Equity(600366 [宁波韵升]) 0.015246
Equity(600446 [金证股份]) -0.102764
Equity(600519 [贵州茅台]) 0.012310
Equity(600547 [山东黄金]) 0.046841
Equity(600549 [厦门钨业]) 0.082466
Equity(600570 [恒生电子]) -0.039914
Equity(600630 [龙头股份]) -0.028551
Equity(600645 [中源协和]) -0.035398
Equity(600650 [锦江投资]) -0.039604
Equity(600654 [*ST中安]) -0.098123
Equity(600680 [*ST上普]) -0.065630
Equity(600699 [均胜电子]) -0.026411
Equity(600756 [浪潮软件]) -0.065755
Equity(600884 [杉杉股份]) -0.004654
Equity(600893 [航发动力]) -0.032175
Equity(601069 [西部黄金]) -0.016846
Equity(601198 [东兴证券]) -0.041407
Equity(601318 [中国平安]) 0.006196
Equity(601336 [新华保险]) 0.000897
Equity(601601 [中国太保]) 0.028238
[120 rows x 1 columns]
日期 2016-05-10 15:00:00+08:00 结果:
percent_difference
Equity(000025 [特 力A]) -0.047050
Equity(000049 [德赛电池]) -0.033571
Equity(000333 [美的集团]) 0.030425
Equity(000503 [海虹控股]) 0.110568
Equity(000626 [远大控股]) -0.102114
Equity(000738 [航发控制]) -0.039245
Equity(000762 [西藏矿业]) 0.006234
Equity(000858 [五 粮 液]) 0.033542
Equity(000887 [中鼎股份]) -0.003683
Equity(000901 [航天科技]) -0.058855
Equity(000977 [浪潮信息]) -0.038625
Equity(002019 [亿帆医药]) -0.084214
Equity(002030 [达安基因]) -0.023996
Equity(002049 [紫光国芯]) -0.051624
Equity(002055 [得润电子]) -0.029445
Equity(002074 [国轩高科]) -0.039836
Equity(002095 [生 意 宝]) -0.064790
Equity(002114 [罗平锌电]) -0.016281
Equity(002174 [游族网络]) -0.095751
Equity(002175 [东方网络]) -0.046730
Equity(002183 [怡 亚 通]) -0.044719
Equity(002192 [融捷股份]) 0.078727
Equity(002195 [二三四五]) -0.051240
Equity(002229 [鸿博股份]) 0.057563
Equity(002241 [歌尔股份]) 0.062526
Equity(002253 [川大智胜]) -0.013608
Equity(002268 [卫 士 通]) -0.076597
Equity(002273 [水晶光电]) -0.057596
Equity(002292 [奥飞娱乐]) -0.083122
Equity(002407 [多氟多]) 0.101666
... ...
Equity(300496 [中科创达]) 0.087909
Equity(I399001 [深证成指]) -0.016361
Equity(I399002 [深成指R]) -0.015736
Equity(I399003 [成份B指]) -0.023321
Equity(I399006 [创业板指]) -0.024217
Equity(I399102 [创业板综]) -0.014124
Equity(I399106 [深证综指]) -0.012271
Equity(600066 [宇通客车]) 0.019560
Equity(600118 [中国卫星]) -0.043752
Equity(600259 [广晟有色]) 0.103131
Equity(600343 [航天动力]) 0.051940
Equity(600366 [宁波韵升]) 0.017681
Equity(600446 [金证股份]) -0.104318
Equity(600519 [贵州茅台]) 0.012579
Equity(600547 [山东黄金]) 0.043079
Equity(600549 [厦门钨业]) 0.072948
Equity(600570 [恒生电子]) -0.035683
Equity(600645 [中源协和]) -0.036906
Equity(600650 [锦江投资]) -0.031992
Equity(600654 [*ST中安]) -0.105227
Equity(600680 [*ST上普]) -0.070075
Equity(600699 [均胜电子]) -0.025691
Equity(600756 [浪潮软件]) -0.070497
Equity(600884 [杉杉股份]) -0.007035
Equity(601020 [华钰矿业]) -0.025815
Equity(601069 [西部黄金]) -0.021045
Equity(601198 [东兴证券]) -0.046456
Equity(601318 [中国平安]) 0.002915
Equity(601336 [新华保险]) -0.001274
Equity(601601 [中国太保]) 0.021671
[113 rows x 1 columns]
[2018-01-09 01:10:25.864993] INFO: Performance: Simulated 6 trading days out of 6.
###Markdown
`Masking` Sometimes we only want to run a computation over a restricted set of securities, especially when the computation is expensive. All factors and many factor methods accept a `mask` argument, which must be a `Filter`; the computation is then performed only for the securities where the filter returns `True`. `Masking Factors` Let's say we want our pipeline to output securities with a high or low percent difference, but we also only want to consider securities with a 30-day average dollar volume above 1,000,000,000. To do this, let's rearrange our make_pipeline function so that we first create the high_dollar_volume filter. We can then use this filter as a mask for the moving average factors by passing high_dollar_volume as the mask argument to SimpleMovingAverage.
###Code
# Dollar volume factor
dollar_volume = AverageDollarVolume(window_length=30)
# High dollar volume filter
high_dollar_volume = (dollar_volume > 1000000000)
# Average close price factors
mean_close_10 = SimpleMovingAverage(inputs=[USEquityPricing.close], window_length=10, mask=high_dollar_volume)
mean_close_30 = SimpleMovingAverage(inputs=[USEquityPricing.close], window_length=30, mask=high_dollar_volume)
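# With mask=high_dollar_volume, both moving averages are computed only for securities that pass the filter; all others are skipped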
# Relative difference factor
percent_difference = (mean_close_10 - mean_close_30) / mean_close_30
###Output
_____no_output_____
###Markdown
Applying the mask to SimpleMovingAverage restricts the average close price factors to a computation over only the securities that pass the high_dollar_volume filter, rather than over the full universe. When we combine mean_close_10 and mean_close_30 to form percent_difference, the computation is performed on that same masked set of securities. `Masking Filters` Masks can also be applied to methods that return filters, such as top, bottom, and percentile_between. Masks are most useful when we want to apply a filter in the earlier steps of a combined computation. For example, suppose we want the 50 securities with the highest open price that are also in the top 10% of dollar volume, and that we then want the 90th-100th percentile of these securities by close price. We can do this with the following:
###Code
# Dollar volume factor
dollar_volume = AverageDollarVolume(window_length=30)
# High dollar volume filter
high_dollar_volume = dollar_volume.percentile_between(90,100)
# Top open price filter (high dollar volume securities)
top_open_price = USEquityPricing.open.latest.top(50, mask=high_dollar_volume)
# Top percentile close price filter (high dollar volume, top 50 open price)
high_close_price = USEquityPricing.close.latest.percentile_between(90, 100, mask=top_open_price)
###Output
_____no_output_____
###Markdown
Let's put this into make_pipeline and output an empty pipeline screened with our high_close_price filter.
###Code
%%zipline --start 2016-5-2 --end 2016-5-10 --capital-base 100000
from zipline.pipeline import Pipeline
from zipline.pipeline.data import USEquityPricing
from zipline.pipeline import Fundamentals
from zipline.pipeline.factors import SimpleMovingAverage, AverageDollarVolume
from zipline.pipeline.builtin import IsStock
from zipline.api import attach_pipeline, pipeline_output
from zipline.api import symbol, sid, get_datetime
def make_pipeline():
# Dollar volume factor
dollar_volume = AverageDollarVolume(window_length=30)
# High dollar volume filter
high_dollar_volume = dollar_volume.percentile_between(90,100)
# Top open price filter (note: masked here by IsStock() rather than by the high_dollar_volume filter computed above)
top_open_price = USEquityPricing.open.latest.top(50, mask=IsStock())
# Top percentile close price filter (high dollar volume, top 50 open price)
high_close_price = USEquityPricing.close.latest.percentile_between(90, 100, mask=top_open_price)
return Pipeline(
screen=high_close_price
)
def initialize(context):
attach_pipeline(make_pipeline(), 'example')
def handle_data(context, data):
today = get_datetime('Asia/Shanghai')
output = pipeline_output('example')
# Note: the results themselves are not specially sorted (index in ascending order by default)
print('日期 {} 结果:\n {}'.format(today, output))
###Output
日期 2016-05-03 15:00:00+08:00 结果:
Empty DataFrame
Columns: []
Index: [Equity(002466 [天齐锂业]), Equity(300451 [创业软件]), Equity(300484 [蓝海华腾]), Equity(300496 [中科创达]), Equity(600519 [贵州茅台])]
日期 2016-05-04 15:00:00+08:00 结果:
Empty DataFrame
Columns: []
Index: [Equity(002466 [天齐锂业]), Equity(300451 [创业软件]), Equity(300484 [蓝海华腾]), Equity(300496 [中科创达]), Equity(600519 [贵州茅台])]
日期 2016-05-05 15:00:00+08:00 结果:
Empty DataFrame
Columns: []
Index: [Equity(002466 [天齐锂业]), Equity(300451 [创业软件]), Equity(300484 [蓝海华腾]), Equity(300496 [中科创达]), Equity(600519 [贵州茅台])]
日期 2016-05-06 15:00:00+08:00 结果:
Empty DataFrame
Columns: []
Index: [Equity(002466 [天齐锂业]), Equity(300451 [创业软件]), Equity(300484 [蓝海华腾]), Equity(300496 [中科创达]), Equity(600519 [贵州茅台])]
日期 2016-05-09 15:00:00+08:00 结果:
Empty DataFrame
Columns: []
Index: [Equity(002466 [天齐锂业]), Equity(300451 [创业软件]), Equity(300484 [蓝海华腾]), Equity(300496 [中科创达]), Equity(600519 [贵州茅台])]
日期 2016-05-10 15:00:00+08:00 结果:
Empty DataFrame
Columns: []
Index: [Equity(002466 [天齐锂业]), Equity(002709 [天赐材料]), Equity(300484 [蓝海华腾]), Equity(300496 [中科创达]), Equity(600519 [贵州茅台])]
[2018-01-09 01:10:29.481816] INFO: Performance: Simulated 6 trading days out of 6.
[2018-01-09 01:10:29.482818] INFO: Performance: first open: 2016-05-03 01:31:00+00:00
[2018-01-09 01:10:29.483823] INFO: Performance: last close: 2016-05-10 07:00:00+00:00
|
Copy_of_RealTimeVoiceCloning(multi_dl).ipynb | ###Markdown
Real-Time Voice Cloning This is a colab demo notebook using the open source project [CorentinJ/Real-Time-Voice-Cloning](https://github.com/CorentinJ/Real-Time-Voice-Cloning) to clone a voice. For other deep-learning Colab notebooks, visit [tugstugi/dl-colab-notebooks](https://github.com/tugstugi/dl-colab-notebooks). Original issue: https://github.com/tugstugi/dl-colab-notebooks/issues/18 Setup CorentinJ/Real-Time-Voice-Cloning
###Code
#@title Run this cell to Setup CorentinJ/Real-Time-Voice-Cloning
#@markdown * clone the project
#@markdown * download pretrained models
#@markdown * initialize the voice cloning models
%tensorflow_version 1.x
import os
from os.path import exists, join, basename, splitext
git_repo_url = 'https://github.com/CorentinJ/Real-Time-Voice-Cloning.git'
project_name = splitext(basename(git_repo_url))[0]
if not exists(project_name):
# clone and install
!git clone -q --recursive {git_repo_url}
# install dependencies
!cd {project_name} && pip install -q -r requirements.txt
!pip install -U --no-cache-dir gdown --pre
!apt-get install -qq libportaudio2
!pip install -q https://github.com/tugstugi/dl-colab-notebooks/archive/colab_utils.zip
download_ids = ['1n1sPXvT34yXFLT47QZA6FIRGrwMeSsZc',
'1jhlkXcYYsiP_eXeqxIqcnOHqAoFEwINY',
'1z3RV1oUzEhGTnbv3pBc3OVFFc_79fI0W',
'1-fpRMhyIbf_Ijm197pVEg6IzyKDxxTLh',
'148UUYI9yheoUZqztGwO4HOKql8Tt-ZVs']
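# Try each Google Drive mirror ID in turn; stop as soon as one download succeeds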
for id in download_ids:
print("Attept download from", id)
response = !cd {project_name} && gdown --id $id --output pretrained.zip && unzip pretrained.zip
if response[0] == 'Downloading...':
break
else:
continue
# download pretrained model
# response = !cd {project_name} && gdown https://drive.google.com/uc?id=1n1sPXvT34yXFLT47QZA6FIRGrwMeSsZc && unzip pretrained.zip
import sys
sys.path.append(project_name)
from IPython.display import display, Audio, clear_output
from IPython.utils import io
import ipywidgets as widgets
import numpy as np
from dl_colab_notebooks.audio import record_audio, upload_audio
from synthesizer.inference import Synthesizer
from encoder import inference as encoder
from vocoder import inference as vocoder
from pathlib import Path
encoder.load_model(project_name / Path("encoder/saved_models/pretrained.pt"))
synthesizer = Synthesizer(project_name / Path("synthesizer/saved_models/logs-pretrained/taco_pretrained"))
vocoder.load_model(project_name / Path("vocoder/saved_models/pretrained/pretrained.pt"))
#@title Run this cell to Record or Upload Audio
#@markdown * Either record audio from microphone or upload audio from file (.mp3 or .wav)
SAMPLE_RATE = 22050
record_or_upload = "Record" #@param ["Record", "Upload (.mp3 or .wav)"]
record_seconds = 10#@param {type:"number", min:1, max:10, step:1}
embedding = None
def _compute_embedding(audio):
display(Audio(audio, rate=SAMPLE_RATE, autoplay=True))
global embedding
embedding = None
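# Compute the speaker embedding from the recorded/uploaded audio; it conditions the synthesizer on this voice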
embedding = encoder.embed_utterance(encoder.preprocess_wav(audio, SAMPLE_RATE))
def _record_audio(b):
clear_output()
audio = record_audio(record_seconds, sample_rate=SAMPLE_RATE)
_compute_embedding(audio)
def _upload_audio(b):
clear_output()
audio = upload_audio(sample_rate=SAMPLE_RATE)
_compute_embedding(audio)
if record_or_upload == "Record":
button = widgets.Button(description="Record Your Voice")
button.on_click(_record_audio)
display(button)
else:
#button = widgets.Button(description="Upload Voice File")
#button.on_click(_upload_audio)
_upload_audio("")
#@title Run this to Synthesize a text (result) { run: "auto" }
text = "One of the two people who tested positive for the novel coronavirus in the United Kingdom is a student at the University of York in northern England." #@param {type:"string"}
def synthesize(embed, text):
print("Synthesizing new audio...")
#with io.capture_output() as captured:
specs = synthesizer.synthesize_spectrograms([text], [embed])
generated_wav = vocoder.infer_waveform(specs[0])
generated_wav = np.pad(generated_wav, (0, synthesizer.sample_rate), mode="constant")
clear_output()
display(Audio(generated_wav, rate=synthesizer.sample_rate, autoplay=True))
if embedding is None:
print("first record a voice or upload a voice file!")
else:
synthesize(embedding, text)
###Output
_____no_output_____ |
sample_code/date_utils.ipynb | ###Markdown
Field glossary: code = stock code; name = name; industry = industry; area = region; pe = price/earnings ratio; outstanding = tradable shares (100 million); totals = total shares (100 million); totalAssets = total assets (10 thousand); liquidAssets = current assets; fixedAssets = fixed assets; reserved = capital reserve; reservedPerShare = capital reserve per share; esp = earnings per share; bvps = net assets per share; pb = price/book ratio; timeToMarket = listing date; undp = undistributed profit; perundp = undistributed profit per share; rev = revenue YoY (%); profit = profit YoY (%); gpr = gross margin (%); npr = net profit margin (%); holders = number of shareholders. ['name', 'pe', 'outstanding', 'totals', 'totalAssets', 'liquidAssets', 'fixedAssets', 'esp', 'bvps', 'pb', 'perundp', 'rev', 'profit', 'gpr', 'npr', 'holders']
###Code
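# The columns listed above match tushare's legacy stock-basics API. The DataFrame `df` used below is
# assumed to have been built in an earlier (omitted) cell, e.g. roughly: df = ts.get_stock_basics()
# plus an added 'UP' column holding each name's pinyin initials (hypothetical helper, not shown here).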
import tushare as ts  # assumed import; this notebook calls ts.get_realtime_quotes / ts.get_k_data below

col_show = ['name', 'open', 'pre_close', 'price', 'high', 'low', 'volume', 'amount', 'time', 'code']
initial_letter = ['HTGD','OFKJ','CDKJ','ZJXC','GXKJ','FHTX','DZJG']
code =[]
for letter in initial_letter:
code.append(df[df['UP']==letter].code[0])
#print(code)
if code != '': #not empty != ''
df_price = ts.get_realtime_quotes(code)
#print(df_price)
#df_price.columns.values.tolist()
df_price[col_show]
###Output
_____no_output_____
###Markdown
TO-DO: Add the map from initials to stock codes; build up a DataFrame with fundamentals and indicators; for leading indicators, cache more data before the beginning of the date range.
###Code
from matplotlib.mlab import csv2rec
df=ts.get_k_data("002456",start='2018-01-05',end='2018-01-09')
df.to_csv("temp.csv")
r=csv2rec("temp.csv")
#r.date
import time, datetime
#str = df[df.code == '600487'][clommun_show].name.values
#print(str)
today=datetime.date.today()
yesterday = today - datetime.timedelta(1)
#print(today, yesterday)
i = datetime.datetime.now()
print ("当前的日期和时间是 %s" % i)
print ("ISO格式的日期和时间是 %s" % i.isoformat() )
print ("当前的年份是 %s" %i.year)
print ("当前的月份是 %s" %i.month)
print ("当前的日期是 %s" %i.day)
print ("dd/mm/yyyy 格式是 %s/%s/%s" % (i.day, i.month, i.year) )
print ("当前小时是 %s" %i.hour)
print ("当前分钟是 %s" %i.minute)
print ("当前秒是 %s" %i.second)
import time
localtime = time.localtime(time.time())
print("本地时间为 :", localtime)
# 格式化成2016-03-20 11:45:39形式
print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
# 格式化成Sat Mar 28 22:24:24 2016形式
print(time.strftime("%a %b %d %H:%M:%S %Y", time.localtime()))
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import calendar
cal = calendar.month(2019, 3)
#print (cal)
###Output
_____no_output_____ |
pytorch/data_parallel/maskrcnn/.ipynb_checkpoints/pytorch_smdataparallel_maskrcnn_demo-checkpoint.ipynb | ###Markdown
Distributed data parallel MaskRCNN training with PyTorch and SMDataParallel. SMDataParallel is a new capability of Amazon SageMaker that lets you train deep learning models faster and more cheaply. SMDataParallel is a distributed data parallel training framework for PyTorch, TensorFlow, and MXNet. This notebook example shows how to train a MaskRCNN model on [Amazon SageMaker](https://aws.amazon.com/sagemaker/) using SMDataParallel with PyTorch (version 1.6.0) and an [Amazon FSx for Lustre file-system](https://aws.amazon.com/fsx/lustre/) as the data source. The outline of this example is as follows. 1. Prepare the COCO 2017 dataset in [Amazon S3](https://aws.amazon.com/s3/). 2. Create an Amazon FSx for Lustre file system and import the data from S3 into the file system. 3. Build the Docker training image and push it to [Amazon ECR](https://aws.amazon.com/ecr/). 4. Configure the data input channels for SageMaker. 5. Set the hyperparameters. 6. Define the training metrics. 7. Define the training job, set the distribution strategy to SMDataParallel, and start training. **Note:** For large training datasets, we recommend using [Amazon FSx](https://aws.amazon.com/fsx/) as the input file system for the SageMaker training job. FSx file input to SageMaker avoids downloading the training data every time a SageMaker training job is launched (which is what happens with S3 input) and provides excellent data read throughput, so it significantly shortens training start times on SageMaker. **Note:** This example requires SageMaker Python SDK v2.X. Amazon SageMaker initialization: Initialize the notebook instance and get the AWS Region and the SageMaker execution role. The IAM role ARN is used to grant training and hosting access to your data. See [Amazon SageMaker roles](https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html) for how to create it. If you need more than one role for the notebook instance, training, and hosting, replace `sagemaker.get_execution_role()` with the appropriate full IAM role ARN string. Since we will use FSx as described above, make sure this IAM role has the `FSx Access` permission attached.
###Code
%%time
! python3 -m pip install --upgrade sagemaker
import sagemaker
from sagemaker import get_execution_role
from sagemaker.estimator import Estimator
import boto3
sagemaker_session = sagemaker.Session()
bucket = sagemaker_session.default_bucket()
role = get_execution_role() # provide a pre-existing role ARN as an alternative to creating a new role
print(f'SageMaker Execution Role:{role}')
client = boto3.client('sts')
account = client.get_caller_identity()['Account']
print(f'AWS account:{account}')
session = boto3.session.Session()
region = session.region_name
print(f'AWS region:{region}')
###Output
_____no_output_____
###Markdown
Prepare the SageMaker training image. 1. By default, SageMaker uses the latest [Amazon Deep Learning Container Images (DLC)](https://github.com/aws/deep-learning-containers/blob/master/available_images.md) PyTorch training image. In this step we use it as the base image and install the additional dependency packages needed to train the MaskRCNN model. 2. The PyTorch-SMDataParallel MaskRCNN training script has been made available in the GitHub repository https://github.com/HerringForks/DeepLearningExamples.git. This repository is cloned inside the training image in order to run model training. Build the Docker image and push it to ECR: run the commands below to build the Docker image and push it to ECR.
###Code
image = "<ADD NAME OF REPO>" # Example: mask-rcnn-smdataparallel-sagemaker
tag = "<ADD TAG FOR IMAGE>" # Example: pt1.6
!pygmentize ./Dockerfile
!pygmentize ./build_and_push.sh
%%time
! chmod +x build_and_push.sh; bash build_and_push.sh {region} {image} {tag}
###Output
_____no_output_____
###Markdown
Prepare the FSx input data for SageMaker. 1. Download and prepare the training dataset from S3. 2. Follow the steps listed here (https://docs.aws.amazon.com/fsx/latest/LustreGuide/create-fs-linked-data-repo.html) to create an FSx file system linked to the S3 bucket that holds the training data. You must add an endpoint that allows S3 access to your VPC. 3. Follow the steps listed here (https://aws.amazon.com/blogs/machine-learning/speed-up-training-on-amazon-sagemaker-using-amazon-efs-or-amazon-fsx-for-lustre-file-systems/) to configure the SageMaker training job to use FSx. Important notes: 1. When launching the SageMaker notebook instance, you must use the same `subnet`, `vpc`, and `security group` used by FSx. The same configuration is used in the SageMaker training job. 2. Make sure you set appropriate inbound/outbound rules in the `security group`. In particular, these ports must be open so that SageMaker can access the FSx file system from the training job: https://docs.aws.amazon.com/fsx/latest/LustreGuide/limit-access-security-groups.html 3. Make sure the `SageMaker IAM role` used to launch this SageMaker training job has access to `AmazonFSx`. SageMaker PyTorch Estimator function options: In the following code block you can update the estimator function to use a different instance type, instance count, and distribution strategy. You also pass the training script reviewed in the previous code cell to the estimator function. **Instance type** SMDataParallel supports model training on SageMaker only with the instance types below: 1. ml.p3.16xlarge 1. ml.p3dn.24xlarge [Recommended] 1. ml.p4d.24xlarge [Recommended] **Instance count** To get the best performance and the most out of SMDataParallel, you should use at least 2 instances, but you can also use 1 for testing this example. **Distribution strategy** Note that to use DDP mode, you need to update the `distribution` strategy and set it to use `smdistributed dataparallel`.
###Code
import os
from sagemaker.pytorch import PyTorch
instance_type = "ml.p3dn.24xlarge" # Other supported instance type: ml.p3.16xlarge
instance_count = 2 # You can use 2, 4, 8 etc.
docker_image = f"{account}.dkr.ecr.{region}.amazonaws.com/{image}:{tag}" # YOUR_ECR_IMAGE_BUILT_WITH_ABOVE_DOCKER_FILE
region = '<REGION>' # Example: us-west-2
username = 'AWS'
subnets=['<SUBNET_ID>'] # Should be same as Subnet used for FSx. Example: subnet-0f9XXXX
security_group_ids=['<SECURITY_GROUP_ID>'] # Should be same as Security group used for FSx. sg-03ZZZZZZ
job_name = 'pytorch-smdataparallel-mrcnn-fsx' # This job name is used as prefix to the sagemaker training job. Makes it easy for your look for your training job in SageMaker Training job console.
file_system_id= '<FSX_ID>' # FSx file system ID with your training dataset. Example: 'fs-0bYYYYYY'
config_file = 'e2e_mask_rcnn_R_50_FPN_1x_64GPU_4bs.yaml'
hyperparameters = {
"config-file": config_file,
"skip-test": "",
"seed": 987,
"dtype": "float16",
}
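# Hyperparameters forwarded to the MaskRCNN training script: "dtype": "float16" typically requests
# mixed-precision training, and "seed" fixes initialization for reproducibility (script-specific behavior)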
estimator = PyTorch(entry_point='train_pytorch_smdataparallel_maskrcnn.py',
role=role,
image_uri=docker_image,
source_dir='.',
instance_count=instance_count,
instance_type=instance_type,
framework_version='1.6.0',
py_version='py3',
sagemaker_session=sagemaker_session,
hyperparameters=hyperparameters,
subnets=subnets,
security_group_ids=security_group_ids,
debugger_hook_config=False,
# Training using SMDataParallel Distributed Training Framework
distribution={'smdistributed':{
'dataparallel':{
'enabled': True
}
}
}
)
# Configure FSx Input for your SageMaker Training job
from sagemaker.inputs import FileSystemInput
file_system_directory_path= 'YOUR_MOUNT_PATH_FOR_TRAINING_DATA' # NOTE: '/fsx/' will be the root mount path. Example: '/fsx/mask_rcnn/PyTorch'
file_system_access_mode='ro'
file_system_type='FSxLustre'
train_fs = FileSystemInput(file_system_id=file_system_id,
file_system_type=file_system_type,
directory_path=file_system_directory_path,
file_system_access_mode=file_system_access_mode)
data_channels = {'train': train_fs}
# Submit SageMaker training job
estimator.fit(inputs=data_channels, job_name=job_name)
###Output
_____no_output_____ |
notebooks/tf.data.ipynb | ###Markdown
tf.data: Build TensorFlow input pipelines https://www.tensorflow.org/guide/data?hl=ja
###Code
!nvidia-smi
import tensorflow as tf
import pathlib
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
dataset = tf.data.Dataset.from_tensor_slices([8, 3, 0, 8, 2, 1])
dataset
for elem in dataset:
print(elem.numpy())
it = iter(dataset)
print(next(it).numpy())
dataset.reduce(0, lambda state, value: state + value).numpy()
tf.random.uniform([4, 10])
###Output
_____no_output_____
###Markdown
Dataset.from_tensor_slices() - Dataset.from_tensor_slices() treats the first dimension of the input tensor as the number of examples - the remaining dimensions become the shape of each returned element - to inspect the element shape, use tensor.element_spec - () denotes a scalar - Dataset.zip() can merge Datasets, but only if their element counts line up - to merge ndarrays rather than Datasets, just pass a tuple to from_tensor_slices() - tf.random.uniform([4, 100]) is handy for creating throwaway input
###Code
dataset1 = tf.data.Dataset.from_tensor_slices(tf.random.uniform([4, 10]))
dataset1.element_spec
dataset2 = tf.data.Dataset.from_tensor_slices(
(tf.random.uniform([4]),
tf.random.uniform([4, 100], maxval=100, dtype=tf.int32))
)
dataset2.element_spec
dataset3 = tf.data.Dataset.zip((dataset1, dataset2))
dataset3.element_spec
dataset1 = tf.data.Dataset.from_tensor_slices(
tf.random.uniform([4, 10], minval=1, maxval=10, dtype=tf.int32)
)
dataset1
for z in dataset1:
print(z.numpy())
dataset2 = tf.data.Dataset.from_tensor_slices(
(tf.random.uniform([4]),
tf.random.uniform([4, 100], maxval=100, dtype=tf.int32))
)
dataset2
dataset3 = tf.data.Dataset.zip((dataset1, dataset2))
dataset3
for a, (b, c) in dataset3:
print(a.shape, b.shape, c.shape)
###Output
(10,) () (100,)
(10,) () (100,)
(10,) () (100,)
(10,) () (100,)
###Markdown
Creating a Dataset from in-memory ndarrays - Dataset.from_tensor_slices() is the best fit - the ndarrays are converted to tf.Tensors
###Code
train, test = tf.keras.datasets.fashion_mnist.load_data()
len(train), train[0].shape, train[1].shape, type(train[0])
images, labels = train
images = images / 255
# 0次元目がデータ数として扱われる
# imagesもlabelsも0次元目が60000で同じサイズなのに注意
dataset = tf.data.Dataset.from_tensor_slices((images, labels))
dataset
###Output
_____no_output_____
###Markdown
Creating a Dataset from a generator - Dataset.from_generator() - repeat() makes it yield indefinitely - batch() groups multiple elements together - take() limits how many elements are drawn - specify output_shapes whenever possible; use None for dimensions whose size is unknown or variable - padded_batch() pads per batch; if padded_shapes uses None for a dimension, it is padded to the longest element in the batch
###Code
def count(stop):
i = 0
while i < stop:
yield i
i += 1
for n in count(5):
print(n)
ds_counter = tf.data.Dataset.from_generator(count, args=[25], output_types=tf.int32, output_shapes=())
for n in ds_counter:
print(n)
for count_batch in ds_counter.repeat().batch(10).take(10):
print(count_batch.numpy())
# 出力サイズが可変の系列を返すgenerator
def gen_series():
i = 0
while True:
size = np.random.randint(0, 10)
yield i, np.random.normal(size=(size, ))
i += 1
for i, series in gen_series():
print(i, ':', str(series))
if i > 5:
break
ds_series = tf.data.Dataset.from_generator(
gen_series,
output_types=(tf.int32, tf.float32),
output_shapes=((), (None, )))
ds_series
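# Shuffle, then pad per batch: the scalar ids need no padding (shape ()); each variable-length series is padded to the longest series in its batch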
ds_series_batch = ds_series.shuffle(20).padded_batch(10, padded_shapes=((), (None, )))
ids, sequence_batch = next(iter(ds_series_batch))
print(ids.shape)
print(sequence_batch.shape)
print(sequence_batch.numpy())
###Output
[[ 0.0000000e+00 0.0000000e+00 0.0000000e+00 0.0000000e+00
0.0000000e+00 0.0000000e+00 0.0000000e+00 0.0000000e+00]
[ 1.6893477e+00 -1.0703098e+00 1.2316743e+00 -7.8907430e-01
4.8051316e-01 -1.7831041e-01 1.0625595e+00 0.0000000e+00]
[ 4.9014375e-01 8.4993142e-01 4.7716224e-01 -4.6194640e-01
-1.9587033e+00 0.0000000e+00 0.0000000e+00 0.0000000e+00]
[ 3.9407137e-01 -7.3681027e-01 -1.8738458e+00 -1.3484717e-02
-1.8493785e+00 2.4907112e-01 -7.9216349e-01 0.0000000e+00]
[ 4.5469400e-01 -9.2915034e-01 6.7814732e-01 -1.2471077e+00
4.2199367e-01 -7.2679061e-01 -1.3780706e+00 1.2675012e-03]
[ 6.2312776e-01 4.2411977e-01 -1.0572541e+00 -8.7057263e-01
0.0000000e+00 0.0000000e+00 0.0000000e+00 0.0000000e+00]
[ 1.2746660e+00 1.4433522e+00 -1.5729892e+00 1.1084211e+00
-1.0231497e+00 -2.6219788e-01 -1.2915394e+00 0.0000000e+00]
[-1.4109733e+00 -3.6311680e-01 -1.2182708e+00 1.0093077e-01
-6.1111176e-01 -4.8463607e-01 0.0000000e+00 0.0000000e+00]
[ 3.5503030e-01 -3.8825443e-01 0.0000000e+00 0.0000000e+00
0.0000000e+00 0.0000000e+00 0.0000000e+00 0.0000000e+00]
[ 0.0000000e+00 0.0000000e+00 0.0000000e+00 0.0000000e+00
0.0000000e+00 0.0000000e+00 0.0000000e+00 0.0000000e+00]]
###Markdown
ImageDataGenerator - ImageDataGenerator.flow_from_directory() returns a generator, so it can be wrapped in a Dataset - following the official guide verbatim raises an InvalidArgumentError when iterating - https://github.com/tensorflow/tensorflow/issues/33133```pythonimg_gen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1./255, rotation_range=20)gen = img_gen.flow_from_directory(flowers, batch_size=64)ds = tf.data.Dataset.from_generator( lambda: gen, output_types=(tf.float32, tf.float32), output_shapes=([None, 256, 256, 3], [None, 5]))```
###Code
flowers = tf.keras.utils.get_file(
'flower_photos',
'https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz',
untar=True)
img_gen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1./255, rotation_range=20)
images, labels = next(img_gen.flow_from_directory(flowers, batch_size=32))
images.dtype, images.shape
labels.dtype, labels.shape
img_gen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1./255, rotation_range=20)
gen = img_gen.flow_from_directory(flowers, batch_size=64)
ds = tf.data.Dataset.from_generator(
lambda: gen,
output_types=(tf.float32, tf.float32),
output_shapes=([None, 256, 256, 3], [None, 5])
)
for x, y in ds:
print(x.shape, y.shape)
break
###Output
(64, 256, 256, 3) (64, 5)
###Markdown
TFRecord - also works for data that does not fit in memory - a binary format - use TFRecordDataset to build a Dataset from a .tfrec file
###Code
fsns_test_file = tf.keras.utils.get_file(
"fsns.tfrec",
"https://storage.googleapis.com/download.tensorflow.org/data/fsns-20160927/testdata/fsns-00000-of-00001")
fsns_test_file
dataset = tf.data.TFRecordDataset(filenames=[fsns_test_file])
dataset
raw_example = next(iter(dataset))
parsed = tf.train.Example.FromString(raw_example.numpy())
parsed.features.feature['image/text']
###Output
_____no_output_____
###Markdown
Text Dataset - tf.data.TextLineDataset can be used
###Code
directory_url = 'https://storage.googleapis.com/download.tensorflow.org/data/illiad/'
file_names = ['cowper.txt', 'derby.txt', 'butler.txt']
file_paths = [
tf.keras.utils.get_file(file_name, directory_url + file_name) for file_name in file_names
]
file_paths
dataset = tf.data.TextLineDataset(file_paths)
for line in dataset.take(5):
print(line)
###Output
tf.Tensor(b"\xef\xbb\xbfAchilles sing, O Goddess! Peleus' son;", shape=(), dtype=string)
tf.Tensor(b'His wrath pernicious, who ten thousand woes', shape=(), dtype=string)
tf.Tensor(b"Caused to Achaia's host, sent many a soul", shape=(), dtype=string)
tf.Tensor(b'Illustrious into Ades premature,', shape=(), dtype=string)
tf.Tensor(b'And Heroes gave (so stood the will of Jove)', shape=(), dtype=string)
###Markdown
Simple batching - to form a batch, the tensors must all have the same size - batch() assembles mini-batches - by default the size of the last batch is unknown, so the batch dimension shows as None - with drop_remainder=True the batch size becomes fixed and is therefore displayed
###Code
inc_dataset = tf.data.Dataset.range(100)
dec_dataset = tf.data.Dataset.range(0, -100, -1)
inc_dataset
dec_dataset
dataset = tf.data.Dataset.zip((inc_dataset, dec_dataset))
dataset
batched_dataset = dataset.batch(4)
batched_dataset
for x, y in batched_dataset.take(3):
print(x.numpy(), y.numpy())
batched_dataset = dataset.batch(7, drop_remainder=True)
batched_dataset
###Output
_____no_output_____
###Markdown
Batching tensors with padding - batch() requires every element of a batch to have the same size - for data with variable length, such as sequences, use Dataset.padded_batch() - for padded_shapes, just mirror the shapes of the preceding dataset - set the dimensions you want padded to None - padding with values other than 0 is also possible
###Code
dataset = tf.data.Dataset.range(100)
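# Map each x to a vector of length x filled with the value x, so element lengths vary from 0 to 99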
dataset = dataset.map(lambda x: tf.fill([tf.cast(x, tf.int32)], x))
dataset
dataset = dataset.padded_batch(4, padded_shapes=(None, ))
for batch in dataset.take(2):
print(batch)
a = np.ones(shape=(3, 5))
b = np.ones(shape=(1, 5))
c = np.ones(shape=(4, 5))
d = np.ones(shape=(7, 5))
data = [a, b, c, d]
data
dataset = tf.data.Dataset.from_generator(lambda: data, output_types=data[0].dtype, output_shapes=([None, 5]))
dataset
dataset = dataset.padded_batch(2, padded_shapes=(None, 5))
for x in dataset:
print(x.numpy())
###Output
[[[1. 1. 1. 1. 1.]
[1. 1. 1. 1. 1.]
[1. 1. 1. 1. 1.]]
[[1. 1. 1. 1. 1.]
[0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0.]]]
[[[1. 1. 1. 1. 1.]
[1. 1. 1. 1. 1.]
[1. 1. 1. 1. 1.]
[1. 1. 1. 1. 1.]
[0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0.]]
[[1. 1. 1. 1. 1.]
[1. 1. 1. 1. 1.]
[1. 1. 1. 1. 1.]
[1. 1. 1. 1. 1.]
[1. 1. 1. 1. 1.]
[1. 1. 1. 1. 1.]
[1. 1. 1. 1. 1.]]]
###Markdown
Consuming sets of files - how to build a Dataset of file paths - Dataset.list_files()
###Code
flowers_root = tf.keras.utils.get_file(
'flower_photos',
'https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz',
untar=True)
flowers_root = pathlib.Path(flowers_root)
flowers_root
for item in flowers_root.glob('*'):
print(item.name)
list_ds = tf.data.Dataset.list_files(str(flowers_root / '*/*'))
for f in list_ds.take(5):
print(f.numpy())
def process_path(file_path):
label = tf.strings.split(file_path, '/')[-2]
return tf.io.read_file(file_path), label
labeled_ds = list_ds.map(process_path)
labeled_ds
for image_raw, label_text in labeled_ds.take(1):
print(repr(image_raw.numpy()[:30]))
print(label_text.numpy())
###Output
b'\xff\xd8\xff\xe0\x00\x10JFIF\x00\x01\x01\x00\x00\x01\x00\x01\x00\x00\xff\xe2\x0cXICC_PR'
b'sunflowers'
###Markdown
Training workflows Repeat - Dataset.repeat() lets you sweep over the same data epoch after epoch - note that repeat().batch() and batch().repeat() behave differently - do not use repeat() when you drive the epoch loop yourself!
###Code
titanic_file = tf.keras.utils.get_file(
'train.csv',
'https://storage.googleapis.com/tf-datasets/titanic/train.csv')
titanic_file
titanic_lines = tf.data.TextLineDataset(titanic_file)
for line in titanic_lines:
print(line.numpy())
break
def plot_batch_sizes(ds):
batch_sizes = [batch.shape[0] for batch in ds]
plt.bar(range(len(batch_sizes)), batch_sizes)
plt.xlabel('Batch number')
plt.ylabel('Batch size')
titanic_batches = titanic_lines.repeat(3).batch(128)
plot_batch_sizes(titanic_batches)
titanic_batches = titanic_lines.batch(128).repeat(3)
plot_batch_sizes(titanic_batches)
epochs = 3
dataset = titanic_lines.batch(128)
for epoch in range(epochs):
for batch in dataset:
print(batch.shape)
print('End of epoch: ', epoch)
###Output
(128,)
(128,)
(128,)
(128,)
(116,)
End of epoch: 0
(128,)
(128,)
(128,)
(128,)
(116,)
End of epoch: 1
(128,)
(128,)
(128,)
(128,)
(116,)
End of epoch: 2
###Markdown
Shuffle - a small buffer_size does not shuffle the data well - you can tell because the first batch contains no index beyond buffer_size + batch size
###Code
lines = tf.data.TextLineDataset(titanic_file)
counter = tf.data.experimental.Counter()
dataset = tf.data.Dataset.zip((counter, lines))
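# Pair each CSV line with a running index so we can see which record positions land in the first shuffled batch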
dataset = dataset.shuffle(buffer_size=30)
dataset = dataset.batch(20)
dataset
for i, x in dataset:
print(i, x)
break
###Output
tf.Tensor([ 2 22 8 0 25 7 30 20 21 5 32 36 9 6 10 40 14 13 47 3], shape=(20,), dtype=int64) tf.Tensor(
[b'1,female,38.0,1,0,71.2833,First,C,Cherbourg,n'
b'1,female,28.0,0,0,7.8792,Third,unknown,Queenstown,y'
b'1,female,14.0,1,0,30.0708,Second,unknown,Cherbourg,n'
b'survived,sex,age,n_siblings_spouses,parch,fare,class,deck,embark_town,alone'
b'1,female,28.0,1,0,146.5208,First,B,Cherbourg,n'
b'1,female,27.0,0,2,11.1333,Third,unknown,Southampton,n'
b'1,male,28.0,0,0,7.2292,Third,unknown,Cherbourg,y'
b'0,male,28.0,0,0,7.225,Third,unknown,Cherbourg,y'
b'0,male,19.0,3,2,263.0,First,C,Southampton,n'
b'0,male,28.0,0,0,8.4583,Third,unknown,Queenstown,y'
b'0,female,40.0,1,0,9.475,Third,unknown,Southampton,n'
b'0,male,28.0,0,0,8.05,Third,unknown,Southampton,y'
b'1,female,4.0,1,1,16.7,Third,G,Southampton,n'
b'0,male,2.0,3,1,21.075,Third,unknown,Southampton,n'
b'0,male,20.0,0,0,8.05,Third,unknown,Southampton,y'
b'0,male,7.0,4,1,39.6875,Third,unknown,Southampton,n'
b'1,male,28.0,0,0,13.0,Second,unknown,Southampton,y'
b'0,male,2.0,4,1,29.125,Third,unknown,Queenstown,n'
b'0,male,11.0,5,2,46.9,Third,unknown,Southampton,n'
b'1,female,26.0,0,0,7.925,Third,unknown,Southampton,y'], shape=(20,), dtype=string)
###Markdown
Preprocessing data - Dataset.map(f) applies preprocessing - for non-TensorFlow preprocessing with numpy or scipy, use tf.py_function() - TensorFlow itself also ships a few extra ops in tensorflow_addons
###Code
list_ds = tf.data.Dataset.list_files(str(flowers_root / '*/*'))
list_ds
def parse_image(filename):
parts = tf.strings.split(filename, '/')
label = parts[-2]
image = tf.io.read_file(filename)
image = tf.image.decode_jpeg(image)
image = tf.image.convert_image_dtype(image, tf.float32)
image = tf.image.resize(image, [128, 128])
return image, label
file_path = next(iter(list_ds))
image, label = parse_image(file_path)
def show(image, label):
plt.figure()
plt.imshow(image)
plt.title(label.numpy().decode('utf-8'))
plt.axis('off')
show(image, label)
images_ds = list_ds.map(parse_image)
images_ds
for image, label in images_ds.take(2):
show(image, label)
import scipy.ndimage as ndimage
def random_rotate_image(image):
image = ndimage.rotate(image, np.random.uniform(-30, 30), reshape=False)
return image
image, label = next(iter(images_ds))
image = random_rotate_image(image)
show(image, label)
def tf_random_rotate_image(image, label):
im_shape = image.shape
[image, ] = tf.py_function(random_rotate_image, [image], [tf.float32])
image.set_shape(im_shape)
return image, label
rot_ds = images_ds.map(tf_random_rotate_image)
for image, label in rot_ds.take(2):
show(image, label)
###Output
WARNING:matplotlib.image:Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
WARNING:matplotlib.image:Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
|
dev_scripts/notebooks/test/test_run_notebook.ipynb | ###Markdown
Imports
###Code
%load_ext autoreload
%autoreload 2
import core.config as cconfig
# Initialize config.
config = cconfig.get_config_from_env()
###Output
_____no_output_____
###Markdown
Execute
###Code
if config is None:
raise ValueError("No config provided.")
if config["fail"]:
raise ValueError("Fail.")
print("success")
###Output
_____no_output_____ |
task-3/test.ipynb | ###Markdown
Testing This section allows you to test the model. Feel free to change the value of "prev_words", which specifies the previous words, and "next_words", which specifies the next words (its word count determines how many new words are generated).
###Code
prev_words = "As I stood hesitating in the hall"
next_words = "with all this passing through my mind,"
next_words = len(next_words.split())
model = load_model("h5-model-3.h5")
def get_data(filename="poirotInvestigates.txt"):
"""
This function will simply open the file and read in the text. The text
will be converted in to lower case characters. It then removes all characters
which are digits.
"""
lines = []
with open(filename) as file:
lines = [line for line in file if line != "\n"]
for i in range(len(lines)):
edit_line = lines[i].replace("\n", "")
edit_line = edit_line.replace("***", " ")
edit_line = edit_line.replace("_", "")
edit_line = edit_line.replace('”', "")
edit_line = edit_line.replace('“', "")
lines[i] = edit_line.lower()
return lines
def prediction(prev_word, next_words):
"""
Perform the prediction of the next words
"""
token = Tokenizer()
token.fit_on_texts(get_data())
max_seqence_len = 17
model = load_model("h5-model-3.h5")
for _ in range(next_words):
token_list = token.texts_to_sequences([prev_word])[0] # convert prev_word (the running text) to a sequence of token ids
token_list = pad_sequences([token_list], maxlen=max_seqence_len-1, padding='pre') # add padding
predicted = np.argmax(model.predict(token_list, verbose=1), axis=-1)
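# Map the predicted class index back to the corresponding word in the tokenizer vocabulary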
output_word = ""  # fallback in case no vocabulary entry matches the predicted index
for word, index in token.word_index.items():
if index == predicted:
output_word = word
break
prev_word += ' '+ output_word
print(prev_word)
prediction(prev_words, next_words)
###Output
2022-02-24 23:29:00.262761: I tensorflow/stream_executor/cuda/cuda_dnn.cc:366] Loaded cuDNN version 8101
|
004-Python 基础/continue 语句.ipynb | ###Markdown
Description: The Python continue statement **skips the rest of the current loop iteration**, whereas break exits the entire loop. The continue statement tells Python to skip the remaining statements in the current iteration and move straight on to the next one. continue is used inside while and for loops. Syntax: The syntax of the continue statement in Python is: ```continue``` Flow chart: Examples
###Code
for letter in 'Python': # Example 1
if letter == 'h':
continue
print('当前字母 :', letter)
var = 10 # Example 2
while var > 0:
var = var -1
if var == 5:
continue
print('当前变量值 :', var)
print("Good bye!")
###Output
当前字母 : P
当前字母 : y
当前字母 : t
当前字母 : o
当前字母 : n
当前变量值 : 9
当前变量值 : 8
当前变量值 : 7
当前变量值 : 6
当前变量值 : 4
当前变量值 : 3
当前变量值 : 2
当前变量值 : 1
当前变量值 : 0
Good bye!
###Markdown
More examples
###Code
# We only want to print the odd numbers between 0 and 10, so we use continue to skip the even iterations:
n = 0
while n < 10:
n = n + 1
if n % 2 == 0: # if n is even, run the continue statement
continue # continue jumps straight to the next iteration, so the print() below is skipped
print(n)
# continue acts like a filter: it exists to drop the iterations we do not want while the loop itself keeps running
var = 10
while var > 0:
var = var -1
if var == 5 or var == 8:
continue
print('当前值 :', var)
print("Good bye!")
###Output
当前值 : 9
当前值 : 7
当前值 : 6
当前值 : 4
当前值 : 3
当前值 : 2
当前值 : 1
当前值 : 0
Good bye!
|
Unit 1 - Introduction/.ipynb_checkpoints/Chapter 5 | Minimum Edit Distance-checkpoint.ipynb | ###Markdown
Unit 1 Chapter 5 --- String similarity * Coreference - The task of determining whether two strings refer to the same entity. Example: - President of India, Mr. Narendra Modi - Indian President, Mr. Narendra Modi The fact that two strings like the ones above are very similar can be used as evidence to decide whether they are coreferent. ---But how do we quantify a string's similarity? Minimum Edit Distance> Minimum edit distance is the minimum number of operations (insertion, deletion, substitution) required to convert one string into another. The smaller the distance, the more similar the strings. Let's look at some examples. Example 1--- Suppose there are two strings, s1 = 'Woman' and s2 = 'Women'. The operations allowed are ``` d - deletion, s - substitution, i - insertion ``` s1 can be converted to s2 by a single substitution operation (s): substitute the 'a' in s1 with an 'e'. Now we can give a weight to each of these 3 operations. If the weight of each operation is 1, then the distance between s1 and s2 is 1. Levenshtein Distance> Levenshtein distance is a minimum edit distance metric in which each operation has a weight of 1 and substitution is not allowed, which is equivalent to substitution having a weight of 2 (1 for insertion and 1 for deletion). Example 2--- Take two strings, s1 = 'I N T E N T I O N' and s2 = 'E X E C U T I O N'. The operations allowed are ``` d - deletion, weight = 1; i - insertion, weight = 1 ``` To convert s1 into s2, the following operations are performed: - delete an 'I', cost = 1 - substitute 'E' for 'N', cost = 2 - substitute 'X' for 'T', cost = 2 - insert 'C', cost = 1 - substitute 'U' for 'N', cost = 2. Distance = 8. Minimum Edit Distance Algorithm--- > The minimum edit distance algorithm can be seen as finding the shortest path (minimum edits) from one string to another. This can be done using Dynamic Programming; many NLP algorithms are based on Dynamic Programming. The intuition of Dynamic Programming is that a large problem can be solved by combining the solutions of sub-problems. Python code for Minimum edit distance> It's okay if you don't understand the code, there's always a library for it. But try to make sense of the code. The concept should be clear.
###Code
import numpy as np
""" Change the penalties here to deviate from the Levenshtein cost """
INSERTION_PENALTY = 1
DELETION_PENALTY = 1
SUBSTITUTION_PENALTY = 2
ALLOWED_LEVELS = ["word", "char"]
LEVEL = "word"
reference = "if there is no rain in April you will have a great summer"
sequences = ["no rain in april then great summer come",
"there is rain in April you have summer",
"in April no rain you have summer great",
"there is no rain in apple a great summer comes",
"you have a great summer comes if there is no rain in April"]
def compute_cost(D, i, j, token_X, token_Y):
relative_subst_cost = 0 if token_X == token_Y else SUBSTITUTION_PENALTY
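# Cheapest of: insertion (cell above), deletion (cell to the left), or substitution/match (diagonal)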
return min(D[i-1, j] + INSERTION_PENALTY, D[i, j-1] + DELETION_PENALTY, D[i-1, j-1] + relative_subst_cost)
def tokenize_string(string, level="word"):
assert level in ALLOWED_LEVELS
if level == "word":
return string.split(" ")
else:
return list(string)
def minimum_edit_distance(string1, string2, level="word"):
"""The function uses the dynamic programming approach from Wagner-Fischer to compute the minimum edit distance
between two sequences.
:param string1 first sequence
:param string2 second sequence
:param level defines on which granularity the algorithm shall be applied. "word" specifies the token to
be sequential words while "char" applies the algorithm on a character-by-character level"""
string1_tokens = tokenize_string(string1, level)
string2_tokens = tokenize_string(string2, level)
n = len(string1_tokens)
m = len(string2_tokens)
print(string2_tokens)
D = np.zeros((n, m))
for i in range(n):
for j in range(m):
if j == 0:
D[i,j] = i
elif i == 0:
D[i,j] = j
else:
D[i,j] = compute_cost(D, i, j, string1_tokens[i], string2_tokens[j])
return D[n-1,m-1]
def main():
for sequence in sequences:
print(minimum_edit_distance(reference, sequence, level=LEVEL))
###Output
_____no_output_____
###Markdown
---
###Code
main()
###Output
['no', 'rain', 'in', 'april', 'then', 'great', 'summer', 'come']
11.0
['there', 'is', 'rain', 'in', 'April', 'you', 'have', 'summer']
5.0
['in', 'April', 'no', 'rain', 'you', 'have', 'summer', 'great']
9.0
['there', 'is', 'no', 'rain', 'in', 'apple', 'a', 'great', 'summer', 'comes']
7.0
['you', 'have', 'a', 'great', 'summer', 'comes', 'if', 'there', 'is', 'no', 'rain', 'in', 'April']
12.0
|
notebooks/seqeval.ipynb | ###Markdown
[](https://colab.research.google.com/github/Loio-co/loio-metrics/blob/master/notebooks/seqeval.ipynb)
###Code
!pip install seqeval
from seqeval.metrics import accuracy_score
from seqeval.metrics import classification_report
from seqeval.metrics import f1_score
y_true = [['O', 'O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]
y_pred = [['O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]
f1_score(y_true, y_pred)
accuracy_score(y_true, y_pred)
print(classification_report(y_true, y_pred))
y_true_2 = [['O', 'O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'O', 'B-PER', 'I-PER', 'O']]
y_pred_2 = [['O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'I-MISC', 'O', 'B-PER', 'I-PER', 'O']]
print(classification_report(y_true, y_pred))
###Output
precision recall f1-score support
PER 1.00 1.00 1.00 1
MISC 0.00 0.00 0.00 1
micro avg 0.50 0.50 0.50 2
macro avg 0.50 0.50 0.50 2
|
L14_model evaluation/code/L14_bias decomp mse loss.ipynb | ###Markdown
L14 - Model evaluation (bias decomp mse loss)---- Instructor: Dalcimar Casanova ([email protected])- Course website: https://www.dalcimar.com/disciplinas/aprendizado-de-maquina- Bibliography: based on lectures of Dr. Sebastian Raschka
###Code
import numpy as np
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn import datasets
from sklearn.model_selection import train_test_split
pip install mlxtend --upgrade --no-deps
from mlxtend.evaluate import bias_variance_decomp
###Output
_____no_output_____
###Markdown
Regression loss (MSE loss)
###Code
boston = datasets.load_boston()
X = boston.data[:,:]
y = boston.target
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=11)
tree = DecisionTreeRegressor(random_state=1)
avg_expected_loss, avg_bias, avg_var = bias_variance_decomp(
tree, X_train, y_train, X_test, y_test,
loss='mse', num_rounds=3,
random_seed=1)
print('Average expected loss: %.3f' %avg_expected_loss)
print('Average bias^2: %.3f' %avg_bias)
print('Average variance: %.3f' %avg_var)
###Output
Average expected loss: 16.990
Average bias^2: 9.962
Average variance: 7.028
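###Markdown
A quick check on the numbers above: the textbook decomposition for squared-error loss is $\mathbb{E}[(y-\hat{f}(x))^2] = \mathrm{Bias}[\hat{f}(x)]^2 + \mathrm{Var}[\hat{f}(x)] + \sigma^2$, and in the estimate returned by `bias_variance_decomp` the average expected loss splits exactly into the two reported terms: $9.962 + 7.028 = 16.990$.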
|
examples/causal_inference/mediation_analysis.ipynb | ###Markdown
Mediation Analysis in WhyNot: A common pitfall of many causal inference studies is to mistakenly include mediators as control variables. As illustrated in the diagram below, mediators (the yellow nodes) are variables that lie on causal paths from treatment $T$ to outcome $Y$. Including them in an adjustment set blocks any causal influence that might flow along these paths and hence biases estimates of causal effects. Since the causal influence of the treatment on the outcome variable can vary across different causal paths, the effects of mistakenly including a mediator can vary significantly depending on the role of the mediator in the system. In the example above, controlling for different subsets $S \subseteq (M_1, M_2, M_3)$ can have different effects on the resulting inference. Simulation serves as a powerful tool to rigorously examine the effects of mistakenly including mediators in causal inference.
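To make the pitfall concrete before turning to WhyNot, here is a minimal NumPy sketch (the coefficients and variable names are illustrative assumptions, not part of the WhyNot experiment) showing how adjusting for a mediator $M$ blocks the $T \to M \to Y$ path and shrinks the estimate from the true total effect down to the direct effect:
```python
import numpy as np

rng = np.random.default_rng(0)
n = 100_000
T = rng.binomial(1, 0.5, n)                   # binary treatment
M = 2.0 * T + rng.normal(size=n)              # mediator on the path T -> M -> Y
Y = 1.0 * T + 1.5 * M + rng.normal(size=n)    # true total effect of T on Y = 1.0 + 2.0 * 1.5 = 4.0

def ols(X, y):
    # ordinary least squares with an intercept; returns the coefficient vector
    X = np.column_stack([X, np.ones(len(y))])
    beta, *_ = np.linalg.lstsq(X, y, rcond=None)
    return beta

print(ols(T.reshape(-1, 1), Y)[0])            # ~4.0: recovers the total effect
print(ols(np.column_stack([T, M]), Y)[0])     # ~1.0: the mediated influence is blocked
```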
###Code
%matplotlib inline
%load_ext autoreload
%autoreload 2
import matplotlib.pyplot as plt
import numpy as np
import whynot
from whynot.simulators.world3 import experiments
from whynot import causal_suite
import scripts.mediator_utils as utils
###Output
The autoreload extension is already loaded. To reload it, use:
%reload_ext autoreload
###Markdown
Mediation Analysis for the World 3 Model: In this tutorial, we provide an example of mediation analysis for the [World 3 model](https://whynot.readthedocs.io/en/latest/simulators.htmlworld3-simulator). For this simulator, the experimental units consist of rollouts of the simulator from different initial states. In the World3 Pollution Experiment, treatment consists of decreasing the rate of pollution (a model parameter) by 15% in 1975. The outcome variable is the world population in the year 2050. The probability of a particular unit being treated is determined based on the level of pollution in 1975. To ensure that there is no confounding in our analysis, we include pollution levels and all other state variables at the time of treatment as part of our adjustment set. We introduce mediation bias by including state variables from the period between 1975 and 2050. Since these variables are measured after the intervention year and before the outcome, they are mediators in the causal graph by nature of the dynamics.
###Code
experiment = experiments.PollutionMediation
###Output
_____no_output_____
###Markdown
Generating the data: In this mediation experiment, we examine the bias induced by including state variables from different years between the intervention time (1975) and the outcome year (2050). Presumably, state variables measured right after the intervention time have a weaker influence on the outcome than those measured just before 2050.
###Code
num_samples = 1000
mediation_years = range(1980, 2040, 20)
experiment_datasets = {}
for year in mediation_years:
data = experiment.run(num_samples=num_samples, mediation_year=year, show_progress=True)
experiment_datasets[year] = data
###Output
_____no_output_____
###Markdown
Running causal estimators: We run a collection of causal inference estimators defined in the `causal_suite` to understand how mistakenly including mediators affects each estimator's performance.
###Code
inference_results = {}
for year in mediation_years:
data = experiment_datasets[year]
inference_results[year] = causal_suite(data.covariates, data.treatments, data.outcomes)
###Output
_____no_output_____
###Markdown
Analyzing the results: Since the ground truth treatment effect for each experiment is available via simulation, we can now track how the errors of different estimators vary as a function of the mediation year.
###Code
def plot_year(year):
data = experiment_datasets[year]
inference_result = inference_results[year]
utils.error_bar_plot(
data, inference_result,
title='Estimated Effects After Including {} Covariates as Mediators'.format(year),
ylabel='Estimated Effects')
plot_year(1980)
plot_year(2000)
plot_year(2020)
###Output
_____no_output_____
CichyWanderers/complete_cichy_fMRI_MEG.ipynb | ###Markdown
Data loader Summary: Here we will load data from Cichy et al. 2014 [1]. The data consist of fMRI responses from early visual cortex (EVC) and inferior temporal (IT) cortex and MEG responses at different timepoints, in the form of representational dissimilarity matrices (RDMs) to 92 images. These images belong to different categories as shown in the figure below. Representational Similarity Analysis (RSA): RSA is a method to relate signals from different source spaces (such as behavior, neural responses, DNN activations) by abstracting signals from separate source spaces into a common similarity space. For this, in each source space, condition-specific responses are compared to each other for dissimilarity (e.g., by calculating Euclidean distances between signals), and the values are aggregated in so-called representational dissimilarity matrices (RDMs), indexed in rows and columns by the conditions compared. RDMs thus summarize the representational geometry of the source space signals. Different from source space signals themselves, RDMs from different source spaces are directly comparable to each other for similarity and thus can relate signals from different spaces. The figure below illustrates how RSA can be applied to different problems by comparing RDMs of different modalities/species. Data from Cichy et al. 2014: In the cells below, we will download and visualize MEG and fMRI RDMs. Please refer to Figure 1 in [1] to learn details about the image category order in the RDMs.
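As a toy illustration of how an RDM is built (random numbers standing in for real responses; the actual Cichy RDMs are simply loaded below), every pair of condition-specific response patterns is compared with a dissimilarity measure such as 1 - correlation:
```python
import numpy as np

rng = np.random.default_rng(0)
responses = rng.normal(size=(92, 500))  # 92 conditions x 500 measurement channels (assumed toy shape)
rdm = 1 - np.corrcoef(responses)        # entry (i, j) = 1 - Pearson correlation of patterns i and j
assert rdm.shape == (92, 92)
assert np.allclose(np.diag(rdm), 0)     # each condition is identical to itself
```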
###Code
# Imports
import glob
import numpy as np
import urllib
import torch
import cv2
import argparse
import time
import random
import matplotlib.pyplot as plt
import nibabel as nib
import pickle
from tqdm import tqdm
from PIL import Image
from torchvision import transforms as trn
import scipy.io as sio
import h5py
import os
from pathlib import Path
from PIL import Image
from sklearn.preprocessing import StandardScaler
from torch.autograd import Variable as V
from sklearn.decomposition import PCA, IncrementalPCA
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
import ipywidgets
from ipywidgets import widgets, Play
import seaborn
def loadmat(matfile):
"""Function to load .mat files.
Parameters
----------
matfile : str
path to `matfile` containing fMRI data for a given trial.
Returns
-------
dict
dictionary containing data in key 'vol' for a given trial.
"""
try:
f = h5py.File(matfile)
except (IOError, OSError):
return sio.loadmat(matfile)
else:
return {name: np.transpose(f.get(name)) for name in f.keys()}
# Data download
Path("data").mkdir(parents=True, exist_ok=True)
!wget -qO data/data.zip -c https://osf.io/7vpyh/download
%%capture
!unzip -o data/data.zip -d data #unzip the files
!wget -qO data/92_Image_Set/cichy_stim_details.mat -c http://wednesday.csail.mit.edu/MEG1_MEG_Clear_Data/visual_stimuli.mat
def get_stim_details(path_to_file='data/92_Image_Set/cichy_stim_details.mat'):
"""
acquire category names and binary features describing the Cichy images
returns: stim_details (dict containing 5 keys: category (str), and four binary features (animate, human, natural, face)).
each key holds an array giving the information for all categories
"""
stim_dat = loadmat(path_to_file)['visual_stimuli']
fields = ['category', 'human', 'face', 'animate', 'natural']
stim_details = {field:[] for field in fields}
for ii in range(92):
for jj, field in enumerate(fields):
stim_details[field].append(stim_dat[0,ii][jj][0])
for field in fields[1:]:
stim_details[field] = np.array(stim_details[field]).squeeze()
return stim_details
stim_dict = get_stim_details()
# Each (key, value) pair of label_dict is of the form:
# key: label string, e.g., nonhuman bodypart
# value: list with the indicies for the given label
label_dict = {}
for label in np.unique(stim_dict['category']):
label_dict[label] = [i for i, x in enumerate(stim_dict['category']) if x == label]
from sklearn.manifold import TSNE
from sklearn.metrics import pairwise_distances
# Helper functions
def plot_RDM(RDM, metric=None, label=None, title=None, pmin=5, pmax=95):
"""Helper function for visualize a RDM."""
if metric is not None:
# Fill the upper portion of the RDM matrix
RDM = np.tril(RDM) + np.triu(RDM.T, 1)
RDM = np.nan_to_num(RDM)
# Compute distance between stimulus pairs
distance_matrix = pairwise_distances(RDM, RDM, metric=metric, n_jobs=-1)
else:
distance_matrix = RDM
vmin = np.percentile(distance_matrix.reshape(-1), pmin)
vmax = np.percentile(distance_matrix.reshape(-1), pmax)
# Since the RDM matrix is symmetric we set upper triangular values to NaN
distance_matrix[np.triu_indices(distance_matrix.shape[0], 1)] = np.nan
# plot the RDM at given timepoint
plt.imshow(distance_matrix, cmap="viridis", vmin=vmin, vmax=vmax)
plt.title("RDM")
if title is not None:
plt.title(title)
cbar = plt.colorbar()
plt.xlabel("Stimuli")
plt.ylabel("Stimuli")
cbar.ax.get_yaxis().labelpad = 15
cbar.ax.set_ylabel('Dissimilarity Measure', rotation=270)
if label is not None:
cbar.ax.set_ylabel(label, rotation=270)
def plot_RDMs(RDM_dict, metric="chebyshev"):
n_col = len(RDM_dict)
fig, axs = plt.subplots(1, n_col, figsize=(4.5 * len(RDM_dict), 4.5))
for i, (label, RDM) in enumerate(RDM_dict.items()):
ax = axs[i]
ax.set_title('%s' % label)
# Fill the upper portion of the RDM matrix
RDM = np.tril(RDM) + np.triu(RDM.T, 1)
RDM = np.nan_to_num(RDM)
# Compute distance between stimulus pairs
distance_matrix = pairwise_distances(RDM, RDM, metric=metric, n_jobs=-1)
# Since the RDM matrix is symmetric we set upper triangular values to NaN
distance_matrix[np.triu_indices(distance_matrix.shape[0], 1)] = np.nan
pts = ax.imshow(distance_matrix, cmap="bwr")
ax.set_xlabel('Stimuli')
ax.set_ylabel('Stimuli')
ax.set_xticks([])
ax.set_yticks([])
cbar = fig.colorbar(pts, ax=ax)
cbar.ax.get_yaxis().labelpad = 15
cbar.ax.set_ylabel('Dissimilarity Measure', rotation=270)
def get_RDM_lowd(RDM, metric='correlation'):
"""Compute a low-dimensional representation of a RDM."""
# Fill the upper portion of the RDM matrix
RDM = np.tril(RDM) + np.triu(RDM.T, 1)
RDM = np.nan_to_num(RDM)
# Compute distance between stimulus pairs
distance_matrix = pairwise_distances(RDM, RDM, metric=metric, n_jobs=-1)
# First do PCA to reduce dimensionality to 20 dimensions so that tSNE is faster
RDM_lowd = PCA(n_components=min(20, distance_matrix.shape[0]), random_state=0).fit_transform(distance_matrix)
# Then do tSNE to reduce dimensionality to 2 dimensions
RDM_lowd = TSNE(n_components=2, random_state=0).fit_transform(RDM_lowd)
return RDM_lowd
def plot_RDM_lowd(RDM_lowd, label_dict, title=None):
"""Plot a low-dimensional representation of a RDM."""
x, y = RDM_lowd[:, 0], RDM_lowd[:, 1]
for label, idxs in label_dict.items():
plt.scatter( x[idxs[:]], y[idxs[:]], marker = 'o', s = 50)
plt.title('RDM')
if title is not None:
plt.title(title)
plt.legend(label_dict.keys(), bbox_to_anchor=(1, .8), loc="upper left")
plt.xlabel('Dimension 1')
plt.ylabel('Dimension 2')
def plot_RDMs_lowd(RDM_lowd_dict, label_dict, title=None):
"""Plot low-dimensional representations of RDMs."""
n_col = len(RDM_lowd_dict)
fig, axs = plt.subplots(1, n_col, figsize=(4.5 * len(RDM_lowd_dict), 4.5))
for i, (label, RDM_lowd) in enumerate(RDM_lowd_dict.items()):
ax = axs[i]
ax.set_title('%s' % label)
x, y = RDM_lowd[:, 0], RDM_lowd[:, 1]
for label, idxs in label_dict.items():
ax.scatter( x[idxs[:]], y[idxs[:]], marker = 'o', s = 50)
ax.set_xlabel('Stimuli')
ax.set_ylabel('Stimuli')
fig.legend(label_dict.keys(), bbox_to_anchor=(1, .8), loc="upper left")
###Output
_____no_output_____
###Markdown
Loading MEG RDMs
###Code
# Load MEG RDMs for each time point for all subjects all sessions
MEG_RDMs = loadmat("data/MEG_decoding_RDMs.mat")['MEG_decoding_RDMs']
print(MEG_RDMs.shape)
###Output
(16, 2, 1301, 92, 92)
###Markdown
Shape of RDM is num_subjects x num_sessions x num_timepoints x num_stimulus x num_stimulus
###Code
# average RDM across subjects and sessions
MEG_RDMs_sub_averaged = np.mean(MEG_RDMs,axis=(0,1))
del MEG_RDMs
@widgets.interact( MEG_RDMs=widgets.fixed(MEG_RDMs_sub_averaged),
metric=widgets.fixed(None),
timepoint=widgets.IntSlider(min=0, max=600, step=20, value=420, description='t (ms):') )
def plot_MEG_RDMs(MEG_RDMs, metric=None, timepoint=420):
"""Helper function for visualize MEG RDMs with an interactive
slider for the timepoint."""
# Load RDM at a given timepoint
# +100 as the RDMs provided are from -100ms to 1000ms after the stimulus onset
RDM = np.array(MEG_RDMs[timepoint+100])
title = "MEG RDM at t = " + str(timepoint) + " ms"
label = "Decoding Accuracy"
plot_RDM(RDM, label=label, title=title)
###Output
_____no_output_____
###Markdown
Loading fMRI RDMs
###Code
fMRI_file = 'data/92_Image_Set/target_fmri.mat' # path of fMRI RDM file
fMRI_RDMs = loadmat(fMRI_file) # load the fMRI RDMs
print(fMRI_RDMs.keys())
print(fMRI_RDMs['EVC_RDMs'].shape)
###Output
dict_keys(['EVC_RDMs', 'IT_RDMs'])
(15, 92, 92)
###Markdown
fMRI_RDMs is a dictionary with keys 'EVC_RDMs' and 'IT_RDMs' corresponding to ROIs EVC and IT respectively. The shape of each RDM is num_subjects x num_stimulus x num_stimulus
###Code
# average RDM across subjects
fMRI_RDMs_sub_averaged = fMRI_RDMs.copy()
for k, v in fMRI_RDMs.items():
fMRI_RDMs_sub_averaged[k] = np.mean(v, axis=0)
del fMRI_RDMs
@widgets.interact( fMRI_RDMs=widgets.fixed(fMRI_RDMs_sub_averaged),
ROI=widgets.Dropdown(options=['EVC', 'IT'], value='IT') )
def plot_fMRI_RDMs(fMRI_RDMs, ROI='IT'):
"""Helper function for visualize fMRI RDMs with an interactive
dropdown menu for the ROI."""
# Load RDM at a given ROI
RDM = np.array(fMRI_RDMs[ROI + "_RDMs"])
title = ROI + " RDM"
label = "1 - correlation"
plot_RDM(RDM, label=label, title=title)
###Output
_____no_output_____
###Markdown
Example Analyses: Below we will perform two analyses: 1. MEG-fMRI comparison: To find out at which timepoint the MEG representation is similar to a given ROI's representation. 2. MEG-Deep Neural Network (DNN) comparison: To find out at which timepoint the MEG representation is similar to a given DNN layer's representation. In other words, the comparison will inform us about the sequential order of visual feature processing in the cortex.
###Code
# RDM Comparison functions
from scipy.stats import spearmanr
def RSA_spearman(rdm1,rdm2):
"""
computes and returns the spearman correlation between lower triangular
part of the input rdms. We only need to compare either lower or upper
triangular part of the matrix as RDM is symmetric
"""
# get lower triangular part of the RDM1
lt_rdm1 = get_lowertriangular(rdm1)
# get lower triangular part of the RDM1
lt_rdm2 = get_lowertriangular(rdm2)
# return Spearman's correlation between lower triangular part of rdm1 & rdm2
return spearmanr(lt_rdm1, lt_rdm2)[0]
def get_lowertriangular(rdm):
"""
returns lower triangular part of the matrix
"""
num_conditions = rdm.shape[0]
return rdm[np.tril_indices(num_conditions,-1)]
###Output
_____no_output_____
###Markdown
MEG-fMRI Comparison
###Code
# Correlating MEG RDMs with fMRI RDMs
num_timepoints = MEG_RDMs_sub_averaged.shape[0] # get number of timepoints
# initialize a dictionary to store MEG and ROI RDM correlation at each timepoint
MEG_correlation = {}
ROIs = ['EVC','IT']
for ROI in ROIs:
MEG_correlation[ROI] = []
# for loop that goes over MEG RDMs at all time points and correlate with ROI RDMs
for t in range(num_timepoints):
MEG_RDM_t = MEG_RDMs_sub_averaged[t,:,:]
for ROI in ROIs:
ROI_RDM = fMRI_RDMs_sub_averaged[ROI + '_RDMs']
MEG_correlation[ROI].append(RSA_spearman(ROI_RDM, MEG_RDM_t))
# Plotting MEG-fMRI comparison
plt.rc('font', size=12)
fig, ax = plt.subplots(figsize=(10, 6))
time_range = range(-100,1201)
ax.plot(time_range, MEG_correlation['IT'], color='tab:orange', label='IT')
ax.plot(time_range, MEG_correlation['EVC'], color='tab:blue', label='EVC')
# Same as above
ax.set_xlabel('Time')
ax.set_ylabel('Spearmans Correlation')
ax.set_title('MEG-fMRI fusion')
ax.grid(True)
ax.legend(loc='upper left');
###Output
_____no_output_____
###Markdown
MEG-DNN Comparison Creating DNN (AlexNet) RDMs
###Code
# AlexNet Definition
__all__ = ['AlexNet', 'alexnet']
model_urls = {
'alexnet': 'https://download.pytorch.org/models/alexnet-owt-4df8aa71.pth',
}
# Here we redefine AlexNet differently from torchvision code for better understanding
class AlexNet(nn.Module):
def __init__(self, num_classes=1000):
super(AlexNet, self).__init__()
self.conv1 = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
)
self.conv2 = nn.Sequential(
nn.Conv2d(64, 192, kernel_size=5, padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
)
self.conv3 = nn.Sequential(
nn.Conv2d(192, 384, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
)
self.conv4 = nn.Sequential(
nn.Conv2d(384, 256, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
)
self.conv5 = nn.Sequential(
nn.Conv2d(256, 256, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
)
self.fc6 = nn.Sequential(
nn.Dropout(),
nn.Linear(256 * 6 * 6, 4096),
nn.ReLU(inplace=True),
)
self.fc7 =nn.Sequential(
nn.Dropout(),
nn.Linear(4096, 4096),
)
self.fc8 = nn.Sequential(
nn.ReLU(inplace=True),
nn.Linear(4096, num_classes),
)
def forward(self, x):
out1 = self.conv1(x)
out2 = self.conv2(out1)
out3 = self.conv3(out2)
out4 = self.conv4(out3)
out5 = self.conv5(out4)
out5_reshaped = out5.view(out5.size(0), 256 * 6 * 6)
out6= self.fc6(out5_reshaped)
out7= self.fc7(out6)
out8 = self.fc8(out7)
return out1, out2, out3,out4, out5, out6,out7,out8
def alexnet(pretrained=False, **kwargs):
"""AlexNet model architecture from the
`"One weird trick..." <https://arxiv.org/abs/1404.5997>`_ paper.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = AlexNet(**kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['alexnet']))
return model
# Feature extraction code
def load_alexnet(model_checkpoints):
"""This function initializes an Alexnet and load
its weights from a pretrained model. Since we redefined model in a different
way we have to rename the weights that were in the pretrained checkpoint.
----------
model_checkpoints : str
model checkpoints location.
Returns
-------
model
pytorch model of alexnet
"""
model = alexnet()
# Load checkpoint
model_file = model_checkpoints
checkpoint = torch.load(model_file, map_location=lambda storage, loc: storage)
# Rename the checkpoint keys according to new definition
model_dict =["conv1.0.weight", "conv1.0.bias", "conv2.0.weight", "conv2.0.bias", "conv3.0.weight", "conv3.0.bias", "conv4.0.weight", "conv4.0.bias", "conv5.0.weight", "conv5.0.bias", "fc6.1.weight", "fc6.1.bias", "fc7.1.weight", "fc7.1.bias", "fc8.1.weight", "fc8.1.bias"]
state_dict={}
i=0
for k,v in checkpoint.items():
state_dict[model_dict[i]] = v
i+=1
# initialize model with pretrained weights
model.load_state_dict(state_dict)
if torch.cuda.is_available():
model.cuda()
model.eval()
return model
def get_activations_and_save(model, image_list, activations_dir):
"""This function generates Alexnet features and save them in a specified directory.
Parameters
----------
model :
pytorch model : alexnet.
image_list : list
the list contains path to all images.
activations_dir : str
save path for extracted features.
"""
resize_normalize = trn.Compose([
trn.Resize((224,224)),
trn.ToTensor(),
trn.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
# for all images in the list generate and save activations
for image_file in tqdm(image_list):
# open image
img = Image.open(image_file)
image_file_name = os.path.split(image_file)[-1].split(".")[0]
# apply transformations before feeding to model
input_img = V(resize_normalize(img).unsqueeze(0))
if torch.cuda.is_available():
input_img=input_img.cuda()
x = model.forward(input_img)
activations = []
for i,feat in enumerate(x):
activations.append(feat.data.cpu().numpy().ravel())
for layer in range(len(activations)):
save_path = os.path.join(activations_dir, image_file_name+"_"+"layer" + "_" + str(layer+1) + ".npy")
np.save(save_path,activations[layer])
# get the paths to all the images in the stimulus set
image_dir = 'data/92_Image_Set/92images'
image_list = glob.glob(image_dir + '/*.jpg')
image_list.sort()
print('Total Number of Images: ', len(image_list))
cwd = os.getcwd() # get current working directory
save_dir = os.path.join(cwd, "content/activations_alexnet")
if not os.path.exists(save_dir):
os.makedirs(save_dir)
######### load Alexnet initialized with pretrained weights ###################
# Download pretrained Alexnet from:
# https://download.pytorch.org/models/alexnet-owt-4df8aa71.pth
# and save in the current directory
checkpoint_path = os.path.join(cwd, "content/alexnet.pth")
if not os.path.exists(checkpoint_path):
url = "https://download.pytorch.org/models/alexnet-owt-4df8aa71.pth"
urllib.request.urlretrieve(url, checkpoint_path)
model = load_alexnet(checkpoint_path)
##############################################################################
######### get and save activations ################################
activations_dir = os.path.join(save_dir)
if not os.path.exists(activations_dir):
os.makedirs(activations_dir)
print("-------------Saving activations ----------------------------")
get_activations_and_save(model, image_list, activations_dir)
###################################################################
num_layers = 8 # number of layers in the model
layers = []
for i in range(num_layers):
layers.append("layer" + "_" + str(i+1))
Model_RDMs = {}
# create RDM for each layer from activations
for layer in layers:
activation_files = glob.glob(activations_dir + '/*' + layer + '.npy')
activation_files.sort()
activations = []
# Load all activations
for activation_file in activation_files:
activations.append(np.load(activation_file))
activations = np.array(activations)
# calculate Pearson's distance for all pairwise comparisons
Model_RDMs[layer] = 1-np.corrcoef(activations)
# Visualize model RDMs
options = [(k.replace('_', ' ').capitalize(), i+1) for k, i in zip(Model_RDMs.keys(), range(8))]
@widgets.interact( Model_RDMs=widgets.fixed(Model_RDMs),
layer=widgets.SelectionSlider(options=options,
value=8,
description='AlexNet') )
def plot_Model_RDMs(Model_RDMs, layer=8):
"""Helper function for visualize Model RDMs with an interactive
slider for the layer index."""
# Load RDM at a given layer
RDM = np.array(Model_RDMs['layer_' + str(layer)])
title = "Model's Layer " + str(layer) + " RDM"
label = "1 - correlation"
plot_RDM(RDM, label=label, title=title)
###Output
_____no_output_____
###Markdown
Comparing MEG RDMs with AlexNet RDMs
###Code
# Correlating MEG RDMs with DNN RDMs
num_timepoints = MEG_RDMs_sub_averaged.shape[0] #get number of timepoints
# initialize a dictionary to store MEG and DNN RDM correlation at each timepoint
for layer in layers:
MEG_correlation[layer] = []
# for loop that goes over MEG RDMs at all time points and correlate with DNN RDMs
for t in range(num_timepoints):
MEG_RDM_t = MEG_RDMs_sub_averaged[t,:,:]
for layer in layers:
model_RDM = Model_RDMs[layer]
MEG_correlation[layer].append(RSA_spearman(model_RDM,MEG_RDM_t))
# Plotting MEG-DNN comparison
plt.rc('font', size=12)
fig, ax = plt.subplots(figsize=(10, 6))
time_range = range(-100,1201)
ax.plot(time_range, MEG_correlation['layer_1'], color='tab:orange', label='layer_1')
ax.plot(time_range, MEG_correlation['layer_7'], color='tab:blue', label='layer_7')
# Same as above
ax.set_xlabel('Time')
ax.set_ylabel('Spearmans Correlation')
ax.set_title('MEG-model comparison')
ax.grid(True)
ax.legend(loc='upper left');
###Output
_____no_output_____
###Markdown
Comparing 'decoding time' RDMs with AlexNet RDMs
###Code
# 'Decoding time' RDM
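# for each stimulus pair, take the index along the time axis at which MEG decoding accuracy peaks (argmax over axis 0)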
decoding_time_RDM = np.array(np.argmax(MEG_RDMs_sub_averaged, axis=0), dtype=np.float32)
pmin, pmax = 5, 95
plot_RDM(decoding_time_RDM, metric='chebyshev', pmin=pmin, pmax=pmax, label='time (ms)', title='Decoding time RDM')
# Correlating 'decoding time' RDMs with DNN RDMs
# initialize a dictionary to store MEG and DNN RDM correlation at each timepoint
decoding_time_correlation = {}
for layer in layers:
decoding_time_correlation[layer] = []
# for loop that goes over MEG RDMs at all time points and correlate with DNN RDMs
for layer in layers:
model_RDM = Model_RDMs[layer]
decoding_time_correlation[layer].append(RSA_spearman(model_RDM, decoding_time_RDM))
for k, v in decoding_time_correlation.items():
pass
# print(f"{k.replace('_', ' ').capitalize()}: {v[0]:7.4f}")
###Output
_____no_output_____
###Markdown
Dimensionality reduction of representations: We can visualize a dimensionality-reduced version of the internal representations of the human neocortex or CNN internal representations in order to potentially uncover informative structure. Here, we use PCA to reduce the dimensionality to 20 dimensions, and then use tSNE to further reduce dimensionality to 2 dimensions. We use the first step of PCA so that tSNE runs faster (this is standard practice in the field).
###Code
EVC_RDM = np.array(fMRI_RDMs_sub_averaged["EVC" + "_RDMs"])
IT_RDM = np.array(fMRI_RDMs_sub_averaged["IT" + "_RDMs"])
RDM_dict = {}
RDM_dict["EVC_RDMs"] = EVC_RDM
RDM_dict["IT_RDMs"] = IT_RDM
plot_RDMs(RDM_dict, metric='chebyshev')
plot_RDMs(RDM_dict, metric='correlation')
@widgets.interact( fMRI_RDMs=widgets.fixed(RDM_dict),
ROI=widgets.Dropdown(options=['EVC', 'IT'], value='IT') )
def _plot_RDMs(fMRI_RDMs, ROI='EVC'):
"""Helper function for visualize fMRI RDMs in 2D space with an interactive
dropdown menu for the ROI."""
# Load RDM at a given ROI
RDM = np.array(fMRI_RDMs[ROI + "_RDMs"])
RDM_lowd = get_RDM_lowd(RDM, metric='chebyshev')
title = ROI + " RDM"
plot_RDM_lowd(RDM_lowd, label_dict, title=title)
RDM_lowd_dict = {label: get_RDM_lowd(RDM, metric='chebyshev')
for label, RDM in RDM_dict.items()}
plot_RDMs_lowd(RDM_lowd_dict, label_dict)
#timepoint=widgets.IntSlider(min=0, max=600, step=1, value=0, description='t (ms):'),
@widgets.interact( MEG_RDMs=widgets.fixed(MEG_RDMs_sub_averaged),
label_dict=widgets.fixed(label_dict),
timepoint=Play(min=0, max=1201, step=1, value=50, interval=500, description='t (ms):')
)
def plot_MEG_RDMs_lowd(MEG_RDMs, timepoint=50):
"""Helper function for visualize MEG RDMs with an interactive
slider for the timepoint."""
# Load RDM at a given timepoint
# +100 as the RDMs provided are from -100ms to 1000ms after the stimulus onset
RDM = np.array(MEG_RDMs[timepoint+100])
RDM_lowd = get_RDM_lowd(RDM, metric='correlation')
title = "MEG RDM at t = " + str(timepoint) + " ms"
plot_RDM_lowd(RDM_lowd, label_dict, title=title)
###Output
_____no_output_____ |
4.Deep_Learning/IMDB-keras/IMDB_In_Keras_Solutions.ipynb | ###Markdown
Analyzing IMDB Data in Keras - Solution 4. Building the model architecture: Build a model here using Sequential. Feel free to experiment with different layers and sizes! Also, experiment with adding dropout to reduce overfitting.
###Code
# Building the model architecture with one hidden layer of 512 units
model = Sequential()
model.add(Dense(512, activation='relu', input_dim=1000))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
model.summary()
# Compiling the model using categorical_crossentropy loss, and rmsprop optimizer.
model.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
###Output
_____no_output_____
###Markdown
5. Training the model: Run the model here. Experiment with different batch_size and number of epochs!
###Code
# Running and evaluating the model
hist = model.fit(x_train, y_train,
batch_size=32,
epochs=10,
validation_data=(x_test, y_test),
verbose=2)
###Output
_____no_output_____ |
_docs/nbs/T734213-IEEE-Challenge-2021-Track1-Session-aware-Recommendation-with-Transformer-backbone-and-two-headed-buy-and-group-prediction.ipynb | ###Markdown
We will implement a multitask model - buy and click. Model 1: - Input - the 9 products displayed to the user (product emb, discrete attributes, continuous attributes) - the user's click history (product emb, discrete attributes, continuous attributes) - user attributes (discrete attributes) - Output - which session level the user purchased (0-3, 4-6, 7-9: three session types) - whether the user bought each of these 9 products (you can use the 4 types of product reweighting loss mentioned by Gaochen). Model 2: - Input - the user's previous click history (product emb, discrete attributes, continuous attributes discretized) - the product currently clicked by the user (product emb, discrete attributes, continuous attributes discretized) - user attributes (discrete attributes) - Output - whether the user clicked on this product. The two models above share all embeddings.
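Before the full Transformer-based implementation below, here is a rough simplified sketch of the shared-embedding, two-headed design described above (layer sizes and names are illustrative, and mean pooling stands in for the Transformer encoder):
```python
import torch
import torch.nn as nn

class TwoHeadSketch(nn.Module):
    def __init__(self, num_items=382, hidden=64):
        super().__init__()
        self.item_emb = nn.Embedding(num_items, hidden)     # shared by the buy task and the click task
        self.session_head = nn.Linear(hidden, 4)            # which session level was bought (incl. "nothing")
        self.buy_head = nn.Linear(hidden, 9)                # buy / not-buy for each of the 9 exposed items
        self.click_head = nn.Linear(2 * hidden, 1)          # click logit for one candidate item

    def forward_buy(self, nine_item_id):                    # nine_item_id: [B, 9]
        ctx = self.item_emb(nine_item_id).mean(dim=1)       # [B, hidden]
        return self.session_head(ctx), self.buy_head(ctx)   # [B, 4], [B, 9]

    def forward_click(self, history_item_id, cand_item_id): # [B, L], [B]
        hist = self.item_emb(history_item_id).mean(dim=1)   # [B, hidden]
        cand = self.item_emb(cand_item_id)                   # [B, hidden]
        return self.click_head(torch.cat([hist, cand], dim=-1))  # [B, 1]

m = TwoHeadSketch()
session_logits, buy_logits = m.forward_buy(torch.randint(1, 382, (2, 9)))
click_logit = m.forward_click(torch.randint(1, 382, (2, 30)), torch.randint(1, 382, (2,)))
```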
###Code
!wget -q --show-progress https://github.com/sparsh-ai/ieee21cup-recsys/raw/main/data/bronze/train.parquet.snappy
!wget -q --show-progress https://github.com/sparsh-ai/ieee21cup-recsys/raw/main/data/bronze/item_info.parquet.snappy
!wget -q --show-progress https://github.com/sparsh-ai/ieee21cup-recsys/raw/main/data/bronze/track1_testset.parquet.snappy
!wget -q --show-progress https://github.com/sparsh-ai/ieee21cup-recsys/raw/main/data/bronze/track2_testset.parquet.snappy
%load_ext tensorboard
%tensorboard --logdir=./
###Output
_____no_output_____
###Markdown
pre
###Code
!pip install einops
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.tensorboard import SummaryWriter
from datetime import datetime
import pandas as pd
from tqdm import tqdm
import numpy as np
from sklearn.utils import shuffle
import matplotlib.pyplot as plt
import einops
###Output
_____no_output_____
###Markdown
Convert all features into discrete buckets
###Code
def load_item_info(data_path='/content/'):
# item info
df_item_info = pd.read_parquet(f'{data_path}/item_info.parquet.snappy')
item_info_dict = {}
for i in tqdm(range(df_item_info.shape[0])):
item_id = df_item_info.at[i, 'item_id']
item_discrete = df_item_info.at[i, 'item_vec'].split(',')[:3]
item_cont = df_item_info.at[i, 'item_vec'].split(',')[-2:]
price = df_item_info.at[i, 'price'] # / 3000
loc = df_item_info.at[i, 'location'] - 1 # 0~2
item_cont.append(price) # 2 + 1
item_discrete.append(loc) # 3 + 1
item_cont = [float(it) for it in item_cont]
item_discrete = [int(it) for it in item_discrete]
item_discrete[0] = item_discrete[0] - 1 # 1~4 -> 0~3
item_discrete[2] = item_discrete[2] - 1 # 1~2 -> 0~1
item_info_dict[int(item_id)] = {
'cont': np.array(item_cont, dtype=np.float64),
'discrete': np.array(item_discrete, dtype=np.int64),
}
return item_info_dict
item_info_dict = load_item_info(data_path='/content/')
item_info_dict[1]
cont1, cont2, cont3 = [], [], []
for k, v in item_info_dict.items():
c = v['cont']
cont1.append(c[0])
cont2.append(c[1])
cont3.append(c[2])
cont1, cont2, cont3 = np.array(cont1), np.array(cont2), np.array(cont3)
###Output
_____no_output_____
###Markdown
item cont1
###Code
cont1_nonzero = cont1[cont1 != 0]
cont1_nonzero_log = np.log(cont1_nonzero)
fig1, ax1 = plt.subplots()
# ax1.set_title('Basic Plot')
ax1.boxplot(cont1)
plt.show()
plt.hist(cont1, color='blue', edgecolor='black', bins=20)
plt.show()
fig1, ax1 = plt.subplots()
# ax1.set_title('Basic Plot')
ax1.boxplot(cont1_nonzero[cont1_nonzero < 5])
plt.show()
plt.hist(cont1_nonzero[cont1_nonzero < 5], color='blue', edgecolor='black', bins=100)
plt.show()
fig1, ax1 = plt.subplots()
# ax1.set_title('Basic Plot')
ax1.boxplot(cont1_nonzero_log[cont1_nonzero_log < 0])
plt.show()
plt.hist(cont1_nonzero_log[cont1_nonzero_log < 0], color='blue', edgecolor='black', bins=3)
plt.show()
def cont1_to_discrete(cont_val):
if cont_val == 0:
return 0
cont_val_log = np.log(cont_val)
if cont_val_log > 0:
return 4
if cont_val_log < -7.5:
return 1
if cont_val_log < -5.5:
return 2
if cont_val_log < 0:
return 3
cont1_discrete = []
for c in cont1:
tmp = cont1_to_discrete(c)
cont1_discrete.append(tmp)
plt.hist(cont1_discrete, color='blue', edgecolor='black', bins=100)
plt.show()
###Output
_____no_output_____
###Markdown
item cont2
###Code
fig1, ax1 = plt.subplots()
# ax1.set_title('Basic Plot')
ax1.boxplot(cont2[cont2 != 0])
plt.show()
plt.hist(cont2[cont2 != 0], color='blue', edgecolor='black', bins=9)
plt.show()
def cont2_to_discrete(cont_val):
if cont_val == 0:
return 0
if cont_val < 0.1:
return 1
if cont_val < 0.2:
return 2
if cont_val < 0.3:
return 3
if cont_val < 0.4:
return 4
if cont_val < 0.5:
return 5
if cont_val < 0.6:
return 6
if cont_val < 0.7:
return 7
if cont_val < 0.8:
return 8
if cont_val < 0.9:
return 9
cont2_discrete = []
for c in cont2:
tmp = cont2_to_discrete(c)
cont2_discrete.append(tmp)
plt.hist(cont2_discrete, color='blue', edgecolor='black', bins=100)
plt.show()
###Output
_____no_output_____
###Markdown
item cont3
###Code
cont3_log = np.log(cont3)
fig1, ax1 = plt.subplots()
# ax1.set_title('Basic Plot')
ax1.boxplot(cont3)
plt.show()
plt.hist(cont3, color='blue', edgecolor='black', bins=9)
plt.show()
fig1, ax1 = plt.subplots()
# ax1.set_title('Basic Plot')
ax1.boxplot(cont3[cont3 < 5000])
plt.show()
plt.figure(figsize=(10, 3))
plt.hist(cont3[cont3 < 5000], color='blue', edgecolor='black', bins=20)
plt.show()
fig1, ax1 = plt.subplots()
# ax1.set_title('Basic Plot')
ax1.boxplot(cont3[cont3 >= 2000])
plt.show()
plt.hist(cont3[cont3 >= 2000], color='blue', edgecolor='black', bins=9)
plt.show()
## price
def cont3_to_discrete(cont_val):
if cont_val < 300:
return 0
if cont_val < 500:
return 1
if cont_val < 750:
return 2
if cont_val < 1000:
return 3
if cont_val < 1500:
return 4
if cont_val < 2000:
return 5
if cont_val < 2500:
return 6
if cont_val < 3000:
return 7
if cont_val < 3500:
return 8
if cont_val <= 5000:
return 9
if cont_val > 5000:
return 10
cont3_discrete = []
for c in cont3:
tmp = cont3_to_discrete(c)
cont3_discrete.append(tmp)
plt.hist(cont3_discrete, color='blue', edgecolor='black', bins=100)
plt.show()
###Output
_____no_output_____
###Markdown
overall data cont to discretecont1,2,3 -> discrete len = [5, 10, 11]
###Code
def cont1_to_discrete(cont_val):
if cont_val == 0:
return 0
cont_val_log = np.log(cont_val)
if cont_val_log < -7.5:
return 1
if cont_val_log < -5.5:
return 2
if cont_val_log <= 0:
return 3
if cont_val_log > 0:
return 4
cont1_discrete = []
for c in cont1:
tmp = cont1_to_discrete(c)
cont1_discrete.append(tmp)
assert len(cont1_discrete) == len(cont1)
plt.hist(cont1_discrete, color='blue', edgecolor='black', bins=100)
plt.show()
def cont2_to_discrete(cont_val):
if cont_val == 0:
return 0
if cont_val < 0.1:
return 1
if cont_val < 0.2:
return 2
if cont_val < 0.3:
return 3
if cont_val < 0.4:
return 4
if cont_val < 0.5:
return 5
if cont_val < 0.6:
return 6
if cont_val < 0.7:
return 7
if cont_val < 0.8:
return 8
if cont_val < 0.9:
return 9
cont2_discrete = []
for c in cont2:
tmp = cont2_to_discrete(c)
cont2_discrete.append(tmp)
assert len(cont2_discrete) == len(cont2)
plt.hist(cont2_discrete, color='blue', edgecolor='black', bins=100)
plt.show()
## price
def cont3_to_discrete(cont_val):
if cont_val < 300:
return 0
if cont_val < 500:
return 1
if cont_val < 750:
return 2
if cont_val < 1000:
return 3
if cont_val < 1500:
return 4
if cont_val < 2000:
return 5
if cont_val < 2500:
return 6
if cont_val < 3000:
return 7
if cont_val < 3500:
return 8
if cont_val <= 5000:
return 9
if cont_val > 5000:
return 10
cont3_discrete = []
for c in cont3:
tmp = cont3_to_discrete(c)
cont3_discrete.append(tmp)
assert len(cont3_discrete) == len(cont3)
plt.hist(cont3_discrete, color='blue', edgecolor='black', bins=100)
plt.show()
def load_item_info_turn_cont_to_discrete(
data_path='/content/'
):
# item info
df_item_info = pd.read_parquet(f'{data_path}/item_info.parquet.snappy')
num_items = 381+1 # 0 means no item; normal items start from 1
num_features = (3+1) + (2+1)
item_features = np.zeros((num_items, num_features)).astype(np.int64)
for i in tqdm(range(num_items - 1)):
item_id = df_item_info.at[i, 'item_id']
# discrete
item_discrete = df_item_info.at[i, 'item_vec'].split(',')[:3]
loc = df_item_info.at[i, 'location'] - 1 # 0~2
item_discrete.append(loc)
item_discrete = [int(it) for it in item_discrete]
item_discrete[0] = item_discrete[0] - 1 # 1~4 -> 0~3
item_discrete[2] = item_discrete[2] - 1 # 1~2 -> 0~1
# cont
item_cont = df_item_info.at[i, 'item_vec'].split(',')[-2:]
price = df_item_info.at[i, 'price']
item_cont.append(price)
item_cont = [float(it) for it in item_cont]
item_cont1 = cont1_to_discrete(item_cont[0])
item_cont2 = cont2_to_discrete(item_cont[1])
item_cont3 = cont3_to_discrete(item_cont[2])
# agg
item_discrete.append(item_cont1)
item_discrete.append(item_cont2)
item_discrete.append(item_cont3)
item_total_feat = np.array(item_discrete, dtype=np.int64)
item_features[item_id] = item_total_feat
# change 0 item to no-feature (last idx of each feature + 1)
last_idx = np.max(item_features, axis=0)
item_features[0] = last_idx + 1
return item_features
item_features = load_item_info_turn_cont_to_discrete(
data_path='/content/'
)
print(item_features[:10])
###Output
[[ 4 10 2 3 5 10 11]
[ 1 2 0 0 3 9 7]
[ 1 0 0 0 3 8 0]
[ 1 8 0 0 3 8 3]
[ 1 0 0 0 3 9 4]
[ 1 0 0 0 3 8 2]
[ 1 7 0 0 3 8 4]
[ 1 7 0 0 3 7 7]
[ 1 0 0 0 3 7 0]
[ 1 0 0 0 3 7 0]]
###Markdown
data
###Code
## Get the mapping for the user portrait features (remap raw values to consecutive indices)
data_path='/content/'
# portraitidx_to_idx_dict_list: list of 10 dict, int:int
portraitidx_to_idx_dict_list = []
for i in range(10):
portraitidx_to_idx_dict_list.append(dict())
acculumated_idx = [0] * 10
df_train = pd.read_parquet(f'{data_path}/trainset.parquet.snappy')
for i in tqdm(range(df_train.shape[0])):
user_portrait = [int(s) for s in df_train.at[i, 'user_protrait'].split(',')]
for idx, u in enumerate(user_portrait):
if portraitidx_to_idx_dict_list[idx].get(u, -1) == -1:
portraitidx_to_idx_dict_list[idx][u] = acculumated_idx[idx]
acculumated_idx[idx] += 1
print(acculumated_idx)
# if a value appears in the test set but not in the training set, map it to the last (extra) index
df_test1 = pd.read_parquet(f'{data_path}/track1_testset.parquet.snappy')
for i in tqdm(range(df_test1.shape[0])):
user_portrait = [int(s) for s in df_test1.at[i, 'user_protrait'].split(',')]
for idx, u in enumerate(user_portrait):
if portraitidx_to_idx_dict_list[idx].get(u, -1) == -1:
portraitidx_to_idx_dict_list[idx][u] = acculumated_idx[idx]
df_test2 = pd.read_parquet(f'{data_path}/track2_testset.parquet.snappy')
for i in tqdm(range(df_test2.shape[0])):
user_portrait = [int(s) for s in df_test2.at[i, 'user_protrait'].split(',')]
for idx, u in enumerate(user_portrait):
if portraitidx_to_idx_dict_list[idx].get(u, -1) == -1:
portraitidx_to_idx_dict_list[idx][u] = acculumated_idx[idx]
for i in range(10):
acculumated_idx[i] += 1
# so add one extra index to every dimension at the end, even if some dimensions have no test-only values
print(acculumated_idx)
def load_train_data(data_path='/content/'):
# trainset
train_samples = []
val_samples = []
df_train = pd.read_parquet(f'{data_path}/trainset.parquet.snappy')
# shuffle
df_train = shuffle(df_train, random_state=2333).reset_index()
total_num = int(df_train.shape[0])
num_train = int(total_num * 0.95)
num_val = total_num - num_train # 5% validation data
for i in tqdm(range(total_num)):
if df_train.at[i, 'user_click_history'] == '0:0':
user_click_list = [0]
else:
user_click_list = df_train.at[i, 'user_click_history'].split(',')
user_click_list = [int(sample.split(':')[0]) for sample in user_click_list]
num_user_click_history = len(user_click_list)
tmp = np.zeros(400, dtype=np.int64)
tmp[:len(user_click_list)] = user_click_list
user_click_list = tmp
exposed_items = [int(s) for s in df_train.at[i, 'exposed_items'].split(',')]
labels = [int(s) for s in df_train.at[i, 'labels'].split(',')]
user_portrait = [int(s) for s in df_train.at[i, 'user_protrait'].split(',')]
# portraitidx_to_idx_dict_list: list of 10 dict, int:int
for j in range(10):
user_portrait[j] = portraitidx_to_idx_dict_list[j][user_portrait[j]]
one_sample = {
'user_click_list': user_click_list,
'num_user_click_history': num_user_click_history,
'user_portrait': np.array(user_portrait, dtype=np.int64),
'item_id': np.array(exposed_items, dtype=np.int64),
'label': np.array(labels, dtype=np.int64)
}
if i < num_train:
train_samples.append(one_sample)
else:
val_samples.append(one_sample)
return train_samples, val_samples
train_samples, val_samples = load_train_data(data_path='/content/')
# aug items within sess
from itertools import permutations
from functools import reduce
import operator
import random
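# enumerate all 6 * 6 * 6 = 216 orderings that permute the 9 exposed items only within each 3-item session (positions 0-2, 3-5, 6-8)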
perm1 = list(permutations([0, 1, 2]))
perm2 = list(permutations([3, 4, 5]))
perm3 = list(permutations([6, 7, 8]))
aug_order = []
for p1 in perm1:
# print(p1)
for p2 in perm2:
# print(p1, p2)
for p3 in perm3:
# print(p1, p2, p3)
tmp = reduce(operator.concat, [p1, p2, p3])
aug_order.append(tmp)
len_aug_order = len(aug_order)
class BigDataCupDataset(torch.utils.data.Dataset):
def __init__(self,
item_features,
database,
get_click_data=True,
train_val='train' # if train, use augorder
):
super().__init__()
self.item_features = item_features
self.database = database
self.train_val = train_val
self.get_click_data = get_click_data
def __len__(self, ):
return len(self.database)
def __getitem__(self, idx):
one_sample = self.database[idx]
user_click_history = one_sample['user_click_list'] # [400]
num_user_click_history = one_sample['num_user_click_history'] # int
user_discrete_feature = one_sample['user_portrait'] # [10]
nine_item_id = one_sample['item_id'] # [9]
label = one_sample['label'] # [9]
if self.train_val == 'train':
ao = list(aug_order[random.randint(0, len_aug_order - 1)])
nine_item_id = nine_item_id[ao]
label = label[ao]
user_click_history_discrete_feature = np.zeros((400, (3+1) + (2+1))).astype(np.int64)
for i in range(num_user_click_history):
if user_click_history[i] == 0:
user_click_history_discrete_feature[i] = self.item_features[user_click_history[i]]
# here 0 means the user has no clicks at all
else:
user_click_history_discrete_feature[i] = self.item_features[user_click_history[i]]
nine_item_discrete_feature = np.zeros((9, (3+1) + (2+1))).astype(np.int64)
for i in range(9):
nine_item_discrete_feature[i] = self.item_features[nine_item_id[i]]
session_label = 0 # 0,1,2,3
# 0: bought nothing
for i in range(9):
if label[i]: # bought within items 1~3
session_label = 1
if i >= 3 and label[i]: # bought within items 4~6
session_label = 2
if i >= 6 and label[i]: # bought within items 7~9
session_label = 3
# click
if self.get_click_data:
def neg_sample(): # buy and click items are not excluded here; just sample randomly for now
return random.randint(1, 381)
click_user_discrete_feature = user_discrete_feature
click_user_click_history = user_click_history
click_user_click_history_discrete_feature = user_click_history_discrete_feature
if num_user_click_history == 1:
click_user_click_history = user_click_history
click_num_user_click_history = num_user_click_history
click_item_id = neg_sample() # random sample (todo)
click_item_discrete_feature = torch.IntTensor(self.item_features[click_item_id])
click_label = torch.IntTensor([0])
else: # num_user_click_history >= 2
# randomly sample a position in the click history as the prediction target
click_idx = random.randint(2, num_user_click_history) # the clicked item to be predicted
click_num_user_click_history = click_idx - 1 # how many items precede the clicked item being predicted
# pos or neg 1:4
if random.randint(1, 3) == 1:
# pos
click_item_id = click_user_click_history[click_idx - 1]
click_label = torch.IntTensor([1])
else:
# neg
click_item_id = neg_sample()
click_label = torch.IntTensor([0])
click_item_discrete_feature = torch.IntTensor(self.item_features[click_item_id])
# buy
user_click_history = torch.IntTensor(user_click_history)
user_click_history_discrete_feature = torch.IntTensor(user_click_history_discrete_feature)
num_user_click_history = torch.IntTensor([num_user_click_history])
user_discrete_feature = torch.IntTensor(user_discrete_feature)
nine_item_id = torch.IntTensor(nine_item_id)
nine_item_discrete_feature = torch.IntTensor(nine_item_discrete_feature)
label = torch.IntTensor(label)
session_label = session_label
if not self.get_click_data:
return user_click_history, \
user_click_history_discrete_feature, \
num_user_click_history, \
nine_item_id, \
nine_item_discrete_feature, \
user_discrete_feature, \
label, session_label
else:
# click
click_user_click_history = torch.IntTensor(user_click_history)
click_user_click_history_discrete_feature = torch.IntTensor(click_user_click_history_discrete_feature)
click_num_user_click_history = torch.IntTensor([click_num_user_click_history])
click_item_id = torch.IntTensor([click_item_id])
click_item_discrete_feature = torch.IntTensor(click_item_discrete_feature)
click_user_discrete_feature = torch.IntTensor(click_user_discrete_feature)
click_label = torch.IntTensor([click_label])
return user_click_history, \
user_click_history_discrete_feature, \
num_user_click_history, \
nine_item_id, \
nine_item_discrete_feature, \
user_discrete_feature, \
label, session_label, \
click_user_click_history, \
click_user_click_history_discrete_feature, \
click_num_user_click_history, \
click_item_id, \
click_item_discrete_feature, \
click_user_discrete_feature, \
click_label
ds = BigDataCupDataset(item_features, train_samples, get_click_data=True, train_val='train')
ds[0]
train_samples, val_samples = load_train_data()
train_ds = BigDataCupDataset(item_features, train_samples, get_click_data=True, train_val='train')
train_dl = torch.utils.data.DataLoader(dataset=train_ds, batch_size=32, shuffle=True)
val_ds = BigDataCupDataset(item_features, val_samples, get_click_data=True, train_val='val')
val_dl = torch.utils.data.DataLoader(dataset=val_ds, batch_size=32, shuffle=False)
next(iter(train_dl))
###Output
_____no_output_____
###Markdown
model transformer
###Code
class MultiHeadSelfAttention(nn.Module):
def __init__(self,
hidden_size,
qkv_size,
num_heads,
dropout_ratio=0.
):
super().__init__()
self.n = num_heads
self.d = qkv_size
self.D = hidden_size
self.scale = self.d ** -0.5
self.to_qkv = nn.Linear(self.D, self.n * self.d * 3, bias=False)
self.attend = nn.Softmax(dim=-1)
self.to_out = nn.Sequential(
nn.Linear(self.n * self.d, self.D),
nn.Dropout(dropout_ratio)
)
def forward(self, x):
"""
x: BND
output: BND
"""
B, N, D = x.shape
# get qkv
qkv_agg = self.to_qkv(x) # BND -> BN(num_heads*qkv_size*3)
qkv_agg = qkv_agg.chunk(3, dim=-1) # BND -> 3 * [BN(num_heads*qkv_size)]
q = einops.rearrange(qkv_agg[0], 'B N (n d) -> B n N d', n=self.n)
k = einops.rearrange(qkv_agg[1], 'B N (n d) -> B n N d', n=self.n)
v = einops.rearrange(qkv_agg[2], 'B N (n d) -> B n N d', n=self.n)
# calc self attention
dots = torch.einsum('Bnid, Bnjd -> Bnij', q, k) # BnNd, BnNd -> BnNN
attn = self.attend(dots * self.scale)
out = torch.einsum('BnNj, Bnjd -> BnNd', attn, v) # BnNN, BnNd -> BnNd
out = einops.rearrange(out, 'B n N d -> B N (n d)') # BnNd -> BN(nd) = BND
# aggregate multihead
out = self.to_out(out)
return out
class FeedForwardNetwork(nn.Module):
def __init__(self,
hidden_size,
mlp_size,
dropout_ratio
):
super().__init__()
self.model = nn.Sequential(
nn.Linear(hidden_size, mlp_size),
nn.GELU(),
nn.Dropout(dropout_ratio),
nn.Linear(mlp_size, hidden_size),
nn.Dropout(dropout_ratio)
)
def forward(self, x):
"""
x: BND
output: BND
"""
return self.model(x)
class MultitaskTransformer(nn.Module):
def __init__(self,
num_items=381,
hidden_size=128,
num_layers=3,
mlp_size=64, # normally = 4 * hidden_size
qkv_size=32, # normally = 64 = hidden_size / num_heads
num_heads=4,
msa_dropout_ratio=0.1,
ffn_dropout_ratio=0.1,
device='cpu'
):
"""
Except for item_emb, all other embedding indices start from 0
"""
super().__init__()
self.device = device
self.num_items = num_items
self.NUM_ITEM_DISCRETE_FEATURE = 3+1 + 2+1 # item_vec3+location1 + item_vec2+price1
self.NUM_USER_DISCRETE_FEATURE = 10
self.hidden_size = hidden_size
self.N_buy = 1 + self.NUM_ITEM_DISCRETE_FEATURE + \
9 * (1 + self.NUM_ITEM_DISCRETE_FEATURE) + \
self.NUM_USER_DISCRETE_FEATURE
self.N_click = 1 + self.NUM_ITEM_DISCRETE_FEATURE + \
1 + self.NUM_ITEM_DISCRETE_FEATURE + \
self.NUM_USER_DISCRETE_FEATURE
# item emb
self.item_emb = nn.Embedding(self.num_items + 1, self.hidden_size) # 0 means no record, hence num_items + 1
# item discrete feature
self.item_discrete_feature_emb_list = nn.ModuleList()
num_unique_value_list = [4+1, 10+1, 2+1, 3+1, 5+1, 10+1, 11+1] # [4, 10, 2, 3]
for i in range(self.NUM_ITEM_DISCRETE_FEATURE):
num_unique_value = num_unique_value_list[i]
self.item_discrete_feature_emb_list.append(
nn.Embedding(num_unique_value, self.hidden_size)
)
# user discrete feature
self.user_discrete_feature_emb_list = nn.ModuleList()
num_unique_value_list = [4, 1364, 21, 11, 196, 50, 4, 12, 3, 2165] # (already add 1 for features in test but not in train)
for i in range(self.NUM_USER_DISCRETE_FEATURE):
num_unique_value = num_unique_value_list[i]
self.user_discrete_feature_emb_list.append(
nn.Embedding(num_unique_value, self.hidden_size)
)
# position emb
self.position_emb_buy = nn.Parameter(torch.randn(1, self.N_buy, self.hidden_size))
self.position_emb_click = nn.Parameter(torch.randn(1, self.N_click, self.hidden_size))
# transformer layers
self.transformer_layers_buy = nn.ModuleList([])
for _ in range(num_layers):
self.transformer_layers_buy.append(nn.ModuleList([
nn.Sequential( # MSA(LN(x))
nn.LayerNorm(self.hidden_size),
MultiHeadSelfAttention(self.hidden_size, qkv_size, num_heads, msa_dropout_ratio),
),
nn.Sequential( # MLPs(LN(x))
nn.LayerNorm(self.hidden_size),
FeedForwardNetwork(self.hidden_size, mlp_size, ffn_dropout_ratio)
)
]))
self.transformer_layers_click = nn.ModuleList([])
for _ in range(num_layers):
self.transformer_layers_click.append(nn.ModuleList([
nn.Sequential( # MSA(LN(x))
nn.LayerNorm(self.hidden_size),
MultiHeadSelfAttention(self.hidden_size, qkv_size, num_heads, msa_dropout_ratio),
),
nn.Sequential( # MLPs(LN(x))
nn.LayerNorm(self.hidden_size),
FeedForwardNetwork(self.hidden_size, mlp_size, ffn_dropout_ratio)
)
]))
# session prediction head
self.session_prediction_head = nn.Sequential(
nn.Linear(self.hidden_size, 64),
nn.PReLU(),
nn.Linear(64, 4)
)
# buy prediction head
self.buy_prediction_head = nn.Sequential(
nn.Linear(self.hidden_size, 64),
nn.PReLU(),
nn.Linear(64, 9)
)
# click prediction head
self.click_prediction_head = nn.Sequential(
nn.Linear(self.hidden_size, 64),
nn.PReLU(),
nn.Linear(64, 1)
)
def get_item_emb_attr(self,
item_id,
item_discrete_feature):
"""
param:
            item_id: [B, 9] (0 means "no record"; real item ids start from 1)
            item_discrete_feature: [B, 9, NUM_ITEM_DISCRETE_FEATURE]
return:
emb_attr:
[B(batchsize), 9, N(num_feat=1+7), D(hiddendim)]
note:
above, 9 can be an arbitrary number, e.g. 400
"""
tmp = []
# item emb
item_emb = self.item_emb(item_id) # [B, 9, D]
tmp.append(torch.unsqueeze(item_emb, 2)) # [B, 9, 1, D]
# item discrete feature emb
for i in range(self.NUM_ITEM_DISCRETE_FEATURE):
a = self.item_discrete_feature_emb_list[i](item_discrete_feature[:, :, i]) # [B, 9, D]
tmp.append(torch.unsqueeze(a, 2)) # [B, 9, 1, D]
# cat to [B, 9, N, D]
return torch.cat(tmp, dim=2) # [B, 9, 8, D]
def forward(self,
user_click_history, user_click_history_discrete_feature, num_user_click_history,
nine_item_id, nine_item_discrete_feature,
user_discrete_feature,
):
"""
        User click history (item embeddings and discrete attributes):
        user_click_history: [N, 400], up to 400 click-history entries, each holding an item id; 0 means no record
        user_click_history_discrete_feature: [N, 400, 3+1 + 2+1]
        num_user_click_history: [N, 1], number of click-history entries per user
        The 9 items shown to the user (item embeddings, discrete and continuous attributes):
        nine_item_id: [N, 9], item ids
        nine_item_discrete_feature: [N, 9, 3+1 + 2+1] item discrete attributes (already remapped): item_vec3 + location1 + item_vec2 + price1
        User attributes (discrete):
        user_discrete_feature: [B, 10] user discrete attributes (already remapped)
"""
batch_size = user_click_history.size()[0]
        # user click history (item embeddings and discrete attributes)
user_click_history_emb = torch.zeros( # [B, 8, D]
(batch_size, 1 + self.NUM_ITEM_DISCRETE_FEATURE, self.hidden_size)
).to(self.device)
assert 1 + self.NUM_ITEM_DISCRETE_FEATURE == 8
tmp = self.get_item_emb_attr(user_click_history, user_click_history_discrete_feature) # [B, 400, 8, D]
for i in range(batch_size):
aa = tmp[i, :num_user_click_history[i], :, :] # [B, 400, 8, D] -> [400-, 8, D]
a = torch.mean(aa, dim=0) # [400-, 8, D] -> [8, D]
user_click_history_emb[i] = a
        # the 9 items shown to the user (item embeddings and discrete attributes)
nine_item_emb = self.get_item_emb_attr(nine_item_id, nine_item_discrete_feature) # [B, 9, 8, D]
nine_item_emb = einops.rearrange(nine_item_emb, 'B n N D -> B (n N) D') # [B, 9*8, D]
        # user attributes (discrete features)
tmp = []
for i in range(self.NUM_USER_DISCRETE_FEATURE):
a = self.user_discrete_feature_emb_list[i](user_discrete_feature[:, i]) # [B, D]
tmp.append(torch.unsqueeze(a, 1)) # [B, 1, D]
user_discrete_feature_emb = torch.cat(tmp, dim=1) # [B, 10, D]
# concat all emb
z0 = torch.cat([user_click_history_emb, # [B, 8, D]
nine_item_emb, # [B, 9*8, D]
user_discrete_feature_emb, # [B, 10, D]
], dim=1) # [B, N, D]
position_embs = einops.repeat(self.position_emb_buy, '() N D -> B N D', B=batch_size)
z0 = z0 + position_embs
# transformer
zl = z0
for transformer_layer in self.transformer_layers_buy:
zl = zl + transformer_layer[0](zl) # MSA(LN(x))
zl = zl + transformer_layer[1](zl) # MLPs(LN(x))
# global average pooling
zl = einops.reduce(zl, 'B N D -> B D', reduction='mean')
# head
session_pred = self.session_prediction_head(zl)
buy_pred = self.buy_prediction_head(zl)
return session_pred, buy_pred # [B, 4], [B, 9]
def forward_click(self,
user_click_history, user_click_history_discrete_feature, num_user_click_history,
item_id, item_discrete_feature,
user_discrete_feature):
"""
        User's previous click history (item embeddings, discrete attributes, continuous attributes discretized):
        user_click_history: [N, 400], up to 400 click-history entries, each holding an item id; 0 means no record
        user_click_history_discrete_feature: [N, 400, 3+1 + 2+1]
        num_user_click_history: [N, 1], number of click-history entries per user
        The item the user is __currently clicking__ (item embedding, discrete attributes, continuous attributes discretized):
        item_id: [N, 1], item id
        item_discrete_feature: [N, 3+1 + 2+1] item discrete attributes (already remapped): item_vec3 + location1 + item_vec2 + price1
        User attributes (discrete):
        user_discrete_feature: [B, 10] user discrete attributes (already remapped)
        Output:
        1. whether the user clicks this item
"""
batch_size = user_click_history.size()[0]
        # user click history (item embeddings and discrete attributes)
user_click_history_emb = torch.zeros( # [B, 7+1, D]
(batch_size, 1 + self.NUM_ITEM_DISCRETE_FEATURE, self.hidden_size)
).to(self.device)
assert 1 + self.NUM_ITEM_DISCRETE_FEATURE == 8
# print(user_click_history.device, user_click_history_discrete_feature.device, flush=True)
tmp = self.get_item_emb_attr(user_click_history, user_click_history_discrete_feature) # [B, 400, 8, D]
for i in range(batch_size):
aa = tmp[i, :num_user_click_history[i], :, :] # [B, 400, 8, D] -> [400-, 8, D]
a = torch.mean(aa, dim=0) # [400-, 8, D] -> [8, D]
user_click_history_emb[i] = a
        # the item the user is __currently clicking__ (item embedding and discrete attributes)
item_discrete_feature = torch.unsqueeze(item_discrete_feature, dim=1) # [B, 7] -> [B, 1, 7]
# print(item_id.shape, item_discrete_feature.shape)
item_emb = self.get_item_emb_attr(item_id, item_discrete_feature) # [B, 1, 8, D]
item_emb = einops.rearrange(item_emb, 'B n N D -> B (n N) D') # [B, 1*8, D]
        # user attributes (discrete features)
tmp = []
for i in range(self.NUM_USER_DISCRETE_FEATURE):
a = self.user_discrete_feature_emb_list[i](user_discrete_feature[:, i]) # [B, D]
tmp.append(torch.unsqueeze(a, 1)) # [B, 1, D]
user_discrete_feature_emb = torch.cat(tmp, dim=1) # [B, 10, D]
# concat all emb
z0 = torch.cat([user_click_history_emb, # [B, 8, D]
item_emb, # [B, 1*8, D]
user_discrete_feature_emb, # [B, 10, D]
], dim=1) # [B, N, D]
position_embs = einops.repeat(self.position_emb_click, '() N D -> B N D', B=batch_size)
z0 = z0 + position_embs
# transformer
zl = z0
for transformer_layer in self.transformer_layers_click:
zl = zl + transformer_layer[0](zl) # MSA(LN(x))
zl = zl + transformer_layer[1](zl) # MLPs(LN(x))
# global average pooling
zl = einops.reduce(zl, 'B N D -> B D', reduction='mean')
# head
click_pred = self.click_prediction_head(zl)
return click_pred # [B, 1]
m = MultitaskTransformer(
num_items=381,
hidden_size=128,
num_layers=3,
mlp_size=64, # normally = 4 * hidden_size
qkv_size=32, # normally = 64 = hidden_size / num_heads
num_heads=4,
msa_dropout_ratio=0.1,
ffn_dropout_ratio=0.1,
device='cuda'
)
m = m.to('cuda')
B = 3
a = m(
user_click_history=torch.ones([B, 400], dtype=torch.int32).cuda(),
user_click_history_discrete_feature=torch.ones([B, 400, 7], dtype=torch.int32).cuda(),
num_user_click_history=torch.ones([B, 1], dtype=torch.int32).cuda() * 10,
user_discrete_feature=torch.ones([B, 10], dtype=torch.int32).cuda(),
nine_item_id=torch.ones([B, 9], dtype=torch.int32).cuda(),
nine_item_discrete_feature=torch.ones([B, 9, 7], dtype=torch.int32).cuda(),
)
print(a)
b = m.forward_click(
user_click_history=torch.ones([B, 400], dtype=torch.int32).cuda(),
user_click_history_discrete_feature=torch.ones([B, 400, 7], dtype=torch.int32).cuda(),
num_user_click_history=torch.ones([B, 1], dtype=torch.int32).cuda() * 10,
user_discrete_feature=torch.ones([B, 10], dtype=torch.int32).cuda(),
item_id=torch.ones([B, 1], dtype=torch.int32).cuda(),
item_discrete_feature=torch.ones([B, 7], dtype=torch.int32).cuda(),
)
print(b)
###Output
(tensor([[-0.1304, 0.1264, 0.0343, -0.0283],
[-0.1473, 0.1203, 0.0343, -0.0342],
[-0.1419, 0.1202, 0.0390, -0.0435]], device='cuda:0',
grad_fn=<AddmmBackward>), tensor([[ 0.0914, -0.0263, 0.1005, 0.0078, -0.0815, 0.0474, 0.0286, 0.0280,
-0.3035],
[ 0.0947, -0.0134, 0.1019, 0.0182, -0.0888, 0.0532, 0.0417, 0.0208,
-0.3038],
[ 0.0872, -0.0221, 0.1106, 0.0211, -0.0725, 0.0380, 0.0364, 0.0222,
-0.3063]], device='cuda:0', grad_fn=<AddmmBackward>))
tensor([[0.1455],
[0.1450],
[0.1383]], device='cuda:0', grad_fn=<AddmmBackward>)
###Markdown
train
###Code
model_name = 'multitask_transformer_augorder_adamlr0.001_epoch10'
tb_path = 'runs/%s-%s' % (datetime.today().strftime('%Y-%m-%d-%H:%M:%S'), model_name)
tb_writer = SummaryWriter(tb_path)
device = 'cuda'
model = MultitaskTransformer(
num_items=381,
hidden_size=128,
num_layers=3,
mlp_size=64, # normally = 4 * hidden_size
qkv_size=32, # normally = 64 = hidden_size / num_heads
num_heads=4,
msa_dropout_ratio=0.1,
ffn_dropout_ratio=0.1,
device='cuda'
)
model = model.to(device)
def binary_acc(sess_pred, y_pred, y_test):
# print(sess_pred)
y_pred_tag = torch.round(torch.sigmoid(y_pred))
y_pred_tag_intact = y_pred_tag.clone()
##################################
## vanilla
correct_results_sum = (y_pred_tag == y_test).sum().float()
acc1 = correct_results_sum / y_test.shape[0] / 9
real_acc1 = 0.0
for i in range(y_test.shape[0]):
correct_results_sum = (y_pred_tag[i] == y_test[i]).sum().float()
one_acc = correct_results_sum / 9
if one_acc == 1:
real_acc1 += 1
real_acc1 = real_acc1 / y_test.shape[0]
# print(y_pred_tag)
####################################
## use sess to refine y_pred_tag
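    ## (Interpretation inferred from the rules below, not documented elsewhere:) the 9
    ## items form three groups of 3; session 0 = nothing bought, session 1 = purchases
    ## only within items 0-2, session 2 = items 0-2 all bought and purchases confined
    ## to items 0-5, session 3 = items 0-5 all bought.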
for i in range(y_test.shape[0]):
if sess_pred[i] == 0:
y_pred_tag[i][:] = 0
elif sess_pred[i] == 1:
y_pred_tag[i][3:] = 0
elif sess_pred[i] == 2:
y_pred_tag[i][:3] = 1
y_pred_tag[i][6:] = 0
elif sess_pred[i] == 3:
y_pred_tag[i][:6] = 1
correct_results_sum = (y_pred_tag == y_test).sum().float()
acc2 = correct_results_sum / y_test.shape[0] / 9
real_acc2 = 0.0
for i in range(y_test.shape[0]):
correct_results_sum = (y_pred_tag[i] == y_test[i]).sum().float()
one_acc = correct_results_sum / 9
if one_acc == 1:
real_acc2 += 1
real_acc2 = real_acc2 / y_test.shape[0]
# print(y_pred_tag)
#######################################
## rule 2
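    ## (Inferred reading of the loop below:) scanning items from last to first, a
    ## predicted purchase in items 6-8 forces items 0-5 to "bought", and a predicted
    ## purchase in items 3-5 forces items 0-2 to "bought"; no session prediction is used.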
y_pred_tag = y_pred_tag_intact
acc_rule2 = 0.0
real_acc_rule2 = 0.0
for i in range(y_test.shape[0]):
for j in range(9):
k = 8 - j
if k >= 6 and y_pred_tag[i][k] == 1:
y_pred_tag[i][:6] = 1
if k >= 3 and y_pred_tag[i][k] == 1:
y_pred_tag[i][:3] = 1
correct_results_sum = (y_pred_tag[i] == y_test[i]).sum().float()
a = correct_results_sum / 9
acc_rule2 += a
if a == 1:
real_acc_rule2 += 1
acc_rule2 = acc_rule2 / y_test.shape[0]
real_acc_rule2 = real_acc_rule2 / y_test.shape[0]
# print(y_pred_tag)
return acc1, acc2, acc_rule2, real_acc1, real_acc2, real_acc_rule2
def click_acc(y_pred, y_test):
y_pred_tag = torch.round(torch.sigmoid(y_pred))
correct_results_sum = (y_pred_tag == y_test).sum().float()
acc = correct_results_sum / y_test.shape[0]
return acc
sess_criterion = nn.CrossEntropyLoss()
buy_criterion = nn.BCEWithLogitsLoss()
click_criterion = nn.BCEWithLogitsLoss()
# optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
optimizer = optim.Adam(model.parameters(), lr=0.001, betas=(0.9, 0.999))
NUM_EPOCH = 10
batches_done = 0
best_val_acc = 0
# for epoch_idx in range(NUM_EPOCH): # loop over the dataset multiple times
optimizer = optim.Adam(model.parameters(), lr=0.0001, betas=(0.9, 0.999))
for epoch_idx in range(10, 20): # loop over the dataset multiple times
train_running_sess_loss = 0.0
train_running_buy_loss = 0.0
train_running_click_loss = 0.0
train_cnt = 0
train_click_acc = 0
train_sess_acc_sum = 0
train_buy_acc1_sum = 0
train_buy_real_acc1_sum = 0
train_buy_acc2_sum = 0
train_buy_real_acc2_sum = 0
train_buy_acc_rule2_sum = 0
train_buy_real_acc_rule2_sum = 0
train_cnt_session_0 = train_cnt_session_1 = train_cnt_session_2 = train_cnt_session_3 = 0
for i, data in enumerate(train_dl, 0):
model.train()
# get the inputs; data is a list of [inputs, labels]
user_click_history, \
user_click_history_discrete_feature, \
num_user_click_history, \
item_id, item_discrete_feature, \
user_discrete_feature, label, session_label, \
click_user_click_history, \
click_user_click_history_discrete_feature, \
click_num_user_click_history, \
click_item_id, \
click_item_discrete_feature, \
click_user_discrete_feature, \
click_label = data
user_click_history = user_click_history.to(device)
user_click_history_discrete_feature = user_click_history_discrete_feature.to(device)
num_user_click_history = num_user_click_history.to(device)
item_id = item_id.to(device)
item_discrete_feature = item_discrete_feature.to(device)
user_discrete_feature = user_discrete_feature.to(device)
label = label.to(device)
session_label = session_label.to(device)
click_user_click_history = click_user_click_history.to(device)
click_user_click_history_discrete_feature = click_user_click_history_discrete_feature.to(device)
click_num_user_click_history = click_num_user_click_history.to(device)
click_item_id = click_item_id.to(device)
click_item_discrete_feature = click_item_discrete_feature.to(device)
click_user_discrete_feature = click_user_discrete_feature.to(device)
click_label = click_label.to(device)
train_batch_size = user_click_history.shape[0]
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
sess_outputs, buy_outputs = model(
user_click_history,
user_click_history_discrete_feature,
num_user_click_history,
item_id,
item_discrete_feature,
user_discrete_feature
)
click_outputs = model.forward_click(
click_user_click_history,
click_user_click_history_discrete_feature,
click_num_user_click_history,
click_item_id,
click_item_discrete_feature,
click_user_discrete_feature
)
sess_loss = sess_criterion(sess_outputs, session_label)
buy_loss = buy_criterion(buy_outputs, label.float())
click_loss = click_criterion(click_outputs, click_label.float())
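        # fixed multi-task loss weighting: 10% session loss, 80% buy loss, 10% click loss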
loss = 0.1 * sess_loss + 0.8 * buy_loss + 0.1 * click_loss
# loss = click_loss
loss.backward()
optimizer.step()
# print statistics
train_running_sess_loss += sess_loss.item()
train_running_buy_loss += buy_loss.item()
train_running_click_loss += click_loss.item()
_, sess_predicted = torch.max(sess_outputs.data, 1)
sess_acc = (sess_predicted == session_label).sum().item() / train_batch_size
# buy_acc1, buy_acc2, buy_real_acc1, buy_real_acc2 = binary_acc(sess_predicted, buy_outputs, label)
buy_acc1, buy_acc2, buy_acc_rule2, buy_real_acc1, buy_real_acc2, buy_real_acc_rule2 = binary_acc(sess_predicted, buy_outputs, label)
train_click_acc += click_acc(click_outputs, click_label)
train_sess_acc_sum += sess_acc
train_buy_acc1_sum += buy_acc1
train_buy_real_acc1_sum += buy_real_acc1
train_buy_acc2_sum += buy_acc2
train_buy_real_acc2_sum += buy_real_acc2
train_buy_acc_rule2_sum += buy_acc_rule2
train_buy_real_acc_rule2_sum += buy_real_acc_rule2
train_cnt += 1
# train_cnt_session_0 += torch.sum(session_label == 0)
# train_cnt_session_1 += torch.sum(session_label == 1)
# train_cnt_session_2 += torch.sum(session_label == 2)
# train_cnt_session_3 += torch.sum(session_label == 3)
batches_done += 1
if i % 50 == 1:
print(i, end=' ')
        if i % 500 == 1 and i != 1:  # print running statistics every 500 mini-batches
print('----- TRAIN -----')
print('[%d, %5d] sess loss: %.3f' % (epoch_idx + 1, i + 1, train_running_sess_loss / train_cnt))
print('[%d, %5d] buy loss: %.3f' % (epoch_idx + 1, i + 1, train_running_buy_loss / train_cnt))
print('[%d, %5d] click loss: %.3f' % (epoch_idx + 1, i + 1, train_running_click_loss / train_cnt))
print('- sess acc:', train_sess_acc_sum / train_cnt, flush=True)
print('- buy acc1:', train_buy_acc1_sum.cpu().item() / train_cnt, flush=True)
print('- buy real acc1:', train_buy_real_acc1_sum / train_cnt, flush=True)
print('- buy acc2:', train_buy_acc2_sum.cpu().item() / train_cnt, flush=True)
print('- buy real acc2:', train_buy_real_acc2_sum / train_cnt, flush=True)
print('- buy acc rule2:', train_buy_acc_rule2_sum.cpu().item() / train_cnt, flush=True)
print('- buy real acc rule2:', train_buy_real_acc_rule2_sum / train_cnt, flush=True)
print('- click acc:', train_click_acc / train_cnt, flush=True)
# print('- train sess cnt:', train_cnt_session_0, train_cnt_session_1, train_cnt_session_2, train_cnt_session_3)
tb_writer.add_scalar('train/sess loss', train_running_sess_loss / train_cnt, batches_done)
tb_writer.add_scalar('train/buy loss', train_running_buy_loss / train_cnt, batches_done)
tb_writer.add_scalar('train/click loss', train_running_click_loss / train_cnt, batches_done)
tb_writer.add_scalar('train/sess acc', train_sess_acc_sum / train_cnt, batches_done)
tb_writer.add_scalar('train/buy acc1', train_buy_acc1_sum.cpu().item() / train_cnt, batches_done)
tb_writer.add_scalar('train/buy real acc1', train_buy_real_acc1_sum / train_cnt, batches_done)
tb_writer.add_scalar('train/buy acc2', train_buy_acc2_sum.cpu().item() / train_cnt, batches_done)
tb_writer.add_scalar('train/buy real acc2', train_buy_real_acc2_sum / train_cnt, batches_done)
tb_writer.add_scalar('train/buy acc rule2', train_buy_acc_rule2_sum.cpu().item() / train_cnt, batches_done)
tb_writer.add_scalar('train/buy real acc rule2', train_buy_real_acc_rule2_sum / train_cnt, batches_done)
tb_writer.add_scalar('train/click acc', train_click_acc / train_cnt, batches_done)
train_running_sess_loss = 0.0
train_running_buy_loss = 0.0
train_running_click_loss = 0.0
train_cnt = 0
train_click_acc = 0
train_sess_acc_sum = 0
train_buy_acc1_sum = 0
train_buy_real_acc1_sum = 0
train_buy_acc2_sum = 0
train_buy_real_acc2_sum = 0
train_buy_acc_rule2_sum = 0
train_buy_real_acc_rule2_sum = 0
train_cnt_session_0 = train_cnt_session_1 = train_cnt_session_2 = train_cnt_session_3 = 0
## val
model.eval()
valid_running_sess_loss = 0.0
valid_running_buy_loss = 0.0
valid_running_click_loss = 0.0
valid_cnt = 0
valid_click_acc = 0
valid_sess_acc_sum = 0
valid_buy_acc1_sum = 0
valid_buy_real_acc1_sum = 0
valid_buy_acc2_sum = 0
valid_buy_real_acc2_sum = 0
valid_buy_acc_rule2_sum = 0
valid_buy_real_acc_rule2_sum = 0
valid_cnt_session_0 = valid_cnt_session_1 = valid_cnt_session_2 = valid_cnt_session_3 = 0
for _, val_data in tqdm(enumerate(val_dl, 0)):
user_click_history, \
user_click_history_discrete_feature, \
num_user_click_history, \
item_id, item_discrete_feature, \
user_discrete_feature, label, session_label, \
click_user_click_history, \
click_user_click_history_discrete_feature, \
click_num_user_click_history, \
click_item_id, \
click_item_discrete_feature, \
click_user_discrete_feature, \
click_label = val_data
user_click_history = user_click_history.to(device)
user_click_history_discrete_feature = user_click_history_discrete_feature.to(device)
num_user_click_history = num_user_click_history.to(device)
item_id = item_id.to(device)
item_discrete_feature = item_discrete_feature.to(device)
user_discrete_feature = user_discrete_feature.to(device)
label = label.to(device)
session_label = session_label.to(device)
click_user_click_history = click_user_click_history.to(device)
click_user_click_history_discrete_feature = click_user_click_history_discrete_feature.to(device)
click_num_user_click_history = click_num_user_click_history.to(device)
click_item_id = click_item_id.to(device)
click_item_discrete_feature = click_item_discrete_feature.to(device)
click_user_discrete_feature = click_user_discrete_feature.to(device)
click_label = click_label.to(device)
sess_outputs, buy_outputs = model(
user_click_history,
user_click_history_discrete_feature,
num_user_click_history,
item_id,
item_discrete_feature,
user_discrete_feature
)
click_outputs = model.forward_click(
click_user_click_history,
click_user_click_history_discrete_feature,
click_num_user_click_history,
click_item_id,
click_item_discrete_feature,
click_user_discrete_feature
)
sess_loss = sess_criterion(sess_outputs, session_label)
buy_loss = buy_criterion(buy_outputs, label.float())
click_loss = click_criterion(click_outputs, click_label.float())
valid_running_sess_loss += sess_loss.item()
valid_running_buy_loss += buy_loss.item()
valid_running_click_loss += click_loss.item()
valid_batch_size = user_click_history.shape[0]
_, sess_predicted = torch.max(sess_outputs.data, 1)
sess_acc = (sess_predicted == session_label).sum().item() / valid_batch_size
buy_acc1, buy_acc2, buy_acc_rule2, buy_real_acc1, buy_real_acc2, buy_real_acc_rule2 = binary_acc(sess_predicted, buy_outputs, label)
valid_click_acc += click_acc(click_outputs, click_label)
valid_sess_acc_sum += sess_acc
valid_buy_acc1_sum += buy_acc1
valid_buy_real_acc1_sum += buy_real_acc1
valid_buy_acc2_sum += buy_acc2
valid_buy_real_acc2_sum += buy_real_acc2
valid_buy_acc_rule2_sum += buy_acc_rule2
valid_buy_real_acc_rule2_sum += buy_real_acc_rule2
valid_cnt += 1
# valid_cnt_session_0 += torch.sum(session_label == 0)
# valid_cnt_session_1 += torch.sum(session_label == 1)
# valid_cnt_session_2 += torch.sum(session_label == 2)
# valid_cnt_session_3 += torch.sum(session_label == 3)
valid_acc = valid_buy_real_acc2_sum / valid_cnt
if valid_acc > best_val_acc:
best_val_acc = valid_acc
valid_acc = round(valid_acc, 6)
with open(f'{tb_path}/val_best_acc.txt', 'w') as fp:
print('epoch:', epoch_idx, file=fp)
print('batches_done:', batches_done, file=fp)
print('buy real acc2:', valid_acc, file=fp)
torch.save(model, f'{tb_path}/val_best.pth')
print('----- VAL -----')
print('- sess loss:', valid_running_sess_loss / valid_cnt)
print('- buy loss:', valid_running_buy_loss / valid_cnt)
print('- click loss:', valid_running_click_loss / valid_cnt)
print('- sess acc:', valid_sess_acc_sum / valid_cnt)
print('- buy acc1:', valid_buy_acc1_sum.cpu().item() / valid_cnt)
print('- buy real acc1:', valid_buy_real_acc1_sum / valid_cnt)
print('- buy acc2:', valid_buy_acc2_sum.cpu().item() / valid_cnt)
print('- buy real acc2:', valid_buy_real_acc2_sum / valid_cnt)
print('- buy acc rule2:', valid_buy_acc_rule2_sum.cpu().item() / valid_cnt)
print('- buy real acc rule2:', valid_buy_real_acc_rule2_sum / valid_cnt)
print('- click acc:', valid_click_acc / valid_cnt)
# print('valid sess cnt:', valid_cnt_session_0, valid_cnt_session_1, valid_cnt_session_2, valid_cnt_session_3)
tb_writer.add_scalar('val/sess loss', valid_running_sess_loss / valid_cnt, batches_done)
tb_writer.add_scalar('val/buy loss', valid_running_buy_loss / valid_cnt, batches_done)
tb_writer.add_scalar('val/click loss', valid_running_click_loss / valid_cnt, batches_done)
tb_writer.add_scalar('val/sess acc', valid_sess_acc_sum / valid_cnt, batches_done)
tb_writer.add_scalar('val/buy acc1', valid_buy_acc1_sum.cpu().item() / valid_cnt, batches_done)
tb_writer.add_scalar('val/buy real acc1', valid_buy_real_acc1_sum / valid_cnt, batches_done)
tb_writer.add_scalar('val/buy acc2', valid_buy_acc2_sum.cpu().item() / valid_cnt, batches_done)
tb_writer.add_scalar('val/buy real acc2', valid_buy_real_acc2_sum / valid_cnt, batches_done)
tb_writer.add_scalar('val/buy acc rule2', valid_buy_acc_rule2_sum.cpu().item() / valid_cnt, batches_done)
tb_writer.add_scalar('val/buy real acc rule2', valid_buy_real_acc_rule2_sum / valid_cnt, batches_done)
tb_writer.add_scalar('val/click acc', valid_click_acc / valid_cnt, batches_done)
print('Finished Training')
# torch.save(model, f'{tb_path}/model_epoch10.pth')
torch.save(model, f'{tb_path}/model_epoch20.pth')
###Output
_____no_output_____
###Markdown
test
###Code
def load_test_data(data_path='/content/',
filename='track1_testset.csv'):
test_samples = []
    df_test = pd.read_csv(f'{data_path}/{filename}', sep=' ')  # the test set is a space-separated csv
total_num = int(df_test.shape[0])
for i in tqdm(range(total_num)):
if df_test.at[i, 'user_click_history'] == '0:0':
user_click_list = [0]
else:
user_click_list = df_test.at[i, 'user_click_history'].split(',')
user_click_list = [int(sample.split(':')[0]) for sample in user_click_list]
num_user_click_history = len(user_click_list)
tmp = np.zeros(400, dtype=np.int64)
tmp[:len(user_click_list)] = user_click_list
user_click_list = tmp
exposed_items = [int(s) for s in df_test.at[i, 'exposed_items'].split(',')]
user_portrait = [int(s) for s in df_test.at[i, 'user_protrait'].split(',')]
# portraitidx_to_idx_dict_list: list of 10 dict, int:int
for j in range(10):
user_portrait[j] = portraitidx_to_idx_dict_list[j][user_portrait[j]]
one_sample = {
'user_click_list': user_click_list,
'num_user_click_history': num_user_click_history,
'user_portrait': np.array(user_portrait, dtype=np.int64),
'item_id': np.array(exposed_items, dtype=np.int64),
}
test_samples.append(one_sample)
return test_samples
class BigDataCupTestDataset(torch.utils.data.Dataset):
def __init__(self,
item_features,
database
):
super().__init__()
self.item_features = item_features
self.database = database
def __len__(self, ):
return len(self.database)
def __getitem__(self, idx):
one_sample = self.database[idx]
user_click_history = one_sample['user_click_list']
num_user_click_history = one_sample['num_user_click_history']
user_discrete_feature = one_sample['user_portrait']
item_id = one_sample['item_id']
user_click_history_discrete_feature = np.zeros((400, 3+1 + 2+1)).astype(np.int64)
        for i in range(num_user_click_history):
            user_click_history_discrete_feature[i] = self.item_features[user_click_history[i]]
item_discrete_feature = np.zeros((9, 3+1 + 2+1)).astype(np.int64)
for i in range(9):
item_discrete_feature[i] = self.item_features[item_id[i]]
user_click_history = torch.IntTensor(user_click_history)
user_click_history_discrete_feature = torch.IntTensor(user_click_history_discrete_feature)
num_user_click_history = torch.IntTensor([num_user_click_history])
user_discrete_feature = torch.IntTensor(user_discrete_feature)
item_id = torch.IntTensor(item_id)
item_discrete_feature = torch.IntTensor(item_discrete_feature)
return user_click_history, \
user_click_history_discrete_feature, \
num_user_click_history, \
item_id, item_discrete_feature, \
user_discrete_feature
test_samples = load_test_data(data_path='/content/',
filename='track1_testset.csv')
test_ds = BigDataCupTestDataset(item_features, test_samples)
test_dl = torch.utils.data.DataLoader(dataset=test_ds, batch_size=32, shuffle=False)
# tb_path = 'runs/2021-08-16-07:50:46-4sess_pred_item_feat_extracion_deepermodel_augorder_adamlr0.001_epoch10_lr0.0001_epoch20'
# tb_path = 'runs/2021-08-18-17:25:49-4sess_pred_item_feat_extracion_deepermodel_augorder_itemalldiscretefeat_adamlr0.001_epoch30_lr0.0001_epoch50'
# tb_path = 'runs/2021-08-19-14:52:47-transformer_augorder_itemalldiscretefeat_adamlr0.001_epoch30'
tb_path = 'runs/2021-08-20-11:39:18-multitask_transformer_augorder_adamlr0.001_epoch10_lr0.0001_epoch20'
# model = torch.load(f'{tb_path}/model_0.0001.pth', map_location='cpu')
# model = torch.load(f'{tb_path}/model.pth', map_location='cpu')
model = torch.load(f'{tb_path}/val_best.pth', map_location='cpu')
model = model.eval()
model = model.to('cpu')
model.device = 'cpu'
tta_augorder = False
# aug items within sess
from itertools import permutations
from functools import reduce
import operator
import random
perm1 = list(permutations([0, 1, 2]))
perm2 = list(permutations([3, 4, 5]))
perm3 = list(permutations([6, 7, 8]))
aug_order = []
for p1 in perm1:
# print(p1)
for p2 in perm2:
# print(p1, p2)
for p3 in perm3:
# print(p1, p2, p3)
tmp = reduce(operator.concat, [p1, p2, p3])
aug_order.append(tmp)
len_aug_order = len(aug_order)
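# 6 * 6 * 6 = 216 permutations in total: items are reordered only within their own
# 3-item group, so test-time augmentation averages predictions over a few random
# within-group orderings and maps the outputs back with the inverse permutation.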
# fp = open(f'{tb_path}/output_test_tta_augorder3_val_best.csv', 'w')
fp = open(f'{tb_path}/output_test_val_best.csv', 'w')
print('id,category', file=fp)
bs = 32
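# bs must match the DataLoader batch_size above so that `i * bs + j + 1` below reproduces the 1-based sample id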
for i, data in tqdm(enumerate(test_dl, 0)):
user_click_history, \
user_click_history_discrete_feature, \
num_user_click_history, \
item_id, item_discrete_feature, \
user_discrete_feature = data
if not tta_augorder:
sess_outputs, buy_outputs = model(
user_click_history,
user_click_history_discrete_feature,
num_user_click_history,
item_id,
item_discrete_feature,
user_discrete_feature
)
y_pred_tag = torch.round(torch.sigmoid(buy_outputs))
_, sess_pred = torch.max(sess_outputs.data, 1)
else:
sum_sess_outputs = None
sum_buy_outputs = None
total_aug_num = 3
        aug_order_shuffle = random.sample(aug_order, len(aug_order))
aug_order_shuffle = aug_order_shuffle[:total_aug_num]
for aug_idx, ao in enumerate(aug_order_shuffle):
ao = list(ao)
ao_inv = np.argsort(ao)
sess_outputs, buy_outputs = model(
user_click_history,
user_click_history_discrete_feature,
num_user_click_history,
item_id[:, ao],
item_discrete_feature[:, ao, :],
user_discrete_feature
)
buy_outputs = buy_outputs[:, ao_inv]
if aug_idx == 0:
sum_sess_outputs = nn.functional.softmax(sess_outputs, dim=1)
sum_buy_outputs = torch.sigmoid(buy_outputs)
else:
sum_sess_outputs += nn.functional.softmax(sess_outputs, dim=1)
sum_buy_outputs += torch.sigmoid(buy_outputs)
sess_outputs = sum_sess_outputs / total_aug_num
buy_outputs = sum_buy_outputs / total_aug_num
y_pred_tag = torch.round(buy_outputs)
_, sess_pred = torch.max(sess_outputs.data, 1)
for j in range(y_pred_tag.shape[0]):
if sess_pred[j] == 0:
y_pred_tag[j][:] = 0
elif sess_pred[j] == 1:
y_pred_tag[j][3:] = 0
elif sess_pred[j] == 2:
y_pred_tag[j][:3] = 1
y_pred_tag[j][6:] = 0
elif sess_pred[j] == 3:
y_pred_tag[j][:6] = 1
tmp = list(y_pred_tag[j].detach().numpy().astype(np.int32))
tmp = [str(a) for a in tmp]
p = ' '.join(tmp)
print(f'{i * bs + j + 1},{p}', file=fp)
# break
fp.close()
tta_augorder
!tail /content/drive/MyDrive/202108-bigdatacup2021/runs/2021-08-20-11:39:18-multitask_transformer_augorder_adamlr0.001_epoch10/output_test_tta_augorder3_val_best.csv
###Output
206245,1 1 1 1 1 0 0 0 0
206246,1 1 1 1 1 1 1 0 0
206247,1 1 1 1 1 1 0 0 0
206248,1 1 1 1 1 1 0 0 0
206249,0 0 0 0 0 0 0 0 0
206250,1 1 1 1 1 1 1 1 1
206251,1 1 1 0 0 0 0 0 0
206252,1 0 0 0 0 0 0 0 0
206253,1 0 0 0 0 0 0 0 0
206254,0 0 0 0 0 0 0 0 0
###Markdown
validation analysis
###Code
m = torch.load('4sess_pred_item_feat_extracion_deepermodel_epoch2.pth', map_location='cpu')
m.device = 'cpu'
m = model.eval()
m = model.to('cpu')
m.device = 'cpu'
train_samples, val_samples = load_train_data()
# train_ds = BigDataCupDataset(item_info_dict, train_samples)
# train_dl = torch.utils.data.DataLoader(dataset=train_ds, batch_size=32, shuffle=True)
val_ds = BigDataCupDataset(item_info_dict, val_samples, train_val='val')
val_dl = torch.utils.data.DataLoader(dataset=val_ds, batch_size=32, shuffle=False)
###Output
100%|██████████| 260087/260087 [00:18<00:00, 14386.48it/s]
###Markdown
tta, augorder
###Code
a = np.array([1,2,3,4,5,6,7,8,9])
permutation = list(aug_order[100])
permutation
np.argsort(permutation)
a[permutation]
a[permutation][np.argsort(permutation)]
def binary_acc_nosigmoid(sess_pred, y_pred, y_test):
# print(sess_pred)
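    # unlike binary_acc above, no sigmoid is applied here: the TTA loop below already
    # averages sigmoid(buy_outputs), so y_pred is a probability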
y_pred_tag = torch.round(y_pred)
y_pred_tag_intact = y_pred_tag.clone()
##################################
## vanilla
correct_results_sum = (y_pred_tag == y_test).sum().float()
acc1 = correct_results_sum / y_test.shape[0] / 9
real_acc1 = 0.0
for i in range(y_test.shape[0]):
correct_results_sum = (y_pred_tag[i] == y_test[i]).sum().float()
one_acc = correct_results_sum / 9
if one_acc == 1:
real_acc1 += 1
real_acc1 = real_acc1 / y_test.shape[0]
# print(y_pred_tag)
####################################
## use sess to refine y_pred_tag
for i in range(y_test.shape[0]):
if sess_pred[i] == 0:
y_pred_tag[i][:] = 0
elif sess_pred[i] == 1:
y_pred_tag[i][3:] = 0
elif sess_pred[i] == 2:
y_pred_tag[i][:3] = 1
y_pred_tag[i][6:] = 0
elif sess_pred[i] == 3:
y_pred_tag[i][:6] = 1
correct_results_sum = (y_pred_tag == y_test).sum().float()
acc2 = correct_results_sum / y_test.shape[0] / 9
real_acc2 = 0.0
for i in range(y_test.shape[0]):
correct_results_sum = (y_pred_tag[i] == y_test[i]).sum().float()
one_acc = correct_results_sum / 9
if one_acc == 1:
real_acc2 += 1
real_acc2 = real_acc2 / y_test.shape[0]
# print(y_pred_tag)
#######################################
## rule 2
y_pred_tag = y_pred_tag_intact
acc_rule2 = 0.0
real_acc_rule2 = 0.0
for i in range(y_test.shape[0]):
for j in range(9):
k = 8 - j
if k >= 6 and y_pred_tag[i][k] == 1:
y_pred_tag[i][:6] = 1
if k >= 3 and y_pred_tag[i][k] == 1:
y_pred_tag[i][:3] = 1
correct_results_sum = (y_pred_tag[i] == y_test[i]).sum().float()
a = correct_results_sum / 9
acc_rule2 += a
if a == 1:
real_acc_rule2 += 1
acc_rule2 = acc_rule2 / y_test.shape[0]
real_acc_rule2 = real_acc_rule2 / y_test.shape[0]
# print(y_pred_tag)
return acc1, acc2, acc_rule2, real_acc1, real_acc2, real_acc_rule2
# aug items within sess
from itertools import permutations
from functools import reduce
import operator
import random
perm1 = list(permutations([0, 1, 2]))
perm2 = list(permutations([3, 4, 5]))
perm3 = list(permutations([6, 7, 8]))
aug_order = []
for p1 in perm1:
# print(p1)
for p2 in perm2:
# print(p1, p2)
for p3 in perm3:
# print(p1, p2, p3)
tmp = reduce(operator.concat, [p1, p2, p3])
aug_order.append(tmp)
len_aug_order = len(aug_order)
for trial in range(10):
sess_pred_list = []
sess_gt_list = []
    one_zero = np.zeros(9)   # predicted bought (1) but actually not bought (0)
    zero_one = np.zeros(9)   # predicted not bought (0) but actually bought (1)
    one_one = np.zeros(9)    # predicted bought and actually bought
    zero_zero = np.zeros(9)  # predicted not bought and actually not bought
pred_num_list = []
gt_num_list = []
valid_cnt = 0
valid_sess_acc_sum = 0
valid_buy_acc1_sum = 0
valid_buy_real_acc1_sum = 0
valid_buy_acc2_sum = 0
valid_buy_real_acc2_sum = 0
valid_buy_acc_rule2_sum = 0
valid_buy_real_acc_rule2_sum = 0
valid_buy_acc1_gtsess_sum = 0
valid_buy_real_acc_gtsess_sum = 0
for i, data in tqdm(enumerate(val_dl, 0)):
user_click_history, \
user_click_history_discrete_feature, user_click_history_cont_feature, \
num_user_click_history, \
item_id, item_discrete_feature, item_cont_feature, \
user_discrete_feature, label, session_label = data
sum_sess_outputs = None
sum_buy_outputs = None
total_aug_num = 2
        aug_order_shuffle = random.sample(aug_order, len(aug_order))
aug_order_shuffle = aug_order_shuffle[:total_aug_num]
for aug_idx, ao in enumerate(aug_order_shuffle):
ao = list(ao)
ao_inv = np.argsort(ao)
sess_outputs, buy_outputs = model(
user_click_history,
user_click_history_discrete_feature,
user_click_history_cont_feature,
num_user_click_history,
item_id[:, ao],
item_discrete_feature[:, ao, :],
item_cont_feature[:, ao, :],
user_discrete_feature
)
buy_outputs = buy_outputs[:, ao_inv]
if aug_idx == 0:
sum_sess_outputs = nn.functional.softmax(sess_outputs, dim=1)
sum_buy_outputs = torch.sigmoid(buy_outputs)
else:
sum_sess_outputs += nn.functional.softmax(sess_outputs, dim=1)
sum_buy_outputs += torch.sigmoid(buy_outputs)
sess_outputs = sum_sess_outputs / total_aug_num
buy_outputs = sum_buy_outputs / total_aug_num
bs = user_click_history.shape[0]
## let all 0,1,2 item buy (this will reduce performance, tested)
# buy_outputs[:, :3] = 1
_, sess_predicted = torch.max(sess_outputs.data, 1)
sess_acc = (sess_predicted == session_label).sum().item() / bs
buy_acc1, buy_acc2, buy_acc_rule2, buy_real_acc1, buy_real_acc2, buy_real_acc_rule2 = binary_acc_nosigmoid(sess_predicted, buy_outputs, label)
_, buy_acc1_gtsess, _, _, buy_real_acc_gtsess, _ = binary_acc_nosigmoid(session_label, buy_outputs, label)
sess_pred_list.extend(list(sess_predicted.numpy()))
sess_gt_list.extend(list(session_label))
        y_pred_tag = torch.round(buy_outputs).detach().numpy()  # no sigmoid here: buy_outputs are already averaged probabilities
label = label.numpy()
pred_num = np.sum(y_pred_tag, axis=1)
gt_num = np.sum(label, axis=1)
pred_num_list.extend(list(pred_num))
gt_num_list.extend(list(gt_num))
valid_sess_acc_sum += sess_acc
valid_buy_acc1_sum += buy_acc1
valid_buy_real_acc1_sum += buy_real_acc1
valid_buy_acc2_sum += buy_acc2
valid_buy_real_acc2_sum += buy_real_acc2
valid_buy_acc_rule2_sum += buy_acc_rule2
valid_buy_real_acc_rule2_sum += buy_real_acc_rule2
valid_buy_acc1_gtsess_sum += buy_acc1_gtsess
valid_buy_real_acc_gtsess_sum += buy_real_acc_gtsess
valid_cnt += 1
for b in range(bs):
y_pred = y_pred_tag[b]
y_gt = label[b]
for i in range(9):
if y_pred[i] == 1 and y_gt[i] == 1:
one_one[i] += 1
elif y_pred[i] == 0 and y_gt[i] == 0:
zero_zero[i] += 1
elif y_pred[i] == 1 and y_gt[i] == 0:
one_zero[i] += 1
elif y_pred[i] == 0 and y_gt[i] == 1:
zero_one[i] += 1
print('----- VAL -----')
print('- sess acc:', valid_sess_acc_sum / valid_cnt)
print('- buy acc1:', valid_buy_acc1_sum / valid_cnt)
print('- buy real acc1:', valid_buy_real_acc1_sum / valid_cnt)
print('- buy acc2:', valid_buy_acc2_sum / valid_cnt)
print('- buy real acc2:', valid_buy_real_acc2_sum / valid_cnt)
print('- buy acc rule2:', valid_buy_acc_rule2_sum / valid_cnt)
print('- buy real acc rule2:', valid_buy_real_acc_rule2_sum / valid_cnt)
print('- buy acc1 gtsess:', valid_buy_acc1_gtsess_sum / valid_cnt)
print('- buy real acc gtsess:', valid_buy_real_acc_gtsess_sum / valid_cnt)
###Output
407it [00:14, 27.97it/s]
###Markdown
result analysis
###Code
model = m.eval()
sess_pred_list = []
sess_gt_list = []
one_zero = np.zeros(9)   # predicted bought (1) but actually not bought (0)
zero_one = np.zeros(9)   # predicted not bought (0) but actually bought (1)
one_one = np.zeros(9)    # predicted bought and actually bought
zero_zero = np.zeros(9)  # predicted not bought and actually not bought
pred_num_list = []
gt_num_list = []
valid_cnt = 0
valid_sess_acc_sum = 0
valid_buy_acc1_sum = 0
valid_buy_real_acc1_sum = 0
valid_buy_acc2_sum = 0
valid_buy_real_acc2_sum = 0
valid_buy_acc_rule2_sum = 0
valid_buy_real_acc_rule2_sum = 0
valid_buy_acc1_gtsess_sum = 0
valid_buy_real_acc_gtsess_sum = 0
for i, data in tqdm(enumerate(val_dl, 0)):
user_click_history, \
user_click_history_discrete_feature, user_click_history_cont_feature, \
num_user_click_history, \
item_id, item_discrete_feature, item_cont_feature, \
user_discrete_feature, label, session_label = data
sess_outputs, buy_outputs = model(
user_click_history,
user_click_history_discrete_feature,
user_click_history_cont_feature,
num_user_click_history,
item_id,
item_discrete_feature,
item_cont_feature,
user_discrete_feature
)
bs = user_click_history.shape[0]
## let all 0,1,2 item buy (this will reduce performance, tested)
# buy_outputs[:, :3] = 1
_, sess_predicted = torch.max(sess_outputs.data, 1)
sess_acc = (sess_predicted == session_label).sum().item() / bs
buy_acc1, buy_acc2, buy_acc_rule2, buy_real_acc1, buy_real_acc2, buy_real_acc_rule2 = binary_acc(sess_predicted, buy_outputs, label)
_, buy_acc1_gtsess, _, _, buy_real_acc_gtsess, _ = binary_acc(session_label, buy_outputs, label)
y_pred_tag = torch.round(torch.sigmoid(buy_outputs)).detach().numpy()
label = label.numpy()
pred_num = np.sum(y_pred_tag, axis=1)
gt_num = np.sum(label, axis=1)
pred_num_list.extend(list(pred_num))
gt_num_list.extend(list(gt_num))
valid_sess_acc_sum += sess_acc
valid_buy_acc1_sum += buy_acc1
valid_buy_real_acc1_sum += buy_real_acc1
valid_buy_acc2_sum += buy_acc2
valid_buy_real_acc2_sum += buy_real_acc2
valid_buy_acc_rule2_sum += buy_acc_rule2
valid_buy_real_acc_rule2_sum += buy_real_acc_rule2
valid_buy_acc1_gtsess_sum += buy_acc1_gtsess
valid_buy_real_acc_gtsess_sum += buy_real_acc_gtsess
valid_cnt += 1
for b in range(bs):
y_pred = y_pred_tag[b]
y_gt = label[b]
for i in range(9):
if y_pred[i] == 1 and y_gt[i] == 1:
one_one[i] += 1
elif y_pred[i] == 0 and y_gt[i] == 0:
zero_zero[i] += 1
elif y_pred[i] == 1 and y_gt[i] == 0:
one_zero[i] += 1
elif y_pred[i] == 0 and y_gt[i] == 1:
zero_one[i] += 1
_, sess_pred = torch.max(sess_outputs.data, 1)
sess_pred_list.extend(list(sess_pred.numpy()))
sess_gt_list.extend(list(session_label))
print('----- VAL -----')
print('- sess acc:', valid_sess_acc_sum / valid_cnt)
print('- buy acc1:', valid_buy_acc1_sum / valid_cnt)
print('- buy real acc1:', valid_buy_real_acc1_sum / valid_cnt)
print('- buy acc2:', valid_buy_acc2_sum / valid_cnt)
print('- buy real acc2:', valid_buy_real_acc2_sum / valid_cnt)
print('- buy acc rule2:', valid_buy_acc_rule2_sum / valid_cnt)
print('- buy real acc rule2:', valid_buy_real_acc_rule2_sum / valid_cnt)
print('- buy acc1 gtsess:', valid_buy_acc1_gtsess_sum / valid_cnt)
print('- buy real acc gtsess:', valid_buy_real_acc_gtsess_sum / valid_cnt)
from sklearn.metrics import confusion_matrix
from sklearn.metrics import ConfusionMatrixDisplay
# Confusion matrix whose i-th row and j-th column entry indicates
# the number of samples with
# true label being i-th class, and
# predicted label being j-th class.
a = confusion_matrix(sess_gt_list, sess_pred_list)
a_per = a / np.sum(a, axis=1, keepdims=True) * 100
cm_display = ConfusionMatrixDisplay(a, display_labels=range(4)).plot(values_format='d')
cm_display = ConfusionMatrixDisplay(a_per, display_labels=range(4)).plot(values_format='2.0f')
a = confusion_matrix(pred_num_list, gt_num_list)
a_per = a / np.sum(a, axis=1, keepdims=True) * 100
cm_display = ConfusionMatrixDisplay(a, display_labels=range(10)).plot(values_format='d')
cm_display = ConfusionMatrixDisplay(a_per, display_labels=range(10)).plot(values_format='2.0f')
s = 0
for i in range(10):
s += a[i][i]
print(s)
4605 / np.sum(a)
np.sum(a)
a = one_zero + zero_one + one_one + zero_zero
print(one_zero)
print(zero_one)
print(one_one)
print(zero_zero)
print('')
print(np.round(one_zero / a, 2))
print(np.round(zero_one / a, 2))
print(np.round(one_one / a, 2))
print(np.round(zero_zero / a, 2))
###Output
_____no_output_____ |
examples/SGT for mixtures and beta != 0.ipynb | ###Markdown
Square Gradient Theory for MixturesThis notebook has the purpose of showing examples of computing the interfacial tension of mixtures with beta != 0. First, the necessary modules are imported.
###Code
import numpy as np
from phasepy import component, mixture, prsveos
from phasepy.equilibrium import bubblePy
from phasepy.sgt import sgt_mix, msgt_mix
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
Then the mixture and EoS are created. The bubble point of the mixture at x1 = 0.3 and 320 K is computed. The ```full_output``` option allows obtaining the compositions, densities and equilibrium pressure.
###Code
hexane = component(name = 'n-Hexane', Tc = 507.6, Pc = 30.25, Zc = 0.266, Vc = 371.0, w = 0.301261,
ksv = [ 0.81185833, -0.08790848],
cii = [ 5.03377433e-24, -3.41297789e-21, 9.97008208e-19],
GC = {'CH3':2, 'CH2':4})
ethanol = component(name = 'Ethanol', Tc = 514.0, Pc = 61.37, Zc = 0.241, Vc = 168.0, w = 0.643558,
ksv = [1.27092923, 0.0440421 ],
cii = [ 2.35206942e-24, -1.32498074e-21, 2.31193555e-19],
GC = {'CH3':1, 'CH2':1, 'OH(P)':1})
mix = mixture(ethanol, hexane)
a12, a21 = np.array([1141.56994427, 125.25729314])
A = np.array([[0, a12], [a21, 0]])
mix.wilson(A)
eos = prsveos(mix, 'mhv_wilson')
T = 320 #K
X = np.array([0.3, 0.7])
P0 = 0.3 #bar
Y0 = np.array([0.7, 0.3])
sol = bubblePy(Y0, P0, X, T, eos, full_output = True)
Y = sol.Y
P = sol.P
vl = sol.v1
vv = sol.v2
#computing the density vector
rhol = X / vl
rhov = Y / vv
###Output
_____no_output_____
###Markdown
In order to set the beta correction, it is necessary to create the matrix and then use it with the ```beta_sgt``` method of the eos. If this step is not done, the ```sgt_mix``` or ```msgt_mix``` function will raise an error, as the influence parameter matrix will be singular.
###Code
bij = 0.1
beta = np.array([[0, bij], [bij, 0]])
eos.beta_sgt(beta)
###Output
_____no_output_____
###Markdown
The first possibility is to solve the BVP iteratively using orthogonal collocation. The initial interfacial length is set to 10 Angstrom and the density profiles are solved; then the interfacial length is increased until the calculated interfacial tension changes by less than a given tolerance.The initial value can be set as ```'linear'``` or ```'hyperbolic'``` to use a linear or a hyperbolic approximation. Optionally, an array can be passed to the ```rho0``` argument, or a TensionResult from another calculation, for example the density profile computed with the beta = 0 calculation.
###Code
sol = sgt_mix(rhol, rhov, T, P, eos, z0 = 10, rho0 = 'linear', full_output = True)
sol.tension
sol.rho
sol.z
sol.GPT
###Output
_____no_output_____
###Markdown
The other option is to use a modified SGT system which includes a temporal variable that helps to reach the stationary density profile while ignoring the nonlinearity of the BVP during the first iterations. This type of computation uses a fixed value for the interfacial length.The initial value options to solve the density profiles are the same as for ```sgt_mix```. In this case the previously computed TensionResult is used as an initial guess.
###Code
solm = msgt_mix(rhol, rhov, T, P, eos, z = 20, rho0 = sol, full_output = True)
solm.tension
print('BVP SGT : ', sol.tension, 'mN/m')
print('Modified BVP SGT : ', solm.tension, 'mN/m')
###Output
BVP SGT : 14.367813285943496 mN/m
Modified BVP SGT : 14.367828523644494 mN/m
###Markdown
Finally the density profiles can be compared. It can be seen that when a correction to the cross influence parameter is applied, there is just one stationary point across the interface.
###Code
# convert densities to kmol/m3
rho = sol.rho * 1000
rhom = solm.rho * 1000
fig = plt.figure(figsize = (5,5))
ax = fig.add_subplot(111)
ax.plot(rho[0], rho[1], '--')
ax.plot(rhom[0], rhom[1], '.')
ax.set_xlabel(r'$\rho_1$ / kmol m$^{-3}$ ')
ax.set_ylabel(r'$\rho_2$ / kmol m$^{-3}$ ')
###Output
_____no_output_____ |
notebooks/rtf.ipynb | ###Markdown
AimCompute and plot rejection transfer functions for different closed-loop gain values.
###Code
import os
import numpy as np
from scipy.signal import welch, windows
from matplotlib import pyplot as plt
datapath = "../data/bench/closedloop/"
fileslist = list(filter(lambda x: (x.find("30_07_2021_15") != -1) and (x.startswith("cl_gain")), os.listdir(datapath)))
def genpsd(tseries, dt, nseg=4):
nperseg = 2**int(np.log2(tseries.shape[0]/nseg))
# firstly ensures that nperseg is a power of 2
# secondly ensures that there are at least nseg segments per total time series length for noise averaging
window = windows.hann(nperseg)
freq, psd = welch(tseries, fs=1./dt, window=window, noverlap=nperseg*0.25, nperseg=nperseg, detrend=False,scaling='density')
freq, psd = freq[1:],psd[1:] #remove DC component (freq=0 Hz)
return freq, psd
fileslist
cl_data = {}
gainvals = []
for f in fileslist:
gain = f[8:8+f[8:].index("_")]
if gain not in gainvals:
gainvals.append(gain)
if f.find("time") != -1:
timefile = f
ttfile = f.replace("time", "tt")
else:
timefile = f.replace("tt", "time")
ttfile = f
times = np.load(datapath + timefile)
ttvals = np.load(datapath + ttfile)
cl_data[gain] = (times, ttvals)
times_ol, ttvals_ol = cl_data["0.0"]
f_ol, p_ol = genpsd(ttvals_ol[:,0], dt=0.02)
rtfs = {}
plt.figure(figsize=(10,10))
for gain in np.sort(gainvals):
f, p = genpsd(cl_data[gain][1][:,0], dt=0.02)
plt.loglog(f_ol, np.sqrt(p / p_ol), label="gain = " + gain)
rtfs[gain] = np.sqrt(p / p_ol)
plt.legend()
plt.xlabel("Frequency (Hz)")
plt.ylabel("Rejection TF (unitless)")
plt.title("Rejection TFs of closed-loop integrator with varying gains")
plt.savefig("../plots/rtfs.pdf")
plt.figure(figsize=(10,10))
plt.loglog(f_ol, p_ol, label="OL")
plt.loglog(f_ol, rtfs["0.5"]**2 * p_ol, label="CL")
plt.legend()
plt.xlabel("Frequency (Hz)")
plt.ylabel("Power (DM units^2 / Hz)")
plt.title("OL and CL PSDs at gain = 0.25")
###Output
_____no_output_____ |
feature_engineering/03-feature-generation.ipynb | ###Markdown
**[Feature Engineering Home Page](https://www.kaggle.com/learn/feature-engineering)**--- IntroductionIn this set of exercises, you'll create new features from the existing data. Again you'll compare the score lift for each new feature compared to a baseline model. First off, run the cells below to set up a baseline dataset and model.
###Code
import numpy as np
import pandas as pd
from sklearn import preprocessing, metrics
import lightgbm as lgb
# Set up code checking
from learntools.core import binder
binder.bind(globals())
from learntools.feature_engineering.ex3 import *
# Create features from timestamps
click_data = pd.read_csv('../input/feature-engineering-data/train_sample.csv',
parse_dates=['click_time'])
click_times = click_data['click_time']
clicks = click_data.assign(day=click_times.dt.day.astype('uint8'),
hour=click_times.dt.hour.astype('uint8'),
minute=click_times.dt.minute.astype('uint8'),
second=click_times.dt.second.astype('uint8'))
# Label encoding for categorical features
cat_features = ['ip', 'app', 'device', 'os', 'channel']
for feature in cat_features:
label_encoder = preprocessing.LabelEncoder()
clicks[feature] = label_encoder.fit_transform(clicks[feature])
def get_data_splits(dataframe, valid_fraction=0.1):
dataframe = dataframe.sort_values('click_time')
valid_rows = int(len(dataframe) * valid_fraction)
train = dataframe[:-valid_rows * 2]
# valid size == test size, last two sections of the data
valid = dataframe[-valid_rows * 2:-valid_rows]
test = dataframe[-valid_rows:]
return train, valid, test
def train_model(train, valid, test=None, feature_cols=None):
if feature_cols is None:
feature_cols = train.columns.drop(['click_time', 'attributed_time',
'is_attributed'])
dtrain = lgb.Dataset(train[feature_cols], label=train['is_attributed'])
dvalid = lgb.Dataset(valid[feature_cols], label=valid['is_attributed'])
param = {'num_leaves': 64, 'objective': 'binary',
'metric': 'auc', 'seed': 7}
num_round = 1000
print("Training model. Hold on a minute to see the validation score")
bst = lgb.train(param, dtrain, num_round, valid_sets=[dvalid],
early_stopping_rounds=20, verbose_eval=False)
valid_pred = bst.predict(valid[feature_cols])
valid_score = metrics.roc_auc_score(valid['is_attributed'], valid_pred)
print(f"Validation AUC score: {valid_score}")
if test is not None:
test_pred = bst.predict(test[feature_cols])
test_score = metrics.roc_auc_score(test['is_attributed'], test_pred)
return bst, valid_score, test_score
else:
return bst, valid_score
print("Baseline model score")
train, valid, test = get_data_splits(clicks)
_ = train_model(train, valid, test)
###Output
Baseline model score
Training model. Hold on a minute to see the validation score
Validation AUC score: 0.9622743228943659
###Markdown
1) Add interaction featuresHere you'll add interaction features for each pair of categorical features (ip, app, device, os, channel). The easiest way to iterate through the pairs of features is with `itertools.combinations`. For each new column, join the values as strings with an underscore, so 13 and 47 would become `"13_47"`. As you add the new columns to the dataset, be sure to label encode the values.
###Code
import itertools
cat_features = ['ip', 'app', 'device', 'os', 'channel']
interactions = pd.DataFrame(index=clicks.index)
# Iterate through each pair of features, combine them into interaction features
# print(itertools.combinations(cat_features, 2))
for ft1, ft2 in itertools.combinations(cat_features, 2):
new_ft_name = '_'.join([ft1, ft2])
# Convert to strings and combine
new_values = clicks[ft1].astype('str')+"_"+clicks[ft2].astype('str')
encoder = preprocessing.LabelEncoder()
interactions[new_ft_name] = encoder.fit_transform(new_values)
q_1.check()
# Uncomment if you need some guidance
# q_1.hint()
# q_1.solution()
clicks = clicks.join(interactions)
print("Score with interactions")
train, valid, test = get_data_splits(clicks)
_ = train_model(train, valid)
###Output
Score with interactions
Training model. Hold on a minute to see the validation score
Validation AUC score: 0.9626212895350978
###Markdown
Generating numerical featuresAdding interactions is a quick way to create more categorical features from the data. It's also effective to create new numerical features, you'll typically get a lot of improvement in the model. This takes a bit of brainstorming and experimentation to find features that work well.For these exercises I'm going to have you implement functions that operate on Pandas Series. It can take multiple minutes to run these functions on the entire data set so instead I'll provide feedback by running your function on a smaller dataset. 2) Number of events in the past six hoursThe first feature you'll be creating is the number of events from the same IP in the last six hours. It's likely that someone who is visiting often will download the app.Implement a function `count_past_events` that takes a Series of click times (timestamps) and returns another Series with the number of events in the last hour. **Tip:** The `rolling` method is useful for this.
###Code
def count_past_events(series):
events = pd.Series(series.index, index=series.values, name="events").sort_index()
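    # the rolling 6-hour count includes the current event itself, so subtract 1
    # to keep only the number of *past* events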
count_last_hour = events.rolling('6H').count()-1
# count_last_hour.index = events.values
# count_last_hour = count_last_hour.reindex(series.index)
return count_last_hour
# Run to check your work
q_2.check()
# Uncomment if you need some guidance
#q_2.hint()
# q_2.solution()
###Output
_____no_output_____
###Markdown
Because this can take a while to calculate on the full data, we'll load pre-calculated versions in the cell below to test model performance.
###Code
# Loading in from saved Parquet file
past_events = pd.read_parquet('../input/feature-engineering-data/past_6hr_events.pqt')
clicks['ip_past_6hr_counts'] = past_events
train, valid, test = get_data_splits(clicks)
_ = train_model(train, valid, test)
###Output
Training model. Hold on a minute to see the validation score
Validation AUC score: 0.9647255487084245
###Markdown
3) Features from future informationIn the last exercise you created a feature that looked at past events. You could also make features that use information from events in the future. Should you use future events or not? Uncomment the following line after you've decided your answer.
###Code
q_3.solution()
###Output
_____no_output_____
###Markdown
4) Time since last eventImplement a function `time_diff` that calculates the time since the last event in seconds from a Series of timestamps. This will be run like so:```pythontimedeltas = clicks.groupby('ip')['click_time'].transform(time_diff)```
###Code
def time_diff(series):
""" Returns a series with the time since the last timestamp in seconds """
return series.diff().dt.total_seconds()
# Uncomment if you need some guidance
#q_4.hint()
#q_4.solution()
# Run to check your work
q_4.check()
###Output
_____no_output_____
###Markdown
We'll again load pre-computed versions of the data, which match what your function would return
###Code
# Loading in from saved Parquet file
past_events = pd.read_parquet('../input/feature-engineering-data/time_deltas.pqt')
clicks['past_events_6hr'] = past_events
train, valid, test = get_data_splits(clicks.join(past_events))
_ = train_model(train, valid, test)
###Output
Training model. Hold on a minute to see the validation score
Validation AUC score: 0.9651116624672765
###Markdown
5) Number of previous app downloadsIt's likely that if a visitor downloaded an app previously, it'll affect the likelihood they'll download one again. Implement a function `previous_attributions` that returns a Series with the number of times an app has been downloaded (`'is_attributed' == 1`) before the current event.
###Code
def previous_attributions(series):
""" Returns a series with the """
return series.expanding(min_periods=2).sum()-series
# Run to check your work
q_5.check()
# Uncomment if you need some guidance
# q_5.hint()
# q_5.solution()
###Output
_____no_output_____
###Markdown
Again loading pre-computed data.
###Code
# Loading in from saved Parquet file
past_events = pd.read_parquet('../input/feature-engineering-data/downloads.pqt')
clicks['ip_past_6hr_counts'] = past_events
train, valid, test = get_data_splits(clicks)
_ = train_model(train, valid, test)
###Output
Training model. Hold on a minute to see the validation score
Validation AUC score: 0.965236652054989
###Markdown
6) Tree-based vs Neural Network ModelsSo far we've been using LightGBM, a tree-based model. Would these features we've generated work well for neural networks as well as tree-based models?Uncomment the following line after you've decided your answer.
###Code
q_6.solution()
###Output
_____no_output_____ |
chapter3/chapter3_bakker_post.ipynb | ###Markdown
The Python code provided below is from **Analytical Groundwater Modeling: Theory and Applications Using Python** by *Mark Bakker and Vincent Post* ISBN 9781138029392The book is published by CRC press and is available [here](https://www.routledge.com/Analytical-Groundwater-Modeling-Theory-and-Applications-using-Python/Bakker-Post/p/book/9781138029392).This Notebook is provided under the [MIT license](https://github.com/pythongroundwaterbook/analytic_gw_book/blob/main/LICENSE). © 2022 Mark Bakker and Vincent Post Steady one-dimensional flow with variable saturated thickness
###Code
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
plt.rcParams["figure.figsize"] = (8, 3) # set default figure size
plt.rcParams["contour.negative_linestyle"] = 'solid' # set default line style
plt.rcParams["figure.autolayout"] = True # same at tight_layout after every plot
###Output
_____no_output_____
###Markdown
Areal recharge between an impermeable boundary and a river
###Code
# parameters
L = 1000 # aquifer length, m
H = 10 # aquifer thickness, m
zb = -5 # aquifer bottom, m
k = 10 # hydraulic conductivity, m/d
n = 0.3 # porosity, -
hL = 4 # specified head at the right boundary, m
N = 0.001 # areal recharge, m/d
# solution
phiL = 0.5 * k * (hL - zb) ** 2
x = np.linspace(0, L, 100)
phi = -N / 2 * (x ** 2 - L ** 2) + phiL
h = zb + np.sqrt(2 * phi / k)
happrox = -N / (2 * k * H) * (x ** 2 - L ** 2) + hL
Qx = N * x
# basic plot
plt.subplot(121)
plt.plot(x, h)
plt.plot(x, happrox, 'C0--')
plt.axhline(zb, color='k')
plt.grid()
plt.xlabel('$x$ (m)')
plt.ylabel('head (m)')
plt.subplot(122)
plt.plot(x, Qx)
plt.grid()
plt.xlabel('$x$ (m)')
plt.ylabel('$Q_x$ (m$^2$/d)');
print(f'flux to left river: {-Qx[0]:.3f} m^2/d')
print(f'flux to right river: {Qx[-1]:.3f} m^2/d')
# solution
psi = np.zeros((2, len(x)))
psi[1] = -Qx
xg = np.zeros_like(psi)
xg[:] = x
zg = np.zeros_like(psi)
zg[0] = zb
zg[1] = h
# basic streamline plot
plt.subplot(111, aspect=25)
plt.contour(xg, zg, psi, 10, colors='C1', linestyles='-')
plt.plot(x, h, 'k')
plt.xlabel('$x$ (m)')
plt.ylabel('$z$ (m)');
# solution
def integral(x):
a = 2 * phiL / N + L ** 2
return np.sqrt(a - x ** 2) - np.sqrt(a) * \
np.arctanh(np.sqrt(a - x ** 2) / np.sqrt(a))
def traveltime(x):
return n / np.sqrt(N * k) * (integral(L) - integral(x))
x = np.linspace(10, L, 100)
trtime = traveltime(x)
# basic travel time plot
plt.subplot(121)
plt.plot(x, trtime)
plt.xlabel('starting location (m)')
plt.ylabel('travel time to river (d)')
plt.grid();
###Output
_____no_output_____
###Markdown
Flow over a step in the aquifer base
###Code
# parameters
k = 10 # hydraulic conductivity, m/d
z0 = 0 # base elevation left section, m
z1 = -4 # base elevation right section, m
L0 = 500 # length of left section, m
L1 = 500 # length of right section, m
L = L0 + L1 # total distance between rivers, m
h0 = 10 # specified head at the left boundary, m
hL = 0 # specified head at the right boundary, m
# solution
phi0 = 0.5 * k * (h0 - z0)**2
phiL = 0.5 * k * (hL - z1)**2
def hmin(U, L0=L0, z0=z0, phi0=phi0):
return np.sqrt(2 * (-U * L0 + phi0) / k) + z0
def hplus(U, L1=L1, z1=z1, phiL=phiL):
return np.sqrt(2 * (U * L1 + phiL) / k) + z1
# basic plot two conditions
U = np.linspace(0, 1, 100)
plt.subplot(121)
plt.plot(U, hmin(U), label='$h^-$')
plt.plot(U, hplus(U), label='$h^+$')
plt.legend()
plt.xlabel('$U$ (m$^2$/d)')
plt.ylabel('head (m) ')
plt.grid();
from scipy.optimize import fsolve
def hdiff(U):
return hmin(U) - hplus(U)
U = fsolve(hdiff, 0.7)[0] # first value of array returned by fsolve
print(f'U: {U:0.4f} m^2/d')
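# a bracketing root finder would also work here, e.g. scipy.optimize.brentq(hdiff, 0, 1), since hdiff changes sign on [0, 1] (see the plot above)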
# solution
x = np.hstack((np.linspace(0, L0 - 1e-6, 100), np.linspace(L0 + 1e-6, L, 100)))
phi = np.empty_like(x)
phi[x < L0] = -U * x[x < L0] + phi0
phi[x >= L0] = -U * (x[x >= L0] - L) + phiL
h = np.zeros_like(phi)
h[x < L0] = np.sqrt(2 * phi[x < L0] / k) + z0
h[x >= L0] = np.sqrt(2 * phi[x >= L0] / k) + z1
#
psi = np.zeros((2, len(x)))
psi[1] = -U
xg = np.zeros_like(psi)
xg[:] = x
zg = np.zeros_like(xg)
zg[0, :100] = z0
zg[0, 100:] = z1
zg[1] = h
# basic streamline plot
plt.subplot(111, aspect=25)
plt.contour(xg, zg, psi, np.linspace(-U, 0, 4), colors='C1', linestyles='-')
plt.plot(x, h, 'C0')
plt.plot(x, zg[0], 'k')
plt.xlabel('$x$ (m)')
plt.ylabel(f'$z$ (m) - VE:25x');
###Output
_____no_output_____
###Markdown
Combined confined/unconfined flow with areal recharge
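In the code below, the potential is confined, $\Phi = kHh + C$ with $C = -\tfrac{1}{2}kH^2 - kHz_b$, wherever it exceeds the transition value $\Phi_t = \tfrac{1}{2}kH^2$, and unconfined, $\Phi = \tfrac{1}{2}k(h - z_b)^2$, below it.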
###Code
# parameters
L = 1000 # aquifer length, m
H = 10 # aquifer thickness, m
zb = -5 # aquifer base, m
k = 10 # hydraulic conductivity, m/d
h0 = 6 # specified head at the left boundary, m
hL = 4 # specified head at the right boundary, m
N = 0.001 # areal recharge, m/d
# solution
C = -0.5 * k * H**2 - k * H * zb
phi0 = k * H * h0 + C
phi1 = 0.5 * k * (hL - zb)**2
phit = 0.5 * k * H**2 # transition potential
x = np.linspace(0, L, 400)
phi = -N / 2 * (x ** 2 - L * x) + (phi1 - phi0) * x / L + phi0
h = np.zeros_like(phi)
h[phi >= phit] = (phi[phi > phit] - C) / (k * H)
h[phi <= phit] = zb + np.sqrt(2 * phi[phi <= phit] / k)
Qx = N * (x - L / 2) - (phi1 - phi0) / L
happrox = -N / (2 * k * H) * (x ** 2 - L * x) + (hL - h0) * x / L + h0
Qxapprox = N * (x - L / 2) - k * H * (hL - h0) / L
# basic plot
plt.subplot(121)
plt.plot(x, h)
plt.plot(x, happrox, 'C0--')
plt.axhline(zb, color='k')
plt.grid()
plt.xlabel('$x$ (m)')
plt.ylabel('head (m)')
plt.subplot(122)
plt.plot(x, Qx)
plt.plot(x, Qxapprox, 'C1--')
plt.grid()
plt.xlabel('$x$ (m)')
plt.ylabel('$Q_x$ (m$^2$/d)');
print(f'flux to left river: {-Qx[0]:.3f} m^2/d')
print(f'flux to right river: {Qx[-1]:.3f} m^2/d')
# solution
psi = np.zeros((2, len(x)))
psi[1] = -Qx
xg = np.zeros_like(psi)
xg[:] = x
zg = np.zeros_like(psi)
zg[0] = zb
zg[1] = H + zb
zg[1, h < H + zb] = h[h < H + zb]
# basic streamline plot
plt.subplot(111, aspect=25)
plt.contour(xg, zg, psi, 20, colors='C1', linestyles='-')
plt.plot(xg[0], zg[1], 'C0')
plt.xlabel('$x$ (m)')
plt.ylabel('$z$ (m)');
###Output
_____no_output_____ |
notebooks/Jonas_7-1.ipynb | ###Markdown
Cross-Validation KNN. Changes to `Jonas_7`: * Use new features from Jose & Roger * Fix potential bfill overfitting issue
###Code
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import RandomizedSearchCV, KFold
from sklearn.metrics import log_loss, make_scorer
lags = 100
for s in test.station.unique():
data = wide_series[list(wide_series.reset_index().day < '2015-01-01')]
to_lag = data[[c for c in data.columns if not c in ['{}_{}'.format(s, agg) for agg in agg_types]]]
features = create_lagged_features(to_lag, lags)\
.join(extra_features[extra_features.station == s].set_index('date'))\
.join(rolling_mean_features[rolling_mean_features.station == s]
.set_index('date').drop(['station', 'conc_obs', 'month', 'week_day', 'weekend'], axis=1))\
.join(obs_and_mods[obs_and_mods.station == s][['Concentration', 'day']].groupby('day').max())\
.fillna(method='ffill').fillna(0)
X = features[[c for c in features.columns if not c in [
'time', 'datetime', 'Concentration', 'target', 'station'
]]].values
y = (features['Concentration'].fillna(method='ffill').values > 100).astype(int)
params = pd.DataFrame({
'n_neighbors': list(range(1, 25)),
'score': [np.nan] * 24
})
for i, r in params[['n_neighbors']].iterrows():
kf = KFold(n_splits=3)
metric = []
for train_index, test_index in kf.split(X):
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
p = dict(r)
model = KNeighborsClassifier(**p)
model.fit(X_train, y_train)
y_pred = model.predict_proba(X_test)
metric.append(
log_loss(y_test, y_pred, labels=(0, 1))
)
params.loc[i, 'score'] = np.mean(metric)
print(params.iloc[i])
params.to_csv('../reports/cv_knn_Jonas_7-1_{}_{}.csv'
.format(s, len(glob.glob('../reports/cv_knn_Jonas_7-1_{}_*'.format(s)))))
params.head()
###Output
n_neighbors 1.00
score 4.87
Name: 0, dtype: float64
n_neighbors 2.00
score 2.59
Name: 1, dtype: float64
n_neighbors 3.00
score 1.99
Name: 2, dtype: float64
n_neighbors 4.00
score 1.76
Name: 3, dtype: float64
n_neighbors 5.00
score 1.63
Name: 4, dtype: float64
n_neighbors 6.00
score 1.54
Name: 5, dtype: float64
n_neighbors 7.00
score 1.37
Name: 6, dtype: float64
n_neighbors 8.00
score 1.28
Name: 7, dtype: float64
n_neighbors 9.00
score 1.24
Name: 8, dtype: float64
n_neighbors 10.00
score 1.23
Name: 9, dtype: float64
n_neighbors 11.00
score 1.15
Name: 10, dtype: float64
n_neighbors 12.00
score 1.10
Name: 11, dtype: float64
n_neighbors 13.00
score 1.06
Name: 12, dtype: float64
n_neighbors 14.00
score 0.97
Name: 13, dtype: float64
n_neighbors 15.00
score 0.97
Name: 14, dtype: float64
n_neighbors 16.00
score 0.84
Name: 15, dtype: float64
n_neighbors 17.00
score 0.76
Name: 16, dtype: float64
n_neighbors 18.00
score 0.71
Name: 17, dtype: float64
n_neighbors 19.00
score 0.71
Name: 18, dtype: float64
n_neighbors 20.00
score 0.72
Name: 19, dtype: float64
n_neighbors 21.00
score 0.67
Name: 20, dtype: float64
n_neighbors 22.00
score 0.63
Name: 21, dtype: float64
n_neighbors 23.00
score 0.63
Name: 22, dtype: float64
n_neighbors 24.00
score 0.63
Name: 23, dtype: float64
n_neighbors 1.00
score 4.54
Name: 0, dtype: float64
n_neighbors 2.00
score 2.40
Name: 1, dtype: float64
n_neighbors 3.00
score 1.86
Name: 2, dtype: float64
n_neighbors 4.00
score 1.50
Name: 3, dtype: float64
n_neighbors 5.00
score 1.32
Name: 4, dtype: float64
n_neighbors 6.00
score 1.10
Name: 5, dtype: float64
n_neighbors 7.00
score 1.10
Name: 6, dtype: float64
n_neighbors 8.00
score 1.06
Name: 7, dtype: float64
n_neighbors 9.00
score 1.01
Name: 8, dtype: float64
n_neighbors 10.00
score 0.97
Name: 9, dtype: float64
n_neighbors 11.00
score 0.97
Name: 10, dtype: float64
n_neighbors 12.00
score 0.97
Name: 11, dtype: float64
n_neighbors 13.00
score 0.98
Name: 12, dtype: float64
n_neighbors 14.00
score 0.93
Name: 13, dtype: float64
n_neighbors 15.00
score 0.89
Name: 14, dtype: float64
n_neighbors 16.00
score 0.81
Name: 15, dtype: float64
n_neighbors 17.00
score 0.81
Name: 16, dtype: float64
n_neighbors 18.00
score 0.81
Name: 17, dtype: float64
n_neighbors 19.00
score 0.81
Name: 18, dtype: float64
n_neighbors 20.00
score 0.81
Name: 19, dtype: float64
n_neighbors 21.00
score 0.77
Name: 20, dtype: float64
n_neighbors 22.00
score 0.72
Name: 21, dtype: float64
n_neighbors 23.00
score 0.73
Name: 22, dtype: float64
n_neighbors 24.00
score 0.72
Name: 23, dtype: float64
n_neighbors 1.00
score 12.72
Name: 0, dtype: float64
n_neighbors 2.00
score 7.03
Name: 1, dtype: float64
n_neighbors 3.00
score 4.25
Name: 2, dtype: float64
n_neighbors 4.00
score 3.13
Name: 3, dtype: float64
n_neighbors 5.00
score 2.19
Name: 4, dtype: float64
n_neighbors 6.00
score 1.58
Name: 5, dtype: float64
n_neighbors 7.00
score 1.27
Name: 6, dtype: float64
n_neighbors 8.00
score 1.01
Name: 7, dtype: float64
n_neighbors 9.00
score 0.91
Name: 8, dtype: float64
n_neighbors 10.00
score 0.82
Name: 9, dtype: float64
n_neighbors 11.00
score 0.82
Name: 10, dtype: float64
n_neighbors 12.00
score 0.69
Name: 11, dtype: float64
n_neighbors 13.00
score 0.68
Name: 12, dtype: float64
n_neighbors 14.00
score 0.63
Name: 13, dtype: float64
n_neighbors 15.00
score 0.63
Name: 14, dtype: float64
n_neighbors 16.00
score 0.62
Name: 15, dtype: float64
n_neighbors 17.00
score 0.62
Name: 16, dtype: float64
n_neighbors 18.00
score 0.62
Name: 17, dtype: float64
n_neighbors 19.00
score 0.62
Name: 18, dtype: float64
n_neighbors 20.00
score 0.62
Name: 19, dtype: float64
n_neighbors 21.00
score 0.62
Name: 20, dtype: float64
n_neighbors 22.00
score 0.62
Name: 21, dtype: float64
n_neighbors 23.00
score 0.62
Name: 22, dtype: float64
n_neighbors 24.00
score 0.61
Name: 23, dtype: float64
n_neighbors 1.00
score 13.39
Name: 0, dtype: float64
n_neighbors 2.00
score 6.42
Name: 1, dtype: float64
n_neighbors 3.00
score 3.66
Name: 2, dtype: float64
n_neighbors 4.00
score 2.35
Name: 3, dtype: float64
n_neighbors 5.00
score 1.72
Name: 4, dtype: float64
n_neighbors 6.00
score 1.19
Name: 5, dtype: float64
n_neighbors 7.00
score 0.92
Name: 6, dtype: float64
n_neighbors 8.00
score 0.84
Name: 7, dtype: float64
n_neighbors 9.00
score 0.75
Name: 8, dtype: float64
n_neighbors 10.00
score 0.66
Name: 9, dtype: float64
n_neighbors 11.00
score 0.66
Name: 10, dtype: float64
n_neighbors 12.00
score 0.66
Name: 11, dtype: float64
n_neighbors 13.00
score 0.65
Name: 12, dtype: float64
n_neighbors 14.00
score 0.65
Name: 13, dtype: float64
n_neighbors 15.00
score 0.65
Name: 14, dtype: float64
n_neighbors 16.00
score 0.65
Name: 15, dtype: float64
n_neighbors 17.00
score 0.65
Name: 16, dtype: float64
n_neighbors 18.00
score 0.65
Name: 17, dtype: float64
n_neighbors 19.00
score 0.65
Name: 18, dtype: float64
n_neighbors 20.00
score 0.65
Name: 19, dtype: float64
n_neighbors 21.00
score 0.65
Name: 20, dtype: float64
n_neighbors 22.00
score 0.64
Name: 21, dtype: float64
n_neighbors 23.00
score 0.64
Name: 22, dtype: float64
n_neighbors 24.00
score 0.64
Name: 23, dtype: float64
n_neighbors 1.00
score 3.93
Name: 0, dtype: float64
n_neighbors 2.00
score 2.25
Name: 1, dtype: float64
n_neighbors 3.00
score 1.93
Name: 2, dtype: float64
n_neighbors 4.00
score 1.89
Name: 3, dtype: float64
n_neighbors 5.00
score 1.75
Name: 4, dtype: float64
n_neighbors 6.00
score 1.62
Name: 5, dtype: float64
n_neighbors 7.00
score 1.40
Name: 6, dtype: float64
n_neighbors 8.00
score 1.26
Name: 7, dtype: float64
n_neighbors 9.00
score 1.27
Name: 8, dtype: float64
n_neighbors 10.00
score 1.22
Name: 9, dtype: float64
n_neighbors 11.00
score 1.09
Name: 10, dtype: float64
n_neighbors 12.00
score 1.05
Name: 11, dtype: float64
n_neighbors 13.00
score 1.01
Name: 12, dtype: float64
n_neighbors 14.00
score 1.01
Name: 13, dtype: float64
n_neighbors 15.00
score 0.92
Name: 14, dtype: float64
n_neighbors 16.00
score 0.88
Name: 15, dtype: float64
n_neighbors 17.00
score 0.88
Name: 16, dtype: float64
n_neighbors 18.00
score 0.84
Name: 17, dtype: float64
n_neighbors 19.00
score 0.80
Name: 18, dtype: float64
n_neighbors 20.00
score 0.71
Name: 19, dtype: float64
n_neighbors 21.00
score 0.67
Name: 20, dtype: float64
n_neighbors 22.00
score 0.63
Name: 21, dtype: float64
n_neighbors 23.00
score 0.59
Name: 22, dtype: float64
n_neighbors 24.00
score 0.58
Name: 23, dtype: float64
n_neighbors 1.00
score 5.77
Name: 0, dtype: float64
n_neighbors 2.00
score 2.54
Name: 1, dtype: float64
n_neighbors 3.00
score 1.59
Name: 2, dtype: float64
n_neighbors 4.00
score 1.18
Name: 3, dtype: float64
n_neighbors 5.00
score 0.96
Name: 4, dtype: float64
n_neighbors 6.00
score 0.91
Name: 5, dtype: float64
n_neighbors 7.00
score 0.78
Name: 6, dtype: float64
n_neighbors 8.00
score 0.69
Name: 7, dtype: float64
n_neighbors 9.00
score 0.65
Name: 8, dtype: float64
###Markdown
KNN Classification per Station: * Targeting `target` * Using imputed for 2015
###Code
tall_series_mod = obs_and_mods.fillna(0).groupby(['day', 'station']).agg({
'pred_0_days': agg_types
})['pred_0_days'].reset_index().rename(columns={
'pred_0_days': 'Concentration'
})
aggs = [tall_series_mod.pivot(index='day', columns='station', values=agg) for agg in agg_types]
aggs = [df.rename(columns={c: c + '_' + agg for c in df.columns}) for df, agg in zip(aggs, agg_types)]
wide_series_mod = pd.concat(aggs, axis=1)
wide_series_mod.tail()
to_impute = wide_series.loc[test['date'].unique()]
for s in obs_and_mods.station.unique():
columns = [s + '_' + agg for agg in agg_types]
to_impute[columns] = wide_series_mod.loc[to_impute.reset_index().day, columns]
originals = wide_series[list(~wide_series.reset_index().day.isin(test['date'].unique()))]
wide_series_imputed = pd.concat([to_impute, originals]).sort_index()
wide_series_imputed.head()
all_frames = []
for s in test.station.unique():
frames = [pd.read_csv(f, index_col=0) for f in glob.glob('../reports/cv_knn_Jonas_7-1_*{}*'.format(s))]
try:
frame = pd.concat(frames)
frame['station'] = s
all_frames.append(frame)
except ValueError:
pass
cv_results = pd.concat(all_frames)
cv_results = cv_results[cv_results['score'] > 0]
cv_results.sort_values(['score', 'station']).drop_duplicates('station')
test_params = []
for i, r in cv_results.sort_values(['score', 'station']).drop_duplicates('station').iterrows():
p = dict(r)
del p['score']
del p['station']
test_params.append((r['station'], p))
test_params
# from sklearn.neighbors import KNeighborsClassifier
# from sklearn.model_selection import RandomizedSearchCV, KFold
# from sklearn.metrics import log_loss, make_scorer
# lags = 100
# preds_by_station = []
# for s, p in test_params:
# data = pd.concat([
# wide_series[list(wide_series.reset_index().day < '2015-01-01')],
# wide_series_imputed[list(wide_series_imputed.reset_index().day >= '2015-01-01')]
# ])
# to_lag = data[[c for c in data.columns if not c in ['{}_{}'.format(s, agg) for agg in agg_types]]]
# features = create_lagged_features(to_lag, lags)\
# .join(extra_features[extra_features.station == s].set_index('date'))\
# .join(rolling_mean_features[rolling_mean_features.station == s]
# .set_index('date').drop(['station', 'conc_obs', 'weekend', 'week_day', 'month'], axis=1))\
# .join(obs_and_mods[obs_and_mods.station == s][['Concentration', 'day']].groupby('day').max())\
# .fillna(method='ffill').fillna(0)
# X = features[[c for c in features.columns if not c in [
# 'time', 'datetime', 'Concentration', 'target', 'station'
# ]]].values
# y = (features['Concentration'].values > 100).astype(int)
# y_pred_all = copy.deepcopy(y).astype(float)
# metrics_all = []
# kf = KFold(n_splits=3)
# for train_index, test_index in kf.split(X):
# X_train, X_test = X[train_index], X[test_index]
# y_train, y_test = y[train_index], y[test_index]
# model = KNeighborsClassifier(**p)
# model.fit(X_train, y_train)
# y_pred = model.predict_proba(X_test)[:, 1]
# y_pred_all[test_index] = y_pred
# metrics_all.append(log_loss(y_test, y_pred, labels=(0, 1)))
# preds_by_station.append((s, y_pred_all))
predictions = pd.DataFrame(np.column_stack([e[1] for e in preds_by_station]), columns=test.station.unique())
predictions['date'] = features['day']
predictions.to_csv('../reports/pred_knn_Jonas_7-1_{}.csv'.format(len(glob.glob('../reports/pred_knn_Jonas_7-1_*'))))
predictions = predictions.set_index('date')
print('CV Metric: {}'.format(np.mean(metrics_all)))
predictions.tail()
###Output
CV Metric: 1.1967581850206441
###Markdown
KNN test 2013 on 2014
###Code
to_lag = data[[c for c in data.columns if not c in ['{}_{}'.format(s, agg) for agg in agg_types]]]
features = create_lagged_features(to_lag, lags)\
.join(extra_features[extra_features.station == s].set_index('date'))\
.join(rolling_mean_features[rolling_mean_features.station == s]
.set_index('date').drop(['station', 'conc_obs', 'month', 'week_day', 'weekend'], axis=1))\
.join(obs_and_mods[obs_and_mods.station == s][['Concentration', 'day']].groupby('day').max())\
.fillna(method='ffill').fillna(0)
X = features[[c for c in features.columns if not c in [
'time', 'datetime', 'Concentration', 'target', 'station'
]]]
y = (features['Concentration'].values > 100).astype(int)
features = features.drop(columns='day', errors='ignore')
X_train = X[list(X.reset_index()['day'] < '2014-01-01')]
X_test = X[list((X.reset_index()['day'] >= '2014-01-01') & (X.reset_index()['day'] < '2015-01-01'))]
y_train = (features[list(features.reset_index()['day'] < '2014-01-01')]['Concentration'] > 100).astype(int)
y_test = (features[list((features.reset_index()['day'] >= '2014-01-01') & (features.reset_index()['day'] < '2015-01-01'))]['Concentration'] > 100).astype(int)
model = KNeighborsClassifier(n_neighbors=20)
model.fit(X_train, y_train)
log_loss(y_test, model.predict_proba(X_test), labels=(0, 1))
###Output
_____no_output_____ |
pycon-workshop-2020/1 - Introduction to Encrypted Tensors.ipynb | ###Markdown
Introduction to Encrypted Tensors
###Code
import sys
import torch
# python 3.7 is required
assert sys.version_info[0] == 3 and sys.version_info[1] == 7, "python 3.7 is required"
import crypten
crypten.init()
###Output
_____no_output_____
###Markdown
Creating a CrypTensor
###Code
x = crypten.cryptensor([1.0, 2.0, 3.0])
x
x.get_plain_text()
y = crypten.cryptensor(torch.tensor([4.0, 5.0, 6.0]))
###Output
_____no_output_____
###Markdown
* Note this is a single party case to demo the API. In a single party setting, tensors are encoded, but not encrypted since secret shares can't be exchanged with other parties. Operations on CrypTensors
###Code
x + 2.0
(x + 2.0).get_plain_text()
x + y
y.dot(x)
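# the dot product is returned as a CrypTensor; calling .get_plain_text() on it would reveal 1*4 + 2*5 + 3*6 = 32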
###Output
_____no_output_____
###Markdown
For a full list of the supported operations see the [docs](https://crypten.readthedocs.io/en/latest/) Example: Compute Mean Squared Loss
###Code
print("x", x)
print("y", y)
squared_loss = (x - y)**2
mean_squared_loss = squared_loss.mean()
print(mean_squared_loss.get_plain_text())
###Output
tensor(9.)
###Markdown
PyTorch Version
###Code
x_pytorch = torch.tensor([1.0, 2.0, 3.0])
y_pytorch = torch.tensor([4.0, 5.0, 6.0])
squared_loss_pytorch = (x_pytorch - y_pytorch)**2
print(squared_loss_pytorch.mean())
###Output
tensor(9.)
|
code/diluted_sample.ipynb | ###Markdown
Notebook to analyze the data from the diluted standard (Figure 2 & 3)
###Code
import os
import pandas as pd
import scanpy as sc
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
from sklearn.metrics import silhouette_samples
import seaborn as sns
import sceptre as spt
# create result folder
res_dir = '../results/diluted_sample/'
if not os.path.exists(res_dir):
os.makedirs(res_dir)
# load PD tables
prot = {'MS2': pd.read_table('../data/diluted_sample/200x/MS2_Proteins.txt'),
'MS3': pd.read_table('../data/diluted_sample/200x/MS3_Proteins.txt'),
'reticle': pd.read_table('../data/diluted_sample/200x/RETICLE_Proteins.txt')}
psms = {'MS2': pd.read_table('../data/diluted_sample/200x/MS2_PSMs.txt'),
'MS3': pd.read_table('../data/diluted_sample/200x/MS3_PSMs.txt'),
'reticle': pd.read_table('../data/diluted_sample/200x/RETICLE_PSMs.txt')}
msms = {'MS2': pd.read_table('../data/diluted_sample/200x/MS2_MSMSSpectrumInfo.txt'),
'MS3': pd.read_table('../data/diluted_sample/200x/MS3_MSMSSpectrumInfo.txt'),
'reticle': pd.read_table('../data/diluted_sample/200x/RETICLE_MSMSSpectrumInfo.txt')}
quan = {'MS2': pd.read_table('../data/diluted_sample/200x/MS2_QuanSpectra.txt'),
'MS3': pd.read_table('../data/diluted_sample/200x/MS3_QuanSpectra.txt'),
'reticle': pd.read_table('../data/diluted_sample/200x/RETICLE_QuanSpectra.txt')}
files = {'MS2': pd.read_table('../data/diluted_sample/200x/MS2_InputFiles.txt'),
'MS3': pd.read_table('../data/diluted_sample/200x/MS3_InputFiles.txt'),
'reticle': pd.read_table('../data/diluted_sample/200x/RETICLE_InputFiles.txt')}
# mark and remove potential contaminants
contaminants = pd.read_table('../data/contaminants.txt')['Accession']
for p in prot.keys():
prot[p]['contaminant'] = prot[p]['Accession'].isin(contaminants)
prot[p] = prot[p][prot[p]['contaminant']==False]
# map methods and their replicates
mapping = pd.read_csv('../data/diluted_sample/200x/mapping.csv')
mapping['Method'] = mapping[['method', 'quant it']].apply(lambda x: '_'.join(x.astype(str)), axis=1)
cells = {'126': 'booster',
'127N': 'BLAST1',
'127C': 'empty',
'128N': 'LSC1',
'128C': 'empty',
'129N': 'PROG1',
'129C': 'empty',
'130N': 'empty',
'130C': 'LSC2',
'131N': 'BLAST2',
'131C': 'PROG2',
'132N': 'empty',
'132C': 'BLAST3',
'133N': 'PROG3',
'133C': 'LSC3',
'134N': 'empty'}
for p in psms.keys():
psms[p] = psms[p].merge(mapping.set_index('file')['Method'], left_on='Spectrum File', right_index=True)
msms[p] = msms[p].merge(mapping.set_index('file')['Method'], left_on='Spectrum File', right_index=True)
quan[p] = quan[p].merge(mapping.set_index('file')['Method'], left_on='Spectrum File', right_index=True)
# load into scanpy
adata = {}
for p in prot.keys():
cols = list(map(lambda x: x.split(' '), prot[p].columns[prot[p].columns.str.contains('Abundance')]))
file_id = [x[-2] for x in cols]
channel = [x[-1] for x in cols]
method = [psms[p][['File ID', 'Method']].drop_duplicates().set_index('File ID').loc[f, 'Method'] for f in file_id]
cell = [cells[x] for x in channel]
celltype = [x[:-1] for x in cell]
quant = prot[p].set_index('Accession').copy()
quant = quant[quant.columns[quant.columns.str.contains('Abundance')]]
quant.columns = pd.MultiIndex.from_tuples(zip(file_id, channel, method, cell, celltype), names=['File ID', 'Channel', 'Method', 'Cell Pool', 'Celltype'])
quant = quant.loc[:, (~quant.columns.get_level_values(3).str.contains('empty|booster'))] # remove empty channels
quant = quant.loc[:, (~quant.columns.get_level_values(3).str.contains('1'))] # remove the first cells due to contamination from the booster
quant[quant < 1.1] = pd.NA # set S/N values below 1.1 to NA
quant = quant.dropna(how='all').fillna(0) # remove all NA proteins and fill remaining NA with 0
# save to file and load it in scanpy
quant.to_csv('../results/tmp/scanpy_data.txt', sep='\t', header=False, index=True)
adata[p] = sc.read_text('../results/tmp/scanpy_data.txt', delimiter='\t', first_column_names=False).T
adata[p].obs = quant.columns.to_frame(index=False)
adata[p].obs.index = adata[p].obs.index.astype(str)
# merge adatas
adata = sc.AnnData.concatenate(*adata.values(), join='outer', batch_key='PD', batch_categories=adata.keys(), fill_value=0)
# apply file normalization to the files of each method.
# file normalization equalizes the median value of each protein across files.
for j in adata.obs['Method'].unique():
adata_slice = adata[adata.obs['Method']==j].copy()
spt.normalize(adata_slice, method='file', drop_na=False)
adata.X[adata.obs['Method']==j] = adata_slice.X
# calculate basic stats for each method
ms_stats = []
for m in ['MS2', 'MS3', 'reticle']:
for it in ['500', '750']:
fs = quan[m][quan[m]['Method']==f"{m}_{it}"]['File ID'].unique()
for f in fs:
quan_spectra = len(quan[m][(quan[m]['Method']==f"{m}_{it}") & (quan[m]['File ID']==f)])
identified_quan_spectra = len(quan[m][(quan[m]['Method']==f"{m}_{it}") & (quan[m]['File ID']==f) & (quan[m]['Number of PSMs']>0)])
PSM_rate_quan_spectra = identified_quan_spectra / quan_spectra
peptides = len(psms[m][(psms[m]['Method']==f"{m}_{it}") & (psms[m]['File ID']==f)]['Annotated Sequence'].unique())
proteins = len(psms[m][(psms[m]['Method']==f"{m}_{it}") & (psms[m]['File ID']==f)]['Master Protein Accessions'].unique())
adata_slice = adata[(adata.obs['Method']==f"{m}_{it}") & (adata.obs['File ID']==f)].copy()
sc.pp.filter_genes(adata_slice, min_cells=1)
avg_IDs = (~pd.DataFrame(adata_slice.X).replace(0, np.nan).isna()).sum(axis=1).mean().round().astype(int)
ms_stats.append({'method': f"{m} {it}ms", 'quan_spectra': quan_spectra, 'identified_quan_spectra': identified_quan_spectra,
'PSM_rate_quan_spectra': PSM_rate_quan_spectra, 'avg_IDs': avg_IDs})
ms_stats = pd.DataFrame(ms_stats)
ms_stats['method'] = ms_stats['method'].replace({'MS3 500ms': 'RTS-MS3 500ms', 'MS3 750ms': 'RTS-MS3 750ms', 'reticle 500ms': 'RETICLE 500ms', 'reticle 750ms': 'RETICLE 750ms'})
ms_stats
# plot the stats
fig, axs = plt.subplots(2, 2, figsize=(3.3, 5), gridspec_kw={'wspace':0.4, 'hspace':0.35})
axs = axs.flatten()
s = 3
sns.barplot(data=ms_stats, x='method', y='quan_spectra', ax=axs[0], ci=None, palette='Paired')
sns.swarmplot(data=ms_stats, x='method', y='quan_spectra', ax=axs[0], color=".15", size=s)
axs[0].title.set_text('Quant.\nspectra')
sns.barplot(data=ms_stats, x='method', y='identified_quan_spectra', ax=axs[1], ci=None, palette='Paired')
sns.swarmplot(data=ms_stats, x='method', y='identified_quan_spectra', ax=axs[1], color=".15", size=s)
axs[1].title.set_text('Identified\nquant. spectra')
sns.barplot(data=ms_stats, x='method', y='PSM_rate_quan_spectra', ax=axs[2], ci=None, palette='Paired')
sns.swarmplot(data=ms_stats, x='method', y='PSM_rate_quan_spectra', ax=axs[2], color=".15", size=s)
axs[2].title.set_text('Identification\nrate')
p = sns.barplot(data=ms_stats, x='method', y='avg_IDs', ax=axs[3], ci=None, palette='Paired')
sns.swarmplot(data=ms_stats, x='method', y='avg_IDs', ax=axs[3], color=".15", size=s)
axs[3].title.set_text('Avg. "single-cell"\n proteins')
patches = [matplotlib.patches.Patch(color=sns.color_palette(palette='Paired', desat=0.75)[i], label=t) for i,t in enumerate(t.get_text() for t in p.get_xticklabels())]
for ax in axs:
ax.grid(axis='y')
ax.set_ylabel("")
ax.set_xlabel("")
ax.tick_params(
axis='x',
which='both',
bottom=False,
top=False,
labelbottom=False)
fig.subplots_adjust(bottom=0.3, wspace=0.33)
axs[3].legend(handles = patches , loc='upper center',
bbox_to_anchor=(0.5, -0.1), fancybox=False, shadow=False, ncol=2)
plt.savefig(res_dir + 'ms_stats.pdf', transparent=True, bbox_inches='tight')
# investigate closeout
psms_closeout = pd.read_table('../data/diluted_sample/100x/RETICLE_closeout_PSMs.txt')
sic = ['Abundance 130C', 'Abundance 131N', 'Abundance 131C', 'Abundance 132C', 'Abundance 133N', 'Abundance 133C']
# only with single-cell signal
psms_closeout = psms_closeout[(psms_closeout[sic] > 1.1).any(axis=1)]
df = psms_closeout.groupby(['Spectrum File', 'Master Protein Accessions'])['Annotated Sequence'].unique().apply(len).to_frame().reset_index()
# only overlapping proteins
ps = df.groupby('Spectrum File')['Master Protein Accessions'].unique()
intersec = [p for p in ps[0] if (p in ps[1]) and (p in ps[2])]
df = df[df['Master Protein Accessions'].isin(intersec)]
# set max bin
df.loc[df['Annotated Sequence']>=8, 'Annotated Sequence'] = 8
df['Spectrum File'] = df['Spectrum File'].replace({'20210408_HB_BF_U3000_uPAC_50cm_precol_114min_TMTpro2_150nlmin_2CV_scMS_RETICLE_turbo23ms_500ms_100c_2_20210410025807.raw': '4',
'20210408_HB_BF_U3000_uPAC_50cm_precol_114min_TMTpro2_150nlmin_2CV_scMS_RETICLE_turbo23ms_500ms_100c_nocloseout.raw': 'No Close-Out',
'20210408_HB_BF_U3000_uPAC_50cm_precol_114min_TMTpro2_150nlmin_2CV_scMS_RETICLE_turbo23ms_500ms_100c_maxpep10.raw': '10'})
df.columns = ['Peptide Close-Out', 'Master Protein Accessions', 'Peptides per protein']
fig, ax = plt.subplots(figsize=(3.3, 1.5))
sns.histplot(data=df, x='Peptides per protein', hue='Peptide Close-Out', ax=ax, multiple="dodge", stat="probability", discrete=True, common_norm=False, shrink=.7, palette='Dark2')
ax.grid(axis='y')
ax.set_xticks([1, 2, 3, 4, 5, 6, 7, 8])
ax.set_xticklabels(['1', '2', '3', '4', '5', '6', '7', '> 7'])
plt.savefig(res_dir + 'closeout.pdf', transparent=True, bbox_inches='tight')
method_dict = {'MS2_500': 'MS2 500ms', 'MS2_750': 'MS2 750ms',
'MS3_500': 'RTS-MS3 500ms', 'MS3_750': 'RTS-MS3 750ms',
'reticle_500': 'RETICLE 500ms', 'reticle_750': 'RETICLE 750ms'}
adata.obs
# compare fold changes to library for BLAST and LSC including pvalues
ms3_lib = pd.read_table('../data/library_MS3_Proteins.txt')
par = {}
fcs = {}
tps = {}
for m in adata.obs['Method'].unique():
adata_slice = adata[(adata.obs['Method']==m)].copy()
sc.pp.normalize_total(adata_slice, exclude_highly_expressed=True) # median shift of total intensity across cells
sc.pp.log1p(adata_slice, base=2) # log2(x+1) transformation
# de test
spt.de_test(adata_slice, by='Celltype', group1='LSC', group2='BLAST', use_raw=False)
res = adata_slice.uns['de_test']['results']
res = res.merge(ms3_lib[['Accession',
'Abundance Ratio log2 LSC BLAST',
'Abundance Ratio Adj P-Value LSC BLAST']].rename(columns={
'Abundance Ratio log2 LSC BLAST':'log2foldchange_lib',
'Abundance Ratio Adj P-Value LSC BLAST': 'pval_adj_lib'
}), left_on='gene', right_on='Accession')
res = res.dropna()
testable = len(res)
tps[m] = res[(res['pval'] <= 0.05) & (res['pval_adj_lib'] <= 0.05) & ((res['log2foldchange']*res['log2foldchange_lib'])>= 0)]['gene']
TP = len(tps[m])
FP = len(res[((res['pval'] <= 0.05) & (res['pval_adj_lib'] <= 0.05) & ((res['log2foldchange']*res['log2foldchange_lib'])< 0)) | ((res['pval'] <= 0.05) & (res['pval_adj_lib'] > 0.05))])
TN = len(res[(res['pval'] > 0.05) & (res['pval_adj_lib'] > 0.05)])
FN = len(res[(res['pval'] > 0.05) & (res['pval_adj_lib'] <= 0.05)])
FDR = FP / (TP + FP)
sensitivity = TP / (TP + FN)
specificity = FP / (TN + FP)
par[m] = [testable, TP, FP, TN, FN, FDR, sensitivity, specificity]
means = pd.DataFrame({'gene': adata_slice.var_names, 'mean': np.mean(adata_slice.X, axis=0)})
res = res.merge(means, on='gene', how='left')
res['ratio_fc'] = res['log2foldchange'] / res['log2foldchange_lib']
res['error_fc'] = abs(res['log2foldchange'] - res['log2foldchange_lib'])
fcs[m] = res.copy()
de_stats = pd.DataFrame(par, index=['Testable', 'TP', 'FP', 'TN', 'FN', 'FDR', 'Sensitivity', 'Specificity']).T
de_stats = de_stats.round(2)
all_means = pd.concat([x[['gene', 'mean']] for x in fcs.values()]).groupby('gene').mean()
# expression matrix sorted by S/N
sorted_prot = pd.DataFrame(index=pd.DataFrame(ms3_lib[ms3_lib.columns[ms3_lib.columns.str.contains('Abundances Normalized')]].median(axis=1).values, index=ms3_lib['Accession']).dropna().sort_values(0, ascending=False).index)
sorted_prot = pd.DataFrame(index=all_means.sort_values('mean', ascending=False).index)
for m in adata.obs['Method'].unique():
fs = adata[(adata.obs['Method']==m)].obs['File ID'].unique()
for i, f in enumerate(fs):
adata_slice = adata[(adata.obs['Method']==m) & (adata.obs['File ID']==f)].copy()
sc.pp.normalize_total(adata_slice, exclude_highly_expressed=True) # median shift of total intensity across cells
sc.pp.log1p(adata_slice, base=2) # log2(x+1) transformation
df = pd.DataFrame(pd.DataFrame(adata_slice.X.T).replace(0, np.nan).mean(axis=1).values, index=adata_slice.var_names)
df.columns = [m+f]
if i == 1:
df.columns = [m]
sorted_prot = sorted_prot.merge(df, left_index=True, right_index=True, how='left')
sorted_prot = sorted_prot.dropna(axis=0, how='all')
downsampled_sorted_prot = sorted_prot.iloc[::2]
fig, ax = plt.subplots(figsize=(3.2, 8.8))
sns.heatmap(downsampled_sorted_prot, ax=ax, cmap='viridis', xticklabels=True, yticklabels=False, cbar_kws={"shrink": .3})
ax.set_xticks(ax.get_xticks()[[1, 4, 7, 10, 13, 16]])
ax.set_xticklabels(method_dict.values(), rotation=45, ha='right')
ax.set_ylabel(f'{len(downsampled_sorted_prot)} proteins ranked by mean log2 S/N in scMS')
ax.set_facecolor('white')
plt.savefig(res_dir + 'ordered_heatmap_downsampled.pdf', transparent=True, bbox_inches='tight')
fig, ax = plt.subplots(figsize=(5, 14))
sns.heatmap(sorted_prot, ax=ax, cmap='viridis', xticklabels=True, yticklabels=False, cbar_kws={"shrink": .3})
ax.set_xticks(ax.get_xticks()[[1, 4, 7, 10, 13, 16]])
ax.set_xticklabels(method_dict.values(), rotation=45, ha='right')
ax.set_ylabel(f'{len(sorted_prot)} proteins ranked by mean log2 S/N in scMS')
ax.set_facecolor('white')
plt.savefig(res_dir + 'ordered_heatmap.pdf', transparent=True, bbox_inches='tight')
# DE-analysis
de_stats.iloc[:, :4] = de_stats.iloc[:, :4].astype(int)
scaled_df = de_stats/(de_stats.max(axis=0)) # scale for colormap
scaled_df.index = list(method_dict.values())
fig, ax = plt.subplots(figsize=(4, 1.9))
sns.heatmap(scaled_df, annot=de_stats, linewidths=1, fmt='g', cmap="Greens", cbar=False, ax=ax, vmin=0.1, vmax=1, annot_kws={'color':'white', 'size':'large'})
ax.set_xticklabels(ax.get_xticklabels(), rotation=45, ha='right')
plt.savefig(res_dir + 'DE_analysis.pdf', transparent=True, bbox_inches='tight')
# cumulative distribution of TPs sorted by S/N
# testable proteins in scMS that are positiv hits in libray
pos_lib = ms3_lib[ms3_lib['Abundance Ratio Adj P-Value LSC BLAST']<= 0.05]['Accession']
all_testable = pd.concat([x['gene'] for x in fcs.values()]).unique()
de_prots = pd.DataFrame(index=pos_lib[pos_lib.isin(all_testable)])
de_prots = de_prots.merge(all_means, right_index=True, left_index=True)
for m in adata.obs['Method'].unique():
de_prots[m] = de_prots.index.isin(tps[m])
de_prots = de_prots.sort_values('mean', ascending=False)
de_prots['mean'] = [i for i in range(len(de_prots))]
de_prots = de_prots.reset_index().set_index(['index', 'mean']).apply(np.cumsum).reset_index().melt(id_vars=['index', 'mean'])
de_prots.columns = ['gene', 'rank', 'Method', 'cumsum']
de_prots['Method'] = de_prots['Method'].map(method_dict)
fig, ax = plt.subplots(figsize=(2.8, 2.5))
sns.lineplot(data=de_prots, x='rank', y='cumsum', hue='Method', palette='Paired', style='Method', dashes=['', '', (3,1), (3,1), (8,1), (8,1)], linewidth=1.2, alpha=0.8, ax=ax)
ax.set_ylabel('Cumulative TP')
ax.set_xlabel(f"{len(de_prots['gene'].unique())} proteins ranked by mean log2 S/N in scMS")
plt.savefig(res_dir + 'cumsum_TP.pdf', transparent=True, bbox_inches='tight')
# comparison of ratios of log2FCs in scMS and bulk from TPs that are down in LSC
fc_ratios = {}
tps_overlap = set.intersection(*map(set,tps.values()))
common_tp_down = ms3_lib[(ms3_lib['Accession'].isin(tps_overlap)) & (ms3_lib['Abundance Ratio log2 LSC BLAST']<0)]['Accession']
for m in adata.obs['Method'].unique():
fc_ratios[m] = fcs[m][fcs[m]['gene'].isin(common_tp_down)]['ratio_fc']
fc_ratios = pd.DataFrame(fc_ratios).melt().dropna()
fc_ratios['value'] = fc_ratios['value'].astype(float)
fig, ax = plt.subplots(figsize=(2, 2))
sns.boxplot(data=fc_ratios, x='variable', y='value', ax=ax, showfliers=False, dodge=False, width=0.6, palette='Paired')
ax.set_title('Common TP up in BLAST ({})'.format(len(common_tp_down)))
ax.axes.get_xaxis().get_label().set_visible(False)
ax.set_xticklabels(list(method_dict.values()), rotation=45, ha='right')
ax.grid()
plt.ylabel('Log2FC scMS / log2FC bulk')
print(fc_ratios.groupby('variable').median())
plt.savefig(res_dir + 'ratio_compression.pdf', transparent=True, bbox_inches='tight')
# absolute log2FC difference scMS and bulk of common proteins
all_overlap = set.intersection(*map(set,[x['gene'] for x in fcs.values()]))
all_means = pd.concat([x[['gene', 'mean']] for x in fcs.values()]).groupby('gene').mean()
fc_error = pd.DataFrame(all_overlap)
fc_error.columns = ['gene']
fc_error = fc_error.merge(all_means, on='gene')
fc_error['int_bin'] = pd.cut(fc_error['mean'], 3)
n = fc_error.groupby('int_bin').size()
fc_error = fc_error.drop('mean', axis=1)
for m in adata.obs['Method'].unique():
fc_error = fc_error.merge(fcs[m][['gene', 'error_fc']].rename(columns={'error_fc': m}), left_on='gene', right_on='gene')
fc_error = fc_error.melt(id_vars=['gene', 'int_bin'])
fc_error['value'] = fc_error['value'].astype(float)
fc_error.columns = ['gene', 'Mean log2 S/N in scMS', 'Method', 'Absolute log2FC difference']
fc_error['Method'] = fc_error['Method'].map(method_dict)
fig, ax = plt.subplots(figsize=(4.6, 2.5))
sns.boxplot(data=fc_error ,y='Absolute log2FC difference', x='Mean log2 S/N in scMS', hue='Method', showfliers=False, palette="Paired", ax=ax)
ax.set_title('Common testable proteins ({})'.format(len(all_overlap)))
x_ticks = n.reset_index().apply(lambda x: '\nn = '.join(x.astype(str)), axis=1).values
ax.set_xticklabels(x_ticks)
ax.grid()
plt.savefig(res_dir + 'accuracy.pdf', transparent=True, bbox_inches='tight')
os.system('jupyter nbconvert --to html diluted_sample.ipynb --output-dir={}'.format(res_dir))
###Output
[NbConvertApp] Converting notebook diluted_sample.ipynb to html
[NbConvertApp] Writing 1183545 bytes to ../results/diluted_sample/diluted_sample.html
|
docs/Part2_SerumMEtabolitesConcDataWrangling.ipynb | ###Markdown
Data Wrangling: Serum Metabolites Concentration Values, Part 2. Amnah Siddiqa; 13-08-2021. Input table: serum_metabolites_concentrations.csv, coming from the Part 1 Python notebook. Output table: serum_metabolites_convalues_unique.csv. Summary: starting from 11076 values, we end up with one value for each HMDB id, leaving 2753 unique HMDB ids with a blood concentration value at the end.
###Code
#library(dplyr)
#load libraries
shhh <- suppressPackageStartupMessages # It's a library, so shhh!
shhh(library(tidyverse))
serum.metabolites<-read.csv ("output/serum_metabolites_concentrations.csv")
#11076 rows
#remove all rows with an empty value column; these exist because publications are listed even for 'Not Quantified' cell values; see e.g. HMDB0000008
serum.metabolites<-serum.metabolites[!(serum.metabolites$concentration_value == ""), ]
#separate concentration values column based on multiple delimiters
#first +/-
serum.metabolites<-serum.metabolites %>% separate(col=concentration_value, into="value", sep = "(?=\\+|-)")
#second based on parentheses (
serum.metabolites<-serum.metabolites %>% separate(col=value, into="value", sep = "\\(| ()")
#third remove >
serum.metabolites<-serum.metabolites%>%
mutate(value= str_remove_all(value , "<"))
serum.metabolites$value<-as.numeric(as.character(serum.metabolites$value))
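# Illustrative example (made-up strings, not from the dataset): a raw concentration such as
# "2.5 +/- 0.7 uM" or "<0.1" reduces to the leading number (2.5 or 0.1) after these steps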
str(serum.metabolites)
#convert back to decimals instead of scientific notation
options(scipen = 999)
serum.metabolites<-serum.metabolites%>%
group_by(HMDB_accession)%>%
mutate(Med=median(value))%>%
dplyr::select( HMDB_accession, Med)
serum.metabolites= serum.metabolites[!duplicated(serum.metabolites$HMDB_accession),]
length(unique(serum.metabolites$HMDB_accession))
#2753
write.csv(serum.metabolites, "Output/serum_metabolites_convalues_unique.csv", row.names = FALSE)
head(serum.metabolites)
###Output
_____no_output_____ |
solutions by participants/ex5/ex5-NamanJain-3cnot-2.36mHa-16params.ipynb | ###Markdown
Exercise 5: Variational Quantum Eigensolver (VQE) 1. Importing standard libraries
###Code
import numpy as np
# Importing standard Qiskit libraries
from qiskit import QuantumCircuit, transpile, Aer, IBMQ
from qiskit.tools.jupyter import *
from qiskit.visualization import *
from ibm_quantum_widgets import *
###Output
_____no_output_____
###Markdown
2. Getting the molecule ready -> Li-H. A `driver` is an interface to the classical chemistry codes available in Qiskit. By running a driver, we obtain necessary information about our molecule, to then apply `VQE`
###Code
from qiskit_nature.drivers import PySCFDriver
# The representation of Li-H molecule
molecule = 'Li 0.0 0.0 0.0; H 0.0 0.0 1.5474'
driver = PySCFDriver(atom=molecule)
qmolecule = driver.run()
# Gives an idea regarding which orbitals can be freezed and removed
print(qmolecule.one_body_integrals)
print("Number of molecular orbitals: ", qmolecule.num_molecular_orbitals)
print("Number of spin-orbitals: ", 2*qmolecule.num_molecular_orbitals)
print("Nuclear repulsion energy: ", qmolecule.nuclear_repulsion_energy)
###Output
Number of molecular orbitals: 6
Number of spin-orbitals: 12
Nuclear repulsion energy: 1.0259348796432726
###Markdown
3. Fermionic Operators. The `ElectronicStructureProblem` produces a list of second-quantized fermionic operators for the molecule. The `FreezeCoreTransformer` is a transformer in Qiskit-nature that is used to freeze inner core orbitals and remove unoccupied molecular orbitals.
###Code
from qiskit_nature.problems.second_quantization.electronic import ElectronicStructureProblem
from qiskit_nature.transformers import FreezeCoreTransformer
# Freeze the inner core and remove orbitals 3,4 (one body integrals from the previous cell)
# The ElectronicStructureProblem produces a list of fermionic operators before mapping to Qubits
problem = ElectronicStructureProblem(driver, q_molecule_transformers=
[FreezeCoreTransformer(freeze_core=True, remove_orbitals=[4, 3])])
# Generate the second quantized operators
second_q_ops = problem.second_q_ops()
# Get the Hamiltonian
main_op = second_q_ops[0]
# Modified one body integrals
print(problem.molecule_data_transformed.one_body_integrals)
###Output
[[-0.78066411 0.04770212 -0.12958119 0. 0. 0. ]
[ 0.04770212 -0.35909729 0.06823803 0. 0. 0. ]
[-0.12958119 0.06823803 -0.22617115 0. 0. 0. ]
[ 0. 0. 0. -0.78066411 0.04770212 -0.12958119]
[ 0. 0. 0. 0.04770212 -0.35909729 0.06823803]
[ 0. 0. 0. -0.12958119 0.06823803 -0.22617115]]
###Markdown
4. Mapping to Qubits. `ParityMapper` is used to map the fermionic operators to qubit operators. A `QubitConverter` object is used to reduce the qubits even further by exploiting the symmetries.
###Code
from qiskit_nature.mappers.second_quantization import ParityMapper
from qiskit_nature.converters.second_quantization.qubit_converter import QubitConverter
mapper = ParityMapper()
converter = QubitConverter(mapper=mapper, two_qubit_reduction=True, z2symmetry_reduction= [1])
num_particles = (problem.molecule_data_transformed.num_alpha,
problem.molecule_data_transformed.num_beta)
# Fermionic operators are mapped to qubit operators
qubit_op = converter.convert(main_op, num_particles=num_particles)
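# the parity mapping with two-qubit reduction (plus the Z2 symmetry setting above) yields the 4-qubit operator printed below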
print(qubit_op)
###Output
-0.20316606150558716 * IIII
- 0.3652586902160391 * ZIII
+ 0.09275994933497503 * IZII
- 0.2118898429700864 * ZZII
+ (0.3652586902160392-2.7755575615628914e-17j) * IIZI
- 0.11384335176464107 * ZIZI
+ 0.11395251883046094 * IZZI
+ (-0.060440128573164456-3.469446951953614e-18j) * ZZZI
+ (-0.09275994933497497+6.938893903907228e-18j) * IIIZ
+ 0.11395251883046094 * ZIIZ
+ (-0.12274244052543586+6.938893903907228e-18j) * IZIZ
+ 0.05628878167218207 * ZZIZ
+ (-0.21188984297008645+2.0816681711721685e-17j) * IIZZ
+ 0.060440128573164456 * ZIZZ
- 0.05628878167218208 * IZZZ
+ 0.0846013139182359 * ZZZZ
+ 0.019389408583692383 * XIII
+ (-0.019389408583692383-4.336808689942018e-19j) * XZII
+ (-0.010952773573813853+8.673617379884035e-19j) * XIZI
+ 0.010952773573813853 * XZZI
+ 0.012779333033031574 * XIIZ
- 0.012779333033031576 * XZIZ
- 0.009002501243838515 * XIZZ
+ 0.009002501243838515 * XZZZ
+ 0.0029411410873504597 * IXII
+ 0.0029411410873504597 * ZXII
- 0.01068185628295703 * IXZI
- 0.01068185628295703 * ZXZI
+ 0.011925529284512949 * IXIZ
+ (0.011925529284512949+8.673617379884035e-19j) * ZXIZ
- 0.0016974649623971746 * IXZZ
+ (-0.0016974649623971746+1.0842021724855044e-19j) * ZXZZ
- 0.0007427996394772664 * XXII
+ 0.0007427996394772658 * YYII
+ 0.0343897481404705 * XXZI
+ (-0.0343897481404705-3.469446951953614e-18j) * YYZI
- 0.03239529731985257 * XXIZ
+ 0.03239529731985257 * YYIZ
+ 0.0027372506123214914 * XXZZ
- 0.0027372506123214914 * YYZZ
+ (0.019389408583692383+4.336808689942018e-19j) * IIXI
+ 0.010952773573813853 * ZIXI
- 0.012779333033031576 * IZXI
- 0.009002501243838515 * ZZXI
+ (0.019389408583692383-8.673617379884035e-19j) * IIXZ
+ 0.010952773573813853 * ZIXZ
- 0.012779333033031574 * IZXZ
- 0.009002501243838515 * ZZXZ
+ 0.0065875841900649695 * XIXI
- 0.0065875841900649695 * XZXI
+ (0.0065875841900649695-4.336808689942018e-19j) * XIXZ
- 0.0065875841900649695 * XZXZ
+ 0.0022216108081588378 * IXXI
+ (0.0022216108081588378+2.168404344971009e-19j) * ZXXI
+ (0.0022216108081588378+2.168404344971009e-19j) * IXXZ
+ 0.0022216108081588378 * ZXXZ
- 0.007859003265909143 * XXXI
+ 0.007859003265909143 * YYXI
- 0.007859003265909143 * XXXZ
+ (0.007859003265909143-8.673617379884035e-19j) * YYXZ
+ (0.0029411410873504684-2.168404344971009e-19j) * IIIX
+ 0.01068185628295703 * ZIIX
+ (-0.011925529284512949-8.673617379884035e-19j) * IZIX
- 0.0016974649623971746 * ZZIX
+ (-0.0029411410873504684+2.168404344971009e-19j) * IIZX
- 0.01068185628295703 * ZIZX
+ (0.011925529284512949+8.673617379884035e-19j) * IZZX
+ 0.0016974649623971746 * ZZZX
+ 0.0022216108081588378 * XIIX
- 0.0022216108081588378 * XZIX
- 0.0022216108081588378 * XIZX
+ 0.0022216108081588378 * XZZX
+ 0.003139482375511508 * IXIX
+ 0.0031394823755115085 * ZXIX
- 0.003139482375511508 * IXZX
- 0.0031394823755115085 * ZXZX
- 0.008499158469823676 * XXIX
+ 0.008499158469823676 * YYIX
+ 0.008499158469823676 * XXZX
- 0.008499158469823676 * YYZX
- 0.0007427996394772482 * IIXX
- 0.0343897481404705 * ZIXX
+ 0.03239529731985257 * IZXX
+ (0.0027372506123214914-2.168404344971009e-19j) * ZZXX
+ 0.0007427996394772486 * IIYY
+ 0.0343897481404705 * ZIYY
+ (-0.03239529731985257+3.469446951953614e-18j) * IZYY
- 0.0027372506123214914 * ZZYY
+ (-0.007859003265909143+8.673617379884035e-19j) * XIXX
+ 0.007859003265909143 * XZXX
+ 0.007859003265909143 * XIYY
- 0.007859003265909143 * XZYY
- 0.008499158469823676 * IXXX
- 0.008499158469823676 * ZXXX
+ 0.008499158469823676 * IXYY
+ 0.008499158469823676 * ZXYY
+ 0.030846096963265974 * XXXX
+ (-0.030846096963265974-1.734723475976807e-18j) * YYXX
- 0.030846096963265974 * XXYY
+ 0.030846096963265974 * YYYY
###Markdown
5. Initial State. A CPU can efficiently compute the energies associated with electron hopping and interactions that make up the total energy operator, the Hamiltonian, by means of `Hartree-Fock`. The `Hartree-Fock (HF)` method efficiently computes an approximate ground state wavefunction.
###Code
from qiskit_nature.circuit.library import HartreeFock
num_spin_orbitals = 2 * problem.molecule_data_transformed.num_molecular_orbitals
num_particles = (problem.molecule_data_transformed.num_alpha,
problem.molecule_data_transformed.num_beta)
init_state = HartreeFock(num_spin_orbitals, num_particles, converter)
init_state.draw()
###Output
_____no_output_____
###Markdown
6. Exact Eigensolver. The problem can be solved exactly by diagonalizing the Hamiltonian matrix. For learning purposes, this tells us where VQE should converge.
###Code
from qiskit_nature.algorithms.ground_state_solvers.minimum_eigensolver_factories import NumPyMinimumEigensolverFactory
from qiskit_nature.algorithms.ground_state_solvers import GroundStateEigensolver
def exact_diagonalizer(problem, converter):
solver = NumPyMinimumEigensolverFactory()
calc = GroundStateEigensolver(converter, solver)
result = calc.solve(problem)
return result
result_exact = exact_diagonalizer(problem, converter)
exact_energy = np.real(result_exact.eigenenergies[0])
print("Exact electronic energy", exact_energy)
# Exact electronic Energy is higher than actual energy due to
# the freezing of inner core orbitals
###Output
Exact electronic energy -1.0887060157347368
###Markdown
7. Ansatz. A heuristic (`TwoLocal`) ansatz circuit is used to approximate the ground state wavefunction. The circuit settings below were chosen by experimentation.
###Code
from qiskit.circuit.library import TwoLocal
ansatz_type = "TwoLocal"
rotation_blocks = ['ry','rz']
entanglement_blocks = 'cx'
entanglement = "linear"
repetitions = 1
skip_final_rotation_layer = False
ansatz = TwoLocal(qubit_op.num_qubits, rotation_blocks, entanglement_blocks, reps=repetitions,
entanglement=entanglement, skip_final_rotation_layer=skip_final_rotation_layer)
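# with 4 qubits, reps=1 and a final rotation layer: 2 rotation layers x 2 gates x 4 qubits = 16 parameters, and one linear entanglement layer of 3 CNOTs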
# Add the initial state
ansatz.compose(init_state, front=True, inplace=True)
ansatz.draw()
###Output
_____no_output_____
###Markdown
8. Backend. A backend is a device or simulator on which to run the algorithm. Here the `statevector_simulator` is used.
###Code
backend = Aer.get_backend('statevector_simulator')
###Output
_____no_output_____
###Markdown
9. Algorithm. The optimizer `SLSQP` with `maxiter = 1000` is used. An `initial_point` is provided to the VQE so that the optimization starts from a consistent point.
###Code
from qiskit.algorithms.optimizers import SLSQP
from qiskit.algorithms import VQE
from IPython.display import display, clear_output
def callback(eval_count, parameters, mean, std):
# Overwrites the same line when printing
display("Evaluation: {}, Energy: {}, Std: {}".format(eval_count, mean, std))
clear_output(wait=True)
counts.append(eval_count)
values.append(mean)
params.append(parameters)
deviation.append(std)
counts = []
values = []
params = []
deviation = []
try:
initial_point = [0.01] * len(ansatz.ordered_parameters)
except:
initial_point = [0.01] * ansatz.num_parameters
optimizer = SLSQP(maxiter= 1000)
algorithm = VQE(ansatz,
optimizer=optimizer,
quantum_instance=backend,
callback=callback,
initial_point=initial_point)
result = algorithm.compute_minimum_eigenvalue(qubit_op)
print(result)
###Output
{ 'aux_operator_eigenvalues': None,
'cost_function_evals': 786,
'eigenstate': array([ 1.39131226e-03+1.62920545e-04j, -4.80344694e-03-1.47047592e-04j,
2.62447729e-02+8.47628269e-04j, -9.91330355e-01+1.36767252e-02j,
-5.37464440e-02-3.77350493e-03j, -3.69555386e-04-8.90731172e-05j,
8.79307549e-04-4.46593915e-06j, -2.32783453e-02+1.49208704e-03j,
-2.67397326e-03-7.05573705e-05j, -1.12127825e-05-8.31876644e-06j,
3.67494259e-06+2.34900094e-05j, 3.09653242e-04-9.10944922e-04j,
1.13571961e-01-2.69468702e-03j, 5.56021549e-04+1.40788574e-04j,
-5.49346724e-04+3.83200142e-05j, -3.05785674e-05-1.92967062e-05j]),
'eigenvalue': -1.0863472034410773,
'optimal_parameters': { ParameterVectorElement(θ[1]): -0.005426351911253571,
ParameterVectorElement(θ[13]): -0.04487184826402391,
ParameterVectorElement(θ[0]): 0.2521552078200679,
ParameterVectorElement(θ[6]): -0.07021334472777313,
ParameterVectorElement(θ[15]): -0.09608485821414185,
ParameterVectorElement(θ[7]): 0.00409929461324721,
ParameterVectorElement(θ[2]): -3.1415163529805747,
ParameterVectorElement(θ[14]): -0.050147118142714456,
ParameterVectorElement(θ[9]): 3.131886066481301,
ParameterVectorElement(θ[12]): -0.04587630369526314,
ParameterVectorElement(θ[3]): -2.6983061141470053,
ParameterVectorElement(θ[10]): 3.1886748366284587,
ParameterVectorElement(θ[11]): 0.44246261454081964,
ParameterVectorElement(θ[8]): 0.05228352830967145,
ParameterVectorElement(θ[5]): 0.028616565957680282,
ParameterVectorElement(θ[4]): -0.04518155158598968},
'optimal_point': array([ 0.25215521, 3.18867484, 0.44246261, -0.0458763 , -0.04487185,
-0.05014712, -0.09608486, -0.00542635, -3.14151635, -2.69830611,
-0.04518155, 0.02861657, -0.07021334, 0.00409929, 0.05228353,
3.13188607]),
'optimal_value': -1.0863472034410773,
'optimizer_evals': 786,
'optimizer_time': 9.572652339935303}
###Markdown
10. Analysis. A `chemical accuracy = 4 mHa` is to be reached, with as few `CNOTs` as possible.
###Code
# Store results in a dictionary
from qiskit.transpiler import PassManager
from qiskit.transpiler.passes import Unroller
# Unroller transpile your circuit into CNOTs and U gates
pass_ = Unroller(['u', 'cx'])
pm = PassManager(pass_)
ansatz_tp = pm.run(ansatz)
cnots = ansatz_tp.count_ops()['cx']
score = cnots
accuracy_threshold = 4.0 # in mHa
energy = result.optimal_value
if ansatz_type == "TwoLocal":
result_dict = {
'optimizer': optimizer.__class__.__name__,
'mapping': converter.mapper.__class__.__name__,
'ansatz': ansatz.__class__.__name__,
'rotation blocks': rotation_blocks,
'entanglement_blocks': entanglement_blocks,
'entanglement': entanglement,
'repetitions': repetitions,
'skip_final_rotation_layer': skip_final_rotation_layer,
'energy (Ha)': energy,
'error (mHa)': (energy-exact_energy)*1000,
'pass': (energy-exact_energy)*1000 <= accuracy_threshold,
'# of parameters': len(result.optimal_point),
'final parameters': result.optimal_point,
'# of evaluations': result.optimizer_evals,
'optimizer time': result.optimizer_time,
'# of qubits': int(qubit_op.num_qubits),
'# of CNOTs': cnots,
'score': score}
else:
result_dict = {
'optimizer': optimizer.__class__.__name__,
'mapping': converter.mapper.__class__.__name__,
'ansatz': ansatz.__class__.__name__,
'rotation blocks': None,
'entanglement_blocks': None,
'entanglement': None,
'repetitions': None,
'skip_final_rotation_layer': None,
'energy (Ha)': energy,
'error (mHa)': (energy-exact_energy)*1000,
'pass': (energy-exact_energy)*1000 <= accuracy_threshold,
'# of parameters': len(result.optimal_point),
'final parameters': result.optimal_point,
'# of evaluations': result.optimizer_evals,
'optimizer time': result.optimizer_time,
'# of qubits': int(qubit_op.num_qubits),
'# of CNOTs': cnots,
'score': score}
# Plot the results
import matplotlib.pyplot as plt
fig, ax = plt.subplots(1, 1)
ax.set_xlabel('Iterations')
ax.set_ylabel('Energy')
ax.grid()
fig.text(0.7, 0.75, f'Energy: {result.optimal_value:.3f}\nScore: {score:.0f}')
plt.title(f"{result_dict['optimizer']}-{result_dict['mapping']}\n{result_dict['ansatz']}")
ax.plot(counts, values)
ax.axhline(exact_energy, linestyle='--')
fig_title = f"\
{result_dict['optimizer']}-\
{result_dict['mapping']}-\
{result_dict['ansatz']}-\
Energy({result_dict['energy (Ha)']:.3f})-\
Score({result_dict['score']:.0f})\
.png"
fig.savefig(fig_title, dpi=300)
# Display and save the data
import pandas as pd
import os.path
filename = 'results_h2.csv'
if os.path.isfile(filename):
result_df = pd.read_csv(filename)
result_df = result_df.append([result_dict])
else:
result_df = pd.DataFrame.from_dict([result_dict])
result_df.to_csv(filename)
result_df[['optimizer','ansatz', '# of qubits', '# of parameters','rotation blocks', 'entanglement_blocks',
'entanglement', 'repetitions', 'error (mHa)', 'pass', 'score']]
###Output
_____no_output_____ |
Notes - Self Paced Classes/NumPy Document/Python Certification Course- Numpy.ipynb | ###Markdown
1-Dimensional Array
###Code
import numpy as np
a = np.array([1,2,3])
print(a)
###Output
[1 2 3]
###Markdown
2-Dimensional Array
###Code
import numpy as np
b = np.array([[1,2,3],[4,5,6]])
print(b)
###Output
[[1 2 3]
[4 5 6]]
###Markdown
Initialize all the elements of x X y array to 0
###Code
import numpy as np
np.zeros((3,4))
###Output
_____no_output_____
###Markdown
Arrange the numbers between x and y with an interval of z
###Code
import numpy as np
np.arange(1,10,2)
#even numbers between 10 and 20
np.arange(10,20,2)
###Output
_____no_output_____
###Markdown
Arrange 'z' numbers between x and y
###Code
np.linspace(5,10,10)
#even numbers between 0 and 10
np.linspace(0,10,6)
###Output
_____no_output_____
###Markdown
Filling SAME number in an array of dimension x X y
###Code
np.full((2,3),6)
###Output
_____no_output_____
###Markdown
Filling RANDOM number in an array of dimension x X y
###Code
np.random.random((2,3))
###Output
_____no_output_____
###Markdown
Inspecting the array: Checking the size of the array
###Code
a = np.array([[2,3,4],[4,4,6]])
print(a.shape)
s = np.array([[1,2,3,4],[2,3,4,6],[6,7,8,9]])
print(s.shape)
###Output
(3, 4)
###Markdown
Inspecting the array: Resize the Array
###Code
a = np.array([[2,3,4],[4,4,6]])
a.shape = (3,2)
print(a)
a = np.array([[2,3,4,4],[2,4,4,6]])
a.shape = (8,1) #Trick: x*y = Total number of elements in the array
print(a)
###Output
[[2]
[3]
[4]
[4]
[2]
[4]
[4]
[6]]
###Markdown
Return the dimension of the array
###Code
a = np.arange(24)
a
#print(a.ndim)
#reshape our array
#b = a.reshape(12,2) #trick: Calculate the factors of 24: 1,2,3,4,6,12,24
#print(b.ndim)
###Output
_____no_output_____
###Markdown
Find the number of elements in an array
###Code
print(a.size)
d = np.array([[1,2,3,4],[4,5,6,4],[6,7,8,9]])
print(d.size)
###Output
12
###Markdown
Find the datatype of the array
###Code
a = np.arange(24, dtype=float)
print(a.dtype)
a
###Output
float64
###Markdown
Numpy Array Mathematics: Addition
###Code
import numpy as np
np.sum([10,20])
#using a variable that is sum of a+b
a,b = 10,20
np.sum([a,b])
np.sum([[1,2],[5,6]],axis = 0)
np.sum([[1,2],[5,6]],axis = 1)
np.sum([[1,2],[5,6]])
###Output
_____no_output_____
###Markdown
Numpy Array Mathematics: Subtraction
###Code
np.subtract(10,20)
###Output
_____no_output_____
###Markdown
All other numpy Mathematics Function
###Code
np.multiply(2,3) #Multiplying two numbers
np.divide(10,5) #Dividing two numbers
a =np.array([2,4,6])
b =np.array([1,2,3])
np.multiply(a,b)
#exp,sqrt,sin,cos,log
print("Exponent : ",np.exp(a))
print("Square root : ", np.sqrt(a))
print("Sin : ", np.sin(a))
print("Cos : ", np.cos(a))
print("Log : ", np.log(a))
###Output
Exponent : [ 7.3890561 54.59815003 403.42879349]
Square root : [1.41421356 2. 2.44948974]
Sin : [ 0.90929743 -0.7568025 -0.2794155 ]
Cos : [-0.41614684 -0.65364362 0.96017029]
Log : [0.69314718 1.38629436 1.79175947]
###Markdown
Array Comparison
###Code
#Element-wise Comparison
a = [1,2,4]
b = [2,4,4]
c = [1,2,4]
np.equal(a,b)
#Array-wise Comparison
a = [1,2,4]
b = [1,4,4]
c = [1,2,4]
np.array_equal(a,c)
###Output
_____no_output_____
###Markdown
Aggregate Function
###Code
a = [1,2,4]
b = [2,4,4]
c = [1,2,4]
print("Sum: ",np.sum(a))
print("Minimum Value: ",np.min(a))
print("Mean: ",np.mean(a))
print("Median: ",np.median(a))
print("Coorelation Coefficient: ",np.corrcoef(a))
print("Standard Deviation: ",np.std(a))
###Output
Sum: 7
Minimum Value: 1
Mean: 2.3333333333333335
Median: 2.0
Correlation Coefficient: 1.0
Standard Deviation: 1.247219128924647
###Markdown
Concept of Broadcasting: when operand shapes differ, NumPy virtually stretches the smaller array along the mismatched axis, so a 1 x 3 row can be added to every row of a 4 x 3 array.
###Code
import numpy as np
a = np.array([[0,0,0],[1,2,3],[4,5,6],[5,6,7]])
b = np.array([[0,1,2]])
print("First Array: \n",a,'\n')
print("Second Array: \n",b,'\n')
print("First Array + Second Array: \n",a+b,'\n')
###Output
First Array:
[[0 0 0]
[1 2 3]
[4 5 6]
[5 6 7]]
Second Array:
[[0 1 2]]
First Array + Second Array:
[[0 1 2]
[1 3 5]
[4 6 8]
[5 7 9]]
###Markdown
Indexing and Slicing in Python
###Code
a = ['m','o','n','t','y',' ','p','y','t','h','o','n']
a[2:9]
a = np.array([[1,2,3],[4,5,6],[7,8,9]])
a[0]
a[:1]
print(a)
a[:1,1:]
a[:2,1:]
a[1:,1:]
###Output
_____no_output_____
###Markdown
Array Manipulation in Python
###Code
a = np.array([1,2,3])
b= np.array([4,5,6])
#concatenation of two arrays
np.concatenate((a,b))
#Stack array row-wise: Horizontal
np.hstack((a,b))
#Stack array row-wise: Vertically
np.vstack((a,b))
#Combining Column-wise
np.column_stack((a,b))
# Splitting Array
x = np.arange(16).reshape(4,4)
print(x,"\n\n")
print(np.hsplit(x,2))
print("\n\n", np.hsplit(x,np.array([2,3])))
###Output
[[ 0 1 2 3]
[ 4 5 6 7]
[ 8 9 10 11]
[12 13 14 15]]
[array([[ 0, 1],
[ 4, 5],
[ 8, 9],
[12, 13]]), array([[ 2, 3],
[ 6, 7],
[10, 11],
[14, 15]])]
[array([[ 0, 1],
[ 4, 5],
[ 8, 9],
[12, 13]]), array([[ 2],
[ 6],
[10],
[14]]), array([[ 3],
[ 7],
[11],
[15]])]
###Markdown
Advantages of Numpy Over a List
###Code
#Numpy vs List: Memory size
import numpy as np
import sys
#define a list
l = range(1000)
print("Size of a list: ",sys.getsizeof(1)*len(l))
#define a numpy array
a = np.arange(1000)
print("Size of an array: ",a.size*a.itemsize)
#Numpy vs List: Speed
import time
def using_List():
t1 = time.time()#Starting/Initial Time
X = range(10000)
Y = range(10000)
z = [X[i]+Y[i] for i in range(len(X))]
return time.time()-t1
def using_Numpy():
t1 = time.time()#Starting/Initial Time
a = np.arange(10000)
b = np.arange(10000)
    z = a + b  # more convenient than a list comprehension
return time.time()-t1
list_time = using_List()
numpy_time = using_Numpy()
print(list_time,numpy_time)
print("In this example Numpy is "+str(list_time/numpy_time)+" times faster than a list")
###Output
0.001993894577026367 0.000997781753540039
In this example Numpy is 1.9983273596176823 times faster than a list
|
100days/day 65 - floyd-warshall.ipynb | ###Markdown
algorithm
###Code
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
from itertools import product
def floyd(graph):
# initialize matrix
distance = nx.adjacency_matrix(graph).todense().astype(float)
distance[distance == 0] = np.inf
np.fill_diagonal(distance, 0)
# find shortest paths
for k, i, j in product(range(len(graph)), repeat=3):
distance[i, j] = min(distance[i, j], distance[i, k] + distance[k, j])
# negative cycle detection
if i == j and distance[i, j] < 0:
return k, i, 'negative cycle detected'
# shortest paths
return {
(i, j): distance[i, j]
for i, j in product(range(len(graph)), repeat=2)
if i != j and not np.isinf(distance[i, j])
}
###Output
_____no_output_____
###Markdown
graph
###Code
def generate_graph(n, edge_prob=.5, pos_weight_prob=.2):
graph = nx.DiGraph()
graph.add_nodes_from(range(n))
for u, v in product(range(n), repeat=2):
if u != v and np.random.rand() < edge_prob:
weight = [-1, 1][np.random.rand() < pos_weight_prob]
graph.add_edge(u, v, weight=weight)
return graph
def draw_graph(graph):
cm = {-1: 'red', 1: 'black'}
colors = [cm[e['weight']] for (u, v, e) in graph.edges(data=True)]
plt.figure(figsize=(8, 8))
plt.axis('off')
layout = nx.spring_layout(graph)
nx.draw_networkx_nodes(graph, layout, node_color='steelblue', node_size=520)
nx.draw_networkx_edges(graph, layout, edge_color=colors)
nx.draw_networkx_labels(graph, layout, font_color='white')
###Output
_____no_output_____
###Markdown
run
###Code
graph = generate_graph(5, edge_prob=.4, pos_weight_prob=.7)
draw_graph(graph)
floyd(graph)
###Output
_____no_output_____
###Markdown
run
###Code
graph = generate_graph(5, edge_prob=.4, pos_weight_prob=.6)
draw_graph(graph)
floyd(graph)
###Output
_____no_output_____ |
xgboost/BikeSharingRegression/biketrain_xgboost_localmode.ipynb | ###Markdown
Train a model with bike rental data using the XGBoost algorithm. The model is trained with XGBoost installed in the notebook instance. In later examples, we will train using SageMaker's XGBoost algorithm.
###Code
# Install xgboost in notebook instance.
#### Command to install xgboost
!conda install -y -c conda-forge xgboost
%matplotlib inline
import sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import xgboost as xgb
column_list_file = 'bike_train_column_list.txt'
train_file = 'bike_train.csv'
validation_file = 'bike_validation.csv'
test_file = 'bike_test.csv'
columns = ''
with open(column_list_file,'r') as f:
columns = f.read().split(',')
columns
# Specify the column names as the file does not have column header
df_train = pd.read_csv(train_file,names=columns)
df_validation = pd.read_csv(validation_file,names=columns)
df_train.head()
df_validation.head()
X_train = df_train.iloc[:,1:] # Features: 1st column onwards
y_train = df_train.iloc[:,0].ravel() # Target: 0th column
X_validation = df_validation.iloc[:,1:]
y_validation = df_validation.iloc[:,0].ravel()
# XGBoost Training Parameter Reference:
# https://github.com/dmlc/xgboost/blob/master/doc/parameter.md
# sklearn API parameter names: learning_rate (a.k.a. eta) and n_estimators (a.k.a. num_round)
regressor = xgb.XGBRegressor(max_depth=5, learning_rate=0.1, subsample=0.7, n_estimators=150)
regressor
regressor.fit(X_train,y_train, eval_set = [(X_train, y_train), (X_validation, y_validation)])
eval_result = regressor.evals_result()
training_rounds = range(len(eval_result['validation_0']['rmse']))
print(training_rounds)
plt.scatter(x=training_rounds,y=eval_result['validation_0']['rmse'],label='Training Error')
plt.scatter(x=training_rounds,y=eval_result['validation_1']['rmse'],label='Validation Error')
plt.grid(True)
plt.xlabel('Training Rounds')
plt.ylabel('RMSE')
plt.title('Training Vs Validation Error')
plt.legend()
xgb.plot_importance(regressor)
df = pd.read_csv('bike_all.csv')
df.head()
X_test = df.iloc[:,1:]
print(X_test[:5])
result = regressor.predict(X_test)
result[:5]
df['count_predicted'] = result
df.head()
# Negative Values are predicted
df['count_predicted'].describe()
df[df['count_predicted'] < 0]
df['count_predicted'].hist()
def adjust_count(x):
if x < 0:
return 0
else:
return x
df['count_predicted'] = df['count_predicted'].map(adjust_count)
df[df['count_predicted'] < 0]
plt.boxplot([df['count'],df['count_predicted']], labels=['actual','predicted'])
plt.title('Box Plot - Actual, Predicted')
plt.ylabel('Target')
plt.grid(True)
# Over-prediction and under-prediction need to be balanced
# Training Data Residuals
residuals = (df['count_predicted'] - df['count'])
plt.hist(residuals)
plt.grid(True)
plt.xlabel('(Predicted - Actual)')
plt.ylabel('Count')
plt.title('Residuals Distribution')
plt.axvline(color='g')
import sklearn.metrics as metrics
print("RMSE: {0}".format(metrics.mean_squared_error(df['count'],df['count_predicted'])**.5))
# Metric Use By Kaggle
def compute_rmsle(y_true, y_pred):
if type(y_true) != np.ndarray:
y_true = np.array(y_true)
if type(y_pred) != np.ndarray:
y_pred = np.array(y_pred)
return(np.average((np.log1p(y_pred) - np.log1p(y_true))**2)**.5)
print("RMSLE: {0}".format(compute_rmsle(df['count'],df['count_predicted'])))
# Prepare Data for Submission to Kaggle
df_test = pd.read_csv(test_file,parse_dates=['datetime'])
df_test.head()
X_test = df_test.iloc[:,1:] # Exclude datetime for prediction
X_test.head()
result = regressor.predict(X_test)
result[:5]
df_test["count"] = result
df_test.head()
df_test[df_test["count"] < 0]
df_test["count"] = df_test["count"].map(adjust_count)
df_test[['datetime','count']].to_csv('predicted_count.csv',index=False)
# RMSLE (Kaggle) Scores
# Test 1: 1.32
# Test 2 (added new feature): 0.61646
###Output
_____no_output_____ |
2019/02/solution.ipynb | ###Markdown
Advent of Code 2019 - Day 2 Part 1
###Code
def process_program(program):
for idx in range(0, len(program), 4):
if program[idx] == 1:
# do add
(idx_1, idx_2, idx_3) = program[idx + 1:idx + 4]
program[idx_3] = program[idx_1] + program[idx_2]
elif program[idx] == 2:
# do multiply
(idx_1, idx_2, idx_3) = program[idx + 1:idx + 4]
program[idx_3] = program[idx_1] * program[idx_2]
elif program[idx] == 99:
# end program
return program
def str_to_list(string):
return [int(i) for i in string.split(',')]
print(process_program(str_to_list('1,0,0,0,99')))
print(process_program(str_to_list('2,3,0,3,99')))
print(process_program(str_to_list('2,4,4,5,99,0')))
print(process_program(str_to_list('1,1,1,4,99,5,6,0,99')))
print(process_program(str_to_list('1,9,10,3,2,3,11,0,99,30,40,50')))
program = []
with open('input.txt', 'r') as f:
for line in f:
program = [int(i) for i in line.split(',')]
program[1] = 12
program[2] = 2
processed_program = process_program(program)
print(f'Program: {processed_program}')
print(f'Solution: {processed_program[0]}')
###Output
Program: [3706713, 12, 2, 2, 1, 1, 2, 3, 1, 3, 4, 3, 1, 5, 0, 3, 2, 1, 10, 48, 1, 6, 19, 50, 1, 13, 23, 55, 1, 6, 27, 57, 1, 31, 10, 61, 1, 35, 6, 63, 1, 39, 13, 68, 2, 10, 43, 272, 1, 47, 6, 274, 2, 6, 51, 548, 1, 5, 55, 549, 2, 13, 59, 2745, 2, 63, 9, 8235, 1, 5, 67, 8236, 2, 13, 71, 41180, 1, 75, 5, 41181, 1, 10, 79, 41185, 2, 6, 83, 82370, 2, 13, 87, 411850, 1, 9, 91, 411853, 1, 9, 95, 411856, 2, 99, 9, 1235568, 1, 5, 103, 1235569, 2, 9, 107, 3706707, 1, 5, 111, 3706708, 1, 115, 2, 3706710, 1, 9, 119, 0, 99, 2, 0, 14, 0]
Solution: 3706713
###Markdown
Part 2
###Code
program = []
with open('input.txt', 'r') as f:
for line in f:
program = [int(i) for i in line.split(',')]
import copy
target = 19690720
for noun in range(0, 99):
for verb in range(0, 99):
curr_program = copy.deepcopy(program)
curr_program[1] = noun
curr_program[2] = verb
processed_program = process_program(curr_program)
if processed_program[0] == target:
print(f'noun: {noun}')
print(f'verb: {verb}')
print(f'solution: {100 * noun + verb}')
###Output
noun: 86
verb: 9
solution: 8609
|
notebook/Quantum_Communicator_Example.ipynb | ###Markdown
 Using the QuantumDispatcher Class A Dispatcher has 3 functions: `run_and_transmit`, `multi_run_and_transmit`, and `batch_run_and_transmit`. `run_and_transmit` takes in 3 parameters: * pre-operation: a circuit of operations to prepare a transmitted state * post-operations: a list of operations to run on different devices * the number of shots to run the circuit. `run_and_transmit` is intended to run a single job, while `multi_run_and_transmit` is intended to run multiple jobs. For `multi_run_and_transmit` the preparation operations are expected to be a list of operations, and the post operations are expected to be a list of lists. The ith element of each list corresponds to the circuit run as the ith job. `batch_run_and_transmit` creates and runs all permutations of the operations given to it, both pre and post transmission. Example Consider the scenario for measurement incompatibility testing. First, we prepare the BB84 states for Alice, and Bob measures based on the input he gets, y = 0 or y = 1. So we would like to make 2 measurements, one with Bob's input 0 and another with 1. We can instantiate a `LocalDispatcher` and call the method `multi_run_and_transmit` on this instance to accomplish this. To instantiate a `LocalDispatcher`, we need to feed in one device.
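For comparison, a single job would go through `run_and_transmit`. Based on the parameter description above (and reusing the helpers defined in the next cell), usage would look roughly like the sketch below — the exact signature is an assumption, not taken from the library documentation:

```python
# Hypothetical single-job dispatch: one preparation circuit, one set of
# post-transmission operations, and a shot count.
single_counts = dispatch.run_and_transmit(
    get_bb84_state(),                         # pre-operation
    [QuantumCircuit(4), measure_circuit(0)],  # post-operations
    1000,                                     # number of shots
)
```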
###Code
# Importing standard Qiskit libraries and configuring account
from qiskit import QuantumCircuit, IBMQ
# from qiskit.tools.jupyter import *
# from qiskit.visualization import *
# from qiskit.providers.ibmq.managed import IBMQJobManager
# Loading your IBM Q account(s)
provider = IBMQ.load_account()
import numpy as np
import context
from device_independent_test.quantum_communicator import LocalDispatcher
def get_bb84_state():
qc = QuantumCircuit(4)
qc.x(1) # create 1
qc.h(2) # create +
qc.x(3) # create -
qc.h(3) # ^
return qc
def measure_circuit(y):
qc = QuantumCircuit(4)
theta = -1.0*(np.pi/4 + 0.5*y*np.pi) # -pi/4 rotation for y=0, -3pi/4 rotation for y=1
qc.u3(theta,0,0,0)
qc.measure_all()
return qc
# initialization for two measurements - both preparing bb84 state
pre_ops = [get_bb84_state(), get_bb84_state()]
# one test sequence with QuantumCircuit(4) + measure_circuit(0)
# and another with QuantumCircuit(4) + measure_circuit(1)
post_ops = [
[QuantumCircuit(4), QuantumCircuit(4)],
[measure_circuit(0), measure_circuit(1)]
]
dispatch = LocalDispatcher([provider.get_backend('ibmq_qasm_simulator')])
counts = dispatch.multi_run_and_transmit(pre_ops, post_ops, 1000)
###Output
_____no_output_____
###Markdown
The function returns the list of counts of each measurement.
###Code
counts
###Output
_____no_output_____ |
intro_apren_auto/chapters/ch7/ch7-dev-tree-02-bagging.ipynb | ###Markdown
Aggregation Techniques Prediction trees, like any statistical model, suffer from the trade-off between bias and variance. The term bias refers to how far a model's predictions fall from the true values. Variance refers to how much the model changes depending on the set used for training. The greater the complexity of a model, the more closely it can be adapted to the problem to be solved, reducing the bias and improving its predictive ability. However, it is dangerous to have a model so tightly fitted to the training set that it shows high variance and is unable to adapt to new observations; that is when the problem of overfitting appears. The ideal model is the one that achieves a good balance between bias and variance. These terms carry over to the decision trees seen in the previous section. In general, small trees will not be able to represent the relationship between the variables correctly, so they tend to have high bias but low variance. In contrast, large trees fit the training data very closely, so they have lower bias but high variance. As described in the chapter introduction, __bagging__, __random forests__ and __boosting__ are techniques that use decision trees as the elementary pieces to build prediction models with better results, achieving a better balance between bias and variance. Bagging **NOTE:** Translate "bagging"? I have seen resources that translate it as "Agregación de bootstrap" or "empaquetado"; if translated, I would use "empaquetado". Decision trees suffer from high variance: if we randomly split the training data into two parts and build a decision tree on each half, the resulting trees could be very different. A procedure with low variance will produce similar results when applied to different data sets. The technique presented here, _bagging_, is a procedure designed to reduce the variance of a statistical learning method. Given a set of $n$ independent observations $Z_1, \ldots, Z_n$, each with its own variance $\sigma^2$, the variance of the mean of these observations $\overline{Z}$ is $\frac{\sigma^2}{n}$. Therefore, a natural way to reduce the variance, and hence increase the prediction accuracy of a learning method, is to build several training sets, fit a separate prediction model on each of them, and average the resulting predictions. Unfortunately, we usually do not have enough data to create different training sets. For this reason the _bootstrap_ technique is applied: we simulate the different training sets needed to build different trees and reduce the variance of the classifier. This is achieved by repeatedly drawing subsets from the original sample.
These subsets must be drawn using sampling with replacement, so that some elements will not be selected and others may be selected more than once in each resample. (_reference Hastie, chapter 5_) To apply the _bagging_ technique we build an ensemble of regression trees using different training sets generated by _bootstrapping_, and we give as the final result the average of the individual predictions of each of the trees that make up the ensemble. These trees are grown deep and are not pruned. By averaging the result of each of these trees, the variance is clearly reduced. Bagging has been shown to improve the accuracy of the results by combining a large number of trees into a single model. So far, we have described the _bagging_ procedure for the case of solving a problem with regression. This technique is also applicable to classification problems; in that case we take the class predicted most frequently by the different trees, that is, we use the mode of the predictions. From what was explained above, we know that each individual tree has high variance but low bias since, as noted, the trees are deep. By aggregating the information of the ensemble of trees we manage to reduce the variance. Computing the error (_out-of-bag_) When we use the _bootstrapping_ technique there is a simple way to estimate the error of the method without having to run cross-validation or create a specific validation subset. The fact that the trees are fitted repeatedly on samples generated by _bootstrapping_ means that, on average, each tree uses only about two thirds of the original observations. The remaining third is called _out-of-bag_ (OOB). If the observations used are recorded for each tree built, the response of an observation can be predicted using only the trees from which that observation was excluded (OOB), averaging them for a regression task or taking the mode for a classification task. Following this process, predictions can be obtained for all observations, and with them we can compute the so-called _OOB mean squared error_ for regression or the _OOB classification error_ for classification trees. Since the response of each observation is predicted using only the trees that were not fitted with that observation, this calculation serves as an estimate of the test-set error. (**Depends on whether this has been explained or not**) _In fact, if the number of trees is high enough, the OOB error is practically equivalent to the leave-one-out cross-validation error._ This avoids having to resort to cross-validation to optimize the hyperparameters. A bagging example As explained above, this technique is applicable to different learning methods. In the _sklearn.ensemble_ package of scikit-learn we find the _BaggingClassifier_ and _BaggingRegressor_ classes, which use decision trees as the default base estimator. Below, a short sketch shows how to read the OOB estimate discussed above, followed by a regression example with this technique:
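The sketch below is illustrative only (synthetic data, arbitrary hyperparameters); it relies on scikit-learn's `oob_score=True` option, which exposes the out-of-bag estimate as the fitted model's `oob_score_` attribute (an R² score for regressors):

```python
# Minimal OOB sketch: with oob_score=True, each observation is scored using only
# the trees whose bootstrap resample did not include it.
from sklearn.datasets import make_regression
from sklearn.ensemble import BaggingRegressor

X_oob, y_oob = make_regression(n_samples=200, n_features=4, noise=10, random_state=0)
bag = BaggingRegressor(n_estimators=100, oob_score=True, random_state=0)
bag.fit(X_oob, y_oob)
print("OOB R^2 estimate:", bag.oob_score_)
```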
###Code
from sklearn.ensemble import BaggingRegressor
from sklearn.datasets import make_regression
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
import numpy as np
X, y = make_regression(n_samples=50, n_features=1,
n_informative=1, n_targets=1, noise=10,
random_state=0, shuffle=True)
elem = np.linspace(np.min(X),np.max(X),150).reshape(-1, 1)
regressor = DecisionTreeRegressor(random_state=33)
regressor.fit(X, y)
predicciones = regressor.predict(elem)
regr = BaggingRegressor(n_estimators=100, random_state=0)
regr.fit(X, y)
y_predict= regr.predict(elem)
#plt.scatter(X, y);
#plt.plot(elem, y_predict,label="bagging");
#plt.plot(elem, predicciones, label="regressor tree")
#plt.legend()
###Output
_____no_output_____ |
lab 9/logistic_regression1.ipynb | ###Markdown
At first, I set beta_0 to be a positive number but I kept getting the error "This solver needs samples of at least 2 classes in the data, but the data contains only one class: 1.0". It turned out that when z is beyond a certain magnitude (greater than 5 or less than -5) the logistic function saturates very close to 1 (or 0), so with a large positive beta_0 every sample gets the same label and logistic_regression thinks the data it's getting contains only one class. So I set beta_0 to -50 so that $z$ falls in a range where logistic($z$) produces a spread of values between 0 and 1.
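As a quick sanity check of that saturation effect (independent of the `models` helper used below), evaluating the logistic function at a few values of z shows how flat it is outside roughly [-5, 5]:

```python
import numpy as np

def logistic(z):
    # standard logistic (sigmoid) function
    return 1.0 / (1.0 + np.exp(-z))

for z in [-10, -5, 0, 5, 10]:
    print(z, round(logistic(z), 4))
# -10 0.0, -5 0.0067, 0 0.5, 5 0.9933, 10 1.0
```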
###Code
result = models.logistic_regression("y ~ x1 + x2 + x1:x2", data = data)
models.simple_describe_lgr(result)
result = models.bootstrap_logistic_regression("y ~ x1 + x2 + x1:x2", data)
models.describe_bootstrap_lgr(result, 3)
###Output
_____no_output_____ |
for_ES.ipynb | ###Markdown
Using embeddings for similarity search Let’s suppose we had a large collection of questions and answers. A user can ask a question, and we want to retrieve the most similar question in our collection to help them find an answer.We could use text embeddings to allow for retrieving similar questions:During indexing, each question is run through a sentence embedding model to produce a numeric vector.When a user enters a query, it is run through the same sentence embedding model to produce a vector. To rank the responses, we calculate the vector similarity between each question and the query vector. When comparing embedding vectors, it is common to use cosine similarity.This notebook gives a simple example of how this could be accomplished in Elasticsearch. The main script indexes ~20,000 questions from the StackOverflow dataset, then allows the user to enter free-text queries against the dataset.
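The index mapping file referenced below (`data/posts/index.json`) is not shown in this notebook. A minimal mapping for this approach would look something like the sketch below — this is an assumption about the file's contents, not a copy of it; the key point is that `title_vector` is declared as a `dense_vector` whose `dims` matches the 512-dimensional output of the Universal Sentence Encoder:

```python
# Hypothetical minimal mapping for cosine-similarity search on title embeddings.
index_body = {
    "mappings": {
        "properties": {
            "title": {"type": "text"},
            "body": {"type": "text"},
            "title_vector": {"type": "dense_vector", "dims": 512},
        }
    }
}
# client.indices.create(index="posts", body=index_body)
```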
###Code
import json
import time
from elasticsearch import Elasticsearch
from elasticsearch.helpers import bulk
# Use tensorflow 1 behavior to match the Universal Sentence Encoder
# examples (https://tfhub.dev/google/universal-sentence-encoder/2).
import tensorflow.compat.v1 as tf
#For proper memory usage of GPU
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
try:
# Currently, memory growth needs to be the same across GPUs
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
logical_gpus = tf.config.experimental.list_logical_devices('GPU')
print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
except RuntimeError as e:
# Memory growth must be set before GPUs have been initialized
print(e)
##### INDEXING #####
def index_data():
print("Creating the 'posts' index.")
client.indices.delete(index=INDEX_NAME, ignore=[404])
with open(INDEX_FILE) as index_file:
source = index_file.read().strip()
client.indices.create(index=INDEX_NAME, body=source)
docs = []
count = 0
with open(DATA_FILE) as data_file:
for line in data_file:
line = line.strip()
doc = json.loads(line)
if doc["type"] != "question":
continue
docs.append(doc)
count += 1
if count % BATCH_SIZE == 0:
index_batch(docs)
docs = []
print("Indexed {} documents.".format(count))
if docs:
index_batch(docs)
print("Indexed {} documents.".format(count))
client.indices.refresh(index=INDEX_NAME)
print("Done indexing.")
def index_batch(docs):
titles = [doc["title"] for doc in docs]
title_vectors = embed_text(titles)
requests = []
for i, doc in enumerate(docs):
request = doc
request["_op_type"] = "index"
request["_index"] = INDEX_NAME
request["title_vector"] = title_vectors[i]
requests.append(request)
bulk(client, requests)
##### SEARCHING #####
def run_query_loop():
    for i in range(5):  # interrupts don't work here, so run the search function 5 times
try:
handle_query()
except KeyboardInterrupt:
break
def handle_query():
query = input("Enter query: ")
embedding_start = time.time()
query_vector = embed_text([query])[0]
embedding_time = time.time() - embedding_start
script_query = {
"script_score": {
"query": {"match_all": {}},
"script": {
"source": "cosineSimilarity(params.query_vector, doc['title_vector']) + 1.0",
"params": {"query_vector": query_vector}
}
}
}
search_start = time.time()
response = client.search(
index=INDEX_NAME,
body={
"size": SEARCH_SIZE,
"query": script_query,
"_source": {"includes": ["title", "body"]}
}
)
search_time = time.time() - search_start
print()
print("{} total hits.".format(response["hits"]["total"]["value"]))
print("embedding time: {:.2f} ms".format(embedding_time * 1000))
print("search time: {:.2f} ms".format(search_time * 1000))
for hit in response["hits"]["hits"]:
print("id: {}, score: {}".format(hit["_id"], hit["_score"]))
print(hit["_source"])
print()
##### EMBEDDING #####
def embed_text(text):
vectors = session.run(embeddings, feed_dict={text_ph: text})
return [vector.tolist() for vector in vectors]
##### MAIN SCRIPT #####
import tensorflow_hub as hub
tf.disable_eager_execution()
if __name__ == '__main__':
print('name=main')
INDEX_NAME = "posts"
INDEX_FILE = "data/posts/index.json"
DATA_FILE = "data/posts/posts.json"
BATCH_SIZE = 1000
SEARCH_SIZE = 5
GPU_LIMIT = 0.1
print("Downloading pre-trained embeddings from tensorflow hub...")
embed = hub.load("https://tfhub.dev/google/universal-sentence-encoder/4")
text_ph = tf.placeholder(tf.string)
embeddings = embed(text_ph)
print("Done.")
print("Creating tensorflow session...")
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = GPU_LIMIT
session = tf.Session(config=config)
print('running session...')
session.run(tf.global_variables_initializer())
#sess.run(tf.global_variables_initializer())
print('ran session...')
session.run(tf.tables_initializer())
print("Done.")
client = Elasticsearch()
'''
index_data()
'''
run_query_loop()
print("Closing tensorflow session...")
session.close()
print("Done.")
import pandas as pd
posts_data=pd.read_json('data/posts/posts.json', lines=True)
posts_data.head()
posts_data.shape
posts_data.type.describe()
###Output
_____no_output_____ |
panda/Support Vector Machine.ipynb | ###Markdown
Support Vector Machine with Gradient DescentIn this notebook, we will be building a Support Vector Machine to solve a 2-class classification problem by finding the optimal hyperplane that maximises the margin between the two data classes, through gradient descent.Support vectors are data points nearest to the hyperplane such that if they are removed, the position of the hyperplane will be affected. Dependencies
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
###Output
_____no_output_____
###Markdown
Data Preparation
###Code
# Input Data
X = np.array([[-2,4,-1],[4,1,-1],[1,6,-1],[2,4,-1],[6,2,-1]])
y = np.array([-1,-1,1,1,1])
###Output
_____no_output_____
###Markdown
Data Visualisation
###Code
def plot_data(X):
for d, sample in enumerate(X):
if d < 2:
# Plot negative samples (first 2)
plt.scatter(sample[0], sample[1], s=120, marker='_', linewidths=2)
else:
# Plot positive samples (last 3)
plt.scatter(sample[0], sample[1], s=120, marker='+', linewidths=2)
plot_data(X)
# Plot a random hyperplane
plt.plot([-2,6], [6,0.5])
###Output
_____no_output_____
###Markdown
Loss and Objective Function Hinge loss is used for maximum margin classification: $$c(x, y, f(x))= \begin{cases} 0,& \text{if } y*f(x)\geq 1\\ 1-y*f(x), & \text{else}\end{cases}$$y refers to true label, f(x) refers to the predicted label. Objective Function$$\underset{w}{min}\ \lambda\parallel w\parallel^2 + \ \sum_{i=1}^n\big(1-y_i \langle x_i,w \rangle\big)_+$$On the left side, is the regularization term, and on the right, is the loss function. Lambda here is equal to 1/ epochsThe objective function states out in mathematical terms the goal of this machine learning problem, which is to find the optimum W that defines the equation of the hyperplane. Derivative of Objective Function w.r.t WTo apply gradient descent to find the optimum value of W (the minima), we need the partial derivatives of the loss and regularization term with respect to W.$$\frac{\delta}{\delta w_k} \lambda\parallel w\parallel^2 \ = 2 \lambda w_k$$$$\frac{\delta}{\delta w_k} \big(1-y_i \langle x_i,w \rangle\big)_+ \ = \begin{cases} 0,& \text{if } y_i \langle x_i,w \rangle\geq 1\\ -y_ix_{ik}, & \text{else}\end{cases}$$ Gradient DescentIn each step of training, we then update W by subtracting from the current W, the product of learning rate and the partial derivatives:if $y_i⟨x_i,w⟩ < 1$:$$w = w + \eta (y_ix_i - 2\lambda w)$$else:$$w = w + \eta (-2\lambda w)$$
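To make the update rule concrete, here is a single step worked through with the first training sample from the data above, starting from w = 0 with η = 1 and λ = 1/epoch at epoch 1 (a hand-check of the code that follows, not part of the original notebook):

```python
import numpy as np

w = np.zeros(3)                        # initial weights
x1, y1 = np.array([-2, 4, -1]), -1     # first sample of X, y defined earlier
eta, lam = 1, 1.0                      # learning rate; lambda = 1/epoch = 1 at epoch 1

if y1 * np.dot(x1, w) < 1:             # 0 < 1, so the margin condition is violated
    w = w + eta * (y1 * x1 - 2 * lam * w)
print(w)                               # [ 2. -4.  1.]
```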
###Code
def svm_sgd_plot(X, Y):
#Initialize our SVMs weight vector with zeros (3 values)
w = np.zeros(len(X[0]))
#The learning rate
eta = 1
#how many iterations to train for
epochs = 100000
#store misclassifications so we can plot how they change over time
errors = []
#training part, gradient descent part
for epoch in range(1, epochs):
error = 0
for i, x in enumerate(X):
if (Y[i] * np.dot(X[i], w))<1:
#missclassified weights
w = w+ eta *((X[i] * Y[i]) + (-2 * (1/epoch) *w ))
error = 1
else:
w = w + eta *(-2 * (1/epoch) *w)
        errors.append(error)
    # return the learned weights so the separating hyperplane can be plotted later
    return w
def plot_errors(errors):
plt.plot(errors, '|')
plt.ylim(0.5,1.5)
    plt.gca().set_yticklabels([])
plt.xlabel('Epoch')
plt.ylabel('Misclassfied')
plt.show()
def svm_with_sgd(X, Y, lr, epochs):
w = np.zeros(len(X[0]))
errors = []
for epoch in range(1, epochs):
error = 0
for i, x in enumerate(X):
# Gradient Descent
if(Y[i] * np.dot(X[i], w) < 1):
w = w + lr * (X[i] * Y[i] + (-2 * (1/epoch) * w))
error = 1
else:
w = w + lr * (-2 * (1/epoch) * w)
errors.append(error)
plot_errors(errors)
return w
w = svm_sgd_plot(X, y)
w
plot_data(X)
# Adding test samples
plt.scatter(2, 2, s=120, marker='_', linewidths=2, color='yellow')
plt.scatter(4, 3, s=120, marker='+', linewidths=2, color='blue')
# Print hyper plane calculated
x2 = [w[0], w[1], -w[1], w[0]]
x3 = [w[0], w[1], w[1], -w[0]]
x2x3 = np.array([x2, x3])
nX, nY, U, V = zip(*x2x3)
ax = plt.gca()
ax.quiver(nX,nY,U,V,scale=1, color='blue')
###Output
_____no_output_____ |
helloworld/1_flow_export_to_S3_v02.ipynb | ###Markdown
Save to S3 with a SageMaker Processing Job 💡 Quick Start To save your processed data to S3, select the Run menu above and click Run all cells. View the status of the export job and the output S3 location.This notebook executes your Data Wrangler Flow `1_helloworld.flow` on the entire dataset using a SageMaker Processing Job and will save the processed data to S3.This notebook saves data from the step `Manage Columns` from `Source: Airot201101.Csv`. To save from a different step, go to Data Wrangler to select a new step to export. --- Contents1. [Inputs and Outputs](Inputs-and-Outputs)1. [Run Processing Job](Run-Processing-Job) 1. [Job Configurations](Job-Configurations) 1. [Create Processing Job](Create-Processing-Job) 1. [Job Status & S3 Output Location](Job-Status-&-S3-Output-Location)1. [Optional Next Steps]((Optional)Next-Steps) 1. [Load Processed Data into Pandas]((Optional)-Load-Processed-Data-into-Pandas) 1. [Train a model with SageMaker]((Optional)Train-a-model-with-SageMaker)--- Inputs and OutputsThe below settings configure the inputs and outputs for the flow export. 💡 Configurable Settings In Input - Source you can configure the data sources that will be used as input by Data Wrangler1. For S3 sources, configure the source attribute that points to the input S3 prefixes2. For all other sources, configure attributes like query_string, database in the source's DatasetDefinition object.If you modify the inputs the provided data must have the same schema and format as the data used in the Flow. You should also re-execute the cells in this section if you have modified the settings in any data sources.
###Code
from sagemaker.processing import ProcessingInput, ProcessingOutput
from sagemaker.dataset_definition.inputs import AthenaDatasetDefinition, DatasetDefinition, RedshiftDatasetDefinition
data_sources = []
###Output
_____no_output_____
###Markdown
Input - S3 Source: airOT201101.csv
###Code
data_sources.append(ProcessingInput(
source="s3://from-public-data/carrier-perf/transformed/airOT2011/airOT201101.csv", # You can override this to point to other dataset on S3
destination="/opt/ml/processing/airOT201101.csv",
input_name="airOT201101.csv",
s3_data_type="S3Prefix",
s3_input_mode="File",
s3_data_distribution_type="FullyReplicated"
))
###Output
_____no_output_____
###Markdown
Output: S3 settings 💡 Configurable Settings 1. bucket: you can configure the S3 bucket where Data Wrangler will save the output. The default bucket from the SageMaker notebook session is used. 2. flow_export_id: A randomly generated export id. The export id must be unique to ensure the results do not conflict with other flow exports. 3. s3_output_prefix: you can configure the directory name in your bucket where your data will be saved.
###Code
import time
import uuid
import sagemaker
# Sagemaker session
sess = sagemaker.Session()
# You can configure this with your own bucket name, e.g.
# bucket = "my-bucket"
bucket = sess.default_bucket()
print(f"Data Wrangler export storage bucket: {bucket}")
# unique flow export ID
flow_export_id = f"{time.strftime('%d-%H-%M-%S', time.gmtime())}-{str(uuid.uuid4())[:8]}"
flow_export_name = f"flow-{flow_export_id}"
###Output
Data Wrangler export storage bucket: sagemaker-us-west-2-506926764659
###Markdown
Below are the inputs required by the SageMaker Python SDK to launch a processing job.
###Code
# Output name is auto-generated from the select node's ID + output name from the flow file.
output_name = "b788d6a4-beaf-44c1-a0f2-db07f3c97b00.default"
s3_output_prefix = f"export-{flow_export_name}/output"
s3_output_path = f"s3://{bucket}/{s3_output_prefix}"
print(f"Flow S3 export result path: {s3_output_path}")
processing_job_output = ProcessingOutput(
output_name=output_name,
source="/opt/ml/processing/output",
destination=s3_output_path,
s3_upload_mode="EndOfJob"
)
###Output
Flow S3 export result path: s3://sagemaker-us-west-2-506926764659/export-flow-04-17-14-40-211a5e9b/output
###Markdown
Upload Flow to S3To use the Data Wrangler as an input to the processing job, first upload your flow file to Amazon S3.
###Code
import os
import json
import boto3
# name of the flow file which should exist in the current notebook working directory
flow_file_name = "1_helloworld.flow"
# Load .flow file from current notebook working directory
!echo "Loading flow file from current notebook working directory: $PWD"
with open(flow_file_name) as f:
flow = json.load(f)
# Upload flow to S3
s3_client = boto3.client("s3")
s3_client.upload_file(flow_file_name, bucket, f"data_wrangler_flows/{flow_export_name}.flow")
flow_s3_uri = f"s3://{bucket}/data_wrangler_flows/{flow_export_name}.flow"
print(f"Data Wrangler flow {flow_file_name} uploaded to {flow_s3_uri}")
###Output
Loading flow file from current notebook working directory: /root/helloworld
Data Wrangler flow 1_helloworld.flow uploaded to s3://sagemaker-us-west-2-506926764659/data_wrangler_flows/flow-04-17-14-40-211a5e9b.flow
###Markdown
The Data Wrangler Flow is also provided to the Processing Job as an input source which we configure below.
###Code
## Input - Flow: 1_helloworld.flow
flow_input = ProcessingInput(
source=flow_s3_uri,
destination="/opt/ml/processing/flow",
input_name="flow",
s3_data_type="S3Prefix",
s3_input_mode="File",
s3_data_distribution_type="FullyReplicated"
)
###Output
_____no_output_____
###Markdown
Run Processing Job Job Configurations 💡 Configurable Settings You can configure the following settings for Processing Jobs. If you change any configurations you will need to re-execute this and all cells below it by selecting the Run menu above and click Run Selected Cells and All Below1. IAM role for executing the processing job. 2. A unique name of the processing job. Give a unique name every time you re-execute processing jobs3. Data Wrangler Container URL.4. Instance count, instance type and storage volume size in GB.5. Content type for each output. Data Wrangler supports CSV as default and Parquet.6. Network Isolation settings
###Code
# IAM role for executing the processing job.
iam_role = sagemaker.get_execution_role()
# Unique processing job name. Give a unique name every time you re-execute processing jobs
processing_job_name = f"data-wrangler-flow-processing-{flow_export_id}"
# Data Wrangler Container URL.
container_uri = "174368400705.dkr.ecr.us-west-2.amazonaws.com/sagemaker-data-wrangler-container:1.x"
# Processing Job Instance count and instance type.
instance_count = 2
instance_type = "ml.m5.4xlarge"
#-- REF. Instance Types: https://aws.amazon.com/sagemaker/pricing/ --
# Size in GB of the EBS volume to use for storing data during processing
volume_size_in_gb = 30
# Content type for each output. Data Wrangler supports CSV as default and Parquet.
output_content_type = "CSV"
# Network Isolation mode; default is off
enable_network_isolation = False
# Output configuration used as processing job container arguments
output_config = {
output_name: {
"content_type": output_content_type
}
}
###Output
Couldn't call 'get_role' to get Role ARN from role name AmazonSageMaker-ExecutionRole-20210503T205912 to get Role path.
Assuming role was created in SageMaker AWS console, as the name contains `AmazonSageMaker-ExecutionRole`. Defaulting to Role ARN with service-role in path. If this Role ARN is incorrect, please add IAM read permissions to your role or supply the Role Arn directly.
###Markdown
Create Processing JobTo launch a Processing Job, you will use the SageMaker Python SDK to create a Processor function.
###Code
from sagemaker.processing import Processor
from sagemaker.network import NetworkConfig
processor = Processor(
role=iam_role,
image_uri=container_uri,
instance_count=instance_count,
instance_type=instance_type,
volume_size_in_gb=volume_size_in_gb,
network_config=NetworkConfig(enable_network_isolation=enable_network_isolation),
sagemaker_session=sess
)
# Start Job
processor.run(
inputs=[flow_input] + data_sources,
outputs=[processing_job_output],
arguments=[f"--output-config '{json.dumps(output_config)}'"],
wait=False,
logs=False,
job_name=processing_job_name
)
###Output
Job Name: data-wrangler-flow-processing-04-17-14-40-211a5e9b
Inputs: [{'InputName': 'flow', 'AppManaged': False, 'S3Input': {'S3Uri': 's3://sagemaker-us-west-2-506926764659/data_wrangler_flows/flow-04-17-14-40-211a5e9b.flow', 'LocalPath': '/opt/ml/processing/flow', 'S3DataType': 'S3Prefix', 'S3InputMode': 'File', 'S3DataDistributionType': 'FullyReplicated', 'S3CompressionType': 'None'}}, {'InputName': 'airOT201101.csv', 'AppManaged': False, 'S3Input': {'S3Uri': 's3://from-public-data/carrier-perf/transformed/airOT2011/airOT201101.csv', 'LocalPath': '/opt/ml/processing/airOT201101.csv', 'S3DataType': 'S3Prefix', 'S3InputMode': 'File', 'S3DataDistributionType': 'FullyReplicated', 'S3CompressionType': 'None'}}]
Outputs: [{'OutputName': 'b788d6a4-beaf-44c1-a0f2-db07f3c97b00.default', 'AppManaged': False, 'S3Output': {'S3Uri': 's3://sagemaker-us-west-2-506926764659/export-flow-04-17-14-40-211a5e9b/output', 'LocalPath': '/opt/ml/processing/output', 'S3UploadMode': 'EndOfJob'}}]
###Markdown
Job Status & S3 Output LocationBelow you wait for processing job to finish. If it finishes successfully, the raw parameters used by the Processing Job will be printed
###Code
s3_job_results_path = f"s3://{bucket}/{s3_output_prefix}/{processing_job_name}"
print(f"Job results are saved to S3 path: {s3_job_results_path}")
job_result = sess.wait_for_processing_job(processing_job_name)
job_result
###Output
Job results are saved to S3 path: s3://sagemaker-us-west-2-506926764659/export-flow-04-17-14-40-211a5e9b/output/data-wrangler-flow-processing-04-17-14-40-211a5e9b
.................................................................................!
###Markdown
(Optional)Next StepsNow that data is available on S3 you can use other SageMaker components that take S3 URIs as input such as SageMaker Training, Built-in Algorithms, etc. Similarly you can load the dataset into a Pandas dataframe in this notebook for further inspection and work. The examples below show how to do both of these steps. By default optional steps do not run automatically, set `run_optional_steps` to True if you want to execute optional steps
###Code
run_optional_steps = False
# This will stop the below cells from executing if "Run All Cells" was used on the notebook.
if not run_optional_steps:
raise SystemExit("Stop here. Do not automatically execute optional steps.")
###Output
_____no_output_____
###Markdown
(Optional) Load Processed Data into PandasWe use the [AWS Data Wrangler library](https://github.com/awslabs/aws-data-wrangler) to load the exported dataset into a Pandas dataframe for a preview of first 1000 rows.
###Code
!pip install -q awswrangler pandas
import awswrangler as wr
chunksize = 1000
if output_content_type.upper() == "CSV":
dfs = wr.s3.read_csv(s3_output_path, chunksize=chunksize)
elif output_content_type.upper() == "PARQUET":
dfs = wr.s3.read_parquet(s3_output_path, chunked=chunksize)
else:
print(f"Unexpected output content type {output_content_type}")
df = next(dfs)
df
###Output
_____no_output_____
###Markdown
(Optional)Train a model with SageMakerNow that the data has been processed, you may want to train a model using the data. The following shows an example of doing so using a popular algorithm - XGBoost. For more information on algorithms available in SageMaker, see [Getting Started with SageMaker Algorithms](https://docs.aws.amazon.com/sagemaker/latest/dg/algos.html). It is important to note that the following XGBoost objective ['binary', 'regression', 'multiclass'] hyperparameters, or content_type may not be suitable for the output data, and will require changes to train a proper model. Furthermore, for CSV training, the algorithm assumes that the target variable is in the first column. For more information on SageMaker XGBoost, see https://docs.aws.amazon.com/sagemaker/latest/dg/xgboost.html. Set Training Data pathWe set the training input data path from the output of the Data Wrangler processing job..
###Code
s3_training_input_path = s3_job_results_path
print(f"training input data path: {s3_training_input_path}")
###Output
_____no_output_____
###Markdown
Configure the algorithm and training jobThe Training Job hyperparameters are set. For more information on XGBoost Hyperparameters, see https://xgboost.readthedocs.io/en/latest/parameter.html.
###Code
region = boto3.Session().region_name
container = sagemaker.image_uris.retrieve("xgboost", region, "1.2-1")
hyperparameters = {
"max_depth":"5",
"objective": "reg:squarederror",
"num_round": "10",
}
train_content_type = (
"application/x-parquet" if output_content_type.upper() == "PARQUET"
else "text/csv"
)
train_input = sagemaker.inputs.TrainingInput(
s3_data=s3_training_input_path,
content_type=train_content_type,
)
###Output
_____no_output_____
###Markdown
Start the Training JobThe TrainingJob configurations are set using the SageMaker Python SDK Estimator, and which is fit using the training data from the Processing Job that was run earlier.
###Code
estimator = sagemaker.estimator.Estimator(
container,
iam_role,
hyperparameters=hyperparameters,
instance_count=1,
instance_type="ml.m5.2xlarge",
)
estimator.fit({"train": train_input})
###Output
_____no_output_____ |
notebooks/Input Data.ipynb | ###Markdown
Input DataThis notebook parses some of the CARDiPS files to take only data that we need for this project. This notebook will only run on the Frazer lab cluster.
###Code
import glob
import os
import subprocess
import cdpybio as cpb
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import ciepy
import cardipspy as cpy
%matplotlib inline
outdir = os.path.join(ciepy.root, 'output',
'input_data')
cpy.makedir(outdir)
private_outdir = os.path.join(ciepy.root, 'private_output',
'input_data')
cpy.makedir(private_outdir)
dy = '/projects/CARDIPS/data/database/20160129'
fn = os.path.join(dy, 'baseline_analyte.tsv')
baseline_analyte = pd.read_table(fn, index_col=1)
fn = os.path.join(dy, 'baseline_wgsisaac.tsv')
baseline_wgsisaac = pd.read_table(fn, index_col=0)
fn = os.path.join(dy, 'baseline_ipsc.tsv')
baseline_ipsc = pd.read_table(fn, index_col=0)
fn = os.path.join(dy, 'baseline_wgs.tsv')
baseline_wgs = pd.read_table(fn, index_col=0)
fn = os.path.join(dy, 'baseline_cnv.tsv')
baseline_cnv = pd.read_table(fn, index_col=0)
fn = os.path.join(dy, 'baseline_rnas.tsv')
baseline_rnas = pd.read_table(fn, index_col=0)
fn = os.path.join(dy, 'baseline_ibd.tsv')
baseline_ibd = pd.read_table(fn, index_col=0)
fn = os.path.join(dy, 'baseline_snpa.tsv')
baseline_snpa = pd.read_table(fn, index_col=0)
fn = os.path.join(dy, 'baseline_tissue.tsv')
baseline_tissue = pd.read_table(fn, index_col=1)
fn = os.path.join(dy, 'family1070_rnas.tsv')
family1070_rnas = pd.read_table(fn, index_col=0)
fn = os.path.join(dy, 'family1070_tissue.tsv')
family1070_tissue = pd.read_table(fn, index_col=1)
fn = os.path.join(dy, 'subject_pedigree.tsv')
subject_pedigree = pd.read_table(fn, index_col=0)
fn = os.path.join(dy, 'subject_family.tsv')
subject_family = pd.read_table(fn, index_col=0)
fn = os.path.join(dy, 'subject_subject.tsv')
subject_subject = pd.read_table(fn, index_col=3)
# The only columns that I should use from data_* tables
# are the seq_id and status columns. Others may be wrong.
fn = os.path.join(dy, 'data_wgs.tsv')
data_wgs = pd.read_table(fn, index_col=2)
fn = os.path.join(dy, 'data_snpa.tsv')
data_snpa = pd.read_table(fn, index_col=0)
fn = os.path.join(dy, 'data_array.tsv')
data_array = pd.read_table(fn, index_col=0)
# fn = os.path.join(dy, 'data_chips.tsv')
# data_chips = pd.read_table(fn, index_col=0)
fn = os.path.join(dy, 'data_atacs.tsv')
data_atacs = pd.read_table(fn, index_col=0)
# fn = os.path.join(dy, 'data_metha.tsv')
# data_metha = pd.read_table(fn, index_col=0)
# fn = os.path.join(dy, 'data_hic.tsv')
# data_hic = pd.read_table(fn, index_col=0)
fn = os.path.join(dy, 'data_rnas.tsv')
data_rnas = pd.read_table(fn, index_col=2)
fn = os.path.join(dy, 'data_sequence.tsv')
data_sequence = pd.read_table(fn, index_col=0)
###Output
_____no_output_____
###Markdown
Array CNVsI'm going to make a table of all CNVs identified by arrays. Some iPSCs didn't have any CNVs. For now, if an iPSC isn't in the CNV table, that means that it either didn't have CNVs or we didn't test that clone/passage number for CNVs.
###Code
cnv = baseline_cnv.merge(baseline_snpa, left_on='snpa_id', right_index=True,
suffixes=['_cnv', '_snpa'])
cnv = cnv.merge(baseline_analyte, left_on='analyte_id', right_index=True,
suffixes=['_cnv', '_analyte'])
cnv = cnv.merge(baseline_tissue, left_on='tissue_id', right_index=True,
suffixes=['_cnv', '_tissue'])
cnv = cnv[['type', 'chr', 'start', 'end', 'len', 'primary_detect_method',
'clone', 'passage', 'subject_id']]
###Output
_____no_output_____
###Markdown
RNA-seq Samples for this StudyI'm going to use baseline and family 1070 samples.
###Code
# Get family1070 samples.
tdf = family1070_rnas[family1070_rnas.comment.isnull()]
tdf = tdf.merge(family1070_tissue, left_on='tissue_id', right_index=True,
suffixes=['_rna', '_tissue'])
tdf = tdf[tdf.cell_type == 'iPSC']
tdf.index = tdf.rnas_id
tdf['status'] = data_rnas.ix[tdf.index, 'status']
tdf = tdf[tdf.status == 0]
tdf = tdf[['ipsc_clone_number', 'ipsc_passage', 'subject_id']]
tdf.columns = ['clone', 'passage', 'subject_id']
tdf['isolated_by'] = 'p'
tdf.index.name = 'rna_id'
# Get the iPSC eQTL samples.
rna = baseline_rnas[baseline_rnas.rnas_id.isnull() == False]
rna.index = rna.rnas_id
rna.index.name = 'rna_id'
rna['status'] = data_rnas.ix[rna.index, 'status']
rna = rna[rna.status == 0]
#rna = rna.ix[censor[censor == False].index]
rna = rna.merge(baseline_analyte, left_on='analyte_id', right_index=True,
suffixes=['_rnas', '_analyte'])
rna = rna.merge(baseline_tissue, left_on='tissue_id', right_index=True,
suffixes=['_rnas', '_tissue'])
rna = rna[['clone', 'passage', 'subject_id']]
rna['isolated_by'] = 'a'
rna = pd.concat([rna, tdf])
# Get 222 subjects.
cohort222 = baseline_ipsc.merge(baseline_tissue, left_on='tissue_id',
right_index=True, suffixes=['_ipsc', '_tissue'])
n = len(set(rna.subject_id) & set(cohort222.subject_id))
print('We have {} of the 222 subjects in the "222 cohort."'.format(n))
rna['sequence_id'] = data_rnas.ix[rna.index, 'sequence_id']
###Output
_____no_output_____
###Markdown
I can use all of these samples that passed QC for various expression analyses. eQTL samplesNow I'm going to identify one sample per subject to use for eQTL analysis.I'll start by keeping samples whose clone/passage number matches up with those from the 222 cohort.
###Code
rna['in_eqtl'] = False
samples = (cohort222.subject_id + ':' + cohort222.clone.astype(int).astype(str) +
':' + cohort222.passage.astype(int).astype(str))
t = rna.dropna(subset=['passage'])
t.loc[:, ('sample')] = (t.subject_id + ':' + t.clone.astype(int).astype(str) +
':' + t.passage.astype(int).astype(str))
t = t[t['sample'].apply(lambda x: x in samples.values)]
# These samples are in the 222 cohort and the eQTL analysis.
rna['in_222'] = False
rna.ix[t.index, 'in_222'] = True
rna.ix[t.index, 'in_eqtl'] = True
###Output
_____no_output_____
###Markdown
Now I'll add in any samples for which we have CNVs but weren't in the 222.
###Code
samples = (cnv.subject_id + ':' + cnv.clone.astype(int).astype(str) +
':' + cnv.passage.astype(int).astype(str))
t = rna.dropna(subset=['passage'])
t.loc[:, ('sample')] = (t.subject_id + ':' + t.clone.astype(int).astype(str) +
':' + t.passage.astype(int).astype(str))
t = t[t['sample'].apply(lambda x: x in samples.values)]
t = t[t.subject_id.apply(lambda x: x not in rna.ix[rna.in_eqtl, 'subject_id'].values)]
# These samples aren't in the 222 but we have a measured CNV for them.
rna.ix[t.index, 'in_eqtl'] = True
###Output
_____no_output_____
###Markdown
Now I'll add in samples where the clone was in the 222 but we don't have the same passagenumber.
###Code
samples = (cohort222.subject_id + ':' + cohort222.clone.astype(int).astype(str))
t = rna[rna.in_eqtl == False]
t = t[t.subject_id.apply(lambda x: x not in rna.ix[rna.in_eqtl, 'subject_id'].values)]
t['samples'] = t.subject_id + ':' + t.clone.astype(int).astype(str)
t = t[t.samples.apply(lambda x: x in samples.values)]
# These clones are in the 222, we just have a different passage number.
rna['clone_in_222'] = False
rna.ix[rna.in_222, 'clone_in_222'] = True
rna.ix[t.index, 'clone_in_222'] = True
rna.ix[t.index, 'in_eqtl'] = True
###Output
_____no_output_____
###Markdown
Now I'll add in any samples from subjects we don't yet have in the eQTL analysis.
###Code
t = rna[rna.in_eqtl == False]
t = t[t.subject_id.apply(lambda x: x not in rna.ix[rna.in_eqtl, 'subject_id'].values)]
rna.ix[t.index, 'in_eqtl'] = True
n = rna.in_eqtl.value_counts()[True]
print('We potentially have {} distinct subjects in the eQTL analysis.'.format(n))
###Output
We potentially have 215 distinct subjects in the eQTL analysis.
###Markdown
WGS SamplesNow I'll assign WGS IDs for each RNA-seq sample. Some subjects have multiple WGS samplesfor different cell types. I'll preferentially use blood, fibroblast, and finally iPSC WGS.
###Code
wgs = baseline_wgs.merge(baseline_analyte, left_on='analyte_id', right_index=True,
suffixes=['_wgs', '_analyte'])
wgs = wgs.merge(baseline_tissue, left_on='tissue_id', right_index=True,
suffixes=['_wgs', '_tissue'])
wgs = wgs.merge(baseline_analyte, left_on='analyte_id', right_index=True,
suffixes=['_wgs', '_analyte'])
wgs = wgs.dropna(subset=['wgs_id'])
wgs.index = wgs.wgs_id
wgs['status'] = data_wgs.ix[wgs.index, 'status']
wgs = wgs[wgs.status == 0]
rna['wgs_id'] = ''
for i in rna.index:
s = rna.ix[i, 'subject_id']
t = wgs[wgs.subject_id == s]
if t.shape[0] == 1:
rna.ix[i, 'wgs_id'] = t.index[0]
elif t.shape[0] > 1:
if 'Blood' in t.source.values:
t = t[t.source == 'Blood']
elif 'iPSC' in t.source.values:
t = t[t.source == 'iPSC']
if t.shape[0] == 1:
rna.ix[i, 'wgs_id'] = t.index[0]
else:
print('?: {}'.format(i))
else:
#print('No WGS: {}'.format(i))
print('No WGS: {}'.format(rna.ix[i, 'subject_id']))
rna.ix[i, 'in_eqtl'] = False
rna.ix[rna['wgs_id'] == '', 'wgs_id'] = np.nan
n = rna.in_eqtl.value_counts()[True]
print('We are left with {} subjects for the eQTL analysis.'.format(n))
###Output
We are left with 215 subjects for the eQTL analysis.
###Markdown
I'm going to keep one WGS sample per person in the cohort (preferentially blood, fibroblast, and finally iPSC) even if we don'thave RNA-seq in case we want to look at phasing etc.
###Code
vc = wgs.subject_id.value_counts()
vc = vc[vc > 1]
keep = []
for s in vc.index:
t = wgs[wgs.subject_id == s]
if t.shape[0] == 1:
keep.append(t.index[0])
elif t.shape[0] > 1:
if 'Blood' in t.source.values:
t = t[t.source == 'Blood']
elif 'iPSC' in t.source.values:
t = t[t.source == 'iPSC']
if t.shape[0] == 1:
keep.append(t.index[0])
else:
print('?: {}'.format(i))
wgs = wgs.drop(set(wgs[wgs.subject_id.apply(lambda x: x in vc.index)].index) - set(keep))
wgs = wgs[['source', 'subject_id']]
wgs.columns = ['cell', 'subject_id']
subject = subject_subject.copy(deep=True)
subject = subject.ix[set(rna.subject_id) | set(wgs.subject_id)]
subject = subject[['sex', 'age', 'family_id', 'father_id', 'mother_id',
'twin_id', 'ethnicity_group']]
###Output
_____no_output_____
###Markdown
unrelateds = rna[rna.in_eqtl]unrelateds = unrelateds.merge(subject, left_on='subject_id', right_index=True)unrelateds = unrelateds.drop_duplicates(subset=['family_id'])rna['in_diff_families'] = Falserna.ix[unrelateds.index, 'in_unrelateds'] = True
###Code
fn = os.path.join(outdir, 'cnvs.tsv')
if not os.path.exists(fn):
cnv.to_csv(fn, sep='\t')
rna.index.name = 'sample_id'
fn = os.path.join(outdir, 'rnaseq_metadata.tsv')
if not os.path.exists(fn):
rna.to_csv(fn, sep='\t')
fn = os.path.join(outdir, 'subject_metadata.tsv')
if not os.path.exists(fn):
subject.to_csv(fn, sep='\t')
fn = os.path.join(outdir, 'wgs_metadata.tsv')
if not os.path.exists(fn):
wgs.to_csv(fn, sep='\t')
###Output
_____no_output_____
###Markdown
RNA-seq Data
###Code
dy = '/projects/CARDIPS/pipeline/RNAseq/combined_files'
# STAR logs.
fn = os.path.join(dy, 'star_logs.tsv')
logs = pd.read_table(fn, index_col=0, low_memory=False)
logs = logs.ix[rna.index]
logs.index.name = 'sample_id'
fn = os.path.join(outdir, 'star_logs.tsv')
if not os.path.exists(fn):
logs.to_csv(fn, sep='\t')
# Picard stats.
fn = os.path.join(dy, 'picard_metrics.tsv')
picard = pd.read_table(fn, index_col=0, low_memory=False)
picard = picard.ix[rna.index]
picard.index.name = 'sample_id'
fn = os.path.join(outdir, 'picard_metrics.tsv')
if not os.path.exists(fn):
picard.to_csv(fn, sep='\t')
# Expression values.
fn = os.path.join(dy, 'rsem_tpm_isoforms.tsv')
tpm = pd.read_table(fn, index_col=0, low_memory=False)
tpm = tpm[rna.index]
fn = os.path.join(outdir, 'rsem_tpm_isoforms.tsv')
if not os.path.exists(fn):
tpm.to_csv(fn, sep='\t')
fn = os.path.join(dy, 'rsem_tpm.tsv')
tpm = pd.read_table(fn, index_col=0, low_memory=False)
tpm = tpm[rna.index]
fn = os.path.join(outdir, 'rsem_tpm.tsv')
if not os.path.exists(fn):
tpm.to_csv(fn, sep='\t')
fn = os.path.join(dy, 'rsem_expected_counts.tsv')
ec = pd.read_table(fn, index_col=0, low_memory=False)
ec = ec[rna.index]
fn = os.path.join(outdir, 'rsem_expected_counts.tsv')
if not os.path.exists(fn):
ec.to_csv(fn, sep='\t')
ec_sf = cpb.analysis.deseq2_size_factors(ec.astype(int), meta=rna, design='~subject_id')
fn = os.path.join(outdir, 'rsem_expected_counts_size_factors.tsv')
if not os.path.exists(fn):
ec_sf.to_csv(fn, sep='\t')
ec_n = (ec / ec_sf)
fn = os.path.join(outdir, 'rsem_expected_counts_norm.tsv')
if not os.path.exists(fn):
ec_n.to_csv(fn, sep='\t')
fn = os.path.join(dy, 'gene_counts.tsv')
gc = pd.read_table(fn, index_col=0, low_memory=False)
gc = gc[rna.index]
fn = os.path.join(outdir, 'gene_counts.tsv')
if not os.path.exists(fn):
gc.to_csv(fn, sep='\t')
gc_sf = cpb.analysis.deseq2_size_factors(gc, meta=rna, design='~subject_id')
fn = os.path.join(outdir, 'gene_counts_size_factors.tsv')
if not os.path.exists(fn):
gc_sf.to_csv(fn, sep='\t')
gc_n = (gc / gc_sf)
fn = os.path.join(outdir, 'gene_counts_norm.tsv')
if not os.path.exists(fn):
gc_n.to_csv(fn, sep='\t')
# Allele counts.
cpy.makedir(os.path.join(private_outdir, 'allele_counts'))
fns = glob.glob('/projects/CARDIPS/pipeline/RNAseq/sample/'
'*/*mbased/*mbased_input.tsv')
fns = [x for x in fns if x.split('/')[-3] in rna.index]
for fn in fns:
new_fn = os.path.join(private_outdir, 'allele_counts', os.path.split(fn)[1])
if not os.path.exists(new_fn):
os.symlink(fn, new_fn)
# MBASED ASE results.
dy = '/projects/CARDIPS/pipeline/RNAseq/combined_files'
df = pd.read_table(os.path.join(dy, 'mbased_major_allele_freq.tsv'), index_col=0)
df = df[rna.index].dropna(how='all')
df.to_csv(os.path.join(outdir, 'mbased_major_allele_freq.tsv'), sep='\t')
df = pd.read_table(os.path.join(dy, 'mbased_p_val_ase.tsv'), index_col=0)
df = df[rna.index].dropna(how='all')
df.to_csv(os.path.join(outdir, 'mbased_p_val_ase.tsv'), sep='\t')
df = pd.read_table(os.path.join(dy, 'mbased_p_val_het.tsv'), index_col=0)
df = df[rna.index].dropna(how='all')
df.to_csv(os.path.join(outdir, 'mbased_p_val_het.tsv'), sep='\t')
cpy.makedir(os.path.join(private_outdir, 'mbased_snv'))
fns = glob.glob('/projects/CARDIPS/pipeline/RNAseq/sample/*/*mbased/*_snv.tsv')
fns = [x for x in fns if x.split('/')[-3] in rna.index]
for fn in fns:
new_fn = os.path.join(private_outdir, 'mbased_snv', os.path.split(fn)[1])
if not os.path.exists(new_fn):
os.symlink(fn, new_fn)
###Output
_____no_output_____
###Markdown
Variant Calls
###Code
fn = os.path.join(private_outdir, 'autosomal_variants.vcf.gz')
if not os.path.exists(fn):
os.symlink('/projects/CARDIPS/pipeline/WGS/mergedVCF/CARDIPS_201512.PASS.vcf.gz',
fn)
os.symlink('/projects/CARDIPS/pipeline/WGS/mergedVCF/CARDIPS_201512.PASS.vcf.gz.tbi',
fn + '.tbi')
###Output
_____no_output_____
###Markdown
External Data. I'm going to use the expression estimates for some samples from GSE73211.
###Code
fn = os.path.join(outdir, 'GSE73211.tsv')
if not os.path.exists(fn):
os.symlink('/projects/CARDIPS/pipeline/RNAseq/combined_files/GSE73211.tsv', fn)
GSE73211 = pd.read_table(fn, index_col=0)
dy = '/projects/CARDIPS/pipeline/RNAseq/combined_files'
fn = os.path.join(dy, 'rsem_tpm.tsv')
tpm = pd.read_table(fn, index_col=0, low_memory=False)
tpm = tpm[GSE73211.index]
fn = os.path.join(outdir, 'GSE73211_tpm.tsv')
if not os.path.exists(fn):
tpm.to_csv(fn, sep='\t')
###Output
_____no_output_____ |
dd_1/Part 4/Section 03 - Project 1/02 - TimeZone Class.ipynb | ###Markdown
Project 1: TimeZone class Let's start with the timezone class. This one will have two instance attributes, offset and name. I'm going to create those as read-only properties. Offsets should be provided as a timespan (timedelta) of hours and minutes - we'll allow specifying the hour and minute offsets separately in the __init__, but the offset property will combine those as a timespan object.
###Code
import numbers
from datetime import timedelta
class TimeZone:
def __init__(self, name, offset_hours, offset_minutes):
if name is None or len(str(name).strip()) == 0:
raise ValueError('Timezone name cannot be empty.')
self._name = str(name).strip()
if not isinstance(offset_hours, numbers.Integral):
raise ValueError('Hour offset must be an integer.')
if not isinstance(offset_minutes, numbers.Integral):
raise ValueError('Minutes offset must be an integer.')
if offset_minutes < -59 or offset_minutes > 59:
            raise ValueError('Minutes offset must be between -59 and 59 (inclusive).')
# for time delta sign of minutes will be set to sign of hours
offset = timedelta(hours=offset_hours, minutes=offset_minutes)
# offsets are technically bounded between -12:00 and 14:00
# see: https://en.wikipedia.org/wiki/List_of_UTC_time_offsets
if offset < timedelta(hours=-12, minutes=0) or offset > timedelta(hours=14, minutes=0):
raise ValueError('Offset must be between -12:00 and +14:00.')
self._offset_hours = offset_hours
self._offset_minutes = offset_minutes
self._offset = offset
@property
def offset(self):
return self._offset
@property
def name(self):
return self._name
def __eq__(self, other):
return (isinstance(other, TimeZone) and
self.name == other.name and
self._offset_hours == other._offset_hours and
self._offset_minutes == other._offset_minutes)
def __repr__(self):
return (f"TimeZone(name='{self.name}', "
f"offset_hours={self._offset_hours}, "
f"offset_minutes={self._offset_minutes})")
###Output
_____no_output_____
###Markdown
Let's try it out and make sure it's working:
###Code
tz1 = TimeZone('ABC', -2, -15)
tz1.name
from datetime import datetime
dt = datetime.utcnow()
print(dt)
print(dt + tz1.offset)
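# Illustrative extra check (editor's addition): the validation rules above should reject
# an out-of-range minutes offset with a ValueError.
try:
    TimeZone('XYZ', 1, 75)
except ValueError as ex:
    minutes_validation_error = str(ex)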
###Output
2019-06-02 21:12:15.937254
|
Iris_DecisionTrees.ipynb | ###Markdown
Decision Trees. Decision trees can be used for linear and nonlinear classification/regression tasks. Classification objective: the CART algorithm used by Scikit-learn splits the training data to minimize impurity; use DecisionTreeClassifier. Regression objective: the CART algorithm used by Scikit-learn splits the training data to minimize MSE; use DecisionTreeRegressor. Key Terms: **CART algorithm:** (Classification and Regression Tree). Used by Scikit-learn. Produces only binary trees: nonleaf nodes always have two children (yes/no answers). **Impurity:** a node is "pure" (gini=0) if all training instances it applies to belong to the same class. **MSE:** Mean Squared Error. Measures the average of the squares of the errors or deviations, that is, the difference between the estimator and what is estimated.
###Code
# import libraries
%matplotlib inline
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
import matplotlib.pyplot as plt
import numpy as np
# Load and visualize data
iris = datasets.load_iris()
X = iris["data"][:,2:] # petal length and width
Y = iris["target"]
# X.shape
# Y.shape
plt.plot(X[:, 0][Y==0], X[:, 1][Y==0], "yo",label="Iris-Setosa")
plt.plot(X[:, 0][Y==1], X[:, 1][Y==1], "bs",label="Iris-Versicolor")
plt.plot(X[:,0][Y==2], X[:,1][Y==2],"g^", label="Iris-Virginica")
plt.xlabel("Petal length", fontsize=14)
plt.ylabel("Petal width", fontsize=14)
plt.legend()
# Train decision tree classifier
dtree_clf = DecisionTreeClassifier(max_depth=3)
dtree_clf.fit(X,Y)
# Predict
print(dtree_clf.predict_proba([[6, 1.5]])) # class probabilities for a flower with petal length of 6cm and width of 1.5cm
dtree_clf.predict([[5, 1.5]])
# array([2]): iris virginica
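# Editor's illustration of the "impurity" that the CART splits minimise:
# Gini impurity of a node = 1 - sum(p_k^2) over its class proportions p_k.
def gini_impurity(class_proportions):
    return 1.0 - np.sum(np.square(class_proportions))

gini_pure = gini_impurity([1.0, 0.0, 0.0])    # 0.0 -> a "pure" node
gini_mixed = gini_impurity([1/3, 1/3, 1/3])   # ~0.667 -> maximally mixed 3-class node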
# Visualize the decision logic tree
from sklearn.tree import export_graphviz
# generates a .dot file.
export_graphviz(
dtree_clf,
out_file = "iris_tree.dot",
feature_names=iris.feature_names[2:],
class_names=iris.target_names,
rounded=True,
filled=True
)
# convert to png file with command below (download graphviz package):
# $ dot -Tpng iris_tree.dot -o iris_tree.png
## Or use this online graphviz vizualizer: https://dreampuf.github.io/GraphvizOnline/
###Output
_____no_output_____
###Markdown
###Code
# Visualize decision boundaries
from matplotlib.colors import ListedColormap
def plot_decision_boundary(clf, X, Y, axes=[0, 7.5, 0, 3], iris=True, legend=False, plot_training=True):
x1s = np.linspace(axes[0], axes[1], 100)
x2s = np.linspace(axes[2], axes[3], 100)
x1, x2 = np.meshgrid(x1s, x2s)
X_new = np.c_[x1.ravel(), x2.ravel()]
y_pred = clf.predict(X_new).reshape(x1.shape)
custom_cmap = ListedColormap(['#fafab0','#9898ff','#a0faa0'])
plt.contourf(x1, x2, y_pred, alpha=0.3, cmap=custom_cmap)
if not iris:
custom_cmap2 = ListedColormap(['#7d7d58','#4c4c7f','#507d50'])
plt.contour(x1, x2, y_pred, cmap=custom_cmap2, alpha=0.8)
if plot_training:
        plt.plot(X[:, 0][Y==0], X[:, 1][Y==0], "yo", label="Iris-Setosa")
        plt.plot(X[:, 0][Y==1], X[:, 1][Y==1], "bs", label="Iris-Versicolor")
        plt.plot(X[:, 0][Y==2], X[:, 1][Y==2], "g^", label="Iris-Virginica")
plt.axis(axes)
if iris:
plt.xlabel("Petal length", fontsize=14)
plt.ylabel("Petal width", fontsize=14)
else:
plt.xlabel(r"$x_1$", fontsize=18)
plt.ylabel(r"$x_2$", fontsize=18, rotation=0)
if legend:
plt.legend(loc="lower right", fontsize=14)
plt.figure(figsize=(8, 4))
plot_decision_boundary(dtree_clf, X, Y)
plt.plot([2.45, 2.45], [0, 3], "k-", linewidth=2)
plt.plot([2.45, 7.5], [1.75, 1.75], "k--", linewidth=2)
plt.plot([4.95, 4.95], [0, 1.75], "k:", linewidth=2)
plt.plot([4.85, 4.85], [1.75, 3], "k:", linewidth=2)
plt.text(1.40, 1.0, "Depth=0", fontsize=15)
plt.text(3.2, 1.80, "Depth=1", fontsize=13)
plt.text(4.05, 0.5, "(Depth=2)", fontsize=11)
###Output
_____no_output_____ |
1_type_variables.ipynb | ###Markdown
**Type of variables** \begin{array}{lcccl}\hline \text{Type of Variables} & \text{conveys } \leq & \text{conveys } = & \text{Contains true zero} & \text{Example}\\\hline \text{Nominal or Categorical} & No & No & No & \text{Psychological Diagnoses, Personality Types}\\ \text{Ordinal or Ranked} & Yes & No & No & \text{Finish places in a race}\\ \text{Interval} & Yes & Yes & No & \text{Celsius, Intelligence Test (IQ)}\\ \text{Ratio} & Yes & Yes & Yes & \text{Height, Weight}\\\hline\end{array} * **Nominal Variables**: Measurement scale in which numbers serve only as labels and do not indicate any quantitative relationship.* **Ordinal Variables**: Measurement scale in which numbers are ranks; equal differences between numbers do not represent equal differences between the things measured.* **Interval Variables**: Measurement scale in which equal differences between numbers represent equal differences in the thing measured. The zero point is arbitrarily defined.* **Ratio Variables**: Measurement scale with characteristics of interval scale, but it has a *true zero point*. Difference between Interval and Ratio variables:- Convert $100 ^\circ C$ and $50 ^\circ C$ to Fahrenheit (F = 1.8C + 32) and suddenly the "twice as much" relationship disappears.- Convert 16 kilograms and 4 kilograms to pounds (1 kg = 2.2 lbs) and the "four times heavier" relationship is maintained. The difference is subtle. Therefore, those variables are referred to as ***continuous variables***. **Visualization**
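A quick numeric check of the interval-vs-ratio point above (an illustrative aside added for clarity; it reuses the temperature and weight examples):
###Code
# Celsius is an interval scale: the 100/50 "twice as much" ratio is not preserved under F = 1.8*C + 32.
c_hot, c_cold = 100, 50
f_hot, f_cold = 1.8 * c_hot + 32, 1.8 * c_cold + 32
ratio_c = c_hot / c_cold                          # 2.0
ratio_f = f_hot / f_cold                          # ~1.74, the ratio disappears
# Kilograms are a ratio scale: the 16/4 "four times heavier" ratio survives conversion to pounds (1 kg = 2.2 lbs).
kg_heavy, kg_light = 16, 4
ratio_kg = kg_heavy / kg_light                    # 4.0
ratio_lb = (2.2 * kg_heavy) / (2.2 * kg_light)    # 4.0, the ratio is maintained
###Output
_____no_output_____
###Markdown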
###Code
#Ref: Statistical Abstract of the United States: 2013, 2012
# Required imports
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
df = pd.read_csv("data/women_man_height.csv")
df.head(3)
###Output
_____no_output_____
###Markdown
Are men taller than women?
###Code
#Simple frequency distribution
get_freq_dist = lambda ser: ser.value_counts().reset_index(name="count").rename(columns = {"index":"height"})
men = get_freq_dist(df["height-men"])
women = get_freq_dist(df["height-women"])
# display_side_by_side is a custom helper (assumed to be defined elsewhere) that renders DataFrames next to each other
display_side_by_side([men, women], ["Man", "Women"], space=(50,30))
###Output
_____no_output_____
###Markdown
**Graphs of Frequency Distributions** **Histogram** * Used to present the frequencies of continuous variables.* There is no "best" number of bins, and different bin sizes can reveal different features of the data.* A common rule of thumb for the number of bins is $ k=\lceil \sqrt n \text{ } \rceil$, where $n$ is the number of data points (used by Excel). There are many ways to calculate the number of bins: https://en.wikipedia.org/wiki/Histogram#Number_of_bins_and_width. **Equally spaced bins**
###Code
aux = pd.wide_to_long(df.reset_index(),
stubnames='height',
i='index',
j='sex',
sep='-',
suffix=r'\w+').reset_index(level=1)
fig, axs = plt.subplots(1,2,sharey=True,figsize=(15,4))
qtd_bin = 10
heights = aux["height"].unique()
ax1 = sns.countplot(data=aux, x="height", hue="sex",
edgecolor="black", alpha=0.7, ax = axs[0],)
ax2 = sns.histplot(data=aux, x="height", hue="sex",
bins=qtd_bin, stat = "count", ax = axs[1],)
ax1.set_title("Simple Count")
ax2.set_title("Grouped Count")
plt.tight_layout()
plt.show()
calc_bins = aux["height"].agg(["max","min"])
bin_range = calc_bins.loc["max"] - calc_bins.loc["min"]
bin_step = bin_range/qtd_bin
bins_position = [calc_bins.loc["min"] + bin_step*i for i in range(qtd_bin)]
bins_position
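# Editor's illustration of the rule of thumb quoted above: k = ceil(sqrt(n)) bins.
import math
n_obs = len(aux)                                # number of height observations in the long-format frame
k_rule_of_thumb = math.ceil(math.sqrt(n_obs))   # compare with the qtd_bin = 10 chosen above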
###Output
_____no_output_____
###Markdown
**Variable bin widths**
###Code
fig, axs = plt.subplots(1,2,figsize=(20,6))
diff_heights_size = len(aux["height"].unique())
ax1 = sns.histplot(data=aux, x="height", hue="sex",
binwidth=1, stat = "count", ax = axs[0],)
ax2 = sns.histplot(data=aux, x="height", hue="sex",
binwidth=15, stat = "count", ax = axs[1],)
plt.tight_layout()
plt.show()
###Output
_____no_output_____ |
notebooks/BLC_Network_Routing_Explore_1.0.ipynb | ###Markdown
Network Routing. Exploration by Ben Chaney. osmnx basic setup, pull a network.
###Code
# The goal of this exploration is to get a workflow for routing and/or isochrones using OSMnx.
# This will be used to:
#  1. Join our table of origin-destination pairs for individual bus stops onto a link-node network, then create topology.
#  2. Calculate the travel sheds using that network, with our derived travel delay values.
# from google.colab import drive
# drive.mount('/content/gdrive')
import pandas as pd
import geopandas as gpd
import matplotlib.pyplot as plt
import networkx as nx
import osmnx as ox
from descartes import PolygonPatch
from shapely.geometry import Point, LineString, Polygon
ox.config(log_console=True, use_cache=True)
ox.__version__
%matplotlib inline
# This is testing how the osmnx library could fit in the workflow that we're developing.
# Right now, I'll assume that we can get a lat-long pair for any location we're intrested in.
# Stop at SE Morrison/9th, Stop ID 4026, Lat/Long 45.517244, -122.656436
# Stop at SE Morrison/Grand, Stop ID 4013, Lat/Long 45.517260, -122.660528
transit_stop_locations = [
{"Stop_ID": "4026", "Stop_Description": "SE Morrison/9th", "Lat": 45.517244, "Long": -122.656436 },
{"Stop_ID": "4026", "Stop_Description": "SE Morrison/9th", "Lat": 45.517244, "Long": -122.656436 }
]
transit_stop_locations
print(transit_stop_locations[1]["Stop_ID"])
print(transit_stop_locations[1]["Lat"])
print(transit_stop_locations[1]["Long"])
# define a point at Stop 4026
location_point = (transit_stop_locations[0]["Lat"], transit_stop_locations[0]["Long"])
# create network from point, inside bounding box of N, S, E, W each 750m from point
G2 = ox.graph_from_point(location_point, distance=750, distance_type='bbox', network_type='drive')
G2 = ox.project_graph(G2)
fig, ax = ox.plot_graph(G2, node_size=30, node_color='#66cc66')
# get one of each network type and save to disk as image and shapefile
for nt in ['all_private', 'all', 'bike', 'walk', 'drive', 'drive_service']:
G = ox.graph_from_point(location_point, distance=750, distance_type='bbox', network_type=nt)
filename = 'Stop_ID_4026-{}'.format(nt)
# save street network as GraphML file to work with in networkx or gephi
ox.save_graphml(G, filename='network_{}.graphml'.format(nt))
ox.save_graph_shapefile(G, filename=filename)
fig, ax = ox.plot_graph(G, node_color='none', save=True, filename=filename, show=False)
###Output
_____no_output_____
###Markdown
Isochrones Example (adapted from the osmnx tutorial) Setup
###Code
import geopandas as gpd
import matplotlib.pyplot as plt
import networkx as nx
import osmnx as ox
from descartes import PolygonPatch
from shapely.geometry import Point, LineString, Polygon
ox.config(log_console=True, use_cache=True)
ox.__version__
# configure the place, network type, trip times, and travel speed
# place = 'Portland, Oregon, USA'
network_type = 'walk'
trip_times = [5, 10, 15] #in minutes
travel_speed = 4.5 #walking speed in km/hour
###Output
_____no_output_____
###Markdown
Download and prep the street network
###Code
# download the street network
G = ox.graph_from_point(location_point, distance=2000, distance_type='bbox', network_type='walk')
# find the centermost node and then project the graph to UTM
gdf_nodes = ox.graph_to_gdfs(G, edges=False)
x, y = gdf_nodes['geometry'].unary_union.centroid.xy
center_node = ox.get_nearest_node(G, (y[0], x[0]))
G = ox.project_graph(G)
# add an edge attribute for time in minutes required to traverse each edge
meters_per_minute = travel_speed * 1000 / 60 #km per hour to m per minute
for u, v, k, data in G.edges(data=True, keys=True):
data['time'] = data['length'] / meters_per_minute
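# Editor's sketch: with the per-edge 'time' attribute in place, point-to-point walking time
# (in minutes) between two nodes reduces to a weighted shortest path. The destination node
# below is arbitrary and only for illustration.
example_dest = list(G.nodes())[-1]
try:
    walk_minutes = nx.shortest_path_length(G, center_node, example_dest, weight='time')
except nx.NetworkXNoPath:
    walk_minutes = None  # the arbitrary node happens not to be reachable from the center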
###Output
_____no_output_____
###Markdown
Plot the nodes you can reach on foot within each time. How far can you walk in 5, 10, and 15 minutes from the origin node? We'll use NetworkX to induce a subgraph of G within each distance, based on trip time and travel speed.
###Code
# get one color for each isochrone
iso_colors = ox.get_colors(n=len(trip_times), cmap='Reds', start=0.3, return_hex=True)
# color the nodes according to isochrone then plot the street network
node_colors = {}
for trip_time, color in zip(sorted(trip_times, reverse=True), iso_colors):
subgraph = nx.ego_graph(G, center_node, radius=trip_time, distance='time')
for node in subgraph.nodes():
node_colors[node] = color
nc = [node_colors[node] if node in node_colors else 'none' for node in G.nodes()]
ns = [20 if node in node_colors else 0 for node in G.nodes()]
fig, ax = ox.plot_graph(G, fig_height=8, node_color=nc, node_size=ns, node_alpha=0.8, node_zorder=2)
###Output
_____no_output_____
###Markdown
Plot the time-distances as isochrones. How far can you walk in 5, 10, and 15 minutes from the origin node? We'll use a convex hull, which isn't perfectly accurate. A concave hull would be better, but shapely doesn't offer that.
###Code
# make the isochrone polygons
isochrone_polys = []
for trip_time in sorted(trip_times, reverse=True):
subgraph = nx.ego_graph(G, center_node, radius=trip_time, distance='time')
node_points = [Point((data['x'], data['y'])) for node, data in subgraph.nodes(data=True)]
bounding_poly = gpd.GeoSeries(node_points).unary_union.convex_hull
isochrone_polys.append(bounding_poly)
# plot the network then add isochrones as colored descartes polygon patches
fig, ax = ox.plot_graph(G, fig_height=8, show=False, close=False, edge_color='k', edge_alpha=0.2, node_color='none')
for polygon, fc in zip(isochrone_polys, iso_colors):
patch = PolygonPatch(polygon, fc=fc, ec='none', alpha=0.6, zorder=-1)
ax.add_patch(patch)
plt.show()
###Output
_____no_output_____
###Markdown
Or, plot isochrones as buffers to get more faithful isochrones than convex hulls can offer, in the style of http://kuanbutts.com/2017/12/16/osmnx-isochrones/
###Code
def make_iso_polys(G, edge_buff=25, node_buff=50, infill=False):
isochrone_polys = []
for trip_time in sorted(trip_times, reverse=True):
subgraph = nx.ego_graph(G, center_node, radius=trip_time, distance='time')
node_points = [Point((data['x'], data['y'])) for node, data in subgraph.nodes(data=True)]
nodes_gdf = gpd.GeoDataFrame({'id': subgraph.nodes()}, geometry=node_points)
nodes_gdf = nodes_gdf.set_index('id')
edge_lines = []
for n_fr, n_to in subgraph.edges():
f = nodes_gdf.loc[n_fr].geometry
t = nodes_gdf.loc[n_to].geometry
edge_lines.append(LineString([f,t]))
n = nodes_gdf.buffer(node_buff).geometry
e = gpd.GeoSeries(edge_lines).buffer(edge_buff).geometry
all_gs = list(n) + list(e)
new_iso = gpd.GeoSeries(all_gs).unary_union
# try to fill in surrounded areas so shapes will appear solid and blocks without white space inside them
if infill:
new_iso = Polygon(new_iso.exterior)
isochrone_polys.append(new_iso)
return isochrone_polys
isochrone_polys = make_iso_polys(G, edge_buff=25, node_buff=0, infill=True)
fig, ax = ox.plot_graph(G, fig_height=8, show=False, close=False, edge_color='k', edge_alpha=0.2, node_color='none')
for polygon, fc in zip(isochrone_polys, iso_colors):
patch = PolygonPatch(polygon, fc=fc, ec='none', alpha=0.6, zorder=-1)
ax.add_patch(patch)
plt.show()
###Output
_____no_output_____ |
examples/relaxations.ipynb | ###Markdown
These examples demonstrate the primary methods for interacting with Coramin relaxation objects.
###Code
import pyomo.environ as pe
import coramin
from coramin.utils.plot_relaxation import plot_relaxation
m = pe.ConcreteModel()
m.x = pe.Var(bounds=(-0.5, 1))
m.y = pe.Var()
m.x_sq = coramin.relaxations.PWXSquaredRelaxation()
m.x_sq.build(x=m.x, aux_var=m.y, use_linear_relaxation=True)
opt = pe.SolverFactory('gurobi_persistent')
plot_relaxation(m, m.x_sq, opt)
m.x_sq.add_partition_point(0)
m.x_sq.rebuild()
plot_relaxation(m, m.x_sq, opt)
m.x.value = 0.2
m.x_sq.add_oa_point()
m.x_sq.rebuild()
plot_relaxation(m, m.x_sq, opt)
m.x.value = 0.6
m.x_sq.add_cut()
plot_relaxation(m, m.x_sq, opt)
m.x_sq.clear_partitions()
m.x_sq.rebuild()
plot_relaxation(m, m.x_sq, opt)
m.x_sq.clear_oa_points()
m.x_sq.rebuild()
plot_relaxation(m, m.x_sq, opt)
m.x_sq.use_linear_relaxation = False
m.x_sq.rebuild()
plot_relaxation(m, m.x_sq, opt)
m.x_sq.use_linear_relaxation = True
m.x_sq.rebuild()
plot_relaxation(m, m.x_sq, opt)
m.x.setlb(0.2)
m.x_sq.rebuild()
plot_relaxation(m, m.x_sq, opt)
del m.x_sq
m.x.setlb(0.2)
m.x.setub(5)
m.log_x = coramin.relaxations.PWUnivariateRelaxation()
m.log_x.build(x=m.x, aux_var=m.y, f_x_expr=pe.log(m.x), shape=coramin.utils.FunctionShape.CONCAVE)
plot_relaxation(m, m.log_x, opt)
m.log_x.relaxation_side = coramin.utils.RelaxationSide.OVER
m.log_x.rebuild()
plot_relaxation(m, m.log_x, opt)
m.log_x.relaxation_side = coramin.utils.RelaxationSide.UNDER
m.log_x.rebuild()
plot_relaxation(m, m.log_x, opt)
del m.log_x
import math
m.x.setlb(-math.pi/2)
m.x.setub(math.pi/2)
m.y.setlb(-1)
m.y.setub(1)
m.sin_x = coramin.relaxations.PWSinRelaxation()
m.sin_x.build(x=m.x, aux_var=m.y)
plot_relaxation(m, m.sin_x, opt)
m.sin_x.add_partition_point(-1)
m.sin_x.rebuild()
plot_relaxation(m, m.sin_x, opt)
###Output
Warning for adding constraints: zero or small (< 1e-13) coefficients, ignored
###Markdown
The relaxations for sine and cosine only support a domain for x from -pi/2 to pi/2. If the lower bound is less than -pi/2 or the upper bound is larger than pi/2, then no relaxation is built.
###Code
m.x.setlb(-math.pi/2 - 0.1)
m.x.setub(math.pi/2)
m.sin_x.rebuild()
plot_relaxation(m, m.sin_x, opt)
###Output
_____no_output_____ |
.ipynb_checkpoints/bike-sharing-checkpoint.ipynb | ###Markdown
**Importing data from CSV and checking the columns against the data dictionary**
###Code
data = pd.read_csv('day.csv')
data.head()
data.info()
data.shape
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 730 entries, 0 to 729
Data columns (total 16 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 instant 730 non-null int64
1 dteday 730 non-null object
2 season 730 non-null int64
3 yr 730 non-null int64
4 mnth 730 non-null int64
5 holiday 730 non-null int64
6 weekday 730 non-null int64
7 workingday 730 non-null int64
8 weathersit 730 non-null int64
9 temp 730 non-null float64
10 atemp 730 non-null float64
11 hum 730 non-null float64
12 windspeed 730 non-null float64
13 casual 730 non-null int64
14 registered 730 non-null int64
15 cnt 730 non-null int64
dtypes: float64(4), int64(11), object(1)
memory usage: 91.4+ KB
###Markdown
We can see from the above that:- There are **730 rows** in the dataset- There are **0 null rows**- There are **16 columns** in the dataset **Data Dictionary*** **instant**: record index* **dteday** : date* **season** : season (1:spring, 2:summer, 3:fall, 4:winter)* **yr** : year (0: 2018, 1:2019)* **mnth** : month ( 1 to 12)* **holiday** : weather day is a holiday or not (extracted from http://dchr.dc.gov/page/holiday-schedule)* **weekday** : day of the week* **workingday** : if day is neither weekend nor holiday is 1, otherwise is 0.* **weathersit** : - 1: Clear, Few clouds, Partly cloudy, Partly cloudy - 2: Mist + Cloudy, Mist + Broken clouds, Mist + Few clouds, Mist - 3: Light Snow, Light Rain + Thunderstorm + Scattered clouds, Light Rain + Scattered clouds - 4: Heavy Rain + Ice Pallets + Thunderstorm + Mist, Snow + Fog* **temp** : temperature in Celsius* **atemp**: feeling temperature in Celsius* **hum**: humidity* **windspeed**: wind speed* **casual**: count of casual users* **registered**: count of registered users* **cnt**: count of total rental bikes including both casual and registered
###Code
# Mapping of various col values as per data dictionary
map_dict = {
'yr' : {'0':'2018','1':'2019'},
'season' : {'1':'spring', '2':'summer', '3':'fall', '4':'winter'},
'mnth' : {'1':'Jan', '2':'Feb', '3':'Mar', '4':'Apr', '5':'May', '6':'Jun', '7':'Jul', '8':'Aug', '9':'Sep', '10':'Oct', '11':'Nov', '12':'Dec'},
'weekday' : {'0': 'Tue','1':'Wed','2':'Thu', '3': 'Fri', '4': 'Sat', '5': 'Sun', '6': 'Monday'},
'weathersit' : {'1':'Clear', '2': 'Cloudy', '3': 'Light Rain', '4': 'Heavy Rain'}
}
###Output
_____no_output_____
###Markdown
**Problem Statement** Develop a model to understand the factors affecting the demand for shared bikes in the American market, based on the data provided. The company wants to know: 1. Which variables are significant in predicting the demand for shared bikes. 2. How well those variables describe the bike demand. The target variable is **"cnt"**. **Data Quality Checks and Cleanup** - Since our target variable is **cnt**, we do not need **registered** and **casual** users- Since **instant** is the record index and has no impact on the demand for the bikes, we can drop it- Since we have **yr**, **mnth**, **weekday**, we don't need **dteday** for this analysis
###Code
data.drop(['casual', 'registered', 'instant', 'dteday'], inplace=True, axis = 1)
data.head()
###Output
_____no_output_____
###Markdown
Convert **mnth, season, weekday, weathersit** to string dtype as they are categorical variables
###Code
data[['mnth','season','weekday','weathersit']] = data[['mnth','season','weekday','weathersit']].astype(str)
data.info()
# Method to map number codes in the data to their string values
def map_code_to_str(col):
return data[col].map(map_dict[col])
# converting season, mnth, weathersit to string values using the data disctionary
data['season'] = map_code_to_str('season')
data['mnth'] = map_code_to_str('mnth')
data['weathersit'] = map_code_to_str('weathersit')
data.head()
# Validate if any NaN values after replacement
data.info()
data[['season', 'mnth', 'weathersit']].isnull().sum()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 730 entries, 0 to 729
Data columns (total 12 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 season 730 non-null object
1 yr 730 non-null int64
2 mnth 730 non-null object
3 holiday 730 non-null int64
4 weekday 730 non-null object
5 workingday 730 non-null int64
6 weathersit 730 non-null object
7 temp 730 non-null float64
8 atemp 730 non-null float64
9 hum 730 non-null float64
10 windspeed 730 non-null float64
11 cnt 730 non-null int64
dtypes: float64(4), int64(4), object(4)
memory usage: 68.6+ KB
###Markdown
**The above validation confirms that all values are replaced properly**
###Code
# Understanding the range and scale of data
data.describe()
###Output
_____no_output_____
###Markdown
Visual Analysis of continuous variables from Data and qualitative validation of possibility of a Linear Regression Model
###Code
# Pairplot against 'cnt' to check if a linear relationship is visible
sns.pairplot(data, y_vars=['cnt'] , hue='yr')
plt.show()
###Output
_____no_output_____
###Markdown
**Conclusion:** The plots *temp vs cnt* and *atemp vs cnt* show a clear linear relationship, hence a ***Linear Regression Model is possible*** **Additional Observations:*** temp and atemp show a very similar impact on cnt. It is highly likely that they are correlated* The 2019 points clearly sit higher, so cnt/demand is higher for 2019, which aligns with the expectation that the bike-renting platform is getting more and more popular in America over time. Identifying Correlations in independent variables
###Code
#Correlation Matrix
plt.figure(figsize=(15,12))
corr_plot = sns.heatmap(data.corr(), cmap='coolwarm_r', vmin=-1, vmax=1, annot=True)
corr_plot.set_title('Correlation Matrix', fontdict={'fontsize':18}, pad=16);
plt.show(corr_plot)
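# Editor's addition: put numbers on the two relationships highlighted in the observations below.
temp_atemp_corr = data['temp'].corr(data['atemp'])                            # close to +1 -> near-duplicate features
cnt_corr = data[['temp', 'atemp', 'hum', 'windspeed']].corrwith(data['cnt'])  # sign/strength of each numeric driver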
###Output
_____no_output_____
###Markdown
**Observations:*** temp and atemp are highly correlated* temp and atemp are positively correlated with cnt; this aligns with the common logic that people avoid riding bikes on cold days* windspeed is negatively correlated with cnt; this validates the common logic that it's difficult to ride bikes on windy days Visual Analysis of categorical variables from Data
###Code
def draw_boxplot(x_var, y_var='cnt', df=data):
return sns.boxplot(x=x_var,y=y_var, data=df)
def draw_barplot(x_var, y_var='cnt', df=data):
return sns.barplot(x=x_var, y=y_var, data=df)
# Plotting boxplot and barplot side-by-side for each categorical variable to understand the pattern
plt.figure(figsize=(20, 40))
plt.subplot(7,2,1)
draw_boxplot('season','cnt', data)
plt.subplot(7,2,2)
draw_barplot('season','cnt', data)
plt.subplot(7,2,3)
draw_boxplot('mnth','cnt', data)
plt.subplot(7,2,4)
draw_barplot('mnth','cnt', data)
plt.subplot(7,2,5)
draw_boxplot('yr','cnt', data)
plt.subplot(7,2,6)
draw_barplot('yr','cnt', data)
plt.subplot(7,2,7)
draw_boxplot('weathersit','cnt', data)
plt.subplot(7,2,8)
draw_barplot('weathersit','cnt', data)
plt.subplot(7,2,9)
draw_boxplot('workingday','cnt', data)
plt.subplot(7,2,10)
draw_barplot('workingday','cnt', data)
plt.subplot(7,2,11)
draw_boxplot('weekday','cnt', data)
plt.subplot(7,2,12)
draw_barplot('weekday','cnt', data)
plt.subplot(7,2,13)
draw_boxplot('holiday','cnt', data)
plt.subplot(7,2,14)
draw_barplot('holiday','cnt', data)
plt.show()
###Output
_____no_output_____ |
Model/Ranking.ipynb | ###Markdown
Ranking Algorithm Installs and imports Install all required libraries
###Code
# Uncomment line below to install all required libraries
# !pip3 install -r ../requirements.txt -q
###Output
_____no_output_____
###Markdown
Import required libraries
###Code
import re
import os
from nltk.corpus import stopwords
import pandas as pd
import pickle
from keras.models import load_model
from keras.preprocessing.sequence import pad_sequences
import datetime as dt
###Output
_____no_output_____
###Markdown
Importing dataset
###Code
df = pd.read_csv('./data/Final_Predictions.csv')
df.head()
###Output
_____no_output_____
###Markdown
Making bins for each MP
###Code
mp_list = df['mp'].unique()
mp_list
date_bins = dict()
for mp in mp_list:
maxdtobj = dt.datetime.strptime(max(df[df['mp']==mp].tweet_date), '%Y-%m-%d')
mindtobj = dt.datetime.strptime(min(df[df['mp']==mp].tweet_date), '%Y-%m-%d')
days =(maxdtobj.date() - mindtobj.date()).days
if mp not in date_bins:
# try:
# tmp = date_bins[days]
# except:
# tmp = []
# tmp.append(mp)
# date_bins[days] = tmp
date_bins[mp] = days
date_bins
rank_df = pd.DataFrame()
rank_df['mp'] = mp_list
rank_df['bin'] = date_bins.values()
rank_df
###Output
_____no_output_____
###Markdown
Compute positive and negative tweet percentage
###Code
percentage = []
for politician in mp_list:
mp_df = df[df.mp == politician]
# print(mp_df.head())
# print('__________________')
sentiment_values = mp_df.Ensemble_predictions.value_counts()
# print(politician)
# print(sentiment_values)
# print(sentiment_values.sum())
percentage.append((sentiment_values[1]/sentiment_values.sum())*100)
# print(percentage[-1])
rank_df['Positive_Percentage'] = percentage
rank_df['Negative_Percentage'] = 100-rank_df['Positive_Percentage']
rank_df
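# Ranking logic: group MPs by bin (the span of days their tweets cover), then within each
# bin order them by positive-tweet percentage, highest first.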
rank_df = rank_df.sort_values('bin')
new_df = pd.DataFrame()
for days in rank_df.bin.unique():
temp_df = rank_df[rank_df['bin'] == days]
temp_df = temp_df.sort_values('Positive_Percentage', ascending = False)
new_df = pd.concat([new_df,temp_df])
rank_df = new_df
rank_df
###Output
_____no_output_____
###Markdown
Write to CSV
###Code
rank_df.to_csv('./data/rank.csv', index = False)
###Output
_____no_output_____ |
01-particles-decays-units.ipynb | ###Markdown
Particles, decays, HEP units **Quick intro to the following packages**- `hepunits`.- `Particle`.- `DecayLanguage`. hepunits - The HEP system of unitsThe package ``hepunits`` collects the most commonly used units and constants in theHEP System of Units, which are *not* the same as the international system of units (aka SI units).The HEP system of units is based on the following:| Quantity | Name | Unit|| ------------------ :| ----------------- :| -- :|| Length | millimeter | mm || Time | nanosecond | ns || Energy | Mega electron Volt| MeV || Positron charge | eplus | || Temperature | kelvin | K || Amount of substance| mole | mol || Luminous intensity | candela | cd || Plane angle | radian | rad || Solid angle | steradian | sr |Note: no need to make use of sophisticated packages (e.g. as in AstroPy) since we basically never need to change systems of units (we never use ergs as energy, for example ;-)). **Basic usage is straightforward, though it may be confusing at first. Remember, all variables are written wrt to the units:**
###Code
from hepunits import mm, ns, MeV, eplus, GeV, kelvin, mol, cd, rad, sr
mm == ns == MeV == eplus == kelvin == mol == cd == rad == sr == 1
GeV == 1000*MeV
###Output
_____no_output_____
###Markdown
Add two quantities with different length units:
###Code
from hepunits import units as u
1*u.meter + 5*u.cm
###Output
_____no_output_____
###Markdown
Indeed, the result is in HEP units, i.e. mm. To obtain the result in meters instead:
###Code
(1*u.meter + 5*u.cm) / u.meter
###Output
_____no_output_____
###Markdown
Do you need to play a bit more to get a proper feeling? This next (non-academic) exercise should help you ... **Quick time-of-flight study**Let's try to play with units in a meaningful way, in a kind of exercise that physicists encounter. Imagine you are investigating time-of-flight (ToF) detectors for particle identification. The time it takes a particle of velocity $\beta = v/c= pc/E$ to travel a distance $L$ is given by$$\mathrm{ToF} = \frac{L}{c \beta}$$It results that the mass $m$ of the particle can be determined from$$m = \frac{p}{c}\sqrt{\frac{c^2 \mathrm{ToF}^2}{L^2}-1}$$provided the path length and the momentum can be measured, say, by a tracking system. What are typical ToF differences say for (charged) kaons and pions?It is practical to perform the calculation as$$\Delta \mathrm{ToF} = \frac{L}{c}(\frac{1}{\beta_1} - \frac{1}{\beta_2})\,,$$with $\frac{1}{\beta} = \sqrt{1+m^2c^2/p^2}$.
###Code
from hepunits import c_light, GeV, meter, ps, ns
import numpy as np
def ToF(m, p, L):
"""Time-of-Flight = particle path length L / (c * beta)"""
one_over_beta = np.sqrt(1 + m*m/(p*p)) # no c factors here because m and p given without them, hence c's cancel out ;-)
return (L * one_over_beta /c_light)
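# Editor's illustration (not in the original): the inverse relation quoted above,
# m = p * sqrt((c*ToF/L)^2 - 1), with m and p both expressed in energy units as in ToF().
def mass_from_tof(p, L, tof):
    """Recover the particle mass from its momentum, path length and time-of-flight."""
    one_over_beta = c_light * tof / L
    return p * np.sqrt(one_over_beta**2 - 1)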
###Output
_____no_output_____
###Markdown
For convenience, get hold of data information for the proton, K+ and pi+ (see the `Particle` package further down this notebook):
###Code
from particle.particle.literals import proton, pi_plus, K_plus # particle name literals
###Output
_____no_output_____
###Markdown
Calculate the difference in ToF between 10 GeV kaons and pions travelling over 10 meters:
###Code
delta = ( ToF(K_plus.mass, 10*GeV, 10*meter) - ToF(pi_plus.mass, 10*GeV, 10*meter) ) / ps
print("At 10 GeV, Delta-TOF(K-pi) over 10 meters = {:.5} ps".format(delta))
###Output
At 10 GeV, Delta-TOF(K-pi) over 10 meters = 37.374 ps
###Markdown
Let's get a bit fancier:- Compare protons, kaons and pions.- Look at the ToF difference versus momentum.
###Code
%matplotlib inline
import matplotlib.pyplot as plt
p = np.arange(0.5, 5.1, 0.05) * GeV
delta1 = ( ToF(K_plus.mass, p, 1.*meter) - ToF(pi_plus.mass, p, 1.*meter) ) / ps
delta2 = ( ToF(proton.mass, p, 1.*meter) - ToF(K_plus.mass, p, 1.*meter) ) / ps
delta3 = ( ToF(proton.mass, p, 1.*meter) - ToF(pi_plus.mass, p, 1.*meter) ) / ps
fig, ax = plt.subplots()
ax.plot(p/GeV, delta1, label='K-$\pi$')
ax.plot(p/GeV, delta2, label='p-K')
ax.plot(p/GeV, delta3, label='p-$\pi$')
ax.set(xlabel='p [GeV]', ylabel='$\Delta$ ToF [ps]',
title='Time-of-flight difference for a 1 meter path')
ax.grid()
plt.legend()
plt.ylim(bottom=0, top=500)
plt.show()
###Output
_____no_output_____
###Markdown
An example more relevant to LHCb, where the detector timing resolution requirement is getting tough:
###Code
p = np.arange(5., 10.1, 0.1) * GeV
delta = ( ToF(K_plus.mass, p, 10*meter) - ToF(pi_plus.mass, p, 10*meter) ) / ps
fig, ax = plt.subplots()
ax.plot(p/GeV, delta)
ax.set(xlabel='p [GeV]', ylabel='$\Delta$ ToF [ps]',
title='Time-of-flight difference for a 10 meter path')
ax.grid()
plt.show()
p = np.arange(0.5, 5.1, 0.05) * GeV
s1 = ( ToF(K_plus.mass, p, 1.38*meter) / ToF(pi_plus.mass, p, 1.38*meter) )
s3 = ( ToF(proton.mass, p, 1.38*meter) / ToF(pi_plus.mass, p, 1.38*meter) )
fig, ax = plt.subplots()
ax.plot(p/GeV, s1, label='K-$\pi$')
ax.plot(p/GeV, s3, label='p-$\pi$')
ax.set(xlabel='p [GeV]', ylabel='ToF/ToF($\pi$)',
title='Relative Time-of-flight for a 1 meter flight')
ax.grid()
plt.ylim(bottom=1, top=2.2)
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
PDG particle data, MC identification codes **Pythonic interface to**- Particle Data Group (PDG) particle data table.- Particle MC identification codes, with inter-MC converters.- With various extra goodies. Package motivation - particle data- The [PDG](http://pdg.lbl.gov/) provides a downloadable table of particle masses, widths, charges and Monte Carlo particle ID numbers (PDG IDs). - Most recent file [here](http://pdg.lbl.gov/2019/html/computer_read.html).- It also provided an experimental file with extended information(spin, quark content, P and C parities, etc.) until 2008 only, see [here](http://pdg.lbl.gov/2008/html/computer_read.html) (not widely known!).- But anyone wanting to use these data, the only readily available,has to parse the file programmatically.- Why not make a Python package to deal with all these data, for everyone? Package motivation - MC identification codes- The C++ HepPID and HepPDT libraries provide functions for processing particle ID codesin the standard particle (aka PDG) numbering scheme.- Different event generators may have their separate set of particle IDs: Geant3, etc.- Again, why not make a package providing all functionality/conversions, Python-ically, for everyone? Package, in short- Particle - loads extended PDG data tables and implements search and manipulations / display.- PDGID - find out as much as possible from the PDG ID number. No table lookup.- Converters for MC IDs used in Pythia and Geant.- Basic usage via the command line.- Fexible / advanced usage programmatically. **1. Command line usage**Search and query ...
###Code
!python -m particle --version
!python -m particle -h
###Output
usage: particle [-h] [--version] {search,pdgid} ...
Particle command line display utility. Has two modes.
positional arguments:
{search,pdgid} Subcommands
search Look up particles by PID or name (Ex.: python -m particle
search D+ D-)
pdgid Print info from PID (Ex.: python -m particle pdgid 11 13)
optional arguments:
-h, --help show this help message and exit
--version show program's version number and exit
###Markdown
PDGID Print all information from a PDG ID:
###Code
!python -m particle pdgid 211
###Output
<PDGID: 211>
A None
J 0.0
L 0
S 0
Z None
abspid 211
charge 1.0
has_bottom False
has_charm False
has_down True
has_fundamental_anti False
has_strange False
has_top False
has_up True
is_Qball False
is_Rhadron False
is_SUSY False
is_baryon False
is_diquark False
is_dyon False
is_hadron True
is_lepton False
is_meson True
is_nucleus False
is_pentaquark False
is_valid True
j_spin 1
l_spin 1
s_spin 1
three_charge 3
###Markdown
Particle Search a particle by its PDG ID - return description summary of particle:
###Code
!python -m particle search 211
###Output
Name: pi+ ID: 211 Latex: $\pi^{+}$
Mass = 139.57061 ± 0.00024 MeV
Lifetime = 26.033 ± 0.005 ns
Q (charge) = + J (total angular) = 0.0 P (space parity) = -
C (charge parity) = ? I (isospin) = 1.0 G (G-parity) = -
SpinType: SpinType.PseudoScalar
Quarks: uD
Antiparticle name: pi- (antiparticle status: ChargeInv)
###Markdown
Search a particle by its name - either return the description summary of matching particle ...
###Code
!python -m particle search "pi(1400)+"
###Output
Name: pi(1)(1400)+ ID: 9000213 Latex: $\pi_{1}(1400)^{+}$
Mass = 1354 ± 25 MeV
Width = 330 ± 35 MeV
Q (charge) = + J (total angular) = 1.0 P (space parity) = -
C (charge parity) = ? I (isospin) = 1.0 G (G-parity) = -
SpinType: SpinType.Vector
Quarks: Maybe non-qQ
Antiparticle name: pi(1)(1400)- (antiparticle status: ChargeInv)
###Markdown
... or a list of particles matching the keyword in their names:
###Code
!python -m particle search "pi+"
###Output
Name: pi+ ID: 211 Latex: $\pi^{+}$
Mass = 139.57061 ± 0.00024 MeV
Lifetime = 26.033 ± 0.005 ns
Q (charge) = + J (total angular) = 0.0 P (space parity) = -
C (charge parity) = ? I (isospin) = 1.0 G (G-parity) = -
SpinType: SpinType.PseudoScalar
Quarks: uD
Antiparticle name: pi- (antiparticle status: ChargeInv)
###Markdown
Bonus feature: zipappPackage provides a [zipapp](https://docs.python.org/3/library/zipapp.html) version - **one file** that runs on **any computer with Python**, no other dependencies! Find it [attached to releases](https://github.com/scikit-hep/particle/releases). Example:```bash./particle.pyz search gamma``` All dependencies are installed inside the zipapp, and the data lookup is handled in a zip-safe way inside particle. Python 3 is used to make the zipapp, but including the backports makes it work on Python 2 as well. **2. `PDGID` class and MC ID classes**- Classes `PDGID`, `PythiaID`, `Geant3ID`.- Converters in module `particle.converters`: `Geant2PDGIDBiMap`, etc. PDG IDs module overview- Process and query PDG IDs, and more – no look-up table needed. - Current version of package reflects the latest version of the HepPID & HepPDT utility functions defined in the C++ HepPID and HepPDT versions 3.04.01 - It contains more functionality than that available in the C++ code … and minor fixes too.- Definition of a PDGID class, PDG ID literals,and set of standalone HepPID functions to query PDG IDs(is_meson, has_bottom, j_spin, charge, etc.). - All PDGID class functions are available standalone. PDGID class- Wrapper class `PDGID` for PDG IDs.- Behaves like an int, with extra goodies.- Large spectrum of properties and methods, with a Pythonic interface, and yet more!
###Code
from particle import PDGID
pid = PDGID(211)
pid
PDGID(99999999)
from particle.pdgid import is_meson
pid.is_meson, is_meson(pid)
###Output
_____no_output_____
###Markdown
To print all `PDGID` properties:
###Code
print(pid.info())
###Output
A None
J 0.0
L 0
S 0
Z None
abspid 211
charge 1.0
has_bottom False
has_charm False
has_down True
has_fundamental_anti False
has_strange False
has_top False
has_up True
is_Qball False
is_Rhadron False
is_SUSY False
is_baryon False
is_diquark False
is_dyon False
is_hadron True
is_lepton False
is_meson True
is_nucleus False
is_pentaquark False
is_valid True
j_spin 1
l_spin 1
s_spin 1
three_charge 3
###Markdown
MC ID classes and converters- Classes for MC IDs used in Pythia and Geant: `PythiaID` and `Geant3ID`.- ID converters in module `particle.converters`: `Geant2PDGIDBiMap`, etc.
###Code
from particle import PythiaID, Geant3ID
pyid = PythiaID(10221)
pyid.to_pdgid()
###Output
_____no_output_____
###Markdown
Conversions are directly available via mapping classes. E.g., the bi-directional map Pythia ID - PDG ID:
###Code
from particle.converters import Pythia2PDGIDBiMap
Pythia2PDGIDBiMap[PDGID(9010221)]
Pythia2PDGIDBiMap[PythiaID(10221)]
###Output
_____no_output_____
###Markdown
**3. `Particle` class**There are lots of ways to create a particle.
###Code
from particle import Particle
###Output
_____no_output_____
###Markdown
From a PDG ID
###Code
Particle.from_pdgid(211)
###Output
_____no_output_____
###Markdown
From a name
###Code
Particle.from_string('pi+')
###Output
_____no_output_____
###Markdown
SearchingSimple and natural API to deal with the PDG particle data table,with powerful 1-line search and look-up utilities!- `Particle.find(…)` – search a single match (exception raised if multiple particles match the search specifications).- `Particle.findall(…)` – search a list of candidates.- Search methods that can query any particle property!
###Code
Particle.find('J/psi')
###Output
_____no_output_____
###Markdown
You can specify search terms as keywords - _any particle property_:
###Code
Particle.find(latex_name=r'\phi(1020)')
###Output
_____no_output_____
###Markdown
Some properties have enums available. For example, you can directly check the numeric charge:
###Code
Particle.findall('pi', charge=-1)
###Output
_____no_output_____
###Markdown
Or you can use the enum (for charge, this is 3 times the charge, hence the name `three_charge`)
###Code
from particle import Charge
Particle.findall('pi', three_charge=Charge.p)
###Output
_____no_output_____
###Markdown
Or use a **lambda function** for the ultimate in generality! For example, to find all the neutral particles with a bottom quark between 5.2 and 5.3 GeV:
###Code
from hepunits import GeV, s # Units are good. Use them.
Particle.findall(lambda p:
p.pdgid.has_bottom
and p.charge==0
and 5.2*GeV < p.mass < 5.3*GeV
)
###Output
_____no_output_____
###Markdown
Another lambda function example: You can use the width or the lifetime:
###Code
Particle.findall(lambda p: p.lifetime > 1000*s)
###Output
_____no_output_____
###Markdown
If you want infinite lifetime, you could just use the keyword search instead:
###Code
Particle.findall(lifetime=float('inf'))
###Output
_____no_output_____
###Markdown
Trivially find all pseudoscalar charm mesons:
###Code
from particle import SpinType
Particle.findall(lambda p: p.pdgid.is_meson and p.pdgid.has_charm and p.spin_type==SpinType.PseudoScalar)
###Output
_____no_output_____
###Markdown
Display Nice display in Jupyter notebooks, as well as `str` and `repr` support:
###Code
p = Particle.from_pdgid(-415)
p
print(p)
print(repr(p))
###Output
<Particle: name="D(2)*(2460)-", pdgid=-415, mass=2465.4 ± 1.3 MeV>
###Markdown
Full descriptions:
###Code
print(p.describe())
###Output
Name: D(2)*(2460)- ID: -415 Latex: $D_{2}^{*}(2460)^{-}$
Mass = 2465.4 ± 1.3 MeV
Width = 46.7 ± 1.2 MeV
Q (charge) = - J (total angular) = 2.0 P (space parity) = +
C (charge parity) = ? I (isospin) = 0.5 G (G-parity) = ?
SpinType: SpinType.Tensor
Quarks: Cd
Antiparticle name: D(2)*(2460)+ (antiparticle status: ChargeInv)
###Markdown
You may find LaTeX or HTML to be more useful in your program; both are supported:
###Code
print(p.latex_name, '\n', p.html_name)
###Output
D_{2}^{*}(2460)^{-}
D<SUB>2</SUB><SUP>*</SUP>(2460)<SUP>-</SUP>
###Markdown
It is easy to get hold of the whole list of particle (instances) as a list:
###Code
print('# of particles in loaded data table:', len(Particle.all()))
Particle.all()
###Output
# of particles in loaded data table: 536
###Markdown
Particle properties. You can do things to particles, like **invert** them:
###Code
~p
###Output
_____no_output_____
###Markdown
There are a plethora of properties you can access:
###Code
p.spin_type
###Output
_____no_output_____
###Markdown
You can quickly access the PDGID of a particle:
###Code
p.pdgid
PDGID(p)
###Output
_____no_output_____
###Markdown
**4. Literals**They provide a handy way to manipulate things with human-readable names!Package defines literals for most common particles, with easily recognisable names.- Literals are dynamically generated on import for both `PDGID` and `Particle` classes. **PDGID literals**
###Code
from particle.pdgid import literals as lid
lid.phi_1020
###Output
_____no_output_____
###Markdown
**Particle literals**
###Code
from particle.particle import literals as lpart
lpart.phi_1020
###Output
_____no_output_____
###Markdown
**5. Data files, stored in `particle/data/`**- PDG particle data files - Original PDG data files, which are in a fixed-width format - Code uses “digested forms” of these, stored as CSV, for optimised querying - Latest PDG data (2019) used by default - Advanced usage: user can load older PDG table, load a “user table” with new particles, append to default table- Other data files - CSV file for mapping of PDG IDs to particle LaTeX names Dump table contentsThe `Particle.dump_table(...)` method is rather flexible.(No need to dig into the package installation directory to inspect the particle data table ;-).)
###Code
help(Particle.dump_table)
fields = ['pdgid', 'pdg_name', 'mass', 'mass_upper', 'mass_lower', 'three_charge']
print(Particle.dump_table(exclusive_fields=fields, n_rows=10))
###Output
pdgid pdg_name mass mass_upper mass_lower three_charge
------- ---------- ------- ------------ ------------ --------------
1 d 4.67 0.5 0.2 -1
-1 d 4.67 0.5 0.2 1
2 u 2.16 0.5 0.3 2
-2 u 2.16 0.5 0.3 -2
3 s 93 11 5 -1
-3 s 93 11 5 1
4 c 1270 20 20 2
-4 c 1270 20 20 -2
5 b 4180 30 20 -1
-5 b 4180 30 20 1
###Markdown
Table with all pseudoscalar charm hadrons, in _reStructuredText_ format:
###Code
fields = ['pdgid', 'name', 'evtgen_name', 'mass', 'mass_upper', 'mass_lower', 'three_charge']
print(Particle.dump_table(filter_fn=lambda p: p.pdgid.is_meson and p.pdgid.has_charm and p.spin_type==SpinType.PseudoScalar,
exclusive_fields=fields, tablefmt='rst'))
###Output
======= ========== ============= ======= ============ ============ ==============
pdgid name evtgen_name mass mass_upper mass_lower three_charge
======= ========== ============= ======= ============ ============ ==============
411 D+ D+ 1869.65 0.05 0.05 3
-411 D- D- 1869.65 0.05 0.05 -3
421 D0 D0 1864.83 0.05 0.05 0
-421 D~0 anti-D0 1864.83 0.05 0.05 0
431 D(s)+ D_s+ 1968.34 0.07 0.07 3
-431 D(s)- D_s- 1968.34 0.07 0.07 -3
441 eta(c)(1S) eta_c 2983.9 0.5 0.5 0
541 B(c)+ B_c+ 6274.9 0.8 0.8 3
-541 B(c)- B_c- 6274.9 0.8 0.8 -3
100441 eta(c)(2S) eta_c(2S) 3637.5 1.1 1.1 0
======= ========== ============= ======= ============ ============ ==============
###Markdown
Notebook-friendly HTML is just as easy:
###Code
from IPython.display import HTML
HTML(Particle.dump_table(filter_fn=lambda p: p.pdgid.is_meson and p.pdgid.has_charm and p.spin_type==SpinType.PseudoScalar,
exclusive_fields=['pdgid', 'pdg_name', 'html_name'], tablefmt='html'))
###Output
_____no_output_____
###Markdown
**6. Advanced usage**You can:* Extend or replace the default particle data table in `Particle`.* Adjust properties for a particle.* Make custom particles. Decay files, universal description of decay chains`DecayLanguage` is designed for the manipulation of decay structures in Python. The current package has:- Decay file parsers: - Read DecFiles, such as the LHCb master DecFile - Manipulate adn visualize them in Python- Amplitude Analysis decay language: - Input based on AmpGen generator, output format for GooFit C++ Package motivation- Ability to describe decay-tree-like structures- Provide a translation of decay amplitude models from AmpGen to GooFit - Idea is to generalise this to other decay descriptions- Any experiment uses event generators which, among many things, need to describe particle decay chains- Programs such as EvtGen rely on so-called .dec decay files- Many experiments need decay data files- Why not make a Python package to deal with decay files, for everyone? Package, in short- Tools to parse decay files and programmatically manipulate them, query, display information. - Descriptions and parsing built atop the [Lark parser](https://github.com/lark-parser/lark/).- Tools to translate decay amplitude models from AmpGen to GooFit, and manipulate them. **1. Decay files** *Master file” DECAY.DECGigantic file defining decay modes for all relevant particles, including decay model specifications. User .dec files- Needed to produce specific MC samples.- Typically contain a single decay chain (except if defining inclusive samples). **Example user decay file:** Decay file for [B_c+ -> (B_s0 -> K+ K-) pi+]ccAlias B_c+sig B_c+Alias B_c-sig B_c-ChargeConj B_c+sig B_c-sigAlias MyB_s0 B_s0Alias Myanti-B_s0 anti-B_s0ChargeConj MyB_s0 Myanti-B_s0Decay B_c+sig 1.000 MyB_s0 pi+ PHOTOS PHSP;EnddecayCDecay B_c-sigDecay MyB_s0 1.000 K+ K- SSD_CP 20.e12 0.1 1.0 0.04 9.6 -0.8 8.4 -0.6;EnddecayCDecay Myanti-B_s0 **2. Decay file parsing**- **Parsing should be simple** - Expert users can configure parser choice and settings, etc. - **Parsing should be (reasonably) fast!**After parsing, many queries are possible!
###Code
from decaylanguage import DecFileParser
###Output
_____no_output_____
###Markdown
The LHCb "master decay file"It's a big file! ~ 450 particle decays defined, thousands of decay modes, over 11k lines in total.
###Code
#dfp = DecFileParser('data/DECAY_LHCB.DEC')
dfp = DecFileParser('data/DECAY_BELLE2.DEC')
%%time
dfp.parse()
dfp
###Output
_____no_output_____
###Markdown
Let's parse and play with a small decay file:
###Code
with open('data/Dst.dec') as f:
print(f.read())
dfp_Dst = DecFileParser('data/Dst.dec')
dfp_Dst
dfp_Dst.parse()
dfp_Dst
###Output
_____no_output_____
###Markdown
It can be handy to **parse from a multi-line string** rather than a file:
###Code
s = """
# Decay file for [B_c+ -> (B_s0 -> K+ K-) pi+]cc
Alias B_c+sig B_c+
Alias B_c-sig B_c-
ChargeConj B_c+sig B_c-sig
Alias MyB_s0 B_s0
Alias Myanti-B_s0 anti-B_s0
ChargeConj MyB_s0 Myanti-B_s0
Decay B_c+sig
1.000 MyB_s0 pi+ PHOTOS PHSP;
Enddecay
CDecay B_c-sig
Decay MyB_s0
1.000 K+ K- SSD_CP 20.e12 0.1 1.0 0.04 9.6 -0.8 8.4 -0.6;
Enddecay
CDecay Myanti-B_s0
"""
dfp = DecFileParser.from_string(s)
dfp.parse()
dfp
###Output
_____no_output_____
###Markdown
Decay file information
###Code
dfp_Dst.print_decay_modes('D*+')
dfp_Dst.list_decay_mother_names()
dfp_Dst.list_decay_modes('D*+')
###Output
_____no_output_____
###Markdown
Info such as particle aliases
###Code
dfp.dict_aliases()
dfp.dict_charge_conjugates()
###Output
_____no_output_____
###Markdown
**3. Display of decay chains**The parser can provide a simple `dict` representation of any decay chain found in the input decay file(s). Being generic and simple, that is what is used as input information for the viewer class.
###Code
dc = dfp_Dst.build_decay_chains('D+')
dc
from decaylanguage import DecayChainViewer
DecayChainViewer(dc)
DecayChainViewer(dfp_Dst.build_decay_chains('D*+'))
dc = dfp_Dst.build_decay_chains('D*+', stable_particles=['D+', 'D0', 'pi0'])
DecayChainViewer(dc)
###Output
_____no_output_____
###Markdown
**Charge conjugation**
###Code
dc_cc = dfp_Dst.build_decay_chains('D*-', stable_particles=['D-', 'anti-D0', 'pi0'])
DecayChainViewer(dc_cc)
###Output
_____no_output_____
###Markdown
**Parsing several files**Typically useful when the user decay file needs information from the master decay file.
###Code
s = u"""
Alias MyXic+ Xi_c+
Alias MyantiXic- anti-Xi_c-
ChargeConj MyXic+ MyantiXic-
Decay Xi_cc+sig
1.000 MyXic+ pi- pi+ PHSP;
Enddecay
CDecay anti-Xi_cc-sig
Decay MyXic+
1.000 p+ K- pi+ PHSP;
Enddecay
CDecay MyantiXic-
End
"""
dfp = DecFileParser.from_string(s)
dfp.parse()
dfp
###Output
/srv/conda/envs/notebook/lib/python3.7/site-packages/decaylanguage/dec/dec.py:447: UserWarning:
Corresponding 'Decay' statement for 'CDecay' statement(s) of following particle(s) not found:
anti-Xi_cc-sig.
Skipping creation of these charge-conjugate decay trees.
warnings.warn(msg)
###Markdown
Note the subtlety: 3 decays, not 4, are found! This is because the file contains no statement `ChargeConj anti-Xi_cc-sig Xi_cc+sig`, hence the parser cannot know to which particle (matching `Decay` statement) the charge-conjugate decay of `anti-Xi_cc-sig` relates (the code does not rely on the position of statements to guess ;-)).
###Code
d = dfp.build_decay_chains('Xi_cc+sig')
DecayChainViewer(d)
###Output
_____no_output_____
###Markdown
As said in the warning, the information provided is not enough for the anti-Xi_cc-sig to make sense:
###Code
from decaylanguage.dec.dec import DecayNotFound
try:
d = dfp.build_decay_chains('anti-Xi_cc-sig')
except DecayNotFound:
print("Decays of particle 'anti-Xi_cc-sig' not found in .dec file!")
###Output
Decays of particle 'anti-Xi_cc-sig' not found in .dec file!
###Markdown
But the missing information is easily provided by **parsing two files simultaneously ...!** (Any number of files is allowed.)
###Code
from tempfile import NamedTemporaryFile
with NamedTemporaryFile(delete=False) as tf:
tf.write(s.encode('utf-8'))
dfp = DecFileParser(tf.name, 'data/DECAY_LHCB.DEC')
dfp.parse()
dc = dfp.build_decay_chains('Xi_cc+sig')
DecayChainViewer(dc)
dc_cc = dfp.build_decay_chains('anti-Xi_cc-sig')
DecayChainViewer(dc_cc)
###Output
_____no_output_____
###Markdown
Want to save a graph? Try for example ```dcv = DecayChainViewer(...)dcv.graph.write_pdf('test.pdf')``` **4. Representation of decay chains**The universal (and digital) representation of decay chains is of interest well outside the context of decay file parsing! Building blocks- A daughters list - list of final-state particles.- A decay mode - typically a branching fraction and a list of final-state particles (may also contain _any_ metadata such as decay model and optional decay-model parameters, as defined for example in .dec decay files).- A decay chain - can be seen as a mother particle and a list of decay modes.
###Code
from decaylanguage.decay.decay import DaughtersDict, DecayMode, DecayChain
###Output
_____no_output_____
###Markdown
**Daughters list** (actually a ``Counter`` dictionary, internally):
###Code
# Constructor from a dictionary
dd = DaughtersDict({'K+': 1, 'K-': 2, 'pi+': 1, 'pi0': 1})
# Constructor from a list of particle names
dd = DaughtersDict(['K+', 'K-', 'K-', 'pi+', 'pi0'])
# Constructor from a string representing the final state
dd = DaughtersDict('K+ K- pi0')
dd
###Output
_____no_output_____
###Markdown
Decay Modes
###Code
# A 'default' and hence empty, decay mode
dm = DecayMode()
# Decay mode with minimal input information
dd = DaughtersDict('K+ K-')
dm = DecayMode(0.5, dd)
# Decay mode with decay model information and user metadata
dm = DecayMode(0.2551, # branching fraction
'pi- pi0 nu_tau', # final-state particles
model='TAUHADNU', # decay model
model_params=[-0.108, 0.775, 0.149, 1.364, 0.400], # decay-model parameters
study='toy', year=2019 # user metadata
)
dm
print(dm.describe())
###Output
Daughters: pi- pi0 nu_tau , BF: 0.2551
Decay model: TAUHADNU [-0.108, 0.775, 0.149, 1.364, 0.4]
Extra info:
study: toy
year: 2019
###Markdown
Various manipulations are available:
###Code
dm = DecayMode.from_pdgids(0.5, [321, -321])
print(dm)
dm = DecayMode(1.0, 'K+ K+ pi-')
dm.charge_conjugate()
###Output
<DecayMode: daughters=K+ K-, BF=0.5>
###Markdown
Decay chains
###Code
dm1 = DecayMode(0.0124, 'K_S0 pi0', model='PHSP')
dm2 = DecayMode(0.692, 'pi+ pi-')
dm3 = DecayMode(0.98823, 'gamma gamma')
dc = DecayChain('D0', {'D0':dm1, 'K_S0':dm2, 'pi0':dm3})
dc
dc.decays
###Output
_____no_output_____
###Markdown
Flatten the decay chain, i.e. replace all intermediate, decaying particles with their final states - the BF is now the *visible BF*.
###Code
dc.flatten()
###Output
_____no_output_____
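As a rough cross-check (simple arithmetic on the branching fractions defined above, not an output of the library), the fully flattened mode `pi+ pi- gamma gamma` should carry a visible BF of about 0.0124 × 0.692 × 0.98823 ≈ 0.0085.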
###Markdown
Of course you can still just as easily visualise decays defined via this `DecayChain` class:
###Code
DecayChainViewer(dc.to_dict())
###Output
_____no_output_____
###Markdown
**Feeling nostalgic for an $e^+ e^-$ example?** Try out the following:
###Code
from decaylanguage import DecFileParser
s = """
Alias B0sig B0
Alias anti-B0sig anti-B0
Define dm 0.507e12
Define PKHplus 0.159
Define PKHzero 0.775
Define PKHminus 0.612
Define PKphHplus 1.563
Define PKphHzero 0.0
Define PKphHminus 2.712
Decay Upsilon(4S)
0.483122645 B0sig anti-B0sig VSS_BMIX dm;
Enddecay
Decay B0sig
0.001330000 J/psi K*0 SVV_HELAMP PKHplus PKphHplus PKHzero PKphHzero PKHminus PKphHminus;
Enddecay
Decay anti-B0sig
0.000024000 D_s- pi+ PHSP;
Enddecay
Decay J/psi
0.0593 mu+ mu- PHOTOS VLL;
Enddecay
Decay K*0
0.6657 K+ pi- VSS;
0.3323 K0 pi0 VSS;
0.0020 K0 gamma VSP_PWAVE;
Enddecay
Decay D_s-
0.001200000 K_S0 pi- PHSP;
Enddecay
Decay K_S0
0.69 pi+ pi- PHSP;
0.31 pi0 pi0 PHSP;
Enddecay
"""
dfp = DecFileParser.from_string(s)
dfp.parse()
dfp
dc = dfp.build_decay_chains('Upsilon(4S)')
import yaml
print(yaml.dump(dc))
from decaylanguage import DecayChainViewer
DecayChainViewer(dc)
###Output
_____no_output_____ |
face_collector.ipynb | ###Markdown
Download face images from Google search. A Python script for `downloading` images from Google and `saving` faces from the images. How does it work? 1. Download images from Google by searching `keywords` (using [google_images_download](https://github.com/hardikvasa/google-images-download)) 2. Detect `frontal` and `profile` faces in the images (using [opencv-python](https://github.com/skvark/opencv-python)) 3. Crop and save the face images
###Code
from google_images_download import google_images_download as gid
def download_images(keywords, limit, output_dir):
downloader = gid.googleimagesdownload()
downloader.download({
'keywords': keywords,
"limit": limit,
'output_directory': output_dir
})
import os  # needed for os.makedirs below
from os import listdir
from os.path import exists, isfile, join
import cv2 as cv
import numpy as np
frontalface_cascade = cv.CascadeClassifier('data/haarcascades/haarcascade_frontalface_default.xml')
profileface_cascade = cv.CascadeClassifier('data/haarcascades/haarcascade_profileface.xml')
def save_faces(img, faces, output_dir, file_id):
if not exists(output_dir):
os.makedirs(output_dir)
for i in range(len(faces)):
x, y, w, h = faces[i]
face_img = img[y:y+h, x:x+w]
output_file_path = join(output_dir, '{}_{}.jpeg'.format(file_id, i))
cv.imwrite(output_file_path, face_img)
def detect_and_save_faces(images_dir, faces_dir):
file_names = [f for f in listdir(images_dir) if isfile(join(images_dir, f))]
for file_name in file_names:
file_id = file_name.split('.')[0]
img = cv.imread(join(images_dir, file_name))
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
frontal_faces = frontalface_cascade.detectMultiScale(gray, 1.3, 5)
save_faces(img, frontal_faces, join(faces_dir, 'frontal'), file_id)
profile_faces = profileface_cascade.detectMultiScale(gray, 1.3, 5)
save_faces(img, profile_faces, join(faces_dir, 'profile'), file_id)
keyword = 'george clooney'
num_search_images = 10
images_dir = './output/images'
faces_dir = './output/faces'
download_images(keyword, num_search_images, images_dir)
detect_and_save_faces(join(images_dir, keyword), join(faces_dir, keyword))
###Output
Item no.: 1 --> Item name = george clooney
Evaluating...
Starting Download...
Completed Image ====> 1. 220px-George_Clooney_2016.jpg
Completed Image ====> 2. MV5BMjEyMTEyOTQ0MV5BMl5BanBnXkFtZTcwNzU3NTMzNw@@._V1_.jpg
Completed Image ====> 3. 416x416.jpg
Completed Image ====> 4. 220px-George_Clooney-4_The_Men_Who_Stare_at_Goats_TIFF09_%28cropped%29.jpg
Completed Image ====> 5. _102457094_pa-clooney.jpg
Completed Image ====> 6. george-clooney-net-worth-tequila.jpg
Completed Image ====> 7. George_Clooney-300x300.jpg
Completed Image ====> 8. george-clooney-5-2000.jpg
Completed Image ====> 9. 3a
Completed Image ====> 10. 18
Errors: 0
|
D_3_3_3.ipynb | ###Markdown
From Understanding to Preparation Introduction In this lab, we will continue learning about the data science methodology, and focus on the **Data Understanding** and the **Data Preparation** stages. Table of Contents: 1. Recap 2. Data Understanding 3. Data Preparation Recap In Lab **From Requirements to Collection**, we learned that the data we need to answer the question developed in the business understanding stage, namely *can we automate the process of determining the cuisine of a given recipe?*, is readily available. A researcher named Yong-Yeol Ahn scraped tens of thousands of food recipes (cuisines and ingredients) from three different websites, namely: www.allrecipes.com, www.epicurious.com, and www.menupan.com. For more information on Yong-Yeol Ahn and his research, you can read his paper on [Flavor Network and the Principles of Food Pairing](http://yongyeol.com/papers/ahn-flavornet-2011.pdf). We also collected the data and placed it on an IBM server for your convenience.------------ Data Understanding Important note: Please note that you are not expected to know how to program in Python. The following code is meant to illustrate the stages of data understanding and data preparation, so it is totally fine if you do not understand the individual lines of code. We have a full course on programming in Python, Python for Data Science, which is also offered on Coursera. So make sure to complete the Python course if you are interested in learning how to program in Python. Using this notebook: To run any of the following cells of code, you can type **Shift + Enter** to execute the code in a cell. Get the version of Python installed.
###Code
# check Python version
!python -V
###Output
_____no_output_____
###Markdown
Download the library and dependencies that we will need to run this lab.
###Code
import pandas as pd # import library to read data into dataframe
pd.set_option('display.max_columns', None)
import numpy as np # import numpy library
import re # import library for regular expression
###Output
_____no_output_____
###Markdown
Download the data from the IBM server and read it into a *pandas* dataframe.
###Code
recipes = pd.read_csv("https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DS0103EN/labs/data/recipes.csv")
print("Data read into dataframe!") # takes about 30 seconds
###Output
_____no_output_____
###Markdown
Show the first few rows.
###Code
recipes.head()
###Output
_____no_output_____
###Markdown
Get the dimensions of the dataframe.
###Code
recipes.shape
###Output
_____no_output_____
###Markdown
So our dataset consists of 57,691 recipes. Each row represents a recipe, and for each recipe, the corresponding cuisine is documented as well as whether 384 ingredients exist in the recipe or not, beginning with almond and ending with zucchini. We know that a basic sushi recipe includes the ingredients: rice, soy sauce, wasabi, and some fish/vegetables. Let's check that these ingredients exist in our dataframe:
###Code
ingredients = list(recipes.columns.values)
print([match.group(0) for ingredient in ingredients for match in [(re.compile(".*(rice).*")).search(ingredient)] if match])
print([match.group(0) for ingredient in ingredients for match in [(re.compile(".*(wasabi).*")).search(ingredient)] if match])
print([match.group(0) for ingredient in ingredients for match in [(re.compile(".*(soy).*")).search(ingredient)] if match])
###Output
_____no_output_____
###Markdown
Yes, they do! * rice exists as rice. * wasabi exists as wasabi. * soy exists as soy_sauce. So maybe if a recipe contains all three ingredients: rice, wasabi, and soy_sauce, then we can confidently say that the recipe is a **Japanese** cuisine! Let's keep this in mind! ---------------- Data Preparation In this section, we will prepare the data for the next stage in the data science methodology, which is modeling. This stage involves exploring the data further and making sure that it is in the right format for the machine learning algorithm that we selected in the analytic approach stage, which is decision trees. First, look at the data to see if it needs cleaning.
###Code
recipes["country"].value_counts() # frequency table
###Output
_____no_output_____
###Markdown
By looking at the above table, we can make the following observations: 1. The cuisine column is labeled as Country, which is inaccurate. 2. Cuisine names are not consistent, as not all of them start with an uppercase first letter. 3. Some cuisines are duplicated as variations of the country name, such as Vietnam and Vietnamese. 4. Some cuisines have very few recipes. Let's fix these problems. Fix the name of the column showing the cuisine.
###Code
column_names = recipes.columns.values
column_names[0] = "cuisine"
recipes.columns = column_names
recipes
###Output
_____no_output_____
###Markdown
Make all the cuisine names lowercase.
###Code
recipes["cuisine"] = recipes["cuisine"].str.lower()
###Output
_____no_output_____
###Markdown
Make the cuisine names consistent.
###Code
recipes.loc[recipes["cuisine"] == "austria", "cuisine"] = "austrian"
recipes.loc[recipes["cuisine"] == "belgium", "cuisine"] = "belgian"
recipes.loc[recipes["cuisine"] == "china", "cuisine"] = "chinese"
recipes.loc[recipes["cuisine"] == "canada", "cuisine"] = "canadian"
recipes.loc[recipes["cuisine"] == "netherlands", "cuisine"] = "dutch"
recipes.loc[recipes["cuisine"] == "france", "cuisine"] = "french"
recipes.loc[recipes["cuisine"] == "germany", "cuisine"] = "german"
recipes.loc[recipes["cuisine"] == "india", "cuisine"] = "indian"
recipes.loc[recipes["cuisine"] == "indonesia", "cuisine"] = "indonesian"
recipes.loc[recipes["cuisine"] == "iran", "cuisine"] = "iranian"
recipes.loc[recipes["cuisine"] == "italy", "cuisine"] = "italian"
recipes.loc[recipes["cuisine"] == "japan", "cuisine"] = "japanese"
recipes.loc[recipes["cuisine"] == "israel", "cuisine"] = "jewish"
recipes.loc[recipes["cuisine"] == "korea", "cuisine"] = "korean"
recipes.loc[recipes["cuisine"] == "lebanon", "cuisine"] = "lebanese"
recipes.loc[recipes["cuisine"] == "malaysia", "cuisine"] = "malaysian"
recipes.loc[recipes["cuisine"] == "mexico", "cuisine"] = "mexican"
recipes.loc[recipes["cuisine"] == "pakistan", "cuisine"] = "pakistani"
recipes.loc[recipes["cuisine"] == "philippines", "cuisine"] = "philippine"
recipes.loc[recipes["cuisine"] == "scandinavia", "cuisine"] = "scandinavian"
recipes.loc[recipes["cuisine"] == "spain", "cuisine"] = "spanish_portuguese"
recipes.loc[recipes["cuisine"] == "portugal", "cuisine"] = "spanish_portuguese"
recipes.loc[recipes["cuisine"] == "switzerland", "cuisine"] = "swiss"
recipes.loc[recipes["cuisine"] == "thailand", "cuisine"] = "thai"
recipes.loc[recipes["cuisine"] == "turkey", "cuisine"] = "turkish"
recipes.loc[recipes["cuisine"] == "vietnam", "cuisine"] = "vietnamese"
recipes.loc[recipes["cuisine"] == "uk-and-ireland", "cuisine"] = "uk-and-irish"
recipes.loc[recipes["cuisine"] == "irish", "cuisine"] = "uk-and-irish"
recipes
###Output
_____no_output_____
###Markdown
Remove cuisines with < 50 recipes.
###Code
# get list of cuisines to keep
recipes_counts = recipes["cuisine"].value_counts()
cuisines_indices = recipes_counts > 50
cuisines_to_keep = list(np.array(recipes_counts.index.values)[np.array(cuisines_indices)])
rows_before = recipes.shape[0] # number of rows of original dataframe
print("Number of rows of original dataframe is {}.".format(rows_before))
recipes = recipes.loc[recipes['cuisine'].isin(cuisines_to_keep)]
rows_after = recipes.shape[0] # number of rows of processed dataframe
print("Number of rows of processed dataframe is {}.".format(rows_after))
print("{} rows removed!".format(rows_before - rows_after))
###Output
_____no_output_____
###Markdown
Convert all Yes's to 1's and the No's to 0's
###Code
recipes = recipes.replace(to_replace="Yes", value=1)
recipes = recipes.replace(to_replace="No", value=0)
###Output
_____no_output_____
###Markdown
Let's analyze the data a little more in order to learn the data better and note any interesting preliminary observations. Run the following cell to get the recipes that contain **rice** *and* **soy** *and* **wasabi** *and* **seaweed**.
###Code
recipes.head()
check_recipes = recipes.loc[
(recipes["rice"] == 1) &
(recipes["soy_sauce"] == 1) &
(recipes["wasabi"] == 1) &
(recipes["seaweed"] == 1)
]
check_recipes
###Output
_____no_output_____
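A quick, optional way to see which cuisines these matching recipes come from (a small sketch, not part of the original lab) is to tally the `cuisine` column of the filtered dataframe:
```
# Count how many of the matching recipes belong to each cuisine
check_recipes["cuisine"].value_counts()
```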
###Markdown
Based on the results of the above code, can we classify all recipes that contain **rice** *and* **soy** *and* **wasabi** *and* **seaweed** as **Japanese** recipes? Why? Your Answer: Double-click __here__ for the solution.<!-- The correct answer is:No, because other recipes such as Asian and East_Asian recipes also contain these ingredients.--> Let's count the ingredients across all recipes.
###Code
# sum each column
ing = recipes.iloc[:, 1:].sum(axis=0)
# define each column as a pandas series
ingredient = pd.Series(ing.index.values, index = np.arange(len(ing)))
count = pd.Series(list(ing), index = np.arange(len(ing)))
# create the dataframe
ing_df = pd.DataFrame(dict(ingredient = ingredient, count = count))
ing_df = ing_df[["ingredient", "count"]]
print(ing_df.to_string())
###Output
_____no_output_____
###Markdown
Now we have a dataframe of ingredients and their total counts across all recipes. Let's sort this dataframe in descending order.
###Code
ing_df.sort_values(["count"], ascending=False, inplace=True)
ing_df.reset_index(inplace=True, drop=True)
print(ing_df)
###Output
_____no_output_____
###Markdown
What are the 3 most popular ingredients? Your Answer:1.2.3. Double-click __here__ for the solution.<!-- The correct answer is:// 1. Egg with 21,025 occurrences. // 2. Wheat with 20,781 occurrences. // 3. Butter with 20,719 occurrences.--> However, note that there is a problem with the above table. There are ~40,000 American recipes in our dataset, which means that the data is biased towards American ingredients. **Therefore**, let's compute a more objective summary of the ingredients by looking at the ingredients per cuisine. Let's create a *profile* for each cuisine.In other words, let's try to find out what ingredients Chinese people typically use, and what is **Canadian** food for example.
###Code
cuisines = recipes.groupby("cuisine").mean()
cuisines.head()
###Output
_____no_output_____
###Markdown
As shown above, we have just created a dataframe where each row is a cuisine and each column (except for the first column) is an ingredient, and the row values represent the percentage of each ingredient in the corresponding cuisine.**For example**:* *almond* is present across 15.65% of all of the **African** recipes.* *butter* is present across 38.11% of all of the **Canadian** recipes. Let's print out the profile for each cuisine by displaying the top four ingredients in each cuisine.
###Code
num_ingredients = 4 # define number of top ingredients to print
# define a function that prints the top ingredients for each cuisine
def print_top_ingredients(row):
print(row.name.upper())
row_sorted = row.sort_values(ascending=False)*100
top_ingredients = list(row_sorted.index.values)[0:num_ingredients]
row_sorted = list(row_sorted)[0:num_ingredients]
for ind, ingredient in enumerate(top_ingredients):
print("%s (%d%%)" % (ingredient, row_sorted[ind]), end=' ')
print("\n")
# apply function to cuisines dataframe
create_cuisines_profiles = cuisines.apply(print_top_ingredients, axis=1)
###Output
_____no_output_____ |
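A possible variation (an assumption, not part of the original lab): because `print_top_ingredients` returns `None`, the `apply` call above yields a column of `None` values. The same profiles can instead be collected into an object for later use:
```
# Collect the top-4 ingredient names per cuisine instead of printing them
top_ingredients_per_cuisine = cuisines.apply(
    lambda row: list(row.sort_values(ascending=False).index[:num_ingredients]), axis=1
)
print(top_ingredients_per_cuisine.head())
```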
jupyter_timing_killer.ipynb | ###Markdown
Kill long-running Jupyter notebook kernel processes based on a configured maximum run time, so that too many open notebooks do not keep hogging system resources. Test environment: psutil 5.4.3 (the arguments of `psutil.process_iter` changed after the 5.0.x releases and older versions will raise an error, so upgrade the package or adapt the code) and Python 3.6. Combined with a cron schedule, this periodically checks for processes that have been running too long and kills them: `0 */1 * * * python3 /root/jupyter_timing_killer.py`
###Code
import psutil
import os
import time
#http://psutil.readthedocs.io/en/latest/#
#time interval to kill in seconds
time2die = 120 ##3 * 3600 #3hour
'''
Match kernel process ids by cmdline. A notebook kernel's cmdline looks like the list below,
so matching 'ipykernel_launcher' is a good fit. For partial/substring matching, the if
condition below would need to be adjusted.
['/root/miniconda3/bin/python',
 '-m',
 'ipykernel_launcher',
 '-f',
 '/run/user/0/jupyter/kernel-5c2390e6-55d8-4858-9162-1b90dd58132d.json']
'''
def match_procs_by_cmdline(cmd):
ls = []
for p in psutil.process_iter(attrs=["pid",'cmdline']):
if p.info['cmdline'] and ( cmd in p.info['cmdline']) :
ls.append(p.info['pid'])
return ls
'''
check string against Process.name(), Process.exe() and Process.cmdline():
'''
def find_procs_by_name(name):
"Return a list of processes matching 'name'."
ls = []
for p in psutil.process_iter(attrs=["name", "exe", "cmdline"]):
if name == p.info['name'] or \
p.info['exe'] and os.path.basename(p.info['exe']) == name or \
p.info['cmdline'] and p.info['cmdline'][0] == name:
ls.append(p)
return ls
notes=match_procs_by_cmdline('ipykernel_launcher')
print("check in : %s" %(notes))
for n in notes:
p = psutil.Process(n)
if(time.time() - p.create_time() >= time2die):
p.kill()
print("time to die for pid: %s cmd: %s" %( n ,p.cmdline() ) )
for n in notes:
p = psutil.Process(n)
print(p.children())
    print(p.parent())  # does the parent process respawn the child kernels??
###Output
[]
psutil.Process(pid=12412, name='jupyter-noteboo', started='2018-01-15 18:09:33')
[]
psutil.Process(pid=12412, name='jupyter-noteboo', started='2018-01-15 18:09:33')
[]
psutil.Process(pid=12412, name='jupyter-noteboo', started='2018-01-15 18:09:33')
[]
psutil.Process(pid=12412, name='jupyter-noteboo', started='2018-01-15 18:09:33')
[]
psutil.Process(pid=12412, name='jupyter-noteboo', started='2018-01-15 18:09:33')
[]
psutil.Process(pid=12412, name='jupyter-noteboo', started='2018-01-15 18:09:33')
[]
psutil.Process(pid=12412, name='jupyter-noteboo', started='2018-01-15 18:09:33')
[]
psutil.Process(pid=12412, name='jupyter-noteboo', started='2018-01-15 18:09:33')
|
code/FEN1_all_generate_FPs.ipynb | ###Markdown
Fingerprint molecules. The whole set of fingerprints won't fit in memory (even sparse), so we have to save them as chunks. This iterates over the SMILES codes, generating fingerprint matrices and score arrays, saving them as chunks of 10,000,000.
###Code
# Imports required by this cell (assumed to have been set up earlier in the notebook,
# together with ray.init(), get_data(), OUTPUT_DATA_DIR and RECEPTOR)
import logging

import numpy as np
import ray
from rdkit import Chem
from rdkit.Chem import rdMolDescriptors
from scipy import sparse

@ray.remote
def create_fingerprint(smiles, score, i):
if i % 10000 == 0:
logging.basicConfig(level=logging.INFO)
logging.info(i)
mol = Chem.MolFromSmiles(smiles)
pars = { "radius": 2,
"nBits": 8192,
"invariants": [],
"fromAtoms": [],
"useChirality": False,
"useBondTypes": True,
"useFeatures": True,
}
fp = rdMolDescriptors.GetMorganFingerprintAsBitVect(mol, **pars)
onbits = list(fp.GetOnBits())
return onbits, float(score)
def get_fingerprints(ligands_df, fp_size=8192):
future_values = [create_fingerprint.remote(smiles=smiles, score=score, i=i) for (i, (smiles, score)) in enumerate(zip(ligands_df["Smiles"], ligands_df["Active"]))]
values = [v for v in ray.get(future_values) if v]
all_bits, scores = zip(*values)
row_idx = []
col_idx = []
for i, bits in enumerate(all_bits):
# these bits all have the same row:
row_idx += [i] * len(bits)
#and the column indices of those bits:
col_idx += bits
# generate a sparse matrix out of the row,col indices:
unfolded_size = 8192
fingerprint_matrix = sparse.coo_matrix((np.ones(len(row_idx)).astype(bool),
(row_idx, col_idx)),
shape=(max(row_idx)+1, unfolded_size))
# convert to csr matrix, it is better:
fingerprint_matrix = sparse.csr_matrix(fingerprint_matrix)
return fingerprint_matrix, scores
ligands_df = get_data()
fingerprint_matrix, scores = get_fingerprints(ligands_df=ligands_df)
sparse.save_npz(f"{OUTPUT_DATA_DIR}/{RECEPTOR}_fingerprints.npz", fingerprint_matrix)
np.save(f"{OUTPUT_DATA_DIR}/{RECEPTOR}_scores.npy", np.array(scores))
###Output
_____no_output_____ |
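For downstream use, a minimal sketch (assuming the same `OUTPUT_DATA_DIR` and `RECEPTOR` values as above) of loading the saved fingerprints and scores back:
```
from scipy import sparse
import numpy as np

# Load the sparse fingerprint matrix and the score array written above
fingerprint_matrix = sparse.load_npz(f"{OUTPUT_DATA_DIR}/{RECEPTOR}_fingerprints.npz")
scores = np.load(f"{OUTPUT_DATA_DIR}/{RECEPTOR}_scores.npy")
print(fingerprint_matrix.shape, scores.shape)
```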
IGTI - Bootcamp Desenvolvedor Python/04/cap2/concorrencia.ipynb | ###Markdown
**Time sharing**
###Code
import threading  # module for building multithreaded programs
import time  # module for measuring time
import random  # module for generating random numbers
counter = 10  # initial counter value
# function that adds to the counter
def tarefaA():
    global counter  # global variable
    while counter < 30:
        counter += 1  # increment the counter
        print("A tarefaA incrementou o contador para {}".format(counter))
        sleepTime = random.randint(0,1)  # randomly pick a value between 0 and 1
        time.sleep(sleepTime)  # put the thread to sleep
# function that subtracts from the counter
def tarefaB():
    global counter  # global variable
    while counter > -30:
        counter -= 1  # decrement the counter
        print("A tarefaB decrementou o contador para {}".format(counter))
        sleepTime = random.randint(0,3)  # randomly pick a value between 0 and 3
        time.sleep(sleepTime)  # put the thread to sleep
t0 = time.time()
thread1 = threading.Thread(target=tarefaA)  # instantiate a Thread object to run tarefaA
thread2 = threading.Thread(target=tarefaB)  # instantiate a Thread object to run tarefaB
thread1.start()  # start thread1
thread2.start()  # start thread2
thread1.join()  # wait for the thread to finish
thread2.join()  # wait for the thread to finish
t1 = time.time()
print("Tempo total de execução {}".format(t1-t0))
###Output
_____no_output_____
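Note that `tarefaA` and `tarefaB` both update the shared global `counter` with no synchronization, so the interleaving of increments and decrements is unpredictable. A minimal sketch (not part of the original lesson) of protecting the shared counter with a `threading.Lock`:
```
import threading

counter_lock = threading.Lock()

def safe_increment():
    global counter
    with counter_lock:  # only one thread at a time may modify the counter
        counter += 1
```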
###Markdown
**Execution states of a Thread**
###Code
import threading
import time
# function that simply prints execution status messages
def threadWorker():
    # This is where the change from 'Runnable' to 'Running' happens
    print("A thread entrou no estado 'Running'")
    # when we call time.sleep(), the thread
    # enters the not-running state
    print('A thread entrou no estado "Non-Running"')
    time.sleep(10)
    # when the task finishes, the thread is terminated
    print("Execução da thread foi finalizada")
# garbage collector
# at this point the thread still "has no state";
# no resources have been allocated yet
print("Thread Criada")
myThread = threading.Thread(target=threadWorker)
# when myThread.start() is called, Python allocates the resources and the
# thread then moves from "Start" to "Runnable",
# but does not run yet.
print("Thread no estado 'Runnable'")
myThread.start()
# when the join method is called, the thread eventually moves to the
# "terminated" state
myThread.join()
print("A thread está no estado de 'Terminated'")
###Output
Thread Criada
Thread no estado 'Runnable'
A thread entrou no estado 'Running'
A thread entrou no estado "Non-Running"
Execução da thread foi finalizada
A thread está no estado de 'Terminated'
###Markdown
**Another example of running a Thread**
###Code
import threading
import time
import random
#function that receives a number and creates a thread
def executeThread(i):
print("Thread {} incializada \n".format(i))
sleepTime = random.randint(1,10)
time.sleep(sleepTime)
print("Thread {} finalizou a execução".format(i))
for i in range(10):
thread = threading.Thread(target=executeThread, args=(i,))
thread.start()
print("Número de threads ativas:" , threading.enumerate())
###Output
Thread 0 incializada
Número de threads ativas: [<_MainThread(MainThread, started 140017367697216)>, <Thread(Thread-2, started daemon 140017310988032)>, <Heartbeat(Thread-3, started daemon 140017302595328)>, <HistorySavingThread(IPythonHistorySavingThread, started 140017074697984)>, <ParentPollerUnix(Thread-1, started daemon 140017066305280)>, <Thread(Thread-5, started 140017057912576)>]
Thread 1 incializada
Número de threads ativas: [<_MainThread(MainThread, started 140017367697216)>, <Thread(Thread-2, started daemon 140017310988032)>, <Heartbeat(Thread-3, started daemon 140017302595328)>, <HistorySavingThread(IPythonHistorySavingThread, started 140017074697984)>, <ParentPollerUnix(Thread-1, started daemon 140017066305280)>, <Thread(Thread-5, started 140017057912576)>, <Thread(Thread-6, started 140017049519872)>]
Thread 2 incializada
Número de threads ativas: [<_MainThread(MainThread, started 140017367697216)>, <Thread(Thread-2, started daemon 140017310988032)>, <Heartbeat(Thread-3, started daemon 140017302595328)>, <HistorySavingThread(IPythonHistorySavingThread, started 140017074697984)>, <ParentPollerUnix(Thread-1, started daemon 140017066305280)>, <Thread(Thread-5, started 140017057912576)>, <Thread(Thread-6, started 140017049519872)>, <Thread(Thread-7, started 140017041127168)>]
Thread 3 incializada
Número de threads ativas: [<_MainThread(MainThread, started 140017367697216)>, <Thread(Thread-2, started daemon 140017310988032)>, <Heartbeat(Thread-3, started daemon 140017302595328)>, <HistorySavingThread(IPythonHistorySavingThread, started 140017074697984)>, <ParentPollerUnix(Thread-1, started daemon 140017066305280)>, <Thread(Thread-5, started 140017057912576)>, <Thread(Thread-6, started 140017049519872)>, <Thread(Thread-7, started 140017041127168)>, <Thread(Thread-8, started 140017032734464)>]
Thread 4 incializada
Número de threads ativas: [<_MainThread(MainThread, started 140017367697216)>, <Thread(Thread-2, started daemon 140017310988032)>, <Heartbeat(Thread-3, started daemon 140017302595328)>, <HistorySavingThread(IPythonHistorySavingThread, started 140017074697984)>, <ParentPollerUnix(Thread-1, started daemon 140017066305280)>, <Thread(Thread-5, started 140017057912576)>, <Thread(Thread-6, started 140017049519872)>, <Thread(Thread-7, started 140017041127168)>, <Thread(Thread-8, started 140017032734464)>, <Thread(Thread-9, started 140017024341760)>]
Thread 5 incializada
Número de threads ativas: [<_MainThread(MainThread, started 140017367697216)>, <Thread(Thread-2, started daemon 140017310988032)>, <Heartbeat(Thread-3, started daemon 140017302595328)>, <HistorySavingThread(IPythonHistorySavingThread, started 140017074697984)>, <ParentPollerUnix(Thread-1, started daemon 140017066305280)>, <Thread(Thread-5, started 140017057912576)>, <Thread(Thread-6, started 140017049519872)>, <Thread(Thread-7, started 140017041127168)>, <Thread(Thread-8, started 140017032734464)>, <Thread(Thread-9, started 140017024341760)>, <Thread(Thread-10, started 140016470718208)>]
Thread 6 incializada
Número de threads ativas: [<_MainThread(MainThread, started 140017367697216)>, <Thread(Thread-2, started daemon 140017310988032)>, <Heartbeat(Thread-3, started daemon 140017302595328)>, <HistorySavingThread(IPythonHistorySavingThread, started 140017074697984)>, <ParentPollerUnix(Thread-1, started daemon 140017066305280)>, <Thread(Thread-5, started 140017057912576)>, <Thread(Thread-6, started 140017049519872)>, <Thread(Thread-7, started 140017041127168)>, <Thread(Thread-8, started 140017032734464)>, <Thread(Thread-9, started 140017024341760)>, <Thread(Thread-10, started 140016470718208)>, <Thread(Thread-11, started 140016462325504)>]
Thread 7 incializada
Número de threads ativas: [<_MainThread(MainThread, started 140017367697216)>, <Thread(Thread-2, started daemon 140017310988032)>, <Heartbeat(Thread-3, started daemon 140017302595328)>, <HistorySavingThread(IPythonHistorySavingThread, started 140017074697984)>, <ParentPollerUnix(Thread-1, started daemon 140017066305280)>, <Thread(Thread-5, started 140017057912576)>, <Thread(Thread-6, started 140017049519872)>, <Thread(Thread-7, started 140017041127168)>, <Thread(Thread-8, started 140017032734464)>, <Thread(Thread-9, started 140017024341760)>, <Thread(Thread-10, started 140016470718208)>, <Thread(Thread-11, started 140016462325504)>, <Thread(Thread-12, started 140016453932800)>]
Thread 8 incializada
Número de threads ativas: [<_MainThread(MainThread, started 140017367697216)>, <Thread(Thread-2, started daemon 140017310988032)>, <Heartbeat(Thread-3, started daemon 140017302595328)>, <HistorySavingThread(IPythonHistorySavingThread, started 140017074697984)>, <ParentPollerUnix(Thread-1, started daemon 140017066305280)>, <Thread(Thread-5, started 140017057912576)>, <Thread(Thread-6, started 140017049519872)>, <Thread(Thread-7, started 140017041127168)>, <Thread(Thread-8, started 140017032734464)>, <Thread(Thread-9, started 140017024341760)>, <Thread(Thread-10, started 140016470718208)>, <Thread(Thread-11, started 140016462325504)>, <Thread(Thread-12, started 140016453932800)>, <Thread(Thread-13, started 140016445540096)>]
Thread 9 incializada
Número de threads ativas: [<_MainThread(MainThread, started 140017367697216)>, <Thread(Thread-2, started daemon 140017310988032)>, <Heartbeat(Thread-3, started daemon 140017302595328)>, <HistorySavingThread(IPythonHistorySavingThread, started 140017074697984)>, <ParentPollerUnix(Thread-1, started daemon 140017066305280)>, <Thread(Thread-5, started 140017057912576)>, <Thread(Thread-6, started 140017049519872)>, <Thread(Thread-7, started 140017041127168)>, <Thread(Thread-8, started 140017032734464)>, <Thread(Thread-9, started 140017024341760)>, <Thread(Thread-10, started 140016470718208)>, <Thread(Thread-11, started 140016462325504)>, <Thread(Thread-12, started 140016453932800)>, <Thread(Thread-13, started 140016445540096)>, <Thread(Thread-14, started 140016437147392)>]
Thread 5 finalizou a execução
Thread 9 finalizou a execução
Thread 0 finalizou a execução
Thread 2 finalizou a execução
Thread 7 finalizou a execução
Thread 4 finalizou a execução
Thread 1 finalizou a execução
Thread 3 finalizou a execução
Thread 6 finalizou a execução
Thread 8 finalizou a execução
###Markdown
**Inheritance with Threads**
###Code
from threading import Thread
#defines the class as a child of the Thread class
class MinhaClasseThread(Thread):
def __init__(self):
print("Olá, construtor thread!!")
Thread.__init__(self)
    #defines the run() method, which is called when thread.start() is invoked
def run(self):
print("\nThread em execução.")
#instantiate an object of the class we created
minhaThread=MinhaClasseThread()
print("Objeto criado")
minhaThread.start()
print("Thread inicalizada")
minhaThread.join()
print("Thread finalizada")
###Output
Olá, construtor thread!!
Objeto criado
Thread em execução.
Thread inicalizada
Thread finalizada
###Markdown
**Multiple Threads**
###Code
import threading
class minhaThread (threading.Thread):
def __init__(self, threadID, nome, contador):
threading.Thread.__init__(self)
self.threadID = threadID
self.nome = nome
self.contador = contador
def run(self):
print("Iniciando thread %s com %d processos" %(self.name,self.contador))
processo(self.nome, self.contador)
print("Finalizando " + self.nome)
def processo(nome, contador):
while contador:
print( "Thread %s fazendo o processo %d" % (nome, contador))
contador -= 1
# Creating the threads
thread1 = minhaThread(1, "Alice", 8)
thread2 = minhaThread(2, "Bob", 8)
# Starting the new Threads
thread1.start()
thread2.start()
threads = []
threads.append(thread1)
threads.append(thread2)
for t in threads:
t.join()
print("Saindo da main")
###Output
Iniciando thread Thread-4 com 8 processos
Thread Alice fazendo o processo 8
Thread Alice fazendo o processo 7
Thread Alice fazendo o processo 6
Thread Alice fazendo o processo 5
Thread Alice fazendo o processo 4
Thread Alice fazendo o processo 3
Thread Alice fazendo o processo 2
Thread Alice fazendo o processo 1
Finalizando Alice
Iniciando thread Thread-5 com 8 processos
Thread Bob fazendo o processo 8
Thread Bob fazendo o processo 7
Thread Bob fazendo o processo 6
Thread Bob fazendo o processo 5
Thread Bob fazendo o processo 4
Thread Bob fazendo o processo 3
Thread Bob fazendo o processo 2
Thread Bob fazendo o processo 1
Finalizando Bob
Saindo da main
###Markdown
**Counting active Threads**
###Code
import threading
import time
import random
def minhaThread(i):
print("Thread {}: inicializada".format(i))
time.sleep(random.randint(1,5))
print("\nThread {}: finalizada".format(i))
for i in range(random.randint(2,50)):
thread=threading.Thread(target=minhaThread,args=(i,))
thread.start()
time.sleep(4)
print("Total de Threads ativas: {}".format(threading.active_count()))
###Output
Thread 0: inicializada
Thread 1: inicializada
Thread 2: inicializada
Thread 3: inicializadaThread 4: inicializada
Thread 5: inicializada
Thread 6: inicializada
Thread 7: inicializada
Thread 8: inicializada
Thread 9: inicializada
Thread 10: inicializada
Thread 11: inicializada
Thread 12: inicializada
Thread 13: inicializada
Thread 14: inicializada
Thread 15: inicializada
Thread 16: inicializada
Thread 17: inicializada
Thread 18: inicializada
Thread 19: inicializadaThread 20: inicializada
Thread 21: inicializada
Thread 22: inicializada
Thread 23: inicializada
Thread 24: inicializada
Thread 25: inicializada
Thread 26: inicializadaThread 27: inicializada
Thread 28: inicializada
Thread 29: inicializada
Thread 30: inicializada
Thread 31: inicializada
Thread 32: inicializada
Thread 33: inicializada
Thread 34: inicializada
Thread 35: inicializadaThread 36: inicializada
Thread 1: finalizada
Thread 27: finalizada
Thread 32: finalizada
Thread 33: finalizada
Thread 34: finalizada
Thread 12: finalizada
Thread 14: finalizada
Thread 16: finalizada
Thread 20: finalizada
Thread 30: finalizada
Thread 31: finalizada
Thread 35: finalizada
Thread 3: finalizada
Thread 7: finalizada
Thread 8: finalizada
Thread 11: finalizada
Thread 13: finalizada
Thread 19: finalizada
Thread 22: finalizada
Thread 25: finalizada
Thread 28: finalizada
Thread 36: finalizada
Thread 5: finalizada
Thread 9: finalizada
Thread 10: finalizada
Thread 15: finalizada
Thread 23: finalizada
Thread 24: finalizada
Thread 26: finalizada
Thread 29: finalizada
Total de Threads ativas: 12
Thread 0: finalizada
Thread 2: finalizada
Thread 4: finalizada
Thread 6: finalizada
Thread 17: finalizada
Thread 18: finalizada
Thread 21: finalizada
###Markdown
**Which thread are we in?**
###Code
import threading
import time
def threadTarget():
print("Thread atual: {}".format(threading.current_thread()))
threads = []
for i in range(10):
thread = threading.Thread(target=threadTarget)
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
###Output
Thread atual: <Thread(Thread-43, started 139775927371520)>
Thread atual: <Thread(Thread-44, started 139775902193408)>
Thread atual: <Thread(Thread-45, started 139775902193408)>
Thread atual: <Thread(Thread-46, started 139775902193408)>
Thread atual: <Thread(Thread-47, started 139775902193408)>
Thread atual: <Thread(Thread-48, started 139775902193408)>
Thread atual: <Thread(Thread-49, started 139775902193408)>
Thread atual: <Thread(Thread-50, started 139775902193408)>
Thread atual: <Thread(Thread-51, started 139775927371520)>
Thread atual: <Thread(Thread-52, started 139775927371520)>
###Markdown
**Main Thread**
###Code
import threading
import time
def myChildThread():
print("Thread Filha inicializada ----")
time.sleep(5)
print("Thread Atual ----------")
print(threading.current_thread())
print("-------------------------")
print("Main Thread -------------")
print(threading.main_thread())
print("-------------------------")
print("Thread Filha Finalizada")
child = threading.Thread(target=myChildThread)
child.start()
child.join()
###Output
Thread Filha inicializada ----
Thread Atual ----------
<Thread(Thread-53, started 139775927371520)>
-------------------------
Main Thread -------------
<_MainThread(MainThread, started 139777687885632)>
-------------------------
Thread Filha Finalizada
###Markdown
**Identifying the Threads**
###Code
import threading
import time
def myThread():
print("Thread {} inicializada".format(threading.currentThread().getName()))
time.sleep(10)
print("Thread {} finalizada".format(threading.currentThread().getName()))
for i in range(4):
threadName = "Thread-" + str(i)
thread = threading.Thread(name=threadName, target=myThread)
thread.start()
print("{}".format(threading.enumerate()))
###Output
Thread Thread-0 inicializada
Thread Thread-1 inicializada
Thread Thread-2 inicializada
Thread Thread-3 inicializada
[<_MainThread(MainThread, started 139777687885632)>, <Thread(Thread-2, started daemon 139777563162368)>, <Heartbeat(Thread-3, started daemon 139777554769664)>, <HistorySavingThread(IPythonHistorySavingThread, started 139777529591552)>, <ParentPollerUnix(Thread-1, started daemon 139777521198848)>, <Thread(Thread-0, started 139775927371520)>, <Thread(Thread-1, started 139775902193408)>, <Thread(Thread-2, started 139775935764224)>, <Thread(Thread-3, started 139776984327936)>]
Thread Thread-0 finalizada
Thread Thread-1 finalizada
Thread Thread-2 finalizada
Thread Thread-3 finalizada
###Markdown
**Deadlock**
###Code
import threading
import time
import random
class Filosofos(threading.Thread):
def __init__(self, name, leftFork, rightFork):
print("{} sentou na mesa".format(name))
threading.Thread.__init__(self, name=name)
self.leftFork = leftFork
self.rightFork = rightFork
def run(self):
print("{} começou a pensar".format(threading.currentThread().getName()))
while True:
time.sleep(random.randint(1,5))
print("{} parou de pensar".format(threading.currentThread().getName()))
self.leftFork.acquire()
time.sleep(random.randint(1,5))
try:
print("{} pegou o garfo da esquerda.".format(threading.currentThread().getName()))
self.rightFork.acquire()
try:
print("{} pegou os dois garfos e começou a comer".format(threading.currentThread().getName()))
finally:
self.rightFork.release()
print("{} soltou o garfo da direita".format(threading.currentThread().getName()))
finally:
self.leftFork.release()
print("{} soltou o garfo da esquerda".format(threading.currentThread().getName()))
fork1 = threading.RLock()
fork2 = threading.RLock()
fork3 = threading.RLock()
fork4 = threading.RLock()
fork5 = threading.RLock()
philosopher1 = Filosofos("Kant", fork1, fork2)
philosopher2 = Filosofos("Aristotle", fork2, fork3)
philosopher3 = Filosofos("Spinoza", fork3, fork4)
philosopher4 = Filosofos("Marx", fork4, fork5)
philosopher5 = Filosofos("Russell", fork5, fork1)
philosopher1.start()
philosopher2.start()
philosopher3.start()
philosopher4.start()
philosopher5.start()
philosopher1.join()
philosopher2.join()
philosopher3.join()
philosopher4.join()
philosopher5.join()
###Output
Kant sentou na mesa
Aristotle sentou na mesa
Spinoza sentou na mesa
Marx sentou na mesa
Russell sentou na mesa
Kant começou a pensar
Aristotle começou a pensar
Spinoza começou a pensar
Marx começou a pensar
Russell começou a pensar
Spinoza parou de pensar
Kant parou de pensar
Marx parou de pensar
Kant pegou o garfo da esquerda.
Kant pegou os dois garfos e começou a comer
Aristotle parou de pensar
Kant soltou o garfo da direita
Kant soltou o garfo da esquerda
Russell parou de pensar
Spinoza pegou o garfo da esquerda.
Aristotle pegou o garfo da esquerda.
Kant parou de pensar
Marx pegou o garfo da esquerda.
Russell pegou o garfo da esquerda.
Kant pegou o garfo da esquerda.
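The run above stalls once every philosopher is holding the fork on their left: each one then waits forever for a right fork held by a neighbour, which is exactly the circular wait this deadlock example is meant to illustrate. One common remedy (a sketch, not part of the original lesson) is to always acquire the two locks in a globally consistent order:
```
def run(self):
    # Acquire the forks in a fixed global order (here by object id) so no circular wait can form
    first, second = sorted([self.leftFork, self.rightFork], key=id)
    with first:
        with second:
            print("{} picked up both forks and started eating".format(self.getName()))
```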
###Markdown
**Semaphores**
###Code
import threading
import time
import random
class TicketSeller(threading.Thread):
ticketsSold = 0
def __init__(self, semaphore):
threading.Thread.__init__(self);
self.sem = semaphore
print("Venda de ingressos inicializada")
def run(self):
global ticketsAvailable
running = True
while running:
self.randomDelay()
self.sem.acquire()
if(ticketsAvailable <= 0):
running = False
else:
self.ticketsSold = self.ticketsSold + 1
ticketsAvailable = ticketsAvailable - 1
print("{} acabou de vender 1 ({} restantes)".format(self.getName(), ticketsAvailable))
self.sem.release()
print("Venda de ingresso {} Ingressos vendidos no total {}".format(self.getName(), self.ticketsSold))
def randomDelay(self):
time.sleep(random.randint(0,4)/4)
# define our semaphore
semaphore = threading.Semaphore()
# Number of tickets available
ticketsAvailable = 200
# our sellers
sellers = []
for i in range(4):
seller = TicketSeller(semaphore)
seller.start()
sellers.append(seller)
# joining all our sellers
for seller in sellers:
seller.join()
###Output
Venda de ingressos inicializada
Venda de ingressos inicializada
Venda de ingressos inicializada
Venda de ingressos inicializada
Thread-57 acabou de vender 1 (199 restantes)
Thread-55 acabou de vender 1 (198 restantes)
Thread-57 acabou de vender 1 (197 restantes)
Thread-57 acabou de vender 1 (196 restantes)
Thread-54 acabou de vender 1 (195 restantes)
Thread-56 acabou de vender 1 (194 restantes)
Thread-55 acabou de vender 1 (193 restantes)
Thread-55 acabou de vender 1 (192 restantes)
Thread-55 acabou de vender 1 (191 restantes)
Thread-54 acabou de vender 1 (190 restantes)
Thread-56 acabou de vender 1 (189 restantes)
Thread-56 acabou de vender 1 (188 restantes)
Thread-56 acabou de vender 1 (187 restantes)
Thread-55 acabou de vender 1 (186 restantes)
Thread-57 acabou de vender 1 (185 restantes)
Thread-54 acabou de vender 1 (184 restantes)
Thread-56 acabou de vender 1 (183 restantes)
Thread-57 acabou de vender 1 (182 restantes)
Thread-55 acabou de vender 1 (181 restantes)
Thread-56 acabou de vender 1 (180 restantes)
Thread-57 acabou de vender 1 (179 restantes)
Thread-54 acabou de vender 1 (178 restantes)
Thread-56 acabou de vender 1 (177 restantes)
Thread-56 acabou de vender 1 (176 restantes)
Thread-56 acabou de vender 1 (175 restantes)
Thread-56 acabou de vender 1 (174 restantes)
Thread-56 acabou de vender 1 (173 restantes)
Thread-57 acabou de vender 1 (172 restantes)
Thread-55 acabou de vender 1 (171 restantes)
Thread-56 acabou de vender 1 (170 restantes)
Thread-54 acabou de vender 1 (169 restantes)
Thread-57 acabou de vender 1 (168 restantes)
Thread-56 acabou de vender 1 (167 restantes)
Thread-55 acabou de vender 1 (166 restantes)
Thread-55 acabou de vender 1 (165 restantes)
Thread-55 acabou de vender 1 (164 restantes)
Thread-56 acabou de vender 1 (163 restantes)
Thread-54 acabou de vender 1 (162 restantes)
Thread-54 acabou de vender 1 (161 restantes)
Thread-54 acabou de vender 1 (160 restantes)
Thread-57 acabou de vender 1 (159 restantes)
Thread-56 acabou de vender 1 (158 restantes)
Thread-55 acabou de vender 1 (157 restantes)
Thread-54 acabou de vender 1 (156 restantes)
Thread-56 acabou de vender 1 (155 restantes)
Thread-55 acabou de vender 1 (154 restantes)
Thread-57 acabou de vender 1 (153 restantes)
Thread-55 acabou de vender 1 (152 restantes)
Thread-55 acabou de vender 1 (151 restantes)
Thread-55 acabou de vender 1 (150 restantes)
Thread-54 acabou de vender 1 (149 restantes)
Thread-55 acabou de vender 1 (148 restantes)
Thread-54 acabou de vender 1 (147 restantes)
Thread-56 acabou de vender 1 (146 restantes)
Thread-54 acabou de vender 1 (145 restantes)
Thread-54 acabou de vender 1 (144 restantes)
Thread-57 acabou de vender 1 (143 restantes)
Thread-56 acabou de vender 1 (142 restantes)
Thread-54 acabou de vender 1 (141 restantes)
Thread-56 acabou de vender 1 (140 restantes)
Thread-55 acabou de vender 1 (139 restantes)
Thread-54 acabou de vender 1 (138 restantes)
Thread-54 acabou de vender 1 (137 restantes)
Thread-57 acabou de vender 1 (136 restantes)
Thread-57 acabou de vender 1 (135 restantes)
Thread-55 acabou de vender 1 (134 restantes)
Thread-56 acabou de vender 1 (133 restantes)
Thread-55 acabou de vender 1 (132 restantes)
Thread-54 acabou de vender 1 (131 restantes)
Thread-54 acabou de vender 1 (130 restantes)
Thread-56 acabou de vender 1 (129 restantes)
Thread-57 acabou de vender 1 (128 restantes)
Thread-54 acabou de vender 1 (127 restantes)
Thread-56 acabou de vender 1 (126 restantes)
Thread-57 acabou de vender 1 (125 restantes)
Thread-55 acabou de vender 1 (124 restantes)
Thread-55 acabou de vender 1 (123 restantes)
Thread-54 acabou de vender 1 (122 restantes)
Thread-57 acabou de vender 1 (121 restantes)
Thread-56 acabou de vender 1 (120 restantes)
Thread-54 acabou de vender 1 (119 restantes)
Thread-54 acabou de vender 1 (118 restantes)
Thread-57 acabou de vender 1 (117 restantes)
Thread-55 acabou de vender 1 (116 restantes)
Thread-56 acabou de vender 1 (115 restantes)
Thread-54 acabou de vender 1 (114 restantes)
Thread-57 acabou de vender 1 (113 restantes)
Thread-55 acabou de vender 1 (112 restantes)
Thread-54 acabou de vender 1 (111 restantes)
Thread-54 acabou de vender 1 (110 restantes)
Thread-56 acabou de vender 1 (109 restantes)
Thread-57 acabou de vender 1 (108 restantes)
Thread-55 acabou de vender 1 (107 restantes)
Thread-57 acabou de vender 1 (106 restantes)
Thread-54 acabou de vender 1 (105 restantes)
Thread-56 acabou de vender 1 (104 restantes)
Thread-54 acabou de vender 1 (103 restantes)
Thread-57 acabou de vender 1 (102 restantes)
Thread-57 acabou de vender 1 (101 restantes)
Thread-55 acabou de vender 1 (100 restantes)
Thread-56 acabou de vender 1 (99 restantes)
Thread-55 acabou de vender 1 (98 restantes)
Thread-56 acabou de vender 1 (97 restantes)
Thread-57 acabou de vender 1 (96 restantes)
Thread-54 acabou de vender 1 (95 restantes)
Thread-56 acabou de vender 1 (94 restantes)
Thread-55 acabou de vender 1 (93 restantes)
Thread-55 acabou de vender 1 (92 restantes)
Thread-54 acabou de vender 1 (91 restantes)
Thread-56 acabou de vender 1 (90 restantes)
Thread-54 acabou de vender 1 (89 restantes)
Thread-57 acabou de vender 1 (88 restantes)
Thread-57 acabou de vender 1 (87 restantes)
Thread-55 acabou de vender 1 (86 restantes)
Thread-56 acabou de vender 1 (85 restantes)
Thread-54 acabou de vender 1 (84 restantes)
Thread-56 acabou de vender 1 (83 restantes)
Thread-56 acabou de vender 1 (82 restantes)
Thread-57 acabou de vender 1 (81 restantes)
Thread-55 acabou de vender 1 (80 restantes)
Thread-56 acabou de vender 1 (79 restantes)
Thread-54 acabou de vender 1 (78 restantes)
Thread-55 acabou de vender 1 (77 restantes)
Thread-57 acabou de vender 1 (76 restantes)
Thread-57 acabou de vender 1 (75 restantes)
Thread-57 acabou de vender 1 (74 restantes)
Thread-55 acabou de vender 1 (73 restantes)
Thread-55 acabou de vender 1 (72 restantes)
Thread-56 acabou de vender 1 (71 restantes)
Thread-56 acabou de vender 1 (70 restantes)
Thread-57 acabou de vender 1 (69 restantes)
Thread-57 acabou de vender 1 (68 restantes)
Thread-54 acabou de vender 1 (67 restantes)
Thread-57 acabou de vender 1 (66 restantes)
Thread-57 acabou de vender 1 (65 restantes)
Thread-55 acabou de vender 1 (64 restantes)
Thread-57 acabou de vender 1 (63 restantes)
Thread-56 acabou de vender 1 (62 restantes)
Thread-55 acabou de vender 1 (61 restantes)
Thread-54 acabou de vender 1 (60 restantes)
Thread-54 acabou de vender 1 (59 restantes)
Thread-54 acabou de vender 1 (58 restantes)
Thread-57 acabou de vender 1 (57 restantes)
Thread-55 acabou de vender 1 (56 restantes)
Thread-55 acabou de vender 1 (55 restantes)
Thread-55 acabou de vender 1 (54 restantes)
Thread-55 acabou de vender 1 (53 restantes)
Thread-56 acabou de vender 1 (52 restantes)
Thread-54 acabou de vender 1 (51 restantes)
Thread-57 acabou de vender 1 (50 restantes)
Thread-56 acabou de vender 1 (49 restantes)
Thread-55 acabou de vender 1 (48 restantes)
Thread-55 acabou de vender 1 (47 restantes)
Thread-56 acabou de vender 1 (46 restantes)
Thread-56 acabou de vender 1 (45 restantes)
Thread-57 acabou de vender 1 (44 restantes)
Thread-54 acabou de vender 1 (43 restantes)
Thread-56 acabou de vender 1 (42 restantes)
Thread-56 acabou de vender 1 (41 restantes)
Thread-56 acabou de vender 1 (40 restantes)
Thread-55 acabou de vender 1 (39 restantes)
Thread-54 acabou de vender 1 (38 restantes)
Thread-56 acabou de vender 1 (37 restantes)
Thread-57 acabou de vender 1 (36 restantes)
Thread-55 acabou de vender 1 (35 restantes)
Thread-55 acabou de vender 1 (34 restantes)
Thread-56 acabou de vender 1 (33 restantes)
Thread-56 acabou de vender 1 (32 restantes)
Thread-56 acabou de vender 1 (31 restantes)
Thread-54 acabou de vender 1 (30 restantes)
Thread-54 acabou de vender 1 (29 restantes)
Thread-54 acabou de vender 1 (28 restantes)
Thread-54 acabou de vender 1 (27 restantes)
Thread-57 acabou de vender 1 (26 restantes)
Thread-56 acabou de vender 1 (25 restantes)
Thread-57 acabou de vender 1 (24 restantes)
Thread-55 acabou de vender 1 (23 restantes)
Thread-55 acabou de vender 1 (22 restantes)
Thread-55 acabou de vender 1 (21 restantes)
Thread-54 acabou de vender 1 (20 restantes)
Thread-54 acabou de vender 1 (19 restantes)
Thread-56 acabou de vender 1 (18 restantes)
Thread-54 acabou de vender 1 (17 restantes)
Thread-55 acabou de vender 1 (16 restantes)
Thread-57 acabou de vender 1 (15 restantes)
Thread-57 acabou de vender 1 (14 restantes)
Thread-57 acabou de vender 1 (13 restantes)
Thread-57 acabou de vender 1 (12 restantes)
Thread-56 acabou de vender 1 (11 restantes)
Thread-55 acabou de vender 1 (10 restantes)
Thread-54 acabou de vender 1 (9 restantes)
Thread-57 acabou de vender 1 (8 restantes)
Thread-57 acabou de vender 1 (7 restantes)
Thread-56 acabou de vender 1 (6 restantes)
Thread-55 acabou de vender 1 (5 restantes)
Thread-56 acabou de vender 1 (4 restantes)
Thread-55 acabou de vender 1 (3 restantes)
Thread-57 acabou de vender 1 (2 restantes)
Thread-54 acabou de vender 1 (1 restantes)
Thread-56 acabou de vender 1 (0 restantes)
Venda de ingresso Thread-54 Ingressos vendidos no total 48
Venda de ingresso Thread-55 Ingressos vendidos no total 51
Venda de ingresso Thread-57 Ingressos vendidos no total 47
Venda de ingresso Thread-56 Ingressos vendidos no total 54
###Markdown
**Queue in Python**
###Code
#code adapted from http://www.learn4master.com/algorithms/python-queue-for-multithreading
# put(), get(), join() and task_done().
import threading
import time
from queue import Queue
def consumidor(q):
while(True):
name = threading.currentThread().getName()
print("Thread: {0} deseja obter um item da queue[tamanho atual = {1}] na data = {2} \n".format(name, q.qsize(), time.strftime('%H:%M:%S')))
item = q.get();
        time.sleep(3) # takes 3 seconds to process an item
print("Thread: {0} terminou de processar o item da queue[tamanho atual = {1}] na data = {2} \n".format(name, q.qsize(), time.strftime('%H:%M:%S')))
q.task_done()
def produtor(q):
    # this thread will add items to the queue
for i in range(10):
name = threading.currentThread().getName()
print("Thread: {0} começou a adicionar um item na queue[tamanho atual = {1}] na data = {2} \n".format(name, q.qsize(), time.strftime('%H:%M:%S')))
item = "item-" + str(i)
q.put(item)
print("Thread: {0} adicionou um item na queue[tamanho atual = {1}] na data = {2} \n".format(name, q.qsize(), time.strftime('%H:%M:%S')))
q.join()
if __name__ == '__main__':
q = Queue(maxsize = 3)
    threads_num = 3 # create 3 consumer threads
for i in range(threads_num):
t = threading.Thread(name = "ThreadConsumidora-"+str(i), target=consumidor, args=(q,))
t.start()
    # create one producer thread
t = threading.Thread(name = "ThreadProdutora", target=produtor, args=(q,))
t.start()
q.join()
###Output
Thread: ThreadConsumidora-0 deseja obter um item da queue[tamanho atual = 0] na data = 23:11:53
Thread: ThreadConsumidora-1 deseja obter um item da queue[tamanho atual = 0] na data = 23:11:53
Thread: ThreadConsumidora-2 deseja obter um item da queue[tamanho atual = 0] na data = 23:11:53
Thread: ThreadProdutora começou a adicionar um item na queue[tamanho atual = 0] na data = 23:11:53
Thread: ThreadProdutora adicionou um item na queue[tamanho atual = 1] na data = 23:11:53
Thread: ThreadProdutora começou a adicionar um item na queue[tamanho atual = 1] na data = 23:11:53
Thread: ThreadProdutora adicionou um item na queue[tamanho atual = 2] na data = 23:11:53
Thread: ThreadProdutora começou a adicionar um item na queue[tamanho atual = 2] na data = 23:11:53
Thread: ThreadProdutora adicionou um item na queue[tamanho atual = 3] na data = 23:11:53
Thread: ThreadProdutora começou a adicionar um item na queue[tamanho atual = 3] na data = 23:11:53
Thread: ThreadProdutora adicionou um item na queue[tamanho atual = 1] na data = 23:11:53
Thread: ThreadProdutora começou a adicionar um item na queue[tamanho atual = 1] na data = 23:11:53
Thread: ThreadProdutora adicionou um item na queue[tamanho atual = 2] na data = 23:11:53
Thread: ThreadProdutora começou a adicionar um item na queue[tamanho atual = 2] na data = 23:11:53
Thread: ThreadProdutora adicionou um item na queue[tamanho atual = 3] na data = 23:11:53
Thread: ThreadProdutora começou a adicionar um item na queue[tamanho atual = 3] na data = 23:11:53
Thread: ThreadConsumidora-1 terminou de processar o item da queue[tamanho atual = 3] na data = 23:11:56
Thread: ThreadConsumidora-2 terminou de processar o item da queue[tamanho atual = 3] na data = 23:11:56
Thread: ThreadConsumidora-0 terminou de processar o item da queue[tamanho atual = 3] na data = 23:11:56
Thread: ThreadConsumidora-0 deseja obter um item da queue[tamanho atual = 3] na data = 23:11:56
Thread: ThreadProdutora adicionou um item na queue[tamanho atual = 3] na data = 23:11:56
Thread: ThreadConsumidora-1 deseja obter um item da queue[tamanho atual = 3] na data = 23:11:56
Thread: ThreadProdutora começou a adicionar um item na queue[tamanho atual = 2] na data = 23:11:56
Thread: ThreadProdutora adicionou um item na queue[tamanho atual = 3] na data = 23:11:56
Thread: ThreadProdutora começou a adicionar um item na queue[tamanho atual = 3] na data = 23:11:56
Thread: ThreadConsumidora-2 deseja obter um item da queue[tamanho atual = 3] na data = 23:11:56
Thread: ThreadProdutora adicionou um item na queue[tamanho atual = 3] na data = 23:11:56
Thread: ThreadProdutora começou a adicionar um item na queue[tamanho atual = 3] na data = 23:11:56
Thread: ThreadConsumidora-0 terminou de processar o item da queue[tamanho atual = 3] na data = 23:11:59
Thread: ThreadConsumidora-0 deseja obter um item da queue[tamanho atual = 3] na data = 23:11:59
Thread: ThreadProdutora adicionou um item na queue[tamanho atual = 3] na data = 23:11:59
Thread: ThreadConsumidora-2 terminou de processar o item da queue[tamanho atual = 3] na data = 23:11:59
Thread: ThreadConsumidora-2 deseja obter um item da queue[tamanho atual = 3] na data = 23:11:59
Thread: ThreadConsumidora-1 terminou de processar o item da queue[tamanho atual = 3] na data = 23:11:59
Thread: ThreadConsumidora-1 deseja obter um item da queue[tamanho atual = 2] na data = 23:11:59
Thread: ThreadConsumidora-0 terminou de processar o item da queue[tamanho atual = 1] na data = 23:12:02
Thread: ThreadConsumidora-0 deseja obter um item da queue[tamanho atual = 1] na data = 23:12:02
Thread: ThreadConsumidora-2 terminou de processar o item da queue[tamanho atual = 0] na data = 23:12:02
Thread: ThreadConsumidora-1 terminou de processar o item da queue[tamanho atual = 0] na data = 23:12:02
Thread: ThreadConsumidora-1 deseja obter um item da queue[tamanho atual = 0] na data = 23:12:02
Thread: ThreadConsumidora-2 deseja obter um item da queue[tamanho atual = 0] na data = 23:12:02
Thread: ThreadConsumidora-0 terminou de processar o item da queue[tamanho atual = 0] na data = 23:12:05
Thread: ThreadConsumidora-0 deseja obter um item da queue[tamanho atual = 0] na data = 23:12:05
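Note that the consumer threads above never exit on their own (`while True`); the program only relies on `q.join()` returning once every produced item has been marked with `task_done()`. A common way to shut the consumers down cleanly (a sketch, not part of the original material) is to enqueue one sentinel value per consumer after the real items:
```
SENTINEL = None

def consumer_with_sentinel(q):
    while True:
        item = q.get()
        if item is SENTINEL:  # sentinel received: this consumer stops
            q.task_done()
            break
        # ... process the item ...
        q.task_done()

# after producing the real items:
# for _ in range(threads_num):
#     q.put(SENTINEL)
```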
|
labs/lab_04_BLN.ipynb | ###Markdown
MAT281 - Lab N°04 Problem 01 The `data` folder contains the file `nba.db`, which holds basic information about some NBA players.
###Code
from sqlalchemy import create_engine
import pandas as pd
import os
# Create the database connection
conn = create_engine(os.path.join('sqlite:///','data', 'nba.db'))
# query helper function
def consulta(query,conn):
return pd.read_sql_query(query, con=conn)
# example
query = """
SELECT * FROM player
"""
consulta(query,conn)
###Output
_____no_output_____
###Markdown
The goal is to extract as much information as possible from this dataset using **SQL** code. To achieve this, solve the following problems: 1. Show the first 5 rows
###Code
query = """
SELECT * FROM player
LIMIT 5
"""
consulta(query,conn)
###Output
_____no_output_____
###Markdown
2. Select the unique values of the `position` column.
###Code
query ="""
SELECT DISTINCT POSITION FROM PLAYER
"""
consulta(query,conn)
###Output
_____no_output_____
###Markdown
3. Select the column `name` and rename it to `nombre`
###Code
query = """
SELECT name as nombre FROM player
"""
consulta(query,conn)
###Output
_____no_output_____
###Markdown
4. Determine the time (in years) each player spent in their position
###Code
# each player's time = end year - start year
query = """
SELECT name,year_end-year_start FROM player
"""
consulta(query,conn)
###Output
_____no_output_____
###Markdown
5. Find the maximum value of the `weight` column for each value of the `position` column
###Code
query = """
SELECT DISTINCT position, Max(weight)FROM player GROUP BY (position)
"""
consulta(query,conn)
###Output
_____no_output_____
###Markdown
6. Find the total number of players for each value of the `year_start` column
###Code
query = """
SELECT year_start, count(year_start) FROM player GROUP BY (year_start)
"""
consulta(query,conn)
###Output
_____no_output_____
###Markdown
7. Find the minimum, maximum and average value of the `weight` column for each value of the `college` column
###Code
query = """
SELECT college, AVG(weight) as Prom, MAX(weight) as Máx , MIN(weight) as Mín FROM player GROUP BY college
"""
consulta(query,conn)
###Output
_____no_output_____
###Markdown
8. Filter for the players that satisfy: * a `year_start` value greater than 1990 and less than 2000 * a `position` value of `C`, `C-F` or `F-C` * a `college` value different from `Duke University`
###Code
query = """
SELECT * FROM player WHERE year_start > 1990 AND year_start<2000 and (position='C' or position='C-F' or position='F-C') and college <>'Duke University'
"""
consulta(query,conn)
###Output
_____no_output_____
###Markdown
9. Create two datasets and combine them in a single *query*. The conditions for each dataset are: * **df1**: * a `year_start` value greater than 1990 and less than 2000 * a `position` value of `C`, `C-F` or `F-C` * **df2**: * a `year_end` value less than 2000 * a `position` value of `G` or `F`
###Code
# Two sets are created and combined with UNION
df1 = """
SELECT * FROM player WHERE year_start>1990 AND year_start<2000 and (position='C' or position='C-F' or position='F-C')
"""
df2="""
SELECT * FROM player WHERE year_end< 2000 and (position='G' or position='F')
"""
query=df1+'UNION'+df2
consulta(query,conn)
###Output
SELECT * FROM player WHERE year_start>1990 AND year_start<2000 and (position='C' or position='C-F' or position='F-C')
UNION
SELECT * FROM player WHERE year_end< 2000 and (position='G' or position='F')
|
HA02_milara_andree_DataCleansing.ipynb | ###Markdown
Machine Learning Assignment 2: Data Conditioning Andree Vela Miguel Lara Data Collection and Cleaning To begin, we import the libraries that will be used in the analysis of the data. **Numpy and Pandas** are used to manipulate the data frames easily. **Matplotlib and seaborn** are used to plot the graphs.
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
###Output
_____no_output_____
###Markdown
In the following lines the data frame **grouped** is created. The dataset is pulled from the github repository and then grouped by hod (hour of death) and cod (cause of death). The variable **freq** is computed as the count of people that died for each specific combination of hod and cod. The dataset is also cleaned by dropping values in the hod column that do not correspond to a real hour. Finally, the multi-index is moved back into the data frame, and the leftover index column is removed because it does not provide any added value.
###Code
grouped = pd.read_csv( 'https://github.com/hadley/mexico-mortality/blob/master/deaths/deaths08.csv.bz2?raw=true', compression='bz2' )
grouped = grouped.groupby( [ 'hod', 'cod' ] )[ 'sex' ].count().rename( 'freq' ).reset_index()
grouped.drop( grouped[ ( grouped[ 'hod' ] == 99 ) | ( grouped[ 'hod' ] == 0 ) ].index, axis=0, inplace=True )
grouped[ 'hod' ] = grouped[ 'hod' ].replace( 24, 0 )
grouped = grouped.reset_index()
del grouped[ 'index' ]
###Output
_____no_output_____
###Markdown
Later, the **grouped** data frame is updated. The name of each cod (cause of death) is pulled from another dataset located in a github repository. The datasets are merged with a left join, keeping only the rows of the **grouped** data frame. In other words, the length of the **grouped** data frame does not change; instead, a new column with the name of each cod is added.
###Code
icd_df = pd.read_csv( "https://raw.githubusercontent.com/hadley/mexico-mortality/master/disease/icd-main.csv" )
icd_df = icd_df.drop_duplicates( 'code' )
icd_df = icd_df.rename( columns = { 'code': 'cod' } )
grouped = pd.merge( grouped, icd_df, on = 'cod', how = 'left' )
###Output
_____no_output_____
###Markdown
Data Computation (prop, freq_all, prop_all) Three new columns are added to the **grouped** data frame. The columns are computed by using the information of the data frame. Each new value computed is added to each instance of the data frame. ***prop*** column is the ratio of the number of deaths for a specific disease that happened in one hour and the total number of deaths of the same disease in the complete span of 24hrs. Example: 300 persons died of Amebiasis at 1pm / 1000 persons died of Amebiasis in the span of 24hrs. ***freq_all*** column is the total number of deaths that happened at a specific hour. Example: 24000 persons died at 4am. ***prop_all*** column is the ratio of ***freq_all*** and the sum of all the deaths in the span of 24hrs. Example: 2500 persons died at 5pm / 500,000 all the persons of the data set.
###Code
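# prop     : deaths for a given cod at a given hour / all deaths for that cod
# freq_all : total deaths at a given hour, summed over all cod
# prop_all : freq_all / total number of deaths in the dataset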
grouped[ 'prop' ] = grouped[ 'freq' ] / grouped.groupby( 'cod' )[ 'freq' ].transform( 'sum')
grouped[ 'freq_all' ] = grouped.groupby( 'hod' )[ 'freq' ].transform( 'sum')
grouped[ 'prop_all' ] = grouped[ 'freq_all' ] / grouped[ 'freq' ].sum()
###Output
_____no_output_____
###Markdown
Plot Number of Deaths by Hour The purpose of the following code is to show the amount of persons that died during each hour of the day. To make this possible is was neccessary to compute the number of deaths at a specific hour (***deathsbyHour***). It can be seen three peaks around 6am, 10am and 6pm, meaning that people is more like to die at these hours.
###Code
deathsByHour = grouped.groupby( 'hod' )[ 'freq' ].sum().rename( 'deaths' ).reset_index()
sns.set( rc={ 'figure.figsize':( 10, 4 ) } )
fig, ax = plt.subplots( figsize=(9,6) )
ax.plot( deathsByHour[ 'hod' ] , deathsByHour[ 'deaths' ], 'r')
ax.set_xlabel('hour')
ax.set_ylabel('number of deaths')
ax.set_title('Number of Deaths by Hour')
ax.set( ylim=[ 18000, 25000 ] )
###Output
_____no_output_____
###Markdown
Data Computation (Deviation) The following code is used to compute the mean square deviation between ***prop*** and ***prop_all***. The idea of this computation is to know how far the proportion of a disease at a specific hour is from the proportion of all the deaths at that specific hour. Example: Ambiasis proportion of deaths at 2pm is 0.0034, and the proportion of all deaths at 2pm is 0.00521. There might be diseases that have a different pattern than all of the other diseases.
###Code
devi = grouped.copy()
devi[ 'n' ] = devi.groupby( 'cod' )[ 'freq' ].transform( 'sum' )
devi[ 'dif2' ] = ( devi[ 'prop' ] - devi[ 'prop_all' ] ) ** 2
devi[ 'dist' ] = devi.groupby( 'cod' )[ 'dif2' ].transform( 'mean' )
devi = devi[ devi[ 'n' ] > 50 ]
###Output
_____no_output_____
###Markdown
Data Visualization The following section shows plots that help to understand the behavior of the data. The next plot explores the results provided by the deviation. It can be seen that small samples of diseases have larger deviation.
###Code
linear_scale = devi.filter(["cod", "n", "dist"])
linear_scale = linear_scale.drop_duplicates()
sns.set( rc={ 'figure.figsize':( 10, 4 ) } )
fig, ax = plt.subplots( figsize=(9,6) )
ax = sns.scatterplot(x="n", y="dist", data=linear_scale)
ax.set_xlabel('n')
ax.set_ylabel('dist')
ax.set_title('Linear Scales')
ax.set( ylim=[ -0.0004 ,0.006 ])
###Output
_____no_output_____
###Markdown
However, the previous plot does not provide much useful information. If the scales are changed to logarithmic, something more interesting happens. The relation between the sample size and the distance (deviation) is confirmed, but it can also be seen that there are diseases that are far from the **pattern** (the tendency line going downwards).
###Code
sns.set( rc={ 'figure.figsize':( 10, 4 ) } )
fig, ax = plt.subplots( figsize=(9,6))
ax = sns.scatterplot(x="n", y="dist", data=linear_scale)
ax.set_xlabel('n')
ax.set_ylabel('dist')
ax.set_title('Log Scales')
ax.set( ylim=[ 0.000001,0.01 ])
ax.set_yscale('log')
ax.set_xscale('log')
###Output
_____no_output_____
###Markdown
To make this clearer, the following plot shows the line that the diseases follow as the sample size increases. The line is computed by the seaborn library using robust linear regression.
###Code
test_dist = linear_scale['dist'].to_numpy()
test_n = linear_scale['n'].to_numpy()
log_dist = np.log10(test_dist)
log_n = np.log10(test_n)
sns.set( rc={ 'figure.figsize':( 10, 4 ) } )
fig, ax = plt.subplots( figsize=(9,6))
ax = sns.regplot(x=log_n, y=log_dist, robust=True);
ax.set_xlabel('n')
ax.set_ylabel('dist')
ax.set_title('Log Scales')
ax.set( ylim=[ -6, -1 ])
###Output
_____no_output_____
###Markdown
As mentioned above, there are diseases that are far from the ***pattern***. To confirm this, it is necessary to compute the residuals between dist and the linear model (which represents the ***pattern*** of the diseases as the sample size increases). For this, the Huber Regressor from the scikit-learn library is used, which is a robust linear model. The *residual* is then computed as the difference between the real value and the predicted value.
###Code
from sklearn.linear_model import HuberRegressor
x = np.log(devi['n']).values[:, np.newaxis]
y = np.log(devi['dist']).values
model = HuberRegressor()
model.fit(x, y)
devi['residuals'] = y - model.predict(x)
###Output
_____no_output_____
###Markdown
After the residuals are computed, it is necessary to visualize the results to see if there is anything unusual. The following code generates a plot showing that some of the diseases are very far from the ***pattern***.
###Code
fig, ax = plt.subplots(figsize=(9, 6))
sns.scatterplot(x = 'n', y = 'residuals', data=devi)
ax.set_xscale('log')
ax.set_xlim(50, 200000)
ax.set_ylim(-1, 3)
ax.hlines(1.5, 0, 200000);
###Output
_____no_output_____
###Markdown
Plots of Unusual Pattern Diseases Therefore, it is arbitrarily decided that the residuals above 1.5 are considered unusual. The next piece of code gets the information of the diseases that are above the 1.5 threshold. Besides, the unusual disease are separated in two datasets. The first dataset is for the diseases which sample is above 350 instances and the other dataset is for the rest of the diseases.
###Code
#Devi grouping by disease
devi_reduced = devi.drop_duplicates(subset=['cod'])
devi_reduced = devi_reduced[['cod','disease','n','dist','residuals']]
#get the unusual diseases
unusual = devi_reduced[devi_reduced['residuals'] > 1.5]
#get the hod of the unusual diseases
hod_unusual = pd.merge( unusual, grouped[['cod','hod','freq','prop','prop_all']], on = 'cod', how = 'left' )
#split the hod unusual
hod_unusual_M = hod_unusual[hod_unusual['n'] > 350] # 8 cods
hod_unusual_m = hod_unusual[hod_unusual['n'] < 350] # 5 cods
###Output
_____no_output_____
###Markdown
The next code is used to get the values of each disease from the data set that contains the group of diseases with samples **above 350**.
###Code
#Get the values of each disease, n above 350
temp = hod_unusual_M[ hod_unusual_M['cod'] == 'V09']
temp_sort = temp.sort_values('hod')
temp2 = hod_unusual_M[ hod_unusual_M['cod'] == 'V87']
temp2_sort = temp2.sort_values('hod')
temp3 = hod_unusual_M[ hod_unusual_M['cod'] == 'V89']
temp3_sort = temp3.sort_values('hod')
temp4 = hod_unusual_M[ hod_unusual_M['cod'] == 'W69']
temp4_sort = temp4.sort_values('hod')
temp5 = hod_unusual_M[ hod_unusual_M['cod'] == 'W74']
temp5_sort = temp5.sort_values('hod')
temp6 = hod_unusual_M[ hod_unusual_M['cod'] == 'W87']
temp6_sort = temp6.sort_values('hod')
temp7 = hod_unusual_M[ hod_unusual_M['cod'] == 'X95']
temp7_sort = temp7.sort_values('hod')
temp8 = hod_unusual_M[ hod_unusual_M['cod'] == 'X99']
temp8_sort = temp8.sort_values('hod')
###Output
_____no_output_____
###Markdown
The following code plots each one of the diseases mentioned above. Each plot has two lines, one for ***prop*** vs ***hod*** and the other for ***prop_all*** vs ***hod***.
###Code
#Plotting
fig=plt.figure(figsize=(19,15))
ax=fig.add_subplot(3,3,6)
ax.plot( temp_sort['hod'] , temp_sort['prop'], 'r', color='blue', linewidth=2)
ax.plot( temp_sort['hod'] , temp_sort['prop_all'], 'r', color='black', linewidth=1)
ax.set_xlabel('hod')
#ax.set_ylabel('prop')
ax.set_title('Pedestrian injured in other and \n unspecified transport accidents')
ax.set( ylim=[ 0, 0.125])
ax.set( xlim=[ 1, 24])
ax=fig.add_subplot(3,3,7)
ax.plot( temp2_sort['hod'] , temp2_sort['prop'], 'r', color='blue', linewidth=2)
ax.plot( temp2_sort['hod'] , temp2_sort['prop_all'], 'r', color='black', linewidth=1)
ax.set_xlabel('hod')
ax.set_ylabel('prop')
ax.set_title("Traffic accident of specified type \n but victim's mode of transport unknown")
ax.set( ylim=[ 0, 0.125])
ax.set( xlim=[ 1, 24])
ax=fig.add_subplot(3,3,5)
ax.plot( temp3_sort['hod'] , temp3_sort['prop'], 'r', color='blue', linewidth=2)
ax.plot( temp3_sort['hod'] , temp3_sort['prop_all'], 'r', color='black', linewidth=1)
#ax.set_xlabel('hod')
#ax.set_ylabel('prop')
ax.set_title('Motor− or nonmotor−vehicle accident, \n type of vehicle unspecified')
ax.set( ylim=[ 0, 0.125])
ax.set( xlim=[ 1, 24])
ax=fig.add_subplot(3,3,3)
ax.plot( temp4_sort['hod'] , temp4_sort['prop'], 'r', color='blue', linewidth=2)
ax.plot( temp4_sort['hod'] , temp4_sort['prop_all'], 'r', color='black', linewidth=1)
#ax.set_xlabel('hod')
#ax.set_ylabel('prop')
ax.set_title('Drowning and submersion \n while in natural water')
ax.set( ylim=[ 0, 0.125])
ax.set( xlim=[ 1, 24])
ax=fig.add_subplot(3,3,8)
ax.plot( temp5_sort['hod'] , temp5_sort['prop'], 'r', color='blue', linewidth=2)
ax.plot( temp5_sort['hod'] , temp5_sort['prop_all'], 'r', color='black', linewidth=1)
ax.set_xlabel('hod')
#ax.set_ylabel('prop')
ax.set_title('Unspecified drowning and submersion')
ax.set( ylim=[ 0, 0.125])
ax.set( xlim=[ 1, 24])
ax=fig.add_subplot(3,3,4)
ax.plot( temp6_sort['hod'] , temp6_sort['prop'], 'r', color='blue', linewidth=2)
ax.plot( temp6_sort['hod'] , temp6_sort['prop_all'], 'r', color='black', linewidth=1)
#ax.set_xlabel('hod')
ax.set_ylabel('prop')
ax.set_title('Exposure to unspecified electric current')
ax.set( ylim=[ 0, 0.125])
ax.set( xlim=[ 1, 24])
ax=fig.add_subplot(3,3,1)
ax.plot( temp7_sort['hod'] , temp7_sort['prop'], 'r', color='blue', linewidth=2)
ax.plot( temp7_sort['hod'] , temp7_sort['prop_all'], 'r', color='black', linewidth=1)
#ax.set_xlabel('hod')
ax.set_ylabel('prop')
ax.set_title('Assault (homicide) by other and \n unspecified firearm discharge')
ax.set( ylim=[ 0, 0.125])
ax.set( xlim=[ 1, 24])
ax=fig.add_subplot(3,3,2)
ax.plot( temp8_sort['hod'] , temp8_sort['prop'], 'r', color='blue', linewidth=2)
ax.plot( temp8_sort['hod'] , temp8_sort['prop_all'], 'r', color='black', linewidth=1)
#ax.set_xlabel('hod')
#ax.set_ylabel('prop')
ax.set_title('Assault (homicide) by sharp object')
ax.set( ylim=[ 0, 0.125])
ax.set( xlim=[ 1, 24])
#plt.savefig('D:/Courses/Machine Learning/HW2/above_350.png')
###Output
_____no_output_____
###Markdown
The next code is used to get the values of each disease from the data set that contains the group of diseases with samples **below 350**.
###Code
#Get the values of each disease, n below 350
temp9 = hod_unusual_m[ hod_unusual_m['cod'] == 'R95']
temp9_sort = temp9.sort_values('hod')
temp10 = hod_unusual_m[ hod_unusual_m['cod'] == 'V79']
temp10_sort = temp10.sort_values('hod')
temp11 = hod_unusual_m[ hod_unusual_m['cod'] == 'V95']
temp11_sort = temp11.sort_values('hod')
temp12 = hod_unusual_m[ hod_unusual_m['cod'] == 'W73']
temp12_sort = temp12.sort_values('hod')
temp13 = hod_unusual_m[ hod_unusual_m['cod'] == 'X33']
temp13_sort = temp13.sort_values('hod')
#Plotting
fig=plt.figure(figsize=(16,10))
ax=fig.add_subplot(2,3,4)
ax.plot( temp9_sort['hod'] , temp9_sort['prop'], 'r', color='blue', linewidth=2)
ax.plot( temp9_sort['hod'] , temp9_sort['prop_all'], 'r', color='black', linewidth=1)
ax.set_xlabel('hod', fontsize=12)
ax.set_ylabel('prop', fontsize=12)
ax.set_title('Sudden infant death syndrome', fontsize=12)
ax.set( ylim=[ 0, 0.3])
ax.set( xlim=[ 1, 24])
ax=fig.add_subplot(2,3,2)
ax.plot( temp10_sort['hod'] , temp10_sort['prop'], 'r', color='blue', linewidth=2)
ax.plot( temp10_sort['hod'] , temp10_sort['prop_all'], 'r', color='black', linewidth=1)
#ax.set_xlabel('hod', fontsize=12)
#ax.set_ylabel('prop', fontsize=12)
ax.set_title("Bus occupant injured in other and \n unspecified transport accidents", fontsize=12)
ax.set( ylim=[ 0, 0.3])
ax.set( xlim=[ 1, 24])
ax=fig.add_subplot(2,3,1)
ax.plot( temp11_sort['hod'] , temp11_sort['prop'], 'r', color='blue', linewidth=2)
ax.plot( temp11_sort['hod'] , temp11_sort['prop_all'], 'r', color='black', linewidth=1)
#ax.set_xlabel('hod', fontsize=12)
ax.set_ylabel('prop', fontsize=12)
ax.set_title('Accident to powered aircraft \n causing injury to occupant', fontsize=12)
ax.set( ylim=[ 0, 0.3])
ax.set( xlim=[ 1, 24])
ax=fig.add_subplot(2,3,3)
ax.plot( temp12_sort['hod'] , temp12_sort['prop'], 'r', color='blue', linewidth=2)
ax.plot( temp12_sort['hod'] , temp12_sort['prop_all'], 'r', color='black', linewidth=1)
ax.set_xlabel('hod', fontsize=12)
#ax.set_ylabel('prop', fontsize=12)
ax.set_title('Other specified drowning and \n submersion', fontsize=12)
ax.set( ylim=[ 0, 0.3])
ax.set( xlim=[ 1, 24])
ax=fig.add_subplot(2,3,5)
ax.plot( temp13_sort['hod'] , temp13_sort['prop'], 'r', color='blue', linewidth=2)
ax.plot( temp13_sort['hod'] , temp13_sort['prop_all'], 'r', color='black', linewidth=1)
ax.set_xlabel('hod', fontsize=12)
#ax.set_ylabel('prop', fontsize=12)
ax.set_title('Victim of lightning', fontsize=12)
ax.set( ylim=[ 0, 0.3])
ax.set( xlim=[ 1, 24])
#plt.savefig('D:/Courses/Machine Learning/HW2/below_350.png')
###Output
_____no_output_____ |
databricks_pyspark_examples/delta_lake_expungement.ipynb | ###Markdown
Work with Virginia Criminal Expungement Data. Source: https://virginiacourtdata.org/ Goal: upload the data, save it as a Delta table, and perform various analyses with Spark
###Code
from delta import *
from pyspark.sql import SparkSession
spark = SparkSession.builder.appName("MyApp") \
.config("spark.sql.extensions", "io.delta.sql.DeltaSparkSessionExtension") \
.config("spark.sql.catalog.spark_catalog", "org.apache.spark.sql.delta.catalog.DeltaCatalog") \
.getOrCreate()
spark
# File location and type
file_location = "/FileStore/tables/circuit_criminal_2000_anon_00.csv"
file_type = "csv"
# CSV options
infer_schema = "false"
first_row_is_header = "true"
delimiter = ","
# The applied options are for CSV files. For other file types, these will be ignored.
df = spark.read.format(file_type) \
.option("inferSchema", infer_schema) \
.option("header", first_row_is_header) \
.option("sep", delimiter) \
.load(file_location)
if DeltaTable.isDeltaTable(spark, '/tmp/delta-table-1'):
print('Table exists. Removing old table...')
dbutils.fs.rm('/tmp/delta-table-1',recurse=True) # remove if it already exists
df.write.format("delta").save('/tmp/delta-table-1')
df = spark.read.format("delta").load("/tmp/delta-table-1")
df.show(vertical=True)
from delta.tables import *
from pyspark.sql.functions import *
# set the path
deltaTable = DeltaTable.forPath(spark, "/tmp/delta-table-1")
# deltaTable.toDF().show()
type(deltaTable)
%sql
-- selecting first 5 rows
SELECT * FROM delta.`/tmp/delta-table-1`
LIMIT 5;
%sql
-- Showing the number of each ChargeType, descending by count
SELECT ChargeType, COUNT(*) as count
FROM delta.`/tmp/delta-table-1`
GROUP BY ChargeType
ORDER BY count DESC
deltaTable.toDF().groupby('ChargeType').count().orderBy(col('count'), ascending=False).show()
# Updating the fips column (+100) in the Delta table, then creating a df with 2 rows and a subset of its columns
deltaTable.update(
set = { "fips" : expr("fips + 100")})
tbl1 = deltaTable.toDF().select('ChargeType', 'fips').limit(2)
tbl1.show()
# Updating ChargeType: 'Infraction' to ChargeType: 'Minor Infraction' and updating Delta table
from pyspark.sql.functions import regexp_replace
df = deltaTable.toDF().withColumn('ChargeType', regexp_replace('ChargeType', 'Infraction', 'Minor Infraction'))
df.write.format("delta").mode("overwrite").save("/tmp/delta-table-1")
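# Overwriting writes a new version of the Delta table; version 0 remains accessible via time travel (versionAsOf), as used below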
df.groupby('ChargeType').count().orderBy(col('count'), ascending=False).show()
%sql
SELECT ChargeType, COUNT(*) as count
FROM delta.`/tmp/delta-table-1`
GROUP BY ChargeType
ORDER BY count DESC
# Using time travel feature to load original version of delta table, where ChargeType is still 'Infraction'
df0 = spark.read.format("delta").option("versionAsOf", 0).load("/tmp/delta-table-1")
df0.select('HearingDate', 'HearingResult', 'ChargeType').filter(df0.ChargeType == 'Infraction').show()
###Output
_____no_output_____ |
Dag10.ipynb | ###Markdown
**Part 1**
###Code
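# Count the 1-jolt and 3-jolt gaps between consecutive adapters, after adding
# the outlet (0) and the built-in device adapter (max + 3) to the sorted list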
device = max(adapters) + 3
adapters.extend((0, device))
adapters.sort()
diff1 = 0
diff3 = 0
for i, v in enumerate(adapters):
if v == max(adapters):
print(f'1-jolt differences: {diff1} * 3-jolt differences {diff3} = {diff1*diff3}')
break
verschil = adapters[i+1] - adapters[i]
if verschil == 1:
diff1 += 1
elif verschil == 3:
diff3 += 1
elif (verschil == 0) | (verschil == 2):
pass
else:
        print("Can't connect: difference is larger than 3.")
break
###Output
1-jolt differences: 67 * 3-jolt differences 28 = 1876
###Markdown
**Part 2** Solution: graph theory. Each adapter is a node, each edge is a possible connection (i.e. directed, with a difference of at most 3).
###Code
graaf = {}
for i, v in enumerate(adapters):
aansluitend = [x for x in adapters if ((x > v) & (x <= (v+3)))]
graaf.setdefault(v, aansluitend)
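# Recursively enumerate every path from start to end in the adapter graph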
def vindallepaden(graph, start, end, path=[]):
path = path + [start]
if start == end:
return [path]
if start not in list(graph):
return []
paths = []
for node in graph[start]:
if node not in path:
newpaths = vindallepaden(graph, node, end, path)
for newpath in newpaths:
paths.append(newpath)
return paths
###Output
_____no_output_____
###Markdown
Split into chunks at nodes that must appear in every combination, to keep the computation time manageable (a sketch for deriving these split points automatically is shown below).
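A possible way to derive such break points automatically (a sketch, not the notebook's hard-coded choice; it reuses `adapters`, `graaf`, `vindallepaden` and `device` from the cells above): any adapter that is followed by a jump of exactly 3 jolts must lie on every valid path, so the total path count factorises at those adapters.
```python
# Sketch: derive the break points instead of hard-coding them
auto_breaks = [adapters[i] for i in range(len(adapters) - 1)
               if adapters[i + 1] - adapters[i] == 3]
auto_breaks.append(device)  # every path ends at the device

total = 1
start = 0
for b in auto_breaks:
    total *= len(vindallepaden(graaf, start, b))  # paths inside this segment
    start = b
print(total)
```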
###Code
breaklist = [50, 105, device]
combinaties = []
start = 0
for b in breaklist:
combinaties.append(len(vindallepaden(graaf, start, b)))
start = b
print(combinaties[0] * combinaties[1] * combinaties[2])
###Output
14173478093824
|
ai/notebooks/model.ipynb | ###Markdown
Model Training. Using transfer learning from VGG-16. [Keras Sample 1](https://medium.com/@14prakash/transfer-learning-using-keras-d804b2e04ef8), [Keras Sample 2](https://www.kaggle.com/venuraja79/using-transfer-learning-with-keras)
###Code
from keras import applications
from keras.preprocessing.image import ImageDataGenerator
from keras import optimizers
from keras.models import Sequential, Model
from keras.layers import Dropout, Dense, GlobalAveragePooling2D, Flatten
from keras import backend as k
from keras.callbacks import ModelCheckpoint, LearningRateScheduler, TensorBoard, EarlyStopping
img_width, img_height = 224, 224
model = applications.VGG16(weights = "imagenet", include_top=False, input_shape = (img_width, img_height, 3))
# freeze layers
for layer in model.layers:
layer.trainable = False
# add custom layers
x = model.output
x = GlobalAveragePooling2D()(x)
#x = Dense(128, activation="relu")(x)
#x = Dropout(0.5)(x)
x = Dense(256, activation="relu")(x)
prediction = Dense(2, activation="softmax", name="prediction")(x)
# generate new model
m = Model(inputs = model.input, outputs = prediction)
m.compile(loss = 'categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
m.summary()
###Output
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_1 (InputLayer) (None, 224, 224, 3) 0
_________________________________________________________________
block1_conv1 (Conv2D) (None, 224, 224, 64) 1792
_________________________________________________________________
block1_conv2 (Conv2D) (None, 224, 224, 64) 36928
_________________________________________________________________
block1_pool (MaxPooling2D) (None, 112, 112, 64) 0
_________________________________________________________________
block2_conv1 (Conv2D) (None, 112, 112, 128) 73856
_________________________________________________________________
block2_conv2 (Conv2D) (None, 112, 112, 128) 147584
_________________________________________________________________
block2_pool (MaxPooling2D) (None, 56, 56, 128) 0
_________________________________________________________________
block3_conv1 (Conv2D) (None, 56, 56, 256) 295168
_________________________________________________________________
block3_conv2 (Conv2D) (None, 56, 56, 256) 590080
_________________________________________________________________
block3_conv3 (Conv2D) (None, 56, 56, 256) 590080
_________________________________________________________________
block3_pool (MaxPooling2D) (None, 28, 28, 256) 0
_________________________________________________________________
block4_conv1 (Conv2D) (None, 28, 28, 512) 1180160
_________________________________________________________________
block4_conv2 (Conv2D) (None, 28, 28, 512) 2359808
_________________________________________________________________
block4_conv3 (Conv2D) (None, 28, 28, 512) 2359808
_________________________________________________________________
block4_pool (MaxPooling2D) (None, 14, 14, 512) 0
_________________________________________________________________
block5_conv1 (Conv2D) (None, 14, 14, 512) 2359808
_________________________________________________________________
block5_conv2 (Conv2D) (None, 14, 14, 512) 2359808
_________________________________________________________________
block5_conv3 (Conv2D) (None, 14, 14, 512) 2359808
_________________________________________________________________
block5_pool (MaxPooling2D) (None, 7, 7, 512) 0
_________________________________________________________________
global_average_pooling2d_1 ( (None, 512) 0
_________________________________________________________________
dense_1 (Dense) (None, 256) 131328
_________________________________________________________________
prediction (Dense) (None, 2) 514
=================================================================
Total params: 14,846,530
Trainable params: 131,842
Non-trainable params: 14,714,688
_________________________________________________________________
###Markdown
Data Loader. Sample [data loader](https://gist.github.com/fchollet/0830affa1f7f19fd47b06d4cf89ed44d)
###Code
train_data_dir = 'data/catsndogs/train'
validation_data_dir = 'data/catsndogs/validation'
nb_train_samples = 855
nb_validation_samples = 95
epochs = 10
batch_size = 24
train = ImageDataGenerator(rescale = 1./255,
horizontal_flip = True,
fill_mode = "nearest",
zoom_range = 0.3,
width_shift_range = 0.3,
height_shift_range=0.3,
rotation_range=30)
train_generator = train.flow_from_directory(
train_data_dir,
target_size=(img_width, img_height),
batch_size=batch_size,
class_mode='categorical')
test = ImageDataGenerator(rescale = 1./255)
test_generator = test.flow_from_directory(
validation_data_dir,
target_size=(img_width, img_height),
batch_size=batch_size,
class_mode='categorical')
dir(train_generator)
train_generator.samples
m.fit_generator(
train_generator,
steps_per_epoch=nb_train_samples // batch_size,
epochs=epochs,
validation_data=test_generator,
validation_steps=nb_validation_samples // batch_size)
m.save('latest.h5')
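# (Sketch, not part of the original notebook) The saved model can later be
# restored for inference with keras.models.load_model, e.g.:
#   from keras.models import load_model
#   restored = load_model('latest.h5')
#   preds = restored.predict(next(test_generator)[0])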
###Output
Epoch 1/10
35/35 [==============================] - 63s 2s/step - loss: 0.6237 - acc: 0.6510 - val_loss: 0.5186 - val_acc: 0.6528
Epoch 2/10
35/35 [==============================] - 61s 2s/step - loss: 0.4852 - acc: 0.7757 - val_loss: 0.3312 - val_acc: 0.9167
Epoch 3/10
35/35 [==============================] - 57s 2s/step - loss: 0.4308 - acc: 0.8118 - val_loss: 0.2748 - val_acc: 0.9167
Epoch 4/10
35/35 [==============================] - 59s 2s/step - loss: 0.3908 - acc: 0.8241 - val_loss: 0.2667 - val_acc: 0.9583
Epoch 5/10
35/35 [==============================] - 61s 2s/step - loss: 0.3633 - acc: 0.8486 - val_loss: 0.3703 - val_acc: 0.8472
Epoch 6/10
35/35 [==============================] - 60s 2s/step - loss: 0.3443 - acc: 0.8607 - val_loss: 0.2439 - val_acc: 0.9167
Epoch 7/10
35/35 [==============================] - 56s 2s/step - loss: 0.3448 - acc: 0.8427 - val_loss: 0.2347 - val_acc: 0.9583
Epoch 8/10
35/35 [==============================] - 58s 2s/step - loss: 0.3245 - acc: 0.8500 - val_loss: 0.2435 - val_acc: 0.9306
Epoch 9/10
35/35 [==============================] - 57s 2s/step - loss: 0.3000 - acc: 0.8616 - val_loss: 0.2416 - val_acc: 0.9167
Epoch 10/10
35/35 [==============================] - 60s 2s/step - loss: 0.3057 - acc: 0.8643 - val_loss: 0.2971 - val_acc: 0.8611
|
live_inference_multiple.ipynb | ###Markdown
Image Classification Live Inference Check connected video device address
###Code
!ls -ltrh /dev/video*
###Output
_____no_output_____
###Markdown
Run the camera
###Code
from jetcam.csi_camera import CSICamera
from jetcam.usb_camera import USBCamera
# USB Camera
camera1 = USBCamera(width=224, height=224, capture_device=1) # confirm the capture_device number
#camera = USBCamera(width=244, height=244, capture_width=640, capture_height=480, capture_de)
# CSI Camera (Raspberry Pi Camera Module V2)
camera = CSICamera(width=224, height=224, capture_device=0)
#camera1 = CSICamera(width=244, height=244, capture_device=1)
camera.running = True
camera1.running = True
print("camera created")
import ipywidgets
import traitlets
from IPython.display import display
from jetcam.utils import bgr8_to_jpeg
image_widget = ipywidgets.Image(format='jpeg')
image_widget2 = ipywidgets.Image(format='jpeg')
camera_link = traitlets.dlink((camera, 'value'), (image_widget, 'value'), transform=bgr8_to_jpeg)
camera_link2 = traitlets.dlink((camera1, 'value'), (image_widget2, 'value'), transform=bgr8_to_jpeg)
#display(image_widget)
#display(image_widget2)
###Output
_____no_output_____
###Markdown
Uncomment/edit the associated lines for the classification task you're building and execute the cell. This cell should only take a few seconds to execute. Model
###Code
import torch
import torchvision
model = torchvision.models.resnet18(pretrained=False)
model.fc = torch.nn.Linear(512, 3)
model.load_state_dict(torch.load('/nvdli-nano/data/classification/my_model.pth'))
device = torch.device('cuda')
model = model.to(device).eval()
###Output
_____no_output_____
###Markdown
Live Execution. Execute the cell below to set up the live execution widget. This cell should only take a few seconds to execute.
###Code
import threading
import time
from utils import preprocess
import torch.nn.functional as F
CATEGORIES = ['background', 'bluecar', 'yellowcar']
state_widget = ipywidgets.ToggleButtons(options=['stop', 'live'], description='state', value='stop')
prediction_widget = ipywidgets.Text(description='prediction')
prediction_widget1 = ipywidgets.Text(description='prediction')
score_widgets = []
score_widgets1 = []
for category in CATEGORIES:
score_widget = ipywidgets.FloatSlider(min=0.0, max=1.0, description=category, orientation='vertical')
score_widget1 = ipywidgets.FloatSlider(min=0.0, max=1.0, description=category, orientation='vertical')
score_widgets.append(score_widget)
score_widgets1.append(score_widget1)
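# Grab a frame from each camera, classify it with the model, and update the
# per-category score sliders and prediction text while the state is 'live'.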
def live(state_widget, model, camera, camera1, prediction_widget, prediction_widget1, score_widget, score_widget1):
while state_widget.value == 'live':
image = camera.value
image1 = camera1.value
preprocessed = preprocess(image)
preprocessed1 = preprocess(image1)
output = model(preprocessed)
output1 = model(preprocessed1)
output = F.softmax(output, dim=1).detach().cpu().numpy().flatten()
output1 = F.softmax(output1, dim=1).detach().cpu().numpy().flatten()
category_index = output.argmax()
category_index1 = output1.argmax()
prediction_widget.value = CATEGORIES[category_index]
prediction_widget1.value = CATEGORIES[category_index1]
for i, score in enumerate(list(output)):
score_widgets[i].value = score
for i, score1 in enumerate(list(output1)):
score_widgets1[i].value = score1
def start_live(change):
if change['new'] == 'live':
execute_thread = threading.Thread(target=live, args=(state_widget, model, camera, camera1, prediction_widget, prediction_widget1, score_widget, score_widget1))
execute_thread.start()
state_widget.observe(start_live, names='value')
live_execution_widget = ipywidgets.VBox([
ipywidgets.HBox(score_widgets),
prediction_widget
])
live_execution_widget1 = ipywidgets.VBox([
ipywidgets.HBox(score_widgets1),
prediction_widget1
])
live_execution_widget2 = state_widget
# display(live_execution_widget)
print("live_execution_widget created")
###Output
_____no_output_____
###Markdown
Display the Interactive Tool!
###Code
first_camera = ipywidgets.HBox([image_widget, live_execution_widget])
second_camera = ipywidgets.HBox([image_widget2, live_execution_widget1])
print("Drive-thru camera 1")
display(first_camera)
print("Drive-thru camera 2")
display(second_camera)
display(live_execution_widget2)
camera.running = False
camera.cap.release()
import os
os._exit(00)
###Output
_____no_output_____ |
TLA_4D_Example.ipynb | ###Markdown
In this notebook, we will run a ready-made network starting from some ATLAS data, which is already normalized. There is also an alternative to train the network from scratch. Look into the dataset First we need to make sure that Python 3.8 is used in the notebook. It is required in order to open this particular .pkl file.
###Code
import sys
sys.version
###Output
_____no_output_____
###Markdown
We take a pickle dataset and open it with Pandas (after importing pandas). Note that you have to change the paths to the directory where your processed files are.
###Code
import pandas as pd
# Change these paths to point to where you have stored the datasets.
train_path = 'train_jet_objects.pkl'
test_path = 'test_jet_objects.pkl'
# Reads the .pkl-files with Pandas
train = pd.read_pickle(train_path)
test = pd.read_pickle(test_path)
# To get an idea of the order of magnitude we are going to see in the plots we show the first elements
# in the samples:
print('Training sample:')
print(train.head())
print('\n')
print('Testing sample:')
print(test.head())
print('\n')
print('The number of entries in the training data:', len(train))
print('The number of entries in the validation data:', len(test))
###Output
Training sample:
E pt eta phi
0 4.906713 4.700341 -0.349047 -0.590943
1 6.347139 5.855345 0.599470 -1.025347
2 5.997825 5.637058 0.490727 -0.623413
3 5.372660 4.761313 0.694790 0.697643
4 5.797168 5.707606 -0.218154 0.219345
Testing sample:
E pt eta phi
0 5.154570 4.361824 -0.837267 0.611330
1 5.669887 5.194112 0.585920 -0.487810
2 5.136172 4.816144 -0.455033 0.025235
3 5.028673 4.574284 0.568423 -0.929800
4 6.243935 4.664594 -1.443177 -0.803610
The number of entries in the training data: 18128
The number of entries in the validation data: 4533
###Markdown
Now we plot the data using the matplotlib library. The units reflect the normalization, but it's the shape that we care about.
###Code
import matplotlib.pyplot as plt
unit_list = ['[log(GeV)]', '[rad/3]', '[rad/3]', '[log(GeV)]']
variable_list = [r'$pt$', r'$\eta$', r'$\phi$', r'$E$'] # replace m with E
branches=["pt","eta","phi","E"] # replace m with E
n_bins = 200
for kk in range(0,4):
n_hist_data, bin_edges, _ = plt.hist(train[branches[kk]], color='gray', label='Input', alpha=1, bins=n_bins)
plt.xlabel(xlabel=variable_list[kk] + ' ' + unit_list[kk])
plt.ylabel('# of events')
plt.yscale('log')
#plt.savefig("fourmomentum_"+branches[kk],dpi=300)
plt.show()
###Output
_____no_output_____
###Markdown
Setting up the network Preparing the data Adding the two datasets as TensorDatasets to PyTorch (also loading all other classes we'll need later)
###Code
import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils.data
from torch.autograd import Variable
from torch.utils.data import TensorDataset
from torch.utils.data import DataLoader
from fastai import learner
from fastai.data import core
train_x = train
test_x = test
train_y = train_x # y = x since we are building an autoencoder
test_y = test_x
# Constructs a tensor object of the data and wraps them in a TensorDataset object.
train_ds = TensorDataset(torch.tensor(train_x.values, dtype=torch.float), torch.tensor(train_y.values, dtype=torch.float))
valid_ds = TensorDataset(torch.tensor(test_x.values, dtype=torch.float), torch.tensor(test_y.values, dtype=torch.float))
###Output
_____no_output_____
###Markdown
We now set things up to load the data, and we use a batch size that was optimized by previous students...note also that this is fastai v2, migration thanks to Jessica Lastow.
###Code
bs = 256
# Converts the TensorDataset into a DataLoader object and combines into one DataLoaders object (a basic wrapper
# around several DataLoader objects).
train_dl = DataLoader(train_ds, batch_size=bs, shuffle=True)
valid_dl = DataLoader(valid_ds, batch_size=bs * 2)
dls = core.DataLoaders(train_dl, valid_dl)
###Output
_____no_output_____
###Markdown
Preparing the network Here we have an example network. Details aren't too important, as long as they match what was already trained for us...in this case we have a LeakyReLU, tanh activation function, and a number of layers that goes from 4 to 200 to 20 to 3 (number of features in the hidden layer that we pick for testing compression) and then back all the way to 4.
###Code
class AE_3D_200_LeakyReLU(nn.Module):
def __init__(self, n_features=4):
super(AE_3D_200_LeakyReLU, self).__init__()
self.en1 = nn.Linear(n_features, 200)
self.en2 = nn.Linear(200, 200)
self.en3 = nn.Linear(200, 20)
self.en4 = nn.Linear(20, 3)
self.de1 = nn.Linear(3, 20)
self.de2 = nn.Linear(20, 200)
self.de3 = nn.Linear(200, 200)
self.de4 = nn.Linear(200, n_features)
self.tanh = nn.Tanh()
def encode(self, x):
return self.en4(self.tanh(self.en3(self.tanh(self.en2(self.tanh(self.en1(x)))))))
def decode(self, x):
return self.de4(self.tanh(self.de3(self.tanh(self.de2(self.tanh(self.de1(self.tanh(x))))))))
def forward(self, x):
z = self.encode(x)
return self.decode(z)
def describe(self):
return 'in-200-200-20-3-20-200-200-out'
#model = AE_3D_200_LeakyReLU().double()
model = AE_3D_200_LeakyReLU()
model.to('cpu')
###Output
_____no_output_____
###Markdown
We now have to pick a loss function - MSE loss is appropriate for a compression autoencoder since it reflects the [(input-output)/input] physical quantity that we want to minimize.
###Code
from fastai.metrics import mse
loss_func = nn.MSELoss()
#bn_wd = False # Don't use weight decay for batchnorm layers
#true_wd = True # weight decay will be used for all optimizers
wd = 1e-6
recorder = learner.Recorder()
learn = learner.Learner(dls, model=model, wd=wd, loss_func=loss_func, cbs=recorder)
#was: learn = basic_train.Learner(data=db, model=model, loss_func=loss_func, wd=wd, callback_fns=ActivationStats, bn_wd=bn_wd, true_wd=true_wd)
###Output
_____no_output_____
###Markdown
Alternative 1: Running a pre-trained network Now we load the pre-trained network.
###Code
# commented code as pre-trained network not available
# learn.load("4D_TLA_leading")
###Output
_____no_output_____
###Markdown
Then we evaluate the MSE on this network - it should be of the order of 0.001 or less if all has gone well...if it has not trained as well (note the pesky 0-mass peak above...) then it's going to be a bit higher.
###Code
# commented code as pre-trained network not available
# learn.validate()
###Output
_____no_output_____
###Markdown
Alternative 2: Training a new network Instead of using a pre-trained network, an alternative is to train a new network and use that instead. First, we want to find the best learning rate. The learning rate is a hyper-paramater that sets how much the weights of the network will change each step with respect to the loss gradient.Then we plot the loss versus the learning rates. We're interested in finding a good order of magnitude of learning rate, so we plot with a log scale.A good value for the learning rates is then either:- one tenth of the minimum before the divergence- when the slope is the steepest
###Code
from fastai.callback import schedule
lr_min, lr_steep = learn.lr_find()
print('Learning rate with the minimum loss:', lr_min)
print('Learning rate with the steepest gradient:', lr_steep)
###Output
Learning rate with the minimum loss: 0.010000000149011612
Learning rate with the steepest gradient: 0.002511886414140463
###Markdown
Now we want to run the training! User-chosen variables: - n_epoch: the number of epochs, i.e. how many times to run through all of the training data (the 18128 entries, see above) - lr: the learning rate. Either choose lr_min or lr_steep from above, or set your own.
###Code
import time
start = time.perf_counter() # Starts timer
# recreate learner object with lr_min
learn = learner.Learner(dls, model=model, wd=wd, loss_func=loss_func, cbs=recorder, lr=lr_min)
learn.fit_one_cycle(n_epoch=100)
# learn.fit_one_cycle(n_epoch=100, lr=lr_min)
end = time.perf_counter() # Ends timer
delta_t = end - start
print('Training took', delta_t, 'seconds')
###Output
[0, 2.6382343769073486, 0.5987152457237244, '00:00']
[0, 2.6382343769073486, 0.5987152457237244, '00:00']
[1, 0.7484664916992188, 0.2767142057418823, '00:00']
[1, 0.7484664916992188, 0.2767142057418823, '00:00']
[2, 0.3774736225605011, 0.26980987191200256, '00:00']
[2, 0.3774736225605011, 0.26980987191200256, '00:00']
[3, 0.29330411553382874, 0.27003103494644165, '00:00']
[3, 0.29330411553382874, 0.27003103494644165, '00:00']
[4, 0.27410122752189636, 0.27162206172943115, '00:00']
[4, 0.27410122752189636, 0.27162206172943115, '00:00']
[5, 0.27172231674194336, 0.26590967178344727, '00:00']
[5, 0.27172231674194336, 0.26590967178344727, '00:00']
[6, 0.20943409204483032, 0.17311528325080872, '00:00']
[6, 0.20943409204483032, 0.17311528325080872, '00:00']
[7, 0.1338045746088028, 0.08276692777872086, '00:00']
[7, 0.1338045746088028, 0.08276692777872086, '00:00']
[8, 0.08329608291387558, 0.025152618065476418, '00:00']
[8, 0.08329608291387558, 0.025152618065476418, '00:00']
[9, 0.03340597450733185, 0.011742938309907913, '00:00']
[9, 0.03340597450733185, 0.011742938309907913, '00:00']
[10, 0.013028712011873722, 0.002852021949365735, '00:00']
[10, 0.013028712011873722, 0.002852021949365735, '00:00']
[11, 0.009550459682941437, 0.005441850982606411, '00:00']
[11, 0.009550459682941437, 0.005441850982606411, '00:00']
[12, 0.009743746370077133, 0.0030632494017481804, '00:00']
[12, 0.009743746370077133, 0.0030632494017481804, '00:00']
[13, 0.01371017750352621, 0.009297072887420654, '00:00']
[13, 0.01371017750352621, 0.009297072887420654, '00:00']
[14, 0.01360910851508379, 0.004726691171526909, '00:00']
[14, 0.01360910851508379, 0.004726691171526909, '00:00']
[15, 0.017949499189853668, 0.010420458391308784, '00:00']
[15, 0.017949499189853668, 0.010420458391308784, '00:00']
[16, 0.01768053136765957, 0.026887616142630577, '00:00']
[16, 0.01768053136765957, 0.026887616142630577, '00:00']
[17, 0.019222935661673546, 0.0076900688000023365, '00:00']
[17, 0.019222935661673546, 0.0076900688000023365, '00:00']
[18, 0.021366534754633904, 0.021194320172071457, '00:00']
[18, 0.021366534754633904, 0.021194320172071457, '00:00']
[19, 0.024771874770522118, 0.022641293704509735, '00:00']
[19, 0.024771874770522118, 0.022641293704509735, '00:00']
[20, 0.01996437832713127, 0.01883481629192829, '00:00']
[20, 0.01996437832713127, 0.01883481629192829, '00:00']
[21, 0.018869858235120773, 0.04511656239628792, '00:00']
[21, 0.018869858235120773, 0.04511656239628792, '00:00']
[22, 0.019367938861250877, 0.018232915550470352, '00:00']
[22, 0.019367938861250877, 0.018232915550470352, '00:00']
[23, 0.018419716507196426, 0.007732638157904148, '00:00']
[23, 0.018419716507196426, 0.007732638157904148, '00:00']
[24, 0.020736750215291977, 0.009938101284205914, '00:00']
[24, 0.020736750215291977, 0.009938101284205914, '00:00']
[25, 0.014374219812452793, 0.011452450416982174, '00:00']
[25, 0.014374219812452793, 0.011452450416982174, '00:00']
[26, 0.022706130519509315, 0.00981816928833723, '00:00']
[26, 0.022706130519509315, 0.00981816928833723, '00:00']
[27, 0.014701624400913715, 0.005716172978281975, '00:00']
[27, 0.014701624400913715, 0.005716172978281975, '00:00']
[28, 0.015943538397550583, 0.004746624734252691, '00:00']
[28, 0.015943538397550583, 0.004746624734252691, '00:00']
[29, 0.013367768377065659, 0.010359439998865128, '00:00']
[29, 0.013367768377065659, 0.010359439998865128, '00:00']
[30, 0.019249090924859047, 0.009718472138047218, '00:00']
[30, 0.019249090924859047, 0.009718472138047218, '00:00']
[31, 0.011446424759924412, 0.016356512904167175, '00:00']
[31, 0.011446424759924412, 0.016356512904167175, '00:00']
[32, 0.010594001039862633, 0.022515397518873215, '00:00']
[32, 0.010594001039862633, 0.022515397518873215, '00:00']
[33, 0.015714915469288826, 0.003678384702652693, '00:00']
[33, 0.015714915469288826, 0.003678384702652693, '00:00']
[34, 0.009162187576293945, 0.01326412707567215, '00:00']
[34, 0.009162187576293945, 0.01326412707567215, '00:00']
[35, 0.009746813215315342, 0.00446401396766305, '00:00']
[35, 0.009746813215315342, 0.00446401396766305, '00:00']
[36, 0.010197213850915432, 0.007327872794121504, '00:00']
[36, 0.010197213850915432, 0.007327872794121504, '00:00']
[37, 0.00863906741142273, 0.008116386830806732, '00:00']
[37, 0.00863906741142273, 0.008116386830806732, '00:00']
[38, 0.01030751969665289, 0.012611386366188526, '00:00']
[38, 0.01030751969665289, 0.012611386366188526, '00:00']
[39, 0.00576302083209157, 0.0032039813231676817, '00:01']
[39, 0.00576302083209157, 0.0032039813231676817, '00:01']
[40, 0.007629137486219406, 0.007397005334496498, '00:00']
[40, 0.007629137486219406, 0.007397005334496498, '00:00']
[41, 0.006427006796002388, 0.0023837382905185223, '00:00']
[41, 0.006427006796002388, 0.0023837382905185223, '00:00']
[42, 0.005313813220709562, 0.0018796641379594803, '00:00']
[42, 0.005313813220709562, 0.0018796641379594803, '00:00']
[43, 0.00883725006133318, 0.0043104346841573715, '00:00']
[43, 0.00883725006133318, 0.0043104346841573715, '00:00']
[44, 0.004289956297725439, 0.0015926578780636191, '00:00']
[44, 0.004289956297725439, 0.0015926578780636191, '00:00']
[45, 0.005174288991838694, 0.0009207372204400599, '00:00']
[45, 0.005174288991838694, 0.0009207372204400599, '00:00']
[46, 0.0057618883438408375, 0.003286140039563179, '00:00']
[46, 0.0057618883438408375, 0.003286140039563179, '00:00']
[47, 0.0035762907937169075, 0.002873206278309226, '00:00']
[47, 0.0035762907937169075, 0.002873206278309226, '00:00']
[48, 0.003741980530321598, 0.002046581357717514, '00:00']
[48, 0.003741980530321598, 0.002046581357717514, '00:00']
[49, 0.003406725125387311, 0.002186483470723033, '00:00']
[49, 0.003406725125387311, 0.002186483470723033, '00:00']
[50, 0.003225636435672641, 0.0007253590738400817, '00:00']
[50, 0.003225636435672641, 0.0007253590738400817, '00:00']
[51, 0.003127356292679906, 0.005289153195917606, '00:00']
[51, 0.003127356292679906, 0.005289153195917606, '00:00']
[52, 0.003159807762131095, 0.0031607637647539377, '00:00']
[52, 0.003159807762131095, 0.0031607637647539377, '00:00']
[53, 0.003324283054098487, 0.002467961749061942, '00:00']
[53, 0.003324283054098487, 0.002467961749061942, '00:00']
[54, 0.0020954895298928022, 0.0008704090723767877, '00:00']
[54, 0.0020954895298928022, 0.0008704090723767877, '00:00']
[55, 0.0016475105658173561, 0.0005258604069240391, '00:01']
[55, 0.0016475105658173561, 0.0005258604069240391, '00:01']
[56, 0.0016812544781714678, 0.0011338713811710477, '00:00']
[56, 0.0016812544781714678, 0.0011338713811710477, '00:00']
[57, 0.0014948714524507523, 0.0008937170496210456, '00:00']
[57, 0.0014948714524507523, 0.0008937170496210456, '00:00']
[58, 0.0013052646536380053, 0.0003936357097700238, '00:00']
[58, 0.0013052646536380053, 0.0003936357097700238, '00:00']
[59, 0.0012832866050302982, 0.0009342640987597406, '00:00']
[59, 0.0012832866050302982, 0.0009342640987597406, '00:00']
[60, 0.001101718400605023, 0.0013187829172238708, '00:00']
[60, 0.001101718400605023, 0.0013187829172238708, '00:00']
[61, 0.0008100321865640581, 0.001759143895469606, '00:00']
[61, 0.0008100321865640581, 0.001759143895469606, '00:00']
[62, 0.0007796925492584705, 0.0004335931735113263, '00:00']
[62, 0.0007796925492584705, 0.0004335931735113263, '00:00']
[63, 0.0006531934486702085, 0.0025217467918992043, '00:00']
[63, 0.0006531934486702085, 0.0025217467918992043, '00:00']
[64, 0.0009222666267305613, 0.0016178853111341596, '00:00']
[64, 0.0009222666267305613, 0.0016178853111341596, '00:00']
[65, 0.000484772608615458, 0.0010688501643016934, '00:00']
[65, 0.000484772608615458, 0.0010688501643016934, '00:00']
[66, 0.00041559446253813803, 0.0003998331376351416, '00:00']
[66, 0.00041559446253813803, 0.0003998331376351416, '00:00']
[67, 0.00033218591124750674, 0.0006047837086953223, '00:00']
[67, 0.00033218591124750674, 0.0006047837086953223, '00:00']
[68, 0.00039181485772132874, 0.000517115811817348, '00:00']
[68, 0.00039181485772132874, 0.000517115811817348, '00:00']
[69, 0.0003846036270260811, 0.00040234116022475064, '00:00']
[69, 0.0003846036270260811, 0.00040234116022475064, '00:00']
[70, 0.00026625185273587704, 8.234923734562472e-05, '00:00']
[70, 0.00026625185273587704, 8.234923734562472e-05, '00:00']
###Markdown
Then we plot the loss as a function of batches and epochs to check if we reach a plateau.
###Code
recorder.plot_loss()
###Output
_____no_output_____
###Markdown
Then we evaluate the MSE on this network - it should be of the order of 0.001 or less if all has gone well...if it has not trained as well (note the pesky 0-mass peak above...) then it's going to be a bit higher.
###Code
learn.validate()
###Output
_____no_output_____
###Markdown
Let's plot all of this, with ratios (thanks to code by Erik Wallin) Plotting the outputs of the network Lazy-save of our output files (they'll also be on screen)
###Code
import os
save_dir = "plotOutput"
if not os.path.exists(save_dir):
os.makedirs(save_dir)
###Output
_____no_output_____
###Markdown
A function in case we want to un-normalize and get back to physical quantities...
###Code
def custom_unnormalize(df):
df['eta'] = df['eta'] * 5
df['phi'] = df['phi'] * 3
df['E'] = 10**df['E']
# df['m'] = 10**df['m'] # replace m with E
df['pt'] = 10**(df['pt'])
return df
###Output
_____no_output_____
###Markdown
Make the histograms from the dataset: histograms depicting normalized data, and histograms depicting unnormalized data. Histograms depicting normalized data
###Code
import numpy as np
plt.close('all')
unit_list = ['[GeV]', '[rad]', '[rad]', '[GeV]']
variable_list = [ r'$E$', r'$pt$', r'$\eta$', r'$\phi$']
line_style = ['--', '-']
colors = ['orange', 'c']
markers = ['*', 's']
model.to('cpu')
save = True # Option to save figure
# Histograms
idxs = (0, 100000) # Choose events to compare
data = torch.tensor(test[idxs[0]:idxs[1]].values, dtype=torch.float)
#data = torch.tensor(test[idxs[0]:idxs[1]].values, dtype=torch.float).double()
pred = model(data)
pred = pred.detach().numpy()
data = data.detach().numpy()
data_df = pd.DataFrame(data, columns=test.columns)
pred_df = pd.DataFrame(pred, columns=test.columns)
alph = 0.8
n_bins = 200
for kk in np.arange(4):
plt.figure()
n_hist_data, bin_edges, _ = plt.hist(data[:, kk], color=colors[1], label='Input', alpha=1, bins=n_bins)
n_hist_pred, _, _ = plt.hist(pred[:, kk], color=colors[0], label='Output', alpha=alph, bins=bin_edges)
plt.suptitle(test.columns[kk])
plt.xlabel(test.columns[kk])
plt.ylabel('Number of events')
# ms.sciy()
plt.yscale('log')
plt.legend()
if save:
plt.savefig(os.path.join(save_dir,test.columns[kk]+'_normalized.png'))
###Output
_____no_output_____
###Markdown
Histograms depicting unnormalized data
###Code
import numpy as np
plt.close('all')
unit_list = ['[GeV]', '[rad]', '[rad]', '[GeV]']
variable_list = [ r'$E$', r'$pt$', r'$\eta$', r'$\phi$']
line_style = ['--', '-']
colors = ['orange', 'c']
markers = ['*', 's']
model.to('cpu')
save = True # Option to save figure
# Histograms
idxs = (0, 100000) # Choose events to compare
data = torch.tensor(test[idxs[0]:idxs[1]].values, dtype=torch.float)
#data = torch.tensor(test[idxs[0]:idxs[1]].values, dtype=torch.float).double()
pred = model(data)
pred = pred.detach().numpy()
data = data.detach().numpy()
data_df = pd.DataFrame(data, columns=test.columns)
pred_df = pd.DataFrame(pred, columns=test.columns)
unnormalized_data_df = custom_unnormalize(data_df)
unnormalized_pred_df = custom_unnormalize(pred_df)
alph = 0.8
n_bins = 200
for kk in np.arange(4):
    plt.figure()
    # use the unnormalized dataframes so the histograms show physical quantities
    n_hist_data, bin_edges, _ = plt.hist(unnormalized_data_df.iloc[:, kk], color=colors[1], label='Input', alpha=1, bins=n_bins)
    n_hist_pred, _, _ = plt.hist(unnormalized_pred_df.iloc[:, kk], color=colors[0], label='Output', alpha=alph, bins=bin_edges)
plt.suptitle(test.columns[kk])
plt.xlabel(test.columns[kk])
plt.ylabel('Number of events')
# ms.sciy()
plt.yscale('log')
plt.legend()
if save:
plt.savefig(os.path.join(save_dir,test.columns[kk]+'_unnormalized.png'))
def getRatio(bin1,bin2):
bins = []
for b1,b2 in zip(bin1,bin2):
if b1==0 and b2==0:
bins.append(0.)
elif b2==0:
bins.append(None)
else:
bins.append((float(b2)-float(b1))/b1)
return bins
rat = getRatio(n_hist_data,n_hist_pred)
# print(rat)
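# Sketch (an addition, not in the original notebook): visualise the per-bin
# relative difference for the last variable plotted above, reusing its bin_edges
bin_centers = 0.5 * (bin_edges[1:] + bin_edges[:-1])
plt.figure()
plt.scatter(bin_centers, [r if r is not None else np.nan for r in rat], s=10)
plt.xlabel(test.columns[3])
plt.ylabel('relative difference (output vs input)')
plt.show()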
###Output
_____no_output_____ |
day03/additional materials/5.1 Custom Layer.ipynb | ###Markdown
Custom Keras Layer Idea: We build a custom activation layer called **Antirectifier**, which modifies the shape of the tensor that passes through it. We need to specify two methods: `get_output_shape_for` and `call`. Note that the same result can also be achieved via a `Lambda` layer (`keras.layers.core.Lambda(function, output_shape=None, arguments=None)`). Because our custom layer is written with primitives from the Keras backend (`K`), our code can run both on TensorFlow and Theano.
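As a rough sketch of that `Lambda`-based alternative (an assumption, not part of the original example), the same behaviour could be written as:
```python
from keras.layers import Lambda
from keras import backend as K

def antirectifier(x):
    # same computation as Antirectifier.call below
    x -= K.mean(x, axis=1, keepdims=True)
    x = K.l2_normalize(x, axis=1)
    return K.concatenate([K.relu(x), K.relu(-x)], axis=1)

def antirectifier_output_shape(input_shape):
    shape = list(input_shape)
    shape[-1] *= 2  # the output is twice as wide as the input
    return tuple(shape)

# used in place of Antirectifier():
# model.add(Lambda(antirectifier, output_shape=antirectifier_output_shape))
```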
###Code
from keras.models import Sequential
from keras.layers import Dense, Dropout, Layer, Activation
from keras.datasets import mnist
from keras import backend as K
from keras.utils import np_utils
###Output
Using Theano backend.
Using gpu device 0: GeForce GTX 760 (CNMeM is enabled with initial size: 90.0% of memory, cuDNN 5110)
/home/valerio/anaconda3/envs/deep-learning/lib/python3.5/site-packages/theano/sandbox/cuda/__init__.py:600: UserWarning: Your cuDNN version is more recent than the one Theano officially supports. If you see any problems, try updating Theano or downgrading cuDNN to version 5.
warnings.warn(warn)
###Markdown
AntiRectifier Layer
###Code
class Antirectifier(Layer):
'''This is the combination of a sample-wise
L2 normalization with the concatenation of the
positive part of the input with the negative part
of the input. The result is a tensor of samples that are
twice as large as the input samples.
It can be used in place of a ReLU.
# Input shape
2D tensor of shape (samples, n)
# Output shape
2D tensor of shape (samples, 2*n)
# Theoretical justification
When applying ReLU, assuming that the distribution
of the previous output is approximately centered around 0.,
you are discarding half of your input. This is inefficient.
Antirectifier allows to return all-positive outputs like ReLU,
without discarding any data.
Tests on MNIST show that Antirectifier allows to train networks
with twice less parameters yet with comparable
classification accuracy as an equivalent ReLU-based network.
'''
def get_output_shape_for(self, input_shape):
shape = list(input_shape)
assert len(shape) == 2 # only valid for 2D tensors
shape[-1] *= 2
return tuple(shape)
def call(self, x, mask=None):
x -= K.mean(x, axis=1, keepdims=True)
x = K.l2_normalize(x, axis=1)
pos = K.relu(x)
neg = K.relu(-x)
return K.concatenate([pos, neg], axis=1)
###Output
_____no_output_____
###Markdown
Parameters and Settings
###Code
# global parameters
batch_size = 128
nb_classes = 10
nb_epoch = 40
###Output
_____no_output_____
###Markdown
Data Preparation
###Code
# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(60000, 784)
X_test = X_test.reshape(10000, 784)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
###Output
60000 train samples
10000 test samples
###Markdown
Model with Custom Layer
###Code
# build the model
model = Sequential()
model.add(Dense(256, input_shape=(784,)))
model.add(Antirectifier())
model.add(Dropout(0.1))
model.add(Dense(256))
model.add(Antirectifier())
model.add(Dropout(0.1))
model.add(Dense(10))
model.add(Activation('softmax'))
# compile the model
model.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
# train the model
model.fit(X_train, Y_train,
batch_size=batch_size, nb_epoch=nb_epoch,
verbose=1, validation_data=(X_test, Y_test))
###Output
Train on 60000 samples, validate on 10000 samples
Epoch 1/40
60000/60000 [==============================] - 1s - loss: 0.6011 - acc: 0.9140 - val_loss: 0.1505 - val_acc: 0.9613
Epoch 2/40
60000/60000 [==============================] - 0s - loss: 0.1260 - acc: 0.9656 - val_loss: 0.0982 - val_acc: 0.9703
Epoch 3/40
60000/60000 [==============================] - 0s - loss: 0.0831 - acc: 0.9763 - val_loss: 0.0782 - val_acc: 0.9747
Epoch 4/40
60000/60000 [==============================] - 0s - loss: 0.0636 - acc: 0.9813 - val_loss: 0.0827 - val_acc: 0.9741
Epoch 5/40
60000/60000 [==============================] - 0s - loss: 0.0511 - acc: 0.9841 - val_loss: 0.0724 - val_acc: 0.9758
Epoch 6/40
60000/60000 [==============================] - 0s - loss: 0.0429 - acc: 0.9866 - val_loss: 0.0667 - val_acc: 0.9788
Epoch 7/40
60000/60000 [==============================] - 0s - loss: 0.0366 - acc: 0.9883 - val_loss: 0.0715 - val_acc: 0.9792
Epoch 8/40
60000/60000 [==============================] - 0s - loss: 0.0315 - acc: 0.9904 - val_loss: 0.0809 - val_acc: 0.9771
Epoch 9/40
60000/60000 [==============================] - 0s - loss: 0.0282 - acc: 0.9913 - val_loss: 0.0706 - val_acc: 0.9803
Epoch 10/40
60000/60000 [==============================] - 0s - loss: 0.0236 - acc: 0.9925 - val_loss: 0.0687 - val_acc: 0.9803
Epoch 11/40
60000/60000 [==============================] - 0s - loss: 0.0215 - acc: 0.9931 - val_loss: 0.0670 - val_acc: 0.9795
Epoch 12/40
60000/60000 [==============================] - 0s - loss: 0.0195 - acc: 0.9938 - val_loss: 0.0704 - val_acc: 0.9811
Epoch 13/40
60000/60000 [==============================] - 0s - loss: 0.0181 - acc: 0.9941 - val_loss: 0.0667 - val_acc: 0.9820
Epoch 14/40
60000/60000 [==============================] - 0s - loss: 0.0149 - acc: 0.9955 - val_loss: 0.0687 - val_acc: 0.9823
Epoch 15/40
60000/60000 [==============================] - 1s - loss: 0.0146 - acc: 0.9959 - val_loss: 0.0723 - val_acc: 0.9799
Epoch 16/40
60000/60000 [==============================] - 0s - loss: 0.0137 - acc: 0.9958 - val_loss: 0.0795 - val_acc: 0.9799
Epoch 17/40
60000/60000 [==============================] - 1s - loss: 0.0130 - acc: 0.9957 - val_loss: 0.0697 - val_acc: 0.9826
Epoch 18/40
60000/60000 [==============================] - 1s - loss: 0.0113 - acc: 0.9965 - val_loss: 0.0688 - val_acc: 0.9823
Epoch 19/40
60000/60000 [==============================] - 0s - loss: 0.0107 - acc: 0.9963 - val_loss: 0.0737 - val_acc: 0.9819
Epoch 20/40
60000/60000 [==============================] - 0s - loss: 0.0103 - acc: 0.9967 - val_loss: 0.0746 - val_acc: 0.9812
Epoch 21/40
60000/60000 [==============================] - 0s - loss: 0.0094 - acc: 0.9968 - val_loss: 0.0727 - val_acc: 0.9811
Epoch 22/40
60000/60000 [==============================] - 1s - loss: 0.0084 - acc: 0.9972 - val_loss: 0.0805 - val_acc: 0.9820
Epoch 23/40
60000/60000 [==============================] - 0s - loss: 0.0088 - acc: 0.9971 - val_loss: 0.0809 - val_acc: 0.9809
Epoch 24/40
60000/60000 [==============================] - 1s - loss: 0.0075 - acc: 0.9974 - val_loss: 0.0773 - val_acc: 0.9817
Epoch 25/40
60000/60000 [==============================] - 0s - loss: 0.0078 - acc: 0.9975 - val_loss: 0.0758 - val_acc: 0.9817
Epoch 26/40
60000/60000 [==============================] - 1s - loss: 0.0074 - acc: 0.9976 - val_loss: 0.0751 - val_acc: 0.9816
Epoch 27/40
60000/60000 [==============================] - 0s - loss: 0.0076 - acc: 0.9975 - val_loss: 0.0785 - val_acc: 0.9809
Epoch 28/40
60000/60000 [==============================] - 0s - loss: 0.0067 - acc: 0.9978 - val_loss: 0.0782 - val_acc: 0.9816
Epoch 29/40
60000/60000 [==============================] - 1s - loss: 0.0070 - acc: 0.9976 - val_loss: 0.0834 - val_acc: 0.9808
Epoch 30/40
60000/60000 [==============================] - 1s - loss: 0.0055 - acc: 0.9983 - val_loss: 0.0775 - val_acc: 0.9817
Epoch 31/40
60000/60000 [==============================] - 0s - loss: 0.0056 - acc: 0.9982 - val_loss: 0.0930 - val_acc: 0.9814
Epoch 32/40
60000/60000 [==============================] - 1s - loss: 0.0056 - acc: 0.9981 - val_loss: 0.0886 - val_acc: 0.9812
Epoch 33/40
60000/60000 [==============================] - 1s - loss: 0.0057 - acc: 0.9982 - val_loss: 0.0778 - val_acc: 0.9812
Epoch 34/40
60000/60000 [==============================] - 1s - loss: 0.0047 - acc: 0.9984 - val_loss: 0.0839 - val_acc: 0.9824
Epoch 35/40
60000/60000 [==============================] - 0s - loss: 0.0049 - acc: 0.9984 - val_loss: 0.0900 - val_acc: 0.9809
Epoch 36/40
60000/60000 [==============================] - 1s - loss: 0.0046 - acc: 0.9984 - val_loss: 0.0851 - val_acc: 0.9816
Epoch 37/40
60000/60000 [==============================] - 0s - loss: 0.0053 - acc: 0.9985 - val_loss: 0.0932 - val_acc: 0.9801
Epoch 38/40
60000/60000 [==============================] - 0s - loss: 0.0049 - acc: 0.9983 - val_loss: 0.0917 - val_acc: 0.9804
Epoch 39/40
60000/60000 [==============================] - 0s - loss: 0.0044 - acc: 0.9984 - val_loss: 0.0931 - val_acc: 0.9816
Epoch 40/40
60000/60000 [==============================] - 0s - loss: 0.0047 - acc: 0.9986 - val_loss: 0.0874 - val_acc: 0.9820
###Markdown
Exercise: Compare with an equivalent network that is **2x bigger** (in terms of Dense layers) + **ReLU**. One possible baseline is sketched in the code cell below.
###Code
## your code here
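# One possible baseline sketch (an assumption, not the author's reference solution):
# since Antirectifier doubles the width of each Dense output, the "2x bigger" ReLU
# network uses Dense(512) where the model above used Dense(256).
model_relu = Sequential()
model_relu.add(Dense(512, input_shape=(784,)))
model_relu.add(Activation('relu'))
model_relu.add(Dropout(0.1))
model_relu.add(Dense(512))
model_relu.add(Activation('relu'))
model_relu.add(Dropout(0.1))
model_relu.add(Dense(10))
model_relu.add(Activation('softmax'))
model_relu.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
# model_relu.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,
#                verbose=1, validation_data=(X_test, Y_test))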
###Output
_____no_output_____ |
II Machine Learning & Deep Learning/03_Model Selection. Decision Tree vs Support Vector Machines vs Logistic Regression/03practice.ipynb | ###Markdown
03 | Model Selection. Decision Tree vs Support Vector Machines vs Logistic Regression - Subscribe to my [Blog ↗](https://blog.pythonassembly.com/)- Let's keep in touch on [LinkedIn ↗](www.linkedin.com/in/jsulopz) 😄 Discipline to Search Solutions in Google > Apply the following steps when **looking for solutions in Google**:>> 1. **Necessity**: How to load an Excel in Python?> 2. **Search in Google**: by keywords> - `load excel python`> - ~~how to load excel in python~~> 3. **Solution**: What's the `function()` that loads an Excel in Python?> - A function is to programming what the atom is to physics.> - Every time you want to do something in programming> - **You will need a `function()`** to do it> - Therefore, you must **detect parentheses `()`**> - Out of all the words that you see in a website> - Because they indicate the presence of a `function()`. Load the Data > - The goal of this dataset is> - To predict if **bank's customers** (rows) could be approved for a credit card `target`> - Based on their **socio-demographic characteristics** (columns)
###Code
import pandas as pd
df = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/credit-screening/crx.data',
na_values='?', header=None)
df.rename(columns={15: 'target'}, inplace=True)
df.head()
###Output
_____no_output_____
###Markdown
Build & Compare Models `DecisionTreeClassifier()`
###Code
%%HTML
<iframe width="560" height="315" src="https://www.youtube.com/embed/7VeUPuFGJHk" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
###Output
_____no_output_____
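Before the video walkthroughs, here is a minimal sketch (not part of the original exercise) of how the three models covered in this and the following sections could be fitted and compared on the credit dataset loaded above. It assumes the simplest possible preprocessing: dropping rows with missing values and one-hot encoding the categorical columns.

```python
from sklearn.model_selection import cross_val_score
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
import pandas as pd

data = df.dropna()                                 # drop incomplete rows
X = pd.get_dummies(data.drop(columns=['target']))  # one-hot encode categorical features
y = data['target']

for model in (DecisionTreeClassifier(), RandomForestClassifier(), KNeighborsClassifier()):
    scores = cross_val_score(model, X, y, cv=5)
    print(type(model).__name__, round(scores.mean(), 3))
```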
###Markdown
`RandomForestClassifier()`
###Code
%%HTML
<iframe width="560" height="315" src="https://www.youtube.com/embed/J4Wdy0Wc_xQ" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
###Output
_____no_output_____
###Markdown
`KNeighborsClassifier()`
###Code
%%HTML
<iframe width="560" height="315" src="https://www.youtube.com/embed/HVXime0nQeI" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
###Output
_____no_output_____ |
3. EDA_stock + XGBoost.ipynb | ###Markdown
- stock price feature fields: - Date - Past N price - Past N - 1 price - ... - Past 1 price - Label
###Code
import sys
import datetime

import numpy as np
import pandas as pd

# NOTE: close_prices, dates, volumes and stock_data_df are assumed to have been
# created in an earlier preprocessing step that loads the stock price data.
time_window = 2
def stock_price_feature_label_generator(time_window = 3):
if time_window <= 0:
raise ValueError("argument time_window has to be at least 1")
# use "time_window" previous days to predict the movement of the next day
label = [-1] + [1 if close_prices[i] > close_prices[i - 1] else 0 for i in range(1, len(close_prices))]
result = pd.DataFrame(dates[time_window: ], columns = ['Date'])
result.insert(loc=result.shape[1], column="Label", value=label[time_window: ])
for i in range(time_window):
result.insert(loc=result.shape[1], column="Past " + str(time_window - i) + " Day Adj. Price", value=np.divide(close_prices[i: len(close_prices) - time_window + i], volumes[i: len(volumes) - time_window + i]))
# for i in range(time_window):
# result.insert(loc=result.shape[1], column="Past " + str(time_window - i) + " Day Price", value=close_prices[i: len(close_prices) - time_window + i])
# for i in range(time_window):
# result.insert(loc=result.shape[1], column="Past " + str(time_window - i) + " Volume", value=volumes[i: len(volumes) - time_window + i])
return result
# stock_price_feature = stock_price_feature_label_generator(time_window)
# stock_price_feature.head()
only_date_and_label = stock_price_feature_label_generator(1).drop(columns=['Past 1 Day Adj. Price'])
only_date_and_label.head()
stock_price_feature = stock_data_df[['Date', 'Adj Close']]
stock_price_feature['Label'] = [-1] + only_date_and_label.Label.to_list()
stock_price_feature = stock_price_feature[['Date', 'Label', 'Adj Close']]
stock_price_feature.head()
aggregated_vector = pd.read_csv("aggregated_vector.csv")
aggregated_vector.shape
only_date_and_label.shape
feature_with_label_date = stock_price_feature.merge(aggregated_vector, left_on="Date", right_on="Date")
feature_with_label_date.head()
feature_without_label_date = feature_with_label_date.iloc[:, 3:]
feature_without_label_date.head()
###Output
_____no_output_____
###Markdown
- adding lagged info
###Code
# regenerate feature with time window
historial_feature_columns = [aggregated_vector.columns.to_list()[0]]
for lag_num in range(1, time_window + 1):
historial_feature_columns += [fea + '_lag' + str(lag_num) for fea in aggregated_vector.columns.to_list()[1:]]
feature_with_lagged_info = pd.DataFrame(columns=historial_feature_columns)
for i in range(len(feature_without_label_date) - 1, time_window - 1, -1):
sys.stdout.write('\r')
# the exact output you're looking for:
sys.stdout.write("Generating: %.02f" % ((len(feature_without_label_date) - 1 - i) / (len(feature_without_label_date) - 1 - time_window) * 100))
sys.stdout.flush()
# concatenate lagged feature horizontally to add temporal info
prev_N_days_sum_vector = feature_without_label_date.loc[i - 1].to_numpy()
for delta in range(2, 1 + time_window):
prev_N_days_sum_vector = np.concatenate((prev_N_days_sum_vector,
feature_without_label_date.loc[i - delta].to_numpy()),
axis=None)
# normalize
# prev_N_days_sum_vector = prev_N_days_sum_vector / np.linalg.norm(prev_N_days_sum_vector)
# add date
feature_with_date = np.concatenate((np.array([feature_with_label_date.iloc[i, 0]]), prev_N_days_sum_vector))
new_row_df = pd.DataFrame(feature_with_date).transpose()
# generate column name for lagged feature
new_row_df.columns = historial_feature_columns
# insert row
feature_with_lagged_info = pd.concat([new_row_df, feature_with_lagged_info], ignore_index=True)
feature_with_lagged_info.head()
###Output
_____no_output_____
###Markdown
- fusion with prev price and volume
###Code
fused_feature = stock_price_feature.merge(feature_with_lagged_info, left_on="Date", right_on="Date")
fused_feature.head()
###Output
_____no_output_____
###Markdown
- check imbalance
###Code
np.count_nonzero(fused_feature.Label.to_list()) / len(fused_feature)
fused_feature.head()
###Output
_____no_output_____
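The `SMOTE` class imported in the next cell is never applied in this notebook; a minimal sketch of how it could be used to rebalance the classes, assuming `X_train` and `y_train` from the train/test split performed further below:

```python
from imblearn.over_sampling import SMOTE

# Oversample the minority class on the training split only
smote = SMOTE(random_state=42)
X_train_bal, y_train_bal = smote.fit_resample(X_train, y_train)
print(pd.Series(y_train_bal).value_counts())  # classes are now balanced
```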
###Markdown
- import ML lib
###Code
# from bert_serving.client import BertClient
import math
from string import punctuation
import matplotlib.pyplot as plt
import re
import os
# from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
# import warnings
from imblearn.over_sampling import SMOTE
from sklearn.model_selection import cross_validate, GridSearchCV
import joblib
from sklearn.metrics import classification_report, confusion_matrix, f1_score, accuracy_score, roc_auc_score
from sklearn.model_selection import GridSearchCV,train_test_split
from sklearn.svm import SVC
import xgboost as xgb
# feature selection
from sklearn.feature_selection import SelectKBest, chi2, f_classif
fused_feature.iloc[:, 1:].shape
# import seaborn as sns
# # get first 20 dimensions from the 768-dimensional vector
# feature = fused_feature.iloc[:, 1:21]
# corr = feature.corr()
# _ = plt.figure(figsize=(30, 30))
# ax = sns.heatmap(
# corr,
# vmin=-1, vmax=1, center=0,
# cmap=sns.diverging_palette(20, 220, n=11), # better if the palette color num is odd
# square=True
# )
# ax.set_xticklabels(
# ax.get_xticklabels(),
# rotation=45,
# horizontalalignment='right'
# );
###Output
_____no_output_____
###Markdown
- feature selection
###Code
# apply SelectKBest (ANOVA F-test) to score the features and extract the best ones
X = fused_feature.iloc[:, 2:]
y = fused_feature.Label
bestfeatures = SelectKBest(score_func=f_classif, k=25)
fit = bestfeatures.fit(X, y)
dfscores = pd.DataFrame(fit.scores_)
dfcolumns = pd.DataFrame(X.columns)
# concat two dataframes for better visualization
featureScores = pd.concat([dfcolumns, dfscores], axis=1)
featureScores.columns = ['Specs', 'Score'] # naming the dataframe columns
print(featureScores.nlargest(25, 'Score'))  # print the 25 best features
selected_feature_columns = np.add(2, featureScores.nlargest(110, 'Score').index.to_list())
def string_to_datetime(string):
date_parts = [int(part) for part in string.split('-')]
return datetime.date(date_parts[0], date_parts[1], date_parts[2])
fused_feature.Date = fused_feature.Date.apply(lambda x: string_to_datetime(x))
fused_feature.to_csv("final_feature_from_twitter.csv", index=False)
###Output
_____no_output_____
###Markdown
- train test split
###Code
X = fused_feature.iloc[:, 2:].to_numpy()
# X = fused_feature.iloc[:, selected_feature_columns].to_numpy()
y = fused_feature.Label.to_list()
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
xgb_model = xgb.XGBClassifier(learning_rate=0.03,
n_estimators=600,
max_depth=7,
min_child_weight=5,
gamma=0,
subsample=0.8,
colsample_bytree=0.8,
objective='binary:logistic',
nthread=4,
scale_pos_weight=1,
seed=27)
xgb_model.fit(X_train, y_train)
y_pred = xgb_model.predict(X_train)
y_pred_prob = xgb_model.predict_proba(X_train)[:,1]
print("\nModel Report")
print("Accuracy : %.4g" % accuracy_score(y_train, y_pred))
print("AUC Score (Train): %f" % roc_auc_score(y_train, y_pred_prob))
y_pred = xgb_model.predict(X_test)
y_pred_prob = xgb_model.predict_proba(X_test)[:,1]
print("\nModel Report")
print("Accuracy : %.4g" % accuracy_score(y_test, y_pred))
print("AUC Score (Train): %f" % roc_auc_score(y_test, y_pred_prob))
###Output
Model Report
Accuracy : 0.4337
AUC Score (Train): 0.499412
|
Ciclotron.ipynb | ###Markdown
Experimental data taken for the proton cyclotron: $R=1m$, $E_{max}=200MeV$, $m_p=1.673\times 10^{-27}kg$, $q=1.602\times 10^{-19}C$ and $v_0=0.0\frac{m}{s}$.
###Code
import numpy as np
import matplotlib.pyplot as plt

R=1
E=200
q=1.602E-19
E=E*10**6*q
m=1.673E-27
v_0=0
###Output
_____no_output_____
###Markdown
Calculation of the applied perpendicular magnetic field $B$, the gap $d$ between the cyclotron dees, the cyclotron frequency $\omega_{cyclotron}$, and the maximum electric field $E_0$ applied between the dees.
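These quantities follow from equating the maximum kinetic energy with the momentum at the exit radius (consistent with the code below):

\begin{equation} E_{max}=\frac{p_{max}^{2}}{2m}=\frac{(qBR)^{2}}{2m}\ \Rightarrow\ B=\frac{\sqrt{2mE_{max}}}{qR},\qquad \omega_{cyclotron}=\frac{qB}{m},\qquad d=\frac{R}{10} \end{equation}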
###Code
B=np.sqrt(2*m*E)/(q*R)
d=R/10
omega=q*B/m
E0=1.0E7*B
###Output
_____no_output_____
###Markdown
Definition of the electric field applied in the cyclotron.
###Code
def campoElectrico(x,t):
E=0.0
if x>=-d/2 and x<=d/2:
E=E0*np.cos(omega*t)
return E
###Output
_____no_output_____
###Markdown
Definition of the proton trajectory in the cyclotron using Feynman's step-by-step method. It accounts for the forces present in each region of the accelerator and returns the proton's time, position and velocity in the $xy$ plane.
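At each time step the function below applies the explicit (Euler-style) updates

\begin{equation} a_x=\omega v_y,\qquad a_y=-\omega v_x+\frac{q\,E(y,t)}{m},\qquad x_{n+1}=x_n+v_{x,n}\,dt,\qquad v_{x,n+1}=v_{x,n}+a_x\,dt, \end{equation}

and the analogous updates for the $y$ components, until the radius exceeds $R$.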
###Code
def pasoApaso(q,B,v_0,theta_0,m,N,R):
t=[0.0]
omega=q*B/m
dt=2*np.pi/(omega*N)
x=[0]
y=[0]
v_x=[-v_0*np.sin(theta_0)]
v_y=[v_0*np.cos(theta_0)]
r=0
while r<=R:
a_x=omega*v_y[-1]
a_y=-omega*v_x[-1]+campoElectrico(y[-1],t[-1])*q/m
x_new=x[-1]+v_x[-1]*dt
y_new=y[-1]+v_y[-1]*dt
v_x.append(v_x[-1]+a_x*dt)
v_y.append(v_y[-1]+a_y*dt)
x.append(x_new)
y.append(y_new)
t.append(t[-1]+dt)
r=np.sqrt(x[-1]**2+y[-1]**2)
x=np.array(x)
y=np.array(y)
v_x=np.array(v_x)
v_y=np.array(v_y)
t=np.array(t)
return t,x,y,v_x,v_y
###Output
_____no_output_____
###Markdown
Calculation of the proton trajectory with the given experimental data and time steps $dt=\frac{2\pi}{N\omega_{cyclotron}}$.
###Code
N=1000
t,x,y,vx,vy=pasoApaso(q,B,v_0,0.0,m,N,R)
0.5*m*(vx[-1]**2+vy[-1]**2),E
###Output
_____no_output_____
###Markdown
Plot of the trajectory described by the proton in the cyclotron.
###Code
plt.figure(figsize=(7,7))
plt.plot(x,y)
plt.xlabel("Posición en "+r"$x$ "+"(m)")
plt.ylabel("Posición en "+r"$y$ "+"(m)")
plt.savefig("trayectoriaCiclotron.svg")
###Output
_____no_output_____
###Markdown
Selection of the indices where $x$ is close to zero, i.e. in the range $-\epsilon\leq x\leq \epsilon$ with $\epsilon=0.0007\,m$ (the value used in the code below).
###Code
ii=x<0.0007
ii1=x>-0.0007
jj=ii*ii1
###Output
_____no_output_____
###Markdown
Calculation of the momentum evolution and determination of the trajectory radius in $-\epsilon\leq x\leq \epsilon$ from $r=\sqrt{x^2+y^2}$ and $r_p=\frac{1}{qB}\sqrt{p_x^2+p_y^2}$.
###Code
px=m*vx
py=m*vy
r=np.sqrt(x[jj]**2+y[jj]**2)
rp=np.sqrt(px[jj]**2+py[jj]**2)/(q*B)
###Output
_____no_output_____
###Markdown
Linear regression of the $r$ and $r_p$ data for large radii.
###Code
from sklearn import linear_model
from sklearn.metrics import mean_squared_error, r2_score
regr=linear_model.LinearRegression()
regr.fit(r.reshape(-1, 1),rp)
rp_fit=regr.predict(r.reshape(-1, 1))
print("Los datos se comportan linealmente según la ecuación: "+r"$r_p={}r+{}m$.".format(regr.coef_[0],regr.intercept_))
#+" Con un error de varianza: {}".format(r2_score(rp,rp_fit))
print("La pendiente se acerca a 1 tal como se esperaba.")
###Output
Los datos se comportan linealmente según la ecuación: $r_p=0.9831385758042884r+0.008939700432790909m$.
La pendiente se acerca a 1 tal como se esperaba.
###Markdown
Plot of the radii computed from position and from momentum for the $x$ positions close to zero.
###Code
plt.figure(figsize=(7,7))
plt.scatter(r,rp, s=12)
plt.plot(r,rp_fit, c="red", label="Fit lineal")
plt.xlabel("Radio según la posición (m)")
plt.ylabel("Radio según el momento (m)")
plt.legend()
plt.savefig("radiosCiclo.svg")
###Output
_____no_output_____
###Markdown
Selection of the times at positions with $x>0$ and $-\epsilon\leq y\leq \epsilon$.
###Code
ll0=x>0
ll1=y<0.0007
ll2=y>-0.0007
ll=ll0*ll1*ll2
###Output
_____no_output_____
###Markdown
Determination of the consecutive periods and the number of turns that satisfy the conditions on $x$ and $y$, discarding time differences $T<0.01\times 10^{-8}s$ since they come from consecutive points.
###Code
T=np.zeros(len(t[ll])-1)
for j in range(len(t[ll])-1):
T[j]=t[ll][j+1]-t[ll][j]
vuelta=np.arange(0,len(T[T>0.01E-8]),1)
###Output
_____no_output_____
###Markdown
Plot of the consecutive periods as a function of the turn number, compared with the theoretical value $T=\frac{2\pi}{\omega_{cyclotron}}=3.2105\times 10^{-8}s$.
###Code
plt.figure(figsize=(5,5))
plt.scatter(vuelta,T[T>0.01E-8], s=12, label="Datos Ciclotrón")
plt.plot(vuelta,2*np.pi/omega*np.ones(len(vuelta)), c="orange", label="Periodo teórico T")
plt.legend()
plt.ylabel("Periodo (s)")
plt.xlabel("Vueltas")
plt.savefig("periodoVueltas.eps")
###Output
The PostScript backend does not support transparency; partially transparent artists will be rendered opaque.
The PostScript backend does not support transparency; partially transparent artists will be rendered opaque.
###Markdown
The root-mean-square deviation of the period found is computed as follows:
###Code
errorT=np.sqrt((1/len(vuelta))*np.sum((T[T>0.01E-8]-2*np.pi/omega*np.ones(len(vuelta)))**2))
print("La desviación en el periodo es de {}s.".format(errorT))
###Output
La desviación en el periodo es de 1.0945948199892092e-08s.
###Markdown
Calculation of the energy along the cyclotron trajectory from $E=\frac{p_x^2+p_y^2}{2m}$ at the different times.
###Code
energy=(px**2+py**2)/(2*m)
###Output
_____no_output_____
###Markdown
Plot of energy as a function of time, to see the behaviour of the cyclotron until it reaches $E=200 MeV$.
###Code
plt.figure(figsize=(7,7))
plt.scatter(t,energy, s=1)
plt.plot(t,E*np.ones(len(t)), c="red")
plt.xlabel("Tiempo (s)")
plt.ylabel("Energía (J)")
plt.savefig("energiaTiempo.svg")
###Output
_____no_output_____
###Markdown
Phase-space diagram of position and momentum in $x$.
###Code
plt.figure(figsize=(7,7))
plt.plot(x,px)
plt.xlabel("Posición en x (m)")
plt.ylabel("Momento en x (kg m/s)")
plt.savefig("diagramaFaseX.svg")
###Output
_____no_output_____
###Markdown
Phase-space diagram of position and momentum in $y$.
###Code
plt.figure(figsize=(7,7))
plt.plot(y,py)
plt.xlabel("Posición en y (m)")
plt.ylabel("Momento en y (kg m/s)")
plt.savefig("diagramaFaseY.eps")
###Output
_____no_output_____ |
src/dev/justify_sampling/Justifying_sampling-for_mut2.ipynb | ###Markdown
Let B and C be the interactors for (A, x), and let D and E be the interactors for (A, y). If you pair protein with interactor there is no common interactor; what you need to do instead is compute the correlation between B-C, B-D, B-E, C-D and C-E and add it to a variable. Then you can show the average.
###Code
mutation_justifier = MutationJustifier(TRAINING_DATA_PATH)
mutation_justifier.export_data("dev/justify_sampling/mutation_justifier")
mutation_justifier.unique_proteins_corr_data["PEARSON_CORR"].value_counts()
mutation_justifier.unique_proteins_corr_data[
(mutation_justifier.unique_proteins_corr_data["PEARSON_CORR"] != "HAS ONLY ONE INTERACTOR ACROSS ALL MUTATIONS") &
(mutation_justifier.unique_proteins_corr_data["PEARSON_CORR"] != "NOT APPLICABLE")
].sample()
from helpers.helpers_analysis.justify_sampling import (
get_corr_values, get_entries_with_protein
)
get_entries_with_protein("P55957", mutation_justifier.training_data)
get_corr_values(get_entries_with_protein("P55957", mutation_justifier.training_data))
get_entries_with_protein("Q9Y570", mutation_justifier.training_data)
get_entries_with_protein("O95149", mutation_justifier.training_data)
mutation_justifier.unique_proteins_corr_data[
mutation_justifier.unique_proteins_corr_data["PROTEIN"] == "O00311"
]
get_entries_with_protein("O00311", mutation_justifier.training_data)
get_corr_values(get_entries_with_protein("O00311", mutation_justifier.training_data))
###Output
_____no_output_____ |
2-Additional_Figures/Fig11_Example-Corner-Plot.ipynb | ###Markdown
From `analysis_notebooks/R68_MCMC_plots_v2.ipynb`
###Code
#Import libraries and settings
from IPython.core.display import display, HTML
display(HTML("<style>.container { width:100% !important; }</style>"))
exec(open("../python/nb_setup.py").read())#Is there a better way to do this?
from constants import *
import R68_spec_tools as spec
import R68_yield as Yield
from tqdm.notebook import tqdm
from matplotlib.pyplot import *
style.use('../mplstyles/stylelib/standard.mplstyle')
import pickle as pkl
import corner
fname='../data/mcmc_Sor_128walk_50kstep_SNorm_v4.pkl' #Here is where we pick the file!
#Turns out we need the helper functions to still be defined since the stored samplers rely on them.
#Don't think it matters if they're exactly the same as what was used, since we don't use them here.
def Fit_helper(theta):
return calc_log_prob(model='Sor', theta=theta, theta_bounds=((0,1),(0,3e-2),(0,10),(0,5)),
spec_bounds=(5,101), likelihood='Pois')
basename=os.path.splitext(os.path.basename(fname))[0]
with open(fname,'rb') as file:
mcmc_data=pkl.load(file)
sampler=mcmc_data['sampler']
guesses=mcmc_data['guesses']
labels=mcmc_data['labels']
model=mcmc_data['Y_model']
RQs=mcmc_data['RQs']
relabel=True
if relabel:
for i in range(len(labels)):
labels[i]=labels[i].replace('scale','f')
labels
#######Warning, rescaling factors for plotting##############
f_rescale=np.ones(len(labels))
f_rescale[5]=2.04
#Look at the chain of parameter values
samples = sampler.get_chain()
ndim = samples.shape[2]
tau=sampler.get_autocorr_time(tol=0)
maxtau=RQs['maxtau']
flat_samples = sampler.get_chain(discard=int(2.*maxtau), thin=int(round(maxtau/2.)), flat=True)
#Pretty version of Sorensen
fig,ax = subplots(ndim,ndim) #Set up axes for plotting
plt.clf()
labsize_temp=rcParams['xtick.labelsize']
matplotlib.rc('xtick', labelsize=16)
matplotlib.rc('ytick', labelsize=16)
fig,ax = subplots(ndim,ndim)
flat_samples_plot=flat_samples/f_rescale*np.array([1,1e3,1,1,1,1])
labels_plot=labels.copy()
labels_plot[1]+=' ($10^{-3}$)'
fig = corner.corner(
flat_samples_plot, labels=labels_plot, quantiles=[0.16, 0.5, 0.84], show_titles=True,
title_fmt='0.3f', fig=fig,range=0.99*np.ones(ndim),
max_n_ticks=3,
title_kwargs={'fontsize':20}, label_kwargs={'fontsize':20},xmin=0,xmax=0.25);
matplotlib.rc('xtick', labelsize=labsize_temp)
matplotlib.rc('ytick', labelsize=labsize_temp)
for i in range(ndim):
ax[i][0].set(xlim=(0.125,0.255))
ax[i][1].set(xlim=(0.5,5.5))
ax[i][2].set(xlim=(2,27))
ax[i][3].set(xlim=(4.4,6.39))
ax[i][4].set(xlim=(3.15,5.45))
ax[i][5].set(xlim=(1.4,3.95))
#ax[0][i].set(ylim=()) #Default seems fine
ax[1][i].set(ylim=(0.5,5.5))
ax[2][i].set(ylim=(2,27))
ax[3][i].set(ylim=(4.4,6.39))
ax[4][i].set(ylim=(3.1,5.5))
ax[5][i].set(ylim=(1.4,4))
savefig('../figures/'+basename+'_corner_pretty.pdf')
plt.show()
###Output
findfont: Font family ['Arial'] not found. Falling back to DejaVu Sans.
findfont: Font family ['Arial'] not found. Falling back to DejaVu Sans.
|
8-Labs/Lab02/ComputationBasics.ipynb | ###Markdown
Basics of Computation in Python Python- Programming language- High-level- Interpreted- Data science and machine learning Expressions- The Python interpreter can evaluate an expression- An expression alone doesn’t do anything - `3 + 2` - `8 + 2 * 3`- An expression alone is not a line of code (instruction) - Useful for testing - Bad for maintainability
###Code
# Did you know? The command for commenting a block of code in Jupyter notebook is CTRL-/
# Highlight some lines of code and type CTRL-/ to comment or uncomment them all
# Try some expressions
3 + 2
# 3 + 2 * 8
# 8 - 4
###Output
_____no_output_____
###Markdown
Arithmetic Operators- Python’s basic arithmetic operators:| Operator | Meaning | Example ||----------|---------|---------||`+`|add|`3+2`||`-`|subtract|`5-2`||`*`|multiply|`6*2`||`/`|divide|`8/2`||`**`|raise to the power|`2**3`||`%`|modulus (remainder)|`7%3`||`//`|floor division (integer division)|`7//3`|- The order of precedence among the arithmetic operators is: - First: expressions in parentheses - Second: exponentiation - Third: multiplication and division operators - Last: addition and subtraction operators
###Code
# Try some arithmetic operators
# Notice how Python only outputs the value of the expression on the last line
2 + 3 * (8 + 2**3)
8%3
###Output
_____no_output_____
###Markdown
Comparison and Logical Operators- Evaluate to a Boolean value- Comparison operators| Operator | Meaning | Example ||----------|---------|---------||`<`|less than|`3 < 2`||`>`|greater than|`3 > 2`||`<=`|less than or equal to|`3 <= 3`||`>=`|greater than or equal to|`5 >= 3`||`==`|equal to|`4 == 2`||`!=`|not equal to|`4 != 2`|- Logical operators| Operator | Meaning | Example ||----------|---------|---------||`and`|True if both operands are True|`3 > 2 and 4 != 2`||`or`|True if at least one operand is True|`3 < 2 or 4 != 2`||`not`|True if the operand is False|`not 4 == 3`|
###Code
# Try some logical operators
3 < 2 and 4 != 2
###Output
_____no_output_____
###Markdown
Python Data Types- Every piece of data has a type- Basic Data Types| Name | Type | Example ||------|------|---------||`int`|integer|`-27`||`float`|floating point (decimal)|`27.1`||`bool`|Boolean|`True`||`str`|character string|`"hello"`|- There are many more built-in data types- Programmers can define their own custom data types Variables- A variable is a name for a memory area where data is stored - it allows data values to *change* rather than remain *constant*- Defined by initial assignment to a value- Variable names - should be meaningful, for readability - can contain letters, digits and underscore character - may not start with a digit - may not be a Python *reserved word*- The same variable can refer to values of any data type Assignment Operators- Assigns a value to a variable - Example: `val = 4` - Read it as: set `val` to `4` - Interpret it as: Put `4` into the memory area that `val` refers to- Operators - Simple assignment `size = 17.5` - The assignment expression (walrus operator `:=`) evaluates to the assigned value, e.g. `print(size := 17.5)` **Note: `:=` was introduced in Python 3.8 and can only be used inside a larger expression; a bare `size := 17.5` statement is a syntax error** - Shorthand (`+=`, `-=`, `*=`, etc.) `size += 2` is shorthand for `size = size + 2`
###Code
# Demonstrate some assignment operations
width = 4
#length := width
#length += 2
# area = length * width
###Output
_____no_output_____
###Markdown
Data Type Conversions- A variable can refer to values of different types- Check the data type of a variable's value: `type(length)`- Convert a value to a different type (if legal): `val = 4`, `fval = float(val)`, `sval = str(fval)`- Note: a conversion function does not change the data type of the input parameter
###Code
# Try conversions
val = 4
type(val)
# fval = float(val)
# type(fval)
# type(val)
# bval = bool(val) # what does this mean? what number is False?
# bval
# sval = str(fval)
# type(sval)
# sval
# sval = "hello"
# int(sval)
###Output
_____no_output_____
###Markdown
Input and Output- *Useful* programs take input and generate output - Command-line interface - Graphical user interface (GUI) - Files - Network sources - Databases Command-line input- Function `input(prompt)` - prints the *optional* prompt on the command line - waits for the user to type something - the text is sent to the program only after the user types enter/return - the entered data is always interpreted as text, *even if it's numeric*
###Code
# Try getting some input
value = input( "Please enter a value: ")
value
# type(value)
# input()
###Output
_____no_output_____
###Markdown
Command-line output- Function `print()` - prints the value(s) passed to it - automatically converts data values to strings - goes to the next line by default - separates values with space by default - optional arguments can set different separator and end of line values- Format output using string formatting functions Examples of output: `print( "Here is a message." )`, `print( "Here is a value:", val )` (where did val come from?), `print( "Don't go to next line.", end = ' ' )`, `print("more text")`, `print("more text")` Strings- A string is a complex data type - a *sequence* of characters that is *immutable* - individual characters are identified using indexing syntax: `s[position]`  - the general `len()` function returns the length of a string
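A small illustration (not part of the original slides) of the string formatting functions mentioned above; the variable names are just examples:

```python
name = "Ada"
score = 17.456
print("Name: {}, score: {:.2f}".format(name, score))  # str.format()
print(f"Name: {name}, score: {score:.2f}")            # f-string (Python 3.6+)
```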
###Code
# Experiment with indexing syntax
s = "birds"
print(s[3])
print(s[-1])
print(len(s))
type(s[3])
###Output
_____no_output_____
###Markdown
String Operators- `+` - concatenate strings- `==` `!=` - test the equality of strings- `<` `<=` `>` `>=` - compare strings alphabetically
###Code
# String operators
s1 = "cat"
s2 = "dog"
s3 = "cat"
print(s1 + s2)
print(s1 == s3)
print(s2 < s1)
print("Dog" < "cat")
###Output
_____no_output_____
###Markdown
Special and unprintable characters- Represented with *escape* sequences - preceded by backslash `\`- Common special characters|Character|Escape Sequence||---------|---------------||newline|`\n`||tab|`\t`||backslash|`\\`||quotes|`\'` `\"`|
###Code
# Escape sequences
print("hello\n")
print("1\t2")
print("\xEA")
###Output
_____no_output_____
###Markdown
String slicing- expanded indexing- a slice of a string is a new string, composed of characters from the initial string|Syntax|Result||------|------||`s[start]`|a single character||`s[start:end]`|a substring||`s[start:end:step]`|a selection of characters|- the end position is not inclusive (up to but not including *end*)- the step can be positive or negative - a negative step proceeds backwards through the string Empty and default values in slicing- the default step is `1`- the default end is `start+1`- an empty value for end means the end of the string (in the *step* direction)- an empty value for start means the start of the string (in the *step* direction)
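A few concrete slices as a quick illustration (added here; it reuses the string `s = "birds"` from the earlier cell):

```python
s = "birds"
print(s[1:4])     # 'ird'   - substring from index 1 up to (not including) 4
print(s[::2])     # 'brs'   - every other character
print(s[3:])      # 'ds'    - from index 3 to the end
print(s[-1::-1])  # 'sdrib' - the whole string reversed
```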
###Code
# What will this do?
# s[-1::-1]
###Output
_____no_output_____
###Markdown
String functions|Name|Behavior||----|--------||s.split()|Split a string into pieces based on a delimiter||s.strip()|Remove leading/trailing whitespace or other characters||s.upper()|Convert a string to uppercase||s.lower()|Convert a string to lowercase||s.isnumeric()|Return True if a string is numeric||s.find()|Return the index of a substring||s.replace()|Replace one substring with another||*Many, many, more ...*|Look them up as needed|
###Code
# some string functions
s = "hello"
print(s.upper())
print(s)
str = s.upper()
print(str)
str = "I am a string."
print(str.replace("am", "am not"))
"222".isnumeric()
s = "Fox. Socks. Box. Knox. Knox in box. Fox in socks."
print(s[1])
print(s[-1])
print(s[:3])
print(s.replace("ocks", "ox"))
print(s[0] + s[5] + s[12])
###Output
o
.
Fox
Fox. Sox. Box. Knox. Knox in box. Fox in sox.
FSB
|
notebooks/cloud_and_shadow_mask.ipynb | ###Markdown
**Instituto Nacional de Pesquisa Espacial (INPE) - 2020** **Course: SER-347** *** Project 9 *** Cloud and cloud-shadow detection: spectral information and metadata *** Students: * Aline Casassola * Felipe Rafael de Sá Menezes Lucena * Grazieli Rodigheri Importing the libraries used
###Code
# Importa a gdal, ogr e osr
from osgeo import gdal, ogr, osr
# Importa *
from gdalconst import *
# Uso de exceções
gdal.UseExceptions()
# Importa o matplotlib
import matplotlib.pyplot as plt
# Importa o Numpy
import numpy as np
# Importa os widgets
import ipywidgets as widgets
#Importa shape e mapping
from shapely.geometry import shape, mapping
import fiona
import math
import xml.etree.ElementTree as ET
import os
###Output
_____no_output_____
###Markdown
Function that opens and returns the CBERS NIR image used to compute the cloud mask:
###Code
# Função para abrir dataset:
def abrir_dataset (nome_arquivo):
print ("Abrindo o arquivo: " + nome_arquivo)
# Tenta abrir a imagem
dataset = None
try:
dataset = gdal.Open(nome_arquivo, GA_ReadOnly)
print("Arquivo aberto com sucesso!")
except:
print("Erro na abertura do arquivo!")
return dataset
###Output
_____no_output_____
###Markdown
Image selection by the user
###Code
cenas = []
for i in os.listdir("../imagens/"):
if '.' not in i:
cenas.append("../imagens/"+i)
# Cria um dropdown com a lista de imagens
cena_folder = widgets.Dropdown(options = cenas)
print ("Selecione a pasta da imagem")
cena_folder
bandas = []
values = []
for i in os.listdir(str(cena_folder.value)):
if '.' not in i:
values.append(i)
for i in values:
bandas.append(('Banda ' + i[-1], cena_folder.value + '/' + i +"/"+ i[-14:] +".tif"))
# Cria um dropdown com a lista de imagens
bandas_folder = widgets.ToggleButtons(
options=bandas,
description='Selecione:',
disabled=False,
button_style=''
)
bandas_folder
###Output
_____no_output_____
###Markdown
Opening the dataset and converting the NIR band to an array
###Code
# Abre a imagem NIR:
raster_NIR = abrir_dataset(bandas_folder.value)
# Obtém a banda única do dataset
banda_NIR = raster_NIR.GetRasterBand(1)
# Transforma a banda em array
array_banda_NIR = banda_NIR.ReadAsArray()
###Output
Abrindo o arquivo: ../imagens/CBERS_4_MUX_20200606_156_109_L2/200606_BAND8/200606_BAND8.tif
Arquivo aberto com sucesso!
###Markdown
Cloud mask: threshold selection by the user
###Code
limiar = widgets.IntSlider( min=0, max=255, value=120, step=1)
print("Selecione um limiar (DN)")
limiar
###Output
Selecione um limiar (DN)
###Markdown
Generating the cloud mask
###Code
# Cria uma cópia da banda NIR para trabalhar
mascara_nuvens = np.copy(array_banda_NIR)
# Cria a máscara em função do limiar
mascara_nuvens[mascara_nuvens < limiar.value] = False
mascara_nuvens[mascara_nuvens >= limiar.value] = True
###Output
_____no_output_____
###Markdown
Minimum and maximum pixel values
###Code
print("Valores banda NIR:")
print("Min: ", array_banda_NIR.min())
print("Max: ", array_banda_NIR.max())
print("")
print("Valores máscara de Nuvens:")
print("Min: ", mascara_nuvens.min())
print("Max: ", mascara_nuvens.max())
print("")
###Output
Valores banda NIR:
Min: 0
Max: 222
Valores máscara de Nuvens:
Min: 0
Max: 1
###Markdown
Displaying the images
###Code
# Cria a figura
plt.figure(figsize = (16, 8))
# Mostra a imagem NIR
plt.subplot(121)
plt.title("Banda Selecionada")
plt.imshow(array_banda_NIR, cmap='gray');
# Mostra a máscara de nuvens
plt.subplot(122)
plt.title("Máscara de Nuvens")
plt.imshow(mascara_nuvens, cmap='gray');
###Output
_____no_output_____
###Markdown
Cloud mask statistics: statistics for the whole image
###Code
num_total_pixels = mascara_nuvens.size
print ("Número total de pixels:", num_total_pixels)
num_pixels_nuvem = len(mascara_nuvens[mascara_nuvens == 1])
print ("Número total de pixels com nuvens:", num_pixels_nuvem)
per_pixels_nuvem = (num_pixels_nuvem/num_total_pixels)*100
print ("Percentual de pixels com nuvens:", (round(per_pixels_nuvem, 3)))
###Output
Número total de pixels: 51050025
Número total de pixels com nuvens: 1182349
Percentual de pixels com nuvens: 2.316
###Markdown
Classification of the image according to the cloud percentage
###Code
# Limiares de classificação definidos como constantes
limiar_ceu_coberto = 65
limiar_ceu_claro = 35
print ("Classificação da imagem de acordo com o percentual de nuvens: ")
if per_pixels_nuvem > limiar_ceu_coberto:
print("Céu coberto")
elif per_pixels_nuvem < limiar_ceu_claro:
print("Céu claro")
else:
print("Céu parcialmente coberto")
###Output
Classificação da imagem de acordo com o percentual de nuvens:
Céu coberto
###Markdown
Statistics per quadrant
###Code
# Obtém a metade da linha e da coluna:
meio_linhas = int(np.size(mascara_nuvens,0)/2)
meio_colunas = int(np.size(mascara_nuvens,1)/2)
# Obtém o array de cada quadrante:
Q1 = mascara_nuvens[:meio_linhas, :meio_colunas]
Q2 = mascara_nuvens[:meio_linhas, meio_colunas:]
Q3 = mascara_nuvens[meio_linhas:, :meio_colunas]
Q4 = mascara_nuvens[meio_linhas:, meio_colunas:]
# Quadrante 1:
total_pixels_Q1 = Q1.size
pixels_nuvem_Q1 = len(Q1[Q1 == 1])
per_nuvem_Q1 = (pixels_nuvem_Q1/total_pixels_Q1)*100
print ("Percentual de pixels com nuvens no Q1: ", (round(per_nuvem_Q1, 2)), "%", sep='')
# Quadrante 2:
total_pixels_Q2 = Q2.size
pixels_nuvem_Q2 = len(Q2[Q2 == 1])
per_nuvem_Q2 = (pixels_nuvem_Q2/total_pixels_Q2)*100
print ("Percentual de pixels com nuvens no Q2: ", (round(per_nuvem_Q2, 2)), "%", sep='')
# Quadrante 3:
total_pixels_Q3 = Q3.size
pixels_nuvem_Q3 = len(Q3[Q3 == 1])
per_nuvem_Q3 = (pixels_nuvem_Q3/total_pixels_Q3)*100
print ("Percentual de pixels com nuvens no Q3: ", (round(per_nuvem_Q3, 2)), "%", sep='')
# Quadrante 4:
total_pixels_Q4 = Q4.size
pixels_nuvem_Q4 = len(Q4[Q4 == 1])
per_nuvem_Q4 = (pixels_nuvem_Q4/total_pixels_Q4)*100
print ("Percentual de pixels com nuvens no Q4: ", (round(per_nuvem_Q4, 2)), "%", sep='')
###Output
Percentual de pixels com nuvens no Q1: 3.48%
Percentual de pixels com nuvens no Q2: 1.27%
Percentual de pixels com nuvens no Q3: 1.72%
Percentual de pixels com nuvens no Q4: 2.8%
###Markdown
Saving the mask as a TIFF file
###Code
def save_mask(matriz_de_pixels, nome_do_arquivo, dataset_de_referencia):
# Obtém as informações de tamanho do raster de referência
linhas = dataset_de_referencia.RasterYSize
colunas = dataset_de_referencia.RasterXSize
bandas = 1
# Carrega o driver para salvar tif
driver = gdal.GetDriverByName('GTiff')
# Obtém o tipo de dado do dataset de referência
data_type = dataset_de_referencia.GetRasterBand(1).DataType
# Criar novo dataset
dataset_output = driver.Create(nome_do_arquivo, colunas, linhas, bandas, data_type)
# Copiar informações espaciais da banda já existente
dataset_output.SetGeoTransform(dataset_de_referencia.GetGeoTransform())
# Copiar informações de projeção
dataset_output.SetProjection(dataset_de_referencia.GetProjectionRef())
# Escrever dados da matriz NumPy na banda
dataset_output.GetRasterBand(1).WriteArray(matriz_de_pixels)
# Salvar valores
dataset_output.FlushCache()
# Fechar dataset
dataset_output = None
# Nome do arquivo de saída
nome_arquivo_mascara_destino = '../produtos/imagens/mascara_nuvens_' + bandas_folder.value[-16:]
# Chama a função para salvar a máscara de nuvens
save_mask(mascara_nuvens,nome_arquivo_mascara_destino,raster_NIR)
###Output
_____no_output_____
###Markdown
Shadow mask: opening the cloud mask
###Code
# Abre a mascara de nuvem gerada anteriormente
mascara_nuvens = abrir_dataset (nome_arquivo_mascara_destino)
# Obtém a banda única do dataset
banda_mascara = mascara_nuvens.GetRasterBand(1)
###Output
Abrindo o arquivo: ../produtos/imagens/mascara_nuvens_200606_BAND8.tif
Arquivo aberto com sucesso!
###Markdown
Generating the initial shadow mask (vectorization of the cloud mask)
###Code
srs = osr.SpatialReference()
srs.ImportFromWkt(mascara_nuvens.GetProjectionRef())
shp_layername_cloud = '../produtos/shp/mascara_nuvens_' + bandas_folder.value[-16:-4]
driver = ogr.GetDriverByName("ESRI Shapefile")
shp_datasource = driver.CreateDataSource(shp_layername_cloud + '.shp')
shp_layer = shp_datasource.CreateLayer(shp_layername_cloud, srs=srs)
new_field = ogr.FieldDefn('DN', ogr.OFTReal)
shp_layer.CreateField(new_field)
gdal.Polygonize(banda_mascara, banda_mascara, shp_layer, 0, [], callback=None)
shp_datasource.Destroy()
shp_layername_cloud = shp_layername_cloud + ".shp"
###Output
_____no_output_____
###Markdown
Shifting the cloud mask by a delta_x and delta_y  Delta computed from the metadata 
###Code
tree = ET.parse(bandas_folder.value[:-3]+'xml')
root = tree.getroot()
sun_incidence = root.find("{http://www.gisplan.com.br/xmlsat}sunIncidenceAngle")
degree_sun_incidence = int(sun_incidence.find('{http://www.gisplan.com.br/xmlsat}degree').text)
minute_sun_incidence = int(sun_incidence.find('{http://www.gisplan.com.br/xmlsat}minute').text)
second_sun_incidence = int(sun_incidence.find('{http://www.gisplan.com.br/xmlsat}second').text)
sun_incidence = degree_sun_incidence + minute_sun_incidence/60 + second_sun_incidence/3600
image = root.find("{http://www.gisplan.com.br/xmlsat}image")
sun_position = image.find('{http://www.gisplan.com.br/xmlsat}sunPosition')
sun_azim = float(sun_position.find('{http://www.gisplan.com.br/xmlsat}sunAzimuth').text)
sun_zenith = float(sun_position.find('{http://www.gisplan.com.br/xmlsat}elevation').text)
sun_azim = math.radians(sun_azim)
sun_zenith = math.radians(sun_zenith)
view_azim = math.radians(0)
view_zenith = math.radians(0)
sen_sun_azi = math.sin(sun_azim)
cos_sun_azi = math.cos(sun_azim)
sen_view_azi = math.sin(view_azim)
cos_view_azi = math.cos(view_azim)
tan_sun_zenith = math.tan(sun_zenith)
tan_view_zenith = math.tan(view_zenith)
# Group the numerator in parentheses, as in the equation from B. Zhong et al. (2017) shown below
shadow_direction = math.pi + math.atan((sen_sun_azi*tan_sun_zenith-sen_view_azi*tan_view_zenith)/
                                       (cos_sun_azi*tan_sun_zenith-cos_view_azi*tan_view_zenith))
h = 1500 #Altura das nuvens
shadow_distance = h * math.tan(math.radians(sun_incidence))
# shadow_distance2 = h * math.sqrt(
# ( (sen_sun_azi*tan_sun_zenith) - (sen_view_azi*tan_view_zenith) )**2 +
# ( (cos_sun_azi*tan_sun_zenith) - (cos_view_azi*tan_view_zenith) )**2
# )
delta = (shadow_distance*math.sin(shadow_direction), shadow_distance*math.cos(shadow_direction))
###Output
_____no_output_____
###Markdown
\begin{equation} {\phi _s} = \pi + \arctan \left({\frac{{\sin {\phi _s}\tan {\theta _s} - \sin {\phi _v}\tan {\theta _v}}}{{\cos {\phi _s}\tan {\theta _s} - \cos {\phi _v}\tan {\theta _v}}}} \right) \end{equation} B. Zhong et al. (2017)\begin{equation} {d _s} = h * \tan(\Theta _s) \end{equation} Shifting the mask
###Code
# Essas três funções realizam o deslocamento das feições da camada. (Elas são recursivas entre si)
def movePoint_Coords(coords, delta): # "delta" é uma tupla (delta_x, delta_y)
return tuple(c + d for c, d in zip(coords, delta))
def moveLine_Coords(coords, delta):
return list(movePoint_Coords(pt_coords, delta) for pt_coords in coords)
def movePolygon_Coords(coords, delta):
return list(moveLine_Coords(ring_coords, delta) for ring_coords in coords)
with fiona.open(shp_layername_cloud, "r") as shadow_cloud:
with fiona.open("../produtos/shp/mascara_sombra_deslocada.shp",
"w",
driver=shadow_cloud.driver,
schema=shadow_cloud.schema,
crs=shadow_cloud.crs) as moved_shadow:
# Deslocamento de todas as feições da camada de máscara de nuvens vetorizada
for feature in shadow_cloud:
try:
feature['geometry']['coordinates'] = movePolygon_Coords(feature['geometry']['coordinates'], delta)
moved_shadow.write(feature)
except:
print("Error processing record %s:", feature)
###Output
_____no_output_____
###Markdown
Subtracting the cloud mask from the shifted mask. Note: the intersection between cloud and shadow must be removed, since it is not only shadow. After the subtraction, only what is exclusively shadow remains in the mask. 
###Code
with fiona.open(shp_layername_cloud, "r") as shadow_cloud:
with fiona.open("../produtos/shp/mascara_sombra_deslocada.shp", "r") as moved_shadow:
shp_layername_shadow = "../produtos/shp/mascara_sombra_" + bandas_folder.value[-16:-3] +"shp"
with fiona.open(shp_layername_shadow,
"w",
driver=shadow_cloud.driver,
schema=shadow_cloud.schema,
crs=shadow_cloud.crs) as dif:
for i, j in zip(moved_shadow,shadow_cloud):
# Corrigir erros topológico que impedem a subtração dos poligonos
polygons_source = shape(j['geometry'])
polygons_shift = shape(i['geometry'])
polygons_source_correct = polygons_source.buffer(0)
polygons_shift_correct = polygons_shift.buffer(0)
j['geometry'] = mapping(polygons_source_correct)
i['geometry'] = mapping(polygons_shift_correct)
#################################################################
try:
difference = shape(i['geometry']).difference(shape(j['geometry']))
i['geometry'] = mapping(difference)
dif.write(i)
except:
print("Error processing record %s:", i)
# Apagar arquivo shapefile auxiliar
driver.DeleteDataSource('../produtos/shp/mascara_sombra_deslocada.shp');
###Output
_____no_output_____
###Markdown
Converting the shadow shapefile into a mask
###Code
# Abrir a máscara de nuvens para setar a de sombras com a mesma configuração (limites, tamanho do pixel...)
mascara_nuvens = abrir_dataset (nome_arquivo_mascara_destino)
linhas = mascara_nuvens.RasterYSize
colunas = mascara_nuvens.RasterXSize
# Criação da camada vetorial
shp_mask = ogr.Open(shp_layername_shadow)
layer = shp_mask.GetLayer()
output = '../produtos/imagens/mascara_sombras_' + bandas_folder.value[-16:]
driver = gdal.GetDriverByName('GTiff')
dataset_output = driver.Create(output, colunas, linhas, 1, gdal.GDT_Byte)
# Copiar informações espaciais da mascara de nuvens
dataset_output.SetGeoTransform(mascara_nuvens.GetGeoTransform())
# Copiar informações de projeção da mascara de nuvens
dataset_output.SetProjection(mascara_nuvens.GetProjectionRef())
# Escrever dados na banda
band = dataset_output.GetRasterBand(1)
# Salvar valores
band.FlushCache()
# Ralizar a transformação de poligonos para raster
gdal.RasterizeLayer(dataset_output, [1], layer)
# Fechar dataset
dataset_output = None
###Output
_____no_output_____ |
Presentation 21 Dec 2018/Text Illustration.ipynb | ###Markdown
Text Demos Basic Frequency Analysis Load a Text Document
###Code
# Open the text document (Moon.txt) from the local folder
doc1 = open("Moon.txt", "r")
# Read the document and print its contents
doc1Txt = doc1.read()
print(doc1Txt)
###Output
We set sail on this new sea because there is new knowledge to be gained, and new rights to be won, and they must be won and used for the progress of all people. For space science, like nuclear science and all technology, has no conscience of its own. Whether it will become a force for good or ill depends on man, and only if the United States occupies a position of pre-eminence can we help decide whether this new ocean will be a sea of peace or a new terrifying theater of war. I do not say that we should or will go unprotected against the hostile misuse of space any more than we go unprotected against the hostile use of land or sea, but I do say that space can be explored and mastered without feeding the fires of war, without repeating the mistakes that man has made in extending his writ around this globe of ours.
There is no strife, no prejudice, no national conflict in outer space as yet. Its hazards are hostile to us all. Its conquest deserves the best of all mankind, and its opportunity for peaceful cooperation may never come again. But why, some say, the Moon? Why choose this as our goal? And they may well ask, why climb the highest mountain? Why, 35 years ago, fly the Atlantic? Why does Rice play Texas?
We choose to go to the Moon! We choose to go to the Moon in this decade and do the other things, not because they are easy, but because they are hard; because that goal will serve to organize and measure the best of our energies and skills, because that challenge is one that we are willing to accept, one we are unwilling to postpone, and one we intend to win!
###Markdown
Normalize the Text
###Code
from string import punctuation
# remove numeric digits
txt = ''.join(c for c in doc1Txt if not c.isdigit())
# remove punctuation and make lower case
txt = ''.join(c for c in txt if c not in punctuation).lower()
# print the normalized text
print (txt)
###Output
we set sail on this new sea because there is new knowledge to be gained and new rights to be won and they must be won and used for the progress of all people for space science like nuclear science and all technology has no conscience of its own whether it will become a force for good or ill depends on man and only if the united states occupies a position of preeminence can we help decide whether this new ocean will be a sea of peace or a new terrifying theater of war i do not say that we should or will go unprotected against the hostile misuse of space any more than we go unprotected against the hostile use of land or sea but i do say that space can be explored and mastered without feeding the fires of war without repeating the mistakes that man has made in extending his writ around this globe of ours
there is no strife no prejudice no national conflict in outer space as yet its hazards are hostile to us all its conquest deserves the best of all mankind and its opportunity for peaceful cooperation may never come again but why some say the moon why choose this as our goal and they may well ask why climb the highest mountain why years ago fly the atlantic why does rice play texas
we choose to go to the moon we choose to go to the moon in this decade and do the other things not because they are easy but because they are hard because that goal will serve to organize and measure the best of our energies and skills because that challenge is one that we are willing to accept one we are unwilling to postpone and one we intend to win
###Markdown
Get the Frequency Distribution
###Code
import nltk
import pandas as pd
from nltk.probability import FreqDist
# nltk.download("punkt")
# Tokenize the text into individual words
words = nltk.tokenize.word_tokenize(txt)
# Get the frequency distribution of the words into a data frame
fdist = FreqDist(words)
count_frame = pd.DataFrame(fdist, index =[0]).T
count_frame.columns = ['Count']
print (count_frame)
###Output
Count
we 9
set 1
sail 1
on 2
this 5
new 5
sea 3
because 5
there 2
is 3
knowledge 1
to 11
be 5
gained 1
and 12
rights 1
won 2
they 4
must 1
used 1
for 4
the 14
progress 1
of 11
all 4
people 1
space 4
science 2
like 1
nuclear 1
... ...
ask 1
climb 1
highest 1
mountain 1
years 1
ago 1
fly 1
atlantic 1
does 1
rice 1
play 1
texas 1
decade 1
other 1
things 1
easy 1
hard 1
serve 1
organize 1
measure 1
energies 1
skills 1
challenge 1
one 3
willing 1
accept 1
unwilling 1
postpone 1
intend 1
win 1
[152 rows x 1 columns]
###Markdown
Plot the distribution as a pareto chart
###Code
%matplotlib inline
import matplotlib.pyplot as plt
# Sort the data frame by frequency
counts = count_frame.sort_values('Count', ascending = False)
# Display the top 60 words as a bar plot
fig = plt.figure(figsize=(16, 9))
ax = fig.gca()
counts['Count'][:60].plot(kind = 'bar', ax = ax)
ax.set_title('Frequency of the most common words')
ax.set_ylabel('Frequency of word')
ax.set_xlabel('Word')
plt.show()
###Output
_____no_output_____
###Markdown
Remove stop words
###Code
# Get standard stop words from NLTK
# nltk.download("stopwords")
from nltk.corpus import stopwords
# Filter out the stop words
txt = ' '.join([word for word in txt.split() if word not in (stopwords.words('english'))])
# Get the frequency distribution of the remaining words
words = nltk.tokenize.word_tokenize(txt)
fdist = FreqDist(words)
count_frame = pd.DataFrame(fdist, index =[0]).T
count_frame.columns = ['Count']
# Plot the frequency of the top 60 words
counts = count_frame.sort_values('Count', ascending = False)
fig = plt.figure(figsize=(16, 9))
ax = fig.gca()
counts['Count'][:60].plot(kind = 'bar', ax = ax)
ax.set_title('Frequency of the most common words')
ax.set_ylabel('Frequency of word')
ax.set_xlabel('Word')
plt.show()
###Output
_____no_output_____
###Markdown
Term Frequency - Inverse Document Frequency View the documents
###Code
# remind ourselves of the first document
print(doc1Txt)
print("------------------------------------------------")
# Get a second document, normalize it, and remove stop words
doc2 = open("Gettysburg.txt", "r")
doc2Txt = doc2.read()
print (doc2Txt)
from string import punctuation
txt2 = ''.join(c for c in doc2Txt if not c.isdigit())
txt2 = ''.join(c for c in txt2 if c not in punctuation).lower()
txt2 = ' '.join([word for word in txt2.split() if word not in (stopwords.words('english'))])
# and a third
print("------------------------------------------------")
doc3 = open("about_aims.txt", "r")
# doc3 = open("Cognitive.txt", "r")
doc3Txt = doc3.read()
print (doc3Txt)
from string import punctuation
txt3 = ''.join(c for c in doc3Txt if not c.isdigit())
txt3 = ''.join(c for c in txt3 if c not in punctuation).lower()
txt3 = ' '.join([word for word in txt3.split() if word not in (stopwords.words('english'))])
###Output
We set sail on this new sea because there is new knowledge to be gained, and new rights to be won, and they must be won and used for the progress of all people. For space science, like nuclear science and all technology, has no conscience of its own. Whether it will become a force for good or ill depends on man, and only if the United States occupies a position of pre-eminence can we help decide whether this new ocean will be a sea of peace or a new terrifying theater of war. I do not say that we should or will go unprotected against the hostile misuse of space any more than we go unprotected against the hostile use of land or sea, but I do say that space can be explored and mastered without feeding the fires of war, without repeating the mistakes that man has made in extending his writ around this globe of ours.
There is no strife, no prejudice, no national conflict in outer space as yet. Its hazards are hostile to us all. Its conquest deserves the best of all mankind, and its opportunity for peaceful cooperation may never come again. But why, some say, the Moon? Why choose this as our goal? And they may well ask, why climb the highest mountain? Why, 35 years ago, fly the Atlantic? Why does Rice play Texas?
We choose to go to the Moon! We choose to go to the Moon in this decade and do the other things, not because they are easy, but because they are hard; because that goal will serve to organize and measure the best of our energies and skills, because that challenge is one that we are willing to accept, one we are unwilling to postpone, and one we intend to win!
------------------------------------------------
Four score and seven years ago our fathers brought forth on this continent, a new nation, conceived in Liberty, and dedicated to the proposition that all men are created equal.
Now we are engaged in a great civil war, testing whether that nation, or any nation so conceived and so dedicated, can long endure. We are met on a great battlefield of that war.
We have come to dedicate a portion of that field, as a final resting place for those who here gave their lives that that nation might live. It is altogether fitting and proper that we should do this.
But, in a larger sense, we can not dedicate, we can not consecrate, we can not hallow this ground. The brave men, living and dead, who struggled here, have consecrated it, far above our poor power to add or detract.
The world will little note, nor long remember what we say here, but it can never forget what they did here. It is for us the living, rather, to be dedicated here to the unfinished work which they who fought here have thus far so nobly advanced.
It is rather for us to be here dedicated to the great task remaining before us that from these honored dead we take increased devotion to that cause for which they gave the last full measure of devotion, that we here highly resolve that these dead shall not have died in vain; that this nation, under God, shall have a new birth of freedom and that government of the people, by the people, for the people, shall not perish from the earth.
------------------------------------------------
The African Institute for Mathematical Sciences (AIMS) is Africa's first network of centres of excellence in mathematical sciences.
We enable the continent's youth to shape the continent's future through Science, Technology, Engineering and Maths (STEM) education- training Africa's next generation of leaders. AIMS South Africa is one of the centres of excellence for training, research and public engagement in Cape Town, South Africa.
AIMS South Africa was established in 2003 as a partnership project of the following 6 universities: Cambridge, Cape Town, Oxford, Paris Sud XI, Stellenbosch, and Western Cape.
###Markdown
Get TF-IDF values for the top five words in each document
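The next cell implements the standard TF-IDF weighting using textblob. For reference, the quantities it computes are (the +1 in the denominator mirrors the smoothing used in the code):

$$\mathrm{tf}(w, d) = \frac{\text{count of } w \text{ in } d}{\text{number of words in } d}, \qquad \mathrm{idf}(w, D) = \log\frac{|D|}{1 + |\{d \in D : w \in d\}|}, \qquad \mathrm{tfidf}(w, d, D) = \mathrm{tf}(w, d)\,\mathrm{idf}(w, D)$$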
###Code
# install textblob library and define functions for TF-IDF
# !pip install -U textblob
import math
from textblob import TextBlob as tb
def tf(word, doc):
return doc.words.count(word) / len(doc.words)
def contains(word, docs):
return sum(1 for doc in docs if word in doc.words)
def idf(word, docs):
return math.log(len(docs) / (1 + contains(word, docs)))
def tfidf(word, doc, docs):
return tf(word,doc) * idf(word, docs)
# Create a collection of documents as textblobs
doc1 = tb(txt)
doc2 = tb(txt2)
doc3 = tb(txt3)
docs = [doc1, doc2, doc3]
# Use TF-IDF to get the three most important words from each document
print('-----------------------------------------------------------')
for i, doc in enumerate(docs):
print("Top words in document {}".format(i + 1))
scores = {word: tfidf(word, doc, docs) for word in doc.words}
sorted_words = sorted(scores.items(), key=lambda x: x[1], reverse=True)
for word, score in sorted_words[:5]:
print("\tWord: {}, TF-IDF: {}".format(word, round(score, 5)))
###Output
-----------------------------------------------------------
Top words in document 1
Word: space, TF-IDF: 0.01193
Word: go, TF-IDF: 0.01193
Word: sea, TF-IDF: 0.00894
Word: hostile, TF-IDF: 0.00894
Word: moon, TF-IDF: 0.00894
Top words in document 2
Word: nation, TF-IDF: 0.01662
Word: dedicated, TF-IDF: 0.01329
Word: great, TF-IDF: 0.00997
Word: dead, TF-IDF: 0.00997
Word: shall, TF-IDF: 0.00997
Top words in document 3
Word: aims, TF-IDF: 0.01994
Word: south, TF-IDF: 0.01994
Word: africa, TF-IDF: 0.01994
Word: cape, TF-IDF: 0.01994
Word: mathematical, TF-IDF: 0.01329
###Markdown
Stemming

View the frequency of unstemmed words from Kennedy's inauguration speech
###Code
# Load and print text
doc4 = open("KennedyInaugural.txt", "r")
kenTxt = doc4.read()
print(kenTxt)
# Normalize and remove stop words
from string import punctuation
kenTxt = ''.join(c for c in kenTxt if not c.isdigit())
kenTxt = ''.join(c for c in kenTxt if c not in punctuation).lower()
kenTxt = ' '.join([word for word in kenTxt.split() if word not in (stopwords.words('english'))])
# Get Frequency distribution
words = nltk.tokenize.word_tokenize(kenTxt)
fdist = FreqDist(words)
count_frame = pd.DataFrame(fdist, index =[0]).T
count_frame.columns = ['Count']
# Plot frequency
counts = count_frame.sort_values('Count', ascending = False)
fig = plt.figure(figsize=(16, 9))
ax = fig.gca()
counts['Count'][:60].plot(kind = 'bar', ax = ax)
ax.set_title('Frequency of the most common words')
ax.set_ylabel('Frequency of word')
ax.set_xlabel('Word')
plt.show()
###Output
Vice President Johnson, Mr. Speaker, Mr. Chief Justice, President Eisenhower, Vice President Nixon, President Truman, reverend clergy, fellow citizens:
We observe today not a victory of party, but a celebration of freedom -- symbolizing an end, as well as a beginning -- signifying renewal, as well as change. For I have sworn before you and Almighty God the same solemn oath our forebears prescribed nearly a century and three-quarters ago.
The world is very different now. For man holds in his mortal hands the power to abolish all forms of human poverty and all forms of human life. And yet the same revolutionary beliefs for which our forebears fought are still at issue around the globe -- the belief that the rights of man come not from the generosity of the state, but from the hand of God.
We dare not forget today that we are the heirs of that first revolution. Let the word go forth from this time and place, to friend and foe alike, that the torch has been passed to a new generation of Americans -- born in this century, tempered by war, disciplined by a hard and bitter peace, proud of our ancient heritage, and unwilling to witness or permit the slow undoing of those human rights to which this nation has always been committed, and to which we are committed today at home and around the world.
Let every nation know, whether it wishes us well or ill, that we shall pay any price, bear any burden, meet any hardship, support any friend, oppose any foe, to assure the survival and the success of liberty.
This much we pledge -- and more.
To those old allies whose cultural and spiritual origins we share, we pledge the loyalty of faithful friends. United there is little we cannot do in a host of cooperative ventures. Divided there is little we can do -- for we dare not meet a powerful challenge at odds and split asunder.
To those new states whom we welcome to the ranks of the free, we pledge our word that one form of colonial control shall not have passed away merely to be replaced by a far more iron tyranny. We shall not always expect to find them supporting our view. But we shall always hope to find them strongly supporting their own freedom -- and to remember that, in the past, those who foolishly sought power by riding the back of the tiger ended up inside.
To those people in the huts and villages of half the globe struggling to break the bonds of mass misery, we pledge our best efforts to help them help themselves, for whatever period is required -- not because the Communists may be doing it, not because we seek their votes, but because it is right. If a free society cannot help the many who are poor, it cannot save the few who are rich.
To our sister republics south of our border, we offer a special pledge: to convert our good words into good deeds, in a new alliance for progress, to assist free men and free governments in casting off the chains of poverty. But this peaceful revolution of hope cannot become the prey of hostile powers. Let all our neighbors know that we shall join with them to oppose aggression or subversion anywhere in the Americas. And let every other power know that this hemisphere intends to remain the master of its own house.
To that world assembly of sovereign states, the United Nations, our last best hope in an age where the instruments of war have far outpaced the instruments of peace, we renew our pledge of support -- to prevent it from becoming merely a forum for invective, to strengthen its shield of the new and the weak, and to enlarge the area in which its writ may run.
Finally, to those nations who would make themselves our adversary, we offer not a pledge but a request: that both sides begin anew the quest for peace, before the dark powers of destruction unleashed by science engulf all humanity in planned or accidental self-destruction.
We dare not tempt them with weakness. For only when our arms are sufficient beyond doubt can we be certain beyond doubt that they will never be employed.
But neither can two great and powerful groups of nations take comfort from our present course -- both sides overburdened by the cost of modern weapons, both rightly alarmed by the steady spread of the deadly atom, yet both racing to alter that uncertain balance of terror that stays the hand of mankind's final war.
So let us begin anew -- remembering on both sides that civility is not a sign of weakness, and sincerity is always subject to proof. Let us never negotiate out of fear, but let us never fear to negotiate.
Let both sides explore what problems unite us instead of belaboring those problems which divide us.
Let both sides, for the first time, formulate serious and precise proposals for the inspection and control of arms, and bring the absolute power to destroy other nations under the absolute control of all nations.
Let both sides seek to invoke the wonders of science instead of its terrors. Together let us explore the stars, conquer the deserts, eradicate disease, tap the ocean depths, and encourage the arts and commerce.
Let both sides unite to heed, in all corners of the earth, the command of Isaiah -- to "undo the heavy burdens, and [to] let the oppressed go free."¹
And, if a beachhead of cooperation may push back the jungle of suspicion, let both sides join in creating a new endeavor -- not a new balance of power, but a new world of law -- where the strong are just, and the weak secure, and the peace preserved.
All this will not be finished in the first one hundred days. Nor will it be finished in the first one thousand days; nor in the life of this Administration; nor even perhaps in our lifetime on this planet. But let us begin.
In your hands, my fellow citizens, more than mine, will rest the final success or failure of our course. Since this country was founded, each generation of Americans has been summoned to give testimony to its national loyalty. The graves of young Americans who answered the call to service surround the globe.
Now the trumpet summons us again -- not as a call to bear arms, though arms we need -- not as a call to battle, though embattled we are -- but a call to bear the burden of a long twilight struggle, year in and year out, "rejoicing in hope; patient in tribulation,"² a struggle against the common enemies of man: tyranny, poverty, disease, and war itself.
Can we forge against these enemies a grand and global alliance, North and South, East and West, that can assure a more fruitful life for all mankind? Will you join in that historic effort?
In the long history of the world, only a few generations have been granted the role of defending freedom in its hour of maximum danger. I do not shrink from this responsibility -- I welcome it. I do not believe that any of us would exchange places with any other people or any other generation. The energy, the faith, the devotion which we bring to this endeavor will light our country and all who serve it. And the glow from that fire can truly light the world.
And so, my fellow Americans, ask not what your country can do for you; ask what you can do for your country.
My fellow citizens of the world, ask not what America will do for you, but what together we can do for the freedom of man.
Finally, whether you are citizens of America or citizens of the world, ask of us here the same high standards of strength and sacrifice which we ask of you. With a good conscience our only sure reward, with history the final judge of our deeds, let us go forth to lead the land we love, asking His blessing and His help, but knowing that here on earth God's work must truly be our own.
###Markdown
Stem the words using the Porter stemmer
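Before running the cell below, here is a small self-contained illustration of what the Porter stemmer does to individual words (the example words here are just an illustration, not part of the original lab):

```python
# Quick illustration of Porter stemming: different inflections collapse to one stem,
# which is why stemmed frequency counts get merged in the plot below.
from nltk.stem.porter import PorterStemmer

ps = PorterStemmer()
for w in ["nation", "nations", "national", "living", "lives"]:
    print(w, "->", ps.stem(w))
```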
###Code
from nltk.stem.porter import PorterStemmer
# Get the word stems
ps = PorterStemmer()
stems = [ps.stem(word) for word in words]
# Get Frequency distribution
fdist = FreqDist(stems)
count_frame = pd.DataFrame(fdist, index =[0]).T
count_frame.columns = ['Count']
# Plot frequency
counts = count_frame.sort_values('Count', ascending = False)
fig = plt.figure(figsize=(16, 9))
ax = fig.gca()
counts['Count'][:60].plot(kind = 'bar', ax = ax)
ax.set_title('Frequency of the most common words')
ax.set_ylabel('Frequency of word')
ax.set_xlabel('Word')
plt.show()
###Output
_____no_output_____ |
notebooks/03_mnist-conditional.ipynb | ###Markdown
Conditional MNIST example

In the last notebook we looked at the creation of handwritten digits. However, we could not control which numbers were generated because we had no idea how the GAN mapped the different digits 0-9 in the latent space. In this tutorial we're forcing the network to learn a specific distribution for every digit, so that we have control over the output when generating new examples. Note that this comes at a cost: while so far all examples were performed in an unsupervised way (meaning you didn't use the labels of the data), this approach needs labeled data.

First import the usual libraries:
###Code
import os
import torch
import pickle
import numpy as np
import torch.nn as nn
import matplotlib.pyplot as plt
from sklearn.preprocessing import OneHotEncoder
os.chdir("/home/thomas/Backup/Algorithmen/GAN-pytorch")
from vegans.GAN import ConditionalWassersteinGAN, ConditionalWassersteinGANGP
from vegans.utils.utils import plot_losses, plot_images, get_input_dim
###Output
_____no_output_____
###Markdown
Check if your machine has an available GPU for usage.
###Code
print('Cuda is available: {}'.format(torch.cuda.is_available()))
device = "cuda" if torch.cuda.is_available() else "cpu"
###Output
Cuda is available: True
###Markdown
Now download the mnist dataset and set the parameters below (To get exactly the same format as in this tutorial download from [here](https://github.com/tneuer/GAN-pytorch/tree/main/data/mnist), but of course you can load it from anywhere you want):
###Code
# This directory should contain "train_images.pickle and test_images.pickle"
datapath_train = "/home/thomas/Backup/Algorithmen/GAN-pytorch/data/mnist/train_images.pickle"
datapath_test = "/home/thomas/Backup/Algorithmen/GAN-pytorch/data/mnist/test_images.pickle"
# Hidden layer channels for generator / critic
ngf = 8
ncf = 4
# Padding for mnist images (28x28) -> (32x32)
pad = 2
###Output
_____no_output_____
###Markdown
Now load and preprocess the data:
- The images are saved in gray scale from 0-255, so we scale them to 0-1. Then we can use a Sigmoid as the last layer of the generator.
- The original image shape is (28, 28), but when working with convolutional layers it is often beneficial to have a power of two. Therefore we pad two empty rows and columns to every image.
- Finally, we reshape the images because we need them in the shape (nr_channels, nr_height_pixels, nr_width_pixels). In our case this results in [1, 32, 32].
###Code
""" Create dataset
"""
with open(datapath_train, "rb") as f:
X_train, y_train = pickle.load(f)
with open(datapath_test, "rb") as f:
X_test, y_test = pickle.load(f)
X_train = X_train / np.max(X_train)
X_test = X_test / np.max(X_test)
X_train = np.pad(X_train, [(0, 0), (pad, pad), (pad, pad)], mode='constant').reshape(-1, 1, 32, 32)
X_test = np.pad(X_test, [(0, 0), (pad, pad), (pad, pad)], mode='constant').reshape(-1, 1, 32, 32)
print(X_train.shape, X_test.shape)
###Output
(60000, 1, 32, 32) (10000, 1, 32, 32)
###Markdown
Now we plot the handwritten digits, this time using the labels, because we need them later for this supervised algorithm anyway.
###Code
fig, axs = plot_images(images=X_train.reshape(-1, 32, 32), labels=y_train, n=16)
###Output
_____no_output_____
###Markdown
We need to pass the labels as one hot encoded vectors so we use the scikit-learn library to transform the data.
###Code
one_hot_encoder = OneHotEncoder(sparse=False)
y_train = one_hot_encoder.fit_transform(y_train.reshape(-1, 1))
y_test = one_hot_encoder.transform(y_test.reshape(-1, 1))
print(y_train.shape, y_test.shape)
###Output
(60000, 10) (10000, 10)
###Markdown
We now define all the different input sizes for the discriminator and generator. Note that internally the images X_train are concatenated with the labels before being passed to the discriminator / critic. The labels are also concatenated with the noise so that the generator as well as the adversariat can learn to differentiate between images of different digits. To calculate the number of input channels / features we can use a utility function called `get_input_dim(dim1, dim2)`.
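To see where the extra channels in the shapes below come from, here is a small illustrative sketch (not the library's internal code) of combining a one-hot label with an image and with a noise tensor along the channel axis:

```python
# Illustration of the concatenation that produces the [11, 32, 32] / [11, 4, 4] shapes below.
import numpy as np

image = np.zeros((1, 32, 32))            # one gray-scale image
noise = np.zeros((1, 4, 4))              # latent sample with z_dim = [1, 4, 4]
label = np.zeros(10); label[3] = 1       # one-hot label for digit 3

# Tile the label so each class gets its own channel, then stack along the channel axis.
label_map_x = np.ones((10, 32, 32)) * label[:, None, None]
label_map_z = np.ones((10, 4, 4)) * label[:, None, None]
print(np.concatenate([image, label_map_x], axis=0).shape)  # (11, 32, 32) -> critic input
print(np.concatenate([noise, label_map_z], axis=0).shape)  # (11, 4, 4)  -> generator input
```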
###Code
x_dim = X_train.shape[1:]
y_dim = y_train.shape[1:]
z_dim = [1, 4, 4]
print("x_dim:", x_dim, "y_dim:", y_dim, "z_dim:", z_dim)
adv_in_dim = get_input_dim(dim1=x_dim, dim2=y_dim)
gen_in_dim = get_input_dim(dim1=z_dim, dim2=y_dim)
print("Adv_dim:", adv_in_dim, "Gen_dim:", gen_in_dim)
###Output
x_dim: (1, 32, 32) y_dim: (10,) z_dim: [1, 4, 4]
Adv_dim: [11, 32, 32] Gen_dim: [11, 4, 4]
###Markdown
Note that the labels get concatenated along the channel axis of both `z_dim` and `x_dim`. You could also choose a single integer for `z_dim` and it would still return the correct number of features.

Definition of Generator and Discriminator / Critic

We'll specify the architecture of the generator and discriminator / critic networks. It's difficult to know which architectures to choose before training; here we use an architecture which has proved to work. Since we want to train a Wasserstein GAN, the output of the critic should be a real number and not a probability. Therefore we drop the last sigmoid and use the identity function. If you want to switch to an architecture that uses a discriminator, swap the `nn.Identity` for `nn.Sigmoid` in the adversariat.
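For reference, the swap mentioned above only touches the output layer (a minimal illustration; the variable names here are placeholders, not library API):

```python
import torch.nn as nn

critic_head = nn.Identity()        # Wasserstein critic: unbounded real-valued score
discriminator_head = nn.Sigmoid()  # discriminator (e.g. vanilla GAN): probability in (0, 1)
```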
###Code
""" Generator
"""
class Generator(nn.Module):
def __init__(self):
super(Generator, self).__init__()
ngf = 20
nc = 1
self.hidden_part = nn.Sequential(
nn.ConvTranspose2d(in_channels=gen_in_dim[0], out_channels=ngf * 8, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(ngf * 8),
nn.LeakyReLU(0.1),
# state size. (ngf*8) x 4 x 4
nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1),
nn.BatchNorm2d(ngf * 4),
nn.LeakyReLU(0.1),
# state size. (ngf*4) x 8 x 8
nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1),
nn.BatchNorm2d(ngf * 2),
nn.LeakyReLU(0.1),
nn.Conv2d(ngf * 2, ngf * 2, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(ngf * 2),
nn.LeakyReLU(0.2, inplace=True),
# state size. (ngf*2) x 16 x 16
nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1),
nn.BatchNorm2d(ngf),
nn.LeakyReLU(0.1),
nn.Conv2d(ngf, ngf, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(ngf),
nn.LeakyReLU(0.2, inplace=True),
# state size. (ngf) x 32 x 32
nn.ConvTranspose2d(ngf, nc, 5, 1, 2),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(nc, nc, kernel_size=3, stride=1, padding=1),
)
self.output = nn.Sigmoid()
def forward(self, x):
x = self.hidden_part(x)
x = self.output(x)
return x
""" Adversariat
"""
class Critic(nn.Module):
def __init__(self):
super(Critic, self).__init__()
ncf = 8
self.hidden_part = nn.Sequential(
# input is (nc) x 32 x 32
nn.Conv2d(in_channels=adv_in_dim[0], out_channels=ncf, kernel_size=3, stride=2, padding=1),
nn.LeakyReLU(0.2, inplace=True),
# state size. (ncf) x 16 x 16
nn.Conv2d(ncf, ncf * 2, 4, 2, 1),
nn.BatchNorm2d(ncf * 2),
nn.LeakyReLU(0.2, inplace=True),
# state size. (ncf*2) x 8 x 8
nn.Conv2d(ncf * 2, ncf * 4, 4, 2, 1),
nn.BatchNorm2d(ncf * 4),
nn.LeakyReLU(0.2, inplace=True),
# state size. (ncf*4) x 4 x 4
nn.Conv2d(ncf * 4, ncf * 8, 4, 2, 1),
nn.BatchNorm2d(ncf * 8),
nn.LeakyReLU(0.2, inplace=True),
# state size. (ncf*8) x 2 x 2
nn.Flatten(),
nn.Linear(in_features=ncf*8*2*2, out_features=32),
nn.ReLU(),
nn.Linear(in_features=32, out_features=1)
)
self.output = nn.Identity()
def forward(self, x):
x = self.hidden_part(x)
x = self.output(x)
return x
generator = Generator()
critic = Critic()
###Output
_____no_output_____
###Markdown
Train our GAN

Build a Wasserstein GAN trainer, using default optimizers (we can also specify our own). To use a different GAN algorithm, just use the corresponding class (e.g., `VanillaGAN` for the original GAN).

Here you can specify some optional GAN parameters, such as the latent space dimension `z_dim`, the number of samples to save (`fixed_noise_size`) and the optimizer keyword arguments (`optim_kwargs`). We set `folder=None` so that no folder is created where all results would be stored. Otherwise we could give a path like `folder="TrainedModels/GAN"`, and all results (summary, images, loss functions, tensorboard information, models) would be saved in that folder. You can control what should be saved in the `fit` method. This folder will never overwrite an existing folder: if the path already exists, a new path of the form `folder=path_{TimeStamp}` is created.

We also decrease the learning rate of the critic a little. For this conditional algorithm we also need to pass in the dimension of the one-hot encoded labels.
###Code
optim_kwargs = {"Generator": {"lr": 0.0005}, "Adversariat": {"lr": 0.0001}}
gan = ConditionalWassersteinGAN(
generator, critic, z_dim=z_dim, x_dim=x_dim, y_dim=y_dim,
optim_kwargs=optim_kwargs, fixed_noise_size=20, folder=None
)
gan.summary()
###Output
Generator
Input shape: (11, 4, 4)
----------------------------------------------------------------
Layer (type) Output Shape Param #
================================================================
ConvTranspose2d-1 [-1, 160, 4, 4] 16,000
BatchNorm2d-2 [-1, 160, 4, 4] 320
LeakyReLU-3 [-1, 160, 4, 4] 0
ConvTranspose2d-4 [-1, 80, 8, 8] 204,880
BatchNorm2d-5 [-1, 80, 8, 8] 160
LeakyReLU-6 [-1, 80, 8, 8] 0
ConvTranspose2d-7 [-1, 40, 16, 16] 51,240
BatchNorm2d-8 [-1, 40, 16, 16] 80
LeakyReLU-9 [-1, 40, 16, 16] 0
Conv2d-10 [-1, 40, 16, 16] 14,440
BatchNorm2d-11 [-1, 40, 16, 16] 80
LeakyReLU-12 [-1, 40, 16, 16] 0
ConvTranspose2d-13 [-1, 20, 32, 32] 12,820
BatchNorm2d-14 [-1, 20, 32, 32] 40
LeakyReLU-15 [-1, 20, 32, 32] 0
Conv2d-16 [-1, 20, 32, 32] 3,620
BatchNorm2d-17 [-1, 20, 32, 32] 40
LeakyReLU-18 [-1, 20, 32, 32] 0
ConvTranspose2d-19 [-1, 1, 32, 32] 501
LeakyReLU-20 [-1, 1, 32, 32] 0
Conv2d-21 [-1, 1, 32, 32] 10
Sigmoid-22 [-1, 1, 32, 32] 0
Generator-23 [-1, 1, 32, 32] 0
================================================================
Total params: 304,231
Trainable params: 304,231
Non-trainable params: 0
----------------------------------------------------------------
Input size (MB): 0.00
Forward/backward pass size (MB): 1.62
Params size (MB): 1.16
Estimated Total Size (MB): 2.78
----------------------------------------------------------------
Adversariat
Input shape: (11, 32, 32)
----------------------------------------------------------------
Layer (type) Output Shape Param #
================================================================
Conv2d-1 [-1, 8, 16, 16] 800
LeakyReLU-2 [-1, 8, 16, 16] 0
Conv2d-3 [-1, 16, 8, 8] 2,064
BatchNorm2d-4 [-1, 16, 8, 8] 32
LeakyReLU-5 [-1, 16, 8, 8] 0
Conv2d-6 [-1, 32, 4, 4] 8,224
BatchNorm2d-7 [-1, 32, 4, 4] 64
LeakyReLU-8 [-1, 32, 4, 4] 0
Conv2d-9 [-1, 64, 2, 2] 32,832
BatchNorm2d-10 [-1, 64, 2, 2] 128
LeakyReLU-11 [-1, 64, 2, 2] 0
Flatten-12 [-1, 256] 0
Linear-13 [-1, 32] 8,224
ReLU-14 [-1, 32] 0
Linear-15 [-1, 1] 33
Identity-16 [-1, 1] 0
Critic-17 [-1, 1] 0
================================================================
Total params: 52,401
Trainable params: 52,401
Non-trainable params: 0
----------------------------------------------------------------
Input size (MB): 0.04
Forward/backward pass size (MB): 0.07
Params size (MB): 0.20
Estimated Total Size (MB): 0.32
----------------------------------------------------------------
###Markdown
Train the networks by calling the `fit()` method. Here you can specify some parameters for training like `epochs`, `batch_size`, `save_model_every`, `save_images_every`, `print_every`, `enable_tensorboard` and others. You can interrupt training at any time and still access the training stats from within the `gan` object, and you can resume training later. Note that we increase the number of steps the critic (adversariat) is trained for, which is common for Wasserstein GANs but not for `VanillaGAN`, so take care when switching out algorithms.
###Code
steps = {"Adversariat": 5}
gan.fit(
X_train, y_train, X_test, y_test, epochs=5, steps=steps,
print_every="0.25e", save_losses_every=10, enable_tensorboard=False
)
###Output
------------------------------------------------------------
EPOCH: 1
------------------------------------------------------------
Step: 468 / 9375 (Epoch: 1 / 5, Batch: 468 / 1875)
------------------------------------------------------------
Generator: 0.008447149768471718
Adversariat: 1.896452158689499e-05
Adversariat_fake: -0.008447149768471718
Adversariat_real: 0.008485078811645508
RealFakeRatio: -1.0044901371002197
Time left: ~14.863 minutes (Steps remaining: 8907).
Step: 936 / 9375 (Epoch: 1 / 5, Batch: 936 / 1875)
------------------------------------------------------------
Generator: 0.006292358972132206
Adversariat: -0.00021411944180727005
Adversariat_fake: -0.006292358972132206
Adversariat_real: 0.005864120088517666
RealFakeRatio: -0.9319430589675903
Time left: ~13.936 minutes (Steps remaining: 8439).
Step: 1404 / 9375 (Epoch: 1 / 5, Batch: 1404 / 1875)
------------------------------------------------------------
Generator: 0.007013081107288599
Adversariat: -0.0005701233167201281
Adversariat_fake: -0.007013080641627312
Adversariat_real: 0.005872834008187056
RealFakeRatio: -0.8374114632606506
Time left: ~13.761 minutes (Steps remaining: 7971).
Step: 1872 / 9375 (Epoch: 1 / 5, Batch: 1872 / 1875)
------------------------------------------------------------
Generator: 0.0070505570620298386
Adversariat: -0.0008188504725694656
Adversariat_fake: -0.0070505570620298386
Adversariat_real: 0.005412856116890907
RealFakeRatio: -0.7677203416824341
Time left: ~13.056 minutes (Steps remaining: 7503).
------------------------------------------------------------
EPOCH: 2
------------------------------------------------------------
Step: 2340 / 9375 (Epoch: 2 / 5, Batch: 465 / 1875)
------------------------------------------------------------
Generator: 0.008230520412325859
Adversariat: -3.941543400287628e-05
Adversariat_fake: -0.008230520412325859
Adversariat_real: 0.008151689544320107
RealFakeRatio: -0.9904221296310425
Time left: ~12.461 minutes (Steps remaining: 7035).
Step: 2808 / 9375 (Epoch: 2 / 5, Batch: 933 / 1875)
------------------------------------------------------------
Generator: 0.007539210841059685
Adversariat: -0.00020751380361616611
Adversariat_fake: -0.0075392103753983974
Adversariat_real: 0.007124182768166065
RealFakeRatio: -0.9449507594108582
Time left: ~12.068 minutes (Steps remaining: 6567).
Step: 3276 / 9375 (Epoch: 2 / 5, Batch: 1401 / 1875)
------------------------------------------------------------
Generator: 0.005336410365998745
Adversariat: -0.00021827570162713528
Adversariat_fake: -0.005336410365998745
Adversariat_real: 0.004899858962744474
RealFakeRatio: -0.9181938171386719
Time left: ~11.183 minutes (Steps remaining: 6099).
Step: 3744 / 9375 (Epoch: 2 / 5, Batch: 1869 / 1875)
------------------------------------------------------------
Generator: 0.007225080858916044
Adversariat: -0.0003593084402382374
Adversariat_fake: -0.007225080858916044
Adversariat_real: 0.0065064639784395695
RealFakeRatio: -0.9005385637283325
Time left: ~10.323 minutes (Steps remaining: 5631).
------------------------------------------------------------
EPOCH: 3
------------------------------------------------------------
Step: 4212 / 9375 (Epoch: 3 / 5, Batch: 462 / 1875)
------------------------------------------------------------
Generator: 0.007071155589073896
Adversariat: -0.00048354663886129856
Adversariat_fake: -0.007071155589073896
Adversariat_real: 0.006104062311351299
RealFakeRatio: -0.8632340431213379
Time left: ~9.514 minutes (Steps remaining: 5163).
Step: 4680 / 9375 (Epoch: 3 / 5, Batch: 930 / 1875)
------------------------------------------------------------
Generator: 0.006354435347020626
Adversariat: -0.0005235334392637014
Adversariat_fake: -0.006354435347020626
Adversariat_real: 0.005307368468493223
RealFakeRatio: -0.8352226614952087
Time left: ~8.648 minutes (Steps remaining: 4695).
Step: 5148 / 9375 (Epoch: 3 / 5, Batch: 1398 / 1875)
------------------------------------------------------------
Generator: 0.007101232185959816
Adversariat: -0.00035530119203031063
Adversariat_fake: -0.007101231720298529
Adversariat_real: 0.006390629336237907
RealFakeRatio: -0.8999325037002563
Time left: ~7.781 minutes (Steps remaining: 4227).
Step: 5616 / 9375 (Epoch: 3 / 5, Batch: 1866 / 1875)
------------------------------------------------------------
Generator: 0.005256102420389652
Adversariat: 8.794013410806656e-07
Adversariat_fake: -0.005256102420389652
Adversariat_real: 0.005257861223071814
RealFakeRatio: -1.000334620475769
Time left: ~6.872 minutes (Steps remaining: 3759).
------------------------------------------------------------
EPOCH: 4
------------------------------------------------------------
Step: 6084 / 9375 (Epoch: 4 / 5, Batch: 459 / 1875)
------------------------------------------------------------
Generator: 0.006300562992691994
Adversariat: -0.00032224133610725403
Adversariat_fake: -0.006300562992691994
Adversariat_real: 0.005656080320477486
RealFakeRatio: -0.8977103233337402
Time left: ~5.958 minutes (Steps remaining: 3291).
Step: 6552 / 9375 (Epoch: 4 / 5, Batch: 927 / 1875)
------------------------------------------------------------
Generator: 0.007304485887289047
Adversariat: -0.0003767390735447407
Adversariat_fake: -0.007304485887289047
Adversariat_real: 0.006551007740199566
RealFakeRatio: -0.8968471884727478
Time left: ~5.09 minutes (Steps remaining: 2823).
Step: 7020 / 9375 (Epoch: 4 / 5, Batch: 1395 / 1875)
------------------------------------------------------------
Generator: 0.005823986139148474
Adversariat: -4.1250837966799736e-05
Adversariat_fake: -0.005823986139148474
Adversariat_real: 0.005741484463214874
RealFakeRatio: -0.9858341813087463
Time left: ~4.215 minutes (Steps remaining: 2355).
Step: 7488 / 9375 (Epoch: 4 / 5, Batch: 1863 / 1875)
------------------------------------------------------------
Generator: 0.006456256844103336
Adversariat: -0.0003023564349859953
Adversariat_fake: -0.006456256844103336
Adversariat_real: 0.005851543974131346
RealFakeRatio: -0.9063369035720825
Time left: ~3.374 minutes (Steps remaining: 1887).
------------------------------------------------------------
EPOCH: 5
------------------------------------------------------------
Step: 7956 / 9375 (Epoch: 5 / 5, Batch: 456 / 1875)
------------------------------------------------------------
Generator: 0.006882342044264078
Adversariat: -0.00023832428269088268
Adversariat_fake: -0.006882342044264078
Adversariat_real: 0.006405693478882313
RealFakeRatio: -0.9307432770729065
Time left: ~2.524 minutes (Steps remaining: 1419).
Step: 8424 / 9375 (Epoch: 5 / 5, Batch: 924 / 1875)
------------------------------------------------------------
Generator: 0.007469529751688242
Adversariat: -0.0005542321596294641
Adversariat_fake: -0.007469530217349529
Adversariat_real: 0.006361065898090601
RealFakeRatio: -0.8516018986701965
Time left: ~1.691 minutes (Steps remaining: 951).
Step: 8892 / 9375 (Epoch: 5 / 5, Batch: 1392 / 1875)
------------------------------------------------------------
Generator: 0.007559577003121376
Adversariat: -0.00046766363084316254
Adversariat_fake: -0.007559577003121376
Adversariat_real: 0.006624249741435051
RealFakeRatio: -0.8762725591659546
Time left: ~0.856 minutes (Steps remaining: 483).
Step: 9360 / 9375 (Epoch: 5 / 5, Batch: 1860 / 1875)
------------------------------------------------------------
Generator: 0.008250530809164047
Adversariat: -6.311200559139252e-05
Adversariat_fake: -0.008250530809164047
Adversariat_real: 0.008124306797981262
RealFakeRatio: -0.9847010970115662
Time left: ~0.027 minutes (Steps remaining: 15).
###Markdown
Investigate the results and loss curves.
###Code
samples, losses = gan.get_training_results()
fig, axs = plot_losses(losses)
print(samples.shape)
fixed_labels = np.argmax(gan.fixed_labels.cpu().detach().numpy(), axis=1)
fig, axs = plot_images(samples.reshape(-1, 32, 32), n=9, labels=fixed_labels)
###Output
_____no_output_____
###Markdown
Now we want to generate new images and have control over the number of generated images. Note that the `get_training_results` returns as many images as were specified with the `fixed_noise_size` argument in the constructor when creating the GAN.
###Code
my_labels = np.zeros(shape=(10, 10))
np.fill_diagonal(my_labels, 1)
new_samples = gan.generate(y=my_labels)
print(new_samples.shape)
fig, axs = plot_images(new_samples.reshape(-1, 32, 32), labels=list(range(10)))
###Output
_____no_output_____ |
code/algorithms/course_udemy_1/Array Sequences/Array Sequences Interview Questions/Array Sequence Interview Questions - PRACTICE/Largest Continuous Sum .ipynb | ###Markdown
Largest Continuous Sum

Problem

Given an array of integers (positive and negative) find the largest continuous sum.

Solution

Fill out your solution below:
###Code
def large_cont_sum(arr):
if len(arr) == 0:
return 0
    max_num = current_sum = arr[0]  # do not name a variable `max` or `sum`: shadowing the built-in leads to "TypeError: 'int' object is not callable"
    for n in arr[1:]:
        current_sum = max(current_sum + n, n)  # either extend the current run or start a new one at n
        max_num = max(current_sum, max_num)    # keep the best sum seen so far
    return max_num
large_cont_sum([1,2,-1,3,4,10,10,-10,-1])
###Output
_____no_output_____
###Markdown
____Many times in an interview setting the question also requires you to report back the start and end points of the sum. Keep this in mind and see if you can solve that problem; we'll see it in the mock interview section of the course! A possible extension that also tracks the indices is sketched after the test cases below.

Test Your Solution
###Code
from nose.tools import assert_equal
class LargeContTest(object):
def test(self,sol):
assert_equal(sol([1,2,-1,3,4,-1]),9)
assert_equal(sol([1,2,-1,3,4,10,10,-10,-1]),29)
assert_equal(sol([-1,1]),1)
print ('ALL TEST CASES PASSED')
#Run Test
t = LargeContTest()
t.test(large_cont_sum)
###Output
ALL TEST CASES PASSED
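###Markdown
The solution above only returns the value of the best sum. Here is one possible extension (a sketch, not the course's official solution) that also reports the start and end indices of the winning run:

```python
def large_cont_sum_with_bounds(arr):
    # Returns (best_sum, start_index, end_index) for the largest continuous sum.
    if len(arr) == 0:
        return 0, None, None
    best_sum = current_sum = arr[0]
    best_start = best_end = current_start = 0
    for i, n in enumerate(arr[1:], start=1):
        if current_sum + n < n:          # starting fresh at i beats extending the old run
            current_sum = n
            current_start = i
        else:
            current_sum += n
        if current_sum > best_sum:
            best_sum = current_sum
            best_start, best_end = current_start, i
    return best_sum, best_start, best_end

print(large_cont_sum_with_bounds([1, 2, -1, 3, 4, 10, 10, -10, -1]))  # (29, 0, 6)
```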
|
01 python/Lecture_03.ipynb | ###Markdown
Lecture 3

Homework:
* solve the Yandex contest https://official.contest.yandex.ru/contest/20310/enter/ by October 7
* 3_1 regexp problems - do the problems
* 2020_DPO_3_0_strings_methods_problems -- clarify the last 2 problems
    * why 1 in the next-to-last one - an error in the problem statement
    * is the last problem stated correctly? skip it
* regexr.com
* https://leetcode.com/
* https://www.hackerrank.com/

Sets (set)
###Code
primes = {2, 3, 5, 7}
animals = {"cat", "dog", "horse", 'cat'}
print(primes)
print(animals)
len({1, 2, 9})
5 in primes
def test(set_in, value=3):
for i in set_in:
if value == i:
return(True)
return(False)
test(primes, 2)
a = {1, 2, 3, 4}
b = {3, 4, 5, 6}
c = {2, 3}
###Output
_____no_output_____
###Markdown
Is c a subset of a?
###Code
print(c <= a)
print(c < a)
print(a.intersection(b))
print(a.union(b)) # union
print(a|b)
# subset check: is c a subset of b?
print(c <= b) # not a subset, since b does not contain 2
print(a >= c)
print(a | b) # union: a.union(b), aka a + b
print(a & b) # intersection: a.intersection(b)
print(a - b) # set difference (everything in a that is not in b): a.difference(b)
print(a ^ b) # symmetric difference (union without the intersection)
c = a.copy() # copy a set; set(a) also works
print(c)
###Output
False
True
{1, 2, 3, 4, 5, 6}
{3, 4}
{1, 2}
{1, 2, 5, 6}
{1, 2, 3, 4}
###Markdown
Modifying sets
###Code
c.add('dog')
print(c)
c.discard('dog')
print(c)
s = c.pop()
print(s)
print(c)
###Output
1
{2, 3, 4}
###Markdown
A dictionary has keys and values
###Code
d = {'ivanov': {1,3,5}}
type(d)
d['ivanov']
a = dict()
type(a)
a[1] = 'first_element content'
a[2] = 'second element'
print(a)
a['last'] = 'last element'
###Output
_____no_output_____
###Markdown
A tuple can be a dictionary key
###Code
a[(1,3)] = 'identifier made from a tuple'
print(a)
s = a.pop(1)
print(s)
print(a)
###Output
first_element content
{2: 'second element', 'last': 'last element', (1, 3): 'identifier made from a tuple'}
###Markdown
Copying a dictionary
###Code
d4 = dict(a)
d4 == a
d4['last']
d4['last'] *= 2
print(d4)
d4.get(2)
d4.get(1) # returns None if the key is missing
d4.get(1, 'missing') # specify what to return when the key is missing
d4
2 in d4
'second element' not in d4 # testing for a key
del d4[(1, 3)]
d4
print(d4.values())
print(d4.keys())
print(d4.items())
###Output
dict_values(['second element', 'last elementlast element'])
dict_keys([2, 'last'])
dict_items([(2, 'second element'), ('last', 'last elementlast element')])
###Markdown
Updating dictionaries
###Code
dict1 = {'a': 1, 'b': 2}
dict2 = {'c': 3, 'b': 4}
dict3 = dict1.copy()
print(dict1)
print(dict3)
dict3.update(dict2) # overwrites existing keys with the most recent value
print(dict3)
###Output
{'a': 1, 'b': 2}
{'a': 1, 'b': 2}
{'a': 1, 'b': 4, 'c': 3}
###Markdown
**d means "treat the key-value pairs in the dictionary as additional named arguments to this function call."
###Code
dict1 = {'a': 1, 'b': 2}
dict2 = {'c': 3, 'd': 4}
dict3 = {**dict1, **dict2} #get values from 1 and 2 items and paste them as elements into dict3 function call
print(dict3)
###Output
{'a': 1, 'b': 2, 'c': 3, 'd': 4}
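###Markdown
The cell above uses ** inside a dict literal to merge dictionaries. The same unpacking can also feed a dictionary's key-value pairs into a function call as named arguments, which is what the description above refers to:

```python
def describe(a, b, c=0):
    # a, b, c arrive as ordinary named arguments
    print(a, b, c)

params = {'a': 1, 'b': 2, 'c': 3}
describe(**params)  # equivalent to describe(a=1, b=2, c=3)
```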
###Markdown
if else
###Code
num = int(input())
if (num % 3 == 0) and (num % 5 == 0):
    print('divisible by both 3 and 5')
elif num % 3 == 0:
    print('divisible by 3')
elif num % 5 == 0:
    print('divisible by 5')
else:
    print(f'{num} is not divisible by 3 or 5')
###Output
15
|
.ipynb_checkpoints/MSDS_Practicing_with_pandas_4-checkpoint.ipynb | ###Markdown
MSDS 620 Week 3 Lab Natalia Weakly 02/01/2019
###Code
#Imports
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Load data
# Central Government Debt Total, % of GDP
# Countries - members of the OECD (Organisation for Economic Co-operation and Development)
# Data source: World Bank
# https://databank.worldbank.org/data/reports.aspx?source=2&series=GC.DOD.TOTL.GD.ZS&country=#
debt = pd.read_csv('CentralGovtDebt.csv')
#check the size of the data frame
debt.shape
#check the data
debt.head()
debt.tail(10)
###Output
_____no_output_____
###Markdown
The data was loaded as a data frame with 42 observations of 16 variables, but it contains a large number of NaN values, empty rows at the end, and columns (for Y2017 and Y2018) that appear to be completely empty. The data frame requires initial cleanup.
###Code
#Delete empty and non-informative rows in the end
debt.drop(range(37,42), inplace=True)
#check results
debt.tail(10)
#drop first two columns - 'Series Name" and 'Series Code'
debt.drop(['Series Name', 'Series Code'], axis=1, inplace=True)
#Check results
debt.head()
debt.tail()
#Display detailed information about the data frame
debt.info()
###Output
<class 'pandas.core.frame.DataFrame'>
Int64Index: 37 entries, 0 to 36
Data columns (total 14 columns):
Country Name 37 non-null object
Country Code 37 non-null object
1990 [YR1990] 37 non-null object
2000 [YR2000] 37 non-null object
2009 [YR2009] 37 non-null object
2010 [YR2010] 37 non-null object
2011 [YR2011] 37 non-null object
2012 [YR2012] 37 non-null object
2013 [YR2013] 37 non-null object
2014 [YR2014] 37 non-null object
2015 [YR2015] 37 non-null object
2016 [YR2016] 37 non-null object
2017 [YR2017] 37 non-null object
2018 [YR2018] 37 non-null object
dtypes: object(14)
memory usage: 4.3+ KB
###Markdown
The debt.info() output shows that all columns were loaded as strings, and although no null values are reported, a visual inspection of the data frame suggests that multiple missing values were entered as '..'.
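One quick optional check (not part of the original lab) is to count those placeholders per column before replacing them:

```python
# Count the '..' placeholders in each column before converting them to NaN
print((debt == '..').sum())
```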
###Code
#replace '..' with NaN
debt.replace(['..'], np.NaN, inplace=True )
#Check results
debt.head()
debt.info()
###Output
<class 'pandas.core.frame.DataFrame'>
Int64Index: 37 entries, 0 to 36
Data columns (total 14 columns):
Country Name 37 non-null object
Country Code 37 non-null object
1990 [YR1990] 16 non-null object
2000 [YR2000] 10 non-null object
2009 [YR2009] 11 non-null object
2010 [YR2010] 11 non-null object
2011 [YR2011] 11 non-null object
2012 [YR2012] 12 non-null object
2013 [YR2013] 11 non-null object
2014 [YR2014] 11 non-null object
2015 [YR2015] 11 non-null object
2016 [YR2016] 12 non-null object
2017 [YR2017] 0 non-null float64
2018 [YR2018] 0 non-null float64
dtypes: float64(2), object(12)
memory usage: 4.3+ KB
###Markdown
The above output shows that the last two columns contain no non-null values. In addition, there are many missing values in the other columns: each year's data is available for only a limited number of countries - from 11 to 16.
###Code
# Drop the last two columns as no data is available for 2017 and 2018
debt.drop(['2017 [YR2017]', '2018 [YR2018]'], axis=1, inplace=True)
#check results
debt.info()
# Display the table
debt
# Drop records for those countries that have 7 and more annual indicators missing, as trying to impute these values will skew the analysis.
debt.drop([0, 1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 16, 17, 19, 20, 21, 22, 23, 24, 26, 27, 28, 29, 30, 32], inplace=True)
#Display results
debt
#convert columns with annual debt percentages from strings to numeric
#columns to convert to numeric
columns = ['1990 [YR1990]', '2000 [YR2000]', '2009 [YR2009]', '2010 [YR2010]', '2011 [YR2011]', '2012 [YR2012]', '2013 [YR2013]', '2014 [YR2014]', '2015 [YR2015]', '2016 [YR2016]']
for column in columns:
debt[column]= pd.to_numeric(debt[column], errors='ignore')
#check resulting data types
debt.info()
#Display the data frame
debt
#For the purposes of this exercise, use interpolation fo fill in missing values
debt.interpolate(method='linear', axis=0, inplace=True)
#check results
debt
#Reshape data into long format using melt
debt_new= pd.melt(debt, id_vars=['Country Name', 'Country Code'], var_name='Year', value_name='Debt Total, % of GDP')
#preview results
debt_new
#Summary statistics for all debt values for all countries included in the group for all years
debt_new.describe()
###Output
_____no_output_____
###Markdown
Overall, the maximum ratio of central government debt to a country's GDP among all countries included in the data frame during this time period was approximately 197.04% of GDP, and the minimum was 12.25%. The mean central government debt for all countries was about 72.99% of GDP, with 75% of the countries in our sample of OECD countries maintaining a government debt to GDP ratio below 98.5%.
###Code
#Calculate mean values for this group of OECD countries for each year
#group by year
debt_new.groupby('Year').mean()
###Output
_____no_output_____
###Markdown
As shown above, the mean central government debt of these OECD countries, expressed in relation to the size of their economies (their respective GDP), increased over the last two and a half decades.
###Code
#mean central government debt as % of GDP for each country
debt_new.groupby('Country Name').mean()
###Output
_____no_output_____
###Markdown
On average, Japan had the highest debt ratio during 1990-2016 (161.44%), with Switzerland enjoying the lowest mean ratio of 20.89%. The United States' central government debt (mean of 81.09% of GDP) was higher than 50% of the OECD countries included in the dataset.
###Code
#min and max for each country
debt_new.groupby('Country Name').agg(['min', 'max'])
###Output
_____no_output_____
###Markdown
However, as the output above shows, the US reached its maximum central government total debt as % of GDP in 2016 (99.45%), which is above the 75th percentile for the OECD countries.
###Code
#Plot government debt ratios (in % to GDP) for each country
debt_new.groupby('Country Name').plot(x='Year', y='Debt Total, % of GDP', subplots=True)
#Compare all countries on the same plot
fig, ax=plt.subplots()
debt_new.groupby('Country Name').plot(x='Year', y='Debt Total, % of GDP', ax=ax, legend=False)
###Output
_____no_output_____ |
FB15K-237 ETL.ipynb | ###Markdown
Prepare KB triples train/validate/test sets
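The cells below assume that pandas has been imported and that the file-path constants (TRAIN_FILE, TRAIN_CSV_FILE, and so on) and the `add_id_columns` helper are defined earlier in the notebook. Since that setup is not shown in this excerpt, the following is only a hedged sketch of what it might look like; the file names are placeholders (except fb15k_test.csv, which appears in the output below), and the pid/rid construction is a guess based on how those columns are used later (pair ids and relation ids shared across all triple sets):

```python
# Assumed setup (not shown in this excerpt); file paths are placeholders.
import pandas as pd

TRAIN_FILE, VALID_FILE, TEST_FILE = "train.txt", "valid.txt", "test.txt"
TRAIN_CSV_FILE, VALID_CSV_FILE, TEST_CSV_FILE = "fb15k_train.csv", "fb15k_valid.csv", "fb15k_test.csv"

_pair_ids, _rel_ids = {}, {}

def add_id_columns(df):
    # Give every (subj, obj) pair and every relation a stable integer id,
    # shared across all dataframes that pass through this helper.
    df['pid'] = [_pair_ids.setdefault((s, o), len(_pair_ids))
                 for s, o in zip(df['subj'], df['obj'])]
    df['rid'] = [_rel_ids.setdefault(r, len(_rel_ids)) for r in df['rel']]
```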
###Code
train_kb_triples = pd.read_csv(TRAIN_FILE, sep='\t', names=['subj', 'rel', 'obj'])
add_id_columns(train_kb_triples)
print 'Train KB triples:', len(train_kb_triples)
train_kb_triples.to_csv(TRAIN_CSV_FILE, sep='\t', header=True, columns=['subj', 'rel', 'obj', 'pid', 'rid'])
print 'Saved to', TRAIN_CSV_FILE
valid_kb_triples = pd.read_csv(VALID_FILE, sep='\t', names=['subj', 'rel', 'obj'])
add_id_columns(valid_kb_triples)
print 'Validation KB triples:', len(valid_kb_triples)
valid_kb_triples.to_csv(VALID_CSV_FILE, sep='\t', header=True, columns=['subj', 'rel', 'obj', 'pid', 'rid'])
print 'Saved to', VALID_CSV_FILE
test_kb_triples = pd.read_csv(TEST_FILE, sep='\t', names=['subj', 'rel', 'obj'])
add_id_columns(test_kb_triples)
print 'Test KB triples:', len(test_kb_triples)
test_kb_triples.to_csv(TEST_CSV_FILE, sep='\t', header=True, columns=['subj', 'rel', 'obj', 'pid', 'rid'])
print 'Saved to', TEST_CSV_FILE
###Output
Test KB triples: 20466
Saved to fb15k_test.csv
###Markdown
Prepare CVSC datasets
###Code
cvsc_text_triples = pd.read_csv(TEXT_CVSC_FILE, sep='\t', names=['subj', 'rel', 'obj', 'occ'])
add_id_columns(cvsc_text_triples)
print 'Text triples (CVSC):', len(cvsc_text_triples)
cvsc_train_triples = pd.concat([train_kb_triples, cvsc_text_triples], join="outer")
print 'Training triples (CVSC):', len(cvsc_train_triples)
cvsc_train_triples.to_csv(CVSC_TRAIN_CSV_FILE, sep='\t', header=True, columns=['subj', 'rel', 'obj', 'pid', 'rid', 'occ'])
print 'Saved to', CVSC_TRAIN_CSV_FILE
cvsc_entities = cvsc_text_triples['subj'].combine_first(cvsc_text_triples['obj']).drop_duplicates()
cvsc_entities.name = "entity"
print 'Entities:', len(cvsc_entities)
cvsc_entities.to_csv(CVSC_ENTITIES_CSV_FILE, sep='\t', header=True)
print 'Saved to', CVSC_ENTITIES_CSV_FILE
cvsc_pairs = cvsc_train_triples[['subj', 'obj', 'pid']].drop_duplicates()
print 'Entity pairs (CVSC):', len(cvsc_pairs)
cvsc_pairs.to_csv(CVSC_PAIRS_CSV_FILE, sep='\t', header=True, columns=['subj', 'obj', 'pid'])
print 'Saved to', CVSC_PAIRS_CSV_FILE
cvsc_relations = cvsc_train_triples[['rel', 'rid']].drop_duplicates()
print 'Relations (CVSC):', len(cvsc_relations)
cvsc_relations.to_csv(CVSC_RELATIONS_CSV_FILE, sep='\t', header=True, columns=['rel', 'rid'])
print 'Saved to', CVSC_RELATIONS_CSV_FILE
print 'Pairs:', cvsc_train_triples['pid'].max() + 1
print 'Relations:', cvsc_train_triples['rid'].max() + 1
###Output
Pairs: 2995738
Relations: 26154
|
01-Linear_Algebra/Week_04/notebooks/GramSchmidtProcess-SOLVED.ipynb | ###Markdown
Gram-Schmidt process

Instructions

In this assignment you will write a function to perform the Gram-Schmidt procedure, which takes a list of vectors and forms an orthonormal basis from this set. As a corollary, the procedure allows us to determine the dimension of the space spanned by the basis vectors, which is equal to or less than the dimension of the space in which the vectors sit.

You'll start by completing a function for 4 basis vectors, before generalising to when an arbitrary number of vectors are given. Again, a framework for the function has already been written. Look through the code, and you'll be instructed where to make changes. We'll do the first two rows, and you can use this as a guide to do the last two.

Matrices in Python

Remember the structure for matrices in *numpy* is,
```python
A[0, 0]  A[0, 1]  A[0, 2]  A[0, 3]
A[1, 0]  A[1, 1]  A[1, 2]  A[1, 3]
A[2, 0]  A[2, 1]  A[2, 2]  A[2, 3]
A[3, 0]  A[3, 1]  A[3, 2]  A[3, 3]
```
You can access the value of each element individually using,
```python
A[n, m]
```
You can also access a whole row at a time using,
```python
A[n]
```
Building on the last assignment, in this exercise you will need to select whole columns at a time. This can be done with,
```python
A[:, m]
```
which will select the m'th column (starting at zero).

In this exercise, you will need to take the dot product between vectors. This can be done using the @ operator. To dot product vectors u and v, use the code,
```python
u @ v
```
All the code you should complete will be at the same level of indentation as the instruction comment.

How to submit

Edit the code in the cell below to complete the assignment. Once you are finished and happy with it, press the *Submit Assignment* button at the top of this notebook. Please don't change any of the function names, as these will be checked by the grading script. If you have further questions about submissions or programming assignments, here is a [list](https://www.coursera.org/learn/linear-algebra-machine-learning/discussions/weeks/1/threads/jB4klkn5EeibtBIQyzFmQg) of Q&A. You can also raise an issue on the discussion forum. Good luck!
###Code
# GRADED FUNCTION
import numpy as np
import numpy.linalg as la
verySmallNumber = 1e-14 # That's 1×10⁻¹⁴ = 0.00000000000001
# Our first function will perform the Gram-Schmidt procedure for 4 basis vectors.
# We'll take this list of vectors as the columns of a matrix, A.
# We'll then go through the vectors one at a time and set them to be orthogonal
# to all the vectors that came before it. Before normalising.
# Follow the instructions inside the function at each comment.
# You will be told where to add code to complete the function.
def gsBasis4(A) :
    B = np.array(A, dtype=np.float_) # Make B as a copy of A, since we're going to alter its values.
# The zeroth column is easy, since it has no other vectors to make it normal to.
# All that needs to be done is to normalise it. I.e. divide by its modulus, or norm.
B[:, 0] = B[:, 0] / la.norm(B[:, 0])
# For the first column, we need to subtract any overlap with our new zeroth vector.
B[:, 1] = B[:, 1] - B[:, 1] @ B[:, 0] * B[:, 0]
    # If there's anything left after that subtraction, then B[:, 1] is linearly independent of B[:, 0]
# If this is the case, we can normalise it. Otherwise we'll set that vector to zero.
if la.norm(B[:, 1]) > verySmallNumber :
B[:, 1] = B[:, 1] / la.norm(B[:, 1])
else :
B[:, 1] = np.zeros_like(B[:, 1])
# Now we need to repeat the process for column 2.
# Insert two lines of code, the first to subtract the overlap with the zeroth vector,
# and the second to subtract the overlap with the first.
B[:, 2] = B[:, 2] - B[:, 2] @ B[:, 0] * B[:, 0]
B[:, 2] = B[:, 2] - B[:, 2] @ B[:, 1] * B[:, 1]
# Again we'll need to normalise our new vector.
# Copy and adapt the normalisation fragment from above to column 2.
if la.norm(B[:, 2]) > verySmallNumber :
B[:, 2] = B[:, 2] / la.norm(B[:, 2])
else :
B[:, 2] = np.zeros_like(B[:, 2])
# Finally, column three:
# Insert code to subtract the overlap with the first three vectors.
B[:, 3] = B[:, 3] - B[:, 3] @ B[:, 0] * B[:, 0]
B[:, 3] = B[:, 3] - B[:, 3] @ B[:, 1] * B[:, 1]
B[:, 3] = B[:, 3] - B[:, 3] @ B[:, 2] * B[:, 2]
# Now normalise if possible
if la.norm(B[:, 3]) > verySmallNumber :
B[:, 3] = B[:, 3] / la.norm(B[:, 3])
else :
B[:, 3] = np.zeros_like(B[:, 3])
# Finally, we return the result:
return B
# The second part of this exercise will generalise the procedure.
# Previously, we could only have four vectors, and there was a lot of repeating in the code.
# We'll use a for-loop here to iterate the process for each vector.
def gsBasis(A) :
    B = np.array(A, dtype=np.float_) # Make B as a copy of A, since we're going to alter its values.
# Loop over all vectors, starting with zero, label them with i
for i in range(B.shape[1]) :
# Inside that loop, loop over all previous vectors, j, to subtract.
for j in range(i) :
# Complete the code to subtract the overlap with previous vectors.
# you'll need the current vector B[:, i] and a previous vector B[:, j]
B[:, i] = B[:, i] - B[:, i] @ B[:, j] * B[:, j]
# Next insert code to do the normalisation test for B[:, i]
if la.norm(B[:, i]) > verySmallNumber :
B[:, i] = B[:, i] / la.norm(B[:, i])
else :
B[:, i] = np.zeros_like(B[:, i])
# Finally, we return the result:
return B
# This function uses the Gram-schmidt process to calculate the dimension
# spanned by a list of vectors.
# Since each vector is normalised to one, or is zero,
# the sum of all the norms will be the dimension.
def dimensions(A) :
return np.sum(la.norm(gsBasis(A), axis=0))
###Output
_____no_output_____
###Markdown
Test your code before submissionTo test the code you've written above, run the cell (select the cell above, then press the play button [ ▶| ] or press shift-enter).You can then use the code below to test out your function.You don't need to submit this cell; you can edit and run it as much as you like.Try out your code on tricky test cases!
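In addition to the test cases below, one quick sanity check you can run yourself: for a full-rank square input, the columns returned by gsBasis should be orthonormal, so the result should satisfy B.T @ B ≈ I.

```python
# Optional sanity check: orthonormal columns mean B.T @ B is (numerically) the identity
B = gsBasis(np.array([[1,0,2,6],
                      [0,1,8,2],
                      [2,8,3,1],
                      [1,-6,2,3]], dtype=np.float_))
print(np.allclose(B.T @ B, np.identity(4)))
```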
###Code
V = np.array([[1,0,2,6],
[0,1,8,2],
[2,8,3,1],
[1,-6,2,3]], dtype=np.float_)
gsBasis4(V)
# Once you've done Gram-Schmidt once,
# doing it again should give you the same result. Test this:
U = gsBasis4(V)
gsBasis4(U)
# Try the general function too.
gsBasis(V)
# See what happens for non-square matrices
A = np.array([[3,2,3],
[2,5,-1],
[2,4,8],
[12,2,1]], dtype=np.float_)
gsBasis(A)
dimensions(A)
B = np.array([[6,2,1,7,5],
[2,8,5,-4,1],
[1,-6,3,2,8]], dtype=np.float_)
gsBasis(B)
dimensions(B)
# Now let's see what happens when we have one vector that is a linear combination of the others.
C = np.array([[1,0,2],
[0,1,-3],
[1,0,2]], dtype=np.float_)
gsBasis(C)
dimensions(C)
###Output
_____no_output_____ |
notebooks/pangeo/intake_cmip6.ipynb | ###Markdown
Intaking CMIP6
###Code
from intake import open_catalog
cat = open_catalog("https://raw.githubusercontent.com/pangeo-data/pangeo-datastore/master/intake-catalogs/master.yaml")
# list(cat)
import pprint
uni_dict = cat["climate"]["cmip6_gcs"].unique(["source_id"])
cat["climate"]["cmip6_gcs"].unique(["source_id"])["source_id"]["values"]
pprint.pprint(uni_dict["source_id"]["values"], compact=True)
import pprint
uni_dict = cat["climate"]["cmip6_gcs"].unique(["source_id", "experiment_id", "table_id"])
pprint.pprint(uni_dict, compact=True)
# Define our query
query = dict(
variable_id=["ts"],
experiment_id=["historical"],#, "ssp585"],
table_id=["Amon"],
institution_id=["NOAA-GFDL"],
)
ts = cat["climate"]["cmip6_gcs"].search(**query)
ds_d = ts.to_dataset_dict(zarr_kwargs={"consolidated": True})
ds_d
for key in ds_d:
print(key)
ds_d["CMIP.NOAA-GFDL.GFDL-ESM4.historical.Amon.gr1"].sel(time=slice("1958", "2014")).mean("member_id").mean("time").ts.plot()
ds_d["CMIP.NOAA-GFDL.GFDL-CM4.historical.Amon.gr1"].sel(time=slice("1958", "2014")).mean("member_id").mean("time").ts.plot()
for k, ds in ds_d.items():
print(f"dataset key={k}\n\tdimensions={sorted(list(ds.dims))}\n")
###Output
dataset key=CMIP.NOAA-GFDL.GFDL-ESM4.historical.Amon.gr1
dimensions=['bnds', 'lat', 'lon', 'member_id', 'time']
dataset key=CMIP.NOAA-GFDL.GFDL-CM4.historical.Amon.gr1
dimensions=['bnds', 'lat', 'lon', 'member_id', 'time']
###Markdown
CMIP6 preprocessing testing
###Code
from cmip6_preprocessing.preprocessing import rename_cmip6
import dask
cmip6 = cat["climate"]["cmip6_gcs"]
# load a few models to illustrate the problem
query = dict(experiment_id=['piControl'], table_id='Amon',
variable_id='vas', grid_label=['gn', 'gr'],
source_id=['IPSL-CM6A-LR']
)
cmip6_subset = cmip6.search(**query)
cmip6_subset.df['source_id'].unique()
z_kwargs = {'consolidated': True, 'decode_times':False}
with dask.config.set(**{'array.slicing.split_large_chunks': True}):
dset_dict = cmip6_subset.to_dataset_dict(zarr_kwargs=z_kwargs)
coords = [c for c in dset_dict['CMIP.IPSL.IPSL-CM6A-LR.piControl.Amon.gr'].coords]
# IPSL data is a bit of a mess
ds = dset_dict['CMIP.IPSL.IPSL-CM6A-LR.piControl.Oyr.gn']
ds = rename_cmip6(ds)
ds
from cmip6_preprocessing.preprocessing import promote_empty_dims, broadcast_lonlat, replace_x_y_nominal_lat_lon
# check out the previous datasets
ds = dset_dict['CMIP.IPSL.IPSL-CM6A-LR.piControl.Oyr.gn']
ds
ds = promote_empty_dims(ds)
ds
ds = broadcast_lonlat(ds)
ds = replace_x_y_nominal_lat_lon(ds)
ds
ds["o2"].isel(time=200, olevel=0).plot()#.sel(nav_lat=slice(-40, 40)).plot()
def wrapper(ds):
ds = ds.copy()
ds = rename_cmip6(ds)
ds = promote_empty_dims(ds)
ds = broadcast_lonlat(ds)
ds = replace_x_y_nominal_lat_lon(ds)
return ds
# pass the preprocessing directly
with dask.config.set(**{'array.slicing.split_large_chunks': True}):
dset_dict_processed1 = cmip6_subset.to_dataset_dict(zarr_kwargs=z_kwargs,
preprocess=wrapper)
dset_dict_processed1['CMIP.NOAA-GFDL.GFDL-ESM4.piControl.Oyr.gr'].isel(lev=0).mean("time").sel(x=slice(100, 300), y=slice(-30, 30)).o2.plot()
###Output
/srv/conda/envs/notebook/lib/python3.8/site-packages/dask/array/numpy_compat.py:40: RuntimeWarning: invalid value encountered in true_divide
x = np.divide(x1, x2, out)
|
jupyter/runbooks/0.0. Connect to the target system.ipynb | ###Markdown
Connect to the target system

**Objective**: Make a connection to a target system when required. Ask for the name of the target system and the username and password to connect.

Note: would a key file also be required?
###Code
import getpass
target_host=input ("Which is the hostname of the target machine? ")
user=input ("Which is your user to execute admin processes? ")
password = getpass.getpass("Which is the password to use? ")
%store target_host
%store user
%store password
###Output
Stored 'target_host' (str)
Stored 'user' (str)
Stored 'password' (str)
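###Markdown
The cell above only collects and stores the credentials; it does not open a connection itself. As a hedged illustration (assuming SSH access to the target and that the `paramiko` package is available; this is not part of the original runbook), the stored values could be used like this:

```python
import paramiko

# Reuse the values gathered above: target_host, user, password
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())  # accept unknown host keys for this sketch
client.connect(hostname=target_host, username=user, password=password)

stdin, stdout, stderr = client.exec_command("hostname")  # simple connectivity check
print(stdout.read().decode())
client.close()
```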
|
BingImageSearchAPI.ipynb | ###Markdown
Call and response: your first Bing Image Search query in PythonThe Bing Image Search API provides an experience similar to Bing.com/Images by letting you send a user search query to Bing and get back a list of relevant images.This walkthrough demonstrates a simple example of calling into the Bing Image Search API and post-processing the resulting JSON object. For more information, see [Bing Image Search documentation](https://docs.microsoft.com/en-us/rest/api/cognitiveservices/bing-images-api-v7-reference).You can run this example as a Jupyter notebook on [MyBinder](https://mybinder.org) by clicking on the launch Binder badge: [](https://mybinder.org/v2/gh/Microsoft/cognitive-services-notebooks/master?filepath=BingImageSearchAPI.ipynb) PrerequisitesYou must have a [Cognitive Services API account](https://docs.microsoft.com/azure/cognitive-services/cognitive-services-apis-create-account) with **Bing Search APIs**. The [free trial](https://azure.microsoft.com/try/cognitive-services/?api=bing-web-search-api) is sufficient for this quickstart. You need the access key provided when you activate your free trial, or you may use a paid subscription key from your Azure dashboard. Running the walkthroughTo continue with the walkthrough, set `subscription_key` to your API key for the Bing API service.
###Code
subscription_key = None
assert subscription_key
###Output
_____no_output_____
###Markdown
Next, verify that the `search_url` endpoint is correct. At this writing, only one endpoint is used for Bing search APIs. If you encounter authorization errors, double-check this value against the Bing search endpoint in your Azure dashboard.
###Code
search_url = "https://api.cognitive.microsoft.com/bing/v7.0/images/search"
###Output
_____no_output_____
###Markdown
Set `search_term` to look for images of puppies.
###Code
search_term = "puppies"
###Output
_____no_output_____
###Markdown
The following block uses the `requests` library in Python to call out to the Bing search APIs and return the results as a JSON object. Observe that we pass in the API key via the `headers` dictionary and the search term via the `params` dictionary. To see the full list of options that can be used to filter search results, refer to the [REST API](https://docs.microsoft.com/en-us/rest/api/cognitiveservices/bing-images-api-v7-reference) documentation.
###Code
import requests
headers = {"Ocp-Apim-Subscription-Key" : subscription_key}
params = {"q": search_term, "license": "public", "imageType": "photo"}
response = requests.get(search_url, headers=headers, params=params)
response.raise_for_status()
search_results = response.json()
###Output
_____no_output_____
###Markdown
The `search_results` object contains the actual images along with rich metadata such as related items. For example, the following line of code can extract the thumbnail URLs for the first 16 results.
###Code
thumbnail_urls = [img["thumbnailUrl"] for img in search_results["value"][:16]]
###Output
_____no_output_____
###Markdown
Then, we can use the `PIL` library to download the thumbnail images and the `matplotlib` library to render them on a $4 \times 4$ grid.
###Code
%matplotlib inline
import matplotlib.pyplot as plt
from PIL import Image
from io import BytesIO
f, axes = plt.subplots(4, 4)
for i in range(4):
for j in range(4):
image_data = requests.get(thumbnail_urls[i+4*j])
image_data.raise_for_status()
image = Image.open(BytesIO(image_data.content))
axes[i][j].imshow(image)
axes[i][j].axis("off")
###Output
_____no_output_____ |
DAT263x/DATA263.ipynb | ###Markdown
Supervised Learning, Regression: a regression model predicts a numeric value, i.e. Function(features) = outcome/label; for example F(age, weight, heart rate, duration) = calories burnt. The data is split into train data, evaluation data and test data. Residual / error: a residual is the difference between the predicted and actual values (residual = score - label, i.e. predicted - actual), and the residuals tell us something about the level of error in the model. To measure error in the model there are absolute measures of error, namely Root-Mean-Square Error (RMSE) and Mean Absolute Error (MAE); relative measures of error (metrics where the closer to 0 the error, the better the model), namely Relative Absolute Error (RAE) and Relative Squared Error (RSE); and the coefficient of determination, which we sometimes call R squared (in this case a value closer to 1 indicates a good fit for the model). Classification: a model we can use to predict which class, or category, something belongs to. Binary classification: 0 or 1. It is evaluated with a confusion matrix and the metrics accuracy, precision, recall, true positive rate and false positive rate, plus the ROC chart and the AUC. Unsupervised learning: no known label value, finding similarities, e.g. clustering with K-means (true center vs. centroid). To measure this: 1. we can compare the average distance between the cluster centers with the average distance between the points in the cluster and their centers; clusters that maximize this ratio have the greatest separation; 2. we can also use the ratio of the average distance between clusters and the maximum distance between the points and the centroid of the cluster; 3. another way we could evaluate the results of a clustering algorithm is to use a method called principal component analysis, or PCA. The data frame used below was loaded from an Azure ML workspace: from azureml import Workspace; ws = Workspace(); experiment = ws.experiments['c6b9c73741954b789c7a4b37b1a003aa.f-id.351580e8ab314144933d9e9f697872eb']; ds = experiment.get_intermediate_dataset(node_id='eceea19d-c9d5-4b04-aa17-691ee4c54271-78', port_name='Results dataset', data_type_id='GenericCSV'); frame = ds.to_dataframe()
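As a quick illustration of the regression error metrics listed above, here is a minimal sketch added for illustration only: the numbers are made up and scikit-learn is assumed to be available (it is not used elsewhere in this notebook).
###Code
# Added example (toy numbers): computing RMSE, MAE and R squared with scikit-learn
import numpy as np
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score

y_true = np.array([230.0, 180.0, 310.0, 95.0])   # actual calories burnt (made-up values)
y_pred = np.array([225.0, 190.0, 300.0, 110.0])  # model predictions (made-up values)

rmse = np.sqrt(mean_squared_error(y_true, y_pred))  # Root-Mean-Square Error
mae = mean_absolute_error(y_true, y_pred)           # Mean Absolute Error
r2 = r2_score(y_true, y_pred)                       # coefficient of determination (R squared)
print(rmse, mae, r2)
###Output
_____no_output_____
###Markdown
The pair plot below explores the relationships between the numeric columns of the `frame` data frame.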
###Code
%matplotlib inline
import seaborn as sns
num_cols = ["Age","Height","Weight", "Duration", "Heart_Rate","Body_Temp","Calories"]
sns.pairplot(frame[num_cols], size=2)
###Output
_____no_output_____ |
Mathematics/CombinedLogLaw/combined-log-law.ipynb | ###Markdown
 Logarithmic Laws
###Code
from IPython.display import HTML
HTML('''<script>
code_show=true;
function code_toggle() {
if (code_show){
$('div.input').hide();
} else {
$('div.input').show();
}
code_show = !code_show
}
$( document ).ready(code_toggle);
</script>
<form action="javascript:code_toggle()"><input type="submit" value="Click here to toggle on/off the raw code."></form>''')
###Output
_____no_output_____
###Markdown
Introduction:Logarithms are the inverse operation to exponentials. They are useful because they change the arithmetic operations of multiplication, division, and powers into addition, subtraction, and products. While modern calculators can do all these operations quickly, for us mere humans logarithms can be useful for doing quick approximations in our heads. Motivation:Going about our day, we often run into powers of ten, when we see kilograms of food in a grocery store (1000 grams), megawatts of power from an electrical generator (1,000,000 watts) or gigabytes of memory in our computer (1,000,000,000 bytes). It is the **power** of ten that is important, and the logarithm captures this idea with the formulas$$ \log(10) = 1$$$$ \log(1000) = 3$$$$ \log(1,000,000) = 6$$$$ \log(1,000,000,000) = 9.$$The logarithm of a number $x$ is defined as the power $n$ it takes so that $$x = 10^n.$$ So, for instance, since $1000 = 10^3$, we know that $\log(1000) = 3,$ as indicated in the list above.For numbers that aren't integer powers of 10, the logarithm is still defined by the above formula, where $n$ can be any real number solving $x = 10^n$. For instance, you might guess that $\log(5000)$ is somewhere between 3 and 4, since the number 5000 is halfway between $10^3 = 1000$ and $10^4 = 10,000$. You might even guess that $\log(5000) = 3.5$, which is not a bad approximation: in fact, a calculator shows that$$\log(5000) = 3.69897...,$$which is the same as saying$$5000 = 10^{3.69897...}.$$We can also take logarithms of small numbers, like this:$$\log(0.01) = \log\left(\frac{1}{100}\right) = \log(10^{-2}) = -2.$$But you cannot take logarithms of negative numbers. (Unless you are willing to learn about something called complex numbers!) Base for logarithm:In the examples above, we worked with powers of ten, so ten is called the **base** for the logarithm.We can work with other bases. For instance, with computers we often work with power of two. A KB of memory is actually $1024 = 2^{10}$ bytes. If you aren't sure about this, multiply out $2\times 2 \times 2 \times \ldots \times 2$ with ten 2's, to see you get $1024= 2^{10}.$A MB of memory is $1,048,576 = 2^{20}$ bytes, or just over a million bytes. A GB is $1073741824 = 2^{30}$ bytes, or just over a billion bytes. (It's a funny coincidence that $10^3 \approx 2^{10}$ so that kilo =1000 is about the same as Kilo = 1024.)We write this down in logarithm form, adding a subscript to keep track of the base. So$$ \log_2(1024) = 10$$$$ \log_2(1048576) = 20$$$$ \log_2(1073741824) = 30.$$In general, the number $\log_2(x)$ is defined as the solution to $$x = 2^n.$$Logarithms can be defined with any number $B$ as a base, provided $B$ is positive and not equal to one. The function is then written as $\log_B(x).$ Three important bases:In practice, there are only three log functions that occur in most of math and science:$$\log_2(x), \log_{10}(x), \mbox{ and } \log_e(x),$$which have bases 2, 10 and $e$, respectively, where $e = 2.71...$ is the natural exponential that occurs in calculus. The base ten logarithm $\log_{10}(x)$ occurs so often that it is sometimes abbreviated as $\log(x)$, as we did in the first section of this notebook.The base $e$ logarithm is called the natural log, written $\ln(x)$. The natural logarithm arises in calculus,where it is often denoted simply as $\log x$. So one must pay attention to the context when the base is unspecified! Examples:- {1} Can we find $\log_2(4000)$ approximately, without using a calculator?Sure. Here's one way. 
We know that $4 = 2^2$, and that $1000 \approx 2^{10}$. So $4000 \approx 2^2 \times 2^{10} = 2^{12}$.So we conclude$$ \log_2(4000) \approx 12.$$A quick check with a calculator shows $\log_2(4000) = 11.96578...$ so that was a pretty good approximation!- {2} Can we find $\log(\pi)$ approximately?Well, our friends the ancient Egyptians thought that $\pi$ was the square root of 10. It's not, but that's a pretty good approximation. So we have$$\log(\pi) \approx \log(10^{1/2}) = 1/2.$$In fact, a check with a calculator shows $\log(\pi) = 0.49715...$, so again we have a pretty good approximation. Basics of Logarithms:Even though logarithms can seem very complicated, we can look at the basic relationship between logarithms and exponentials in order to simplify these expressions and further enhance our understanding. Before looking deeper into these relationships, we will first identify the main components of a logarithmic function. Logarithms are written in the following form:$\log_B(x)=m$ where B is the base of the logarithm. Given a number $x$, we define $\log_B(x)=m$ as the solution to the exponential relationship$$x=B^m.$$ Logarithmic LawsThere are 4 main logarithmic laws which help show the relationship between exponential and logarithmic functions.- Product Law: $\log_{B}(x \times y)=\log_{B}(x)+\log_{B}(y)$ - Quotient Law: $\log_{B}( x \div y) =\log_{B}(x)-\log_{B}(y)$- Power Law: $\log_{B}(x^p)=p\times \log_B(x)$- Changing Base Rule: $\log_{B}(x)=\frac{\log_C(x)}{\log_C(B)}$ Background: Exponential LawsSince logarithms are closely related to exponents, we will be using exponential laws when deriving logarithmic laws. Exponential Laws state: - $B^m \times B^n=B^{m+n} \quad (1) $- $\frac{B^m}{B^n}=B^{m-n} \quad \quad \; \;\;\,(2)$- $(B^m)^n=B^{mn} \quad \quad \,(3)$- $(BC)^m=B^m C^m \quad \,(4)$We will be referring to these laws throughout the program using the number in the brackets above.
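As a quick check, added for illustration and not part of the original worksheet, the values quoted above can be verified with Python's `math` module:
###Code
# Added check: the logarithm values quoted in the text above
from math import log, log2, log10, pi

print(log2(4000))    # about 11.97, close to the estimate of 12
print(log10(pi))     # about 0.497, close to the estimate of 1/2
print(log10(5000))   # about 3.699
print(log(1024, 2))  # exactly 10.0, since 1024 = 2**10
###Output
_____no_output_____
###Markdown
The next cell imports the plotting libraries and defines the helper functions and sliders used in the graphical demonstrations that follow.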
###Code
from IPython.display import display, Latex, clear_output
from math import log
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
%matplotlib inline
import numpy as np
import ipywidgets as widgets
from ipywidgets import interact, interactive, fixed, interact_manual,IntSlider, Output, VBox, HBox, Label
from abc import ABCMeta, abstractmethod
class Logarithm:
#############################################
    #    Description: stores the helper functions used as shortcuts when graphing the logarithmic functions.
#############################################
@staticmethod
    #A static method does not depend on instance state.
    #In this case, each helper simply transforms the arguments it is given.
def log(m,b,x):
#########################################
# Function: y= log(mx)
#
# @Args:
# m: constant inside the logarithm
# b: base of the logarithm
# x: vector with all of the x-coordinates
#
# @Returns:
# y: y-coordinates of the graph based on the x-coordinates and function
#
#########################################
i=x*m
return [log(y,b) for y in i]
@staticmethod
def log_exp(r,m,b,x):
#########################################
# Function: y=log((mx)^r)
#
# @Args:
# m: constant inside the logarithm
# b: base of the logarithm
# x: vector with all of the x-coordinates
        #    r: exponent within the logarithm
#
# @Returns:
# y: y-coordinates of the graph based on the x-coordinates and function
#
#########################################
x= (x*m)**r
return [log(y,b) for y in x]
@staticmethod
def constant_x_log(r,m,b,x):
#########################################
# Function: y=r*log(mx)
#
# @Args:
# r: constant multiplied by the logarithm
# m: constant inside the logarithm
# b: base of the logarithm
# x: vector with all of the x-coordinates
#
# @Returns:
# y: y-coordinates of the graph based on the x-coordinates and function
#
#########################################
x= x*m
return [r*log(y,b) for y in x]
@staticmethod
def division_of_logs(m,n,b,x):
#########################################
        # Function: y=log_n(mx)/log_n(bx)
        #
        # @Args:
        #    m: constant inside the numerator's logarithm
        #    b: constant inside the denominator's logarithm
        #    n: base of both logarithms
# x: vector with all of the x-coordinates
#
# @Returns:
# y: y-coordinates of the graph based on the x-coordinates and function
#
#########################################
y1=Logarithm.log(m,n,x)
y2=Logarithm.log(b,n,x)
y=np.divide(y1,y2)
return y
#########################################
# Variables:
# base - Value of the base of the logarithms
# - Over the coarse of the program, we will set the base to be 10.
# x - The range of numbers that are shown on the x-axis for the graphical proofs
#########################################
base=10
x=np.linspace(1,10)
###Output
_____no_output_____
###Markdown
Product LawThe first law we are looking at is the Product Law. This is used when finding the sum of two logarithmic functions with the same base. The law states that - $\log_{B}(xy)=\log_{B}x+\log_{B}y$. An example- $\log(100\times 1000) = \log(100) + \log(1000)$ or equivalently- $\log(100,000) = 5 = 2 + 3.$ Mathematical proofWe will look at the mathematical proof. It may look complicated, however, it can simply be broken down. First we fix quantities $x,y$ and then define- $p=\log_B x$ and $q=\log_B y$.The equivalent exponential forms are - $B^p=x$ and $B^q=y$.We take the product of these two equations to obtain- $B^p \times B^q=x \times y$, and from the Exponential Law (1), we can get the equivalent expression- $B^{p+q}=x \times y$.We apply log to both sides- $\log_B(B^{p+q})=\log_B(x \times y),$ and then by the definition of a logarithm, we have - $p+q=\log_B(x \times y)$.Since we know $ p=\log_B x$ and $ q=\log_B y$, we obtain- $\log_{B}x+\log_{B}y = \log_{B}(x \times y).$That completes the mathematical proof of the product law. Graphical DemonstrationAs we know, the product law states: $\log_{B}x+\log_{B}y = \log_{B}(x \times y).$ To go about this, we introduce a parameter $t$ that allows us to trace the graph of the logarithm function. We also introduce two constant integers, $m$ and $n$.We let $x=mt$ and $y=nt$ and set the base $B$ to 10, abbreviating $\log_{10}(x)$ as $\log(x)$.For these values of $x$ and $y$, the product law becomes $\log(mt)+\log(nt) = \log(mnt^2)$For the graphical demonstration, we graph the three terms in the above equation separately with respect to $t$. When looking at a $t$ value, the sum of the corresponding values of the functions on the left side of the equation should be equivalent to the function on the right side of the equation, thus providing a demonstration of the Product Law.
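Before the interactive version, here is a single numeric spot check, added for illustration, of the parametrised form above with the default slider values $m=6$, $n=3$ and $t=5$:
###Code
# Added spot check of the Product Law with m=6, n=3, t=5 (the default slider values)
from math import log10, isclose

lhs = log10(6 * 5) + log10(3 * 5)   # log(mt) + log(nt)
rhs = log10(6 * 3 * 5 ** 2)         # log(m*n*t**2)
print(lhs, rhs, isclose(lhs, rhs))
###Output
_____no_output_____
###Markdown
The interactive demonstration below plots each term separately and lets you vary $m$, $n$ and $t$ with the sliders.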
###Code
class ProductLaw():
# Create 2x2 sub plots
gs = gridspec.GridSpec(2, 2)
axis=5
x=6
y=3
x_axis_bar = widgets.IntSlider(
value=5,
min=1,
max=10,
step=1,
description='$t$',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d'
)
x_bar = widgets.IntSlider(
value=x,
min=1,
max=10,
step=1,
description='$m$',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d'
)
y_bar = widgets.IntSlider(
value=y,
min=1,
max=10,
step=1,
description='$n$',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d'
)
def create_graph():
#########################################
# Description: generates a graph in order to prove Product Law
#
# @Args: Inputs are for the variables shown in ProductLaw
# x-coordinate: based on a sliding bar in range 1-10
# M constant: based on a sliding bar in range 1-10
# N constant: based on a sliding bar in range 1-10
#
# @Return: graph for graphical proof as well as the y-coordinate corresponding to the graphed points
#
#########################################
#Plot the 3 log functions from the left and right side of the Product Law
ax1 = plt.subplot(ProductLaw.gs[0, 0]) # row 0, col 0
ax1.plot(x,Logarithm.log(ProductLaw.x,base,x),'-b',label='$y=\log_{B}(x)$')
p1=log(ProductLaw.x*ProductLaw.axis,base)
ax1.plot(ProductLaw.axis,p1,'ob')
ax1.annotate('%1.3f' %p1,xy=(ProductLaw.axis,p1),xytext=(-10,-20),textcoords='offset points')
ax1.set_title('Left side of Product law')
plt.ylabel('$\log_{B}(mt)$')
ax1.yaxis.set_label_position("left")
plt.grid()
ax2 = plt.subplot(ProductLaw.gs[1, 0])
ax2.plot(x,Logarithm.log(ProductLaw.y,base,x),'-g',label='$y=\log_{B}(y)$')
p2=log(ProductLaw.y*ProductLaw.axis,base)
ax2.plot(ProductLaw.axis,p2,'og')
ax2.annotate('%1.3f' %p2,xy=(ProductLaw.axis,p2),xytext=(-10,-20),textcoords='offset points')
plt.ylabel('$\log_{B}(nt)$')
ax2.yaxis.set_label_position("left")
plt.xlabel('$t$')
plt.grid()
ax3 = plt.subplot(ProductLaw.gs[:, 1])
ax3.plot(x,Logarithm.log(ProductLaw.x*ProductLaw.y,base,x**2),'-r',label='$y=\log_{B}(xy)$')
p3=log(ProductLaw.x*ProductLaw.y*(ProductLaw.axis**2),base)
ax3.plot(ProductLaw.axis,p3,'or')
ax3.annotate('%1.3f' %p3,xy=(ProductLaw.axis,p3),xytext=(-10,-20),textcoords='offset points')
ax3.set_title('Right side of Product Law')
plt.ylabel('$\log_{B}(mnt^2)$')
ax3.yaxis.set_label_position("right")
plt.xlabel('$t$')
plt.grid()
plt.show()
display(Latex('When $m$={1:1d} and $n$={2:1d}'.format(ProductLaw.axis,ProductLaw.x,ProductLaw.y)))
#Display the value of the points to prove that the law is valid
display(Latex('From the marked y-coordinates on the graph above, the points at log({0:1d}$t$), log({1:1d}$t$) and log({2:1d}$t^2$) are at {3:1.3f}, {4:1.3f} and {5:1.3f} respectively'.format(ProductLaw.x,ProductLaw.y,ProductLaw.x*ProductLaw.y,p1, p2, p3)))
display(Latex('{0:1.3f}+{1:1.3f}={2:1.3f}'.format(p1,p2,p1+p2)))
display(Latex('{0:1.3f}={1:1.3f}'.format(p3,p3)))
display(Latex('This means that the left side of the equation equals the right side'))
display(Latex('thus'))
display(Latex(r'$\log_{B}x+\log_{B}y = \log_{B}(x \times y)$'))
def clear_display():
clear_output(wait=True)
display(ProductLaw.x_bar)
display(ProductLaw.y_bar)
display(ProductLaw.x_axis_bar)
ProductLaw.create_graph()
ProductLaw.observe()
def observe():
ProductLaw.x_axis_bar.observe(ProductLaw.xv, names='value')
ProductLaw.x_bar.observe(ProductLaw.x_barv, names='value')
ProductLaw.y_bar.observe(ProductLaw.y_barv, names='value')
#ProductLaw.clear_display()
def xv(value):
ProductLaw.axis=value['new']
ProductLaw.clear_display()
def x_barv(value):
ProductLaw.x=value['new']
ProductLaw.clear_display()
def y_barv(value):
ProductLaw.y=value['new']
ProductLaw.clear_display()
ProductLaw.clear_display()
###Output
_____no_output_____
###Markdown
ResultsIn the mathematical proof, we used the relationship between logarithms and exponents in order to derive the Product Law. Based on the values recorded during the graphical proof, we see that the sum of the two functions on the left-hand side of the equation is equivalent to the function on the right-hand side. Quotient LawThe next law we will be looking at is the Quotient Law. This is used when finding the difference of two logarithmic functions. The law states that- $\log_{B}(x \div y)=\log_{B}x -\log_{B}y$. An example- $\log(1000 \div 100) = \log(1000) - \log(100)$ or equivalently- $\log(10) = 1 = 3 -2.$ Mathematical proofLet's create a proof of the Quotient Law.First, fix quantities $x$ and $y$ and define the values - $ p = \log_B x$ and $ q = \log_B y.$The equivalent exponential forms are- $B^p= x$ and $B^q = y$.Divide these two equations to obtain: - $B^p \div B^q = x \div y.$Using Exponential Law (2), the above equation is equivalent to: - $B^{p-q}=x \div y.$Taking logs, we have- $\log_{B}(B^{p-q}) = \log_B(x\div y) $ which becomes- $p - q = \log_{B}(x \div y).$Recalling our definitions of $p$ and $q$, this becomes- $\log_B x - \log_B y = \log_B(x\div y),$which completes the proof of the Quotient Law. Graphical DemonstrationAs we know, the Quotient Law states: $\log_{B}x-\log_{B}y = \log_{B}(x \div y).$ To go about this, we introduce a parameter $t$ that allows us to trace the graph of the logarithm function. We will also introduce two constant integers, $m$ and $n$. We let $x=mt$ and $y=nt$ and set the base $B$ to 10, abbreviating $\log_{10}(x)$ as $\log(x)$.For these values of $x$ and $y$, the Quotient Law becomes $\log(mt)-\log(nt) = \log\left(\frac{mt}{nt}\right)$which reduces to:$\log(mt)-\log(nt) = \log\left(\frac{m}{n}\right)$For the graphical demonstration, we will graph the three terms in the above equation separately with respect to $t$. When looking at a $t$ value, the difference of the corresponding values of the functions on the left side of the equation should be equivalent to the function on the right side of the equation, thus providing a demonstration of the Quotient Law.
###Code
class QuotientLaw():
# Create 2x2 sub plots
gs = gridspec.GridSpec(2, 2)
axis=5
x=6
y=3
x_axis_bar = widgets.IntSlider(
value=5,
min=1,
max=10,
step=1,
description='x',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d'
)
x_bar = widgets.IntSlider(
value=x,
min=1,
max=10,
step=1,
description='$m$',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d'
)
y_bar = widgets.IntSlider(
value=y,
min=1,
max=10,
step=1,
description='$n$',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d'
)
def create_graph():
#########################################
# Description: generates a graph in order to prove Quotient Law
#
# @Args: Inputs are for the variables shown in Quotient Law
# x-coordinate: based on a sliding bar in range 1-10
# M constant: based on a sliding bar in range 1-10
# N constant: based on a sliding bar in range 1-10
#
# @Return: graph for graphical proof as well as the y-coordinate corresponding to the graphed points
#
#########################################
y_value=np.linspace(QuotientLaw.x/QuotientLaw.y,QuotientLaw.x/QuotientLaw.y)
#Plot the 3 log functions from the left and right side of the Product Law
ax1 = plt.subplot(QuotientLaw.gs[0, 0]) # row 0, col 0
ax1.plot(x,Logarithm.log(QuotientLaw.x,base,x),'-b')
p1=log(QuotientLaw.x*QuotientLaw.axis,base)
ax1.plot(QuotientLaw.axis,p1,'ob')
ax1.annotate('%1.3f' %p1,xy=(QuotientLaw.axis,p1),xytext=(-10,-20),textcoords='offset points')
ax1.set_title('Left side of Quotient Law')
plt.ylabel('$\log(m)$')
plt.grid()
ax2 = plt.subplot(QuotientLaw.gs[1, 0])
ax2.plot(x,Logarithm.log(QuotientLaw.y,base,x),'-g')
p2=log(QuotientLaw.y*QuotientLaw.axis,base)
ax2.plot(QuotientLaw.axis,p2,'og')
ax2.annotate('%1.3f' %p2,xy=(QuotientLaw.axis,p2),xytext=(-10,-20),textcoords='offset points')
plt.ylabel('$\log(n)$')
plt.xlabel('x')
plt.grid()
ax3 = plt.subplot(QuotientLaw.gs[:, 1])
ax3.plot(x,Logarithm.log(1,base,y_value),'-r')
p3=log(QuotientLaw.x/QuotientLaw.y,base)
ax3.plot(QuotientLaw.axis,p3,'or')
ax3.annotate('%1.3f' %p3,xy=(QuotientLaw.axis,p3),xytext=(-10,-20),textcoords='offset points')
ax3.set_title('Right side of Quotient Law')
plt.ylabel(r'$\log(\frac{m}{n})$')
ax3.yaxis.set_label_position("right")
plt.xlabel('x')
plt.grid()
plt.show()
display(Latex('When $m$={1:2.0f} and $n$={2:2.0f}'.format(QuotientLaw.axis,QuotientLaw.x,QuotientLaw.y)))
display(Latex('The y-coordinates at log({0:1.0f}$t$), log({1:1.0f}$t$) and log({2:1.0f}) are at {3:1.3f}, {4:1.3f} and {5:1.3f} respectively'.format(QuotientLaw.x,QuotientLaw.y,QuotientLaw.x/QuotientLaw.y,p1, p2, p3)))
display(Latex('{0:1.3f}-{1:1.3f}={2:1.3f}'.format(p1,p2,p3)))
display(Latex('thus'))
display(Latex(r'$\log(m) - \log(n) = \log(\frac{m}{n})$'))
def clear_display():
clear_output(wait=True)
display(QuotientLaw.x_bar)
display(QuotientLaw.y_bar)
display(QuotientLaw.x_axis_bar)
QuotientLaw.create_graph()
QuotientLaw.observe()
def observe():
QuotientLaw.x_axis_bar.observe(QuotientLaw.x_value, names='value')
QuotientLaw.x_bar.observe(QuotientLaw.xv, names='value')
QuotientLaw.y_bar.observe(QuotientLaw.yv, names='value')
def x_value(value):
QuotientLaw.axis=value['new']
QuotientLaw.clear_display()
def xv(value):
QuotientLaw.x=value['new']
QuotientLaw.clear_display()
def yv(value):
QuotientLaw.y=value['new']
QuotientLaw.clear_display()
QuotientLaw.clear_display()
###Output
_____no_output_____
###Markdown
ResultIn the mathematical proof, we used the relationship between logarithms and exponents as well as exponential laws in order to derive the Quotient Law. When we look at the graphical demonstration, we see that the two functions on the left hand side of the equation resemble very similar curves, while the function on the right hand side remains a constant number. We also see that the difference of the two functions on the left-hand side is equivalent to the function on the right-hand side. Power LawThe next law we will look at is the Power Law. This is used in the case when there is an exponential power inside the logarithmic function. The law states that- $\log_{B}(x^p)=p \times \log_B(x)$. An example- $\log(1000^2) = 2\log(1000) $ or equivalently- $\log(1,000,000) = 6 = 2 \times 3.$ Mathematical Proof First we fix quantities $x$ and $p$ then define- $ m = \log_B (x^p).$The equivalent exponential form is- $B^m=x^p$. Bring each side of the equation to the power of $1/p$ to obtain- $(B^m)^{\frac{1}{p}}=(x^p)^{\frac{1}{p}}.$ By using Exponential Law (3), we can multiply the exponent inside the brackets by the one outside to get- $B^{\frac{m}{p}}= x.$ Apply the log function to both sides to get - $\log_B(B^{\frac{m}{p}})=\log_B(x) $, resulting in - $\frac{m}{p} = \log_B(x).$Multiply by $p$ to obtain- $m = p \times \log_B(x),$ and recalling the definition of $m$, we have- $\log_B(x^p) = p \times \log_B(x).$This completes the proof. Graphical DemonstrationIn this case, there is one function on each of the left and right hand sides of the law. For this reason 2 functions will be graphed. Since they are theoretically equivalent to each other, we can expect that the functions will be identical on the graph. If this is seen on the graph, we can validate the Power Law.As we know, the Power Law states: $\log_B(x^p) = p \times \log_B(x).$ To go about this, we introduce a parameter $t$ that allows us to trace the graph of the logarithm function. We will also introduce a constant integer, $m$. We let $x=mt$ and set the base $B$ to 10, abbreviating $\log_{10}(x)$ as $\log(x)$.For this value of $x$, the Power Law becomes$\log_B((mt)^p) = p \times \log_B(mt)$ For the graphical demonstration, we will graph the two terms in the above equation separately with respect to $t$. When looking at a $t$ value, the function on the left side of the equation should be equivalent to the function on the right side of the equation, thus providing a demonstration of the Power Law.
###Code
class PowerLaw():
# Create 2x2 sub plots
gs = gridspec.GridSpec(1, 2)
x=np.linspace(1,10)
axis=5
x=6
p=2
x_axis_bar = widgets.IntSlider(
value=5,
min=1,
max=10,
step=1,
description='x',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d'
)
x_bar = widgets.IntSlider(
value=x,
min=1,
max=10,
step=1,
description='$m$',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d'
)
p_bar = widgets.IntSlider(
value=p,
min=1,
max=10,
step=1,
description='$p$',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d'
)
def create_graph():
#########################################
# Description: generates a graph in order to prove Power Law
#
# @Args: Inputs are for the variables shown in Power Law
# x-coordinate: based on a sliding bar in range 1-10
# M constant: based on a sliding bar in range 1-10
# N constant: based on a sliding bar in range 1-10
# R exponential constant: based on a sliding bar in range 1-10
#
# @Return: graph for graphical proof as well as the y-coordinate corresponding to the graphed points
#
#########################################
#Plot the 3 log functions from the left and right side of the Product Law
ax1 = plt.subplot(PowerLaw.gs[0,1]) # row 0, col 0
ax1.plot(x,Logarithm.log_exp(PowerLaw.p,PowerLaw.x,base,x),'-g')
p1=log((PowerLaw.x*PowerLaw.axis)**PowerLaw.p,base)
ax1.plot(PowerLaw.axis,p1,'ob')
ax1.annotate('%1.3f' %p1,xy=(PowerLaw.axis,p1),xytext=(-10,-20),textcoords='offset points')
ax1.set_title('Right side of Power law')
        plt.ylabel('$y=\log_{B}((mt)^p)$')
ax1.yaxis.set_label_position("right")
plt.xlabel('x')
plt.grid()
ax2 = plt.subplot(PowerLaw.gs[0, 0])
ax2.plot(x,Logarithm.constant_x_log(PowerLaw.p,PowerLaw.x,base,x),'-b')
p2=PowerLaw.p*log(PowerLaw.x*PowerLaw.axis,base)
ax2.plot(PowerLaw.axis,p2,'og')
ax2.annotate('%1.3f' %p2,xy=(PowerLaw.axis,p2),xytext=(-10,-20),textcoords='offset points')
        plt.ylabel('$y=p \cdot \log_{B}(mt)$')
ax2.yaxis.set_label_position("left")
ax2.set_title('Left side of Power Law')
plt.xlabel('x')
plt.grid()
plt.show()
display(Latex('at $m$={0:1d} and $p$={1:1d}'.format(PowerLaw.x,PowerLaw.p)))
display(Latex(r'We can see that the y-coordinates are labeled on the graph. At the points log(${0:1d}^{1:1d}x$) and {2:1d} $\times$ log({3:1d}$x$) the y-coordinates are {4:1.3f} and {5:1.3f} respectively'.format(PowerLaw.x,PowerLaw.p,PowerLaw.p,PowerLaw.x,p1,p2)))
display(Latex('{0:1.3f}={1:1.3f}'.format(p1,p2)))
display(Latex('thus'))
display(Latex(r'$\log_{B}(x^p)=p \times \log_B(x)$'))
def clear_display():
clear_output(wait=True)
display(PowerLaw.x_bar)
display(PowerLaw.p_bar)
display(PowerLaw.x_axis_bar)
PowerLaw.create_graph()
PowerLaw.observe()
def observe():
PowerLaw.x_axis_bar.observe(PowerLaw.x_value, names='value')
PowerLaw.x_bar.observe(PowerLaw.xv, names='value')
PowerLaw.p_bar.observe(PowerLaw.pv, names='value')
def x_value(value):
PowerLaw.axis=value['new']
PowerLaw.clear_display()
def xv(value):
PowerLaw.x=value['new']
PowerLaw.clear_display()
def pv(value):
PowerLaw.p=value['new']
PowerLaw.clear_display()
PowerLaw.clear_display()
###Output
_____no_output_____
###Markdown
ResultsThe mathematical proof shows that by first converting the logarithmic functions into exponents and then using the exponential laws we can derive the Power Law. When looking at the graph, we see that the function on the left-hand side is equivalent to the one on the right-hand side. Change of Base RuleThis rule is used for changing the base of a logarithmic function, which can be useful for proofs or for comparing certain functions. The law states that: $\log_{B}(x)=\frac{\log_C(x)}{\log_C(B)}$ An example- $\log_8(64) = \frac{\log_2(64)}{\log_2(8)}$ or equivalently- $2 = \frac{6}{3}.$ Mathematical ProofFirst we need to define a variable. In this case, we will use x.- $\text{Let }x=\log_{B}(M)$ When converting this to exponents by using basic logarithmic properties, we get: - $B^x=M$ Next, apply $\log_N$ to both sides of the equation: - $\log_N(B^x)=\log_N(M)$By the Power Law (see above) this can be simplified to: - $x\log_N(B)=\log_N(M)$ Isolating for x: - $x=\frac{\log_N(M)}{\log_N(B)}$ After substituting the value of $x$ we defined earlier, we get:- $\log_{B}(M)=\frac{\log_N(M)}{\log_N(B)}$ DiscussionThe change of base law says that- $\log_B(x) = \frac{\log_C(x)}{\log_C(B)}.$Another way to write this is- $\log_B(x) = \log_C(x)\times \log_B(C).$ (Can you see why?)The point is, the two functions $\log_B(x), \log_C(x)$ are related by a proportionality constant, so we can write$$ \log_B(x) = k\cdot \log_C(x).$$For instance, the two functions $\log_2(x)$ and $\log_{10}(x)$ are the same, up to some constant $k$. Perhaps you can explain why this constant is approximately $10/3$. That is$$\log_2(x) \approx \frac{10}{3} \log_{10}(x).$$Equivalently, $$\log_{10}(x) \approx 0.3 \log_{2}(x).$$(Hint: this has something to do with our discussion of kilos in the first section of this notebook.) EvidenceAs it is hard to graph this rule (there is no good place to put $x$), it will be verified through numeric evidence. We will plug numbers into each side of the equation to calculate the values obtained on each side of the law. Notice that changing the new base value has no effect on the final value.
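As a quick numeric check, added for illustration, of the claim that $\log_2(x) \approx \frac{10}{3} \log_{10}(x)$: the proportionality constant is $\log_2(10) = 1/\log_{10}(2)$, and it is close to $10/3$ because $2^{10} \approx 10^3$.
###Code
# Added check: the change-of-base constant between base 2 and base 10
from math import log2, log10

print(log2(10))                           # about 3.32, the exact constant 1/log10(2)
print(10 / 3)                             # about 3.33, the rough value quoted above
print(log2(1000), 10 * log10(1000) / 3)   # about 9.97 vs 10.0
###Output
_____no_output_____
###Markdown
The cell below plugs numbers into both sides of the Change of Base Rule and compares the results; the sliders change $M$, the original base and the new base.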
###Code
class ChangeOfBase():
#First set random variables
M=5
base=10
new_base=5
def create_graph():
#########################################
# Description: Plugs in numbers to prove Change of Base Rules
#
# @Args: Inputs are for the variables shown in Power Law
# M constant: based on a sliding bar in range 1-10
# base: based on a sliding bar in range 1-10
# new base: based on a sliding bar in range 1-10
#
# @Return: The corresponding value of each side of the equation which result after plugging in the numbers.
#########################################
p1=log(ChangeOfBase.M,ChangeOfBase.base)
p2=log(ChangeOfBase.M,ChangeOfBase.new_base)/ log(ChangeOfBase.base,ChangeOfBase.new_base)
display(Latex('On the left hand side $\log_B(M)$ = {0:1.3f}.'.format(p1)))
display(Latex(r'On the right hand side is $\log_C(M) \div \log_C(B)$ = {0:1.3f}.'.format(p2)))
display(Latex('{0:1.3f} = {1:1.3f}'.format(p1,p2)))
display(Latex('thus'))
display(Latex(r'$\log_{B}(M) = \frac{\log_C(M)}{\log_C(B)}$'))
def clear_display():
clear_output(wait=True)
display(m_box)
display(base_box)
display(new_base_box)
ChangeOfBase.create_graph()
def xv(value):
ChangeOfBase.axis=value['new']
ChangeOfBase.clear_display()
def Mv(value):
ChangeOfBase.M=value['new']
ChangeOfBase.clear_display()
def Basev(value):
ChangeOfBase.base=value['new']
ChangeOfBase.clear_display()
def New_basev(value):
ChangeOfBase.new_base=value['new']
ChangeOfBase.clear_display()
M_bar = widgets.IntSlider(
value=ChangeOfBase.M,
min=1,
max=10,
step=1,
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d'
)
m_box = HBox([Label('M value'), M_bar])
base_bar = widgets.IntSlider(
value=ChangeOfBase.base,
min=2,
max=10,
step=1,
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d'
)
base_box = HBox([Label('Original base value'), base_bar])
new_base_bar = widgets.IntSlider(
value=ChangeOfBase.new_base,
min=2,
max=10,
step=1,
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d'
)
new_base_box = HBox([Label('New base value'), new_base_bar])
ChangeOfBase.clear_display()
M_bar.observe(ChangeOfBase.Mv, names='value')
base_bar.observe(ChangeOfBase.Basev, names='value')
new_base_bar.observe(ChangeOfBase.New_basev, names='value')
###Output
_____no_output_____ |
ipynb/double_descent.ipynb | ###Markdown
###Code
import numpy as np
import pylab as plt
%matplotlib inline
ORDER = 8
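# Added note (not in the original notebook): the cells below fit a fixed-order polynomial
# by unregularised least squares to training sets of varying size N and record the held-out
# squared error (median over many random trials); the vertical line drawn later at
# N = ORDER + 1 marks where the number of polynomial coefficients equals the data size.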
def make_fake_data(N, sigma=0.1):
xtrain = np.random.normal(size=N)
ytrain = xtrain + sigma * np.random.normal(size=N)
xtest = np.random.uniform()
ytest = xtest + sigma * np.random.normal()
return xtrain, ytrain, xtest, ytest
def design_matrix(xs):
A = np.vstack([xs ** k for k in range(ORDER + 1)]).T
return A
def predict(xtrain, ytrain, xtest):
pars = np.linalg.lstsq(design_matrix(xtrain), ytrain, rcond=1e-12)[0]
prediction = design_matrix(xtest) @ pars
return prediction, pars
def resid(xtrain, ytrain, xtest, ytest):
return ytest - predict(xtrain, ytrain, xtest)[0]
np.random.seed(42)
xtr, ytr, xte, yte = make_fake_data(5)
ypr, pars = predict(xtr, ytr, xte)
plt.plot(xtr, ytr, "ko")
plt.plot(xte, yte, "ko", alpha=0.5)
plt.text(xte, yte, " test object")
plt.plot(xte, ypr, "ro")
plt.text(xte, ypr, " prediction at polynomial order {:d}".format(ORDER))
def estimate_mse_with_trials(Ntrain, Ntrial):
dys = np.array([resid(*make_fake_data(Ntrain)) for t in range(Ntrial)])
return np.median(dys ** 2)
print(estimate_mse_with_trials(3, 128))
Ns = np.arange(ORDER // 2, ORDER * 2 + 1)
mses = [estimate_mse_with_trials(N, 8192) for N in Ns]
plt.axvline(ORDER + 1)
plt.plot(Ns, mses, "ko")
plt.ylabel("median squared error polynomial prediction")
plt.xlabel("size of training set N")
plt.title("polynomial order {:d}".format(ORDER))
###Output
_____no_output_____ |
data_steward/analytics/table_metrics/Table_Metrics_part_2.ipynb | ###Markdown
Foreign key references (i.e. visit_occurrence_id in the condition table) should be valid. Person
###Code
print(("There is no _mapping table for person table so I could not separete results by sites "))
###Output
_____no_output_____
###Markdown
gender_concept_id
###Code
######################################
print('Getting the data from the database...')
######################################
foreign_key_df = pd.io.gbq.read_gbq('''
SELECT
gender_concept_id,
concept_name,
COUNT(*) AS cnt
FROM
`{}.unioned_ehr_person` AS p
INNER JOIN
`{}.concept` AS c
ON
p.gender_concept_id=c.concept_id
GROUP BY
1,2
'''.format(DATASET, DATASET,DATASET,DATASET, DATASET,DATASET),
dialect='standard')
print(foreign_key_df.shape[0], 'records received.')
foreign_key_df
foreign_key_df.loc[foreign_key_df["gender_concept_id"]==0,["cnt"]]
#success_rate=100-round(100*(foreign_key_df.loc[foreign_key_df["gender_concept_id"]==0,["cnt"]])/sum(foreign_key_df.iloc[:,2]),1)
#print("success rate for gender_concept_id is: ", success_rate.iloc[0,0])
foreign_key_df
###Output
_____no_output_____
###Markdown
race_concept_id
###Code
######################################
print('Getting the data from the database...')
######################################
foreign_key_df = pd.io.gbq.read_gbq('''
SELECT
race_concept_id,
concept_name,
COUNT(*) AS cnt
FROM
`{}.unioned_ehr_person` AS p
INNER JOIN
`{}.concept` AS c
ON
p.race_concept_id=c.concept_id
GROUP BY
1,2
'''.format(DATASET, DATASET,DATASET,DATASET,DATASET,DATASET),
dialect='standard')
foreign_key_df.shape
print(foreign_key_df.shape[0], 'records received.')
foreign_key_df
#success_rate=100-round(100*(foreign_key_df.loc[foreign_key_df["race_concept_id"]==0,["cnt"]])/sum(foreign_key_df.iloc[:,2]),1)
#print("success rate for race_concept_id is: ", success_rate.iloc[0,0])
###Output
_____no_output_____
###Markdown
ethnicity_concept_id
###Code
######################################
print('Getting the data from the database...')
######################################
foreign_key_df = pd.io.gbq.read_gbq('''
SELECT
ethnicity_concept_id,
concept_name,
COUNT(*) AS cnt
FROM
`{}.unioned_ehr_person` AS p
INNER JOIN
`{}.concept` AS c
ON
p.ethnicity_concept_id=c.concept_id
GROUP BY
1,2
'''.format(DATASET, DATASET,DATASET,DATASET,DATASET,DATASET),
dialect='standard')
foreign_key_df.shape
print(foreign_key_df.shape[0], 'records received.')
foreign_key_df
#success_rate=100-round(100*(foreign_key_df.loc[foreign_key_df["ethnicity_concept_id"]==0,["cnt"]])/sum(foreign_key_df.iloc[:,2]),1)
#print("success rate for ethnicity_concept_id is: ", round(success_rate.iloc[0,0],1))
###Output
_____no_output_____
###Markdown
location_id
###Code
######################################
print('Getting the data from the database...')
######################################
foreign_key_df = pd.io.gbq.read_gbq('''
SELECT
location_id,
COUNT(*) AS total_cnt
FROM
`{}.unioned_ehr_person` AS p
GROUP BY
1
'''.format(DATASET, DATASET,DATASET,DATASET,DATASET,DATASET),
dialect='standard')
foreign_key_df.shape
print(foreign_key_df.shape[0], 'records received.')
foreign_key_df.head()
print("location_id is NULL ")
###Output
_____no_output_____
###Markdown
provider_id
###Code
######################################
print('Getting the data from the database...')
######################################
foreign_key_df = pd.io.gbq.read_gbq('''
SELECT
provider_id,
COUNT(*) AS cnt
FROM
`{}.unioned_ehr_person` AS p
GROUP BY
1
'''.format(DATASET, DATASET,DATASET,DATASET,DATASET,DATASET),
dialect='standard')
foreign_key_df.shape
print(foreign_key_df.shape[0], 'records received.')
foreign_key_df.head()
print("provider_id is NULL ")
###Output
_____no_output_____
###Markdown
care_site_id
###Code
######################################
print('Getting the data from the database...')
######################################
foreign_key_df = pd.io.gbq.read_gbq('''
SELECT
care_site_id,
COUNT(*) AS cnt
FROM
`{}.unioned_ehr_person` AS p
GROUP BY
1
'''.format(DATASET, DATASET,DATASET,DATASET,DATASET,DATASET),
dialect='standard')
foreign_key_df.shape
print(foreign_key_df.shape[0], 'records received.')
foreign_key_df.head()
print("care_site_id is NULL ")
###Output
_____no_output_____
###Markdown
gender_source_concept_id
###Code
######################################
print('Getting the data from the database...')
######################################
foreign_key_df = pd.io.gbq.read_gbq('''
SELECT
gender_source_concept_id,
COUNT(*) AS cnt
FROM
`{}.unioned_ehr_person` AS p
GROUP BY
1
'''.format(DATASET, DATASET,DATASET,DATASET,DATASET,DATASET),
dialect='standard')
foreign_key_df.shape
print(foreign_key_df.shape[0], 'records received.')
foreign_key_df
print("gender_source_concept_id is NULL ")
###Output
_____no_output_____
###Markdown
race_source_concept_id
###Code
######################################
print('Getting the data from the database...')
######################################
foreign_key_df = pd.io.gbq.read_gbq('''
SELECT
race_source_concept_id,
concept_name,
COUNT(*) AS cnt
FROM
`{}.unioned_ehr_person` AS p
INNER JOIN
`{}.concept` AS c
ON
p.race_source_concept_id=c.concept_id
GROUP BY
1,2
'''.format(DATASET, DATASET,DATASET,DATASET,DATASET,DATASET),
dialect='standard')
foreign_key_df.shape
print(foreign_key_df.shape[0], 'records received.')
foreign_key_df
print("race_source_concept_id is NULL ")
###Output
_____no_output_____
###Markdown
ethnicity_source_concept_id
###Code
######################################
print('Getting the data from the database...')
######################################
foreign_key_df = pd.io.gbq.read_gbq('''
SELECT
ethnicity_source_concept_id,
COUNT(*) AS cnt
FROM
`{}.unioned_ehr_person` AS p
GROUP BY
1
'''.format(DATASET, DATASET,DATASET,DATASET,DATASET,DATASET),
dialect='standard')
foreign_key_df.shape
print(foreign_key_df.shape[0], 'records received.')
foreign_key_df
print("ethnicity_source_concept_id is NULL ")
###Output
_____no_output_____
###Markdown
VISIT_OCCURANCE TABLE person_id
###Code
######################################
print('Getting the data from the database...')
######################################
foreign_key_df = pd.io.gbq.read_gbq('''
SELECT
COUNT(*) AS total,
sum(case when (vo.person_id is null or vo.person_id=0) then 1 else 0 end) as missing
FROM
`{}.unioned_ehr_visit_occurrence` AS vo
INNER JOIN
`{}.unioned_ehr_observation` AS o
ON
vo.person_id=o.person_id
WHERE
o.observation_source_concept_id=1586099 and o.value_as_concept_id=45877994
'''.format(DATASET, DATASET,DATASET,DATASET,DATASET,DATASET),
dialect='standard')
foreign_key_df.shape
print(foreign_key_df.shape[0], 'records received.')
foreign_key_df
###Output
_____no_output_____
###Markdown
visit_concept_id
###Code
######################################
print('Getting the data from the database...')
######################################
foreign_key_df = pd.io.gbq.read_gbq('''
SELECT
concept_name,
visit_concept_id,
COUNT(*)
FROM
`{}.unioned_ehr_visit_occurrence` AS vo
INNER JOIN
`{}.concept` AS c
ON
vo.visit_concept_id=c.concept_id
GROUP BY
1,2
ORDER BY
2
'''.format(DATASET, DATASET,DATASET,DATASET,DATASET,DATASET),
dialect='standard')
foreign_key_df.shape
print(foreign_key_df.shape[0], 'records received.')
foreign_key_df.head()
###Output
_____no_output_____
###Markdown
visit_concept_id BY SITE
###Code
######################################
print('Getting the data from the database...')
######################################
visit_occurrence_visit_concept_id_df = pd.io.gbq.read_gbq('''
WITH
hpo_counts AS (
SELECT
src_hpo_id,
COUNT(vo.person_id) as total_counts
FROM
`{}.unioned_ehr_visit_occurrence` AS vo
INNER JOIN
`{}.concept` AS c
ON
vo.visit_concept_id=c.concept_id
LEFT OUTER JOIN
(SELECT
DISTINCT *
FROM
`{}._mapping_visit_occurrence`) AS mvo
ON
vo.visit_occurrence_id=mvo.visit_occurrence_id
GROUP BY
1
),
hpo_missing_counts AS (
SELECT
mvo.src_hpo_id,
COUNT(vo.person_id) as missing_counts
FROM
`{}.unioned_ehr_visit_occurrence` AS vo
INNER JOIN
`{}.concept` AS c
ON
vo.visit_concept_id=c.concept_id
LEFT OUTER JOIN
(SELECT
DISTINCT *
FROM
`{}._mapping_visit_occurrence`) AS mvo
ON
vo.visit_occurrence_id=mvo.visit_occurrence_id
WHERE
(vo.visit_concept_id is null or vo.visit_concept_id=0)
GROUP BY
1
)
SELECT
hpo_counts.src_hpo_id,
missing_counts,
total_counts,
round(100-100*(missing_counts/total_counts),1) AS success_rate
FROM
hpo_counts
FULL OUTER JOIN
hpo_missing_counts
ON
hpo_missing_counts.src_hpo_id=hpo_counts.src_hpo_id
ORDER BY
4
'''.format(DATASET, DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET),
dialect='standard')
visit_occurrence_visit_concept_id_df.shape
print(visit_occurrence_visit_concept_id_df.shape[0], 'records received.')
visit_occurrence_visit_concept_id_df
visit_occurrence_visit_concept_id_df=visit_occurrence_visit_concept_id_df.rename(columns={"success_rate":"visit_occurrence_visit_concept_id"})
visit_occurrence_visit_concept_id_df=visit_occurrence_visit_concept_id_df[["src_hpo_id","visit_occurrence_visit_concept_id"]]
visit_occurrence_visit_concept_id_df=visit_occurrence_visit_concept_id_df.fillna(100)
visit_occurrence_visit_concept_id_df
###Output
_____no_output_____
###Markdown
visit_type_concept_id
###Code
######################################
print('Getting the data from the database...')
######################################
foreign_key_df = pd.io.gbq.read_gbq('''
SELECT
concept_name,
visit_type_concept_id,
COUNT(*)
FROM
`{}.unioned_ehr_visit_occurrence` AS vo
INNER JOIN
`{}.concept` AS c
ON
vo.visit_type_concept_id=c.concept_id
GROUP BY
1,2
'''.format(DATASET, DATASET,DATASET,DATASET,DATASET,DATASET),
dialect='standard')
foreign_key_df.shape
print(foreign_key_df.shape[0], 'records received.')
foreign_key_df
success_rate=100-round(100*(foreign_key_df.loc[foreign_key_df["visit_type_concept_id"]==0,["f0_"]])/sum(foreign_key_df.iloc[:,2]),1)
print("success rate for visit_concept_id is: ", success_rate.iloc[0,0])
###Output
_____no_output_____
###Markdown
visit_type_concept_id BY SITE
###Code
######################################
print('Getting the data from the database...')
######################################
visit_occurrence_visit_type_concept_id = pd.io.gbq.read_gbq('''
WITH
hpo_counts AS (
SELECT
src_hpo_id,
COUNT(vo.person_id) as total_counts
FROM
`{}.unioned_ehr_visit_occurrence` AS vo
INNER JOIN
`{}.concept` AS c
ON
vo.visit_type_concept_id=c.concept_id
INNER JOIN
(SELECT
DISTINCT *
FROM
`{}._mapping_visit_occurrence`) AS mvo
ON
vo.visit_occurrence_id=mvo.visit_occurrence_id
GROUP BY
1
),
hpo_missing_counts AS (
SELECT
mvo.src_hpo_id,
COUNT(vo.person_id) as missing_counts
FROM
`{}.unioned_ehr_visit_occurrence` AS vo
INNER JOIN
`{}.concept` AS c
ON
vo.visit_type_concept_id=c.concept_id
INNER JOIN
(SELECT
DISTINCT *
FROM
`{}._mapping_visit_occurrence`) AS mvo
ON
vo.visit_occurrence_id=mvo.visit_occurrence_id
WHERE
(vo.visit_type_concept_id is null or vo.visit_type_concept_id=0)
GROUP BY
1
)
SELECT
hpo_counts.src_hpo_id,
missing_counts,
total_counts,
round(100-100*(missing_counts/total_counts),1) AS success_rate
FROM
hpo_counts
FULL OUTER JOIN
hpo_missing_counts
ON
hpo_missing_counts.src_hpo_id=hpo_counts.src_hpo_id
ORDER BY
4
'''.format(DATASET, DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET),
dialect='standard')
visit_occurrence_visit_type_concept_id.shape
print(visit_occurrence_visit_type_concept_id.shape[0], 'records received.')
visit_occurrence_visit_type_concept_id
visit_occurrence_visit_type_concept_id=visit_occurrence_visit_type_concept_id.rename(columns={"success_rate":"visit_occurrence_visit_type_concept_id"})
visit_occurrence_visit_type_concept_id=visit_occurrence_visit_type_concept_id[["src_hpo_id","visit_occurrence_visit_type_concept_id"]]
visit_occurrence_visit_type_concept_id=visit_occurrence_visit_type_concept_id.fillna(100)
visit_occurrence_visit_type_concept_id
###Output
_____no_output_____
###Markdown
provider_id
###Code
######################################
print('Getting the data from the database...')
######################################
foreign_key_df = pd.io.gbq.read_gbq('''
SELECT
vo.provider_id,
COUNT(*) AS cnt
FROM
`{}.unioned_ehr_visit_occurrence` AS vo
GROUP BY
1
ORDER BY
2
'''.format(DATASET, DATASET,DATASET,DATASET,DATASET,DATASET),
dialect='standard')
foreign_key_df.shape
print(foreign_key_df.shape[0], 'records received.')
foreign_key_df.tail(10)
100-round(100*(foreign_key_df.loc[foreign_key_df["provider_id"].isnull(),["cnt"]].iloc[0,0]
+foreign_key_df.loc[(foreign_key_df["provider_id"]==0),["cnt"]].iloc[0,0])/sum(foreign_key_df.iloc[:,1]),1)
total_missing=foreign_key_df.loc[foreign_key_df["provider_id"].isnull(),["cnt"]].iloc[0,0]+foreign_key_df.loc[(foreign_key_df["provider_id"]==0),["cnt"]].iloc[0,0]
total_missing
###Output
_____no_output_____
###Markdown
provider_id by sites
###Code
######################################
print('Getting the data from the database...')
######################################
visit_occurrence_provider_id_df = pd.io.gbq.read_gbq('''
WITH
hpo_counts AS (
SELECT
mvo.src_hpo_id,
COUNT(vo.person_id) as total_counts
FROM
`{}.unioned_ehr_visit_occurrence` AS vo
INNER JOIN
(SELECT
DISTINCT *
FROM
`{}._mapping_visit_occurrence`) AS mvo
ON
vo.visit_occurrence_id=mvo.visit_occurrence_id
GROUP BY
1
),
hpo_missing_counts AS (
SELECT
mvo.src_hpo_id,
COUNT(vo.person_id) as missing_counts
FROM
`{}.unioned_ehr_visit_occurrence` AS vo
INNER JOIN
(SELECT
DISTINCT *
FROM
`{}._mapping_visit_occurrence`) AS mvo
ON
vo.visit_occurrence_id=mvo.visit_occurrence_id
    WHERE
        (vo.provider_id is null or vo.provider_id=0)
    GROUP BY
        1
)
SELECT
hpo_counts.src_hpo_id,
missing_counts,
total_counts,
round(100-100*(missing_counts/total_counts),1) AS success_rate
FROM
hpo_counts
FULL OUTER JOIN
hpo_missing_counts
ON
hpo_missing_counts.src_hpo_id=hpo_counts.src_hpo_id
ORDER BY
4
'''.format(DATASET, DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET),
dialect='standard')
visit_occurrence_provider_id_df.shape
print(visit_occurrence_provider_id_df.shape[0], 'records received.')
visit_occurrence_provider_id_df
visit_occurrence_provider_id_df=visit_occurrence_provider_id_df.rename(columns={"success_rate":"visit_occurrence_provider_id"})
visit_occurrence_provider_id_df=visit_occurrence_provider_id_df[["src_hpo_id","visit_occurrence_provider_id"]]
visit_occurrence_provider_id_df=visit_occurrence_provider_id_df.fillna(100)
visit_occurrence_provider_id_df
###Output
_____no_output_____
###Markdown
care_site_id BY SITE
###Code
######################################
print('Getting the data from the database...')
######################################
visit_occurrence_care_site_id_df = pd.io.gbq.read_gbq('''
WITH
hpo_counts AS (
SELECT
src_hpo_id,
COUNT(vo.person_id) as total_counts
FROM
`{}.unioned_ehr_visit_occurrence` AS vo
INNER JOIN
(SELECT
DISTINCT *
FROM
`{}._mapping_visit_occurrence`) AS mvo
ON
vo.visit_occurrence_id=mvo.visit_occurrence_id
GROUP BY
1
),
hpo_missing_counts AS (
SELECT
mvo.src_hpo_id,
COUNT(vo.person_id) as missing_counts
FROM
`{}.unioned_ehr_visit_occurrence` AS vo
INNER JOIN
(SELECT
DISTINCT *
FROM
`{}._mapping_visit_occurrence`) AS mvo
ON
vo.visit_occurrence_id=mvo.visit_occurrence_id
WHERE
(vo.care_site_id is null or vo.care_site_id=0)
GROUP BY
1
)
SELECT
hpo_counts.src_hpo_id,
missing_counts,
total_counts,
round(100-100*(missing_counts/total_counts),1) AS success_rate
FROM
hpo_counts
FULL OUTER JOIN
hpo_missing_counts
ON
hpo_missing_counts.src_hpo_id=hpo_counts.src_hpo_id
ORDER BY
4
'''.format(DATASET, DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET),
dialect='standard')
visit_occurrence_care_site_id_df.shape
print(visit_occurrence_care_site_id_df.shape[0], 'records received.')
visit_occurrence_care_site_id_df=visit_occurrence_care_site_id_df.rename(columns={"success_rate":"visit_occurrence_care_site_id"})
visit_occurrence_care_site_id_df=visit_occurrence_care_site_id_df[["src_hpo_id","visit_occurrence_care_site_id"]]
visit_occurrence_care_site_id_df=visit_occurrence_care_site_id_df.fillna(100)
visit_occurrence_care_site_id_df
###Output
_____no_output_____
###Markdown
visit_source_concept_id BY SITE
###Code
######################################
print('Getting the data from the database...')
######################################
visit_occurrence_visit_source_concept_id_df = pd.io.gbq.read_gbq('''
WITH
hpo_counts AS (
SELECT
src_hpo_id,
COUNT(vo.person_id) as total_counts
FROM
`{}.unioned_ehr_visit_occurrence` AS vo
INNER JOIN
(SELECT
DISTINCT *
FROM
`{}._mapping_visit_occurrence`) AS mvo
ON
vo.visit_occurrence_id=mvo.visit_occurrence_id
GROUP BY
1
),
hpo_missing_counts AS (
SELECT
mvo.src_hpo_id,
COUNT(vo.person_id) as missing_counts
FROM
`{}.unioned_ehr_visit_occurrence` AS vo
INNER JOIN
(SELECT
DISTINCT *
FROM
`{}._mapping_visit_occurrence`) AS mvo
ON
vo.visit_occurrence_id=mvo.visit_occurrence_id
WHERE
(vo.visit_source_concept_id is null or vo.visit_source_concept_id=0)
GROUP BY
1
)
SELECT
hpo_counts.src_hpo_id,
missing_counts,
total_counts,
round(100-100*(missing_counts/total_counts),1) AS success_rate
FROM
hpo_counts
FULL OUTER JOIN
hpo_missing_counts
ON
hpo_missing_counts.src_hpo_id=hpo_counts.src_hpo_id
ORDER BY
4
'''.format(DATASET, DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET),
dialect='standard')
visit_occurrence_visit_source_concept_id_df.shape
print(visit_occurrence_visit_source_concept_id_df.shape[0], 'records received.')
visit_occurrence_visit_source_concept_id_df=visit_occurrence_visit_source_concept_id_df.rename(columns={"success_rate":"visit_occurrence_visit_source_concept_id"})
visit_occurrence_visit_source_concept_id_df=visit_occurrence_visit_source_concept_id_df[["src_hpo_id","visit_occurrence_visit_source_concept_id"]]
visit_occurrence_visit_source_concept_id_df=visit_occurrence_visit_source_concept_id_df.fillna(100)
visit_occurrence_visit_source_concept_id_df
###Output
_____no_output_____
###Markdown
admitting_source_concept_id BY SITE
###Code
######################################
print('Getting the data from the database...')
######################################
visit_occurrence_admitting_source_concept_id_df = pd.io.gbq.read_gbq('''
WITH
hpo_counts AS (
SELECT
src_hpo_id,
COUNT(vo.person_id) as total_counts
FROM
`{}.unioned_ehr_visit_occurrence` AS vo
INNER JOIN
(SELECT
DISTINCT *
FROM
`{}._mapping_visit_occurrence`) AS mvo
ON
vo.visit_occurrence_id=mvo.visit_occurrence_id
GROUP BY
1
),
hpo_missing_counts AS (
SELECT
mvo.src_hpo_id,
COUNT(vo.person_id) as missing_counts
FROM
`{}.unioned_ehr_visit_occurrence` AS vo
INNER JOIN
(SELECT
DISTINCT *
FROM
`{}._mapping_visit_occurrence`) AS mvo
ON
vo.visit_occurrence_id=mvo.visit_occurrence_id
WHERE
(vo.admitting_source_concept_id is null or vo.admitting_source_concept_id=0)
GROUP BY
1
)
SELECT
hpo_counts.src_hpo_id,
missing_counts,
total_counts,
round(100-100*(missing_counts/total_counts),1) AS success_rate
FROM
hpo_counts
FULL OUTER JOIN
hpo_missing_counts
ON
hpo_missing_counts.src_hpo_id=hpo_counts.src_hpo_id
ORDER BY
4
'''.format(DATASET, DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET),
dialect='standard')
visit_occurrence_admitting_source_concept_id_df.shape
print(visit_occurrence_admitting_source_concept_id_df.shape[0], 'records received.')
visit_occurrence_admitting_source_concept_id_df=visit_occurrence_admitting_source_concept_id_df.rename(columns={"success_rate":"visit_occurrence_admitting_source_concept_id"})
visit_occurrence_admitting_source_concept_id_df=visit_occurrence_admitting_source_concept_id_df[["src_hpo_id","visit_occurrence_admitting_source_concept_id"]]
visit_occurrence_admitting_source_concept_id_df=visit_occurrence_admitting_source_concept_id_df.fillna(100)
visit_occurrence_admitting_source_concept_id_df
###Output
_____no_output_____
###Markdown
discharge_to_concept_id BY SITE
###Code
######################################
print('Getting the data from the database...')
######################################
visit_occurrence_discharge_to_concept_id_df = pd.io.gbq.read_gbq('''
WITH
hpo_counts AS (
SELECT
src_hpo_id,
COUNT(vo.person_id) as total_counts
FROM
`{}.unioned_ehr_visit_occurrence` AS vo
INNER JOIN
`{}.concept` AS c
ON
vo.discharge_to_concept_id=c.concept_id
INNER JOIN
(SELECT
DISTINCT *
FROM
`{}._mapping_visit_occurrence`) AS mvo
ON
vo.visit_occurrence_id=mvo.visit_occurrence_id
GROUP BY
1
),
hpo_missing_counts AS (
SELECT
mvo.src_hpo_id,
COUNT(vo.person_id) as missing_counts
FROM
`{}.unioned_ehr_visit_occurrence` AS vo
INNER JOIN
`{}.concept` AS c
ON
vo.discharge_to_concept_id=c.concept_id
INNER JOIN
(SELECT
DISTINCT *
FROM
`{}._mapping_visit_occurrence`) AS mvo
ON
vo.visit_occurrence_id=mvo.visit_occurrence_id
WHERE
(vo.discharge_to_concept_id is null or vo.discharge_to_concept_id=0)
GROUP BY
1
)
SELECT
hpo_counts.src_hpo_id,
missing_counts,
total_counts,
round(100-100*(missing_counts/total_counts),1) AS success_rate
FROM
hpo_counts
FULL OUTER JOIN
hpo_missing_counts
ON
hpo_missing_counts.src_hpo_id=hpo_counts.src_hpo_id
ORDER BY
4
'''.format(DATASET, DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET),
dialect='standard')
visit_occurrence_discharge_to_concept_id_df.shape
print(visit_occurrence_discharge_to_concept_id_df.shape[0], 'records received.')
visit_occurrence_discharge_to_concept_id_df=visit_occurrence_discharge_to_concept_id_df.rename(columns={"success_rate":"visit_occurrence_discharge_to_concept_id"})
visit_occurrence_discharge_to_concept_id_df=visit_occurrence_discharge_to_concept_id_df[["src_hpo_id","visit_occurrence_discharge_to_concept_id"]]
visit_occurrence_discharge_to_concept_id_df=visit_occurrence_discharge_to_concept_id_df.fillna(100)
visit_occurrence_discharge_to_concept_id_df
###Output
_____no_output_____
###Markdown
preceding_visit_occurrence_id BY SITE
###Code
######################################
print('Getting the data from the database...')
######################################
visit_occurrence_preceding_visit_occurrence_id_df = pd.io.gbq.read_gbq('''
WITH
hpo_counts AS (
SELECT
src_hpo_id,
COUNT(vo.person_id) as total_counts
FROM
`{}.unioned_ehr_visit_occurrence` AS vo
INNER JOIN
(SELECT
DISTINCT *
FROM
`{}._mapping_visit_occurrence`) AS mvo
ON
vo.visit_occurrence_id=mvo.visit_occurrence_id
GROUP BY
1
),
hpo_missing_counts AS (
SELECT
mvo.src_hpo_id,
COUNT(vo.person_id) as missing_counts
FROM
`{}.unioned_ehr_visit_occurrence` AS vo
INNER JOIN
(SELECT
DISTINCT *
FROM
`{}._mapping_visit_occurrence`) AS mvo
ON
vo.visit_occurrence_id=mvo.visit_occurrence_id
WHERE
(vo.preceding_visit_occurrence_id is null or vo.preceding_visit_occurrence_id=0)
GROUP BY
1
)
SELECT
hpo_counts.src_hpo_id,
missing_counts,
total_counts,
round(100-100*(missing_counts/total_counts),1) AS success_rate
FROM
hpo_counts
FULL OUTER JOIN
hpo_missing_counts
ON
hpo_missing_counts.src_hpo_id=hpo_counts.src_hpo_id
ORDER BY
4
'''.format(DATASET, DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET),
dialect='standard')
visit_occurrence_preceding_visit_occurrence_id_df.shape
print(visit_occurrence_preceding_visit_occurrence_id_df.shape[0], 'records received.')
visit_occurrence_preceding_visit_occurrence_id_df=visit_occurrence_preceding_visit_occurrence_id_df.rename(columns={"success_rate":"visit_occurrence_preceding_visit_occurrence_id"})
visit_occurrence_preceding_visit_occurrence_id_df=visit_occurrence_preceding_visit_occurrence_id_df[["src_hpo_id","visit_occurrence_preceding_visit_occurrence_id"]]
visit_occurrence_preceding_visit_occurrence_id_df=visit_occurrence_preceding_visit_occurrence_id_df.fillna(100)
visit_occurrence_preceding_visit_occurrence_id_df
###Output
_____no_output_____
###Markdown
Condition Occurrence Table
condition_concept_id BY SITE
###Code
######################################
print('Getting the data from the database...')
######################################
condition_occurrence_condition_concept_id_df = pd.io.gbq.read_gbq('''
WITH
hpo_counts AS (
SELECT
src_hpo_id,
COUNT(co.person_id) as total_counts
FROM
`{}.unioned_ehr_condition_occurrence` AS co
INNER JOIN
`{}.concept` AS c
ON
co.condition_concept_id=c.concept_id
INNER JOIN
(SELECT
DISTINCT *
FROM
`{}._mapping_condition_occurrence`) AS mco
ON
co.condition_occurrence_id=mco.condition_occurrence_id
GROUP BY
1
),
hpo_missing_counts AS (
SELECT
mco.src_hpo_id,
COUNT(co.person_id) as missing_counts
FROM
`{}.unioned_ehr_condition_occurrence` AS co
INNER JOIN
`{}.concept` AS c
ON
co.condition_concept_id=c.concept_id
INNER JOIN
(SELECT
DISTINCT *
FROM
`{}._mapping_condition_occurrence`) AS mco
ON
co.condition_occurrence_id=mco.condition_occurrence_id
WHERE
(co.condition_concept_id is null or co.condition_concept_id=0)
GROUP BY
1
)
SELECT
hpo_counts.src_hpo_id,
missing_counts,
total_counts,
round(100-100*(missing_counts/total_counts),1) AS success_rate
FROM
hpo_counts
FULL OUTER JOIN
hpo_missing_counts
ON
hpo_missing_counts.src_hpo_id=hpo_counts.src_hpo_id
ORDER BY
4
'''.format(DATASET, DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET),
dialect='standard')
condition_occurrence_condition_concept_id_df.shape
print(condition_occurrence_condition_concept_id_df.shape[0], 'records received.')
condition_occurrence_condition_concept_id_df=condition_occurrence_condition_concept_id_df.rename(columns={"success_rate":"condition_occurrence_condition_concept_id"})
condition_occurrence_condition_concept_id_df=condition_occurrence_condition_concept_id_df[["src_hpo_id","condition_occurrence_condition_concept_id"]]
condition_occurrence_condition_concept_id_df=condition_occurrence_condition_concept_id_df.fillna(100)
condition_occurrence_condition_concept_id_df
###Output
_____no_output_____
###Markdown
condition_type_concept_id BY SITE
###Code
######################################
print('Getting the data from the database...')
######################################
condition_occurrence_condition_type_concept_id = pd.io.gbq.read_gbq('''
WITH
hpo_counts AS (
SELECT
src_hpo_id,
COUNT(co.person_id) as total_counts
FROM
`{}.unioned_ehr_condition_occurrence` AS co
INNER JOIN
`{}.concept` AS c
ON
co.condition_type_concept_id=c.concept_id
INNER JOIN
(SELECT
DISTINCT *
FROM
`{}._mapping_condition_occurrence`) AS mco
ON
co.condition_occurrence_id=mco.condition_occurrence_id
GROUP BY
1
),
hpo_missing_counts AS (
SELECT
mco.src_hpo_id,
COUNT(co.person_id) as missing_counts
FROM
`{}.unioned_ehr_condition_occurrence` AS co
INNER JOIN
`{}.concept` AS c
ON
co.condition_type_concept_id=c.concept_id
INNER JOIN
(SELECT
DISTINCT *
FROM
`{}._mapping_condition_occurrence`) AS mco
ON
co.condition_occurrence_id=mco.condition_occurrence_id
WHERE
(co.condition_type_concept_id is null or co.condition_type_concept_id=0)
GROUP BY
1
)
SELECT
hpo_counts.src_hpo_id,
missing_counts,
total_counts,
round(100-100*(missing_counts/total_counts),1) AS success_rate
FROM
hpo_counts
FULL OUTER JOIN
hpo_missing_counts
ON
hpo_missing_counts.src_hpo_id=hpo_counts.src_hpo_id
ORDER BY
4
'''.format(DATASET, DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET),
dialect='standard')
condition_occurrence_condition_type_concept_id.shape
print(condition_occurrence_condition_type_concept_id.shape[0], 'records received.')
condition_occurrence_condition_type_concept_id=condition_occurrence_condition_type_concept_id.rename(columns={"success_rate":"condition_occurrence_condition_type_concept_id"})
condition_occurrence_condition_type_concept_id=condition_occurrence_condition_type_concept_id[["src_hpo_id","condition_occurrence_condition_type_concept_id"]]
condition_occurrence_condition_type_concept_id=condition_occurrence_condition_type_concept_id.fillna(100)
condition_occurrence_condition_type_concept_id
###Output
_____no_output_____
###Markdown
Provider_id
###Code
######################################
print('Getting the data from the database...')
######################################
foreign_key_df = pd.io.gbq.read_gbq('''
SELECT
co.provider_id,
COUNT(*) as cnt
FROM
`{}.unioned_ehr_condition_occurrence` AS co
GROUP BY
1
ORDER BY
2
'''.format(DATASET, DATASET,DATASET,DATASET,DATASET,DATASET),
dialect='standard')
foreign_key_df.shape
print(foreign_key_df.shape[0], 'records received.')
foreign_key_df.tail()
###Output
_____no_output_____
###Markdown
visit_occurrence_id
###Code
######################################
print('Getting the data from the database...')
######################################
foreign_key_df = pd.io.gbq.read_gbq('''
SELECT
co.visit_occurrence_id,
COUNT(*) as cnt
FROM
`{}.unioned_ehr_condition_occurrence` AS co
GROUP BY
1
ORDER BY
2
'''.format(DATASET, DATASET,DATASET,DATASET,DATASET,DATASET),
dialect='standard')
foreign_key_df.shape
print(foreign_key_df.shape[0], 'records received.')
foreign_key_df.tail()
###Output
_____no_output_____
###Markdown
condition_source_concept_id
###Code
######################################
print('Getting the data from the database...')
######################################
foreign_key_df = pd.io.gbq.read_gbq('''
SELECT
condition_source_concept_id,
COUNT(*) as cnt
FROM
`{}.unioned_ehr_condition_occurrence` AS co
GROUP BY
1
ORDER BY
2
'''.format(DATASET, DATASET,DATASET,DATASET,DATASET,DATASET),
dialect='standard')
foreign_key_df.shape
print(foreign_key_df.shape[0], 'records received.')
foreign_key_df.tail()
# success rate = 100 - percent of rows whose condition_source_concept_id is null or 0
missing_cnt = (foreign_key_df.loc[foreign_key_df["condition_source_concept_id"].isnull(), "cnt"].sum()
               + foreign_key_df.loc[foreign_key_df["condition_source_concept_id"] == 0, "cnt"].sum())
success_rate = round(100 - 100 * missing_cnt / foreign_key_df["cnt"].sum(), 1)
print("success rate for condition_source_concept_id is: ", success_rate)
###Output
_____no_output_____
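###Markdown
A quick arithmetic check of the success-rate formula used above and in the per-site queries (the counts here are illustrative assumptions, not values from the dataset):
###Code
# Illustrative numbers only: 250 of 10,000 rows have a null or zero value
missing, total = 250, 10000
print(round(100 - 100 * missing / total, 1))  # 97.5
###Output
_____no_output_____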
###Markdown
condition_source_concept_id BY SITE
###Code
######################################
print('Getting the data from the database...')
######################################
condition_occurrence_condition_source_concept_id = pd.io.gbq.read_gbq('''
WITH
hpo_counts AS (
SELECT
src_hpo_id,
COUNT(co.person_id) as total_counts
FROM
`{}.unioned_ehr_condition_occurrence` AS co
INNER JOIN
(SELECT
DISTINCT *
FROM
`{}._mapping_condition_occurrence`) AS mco
ON
co.condition_occurrence_id=mco.condition_occurrence_id
GROUP BY
1
),
hpo_missing_counts AS (
SELECT
mco.src_hpo_id,
COUNT(co.person_id) as missing_counts
FROM
`{}.unioned_ehr_condition_occurrence` AS co
INNER JOIN
(SELECT
DISTINCT *
FROM
`{}._mapping_condition_occurrence`) AS mco
ON
co.condition_occurrence_id=mco.condition_occurrence_id
WHERE
(co.condition_source_concept_id is null or co.condition_source_concept_id=0)
GROUP BY
1
)
SELECT
hpo_counts.src_hpo_id,
missing_counts,
total_counts,
round(100-100*(missing_counts/total_counts),1) AS success_rate
FROM
hpo_counts
FULL OUTER JOIN
hpo_missing_counts
ON
hpo_missing_counts.src_hpo_id=hpo_counts.src_hpo_id
ORDER BY
4
'''.format(DATASET, DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET),
dialect='standard')
condition_occurrence_condition_source_concept_id.shape
print(condition_occurrence_condition_source_concept_id.shape[0], 'records received.')
condition_occurrence_condition_source_concept_id=condition_occurrence_condition_source_concept_id.rename(columns={"success_rate":"condition_occurrence_condition_source_concept_id"})
condition_occurrence_condition_source_concept_id=condition_occurrence_condition_source_concept_id[["src_hpo_id","condition_occurrence_condition_source_concept_id"]]
condition_occurrence_condition_source_concept_id=condition_occurrence_condition_source_concept_id.fillna(100)
condition_occurrence_condition_source_concept_id
###Output
_____no_output_____
###Markdown
condition_status_concept_id BY SITE
###Code
######################################
print('Getting the data from the database...')
######################################
condition_occurrence_condition_status_concept_id = pd.io.gbq.read_gbq('''
WITH
hpo_counts AS (
SELECT
src_hpo_id,
COUNT(co.person_id) as total_counts
FROM
`{}.unioned_ehr_condition_occurrence` AS co
INNER JOIN
`{}.concept` AS c
ON
co.condition_status_concept_id=c.concept_id
INNER JOIN
(SELECT
DISTINCT *
FROM
`{}._mapping_condition_occurrence`) AS mco
ON
co.condition_occurrence_id=mco.condition_occurrence_id
GROUP BY
1
),
hpo_missing_counts AS (
SELECT
mco.src_hpo_id,
COUNT(co.person_id) as missing_counts
FROM
`{}.unioned_ehr_condition_occurrence` AS co
INNER JOIN
`{}.concept` AS c
ON
co.condition_status_concept_id=c.concept_id
INNER JOIN
(SELECT
DISTINCT *
FROM
`{}._mapping_condition_occurrence`) AS mco
ON
co.condition_occurrence_id=mco.condition_occurrence_id
WHERE
(co.condition_status_concept_id is null or co.condition_status_concept_id=0)
GROUP BY
1
)
SELECT
hpo_counts.src_hpo_id,
missing_counts,
total_counts,
round(100-100*(missing_counts/total_counts),1) AS success_rate
FROM
hpo_counts
FULL OUTER JOIN
hpo_missing_counts
ON
hpo_missing_counts.src_hpo_id=hpo_counts.src_hpo_id
ORDER BY
4
'''.format(DATASET, DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET),
dialect='standard')
condition_occurrence_condition_status_concept_id.shape
print(condition_occurrence_condition_status_concept_id.shape[0], 'records received.')
condition_occurrence_condition_status_concept_id=condition_occurrence_condition_status_concept_id.rename(columns={"success_rate":"condition_occurrence_condition_status_concept_id"})
condition_occurrence_condition_status_concept_id=condition_occurrence_condition_status_concept_id[["src_hpo_id","condition_occurrence_condition_status_concept_id"]]
condition_occurrence_condition_status_concept_id=condition_occurrence_condition_status_concept_id.fillna(100)
condition_occurrence_condition_status_concept_id
###Output
_____no_output_____
###Markdown
Drug Exposure Table
person_id
###Code
######################################
print('Getting the data from the database...')
######################################
foreign_key_df = pd.io.gbq.read_gbq('''
SELECT
COUNT(*) AS total,
sum(case when (de.person_id is null or de.person_id=0) then 1 else 0 end) as missing
FROM
`{}.unioned_ehr_drug_exposure` AS de
INNER JOIN
`{}.unioned_ehr_observation` AS o
ON
de.person_id=o.person_id
'''.format(DATASET, DATASET,DATASET,DATASET,DATASET,DATASET),
dialect='standard')
foreign_key_df.shape
print(foreign_key_df.shape[0], 'records received.')
foreign_key_df
print("success rate for person_id is: ", round(100 - 100 * (foreign_key_df["missing"].iloc[0] / foreign_key_df["total"].iloc[0]), 1))
###Output
_____no_output_____
###Markdown
drug_concept_id BY SITE
###Code
######################################
print('Getting the data from the database...')
######################################
drug_exposure_drug_concept_id = pd.io.gbq.read_gbq('''
WITH
hpo_counts AS (
SELECT
src_hpo_id,
COUNT(de.person_id) as total_counts
FROM
`{}.unioned_ehr_drug_exposure` AS de
INNER JOIN
`{}.concept` AS c
ON
de.drug_concept_id=c.concept_id
INNER JOIN
(SELECT
DISTINCT *
FROM
`{}._mapping_drug_exposure`) AS mde
ON
de.drug_exposure_id=mde.drug_exposure_id
GROUP BY
1
),
hpo_missing_counts AS (
SELECT
mde.src_hpo_id,
COUNT(de.person_id) as missing_counts
FROM
`{}.unioned_ehr_drug_exposure` AS de
INNER JOIN
`{}.concept` AS c
ON
de.drug_concept_id=c.concept_id
INNER JOIN
(SELECT
DISTINCT *
FROM
`{}._mapping_drug_exposure`) AS mde
ON
de.drug_exposure_id=mde.drug_exposure_id
WHERE
(de.drug_concept_id is null or de.drug_concept_id=0)
GROUP BY
1
)
SELECT
hpo_counts.src_hpo_id,
missing_counts,
total_counts,
round(100-100*(missing_counts/total_counts),1) AS success_rate
FROM
hpo_counts
FULL OUTER JOIN
hpo_missing_counts
ON
hpo_missing_counts.src_hpo_id=hpo_counts.src_hpo_id
ORDER BY
4
'''.format(DATASET, DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET),
dialect='standard')
drug_exposure_drug_concept_id.shape
print(drug_exposure_drug_concept_id.shape[0], 'records received.')
drug_exposure_drug_concept_id=drug_exposure_drug_concept_id.rename(columns={"success_rate":"drug_exposure_drug_concept_id"})
drug_exposure_drug_concept_id=drug_exposure_drug_concept_id[["src_hpo_id","drug_exposure_drug_concept_id"]]
drug_exposure_drug_concept_id=drug_exposure_drug_concept_id.fillna(100)
drug_exposure_drug_concept_id
###Output
_____no_output_____
###Markdown
drug_type_concept_id BY SITE
###Code
######################################
print('Getting the data from the database...')
######################################
drug_exposure_drug_type_concept_id = pd.io.gbq.read_gbq('''
WITH
hpo_counts AS (
SELECT
src_hpo_id,
COUNT(de.person_id) as total_counts
FROM
`{}.unioned_ehr_drug_exposure` AS de
INNER JOIN
`{}.concept` AS c
ON
de.drug_type_concept_id=c.concept_id
INNER JOIN
(SELECT
DISTINCT *
FROM
`{}._mapping_drug_exposure`) AS mde
ON
de.drug_exposure_id=mde.drug_exposure_id
GROUP BY
1
),
hpo_missing_counts AS (
SELECT
mde.src_hpo_id,
COUNT(de.person_id) as missing_counts
FROM
`{}.unioned_ehr_drug_exposure` AS de
INNER JOIN
`{}.concept` AS c
ON
de.drug_type_concept_id=c.concept_id
INNER JOIN
(SELECT
DISTINCT *
FROM
`{}._mapping_drug_exposure`) AS mde
ON
de.drug_exposure_id=mde.drug_exposure_id
WHERE
(de.drug_type_concept_id is null or de.drug_type_concept_id=0)
GROUP BY
1
)
SELECT
hpo_counts.src_hpo_id,
missing_counts,
total_counts,
round(100-100*(missing_counts/total_counts),1) AS success_rate
FROM
hpo_counts
FULL OUTER JOIN
hpo_missing_counts
ON
hpo_missing_counts.src_hpo_id=hpo_counts.src_hpo_id
ORDER BY
4
'''.format(DATASET, DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET),
dialect='standard')
drug_exposure_drug_type_concept_id.shape
print(drug_exposure_drug_type_concept_id.shape[0], 'records received.')
drug_exposure_drug_type_concept_id=drug_exposure_drug_type_concept_id.rename(columns={"success_rate":"drug_exposure_drug_type_concept_id"})
drug_exposure_drug_type_concept_id=drug_exposure_drug_type_concept_id[["src_hpo_id","drug_exposure_drug_type_concept_id"]]
drug_exposure_drug_type_concept_id=drug_exposure_drug_type_concept_id.fillna(100)
drug_exposure_drug_type_concept_id
###Output
_____no_output_____
###Markdown
route_concept_id BY SITE
###Code
######################################
print('Getting the data from the database...')
######################################
drug_exposure_route_concept_id= pd.io.gbq.read_gbq('''
WITH
hpo_counts AS (
SELECT
src_hpo_id,
COUNT(de.person_id) as total_counts
FROM
`{}.unioned_ehr_drug_exposure` AS de
INNER JOIN
`{}.concept` AS c
ON
de.route_concept_id=c.concept_id
INNER JOIN
(SELECT
DISTINCT *
FROM
`{}._mapping_drug_exposure`) AS mde
ON
de.drug_exposure_id=mde.drug_exposure_id
GROUP BY
1
),
hpo_missing_counts AS (
SELECT
mde.src_hpo_id,
COUNT(de.person_id) as missing_counts
FROM
`{}.unioned_ehr_drug_exposure` AS de
INNER JOIN
`{}.concept` AS c
ON
de.route_concept_id=c.concept_id
INNER JOIN
(SELECT
DISTINCT *
FROM
`{}._mapping_drug_exposure`) AS mde
ON
de.drug_exposure_id=mde.drug_exposure_id
WHERE
(de.route_concept_id is null or de.route_concept_id=0)
GROUP BY
1
)
SELECT
hpo_counts.src_hpo_id,
missing_counts,
total_counts,
round(100-100*(missing_counts/total_counts),1) AS success_rate
FROM
hpo_counts
FULL OUTER JOIN
hpo_missing_counts
ON
hpo_missing_counts.src_hpo_id=hpo_counts.src_hpo_id
ORDER BY
4
'''.format(DATASET, DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET),
dialect='standard')
drug_exposure_route_concept_id.shape
print(drug_exposure_route_concept_id.shape[0], 'records received.')
drug_exposure_route_concept_id=drug_exposure_route_concept_id.rename(columns={"success_rate":"drug_exposure_route_concept_id"})
drug_exposure_route_concept_id=drug_exposure_route_concept_id[["src_hpo_id","drug_exposure_route_concept_id"]]
drug_exposure_route_concept_id=drug_exposure_route_concept_id.fillna(100)
drug_exposure_route_concept_id
###Output
_____no_output_____
###Markdown
provider_id BY SITE
###Code
######################################
print('Getting the data from the database...')
######################################
drug_exposure_provider_id = pd.io.gbq.read_gbq('''
WITH
hpo_counts AS (
SELECT
src_hpo_id,
COUNT(de.person_id) as total_counts
FROM
`{}.unioned_ehr_drug_exposure` AS de
INNER JOIN
(SELECT
DISTINCT *
FROM
`{}._mapping_drug_exposure`) AS mde
ON
de.drug_exposure_id=mde.drug_exposure_id
GROUP BY
1
),
hpo_missing_counts AS (
SELECT
mde.src_hpo_id,
COUNT(de.person_id) as missing_counts
FROM
`{}.unioned_ehr_drug_exposure` AS de
INNER JOIN
(SELECT
DISTINCT *
FROM
`{}._mapping_drug_exposure`) AS mde
ON
de.drug_exposure_id=mde.drug_exposure_id
WHERE
(de.provider_id is null or de.provider_id=0)
GROUP BY
1
)
SELECT
hpo_counts.src_hpo_id,
missing_counts,
total_counts,
round(100-100*(missing_counts/total_counts),1) AS success_rate
FROM
hpo_counts
FULL OUTER JOIN
hpo_missing_counts
ON
hpo_missing_counts.src_hpo_id=hpo_counts.src_hpo_id
ORDER BY
4
'''.format(DATASET, DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET),
dialect='standard')
drug_exposure_provider_id.shape
print(drug_exposure_provider_id.shape[0], 'records received.')
drug_exposure_provider_id=drug_exposure_provider_id.rename(columns={"success_rate":"drug_exposure_provider_id"})
drug_exposure_provider_id=drug_exposure_provider_id[["src_hpo_id","drug_exposure_provider_id"]]
drug_exposure_provider_id=drug_exposure_provider_id.fillna(100)
drug_exposure_provider_id
###Output
_____no_output_____
###Markdown
visit_occurrence_id BY SITE
###Code
######################################
print('Getting the data from the database...')
######################################
drug_exposure_visit_occurrence_id = pd.io.gbq.read_gbq('''
WITH
hpo_counts AS (
SELECT
src_hpo_id,
COUNT(de.person_id) as total_counts
FROM
`{}.unioned_ehr_drug_exposure` AS de
INNER JOIN
(SELECT
DISTINCT *
FROM
`{}._mapping_drug_exposure`) AS mde
ON
de.drug_exposure_id=mde.drug_exposure_id
GROUP BY
1
),
hpo_missing_counts AS (
SELECT
mde.src_hpo_id,
COUNT(de.person_id) as missing_counts
FROM
`{}.unioned_ehr_drug_exposure` AS de
INNER JOIN
(SELECT
DISTINCT *
FROM
`{}._mapping_drug_exposure`) AS mde
ON
de.drug_exposure_id=mde.drug_exposure_id
WHERE
(de.visit_occurrence_id is null or de.visit_occurrence_id=0)
GROUP BY
1
)
SELECT
hpo_counts.src_hpo_id,
missing_counts,
total_counts,
round(100-100*(missing_counts/total_counts),1) AS success_rate
FROM
hpo_counts
FULL OUTER JOIN
hpo_missing_counts
ON
hpo_missing_counts.src_hpo_id=hpo_counts.src_hpo_id
ORDER BY
4
'''.format(DATASET, DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET),
dialect='standard')
drug_exposure_visit_occurrence_id.shape
print(drug_exposure_visit_occurrence_id.shape[0], 'records received.')
drug_exposure_visit_occurrence_id=drug_exposure_visit_occurrence_id.rename(columns={"success_rate":"drug_exposure_visit_occurrence_id"})
drug_exposure_visit_occurrence_id=drug_exposure_visit_occurrence_id[["src_hpo_id","drug_exposure_visit_occurrence_id"]]
drug_exposure_visit_occurrence_id=drug_exposure_visit_occurrence_id.fillna(100)
drug_exposure_visit_occurrence_id
###Output
_____no_output_____
###Markdown
drug_source_concept_id BY SITE
###Code
######################################
print('Getting the data from the database...')
######################################
drug_exposure_drug_source_concept_id= pd.io.gbq.read_gbq('''
WITH
hpo_counts AS (
SELECT
src_hpo_id,
COUNT(de.person_id) as total_counts
FROM
`{}.unioned_ehr_drug_exposure` AS de
INNER JOIN
(SELECT
DISTINCT *
FROM
`{}._mapping_drug_exposure`) AS mde
ON
de.drug_exposure_id=mde.drug_exposure_id
GROUP BY
1
),
hpo_missing_counts AS (
SELECT
mde.src_hpo_id,
COUNT(de.person_id) as missing_counts
FROM
`{}.unioned_ehr_drug_exposure` AS de
INNER JOIN
(SELECT
DISTINCT *
FROM
`{}._mapping_drug_exposure`) AS mde
ON
de.drug_exposure_id=mde.drug_exposure_id
WHERE
(de.drug_source_concept_id is null or de.drug_source_concept_id=0)
GROUP BY
1
)
SELECT
hpo_counts.src_hpo_id,
missing_counts,
total_counts,
round(100-100*(missing_counts/total_counts),1) AS success_rate
FROM
hpo_counts
FULL OUTER JOIN
hpo_missing_counts
ON
hpo_missing_counts.src_hpo_id=hpo_counts.src_hpo_id
ORDER BY
4
'''.format(DATASET, DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET),
dialect='standard')
drug_exposure_drug_source_concept_id.shape
print(drug_exposure_drug_source_concept_id.shape[0], 'records received.')
drug_exposure_drug_source_concept_id=drug_exposure_drug_source_concept_id.rename(columns={"success_rate":"drug_exposure_drug_source_concept_id"})
drug_exposure_drug_source_concept_id=drug_exposure_drug_source_concept_id[["src_hpo_id","drug_exposure_drug_source_concept_id"]]
drug_exposure_drug_source_concept_id=drug_exposure_drug_source_concept_id.fillna(100)
drug_exposure_drug_source_concept_id
###Output
_____no_output_____
###Markdown
Measurement Table
measurement_concept_id BY SITE
###Code
######################################
print('Getting the data from the database...')
######################################
measurement_measurement_concept_id = pd.io.gbq.read_gbq('''
WITH
hpo_counts AS (
SELECT
src_hpo_id,
COUNT(me.person_id) as total_counts
FROM
`{}.unioned_ehr_measurement` AS me
INNER JOIN
`{}.concept` AS c
ON
me.measurement_concept_id=c.concept_id
INNER JOIN
(SELECT
DISTINCT *
FROM
`{}._mapping_measurement`) AS mm
ON
me.measurement_id=mm.measurement_id
GROUP BY
1
),
hpo_missing_counts AS (
SELECT
mm.src_hpo_id,
COUNT(me.person_id) as missing_counts
FROM
`{}.unioned_ehr_measurement` AS me
INNER JOIN
`{}.concept` AS c
ON
me.measurement_concept_id=c.concept_id
INNER JOIN
(SELECT
DISTINCT *
FROM
`{}._mapping_measurement`) AS mm
ON
me.measurement_id=mm.measurement_id
WHERE
(me.measurement_concept_id is null or me.measurement_concept_id=0)
GROUP BY
1
)
SELECT
hpo_counts.src_hpo_id,
missing_counts,
total_counts,
round(100-100*(missing_counts/total_counts),1) AS success_rate
FROM
hpo_counts
FULL OUTER JOIN
hpo_missing_counts
ON
hpo_missing_counts.src_hpo_id=hpo_counts.src_hpo_id
ORDER BY
4
'''.format(DATASET, DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET),
dialect='standard')
measurement_measurement_concept_id.shape
print(measurement_measurement_concept_id.shape[0], 'records received.')
measurement_measurement_concept_id=measurement_measurement_concept_id.rename(columns={"success_rate":"measurement_measurement_concept_id"})
measurement_measurement_concept_id=measurement_measurement_concept_id[["src_hpo_id","measurement_measurement_concept_id"]]
measurement_measurement_concept_id=measurement_measurement_concept_id.fillna(100)
measurement_measurement_concept_id
###Output
_____no_output_____
###Markdown
measurement_type_concept_id BY SITE
###Code
######################################
print('Getting the data from the database...')
######################################
measurement_measurement_type_concept_id = pd.io.gbq.read_gbq('''
WITH
hpo_counts AS (
SELECT
src_hpo_id,
COUNT(me.person_id) as total_counts
FROM
`{}.unioned_ehr_measurement` AS me
INNER JOIN
`{}.concept` AS c
ON
me.measurement_type_concept_id=c.concept_id
INNER JOIN
(SELECT
DISTINCT *
FROM
`{}._mapping_measurement`) AS mm
ON
me.measurement_id=mm.measurement_id
GROUP BY
1
),
hpo_missing_counts AS (
SELECT
mm.src_hpo_id,
COUNT(me.person_id) as missing_counts
FROM
`{}.unioned_ehr_measurement` AS me
INNER JOIN
`{}.concept` AS c
ON
me.measurement_type_concept_id=c.concept_id
INNER JOIN
(SELECT
DISTINCT *
FROM
`{}._mapping_measurement`) AS mm
ON
me.measurement_id=mm.measurement_id
WHERE
(me.measurement_type_concept_id is null or me.measurement_type_concept_id=0)
GROUP BY
1
)
SELECT
hpo_counts.src_hpo_id,
missing_counts,
total_counts,
round(100-100*(missing_counts/total_counts),1) AS success_rate
FROM
hpo_counts
FULL OUTER JOIN
hpo_missing_counts
ON
hpo_missing_counts.src_hpo_id=hpo_counts.src_hpo_id
ORDER BY
4
'''.format(DATASET, DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET),
dialect='standard')
measurement_measurement_type_concept_id.shape
print(measurement_measurement_type_concept_id.shape[0], 'records received.')
measurement_measurement_type_concept_id=measurement_measurement_type_concept_id.rename(columns={"success_rate":"measurement_measurement_type_concept_id"})
measurement_measurement_type_concept_id=measurement_measurement_type_concept_id[["src_hpo_id","measurement_measurement_type_concept_id"]]
measurement_measurement_type_concept_id=measurement_measurement_type_concept_id.fillna(100)
measurement_measurement_type_concept_id
###Output
_____no_output_____
###Markdown
operator_concept_id BY SITE
###Code
######################################
print('Getting the data from the database...')
######################################
measurement_operator_concept_id= pd.io.gbq.read_gbq('''
WITH
hpo_counts AS (
SELECT
src_hpo_id,
COUNT(me.person_id) as total_counts
FROM
`{}.unioned_ehr_measurement` AS me
INNER JOIN
`{}.concept` AS c
ON
me.operator_concept_id=c.concept_id
INNER JOIN
(SELECT
DISTINCT *
FROM
`{}._mapping_measurement`) AS mm
ON
me.measurement_id=mm.measurement_id
GROUP BY
1
),
hpo_missing_counts AS (
SELECT
mm.src_hpo_id,
COUNT(me.person_id) as missing_counts
FROM
`{}.unioned_ehr_measurement` AS me
INNER JOIN
`{}.concept` AS c
ON
me.operator_concept_id=c.concept_id
INNER JOIN
(SELECT
DISTINCT *
FROM
`{}._mapping_measurement`) AS mm
ON
me.measurement_id=mm.measurement_id
WHERE
(me.operator_concept_id is null or me.operator_concept_id=0)
GROUP BY
1
)
SELECT
hpo_counts.src_hpo_id,
missing_counts,
total_counts,
round(100-100*(missing_counts/total_counts),1) AS success_rate
FROM
hpo_counts
FULL OUTER JOIN
hpo_missing_counts
ON
hpo_missing_counts.src_hpo_id=hpo_counts.src_hpo_id
ORDER BY
4
'''.format(DATASET, DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET),
dialect='standard')
measurement_operator_concept_id.shape
print(measurement_operator_concept_id.shape[0], 'records received.')
measurement_operator_concept_id=measurement_operator_concept_id.rename(columns={"success_rate":"measurement_operator_concept_id"})
measurement_operator_concept_id=measurement_operator_concept_id[["src_hpo_id","measurement_operator_concept_id"]]
measurement_operator_concept_id=measurement_operator_concept_id.fillna(100)
measurement_operator_concept_id
###Output
_____no_output_____
###Markdown
value_as_concept_id BY SITE
###Code
######################################
print('Getting the data from the database...')
######################################
measurement_value_as_concept_id = pd.io.gbq.read_gbq('''
WITH
hpo_counts AS (
SELECT
src_hpo_id,
COUNT(me.person_id) as total_counts
FROM
`{}.unioned_ehr_measurement` AS me
INNER JOIN
`{}.concept` AS c
ON
me.value_as_concept_id=c.concept_id
INNER JOIN
(SELECT
DISTINCT *
FROM
`{}._mapping_measurement`) AS mm
ON
me.measurement_id=mm.measurement_id
GROUP BY
1
),
hpo_missing_counts AS (
SELECT
mm.src_hpo_id,
COUNT(me.person_id) as missing_counts
FROM
`{}.unioned_ehr_measurement` AS me
INNER JOIN
`{}.concept` AS c
ON
me.value_as_concept_id=c.concept_id
INNER JOIN
(SELECT
DISTINCT *
FROM
`{}._mapping_measurement`) AS mm
ON
me.measurement_id=mm.measurement_id
WHERE
(me.value_as_concept_id is null or me.value_as_concept_id=0)
GROUP BY
1
)
SELECT
hpo_counts.src_hpo_id,
missing_counts,
total_counts,
round(100-100*(missing_counts/total_counts),1) AS success_rate
FROM
hpo_counts
FULL OUTER JOIN
hpo_missing_counts
ON
hpo_missing_counts.src_hpo_id=hpo_counts.src_hpo_id
ORDER BY
4
'''.format(DATASET, DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET),
dialect='standard')
measurement_value_as_concept_id.shape
print(measurement_value_as_concept_id.shape[0], 'records received.')
measurement_value_as_concept_id=measurement_value_as_concept_id.rename(columns={"success_rate":"measurement_value_as_concept_id"})
measurement_value_as_concept_id=measurement_value_as_concept_id[["src_hpo_id","measurement_value_as_concept_id"]]
measurement_value_as_concept_id=measurement_value_as_concept_id.fillna(100)
measurement_value_as_concept_id
###Output
_____no_output_____
###Markdown
unit_concept_id BY SITE
###Code
######################################
print('Getting the data from the database...')
######################################
measurement_unit_concept_id = pd.io.gbq.read_gbq('''
WITH
hpo_counts AS (
SELECT
src_hpo_id,
COUNT(me.person_id) as total_counts
FROM
`{}.unioned_ehr_measurement` AS me
INNER JOIN
`{}.concept` AS c
ON
me.unit_concept_id=c.concept_id
INNER JOIN
(SELECT
DISTINCT *
FROM
`{}._mapping_measurement`) AS mm
ON
me.measurement_id=mm.measurement_id
GROUP BY
1
),
hpo_missing_counts AS (
SELECT
mm.src_hpo_id,
COUNT(me.person_id) as missing_counts
FROM
`{}.unioned_ehr_measurement` AS me
INNER JOIN
`{}.concept` AS c
ON
me.unit_concept_id=c.concept_id
INNER JOIN
(SELECT
DISTINCT *
FROM
`{}._mapping_measurement`) AS mm
ON
me.measurement_id=mm.measurement_id
WHERE
(me.unit_concept_id is null or me.unit_concept_id=0)
GROUP BY
1
)
SELECT
hpo_counts.src_hpo_id,
missing_counts,
total_counts,
round(100-100*(missing_counts/total_counts),1) AS success_rate
FROM
hpo_counts
FULL OUTER JOIN
hpo_missing_counts
ON
hpo_missing_counts.src_hpo_id=hpo_counts.src_hpo_id
ORDER BY
4
'''.format(DATASET, DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET),
dialect='standard')
measurement_unit_concept_id.shape
print(measurement_unit_concept_id.shape[0], 'records received.')
measurement_unit_concept_id=measurement_unit_concept_id.rename(columns={"success_rate":"measurement_unit_concept_id"})
measurement_unit_concept_id=measurement_unit_concept_id[["src_hpo_id","measurement_unit_concept_id"]]
measurement_unit_concept_id=measurement_unit_concept_id.fillna(100)
measurement_unit_concept_id
###Output
_____no_output_____
###Markdown
provider_id BY SITE
###Code
######################################
print('Getting the data from the database...')
######################################
measurement_provider_id = pd.io.gbq.read_gbq('''
WITH
hpo_counts AS (
SELECT
src_hpo_id,
COUNT(me.person_id) as total_counts
FROM
`{}.unioned_ehr_measurement` AS me
INNER JOIN
(SELECT
DISTINCT *
FROM
`{}._mapping_measurement`) AS mm
ON
me.measurement_id=mm.measurement_id
GROUP BY
1
),
hpo_missing_counts AS (
SELECT
mm.src_hpo_id,
COUNT(me.person_id) as missing_counts
FROM
`{}.unioned_ehr_measurement` AS me
INNER JOIN
(SELECT
DISTINCT *
FROM
`{}._mapping_measurement`) AS mm
ON
me.measurement_id=mm.measurement_id
WHERE
(me.provider_id is null or me.provider_id=0)
GROUP BY
1
)
SELECT
hpo_counts.src_hpo_id,
missing_counts,
total_counts,
round(100-100*(missing_counts/total_counts),1) AS success_rate
FROM
hpo_counts
FULL OUTER JOIN
hpo_missing_counts
ON
hpo_missing_counts.src_hpo_id=hpo_counts.src_hpo_id
ORDER BY
    4
'''.format(DATASET, DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET),
dialect='standard')
measurement_provider_id.shape
print(measurement_provider_id.shape[0], 'records received.')
measurement_provider_id=measurement_provider_id.rename(columns={"success_rate":"measurement_provider_id"})
measurement_provider_id=measurement_provider_id[["src_hpo_id","measurement_provider_id"]]
measurement_provider_id=measurement_provider_id.fillna(100)
measurement_provider_id
###Output
_____no_output_____
###Markdown
visit_occurrence_id BY SITE
###Code
######################################
print('Getting the data from the database...')
######################################
measurement_visit_occurrence_id = pd.io.gbq.read_gbq('''
WITH
hpo_counts AS (
SELECT
src_hpo_id,
COUNT(me.person_id) as total_counts
FROM
`{}.unioned_ehr_measurement` AS me
INNER JOIN
(SELECT
DISTINCT *
FROM
`{}._mapping_measurement`) AS mm
ON
me.measurement_id=mm.measurement_id
GROUP BY
1
),
hpo_missing_counts AS (
SELECT
mm.src_hpo_id,
COUNT(me.person_id) as missing_counts
FROM
`{}.unioned_ehr_measurement` AS me
INNER JOIN
(SELECT
DISTINCT *
FROM
`{}._mapping_measurement`) AS mm
ON
me.measurement_id=mm.measurement_id
WHERE
(me.visit_occurrence_id is null or me.visit_occurrence_id=0)
GROUP BY
1
)
SELECT
hpo_counts.src_hpo_id,
missing_counts,
total_counts,
round(100-100*(missing_counts/total_counts),1) AS success_rate
FROM
hpo_counts
FULL OUTER JOIN
hpo_missing_counts
ON
hpo_missing_counts.src_hpo_id=hpo_counts.src_hpo_id
ORDER BY
    4
'''.format(DATASET, DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET),
dialect='standard')
measurement_visit_occurrence_id.shape
print(measurement_visit_occurrence_id.shape[0], 'records received.')
measurement_visit_occurrence_id=measurement_visit_occurrence_id.rename(columns={"success_rate":"measurement_visit_occurrence_id"})
measurement_visit_occurrence_id=measurement_visit_occurrence_id[["src_hpo_id","measurement_visit_occurrence_id"]]
measurement_visit_occurrence_id=measurement_visit_occurrence_id.fillna(100)
measurement_visit_occurrence_id
###Output
_____no_output_____
###Markdown
measurement_source_concept_id BY SITE
###Code
######################################
print('Getting the data from the database...')
######################################
measurement_measurement_source_concept_id= pd.io.gbq.read_gbq('''
WITH
hpo_counts AS (
SELECT
src_hpo_id,
COUNT(me.person_id) as total_counts
FROM
`{}.unioned_ehr_measurement` AS me
INNER JOIN
(SELECT
DISTINCT *
FROM
`{}._mapping_measurement`) AS mm
ON
me.measurement_id=mm.measurement_id
GROUP BY
1
),
hpo_missing_counts AS (
SELECT
mm.src_hpo_id,
COUNT(me.person_id) as missing_counts
FROM
`{}.unioned_ehr_measurement` AS me
INNER JOIN
(SELECT
DISTINCT *
FROM
`{}._mapping_measurement`) AS mm
ON
me.measurement_id=mm.measurement_id
WHERE
(me.measurement_source_concept_id is null or me.measurement_source_concept_id=0)
GROUP BY
1
)
SELECT
hpo_counts.src_hpo_id,
missing_counts,
total_counts,
round(100-100*(missing_counts/total_counts),1) AS success_rate
FROM
hpo_counts
FULL OUTER JOIN
hpo_missing_counts
ON
hpo_missing_counts.src_hpo_id=hpo_counts.src_hpo_id
ORDER BY
    4
'''.format(DATASET, DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET),
dialect='standard')
measurement_measurement_source_concept_id.shape
print(measurement_measurement_source_concept_id.shape[0], 'records received.')
measurement_measurement_source_concept_id=measurement_measurement_source_concept_id.rename(columns={"success_rate":"measurement_measurement_source_concept_id"})
measurement_measurement_source_concept_id=measurement_measurement_source_concept_id[["src_hpo_id","measurement_measurement_source_concept_id"]]
measurement_measurement_source_concept_id=measurement_measurement_source_concept_id.fillna(100)
measurement_measurement_source_concept_id
###Output
_____no_output_____
###Markdown
Procedure Occurrence
procedure_concept_id BY SITE
###Code
######################################
print('Getting the data from the database...')
######################################
procedure_occurrence_procedure_concept_id = pd.io.gbq.read_gbq('''
WITH
hpo_counts AS (
SELECT
src_hpo_id,
COUNT(t1.person_id) as total_counts
FROM
`{}.unioned_ehr_procedure_occurrence` AS t1
INNER JOIN
`{}.concept` AS c
ON
t1.procedure_concept_id=c.concept_id
INNER JOIN
(SELECT
DISTINCT *
FROM
`{}._mapping_procedure_occurrence`) AS t2
ON
t1.procedure_occurrence_id=t2.procedure_occurrence_id
GROUP BY
1
),
hpo_missing_counts AS (
SELECT
t2.src_hpo_id,
COUNT(t1.person_id) as missing_counts
FROM
`{}.unioned_ehr_procedure_occurrence` AS t1
INNER JOIN
`{}.concept` AS c
ON
t1.procedure_concept_id=c.concept_id
INNER JOIN
(SELECT
DISTINCT *
FROM
`{}._mapping_procedure_occurrence`) AS t2
ON
t1.procedure_occurrence_id=t2.procedure_occurrence_id
WHERE
(t1.procedure_concept_id is null or t1.procedure_concept_id=0)
GROUP BY
1
)
SELECT
hpo_counts.src_hpo_id,
missing_counts,
total_counts,
round(100-100*(missing_counts/total_counts),1) AS success_rate
FROM
hpo_counts
FULL OUTER JOIN
hpo_missing_counts
ON
hpo_missing_counts.src_hpo_id=hpo_counts.src_hpo_id
ORDER BY
4
'''.format(DATASET, DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET),
dialect='standard')
procedure_occurrence_procedure_concept_id.shape
print(procedure_occurrence_procedure_concept_id.shape[0], 'records received.')
procedure_occurrence_procedure_concept_id=procedure_occurrence_procedure_concept_id.rename(columns={"success_rate":"procedure_occurrence_procedure_concept_id"})
procedure_occurrence_procedure_concept_id=procedure_occurrence_procedure_concept_id[["src_hpo_id","procedure_occurrence_procedure_concept_id"]]
procedure_occurrence_procedure_concept_id=procedure_occurrence_procedure_concept_id.fillna(100)
procedure_occurrence_procedure_concept_id
###Output
_____no_output_____
###Markdown
procedure_type_concept_id BY SITE
###Code
######################################
print('Getting the data from the database...')
######################################
procedure_occurrence_procedure_type_concept_id = pd.io.gbq.read_gbq('''
WITH
hpo_counts AS (
SELECT
src_hpo_id,
COUNT(t1.person_id) as total_counts
FROM
`{}.unioned_ehr_procedure_occurrence` AS t1
INNER JOIN
`{}.concept` AS c
ON
t1.procedure_type_concept_id=c.concept_id
INNER JOIN
(SELECT
DISTINCT *
FROM
`{}._mapping_procedure_occurrence`) AS t2
ON
t1.procedure_occurrence_id=t2.procedure_occurrence_id
GROUP BY
1
),
hpo_missing_counts AS (
SELECT
t2.src_hpo_id,
COUNT(t1.person_id) as missing_counts
FROM
`{}.unioned_ehr_procedure_occurrence` AS t1
INNER JOIN
`{}.concept` AS c
ON
        t1.procedure_type_concept_id=c.concept_id
INNER JOIN
(SELECT
DISTINCT *
FROM
`{}._mapping_procedure_occurrence`) AS t2
ON
t1.procedure_occurrence_id=t2.procedure_occurrence_id
WHERE
(t1.procedure_type_concept_id is null or t1.procedure_type_concept_id=0)
GROUP BY
1
)
SELECT
hpo_counts.src_hpo_id,
missing_counts,
total_counts,
round(100-100*(missing_counts/total_counts),1) AS success_rate
FROM
hpo_counts
FULL OUTER JOIN
hpo_missing_counts
ON
hpo_missing_counts.src_hpo_id=hpo_counts.src_hpo_id
ORDER BY
4
'''.format(DATASET, DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET),
dialect='standard')
procedure_occurrence_procedure_type_concept_id.shape
print(procedure_occurrence_procedure_type_concept_id.shape[0], 'records received.')
procedure_occurrence_procedure_type_concept_id=procedure_occurrence_procedure_type_concept_id.rename(columns={"success_rate":"procedure_occurrence_procedure_type_concept_id"})
procedure_occurrence_procedure_type_concept_id=procedure_occurrence_procedure_type_concept_id[["src_hpo_id","procedure_occurrence_procedure_type_concept_id"]]
procedure_occurrence_procedure_type_concept_id=procedure_occurrence_procedure_type_concept_id.fillna(100)
procedure_occurrence_procedure_type_concept_id
###Output
_____no_output_____
###Markdown
modifier_concept_id BY SITE
###Code
######################################
print('Getting the data from the database...')
######################################
procedure_occurrence_modifier_concept_id = pd.io.gbq.read_gbq('''
WITH
hpo_counts AS (
SELECT
src_hpo_id,
COUNT(t1.person_id) as total_counts
FROM
`{}.unioned_ehr_procedure_occurrence` AS t1
INNER JOIN
`{}.concept` AS c
ON
t1.modifier_concept_id=c.concept_id
INNER JOIN
(SELECT
DISTINCT *
FROM
`{}._mapping_procedure_occurrence`) AS t2
ON
t1.procedure_occurrence_id=t2.procedure_occurrence_id
GROUP BY
1
),
hpo_missing_counts AS (
SELECT
t2.src_hpo_id,
COUNT(t1.person_id) as missing_counts
FROM
`{}.unioned_ehr_procedure_occurrence` AS t1
INNER JOIN
`{}.concept` AS c
ON
t1.modifier_concept_id=c.concept_id
INNER JOIN
(SELECT
DISTINCT *
FROM
`{}._mapping_procedure_occurrence`) AS t2
ON
t1.procedure_occurrence_id=t2.procedure_occurrence_id
WHERE
(t1.modifier_concept_id is null or t1.modifier_concept_id=0)
GROUP BY
1
)
SELECT
hpo_counts.src_hpo_id,
missing_counts,
total_counts,
round(100-100*(missing_counts/total_counts),1) AS success_rate
FROM
hpo_counts
FULL OUTER JOIN
hpo_missing_counts
ON
hpo_missing_counts.src_hpo_id=hpo_counts.src_hpo_id
ORDER BY
4
'''.format(DATASET, DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET),
dialect='standard')
procedure_occurrence_modifier_concept_id.shape
print(procedure_occurrence_modifier_concept_id.shape[0], 'records received.')
procedure_occurrence_modifier_concept_id=procedure_occurrence_modifier_concept_id.rename(columns={"success_rate":"procedure_occurrence_modifier_concept_id"})
procedure_occurrence_modifier_concept_id=procedure_occurrence_modifier_concept_id[["src_hpo_id","procedure_occurrence_modifier_concept_id"]]
procedure_occurrence_modifier_concept_id=procedure_occurrence_modifier_concept_id.fillna(100)
procedure_occurrence_modifier_concept_id
###Output
_____no_output_____
###Markdown
provider_id BY SITE
###Code
######################################
print('Getting the data from the database...')
######################################
procedure_occurrence_provider_id = pd.io.gbq.read_gbq('''
WITH
hpo_counts AS (
SELECT
src_hpo_id,
COUNT(t1.person_id) as total_counts
FROM
`{}.unioned_ehr_procedure_occurrence` AS t1
INNER JOIN
(SELECT
DISTINCT *
FROM
`{}._mapping_procedure_occurrence`) AS t2
ON
t1.procedure_occurrence_id=t2.procedure_occurrence_id
GROUP BY
1
),
hpo_missing_counts AS (
SELECT
t2.src_hpo_id,
COUNT(t1.person_id) as missing_counts
FROM
`{}.unioned_ehr_procedure_occurrence` AS t1
INNER JOIN
(SELECT
DISTINCT *
FROM
`{}._mapping_procedure_occurrence`) AS t2
ON
t1.procedure_occurrence_id=t2.procedure_occurrence_id
WHERE
(t1.provider_id is null or t1.provider_id=0)
GROUP BY
1
)
SELECT
hpo_counts.src_hpo_id,
missing_counts,
total_counts,
round(100-100*(missing_counts/total_counts),1) AS success_rate
FROM
hpo_counts
FULL OUTER JOIN
hpo_missing_counts
ON
hpo_missing_counts.src_hpo_id=hpo_counts.src_hpo_id
ORDER BY
4
'''.format(DATASET, DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET),
dialect='standard')
procedure_occurrence_provider_id.shape
print(procedure_occurrence_provider_id.shape[0], 'records received.')
procedure_occurrence_provider_id=procedure_occurrence_provider_id.rename(columns={"success_rate":"procedure_occurrence_provider_id"})
procedure_occurrence_provider_id=procedure_occurrence_provider_id[["src_hpo_id","procedure_occurrence_provider_id"]]
procedure_occurrence_provider_id=procedure_occurrence_provider_id.fillna(100)
procedure_occurrence_provider_id
###Output
_____no_output_____
###Markdown
visit_occurrence_id BY SITE
###Code
######################################
print('Getting the data from the database...')
######################################
procedure_occurrence_visit_occurrence_id = pd.io.gbq.read_gbq('''
WITH
hpo_counts AS (
SELECT
src_hpo_id,
COUNT(t1.person_id) as total_counts
FROM
`{}.unioned_ehr_procedure_occurrence` AS t1
INNER JOIN
(SELECT
DISTINCT *
FROM
`{}._mapping_procedure_occurrence`) AS t2
ON
t1.procedure_occurrence_id=t2.procedure_occurrence_id
GROUP BY
1
),
hpo_missing_counts AS (
SELECT
t2.src_hpo_id,
COUNT(t1.person_id) as missing_counts
FROM
`{}.unioned_ehr_procedure_occurrence` AS t1
INNER JOIN
(SELECT
DISTINCT *
FROM
`{}._mapping_procedure_occurrence`) AS t2
ON
t1.procedure_occurrence_id=t2.procedure_occurrence_id
WHERE
(t1.visit_occurrence_id is null or t1.visit_occurrence_id=0)
GROUP BY
1
)
SELECT
hpo_counts.src_hpo_id,
missing_counts,
total_counts,
round(100-100*(missing_counts/total_counts),1) AS success_rate
FROM
hpo_counts
FULL OUTER JOIN
hpo_missing_counts
ON
hpo_missing_counts.src_hpo_id=hpo_counts.src_hpo_id
ORDER BY
4
'''.format(DATASET, DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET),
dialect='standard')
procedure_occurrence_visit_occurrence_id.shape
print(procedure_occurrence_visit_occurrence_id.shape[0], 'records received.')
procedure_occurrence_visit_occurrence_id=procedure_occurrence_visit_occurrence_id.rename(columns={"success_rate":"procedure_occurrence_visit_occurrence_id"})
procedure_occurrence_visit_occurrence_id=procedure_occurrence_visit_occurrence_id[["src_hpo_id","procedure_occurrence_visit_occurrence_id"]]
procedure_occurrence_visit_occurrence_id=procedure_occurrence_visit_occurrence_id.fillna(100)
procedure_occurrence_visit_occurrence_id
###Output
_____no_output_____
###Markdown
procedure_source_concept_id BY SITE
###Code
######################################
print('Getting the data from the database...')
######################################
procedure_occurrence_procedure_source_concept_id= pd.io.gbq.read_gbq('''
WITH
hpo_counts AS (
SELECT
src_hpo_id,
COUNT(t1.person_id) as total_counts
FROM
`{}.unioned_ehr_procedure_occurrence` AS t1
INNER JOIN
(SELECT
DISTINCT *
FROM
`{}._mapping_procedure_occurrence`) AS t2
ON
t1.procedure_occurrence_id=t2.procedure_occurrence_id
GROUP BY
1
),
hpo_missing_counts AS (
SELECT
t2.src_hpo_id,
COUNT(t1.person_id) as missing_counts
FROM
`{}.unioned_ehr_procedure_occurrence` AS t1
INNER JOIN
(SELECT
DISTINCT *
FROM
`{}._mapping_procedure_occurrence`) AS t2
ON
t1.procedure_occurrence_id=t2.procedure_occurrence_id
WHERE
(t1.procedure_source_concept_id is null or t1.procedure_source_concept_id=0)
GROUP BY
1
)
SELECT
hpo_counts.src_hpo_id,
missing_counts,
total_counts,
round(100-100*(missing_counts/total_counts),1) AS success_rate
FROM
hpo_counts
FULL OUTER JOIN
hpo_missing_counts
ON
hpo_missing_counts.src_hpo_id=hpo_counts.src_hpo_id
ORDER BY
4
'''.format(DATASET, DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET),
dialect='standard')
procedure_occurrence_procedure_source_concept_id.shape
print(procedure_occurrence_procedure_source_concept_id.shape[0], 'records received.')
procedure_occurrence_procedure_source_concept_id=procedure_occurrence_procedure_source_concept_id.rename(columns={"success_rate":"procedure_occurrence_procedure_source_concept_id"})
procedure_occurrence_procedure_source_concept_id=procedure_occurrence_procedure_source_concept_id[["src_hpo_id","procedure_occurrence_procedure_source_concept_id"]]
procedure_occurrence_procedure_source_concept_id=procedure_occurrence_procedure_source_concept_id.fillna(100)
procedure_occurrence_procedure_source_concept_id
###Output
_____no_output_____
###Markdown
Device Exposure
device_concept_id BY SITE
###Code
######################################
print('Getting the data from the database...')
######################################
device_exposure_device_concept_id = pd.io.gbq.read_gbq('''
WITH
hpo_counts AS (
SELECT
src_hpo_id,
COUNT(t1.person_id) as total_counts
FROM
`{}.unioned_ehr_device_exposure` AS t1
INNER JOIN
`{}.concept` AS c
ON
t1.device_concept_id=c.concept_id
INNER JOIN
(SELECT
DISTINCT *
FROM
`{}._mapping_device_exposure`) AS t2
ON
t1.device_exposure_id=t2.device_exposure_id
GROUP BY
1
),
hpo_missing_counts AS (
SELECT
t2.src_hpo_id,
COUNT(t1.person_id) as missing_counts
FROM
`{}.unioned_ehr_device_exposure` AS t1
INNER JOIN
`{}.concept` AS c
ON
t1.device_concept_id=c.concept_id
INNER JOIN
(SELECT
DISTINCT *
FROM
`{}._mapping_device_exposure`) AS t2
ON
t1.device_exposure_id=t2.device_exposure_id
WHERE
(t1.device_concept_id is null or t1.device_concept_id=0)
GROUP BY
1
)
SELECT
hpo_counts.src_hpo_id,
missing_counts,
total_counts,
round(100-100*(missing_counts/total_counts),1) AS success_rate
FROM
hpo_counts
FULL OUTER JOIN
hpo_missing_counts
ON
hpo_missing_counts.src_hpo_id=hpo_counts.src_hpo_id
ORDER BY
4
'''.format(DATASET, DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET),
dialect='standard')
device_exposure_device_concept_id.shape
print(device_exposure_device_concept_id.shape[0], 'records received.')
device_exposure_device_concept_id=device_exposure_device_concept_id.rename(columns={"success_rate":"device_exposure_device_concept_id"})
device_exposure_device_concept_id=device_exposure_device_concept_id[["src_hpo_id","device_exposure_device_concept_id"]]
device_exposure_device_concept_id=device_exposure_device_concept_id.fillna(100)
device_exposure_device_concept_id
###Output
_____no_output_____
###Markdown
device_type_concept_id BY SITE
###Code
######################################
print('Getting the data from the database...')
######################################
device_exposure_device_type_concept_id = pd.io.gbq.read_gbq('''
WITH
hpo_counts AS (
SELECT
src_hpo_id,
COUNT(t1.person_id) as total_counts
FROM
`{}.unioned_ehr_device_exposure` AS t1
INNER JOIN
`{}.concept` AS c
ON
t1.device_type_concept_id=c.concept_id
INNER JOIN
(SELECT
DISTINCT *
FROM
`{}._mapping_device_exposure`) AS t2
ON
t1.device_exposure_id=t2.device_exposure_id
GROUP BY
1
),
hpo_missing_counts AS (
SELECT
t2.src_hpo_id,
COUNT(t1.person_id) as missing_counts
FROM
`{}.unioned_ehr_device_exposure` AS t1
INNER JOIN
`{}.concept` AS c
ON
t1.device_type_concept_id=c.concept_id
INNER JOIN
(SELECT
DISTINCT *
FROM
`{}._mapping_device_exposure`) AS t2
ON
t1.device_exposure_id=t2.device_exposure_id
WHERE
(t1.device_type_concept_id is null or t1.device_type_concept_id=0)
GROUP BY
1
)
SELECT
hpo_counts.src_hpo_id,
missing_counts,
total_counts,
round(100-100*(missing_counts/total_counts),1) AS success_rate
FROM
hpo_counts
FULL OUTER JOIN
hpo_missing_counts
ON
hpo_missing_counts.src_hpo_id=hpo_counts.src_hpo_id
ORDER BY
4
'''.format(DATASET, DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET),
dialect='standard')
device_exposure_device_type_concept_id.shape
print(device_exposure_device_type_concept_id.shape[0], 'records received.')
device_exposure_device_type_concept_id=device_exposure_device_type_concept_id.rename(columns={"success_rate":"device_exposure_device_type_concept_id"})
device_exposure_device_type_concept_id=device_exposure_device_type_concept_id[["src_hpo_id","device_exposure_device_type_concept_id"]]
device_exposure_device_type_concept_id=device_exposure_device_type_concept_id.fillna(100)
device_exposure_device_type_concept_id
###Output
_____no_output_____
###Markdown
provider_id BY SITE
###Code
######################################
print('Getting the data from the database...')
######################################
device_exposure_provider_id = pd.io.gbq.read_gbq('''
WITH
hpo_counts AS (
SELECT
src_hpo_id,
COUNT(t1.person_id) as total_counts
FROM
`{}.unioned_ehr_device_exposure` AS t1
INNER JOIN
(SELECT
DISTINCT *
FROM
`{}._mapping_device_exposure`) AS t2
ON
t1.device_exposure_id=t2.device_exposure_id
GROUP BY
1
),
hpo_missing_counts AS (
SELECT
t2.src_hpo_id,
COUNT(t1.person_id) as missing_counts
FROM
`{}.unioned_ehr_device_exposure` AS t1
INNER JOIN
(SELECT
DISTINCT *
FROM
`{}._mapping_device_exposure`) AS t2
ON
t1.device_exposure_id=t2.device_exposure_id
WHERE
(t1.provider_id is null or t1.provider_id=0)
GROUP BY
1
)
SELECT
hpo_counts.src_hpo_id,
missing_counts,
total_counts,
round(100-100*(missing_counts/total_counts),1) AS success_rate
FROM
hpo_counts
FULL OUTER JOIN
hpo_missing_counts
ON
hpo_missing_counts.src_hpo_id=hpo_counts.src_hpo_id
ORDER BY
4
'''.format(DATASET, DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET),
dialect='standard')
device_exposure_provider_id.shape
print(device_exposure_provider_id.shape[0], 'records received.')
device_exposure_provider_id=device_exposure_provider_id.rename(columns={"success_rate":"device_exposure_provider_id"})
device_exposure_provider_id=device_exposure_provider_id[["src_hpo_id","device_exposure_provider_id"]]
device_exposure_provider_id=device_exposure_provider_id.fillna(100)
device_exposure_provider_id
###Output
_____no_output_____
###Markdown
visit_occurrence_id BY SITE
###Code
######################################
print('Getting the data from the database...')
######################################
device_exposure_visit_occurrence_id = pd.io.gbq.read_gbq('''
WITH
hpo_counts AS (
SELECT
src_hpo_id,
COUNT(t1.person_id) as total_counts
FROM
`{}.unioned_ehr_device_exposure` AS t1
INNER JOIN
(SELECT
DISTINCT *
FROM
`{}._mapping_device_exposure`) AS t2
ON
t1.device_exposure_id=t2.device_exposure_id
GROUP BY
1
),
hpo_missing_counts AS (
SELECT
t2.src_hpo_id,
COUNT(t1.person_id) as missing_counts
FROM
`{}.unioned_ehr_device_exposure` AS t1
INNER JOIN
(SELECT
DISTINCT *
FROM
`{}._mapping_device_exposure`) AS t2
ON
t1.device_exposure_id=t2.device_exposure_id
WHERE
(t1.visit_occurrence_id is null or t1.visit_occurrence_id=0)
GROUP BY
1
)
SELECT
hpo_counts.src_hpo_id,
missing_counts,
total_counts,
round(100-100*(missing_counts/total_counts),1) AS success_rate
FROM
hpo_counts
FULL OUTER JOIN
hpo_missing_counts
ON
hpo_missing_counts.src_hpo_id=hpo_counts.src_hpo_id
ORDER BY
4
'''.format(DATASET, DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET),
dialect='standard')
device_exposure_visit_occurrence_id.shape
print(device_exposure_visit_occurrence_id.shape[0], 'records received.')
device_exposure_visit_occurrence_id=device_exposure_visit_occurrence_id.rename(columns={"success_rate":"device_exposure_visit_occurrence_id"})
device_exposure_visit_occurrence_id=device_exposure_visit_occurrence_id[["src_hpo_id","device_exposure_visit_occurrence_id"]]
device_exposure_visit_occurrence_id=device_exposure_visit_occurrence_id.fillna(100)
device_exposure_visit_occurrence_id
###Output
_____no_output_____
###Markdown
device_source_concept_id BY SITE
###Code
######################################
print('Getting the data from the database...')
######################################
device_exposure_device_source_concept_id= pd.io.gbq.read_gbq('''
WITH
hpo_counts AS (
SELECT
src_hpo_id,
COUNT(t1.person_id) as total_counts
FROM
`{}.unioned_ehr_device_exposure` AS t1
INNER JOIN
(SELECT
DISTINCT *
FROM
`{}._mapping_device_exposure`) AS t2
ON
t1.device_exposure_id=t2.device_exposure_id
GROUP BY
1
),
hpo_missing_counts AS (
SELECT
t2.src_hpo_id,
COUNT(t1.person_id) as missing_counts
FROM
`{}.unioned_ehr_device_exposure` AS t1
INNER JOIN
(SELECT
DISTINCT *
FROM
`{}._mapping_device_exposure`) AS t2
ON
t1.device_exposure_id=t2.device_exposure_id
WHERE
(t1.device_source_concept_id is null or t1.device_source_concept_id=0)
GROUP BY
1
)
SELECT
hpo_counts.src_hpo_id,
missing_counts,
total_counts,
round(100-100*(missing_counts/total_counts),1) AS success_rate
FROM
hpo_counts
FULL OUTER JOIN
hpo_missing_counts
ON
hpo_missing_counts.src_hpo_id=hpo_counts.src_hpo_id
'''.format(DATASET, DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET,DATASET),
dialect='standard')
device_exposure_device_source_concept_id.shape
print(device_exposure_device_source_concept_id.shape[0], 'records received.')
device_exposure_device_source_concept_id
device_exposure_device_source_concept_id=device_exposure_device_source_concept_id.rename(columns={"success_rate":"device_exposure_device_source_concept_id"})
device_exposure_device_source_concept_id=device_exposure_device_source_concept_id[["src_hpo_id","device_exposure_device_source_concept_id"]]
device_exposure_device_source_concept_id=device_exposure_device_source_concept_id.fillna(100)
device_exposure_device_source_concept_id
datas=[visit_occurrence_visit_type_concept_id,
visit_occurrence_provider_id_df,
visit_occurrence_care_site_id_df,
visit_occurrence_visit_source_concept_id_df,
visit_occurrence_admitting_source_concept_id_df,
visit_occurrence_discharge_to_concept_id_df,
visit_occurrence_preceding_visit_occurrence_id_df,
condition_occurrence_condition_concept_id_df,
condition_occurrence_condition_type_concept_id,
condition_occurrence_condition_source_concept_id,
condition_occurrence_condition_status_concept_id,
drug_exposure_drug_concept_id,
drug_exposure_drug_type_concept_id,
drug_exposure_route_concept_id,
drug_exposure_provider_id,
drug_exposure_visit_occurrence_id,
drug_exposure_drug_source_concept_id,
measurement_measurement_concept_id,
measurement_measurement_type_concept_id,
measurement_operator_concept_id,
measurement_value_as_concept_id,
measurement_unit_concept_id,
measurement_provider_id,
measurement_visit_occurrence_id,
measurement_measurement_source_concept_id,
procedure_occurrence_procedure_concept_id,
procedure_occurrence_procedure_type_concept_id,
procedure_occurrence_modifier_concept_id,
procedure_occurrence_provider_id,
procedure_occurrence_visit_occurrence_id,
procedure_occurrence_procedure_source_concept_id,
device_exposure_device_concept_id,
device_exposure_device_type_concept_id,
device_exposure_provider_id,
device_exposure_visit_occurrence_id,
device_exposure_device_source_concept_id]
master_df=visit_occurrence_visit_concept_id_df
for filename in datas:
master_df = pd.merge(master_df,filename,on='src_hpo_id',how='outer')
master_df = pd.merge(master_df,site_df,on='src_hpo_id',how='outer')
master_df
master_df=master_df.fillna("No Data")
master_df
master_df.to_csv("data\\foreign.csv")
###Output
_____no_output_____ |
max_bermont.ipynb | ###Markdown
Project 4 Hack-a-thon Problem Statement:
###Code
#Imports
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split, GridSearchCV, cross_val_score
from sklearn.svm import SVC
from sklearn.tree import ExtraTreeClassifier, DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import confusion_matrix, classification_report, f1_score
from sklearn.neighbors import KNeighborsClassifier
df = pd.read_csv('./data/cheap_train_sample.csv')
#read in test_data.csv
test = pd.read_csv('./data/test_data.csv')
df.info()
df.describe()
#Changes representation of Unknown values.
df.replace(to_replace= ' ?', value='other', inplace=True)
sns.heatmap(df.corr(), cmap='coolwarm')
#clean object features to be numeric.
#A lot of features for a limited number of unique data points.
df.workclass.value_counts()
# Good spread of values representing the data; however, we will use the entire feature allotment.
#Already have numerical column
df.education.value_counts()
df['marital-status'].value_counts()
df['occupation'].value_counts()
df['relationship'].value_counts()
df['sex'].value_counts()
df['native-country'].value_counts()
df = pd.get_dummies(df, columns=['occupation','relationship', 'sex', 'marital-status', 'workclass', 'native-country'],
drop_first=True)
test = pd.get_dummies(test, columns=['occupation','relationship', 'sex', 'marital-status', 'workclass', 'native-country'],
drop_first=True)
print(f'Number of Columns in Training data: ',len(df.columns))
print(f'Number of Columns in Testing data: ', len(test.columns))
#Note: the training and testing data now have the same number of columns.
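# Hedged aside (my addition, not in the original notebook): beyond comparing
# counts, a quick sanity check that the dummy columns actually line up.
# 'wage' is excluded because it only exists in the training frame.
cols_only_in_train = set(df.columns) - set(test.columns) - {'wage'}
cols_only_in_test = set(test.columns) - set(df.columns)
print('Columns only in train:', cols_only_in_train)
print('Columns only in test:', cols_only_in_test)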
df.wage.value_counts()
#Encode the target: ' <=50K' becomes 0, anything above 50K becomes 1.
df['wage'] = [0 if wage == ' <=50K' else 1 for wage in df.wage]
df.wage.value_counts()
X = df.drop(columns=['wage', 'education'])
features = X.columns
y = df['wage']
X_train, X_val, y_train, y_val = train_test_split(X,y, random_state=420)
#Baseline score
guess = [1 if pred == 1 else 0 for pred in y]
sum(guess)/len(guess)
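# Hedged aside (my addition): the same baseline is simply the class balance,
# which pandas can report directly.
y.value_counts(normalize=True)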
rfc = RandomForestClassifier()
cross_val_score(rfc, X,y, cv=5).mean()
svc = SVC()
cross_val_score(svc,X,y,cv= 5).mean()
boost = AdaBoostClassifier()
cross_val_score(boost,X,y,cv=5).mean()
logr= LogisticRegression()
cross_val_score(logr,X,y,cv=5).mean()
tree = DecisionTreeClassifier()
cross_val_score(tree, X,y,cv=5).mean()
x_trees = ExtraTreeClassifier()
cross_val_score(x_trees, X,y,cv= 5).mean()
knn = KNeighborsClassifier()
pipe = Pipeline([('scaler', StandardScaler()), ('knn', KNeighborsClassifier())])
pipe.fit(X_train, y_train)
print(f'Training Score: ', pipe.score(X_train,y_train))
print(f'Validation Score: ', pipe.score(X_val, y_val))
predictions = pipe.predict(X_val)
print(confusion_matrix(y_val, predictions))
print(classification_report(y_val, predictions))
pipe = Pipeline([('scaler', StandardScaler()),('boost', AdaBoostClassifier())])
pipe.fit(X_train, y_train)
print(f'Training Score: ', pipe.score(X_train,y_train))
print(f'Validation Score: ', pipe.score(X_val, y_val))
predictions = pipe.predict(X_val)
print(confusion_matrix(y_val, predictions))
print(classification_report(y_val, predictions))
boost = AdaBoostClassifier()
boost.fit(X_train,y_train)
values = boost.feature_importances_
feat = pd.DataFrame(features)
feat['values'] = values
feat.sort_values(by= 'values', ascending=False).tail(20)
boost.estimators_
#weight features and adjust hyperparameters.
#what if for feature with an importance below .1 we multiply the
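# Hedged sketch (my addition) of one possible follow-up to the unfinished idea
# above: keep only the features whose AdaBoost importance clears a small
# threshold. The 0.01 cutoff is an illustrative choice, not from the original.
important_cols = feat.loc[feat['values'] > 0.01, 0].tolist()
X_reduced = X[important_cols]
print('Features kept:', len(important_cols), 'of', X.shape[1])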
estimators = np.random.randint(10,300,25)
params = dict(n_estimators= estimators)
grid = GridSearchCV(AdaBoostClassifier(), param_grid=params, n_jobs=-1, cv= 5, verbose= 1)
grid.fit(X_train,y_train)
print(f'Training Score: ', grid.score(X_train,y_train))
print(f'Validation Score: ', grid.score(X_val,y_val))
print(grid.best_params_)
print(grid.best_score_)
l_rate = [0.1291549665014884] #np.logspace(-2,-0.8,100)
estimators = [131] #np.random.randint(100,200,5)
criterion = ['mse'] #('friedman_mse', 'mse', 'mae')
max_features = [None] # [None, 'sqrt', 'log2']
params = dict(n_estimators= estimators,
learning_rate= l_rate,
criterion= criterion,
max_features= max_features)
grid = GridSearchCV(GradientBoostingClassifier(), param_grid=params, n_jobs=-1, cv= 5, verbose= 1)
grid.fit(X_train,y_train)
print(f'Training Score: ', grid.score(X_train,y_train))
print(f'Validation Score: ', grid.score(X_val,y_val))
print(grid.best_params_)
print(grid.best_score_)
best_params = grid.best_params_
#Need to wrap each best_params value in a list so it can be reused as a GridSearchCV param_grid.
best_params = {key: [value] for key, value in best_params.items()}
###Output
_____no_output_____
###Markdown
Making the Predictions
###Code
X_test = test.drop(columns=['education'])
test_grid = GridSearchCV(GradientBoostingClassifier(), param_grid= best_params, n_jobs=-1, cv=5, verbose= 1)
test_grid.fit(X,y)
pred = test_grid.predict(X_test)
submit = pd.DataFrame(pred, columns=['wage'])
submit.to_csv('submission.csv', index=False)
###Output
Fitting 5 folds for each of 1 candidates, totalling 5 fits
Fitting 5 folds for each of 1 candidates, totalling 5 fits
Fitting 5 folds for each of 1 candidates, totalling 5 fits
Fitting 5 folds for each of 1 candidates, totalling 5 fits
Fitting 5 folds for each of 1 candidates, totalling 5 fits
Fitting 5 folds for each of 1 candidates, totalling 5 fits
0.8592027378624065
|
T2/T1_remainder.ipynb | ###Markdown
Introduction. Ronen Nir. Email: [email protected]. I develop AI tools for Multi-Agent Coordination. => It just means that I can play with robots every day (want to join us? ask me how) Quick Reminder
###Code
a = 5
b = 'Ronen'
def triple(x):
y = 3*x
return y
###Output
_____no_output_____
###Markdown
What objects do we have now? Functions are polymorphic
###Code
a = 5
b = 'Python_Is_Cool!\n'
def triple(x):
y = 3*x
return y
c = triple(a)
d = triple(b)
print(c)
print()
print(d)
###Output
15
Python_Is_Cool!
Python_Is_Cool!
Python_Is_Cool!
|
archive_nbs/ExploreAndUnderstandOptimWrappers.ipynb | ###Markdown
Table of Contents1 Copy tests from fastai2 notebook 12_optimizer.ipynb
###Code
from fastai2.vision.all import *
torch.optim.SGD?
###Output
_____no_output_____
###Markdown
Copy tests from fastai2 notebook 12_optimizer.ipynb
###Code
tst_sgd = OptimWrapper(torch.optim.SGD([{'params': [tensor([1,2,3])], 'lr': 1e-3},
{'params': [tensor([4,5,6])], 'lr': 1e-2}], momentum=0.9, weight_decay=1e-2))
tst_sgd.hypers
L(tst_sgd.hypers)
class OptFuncWrapper:
def __init__(self, f):
self.f = f
def __call__(self, *args, **kwargs):
opt = self.f(*args, **kwargs)
optim_wrapper = OptimWrapper(opt)
return optim_wrapper
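# Minimal usage sketch (my addition, not from the original notebook): wrap the
# torch SGD factory so that calling it returns an OptimWrapper directly.
wrapped_sgd = OptFuncWrapper(torch.optim.SGD)
opt = wrapped_sgd([tensor([1., 2., 3.])], lr=1e-3, momentum=0.9)
opt.hypers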
SGD
torch.optim.SGD
sgd = SGD([[tensor([1,2,3])], [tensor([4,5,6])]], lr=[1e-3, 1e-2], mom=0.9, wd=1e-2)
#Access to param_groups
param1 = [tensor([1,2,3]),tensor([-1,-2,-3])]
param2 = [tensor([4,5,6]),tensor([-4,-5,-6])]
params = [param1,param2]
my_sgd = SGD(params=params,lr=[3e-3,2e-2], mom=0.7,wd=5e-3)
my_sgd.hypers
torch_sgd = torch.optim.SGD(params=params[0],lr=3e-3,momentum=0.7,weight_decay=5e-3)
my_torch_sgd = OptimWrapper(torch_sgd)
my_torch_sgd.hypers
torch_sgd2 = torch.optim.SGD([{'params':params[0],'lr':3e-3},
{'params':params[1],'lr':2e-2}], momentum=0.7,weight_decay=5e-3)
my_torch_sgd2 = OptimWrapper(torch_sgd2)
my_torch_sgd2.hypers
!pip freeze | grep fastai_xla_extensions
def my_dumb_step(opt):
print('this is my dumb step')
opt.step()
class MyDumbOptimProxy:
def __init__(self,opt):
self.opt = opt
def my_own_step(self):
my_dumb_step(self.opt)
def __getattr__(self,name):
if name == 'step':
return getattr(self,'my_own_step')
return getattr(self.opt,name)
sgd.step()
mysgd_proxy = MyDumbOptimProxy(sgd)
mysgd_proxy.hypers
mysgd_proxy.step()
test_eq(tst_sgd.param_lists, sgd.param_lists)
#Set param_groups
tst_sgd.param_lists = [[tensor([4,5,6])], [tensor([1,2,3])]]
test_eq(tst_sgd.opt.param_groups[0]['params'], [tensor(4,5,6)])
test_eq(tst_sgd.opt.param_groups[1]['params'], [tensor(1,2,3)])
#Access to hypers
test_eq(tst_sgd.hypers, [{**sgd.hypers[i], 'dampening': 0., 'nesterov': False} for i in range(2)])
#Set hypers
tst_sgd.set_hyper('mom', 0.95)
test_eq([pg['momentum'] for pg in tst_sgd.opt.param_groups], [0.95,0.95])
tst_sgd.set_hyper('lr', [1e-4,1e-3])
test_eq([pg['lr'] for pg in tst_sgd.opt.param_groups], [1e-4,1e-3])
###Output
_____no_output_____ |
brian-tutorials/2-intro-to-brian-synapses.ipynb | ###Markdown
Introduction to Brian part 2: Synapses If you haven't yet read part 1: Neurons, go read that now. As before we start by importing the Brian package and setting up matplotlib for IPython:
###Code
from brian2 import *
%matplotlib inline
###Output
_____no_output_____
###Markdown
The simplest Synapse. Once you have some neurons, the next step is to connect them up via synapses. We'll start out with doing the simplest possible type of synapse that causes an instantaneous change in a variable after a spike.
###Code
start_scope()
eqs = '''
dv/dt = (I-v)/tau : 1
I : 1
tau : second
'''
G = NeuronGroup(2, eqs, threshold='v>1', reset='v = 0', method='exact')
G.I = [2, 0]
G.tau = [10, 100]*ms
# Comment these two lines out to see what happens without Synapses
S = Synapses(G, G, on_pre='v_post += 0.2')
S.connect(i=0, j=1)
M = StateMonitor(G, 'v', record=True)
run(100*ms)
plot(M.t/ms, M.v[0], label='Neuron 0')
plot(M.t/ms, M.v[1], label='Neuron 1')
xlabel('Time (ms)')
ylabel('v')
legend();
###Output
_____no_output_____
###Markdown
There are a few things going on here. First of all, let's recap what is going on with the ``NeuronGroup``. We've created two neurons, each of which has the same differential equation but different values for parameters I and tau. Neuron 0 has ``I=2`` and ``tau=10*ms`` which means that it is driven to repeatedly spike at a fairly high rate. Neuron 1 has ``I=0`` and ``tau=100*ms`` which means that on its own - without the synapses - it won't spike at all (the driving current I is 0). You can prove this to yourself by commenting out the two lines that define the synapse. Next we define the synapses: ``Synapses(source, target, ...)`` means that we are defining a synaptic model that goes from ``source`` to ``target``. In this case, the source and target are both the same, the group ``G``. The syntax ``on_pre='v_post += 0.2'`` means that when a spike occurs in the presynaptic neuron (hence ``on_pre``) it causes an instantaneous change to happen ``v_post += 0.2``. The ``_post`` means that the value of ``v`` referred to is the post-synaptic value, and it is increased by 0.2. So in total, what this model says is that whenever two neurons in G are connected by a synapse, when the source neuron fires a spike the target neuron will have its value of ``v`` increased by 0.2. However, at this point we have only defined the synapse model, we haven't actually created any synapses. The next line ``S.connect(i=0, j=1)`` creates a synapse from neuron 0 to neuron 1. Adding a weight. In the previous section, we hard coded the weight of the synapse to be the value 0.2, but often we would like to allow this to be different for different synapses. We do that by introducing synapse equations.
###Code
start_scope()
eqs = '''
dv/dt = (I-v)/tau : 1
I : 1
tau : second
'''
G = NeuronGroup(3, eqs, threshold='v>1', reset='v = 0', method='exact')
G.I = [2, 0, 0]
G.tau = [10, 100, 100]*ms
# Comment these two lines out to see what happens without Synapses
S = Synapses(G, G, 'w : 1', on_pre='v_post += w')
S.connect(i=0, j=[1, 2])
S.w = 'j*0.2'
M = StateMonitor(G, 'v', record=True)
run(50*ms)
plot(M.t/ms, M.v[0], label='Neuron 0')
plot(M.t/ms, M.v[1], label='Neuron 1')
plot(M.t/ms, M.v[2], label='Neuron 2')
xlabel('Time (ms)')
ylabel('v')
legend();
###Output
_____no_output_____
###Markdown
This example behaves very similarly to the previous example, but now there's a synaptic weight variable ``w``. The string ``'w : 1'`` is an equation string, precisely the same as for neurons, that defines a single dimensionless parameter ``w``. We changed the behaviour on a spike to ``on_pre='v_post += w'`` now, so that each synapse can behave differently depending on the value of ``w``. To illustrate this, we've made a third neuron which behaves precisely the same as the second neuron, and connected neuron 0 to both neurons 1 and 2. We've also set the weights via ``S.w = 'j*0.2'``. When ``i`` and ``j`` occur in the context of synapses, ``i`` refers to the source neuron index, and ``j`` to the target neuron index. So this will give a synaptic connection from 0 to 1 with weight ``0.2=0.2*1`` and from 0 to 2 with weight ``0.4=0.2*2``. Introducing a delay. So far, the synapses have been instantaneous, but we can also make them act with a certain delay.
###Code
start_scope()
eqs = '''
dv/dt = (I-v)/tau : 1
I : 1
tau : second
'''
G = NeuronGroup(3, eqs, threshold='v>1', reset='v = 0', method='exact')
G.I = [2, 0, 0]
G.tau = [10, 100, 100]*ms
S = Synapses(G, G, 'w : 1', on_pre='v_post += w')
S.connect(i=0, j=[1, 2])
S.w = 'j*0.2'
S.delay = 'j*2*ms'
M = StateMonitor(G, 'v', record=True)
run(50*ms)
plot(M.t/ms, M.v[0], label='Neuron 0')
plot(M.t/ms, M.v[1], label='Neuron 1')
plot(M.t/ms, M.v[2], label='Neuron 2')
xlabel('Time (ms)')
ylabel('v')
legend();
###Output
_____no_output_____
###Markdown
As you can see, that's as simple as adding a line ``S.delay = 'j*2*ms'`` so that the synapse from 0 to 1 has a delay of 2 ms, and from 0 to 2 has a delay of 4 ms. More complex connectivity. So far, we specified the synaptic connectivity explicitly, but for larger networks this isn't usually possible. For that, we usually want to specify some condition.
###Code
start_scope()
N = 10
G = NeuronGroup(N, 'v:1')
S = Synapses(G, G)
S.connect(condition='i!=j', p=0.2)
###Output
_____no_output_____
###Markdown
Here we've created a dummy neuron group of N neurons and a dummy synapse model that doesn't actually do anything, just to demonstrate the connectivity. The line ``S.connect(condition='i!=j', p=0.2)`` will connect all pairs of neurons ``i`` and ``j`` with probability 0.2 as long as the condition ``i!=j`` holds. So, how can we see that connectivity? Here's a little function that will let us visualise it.
###Code
def visualise_connectivity(S):
Ns = len(S.source)
Nt = len(S.target)
figure(figsize=(10, 4))
subplot(121)
plot(zeros(Ns), arange(Ns), 'ok', ms=10)
plot(ones(Nt), arange(Nt), 'ok', ms=10)
for i, j in zip(S.i, S.j):
plot([0, 1], [i, j], '-k')
xticks([0, 1], ['Source', 'Target'])
ylabel('Neuron index')
xlim(-0.1, 1.1)
ylim(-1, max(Ns, Nt))
subplot(122)
plot(S.i, S.j, 'ok')
xlim(-1, Ns)
ylim(-1, Nt)
xlabel('Source neuron index')
ylabel('Target neuron index')
visualise_connectivity(S)
###Output
_____no_output_____
###Markdown
There are two plots here. On the left hand side, you see a vertical line of circles indicating source neurons on the left, and a vertical line indicating target neurons on the right, and a line between two neurons that have a synapse. On the right hand side is another way of visualising the same thing. Here each black dot is a synapse, with x value the source neuron index, and y value the target neuron index. Let's see how these figures change as we change the probability of a connection:
###Code
start_scope()
N = 10
G = NeuronGroup(N, 'v:1')
for p in [0.1, 0.5, 1.0]:
S = Synapses(G, G)
S.connect(condition='i!=j', p=p)
visualise_connectivity(S)
suptitle('p = '+str(p))
###Output
_____no_output_____
###Markdown
And let's see what another connectivity condition looks like. This one will only connect neighbouring neurons.
###Code
start_scope()
N = 10
G = NeuronGroup(N, 'v:1')
S = Synapses(G, G)
S.connect(condition='abs(i-j)<4 and i!=j')
visualise_connectivity(S)
###Output
_____no_output_____
###Markdown
Try using that cell to see what other connectivity conditions look like. You can also use the generator syntax to create connections like this more efficiently. In small examples like this, it doesn't matter, but for large numbers of neurons it can be much more efficient to specify directly which neurons should be connected than to specify just a condition. Note that the following example uses `skip_if_invalid` to avoid errors at the boundaries (e.g. do not try to connect the neuron with index 1 to a neuron with index -2).
###Code
start_scope()
N = 10
G = NeuronGroup(N, 'v:1')
S = Synapses(G, G)
S.connect(j='k for k in range(i-3, i+4) if i!=k', skip_if_invalid=True)
visualise_connectivity(S)
###Output
_____no_output_____
###Markdown
If each source neuron is connected to precisely one target neuron (which would be normally used with two separate groups of the same size, not with identical source and target groups as in this example), there is a special syntax that is extremely efficient. For example, 1-to-1 connectivity looks like this:
###Code
start_scope()
N = 10
G = NeuronGroup(N, 'v:1')
S = Synapses(G, G)
S.connect(j='i')
visualise_connectivity(S)
###Output
_____no_output_____
###Markdown
You can also do things like specifying the value of weights with a string. Let's see an example where we assign each neuron a spatial location and have a distance-dependent connectivity function. We visualise the weight of a synapse by the size of the marker.
###Code
start_scope()
N = 30
neuron_spacing = 50*umetre
width = N/4.0*neuron_spacing
# Neuron has one variable x, its position
G = NeuronGroup(N, 'x : metre')
G.x = 'i*neuron_spacing'
# All synapses are connected (excluding self-connections)
S = Synapses(G, G, 'w : 1')
S.connect(condition='i!=j')
# Weight varies with distance
S.w = 'exp(-(x_pre-x_post)**2/(2*width**2))'
scatter(S.x_pre/um, S.x_post/um, S.w*20)
xlabel('Source neuron position (um)')
ylabel('Target neuron position (um)');
###Output
_____no_output_____
###Markdown
Now try changing that function and seeing how the plot changes. More complex synapse models: STDP. Brian's synapse framework is very general and can do things like short-term plasticity (STP) or spike-timing dependent plasticity (STDP). Let's see how that works for STDP. STDP is normally defined by an equation something like this:$$\Delta w = \sum_{t_{pre}} \sum_{t_{post}} W(t_{post}-t_{pre})$$That is, the change in synaptic weight w is the sum over all presynaptic spike times $t_{pre}$ and postsynaptic spike times $t_{post}$ of some function $W$ of the difference in these spike times. A commonly used function $W$ is:$$W(\Delta t) = \begin{cases}A_{pre} e^{-\Delta t/\tau_{pre}} & \Delta t>0 \\A_{post} e^{\Delta t/\tau_{post}} & \Delta t<0\end{cases}$$This function looks like this:
###Code
tau_pre = tau_post = 20*ms
A_pre = 0.01
A_post = -A_pre*1.05
delta_t = linspace(-50, 50, 100)*ms
W = where(delta_t>0, A_pre*exp(-delta_t/tau_pre), A_post*exp(delta_t/tau_post))
plot(delta_t/ms, W)
xlabel(r'$\Delta t$ (ms)')
ylabel('W')
axhline(0, ls='-', c='k');
###Output
_____no_output_____
###Markdown
Simulating it directly using this equation though would be very inefficient, because we would have to sum over all pairs of spikes. That would also be physiologically unrealistic because the neuron cannot remember all its previous spike times. It turns out there is a more efficient and physiologically more plausible way to get the same effect. We define two new variables $a_{pre}$ and $a_{post}$ which are "traces" of pre- and post-synaptic activity, governed by the differential equations:$$\begin{align}\tau_{pre}\frac{\mathrm{d}}{\mathrm{d}t} a_{pre} &= -a_{pre}\\\tau_{post}\frac{\mathrm{d}}{\mathrm{d}t} a_{post} &= -a_{post}\end{align}$$When a presynaptic spike occurs, the presynaptic trace is updated and the weight is modified according to the rule:$$\begin{align}a_{pre} &\rightarrow a_{pre}+A_{pre}\\w &\rightarrow w+a_{post}\end{align}$$When a postsynaptic spike occurs:$$\begin{align}a_{post} &\rightarrow a_{post}+A_{post}\\w &\rightarrow w+a_{pre}\end{align}$$To see that this formulation is equivalent, you just have to check that the equations sum linearly, and consider two cases: what happens if the presynaptic spike occurs before the postsynaptic spike, and vice versa. Try drawing a picture of it. Now that we have a formulation that relies only on differential equations and spike events, we can turn that into Brian code.
###Code
start_scope()
taupre = taupost = 20*ms
wmax = 0.01
Apre = 0.01
Apost = -Apre*taupre/taupost*1.05
G = NeuronGroup(1, 'v:1', threshold='v>1')
S = Synapses(G, G,
'''
w : 1
dapre/dt = -apre/taupre : 1 (event-driven)
dapost/dt = -apost/taupost : 1 (event-driven)
''',
on_pre='''
v_post += w
apre += Apre
w = clip(w+apost, 0, wmax)
''',
on_post='''
apost += Apost
w = clip(w+apre, 0, wmax)
''')
###Output
_____no_output_____
###Markdown
There are a few things to see there. Firstly, when defining the synapses we've given a more complicated multi-line string defining three synaptic variables (``w``, ``apre`` and ``apost``). We've also got a new bit of syntax there, ``(event-driven)`` after the definitions of ``apre`` and ``apost``. What this means is that although these two variables evolve continuously over time, Brian should only update them at the time of an event (a spike). This is because we don't need the values of ``apre`` and ``apost`` except at spike times, and it is more efficient to only update them when needed. Next we have an ``on_pre=...`` argument. The first line is ``v_post += w``: this is the line that actually applies the synaptic weight to the target neuron. The second line is ``apre += Apre`` which encodes the rule above. In the third line, we're also encoding the rule above but we've added one extra feature: we've clamped the synaptic weights between a minimum of 0 and a maximum of ``wmax`` so that the weights can't get too large or negative. The function ``clip(x, low, high)`` does this. Finally, we have an ``on_post=...`` argument. This gives the statements to calculate when a post-synaptic neuron fires. Note that we do not modify ``v`` in this case, only the synaptic variables. Now let's see how all the variables behave when a presynaptic spike arrives some time before a postsynaptic spike.
###Code
start_scope()
taupre = taupost = 20*ms
wmax = 0.01
Apre = 0.01
Apost = -Apre*taupre/taupost*1.05
G = NeuronGroup(2, 'v:1', threshold='t>(1+i)*10*ms', refractory=100*ms)
S = Synapses(G, G,
'''
w : 1
dapre/dt = -apre/taupre : 1 (clock-driven)
dapost/dt = -apost/taupost : 1 (clock-driven)
''',
on_pre='''
v_post += w
apre += Apre
w = clip(w+apost, 0, wmax)
''',
on_post='''
apost += Apost
w = clip(w+apre, 0, wmax)
''', method='linear')
S.connect(i=0, j=1)
M = StateMonitor(S, ['w', 'apre', 'apost'], record=True)
run(30*ms)
figure(figsize=(4, 8))
subplot(211)
plot(M.t/ms, M.apre[0], label='apre')
plot(M.t/ms, M.apost[0], label='apost')
legend()
subplot(212)
plot(M.t/ms, M.w[0], label='w')
legend(loc='best')
xlabel('Time (ms)');
###Output
_____no_output_____
###Markdown
A couple of things to note here. First of all, we've used a trick to make neuron 0 fire a spike at time 10 ms, and neuron 1 at time 20 ms. Can you see how that works? Secondly, we've replaced the ``(event-driven)`` by ``(clock-driven)`` so you can see how ``apre`` and ``apost`` evolve over time. Try reverting this change and see what happens. Try changing the times of the spikes to see what happens. Finally, let's verify that this formulation is equivalent to the original one.
###Code
start_scope()
taupre = taupost = 20*ms
Apre = 0.01
Apost = -Apre*taupre/taupost*1.05
tmax = 50*ms
N = 100
# Presynaptic neurons G spike at times from 0 to tmax
# Postsynaptic neurons G spike at times from tmax to 0
# So difference in spike times will vary from -tmax to +tmax
G = NeuronGroup(N, 'tspike:second', threshold='t>tspike', refractory=100*ms)
H = NeuronGroup(N, 'tspike:second', threshold='t>tspike', refractory=100*ms)
G.tspike = 'i*tmax/(N-1)'
H.tspike = '(N-1-i)*tmax/(N-1)'
S = Synapses(G, H,
'''
w : 1
dapre/dt = -apre/taupre : 1 (event-driven)
dapost/dt = -apost/taupost : 1 (event-driven)
''',
on_pre='''
apre += Apre
w = w+apost
''',
on_post='''
apost += Apost
w = w+apre
''')
S.connect(j='i')
run(tmax+1*ms)
plot((H.tspike-G.tspike)/ms, S.w)
xlabel(r'$\Delta t$ (ms)')
ylabel(r'$\Delta w$')
axhline(0, ls='-', c='k');
###Output
_____no_output_____ |
ImageDetection.ipynb | ###Markdown
**Install ngrok package**
###Code
!pip install flask_ngrok
###Output
Collecting flask_ngrok
Downloading https://files.pythonhosted.org/packages/af/6c/f54cb686ad1129e27d125d182f90f52b32f284e6c8df58c1bae54fa1adbc/flask_ngrok-0.0.25-py3-none-any.whl
Requirement already satisfied: requests in /usr/local/lib/python3.6/dist-packages (from flask_ngrok) (2.23.0)
Requirement already satisfied: Flask>=0.8 in /usr/local/lib/python3.6/dist-packages (from flask_ngrok) (1.1.2)
Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.6/dist-packages (from requests->flask_ngrok) (2020.6.20)
Requirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.6/dist-packages (from requests->flask_ngrok) (2.9)
Requirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.6/dist-packages (from requests->flask_ngrok) (3.0.4)
Requirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.6/dist-packages (from requests->flask_ngrok) (1.24.3)
Requirement already satisfied: itsdangerous>=0.24 in /usr/local/lib/python3.6/dist-packages (from Flask>=0.8->flask_ngrok) (1.1.0)
Requirement already satisfied: Werkzeug>=0.15 in /usr/local/lib/python3.6/dist-packages (from Flask>=0.8->flask_ngrok) (1.0.1)
Requirement already satisfied: click>=5.1 in /usr/local/lib/python3.6/dist-packages (from Flask>=0.8->flask_ngrok) (7.1.2)
Requirement already satisfied: Jinja2>=2.10.1 in /usr/local/lib/python3.6/dist-packages (from Flask>=0.8->flask_ngrok) (2.11.2)
Requirement already satisfied: MarkupSafe>=0.23 in /usr/local/lib/python3.6/dist-packages (from Jinja2>=2.10.1->Flask>=0.8->flask_ngrok) (1.1.1)
Installing collected packages: flask-ngrok
Successfully installed flask-ngrok-0.0.25
###Markdown
###Code
!unzip Image_Detection.zip
%cd /content/Image_Detection
!python app1.py
###Output
2020-07-05 14:38:30.007543: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcudart.so.10.1
2020-07-05 14:38:31.877302: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcuda.so.1
2020-07-05 14:38:31.933930: E tensorflow/stream_executor/cuda/cuda_driver.cc:313] failed call to cuInit: CUDA_ERROR_NO_DEVICE: no CUDA-capable device is detected
2020-07-05 14:38:31.934003: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:156] kernel driver does not appear to be running on this host (42961f7f1e5e): /proc/driver/nvidia/version does not exist
2020-07-05 14:38:31.934380: I tensorflow/core/platform/cpu_feature_guard.cc:143] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX512F
2020-07-05 14:38:31.963676: I tensorflow/core/platform/profile_utils/cpu_utils.cc:102] CPU Frequency: 2000155000 Hz
2020-07-05 14:38:31.963971: I tensorflow/compiler/xla/service/service.cc:168] XLA service 0x2470bc0 initialized for platform Host (this does not guarantee that XLA will be used). Devices:
2020-07-05 14:38:31.964018: I tensorflow/compiler/xla/service/service.cc:176] StreamExecutor device (0): Host, Default Version
Downloading data from https://storage.googleapis.com/tensorflow/keras-applications/resnet/resnet50_weights_tf_dim_ordering_tf_kernels.h5
102973440/102967424 [==============================] - 1s 0us/step
* Serving Flask app "app1" (lazy loading)
* Environment: production
[31m WARNING: This is a development server. Do not use it in a production deployment.[0m
[2m Use a production WSGI server instead.[0m
* Debug mode: off
* Running on http://127.0.0.1:5000/ (Press CTRL+C to quit)
* Running on http://2fe2dc80121d.ngrok.io
* Traffic stats available on http://127.0.0.1:4040
127.0.0.1 - - [05/Jul/2020 14:38:48] "[37mGET / HTTP/1.1[0m" 200 -
127.0.0.1 - - [05/Jul/2020 14:38:49] "[33mGET /favicon.ico HTTP/1.1[0m" 404 -
127.0.0.1 - - [05/Jul/2020 14:39:21] "[32mPOST / HTTP/1.1[0m" 302 -
Downloading data from https://storage.googleapis.com/download.tensorflow.org/data/imagenet_class_index.json
40960/35363 [==================================] - 0s 0us/step
('n04467665', 'trailer_truck', 0.07856209)
127.0.0.1 - - [05/Jul/2020 14:39:22] "[37mGET /show/download_20200705143831.jpg HTTP/1.1[0m" 200 -
127.0.0.1 - - [05/Jul/2020 14:39:23] "[37mGET /uploads/download_20200705143831.jpg HTTP/1.1[0m" 200 -
127.0.0.1 - - [05/Jul/2020 14:39:29] "[37mGET / HTTP/1.1[0m" 200 -
127.0.0.1 - - [05/Jul/2020 14:40:46] "[32mPOST / HTTP/1.1[0m" 302 -
('n07768694', 'pomegranate', 0.29960626)
127.0.0.1 - - [05/Jul/2020 14:40:47] "[37mGET /show/apl_20200705143831.jpeg HTTP/1.1[0m" 200 -
127.0.0.1 - - [05/Jul/2020 14:40:48] "[37mGET /uploads/apl_20200705143831.jpeg HTTP/1.1[0m" 200 -
127.0.0.1 - - [05/Jul/2020 14:40:52] "[37mGET / HTTP/1.1[0m" 200 -
127.0.0.1 - - [05/Jul/2020 14:41:27] "[32mPOST / HTTP/1.1[0m" 302 -
('n04120489', 'running_shoe', 0.98044246)
127.0.0.1 - - [05/Jul/2020 14:41:28] "[37mGET /show/nike_20200705143831.jpg HTTP/1.1[0m" 200 -
127.0.0.1 - - [05/Jul/2020 14:41:29] "[37mGET /uploads/nike_20200705143831.jpg HTTP/1.1[0m" 200 -
127.0.0.1 - - [05/Jul/2020 14:41:36] "[37mGET / HTTP/1.1[0m" 200 -
^C
###Markdown
###Code
%cd /content/
!zip -r proj.zip /content/Image_Detection
###Output
adding: content/Image_Detection/ (stored 0%)
adding: content/Image_Detection/app1.py (deflated 58%)
adding: content/Image_Detection/app.py (deflated 56%)
adding: content/Image_Detection/.ipynb_checkpoints/ (stored 0%)
adding: content/Image_Detection/uploads/ (stored 0%)
adding: content/Image_Detection/uploads/.ipynb_checkpoints/ (stored 0%)
adding: content/Image_Detection/templates/ (stored 0%)
adding: content/Image_Detection/templates/upload.html (deflated 44%)
adding: content/Image_Detection/templates/template.html (deflated 32%)
adding: content/Image_Detection/templates/.ipynb_checkpoints/ (stored 0%)
adding: content/Image_Detection/.zip (stored 0%)
|
Data from external source/unit-1-reading-data-with-python-and-pandas/lesson-11-reading-data-from-relational-databases/files/Lecture.ipynb | ###Markdown
<img src="https://user-images.githubusercontent.com/7065401/68501079-0695df00-023c-11ea-841f-455dac84a089.jpg" style="width:400px; float: right; margin: 0 40px 40px 40px;"> Reading data from relational databases. In this lesson you will learn how to read SQL queries and relational database tables into `DataFrame` objects using pandas. Also, we'll take a look at different techniques to persist those pandas `DataFrame` objects to database tables.  Hands on!
###Code
!pip install sqlalchemy
import pandas as pd
###Output
_____no_output_____
###Markdown
Read data from SQL database. Reading data from SQL relational databases is fairly simple and pandas supports a variety of methods to deal with it. We'll start with an example using SQLite, as it's a builtin Python package, and we don't need anything extra installed.
###Code
import sqlite3
###Output
_____no_output_____
###Markdown
In order to work with a SQLite database from Python, we first have to connect to it. We can do that using the connect function, which returns a `Connection` object. We'll use [this example database](http://www.sqlitetutorial.net/sqlite-sample-database/).
###Code
conn = sqlite3.connect('chinook.db')
###Output
_____no_output_____
###Markdown
Once we have a `Connection` object, we can then create a `Cursor` object. Cursors allow us to execute SQL queries against a database:
###Code
cur = conn.cursor()
###Output
_____no_output_____
###Markdown
The `Cursor` created has a method `execute`, which receives a SQL query to run against the database. The code below will fetch the first `5` rows from the `employees` table:
###Code
cur.execute('SELECT * FROM employees LIMIT 5;')
###Output
_____no_output_____
###Markdown
You may have noticed that we didn't assign the result of the above query to a variable. This is because we need to run another command to actually fetch the results. We can use the `fetchall` method to fetch all of the results of a query:
###Code
results = cur.fetchall()
results
###Output
_____no_output_____
###Markdown
As you can see, the results are returned as a list of tuples. Each tuple corresponds to a row in the database that we accessed. Dealing with data this way is painful. We'd need to manually add column headers, and manually parse the data. Luckily, the pandas library has an easier way, which we'll look at in the next section.
###Code
df = pd.DataFrame(results)
df.head()
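# Hedged aside (my addition): the raw DB-API cursor does expose column names via
# `cur.description`, so the "manual" route would look roughly like this.
column_names = [description[0] for description in cur.description]
df_named = pd.DataFrame(results, columns=column_names)
df_named.head()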
###Output
_____no_output_____
###Markdown
Before we move on, it's good practice to close `Connection` objects and `Cursor` objects that are open. This prevents the SQLite database from being locked. When a SQLite database is locked, you may be unable to update the database, and may get errors. We can close the Cursor and the Connection like this:
###Code
cur.close()
conn.close()
###Output
_____no_output_____
###Markdown
Using pandas `read_sql` method. We can use the pandas `read_sql` function to read the results of a SQL query directly into a pandas `DataFrame`. The code below will execute the same query that we just did, but it will return a `DataFrame`. It has several advantages over the query we did above: - It doesn't require us to create a `Cursor` object or call `fetchall` at the end. - It automatically reads in the names of the headers from the table. - It creates a `DataFrame`, so we can quickly explore the data.
###Code
conn = sqlite3.connect('chinook.db')
df = pd.read_sql('SELECT * FROM employees;', conn)
df.head()
df = pd.read_sql('SELECT * FROM employees;', conn,
index_col='EmployeeId',
parse_dates=['BirthDate', 'HireDate'])
df.head()
df.info()
df['ReportsTo'].isna().sum()
df['ReportsTo'].mean()
df['ReportsTo'] > 1.75
df['City'] = df['City'].astype('category')
df.info()
###Output
<class 'pandas.core.frame.DataFrame'>
Int64Index: 8 entries, 1 to 8
Data columns (total 14 columns):
LastName 8 non-null object
FirstName 8 non-null object
Title 8 non-null object
ReportsTo 7 non-null float64
BirthDate 8 non-null datetime64[ns]
HireDate 8 non-null datetime64[ns]
Address 8 non-null object
City 8 non-null category
State 8 non-null object
Country 8 non-null object
PostalCode 8 non-null object
Phone 8 non-null object
Fax 8 non-null object
Email 8 non-null object
dtypes: category(1), datetime64[ns](2), float64(1), object(10)
memory usage: 1008.0+ bytes
###Markdown
Using pandas `read_sql_query` method. It turns out that the `read_sql` method we saw above is just a wrapper around `read_sql_query` and `read_sql_table`. We can get the same result using the `read_sql_query` method:
###Code
conn = sqlite3.connect('chinook.db')
df = pd.read_sql_query('SELECT * FROM employees LIMIT 5;', conn)
df.head()
df = pd.read_sql_query('SELECT * FROM employees;', conn,
index_col='EmployeeId',
parse_dates=['BirthDate', 'HireDate'])
df.head()
###Output
_____no_output_____
###Markdown
Using `read_sql_table` method. `read_sql_table` is a useful function, but it works only with [SQLAlchemy](https://www.sqlalchemy.org/), a Python SQL Toolkit and Object Relational Mapper. This is just a demonstration of its usage where we read the whole `employees` table.
###Code
from sqlalchemy import create_engine
engine = create_engine('sqlite:///chinook.db')
connection = engine.connect()
df = pd.read_sql_table('employees', con=connection)
df.head()
df = pd.read_sql_table('employees', con=connection,
index_col='EmployeeId',
parse_dates=['BirthDate', 'HireDate'])
df.head()
connection.close()
###Output
_____no_output_____
###Markdown
Create tables from `DataFrame` objects. Finally, we can persist `DataFrame` objects we've been working on in a database using the pandas `to_sql` method. Although it is easy to implement, it could be a very slow process.
###Code
df.head()
df.to_sql('employees2', conn)
pd.read_sql_query('SELECT * FROM employees2;', conn).head()
#pd.read_sql_query('DROP TABLE employees2;', conn)
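# Hedged aside (my addition): for large frames, `to_sql` can write in batches
# via the `chunksize` parameter; the value 500 here is just an illustrative choice.
df.to_sql('employees2', conn, if_exists='replace', chunksize=500)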
###Output
_____no_output_____
###Markdown
Custom behaviour. The `if_exists` parameter defines how to behave if the table already exists and adds a ton of flexibility, letting you decide whether to `replace` the current database data, `append` new data at the end, or simply `fail` if the table already exists.
###Code
pd.DataFrame().to_sql('employees2',
conn,
if_exists='replace')
pd.read_sql_query('SELECT * FROM employees2;', conn).head()
df.to_sql('employees2',
conn,
if_exists='replace')
pd.read_sql_query('SELECT * FROM employees2;', conn).head()
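# Hedged aside (my addition): `if_exists='append'` keeps the existing rows and
# adds the new ones at the end, so the row count doubles here.
df.to_sql('employees2', conn, if_exists='append')
pd.read_sql_query('SELECT COUNT(*) AS n_rows FROM employees2;', conn)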
conn.close()
###Output
_____no_output_____ |
notebooks/Project_Vacation_Itinerary.ipynb | ###Markdown
Deliverable 3. Create a Travel Itinerary Map.
###Code
# Dependencies and Setup
import pandas as pd
import requests
import gmaps
# Import API key
from config import g_key
# Configure gmaps
gmaps.configure(api_key=g_key)
# 1. Read the WeatherPy_vacation.csv into a DataFrame.
# vacation_df = pd.read_csv("Vacation_Search/WeatherPy_vacation.csv")
# vacation_df.head()
# 2. Using the template add the city name, the country code, the weather description and maximum temperature for the city.
info_box_template = """
"""
# 3a. Get the data from each row and add it to the formatting template and store the data in a list.
hotel_info = [info_box_template.format(**row) for index, row in clean_hotel_df.iterrows()]
# 3b. Get the latitude and longitude from each row and store in a new DataFrame.
locations = clean_hotel_df[["Lat", "Lng"]]
# 4a. Add a marker layer for each city to the map.
# 4b. Display the figure
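# Hedged sketch (my addition), assuming the `locations` and `hotel_info`
# variables above have been populated: the usual jupyter-gmaps pattern is a
# figure plus a marker layer carrying the info boxes.
fig = gmaps.figure()
marker_layer = gmaps.marker_layer(locations, info_box_content=hotel_info)
fig.add_layer(marker_layer)
fig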
# From the map above pick 4 cities and create a vacation itinerary route to travel between the four cities.
# 5. Create DataFrames for each city by filtering the 'vacation_df' using the loc method.
# Hint: The starting and ending city should be the same city.
vacation_start = vacation_df.loc[]
vacation_end = vacation_df.loc[]
vacation_stop1 = vacation_df.loc[]
vacation_stop2 = vacation_df.loc[]
vacation_stop3 = vacation_df.loc[]
# 6. Get the latitude-longitude pairs as tuples from each city DataFrame using the to_numpy function and list indexing.
start =
end =
stop1 =
stop2 =
stop3 =
# 7. Create a direction layer map using the start and end latitude-longitude pairs,
# and stop1, stop2, and stop3 as the waypoints. The travel_mode should be "DRIVING", "BICYCLING", or "WALKING".
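# Hedged sketch (my addition), assuming `start`, `end`, and the three stops hold
# (lat, lng) tuples: gmaps.directions_layer accepts waypoints plus a travel_mode
# of "DRIVING", "BICYCLING", or "WALKING".
fig = gmaps.figure()
itinerary_layer = gmaps.directions_layer(start, end, waypoints=[stop1, stop2, stop3],
                                         travel_mode='DRIVING')
fig.add_layer(itinerary_layer)
fig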
# 8. To create a marker layer map between the four cities.
# Combine the four city DataFrames into one DataFrame using the concat() function.
itinerary_df = pd.concat([],ignore_index=True)
itinerary_df
# 9 Using the template add city name, the country code, the weather description and maximum temperature for the city.
info_box_template = """
"""
# 10a Get the data from each row and add it to the formatting template and store the data in a list.
hotel_info = [info_box_template.format(**row) for index, row in itinerary_df.iterrows()]
# 10b. Get the latitude and longitude from each row and store in a new DataFrame.
locations = itinerary_df[["Lat", "Lng"]]
# 11a. Add a marker layer for each city to the map.
# 11b. Display the figure
###Output
_____no_output_____ |