path | concatenated_notebook
---|---|
funds.ipynb | ###Markdown
###Code
###Output
_____no_output_____ |
Assignment_4/Architecture/assignemnt_4_solutions.ipynb | ###Markdown
Total number of parameters used in the model: **17498**
###Code
# Initialize transformation, datasets, and loaders
classes = range(10)
train_transform = transforms.Compose(
[
transforms.ToTensor(),
transforms.Normalize(DATA_MEAN, DATA_STD),
])
test_transform = transforms.Compose(
[
transforms.ToTensor(),
transforms.Normalize(DATA_MEAN, DATA_STD),
])
# Download MNIST Training
train_set = datasets.MNIST(root= './data',
train= True,
download= True,
transform= transforms.Compose([
transforms.ToTensor()
]))
# Download MNIST TEST
test_set = datasets.MNIST(root='./data',
train=False,
download=True,
transform=transforms.Compose([
transforms.ToTensor()
]))
torch.manual_seed(1) #Set seed for reproducibility
batch_size = 128
kwargs = {'num_workers': 1, 'pin_memory': True} if cuda_available else {}
# Load MNIST Train Data Loader
train_loader = torch.utils.data.DataLoader(train_set,
batch_size= batch_size,
shuffle= True,
                                           drop_last=True, # Drop the last batch if the total size is not divisible by the batch size
**kwargs)
# Load MNIST Test Data Loader
test_loader = torch.utils.data.DataLoader(test_set,
batch_size=batch_size,
shuffle=True,
                                          drop_last=True,
                                          **kwargs)
# Check Data for Each Class for train set| number of examples
print("Class & Data For Training Set : ")
print(train_set.targets.unique())
print(train_set.targets.bincount())
print()
# Check Data for Each Class for test set| number of examples
print("Class & Data For Test Set : ")
print(test_set.targets.unique())
print(test_set.targets.bincount())
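# DATA_MEAN and DATA_STD (used by the Normalize transforms above and by
# display_image below) are assumed to be defined in an earlier cell of the
# original notebook. A hedged sketch of how such values can be derived from the
# raw MNIST training images (the variable name below is illustrative only):
mnist_pixels = train_set.data.float() / 255.0  # uint8 [0, 255] -> float [0, 1]
print("pixel mean / std:", mnist_pixels.mean().item(), mnist_pixels.std().item())  # roughly 0.1307 / 0.3081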
import matplotlib.pyplot as plt
import numpy as np
def display_image(image, title: str="Class label"):
"""
    Takes a normalized image tensor, un-normalizes it, and displays
    the image as output.
Args:
----
image: Image which we want to plot.
title: Label for that image.
"""
image = image.numpy().transpose((1, 2, 0)) # (C, H, W) --> (H, W, C)
# Convert mean and std to numpy array
mean = np.asarray(DATA_MEAN)
std = np.asarray(DATA_STD)
# unnormalize the image
image = std * image + mean
image = np.clip(image, 0, 1)
print(title)
fig = plt.figure() # Create a new figure
fig.set_figheight(15)
fig.set_figwidth(15)
ax = fig.add_subplot(111)
ax.axis("off") # Sqitch off the axis
ax.imshow(image)
# Iterate over the train loader and get 1 batch of training data
data, targets = next(iter(train_loader))
# make_grid takes all tensors(batch) and joins them into a single big tensor image (almost)
batch_grid = torchvision.utils.make_grid(data)
display_image(batch_grid, title=[str(cls.item()) for cls in targets])
# Drawing a single sample from the dataset
images, labels = next(iter(train_loader))
images[0].data.shape
# Plot a single image and its label.
print(f"Class label: {classes[labels[0]]}")
plt.axis("off")
plt.imshow(images[0].data.reshape((28,28)), cmap="gray");
from tqdm import tqdm
def train(model, device, train_loader, optimizer):
"""
Model training function.
Args
----
model : model that we want to train.
device: device on which we want to train our model (cuda or cpu)
train_loader : training dataset data loader.
optimizer: what optimization method we want to use for training our model.
"""
model.train()
train_loss = 0
train_acc = 0
correct = 0
pbar = tqdm(train_loader)
for batch_idx, (data, target) in enumerate(pbar):
data, target = data.to(device), target.to(device) # Put data to device
optimizer.zero_grad() # set gradients to 0
output = model(data) # Forward pass
loss = F.nll_loss(output, target) # Calculate loss
train_loss += loss
        loss.backward() # Backpropagate
optimizer.step() # Update parameters
pred = output.argmax(dim=1, keepdim=True) # get the index of the max logit value
correct += pred.eq(target.view_as(pred)).sum().item()
pbar.set_description(desc= f'loss={round(loss.item(), 7)} batch_id={batch_idx}')
train_loss /= len(train_loader.dataset)
train_acc = (correct/len(train_loader.dataset)) * 100
print(f"\n\nTrain Accuracy: {round(train_acc,4)} % \t Train Loss: {round(train_loss.item(),4)}")
return train_loss, train_acc
def test(model, device, test_loader):
"""
Model testing function to test the performance of our trained model on the test data.
Args
----
model : model that we want to test.
device: device on which we want to train our model (cuda or cpu)
test_loader : testing dataset data loader to test our model on.
"""
model.eval() # Put model in evaluation mode
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
test_loss += F.nll_loss(output, target).item()
pred = output.argmax(dim=1, keepdim=True) # get the index of the max logit
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
test_acc = (correct / len(test_loader.dataset)) * 100
print(f"\nTest Accuracy: {round(test_acc,4)} % \t Test Loss: {round(test_loss,4)}\n")
print(f"Number of correct prediction in test set: {correct}/{len(test_loader.dataset)}")
return test_loss, test_acc
# Initialize the model
model = MNISTNet().to(device)
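# A quick cross-check (added sketch) of the "17498 parameters" figure quoted in
# the markdown above, by summing the trainable parameter tensors directly:
total_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
print(f"Total number of trainable parameters: {total_params}")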
# Using Adam optimizer with initial learning rate of 0.01
optimizer = optim.Adam(model.parameters(), lr=lr)
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[10, 15], gamma=0.1)
# To accumulate training and testing loss
train_losses = []
test_losses = []
# To accumulate training and testing accuracies
train_acc = []
test_acc = []
for epoch in range(num_epochs):
print(f"\nEpoch {epoch+1}:")
print("="*20)
loss, acc = train(model, device, train_loader, optimizer)
train_losses.append(loss)
train_acc.append(acc)
scheduler.step()
loss, acc = test(model, device, test_loader)
test_losses.append(loss)
test_acc.append(acc)
print("+"*20)
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(18,10))
# Plot loss values over the epochs
ax1 = ax[0]
ax1.set_title("Loss")
ax1.plot(train_losses, color="red", label="Train Loss")
ax1.plot(test_losses, color="green", label="Test Loss")
ax1.legend()
# Plot accuracies over the epochs
ax2 = ax[1]
ax2.set_title("Accuracy")
ax2.plot(train_acc, color="red", label="Train Accuracy")
ax2.plot(test_acc, color="green", label="Test Accuracy")
ax2.legend()
plt.show()
###Output
_____no_output_____ |
tutorials/catalog.ipynb | ###Markdown
Source catalogs`~gammapy.catalog` provides convenient access to common gamma-ray source catalogs. E.g. creating a spectral model and spectral points for a given Fermi-LAT catalog and source from the FITS table is tedious, `~gammapy.catalog` has this implemented and makes it easy.In this tutorial you will learn how to:- List available catalogs- Load a catalog- Select a source- Pretty-print the source information- Get source spectral and spatial models- Get flux points (if available)- Get lightcurves (if available)- Access the source catalog table dataIn this tutorial we will show examples using the following catalogs:- `~gammapy.catalog.SourceCatalogHGPS`- `~gammapy.catalog.SourceCatalogGammaCat`- `~gammapy.catalog.SourceCatalog3FHL`- `~gammapy.catalog.SourceCatalog4FGL`All catalog and source classes work the same, as long as some information is available. E.g. trying to access a lightcurve from a catalog and source that doesn't have that information will return ``None``.Further information is available at `~gammapy.catalog`.
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import astropy.units as u
from gammapy.catalog import SOURCE_CATALOGS
###Output
_____no_output_____
###Markdown
List available catalogs`~gammapy.catalog` contains a Python dictionary ``SOURCE_CATALOGS``, which maps catalog names (e.g. "3fhl") to catalog classes (e.g. ``SourceCatalog3FHL``).
###Code
SOURCE_CATALOGS
list(SOURCE_CATALOGS)
###Output
_____no_output_____
###Markdown
Load catalogsIf you have run `gammapy download datasets` or `gammapy download tutorials`,you have a copy of the catalogs as FITS files in `$GAMMAPY_DATA/catalogs`,and that is the default location where `~gammapy.catalog` loads from.You can load a catalog by name via `SOURCE_CATALOGS[name]()` (note the `()` to instantiate a catalog object from the catalog class - only this will load the catalog and be useful), or by importing the catalog class (e.g. `SourceCatalog3FGL`) directly. The two ways are equivalent, the result will be the same.Note that `$GAMMAPY_DATA/catalogs` is just the default, you could pass a different `filename` when creating the catalog.
###Code
!ls -1 $GAMMAPY_DATA/catalogs
# Catalog object - FITS file is loaded
catalog = SOURCE_CATALOGS["3fgl"]()
catalog
from gammapy.catalog import SourceCatalog3FGL
catalog = SourceCatalog3FGL()
catalog
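# As noted above, a different `filename` can be passed when creating a catalog.
# A hedged example (the path below assumes the standard `gammapy download datasets`
# layout and is illustrative only):
# catalog = SourceCatalog3FGL("$GAMMAPY_DATA/catalogs/fermi/gll_psc_v16.fit.gz")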
# Let's load the source catalogs we will use throughout this tutorial
catalog_gammacat = SOURCE_CATALOGS["gamma-cat"]()
catalog_3fhl = SOURCE_CATALOGS["3fhl"]()
catalog_4fgl = SOURCE_CATALOGS["4fgl"]()
catalog_hgps = SOURCE_CATALOGS["hgps"]()
###Output
_____no_output_____
###Markdown
Select a sourceTo create a source object, index into the catalog using `[]`, passing a catalog table row index (zero-based, first row is `[0]`), or a source name. If passing a name, catalog table columns with source names and association names ("ASSOC1" in the example below) are searched top to bottom. There is no name resolution web query.
###Code
source = catalog_4fgl[42]
source
source.row_index, source.name
source = catalog_4fgl["4FGL J0010.8-2154"]
source
source.row_index, source.name
source.data["ASSOC1"]
source = catalog_4fgl["PKS 0008-222"]
source.row_index, source.name
###Output
_____no_output_____
###Markdown
Pretty-print source informationA source object has a nice string representation that you can print.You can also call `source.info()` instead and pass an option specifying what information to print.
###Code
source = catalog_hgps["MSH 15-52"]
print(source)
print(source.info("associations"))
###Output
_____no_output_____
###Markdown
Source modelsThe `~gammapy.catalog.SourceCatalogObject` classes have a `sky_model()` modelwhich creates a `gammapy.modeling.models.SkyModel` object, with model parametervalues and parameter errors from the catalog filled in.In most cases, the `spectral_model()` method provides the `gammapy.modeling.models.SpectralModel`part of the sky model, and the `spatial_model()` method the `gammapy.modeling.models.SpatialModel`part individually.We use the `gammapy.catalog.SourceCatalog3FHL` for the examples in this section.
###Code
source = catalog_4fgl["PKS 2155-304"]
model = source.sky_model()
model
print(model)
print(model.spatial_model)
print(model.spectral_model)
energy_range = (100 * u.MeV, 100 * u.GeV)
opts = dict(energy_power=2, flux_unit="erg-1 cm-2 s-1")
model.spectral_model.plot(energy_range, **opts)
model.spectral_model.plot_error(energy_range, **opts)
###Output
_____no_output_____
###Markdown
Flux pointsThe flux points are available via the `flux_points` property as a `gammapy.spectrum.FluxPoints` object.
###Code
source = catalog_4fgl["PKS 2155-304"]
flux_points = source.flux_points
flux_points
flux_points.table[["e_min", "e_max", "flux", "flux_errn"]]
flux_points.plot()
###Output
_____no_output_____
###Markdown
LightcurvesThe Fermi catalogs contain lightcurves for each source. It is available via the `source.lightcurve` property as a `~gammapy.time.LightCurve` object.
###Code
lightcurve = catalog_4fgl["4FGL J0349.8-2103"].lightcurve
lightcurve
lightcurve.table[:3]
lightcurve.plot()
###Output
_____no_output_____
###Markdown
Catalog table and source dictionarySource catalogs are given as `FITS` files that contain one or multiple tables.Above we showed how to get spectra, light curves and other information as Gammapy objects.However, you can also access the underlying `~astropy.table.Table` for a catalog,and the row data as a Python `dict`. This can be useful if you want to do somethingthat is not pre-scripted by the `~gammapy.catalog` classes, such as e.g. selectingsources by sky position or association class, or accessing special source information(like e.g. `Npred` in the example below).Note that you can also do a `for source in catalog` loop, to find or processsources of interest.
###Code
type(catalog_3fhl.table)
len(catalog_3fhl.table)
catalog_3fhl.table[:3][["Source_Name", "RAJ2000", "DEJ2000"]]
source = catalog_3fhl["PKS 2155-304"]
source.data["Source_Name"]
source.data["Npred"]
source.position
# Find the brightest sources in the 100 to 200 GeV energy band
for source in catalog_3fhl:
flux = (
source.spectral_model()
.integral(100 * u.GeV, 200 * u.GeV)
.to("cm-2 s-1")
)
if flux > 1e-10 * u.Unit("cm-2 s-1"):
print(f"{source.row_index:<7d} {source.name:20s} {flux:.3g}")
###Output
_____no_output_____
###Markdown
Exercises- How many sources are in the 4FGL catalog? (try `len(catalog.table)`)- What is the name of the source with row index 42?- What is the row index of the source with name "4FGL J0536.1-1205"?- What is the integral flux of "4FGL J0536.1-1205" in the energy range 100 GeV to 1 TeV according to the best-fit spectral model?- Which source in the HGPS catalog is closest to Galactic position `glon = 42 deg` and `glat = 0 deg`?
###Code
# Start coding here ...
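# Possible starting points for the exercises above; treat these as hints rather
# than official solutions (they only reuse calls demonstrated earlier in this
# notebook).
print("Number of 4FGL sources:", len(catalog_4fgl.table))
print("Source with row index 42:", catalog_4fgl[42].name)
print("Row index of 4FGL J0536.1-1205:", catalog_4fgl["4FGL J0536.1-1205"].row_index)
flux = (
    catalog_4fgl["4FGL J0536.1-1205"]
    .spectral_model()
    .integral(100 * u.GeV, 1 * u.TeV)
    .to("cm-2 s-1")
)
print("Integral flux 100 GeV - 1 TeV:", flux)
# For the last exercise, a brute-force loop over the HGPS catalog:
from astropy.coordinates import SkyCoord
target = SkyCoord(42, 0, unit="deg", frame="galactic")
closest = min(catalog_hgps, key=lambda src: src.position.separation(target))
print("HGPS source closest to glon=42 deg, glat=0 deg:", closest.name)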
###Output
_____no_output_____ |
matplotlib_rgb_image.ipynb | ###Markdown
 How to Display a Matplotlib RGB Image by [PyImageSearch.com](http://www.pyimagesearch.com) Welcome to **[PyImageSearch Plus](http://pyimg.co/plus)** Jupyter Notebooks!This notebook is associated with the [How to Display a Matplotlib RGB Image](https://www.pyimagesearch.com/2014/11/03/display-matplotlib-rgb-image/) blog post published on 2014-11-04.Only the code for the blog post is here. Most codeblocks have a 1:1 relationship with what you find in the blog post with two exceptions: (1) Python classes are not separate files as they are typically organized with PyImageSearch projects, and (2) Command Line Argument parsing is replaced with an `args` dictionary that you can manipulate as needed.We recommend that you execute (press ▶️) the code block-by-block, as-is, before adjusting parameters and `args` inputs. Once you've verified that the code is working, you are welcome to hack with it and learn from manipulating inputs, settings, and parameters. For more information on using Jupyter and Colab, please refer to these resources:* [Jupyter Notebook User Interface](https://jupyter-notebook.readthedocs.io/en/stable/notebook.htmlnotebook-user-interface)* [Overview of Google Colaboratory Features](https://colab.research.google.com/notebooks/basic_features_overview.ipynb)As a reminder, these PyImageSearch Plus Jupyter Notebooks are not for sharing; please refer to the **Copyright** directly below and **Code License Agreement** in the last cell of this notebook. Happy hacking!*Adrian****Copyright:*** *The contents of this Jupyter Notebook, unless otherwise indicated, are Copyright 2020 Adrian Rosebrock, PyimageSearch.com. All rights reserved. Content like this is made possible by the time invested by the authors. If you received this Jupyter Notebook and did not purchase it, please consider making future content possible by joining PyImageSearch Plus at http://pyimg.co/plus/ today.* Download the code zip file
###Code
!wget https://www.pyimagesearch.com/wp-content/uploads/2014/05/matplotlib-rgb-image.zip
!unzip -qq matplotlib-rgb-image.zip
%cd matplotlib-rgb-image
###Output
_____no_output_____
###Markdown
Blog Post Code Import Packages
###Code
# import the necessary packages
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import cv2
###Output
_____no_output_____
###Markdown
Tutorial: How to Display a Matplotlib RGB Image
###Code
# display our image
image = mpimg.imread("chelsea-the-cat.png")
plt.imshow(image)
plt.show()
# turn axes off
plt.axis("off")
plt.imshow(image)
plt.show()
# load the image using OpenCV and display it -- but uh, oh!
# our image doesn't look right!
image = cv2.imread("chelsea-the-cat.png")
plt.axis("off")
plt.imshow(image)
plt.show()
# OpenCV stores images in BGR order, whereas matplotlib expects
# them in RGB order -- the simple fix is to use OpenCV to convert
# from BGR to RGB
plt.axis("off")
plt.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
plt.show()
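# An equivalent fix (hedged alternative): reverse the channel axis with NumPy
# slicing instead of calling cv2.cvtColor
plt.axis("off")
plt.imshow(image[:, :, ::-1])
plt.show()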
###Output
_____no_output_____ |
protopipe/benchmarks/notebooks/TRAINING/benchmarks_DL1_image-cleaning.ipynb | ###Markdown
Image cleaning **Recommended datasample(s):** `gamma1` (dataset used to build the energy model)**Data level(s):** DL1b (telescope-wise image parameters)**Description:**This notebook contains benchmarks and metrics from the _protopipe_ pipeline aimed at the DL1b data level (cleaned and parametrized images). **Requirements and steps to reproduce:**To run this notebook you will need a TRAINING file generated using `protopipe-TRAINING`. To get a filled notebook and reproduce these results,- get the necessary input files using `protopipe-TRAINING`- execute the notebook with `protopipe-BENCHMARK``protopipe-BENCHMARK launch --config_file configs/benchmarks.yaml -n TRAINING/benchmarks_DL1_image-cleaning`To obtain the list of all available parameters add `--help-notebook`.**Comparison against CTAMARS:**- the input file needs to be a merged TRAINING file from the `gamma1` sample,- reference simtel-files, plots, values and settings can be found [here (please, always refer to the latest version)](https://forge.in2p3.fr/projects/benchmarks-reference-analysis/wiki/Comparisons_between_pipelines).**Development and testing:** As with any other part of _protopipe_ and being part of the official repository, this notebook can be further developed by any interested contributor. The execution of this notebook is not currently automatic, it must be done locally by the user _before_ pushing a pull-request.Please, strip the output before pushing. Table of contents - [Fraction of events (relative to telescope triggers) that survive a given intensity cut](Fraction-of-events-(relative-to-telescope-triggers)-that-survive-a-given-intensity-cut) - [Image-parameter distributions](Image-parameter-distributions) + [Image intensity from all telescope types](Image-intensity-from-all-telescope-types) + [Image intensity from LST-1](Image-intensity-from-LST-1) + [DL1 Parameters used for energy training from all telecopes](DL1-Parameters-used-for-energy-training-from-all-telecopes) Imports
###Code
import os
from pathlib import Path
import warnings
def fxn():
warnings.warn("runtime", RuntimeWarning)
import tables
import numpy as np
import pandas as pd
import uproot
from scipy.stats import binned_statistic, binned_statistic_2d, cumfreq, percentileofscore
from astropy import units as u
from astropy.table import Table
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from matplotlib.pyplot import rc
import matplotlib.style as style
from cycler import cycler
%matplotlib inline
from mpl_toolkits.mplot3d import Axes3D
from protopipe.pipeline.io import get_camera_names, read_protopipe_TRAINING_per_tel_type
from protopipe.pipeline.utils import add_stats, CTAMARS_radii
from protopipe.benchmarks.utils import string_to_boolean, get_fig_size
###Output
_____no_output_____
###Markdown
Input data[back to top](Table-of-contents) Protopipe[back to top](Table-of-contents)
###Code
# Parametrized cell
# Modify these variables according to your local setup outside of the Vagrant Box
analyses_directory = None # path to the 'analyses' analyses folder
output_directory = Path.cwd() # default output directory for plots
analysis_name = None # Name of the analysis stored in 'analyses_folder'
load_CTAMARS = False # If True (default), compare to the CTAN analysis done with CTAMARS (Release 2019)
input_filename = None # Name of the file produced with protopipe
CTAMARS_put_directory = None # Path to DL1 CTAMARS data (if load_CTAMARS is True)
apply_image_extraction_status = True # (no effect for single-pass image extractors) If True select only images which pass both passes (enable if load_CTAMARS is True)
min_pixels = 3
min_ellipticity = 0.1
max_ellipticity = 0.6
containment_radius = 0.8 # from 0 to 1 (whole camera)
intensity_cut = 55 # phe
use_seaborn = False # If True import seaborn and apply global settings from config file
plots_scale = None
# Handle boolean variables (papermill reads them as strings)
[load_CTAMARS, use_seaborn, apply_image_extraction_status] = string_to_boolean([load_CTAMARS, use_seaborn, apply_image_extraction_status])
# Make sure available parameters are not read as strings
intensity_cut = float(intensity_cut)
min_pixels = int(min_pixels)
min_ellipticity = float(min_ellipticity)
max_ellipticity = float(max_ellipticity)
containment_radius = float(containment_radius)
if not analyses_directory or not analysis_name:
raise ValueError("Input source ill-defined.")
input_directory = Path(analyses_directory) / analysis_name / Path("data/TRAINING/for_energy_estimation/gamma")
if not input_filename:
try:
input_filename = input_filenames["TRAINING_energy_gamma"]
except (NameError, KeyError):
input_filename = "TRAINING_energy_tail_gamma_merged.h5"
cameras = get_camera_names(input_directory = input_directory,
file_name = input_filename)
data = read_protopipe_TRAINING_per_tel_type(input_directory = input_directory,
file_name = input_filename,
camera_names=cameras)
selected_data = {}
if apply_image_extraction_status:
# Remove from protopipe's data images that did not survive the preliminary image cleaning
# between the 2 image extraction passes
for camera in cameras:
selected_data[camera] = data[camera].query("image_extraction == 1")
else:
for camera in cameras:
selected_data[camera] = data[camera]
###Output
_____no_output_____
###Markdown
CTA-MARS[back to top](Table-of-contents)
###Code
if load_CTAMARS:
input_directory_CTAMARS = {}
input_directory_CTAMARS["parent_directory"] = "/Users/michele/Applications/ctasoft/tests/CTAMARS_reference_data"
input_directory_CTAMARS["TRAINING/DL1"] = "TRAINING/DL1"
# Get input file path
if (input_directory_CTAMARS["parent_directory"] is None) or (input_directory_CTAMARS["TRAINING/DL1"] is None):
raise ValueError("ERROR: CTAMARS data undefined. Please, check the documentation of protopipe-BENCHMARKS.")
else:
# read CTAMARS ROOT files
mars_dl1b_fileName = "check_dl1b.root"
path_mars_dl1b = Path(input_directory_CTAMARS["parent_directory"]) / input_directory_CTAMARS["TRAINING/DL1"] / mars_dl1b_fileName
ctamars_dl1b = uproot.open(path_mars_dl1b)
mars_LST1size_fileName = "LST1_SIZE_distro_gamma1sample.root"
path_mars_LST1size = Path(input_directory_CTAMARS["parent_directory"]) / input_directory_CTAMARS["TRAINING/DL1"] / mars_LST1size_fileName
ctamars_LST1size = uproot.open(path_mars_LST1size)
# create histograms
mars_size_npixels_LSTCam = ctamars_dl1b["log10Size_type0"].to_numpy()
mars_size_npixels_NectarCam = ctamars_dl1b["log10Size_type1"].to_numpy()
mars_size_WL_LSTCam = ctamars_dl1b["log10Size_WL_type0"].to_numpy()
mars_size_WL_NectarCam = ctamars_dl1b["log10Size_WL_type1"].to_numpy()
mars_size_d80_LSTCam = ctamars_dl1b["log10Size_d80_type0"].to_numpy()
mars_size_d80_NectarCam = ctamars_dl1b["log10Size_d80_type1"].to_numpy()
mars_size_LST1Cam = ctamars_LST1size["h"].to_numpy()
# fill camera-wise dictionaries
CTAMARS = {}
CTAMARS["LSTCam"] = {"size_npixels": mars_size_npixels_LSTCam,
"size_WL" : mars_size_WL_LSTCam,
"size_d80" : mars_size_d80_LSTCam,
"size_LST1" : mars_size_LST1Cam}
CTAMARS["NectarCam"] = {"size_npixels": mars_size_npixels_NectarCam,
"size_WL" : mars_size_WL_NectarCam,
"size_d80" : mars_size_d80_NectarCam}
###Output
_____no_output_____
###Markdown
Plots and benchmarks[back to top](Table-of-contents)
###Code
# First we check if a _plots_ folder exists already.
# If not, we create it.
plots_folder = Path(output_directory) / "plots"
plots_folder.mkdir(parents=True, exist_ok=True)
# Plot aesthetics settings
scale = matplotlib_settings["scale"] if plots_scale is None else float(plots_scale)
style.use(matplotlib_settings["style"])
cmap = matplotlib_settings["cmap"]
rc('font', size=matplotlib_settings["rc"]["font_size"])
if matplotlib_settings["style"] == "seaborn-colorblind":
# Change color order to have first ones more readable
colors_order = ['#0072B2', '#D55E00', '#009E73', '#CC79A7', '#56B4E9', '#F0E442']
rc('axes', prop_cycle=cycler(color=colors_order))
if use_seaborn:
import seaborn as sns
sns.set_theme(context=seaborn_settings["theme"]["context"] if "context" in seaborn_settings["theme"] else "talk",
style=seaborn_settings["theme"]["style"] if "style" in seaborn_settings["theme"] else "whitegrid",
palette=seaborn_settings["theme"]["palette"] if "palette" in seaborn_settings["theme"] else None,
font=seaborn_settings["theme"]["font"] if "font" in seaborn_settings["theme"] else "Fira Sans",
font_scale=seaborn_settings["theme"]["font_scale"] if "font_scale" in seaborn_settings["theme"] else 1.0,
color_codes=seaborn_settings["theme"]["color_codes"] if "color_codes" in seaborn_settings["theme"] else True
)
sns.set_style(seaborn_settings["theme"]["style"], rc=seaborn_settings["rc_style"])
sns.set_context(seaborn_settings["theme"]["context"],
font_scale=seaborn_settings["theme"]["font_scale"] if "font_scale" in seaborn_settings["theme"] else 1.0)
###Output
_____no_output_____
###Markdown
Fraction of events (relative to telescope triggers) that survive a given intensity cut[back to top](Table-of-contents) Multi-cluster cleaning If the "no-cuts" curve doesn't start at 1, it's because some images were so bad that they couldn't get a valid parametrization and have been recorded with ``hillas_intensity = NaN``.
###Code
for camera in cameras:
fig = plt.figure(figsize=get_fig_size(ratio=4./3, scale=scale), tight_layout=False)
plt.xlabel("log10(intensity #p.e)")
plt.ylabel("Telescope triggers fraction\nwith log10(intensity #p.e) > x phe")
plt.title(camera)
#tot_entries = len(selected_data[camera]["hillas_intensity"])
tot_entries = len(data[camera]["hillas_intensity"])
if load_CTAMARS:
xbins = CTAMARS[camera]["size_WL"][1]
else:
xbins = np.linspace(0,6,100)
# No cuts
selected_images = data[camera]
intensity_hist, xbins = np.histogram(np.log10(selected_images["hillas_intensity"]), bins=xbins)
plt.plot(xbins[:-1], intensity_hist[::-1].cumsum()[::-1]/tot_entries, drawstyle="steps-post", label="No cuts",
color="steelblue"
)
# Cut in the number of pixels
selected_images = selected_data[camera].query(f"pixels > {min_pixels}")
intensity_hist, xbins = np.histogram( np.log10(selected_images["hillas_intensity"]), bins=xbins)
plt.plot(xbins[:-1], intensity_hist[::-1].cumsum()[::-1]/tot_entries, drawstyle="steps-post", label="+ n_pixel",
color="orange"
)
# Cut in ellipticity
selected_images = selected_data[camera].query(f"pixels > {min_pixels}\
and hillas_ellipticity > {min_ellipticity}\
and hillas_ellipticity < {max_ellipticity}")
intensity_hist, xbins = np.histogram( np.log10(selected_images["hillas_intensity"]), bins=xbins)
plt.plot(xbins[:-1], intensity_hist[::-1].cumsum()[::-1]/tot_entries, drawstyle="steps-post", label="+ ellipticity",
color="green"
)
# Cut in containment radius
selected_images = selected_data[camera].query(f"pixels > {min_pixels}\
and hillas_ellipticity > {min_ellipticity}\
and hillas_ellipticity < {max_ellipticity}\
and hillas_r < {(CTAMARS_radii(camera)*containment_radius)}")
intensity_hist, xbins = np.histogram( np.log10(selected_images["hillas_intensity"]), bins=xbins)
plt.plot(xbins[:-1], intensity_hist[::-1].cumsum()[::-1]/tot_entries, drawstyle="steps-post", label="+ COG containment",
color="red"
)
plt.ylim([0.,1.05])
ax = plt.gca()
ylims=ax.get_ylim()
# Plot CTAMARS data
if load_CTAMARS:
x = 0.5 * (CTAMARS[camera]["size_WL"][1][1:] + CTAMARS[camera]["size_WL"][1][:-1])
plt.step(x, CTAMARS[camera]["size_npixels"][0], where='mid', label='CTAMARS npixels', color="orange", linestyle="--")
plt.step(x, CTAMARS[camera]["size_WL"][0], where='mid', label='+ CTAMARS ellipticity', color="green", linestyle="--")
plt.step(x, CTAMARS[camera]["size_d80"][0], where='mid', label='+ CTAMARS COG containment', color="red", linestyle="--")
CTAMARS_intensity_cut = 50
plt.vlines(np.log10(CTAMARS_intensity_cut),
ymin=min(ylims), ymax=max(ylims),
ls="dashed", lw=2,
color="blue",
label=f"{CTAMARS_intensity_cut} phe (protopipe==CTAMARS)")
else:
plt.vlines(np.log10(intensity_cut),
ymin=min(ylims), ymax=max(ylims),
ls="dashed", lw=2,
color="blue",
label=f"{intensity_cut} phe")
plt.minorticks_on()
plt.grid()
plt.legend()
fig.savefig(plots_folder / f"image_cleaning_eventsAboveIntensity_{camera}_protopipe_{analysis_name}.png")
plt.show()
###Output
_____no_output_____
###Markdown
Image-parameter distributions[back to top](Table-of-contents) **Notes** - probably better to make bins in true energy - the parameters should be at least those that enter the estimators training (here only the pure DL1 are listed) Image intensity from all telescope types[back to top](Table-of-contents)
###Code
x_bins_edges = np.linspace(1,5,100)
all_telescope_types = pd.concat([selected_data[camera] for camera in cameras])
intensity = all_telescope_types["hillas_intensity"]
fig = plt.figure(figsize=(7, 5), tight_layout=False)
h_protopipe = plt.hist(np.log10(intensity),
bins=x_bins_edges,
histtype="step",
label="protopipe", color="blue")
print(f"Total number of images = {np.sum(h_protopipe[0])}")
plt.xlabel(f"log10(hillas_intensity) [#phe]")
plt.ylabel("Number of images")
plt.yscale('log')
plt.ylim(1, 1.e6)
plt.minorticks_on()
plt.grid(which = "both")
ax = plt.gca()
ylims=ax.get_ylim()
plt.vlines(np.log10(intensity_cut),
ymin=min(ylims), ymax=max(ylims),
ls="dashed", lw=2,
color="blue",
label=f"{intensity_cut} phe")
plt.legend()
plt.show()
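# Following the note above about binning in true energy: a hedged sketch that
# splits the intensity distribution into a few bins of true energy. It assumes
# the TRAINING table contains a "true_energy" column in TeV, which may differ
# between protopipe versions; the bin edges are illustrative.
if "true_energy" in all_telescope_types.columns:
    energy_bin_edges = [0.02, 0.2, 2.0, 20.0, 200.0]  # TeV
    plt.figure(figsize=(7, 5))
    for e_lo, e_hi in zip(energy_bin_edges[:-1], energy_bin_edges[1:]):
        selection = all_telescope_types.query(f"true_energy > {e_lo} and true_energy < {e_hi}")
        plt.hist(np.log10(selection["hillas_intensity"]), bins=x_bins_edges,
                 histtype="step", label=f"{e_lo}-{e_hi} TeV")
    plt.xlabel("log10(hillas_intensity) [#phe]")
    plt.ylabel("Number of images")
    plt.yscale("log")
    plt.legend()
    plt.show()
else:
    print("No 'true_energy' column found; skipping the energy-binned histogram.")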
###Output
_____no_output_____
###Markdown
Image intensity from LST-1[back to top](Table-of-contents)
###Code
if "LSTCam" in selected_data.keys():
if load_CTAMARS:
x_bins_edges = CTAMARS["LSTCam"]["size_LST1"][1]
CTAMARS_counts = CTAMARS["LSTCam"]["size_LST1"][0]
fig = plt.figure(figsize=(16, 5), tight_layout=False)
plt.subplot(1,2,1)
size_LST1 = selected_data["LSTCam"].query("tel_id == 1")["hillas_intensity"]
else:
x_bins_edges = np.linspace(1,5,100)
fig = plt.figure(figsize=(7, 5), tight_layout=False)
plt.xlabel(f"log10(hillas_intensity) [#phe]")
plt.ylabel("Number of images")
plt.title("LST1 - gamma1")
h_protopipe = plt.hist(np.log10(size_LST1),
bins=x_bins_edges,
histtype="step",
label="protopipe", color="blue")
print(f"Total number of images = {np.sum(h_protopipe[0])}")
if load_CTAMARS:
print(f"Total number of images for CTAMARS = {np.sum(CTAMARS_counts)}")
plt.step(x_bins_edges[:-1], CTAMARS_counts, where='pre', label='CTAMARS', color="darkorange")
plt.yscale('log')
plt.minorticks_on()
plt.grid(which = "both")
ax = plt.gca()
ylims=ax.get_ylim()
if load_CTAMARS:
plt.vlines(np.log10(CTAMARS_intensity_cut),
ymin=min(ylims), ymax=max(ylims),
ls="dashed", lw=2,
color="darkorange",
label=f"{CTAMARS_intensity_cut} phe (CTAMARS)")
else:
plt.vlines(np.log10(intensity_cut),
ymin=min(ylims), ymax=max(ylims),
ls="dashed", lw=2,
color="blue",
label=f"{intensity_cut} phe")
plt.legend()
plt.ylim(1, 1.e5)
if load_CTAMARS:
plt.subplot(1,2,2)
plt.xlabel(f"log10(hillas_intensity) [#phe]")
plt.ylabel(f"Ratio protopipe / CTAMARS")
x = 0.5 * (x_bins_edges[1:] + x_bins_edges[:-1])
with warnings.catch_warnings():
warnings.simplefilter("ignore")
fxn()
plt.step(x, h_protopipe[0]/CTAMARS_counts, where='pre')
ax = plt.gca()
xlims=ax.get_xlim()
xlims=[np.min(x_bins_edges),np.max(x_bins_edges)]
plt.hlines(1., xlims[0], xlims[1], label="expectation", color='r')
plt.grid()
plt.legend()
plt.ylim(0, 3)
fig.savefig(plots_folder / f"image_cleaning_hillas_intensity_LST1_gamma1_{camera}_protopipe_{analysis_name}.png")
plt.show()
else:
print("No LST camera in this analysis.")
###Output
_____no_output_____
###Markdown
DL1 Parameters used for direction reconstruction from all telescopes[back to top](Table-of-contents)
###Code
nbins = 100
parameters_to_plot = ["hillas_intensity",
"hillas_width",
"hillas_length",
"concentration_pixel",
"leakage_intensity_width_1",
"hillas_x",
"hillas_y"]
fig, axes = plt.subplots(ncols=len(parameters_to_plot),
nrows=len(cameras),
constrained_layout=False,
figsize = (40, 15))
plt.subplots_adjust(hspace = 0.5)
fontsize=20
for i, camera in enumerate(cameras):
for j, key in enumerate(parameters_to_plot):
axes[i, j].set_ylabel("Number of events", fontsize=fontsize)
axes[i, j].set_title(camera, fontsize=fontsize)
if "hillas_intensity" in key:
axes[i, j].set_xlabel(f"log10({key}) [#phe]", fontsize=fontsize)
axes[i, j].hist(np.log10(selected_data[camera][key]),
bins=nbins,
range=[1.,6.],
alpha = 0.5,
histtype="step",
linewidth=5)
add_stats(np.log10(selected_data[camera][key]), axes[i, j], x=0.70, y=0.85, fontsize=fontsize)
else:
axes[i, j].set_xlabel(f"{key} [deg]", fontsize=fontsize)
axes[i, j].hist(selected_data[camera][key],
bins=nbins,
alpha = 0.5,
histtype="step",
linewidth=5)
add_stats(selected_data[camera][key], axes[i, j], x=0.70, y=0.85, fontsize=fontsize)
axes[i, j].set_yscale('log')
axes[i, j].minorticks_on()
axes[i, j].grid(which = "both")
# Save just the portion _inside_ the second axis's boundaries
extent = axes[i, j].get_window_extent().transformed(fig.dpi_scale_trans.inverted())
fig.savefig(plots_folder / f"image_cleaning_{key}_gamma1_{camera}_protopipe_{analysis_name}.png", bbox_inches=extent.expanded(1.2, 1.2))
fig.savefig(plots_folder / f"image_cleaning_gamma1_allKeysallCameras_protopipe_{analysis_name}.png")
plt.show()
###Output
_____no_output_____ |
weird_keras_models1.ipynb | ###Markdown
IMPORTS:
###Code
import tensorflow as tf
import numpy as np
import pandas as pd
import random
from matplotlib import pyplot as plt
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, Conv2D, Input, BatchNormalization, Add
from tensorflow.keras.layers import Flatten, GlobalAveragePooling2D, Concatenate, Dropout
###Output
_____no_output_____
###Markdown
BASE MODEL:
###Code
def convBlock(x):
conv = Conv2D(128, (3,3), strides=1, padding="same", activation='relu')(x)
bn = BatchNormalization()(conv)
return bn
def ResBlock(x):
conv1 = Conv2D(128, kernel_size=(3,3), strides=1, padding="same", activation='relu')(x)
bn1 = BatchNormalization()(conv1)
conv2 = Conv2D(128, kernel_size=(3,3), strides=1, padding="same", activation='relu')(bn1)
bn2 = BatchNormalization()(conv2)
out = Add()([bn2, x])
return out
def OutBlock(x):
conv = Conv2D(3, kernel_size=(1,1), strides=1, activation='relu')(x)
bn = BatchNormalization()(conv)
flat = Flatten()(bn)
dense1 = Dense((32), activation='relu')(flat)
value = Dense(1, activation='tanh')(dense1) #Value Head
conv = Conv2D(32, kernel_size=(1,1), strides=1, activation='relu')(x)
bn = BatchNormalization()(conv)
flat = Flatten()(bn)
linear = Dense(6*7*32, activation='relu')(flat)
policy = Dense(7, activation='softmax')(linear)
return policy, value
input0 = Input([6, 7, 3])
model = convBlock(input0)
for _ in range(19):
model = ResBlock(model)
policy, value = OutBlock(model)
model = Model(inputs=input0, outputs=[policy, value])
model.summary()
###Output
Model: "functional_1"
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_11 (InputLayer) [(None, 6, 7, 3)] 0
__________________________________________________________________________________________________
conv2d_165 (Conv2D) (None, 6, 7, 128) 3584 input_11[0][0]
__________________________________________________________________________________________________
batch_normalization_163 (BatchN (None, 6, 7, 128) 512 conv2d_165[0][0]
__________________________________________________________________________________________________
conv2d_166 (Conv2D) (None, 6, 7, 128) 147584 batch_normalization_163[0][0]
__________________________________________________________________________________________________
batch_normalization_164 (BatchN (None, 6, 7, 128) 512 conv2d_166[0][0]
__________________________________________________________________________________________________
conv2d_167 (Conv2D) (None, 6, 7, 128) 147584 batch_normalization_164[0][0]
__________________________________________________________________________________________________
batch_normalization_165 (BatchN (None, 6, 7, 128) 512 conv2d_167[0][0]
__________________________________________________________________________________________________
add_76 (Add) (None, 6, 7, 128) 0 batch_normalization_165[0][0]
batch_normalization_163[0][0]
__________________________________________________________________________________________________
conv2d_168 (Conv2D) (None, 6, 7, 128) 147584 add_76[0][0]
__________________________________________________________________________________________________
batch_normalization_166 (BatchN (None, 6, 7, 128) 512 conv2d_168[0][0]
__________________________________________________________________________________________________
conv2d_169 (Conv2D) (None, 6, 7, 128) 147584 batch_normalization_166[0][0]
__________________________________________________________________________________________________
batch_normalization_167 (BatchN (None, 6, 7, 128) 512 conv2d_169[0][0]
__________________________________________________________________________________________________
add_77 (Add) (None, 6, 7, 128) 0 batch_normalization_167[0][0]
add_76[0][0]
__________________________________________________________________________________________________
conv2d_170 (Conv2D) (None, 6, 7, 128) 147584 add_77[0][0]
__________________________________________________________________________________________________
batch_normalization_168 (BatchN (None, 6, 7, 128) 512 conv2d_170[0][0]
__________________________________________________________________________________________________
conv2d_171 (Conv2D) (None, 6, 7, 128) 147584 batch_normalization_168[0][0]
__________________________________________________________________________________________________
batch_normalization_169 (BatchN (None, 6, 7, 128) 512 conv2d_171[0][0]
__________________________________________________________________________________________________
add_78 (Add) (None, 6, 7, 128) 0 batch_normalization_169[0][0]
add_77[0][0]
__________________________________________________________________________________________________
conv2d_172 (Conv2D) (None, 6, 7, 128) 147584 add_78[0][0]
__________________________________________________________________________________________________
batch_normalization_170 (BatchN (None, 6, 7, 128) 512 conv2d_172[0][0]
__________________________________________________________________________________________________
conv2d_173 (Conv2D) (None, 6, 7, 128) 147584 batch_normalization_170[0][0]
__________________________________________________________________________________________________
batch_normalization_171 (BatchN (None, 6, 7, 128) 512 conv2d_173[0][0]
__________________________________________________________________________________________________
add_79 (Add) (None, 6, 7, 128) 0 batch_normalization_171[0][0]
add_78[0][0]
__________________________________________________________________________________________________
conv2d_174 (Conv2D) (None, 6, 7, 128) 147584 add_79[0][0]
__________________________________________________________________________________________________
batch_normalization_172 (BatchN (None, 6, 7, 128) 512 conv2d_174[0][0]
__________________________________________________________________________________________________
conv2d_175 (Conv2D) (None, 6, 7, 128) 147584 batch_normalization_172[0][0]
__________________________________________________________________________________________________
batch_normalization_173 (BatchN (None, 6, 7, 128) 512 conv2d_175[0][0]
__________________________________________________________________________________________________
add_80 (Add) (None, 6, 7, 128) 0 batch_normalization_173[0][0]
add_79[0][0]
__________________________________________________________________________________________________
conv2d_176 (Conv2D) (None, 6, 7, 128) 147584 add_80[0][0]
__________________________________________________________________________________________________
batch_normalization_174 (BatchN (None, 6, 7, 128) 512 conv2d_176[0][0]
__________________________________________________________________________________________________
conv2d_177 (Conv2D) (None, 6, 7, 128) 147584 batch_normalization_174[0][0]
__________________________________________________________________________________________________
batch_normalization_175 (BatchN (None, 6, 7, 128) 512 conv2d_177[0][0]
__________________________________________________________________________________________________
add_81 (Add) (None, 6, 7, 128) 0 batch_normalization_175[0][0]
add_80[0][0]
__________________________________________________________________________________________________
conv2d_178 (Conv2D) (None, 6, 7, 128) 147584 add_81[0][0]
__________________________________________________________________________________________________
batch_normalization_176 (BatchN (None, 6, 7, 128) 512 conv2d_178[0][0]
__________________________________________________________________________________________________
conv2d_179 (Conv2D) (None, 6, 7, 128) 147584 batch_normalization_176[0][0]
__________________________________________________________________________________________________
batch_normalization_177 (BatchN (None, 6, 7, 128) 512 conv2d_179[0][0]
__________________________________________________________________________________________________
add_82 (Add) (None, 6, 7, 128) 0 batch_normalization_177[0][0]
add_81[0][0]
__________________________________________________________________________________________________
conv2d_180 (Conv2D) (None, 6, 7, 128) 147584 add_82[0][0]
__________________________________________________________________________________________________
batch_normalization_178 (BatchN (None, 6, 7, 128) 512 conv2d_180[0][0]
__________________________________________________________________________________________________
conv2d_181 (Conv2D) (None, 6, 7, 128) 147584 batch_normalization_178[0][0]
__________________________________________________________________________________________________
batch_normalization_179 (BatchN (None, 6, 7, 128) 512 conv2d_181[0][0]
__________________________________________________________________________________________________
add_83 (Add) (None, 6, 7, 128) 0 batch_normalization_179[0][0]
add_82[0][0]
__________________________________________________________________________________________________
conv2d_182 (Conv2D) (None, 6, 7, 128) 147584 add_83[0][0]
__________________________________________________________________________________________________
batch_normalization_180 (BatchN (None, 6, 7, 128) 512 conv2d_182[0][0]
__________________________________________________________________________________________________
conv2d_183 (Conv2D) (None, 6, 7, 128) 147584 batch_normalization_180[0][0]
__________________________________________________________________________________________________
batch_normalization_181 (BatchN (None, 6, 7, 128) 512 conv2d_183[0][0]
__________________________________________________________________________________________________
add_84 (Add) (None, 6, 7, 128) 0 batch_normalization_181[0][0]
add_83[0][0]
__________________________________________________________________________________________________
conv2d_184 (Conv2D) (None, 6, 7, 128) 147584 add_84[0][0]
__________________________________________________________________________________________________
batch_normalization_182 (BatchN (None, 6, 7, 128) 512 conv2d_184[0][0]
__________________________________________________________________________________________________
conv2d_185 (Conv2D) (None, 6, 7, 128) 147584 batch_normalization_182[0][0]
__________________________________________________________________________________________________
batch_normalization_183 (BatchN (None, 6, 7, 128) 512 conv2d_185[0][0]
__________________________________________________________________________________________________
add_85 (Add) (None, 6, 7, 128) 0 batch_normalization_183[0][0]
add_84[0][0]
__________________________________________________________________________________________________
conv2d_186 (Conv2D) (None, 6, 7, 128) 147584 add_85[0][0]
__________________________________________________________________________________________________
batch_normalization_184 (BatchN (None, 6, 7, 128) 512 conv2d_186[0][0]
__________________________________________________________________________________________________
conv2d_187 (Conv2D) (None, 6, 7, 128) 147584 batch_normalization_184[0][0]
__________________________________________________________________________________________________
batch_normalization_185 (BatchN (None, 6, 7, 128) 512 conv2d_187[0][0]
__________________________________________________________________________________________________
add_86 (Add) (None, 6, 7, 128) 0 batch_normalization_185[0][0]
add_85[0][0]
__________________________________________________________________________________________________
conv2d_188 (Conv2D) (None, 6, 7, 128) 147584 add_86[0][0]
__________________________________________________________________________________________________
batch_normalization_186 (BatchN (None, 6, 7, 128) 512 conv2d_188[0][0]
__________________________________________________________________________________________________
conv2d_189 (Conv2D) (None, 6, 7, 128) 147584 batch_normalization_186[0][0]
__________________________________________________________________________________________________
batch_normalization_187 (BatchN (None, 6, 7, 128) 512 conv2d_189[0][0]
__________________________________________________________________________________________________
add_87 (Add) (None, 6, 7, 128) 0 batch_normalization_187[0][0]
add_86[0][0]
__________________________________________________________________________________________________
conv2d_190 (Conv2D) (None, 6, 7, 128) 147584 add_87[0][0]
__________________________________________________________________________________________________
batch_normalization_188 (BatchN (None, 6, 7, 128) 512 conv2d_190[0][0]
__________________________________________________________________________________________________
conv2d_191 (Conv2D) (None, 6, 7, 128) 147584 batch_normalization_188[0][0]
__________________________________________________________________________________________________
batch_normalization_189 (BatchN (None, 6, 7, 128) 512 conv2d_191[0][0]
__________________________________________________________________________________________________
add_88 (Add) (None, 6, 7, 128) 0 batch_normalization_189[0][0]
add_87[0][0]
__________________________________________________________________________________________________
conv2d_192 (Conv2D) (None, 6, 7, 128) 147584 add_88[0][0]
__________________________________________________________________________________________________
batch_normalization_190 (BatchN (None, 6, 7, 128) 512 conv2d_192[0][0]
__________________________________________________________________________________________________
conv2d_193 (Conv2D) (None, 6, 7, 128) 147584 batch_normalization_190[0][0]
__________________________________________________________________________________________________
batch_normalization_191 (BatchN (None, 6, 7, 128) 512 conv2d_193[0][0]
__________________________________________________________________________________________________
add_89 (Add) (None, 6, 7, 128) 0 batch_normalization_191[0][0]
add_88[0][0]
__________________________________________________________________________________________________
conv2d_194 (Conv2D) (None, 6, 7, 128) 147584 add_89[0][0]
__________________________________________________________________________________________________
batch_normalization_192 (BatchN (None, 6, 7, 128) 512 conv2d_194[0][0]
__________________________________________________________________________________________________
conv2d_195 (Conv2D) (None, 6, 7, 128) 147584 batch_normalization_192[0][0]
__________________________________________________________________________________________________
batch_normalization_193 (BatchN (None, 6, 7, 128) 512 conv2d_195[0][0]
__________________________________________________________________________________________________
add_90 (Add) (None, 6, 7, 128) 0 batch_normalization_193[0][0]
add_89[0][0]
__________________________________________________________________________________________________
conv2d_196 (Conv2D) (None, 6, 7, 128) 147584 add_90[0][0]
__________________________________________________________________________________________________
batch_normalization_194 (BatchN (None, 6, 7, 128) 512 conv2d_196[0][0]
__________________________________________________________________________________________________
conv2d_197 (Conv2D) (None, 6, 7, 128) 147584 batch_normalization_194[0][0]
__________________________________________________________________________________________________
batch_normalization_195 (BatchN (None, 6, 7, 128) 512 conv2d_197[0][0]
__________________________________________________________________________________________________
add_91 (Add) (None, 6, 7, 128) 0 batch_normalization_195[0][0]
add_90[0][0]
__________________________________________________________________________________________________
conv2d_198 (Conv2D) (None, 6, 7, 128) 147584 add_91[0][0]
__________________________________________________________________________________________________
batch_normalization_196 (BatchN (None, 6, 7, 128) 512 conv2d_198[0][0]
__________________________________________________________________________________________________
conv2d_199 (Conv2D) (None, 6, 7, 128) 147584 batch_normalization_196[0][0]
__________________________________________________________________________________________________
batch_normalization_197 (BatchN (None, 6, 7, 128) 512 conv2d_199[0][0]
__________________________________________________________________________________________________
add_92 (Add) (None, 6, 7, 128) 0 batch_normalization_197[0][0]
add_91[0][0]
__________________________________________________________________________________________________
conv2d_200 (Conv2D) (None, 6, 7, 128) 147584 add_92[0][0]
__________________________________________________________________________________________________
batch_normalization_198 (BatchN (None, 6, 7, 128) 512 conv2d_200[0][0]
__________________________________________________________________________________________________
conv2d_201 (Conv2D) (None, 6, 7, 128) 147584 batch_normalization_198[0][0]
__________________________________________________________________________________________________
batch_normalization_199 (BatchN (None, 6, 7, 128) 512 conv2d_201[0][0]
__________________________________________________________________________________________________
add_93 (Add) (None, 6, 7, 128) 0 batch_normalization_199[0][0]
add_92[0][0]
__________________________________________________________________________________________________
conv2d_202 (Conv2D) (None, 6, 7, 128) 147584 add_93[0][0]
__________________________________________________________________________________________________
batch_normalization_200 (BatchN (None, 6, 7, 128) 512 conv2d_202[0][0]
__________________________________________________________________________________________________
conv2d_203 (Conv2D) (None, 6, 7, 128) 147584 batch_normalization_200[0][0]
__________________________________________________________________________________________________
batch_normalization_201 (BatchN (None, 6, 7, 128) 512 conv2d_203[0][0]
__________________________________________________________________________________________________
add_94 (Add) (None, 6, 7, 128) 0 batch_normalization_201[0][0]
add_93[0][0]
__________________________________________________________________________________________________
conv2d_205 (Conv2D) (None, 6, 7, 32) 4128 add_94[0][0]
__________________________________________________________________________________________________
conv2d_204 (Conv2D) (None, 6, 7, 3) 387 add_94[0][0]
__________________________________________________________________________________________________
batch_normalization_203 (BatchN (None, 6, 7, 32) 128 conv2d_205[0][0]
__________________________________________________________________________________________________
batch_normalization_202 (BatchN (None, 6, 7, 3) 12 conv2d_204[0][0]
__________________________________________________________________________________________________
flatten_7 (Flatten) (None, 1344) 0 batch_normalization_203[0][0]
__________________________________________________________________________________________________
flatten_6 (Flatten) (None, 126) 0 batch_normalization_202[0][0]
__________________________________________________________________________________________________
dense_9 (Dense) (None, 1344) 1807680 flatten_7[0][0]
__________________________________________________________________________________________________
dense_7 (Dense) (None, 32) 4064 flatten_6[0][0]
__________________________________________________________________________________________________
dense_10 (Dense) (None, 7) 9415 dense_9[0][0]
__________________________________________________________________________________________________
dense_8 (Dense) (None, 1) 33 dense_7[0][0]
==================================================================================================
Total params: 7,457,591
Trainable params: 7,447,537
Non-trainable params: 10,054
__________________________________________________________________________________________________
###Markdown
Custom Loss Function
###Code
# The two model outputs (policy, value) were not given explicit layer names, so
# the losses are passed as a list in the same order as the outputs: categorical
# cross-entropy for the 7-way policy head and mean squared error for the scalar
# tanh value head.
losses = ["categorical_crossentropy", "mean_squared_error"]
lossWeights = [1.0, 1.0]
model.compile(optimizer='adam', loss=losses, loss_weights=lossWeights)
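# A quick sanity check of the compiled model with random dummy data (a hedged
# sketch; the batch size and single epoch are arbitrary and only verify that
# both heads train without shape errors).
dummy_boards = np.random.rand(32, 6, 7, 3).astype("float32")
dummy_policy = tf.keras.utils.to_categorical(np.random.randint(0, 7, size=32), num_classes=7)
dummy_value = np.random.uniform(-1, 1, size=(32, 1)).astype("float32")
model.fit(dummy_boards, [dummy_policy, dummy_value], epochs=1, batch_size=8, verbose=1)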
###Output
_____no_output_____ |
notebooks/level2_cartopy_resample.ipynb | ###Markdown
Table of Contents1 Water vapor retrieval using MYD05 data1.1 Near IR vs. IR datasets1.2 What this notebook does2 Setup3 Read in the 1km and 5km water vapor files3.1 Start with the lats/lons for 1km and 5km3.2 Get the IR vapor plus 5 of its attributes3.3 Replace -9999 with np.nan3.4 now scale the data and histogram it3.5 Repeat for the 1 km near-ir data3.6 Note that the scaled wv values are similar between near_ir and ir retrievals4 Map the data4.0.1 Resample the 5km IR retrieval onto a laea xy grid4.0.2 Resample the 1km near-ir water vapor on the same grid4.1 now use the 1 km MYD03 lons and lats to get a full resolution xy grid5 Save the mapped images5.1 Now save these three images plus their area_def's for future plotting5.2 Create a directory to hold the images and area_def dictionaries5.3 Here's a function that writes the image plus metadata to npz and json files5.4 Write out images, putting useful metadeta in metadata_dict Water vapor retrieval using MYD05 data Near IR vs. IR datasetsAs we will discuss in class, Modis provides two separate measurements on the column integrated water vapor.The high level overview is given in the [modis water vapor products](https://ladsweb.modaps.eosdis.nasa.gov/missions-and-measurements/products/water-vapor/MYD05_L2). Basically the reason for two separate retrievals is that they have different strengths and weaknesses.* Near Infrared Retrieval * Uses reflected photons in two separate water vapor absorption bands * Strengths * 1 km spatial resolution at nadir * retrieval doesn't depend on temperature difference between vapor and surface * more accurate than longwave * Weaknesses * Doesn't work at night * Doesn't work over dark surfaces (can work over ocean as long as the pixel is reflecting direct sunlight ("sunglint") * Needs separate MYD03 file for lats/lons * Infrared Retrieval * Uses the water absorption bands near 11 microns * Strengths * Works day/night, over dark surfaces * 5 km lat/lons included in file * Weaknesses * 5 km pixels at nadir * Doesn't work when most of the vapor is in the boundary layer and has about the same temperature as the surface What this notebook does1. Reads a MYD03 file named m3_file_2018_10_1.hdf and a MYD05 file named myd05_l2_10_7.hdf located in a301.data_dir and grabs latitudes, longitudes and two arrays: Water_Vapor_Near_Infrared and Water_Vapor_Infrared 1. Scales the water vapar arrays by scale_factor and offset to produce the retrieved column water vapor in cm 1. Maps the two arrays onto the same 5km array for direct comparison1. Maps the near_ir array onto a 1 km grid to show the full resolution.1. Writes the three images with their area_def map information and metadata out to new folders in a301_code/map_data/wv_maps as npz files (for the images) and json files (for the metadata) Setup1. Download the MYD05 granule that corresponds to your 5 minute date/time. It should look something like: MYD05_L2.A2013222.2105.061.2018048043105.hdf 1. Rename it to **myd05_l2_10_7.hdf** and copy to a301.data_dir1. 
Run the checkup program: python -m a301.install_tests.wv_resample_test which should produce something like this: working on /Users/phil/repos/a301_code/data/m3_file_2018_10_1.hdf, originally was MYD03.A2013222.2105.006.2013223155808.hdf **************************************** lats_1km.shape, lons_1km.shape: (2040, 1354),(2040, 1354) **************************************** through working on /Users/phil/repos/a301_code/data/myd05_l2_10_7.hdf, originally was MYD05_L2.A2013222.2105.061.2018048043105.hdf **************************************** nearir vapor array shape is: (2040, 1354) **************************************** **************************************** ir vapor array shape is: (408, 270) **************************************** **************************************** lats_5km arrayshape is: (408, 270) **************************************** **************************************** lons_5km arrayshape is: (408, 270) **************************************** was able to regrid the nearir image, xy shape is (2244, 1489) was able to regrid the ir image, xy shape is (448, 297) data looks good, ready to go
###Code
from matplotlib import cm
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import Normalize
from IPython.display import Image,display
#Image('figures/MYBRGB.A2016224.2100.006.2016237025650.jpg',width=600)
%matplotlib inline
from matplotlib import cm
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import Normalize
from IPython.display import Image,display
import a301
from a301.geometry import get_proj_params
from a301.scripts.modismeta_read import parseMeta
from pathlib import Path
from pyhdf.SD import SD, SDC
import pprint
import json
import pdb
###Output
_____no_output_____
###Markdown
Read in the 1km and 5km water vapor files Start with the lats/lons for 1km and 5km
###Code
m5_file = a301.data_dir / Path('myd05_l2_10_7.hdf')
m3_file = a301.data_dir / Path('m3_file_2018_10_1.hdf')
the_file = SD(str(m3_file), SDC.READ)
lats_1km = the_file.select('Latitude').get()
lons_1km = the_file.select('Longitude').get()
the_file.end()
the_file = SD(str(m5_file), SDC.READ)
lats_5km = the_file.select('Latitude').get()
lons_5km = the_file.select('Longitude').get()
the_file.end()
###Output
_____no_output_____
###Markdown
Get the IR vapor plus 5 of its attributes. Store the data in a numpy array, and the attributes in a dictionary, using a [dictionary comprehension](https://jakevdp.github.io/WhirlwindTourOfPython/11-list-comprehensions.html) at line 4
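If dictionary comprehensions are new, here is a tiny standalone illustration of the pattern (the dictionary and key names below are made up for the example; they are not taken from the MODIS file):
```python
# toy example of a dictionary comprehension: keep only selected keys
full = {'units': 'cm', 'scale_factor': 0.001, 'extra': 42}
wanted = ['units', 'scale_factor']
subset = {k: full[k] for k in wanted}
print(subset)  # {'units': 'cm', 'scale_factor': 0.001}
```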
###Code
the_file = SD(str(m5_file), SDC.READ)
wv_ir = the_file.select('Water_Vapor_Infrared')
attributes=['units', 'scale_factor', 'add_offset', 'valid_range', '_FillValue']
attr_dict=wv_ir.attributes()
wv_ir_attrs={k: attr_dict[k] for k in attributes}
print(f'wv_ir attributes: {pprint.pformat(wv_ir_attrs)}')
wv_ir_data = wv_ir.get()
###Output
wv_ir attributes: {'_FillValue': -9999,
'add_offset': 0.0,
'scale_factor': 0.0010000000474974513,
'units': 'cm',
'valid_range': [0, 20000]}
###Markdown
Replace -9999 with np.nan. Note that this has to happen before we scale the data by the scale_factor so the -9999 can be recognized
###Code
bad_data = (wv_ir_data == wv_ir_attrs['_FillValue'])
#
# next line converts to floating point so we can use np.nan
#
wv_ir_data = wv_ir_data.astype(np.float32)
wv_ir_data[bad_data]=np.nan
###Output
_____no_output_____
###Markdown
now scale the data and histogram it
###Code
wv_ir_scaled = wv_ir_data*attr_dict['scale_factor'] + attr_dict['add_offset']
###Output
_____no_output_____
###Markdown
Note that we need to get rid of all nan values by taking ~ (not) of np.isnan; a bare ```plt.hist(wv_ir_scaled)``` won't work
###Code
plt.hist(wv_ir_scaled[~np.isnan(wv_ir_scaled)])
ax=plt.gca()
ax.set_title('5 km wv data (cm)');
###Output
_____no_output_____
###Markdown
Repeat for the 1 km near-ir data. Use a dictionary comprehension again to move the attributes in attrib_list into a dict at line 4
###Code
the_file = SD(str(m5_file), SDC.READ)
wv_nearir = the_file.select('Water_Vapor_Near_Infrared')
attrib_list=['unit', 'scale_factor', 'add_offset', 'valid_range', '_FillValue']
attr_dict=wv_nearir.attributes()
wv_nearir_attrs={k: attr_dict[k] for k in attrib_list}
print(f'wv_nearir attributes: {pprint.pformat(wv_nearir_attrs)}')
wv_nearir_data = wv_nearir.get()
the_file.end()
bad_data = wv_nearir_data == wv_nearir_attrs['_FillValue']
wv_nearir_data = wv_nearir_data.astype(np.float32)
wv_nearir_data[bad_data]=np.nan
wv_nearir_scaled = wv_nearir_data*attr_dict['scale_factor'] + attr_dict['add_offset']
###Output
_____no_output_____
###Markdown
Note that the scaled wv values are similar between near_ir and ir retrievals
###Code
plt.hist(wv_nearir_scaled[~np.isnan(wv_nearir_scaled)])
ax=plt.gca()
ax.set_title('1 km water vapor (cm)');
###Output
_____no_output_____
###Markdown
Map the data Resample the 5km IR retrieval onto a laea xy grid. Let swath_def.compute_optimal_bb_area choose the extent and dimensions for the low resolution (lr) image
###Code
# %load temp.md
from pyresample import SwathDefinition, kd_tree, geometry

# resample the 5 km IR retrieval; later cells reuse swath_def, area_def_lr and image_wv_ir
proj_params = get_proj_params(m5_file)
swath_def = SwathDefinition(lons_5km, lats_5km)
area_def_lr=swath_def.compute_optimal_bb_area(proj_dict=proj_params)
area_def_lr.name="ir wv retrieval modis 5 km resolution (lr=low resolution)"
area_def_lr.area_id='modis_ir_wv'
area_def_lr.job_id = area_def_lr.area_id
fill_value=-9999.
image_wv_ir = kd_tree.resample_nearest(swath_def, wv_ir_scaled.ravel(),
                  area_def_lr, radius_of_influence=5000,
                  nprocs=2,fill_value=fill_value)
image_wv_ir[image_wv_ir < -9000]=np.nan
print(f'\ndump area definition:\n{area_def_lr}\n')
print((f'\nx and y pixel dimensions in meters:'
       f'\n{area_def_lr.pixel_size_x}\n{area_def_lr.pixel_size_y}\n'))
###Output
/Users/phil/mb36/lib/python3.6/site-packages/ipykernel_launcher.py:14: RuntimeWarning: invalid value encountered in less
###Markdown
Resample the 1km near-ir water vapor on the same grid. Reuse area_def_lr for the high resolution nearir image so we can compare directly with the low resolution ir
###Code
swath_def = SwathDefinition(lons_1km, lats_1km)
fill_value=-9999.
image_wv_nearir_lr = kd_tree.resample_nearest(swath_def, wv_nearir_scaled.ravel(),
area_def_lr, radius_of_influence=5000,
nprocs=2,fill_value=fill_value)
image_wv_nearir_lr[image_wv_nearir_lr < -9000]=np.nan
plt.hist(image_wv_nearir_lr[~np.isnan(image_wv_nearir_lr)])
ax=plt.gca()
ax.set_title('1 km water vapor (cm), low resolution nearir scaled to 5km (lr)');
###Output
_____no_output_____
###Markdown
now use the 1 km MYD03 lons and lats to get a full resolution xy grid. Resample the near-ir wv onto that grid to show the full resolution image. Call this area_def area_def_hr
###Code
### Resample the 1 km near-ir water vapor onto a 1 km grid
proj_params = get_proj_params(m3_file)
swath_def = SwathDefinition(lons_1km, lats_1km)
area_def_hr=swath_def.compute_optimal_bb_area(proj_dict=proj_params)
area_def_hr.name="near ir wv retrieval modis 1 km resolution (hr=high resolution)"
area_def_hr.area_id="wv_nearir_hr"
area_def_hr.job_id = area_def_hr.area_id
fill_value=-9999.
image_wv_nearir_hr = kd_tree.resample_nearest(swath_def, wv_nearir_scaled.ravel(),
area_def_hr, radius_of_influence=5000,
nprocs=2,fill_value=fill_value)
image_wv_nearir_hr[image_wv_nearir_hr < -9000]=np.nan
###Output
_____no_output_____
###Markdown
Save the mapped images Now save these three images plus their area_def's for future plotting. The function area_def_to_dict saves the pyresample area_def as a dict. At line 20 note that ```python a=getattr(area_def,key) ``` where key='my_attribute' is the same as ```python a=area_def.my_attribute``` but you don't have to hard-code in 'my_attribute'
###Code
import json
def area_def_to_dict(area_def):
"""
given an area_def, save it as a dictionary
Parameters
----------
area_def: pyresample area_def object
Returns
-------
out_dict: dict containing
area_def dictionary
"""
keys=['area_id','proj_id','name','proj_dict','x_size','y_size','area_extent']
area_dict={key:getattr(area_def,key) for key in keys}
area_dict['proj_id']=area_dict['area_id']
return area_dict
###Output
_____no_output_____
###Markdown
Create a directory to hold the images and area_def dictionaries
###Code
map_dir = a301.map_dir / Path('map_data/wv_maps')
map_dir.mkdir(parents=True, exist_ok=True)
###Output
_____no_output_____
###Markdown
Here's a function that writes the image plus metadata to npz and json files. We'll need to use area_def_to_dict when we create the metadata_dict
###Code
import pdb
def dump_image(image_array,metadata_dict,foldername,
image_array_name='image'):
"""
write an image plus metadata to a folder
Parameters
----------
image_array: ndarray
the 2-d image to be saved
metadata_dict: dict
the metadata (e.g. parsed MODIS metadata and the area_def dictionary) to save alongside the image
foldername: Path object or string
the path to the folder that holds the image files
image_array_name: str
the root name for the npz and json files
i.e. image.npz and image.json
Returns: None
side effect -- an npz and a json file are written
"""
image_file=Path(foldername) / Path(image_array_name)
out_dict={image_array_name:image_array}
np.savez(image_file,**out_dict)
json_name = foldername / Path(image_array_name + '.json')
with open(json_name,'w') as f:
json.dump(metadata_dict,f,indent=4)
print(f"\ndumping {image_file}\n and {json_name}\n")
###Output
_____no_output_____
###Markdown
Write out images, putting useful metadata in metadata_dict
###Code
image_name='wv_nearir_lr'
metadata_dict=dict(modismeta = parseMeta(m5_file))
metadata_dict['area_def']=area_def_to_dict(area_def_lr)
metadata_dict['image_name']=image_name
metadata_dict['description']='modis near ir water vapor (cm) sampled at 5 km resolution'
metadata_dict['history']='written by level2_cartopy_resample.ipynb'
map_dir = a301.data_dir.parent / Path('map_data/wv_maps')
map_dir.mkdir(parents=True, exist_ok=True)
dump_image(image_wv_nearir_lr,metadata_dict,map_dir,image_name)
image_name='wv_nearir_hr'
metadata_dict=dict(modismeta = parseMeta(m5_file))
metadata_dict['area_def']=area_def_to_dict(area_def_hr)
metadata_dict['image_name']=image_name
metadata_dict['description']='modis near ir water vapor (cm) sampled at 1 km resolution'
metadata_dict['history']='written by level2_cartopy_resample.ipynb'
dump_image(image_wv_nearir_hr,metadata_dict,map_dir,image_name)
image_name='wv_ir'
metadata_dict=dict(modismeta = parseMeta(m5_file))
metadata_dict['area_def']=area_def_to_dict(area_def_lr)
metadata_dict['image_name']=image_name
metadata_dict['description']='modis ir water vapor (cm) sampled at 5 km resolution'
metadata_dict['history']='written by level2_cartopy_resample.ipynb'
dump_image(image_wv_ir,metadata_dict,map_dir,image_name)
area_def_lr
area_def_hr
area_def_lr
###Output
_____no_output_____ |
06 - Descriptive Stats with Python/notebooks/01_InterpretingDataUsingDescriptive Statistics.ipynb | ###Markdown
Understanding and Interpreting Data using Descriptive Statistics Data Analysis Workflow- Data Collection- Importing Data- Data Cleaning - Handling Missing Data - Outlier Detection and Removal- Exploring Data using Descriptive Statistics - Understanding Data using - Univariate Analysis - Bivariate Analysis - Multivariate Analysis - Understanding Data using Visualizations - Univariate - Histograms - Density Plot - Bivariate - Scatter Plot - Boxplot - Multivariate - Correlation Matrix - Covariance Matrix- Decision Making using Inferential Statistics - Hypothesis Testing (T-Test, Z-Test, Chi-square, ANOVA) - Creating Predicting Models Dataset Source- http://www.statsci.org/data/oz/ms212.html The data was supplied by Dr Richard J. Wilson, Department of Mathematics, University of Queensland. Original data file is tab-delimited text. Description 110 students in an introductory statistics class (MS212 taught by Professor John Eccleston and Dr Richard Wilson at The University of Queensland) participated in a simple experiment. The students took their own pulse rate. They were then asked to flip a coin. If the coin came up heads, they were to run in place for one minute. Otherwise they sat for one minute. Then everyone took their pulse again. The pulse rates and other physiological and lifestyle data are given in the data. There was missing data for one student and seemingly incorrect values for heights for two students. These observations were removed, resulting in 107 subjects in the final dataset. Five class groups between 1993 and 1998 participated in the experiment. The lecturer, Richard Wilson, was concerned that some students would choose the less strenuous option of sitting rather than running even if their coin came up heads, so in the years 1995-1998 a different method of random assignment was used. In these years, data forms were handed out to the class before the experiment. The forms were pre-assigned to either running or non-running and there were an equal number of each. In 1995 and 1998 not all of the forms were returned so the numbers running and sitting were still not entirely controlled. Variable Information  Importing Data
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
data = pd.read_table('../data/pulse.txt')
###Output
_____no_output_____
###Markdown
Exploring Data
###Code
data.head()
data.tail()
data.shape
data.columns
data.dtypes
data.info()
###Output
_____no_output_____
###Markdown
Data Preprocessing - Rename variables - Check missing values - Remove missing values - Check duplicate rows - Drop duplicate rows - Creating new variables - Outliers detection and removal Missing Values
###Code
data.isnull().sum()
plt.figure(figsize=(12,8))
sns.heatmap(data.isnull(), cmap="viridis")
plt.show()
# impute with mean
data['Pulse1'] = data['Pulse1'].fillna(data['Pulse1'].mean())
data['Pulse2'] = data['Pulse2'].fillna(data['Pulse2'].mean())
data.isnull().sum()
###Output
_____no_output_____
###Markdown
Duplicate Rows
###Code
data.duplicated().sum()
###Output
_____no_output_____
###Markdown
Outliers Detection and Removal
###Code
data.describe()
data.quantile(0.25)
# calculate quantile
Q1, Q2, Q3 = data['Height'].quantile([.25, .50, .75])
print("Q1 25 percentile of the given data is: ", Q1)
print("Q2 50 percentile of the given data is: ", Q2)
print("Q3 75 percentile of the given data is: ", Q3)
r = data.Height.max() - data.Height.min()
print(r)
# iqr
IQR = Q3 - Q1
print(IQR)
# set upper and lower limit [Q1 - 1.5 x IQR, Q3 + 1.5 x IQR]
lower = Q1 - 1.5 * IQR
upper = Q3 + 1.5 * IQR
lower, upper
data.shape
# detect & removal outliers
data_new = data[(data['Height'] < upper) & (data['Height'] > lower)]
data_new
data.shape, data_new.shape
###Output
_____no_output_____
###Markdown
Creating New Variable
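The new variable computed in the next cell is the standard body-mass index; assuming the dataset records weight in kilograms and height in centimetres (which the division by 100 in the code implies), the formula is $$\mathrm{BMI} = \frac{\text{Weight (kg)}}{\left(\text{Height (m)}\right)^{2}}.$$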
###Code
data.head()
data['BMI'] = data['Weight']/(data['Height']/100*data['Height']/100)
data.head()
# 1 = Underweight, 2 = Normal, 3 = Overweight, 4 = Obese
def bmicat(bmi):
if 0 <= bmi < 18.5:
return 1
elif 18.5 <= bmi < 25:
return 2
elif 25 <= bmi < 30:
return 3
else:
return 4
data["BMICat"] = data["BMI"].apply(bmicat)
data.head()
###Output
_____no_output_____
###Markdown
Logarithm (Base-10) Transformation
###Code
data['WeightLog10'] = np.log10(data['Weight'])
data.head()
###Output
_____no_output_____
###Markdown
Standardize a Variable
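The standardization applied in the next cell is the usual z-score, $$z_i = \frac{x_i - \bar{x}}{s},$$ where $\bar{x}$ and $s$ are the sample mean and standard deviation of `Age`.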
###Code
data['AgeStd'] = (data['Age'] - data['Age'].mean())/data['Age'].std()
data.head()
###Output
_____no_output_____
###Markdown
Identifying Variables
###Code
data.columns
###Output
_____no_output_____
###Markdown
Categorical Variables - Gender- Smokes- Alcohol- Exercise- Ran- BMICat Numerical Variables - Height- Weight- Age- Pulse1- Pulse2 Qualitative Univariate Analysis Frequency Distribution: One-way Table
###Code
import researchpy as rp
rp.summary_cat(data['Gender'])
rp.summary_cat(data[['Gender', 'Smokes', 'Alcohol', 'Exercise']])
rp.codebook(data[['Age', 'Height']])
data.columns
# Sex (1 = Male, 2 =Female)
data['Gender'].value_counts()
data['Gender'].value_counts(normalize=True)
# Regular smoker? (1 = Yes, 2 = No)
data['Smokes'].value_counts()
data['Smokes'].value_counts(normalize=True)
# Regular drinker? (1 = Yes, 2 = No)
data['Alcohol'].value_counts()
data['Alcohol'].value_counts(normalize=True)
# Frequency of exercise (1 = High, 2 = Moderate, 3 = Low)
data['Exercise'].value_counts()
# Frequency of exercise (1 = High, 2 = Moderate, 3 = Low)
data['Exercise'].value_counts(normalize=True)
data['Ran'].value_counts()
data['Ran'].value_counts(normalize=True)
data['BMICat'].value_counts()
data['BMICat'].value_counts(normalize=True)
plt.figure(figsize=(12,8))
sns.set(font_scale=1.5, palette= "viridis")
sns.countplot(data = data, x = "Gender")
plt.show()
plt.figure(figsize=(12,8))
sns.set(font_scale=1.5, palette= "viridis")
sns.countplot(data = data, x = "Smokes")
plt.show()
plt.figure(figsize=(12,8))
sns.set(font_scale=1.5, palette= "viridis")
sns.countplot(data = data, x = "Alcohol")
plt.show()
plt.figure(figsize=(12,8))
sns.set(font_scale=1.5, palette= "viridis")
sns.countplot(data = data, x = "Exercise")
plt.show()
plt.figure(figsize=(12,8))
sns.set(font_scale=1.5, palette= "viridis")
sns.countplot(data = data, x = "BMICat")
plt.show()
###Output
_____no_output_____
###Markdown
Qualitative Bivariate Analysis Frequency Distribution: Two-way Table
###Code
pd.crosstab(data['Gender'], data['Smokes'])
pd.crosstab(data['Gender'], data['Smokes'], normalize=True) * 100
pd.crosstab(data['Gender'], data['Alcohol'])
pd.crosstab(data['Gender'], data['Alcohol'], normalize=True)
pd.crosstab(data['Gender'], data['Exercise'])
pd.crosstab(data['Gender'], data['Exercise'], normalize=True)
pd.crosstab(data['Gender'], data['BMICat'])
###Output
_____no_output_____
###Markdown
Frequency Distribution: Marginal Table
###Code
pd.crosstab(data['Gender'], data['Smokes'], normalize=True, margins=True)
pd.crosstab(data['Gender'], data['Smokes'], normalize=True, margins=True)
plt.figure(figsize=(12,8))
sns.set(font_scale=1.5, palette= "viridis")
sns.countplot(data = data, x = "Gender", hue="Smokes")
plt.show()
plt.figure(figsize=(12,8))
sns.set(font_scale=1.5, palette= "viridis")
sns.countplot(data = data, x = "Gender", hue="Alcohol")
plt.show()
plt.figure(figsize=(12,8))
sns.set(font_scale=1.5, palette= "viridis")
sns.countplot(data = data, x = "Gender", hue="Exercise")
plt.show()
plt.figure(figsize=(12,8))
sns.set(font_scale=1.5, palette= "viridis")
sns.countplot(data = data, x = "Gender", hue="BMICat")
plt.show()
###Output
_____no_output_____
###Markdown
Quantitative Univariate Analysis
###Code
data['Height'].describe()
data['Weight'].describe()
data['Age'].describe()
data['Pulse1'].describe()
data['Pulse2'].describe()
data['BMI'].describe()
data['BMI'].skew()
data['BMI'].kurtosis()
plt.figure(figsize=(12,8))
sns.set(font_scale=1.5, palette= "viridis")
sns.histplot(data=data, x="Age")
plt.show()
plt.figure(figsize=(12,8))
sns.set(font_scale=1.5, palette= "viridis")
sns.histplot(data=data, x="Height")
plt.show()
plt.figure(figsize=(12,8))
sns.set(font_scale=1.5, palette= "viridis")
sns.histplot(data=data, x="Pulse1")
plt.show()
plt.figure(figsize=(12,8))
sns.set(font_scale=1.5, palette= "viridis")
sns.histplot(data=data, x="Pulse2")
plt.show()
plt.figure(figsize=(12,8))
sns.set(font_scale=1.5, palette= "viridis")
sns.histplot(data=data, x="BMI")
plt.show()
plt.figure(figsize=(12,8))
sns.set(font_scale=1.5, palette= "viridis")
sns.boxplot(data=data, x="BMI")
plt.show()
plt.figure(figsize=(12,8))
sns.set(font_scale=1.5, palette= "viridis")
sns.boxplot(data['BMI'])
plt.show()
###Output
_____no_output_____
###Markdown
Quantitative Bivariate Analysis
###Code
data.Age.corr(data.Height)
data.Age.corr(data.BMI)
data.Age.corr(data.Weight)
data.Age.cov(data.BMI)
plt.figure(figsize=(12,8))
sns.set(font_scale=1.5, palette= "viridis")
sns.scatterplot(data=data, x="Age", y="BMI")
plt.show()
plt.figure(figsize=(12,8))
sns.set(font_scale=1.5, palette= "viridis")
sns.scatterplot(data=data, x="Age", y="BMI", hue="Gender")
plt.show()
plt.figure(figsize=(12,8))
sns.set(font_scale=1.5, palette= "viridis")
sns.scatterplot(data=data, x="Age", y="Weight")
plt.show()
plt.figure(figsize=(12,8))
sns.set(font_scale=1.5, palette= "viridis")
sns.scatterplot(data=data, x="Age", y="Pulse1")
plt.show()
###Output
_____no_output_____
###Markdown
Multivariate Analysis
###Code
data.corr()
data.cov()
plt.figure(figsize=(12,8))
sns.set(font_scale=1.5, palette= "viridis")
sns.heatmap(data.corr())
plt.show()
plt.figure(figsize=(20,8))
sns.set(font_scale=1.5, palette= "viridis")
sns.heatmap(data.corr(), annot=True)
plt.show()
###Output
_____no_output_____
###Markdown
Categorical - Quantitative(C-Q) Analysis
###Code
data.groupby('Gender')['BMI'].describe()
plt.figure(figsize=(12,8))
sns.set(font_scale=1.5, palette= "viridis")
sns.boxplot(data=data, x="Gender", y="BMI")
plt.show()
data.groupby('Gender')['BMI'].describe()
###Output
_____no_output_____
###Markdown
Categorical- Categorical(CC) Analysis
###Code
data.groupby('Gender')['Smokes'].value_counts().unstack()
plt.figure(figsize=(12,8))
sns.set(font_scale=1.5, palette= "viridis")
sns.countplot(data=data, x="Gender", hue='Smokes')
plt.show()
###Output
_____no_output_____
###Markdown
Quantitative - Quantitative Analysis
###Code
data.Age.corr(data.BMI)
plt.figure(figsize=(12,8))
sns.set(font_scale=1.5, palette= "viridis")
sns.scatterplot(data=data, x="Age", y="BMI")
plt.show()
plt.figure(figsize=(12,8))
sns.set(font_scale=1.5, palette= "viridis")
sns.scatterplot(data=data, x="Age", y="BMI", hue='Gender')
plt.show()
###Output
_____no_output_____ |
Part 4- Object Tracking and Localization/Optical Flow.ipynb | ###Markdown
Optical Flow Optical flow tracks objects by looking at where the *same* points have moved from one image frame to the next. Let's load in a few example frames of a pacman-like face moving to the right and down and see how optical flow finds **motion vectors** that describe the motion of the face! As usual, let's first import our resources and read in the images.
###Code
import numpy as np
import matplotlib.image as mpimg # for reading in images
import matplotlib.pyplot as plt
import cv2 # computer vision library
%matplotlib inline
# Read in the image frames
frame_1 = cv2.imread('images/pacman_1.png')
frame_2 = cv2.imread('images/pacman_2.png')
frame_3 = cv2.imread('images/pacman_3.png')
# convert to RGB
frame_1 = cv2.cvtColor(frame_1, cv2.COLOR_BGR2RGB)
frame_2 = cv2.cvtColor(frame_2, cv2.COLOR_BGR2RGB)
frame_3 = cv2.cvtColor(frame_3, cv2.COLOR_BGR2RGB)
# Visualize the individual color channels
f, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(20,10))
ax1.set_title('frame 1')
ax1.imshow(frame_1)
ax2.set_title('frame 2')
ax2.imshow(frame_2)
ax3.set_title('frame 3')
ax3.imshow(frame_3)
###Output
_____no_output_____
###Markdown
Finding Points to Track Before optical flow can work, we have to give it a set of *keypoints* to track between two image frames! In the below example, we use a **Shi-Tomasi corner detector**, which uses the same process as a Harris corner detector to find patterns of intensity that make up a "corner" in an image, only it adds an additional parameter that helps select the most prominent corners. You can read more about this detection algorithm in [the documentation](https://docs.opencv.org/3.0-beta/doc/py_tutorials/py_feature2d/py_shi_tomasi/py_shi_tomasi.html). Alternatively, you could choose to use Harris or even ORB to find feature points. I just found that this works well. **You should see that the detected points appear at the corners of the face.**
###Code
# parameters for ShiTomasi corner detection
feature_params = dict( maxCorners = 10,
qualityLevel = 0.2,
minDistance = 5,
blockSize = 5 )
# convert all frames to grayscale
gray_1 = cv2.cvtColor(frame_1, cv2.COLOR_RGB2GRAY)
gray_2 = cv2.cvtColor(frame_2, cv2.COLOR_RGB2GRAY)
gray_3 = cv2.cvtColor(frame_3, cv2.COLOR_RGB2GRAY)
# Take first frame and find corner points in it
pts_1 = cv2.goodFeaturesToTrack(gray_1, mask = None, **feature_params)
# display the detected points
plt.imshow(frame_1)
for p in pts_1:
# plot x and y detected points
plt.plot(p[0][0], p[0][1], 'r.', markersize=15)
# print out the x-y locations of the detected points
print(pts_1)
###Output
[[[ 318. 82.]]
[[ 308. 304.]]
[[ 208. 188.]]
[[ 309. 81.]]
[[ 299. 304.]]
[[ 199. 188.]]]
###Markdown
Perform Optical Flow Once we've detected keypoints on our initial image of interest, we can calculate the optical flow between this image frame (frame 1) and the next frame (frame 2), using OpenCV's `calcOpticalFlowPyrLK` which is [documented here](https://docs.opencv.org/trunk/dc/d6b/group__video__track.htmlga473e4b886d0bcc6b65831eb88ed93323). It takes in an initial image frame, the next image, and the first set of points, and it returns the detected points in the next frame and a value that indicates how good matches are between points from one frame to the next. The parameters also include a window size and maxLevel that indicate the size of a window and number of levels that will be used to scale the given images using pyramid scaling; this version performs an iterative search for matching points and this matching criteria is reflected in the last parameter (you may need to change these values if you are working with a different image, but these should work for the provided example).
###Code
# parameters for lucas kanade optical flow
lk_params = dict( winSize = (5,5),
maxLevel = 2,
criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
# calculate optical flow between first and second frame
pts_2, match, err = cv2.calcOpticalFlowPyrLK(gray_1, gray_2, pts_1, None, **lk_params)
# Select good matching points between the two image frames
good_new = pts_2[match==1]
good_old = pts_1[match==1]
###Output
_____no_output_____
###Markdown
Next, let's display the resulting motion vectors! You should see the first image with motion vectors drawn on it that indicate the direction of motion from the first frame to the next.
###Code
# create a mask image for drawing (u,v) vectors on top of the second frame
mask = np.zeros_like(frame_2)
# draw the lines between the matching points (these lines indicate motion vectors)
for i,(new,old) in enumerate(zip(good_new,good_old)):
a,b = new.ravel()
c,d = old.ravel()
# draw points on the mask image
mask = cv2.circle(mask,(a,b),5,(200),-1)
# draw motion vector as lines on the mask image
mask = cv2.line(mask, (a,b),(c,d), (200), 3)
# add the line image and second frame together
composite_im = np.copy(frame_2)
composite_im[mask!=0] = [0]
plt.imshow(composite_im)
###Output
_____no_output_____
###Markdown
TODO: Perform Optical Flow between image frames 2 and 3 Repeat this process but for the last two image frames; see what the resulting motion vectors look like. Imagine doing this for a series of image frames and plotting the entire motion path of a given object.
###Code
## TODO: Perform optical flow between image frames 2 and 3
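## One possible solution (a sketch, not the only valid approach): repeat the
## frame 1 -> frame 2 workflow above, starting from freshly detected corners in frame 2.
pts_2 = cv2.goodFeaturesToTrack(gray_2, mask=None, **feature_params)
pts_3, match, err = cv2.calcOpticalFlowPyrLK(gray_2, gray_3, pts_2, None, **lk_params)
## keep only the well-matched points
good_new = pts_3[match==1]
good_old = pts_2[match==1]
## draw the motion vectors on top of the third frame, as before
mask = np.zeros_like(frame_3)
for new, old in zip(good_new, good_old):
    a,b = new.ravel()
    c,d = old.ravel()
    mask = cv2.circle(mask,(a,b),5,(200),-1)
    mask = cv2.line(mask, (a,b),(c,d), (200), 3)
composite_im = np.copy(frame_3)
composite_im[mask!=0] = [0]
plt.imshow(composite_im)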
###Output
_____no_output_____ |
chapter_appendix/naive-bayes-to-mv-to-appendix.ipynb | ###Markdown
Naive Bayes Classification:label:`chapter_naive_bayes` Before we worry about complex optimization algorithms or GPUs, we can already deploy our first classifier, relying only on simple statistical estimators and our understanding of conditional independence. Learning is all about making assumptions. If we want to classify a new data point that we've never seen before we have to make some assumptions about which data points are similar to each other. The naive Bayes classifier, a popular and remarkably simple algorithm, assumes all features are independent of each other to simplify the computation. In this chapter, we will apply this model to recognize characters in images. Let's first import libraries and modules. In particular, we import `d2l`, which now contains the function `use_svg_display` we defined in :numref:`chapter_prob`.
###Code
%matplotlib inline
import d2l
import math
from mxnet import np, npx, gluon
npx.set_np()
d2l.use_svg_display()
###Output
_____no_output_____
###Markdown
Optical Character Recognition MNIST :cite:`LeCun.Bottou.Bengio.ea.1998` is one of the widely used datasets. It contains 60,000 images for training and 10,000 images for validation. We will formally introduce training data in :numref:`chapter_linear_regression` and validation data in :numref:`chapter_model_selection` later; here we simply note that we will train the naive Bayes model on the training data and then test its quality on the validation data. Each image contains a handwritten digit from 0 to 9. The task is classifying each image into the corresponding digit. Gluon, MXNet's high-level interface for implementing neural networks, provides a `MNIST` class in the `data.vision` module to automatically retrieve the dataset via our Internet connection. Subsequently, Gluon will use the already-downloaded local copy. We specify whether we are requesting the training set or the test set by setting the value of the parameter `train` to `True` or `False`, respectively. Each image is a grayscale image with both width and height of 28, with shape $(28,28,1)$. We use a customized transformation to remove the last channel dimension. In addition, each pixel is represented by an unsigned 8-bit integer; we quantize them into binary features to simplify the problem.
###Code
np.floor?
def transform(data, label):
return np.floor(data.astype('float32')/128).squeeze(axis=-1), label
mnist_train = gluon.data.vision.MNIST(train=True, transform=transform)
mnist_test = gluon.data.vision.MNIST(train=False, transform=transform)
###Output
_____no_output_____
###Markdown
We can access a particular example, which contains the image and the corresponding label.
###Code
image, label = mnist_train[2]
image.shape, label
###Output
_____no_output_____
###Markdown
Our example, stored here in the variable `image`, corresponds to an image with a height and width of 28 pixels. Each pixel is an 8-bit unsigned integer (uint8) with values between 0 and 255. It is stored in a 3D ndarray, whose last dimension is the number of channels. Since the dataset is grayscale, the number of channels is 1. When we encounter color images, we'll have 3 channels for red, green, and blue. To keep things simple, we will record the shape of the image with the height and width of $h$ and $w$ pixels, respectively, as $h \times w$ or `(h, w)`.
###Code
image.shape, image.dtype
###Output
_____no_output_____
###Markdown
The label of each image is represented as a scalar in NumPy. Its type is a 32-bit integer.
###Code
label, type(label), label.dtype
###Output
_____no_output_____
###Markdown
We can also access multiple examples at the same time.
###Code
images, labels = mnist_train[10:38]
images.shape, labels.shape
###Output
_____no_output_____
###Markdown
Now let's visualize these examples.
###Code
d2l.show_images(images, 2, 9);
###Output
_____no_output_____
###Markdown
The Probabilistic Model for ClassificationIn a classification task, we map an example into a category. Here an example is a grayscale $28\times 28$ image, and a category is a digit. (Refer to :numref:`chapter_softmax` for a more detailed explanation.) One natural way to express the classification task is via the probabilistic question: what is the most likely label given the features (i.e. image pixels)? Denote by $\mathbf x\in\mathbb R^d$ the features of the example and $y\in\mathbb R$ the label. Here features are image pixels, where we can reshape a 2-dimensional image to a vector so that $d=28^2=784$, and labels are digits. We will formally define general features and labels in :numref:`chapter_linear_regression`. The $p(y | \mathbf{x})$ is the probability of the label given the features. If we are able to compute these probabilities, which are $p(y | \mathbf{x})$ for $y=0,\ldots,9$ in our example, then the classifier will output the prediction $\hat{y}$ given by the expression:$$\hat{y} = \operatorname*{argmax} \> p(y | \mathbf{x}).$$Unfortunately, this requires that we estimate $p(y | \mathbf{x})$ for every value of $\mathbf{x} = x_1, ..., x_d$. Imagine that each feature could take one of $2$ values. For example, the feature $x_1 = 1$ might signify that the word apple appears in a given document and $x_1 = 0$ would signify that it does not. If we had $30$ such binary features, that would mean that we need to be prepared to classify any of $2^{30}$ (over 1 billion!) possible values of the input vector $\mathbf{x}$.Moreover, where is the learning? If we need to see every single possible example in order to predict the corresponding label then we're not really learning a pattern but just memorizing the dataset. The Naive Bayes ClassifierFortunately, by making some assumptions about conditional independence, we can introduce some inductive bias and build a model capable of generalizing from a comparatively modest selection of training examples. To begin, let's use Bayes Theorem, to express the classifier as$$\hat{y} = \operatorname*{argmax}_y \> p(y | \mathbf{x}) = \operatorname*{argmax}_y \> \frac{p( \mathbf{x} | y) p(y)}{p(\mathbf{x})}.$$Note that the denominator is the normalizing term $p(\mathbf{x})$ which does not depend on the value of the label $y$. As a result, we only need to worry about comparing the numerator across different values of $y$. Even if calculating the demoninator turned out to be intractable, we could get away with ignoring it, so long as we could evaluate the numerator. Fortunately, however, even if we wanted to recover the normalizing constant, we could, since we know that $\sum_y p(y | \mathbf{x}) = 1$, hence we can always recover the normalization term.Now, let's focus on $p( \mathbf{x} | y)$. Using the chain rule of probability, we can express the term $p( \mathbf{x} | y)$ as$$p(x_1 |y) \cdot p(x_2 | x_1, y) \cdot ... \cdot p( x_d | x_1, ..., x_{d-1}, y)$$By itself, this expression doesn't get us any further. We still must estimate roughly $2^d$ parameters. However, if we assume that *the features are conditionally independent of each other, given the label*, then suddenly we're in much better shape, as this term simplifies to $\prod_i p(x_i | y)$, giving us the predictor$$ \hat{y} = \operatorname*{argmax}_y \> \prod_{i=1}^d p(x_i | y) p(y).$$If we can estimate $\prod_i p(x_i=1 | y)$ for every $i$ and $y$, and save its value in $P_{xy}[i,y]$, here $P_{xy}$ is a $d\times n$ matrix with $n$ being the number of classes and $y\in\{1,\ldots,n\}$. 
In addition, we estimate $p(y)$ for every $y$ and save it in $P_y[y]$, with $P_y$ a $n$-length vector. Then for any new example $\mathbf x$, we could compute$$ \hat{y} = \operatorname*{argmax}_y \> \prod_{i=1}^d P_{xy}[x_i, y]P_y[y],$$:eqlabel:`eq_naive_bayes_estimation`for any $y$. So our assumption of conditional independence has taken the complexity of our model from an exponential dependence on the number of features $O(2^dn)$ to a linear dependence, which is $O(dn)$. TrainingThe problem now is that we don't actually know $P_{xy}$ and $P_y$. So we need to estimate their values given some training data first. This is what is called *training* the model. Estimating $P_y$ is not too hard. Since we are only dealing with $10$ classes, this is pretty easy - simply count the number of occurrences $n_y$ for each of the digits and divide it by the total amount of data $n$. For instance, if digit 8 occurs $n_8 = 5,800$ times and we have a total of $n = 60,000$ images, the probability estimate is $p(y=8) = 0.0967$.
###Code
X, Y = mnist_train[:] # all training examples
n_y = np.zeros((10))
for y in range(10):
n_y[y] = (Y==y).sum()
P_y = n_y / n_y.sum()
P_y
###Output
_____no_output_____
###Markdown
Now on to slightly more difficult things $P_{xy}$. Since we picked black and white images, $p(x_i | y)$ denotes the probability that pixel $i$ is switched on for class $y$. Just like before we can go and count the number of times $n_{iy}$ such that an event occurs and divide it by the total number of occurrences of $y$, i.e. $n_y$. But there's something slightly troubling: certain pixels may never be black (e.g. for very well cropped images the corner pixels might always be white). A convenient way for statisticians to deal with this problem is to add pseudo counts to all occurrences. Hence, rather than $n_{iy}$ we use $n_{iy}+1$ and instead of $n_y$ we use $n_{y} + 1$. This is also called [Laplace Smoothing](https://en.wikipedia.org/wiki/Additive_smoothing).
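In formula form (a small sketch matching the counts used in the next cell), the smoothed estimate is $$\hat{P}_{xy}[i, y] = \frac{n_{iy} + 1}{n_y + 1},$$ while $P_y[y] = n_y / n$ is left unsmoothed, as computed above.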
###Code
n_x = np.zeros((10, 28, 28))
for y in range(10):
n_x[y] = np.array(X.asnumpy()[Y.asnumpy()==y].sum(axis=0))
P_xy = (n_x+1) / (n_y+1).reshape(10,1,1)
d2l.show_images(P_xy, 2, 5);
###Output
_____no_output_____
###Markdown
By visualizing these $10\times 28\times 28$ probabilities (for each pixel for each class) we could get some mean-looking digits. ... Now we can use :eqref:`eq_naive_bayes_estimation` to predict a new image. Given $\mathbf x$, the following function computes $p(\mathbf x|y)p(y)$ for every $y$.
###Code
np.expand_dims?
def bayes_pred(x):
x = np.expand_dims(x, axis=0) # (28, 28) -> (1, 28, 28)
p_xy = P_xy * x + (1-P_xy)*(1-x)
p_xy = p_xy.reshape(10,-1).prod(axis=1) # p(x|y)
return np.array(p_xy) * P_y
image, label = mnist_test[0]
bayes_pred(image)
###Output
_____no_output_____
###Markdown
This went horribly wrong! To find out why, let's look at the per pixel probabilities. They're typically numbers between $0.001$ and $1$. We are multiplying $784$ of them. At this point it is worth mentioning that we are calculating these numbers on a computer, hence with a fixed range for the exponent. What happens is that we experience *numerical underflow*, i.e. multiplying all the small numbers leads to something even smaller until it is rounded down to zero. To fix this we use the fact that $\log a b = \log a + \log b$, i.e. we switch to summing logarithms. Even if both $a$ and $b$ are small numbers, the logarithm values should be in a proper range.
###Code
a = 0.1
print('underflow:', a**784)
print('logarithm is normal:', 784*math.log(a))
###Output
underflow: 0.0
logarithm is normal: -1805.2267129073316
###Markdown
Since the logarithm is an increasing function, so we can rewrite :eqref:`eq_naive_bayes_estimation` as$$ \hat{y} = \operatorname*{argmax}_y \> \sum_{i=1}^d \log P_{xy}[x_i, y] + \log P_y[y].$$We can implement the following stable version:
###Code
log_P_xy = np.log(P_xy)
log_P_xy_neg = np.log(1-P_xy)
log_P_y = np.log(P_y)
def bayes_pred_stable(x):
x = np.expand_dims(x, axis=0) # (28, 28) -> (1, 28, 28)
p_xy = log_P_xy * x + log_P_xy_neg * (1-x)
p_xy = p_xy.reshape(10,-1).sum(axis=1) # p(x|y)
return p_xy + log_P_y
py = bayes_pred_stable(image)
py
###Output
_____no_output_____
###Markdown
Check if the prediction is correct.
###Code
# convert label which is a scalar tensor of int32 dtype
# to a Python scalar integer for comparison
py.argmax(axis=0) == int(label)
###Output
_____no_output_____
###Markdown
Now predict a few validation examples; we can see the Bayes classifier works pretty well except for the 9th and 16th digits.
###Code
def predict(X):
return [bayes_pred_stable(x).argmax(axis=0).astype(np.int32) for x in X]
X, y = mnist_test[:18]
preds = predict(X)
d2l.show_images(X, 2, 9, titles=[str(d) for d in preds]);
###Output
_____no_output_____
###Markdown
Finally, let's compute the overall accuracy of the classifier.
###Code
X, y = mnist_test[:]
preds = np.array(predict(X), dtype=np.int32)
'Validation accuracy', float((preds == y).sum()) / len(y)
###Output
_____no_output_____ |
Intro-to-sampling.ipynb | ###Markdown
Are food trucks in rural areas? Where are the most Italian restaurants? What county has the shortest wait time for an Uber? To answer these questions, I'll provide you with some useful intuition, but unfortunately, I will not be able to give you a definite or complete answer. We will develop skills to be able to start asking a lot of questions - the answers are a bit further out. To start sampling America by the county, we need to generate some data points (latitude and longitude pairs) to use. In this notebook, we generate a list of latitudes and longitudes that we can then use to query different APIs. We are given a latitude and longitude as the center of each county and the total square area of each county from the Gazetteer Dataset downloaded from [here](http://people.bu.edu/balawson/csv/congress.csv'). I use the Latin Square technique to randomly sample the county. I use PyDOE to implement this and you can read more about Python Design Of Experiments [here](http://pythonhosted.org/pyDOE/randomized.html). Here's the general process outline:- Retrieve size of county and center of county- Generate samples using Latin Square sampling- Apply samples to county size and center to retrieve a sample point to describe the county
###Code
import pandas as pd
import random
from pyDOE import *
import math
import matplotlib.pyplot as plt
%matplotlib inline
###Output
_____no_output_____
###Markdown
Here's the link to Wikipedia's page on [Latin Hypercube sampling](https://en.wikipedia.org/wiki/Latin_hypercube_sampling). In an oversimplified way, it can be thought of as "Sudoku"- every row and column has exactly one sample. I would like to use orthogonal latin square sampling, that is to add another constraint on exactly one sample per quadrant as well, but this isn't implemented (yet) in the pyDOE library. This would be a great open source project if you are looking for one. We will be using the general latin cube sampling to randomly sample points in a county. The output of this algorithm will be values between [0-1] that we can use as ratios away from the center of the county.
###Code
cube = lhs(3, samples=20, criterion='center')
cube
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
for sample in cube:
xs, ys, zs = sample
ax.scatter(xs, ys, zs)
plt.show()
num_of_samples = 30 #samples per county
a = lhs(2, samples=num_of_samples, criterion='center')
a
plt.plot(zip(*a)[0], zip(*a)[1], 'ro')
plt.plot([0, 0], [-1,1], 'k-', lw=2)
plt.plot([-1, 1], [0,0], 'k-', lw=2)
plt.show()
#renormalize values between -1 and 1 in order to plot on all four quadrants
b = (a-0.5)*2
b
plt.plot(zip(*b)[0], zip(*b)[1], 'bo')
plt.plot([0, 0], [-1,1], 'k-', lw=2)
plt.plot([-1, 1], [0,0], 'k-', lw=2)
plt.show()
###Output
_____no_output_____
###Markdown
Now that we can sample points, we need to be able to apply these samples to each county. Using the data from the Census file we can extract the center as a pair of latitude and longitude points and the total area of the county. Assuming each county has the general shape of a square, we can derive the radius of the county and apply our Latin Square sample to the county.
###Code
#orginal data source: https://www.census.gov/geo/maps-data/data/gazetteer2014.html
df = pd.DataFrame.from_csv("csv/congress.csv")
states = df.index
df.index = df.GEOID #because current index is just state abbreviation
df['states'] = states
#grab the first row and look at it - what info do we want to grab? ALAND, INTPTLAT, and INTPTLONG
df.iloc[0:1]
#let's try to look at the info that's interesting at the moment (1001 corresponds to the first GEOID/index value)
df["INTPTLONG"][1001]
###Output
_____no_output_____
###Markdown
This demonstrates a frustrating aspect of data analysis - you never know the condition of the data unless you collect it yourself.
###Code
#why the error message? try this one:
df["INTPTLONG "][1001]
#quick fix
df.columns = [x.strip() for x in df.columns]
df["INTPTLONG"][1001]
###Output
_____no_output_____
###Markdown
The general idea here is to find the maximum distance from the center in the x and y directions and then find random samples within that boundary.
###Code
def get_max_distances(land_area):
#assuming counties are square (smaller area than circle - less points near or outside boundary)
side = math.sqrt(land_area)
r = side/2
return r
def get_max_distances_circle(land_area):
#assuming counties are circles (which they are not, but shapes are hard)
r_2 = land_area/math.pi
r = math.sqrt(r_2)
return r
#http://gis.stackexchange.com/questions/2951/algorithm-for-offsetting-a-latitude-longitude-by-some-amount-of-meters
def meters_to_degs(x, y):
#takes meters in the x- and y-directions
#returns a tuple changes in degree
#this method is refered to as 'quick and dirty' and not suggested for life-dependent applications or long distances
return ((y/111111.0), x/(111111 * math.cos(y)))
def get_degree_ranges(land_area):
d = get_max_distances(land_area)
return (meters_to_degs(d, d))
#let's test the functions I wrote using the first entry in the csv
x = df.iloc[0:1].ALAND
al = get_degree_ranges(x)
al #this is a random point in the first county
def sampler(row, val):
#row corresponds to one of the dataframe rows
#val is the row of the Latin Square that I will use for this sample
latin_square_coefficient = b[val]
multiplier = get_degree_ranges(row.ALAND)
center = [row.INTPTLAT, row.INTPTLONG]
return latin_square_coefficient*multiplier + center
###Output
_____no_output_____
###Markdown
Let's test out the sampling function using the first row of the dataframe.
###Code
print "latitude\tlongitude"
for x in xrange(num_of_samples):
sample = sampler(df.loc[1001], x)
print "{0}\t{1}".format(sample[0], sample[1])
###Output
latitude longitude
32.5969020036 -86.5073573605
32.3850190827 -86.6265959086
32.6557583705 -86.6530933638
32.5380456367 -86.6000984535
32.4085616295 -86.6663420914
32.4674179964 -86.6133471811
32.7028434641 -86.8253268223
32.6204445504 -86.6398446362
32.3614765359 -86.8120780947
32.6910721907 -86.4676111777
32.608673277 -86.7458344568
32.4438754496 -86.5471035432
32.5615881835 -86.4941086329
32.4321041762 -86.6795908189
32.5851307302 -86.520606088
32.4909605432 -86.5868497259
32.455646723 -86.7325857292
32.6439870971 -86.5736009983
32.5027318165 -86.7855806395
32.5262743633 -86.8385755498
32.5733594568 -86.5338548156
32.6322158238 -86.6928395465
32.5145030899 -86.772331912
32.3732478093 -86.4808599053
32.4203329029 -86.5603522708
32.3967903561 -86.7193370017
32.6793009173 -86.7060882741
32.6675296439 -86.4543624502
32.5498169101 -86.7590831844
32.4791892698 -86.7988293671
###Markdown
Sweet, so now we have 30 points generated inside our target county. We should stop and check at this point to make sure our methodology is correct, that is, check to make sure these points are actually inside the correct county. We can visualize this on a map. I'm grabbing the first county, which is in AL, and plotting the points against the boundary of the county, using a GeoJSON file (which you can download [here](http://people.bu.edu/balawson/json/al.geojson) and is originally from [here](http://catalog.opendata.city/dataset/alabama-counties-polygon/resource/af46d2c0-5f84-42ae-85a1-d2ab7d46d9a7))
###Code
import folium, json
row = df.loc[1001]
center = [row.INTPTLAT, row.INTPTLONG]
name = row.NAME
#you can download this file here: (http://catalog.civicdashboards.com/dataset/1c992edf-5ec7-456b-8191-c73a33bb79e1/resource/af46d2c0-5f84-42ae-85a1-d2ab7d46d9a7/download/ee2d088c0afb441cb8eaf57a8d279de6temp.geojson)
#it contains the boundaries for all the counties in AL
with open('al.geojson') as f:
countylines = json.load(f)
multi = []
for x in countylines['features']:
if name in x['properties']['name']:
for y in x['geometry']['coordinates'][0][0]:
multi.append([y[1], y[0]])
#sample code from the Folium tutorial at (https://github.com/python-visualization/folium)
map_osm = folium.Map(location=[center[0], center[1]])
# Create the map and add the line
map_osm.line(multi, line_color='#FF0000', line_weight=5)
#loop over point
for x in xrange(num_of_samples):
sample = sampler(df.loc[1001], x)
map_osm.simple_marker([sample[0], sample[1]], popup='Sample Number: {0}'.format(x))
map_osm.create_map(path='osm.html')
#http://nbviewer.ipython.org/url/ocefpaf.github.com/python4oceanographers/downloads/notebooks/2015-02-02-cartopy_folium_shapefile.ipynb
#This function is created by Filipe Fernandes and found at the above link, used under CC:Attribution-ShareAlike. No modifications have been made.
from IPython.display import IFrame, HTML
def inline_map(m, width=500, height=500):
"""Takes a folium instance and embed HTML."""
m._build_map()
srcdoc = m.HTML.replace('"', '"')
embed = HTML('<iframe srcdoc="{}" '
'style="width: {}px; height: {}px; '
'border: none"></iframe>'.format(srcdoc, width, height))
return embed
inline_map(map_osm)
###Output
_____no_output_____
###Markdown
Okay, let's see how we did for this one example county. Note: our sampling is based on a random algorithm so everyone *should* have different plots. Discussion: Do we include points that are in the county? How do they relate to the different things we are measuring?
###Code
#blank dataframe
samples = pd.DataFrame(columns=[x for x in range(num_of_samples)])
#sample
for idx, row in df.iterrows():
for x in range(num_of_samples):
sample = (sampler(row, x))
samples.loc[idx,x] = (sample[0], sample[1])
samples.info()
print "latitude\tlongitude"
for x in samples.loc[1001]:
print "{0}\t{1}".format(x[0], x[1])
#get the state abbreviation for later use
samples['states'] = df.states
samples.head()
samples.to_csv('csv/samples.csv')
###Output
_____no_output_____
###Markdown
Now that we have the samples, we can query those locations and begin to ask questions. Below, I will show you how to work with a few corporate APIs to pursue some questions. We can try this using the Uber and Yelp APIs [here](http://nbviewer.ipython.org/github/benlawson/intro-to-data/blob/master/sampler.ipynb?flush_cache=true)
###Code
# Code for setting the style of the notebook
from IPython.core.display import HTML
def css_styling():
styles = open("./theme/custom.css", "r").read()
return HTML(styles)
css_styling()
###Output
_____no_output_____ |
Kickstarter_Analysis.ipynb | ###Markdown
Kickstarter
###Code
# Mount to Drive
from google.colab import drive
drive.mount('/content/drive')
# Import data and necessary libraries
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
df = pd.read_csv('/content/drive/MyDrive/Copy of ks-projects-201801.csv')
###Output
_____no_output_____
###Markdown
**Step 1: Exploration Ideas** Audience: Project owners who are considering using Kickstarter Possible Big Questions:- What are the most lucrative categories on Kickstarter?- The trend of Number of Projects and Success Rate over time?
###Code
df.info()
df.describe()
###Output
_____no_output_____
###Markdown
**Step 2: Data Cleaning** Drop the columns `currency`, `goal`, `pledged`, `usd_pledged` since we won't use them for our analysis.
###Code
df.drop(columns=['currency', 'goal', 'pledged', 'usd pledged'], inplace=True)
###Output
_____no_output_____
###Markdown
Change the columns:- `usd_pledged_real` --> `pledged`- `usd_goal_real` --> `goal` for easier use.
###Code
df = df.rename(columns={'usd_pledged_real': 'pledged', 'usd_goal_real': 'goal'})
df.columns = ['ID', 'name', 'category', 'main_category', 'deadline', 'launched',
'state', 'backers', 'country', 'pledged', 'goal']
###Output
_____no_output_____
###Markdown
Convert columns into their correct datatype.
###Code
df.info()
# My solution: change type of columns "launched" and "deadline" to datetime
df['launched'] = pd.to_datetime(df['launched'])
df['deadline'] = pd.to_datetime(df['deadline'])
###Output
_____no_output_____
###Markdown
Kickstarter was founded in 2009
###Code
print(df['launched'].min())
print(df['launched'].max())
# My solution: keep only projects launched in the full years 2009 through 2017
df = df[(df['launched'].dt.year>2008) & (df['launched'].dt.year<2018)]
###Output
_____no_output_____
###Markdown
Check for null values. Fill them with `Unknown` if there are any.
###Code
df.isnull().sum()
df['name'].fillna('Unknown', inplace=True)
###Output
_____no_output_____
###Markdown
Check for duplicates and drop them if there are any.
###Code
df['ID'].duplicated().sum()
df.duplicated().sum()
###Output
_____no_output_____
###Markdown
Projects with state `undefined` are errors during data collection.
###Code
df['state'].value_counts()
df[df['state']=='undefined']['backers'].sum()
# My solution: change all state values "undefined" --> "failed"
df['state'] = df['state'].str.replace('undefined', 'failed')
df['state'].value_counts()
###Output
_____no_output_____
###Markdown
Check the country column.
###Code
df['country'].value_counts()
# My solution: change the strange value to "Unknown"
# df['country'] = df['country'].apply(lambda x: 'Unknown' if x=='N,0"' else x)
df['country'] = df['country'].str.replace('N,0"', 'Unknown')
df['country'].value_counts()
df.info()
df.describe()
###Output
_____no_output_____
###Markdown
**Step 3: EDA - Exploratory Data Analysis** What are the most lucrative categories on Kickstarter? Plot the top 10 Categories By Number of Projects and answer the question below.
###Code
# Select top 10 categories
top10_num = df.groupby('category').size().sort_values(ascending=False).head(10).reset_index()
top10_num.columns = ['category', 'no. of projects']
top10_num
plt.figure(figsize=(17, 5))
sns.barplot(data=top10_num,
x='no. of projects',
y='category',
color='salmon')
plt.grid(linestyle='-.')
plt.show()
# What is the difference in the number of projects between the top 1 and top 10 categories?
top10_num['no. of projects'].max() - top10_num['no. of projects'].min()
###Output
_____no_output_____
###Markdown
In the Top 10 Categories by Number of Projects, plot the Total Pledged.
###Code
top10_cat = top10_num['category'].values
top10_cat
top10_pledged = df[df['category'].isin(top10_cat)].groupby('category')['pledged'].sum().reset_index()
top10_pledged
plt.figure(figsize=(17, 5))
sns.barplot(data=top10_pledged,
y='category',
x='pledged',
order=top10_cat,
color='salmon')
plt.grid(linestyle='-.')
plt.show()
###Output
_____no_output_____
###Markdown
In the Top 10 Categories by Number of Projects, plot the Average Pledge Per Project.
###Code
top10_ppp = df[df['category'].isin(top10_cat)].groupby('category')['pledged'].mean().reset_index()
top10_ppp.columns = ['category','avg. pledged']
plt.figure(figsize=(17, 5))
sns.barplot(data=top10_ppp,
x='avg. pledged',
y='category',
order=top10_cat[::-1],
color='salmon')
plt.grid(linestyle='-.')
plt.show()
###Output
_____no_output_____
###Markdown
In the Top 10 Categories by Number of Projects, plot number of projects broken down by their states (failed/successful)
###Code
top10_data = df[df['category'].isin(top10_cat)]
plot_data = top10_data[top10_data['state'].isin(['successful', 'failed'])]
plt.figure(figsize=(20, 10))
sns.countplot(data = plot_data,
y = 'category',
hue = 'state',
order=top10_cat)
plt.grid()
plt.show()
###Output
_____no_output_____
###Markdown
As you can see, in top 10 categorires, there're only 2 categories that have number of successful project more than failed project, they're TabletopGames and Shorts Organize the above 4 charts into one figure with 4 subplots, following this layout:
###Code
# Layout: https://matplotlib.org/3.3.3/tutorials/intermediate/gridspec.html
# Define layout
fig = plt.figure(constrained_layout=False, figsize=(20, 14))
fig.suptitle('TOP 10 CATEGORIES BY NUMBER OF PROJECTS')
gs = fig.add_gridspec(nrows=3, ncols=3, wspace=0.5, hspace=0.5)
# Creating the axes to put the plots on. Can you guess which axis correspond to which plot?
ax1 = fig.add_subplot(gs[:2, :])
ax2 = fig.add_subplot(gs[-1, 0])
ax3 = fig.add_subplot(gs[-1, 1])
ax4 = fig.add_subplot(gs[-1, 2])
##### BEGIN PLOTTING #####
# Total Plot
sns.barplot(data=top10_num,
x='no. of projects',
y='category',
color='salmon',
ax=ax1,
order=top10_cat,) # This one is the BIG plot in the center
ax1.set_title('By Number of Projects')
ax1.grid(linestyle='-.', axis='x')
# Plot Total Pledged
sns.barplot(data=top10_pledged,
x='pledged',
y='category',
color='blue',
order=top10_cat,
ax=ax2) # Bottom left
ax2.set_title('By Total Amount Pledged')
ax2.grid(linestyle='-.', axis='x')
# Plot Avg Pledged
sns.barplot(data=top10_ppp,
x='avg. pledged',
y='category',
order=top10_cat,
color='salmon',
ax=ax3) # Bottom middle
ax3.set_title('By Average Amount Pledged')
ax3.grid(linestyle='-.', axis='x')
# Plot State Projects
sns.countplot(data = plot_data,
y = 'category',
hue = 'state',
order=top10_cat,
ax=ax4) # Bottom right
ax4.set_title('By Number of Successful/Failed Projects')
ax4.grid(linestyle='-.', axis='x')
plt.show()
###Output
_____no_output_____
###Markdown
The trend of Number of Projects and Success Rate over time? In the Top 10 Categories by Number of Projects, plot the trend of ***Number of Projects*** over the years. In the Top 10 Categories by Number of Projects, plot the trend of ***Success Rate*** over the years. Now combine the last 2 questions in a dual-axis chart.
###Code
top10_data['launched_year'] = top10_data['launched'].dt.year
plt.figure(figsize=(30, 12))
for i in range(10):
# Combine the two plots above here.
# Remember how to combine two different plots into one with two different y-axis?
cat = top10_cat[i]# what's the category for this loop?
top10_current_cat = top10_data[top10_data['category']==cat]
top10_current_cat['encode_state'] = top10_current_cat['state'] == 'successful'
num_by_year = top10_current_cat.groupby('launched_year').size().reset_index()
num_by_year.columns = ['launched_year', 'number of projects']
success_year_rate = top10_current_cat.groupby('launched_year').mean()['encode_state'].reset_index()
success_year_rate.columns = ['year', 'rate']
plt.subplot(2,5,i+1)
# Here, I use plt.bar just to keep it precise with the sample plot.
# However, feel free to use seaborn as long as the data is the same.
plt.bar(data=num_by_year,
x='launched_year',
height='number of projects',
color='salmon')
plt.ylim(0, 5000)
plt.twinx() # Here is the magic line to make a dual-axes plot
# HOWEVER, here if you use seaborn lineplot, you might've noticed that your
# plot doesn't look quite right. This is a known bug of the interactions
# between seaborn and matplotlib.
# To fix it, you can use seaborn pointplot instead of lineplot, OR, like
# below, switch to plt.plot to match 100% the sample plot.
plt.plot(success_year_rate['year'], # x-array
success_year_rate['rate'], # y-array
color='blue')
plt.ylim(0, 1)
plt.title(cat)
plt.grid()
plt.subplots_adjust(wspace=0.3, top=0.8, hspace=0.3)
###Output
_____no_output_____
###Markdown
Out of the top 10 categories with the highest number of projects, how many have a higher success rate in 2017 than in 2009? The answer is 4: Product Design, Tabletop Games, Shorts, and Fashion (one way to verify this is sketched in the next cell). Similarly, in the Top 10 Categories by Number of Projects, plot the trend of ***Number of Projects*** and the ***Success Rate*** over the ***MONTHS OF THE YEAR***.
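One way to double-check that count, reusing the `top10_data`, `top10_cat` and `launched_year` objects created above (a minimal sketch, not part of the original questions):
###Code
# Success rate per category and launch year, then compare 2017 against 2009
rates = (top10_data.assign(success=top10_data['state'] == 'successful')
                   .groupby(['category', 'launched_year'])['success'].mean()
                   .unstack('launched_year'))
improved = rates.loc[top10_cat, 2017] > rates.loc[top10_cat, 2009]
print(improved.sum())
print(list(improved[improved].index))
###Output
_____no_output_____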
###Code
# First, similar to when we created a new column 'launched_year' for questions
# 6-8, here we create a new column 'launched_month' for the next plot.
top10_data['launched_month'] = top10_data['launched'].dt.month
top10_data['launched_month'].value_counts()
plt.figure(figsize=(30, 12))
for i in range(10):
# NOTE: this is almost exactly the same code for the previous plot.
# In fact, I just copied the plotting code from above and pasted here, and
# then just change year --> month in the necessary places.
cat = top10_cat[i]# what's the category for this loop?
top10_current_cat = top10_data[top10_data['category']==cat]
top10_current_cat['encode_state'] = top10_current_cat['state'] == 'successful'
num_by_month = top10_current_cat.groupby('launched_month').size().reset_index()
num_by_month.columns = ['launched_month', 'number of projects']
success_month_rate = top10_current_cat.groupby('launched_month').mean()['encode_state'].reset_index()
success_month_rate.columns = ['month', 'rate']
plt.subplot(2,5,i+1)
plt.bar(data=num_by_month,
x='launched_month',
height='number of projects',
color='salmon')
plt.ylim(0, 5000)
plt.twinx()
plt.plot(success_month_rate['month'],
success_month_rate['rate'],
color='blue')
plt.ylim(0, 1)
plt.title(cat)
plt.grid()
plt.subplots_adjust(wspace=0.3, top=0.8, hspace=0.3)
###Output
_____no_output_____
###Markdown
In the Top 10 Categories by Number of Projects, plot the ***Number of Projects*** by duration of the pitch (the time difference between `launched` and `deadline`), and the trend of the ***Success Rate*** by that duration. The duration is in number of months.
###Code
top10_data['duration'] = (top10_data['deadline'] - top10_data['launched']) / pd.to_timedelta(30, 'D')
top10_data['duration'] = top10_data['duration'].astype('int')
top10_data['duration'].value_counts()
plt.figure(figsize=(30, 12))
for i in range(10):
# YOUR CODE HERE
# Combine the two plots above here.
# Remember how to combine two different plots into one with two different y-axis?
cat = top10_cat[i]# what's the category for this loop?
top10_current_cat = top10_data[top10_data['category']==cat]
top10_current_cat['encode_state'] = top10_current_cat['state'] == 'successful'
num_by_duration = top10_current_cat.groupby('duration').size().reset_index()
num_by_duration.columns = ['duration', 'number of projects']
success_duration_rate = top10_current_cat.groupby('duration').mean()['encode_state'].reset_index()
success_duration_rate.columns = ['duration', 'rate']
plt.subplot(2,5,i+1)
plt.bar(data=num_by_duration,
x='duration',
height='number of projects',
color='salmon')
plt.ylim(0, 14000)
plt.twinx()
plt.plot(success_duration_rate['duration'],
success_duration_rate['rate'],
color='blue')
plt.ylim(0, 1)
plt.title(cat)
plt.grid()
plt.subplots_adjust(wspace=0.3, top=0.8, hspace=0.3)
###Output
_____no_output_____ |
.ipynb_checkpoints/Has the weather an impact on the spread of the coronavirus?-checkpoint.ipynb | ###Markdown
Has the weather an impact on the spread of the coronavirus? With more than 775,748 people infected and 37,109 deaths (03/30/2020), and with a significant decrease in usual human activity, COVID-19 will be remembered as a sad part of mankind's history. I, like many others, am trying to keep doing what I love and to avoid going crazy thinking about the impact of this crisis on my family and on people around the world. I am not a politician and I don't hold any kind of power, but, at the same time, I feel that I need to do something else, and for that reason I began this project as a modest contribution around what I think are some interesting open questions about COVID-19:
1. Is there some relationship between the temperature and the spread of the virus? If so, what is the minimum temperature that helps slow down its spread?
2. Does humidity have some kind of impact on the spread of the virus?
3. What happens with the virus at different atmospheric pressures?

This project is structured as follows:
1. [Data Collection and Cleaning](data_collection_and_cleaning)
    1. [Cities Selection](cities_selection)
        1. [Cities in countries with more infections.](cities_in_countries_with_more_infections)
        2. [Cities in Coldest Countries.](coldest_countries)
        3. [Cities in Hottest Countries.](hottest_countries)
    2. [Weather Data](weather_data)
        1. [Merging the weather and the COVID-19 datasets.](merging_weather_COVID_datasets)
2. [Weather and New Infections.](weather_and_new_infections)
    1. [Coronavirus vs Temperature.](coronavirus_vs_temperature)
    2. [Coronavirus vs Humidity.](coronavirus_vs_humidity)
    3. [Coronavirus vs Pressure.](coronavirus_vs_pressure)
3. [Conclusions and Remarks.](conclusions_and_remarks)

Data Collection and Cleaning

The above questions are related to a more general motivation [proposed at Kaggle](https://www.kaggle.com/sudalairajkumar/novel-corona-virus-2019-dataset/tasks?taskId=62). One of the main datasets that I am going to use in this project was also obtained from Kaggle ("[covid_19_data.csv](https://www.kaggle.com/sudalairajkumar/novel-corona-virus-2019-datasetcovid_19_data.csv)") and relates the total number of confirmed, death, and recovered cases per day, `Province/State` and `Country/Region`.
###Code
# Load libraries
import pandas as pd
import numpy as np
# Use the "glob" module to extract pathnames matching a specified pattern
import glob
import calendar
# Visualization
import seaborn as sns
import matplotlib.pyplot as plt
import plotly.express as px
# Statistics
from scipy import stats
###Output
_____no_output_____
###Markdown
The COVID-19 dataset is composed of 8 variables, whose descriptions can be found [here](https://www.kaggle.com/sudalairajkumar/novel-corona-virus-2019-dataset).
###Code
# Load the "covid_19_data" dataset
covid_2019=pd.read_csv("novel-corona-virus-2019-dataset/covid_19_data.csv")
covid_2019.head()
###Output
_____no_output_____
###Markdown
As we can see in the following descriptive data frame, this data is composed of 11614 observations with 119827 infections, 14681 deaths and 63612 recovered patients around the world. An important piece of information is that the variables `Confirmed`, `Deaths` and `Recovered` are cumulative and for that reason, at this point, we can't say anything for example about the mean number of new cases by day.
###Code
# Describes the continuous variables
covid_2019.describe()
###Output
_____no_output_____
###Markdown
Before going forward, it's important to transform the variables to the correct format.
###Code
# Actual data types
covid_2019.dtypes
## Transform the data type to the correct format
# 'Last Update' and 'ObservationDate' to datetime
covid_2019['Last Update']=pd.to_datetime(covid_2019['Last Update'])
covid_2019['ObservationDate']=pd.to_datetime(covid_2019['ObservationDate'])
# 'Confirmed','Deaths','Recovered' to int
covid_2019[['Confirmed','Deaths','Recovered']]=covid_2019[['Confirmed','Deaths','Recovered']].astype('int')
# 'Province/State' and 'Country/Region' to category
covid_2019[['Province/State','Country/Region']]=covid_2019[['Province/State','Country/Region']].astype('category')
covid_2019.dtypes
print('Some general facts about our data:')
print('=> The first day reported in our data was {}.'.format(min(covid_2019['Last Update'])))
print('=> While the last day included is {}.'.format(max(covid_2019['Last Update'])))
print('=> Our data resume the information of the coronavirus spread in {}'.format(max(covid_2019['Last Update']) - min(covid_2019['Last Update'])))
print('=> During these days, a total of {} Province/States had reported at least one case of coronavirus.'.format(len(covid_2019['Province/State'].unique())))
print('=> These Province/States are distributed in {} countries or regions.'.format(len(covid_2019['Country/Region'].unique())))
###Output
Some general facts about our data:
=> The first day reported in our data was 2020-01-22 17:00:00.
=> While the last day included is 2020-04-03 22:52:45.
=> Our data resume the information of the coronavirus spread in 72 days 05:52:45
=> During these days, a total of 295 Province/States had reported at least one case of coronavirus.
=> These Province/States are distributed in 216 countries or regions.
###Markdown
Cities Selection

For this study I considered the weather in the last 4 months (December 1 to March 29) of 9 different cities. The selection criteria were:
* Include the most infected city of the 3 countries with the most cases. At 2020-04-03 these countries were US, Italy, and Spain, and the cities were "New York" (US), the region of "Lombardia" in Italy (in this case we selected Milan, which is the capital of Lombardia), and Madrid (Spain) (see details in the subsection [Cities in Countries with more Infections](cities_in_countries_with_more_infections)).
* Include the city with the greatest number of cases in the 3 coldest countries. The list of the coldest countries in the world was obtained from [here](https://www.swedishnomad.com/coldest-countries-in-the-world/) taking into account the average yearly temperature.
    * The results show that the coldest countries with the most cases are "Austria", "Sweden", and Canada with 11524, 6131 and 6101 cases respectively. For these countries the most affected cities are Vienna (Austria's capital), Stockholm (Sweden's capital), and Quebec in Canada. For details, see the subsection [Cities in Coldest-Countries](coldest_countries).
* Include the city with the greatest number of cases in the 3 hottest countries. A list of the 15 hottest countries by average yearly temperature was obtained from [here](https://www.swedishnomad.com/hottest-countries-in-the-world/).
    * Given these selection parameters, I obtained that the 3 hottest countries with the greatest number of cases are United Arab Emirates, Qatar, and Burkina Faso with 1264, 1075 and 302 cases respectively. Unfortunately, I couldn't find free weather information about Burkina Faso, and as a consequence I picked the next country in the list, Senegal, with 207 cases. The weather of the cities with the most cases in these 3 countries (Dubai (United Arab Emirates), Doha (Qatar), Dakar (Senegal)) was selected for this analysis (see details in the subsection [Cities in Hottest-Countries](hottest_countries)).

> Unfortunately, it was very difficult for me to find accurate information about the weather by country/city in the last 4 months. All the webpages sell this information (and it is very expensive, by the way), so I collected and curated this data manually. If anybody has, or knows where to obtain, this kind of data easily, please share it. If you want to use this information, it is available [here](https://github.com/Yasel-Garces/The-impact-of-weather-in-the-coronavirus-spread).

Cities in Countries with more Infections.
###Code
# Extract the data of the last day
covid_2019_lastDay=covid_2019.loc[covid_2019['ObservationDate']==max(covid_2019['ObservationDate']),:]
covid_2019_lastDay.head()
# Compute the total number of cases by country
cases_by_countries=covid_2019_lastDay.pivot_table(index=['Country/Region'],
values='Confirmed',
aggfunc='sum').sort_values(by='Confirmed',
ascending=False)
print('The countries with more cases are:\n {}'.format(cases_by_countries.head()))
# Select the city with more cases in the 3 countries with more cases.
countries=['US', 'Italy','Spain']
function = lambda country: covid_2019_lastDay.loc[covid_2019_lastDay['Country/Region']==country,:].sort_values(by='Confirmed',
ascending=False).iloc[0,[2,5]]
# Stores the results in a dictionary
result={country: list(function(country)) for country in countries}
print('The cities with more cases for each of the top countries are:\n {}'.format(pd.DataFrame(result)))
###Output
The cities with more cases for each of the top countries are:
US Italy Spain
0 New York NaN NaN
1 102987 119827.0 119199.0
###Markdown
We can see here something unexpected (wasn't obtained any city for Italy or Spain). Let's see what happened:
###Code
# Slice the dataset to show only the information relative to Italy
covid_2019.loc[covid_2019['Country/Region']=='Italy',:].sort_values(by='Confirmed',
ascending=False).head()
###Output
_____no_output_____
###Markdown
The problem seems to be clear! Our data doesn't contain the information for Italy or Spain segmented by regions or provinces (note that only one record exists per day). Fortunately, in the case of Italy, this inconvenience can be overcome using another available [dataset in Kaggle that contains specific information about Italy](https://www.kaggle.com/sudalairajkumar/covid19-in-italycovid19_italy_region.csv). Therefore, I decided to drop all the information relative to Italy from the `covid_2019` dataset and include the new information available in the Italy dataset.
###Code
# Drop all the information relative to Italy from "covid_2019"
covid_2019=covid_2019.loc[covid_2019['Country/Region']!='Italy',:]
# Check that the information was droped
covid_2019.loc[covid_2019['Country/Region']=='Italy',:]
# Load the new dataframe with the information about Italy
italy=pd.read_csv("novel-corona-virus-2019-dataset/covid19_italy_region.csv")
# Print the columns of this data frame
print(italy.columns)
###Output
Index(['SNo', 'Date', 'Country', 'RegionCode', 'RegionName', 'Latitude',
'Longitude', 'HospitalizedPatients', 'IntensiveCarePatients',
'TotalHospitalizedPatients', 'HomeConfinement', 'CurrentPositiveCases',
'NewPositiveCases', 'Recovered', 'Deaths', 'TotalPositiveCases',
'TestsPerformed'],
dtype='object')
###Markdown
If we look at the columns of the `italy` data frame, it's easy to realize that we only need to keep the following variables for our `covid_2019` data frame: `SNo`, `Date`, `RegionName`, `Country`, `TotalPositiveCases`, `Deaths`, `Recovered`.
###Code
# Create a new dataframe for Italy with only the necesary variables (listed above)
italy=italy[['SNo','Date','RegionName','Country','Date','TotalPositiveCases','Deaths','Recovered']]
# Name the columns as in covid_19
italy.columns=['SNo','ObservationDate','Province/State','Country/Region','Last Update',
'Confirmed','Deaths','Recovered']
# Concat the two dataframes
covid_2019=pd.concat([covid_2019,italy])
# Rename ITA for Italy
covid_2019['Country/Region'].replace(to_replace='ITA',value='Italy',inplace=True)
covid_2019.loc[covid_2019['Country/Region']=='Italy',:].head()
###Output
_____no_output_____
###Markdown
I couldn't find detailed information about the number of cases in Spain by region, but we know that the greatest number of cases is in Madrid, so I'm going to use the weather information for Madrid in this analysis. Finally, the functions `transform_dtypes` and `cases_country_city`, available in the `functions.py` script, run all the steps that we did earlier in [this section](cities_in_countries_with_more_infections). The results show that at this moment New York is the US city with the most cases ($\approx 102987$), while the region of Lombardia in Italy has $\approx 47520$ cases.
###Code
from functions import transform_dtypes, cases_country_city
# Transform data types
covid_2019=transform_dtypes(covid_2019)
# Extract the information about the cities with more cases
_ , cities=cases_country_city(covid_2019)
cities
###Output
_____no_output_____
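###Markdown
`functions.py` is not shown in this notebook, so, just for readability, here is a plausible sketch of what `cases_country_city` might do, based on the manual steps above (the function body and return values are assumptions, not the actual implementation):
###Code
# Hypothetical sketch: top 3 countries by confirmed cases on the last observation
# date, and the most affected Province/State (with its count) in each of them.
def cases_country_city_sketch(covid):
    last_day = covid[covid['ObservationDate'] == covid['ObservationDate'].max()]
    by_country = (last_day.groupby('Country/Region')['Confirmed']
                          .sum().sort_values(ascending=False))
    top3 = by_country.head(3).index
    cities = {country: last_day.loc[last_day['Country/Region'] == country]
                               .sort_values('Confirmed', ascending=False)
                               .iloc[0][['Province/State', 'Confirmed']].tolist()
              for country in top3}
    return by_country, pd.DataFrame(cities)
###Output
_____no_output_____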
###Markdown
Cities in Coldest-Countries
###Code
# List the names of the coldest countries
coldest_countries=['Canada','Russia','Mongolia','Greenland','Sweden','Norway','Finland','Iceland','Austria']
# Pick only the information of the countries in "coldest_countries"
ind=(covid_2019_lastDay['Country/Region'].isin(set(coldest_countries)))
# Subset and sort the dataframe using the number of confirmed cases
covid_2019_lastDay.loc[ind,:].sort_values('Confirmed',ascending=False).head()
###Output
_____no_output_____
###Markdown
Cities in Hottest-Countries
###Code
# List of hottest countries
hottest_countries=['Mali','Burkina Faso','Senegal','Mauritania','Djibouti','Benin','Ghana','Niger',
'Cambodia','South Sudan','Qatar','United Arab Emirates','Sudan',
'Saint Vincent and the Grenadines','Togo']
# Pick only the information of the countries in "hottest_countries"
ind=(covid_2019_lastDay['Country/Region'].isin(set(hottest_countries)))
# Subset and sort the dataframe using the number of confirmed cases
covid_2019_lastDay.loc[ind,:].sort_values('Confirmed',ascending=False).head()
###Output
_____no_output_____
###Markdown
Weather data

The historical weather of the selected 9 cities was collected from ["Weather Underground"](https://www.wunderground.com/) and saved in independent CSV files (one file per city). Each file contains information about the weather from December 2019 to March 30, 2020 (121 observations), condensed into 18 variables:

| Variable | Description |
|:----------------------------------------------|:---------------------------------------------------|
| Day | Day |
| Month | Month |
| Year | Year |
| Country | Name of the country |
| State | Name of the state or region |
| TempMax/TempAvg/TempMin | Maximum, average and minimum temperature ($^o F$) |
| HumMax/HumAvg/HumMin | Maximum, average and minimum humidity (%) |
| Wind_Speed_Max/Wind_Speed_Avg/Wind_Speed_Min | Maximum, average and minimum wind speed (mph) |
| Pressure_Max/Pressure_Avg/Pressure_Min | Maximum, average and minimum pressure (inHg) |
| Total_Precipitations | Total precipitations (in) |

Below you can take a look at this information in the case of New York.
###Code
weather_NewYork=pd.read_csv("Weather/NewYork_December2019_March_2020.csv")
weather_NewYork.head()
###Output
_____no_output_____
###Markdown
The next step is to merge the information in all these 9 files in only one.
###Code
# Extract the directories
directories=glob.glob("Weather/*.csv")
# Create an empty dataframe to store the information
weather=pd.DataFrame()
# Include the new data in "weather" for each csv file in the directory
for file in directories:
this_data=pd.read_csv(file)
weather=pd.concat([weather,this_data],axis=0)
weather.head()
###Output
_____no_output_____
###Markdown
Above, you can see that the `Month` values appear as strings; let's transform this variable to `int`.
###Code
# Create a dictionary with the names of the months and the number that represent it.
d = dict((v,k) for k,v in enumerate(calendar.month_name))
# Replace the variable 'Month' using the dictionary
weather['Month']=weather['Month'].map(d)
weather.head()
# Create a new variable called "Infection Day" (note that I name this variable as in the
# covid data frame to make clear that I am going the merge this dataframes using this variable)
weather['Infection Day']=pd.to_datetime(weather[['Year', 'Month', 'Day']]).dt.date
# Drop the information relative to the Day, Month and Year
weather.drop(columns=['Day','Month','Year'],inplace=True)
# Convert the 'Country' and 'State' features from objects to category variables
weather[['Country','State']]=weather[['Country','State']].astype('category')
weather.head()
# Print some basic exploration statistics
print('=> The data frame with the weather information is composed by {} rows and {} columns.'.format(weather.shape[0],
weather.shape[1]))
print('=> The countries included in this dataframe are:\n {}'.format(weather['Country'].unique()))
print('=> The cities included in this dataframe are:\n {}'.format(weather['State'].unique()))
print('=> The total number of Missing Values are: {}'.format(weather.isna().sum().sum()))
###Output
=> The data frame with the weather information is composed by 1218 rows and 16 columns.
=> The countries included in this dataframe are:
[Austria, USA, United Arab Emirates, Qatar, Senegal, Sweden, Norway, Spain, Italy, Canada]
Categories (10, object): [Austria, USA, United Arab Emirates, Qatar, ..., Norway, Spain, Italy, Canada]
=> The cities included in this dataframe are:
[Vienna, New York, Dubai, Doha, Dakar, Stockholm, Oslo, Madrid, Lombardia, Quebec]
Categories (10, object): [Vienna, New York, Dubai, Doha, ..., Oslo, Madrid, Lombardia, Quebec]
=> The total number of Missing Values are: 0
###Markdown
> So far, the weather information looks nice, so we can move forward and try to relate the COVID-19 dataset with the information about the weather.

Merging the weather and the COVID-19 datasets

At this point, it's important to structure the dataset based on some previous assumptions:
1. We are only considering 9 cities to study the relationship between the spread of the virus and the weather.
2. If a person "X" was reported as infected on day "D", then the exposure occurred [between 2-14 days before](https://www.cdc.gov/coronavirus/2019-ncov/symptoms-testing/symptoms.html). I am going to assume here that, on average, a person with the disease has the first symptoms 8 days (the midpoint between 2 and 14) after exposure. That is, the weather that matters for the infection of "X" is the weather on day ("D" - 8).

> I am assuming that the weather doesn't matter once the virus gets into a person (which seems logical).

In accordance with the first point above, let's extract from our general data the information about these 9 `Province/State`. Here it's important to remember that our dataset has the information of some countries segmented by cities or regions (e.g. US, Italy), but others (like Spain) only have a country-level segmentation, so the first step is to complete the missing information in the `Province/State` variable.
###Code
# Filter only the observations of the selected countries
selected_countries=['US','Italy','Austria', 'Canada', 'Sweden', 'Qatar',
'United Arab Emirates', 'Senegal', 'Spain']
covid_2019_countries=covid_2019.loc[covid_2019['Country/Region'].isin(selected_countries),:].copy()
# Include the cities in the selected countries without a city level information
countries_without_cities={'Austria': 'Vienna', 'Sweden': 'Stockholm',
'Qatar': 'Doha', 'United Arab Emirates': 'Dubai',
'Senegal': 'Dakar', 'Spain':'Madrid'}
covid_2019_countries.loc[:,'Province/State'] = covid_2019_countries.apply(
lambda row: countries_without_cities[row['Country/Region']] if
row['Country/Region'] in countries_without_cities.keys() else row['Province/State'],
axis=1
)
# Check that we don't have missing information in the "Province/State" feature
print('The number of missing values in the Province/State feature is: {} ==> Great!!'.format(covid_2019_countries['Province/State'].isna().sum()))
###Output
The number of missing values in the Province/State feature is: 0 ==> Great!!
###Markdown
Now, it's time to select only the information relative to the 9 `Province/State` that we are going to include in this project.
###Code
# Select only the information relative to the selected province/state
cities=['New York','Madrid','Quebec','Lombardia','Vienna','Stockholm',
'Doha','Dubai','Dakar']
covid_final=covid_2019_countries.loc[covid_2019_countries['Province/State'].isin(cities),:].copy()
print('=> The cities available in the reduced dataframe are:\n {} ==> Nice, everything looks fine'.format(covid_final['Province/State'].unique()))
print('=> The countries available in the reduced dataframe are:\n {} ==> Nice!'.format(list(covid_final['Country/Region'].unique())))
print('=> So far, the information about the cities of interests is contained in {} rows and {} columns.'.format(covid_final.shape[0],covid_final.shape[1]))
print('=> The new dataset has {} missing values'.format(covid_final.isna().sum().sum()))
covid_final.head()
###Output
=> The cities available in the reduced dataframe are:
['Dubai' 'Stockholm' 'Madrid' 'Vienna' 'Doha' 'Dakar' 'Quebec' 'New York'
'Lombardia'] ==> Nice, everything looks fine
=> The countries available in the reduced dataframe are:
['United Arab Emirates', 'Sweden', 'Spain', 'Austria', 'Qatar', 'Senegal', 'Canada', 'US', 'Italy'] ==> Nice!
=> So far, the information about the cities of interests is contained in 391 rows and 8 columns.
=> The new dataset has 0 missing values
###Markdown
The first task described at the [beginning of this section](merging_weather_COVID_datasets) is done. Now we need to move forward to the second point. Remember, for this, we need to compute the number of new cases by day and city (currently our dataset contains the cumulative number of cases), and then we need to move the dates back by 8 days (see [above](merging_weather_COVID_datasets) for more details). Also, I am going to create a new variable (`Days Since First Case`) that represents the number of days since the first infection case was reported in a city. Note that this variable gives more direct information about the number of new cases per day after the first infection.
###Code
# The number of new cases in the a day "d" (N_d) can be computed as [N_d - N_(d-1)].
# Remember that we need to do this by city.
# Iterate ove the cities and compute the number of new cases per day
covid_new_cases=pd.DataFrame()
for city in cities:
# Subset the dataset to considder only one city
temp=covid_final.loc[covid_final['Province/State']==city,:].sort_values(by='ObservationDate')
# Transform the variable "Confirmed" to include only the information
# about the new infections by day (not the cumulative)
temp.loc[temp['ObservationDate']>min(temp['ObservationDate']),
'Confirmed'] = temp['Confirmed'][1:].values - temp['Confirmed'][:-1].values
# Create a new variable "Days Since First Case" where 0 is the day when
# the first infection was reported and N is the last day where was
# recorded information about new cases in "city"
diff_dates=temp.loc[:,'ObservationDate'].values - temp.iloc[0,1] # Difference between the first and k dates
temp['Days Since First Case'] =[tt.days for tt in diff_dates] # Include only the information about the days
# Concatenate the result with the "covid_new_cases" dataframe
covid_new_cases=pd.concat([covid_new_cases,temp])
# Print a piece of "covid_new_cases" dataframe
covid_new_cases.head()
###Output
_____no_output_____
###Markdown
Above, we can see that the number of confirmed cases is no longer a cumulative variable. But given that this variable is very important in this project, let's be more cautious and confirm that everything looks as we want. For this, let's compute the sum of all the new cases by city and compare the result with the number of cases of the last day in the original data frame.
###Code
# Resume in test1 the sum of the new cases by cities
test1=covid_new_cases.pivot_table(index=['Province/State'],values='Confirmed',aggfunc='sum')
# Extract in test2 the number of cases the last day
test2=covid_final.loc[covid_final['ObservationDate']==max(covid_final['ObservationDate']),['Province/State','Confirmed']]
# Merge and show this information
pd.merge(test1,test2,on='Province/State',suffixes=('_cumulative (Last Day)', '_sum (new cases per day)'))
###Output
_____no_output_____
###Markdown
As we can see in the above table, everything looks well, and it only remains to move the dates 8 days back in the calendar as an approximation of when the infection occurred. For this, I am going to include a new variable called `Infection Day`.
###Code
# Estimate the infection day
covid_new_cases['Infection Day']=covid_new_cases['ObservationDate'] - pd.to_timedelta(8,'d')
# Shows the new results
covid_new_cases.head()
###Output
_____no_output_____
###Markdown
At this moment, we have covered the two points described at the beginning of [this section](merging_weather_COVID_datasets), and it only remains to merge the information in the coronavirus dataset ("covid_new_cases") with the weather dataset ("weather"). Note that we need to do a left join (complete the information in the "covid_new_cases" dataset with the weather).
###Code
# Left Join the two data frames
covid_weather=pd.merge(covid_new_cases,weather,how='left',left_on=['Infection Day','Province/State'],
right_on=['Infection Day','State'])
# Some variables like SNo, State (is a duplication of "Province/State"),
# Country (is a duplication of "Country/Region") or "LastUpdate" are not
# necessary to this study, so let's drop it from the data.
covid_weather.drop(columns=['SNo','State','Country','Last Update'],inplace=True)
covid_weather.head()
for city in cities:
print('=> The data frames have a {} match between the number of observations in {}'.format(
covid_weather.loc[covid_weather['Province/State']==city,:].shape[0]==
covid_new_cases.loc[covid_new_cases['Province/State']==city,:].shape[0],city))
print('=> The final data frame that condense all the information about the coronavirus disease and the weather in the selected 9 cities has {} observations and {} features.'.
format(covid_weather.shape[0],covid_weather.shape[1]))
print('=> The total number of missing values in the data frame is {} ==> Great!!'.format(covid_weather.isna().sum().sum()))
###Output
=> The data frames have a True match between the number of observations in New York
=> The data frames have a True match between the number of observations in Madrid
=> The data frames have a True match between the number of observations in Quebec
=> The data frames have a True match between the number of observations in Lombardia
=> The data frames have a True match between the number of observations in Vienna
=> The data frames have a True match between the number of observations in Stockholm
=> The data frames have a True match between the number of observations in Doha
=> The data frames have a True match between the number of observations in Dubai
=> The data frames have a True match between the number of observations in Dakar
=> The final data frame that condense all the information about the coronavirus disease and the weather in the selected 9 cities has 391 observations and 21 features.
=> The total number of missing values in the data frame is 0 ==> Great!!
###Markdown
> Finally!!! Our data looks tidy and we are ready to address our scientific questions. Weather and New Infections. Coronavirus vs Temperature
###Code
# PLot the Temperature Avg by day
px.line(covid_weather, x='Infection Day', y='TempAvg', color='Province/State',
title='Average Temperature by Day')
###Output
_____no_output_____
###Markdown
Now it's time to begin exploring the relationship between the temperature and the number of new cases. Fig. _"New Infections vs Temperature"_ (below) shows that the greatest number of infections occurs in cities with a mean temperature between $40$ and $60^o F$. This is the case of New York, Madrid, and Lombardia with median temperatures of $47.5^o F$, $50.5^o F$ and $47.75^o F$ respectively. Note that for temperatures over $65^o F$ the number of infections seems to be very low in comparison with the regions with lower temperatures. Also, as we noted previously, Quebec is the province with the lowest temperature, and it also seems to have a low number of new infections in comparison with the regions with temperatures between $40$ and $60^o F$.
> Given this graph, we can hypothesize that if the temperature has an impact on the spread of the Coronavirus, then:
1. The spread is reduced significantly when the temperature is over $\approx 65^o F$.
2. It's more probable to have more infections when the temperature varies between approximately $40$ and $60^o F$.
3. For temperatures under $\approx 35^o F$, the spread seems to be less than when the temperature is between $40$ and $60^o F$ but greater than in regions with temperatures over $65^o F$. In summary, the cold seems to be a factor that impacts the spread of the virus, but less than high temperatures.
###Code
# Scatter plot between the Average Temperature and the number of Cases by Province/State
px.scatter(covid_weather, x="TempAvg", y="Confirmed", color="Province/State",
marginal_y=None, marginal_x="box", trendline="o",
title='New Infections vs Temperature')
###Output
_____no_output_____
###Markdown
As we mentioned at the beginning of this section, it could be interesting (and potentially better for visualization) to create 3 clusters using the temperature. Given the descriptive and exploratory analysis that we did so far, we can probably guess between which ranges the temperature will vary, but I think it's better to use a K-means algorithm to find these intervals.
###Code
# Import k-means from sklearn
from sklearn.cluster import KMeans
# Extract the information about the temperatures
X=np.array(covid_weather['TempAvg'])
# Cluster
kmeans = KMeans(n_clusters=3, random_state=0).fit(X.reshape(-1,1))
# Include the labels in our data frame in the variable "Cluster_Temp"
covid_weather['Cluster_Temp']=kmeans.labels_
# Compute the min and max temperature values in each cluster
covid_weather.pivot_table(index='Cluster_Temp',values='TempAvg',aggfunc=['min','max'])
###Output
_____no_output_____
###Markdown
The above table summarizes the results of the k-means analysis. The results look very similar to what we expected; note that one of the clusters (cluster 2) groups temperatures under $\approx 40^o F$ while cluster 1 groups temperatures greater than $60^o F$. Finally, cluster 0 groups temperatures between $\approx 40$ and $60^o F$. Now, let's transform the new variable `Cluster_Temp` to include the ranges of values as labels instead of integer labels that don't provide much information.
###Code
# Dictionary with the new labels
dic={0:'40-60 F', 1: '>60 F', 2: '<40 F'}
# Replace the labels
covid_weather['Cluster_Temp'].replace(dic,inplace=True)
# Plot the clusters
px.scatter(covid_weather, x="TempAvg", y="Cluster_Temp", color="Cluster_Temp",
marginal_y=None, marginal_x=None, trendline="o",
width=900, height=300)
###Output
_____no_output_____
###Markdown
The above discretization allows us to quantify the number of infections by temperature range. Fig. _"Temperature ranges and New Infections"_ shows that the number of new infections is around 250 000 when the temperature is between $40$ and $60^o F$, which is significantly higher than in the other two ranges. Also, this histogram shows that the number of new cases when the temperature is under $40^o F$ ($\approx 35 000$) is notably bigger than when the temperature is over $60^o F$, in which case our data only report around 10 000 new infections.
###Code
# Histogram of the number of infections by group of temperature
px.bar(covid_weather, x="Cluster_Temp", y="Confirmed",
color="Province/State", title='Temperature ranges and New Infections')
###Output
_____no_output_____
###Markdown
The previous observations are far from conclusive because these differences could be explained as a consequence of different factors like:
* Differences in the population density between Provinces/States (cities with more population are more likely to have more cases).
* Our sample mostly has cities with temperatures between $40-60^o F$.
* Sociocultural factors. Note here that Spaniards and Italians are warm people, normally used to close interpersonal relationships, and as a consequence this powers the spread of the virus. The explanation extends to New York, which is a very multicultural city.

The table below summarizes the total days for each temperature group and `Province/Region`. As we can see, New York is the only city that has days in each of the temperature ranges, but this isn't enough to even consider a fair statistical comparison of the number of new cases between temperature groups. Without a doubt, this is a limitation of our dataset, and in my opinion, the best bet could be to compare cities with similar population density and different temperatures.
> Note that taking this path, we are assuming that the sociocultural factors are similar between the populations of two different cities, which could introduce a bias, but it's necessary to simplify our analysis because sadly our data is limited.
###Code
# Number of days that each Province/State had for each range of temperature
covid_weather.pivot_table(index='Province/State',columns='Cluster_Temp',
values='Days Since First Case',aggfunc='count')
###Output
_____no_output_____
###Markdown
The data about the `Population`, `Land Area` and `Population Density` for each `Province/Region` was obtained from [Wikipedia](https://www.wikipedia.org/) and is shown in the next table. The good news is that Doha and Dakar are the regions with the highest population density, and the temperature in these regions is always over $60^o F$, so this opens the possibility of comparing them with regions in the other temperature ranges.
###Code
# Create a data frame with the Region/State population and Land Area
region_state_density=pd.DataFrame({'Region/State':['Dakar', 'Doha', 'Dubai',
'Lombardia','Madrid','New York',
'Quebec','Stockholm','Vienna'],
'Population': [2956023,2382000,3331420, 10078012,
3223334,19453561,8164361,2377081,1888776 ],
'Land Area (sq mi)': [211,51,1588,9206,233.3,54555,595391,2517,160.15]})
# Compute the population density as Area/population
region_state_density['Population Density']=region_state_density['Population']/region_state_density['Land Area (sq mi)']
region_state_density.sort_values(by=['Population Density'],ascending=False,inplace=True)
region_state_density
###Output
_____no_output_____
###Markdown
Taking into account that we don't have much information about states with low temperatures and a higher population density than the regions or states with temperatures between $40-60^o F$, I decided to compare only the impact of high temperatures on the spread of the virus. For this comparison, I defined the following rules in order to decrease the bias (see points above):
1. I will only consider Dakar, Dubai and Doha as the `Province/State` with high temperatures.
2. In order to compare one `Province/State` with one of the above three cities ($X_i$), the population density of the `Province/State` should be less than the population density of $X_i$. With this, we avoid the bias of differences between the number of new cases in two cities being driven by population density rather than by other factors.

Based on the above rules, the comparable Provinces/States are:

| | Dakar | Doha | Dubai |
|-----------|-------|------|-------|
| New York | ✔️ | ✔️ | ✔️ |
| Madrid | ✔️ | ✔️ | ✖️ |
| Lombardia | ✔️ | ✔️ | ✔️ |
| Vienna | ✔️ | ✔️ | ✖️ |
| Stockholm | ✔️ | ✔️ | ✔️ |
| Quebec | ✔️ | ✔️ | ✔️ |

Our objective is to compare whether the distributions of new infections in two different cities come from the same population or not. In other words, the null hypothesis is that the spread of the virus is independent of the temperature, and the alternative hypothesis is that the spread is lower in cities with high temperatures. The distributions of the new infections are independent but also far from following a normal distribution (see Fig. _"New Infections Histogram by Province/State"_ below). Nevertheless, it has been reported that samples with more than 15 observations (there are at least 25 in each of our distributions) are enough to avoid the normality assumption in the case of a two-sample t-student hypothesis test (see [here](https://support.minitab.com/en-us/minitab/18/Assistant_Two_Sample_t.pdf), or [here](https://books.google.com/books?hl=en&lr=&id=fZZTBgAAQBAJ&oi=fnd&pg=PR7&ots=KVNzlTQZBU&sig=uc2nGPRKmXFRx5q5d627Vf2ndPcv=onepage&q&f=false), and also [here](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3693611/)). I'm not happy with this number of samples, but, in this case, it's important to make inference over the mean (parametric case) and not over the median (nonparametric case). Why? Because we need to take into account whether there is a high number of new infections. Also, the nonparametric tests assume that all groups must have the same or a very similar spread (variance), which doesn't seem to be the case in our data. Finally, a parametric test (like t-student) gives more statistical power to the test (less probability of failing to reject the null hypothesis when it is false).
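The ✔️/✖️ table above can also be derived programmatically from the `region_state_density` data frame created earlier (a minimal sketch of rule 2):
###Code
# For each warm city, mark the Provinces/States whose population density is lower
warm_cities = ['Dakar', 'Doha', 'Dubai']
other_cities = ['New York', 'Madrid', 'Lombardia', 'Vienna', 'Stockholm', 'Quebec']
density = region_state_density.set_index('Region/State')['Population Density']
comparable = pd.DataFrame({w: density[other_cities] < density[w] for w in warm_cities})
comparable
###Output
_____no_output_____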
###Code
from functions import trim_axs, color_p_value
# Histogram of the new cases by cities
fig1, axs = plt.subplots(3, 3, figsize=(10,8), constrained_layout=True)
axs = trim_axs(axs, len(cities))
for ax, city in zip(axs, cities):
X=covid_weather.loc[covid_weather['Province/State']==city,'Confirmed']
ax.set_title('{} ({} days observed)'.format(city,len(X)))
sns.distplot(X,kde=False,ax=ax,bins=40)
fig1.suptitle('New Infections Histogram by Province/State', fontsize=16);
###Output
_____no_output_____
###Markdown
For these tests, I am going to set the critical value $\alpha=0.05$, and set the length of each sample to the length of the sample with fewer observations. That is, if X, Y are the two samples to compare, with lengths $l_X$ and $l_Y$ respectively, and $\hat{l}=min(l_X,l_Y)$, then I am only going to consider the observations of X and Y between the first infection day and day $\hat{l}$.
> The above could look tricky, but in fact it has an easy explanation: we need to compare the same number of days after the first infection, because otherwise we would introduce a bias based on the lack of information in the distribution with fewer observations.
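`t_test_byCities` lives in `functions.py` (not shown here); for a single pair of cities it presumably does something along the lines of the sketch below, following the truncation rule just described (the function name and details are assumptions):
###Code
# Sketch: one-tailed two-sample t-test on the first l_hat days of two cities
def one_tailed_ttest_sketch(df, city_a, city_b):
    a = df.loc[df['Province/State'] == city_a].sort_values('Days Since First Case')['Confirmed']
    b = df.loc[df['Province/State'] == city_b].sort_values('Days Since First Case')['Confirmed']
    l_hat = min(len(a), len(b))  # compare the same number of days since the first case
    t, p_two_tailed = stats.ttest_ind(a.iloc[:l_hat], b.iloc[:l_hat])
    return t, p_two_tailed / 2   # one-tailed p-value (check the sign of t)
###Output
_____no_output_____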
###Code
## t-student Hypothesis tests ##
from functions import t_test_byCities
# Create a dictionary with the pairs of cities to be tested
cities2test=dict({'Dakar': ['New York','Madrid','Lombardia','Vienna','Stockholm','Quebec'],
'Doha': ['New York','Madrid','Lombardia','Vienna','Stockholm','Quebec'],
'Dubai': ['New York','Lombardia','Stockholm','Quebec']})
# Run the tests (use the function "t_test_byCities" available in "functions.py")
results_pvalue, results_stat=t_test_byCities(cities2test,covid_weather)
print('The p-values are:')
results_pvalue.style.applymap(color_p_value)
print('The t-statistics are:')
results_stat.style.applymap(color_p_value)
###Output
The t-statistics are:
###Markdown
The results obtained through the `stats.ttest_ind` function correspond to a [two-tailed test](https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.ttest_ind.html), and for that reason I divided the p-value by 2 (one-tailed test) and printed the t-statistic.
* If $p/2<0.05$ and $t>0$, then we are on the right tail of the distribution and we can affirm that the mean of new cases in warm regions is greater than in other regions.
* If $p/2<0.05$ and $t<0$, we can conclude that the mean of new cases is lower when the temperature is above $60^o F$.

In 12 of the 16 hypothesis tests, we reject the null hypothesis in favor of the alternative (the mean number of people infected in regions with temperatures under $60^o F$ is greater than in regions with temperatures over $60^o F$). Nevertheless, it may be interesting to study what happened in the 4 cases where we fail to reject the null hypothesis. In my opinion, a nice way to see this is to relate the number of new infections by day. Fig. _"New Infections and Average Temperature by Day (Madrid, Dakar, Doha)"_ shows this relationship in the case of Madrid, Dakar and Doha (note that we fail to reject the null hypothesis in the tests "Madrid vs Doha" and "Madrid vs Dakar"), and it is easy to see an interesting pattern here:
1. We have only $\approx 32$ days in common between the three distributions (number of days since the first infection).
2. Madrid and Dakar have almost the same number of infections by day until day 32, while on some days Doha seems to have more infections than Madrid, which explains why we fail to reject the null hypothesis in these cases, but this isn't all:
    1. After day 32, the number of cases increases exponentially in Madrid, but we don't have more information about Doha or Dakar because at this moment these regions have only had one month since the first infection.
    2. The temperature in Madrid doesn't suffer big changes after day 32 (see Fig. _"New Infections and Average Temperature by Day"_), so this doesn't seem to be the factor that made the number of infections shoot up.

> At this point we can formulate two hypotheses:
1. The spread of the virus in Doha and Dakar is following a pattern very similar to the case of Madrid, and in the following days we are going to see an exponential increase in the number of new infections in these regions (I don't believe that this is the case if we also consider that the number of new infections in Dubai is stable and close to zero).
2. Other factors (like sociocultural ones) make a huge impact on the spread of the virus. This looks like a more reasonable hypothesis, but we are not going deeper here because this topic is beyond the scope of this project.
###Code
# Extract only the information of Dakar, Doha, and Madrid
Dakar_Doha_Madrid=covid_weather.loc[covid_weather['Province/State'].isin(['Doha','Dakar','Madrid']),:]
# Plot the number of new cases and temperature for Madrid, Dakar and Doha
g=sns.pairplot(x_vars="Days Since First Case", aspect=3,
y_vars=["Confirmed","TempAvg"], kind='scatter', hue="Province/State",
data=Dakar_Doha_Madrid);
g.fig.suptitle("New Infections and Average Temperature by Day (Madrid, Dakar, Doha)");
###Output
_____no_output_____
###Markdown
The same situation occurs in the case of Stockholm, Dakar, and Doha. You can see in Fig. _"New Infections and Average Temperature by Day (Stockholm, Dakar, Doha)"_ that the number of cases increases exponentially in Stockholm around 35 days after the first infection. The conclusions explained before in the case of Madrid extend to this case, and we need to wait for more information before drawing conclusions from the comparison of these regions.
###Code
Dakar_Doha_Stockholm=covid_weather.loc[covid_weather['Province/State'].isin(['Doha','Dakar','Stockholm']),:]
# Plot the number of new cases and temperature in Stockholm, Dakar and Doha
g=sns.pairplot(x_vars="Days Since First Case", aspect=3,
y_vars=["Confirmed","TempAvg"], kind='scatter', hue="Province/State",
data=Dakar_Doha_Stockholm);
g.fig.suptitle("New Infections and Average Temperature by Day (Stockholm, Dakar, Doha)");
###Output
_____no_output_____
###Markdown
Finally, below I include an interactive graphic as a tool in case somebody wants to play with this data. If you find some patterns that I didn't notice, please let me know.
###Code
# Plot the number of new infections by day and region
px.scatter(covid_weather, x="Days Since First Case",y="Confirmed",color="Province/State",
title='New Infections by Day')
###Output
_____no_output_____
###Markdown
Coronavirus vs Humidity So far, we know that high temperatures potentially reduce the spread of the coronavirus, but what about other weather factors like humidity or pressure? In this section, I am going to see if there is any kind of relationship between the humidity and the spread of the virus. The next table summarizes the minimum and maximum values of the average humidity by `Province/State`. The range of values is wide in each case, with New York being the state with the lowest reported humidity.
###Code
# Min and Max Humidity Average values by Province/State
covid_weather.pivot_table(index="Province/State",values="HumAvg", aggfunc=['min','max'])
###Output
_____no_output_____
###Markdown
Fig. _"Number of New Infections by Day and Humidity Average"_ shows the number of new infections in relationship with the days since the first infection and the humidity average by day. If we interact with the graphic, it's possible to observe that there is not (at least with the naked eye) a clear relationship between the humidity and the new infections (low or high humidity doesn't imply a high or a low number of infections and vice versa in each `Province/State`). > Nevertheless, would be interesting to see if exists differences in the humidity between regions and in such cases if that difference seems to have an impact on the number of cases.
###Code
# PLot the number of infections in relationship with the Days since the first infection and the Humidity Avg
px.scatter(covid_weather, x="Days Since First Case",
y="Confirmed", size="HumAvg",color="Province/State",
title="Number of New Infections by Day and Humidity Average")
###Output
_____no_output_____
###Markdown
Again, if we want to compare the number of cases between regions it's necessary to consider the same number of days since the first infection. The table below shows that the maximum number of days that we can consider is 25 (New York).
###Code
# Number of cases by Province/State
covid_weather.pivot_table(index="Province/State",values='HumAvg',aggfunc='count')
###Output
_____no_output_____
###Markdown
The next chunk of code transforms our data frame so that we only include the first 25 observations by region. Just to be sure that everything looks as planned, I printed the minimum and maximum day after the first case by Province/State.
###Code
# Reduce each group to only the first 25 observations
reduced_data=covid_weather.loc[(covid_weather['Days Since First Case']>=0) &
(covid_weather['Days Since First Case']<=24),:]
reduced_data.pivot_table(index='Province/State',values='Days Since First Case',aggfunc=['min','max'])
###Output
_____no_output_____
###Markdown
The fastest way to test for differences in the distribution of `HumAvg` between regions (`Province/State`) is to use an analysis of variance ([ANOVA](https://en.wikipedia.org/wiki/Analysis_of_variance)). One of the key assumptions of this test is the homogeneity of the variance ([homoscedasticity](https://en.wikipedia.org/wiki/Homoscedasticity)), and a good way to check this is using [Levene's test](https://en.wikipedia.org/wiki/Levene%27s_test). Under the null hypothesis, this test assumes equal variances, while the alternative hypothesis is that the variances are different. Again, I am going to use a critical value $\alpha=0.05$.
###Code
# Create a list of 1-D arrays with the information of the Average Humidity.
data = [reduced_data.loc[ids, 'HumAvg'].values for ids in
reduced_data.groupby('Province/State').groups.values()]
# Run the Levene's test for the homeostasis of the variance
from scipy import stats
print(stats.levene(*data))
###Output
LeveneResult(statistic=7.524914343656738, pvalue=7.474531583844797e-09)
###Markdown
The p-value is smaller than 0.05, so we reject the null hypothesis ==> There are differences in the variance between groups ==> We can't use ANOVA. The alternative is to use a [Kruskal-Wallis H hypothesis test](https://en.wikipedia.org/wiki/Kruskal%E2%80%93Wallis_one-way_analysis_of_variance), which is the non-parametric version of ANOVA.
###Code
# Kruskal-Wallis H hypothesis test (analysis of the variance)
stats.kruskal(*data)
###Output
_____no_output_____
###Markdown
The p-value is on the order of $10^{-19}$ ==> Reject the null hypothesis that the population medians of all of the groups are equal, and as a consequence, there are differences in the humidity between cities. Now, we need to do a post hoc test to see which groups are different. For this, I am going to use [Dunn's test](https://www.tandfonline.com/doi/abs/10.1080/00401706.1964.10490181) as a post hoc with a [Bonferroni correction](https://en.wikipedia.org/wiki/Bonferroni_correction) of the p-value, and this is justified because:
* Dunn's test employs the same ranking as the Kruskal-Wallis test.
* Dunn's test employs the pooled variance implied by the null hypothesis of the Kruskal-Wallis test.

See [here](https://en.wikipedia.org/wiki/Kruskal%E2%80%93Wallis_one-way_analysis_of_variancecite_note-Dunn-5) for more information. Note that the Bonferroni correction is necessary because we are doing multiple comparisons between groups.
###Code
# Use the library scikit_posthocs to the posthoc test
import scikit_posthocs as sp
result=sp.posthoc_dunn(reduced_data,val_col='HumAvg',p_adjust='bonferroni',group_col='Province/State')
###Output
_____no_output_____
###Markdown
The next heatmap shows the results of the post hoc test, and if we take into account our previous study of the differences between the numbers of new infections, we can infer that the humidity doesn't have any influence on the spread of the virus. For example, in the first 25 days of infection, New York had 195 times more cases than Doha and 7922 times more than Dubai, but our test revealed that no statistically significant differences exist between the humidity of these regions. On the other hand, there is a significant difference between the humidity in Madrid and Dakar, but, as we analyzed in the previous section, there are no differences between the distributions of new infections in these two regions.
> Based on these results, I think that the humidity is not an influencing environmental factor in the spread of the virus.
###Code
# Plot the results as a heatmap
sp.sign_plot(result);
###Output
_____no_output_____
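###Markdown
For reference, the specific pairwise p-values discussed above can also be read directly from the `result` matrix returned by `posthoc_dunn` (its rows and columns are labeled with the `Province/State` names):
###Code
# Dunn p-values (Bonferroni-corrected) for the pairs mentioned in the text
print(result.loc['New York', ['Doha', 'Dubai']])
print(result.loc['Madrid', 'Dakar'])
###Output
_____no_output_____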
###Markdown
Coronavirus vs Pressure According to [Gay-Lussac's law](https://en.wikipedia.org/wiki/Gay-Lussac%27s_law), pressure and temperature are directly proportional to each other, so if the temperature increases, the pressure increases, and vice versa. Some good examples and explanations are available [here](https://www.enotes.com/homework-help/what-relationship-between-air-temperature-air-162931).
> "An easy way to understand this concept is by comparing car tires in the winter and car tires in the summer. In the summer the air is a lot warmer so the molecules are moving around a lot. The tire expands and you do not need as much air because the pressure in the tires is high enough. In the winter, when the air is cold, the molecules are moving very slowly. Many people need to add more air to their tires because there is not enough pressure."

So, if these two variables are correlated and we already analyzed the temperature, there is no reason to analyze the pressure, right? Maybe, but not so fast! There is another atmospheric relationship that we haven't talked about yet, which is that the [pressure drops as altitude increases](https://www.nationalgeographic.org/encyclopedia/atmospheric-pressure/), and as the pressure decreases the amount of oxygen also decreases. Now, do you think that it's valid to study the altitude of the provinces or states as a factor in the spread of the virus? It seems unlikely that this could be a key factor, but at least we can clear up the doubt. The altitude in feet and meters of our 9 `Province/State` is included in the next table.

| Province/State | Altitude |
|----------------|--------------|
| New York | 33' (10m) |
| Madrid | 2188' (667m) |
| Doha | 33' (10m) |
| Dakar | 72' (22m) |
| Dubai | 52' (16m) |
| Lombardia | 390' (120m) |
| Quebec | 322' (98m) |
| Stockholm | 62' (19m) |
| Vienna | 2273' (350m) |

The region with the highest altitude is Madrid, followed by Vienna and Lombardia, while New York, Doha, and Dubai have very similar altitudes. The case of Madrid is interesting because we know that this city is one of the three with the most cases worldwide. Fig. _"Boxplot Pressure by Province/State"_ shows that, as expected, Madrid has a significantly lower pressure than the other regions. So, if the pressure given the altitude is a factor in the spread of the virus, we should expect the number of infections in Madrid to be greater (or lower) than in each of the other regions.
###Code
# Boxplot of the pressure by Province/State
px.box(covid_weather, x='Province/State', y='Pressure_Avg',
title='Boxplot Pressure by Province/State')
###Output
_____no_output_____
###Markdown
The previous idea seems valid, but we need to take the temperature out of the equation if we want to be sure that any differences we find are due to the altitude/pressure and not to the temperature (which we are fairly sure does influence the spread of the virus). With this aim, the simplest solution is to include in the study only the `Province/State` regions whose temperatures are not significantly different from Madrid's. As we did in Section [Coronavirus vs Humidity](coronavirus_vs_humidity), we are going to analyze all the cities at the same time through an analysis of variance.
###Code
# Create a list of 1-D arrays with the average temperature of each Province/State.
data = [reduced_data.loc[ids, 'TempAvg'].values for ids in
reduced_data.groupby('Province/State').groups.values()]
# Levene's test
print(stats.levene(*data))
print('The test reveals that there are statistically significant differences between the variance of the temperature in different cities.')
# Kruskal-Wallis H hypothesis test (analysis of the variance)
print(stats.kruskal(*data))
print('The Kruskal-Wallis test shows that there are differences in the distribution of the temperature across different Province/State.')
###Output
KruskalResult(statistic=197.93719460751058, pvalue=1.738021217309792e-38)
The Kruskal-Wallis test shows that there are differences in the distribution of the temperature across different Province/State.
###Markdown
The heatmap below shows the result of the post hoc Dunn's test with Bonferroni correction. In the case of Madrid, we fail to reject the null hypothesis of equal median temperatures for the combinations with New York, Lombardia, and Vienna, so now we can take these pairwise combinations {(Madrid-New York), (Madrid-Lombardia), (Madrid-Vienna)} and check whether there are differences in the number of new infections.
###Code
# Dunn Posthoc test with Bonferroni correction
result=sp.posthoc_dunn(covid_weather,val_col='TempAvg',p_adjust='bonferroni',group_col='Province/State')
sp.sign_plot(result);
###Output
_____no_output_____
###Markdown
At this point, the objective is to see whether there are differences in the number of infections in each of the three pairs of cities defined before. Based on the same reasoning given in Section [Coronavirus vs Temperature](coronavirus_vs_temperature), we are going to use a one-tailed Student's t-test with critical value $\alpha=0.05$. Again, the null hypothesis is that there is no difference in the mean number of new infections between the cities, and the alternative is that there is. The results (see the data frame below) are extremely interesting! Note that in all the comparisons the number of new infections in Madrid is significantly lower (from a statistical point of view) than in all the other regions with similar temperatures. Also, the population density in Madrid is greater than in the other three regions, so this does not seem to be the factor that explains the difference. What do you think about this? Is it not strange? I think it is, because we know that Madrid is currently one of the three regions with the most cases, so what happened here? * This effect appears because (as before) we only consider the common maximum number of days since the first infection for both cities. Remember that I made this decision as a way to mitigate the effect that cities with a greater number of days are more likely to have more cases.* If we look at the number of new cases by city against the number of days since the first infection (Fig. _"New Infections by Day (Madrid, New York, Lombardia, Vienna)"_ below), it is easy to see that our dataset only has information for Vienna and Lombardia for approximately 40 days, and in those first 40 days Madrid has fewer infections than these two regions and New York; this is why we obtain statistically significant differences.* Now, the number of cases increased exponentially in Madrid after 40 days of infection, so maybe these differences in the first 40 days are just a random effect and are not related to the altitude, or the altitude does have an effect on the spread of the virus but a new factor took effect in Madrid after the first 40 days and mitigated it. > At this point, this analysis is interesting but far from conclusive! A lot more data and analysis are necessary if we want to prove this hypothesis.
###Code
# Create a dictionary with the combinations of cities
cities2test=dict({'Madrid': ['New York','Lombardia','Vienna']})
# Run the tests
results_pvalue, results_stat=t_test_byCities(cities2test,covid_weather)
results_pvalue['t-stats']=results_stat['Madrid']
results_pvalue=results_pvalue.rename(columns={'Madrid':'p-value'})
results_pvalue
# Plot the number of cases by day only for Madrid, Lombardia, New York and Vienna.
g=sns.relplot(x='Days Since First Case',y='Confirmed',hue='Province/State',
data=covid_weather.loc[covid_weather['Province/State'].isin(['Madrid',
'New York','Lombardia',
'Vienna']),:]);
g.fig.suptitle("New Infections by Day (Madrid, New York, Lombardia, Vienna)");
###Output
_____no_output_____ |
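###Markdown
A minimal sketch of the one-tailed comparison described above, added for illustration. It is an assumption on my part that the `t_test_byCities` helper wraps a call of this kind, and the `alternative=` argument requires `scipy>=1.6`.
###Code
# Illustrative one-sided Welch t-test for a single pair of cities,
# using the cumulative `Confirmed` column purely as an example.
madrid = covid_weather.loc[covid_weather['Province/State'] == 'Madrid', 'Confirmed']
newyork = covid_weather.loc[covid_weather['Province/State'] == 'New York', 'Confirmed']
stat, pvalue = stats.ttest_ind(madrid, newyork, equal_var=False, alternative='less')
print('t-statistic:', round(stat, 3), 'one-sided p-value:', round(pvalue, 4))
###Output
_____no_output_____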
matlantis_contrib_examples/generate_random_structure/generate_random_structure.ipynb | ###Markdown
Copyright Preferred Computational Chemistry, Inc. and Preferred Networks, Inc. as contributors to Matlantis contrib project An example of generating random structures on Matlantis. Note: the execution time increases as the number of atoms grows.
###Code
!pip install ase pymatgen
# # Install the libraries only the first time you use this notebook.
###Output
_____no_output_____
###Markdown
1. Random structure generation with ASE
###Code
import numpy as np
from ase import Atoms
from ase.data import atomic_numbers
from ase.ga.utilities import closest_distances_generator, CellBounds
from ase.data import atomic_numbers, covalent_radii
from ase.ga.startgenerator import StartGenerator
from ase.ga.utilities import closest_distances_generator
from ase.visualize import view
###Output
_____no_output_____
###Markdown
1-1. Crystals
###Code
blocks = ['Ti'] * 4 + ['O'] * 8 # composition of the crystal we want to build
box_volume = 12 * 12 # empirically, about 10-12 x (number of atoms) works well
blmin = closest_distances_generator(atom_numbers=Atoms(blocks).get_atomic_numbers(),
                                    ratio_of_covalent_radii=0.8) # minimum allowed distances between pairs of atoms
cellbounds = CellBounds(bounds={'phi': [30, 150], 'chi': [30, 150],
                                'psi': [30, 150], 'a': [3, 10],
                                'b': [3, 10], 'c': [3, 10]}) # allowed range of the cell parameters
slab = Atoms('', pbc=True) # template object into which the atoms are packed
sg = StartGenerator(slab, blocks, blmin, box_volume=box_volume,
cellbounds=cellbounds,
number_of_variable_cell_vectors=3,
test_too_far=False)
atoms = sg.get_new_candidate()
atoms
v = view(atoms, viewer='ngl')
v.view.add_representation("ball+stick")
display(v)
###Output
_____no_output_____
###Markdown
1-2. Molecular crystals
###Code
blocks = ['H2O'] * 8
box_volume = 20 * 8 # depends on how densely you want to pack the molecules
blmin = closest_distances_generator(atom_numbers=[1,8], # H and O
                                    ratio_of_covalent_radii=1.2) # a larger value makes it harder for molecules to stick to each other
cellbounds = CellBounds(bounds={'phi': [30, 150], 'chi': [30, 150],
'psi': [30, 150], 'a': [3, 10],
'b': [3, 10], 'c': [3, 10]})
slab = Atoms('', pbc=True)
sg = StartGenerator(slab, blocks, blmin, box_volume=box_volume,
cellbounds=cellbounds,
number_of_variable_cell_vectors=3,
test_too_far=False)
atoms = sg.get_new_candidate()
atoms
v = view(atoms, viewer='ngl')
v.view.add_representation("ball+stick")
display(v)
###Output
_____no_output_____
###Markdown
2. Random structure generation with pyxtal The pyxtal library ( https://github.com/qzhu2017/PyXtal ) is open-source software released under the MIT license. It is still under active development, so pay attention to which version you use. pyxtal.XRD depends on `numba>=0.50.1`, and `numba 0.54.1` requires `numpy=1.17`, so it cannot be installed with the default numpy version on Matlantis.
###Code
!pip install numpy==1.20
!pip install pyxtal
from pyxtal import pyxtal
from ase.visualize import view
from pymatgen.io.ase import AseAtomsAdaptor
!pip install -U numpy
###Output
_____no_output_____
###Markdown
2-1. Crystals Specify the space group by its number to generate a structure while preserving that symmetry. For example, the rutile structure belongs to P42/mnm (136).
###Code
crystal = pyxtal()
crystal.from_random(3, 136, ['Ti','O'], [4,8]) # dimension, space group, elements, composition
crystal
atoms = crystal.to_ase()
atoms.wrap()
v = view(atoms, viewer='ngl')
v.view.add_representation("ball+stick")
display(v)
###Output
_____no_output_____
###Markdown
2-2. Molecular crystals
###Code
mol_crystal = pyxtal(molecular=True)
mol_crystal.from_random(3, 36, ['H2O'], [8])
mol_crystal
v = view(mol_crystal.to_ase(), viewer='ngl')
v.view.add_representation("ball+stick")
display(v)
###Output
_____no_output_____
###Markdown
2-3. Clusters Set the dimension to 0 and specify a point group instead of a space group. Use C1 (1) if you do not want to impose any symmetry.
###Code
cluster = pyxtal()
cluster.from_random(0, 1, ['Pt'], [13]) # dimension, point group, elements, composition
cluster
v = view(cluster.to_ase(), viewer='ngl')
v.view.add_representation("ball+stick")
display(v)
###Output
_____no_output_____ |
QA_baseline_semifinals.ipynb | ###Markdown
Baseline solution "Artificial Intelligence" track of the 2020 Olympiad of the NTI Kruzhok Movement and the Academy of Artificial Intelligence for schoolchildren Loading the dataThe training and validation data, as well as this notebook, are available in the [repository](https://github.com/AI-Front/NTI)
###Code
!wget https://raw.githubusercontent.com/AI-Front/NTI/main/semifinals/data/train.jsonl
!wget https://raw.githubusercontent.com/AI-Front/NTI/main/semifinals/data/val.jsonl
###Output
--2020-11-26 11:07:24-- https://raw.githubusercontent.com/AI-Front/NTI/main/semifinals/data/val.jsonl
Resolving raw.githubusercontent.com (raw.githubusercontent.com)... 151.101.0.133, 151.101.64.133, 151.101.128.133, ...
Connecting to raw.githubusercontent.com (raw.githubusercontent.com)|151.101.0.133|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 566932 (554K) [text/plain]
Saving to: ‘val.jsonl’
val.jsonl 100%[===================>] 553.64K --.-KB/s in 0.03s
2020-11-26 11:07:24 (17.8 MB/s) - ‘val.jsonl’ saved [566932/566932]
###Markdown
Формат данныхМы имеем дело с jsonl - JSON Lines format ```{"idx": 0, "passage": {"text": "(1) Самый первый «остров» Архипелага возник в 1923 году на месте Соловецкого монастыря. (2) Затем появились ТОНы — тюрьмы особого назначения и этапы. (3) Люди попадали на Архипелаг разными способами: в вагон-заках, на баржах, пароходах и пешими этапами. (4) В тюрьмы арестованных доставляли в «воронках» — фургончиках чёрного цвета. (5) Роль портов Архипелага играли пересылки, временные лагеря, состоящие из палаток, землянок, бараков или участков земли под открытым небом. (6) На всех пересылках держать «политических» в узде помогали специально отобранные урки, или «социально близкие». (7) Солженицын побывал на пересылке Красная Пресня в 1945 году. (8) Эмигранты, крестьяне и «малые народы» перевозили красными эшелонами. (9) Чаще всего такие эшелоны останавливались на пустом месте, посреди степи или тайги, и осуждённые сами строили лагерь. (10) Особо важные заключённые, в основном учёные, перевозились спецконвоем. (11) Так перевозили и Солженицына. (12) Он назвался ядерным физиком, и после Красной Пресни его перевезли в Бутырки.", "questions": [{"question": "Почему Солженицына перевозили спецконвоем?", "answers": [{"idx": 0, "text": "Так перевозили особо важных заключенных.", "label": 1}, {"idx": 1, "text": "Потому, что был эмигрантом.", "label": 0}, {"idx": 2, "text": "Потому, что он сам вырыл себе землянку.", "label": 0}, {"idx": 3, "text": "Потому, что он побывал на пересылке Красная Пресня в 1945 году.", "label": 0}, {"idx": 4, "text": "Потому, что он был особо важным заключённым и назвался ядерным физиком.", "label": 1}], "idx": 0}, {"question": "Как люди попадали в тюрьмы особого типа на Соловках?", "answers": [{"idx": 5, "text": "Люди попадали на архипелаг с помощью дрезин и вертолётов.", "label": 0}, {"idx": 6, "text": "Люди попадали на Архипелаг разными способами: в вагон-заках, на баржах, пароходах, пешими этапами, а также спецконвоем.", "label": 1}, {"idx": 7, "text": "Люди попадали на архипелаг с помощью специально отобранных пони.", "label": 0}, {"idx": 8, "text": "Люди попадали на Соловки с помощью вертолётов и дрезин.", "label": 0}, {"idx": 9, "text": "Люди попадали на Соловки с помощью вагон-заков, барж, пароходов, спецконвоев или пешком.", "label": 1}], "idx": 1}]}}``` Импортируем библиотеки
###Code
import json
import os, re
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
%pylab inline
from sklearn.metrics import *
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier, ExtraTreeClassifier
from sklearn.linear_model import SGDClassifier,LogisticRegression
from sklearn.ensemble import RandomForestClassifier
###Output
Populating the interactive namespace from numpy and matplotlib
###Markdown
Preparing the dataHere you have plenty of room for engineering decisions: * there are triples - text, question, answer* there are pairs - question and correct answer, question and wrong answer* there are pairs - question and answer,* etc.In the baseline we take the simplest route - we glue the text, the question and the answer together; this becomes a single classification item with an attached label (class 0 or 1)
###Code
def get_X_y(data_json_file):
X, y = [], []
with open(data_json_file, 'r') as json_file:
json_list = list(json_file)
#print(json_list[0])
for json_str in json_list:
item = json.loads(json_str)
text = item['passage']['text']
#print(item['passage'].keys())
questions = item['passage']['questions']
for q in questions:
query = q['question']
ans = q['answers']
for a in ans:
X.append(text+' Query: '+query+' Answer: '+a['text'])
y.append(a['label'])
return X, y
X_train, y_train = get_X_y('train.jsonl')
X_test, y_test = get_X_y('val.jsonl')
###Output
_____no_output_____
###Markdown
Let's look at the number of examples and the class composition:
###Code
len(X_train)
len(X_test)
from collections import Counter
print(Counter(y_train))
print(Counter(y_test))
###Output
Counter({0: 6568, 1: 5382})
Counter({0: 1242, 1: 993})
###Markdown
Running the baseline Trying different models and parameters - the classic approachWe will try models of different types, choose the best of the simple classifiers, and then tune its settings
###Code
rs = 42
clf = LogisticRegression(random_state=rs)
clf2 = RandomForestClassifier(random_state=rs, n_jobs =-1)
clf3 = SGDClassifier()
clf4 = SVC(random_state =rs)
clf5 = DecisionTreeClassifier(random_state=rs)
"""clf6 = SVC(class_weight="balanced", random_state =rs)
clf7 = DecisionTreeClassifier()
clf8 = ExtraTreeClassifier()
clf9 = LinearRegression()
clf10 = LogisticRegressionCV()
clf11 = GradientBoostingClassifier(random_state =rs)"""
clflist = [clf, clf2, clf3, clf4, clf5]
###Output
_____no_output_____
###Markdown
Let's run and evaluate the solution
###Code
for classif in clflist:
clf = Pipeline([
('vect', CountVectorizer(ngram_range=(1,3), analyzer='word', max_features=10000)),
('tfidf', TfidfTransformer(sublinear_tf=True)),
#('reducer', TruncatedSVD(n_components=Val3)),
('clf', classif),
])
clf.fit(X_train, y_train)
predictions = clf.predict(X_test)
print(classif)
print("Precision: {0:6.2f}".format(precision_score(y_test, predictions, average='macro')))
print("Recall: {0:6.2f}".format(recall_score(y_test, predictions, average='macro')))
print("F1-measure: {0:6.2f}".format(f1_score(y_test, predictions, average='macro')))
print("Accuracy: {0:6.2f}".format(accuracy_score(y_test, predictions)))
print(classification_report(y_test, predictions))
labels = clf.classes_
sns.heatmap(data=confusion_matrix(y_test, predictions), annot=True, fmt="d", cbar=False, xticklabels=labels, yticklabels=labels)
plt.title("Confusion matrix")
plt.show()
###Output
LogisticRegression(C=1.0, class_weight=None, dual=False, fit_intercept=True,
intercept_scaling=1, l1_ratio=None, max_iter=100,
multi_class='auto', n_jobs=None, penalty='l2',
random_state=42, solver='lbfgs', tol=0.0001, verbose=0,
warm_start=False)
Precision: 0.50
Recall: 0.50
F1-measure: 0.40
Accuracy: 0.55
precision recall f1-score support
0 0.56 0.94 0.70 1242
1 0.45 0.06 0.11 993
accuracy 0.55 2235
macro avg 0.50 0.50 0.40 2235
weighted avg 0.51 0.55 0.44 2235
###Markdown
Room for your experiments* What happens if you try other tf-idf settings or a different number of features?* Why not use word2vec vectors for the texts? [Example](http://nadbordrozd.github.io/blog/2016/05/20/text-classification-with-word2vec/) (a rough sketch is included in the code cell below) * Think about the limitations of linear models that receive a single vector for everything: text, question and answer* Propose your own methods for checking that an answer matches the content of the text [Example1](http://docs.deeppavlov.ai/en/0.0.6.5/components/tfidf_ranking.html) [Example2](https://medium.com/deeppavlov/open-domain-question-answering-with-deeppavlov-c665d2ee4d65)
###Code
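# A rough sketch of the word2vec idea suggested above (an illustration, not part
# of the original baseline). It assumes `word_vectors` is a dict-like mapping
# from a token to a fixed-size numpy vector (e.g. a pre-trained Russian
# word2vec/fastText model loaded elsewhere) -- that name is hypothetical.
def mean_embedding(texts, word_vectors, dim):
    out = np.zeros((len(texts), dim))
    for i, text in enumerate(texts):
        vecs = [word_vectors[w] for w in text.split() if w in word_vectors]
        if vecs:
            out[i] = np.mean(vecs, axis=0)
    return out
# Example usage (with a hypothetical 300-dimensional model):
# X_train_vec = mean_embedding(X_train, word_vectors, 300)
# X_test_vec = mean_embedding(X_test, word_vectors, 300)
# clf = LogisticRegression(max_iter=1000).fit(X_train_vec, y_train)
# print(f1_score(y_test, clf.predict(X_test_vec), average='macro'))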
###Output
_____no_output_____
###Markdown
Let's try BERTRestart the runtime! It is also better to switch to a GPU here.Install all the libraries below, then reload the data and re-import sklearn - this way you avoid version conflicts
###Code
!pip install transformers
!pip install tensorboardx
!pip install simpletransformers
train_df = pd.DataFrame({
'text': X_train,
'label':y_train
})
print(train_df.head())
eval_df = pd.DataFrame({
'text': X_test,
'label': y_test
})
print(eval_df.head())
from simpletransformers.classification import ClassificationModel
# Create a TransformerModel
model = ClassificationModel('bert', 'bert-base-multilingual-uncased',use_cuda=False)
# Train the model
model.train_model(train_df)
# Evaluate the model
result, model_outputs, wrong_predictions = model.eval_model(eval_df)
predictions, raw_outputs = model.predict(X_test)
print("Precision: {0:6.2f}".format(precision_score(y_test, predictions, average='macro')))
print("Recall: {0:6.2f}".format(recall_score(y_test, predictions, average='macro')))
print("F1-measure: {0:6.2f}".format(f1_score(y_test, predictions, average='macro')))
print("Accuracy: {0:6.2f}".format(accuracy_score(y_test, predictions)))
print(classification_report(y_test, predictions))
confusion_matrix(y_test, predictions)
###Output
_____no_output_____
###Markdown
What else can be improved?A limitation of BERT is that it only processes the first 512 tokens of the text.That does not suit us well - if the texts are simply cut at the 512th token, as simpletransformers does, we may cut off both the question and the answer! Then our classifier cannot learn to tell wrong answers apart.Let's truncate the texts from the beginning rather than the end:
###Code
def text_splitter(text, amount=500):
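    # Keep only the last `amount` whitespace-separated tokens, so that the query and
    # answer appended at the end of the string survive BERT's 512-token limit.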
tokens = text.split(' ')
new_text = ' '.join(tokens[-amount:])
return new_text
def get_X_y_for_bert(data_json_file):
X, y = [], []
with open(data_json_file, 'r') as json_file:
json_list = list(json_file)
#print(json_list[0])
for json_str in json_list:
item = json.loads(json_str)
text = item['passage']['text']
questions = item['passage']['questions']
for q in questions:
query = q['question']
ans = q['answers']
for a in ans:
X.append(text_splitter(text+' Query: '+query+' Answer: '+a['text']))
y.append(a['label'])
return X, y
###Output
_____no_output_____ |
nonlinear/Fixed point iteration.ipynb | ###Markdown
Fixed Point Iteration
###Code
import numpy as np
import matplotlib.pyplot as pt
###Output
_____no_output_____
###Markdown
**Task:** Find a root of the function below by fixed point iteration.
###Code
x = np.linspace(0, 4.5, 200)
def f(x):
return x**2 - x - 2
pt.plot(x, f(x))
pt.grid()
###Output
_____no_output_____
###Markdown
Actual roots: $2$ and $-1$. Here: focusing on $x=2$. We can choose a wide variety of functions that have a fixed point at the root $x=2$:(These are chosen knowing the root. But here we are only out to study the *behavior* of fixed point iteration, not the finding of fixed point functions--so that is OK.)
###Code
def fp1(x): return x**2-2
def fp2(x): return np.sqrt(x+2)
def fp3(x): return 1+2/x
def fp4(x): return (x**2+2)/(2*x-1)
fixed_point_functions = [fp1, fp2, fp3, fp4]
for fp in fixed_point_functions:
pt.plot(x, fp(x), label=fp.__name__)
pt.ylim([0, 3])
pt.legend(loc="best")
###Output
/usr/local/lib/python3.5/dist-packages/ipykernel/__main__.py:3: RuntimeWarning: divide by zero encountered in true_divide
app.launch_new_instance()
###Markdown
Common feature?
###Code
for fp in fixed_point_functions:
print(fp(2))
# All functions have 2 as a fixed point.
z = 2.1; fp = fp1
#z = 1; fp = fp2
#z = 1; fp = fp3
#z = 1; fp = fp4
n_iterations = 4
pt.figure(figsize=(8,8))
pt.plot(x, fp(x), label=fp.__name__)
pt.plot(x, x, "--", label="$y=x$")
pt.gca().set_aspect("equal")
pt.ylim([-0.5, 4])
pt.legend(loc="best")
for i in range(n_iterations):
z_new = fp(z)
pt.arrow(z, z, 0, z_new-z)
pt.arrow(z, z_new, z_new-z, 0)
z = z_new
print(z)
###Output
2.41
3.8081000000000005
12.501625610000003
154.29064289260796
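###Markdown
The iterates above blow up for `fp1`. As a note (standard fixed-point theory, not part of the original text): the iteration $x_{k+1} = g(x_k)$ converges locally to a fixed point $x^*$ when $|g'(x^*)| < 1$ and diverges when $|g'(x^*)| > 1$. A quick numerical check of $|g'(2)|$ for each candidate:
###Code
# Central finite-difference estimate of |g'(2)| for each fixed point function.
h = 1e-6
for fp in fixed_point_functions:
    deriv = (fp(2 + h) - fp(2 - h)) / (2 * h)
    print(f"{fp.__name__}: |g'(2)| ~ {abs(deriv):.3f}")
###Output
_____no_output_____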
|
cn/.ipynb_checkpoints/sicp-3-01-checkpoint.ipynb | ###Markdown
SICP Exercise (3.1) solution summary: assignment and accumulators The third chapter of SICP is "Modularity, Objects, and State", and this is where the concept of "assignment" is first discussed. As typical programmers we do find it strange: a programming book is almost half over and it still has not covered "assignment"? If we cannot assign, what have the programs in the previous hundred-plus exercises been doing? This is a big difference between functional programming and imperative programming. In functional programming a function itself has no state: when called, a given function processes its input values and produces output values, and its behaviour is fixed no matter when you call it. In imperative programming, and especially in the object-oriented programming that later evolved from it, a function may have state; calling certain functions can change that state, the most obvious example being a class's setter. Once a function has state, its behaviour is no longer determined by the input alone: the value it returns depends not only on its inputs but also on its own state. Scheme also has statements that change state, such as set!, which changes the value of a variable. In Scheme, whenever you see a function name with an exclamation mark, take note: that function mutates state. Let's run a few simple snippets below. They are extremely simple - very much like the first chapter of a beginner's programming book - and it is indeed hard to imagine discussing such code in the solutions to a book like SICP:
###Code
(define a 10)
a
(set! a 20)
a
###Output
_____no_output_____
###Markdown
The exercise itself asks us to create an accumulator. Although the code is very simple, haven't we already seen a hint of object orientation here? Even though introducing state is what gives Lisp its object-oriented capabilities, the authors of SICP do not seem to find this remarkable. Later they even discuss "the costs of introducing assignment": the cost is that "any simple model with nice properties" becomes hard to reason about once assignment is introduced. Many Lisp enthusiasts call exclamation-mark functions such as (set!) functions with "side effects" - for them, this is exactly that, a "side effect".
###Code
(define (make-accumulator total)
(lambda (value-added)
(begin
(set! total (+ total value-added))
total)))
(define (start-test-3-1)
(define A (make-accumulator 5))
(define B (make-accumulator 10))
(display "A: ")
(display (A 5))
(newline)
(display "B: ")
(display (B 5))
(newline)
(display "A: ")
(display (A 5))
(newline)
(display "A: ")
(display (A 5))
(newline)
(display "B: ")
(display (B 5))
(newline)
)
(start-test-3-1)
###Output
A: 10
B: 15
A: 15
A: 20
B: 20
|
1Bag of Words.ipynb | ###Markdown
Bag of Words A bag-of-words model represents texts as numbers: you count how often each word occurs in a document, ignoring grammar and word order.
###Code
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
vectorizer = CountVectorizer()
text1 = open('../Data/carl_sagan_quote4.txt').read()
text2 = open('../Data/carl_sagan_quote2.txt').read()
text3 = open('../Data/carl_sagan_quote3.txt').read()
corpus = [text1,text2,text3]
doc_vec = vectorizer.fit_transform(corpus)
df = pd.DataFrame(doc_vec.toarray().transpose(), index=vectorizer.get_feature_names())
# making a Term document matrix
df.columns = ['CS_text1','CS_text2','CS_text3']
df
###Output
_____no_output_____
###Markdown
Term Frequency-Inverse Document Frequency (TF-IDF) TF-IDF is a statistical measure that reflects how relevant a term is to a document within a collection of documents (corpus).
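For reference (this is the smoothed variant used by scikit-learn's `TfidfVectorizer` with default settings): $\mathrm{tfidf}(t, d) = \mathrm{tf}(t, d)\cdot\left(\ln\frac{1 + n}{1 + \mathrm{df}(t)} + 1\right)$, where $n$ is the number of documents and $\mathrm{df}(t)$ is the number of documents containing term $t$; the resulting document vectors are then L2-normalized.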
###Code
from sklearn.feature_extraction.text import TfidfVectorizer
vectorizer = TfidfVectorizer()
doc_vec = vectorizer.fit_transform(corpus)
df = pd.DataFrame(doc_vec.toarray().transpose(), index=vectorizer.get_feature_names())
df.columns = ['CS_text1','CS_text2','CS_text3']
df
###Output
_____no_output_____ |
Assignment 1 and 2 DAY 3.ipynb | ###Markdown
Assignment 1 Day 3 B7
###Code
num = input("Please enter the landing Height - ")
num = int(num)
if num% 1000 ==0:
print("Safe to land ")
num = input("Please enter the landing Height - ")
num = int(num)
if num% 4500 ==0:
print("Bring down to 1000 ")
num = input("Please enter the landing Height - ")
num = int(num)
if num% 6500 ==0:
print("Turn Around ")
###Output
_____no_output_____
###Markdown
Assignment 2 Day 3 B7
###Code
for Number in range (1, 200):
num = 0
for i in range(2, (Number//2 + 1)):
if(Number % i == 0):
num = num + 1
break
if (num == 0 and Number != 1):
print(" %d" %Number, end = ' ')
###Output
2 3 5 7 11 13 17 19 23 29 31 37 41 43 47 53 59 61 67 71 73 79 83 89 97 101 103 107 109 113 127 131 137 139 149 151 157 163 167 173 179 181 191 193 197 199 |
.internal/update_workshop_material.ipynb | ###Markdown
Update Workshop MaterialUnidata AMS 2021 Student Conference---This notebook can be used to update the workshop training material whenever updates are posted.--- Running the UpdateWhen you run the following cell, any changes from the workshop GitHub repository will be applied to the material under the `metpy-workshop/` directory in your workspace.
###Code
!gitpuller https://github.com/Unidata/metpy-workshop/ main metpy-workshop
###Output
_____no_output_____ |
gru_seq2seq/run_gcr.ipynb | ###Markdown
This section runs the chatbot code
###Code
%run seq2seq
###Output
_____no_output_____ |
MG_mu_Sigma_with_z_and_k_dependences.ipynb | ###Markdown
Examples of power spectra, C_ell's and correlations with Modified Gravity (MG) parameters that depend on redshift and scale Importing packages and setting up cosmological parameters besides the MG parameters
###Code
import numpy as np
import pyccl as ccl
import pylab as plt
import math
%matplotlib inline
Omega_c = 0.25; Omega_b = 0.05; h = 0.7; A_s = 2.1e-9;
n_s = 0.96; Neff = 3.046; m_nu = 0.
###Output
_____no_output_____
###Markdown
Parameter arrays for MG parameters today
###Code
mu_0 = [0.2, 0.2, -0.2, -0.2]
sigma_0 = [0.2, -0.2, 0.2, -0.2]
c1_mg = [1, 1.5, 1., 1.5]
c2_mg = [1.5, 1., 1.5, 1.]
lambda_mg = [1, 1, 10, 10]
###Output
_____no_output_____
###Markdown
Setting five cosmologies: GR and four MG models following the parameterization described in, for example, P. A. R. Ade et al. (Planck Collaboration), Astron. Astrophys. 594, A14 (2016), arXiv:1502.01590, for mu(a,k) and Sigma(a,k), with mu_0, Sigma_0, c_1, c_2, and lambda_mg parametrizing the scale dependence
###Code
cosmo_GR_C = ccl.Cosmology(Omega_c = Omega_c, Omega_b = Omega_b,
h = h, A_s = A_s, n_s = n_s, Neff = Neff,
m_nu = m_nu, matter_power_spectrum='linear',transfer_function='boltzmann_isitgr')
cosmo_1_C = ccl.Cosmology(Omega_c = Omega_c, Omega_b = Omega_b,
h = h, A_s = A_s, n_s = n_s, Neff = Neff,
m_nu = m_nu, mu_0 = mu_0[0], sigma_0 = sigma_0[0],
c1_mg = c1_mg[0], c2_mg = c2_mg[0], lambda_mg = lambda_mg[0],
matter_power_spectrum='linear',transfer_function='boltzmann_isitgr')
cosmo_2_C = ccl.Cosmology(Omega_c = Omega_c, Omega_b = Omega_b, h = h,
A_s = A_s, n_s = n_s, Neff = Neff,
m_nu = m_nu, mu_0 = mu_0[1], sigma_0 = sigma_0[1],
c1_mg = c1_mg[1], c2_mg = c2_mg[1], lambda_mg = lambda_mg[1],
matter_power_spectrum='linear',transfer_function='boltzmann_isitgr')
cosmo_3_C = ccl.Cosmology(Omega_c = Omega_c, Omega_b = Omega_b,
h = h, A_s = A_s, n_s = n_s, Neff = Neff,
m_nu = m_nu, mu_0 = mu_0[2], sigma_0 = sigma_0[2],
c1_mg = c1_mg[2], c2_mg = c2_mg[2], lambda_mg = lambda_mg[2],
matter_power_spectrum='linear',transfer_function='boltzmann_isitgr')
cosmo_4_C = ccl.Cosmology(Omega_c = Omega_c, Omega_b = Omega_b, h = h,
A_s = A_s, n_s = n_s, Neff = Neff,
m_nu = m_nu, mu_0 = mu_0[3], sigma_0 = sigma_0[3],
c1_mg = c1_mg[3], c2_mg = c2_mg[3], lambda_mg = lambda_mg[3],
matter_power_spectrum='linear',transfer_function='boltzmann_isitgr')
###Output
_____no_output_____
###Markdown
MG scale-dependence in the matter power spectrum
###Code
k = np.logspace(-4, 0) # 1 / Mpc units
plt.xticks(fontsize=18)
plt.yticks(fontsize=18)
Pk_GR_C = ccl.linear_matter_power(cosmo_GR_C, k, a=1.)
Pk_1_C = ccl.linear_matter_power(cosmo_1_C, k, a=1.)
Pk_2_C = ccl.linear_matter_power(cosmo_2_C, k, a=1.)
Pk_3_C = ccl.linear_matter_power(cosmo_3_C, k, a=1.)
Pk_4_C = ccl.linear_matter_power(cosmo_4_C, k, a=1.)
plt.loglog(k, Pk_GR_C, 'k', label='GR')
plt.loglog(k, Pk_1_C, 'b', label='$\mu_0$ ='+str(mu_0[0])+', $c_1$ ='+str(c1_mg[0])+
'$, c_2$ ='+str(c2_mg[0])+'$, \lambda$ ='+str(lambda_mg[0]))
plt.loglog(k, Pk_2_C, 'r', label='$\mu_0$ ='+str(mu_0[1])+'$, c_1$ ='+str(c1_mg[1])+
'$, c_2$ ='+str(c2_mg[1])+'$, \lambda$ ='+str(lambda_mg[1]))
plt.loglog(k, Pk_3_C, '--g', label='$\mu_0$ ='+str(mu_0[2])+'$, c_1$ ='+str(c1_mg[2])+
'$, c_2$ ='+str(c2_mg[2])+'$, \lambda$ ='+str(lambda_mg[2]))
plt.loglog(k, Pk_4_C, '--m', label='$\mu_0$ ='+str(mu_0[3])+'$, c_1$ ='+str(c1_mg[3])+
'$, c_2$ ='+str(c2_mg[3])+'$, \lambda$ ='+str(lambda_mg[3]))
plt.xlabel('$k\quad[Mpc^{-1}]$', fontsize = 15)
plt.ylabel('$P(k)\quad[{\\rm Mpc}]^3$', fontsize=12)
plt.legend(fontsize=12, loc='lower right')
plt.show()
###Output
_____no_output_____
###Markdown
Weak Lensing C_ell's
###Code
# Redshift array
z = np.linspace(0., 3., 600)
# Number density input
n = np.exp(-((z-0.5)/0.1)**2)
# ell range input
ell = np.arange(10, 2000)
# ClTracer objects
lens_GR_C = ccl.WeakLensingTracer(cosmo_GR_C, dndz=(z,n))
lens_1_C = ccl.WeakLensingTracer(cosmo_1_C, dndz=(z,n))
lens_2_C = ccl.WeakLensingTracer(cosmo_2_C, dndz=(z,n))
lens_3_C = ccl.WeakLensingTracer(cosmo_3_C, dndz=(z,n))
lens_4_C = ccl.WeakLensingTracer(cosmo_4_C, dndz=(z,n))
Cl_lensing_GR_C = ccl.angular_cl(cosmo_GR_C, lens_GR_C, lens_GR_C, ell)
Cl_lensing_1_C = ccl.angular_cl(cosmo_1_C, lens_1_C, lens_1_C, ell)
Cl_lensing_2_C = ccl.angular_cl(cosmo_2_C, lens_2_C, lens_2_C, ell)
Cl_lensing_3_C = ccl.angular_cl(cosmo_3_C, lens_3_C, lens_3_C, ell)
Cl_lensing_4_C = ccl.angular_cl(cosmo_4_C, lens_4_C, lens_4_C, ell)
plt.figure()
plt.loglog(ell, Cl_lensing_GR_C, 'k', label='GR')
plt.loglog(ell, Cl_lensing_1_C, 'b',
label='$\mu_0$ ='+str(mu_0[0])+', $\Sigma_0$ =' + str(sigma_0[0])+', $c_1$ ='+str(c1_mg[0])+
'$, c_2$ ='+str(c2_mg[0])+'$, \lambda$ ='+str(lambda_mg[0]))
plt.loglog(ell, Cl_lensing_2_C, 'r',
label='$\mu_0$ ='+str(mu_0[1])+', $\Sigma_0$ =' + str(sigma_0[1])+', $c_1$ ='+str(c1_mg[1])+
'$, c_2$ ='+str(c2_mg[1])+'$, \lambda$ ='+str(lambda_mg[1]))
plt.loglog(ell, Cl_lensing_3_C, '--g',
label='$\mu_0$ ='+str(mu_0[2])+', $\Sigma_0$ =' + str(sigma_0[2])+', $c_1$ ='+str(c1_mg[2])+
'$, c_2$ ='+str(c2_mg[2])+'$, \lambda$ ='+str(lambda_mg[2]))
plt.loglog(ell, Cl_lensing_4_C, '--m',
label='$\mu_0$ ='+str(mu_0[3])+', $\Sigma_0$ =' + str(sigma_0[3])+', $c_1$ ='+str(c1_mg[3])+
'$, c_2$ ='+str(c2_mg[3])+'$, \lambda$ ='+str(lambda_mg[3]))
plt.xlabel('$\ell$', fontsize=15)
plt.ylabel('$C_\ell$, lensing', fontsize=15)
plt.legend(fontsize=12, loc='lower right')
plt.show()
###Output
_____no_output_____
###Markdown
Weak Lensing Correlations
###Code
theta = np.logspace(-1.5, np.log10(5), 20) # In degrees
xi_p_GR_C = ccl.correlation(cosmo_GR_C, ell, Cl_lensing_GR_C, theta,
type='GG+', method='fftlog')
xi_p_1_C = ccl.correlation(cosmo_1_C, ell, Cl_lensing_1_C, theta,
type='GG+', method='fftlog')
xi_p_2_C = ccl.correlation(cosmo_2_C, ell, Cl_lensing_2_C, theta,
type='GG+', method='fftlog')
xi_p_3_C = ccl.correlation(cosmo_3_C, ell, Cl_lensing_3_C, theta,
type='GG+', method='fftlog')
xi_p_4_C = ccl.correlation(cosmo_4_C, ell, Cl_lensing_4_C, theta,
type='GG+', method='fftlog')
xi_m_GR_C = ccl.correlation(cosmo_GR_C, ell, Cl_lensing_GR_C, theta,
type='GG-', method='fftlog')
xi_m_1_C = ccl.correlation(cosmo_1_C, ell, Cl_lensing_1_C, theta,
type='GG-', method='fftlog')
xi_m_2_C = ccl.correlation(cosmo_2_C, ell, Cl_lensing_2_C, theta,
type='GG-', method='fftlog')
xi_m_3_C = ccl.correlation(cosmo_3_C, ell, Cl_lensing_3_C, theta,
type='GG-', method='fftlog')
xi_m_4_C = ccl.correlation(cosmo_4_C, ell, Cl_lensing_4_C, theta,
type='GG-', method='fftlog')
theta_amin = theta * 60. # In arcminutes.
plt.figure()
plt.loglog(theta_amin, xi_p_GR_C, 'k', label='GR')
plt.loglog(theta_amin, xi_p_1_C, 'b',
label='$\mu_0$ ='+str(mu_0[0])+', $\Sigma_0$ =' + str(sigma_0[0])+', $c_1$ ='+str(c1_mg[0])+
'$, c_2$ ='+str(c2_mg[0])+'$, \lambda$ ='+str(lambda_mg[0]))
plt.loglog(theta_amin, xi_p_2_C, 'g',
label='$\mu_0$ ='+str(mu_0[1])+', $\Sigma_0$ =' + str(sigma_0[1])+', $c_1$ ='+str(c1_mg[1])+
'$, c_2$ ='+str(c2_mg[1])+'$, \lambda$ ='+str(lambda_mg[1]))
plt.loglog(theta_amin, xi_p_3_C, '--r',
label='$\mu_0$ ='+str(mu_0[2])+', $\Sigma_0$ =' + str(sigma_0[2])+', $c_1$ ='+str(c1_mg[2])+
'$, c_2$ ='+str(c2_mg[2])+'$, \lambda$ ='+str(lambda_mg[2]))
plt.loglog(theta_amin, xi_p_4_C, '--m',
label='$\mu_0$ ='+str(mu_0[3])+', $\Sigma_0$ =' + str(sigma_0[3])+', $c_1$ ='+str(c1_mg[3])+
'$, c_2$ ='+str(c2_mg[3])+'$, \lambda$ ='+str(lambda_mg[3]))
plt.legend(fontsize=12)
plt.xlabel('Arcminutes', fontsize = 15)
plt.ylabel('$\\xi_+$', fontsize=15)
plt.show()
plt.figure()
plt.loglog(theta_amin, xi_m_GR_C, 'k', label='GR ')
plt.loglog(theta_amin, xi_m_1_C, 'b',
label='$\mu_0$ ='+str(mu_0[0])+', $\Sigma_0$ =' + str(sigma_0[0])+', $c_1$ ='+str(c1_mg[0])+
'$, c_2$ ='+str(c2_mg[0])+'$, \lambda$ ='+str(lambda_mg[0]))
plt.loglog(theta_amin, xi_m_2_C, 'g',
label='$\mu_0$ ='+str(mu_0[1])+', $\Sigma_0$ =' + str(sigma_0[1])+', $c_1$ ='+str(c1_mg[1])+
'$, c_2$ ='+str(c2_mg[1])+'$, \lambda$ ='+str(lambda_mg[1]))
plt.loglog(theta_amin, xi_m_3_C, '--r',
label='$\mu_0$ ='+str(mu_0[2])+', $\Sigma_0$ =' + str(sigma_0[2])+', $c_1$ ='+str(c1_mg[2])+
'$, c_2$ ='+str(c2_mg[2])+'$, \lambda$ ='+str(lambda_mg[2]))
plt.loglog(theta_amin, xi_m_4_C, '--m',
label='$\mu_0$ ='+str(mu_0[3])+', $\Sigma_0$ =' + str(sigma_0[3])+', $c_1$ ='+str(c1_mg[3])+
'$, c_2$ ='+str(c2_mg[3])+'$, \lambda$ ='+str(lambda_mg[3]))
plt.legend(fontsize=12)
plt.xlabel('Arcminutes', fontsize = 15)
plt.ylabel('$\\xi_-$', fontsize=15)
plt.show()
###Output
_____no_output_____
###Markdown
CMB Lensing C_ell's
###Code
# Cl Tracer objects
cmbl_GR_C = ccl.CMBLensingTracer(cosmo_GR_C,1089.)
cmbl_1_C = ccl.CMBLensingTracer(cosmo_1_C,1089.)
cmbl_2_C = ccl.CMBLensingTracer(cosmo_2_C,1089.)
cmbl_3_C = ccl.CMBLensingTracer(cosmo_3_C,1089.)
cmbl_4_C = ccl.CMBLensingTracer(cosmo_4_C,1089.)
Cl_cmb_GR_C = ccl.angular_cl(cosmo_GR_C, cmbl_GR_C, cmbl_GR_C, ell)
Cl_cmb_1_C = ccl.angular_cl(cosmo_1_C, cmbl_1_C, cmbl_1_C, ell)
Cl_cmb_2_C = ccl.angular_cl(cosmo_2_C, cmbl_2_C, cmbl_2_C, ell)
Cl_cmb_3_C = ccl.angular_cl(cosmo_3_C, cmbl_3_C, cmbl_3_C, ell)
Cl_cmb_4_C = ccl.angular_cl(cosmo_4_C, cmbl_4_C, cmbl_4_C, ell)
plt.figure()
plt.loglog(ell, Cl_cmb_GR_C, 'k', label='GR')
plt.loglog(ell, Cl_cmb_1_C, 'b',
label='$\mu_0$ ='+str(mu_0[0])+', $\Sigma_0$ =' + str(sigma_0[0])+', $c_1$ ='+str(c1_mg[0])+
'$, c_2$ ='+str(c2_mg[0])+'$, \lambda$ ='+str(lambda_mg[0]))
plt.loglog(ell, Cl_cmb_2_C, 'g',
label='$\mu_0$ ='+str(mu_0[1])+', $\Sigma_0$ =' + str(sigma_0[1])+', $c_1$ ='+str(c1_mg[1])+
'$, c_2$ ='+str(c2_mg[1])+'$, \lambda$ ='+str(lambda_mg[1]))
plt.loglog(ell, Cl_cmb_3_C, '--r',
label='$\mu_0$ ='+str(mu_0[2])+', $\Sigma_0$ =' + str(sigma_0[2])+', $c_1$ ='+str(c1_mg[2])+
'$, c_2$ ='+str(c2_mg[2])+'$, \lambda$ ='+str(lambda_mg[2]))
plt.loglog(ell, Cl_cmb_4_C, '--m',
label='$\mu_0$ ='+str(mu_0[3])+', $\Sigma_0$ =' + str(sigma_0[3])+', $c_1$ ='+str(c1_mg[3])+
'$, c_2$ ='+str(c2_mg[3])+'$, \lambda$ ='+str(lambda_mg[3]))
plt.xlabel('$\ell$', fontsize=15)
plt.ylabel('$C_\ell$, CMB lensing', fontsize=15)
plt.legend(fontsize=12,loc='lower right')
plt.show()
###Output
_____no_output_____
###Markdown
Number count tracers with magnification bias
###Code
# Redshift array
z = np.linspace(0., 3., 600)
# Number density input
n = np.exp(-((z-0.5)/0.1)**2)
# Bias input
b = np.sqrt(1. + z)
mb = np.exp(1. + z)/10
# ell range input
ell = np.arange(10, 2000)
# Cl Tracer objects
nc_GR_C = ccl.NumberCountsTracer(cosmo_GR_C, has_rsd=False, dndz=(z,n), bias=(z,b), mag_bias=(z,mb))
nc_1_C = ccl.NumberCountsTracer(cosmo_1_C, has_rsd=False, dndz=(z,n), bias=(z,b), mag_bias=(z,mb))
nc_2_C = ccl.NumberCountsTracer(cosmo_2_C, has_rsd=False, dndz=(z,n), bias=(z,b), mag_bias=(z,mb))
nc_3_C = ccl.NumberCountsTracer(cosmo_3_C, has_rsd=False, dndz=(z,n), bias=(z,b), mag_bias=(z,mb))
nc_4_C = ccl.NumberCountsTracer(cosmo_4_C, has_rsd=False, dndz=(z,n), bias=(z,b), mag_bias=(z,mb))
Cl_nc_GR_C = ccl.angular_cl(cosmo_GR_C, nc_GR_C, nc_GR_C, ell)
Cl_nc_1_C = ccl.angular_cl(cosmo_1_C, nc_1_C, nc_1_C, ell)
Cl_nc_2_C = ccl.angular_cl(cosmo_2_C, nc_2_C, nc_2_C, ell)
Cl_nc_3_C = ccl.angular_cl(cosmo_3_C, nc_3_C, nc_3_C, ell)
Cl_nc_4_C = ccl.angular_cl(cosmo_4_C, nc_4_C, nc_4_C, ell)
plt.figure()
plt.loglog(ell, Cl_nc_GR_C, 'k', label='GR')
plt.loglog(ell, Cl_nc_1_C, 'b',
label='$\mu_0$ ='+str(mu_0[0])+', $\Sigma_0$ =' + str(sigma_0[0])+', $c_1$ ='+str(c1_mg[0])+
'$, c_2$ ='+str(c2_mg[0])+'$, \lambda$ ='+str(lambda_mg[0]))
plt.loglog(ell, Cl_nc_2_C, 'g',
label='$\mu_0$ ='+str(mu_0[1])+', $\Sigma_0$ =' + str(sigma_0[1])+', $c_1$ ='+str(c1_mg[1])+
'$, c_2$ ='+str(c2_mg[1])+'$, \lambda$ ='+str(lambda_mg[1]))
plt.loglog(ell, Cl_nc_3_C, '--r',
label='$\mu_0$ ='+str(mu_0[2])+', $\Sigma_0$ =' + str(sigma_0[2])+', $c_1$ ='+str(c1_mg[2])+
'$, c_2$ ='+str(c2_mg[2])+'$, \lambda$ ='+str(lambda_mg[2]))
plt.loglog(ell, Cl_nc_4_C, '--m',
label='$\mu_0$ ='+str(mu_0[3])+', $\Sigma_0$ =' + str(sigma_0[3])+', $c_1$ ='+str(c1_mg[3])+
'$, c_2$ ='+str(c2_mg[3])+'$, \lambda$ ='+str(lambda_mg[3]))
plt.xlabel('$\ell$', fontsize=15)
plt.ylabel('$C_\ell$, clustering', fontsize=15)
plt.legend(fontsize=12,loc='lower right')
plt.show()
###Output
_____no_output_____ |
notebooks/scvi_amortised/cell2location_synthetic_data_scVI_amortised_10x_data_batch_1250_2500_dropout_rate02_n_hidden200_eval.ipynb | ###Markdown
Benchmarking cell2location pyro model using softplus/exp for scales
###Code
import sys, ast, os
#sys.path.insert(1, '/nfs/team205/vk7/sanger_projects/BayraktarLab/cell2location/')
sys.path.insert(1, '/nfs/team205/vk7/sanger_projects/BayraktarLab/scvi-tools/')
import scanpy as sc
import anndata
import pandas as pd
import numpy as np
import os
import matplotlib.pyplot as plt
import matplotlib as mpl
data_type='float32'
#import cell2location_model
#import cell2location_module_scvi
import scvi
import torch
from matplotlib import rcParams
rcParams['pdf.fonttype'] = 42 # enables correct plotting of text
import seaborn as sns
###Output
_____no_output_____
###Markdown
The purpose of the notebook is to benchmark several versions of the model using mouse brain data.
###Code
sc_data_folder = '/nfs/team205/vk7/sanger_projects/cell2location_paper/notebooks/selected_data/mouse_visium_snrna/'
sp_data_folder = '/nfs/team205/vk7/sanger_projects/cell2location_paper/notebooks/selected_results/benchmarking/with_tissue_zones/data/'
results_folder = '/nfs/team205/vk7/sanger_projects/cell2location_paper/notebooks/selected_results/benchmarking/with_tissue_zones/real_mg/pyro/'
###Output
_____no_output_____
###Markdown
Read datasets and train cell2location Data can be downloaded as follows:
```bash
wget https://cell2location.cog.sanger.ac.uk/paper/synthetic_with_tissue_zones/synth_adata_real_mg_20210131.h5ad
wget https://cell2location.cog.sanger.ac.uk/paper/synthetic_with_tissue_zones/training_5705STDY8058280_5705STDY8058281_20210131.h5ad
```
###Code
adata_vis = anndata.read(f'{sp_data_folder}synth_adata_real_mg_20210131.h5ad')
adata_vis.uns['spatial'] = {'x': 'y'}
#adata_vis = adata_vis[adata_vis.obs['sample'].isin([f'exper{i}' for i in range(5,10)]),:]
adata_snrna_raw = anndata.read(f'{sp_data_folder}training_5705STDY8058280_5705STDY8058281_20210131.h5ad')
import scipy
adata_snrna_raw.X = scipy.sparse.csr_matrix(adata_snrna_raw.X)
###Output
_____no_output_____
###Markdown
Add counts matrix as `adata.raw` and build the per-cluster average expression reference (`aver`) used by `setup_anndata` below.
###Code
adata_vis.X = scipy.sparse.csr_matrix(adata_vis.X)
adata_snrna_raw.raw = adata_snrna_raw
adata_vis.raw = adata_vis
# compute average for each cluster
aver = scvi.external.cell2location.compute_cluster_averages(adata_snrna_raw, 'annotation_1')
# make sure the order of genes matches between aver and x_data
aver = aver.loc[adata_vis.var_names, :]
# generate one-hot encoded matrix telling which obs belong to which samples
obs2sample_df = pd.get_dummies(adata_vis.obs['sample'])
###Output
_____no_output_____
###Code
adata_vis
###Output
_____no_output_____
###Markdown
Model training
###Code
adata_vis = scvi.external.cell2location.setup_anndata(adata=adata_vis, cell_state_df=aver, batch_key="sample")
adata_vis.uns['_scvi']
mod = scvi.external.Cell2location(adata_vis, batch_size=2500,
amortised=True,
encoder_kwargs={'n_layers': 1, 'n_hidden': 200,
'dropout_rate': 0.2,
'activation_fn': torch.nn.ReLU},
N_cells_per_location=8)
mod.train(max_epochs=1000, lr=0.01, use_gpu=True)
means = mod.posterior_median(use_gpu = True)
means['w_sf'].shape
mod_m = scvi.external.Cell2location(adata_vis, batch_size=1250,
amortised=True,
encoder_kwargs={'n_layers': 1, 'n_hidden': 200,
'dropout_rate': 0.2,
'activation_fn': torch.nn.ReLU},
N_cells_per_location=8)
mod_m.train(max_epochs=1000, lr=0.01, use_gpu=True)
means_m = mod_m.posterior_median(use_gpu = True)
###Output
_____no_output_____
###Markdown
test Predictive
num_samples = 5
predictive = mod_m.module.create_predictive(num_samples=num_samples, parallel=False)
from scvi.dataloaders import AnnDataLoader
train_dl = AnnDataLoader(adata_vis, shuffle=False, batch_size=500)
for tensor_dict in train_dl:
    args, kwargs = mod_m.module._get_fn_args_from_batch(tensor_dict)
    samples = {
        k: v.detach().cpu().numpy()
        for k, v in predictive(*args, **kwargs).items()
        if k != "obs"
    }
save Pyro param state
model_save_path = os.path.join(save_path, "model_params.pt")
torch.save(model.state_dict(), model_save_path)
amortised_plate_sites = {'name': "obs_plate",
                         'in': ['x_data'],
                         'sites': {
                             "n_s_cells_per_location": 1,
                             "y_s_groups_per_location": 1,
                             "z_sr_groups_factors": 5,
                             "w_sf": 4,
                             "l_s_add": 1,
                         }}
np.sum([np.sum(amortised_plate_sites['sites'][k]) for k in amortised_plate_sites['sites'].keys()]) * 2
create indices for loc and scales of each site
counter = 0
indices = dict()
for site, n_dim in amortised_plate_sites['sites'].items():
    indices[site] = {'locs': np.arange(counter, counter + n_dim),
                     'scales': np.arange(counter + n_dim, counter + n_dim * 2)}
    counter += n_dim * 2
indices
save model
mod_m.save(dir_path='./results/scvi/minibatch_1sample', overwrite=True, save_anndata=False)
load model
mod_m.load(dir_path='./results/scvi/minibatch_1sample', adata=adata_vis, use_gpu=True)
###Code
### Compare ELBO as training progresses
###Output
_____no_output_____
###Markdown
plt.plot(mod.module.history_['train_loss_epoch'].index[200:],
         np.array(mod.module.history_['train_loss_epoch'].values.flatten())[200:]);
plt.plot(mod_m.module.history_['train_loss_epoch'].index[200:],
         np.array(mod_m.module.history_['train_loss_epoch'].values.flatten())[200:]);
plt.legend(labels=['minibatch 2500/25000', 'minibatch 1250/25000']);
plt.xlim(0, len(mod_m.module.history_['train_loss_epoch']));
###Code
plt.plot(mod.module.history_['train_loss_epoch'].index[10:],
np.array(mod.module.history_['train_loss_epoch'].values.flatten())[10:]);
plt.legend(labels=['minibatch 2500/25000']);
plt.xlim(0, len(mod_m.module.history_['train_loss_epoch']));
plt.plot(mod_m.module.history_['train_loss_epoch'].index[40:],
np.array(mod_m.module.history_['train_loss_epoch'].values.flatten())[40:]);
plt.legend(labels=['minibatch 1250/25000']);
plt.xlim(0, len(mod_m.module.history_['train_loss_epoch']));
#plt.plot(range(1, 100), np.array(mod.module.history_)[1:100]);
plt.plot(mod_m.module.history_['train_loss_epoch'].index[1:100],
np.array(mod_m.module.history_['train_loss_epoch'].values.flatten())[1:100]);
plt.legend(labels=['minibatch 1250/25000']);
plt.xlim(0, 100);
###Output
_____no_output_____
###Markdown
Evaluate accuracy using $R^2$
###Code
from re import sub
cell_count = adata_vis.obs.loc[:, ['cell_abundances_' in i for i in adata_vis.obs.columns]]
cell_count.columns = [sub('cell_abundances_', '', i) for i in cell_count.columns]
cell_count_columns = cell_count.columns
cell_proportions = (cell_count.T / cell_count.sum(1)).T
infer_cell_count = pd.DataFrame(means['w_sf'], index=adata_vis.obs_names,
columns=aver.columns)
infer_cell_count = infer_cell_count[cell_count.columns]
infer_cell_proportions = (infer_cell_count.T / infer_cell_count.sum(1)).T
infer_cell_count_m = pd.DataFrame(means_m['w_sf'], index=adata_vis.obs_names,
columns=aver.columns)
infer_cell_count_m = infer_cell_count_m[cell_count.columns]
infer_cell_proportions_m = (infer_cell_count_m.T / infer_cell_count_m.sum(1)).T
infer_cell_count.iloc[0:5,0:5], infer_cell_count_m.iloc[0:5,0:5]
rcParams['figure.figsize'] = 4, 4
rcParams["axes.facecolor"] = "white"
plt.hist2d(cell_count.values.flatten(),
infer_cell_count.values.flatten(),# / np.mean(adata_vis_res.var['gene_level'].values),
bins=[50, 50], norm=mpl.colors.LogNorm());
plt.xlabel('Simulated cell abundance');
plt.ylabel('Estimated cell abundance');
plt.title(r'minibatch 2500/25000, $R^2$: ' \
+ str(np.round(np.corrcoef(cell_count.values.flatten(),
infer_cell_count.values.flatten()), 3)[0,1]));
#plt.gca().set_aspect('equal', adjustable='box')
plt.tight_layout()
#plt.savefig(fig_path + '/Cell_density_cor.pdf')
rcParams['figure.figsize'] = 4, 4
rcParams["axes.facecolor"] = "white"
plt.hist2d(cell_count.values.flatten(),
infer_cell_count_m.values.flatten(),# / np.mean(adata_vis_res.var['gene_level'].values),
bins=[50, 50], norm=mpl.colors.LogNorm());
plt.xlabel('Simulated cell abundance');
plt.ylabel('Estimated cell abundance');
plt.title(r'minibatch 1250/25000, $R^2$: ' \
+ str(np.round(np.corrcoef(cell_count.values.flatten(),
infer_cell_count_m.values.flatten()), 3)[0,1]));
#plt.gca().set_aspect('equal', adjustable='box')
plt.tight_layout()
#plt.savefig(fig_path + '/Cell_density_cor.pdf')
###Output
_____no_output_____
###Markdown
Original implementation of cell2location in pymc3 has $R^2 = 0.791$. Evaluate with PR curves
###Code
import matplotlib as mpl
from matplotlib import pyplot as plt
import numpy as np
from scipy import interpolate
with plt.style.context('seaborn'):
seaborn_colors = mpl.rcParams['axes.prop_cycle'].by_key()['color']
def compute_precision_recall(pos_cell_count, infer_cell_proportions, mode='macro'):
r""" Plot precision-recall curves on average and for each cell type.
:param pos_cell_count: binary matrix showing which cell types are present in which locations
:param infer_cell_proportions: inferred locations (the higher the more cells)
"""
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import average_precision_score
### calculating ###
predictor = infer_cell_proportions.values + np.random.gamma(20, 1e-12,
infer_cell_proportions.shape)
# For each cell type
precision = dict()
recall = dict()
average_precision = dict()
for i, c in enumerate(infer_cell_proportions.columns):
precision[c], recall[c], _ = precision_recall_curve(pos_cell_count[:, i],
predictor[:, i])
average_precision[c] = average_precision_score(pos_cell_count[:, i], predictor[:, i], average=mode)
average_precision["averaged"] = average_precision_score(pos_cell_count, predictor,
average=mode)
# A "micro-average": quantifying score on all classes jointly
if mode == 'micro':
precision_, recall_, threshold = precision_recall_curve(pos_cell_count.ravel(),
predictor.ravel())
#precision_[threshold < 0.1] = 0
precision["averaged"], recall["averaged"] = precision_, recall_
elif mode == 'macro':
precisions = []
recall_grid = np.linspace(0, 1, 2000)
for i, c in enumerate(infer_cell_proportions.columns):
f = interpolate.interp1d(recall[c], precision[c])
precision_interp = f(recall_grid)
precisions.append(precision_interp)
precision["averaged"] = np.mean(precisions, axis=0)
recall['averaged'] = recall_grid
return precision, recall, average_precision
def compare_precision_recall(pos_cell_count, infer_cell_proportions,
method_title, title='',
legend_loc=(0, -.37),
colors=sc.pl.palettes.default_102,
mode='macro', curve='PR'):
r""" Plot precision-recall curves on average and for each cell type.
:param pos_cell_count: binary matrix showing which cell types are present in which locations
:param infer_cell_proportions: inferred locations (the higher the more cells),
list of inferred parameters for several methods
:param method_title: title for each infer_cell_proportions
:param title: plot title
"""
# setup plot details
from itertools import cycle
colors = cycle(colors)
lines = []
labels = []
roc = {}
### plotting ###
for i, color in zip(range(len(infer_cell_proportions)), colors):
if curve == 'PR':
precision, recall, average_precision = compute_precision_recall(pos_cell_count,
infer_cell_proportions[i],
mode=mode)
xlabel = 'Recall'
ylabel = 'Precision'
l, = plt.plot(recall["averaged"], precision["averaged"], color=color, lw=3)
elif curve == 'ROC':
FPR, TPR, average_precision = compute_roc(pos_cell_count,
infer_cell_proportions[i],
mode=mode)
xlabel = 'FPR'
ylabel = 'TPR'
l, = plt.plot(FPR["averaged"], TPR["averaged"], color=color, lw=3)
lines.append(l)
labels.append(method_title[i] + '(' + curve + ' score = {0:0.2f})'
''.format(average_precision["averaged"]))
roc[method_title[i]] = average_precision["averaged"]
fig = plt.gcf()
fig.subplots_adjust(bottom=0.25)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.title(title)
if legend_loc is not None:
plt.legend(lines, labels, loc=legend_loc, prop=dict(size=8))
#plt.show()
return roc
rcParams['figure.figsize'] = 6, 3
rcParams['font.size'] = 8
results = [
infer_cell_count,
infer_cell_count_m
]
results_proportion = [
infer_cell_proportions,
infer_cell_proportions_m
]
names = [
'minibatch 2500/25000 obs',
'minibatch 1250/25000 obs',
]
compare_precision_recall(cell_count.values > 0.1,
results,
method_title=names,
legend_loc=(1.1, 0.5))
plt.tight_layout();
plt.title('Absolute cell abundance');
plt.show();
compare_precision_recall(cell_count.values > 0.1,
results_proportion,
method_title=names,
legend_loc=(1.1, 0.5))
plt.tight_layout();
plt.title('Relative cell abundance');
plt.show();
###Output
_____no_output_____
###Markdown
Original implementation of cell2location in pymc3 has PR score = 0.66. $R^2$ stratified by abundance and regional pattern
###Code
from scipy.spatial.distance import jensenshannon
def hist_obs_sim(cell_count, infer_cell_count,
xlab='Simulated cell proportion',
ylab='Estimated cell proportion',
title='', compute_kl=True, equal=True, max_val=1):
cor = np.round(np.corrcoef(cell_count.values.flatten(),
infer_cell_count.values.flatten()), 3)[0,1]
title = title +'\n'+ r'$R^2$: ' + str(cor)
if compute_kl:
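        # Jensen-Shannon distance between the simulated and estimated cell composition
        # of each location (NaN entries from empty locations are dropped before averaging).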
js = np.array([jensenshannon(cell_count.values[r,:], infer_cell_count.values[r,:])
for r in range(cell_count.shape[0])])
js = np.mean(js[~np.isnan(js)])
title = title + '\nAverage JSD: ' + str(np.round(js, 2))
plt.hist2d(cell_count.values.flatten(),
infer_cell_count.values.flatten(),
bins=[35, 35], norm=mpl.colors.LogNorm());
plt.xlabel(xlab);
plt.ylabel(ylab);
if equal:
plt.gca().set_aspect('equal', adjustable='box')
plt.xlim(0, max_val);
plt.ylim(0, max_val);
plt.title(title);
def hist_by_category(cell_count, infer_cell_count, design,
xlab='Simulated cell proportion',
ylab='Estimated cell proportion',
nrow=1, ncol=4, compute_kl=True, equal=True):
design_loc = design.loc[cell_count.columns,:]
max_val = np.array([cell_count.values.max(), infer_cell_count.values.max()]).max()
if max_val < 1:
max_val = 1
plt.subplot(nrow, ncol, 1)
ind = (design_loc['is_uniform'] * design_loc['is_high_density']).values.astype(bool)
hist_obs_sim(cell_count.loc[:,ind], infer_cell_count.loc[:,ind],
xlab=xlab,
ylab=ylab,
title=f'Uniform & high abundance ({ind.sum()})',
compute_kl=compute_kl, equal=equal, max_val=max_val)
plt.subplot(nrow, ncol, 2)
ind = (design_loc['is_uniform'] * (1 - design_loc['is_high_density'])).values.astype(bool)
hist_obs_sim(cell_count.loc[:,ind], infer_cell_count.loc[:,ind],
xlab=xlab,
ylab=ylab,
title=f'Uniform & low abundance ({ind.sum()})',
compute_kl=compute_kl, equal=equal, max_val=max_val)
plt.subplot(nrow, ncol, 3)
ind = ((1 - design_loc['is_uniform']) * design_loc['is_high_density']).values.astype(bool)
hist_obs_sim(cell_count.loc[:,ind], infer_cell_count.loc[:,ind],
xlab=xlab,
ylab=ylab,
title=f'Sparse & high abundance ({ind.sum()})',
compute_kl=compute_kl, equal=equal, max_val=max_val)
plt.subplot(nrow, ncol, 4)
ind = ((1 - design_loc['is_uniform']) * (1 - design_loc['is_high_density'])).values.astype(bool)
hist_obs_sim(cell_count.loc[:,ind], infer_cell_count.loc[:,ind],
xlab=xlab,
ylab=ylab,
title=f'Sparse & low abundance ({ind.sum()})',
compute_kl=compute_kl, equal=equal, max_val=max_val)
rcParams['figure.figsize'] = 18,4.5
rcParams["axes.facecolor"] = "white"
hist_by_category(cell_proportions, infer_cell_proportions, adata_vis.uns['design']['cell_types2zones'],
xlab='Simulated cell proportion',
ylab='Estimated cell proportion',
nrow=1, ncol=4, equal=True)
plt.tight_layout();
plt.show();
hist_by_category(cell_proportions, infer_cell_proportions_m, adata_vis.uns['design']['cell_types2zones'],
xlab='Simulated cell proportion',
ylab='Estimated cell proportion',
nrow=1, ncol=4, equal=True)
plt.tight_layout();
plt.show();
import sys
for module in sys.modules:
try:
print(module,sys.modules[module].__version__)
except:
try:
if type(modules[module].version) is str:
print(module,sys.modules[module].version)
else:
print(module,sys.modules[module].version())
except:
try:
print(module,sys.modules[module].VERSION)
except:
pass
###Output
ipykernel 5.3.4
ipykernel._version 5.3.4
json 2.0.9
re 2.2.1
IPython 7.20.0
IPython.core.release 7.20.0
logging 0.5.1.2
zlib 1.0
traitlets 5.0.5
traitlets._version 5.0.5
argparse 1.1
ipython_genutils 0.2.0
ipython_genutils._version 0.2.0
platform 1.0.8
pygments 2.7.4
pexpect 4.8.0
ptyprocess 0.7.0
decorator 4.4.2
pickleshare 0.7.5
backcall 0.2.0
prompt_toolkit 3.0.8
wcwidth 0.2.5
jedi 0.17.0
parso 0.8.1
colorama 0.4.4
ctypes 1.1.0
_ctypes 1.1.0
urllib.request 3.7
jupyter_client 6.1.7
jupyter_client._version 6.1.7
zmq 20.0.0
zmq.backend.cython 40303
zmq.backend.cython.constants 40303
zmq.sugar 20.0.0
zmq.sugar.constants 40303
zmq.sugar.version 20.0.0
jupyter_core 4.7.1
jupyter_core.version 4.7.1
_curses b'2.2'
dateutil 2.8.1
six 1.15.0
decimal 1.70
_decimal 1.70
distutils 3.7.9
scanpy 1.7.0
scanpy._metadata 1.7.0
packaging 20.9
packaging.__about__ 20.9
importlib_metadata 1.7.0
csv 1.0
_csv 1.0
numpy 1.20.0
numpy.core 1.20.0
numpy.core._multiarray_umath 3.1
numpy.lib 1.20.0
numpy.linalg._umath_linalg 0.1.5
scipy 1.6.0
anndata 0.7.5
anndata._metadata 0.7.5
h5py 3.1.0
cached_property 1.5.2
natsort 7.1.1
pandas 1.2.1
pytz 2021.1
pandas.compat.numpy.function 1.20.0
sinfo 0.3.1
stdlib_list v0.8.0
numba 0.52.0
yaml 5.3.1
llvmlite 0.35.0
pkg_resources._vendor.appdirs 1.4.3
pkg_resources.extern.appdirs 1.4.3
pkg_resources._vendor.packaging 20.4
pkg_resources._vendor.packaging.__about__ 20.4
pkg_resources.extern.packaging 20.4
pkg_resources._vendor.pyparsing 2.2.1
pkg_resources.extern.pyparsing 2.2.1
numba.misc.appdirs 1.4.1
sklearn 0.24.1
sklearn.base 0.24.1
joblib 1.0.0
joblib.externals.loky 2.9.0
joblib.externals.cloudpickle 1.6.0
scipy._lib.decorator 4.0.5
scipy.linalg._fblas b'$Revision: $'
scipy.linalg._flapack b'$Revision: $'
scipy.linalg._flinalg b'$Revision: $'
scipy.special.specfun b'$Revision: $'
scipy.ndimage 2.0
scipy.optimize.minpack2 b'$Revision: $'
scipy.sparse.linalg.isolve._iterative b'$Revision: $'
scipy.sparse.linalg.eigen.arpack._arpack b'$Revision: $'
scipy.optimize._lbfgsb b'$Revision: $'
scipy.optimize._cobyla b'$Revision: $'
scipy.optimize._slsqp b'$Revision: $'
scipy.optimize._minpack 1.10
scipy.optimize.__nnls b'$Revision: $'
scipy.linalg._interpolative b'$Revision: $'
scipy.integrate._odepack 1.9
scipy.integrate._quadpack 1.13
scipy.integrate._ode $Id$
scipy.integrate.vode b'$Revision: $'
scipy.integrate._dop b'$Revision: $'
scipy.integrate.lsoda b'$Revision: $'
scipy.interpolate._fitpack 1.7
scipy.interpolate.dfitpack b'$Revision: $'
scipy.stats.statlib b'$Revision: $'
scipy.stats.mvn b'$Revision: $'
sklearn.utils._joblib 1.0.0
leidenalg 0.8.3
igraph 0.8.3
texttable 1.6.3
igraph.version 0.8.3
matplotlib 3.3.4
pyparsing 2.4.7
cycler 0.10.0
kiwisolver 1.3.1
PIL 8.1.0
PIL._version 8.1.0
PIL.Image 8.1.0
xml.etree.ElementTree 1.3.0
cffi 1.14.4
tables 3.6.1
numexpr 2.7.2
legacy_api_wrap 1.2
get_version 2.1
scvi 0.0.0
torch 1.8.1+cu102
torch.version 1.8.1+cu102
tqdm 4.56.0
tqdm.cli 4.56.0
tqdm.version 4.56.0
tqdm._dist_ver 4.56.0
ipywidgets 7.6.3
ipywidgets._version 7.6.3
_cffi_backend 1.14.4
pycparser 2.20
pycparser.ply 3.9
pycparser.ply.yacc 3.10
pycparser.ply.lex 3.10
pyro 1.6.0+9e1fd393
opt_einsum v3.3.0
pyro._version 1.6.0+9e1fd393
pytorch_lightning 1.2.7
pytorch_lightning.info 1.2.7
torchmetrics 0.2.0
fsspec 0.8.5
tensorboard 2.4.1
tensorboard.version 2.4.1
google.protobuf 3.14.0
tensorboard.compat.tensorflow_stub stub
tensorboard.compat.tensorflow_stub.pywrap_tensorflow 0
seaborn 0.11.1
seaborn.external.husl 2.1.0
statsmodels 0.12.2
|
Business Team Guide.ipynb | ###Markdown
Imports
###Code
import pandas as pd
import numpy as np
###Output
_____no_output_____
###Markdown
Functions
###Code
def get_data (path):
df = pd.read_csv(path)
df.head()
df.rename(columns = {'price':'buying_price'}, inplace = True)
return df
def col_price_median(data):
df = data[['buying_price', 'zipcode']].groupby('zipcode').median().reset_index()
df.columns = df.columns.str.replace('buying_price', 'price_median')
df2 = pd.merge(data,df,on='zipcode',how='inner')
return df2
def col_status(df2):
for i in range( len(df2) ):
if (df2.loc[i, 'buying_price'] < df2.loc[i, 'price_median']) & (df2.loc[i, 'condition'] > 2):
df2.loc[i, 'status'] = 'buy'
else:
df2.loc[i, 'status'] = 'not_buy'
return df2
def col_season(data):
data['date'] = pd.to_datetime(data['date'], format='%Y-%m-%d')
data['season'] = data['date'].apply(lambda x:
'winter'
if ts('2014-05-02') <= x <= ts('2014-05-31') else
'summer'
if ts('2014-06-01') <= x <= ts('2014-11-30') else
'winter')
return data
def ts(obj):
return pd.to_datetime(obj)
def col_season_median(data):
df = data[['season', 'zipcode', 'buying_price']].groupby(['season', 'zipcode']).median()
df.columns = df.columns.str.replace('buying_price', 'season_median')
df2 = pd.merge(data,df,on=['zipcode', 'season'],how='inner')
return df2
def col_selling_price(data):
data['selling_price'] = float(0)
for i in range( len(data) ):
if (data.loc[i, 'buying_price'] < data.loc[i, 'season_median']) & (data.loc[i, 'status'] == 'buy'):
data.loc[i, 'selling_price'] = float(data.loc[i, 'buying_price']) * 1.30
if (data.loc[i, 'buying_price'] >= data.loc[i, 'season_median']) & (data.loc[i, 'status'] == 'buy'):
data.loc[i, 'selling_price'] = float(data.loc[i, 'buying_price']) * 1.10
return data
def col_profit(data):
data['profit'] = float(0)
for i in range(len(data)):
if data.loc[i, 'selling_price'] != 0:
data.loc[i, 'profit'] = float(data.loc[i, 'selling_price']) - float(data.loc[i, 'buying_price'])
else:
None
return data
###Output
_____no_output_____
###Markdown
Data Extraction
###Code
path = 'kc_house_data.csv'
data = get_data(path)
###Output
_____no_output_____
###Markdown
Data Visualization
###Code
data.columns
data
data.dtypes
###Output
_____no_output_____
###Markdown
Data Transformation
###Code
# setting the price_median column
data = col_price_median(data)
# setting the status column - ready for the buying report
data = col_status(data)
# setting the season column
data = col_season(data)
# setting the season_median column - price median by season and zipcode
data = col_season_median(data)
# setting the selling_price column - ready for the selling report
data = col_selling_price(data)
# setting the profit column
data = col_profit(data)
report = data[['id', 'zipcode','season', 'price_median', 'buying_price','status', 'selling_price', 'profit']]
report.to_csv('buying-selling-report.csv',index=False)
report.to_excel('buying-selling-report.xlsx', index=False)
data
# get the buying price and the selling_price
df = data.loc[data['status'] == 'buy', ['buying_price', 'selling_price', 'profit']]
d = {'Nº of Properties': 10579,
'Total Cost (US$)': [df['buying_price'].sum()],
'Sales Revenue (US$)': [df['selling_price'].sum()],
     'Total Profit (US$)': [df['profit'].sum()]}
# set float configuration
pd.set_option('display.float_format', lambda x: '%.2f' % x)
df2 = pd.DataFrame(data=d)
print(df2.to_markdown())
df2
###Output
|    |   Nº of Properties |   Total Cost (US$) |   Sales Revenue (US$) |   Total Profit (US$) |
|---:|-------------------:|-------------------:|----------------------:|--------------:|
| 0 | 10579 | 4.09421e+09 | 5.29416e+09 | 1.19995e+09 |
|
content/2018-10-02-Pinning-data-to-GPU.ipynb | ###Markdown
The traditional and recommended data pipeline for deep learning involves pre-processing the data on CPU (data augmentation, cropping, etc.), then loading small batches of pre-processed data on the GPU. There are several good reasons for this:

* The datasets are often huge and cannot fit on the GPU memory.
* The networks are big and the memory transfer overhead is negligible compared to the network computations.

However this does not always apply. If the dataset is small enough to fit on the GPU memory, or the network computation time is of the same order as the memory transfer overhead, we start to think about doing the pre-processing directly on GPU.

*Pre-processing on CPU, training on GPU and idle times (figure from [Tensorflow documentation](https://www.tensorflow.org/performance/datasets_performance))*

**Some context on our use case**: We want to train a network on 3D images that are too big to be fed directly to the network. Our current pipeline is to crop our big images on CPU before feeding the crops one by one to the network training on GPU. First, the extraction of crops turns out to be expensive on CPU (of the same order of magnitude as our network computations) and easily parallelizable on a GPU. Second, this scheme involves many small CPU-GPU memory transfers (one per crop) which we would like to avoid, as it costs a lot of time. Instead we want to transfer a handful of big images to the GPU in one shot, crop them on the GPU and feed them to the network *without going back to the CPU*. The cropping part involves writing our own custom CUDA kernel and integrating it in Tensorflow or PyTorch. We won't talk about this here. Let's focus on the data movement part.

**To summarize what we want to achieve without the context details:**

1. Load a batch of data on CPU.
2. Transfer the batch to GPU.
3. For each image in that batch:
   1. Do some pre-processing on GPU, which outputs a batch of possibly unknown length (e.g. the number of crops might not be deterministic).
   2. **Pin the data to the GPU** (i.e. prevent it from going back to CPU).
   3. Use the pre-processed batch to do further computations on **minibatches** (such as training a network).

We will go over a toy example for this pipeline using both [Tensorflow](In-Tensorflow) and [PyTorch](In-PyTorch).

*__Important warning__: If you work with more traditional 2D images you might want to use the recent [DALI](https://github.com/NVIDIA/DALI) library from NVIDIA. It solves exactly this issue: pre-processing the data on GPU before feeding it to a deep learning framework. They have bindings to [TensorFlow](https://docs.nvidia.com/deeplearning/sdk/dali-archived/dali_01_beta/docs/examples/tensorflow/tensorflow-resnet50.html) and PyTorch, too. In our case 3D images are not (yet) supported by DALI and no short-term implementation is planned, which explains why we have to tackle this 'by hand'.*

In Tensorflow

It turns out to be surprisingly hard in Tensorflow. First, it is hard to determine whether a Tensorflow operation implementation is available for GPU. The only way is to check the Github repository and look for a CUDA kernel corresponding to the operation. Second, it turns out most pre-processing operations such as `tf.train.batch` are implemented on CPU. (Random sidenote: random operations such as `tf.random_uniform` or `tf.random_crop` also seem to be only available on CPU.) Of course, Tensorflow recommends that pre-processing takes place on CPU...
What it means for us: we might do our pre-processing on GPU, but as soon as we try to batch it for the actual computation it will be sent back to CPU.

**The only way to pin data to the GPU in Tensorflow is to declare it as a `tf.constant`.** It gives rise to a convoluted but working pipeline: load a batch of data on GPU as a `tf.constant`, do the preprocessing on GPU, then use a placeholder for the index that defines a minibatch. This approach suggested in [this blog post](https://eklitzke.org/pinning-gpu-memory-in-tensorflow) works well, but one detail that is left out is how to change our batch of data: once it has been consumed by the network, how do we proceed to the next batch of data and declare it as another `tf.constant`? How do we run the network on that new constant? As you may know, once the graph has been defined, Tensorflow freezes it and always runs the same graph.

The answer is to do some surgery on the Tensorflow computation graph: for each batch of data, remove the node for the `tf.constant` and replace it with the new batch.

Let's demonstrate with a **toy example** how to do it in practice. First let us define our data: an array of shape (100, 4, 4). We want to transfer it to GPU in batches of 20, do some pre-processing and then feed it to the network one by one.
###Code
import numpy as np
import tensorflow as tf
# The size of each initial batch.
BATCH_SIZE = 20
# The size of minibatch size which we want to pre-process.
MINIBATCH_SIZE = 1
# Initial number of images/data.
N = 100
# Create the dataset in CPU memory
np_data = np.array(range(N*4*4), dtype=np.float32).reshape(N, 4, 4)
###Output
_____no_output_____
###Markdown
Now we define the computation graph:
###Code
with tf.Graph().as_default() as g:
# Load a batch of data on GPU
tf_data = tf.constant(np_data[0:BATCH_SIZE], dtype=tf.float32, name='data')
# Index of the minibatch inside the current batch
ix = tf.placeholder(shape=(), dtype=tf.int32, name='ix')
# ix = tf.constant(0, dtype=tf.int32, name='ix')
# Select a single image from that batch = shape (1, 3, 3)
batch = tf.slice(tf_data, [MINIBATCH_SIZE * ix, 0, 0], [MINIBATCH_SIZE, -1, -1], name='batch')
# ...
# Do some pre-processing here on the batch, which outputs a minibatch of size (4, 2, 2)
# ...
minibatch = tf.reshape(batch, (-1, 2, 2))[:4]
# Do something with the minibatch - here dummy computation
# If we wanted to work on the minibatch slice by slice, we
# could have another index placeholder
outp = tf.reduce_sum(tf.square(minibatch), name='outp')
# Save graph definition
gdef = g.as_graph_def()
###Output
_____no_output_____
###Markdown
`ix` is a placeholder for the index inside the current batch. The batch data is defined as a `tf.constant` to force it to stay on GPU once it has been moved there. We use `tf.slice` to extract the data corresponding to our index `ix` from our initial batch, for the pre-processing step. After pre-processing we end up with a `minibatch` which is made of several images. `outp` performs some dummy computation on this minibatch. We save the graph definition in `gdef` variable for our later surgery.
###Code
tf.reset_default_graph()
with tf.Session() as sess:
# Set tf.AUTO_REUSE to be allowed to re-import the graph at each batch
with tf.variable_scope('', reuse=tf.AUTO_REUSE):
# Loop over batches of data of size BATCH_SIZE
        for idx in range(N // BATCH_SIZE):
new_data = tf.constant(np_data[BATCH_SIZE*idx:BATCH_SIZE*(idx+1)], dtype=tf.float32, name='data%d' % idx)
tf.import_graph_def(gdef, input_map={'data:0': new_data}, name='')
# If we wanted to train a network we should save/restore weights
# at this level.
# sess.run(tf.global_variables_initializer())
# For each batch, we are going to run the computation graph on a MINIBATCH_SIZE sample
            for i in range(BATCH_SIZE // MINIBATCH_SIZE):
o_tensor = tf.get_default_graph().get_tensor_by_name('outp:0' if idx == 0 else 'outp_%d:0' % idx)
o = sess.run([o_tensor], feed_dict={tf.get_default_graph().get_tensor_by_name('ix:0' if idx == 0 else 'ix_%d:0' % idx): i})
###Output
_____no_output_____
###Markdown
The key to the surgery on the TF computation graph lies in `tf.import_graph_def`. We use the keyword argument `input_map` to map the `data:0` constant node to a new constant node which holds the next batch of data. Note that the `name` argument should be set to an empty string, or all the variables will have an additional name scope appended to their names.

*__Warning__: `tf.import_graph_def` only restores the graph, it does not restore variable values. If we wanted to train a real network, we should store all the weights for each batch of data and restore them after we do our surgery on the graph. For the sake of simplicity we leave this out to the reader. Please note that it can be yet another downside of this method, since storing/restoring weights involves additional memory transfers between CPU/GPU.*

**Profiling**

If we time it using the `nvprof` profiler, we can see that there are only 5 host to device transfers (i.e. CPU to GPU) as expected. There are however still 100 transfers from device to host (GPU to CPU): every time we call `sess.run` in Tensorflow, after the computation graph is executed all the tensors that were requested are brought back to CPU (and each tensor brought back to CPU takes 1 call to `CUDA memcpy DtoH`; in our case we only asked for the output tensor).

```bash
            Type  Time(%)      Time  Calls       Avg       Min       Max  Name
                   29.61%  113.89us    100  1.1380us  1.0880us  1.5040us  [CUDA memcpy DtoH]
                    1.59%  6.1120us      5  1.2220us  1.1200us  1.4080us  [CUDA memcpy HtoD]
```

As you can see any data transfer will take at least 1 microsecond, no matter how small the data is. Let us increase the dataset to a batch size of 200 and 1000 entries, keeping the same ratio 1:5 between the batch size and the dataset size. Now we can clearly see the difference:

```bash
            Type  Time(%)      Time  Calls       Avg       Min       Max  Name
                   30.19%  1.1380ms   1000  1.1370us  1.0870us  4.3200us  [CUDA memcpy DtoH]
                    0.30%  11.296us      5  2.2590us  2.2400us  2.2720us  [CUDA memcpy HtoD]
```

Despite the data size being 10 times bigger in HtoD transfers, the average time for each call is only about twice as big. If we had kept our 'naive' scheme, sending the minibatch data one by one to the GPU, it would have increased similarly to the current DtoH transfers, by a factor of 10. So using this strategy already cuts by almost half the memory transfer time needed to achieve our goal!

In PyTorch

PyTorch is meant to be more flexible and has more of a DIY spirit than Tensorflow, so it is not surprising that this pipeline is much easier to achieve in PyTorch.
###Code
import numpy as np
import torch
import torch.utils.data
# The size of each initial batch.
BATCH_SIZE = 20
# The size of each minibatch which we want to pre-process.
MINIBATCH_SIZE = 1
# Initial number of images/data.
N = 100
###Output
_____no_output_____
###Markdown
Then we create a dataset:
###Code
# Create a dataset on CPU
np_data = np.array(range(N*4*4), dtype=np.float32).reshape(N, 4, 4)
# Load to Torch tensor
data = torch.from_numpy(np_data)
dataset = torch.utils.data.TensorDataset(data)
###Output
_____no_output_____
###Markdown
Creating batches from the dataset is simple. With `pin_memory=True` the `DataLoader` places each batch in pinned (page-locked) host memory, which speeds up the subsequent copy to the GPU:
###Code
# Prepare batches
batch = torch.utils.data.DataLoader(dataset, batch_size=BATCH_SIZE, pin_memory=True)
###Output
_____no_output_____
###Markdown
Now we can iterate over the batches and do our pre-processing:
###Code
# Iterate over batches
for i, data in enumerate(batch):
image, = data
# Load the batch to GPU
image = image.cuda()
# Slice into chunks
    chunks = torch.chunk(image, BATCH_SIZE // MINIBATCH_SIZE, dim=0)
for c in chunks:
# ...
# Do some pre-processing and output a minibatch
# ...
minibatch = c.view((-1, 2, 2))[:4]
# If we wanted to work on the minibatch images one by one we could use
# torch.chunk again.
output = torch.sum(torch.sqrt(minibatch))
###Output
_____no_output_____ |
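###Markdown
A small follow-up on this design choice (our addition, not something benchmarked in this notebook): since the `DataLoader` was created with `pin_memory=True`, the batches already sit in page-locked host memory, so the host-to-device copy can also be made asynchronous with respect to the host by passing `non_blocking=True` to `.cuda()`. A minimal sketch of the same loop:
```python
# Sketch only: identical loop, with an asynchronous host-to-device copy.
for i, data in enumerate(batch):
    image, = data
    # The copy can overlap with other host-side work because the source is pinned memory.
    image = image.cuda(non_blocking=True)
    # ... same GPU pre-processing and dummy computation as above ...
```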
4. Chemical Reactions/4. Chemical Reactions.ipynb | ###Markdown
Slideshow

Run the next cell or run this command in a terminal window:

```bash
jupyter nbconvert "Chapter 11 - Chemical Reactions and Richardson Ellingham Diagrams.ipynb" --to slides --post serve
```
###Code
# !jupyter nbconvert "Chapter 11 - Chemical Reactions and Richardson Ellingham Diagrams.ipynb" --to slides --post serve
###Output
_____no_output_____
###Markdown
11. Thermodynamics of Chemical Reactions

Outline of the chapter:
* Independent Chemical Reactions
* Reaction Equilibrium
* Mass Constraints
* Affinity of Reactions
* Examples

The treatment here (and the examples) are based on DeHoff's [Thermodynamics in Materials Science][dehoff].

[dehoff]: https://www.amazon.com/Thermodynamics-Materials-Science-Second-Robert/dp/0849340659

Molecules, Compounds, and Chemical Reactions

A chemical reaction is a rearrangement of the atoms in the system and can be succinctly expressed, for example, by:

$$\mathrm{C} + \mathrm{O_2} \rightleftharpoons \mathrm{CO_2}$$

and

$$\mathrm{2H_2} + \mathrm{O_2} \rightleftharpoons \mathrm{2H_2O}$$

* Chemical reactions are statements of mass and charge balance.
* The coefficients may be divided or multiplied by any constant value without loss of generality.
* One may think of these coefficients as molar quantities.

$\require{mhchem}$ The concept of degrees of freedom can be used in the study of chemical reactions. We define:
* $E$ is the number of elements ($\ce{H, O}$, etc.)
* $C$ is the number of components ($\ce{H2, O2, H2O}$)
* $R$ is the number of possible reactions (degrees of freedom)

$$R = C - E$$

For a system containing C and O $(E=2)$ that contains the molecular species $\mathrm{O_2}$, $\mathrm{CO_2}$, and $\mathrm{CO}$ $(C=3)$ we have a single independent reaction $(R = 3 - 2 = 1)$:

$$\mathrm{2CO} + \mathrm{O_2} \rightleftharpoons \mathrm{2CO_2}$$

If the system also contains $\mathrm{C}$ as a chemical component then we can write two independent reactions:

$$\mathrm{C} + \mathrm{O_2} \rightleftharpoons \mathrm{CO_2}$$

$$\mathrm{2C} + \mathrm{O_2} \rightleftharpoons \mathrm{2CO}$$

These are referred to as _multivariant_ interacting systems.

Reaction Equilibrium

We will now derive the thermodynamic equilibrium of the following reaction using an isolated system:

$$\mathrm{2CO} + \mathrm{O_2} \rightleftharpoons \mathrm{2CO_2}$$

To describe the equilibrium, we use the combination of the first and second law of thermodynamics,

$$dU = \delta Q + \delta W.$$

Assuming a reversible process, we use

$$\delta Q = T dS$$

and

$$\delta W = -p dV$$

giving

$$dU = TdS - p dV.$$

If the system is multicomponent and single phase we can write:

$$dU = T dS - p dV + \sum_{k=1}^{c}{\mu_k dn_k}$$

Explicitly in the components of our gaseous system we have:

$$dS = \frac{1}{T}dU + \frac{P}{T}dV - \frac{1}{T}[\mu_\mathrm{CO} dn_\mathrm{CO} + \mu_\mathrm{O_2} dn_\mathrm{O_2} + \mu_\mathrm{CO_2} dn_\mathrm{CO_2}].$$

For an isolated system, the energy and volume are conserved:

$$dU = 0,$$

$$dV = 0.$$

For an isolated system, the entropy is a maximum in equilibrium:

$$dS_\mathrm{iso} = 0.$$

Hence,

$$dS = - \frac{1}{T}[\mu_\mathrm{CO} dn_\mathrm{CO} + \mu_\mathrm{O_2} dn_\mathrm{O_2} + \mu_\mathrm{CO_2} dn_\mathrm{CO_2}].$$

Mass Constraints

Another constraint of an isolated system is that matter does not cross the boundary.
If the system is non-reacting then the number of molecular species is constant:

$$dn_k = 0 \quad (k=1, 2, \ldots, c).$$

However, in a reacting system this is not true:

$$dn_k \neq 0 \quad (k=1, 2, \ldots, c).$$

However, in a reacting system, the number of atoms, $n_i$, of each species, $i$, _does_ remain constant:

$$dn_i = 0 \quad (i=1, 2, \ldots, e).$$

Counting the total number of carbon and oxygen atoms in our hypothetical reaction:

$$\mathrm{2CO} + \mathrm{O_2} \rightleftharpoons \mathrm{2CO_2}$$

we get the following relations:

$$n_\mathrm{C} = n_\mathrm{CO_2} + n_\mathrm{CO}$$

$$n_\mathrm{O} = n_\mathrm{CO} + 2n_\mathrm{CO_2} + 2n_\mathrm{O_2}.$$

Enforcing the isolation constraints

$$dn_\mathrm{C} = dn_\mathrm{O} = 0,$$

we obtain

$$dn_\mathrm{CO} = - dn_\mathrm{CO_2}$$

and

$$dn_\mathrm{O_2} = - \frac{1}{2}dn_\mathrm{CO_2}$$

This result shows that for a system with one independent chemical reaction, the number of moles of only one component may be varied independently.

Revisiting the result for our combined first and second law for an isolated system,

$$dS_{iso} = \frac{1}{T}(0) + \frac{P}{T}(0) - \frac{1}{T} \left[\mu_\mathrm{CO} dn_\mathrm{CO} + \mu_\mathrm{O_2} dn_\mathrm{O_2} + \mu_\mathrm{CO_2} dn_\mathrm{CO_2} \right],$$

we can now substitute our mass constraints and obtain

$$dS_{iso} = \frac{1}{T}(0) + \frac{P}{T}(0) - \frac{1}{T} \left[ \mu_\mathrm{CO} (- dn_\mathrm{CO_2}) + \mu_\mathrm{O_2} \left(- \frac{1}{2}dn_\mathrm{CO_2} \right) + \mu_\mathrm{CO_2} dn_\mathrm{CO_2} \right],$$

which simplifies to

$$dS_{iso} = \frac{1}{T}(0) + \frac{P}{T}(0) - \frac{1}{T} \underbrace{\left[ \mu_\mathrm{CO_2} - \left( \mu_\mathrm{CO} + \frac{1}{2} \mu_\mathrm{O_2} \right) \right]}_{\cal A} dn_\mathrm{CO_2} $$

The term in the brackets describes the chemical potential of the product minus the chemical potentials of the reactants. It is known as the affinity, $\cal A$, of the reaction,

$$\mathcal{A} = \left[ \mu_{CO_2} - \left( \mu_{CO} + \frac{1}{2} \mu_{O_2} \right) \right].$$

For our example reaction, we obtain for the change in entropy in an isolated system

$$dS_{iso} = -\frac{1}{T} \, \mathcal{A} \, dn_{CO_2}.$$

In general the affinity for a reaction is given by

$$\mathcal{A} = \mu_{\textrm{products}} - \mu_{\textrm{reactants}}.$$

The equilibrium condition for an isolated system dictates a maximum in the entropy with changes in the number of moles of $\mathrm{CO_2}$. Therefore the equilibrium condition is

$$\mathcal{A} = 0$$

Affinity of Reactions

Let's consider a more general chemical reaction

$$l L + m M \rightleftharpoons r R + s S.$$

with the affinity

$$\mathcal{A} = (r \mu_R + s \mu_S) - (l \mu_L + m \mu_M).$$

It is usually not practical to measure the chemical potential, $\mu_k$, of a component $k$. Instead, we use the activity $a_k$ that we introduced earlier in the definition of the chemical potential

$$\mu_k - \mu^\circ_k = \Delta \mu_k \equiv RT \ln a_k$$

Remember that in an ideal solution, $a_k = X_k$. (This makes it a little clearer what the idea of "activity" really is.) If the solution is non-ideal, the activity differs from the mole fraction by a factor called the activity coefficient, $\gamma_k$,

$$a_k = \gamma_k X_k.$$

The concept of activity is a way of capturing the idea that a component "acts as if" a certain amount was present relative to an ideal solution (situation). In the definition of activity,

$$\mu_k = \mu_k^\circ + RT \ln a_k = G_k^\circ + RT \ln a_k,$$

$G^\circ$ is the Gibbs free energy per mol of component $k$ in the standard/reference state.
Using this equation for the affinity, $\cal A$, we obtain

$$\mathcal{A} = (r \mu_R + s \mu_S) - (l \mu_L + m \mu_M)$$

$$\mathcal{A} = \underbrace{\left[ (r G^\circ_R + s G^\circ_S) - (l G^\circ_L + m G^\circ_M) \right]}_{\Delta G^\circ} + RT \ln \frac{a^r_R a^s_S}{a^l_L a^m_M}$$

The term $\Delta G^\circ$ is generally referred to as _the standard Gibbs free energy change_,

$$\Delta G^\circ \equiv \left[ (r G^\circ_R + s G^\circ_S) - (l G^\circ_L + m G^\circ_M) \right].$$

The affinity is now defined as follows, in general:

$$\mathcal{A} = \Delta G^\circ + RT \ln Q,$$

where Q is the quotient of the activities of the products and reactants:

$$Q \equiv \frac{a^r_R a^s_S}{a^l_L a^m_M}.$$

In equilibrium, we obtain:

$$K \equiv Q_{\mathrm{equil.}} = \left[ \frac{a^r_R a^s_S}{a^l_L a^m_M} \right]_{\mathrm{equil}}$$

with

$$\mathcal{A} = 0 = \Delta G^\circ + RT \ln K.$$

The _equilibrium constant_, $K$, is given by the standard Gibbs free energy change,

$$K = \exp\left ( -\frac{\Delta G^\circ}{RT} \right ).$$

Now we can inspect the affinity of the reacting system (based on the instantaneous number of moles) and identify the following conditions:

$$\begin{eqnarray}
{Q/K} &>& 1 \quad \Rightarrow \quad \mathcal{A} &>& 0, \quad \textrm{products decompose}\\
{Q/K} &=& 1 \quad \Rightarrow \quad \mathcal{A} &=& 0, \quad \textrm{equilibrium}\\
{Q/K} &<& 1 \quad \Rightarrow \quad \mathcal{A} &<& 0, \quad \textrm{products form}\\
\end{eqnarray}$$

Example 1 (DeHoff 11.1)

**Problem:** A gas mixture at 1 atm total pressure and at a temperature of 700˚C has the following composition:

| Component | H$_2$ | O$_2$ | H$_2$O |
|------|------|------|------|
| Mole Fraction | 0.01 | 0.03 | 0.96 |

At 700˚C the standard Gibbs free energy change for the reaction is:

$$\Delta G^\circ = -440 \, \mathrm{kJ/mol}$$

Determine the direction of spontaneous change for this reaction at 700˚C.

**Solution:** $\require{mhchem}$ The single reaction ($R = C - E = 3 - 2 = 1$) for our system is:

$$\ce{2H2 + O2 \rightleftharpoons 2H2O}$$

We compute the equilibrium constant, $K = \exp{(- \Delta G^\circ / RT)}$:
###Code
import numpy as np
from pint import UnitRegistry  # assumption: the notebook relies on pint for unit handling (matches the "dimensionless" output below)
u = UnitRegistry()
Q_ = u.Quantity

GibbsChange = -440 * u.kJ/u.mol
R = 8.314 * u.J/u.mol/u.K
T = (Q_(700, u.degC)).to(u.K)
K = np.exp(-GibbsChange/(R*T))
print("Equilibrium constant K = ", K)
###Output
Equilibrium constant K = 4.151651805707335×10²³ dimensionless
###Markdown
Next, we compute the quotient of the activities, $Q$ (not at equilibrium):

$$Q = \frac{X^2_{H_2O}}{X^2_{H_2} X_{O_2}}$$
###Code
X_H2O = 0.96
X_H2 = 0.01
X_O2 = 0.03
Q = X_H2O**2/(X_H2**2 * X_O2)
print("Q = ", Q)
print("Q/K = ", Q/K)
###Output
Q/K = 7.399464463221308×10⁻¹⁹ dimensionless
###Markdown
This number is much less than one, meaning that there is a strong tendency for products to form from this system in the current state.

$\Delta G^\circ$ typically ranges from +1000 to $-1000$ kJ/mol, hence $K$ ranges over many orders of magnitude. Thus, $Q$ usually differs by many orders of magnitude from $K$ and it is easy to determine the direction of the reaction.

Example 2 (DeHoff 11.1)

**Problem:** What will be the equilibrium composition for this system at 700˚C (973 K)?

**Solution:** In equilibrium

$$K = Q_\mathrm{equil} = \frac{X^2_{H_2O}}{X^2_{H_2} X_{O_2}}$$
###Code
K = np.exp(-GibbsChange/(R*T))
print("Equilibrium constant K = ", K)
###Output
Equilibrium constant K = 4.151651805707335×10²³ dimensionless
###Markdown
This means that in equilibrium, the numerator is 23 orders of magnitude larger than the denominator. The system will maximize the H$_2$O content. Almost all the H$_2$ will be consumed, but not all of the O$_2$.

Conversion of 0.01 mol of H$_2$ will only consume 0.005 mol of O$_2$, and 0.01 mol of H$_2$O will be produced. The total number of moles will be reduced from 1.0 to 0.97 + 0.025 = 0.995.

A precise calculation of the equilibrium mole fractions requires solving a set of equations: the equilibrium condition,

$$K = Q_\mathrm{equil} = \frac{X^2_{H_2O}}{X^2_{H_2} X_{O_2}}$$

and the conservation of the number of H and O atoms,

$$n_\mathrm{H} = 2 n_\mathrm{H_2} + 2 n_\mathrm{H_2O} $$

$$n_\mathrm{O} = 2 n_\mathrm{O_2} + n_\mathrm{H_2O}.$$
###Code
from scipy.optimize import fsolve

# Number of moles of the various components, assuming a total amount of 1 mol in the system
X_H2O = 0.96
X_H2 = 0.01
X_O2 = 0.03
n_H = 2 * X_H2 + 2 * X_H2O
n_O = 2 * X_O2 + X_H2O
# Residuals: the H and O atom balances and the log-ratio of Q to K (all three are zero at equilibrium)
def Equations(p):
n_H2, n_O2, n_H2O = abs(p)
dH = 2 * n_H2 + 2 * n_H2O - n_H
dO = 2 * n_O2 + n_H2O - n_O
n_tot = n_H2 + n_O2 + n_H2O
X_H2, X_O2, X_H2O = [n_H2, n_O2, n_H2O] /n_tot
dQ = np.log(X_H2O**2/(X_H2**2 * X_O2)/ K.magnitude)
return [dH, dO, dQ]
n_H2, n_O2, n_H2O = abs(fsolve(Equations, (1E-6, 0.02, 0.97), epsfcn = 1E-16, xtol = 1E-16))
print ("Number of moles of H2: ", n_H2, "\n",
" O2: ", n_O2, "\n",
" H2O: ", n_H2O)
###Output
Number of moles of H2: 9.497358001222012e-12
O2: 0.02500000000474876
H2O: 0.9699999999905026
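###Markdown
As a quick sanity check (reusing `n_H2`, `n_O2`, `n_H2O` and `K` from the cells above), we can convert the solved mole numbers back to mole fractions and verify that the activity quotient indeed matches the equilibrium constant:
```python
n_tot = n_H2 + n_O2 + n_H2O
X_H2_eq, X_O2_eq, X_H2O_eq = n_H2 / n_tot, n_O2 / n_tot, n_H2O / n_tot
Q_eq = X_H2O_eq**2 / (X_H2_eq**2 * X_O2_eq)
print("Q at the computed composition:", Q_eq)
print("Equilibrium constant K:       ", K.magnitude)
```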
|
Week_12_Programming_Assignments/3_Letters.ipynb | ###Markdown
Solution provided by the instructor
###Code
"""
@author: Descentis
"""
s = input()
d={"UPPER CASE":0, "LOWER CASE":0}
for c in s:
if c.isupper():
d["UPPER CASE"]+=1
elif c.islower():
d["LOWER CASE"]+=1
else:
pass
print(d['UPPER CASE'],d['LOWER CASE'])
###Output
Hello world!
1 9
|
notebooks/geopricing/main.ipynb | ###Markdown
Geopricing with atoti

In this notebook we will explore a pricing use case that combines machine learning algorithms and atoti. Imagine our retailer has many shops spread across France. The idea behind this notebook is to group the shops based on their price index and their geographical position relative to their competitors. Price index is a measurement of where a retailer is positioned compared to one or multiple of its competitors. Through the clustering, we will be able to apply different pricing strategies to each cluster based on its competition.

We shall obtain the clustering of the shops via machine learning. For the machine learning, we will need a set of input features for each retail shop:
- Number of competitors per distance range (1km, 5km, 10km, etc.)
- Price Index per shop against its competitors

We will see how we can generate these input values for the machine learning with atoti. Not only that, we will also make use of the output from the machine learning to perform the simulations below:
- Pricing simulations around clusters to obtain the optimised price index against neighbouring competitors
- Selling price simulation by clusters and retail shops to align the pricing within the cluster

Dependencies

Assuming atoti is already installed, let's start by installing the additional libraries required for this notebook to work.
###Code
import sys
!conda install --yes --prefix {sys.prefix} folium scipy scikit-learn matplotlib seaborn
import atoti as tt
import pandas as pd
from atoti.config import create_config
###Output
_____no_output_____
###Markdown
Data Preparation and exploration with atoti

Let's start by loading our data into atoti stores.
###Code
config = create_config(metadata_db="./metadata.db")
session = tt.create_session(config=config)
# We used pandas to read the selling price here as we will be using it again for price optimisation in the later section.
product_sales_df = pd.read_csv(
"https://data.atoti.io/notebooks/geopricing/product_pricing.csv"
)
productSales = session.read_pandas(
product_sales_df,
keys=["ProductId", "ShopId"],
store_name="ProductSales",
types={"ProductId": tt.type.INT, "ShopId": tt.type.INT},
)
productSales.head()
###Output
The store has been sampled because there are more than 10000 lines in the files to load. Call Session.load_all_data() to trigger the full load of the data.
###Markdown
Due to the amount of data in this store, the store is sampled by default. We will proceed to load all the data only after we are done modeling the cube.

We will also require the competitors' product pricing against our shops.
###Code
competitorPrices_df = pd.read_csv(
"https://data.atoti.io/notebooks/geopricing/competitors_prices.csv"
)
competitorPrices = session.read_pandas(
competitorPrices_df,
keys=["ProductId", "CompetitorShopId", "ShopId"],
store_name="CompetitorPrices",
)
competitorPrices.head()
###Output
_____no_output_____
###Markdown
We have the key stores necessary for us to generate the data required for machine learning. However, we will also load the following stores that will allow us to have a more in-depth analysis:
- Products: Product catalogue
- Shops: shops information such as location
- CompetitorsShops: Competitors' shop information
###Code
products_df = pd.read_csv(
"https://data.atoti.io/notebooks/geopricing/products_info.csv", sep=";"
)
products = session.read_pandas(
products_df,
keys=["ProductId"],
store_name="Products",
)
products.head()
shops_df = pd.read_csv("https://data.atoti.io/notebooks/geopricing/shops.csv", sep=";")
shops = session.read_pandas(
shops_df,
keys=["ShopId"],
store_name="Shops",
types={"ShopId": tt.type.INT},
)
shops.head()
competitorShops_df = pd.read_csv(
"https://data.atoti.io/notebooks/geopricing/competitors_shops.csv", sep=";"
)
competitorShops = session.read_pandas(
competitorShops_df,
keys=["CompetitorShopId"],
store_name="CompetitorsShop",
types={"CompetitorShopId": tt.type.INT},
)
competitorShops.head()
###Output
_____no_output_____
###Markdown
Since we have the latitude and longitude of the shops and their competitors, we pre-computed the distances between them using the [haversine formula](https://en.wikipedia.org/wiki/Haversine_formula) and loaded them into the data store. Note that another approach would be to use instead something like the [google API](https://developers.google.com/maps/documentation/distance-matrix/intro) to compute distances and durations between two points (thus taking into account possible means of transportation).
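The distance computation itself lives in the `geo_utils` helper, which is not shown in this notebook. For reference, a minimal haversine distance in Python looks roughly like the sketch below (an illustration only, not the actual helper code):
```python
import numpy as np

def haversine_km(lat1, lon1, lat2, lon2, earth_radius_km=6371.0):
    # Great-circle distance between two (latitude, longitude) points, in kilometres
    lat1, lon1, lat2, lon2 = map(np.radians, [lat1, lon1, lat2, lon2])
    dlat = lat2 - lat1
    dlon = lon2 - lon1
    a = np.sin(dlat / 2) ** 2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon / 2) ** 2
    return 2 * earth_radius_km * np.arcsin(np.sqrt(a))
```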
###Code
from _utils import geo_utils
shops_distances_matrix = geo_utils.create_shops_distances_matrix(
shops_df, competitorShops_df
)
distance_matrix = session.read_pandas(
shops_distances_matrix,
keys=["ShopId", "CompetitorShopId"],
store_name="DistanceMatrix",
types={"ShopId": tt.type.INT, "CompetitorShopId": tt.type.INT},
)
distance_matrix.head()
###Output
_____no_output_____
###Markdown
We choose _ProductSales_ as our base store as it contains the key facts for our shops. Look at the [atoti tutorial](https://docs.atoti.io/0.4.1/tutorial/01-Basics.html) to understand the cube better. Correspondingly, we have our _CompetitorPrices_ store that has a many-to-many relationship with our _ProductSales_ since multiple shops can sell the same products. We can easily set up this many-to-many relationship simply by joining the _CompetitorPrices_ store to our _ProductSales_ store by _ProductId_ and _ShopId_.
###Code
price_index_cube = session.create_cube(productSales, "PriceIndexCube")
productSales.join(
competitorPrices, mapping={"ProductId": "ProductId", "ShopId": "ShopId"}
)
###Output
_____no_output_____
###Markdown
Let's also enrich our cube with extra information about the shops to create a [snowflake schema](https://www.geeksforgeeks.org/snowflake-schema-in-data-warehouse-model/).
###Code
productSales.join(products, mapping={"ProductId": "ProductId"})
productSales.join(shops, mapping={"ShopId": "ShopId"})
competitorPrices.join(competitorShops, mapping={"CompetitorShopId": "CompetitorShopId"})
competitorPrices.join(
distance_matrix,
mapping={"CompetitorShopId": "CompetitorShopId", "ShopId": "ShopId"},
)
###Output
_____no_output_____
###Markdown
Let's see the final design of our cube.
###Code
price_index_cube.schema
h = price_index_cube.hierarchies
m = price_index_cube.measures
lvl = price_index_cube.levels
m
###Output
_____no_output_____
###Markdown
We can see that a _SUM_ and a _MEAN_ measure are created for columns of type double/float in the base store - _ProductSales_. A _VALUE_ measure is created for columns of type double/float in the other referenced stores. With the cube created, let's start by computing the number of competitors per distance bucket (distance radius from the shop).

1. Computing the number of Competitors per Distance Bucket

There are many ways to compute the number of competitors per distance bucket. However, we are going to showcase how we can make use of simulations to create the distance buckets. The advantage of doing so is that we can easily create new distance buckets with minimal coding.

Let's create a measure called `m["Distance Threshold"]` that contains the value of the distance threshold for each bucket, and start by looking at the number of competitors within a 1km distance radius from our shop.
###Code
m["Distance Threshold"] = 1
###Output
_____no_output_____
###Markdown
Due to the join to the _CompetitorPrices_ store, the `m["Contributor.COUNT"]` returned is based on the products. We want to obtain the number of distinct competitor shops that sell the same products as us, not the number of products. To do so, we look at the average distance between the shop and its competitor, returning a count of 1 if it is located within our threshold radius.
###Code
m["Competitor distance KM.VALUE"] = distance_matrix["Competitor distance KM"]
m["Count within distance threshold"] = tt.agg.sum(
tt.where(
tt.agg.mean(m["Competitor distance KM.VALUE"]) < m["Distance Threshold"], 1, 0
),
scope=tt.scope.origin(lvl["ShopId"], lvl["CompetitorShopId"]),
)
###Output
_____no_output_____
###Markdown
Naturally we can quickly use Pandas to derive the same value. However, when we use this one time setup together with simulations, we have the below benefits:- easily add / delete the distance buckets- ability to drill down on the data for each distance range to perform further analysis Setting up simulation for distance bucketsWe setup a simulation where we can replace the threshold value in order to be able to create scenarios for other ranges of distance. We name this base scenario "1km".
###Code
simulation = price_index_cube.setup_simulation(
"Distance Simulation", base_scenario="01 km", replace=[m["Distance Threshold"]]
)
lvl["Distance Simulation"].comparator = tt.comparator.ASC
###Output
_____no_output_____
###Markdown
We can now easily obtain the number of competitors per area simply by creating a scenario for each distance radius. With this, we can easily create new distance buckets to generate different datasets for the machine learning.
###Code
simulation.scenarios["05 km"] = 5
simulation.scenarios["10 km"] = 10
simulation.scenarios["15 km"] = 15
simulation.scenarios["20 km"] = 20
###Output
_____no_output_____
###Markdown
We can now have the number of competitors per distance bucket. atoti allows us to do [modeling with sampled size](https://docs.atoti.io/0.4.1/tutorial/02-Configuration.htmlSampling-mode) of the data. As we are currently in sampling mode, let's trigger full data load to do some visualizations.
###Code
session.load_all_data()
###Output
_____no_output_____
###Markdown
Let's do a quick data-viz to see how the number of competitors varies by the distance.
###Code
session.visualize("Nr of competitors by distance bucket")
###Output
_____no_output_____
###Markdown
2. Computing the price index per shop

There are different existing formulas for the price index. The formula we will use in this example compares a product price to the average price found among the local competitors of a particular shop, measuring at which percentage of this average competitor price the product sits.

We will weight the price index indicator by the margin when aggregating above the shop and product level. This is so that we can later optimize the price index for the products that contribute the most to the margin. Other commonly used formulas weight by sales quantity or revenue.

Price index formula:

$$100 \times \frac{\sum_{s,p \in (Shops,Products)}\frac{Selling Price(s,p)}{Average Competitor Price(s,p)} \times Margin(s,p)}{\sum_{s,p \in (Shops,Products)}Margin(s,p)}$$

Let's create a measure to get the mean of _CompetitorPrice_ which will be used to derive the price index. We are only interested in the _CompetitorPrice_ of competitors within the _distance threshold_.
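To make the weighting concrete before wiring it into the cube, here is a toy illustration with made-up numbers (purely illustrative, not taken from the dataset). Note that the measures defined below keep the indicator as a ratio around 1 instead of multiplying by 100:
```python
import pandas as pd

# Two hypothetical (shop, product) rows
toy = pd.DataFrame({
    "SellingPrice": [10.0, 20.0],
    "CompetitorPriceMean": [11.0, 19.0],
    "Margin": [2.0, 6.0],
})

numerator = (toy["SellingPrice"] / toy["CompetitorPriceMean"] * toy["Margin"]).sum()
price_index = numerator / toy["Margin"].sum()
print(price_index)  # ~1.02: on average we are priced slightly above the local competition
```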
###Code
m["CompetitorPrice.VALUE"] = competitorPrices["CompetitorPrice"]
m["CompetitorPrice.MEAN"] = tt.agg.mean(
tt.where(
m["Competitor distance KM.VALUE"] < m["Distance Threshold"],
m["CompetitorPrice.VALUE"],
None,
)
)
m["CompetitorPrice.MEAN"].formatter = "DOUBLE[#,###.00]"
###Output
_____no_output_____
###Markdown
Instead of using Pandas to do pre-aggregation, we perform the margin computation with atoti so that we can see the change in its value after we optimise the selling price later on.
###Code
m["Margin.SUM"] = tt.agg.sum(
(m["SellingPrice.SUM"] - m["PurchasePrice.SUM"]) * m["Quantity.SUM"],
scope=tt.scope.origin(lvl["ProductId"], lvl["ShopId"]),
)
###Output
_____no_output_____
###Markdown
We see how the weighted price index indicator can be achieved in the next few cells. Notice how we are setting the scope on _ProductId_ and _ShopId_? This ensures the summation of the various measures at the _Shops_ and _Product_ level, as required by the numerator of the formula: ${\sum_{s,p \in (Shops,Products)}\frac{Selling Price(s,p)}{Average Competitor Price(s,p)} \times Margin(s,p)}$
###Code
price_index_numerator = tt.agg.sum(
(m["SellingPrice.SUM"] * m["Margin.SUM"]) / m["CompetitorPrice.MEAN"],
scope=tt.scope.origin(lvl["ProductId"], lvl["ShopId"]),
)
###Output
_____no_output_____
###Markdown
Finally, we divide by the total margin to obtain the margin-weighted price index.
###Code
m["Price Index"] = price_index_numerator / m["Margin.SUM"]
###Output
_____no_output_____
###Markdown
Let's visualize the price index per shop.
###Code
session.visualize("Price index by shops and distance")
###Output
_____no_output_____
###Markdown
How do we systematically make use of this information? Let's use the _Competitors count within radius_ for each distance bucket and the _Price Index_ computed above to train a model and cluster the stores. We can extract these data from atoti as shown in the function below:
###Code
def get_features():
# output dataframe for competitors count per shop & area (distance radius) from cube querying
shops_competitors_count_per_shop_area = price_index_cube.query(
m["Count within distance threshold"],
levels=[lvl["ShopId"], lvl["Distance Simulation"]],
).reset_index()
# pivot the table such that each scenario becomes a column
shops_competitors_count_per_shop_area = shops_competitors_count_per_shop_area.pivot(
index="ShopId",
columns="Distance Simulation",
values="Count within distance threshold",
)
# output dataframe for price index by shop from cube querying
price_index_per_shop_area = price_index_cube.query(
m["Price Index"], levels=[lvl["ShopId"], lvl["Distance Simulation"]]
).reset_index()
# pivot the table such that each scenario becomes a column
price_index_per_shop_area = price_index_per_shop_area.pivot(
index="ShopId",
columns="Distance Simulation",
values="Price Index",
)
# merge the 2 dataframe and return the output
shops_features = pd.merge(
shops_competitors_count_per_shop_area,
price_index_per_shop_area,
left_on="ShopId",
right_on="ShopId",
how="left",
suffixes=("", "_Price Index"),
).fillna(1)
return shops_features
###Output
_____no_output_____
###Markdown
3. Machine Learning - Shops clustering using price index and competitor-count features

We can use a machine learning algorithm such as k-means to make clusters with the features (01km, 05km, 10km, 15km, 20km, Price Index) that we obtained from the cube:
###Code
shops_features = get_features()
shops_features.head(15)
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy as sc
import seaborn as sns
from sklearn.cluster import KMeans, MiniBatchKMeans
from sklearn.metrics import pairwise_distances_argmin
sns.set() # for plot styling
###Output
_____no_output_____
###Markdown
Let's set the number of clusters to 5. The number of clusters can be increased if the number of shops is huge. We apply k-means on the _shops\_features_ dataframe from above.
###Code
number_of_clusters = 5
kmeans = MiniBatchKMeans(number_of_clusters)
kmeans.fit(shops_features)
new_colors = kmeans.cluster_centers_[kmeans.predict(shops_features)]
k_means_labels = pairwise_distances_argmin(shops_features, kmeans.cluster_centers_)
labels = KMeans(number_of_clusters, random_state=0).fit_predict(shops_features)
###Output
_____no_output_____
###Markdown
Using competitors within 1km as an example, we can now analyze the result of the clustering by pair of features using matplotlib as shown below:
###Code
plt.scatter(
shops_features.loc[:, "01 km"],
shops_features.loc[:, "01 km_Price Index"],
c=k_means_labels,
s=50,
cmap="viridis",
)
plt.xlabel("Nr Competitors within 1km")
plt.ylabel("Price Index")
###Output
_____no_output_____
###Markdown
In the above plot, each color represents a cluster. We can see that clusters seem to be strongly based on the number of competitors rather than on the price index. However, to avoid having to plot every couple of features and understand more quickly what our clusters are, we will use seaborn to have a plot of the clustering result for every pair of features.
###Code
shops_features["Cluster"] = labels
shops_features.head(5)
sns.pairplot(data=shops_features, hue="Cluster")
###Output
_____no_output_____
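###Markdown
To complement the pair plot, we can also summarize the features with a quick per-cluster average on the same `shops_features` dataframe (a small check we add here):
```python
# Average competitor counts and price indices for each cluster
cluster_summary = shops_features.groupby("Cluster").mean()
print(cluster_summary)
```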
###Markdown
We can get a better understanding of the clusters from the chart above. Within a 1km distance radius, the price index across the clusters is generally around 1. The stores in cluster 1 have a much higher number of competitors (>40) in a 5km radius, compared to those of cluster 0, which have less than 20 competitors within a 20km radius. While cluster 1 has more competitors, its price index is generally higher than cluster 0's and greater than 1.

Continuing this analysis tells us that:
- Cluster 0 is a big cluster with few competitors around, and its price index is generally around 1.
- Cluster 1 has a high number of competitors even within a 5km distance radius. However, its price index is slightly skewed towards higher values even with the high competition.
- Cluster 2 is a small cluster and the number of competitors increases tremendously as the distance radius increases. Generally it has a lower price index against its competitors.
- Cluster 3 is a small cluster and the number of competitors remains about the same across all buckets. Its price index remains consistent around 1 across the distance buckets, although one of its shops starts having a higher price index and the rest fall below 1 as we consider competitors in the 15-20km radius.
- Cluster 4 is a small cluster that has a higher price index against the nearest competitors. This is reasonable considering the number of competitors nearby is not high. The price index becomes much lower as the number of competitors increases from 15km onwards.

While this gives us an idea of how to position ourselves, we need to put these observations into context before we can decide on what pricing strategy to apply. Let's load the clusters back into the cube for a more in-depth analysis.
###Code
clusters_df = shops_features[["Cluster"]].reset_index()
clusters_df.ShopId = clusters_df.ShopId.astype("int32")
clusters = session.read_pandas(clusters_df, keys=["ShopId"], store_name="Clusters")
clusters.head(5)
shops.join(clusters)
m["Longitude.VALUE"] = tt.value(shops["Longitude"])
m["Latitude.VALUE"] = tt.value(shops["Latitude"])
session.visualize("Spread of clusters by longitude and latitude")
###Output
_____no_output_____
###Markdown
Interestingly, cluster 1 (orange) is distributed across the longitude and latitude range, and its shops are mostly the only shop of our retailer in their neighbourhood. There are few competitors in the area. Cluster 4 is a small cluster around Lille, the capital of the Hauts-de-France region in northern France. The rest of the clusters have shops under our retailer in close proximity to one another, and most of them are spread around Paris.

The size of the points on the map reflects the number of competitors within 5km - we can see the competition around the city is the highest, specifically for cluster 2 (red).

In the case of cluster 1, the shop is the only one under the retailer in the neighbourhood. The number of competitors is low, hence the price index is less affected by competition. Rather, other factors such as variety of products, branding etc. could weigh more heavily on the price index - these are to be considered when applying a pricing strategy for this cluster. Generally, the price index could be higher.

For the rest of the clusters, there are a few considerations. Within the same proximity, the shops face the same competitors. Not only that, consumers can easily detect the price differences of products between the shops of the same retailer if they are close to one another. Hence it makes more sense to align their price index, and it should be slightly lower to push up their competitiveness.

5. Pricing simulations around clusters

We will now generate new prices using the clustering information in order to take into account the different competitiveness constraints of the shops. Using the clusters generated, the pricing method below tries to reduce the prices if the competition is strong, and on the contrary increase them if there is little competition. For instance, cluster 0 and cluster 4 have little competition, hence their price index could be slightly higher than 1. The rest of the clusters have more competitors within a 10km radius, hence could have their price index at 1 or slightly lower to maintain their competitiveness.
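The exact rule is implemented in the `pricer.optimize_prices` helper used below. Purely as an illustration, a simplified version of this kind of cluster-based adjustment could look like the following sketch (the factors, column names and function name are our assumptions, not the real implementation):
```python
# Illustrative sketch only -- the real logic lives in _utils/pricer.py and may differ.
CLUSTER_PRICE_FACTORS = {0: 1.03, 1: 1.02, 2: 0.97, 3: 1.00, 4: 1.02}

def naive_optimize_prices(product_sales_df, clusters_df):
    # Attach each shop's cluster, then scale its selling prices by the cluster factor
    df = product_sales_df.merge(clusters_df, on="ShopId", how="left")
    factors = df["Cluster"].map(CLUSTER_PRICE_FACTORS).fillna(1.0)
    df["SellingPrice"] = df["SellingPrice"] * factors
    return df
```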
###Code
from _utils import pricer
selling_prices_based_on_clusters = pricer.optimize_prices(product_sales_df, clusters_df)
###Output
_____no_output_____
###Markdown
Thanks to atoti's built-in simulation capabilities, we can easily create a new scenario for the new pricing by directly loading the price-optimised dataframe. All the previously defined KPIs, e.g. the price index, will be re-computed on the fly, enabling us to compare the scenarios and their benefits.
###Code
productSales.scenarios["Selling prices based on clusters"].load_pandas(
selling_prices_based_on_clusters
)
session.visualize("Price Optimisation impact on Price Index and Margin")
###Output
_____no_output_____
###Markdown
With the new scenario loaded, the previously defined KPIs, e.g. the price index and margin, are re-computed on the fly, enabling us to compare the scenarios and their benefits. We see an increase in margin for all clusters except cluster 2. Although the overall margin has decreased, we should see an increase in sales if the strategy works well, and subsequently an increase in the overall margin. We saw the adjustment of the price index at the cluster level, and we could easily drill down to the shop and even product level. Now, let's visualize the changes in price index for the 5 clusters.
###Code
session.visualize("Price index optimisation difference")
###Output
_____no_output_____
###Markdown
In order to attract more customers, we can see that the pricing method decreased the prices for cluster 2, which faces high competition. On the contrary, it increased the prices in shops belonging to low-competition clusters in order to maximize margin. Clusters 0, 1 and 4, for instance, have fewer competitors, hence their selling prices are adjusted higher, resulting in a higher price index.

Interactive GeoPricing Monitoring Dashboard
###Code
session.url + "/#/dashboard/1bb"
###Output
_____no_output_____
###Markdown
Click on the above URL to access the interactive GeoPricing Monitoring dashboard. Zoom in on the map and click on any store to see how its price index and margin are influenced by the number of competitors within a given distance threshold.

6. Selling price simulation by clusters and shops

Zooming in on cluster 2, we see that _MyShop Paris 6_ faces some of the strongest competition within the cluster. However, looking at the chart below, the store also has a relatively high price index within the cluster. Likewise, _MyShop Paris 9_ also has a relatively high price index within the cluster even though its competition is just slightly weaker.
###Code
session.visualize("Price index for cluster 2")
###Output
_____no_output_____
###Markdown
Let's scale down the price index of these 2 shops using atoti's measure simulation.
###Code
price_simulation = price_index_cube.setup_simulation(
"Price simulation",
base_scenario="Selling Price Initial",
levels=[lvl["ShopId"]],
multiply=[m["SellingPrice.SUM"]],
)
###Output
_____no_output_____
###Markdown
We are now able to scale the _Selling Price_ either across clusters or by specific shop.
###Code
cluster_adjustment = price_simulation.scenarios["Selling Price New"]
cluster_adjustment.append(
(7, 0.95),
)
cluster_adjustment.append(
(10, 0.98),
)
session.visualize("Price index optimisation difference by scenario")
###Output
_____no_output_____
###Markdown
The price index for these shops looks more aligned now, after the price optimization and the shop-level adjustments.

Price Simulation Dashboard

Access the interactive Price Simulation dashboard from the URL below.
###Code
session.url + "/#/dashboard/3e7"
###Output
_____no_output_____ |
notebooks/test/conferences/motiondeblur_recovery_from_simulation_COSI.ipynb | ###Markdown
Multi-Frame Motion Deblur Recovery

This notebook opens a .npz simulation data file, adds noise, and solves the inverse problem.
###Code
%matplotlib notebook
%load_ext autoreload
%autoreload 2
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
import scipy.misc as misc
import time
import sys
import itertools
import math
import imageio
import skimage as sk
# Libwallerlab imports
from libwallerlab.algorithms import iterative as iterative
from libwallerlab.opticsalgorithms.motiondeblur import blurkernel
from libwallerlab.opticsalgorithms.motiondeblur import kernel_objectives
from libwallerlab.operators import operators as ops
from libwallerlab.utilities import displaytools, iotools
from libwallerlab.algorithms import objectivefunctions
from libwallerlab.algorithms import regularizers
from libwallerlab.operators import proximal as proxops
from libwallerlab.utilities.opticstools import Ft, iFt
###Output
_____no_output_____
###Markdown
Flow of Notebook
1. Open .npz datafile (from simulation notebook)
2. Solve Inverse Problem
3. View blur paths, estimated conditioning, SSE

To-Do
- make compatible with libwallerlab.utilities.iotools.Dataset format
###Code
noise_magnitude = 1e-3
noise_type = 'shot'
savepath = '/home/sarah/Dropbox/deblurring/COSI/data/simulations/recovered'
###Output
_____no_output_____
###Markdown
Open Datafile
###Code
# directory and name of file of interest
datafile_dir = '/home/sarah/Dropbox/deblurring/COSI/data/simulations/blurred'
filename = 'raster_pseudo_random_9x1' #'raster_major_both_random_phase_18x1'
# load data and assign variables
data = np.load(datafile_dir + '/' + filename + '.npz')
#np.savez(savestring, object_true=object_true, image_size=image_size, object_edge_pad_type=object_edge_pad_type, point_list_segmented=point_list_segmented, illum_vector_list=illum_vector_list, y_list=y_list)
###Output
_____no_output_____
###Markdown
Add Noise and View Images
###Code
image_size = data['image_size']
y_list_pure = data['y_list']
y_list = []
for y in y_list_pure:
noise = noise_magnitude * np.random.normal(size=y.shape)
if noise_type == 'shot': noise = noise * y
y_list.append((y + noise).astype(np.float32))
nshow = min(5,len(y_list))
plt.figure(figsize=(3,nshow*2))
for i in range(nshow):
plt.subplot(nshow, 1, i+1)
plt.imshow(np.abs(y_list[i].reshape(image_size)))
plt.ylabel('Cropped y')
###Output
/home/sarah/anaconda3/lib/python3.5/site-packages/ipykernel/__main__.py:8: ComplexWarning: Casting complex values to real discards the imaginary part
###Markdown
Recreate Blur Paths
###Code
# Generate blur kernel maps for each frame
object_size_0 = data['object_true'].shape
illum_vector_list = data['illum_vector_list']
point_list_segmented = data['point_list_segmented']
blur_kernel_list = np.zeros((len(point_list_segmented), object_size_0[0], object_size_0[1]))
for frame_index in range(len(illum_vector_list)):
for position_index, position in enumerate(point_list_segmented[frame_index]):
blur_kernel_list[frame_index, position[0], position[1]] = illum_vector_list[frame_index][position_index]
# Define cropped object sizes and crop true image
object_size = blur_kernel_list[0].shape
# Show blur kernels
displaytools.show3dArray(blur_kernel_list, figsize=(8,6))
###Output
_____no_output_____
###Markdown
Forward model based on Padding and Blur Kernels
###Code
# Determine maximum kernel support in x/y for all blur kernels in blur_kernel_list. This is how much we will pad our object by.
support_size_list = []
for blur_kernel in blur_kernel_list:
support_size_list.append(blurkernel.getPositionListBoundingBox(point_list_segmented).size())
max_kernel_support = np.max(np.asarray(support_size_list),axis=0)
# Generate pad operator for object support
object_size_padded = (np.asarray(object_size) + max_kernel_support).tolist() # Add to object_size
W_object_support = ops.Crop(object_size_padded, object_size, crop_start=(max_kernel_support[0] // 2, max_kernel_support[1] // 2)) # Add support
# Pad object with random values (to simulate an extended object)
object_true = data['object_true']
object_extended = W_object_support.H * object_true.reshape(-1).astype(np.complex64)
object_edge_pad_type = data['object_edge_pad_type']
if object_edge_pad_type == 'random':
object_extended += (1. - W_object_support.H * np.ones(object_true.size, dtype=np.complex64)) * np.random.rand(np.prod(object_size_padded))
elif object_edge_pad_type == 'zeros':
object_extended += (1. - W_object_support.H * np.zeros(object_true.size, dtype=np.complex64))
elif object_edge_pad_type == 'ones':
object_extended += (1. - W_object_support.H * np.ones(object_true.size, dtype=np.complex64))
elif object_edge_pad_type == 'mean':
object_extended += (1. - W_object_support.H * np.mean(object_true) * np.ones(object_true.size, dtype=np.complex64))
elif object_edge_pad_type == None:
object_extended = object_true
object_size_padded = object_true.shape
W_object_support = ops.Identity(object_true.shape)
# Define crop operator for object to image
W = ops.Crop(object_size, image_size)
A_list = []
# Generate forward model operators for each blur kernel
for blur_kernel_index, blur_kernel in enumerate(blur_kernel_list):
blur_kernel = blur_kernel.astype(np.complex64) / np.sum(np.abs(blur_kernel.astype(np.complex64)))
# 2D Convolution Operator with the given kernel
C = ops.Convolution(object_size_padded, (W_object_support.H * blur_kernel.reshape(-1)).reshape(object_size_padded))
# Forward operator with image crop and full object crop
A_list.append(W * W_object_support * C)
###Output
_____no_output_____
###Markdown
Recovery
###Code
# Generate measurements from image list
y_full = np.empty(0, dtype=np.complex64)
for y in y_list:
y_full = np.append(y_full, y)
# Normalize measurements
y_mean = np.mean(np.abs(y_full))
y_full /= y_mean
# Generate full A Operator
A_full = ops.Vstack(Operators=A_list)
# Initialization: choosing a "good" coefficient value will help in convergence
initialization = np.ones(object_size_padded, dtype=np.complex64).reshape(-1)
# Define cost function
objective = objectivefunctions.L2(A_full, y_full)
solve_method = 'cg'
display_type = 'text'
# Solve linear inverse problem
if solve_method == 'gd':
iteration_count = 3000
object_recovered = iterative.GradientDescent(objective).solve(initialization=initialization,
step_size=1,
iteration_count=iteration_count,
display_type=display_type,
display_iteration_delta=(iteration_count // 10))
elif solve_method == 'cg':
iteration_count = 500
object_recovered = iterative.ConjugateGradient(A_full, y_full).solve(initialization=initialization,
iteration_count=iteration_count,
display_type=display_type,
use_log_y=False,
use_log_x=False,
debug=True,
display_iteration_delta=(iteration_count // 10))
elif solve_method == 'fista':
iteration_count = 300
object_recovered = iterative.Fista(objective, proximal_operator=proxops.positivity).solve(initialization=initialization,
iteration_count=iteration_count,
display_type=display_type,
use_log_y=True,
use_log_x=False,
debug=True,
display_iteration_delta=(iteration_count // 10))
niterations = iteration_count  # record the number of iterations actually used by the chosen solver
object_recovered_crop = (W_object_support * object_recovered).reshape(object_size)
# normalize true object (because the zero-frequency component is irrelevant and the reconstruction is zero-mean)
object_true_normalized = object_true / np.mean(object_true)
# Calculate SSE
SSE = np.sum(np.abs(object_true_normalized - object_recovered_crop) ** 2)
print('Recovery SSE is %.2f' % SSE)
plt.figure(figsize=[8,5]);
plt.subplot(1,3,1); i_true = plt.imshow(np.abs(object_true_normalized), cmap='gray'); plt.title('Ground Truth')
plt.axis('off')
plt.subplot(1,3,2); i_rec = plt.imshow(np.abs(object_recovered_crop), cmap='gray'); plt.title('Recovered');
i_rec.set_clim(i_true.get_clim())
plt.axis('off')
#plt.savefig("test.png", bbox_inches='tight')
ax = plt.subplot(1,3,3); plt.imshow(np.abs(object_true_normalized - object_recovered_crop), cmap='gray');
plt.colorbar(fraction=0.046, pad=0.04); plt.title('Difference')
ax.tick_params(labelbottom=False, labelleft=False)
import os
if not os.path.exists(savepath + '/' + filename):
os.makedirs(savepath + '/' + filename)
# csv or text file with noise, convergence rate, sse
with open(savepath + '/' + filename + '/recovery.txt', "w") as text_file:
print("noise: {}\t{}\niterations: {}\nsse: {}".format(noise_type, noise_magnitude, niterations, SSE), file=text_file)
# npz file with recovered
np.savez(savepath + '/' + filename + '/recovered', object_recovered=object_recovered_crop)
###Output
_____no_output_____ |
examples/example_autoFIS_iris.ipynb | ###Markdown
AutoFIS code experimenting
###Code
import time
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn import datasets
from sklearn.metrics import accuracy_score
from autofis import AutoFISClassifier
###Output
_____no_output_____
###Markdown
Importing benchmark dataset Iris
###Code
iris = datasets.load_iris()
df_iris = pd.DataFrame(iris['data'])
df_iris['target'] = iris['target']
X = iris['data']
y = iris['target']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=42)
autofis_estimator = AutoFISClassifier()
autofis_estimator.fit(X_train,y_train,categorical_attributes = [False, False, False, False],verbose = 0)
y_pred = autofis_estimator.predict(X_train)
print('train: ')
accuracy_score(y_pred, y_train)
y_test_pred = autofis_estimator.predict(X_test)
print('test: ')
accuracy_score(y_test_pred, y_test)
# autofis_estimator.fuzzifier.fuzzy_params
###Output
_____no_output_____
###Markdown
Running autofis with GridSearchCV
###Code
param_grid = {
'n_fuzzy_sets': [3,5,7],
'triangle_format': ['normal','tukey'],
'enable_negation':[True,False],
'criteria_support':['cardinality','frequency'],
'association_method': ["MQR", "PMQR", "CD", "PCD", "freq_max"],
'aggregation_method': ["MQR", "PMQR", "CD", "PCD", "freq_max"],
't_norm':['prod','min']
}
clf = GridSearchCV(AutoFISClassifier(),param_grid,cv = 2,n_jobs = -1,verbose=1)
start = time.time()
clf.fit(X_train, y_train, categorical_attributes = [False,False,False,False])
clf_best = clf.best_estimator_
print('')
print("--- Ellapsed time: %s seconds ---" % (time.time() - start))
print('Best score (%s)' % clf.best_score_)
clf_best.get_params()
y_test_pred = clf_best.predict(X_test)
print('test accuracy: ')
accuracy_score(y_test_pred, y_test)
###Output
test accuracy:
|
Research Papers/FrustumPointNets.ipynb | ###Markdown
PointNets -> Frustum PointNets
The latter builds on the concepts from the former, so we are going to take a look at it first.
PointNet:
Takes a point cloud as input and outputs either class labels for the entire input or per-point segment/part labels for each point of the input. Each point is processed independently and is represented by just three coordinates (x, y, z).
1. Consumes unordered point sets in 3D.
2. 3D shape classification, shape part segmentation and scene semantic parsing.
3. Detailed empirical and theoretical analysis of the stability and efficiency of the method.
4. Illustration of the 3D features computed by selected neurons in the net, with intuitive explanations of its performance.
Problem
Point Cloud: {P_i | i = 1, ..., n}, where each point P_i is a vector of its (x, y, z) coordinates as well as additional information like color channels etc. For simplicity, we only use the (x, y, z) coordinates as the point's channels.
Euclidean Space: points are represented by coordinates (one per dimension) and the distance between points is given by the distance formula.
In Euclidean space, the point sets have the following properties:
**Unordered.** Unlike pixel arrays in images, the point sets come in no specific order. So a network that consumes N 3D points needs to be invariant to the N! permutations of the input set in data feeding order.
**Interaction among points.** Neighboring points matter for each other: the points live in Euclidean space and each point is related to the others via distance.
**Invariance under transformations.** The learned representation should be invariant to certain transformations of the point cloud. For example, rotating and translating all points together should not modify the global point cloud category nor the segmentation of the points.
PointNet Architecture
The PointNet architecture uses both a classification and a segmentation network. The input is sampled from the point cloud and passed into the classification network.
**Symmetry Function for Unordered Input:** To make the model invariant to input permutations, three strategies exist:
1. Sort the input into a canonical order.
2. Treat the input as a sequence to train an RNN, and augment the training data with all kinds of permutations of the point cloud.
3. Use a simple, symmetric function to aggregate the information from each point, i.e. a symmetric function takes n vectors as input and produces an output that is invariant to the order of those n vectors. For example, + and * as binary operations are symmetric functions.
**Sorting** The issue with sorting is that there is not really an ordering that is stable with respect to the point perturbations you can have in high-dimensional spaces. Asking for the point perturbations to keep the same order is the same as asking the points to keep spatial proximity as the dimension reduces.
**RNNs** Using RNNs is also not ideal: they work fine with small sequences, but point clouds are large sequences, which RNNs do not handle well.
**Symmetric function** takes an N-dimensional input and produces an output that is invariant to the ordering of what it takes in. Simple model: the transformed inputs are passed into an h function, which is basically a perceptron network, and then we apply the activation function and max pooling. Through a collection of different h functions, we can learn different properties of the set (basically a neural network).
Local and Global Information Aggregation:
The output of the f({x_1, ..., x_n}) function forms a vector [f_1, ..., f_K], which is a global signature of the input set. We can easily train an SVM or a multi-layer perceptron classifier on the global features of the set. However, we need information about the local as well as the global features. We do so as follows: after getting the global point cloud feature vector, we concatenate each per-point feature with the global feature. Then we extract new per-point features that now contain both the global and the local information.
Frustum PointNets
Builds on the architecture of PointNet but also deviates from it because of some basic considerations. PointNet focuses on semantic segmentation of the points in the point cloud. In contrast, Frustum PointNets performs instance segmentation and focuses on detecting a 3D object in 3D space using the PointNet architecture.
Amodal detection: detecting the whole object as a 3D object even though parts of it are still covered by another object.
Frustum PointNets uses two variants of PointNets:
Segmentation network: detects the 3D mask of the object of interest, i.e. instance segmentation.
Regression network: estimates the amodal 3D bounding box.
Frustum PointNets lifts the 2D image region to a 3D point cloud and then uses 3D techniques.
GOAL: Using **RGB-D** data, **classify** and **localize** objects in 3D space. We do so by:
1. Converting the RGB-D data into 3D data.
2. Using the PointNet model architecture, with two variations, to perform classification and amodal 3D box estimation.
The object is represented by: (x, y, z) for the center, (w, h, l) for the object dimensions, and, for orientation, just the heading angle theta (there are also the azimuth angle and another angle).
Frustum Proposal Generation
With a known camera projection matrix, a 2D bounding box can be lifted to a frustum that defines a 3D search space for the object.
Q: Do we not lift the whole image to create a point cloud?
The frustums that we create from 2D boxes might not align with the image plane; they may point in different directions. This results in the frustum point clouds showing large variation in placement.
Solution: We rotate the frustums towards a center view such that the center axis of the frustum is orthogonal to the image plane.
For training purposes, the net uses an FPN-based model trained on ImageNet classification and the COCO object detection dataset, further fine-tuned on KITTI.
3D Instance Segmentation
Ideally, we now have a 2D image region and its 3D frustum. One way to process this data is to directly regress 3D object locations from the depth map using 2D CNNs, but because of occlusion and background clutter this is not ideal. It is easier to segment in a 3D point cloud than in a 2D image or depth map. Similar to Mask R-CNN, we perform binary classification of each point in the frustum for instance segmentation. We do residual-based 3D localization, where we predict the 3D bounding box center in a local coordinate system.
3D Instance Segmentation PointNet
The PointNet takes in the point cloud inside the frustum and predicts, for each point, the probability that it belongs to the object. The points could be occluded in another orientation because of other objects, vegetation, etc. So we are teaching the PointNet model not only to classify the object correctly, but also to detect the object across such variations.
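As a rough illustration of the symmetric max-pooling aggregation and the local/global feature concatenation described above (a toy sketch with made-up layer sizes and random weights, not the paper's implementation):
###Code
# Toy sketch (not the official PointNet code): a shared MLP h applied to each point,
# followed by a symmetric max-pool, gives a permutation-invariant global feature.
import numpy as np

def shared_mlp(points, w, b):
    # points: (n, 3); w: (3, k); b: (k,) -> per-point (local) features of shape (n, k)
    return np.maximum(points @ w + b, 0)  # one ReLU perceptron layer shared across points

rng = np.random.default_rng(0)
w, b = rng.normal(size=(3, 16)), np.zeros(16)   # placeholder weights for h
cloud = rng.normal(size=(8, 3))                 # a toy point cloud with 8 points

local_feats = shared_mlp(cloud, w, b)           # (8, 16) per-point features
global_feat = local_feats.max(axis=0)           # (16,) symmetric max-pool over the points

# Permutation invariance: shuffling the points leaves the global feature unchanged
shuffled = cloud[rng.permutation(len(cloud))]
assert np.allclose(shared_mlp(shuffled, w, b).max(axis=0), global_feat)

# Local + global aggregation: concatenate the global feature to every per-point feature
combined = np.concatenate([local_feats, np.tile(global_feat, (len(cloud), 1))], axis=1)
print(combined.shape)  # (8, 32)
###Output
_____no_output_____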
###Code
###Output
_____no_output_____
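###Markdown
A rough sketch of the frustum normalization mentioned above (a toy example under an assumed camera frame with x to the right, y down and z forward; not the paper's code): rotate the frustum points about the vertical axis by the azimuth of the frustum center, so the frustum's central axis lines up with the camera's forward (+z) axis.
###Code
# Toy frustum rotation to a "center view" (assumed coordinate convention, illustrative only)
import numpy as np

def rotate_to_center_view(points, frustum_center):
    """Rotate points about the y-axis so the frustum center direction maps onto +z."""
    angle = np.arctan2(frustum_center[0], frustum_center[2])  # azimuth of the frustum center
    c, s = np.cos(angle), np.sin(angle)
    rot_y = np.array([[c, 0.0, -s],
                      [0.0, 1.0, 0.0],
                      [s, 0.0, c]])
    return points @ rot_y.T

rng = np.random.default_rng(1)
center = np.array([2.0, 0.0, 10.0])                       # frustum pointing off to the right
frustum_points = center + rng.normal(scale=0.5, size=(100, 3))
normalized = rotate_to_center_view(frustum_points, center)
print(normalized.mean(axis=0))  # x-mean is ~0: the frustum axis is now roughly aligned with +z
###Output
_____no_output_____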
###Markdown
Pipeline: Look into:
- How to create 3D point clouds from images
- How to pass those point clouds into the model; what do the LiDAR data processing models look like?
- A bit more information on object detection losses.
###Code
from typing import List

class Solution:
    def containsDuplicate(self, nums: List[int]) -> bool:
        # A duplicate exists when the set of unique values is smaller than the list
        num_set = set(nums)
        return len(nums) > len(num_set)

nums = [1, 2, 3, 1]
print(Solution().containsDuplicate(nums))  # True
###Output
_____no_output_____ |
code/BERT/knn.ipynb | ###Markdown
kNN over training sentences: in this notebook, we first extract each (training) sentence's embedding, then use kNN to find each sentence's nearest neighbours (NN).
###Code
import os
import numpy as np
from read_data import InputExample,read_examples_from_file_knn
from sentence_transformers import SentenceTransformer
from sklearn.cluster import KMeans
from sklearn import metrics
import matplotlib.pyplot as plt
import pickle
import faiss
###Output
_____no_output_____
###Markdown
conll2003 1 load data
###Code
num_examples = 14042
data_dir = '../data/conll2003'
train_examples = read_examples_from_file_knn(data_dir=data_dir,
train_examples=num_examples,
mode='train')
###Output
_____no_output_____
###Markdown
2 build sentence emb
###Code
train_examples[0:700][-1].words
corpus = []
for example in train_examples:
corpus.append(' '.join(example.words))
embedder = SentenceTransformer('bert-base-nli-mean-tokens')
corpus_embeddings = embedder.encode(corpus)
corpus_embeddings_ = np.array(corpus_embeddings)
###Output
_____no_output_____
###Markdown
3 knn search
###Code
# sent_id starts from 0
num_examples = 700
corpus_embeddings = corpus_embeddings_[0:num_examples]
d = 768
nb = corpus_embeddings.shape[0]
nq = corpus_embeddings.shape[0]
xb = corpus_embeddings
xq = corpus_embeddings
index = faiss.IndexFlatL2(d) # build the index
print(index.is_trained)
index.add(xb) # add vectors to the index
print(index.ntotal)
k = 20 # we want to see 20 nearest neighbors
D, I = index.search(xq, k) # actual search
###Output
_____no_output_____
###Markdown
4. save the index
###Code
data_dir = '../data/conll2003'
file_path = os.path.join(data_dir, 'sent_id_knn{}.pkl'.format(len(corpus_embeddings)))
pickle.dump( I, open( file_path, "wb" ) )
file_path
###Output
_____no_output_____ |
interactive dashboard/dashboard.ipynb | ###Markdown
constants
###Code
IPC = 'A'
YEAR_APC_WIDTH = 900
YEAR_HEIGHT = 300
APC_HEIGHT = 350
COUNTRY_WIDTH = 500
ORG_WIDTH = 400
COUNTRY_ORG_HEIGHT = 350
WORD_HEIGHT =350
WORD_WIDTH = 500
WORD_CLOUD_WIDTH = 400
YEAR = 'data/year_ipc_count.csv'
APPLICANTS = 'data/applicants.csv'
WORD = 'data/word_top.csv'
WORD_IMG = 'data/word_img.csv'
COUNTRY = 'data/country_top10.csv'
ORG = 'data/org.csv'
###Output
_____no_output_____
###Markdown
data
###Code
df_year = pd.read_csv(YEAR)
df_apc = pd.read_csv(APPLICANTS)
df_org = pd.read_csv(ORG)
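# Convert each IPC section's organisation counts into wedge angles (fractions of 2*pi) for the pie chart below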
for i in ['A','B','C','D','E','F','G','H','default']:
df_org[i] = df_org[i+'_count']/df_org[i+'_count'].sum()*2*pi
df_org['color']=linear_palette(Viridis256, 12)
df_word = pd.read_csv(WORD)
df_country = pd.read_csv(COUNTRY)
df_word_img = pd.read_csv(WORD_IMG)
###Output
_____no_output_____
###Markdown
time series
###Code
p_year = figure(plot_width=YEAR_APC_WIDTH, plot_height=YEAR_HEIGHT, tools='pan,wheel_zoom,save,reset', toolbar_location='above')
p_year.title.text = 'year & IPC count'
source_year = ColumnDataSource(df_year)
p_year.line(source=source_year, x='year', y= IPC, line_width=1.8, color='#05445E', alpha=0.4)
hover = HoverTool(mode='vline')
hover.tooltips = [('Year','@year'),('Count', f'@{IPC}')]
p_year.add_tools(hover)
###Output
_____no_output_____
###Markdown
top10 countries
###Code
source_country = ColumnDataSource(df_country)
dot_country = figure(title="top 10 country ranking", tools='pan,wheel_zoom,save,reset', toolbar_location='above',
y_range=source_country.data[f'country_{IPC}'], plot_height = COUNTRY_ORG_HEIGHT , plot_width = COUNTRY_WIDTH)
dot_country.ygrid.grid_line_color = None
d = dot_country.segment(0, f'country_{IPC}', IPC, f'country_{IPC}', line_width=2, line_color="#7f7f7f", line_alpha=0.6, source=source_country)
c = dot_country.circle(x=IPC, y=f'country_{IPC}', size=12, fill_color="#0ba28d", fill_alpha=1, line_color="#0ba28d", line_width=2, source=source_country)
from bokeh.models import GlyphRenderer, Circle
grs = c.select(dict(type=GlyphRenderer))
for glyph in grs:
if isinstance(glyph.glyph, Circle):
circle_renderer = glyph
hover_country = HoverTool(renderers = [circle_renderer], mode='hline')
hover_country.tooltips = f'@country_{IPC}: @{IPC}'
dot_country.add_tools(hover_country)
###Output
_____no_output_____
###Markdown
organizations distribution
###Code
source_org = ColumnDataSource(df_org)
p_org = figure(plot_height=COUNTRY_ORG_HEIGHT, plot_width=ORG_WIDTH, title='organizations distribution',
toolbar_location='above', tools=['pan', 'wheel_zoom', 'save', 'reset', 'hover'], tooltips=f'@org: @{IPC}_count',
x_range=(-0.52,0.85))
p_org.wedge(x=0, y=1, radius=0.4, start_angle=cumsum(IPC, include_zero=True), end_angle=cumsum(IPC),
line_color="white", fill_color='color', fill_alpha=0.5, legend_field='org_abr', source=source_org)
p_org.axis.axis_label=None
p_org.axis.visible=False
p_org.grid.grid_line_color = None
p_org.legend.label_text_font_size = '6pt'
p_org.legend.border_line_color = None
###Output
_____no_output_____
###Markdown
applicants
###Code
source_apc = ColumnDataSource(df_apc)
x = source_apc.data[f'applicant_{IPC}_abr']
y = source_apc.data[IPC]
p_apc = figure(x_range=x.tolist(), x_axis_label='applicants', plot_width=YEAR_APC_WIDTH, plot_height=APC_HEIGHT,
tools='pan,wheel_zoom,save,reset',
toolbar_location='above',
title='top 15 applicants & patents count')
p_apc.xgrid.grid_line_color = None
p_apc.vbar(source=source_apc, x=f'applicant_{IPC}_abr', top=IPC, width=0.4,
fill_color=factor_cmap(
f'applicant_{IPC}_abr',
palette=linear_palette(Blues256, 20),
factors=x
),
fill_alpha=0.6,
color=None
)
hover_apc = HoverTool(mode='vline')
hover_apc.tooltips = f'@applicant_{IPC}: @{IPC}'
p_apc.add_tools(hover_apc)
###Output
_____no_output_____
###Markdown
word cloud
###Code
url = 'https://drive.google.com/file/d/1wq27812YyCCviiEoPiE37EfqHDpoG1HQ/view?usp=sharing'
source_word_img = ColumnDataSource(df_word_img)
p_word = figure(title='word cloud', plot_height = WORD_HEIGHT , plot_width = WORD_CLOUD_WIDTH, tools = 'save', toolbar_location='above')
p_word.image_url(source_word_img.data[IPC], x=0, y=1, w=2, h=1, global_alpha=0.7)
p_word.axis.visible = False
p_word.xgrid.grid_line_color = None
p_word.ygrid.grid_line_color = None
###Output
_____no_output_____
###Markdown
top10 words
###Code
factors = df_word[f'word_{IPC}'].tolist()
x = df_word[IPC].tolist()
source_word = ColumnDataSource(df_word)
dot = figure(title="top 10 word ranking", tools='pan,wheel_zoom,save,reset', toolbar_location='above',
y_range=factors, plot_height = WORD_HEIGHT , plot_width = WORD_WIDTH)
dot.ygrid.grid_line_color = None
dot.segment(0, f'word_{IPC}', IPC, f'word_{IPC}', line_width=2, line_color="#7f7f7f", line_alpha=0.6, source=source_word)
dot.circle(x=IPC, y=f'word_{IPC}', size=12, fill_color="#0ca29b", fill_alpha=1, line_color="#0ca29b", line_width=2, source=source_word)
hover_word = HoverTool(mode='hline')
hover_word.tooltips = f'"@word_{IPC}": @{IPC}'
dot.add_tools(hover_word)
###Output
_____no_output_____
###Markdown
layout
###Code
layout = column(p_year, row(dot_country, p_org), p_apc, row(dot, p_word))
show(layout)
###Output
_____no_output_____ |
MNIST using CNN/MNIST using CNN.ipynb | ###Markdown
Deep Learning Worksheet 5&6: MNIST using CNN
Created by: Shubhnoor Gill, UID: 18BCS6061, B.E. CSE (AIML-1)/B
**Problem:** MNIST is a simple computer vision dataset. It consists of images of handwritten digits, along with a label for each image telling us which digit it is. The MNIST data is split into two parts: 60,000 training images and 10,000 test images. Each image is 28 pixels by 28 pixels.
**Objective:** In this notebook, I will build a simple CNN model and then progressively more complex models, and compare the loss and accuracy of each model.
**Target:** Classify the given handwritten digit using Convolutional Neural Networks (CNN).
**NOTE:** Each step is explained via comments.
Data Loading and Data Understanding: In this step, I will load the MNIST dataset from keras.datasets and analyse it, after importing the required libraries.
Importing important libraries
###Code
# To deal with numpy arrays
import numpy as np
# To randomise the selection
import random as r
# Keras abstraction for creating models with a stack of layers added squentially
from keras.models import Sequential
# Different layers(explained further in notebook)
from keras.layers import Dense, Dropout, Flatten, Activation
from keras.layers.convolutional import Conv2D, MaxPooling2D
# To use the function to_categorical for converting class labels (from digits) to one-hot encoded vectors
from keras.utils import np_utils
# Load the MNIST dataset from keras.datasets
from keras.datasets import mnist
# To draw plots
import matplotlib.pyplot as plt
###Output
Using TensorFlow backend.
###Markdown
Loading the MNIST dataset
###Code
# Load the dataset into train and test sets
(x_train ,y_train), (x_test, y_test) = mnist.load_data()
###Output
_____no_output_____
###Markdown
Understand and explore the data
###Code
# Plot the images in the dataset
j=[132,2050,4268,7523,6523,8400] # making a list of some random indexes
plt.figure(figsize=(15,10)) # Changing the plot figure size
for i in range(1,7):
plt.subplot(2,3,i) # To plot subplots
plt.title("Digit is:"+ str(y_train[j[i-1]])) # Giving the title to subplots
plt.imshow(x_train[j[i-1]]) #Plotting the subplot
# Check the shape of the data
print("Train data:", x_train.shape,"\n Labels:", y_train.shape)
print("Test data:", x_test.shape,"\n Labels:", y_test.shape)
# Check type of data
print("x_train: ",type(x_train))
print("y_train: ",type(y_train))
print("x_test: ",type(x_test))
print("y_test: ",type(y_test))
###Output
x_train: <class 'numpy.ndarray'>
y_train: <class 'numpy.ndarray'>
x_test: <class 'numpy.ndarray'>
y_test: <class 'numpy.ndarray'>
###Markdown
**From above we can observe that there are 60,000 training and 10,000 test grayscale images, each of size 28 x 28 and stored as a 2D array.**
Data Preparation: In this step we will perform the following:
- Selecting a sample of data from the given dataset
- Converting the data to float format
- Rescaling (normalisation)
- Reshaping the data
Select a sample from the given dataset: Training on all 60,000 images would take a lot of time, so I will take 25,000 random images from the given dataset.
###Code
# Select a sample of only 25000 images for training
i = np.random.randint(x_train.shape[0], size=25000) # To select 25000 random indices from the dataset
x_train = x_train[i, :]
y_train = y_train[i]
print("New Train data dimensions:")
print(x_train.shape)
print(y_train.shape)
###Output
New Train data dimensions:
(25000, 28, 28)
(25000,)
###Markdown
Converting data to float format: The pixels are originally stored as integers, so we convert them to float.
###Code
# Pixel type before converting to float
x_train.dtype
# Pixel type after converting to float
# cast the numpy pixel arrays to float32
x_train = x_train.astype('float32')
x_test=x_test.astype('float32')
x_train.dtype # checking the type of data
###Output
_____no_output_____
###Markdown
Normalizing data: The value of each pixel is between 0 and 255, so we rescale each pixel by dividing by 255 so that the range becomes 0 to 1.
###Code
# Rescale pixel values from the 0-255 range to the 0-1 range
x_train= x_train/255.0
x_test= x_test/255.0
###Output
_____no_output_____
###Markdown
Reshaping the data: Our x_train data needs to be of shape (25000, 28, 28, 1), whereas y_train needs to be of shape (25000, 10), where each image's label is represented as a 10-d one-hot encoded vector. We perform one-hot encoding on y_train and y_test using the to_categorical method.
###Code
# specify input dimensions of each image
img_rows, img_cols = 28, 28
input_shape = (img_rows, img_cols, 1)
class_num = 10 # Number of classes or digit labels in dataset
# Reshaping the data
x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols,1) # 1----> channel-----> grayscale
x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols,1)
print("New Train data dimensions:")
print(x_train.shape)
print(x_test.shape)
###Output
New Train data dimensions:
(25000, 28, 28, 1)
(10000, 28, 28, 1)
###Markdown
Perform one hot encoding
###Code
# Converts a class vector (integers)- class labels to binary class matrix or one-hot encoded vectors
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)
class_num = y_test.shape[1]
print("One -hot encoded class label dimensions: ",y_train.shape)
###Output
One -hot encoded class label dimensions: (25000, 10)
###Markdown
Build 1st CNN Model
I will build a very simple CNN model with:
- 1 convolution layer
- 1 MaxPool layer
- 1 Flatten layer
- 1 Dense layer (Softmax)
Other hyperparameters used:
- Activation function: relu
- Activation function used in output layer: Softmax
- Loss: categorical cross-entropy
- Optimizer: Adam
###Code
# Creating first sequential model
model1 = Sequential()
# Adding a keras convolutional layer called Conv2D
# Filter size = 4x4
# input_shape = (img_rows, img_cols, 1)
model1.add(Conv2D(100, (4,4), input_shape=input_shape, padding='same', activation='relu'))
# Add maxpool layer with pool size 2x2
model1.add(MaxPooling2D(pool_size=(2, 2)))
# Flatten the layer
model1.add(Flatten())
# Add softmax layer
model1.add(Dense(class_num ,activation = 'softmax'))
###Output
_____no_output_____
###Markdown
Compile Model1
###Code
model1.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
print(model1.summary())
###Output
Model: "sequential_1"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d_1 (Conv2D) (None, 28, 28, 100) 1700
_________________________________________________________________
max_pooling2d_1 (MaxPooling2 (None, 14, 14, 100) 0
_________________________________________________________________
flatten_1 (Flatten) (None, 19600) 0
_________________________________________________________________
dense_1 (Dense) (None, 10) 196010
=================================================================
Total params: 197,710
Trainable params: 197,710
Non-trainable params: 0
_________________________________________________________________
None
###Markdown
Fit and Evaluate the model1
###Code
batch_size = 2000
epochs = 10
# Train the model1
hist_1 = model1.fit(x_train, y_train, verbose = 1, validation_data = (x_test, y_test), epochs = epochs, batch_size = batch_size)
# Evaluate the model on test data
scores_1 = model1.evaluate(x_test,y_test,verbose = 1)
print("Accuracy:%.2f%%" % (scores_1[1]*100))
print("Loss:%.2f%%" % (scores_1[0]*100))
###Output
10000/10000 [==============================] - 2s 248us/step
Accuracy:96.43%
Loss:12.80%
###Markdown
Build 2nd CNN Model
I will build a deeper CNN model with:
- 2 convolution layers
- 1 MaxPool layer
- 1 Dropout layer
- 1 Flatten layer
- 1 Dense layer
- 1 Dropout layer
- 1 Dense layer (Softmax)
Other hyperparameters used:
- Activation function: relu
- Activation function used in output layer: Softmax
- Loss: categorical cross-entropy
- Optimizer: Adam
###Code
# Creating second sequential model
model2 = Sequential()
# Adding a keras convolutional layer called Conv2D
# Filter size = 3x3
# input_shape = (img_rows, img_cols, 1)
model2.add(Conv2D(64,kernel_size=(3,3),input_shape=input_shape,activation='relu'))
# Second convolution layer
model2.add(Conv2D(128,(3,3),activation='relu'))
# Add maxpool layer with kernel size 2x2
model2.add(MaxPooling2D(pool_size=(2,2)))
# Add dropout layer which doesn't alter the output shape and has no trainable parameters
model2.add(Dropout(0.25))
# Add flatten layer
model2.add(Flatten())
# Add dense layer
model2.add(Dense(224,activation='relu'))
# Add dropout layer which drops few neurons to prevent overfitting of data
model2.add(Dropout(0.5))
# Add softmax layer
model2.add(Dense(class_num,activation='softmax'))
###Output
_____no_output_____
###Markdown
Compile model2
###Code
# Compile model2
model2.compile(loss='categorical_crossentropy',
optimizer = 'adam',
metrics=['accuracy'])
# Model summary
print(model2.summary())
###Output
Model: "sequential_2"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d_2 (Conv2D) (None, 26, 26, 64) 640
_________________________________________________________________
conv2d_3 (Conv2D) (None, 24, 24, 128) 73856
_________________________________________________________________
max_pooling2d_2 (MaxPooling2 (None, 12, 12, 128) 0
_________________________________________________________________
dropout_1 (Dropout) (None, 12, 12, 128) 0
_________________________________________________________________
flatten_2 (Flatten) (None, 18432) 0
_________________________________________________________________
dense_2 (Dense) (None, 224) 4128992
_________________________________________________________________
dropout_2 (Dropout) (None, 224) 0
_________________________________________________________________
dense_3 (Dense) (None, 10) 2250
=================================================================
Total params: 4,205,738
Trainable params: 4,205,738
Non-trainable params: 0
_________________________________________________________________
None
###Markdown
Fit and evaluate the model2
###Code
batch_size = 2000
epochs = 10
# Train the model2
hist_2 = model2.fit(x_train,
y_train,
batch_size = batch_size,
epochs = epochs,
verbose = 1,
validation_data = (x_test, y_test))
# Evaluate the model2 on test data
scores_2 = model2.evaluate(x_test,y_test,verbose = 1)
print("Accuracy:%.2f%%" % (scores_2[1]*100))
print("Loss:%.2f%%" % (scores_2[0]*100))
###Output
10000/10000 [==============================] - 7s 704us/step
Accuracy:98.51%
Loss:4.53%
###Markdown
Build 3rd CNN Model
I will build a more complex CNN model with:
- 2 convolution layers
- 1 MaxPool layer
- 1 Dropout layer
- 1 convolution layer
- 1 MaxPool layer
- 1 Dropout layer
- 1 Flatten layer
- 1 Dense layer
- 1 Dropout layer
- 1 Dense layer
- 1 Dropout layer
- 1 Dense layer (Softmax)
Other hyperparameters used:
- Activation function: relu
- Activation function used in output layer: Softmax
- Loss: categorical cross-entropy
- Optimizer: Adam
###Code
# Create a sequential model3
model3 = Sequential()
# Add first convolution layer with kernel size of 3x3 and stride 1x1 whereas with same padding
model3.add(Conv2D(50, kernel_size=(3,3), strides=(1,1), padding='same', activation='relu', input_shape=(28,28, 1)))
# Add second convolution layer with kernel size of 3x3 and stride 1x1 whereas with same padding
model3.add(Conv2D(75, kernel_size=(3,3), strides=(1,1), padding='same', activation='relu'))
# Add maxpool layer with kernel size of 2x2
model3.add(MaxPooling2D(pool_size=(2,2)))
# Add dropout layer
model3.add(Dropout(0.25))
# Add third convolution layer with kernel size of 3x3 and stride 1x1 whereas with same padding
model3.add(Conv2D(125, kernel_size=(3,3), strides=(1,1), padding='same', activation='relu'))
# Add maxpool layer with kernel size of 2x2
model3.add(MaxPooling2D(pool_size=(2,2)))
# Add dropout layer
model3.add(Dropout(0.25))
# flatten output of conv
model3.add(Flatten())
# Add dense layer
model3.add(Dense(150, activation='relu'))
# Add dropout layer
model3.add(Dropout(0.4))
# Add dense layer
model3.add(Dense(125, activation='relu'))
# Add dropout layer
model3.add(Dropout(0.3))
# Add softmax output layer
model3.add(Dense(class_num, activation='softmax'))
###Output
_____no_output_____
###Markdown
Compile model3
###Code
# compiling the sequential model
model3.compile(loss='categorical_crossentropy', metrics=['accuracy'], optimizer='adam')
model3.summary()
###Output
Model: "sequential_3"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d_4 (Conv2D) (None, 28, 28, 50) 500
_________________________________________________________________
conv2d_5 (Conv2D) (None, 28, 28, 75) 33825
_________________________________________________________________
max_pooling2d_3 (MaxPooling2 (None, 14, 14, 75) 0
_________________________________________________________________
dropout_3 (Dropout) (None, 14, 14, 75) 0
_________________________________________________________________
conv2d_6 (Conv2D) (None, 14, 14, 125) 84500
_________________________________________________________________
max_pooling2d_4 (MaxPooling2 (None, 7, 7, 125) 0
_________________________________________________________________
dropout_4 (Dropout) (None, 7, 7, 125) 0
_________________________________________________________________
flatten_3 (Flatten) (None, 6125) 0
_________________________________________________________________
dense_4 (Dense) (None, 150) 918900
_________________________________________________________________
dropout_5 (Dropout) (None, 150) 0
_________________________________________________________________
dense_5 (Dense) (None, 125) 18875
_________________________________________________________________
dropout_6 (Dropout) (None, 125) 0
_________________________________________________________________
dense_6 (Dense) (None, 10) 1260
=================================================================
Total params: 1,057,860
Trainable params: 1,057,860
Non-trainable params: 0
_________________________________________________________________
###Markdown
Fit and evaluate model3
###Code
batch_size = 2000
epochs = 10
# Train the model3
hist_3 = model3.fit(x_train,
y_train,
batch_size = batch_size,
epochs = epochs,
verbose = 1,
validation_data = (x_test, y_test))
# Evaluate the model3 on test data
scores_3 = model3.evaluate(x_test,y_test,verbose = 1)
print("Accuracy:%.2f%%" % (scores_3[1]*100))
print("Loss:%.2f%%" % (scores_3[0]*100))
###Output
10000/10000 [==============================] - 7s 693us/step
Accuracy:98.42%
Loss:4.65%
###Markdown
Compare models: In this part, I will compare the three models and plot their training/validation accuracy and loss curves.
###Code
import pandas as pd # To create a dataframe
test_loss=[scores_1[0],scores_2[0],scores_3[0]] # test loss list
test_acc=[scores_1[1],scores_2[1],scores_3[1]] # test accuracy list
# Creating a dataframe to store the model metrics
df_scores=pd.DataFrame({'Test Loss':test_loss,'Test Accuracy':test_acc},index=['model1','model2','model3'])
df_scores
# Create a list of model history
hists = [hist_1, hist_2, hist_3]
# Function to plot the history of all CNN models
def plot_history(hists, attribute, axis=(-1,10,0.85,0.94), loc='lower right'):
title={'val_loss': 'Validation loss', 'loss': 'Training loss', 'val_accuracy': 'Validation accuracy', 'accuracy': 'Training accuracy'}
num_hists=len(hists)
plt.figure(figsize=(12, 8)) # To modify plot size
plt.axis(axis) # To plot on axis
for i in range(num_hists):
plt.plot(hists[i].history[attribute]) # Plot the history
plt.title(title[attribute], fontsize=25) # Add the title to plot
plt.ylabel(title[attribute]) # Add label on y-axis
plt.xlabel('Epochs') # Add label on y-axis
plt.legend(['model1','model2','model3'], loc=loc) # Show the legend
plt.show() # Display the plot
# Plot training accuracy
plot_history(hists, attribute='accuracy', axis=(-1,10,0.725,1), loc='lower right')
# Plot validation accuracy
plot_history(hists, attribute='val_accuracy',axis=(-1,10,0.88,0.99), loc='lower right')
# Plot training loss
plot_history(hists, attribute='loss', axis=(-1,10,0.01,0.5), loc='upper right')
# Plot validation loss
plot_history(hists, attribute='val_loss', axis=(-1,10,0.01,0.35), loc='upper right')
###Output
_____no_output_____ |
scripts/terraclimate/02_terraclimate_regrid.ipynb | ###Markdown
Regridding TERRACLIMATE with xesmf
_by Joe Hamman (CarbonPlan), June 29, 2020_
This notebook regrids the raw TERRACLIMATE dataset onto the CONUS 4000m grid and writes the result to Zarr.
**Inputs:**
- Cloud copy of the raw TERRACLIMATE Zarr store and the CONUS 4000m target grid
**Outputs:**
- Cloud copy of the regridded TERRACLIMATE dataset (CONUS, 4000m)
**Notes:**
- Regridding is done with xesmf; all variables currently use bilinear weights (conservative regridding for flux-like variables is commented out pending a fix).
###Code
!pip install -U xarray==0.16.0 --no-deps
import fsspec
import xarray as xr
import xesmf as xe
import numpy as np
from dask.diagnostics import ProgressBar
variables = {
# 'conservative': [
# "aet",
# "def",
# "pet",
# "ppt",
# "q",
# "srad",
# ],
"bilinear": [
"tmax",
"tmin",
"pdsi",
"vap",
"vpd",
"ws",
"soil",
"swe",
# move to conservative after scrable is fixed
"aet",
"def",
"pet",
"ppt",
"q",
"srad",
"awc",
"elevation",
]
}
# options
name = "terraclimate"
raw_location = f"gs://carbonplan-data/raw/terraclimate/4000m/raster.zarr"
target_grid = "gs://carbonplan-data/processed/grids/conus/4000m/domain.zarr"
# getting weird errors when writing to carbonplan-data
target_location = (
f"gs://carbonplan-data/processed/{name}/conus/4000m/raster.zarr"
)
mapper = fsspec.get_mapper(target_grid)
target_ds = xr.open_zarr(
mapper, consolidated=True
) # .rename({'xc': 'lon', 'yc': 'lat'})
target_ds
mapper = fsspec.get_mapper(raw_location)
ds = xr.open_zarr(mapper, consolidated=True)
ds
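# Build a global source grid at TERRACLIMATE's 1/24-degree resolution (360 / 8640 columns); a tiny epsilon is added to the step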
step = 360 / 8640 + 1e-9
global_grid = xe.util.grid_global(step, step)
global_grid = global_grid.isel(y=slice(None, None, -1)).isel(
y_b=slice(None, None, -1)
)
global_grid["lat_b"].values = np.clip(global_grid["lat_b"].values, -90, 90)
display(global_grid)
# check that this grid is a drop in replacement for the source grid
assert np.abs(global_grid.lat.isel(x=0).values - ds.lat.values).max() < 1e-5
assert np.abs(global_grid.lon.isel(y=0).values - ds.lon.values).max() < 1e-5
assert np.abs(global_grid.lat).max().item() <= 90
assert np.abs(global_grid.lat_b).max().item() <= 90
# rename grid variables
source_ds = ds.rename({"lon": "x", "lat": "y"}).assign_coords(
coords=global_grid.coords
)
regridders = {}
for method in variables:
regridders[method] = xe.Regridder(
source_ds, target_ds, method, reuse_weights=True
)
temp = []
for method, var_list in variables.items():
regridder = regridders[method]
temp.append(regridder(ds[var_list].chunk({"lat": -1, "lon": -1})))
ds_out = xr.merge(temp, compat="override")
ds_out
# fs = fsspec.get_filesystem_class('gs')()
# fs.rm(target_location, recursive=True)
import dask
from multiprocessing.pool import ThreadPool
with dask.config.set(scheduler="threads", pool=ThreadPool(3)):
with ProgressBar():
mapper2 = fsspec.get_mapper(target_location)
ds_out.to_zarr(mapper2, mode="w", consolidated=True)
mapper2 = fsspec.get_mapper(target_location)
import zarr
zarr.consolidate_metadata(mapper2)
###Output
_____no_output_____ |
w2/w2-02 DDM DCF Q&A.ipynb | ###Markdown
Write the DDM model as a Python function.
D : dividend
r : expected return
g : growth rate
$$p = \frac{D}{r-g}$$
###Code
def ddm(d, r, g):  # declare the function
    p = d / (r - g)
    return(p)

d = 1000
r = 0.02
g = 0.01
ddm(d, r, g)
###Output
_____no_output_____
###Markdown
Write the DCF model as a Python function.
CF : cash flow
r : expected return
$$p = \frac{CF_1}{(1+r)^1} + \frac{CF_2}{(1+r)^2} + ... + \frac{CF_n}{(1+r)^n}$$
###Code
def dcf(r, *cf): # prefixing a parameter with * lets the function accept any number of inputs
n = 1
p = 0
    for c in cf: # iterate over the cf values, however many there are
p = p + (c / (1+r)**n)
n = n + 1
return(p)
dcf(0.02, 1000, 1000, 1000)
###Output
_____no_output_____ |
Day-9-challenge.ipynb | ###Markdown
Day 9 challenge: Recursion. Please click on the link to view the challenge on the HackerRank website: https://www.hackerrank.com/challenges/30-recursion/problem
###Code
import sys
def factorial(n):
#base case
if (n <= 1):
        return 1 # base case: 0! = 1! = 1 (1 is the multiplicative identity)
#recursive case
else:
return n * (factorial(n - 1))
if __name__ == "__main__":
n = int(input().strip())
result = factorial(n)
print(result)
# Output = 5 ! = 5 * 4 * 3 * 2 * 1 = 120
###Output
5
120
###Markdown
Recursion Concept:
- The process of defining a function or calculating a number by the repeated application of an algorithm.
- Base case: when we stop repeating the algorithm.
- Recursive case: repeating the algorithm.
- Example: f(f(f(x))) where f(x) = x + 10. Let x = 10:
  f(10) = 10 + 10 = 20, so we need f(f(20));
  f(20) = 20 + 10 = 30, so we need f(30);
  f(30) = 30 + 10 = 40;
  therefore f(f(f(x))) = 40.
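A quick sanity check of the worked example above (assuming f(x) = x + 10 and x = 10):
###Code
def f(x):
    return x + 10

# evaluates inside-out: f(10) = 20, then f(20) = 30, then f(30) = 40
print(f(f(f(10))))  # 40
###Output
_____no_output_____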
###Code
def summation(n):
# base case
if (n <= 0):
return 0 # addetive identity property
# recursive case
# 3 + summation(3-1)
# 3 + 2 + summation(2-1)
# 3 + 2 + 1 + summation(0)
# 3 + 2 + 1 = 6
else:
return n + summation(n - 1)
summation(3)
# Factorial of 6! = 6 * 5 * 4 * 3 * 2 * 1 ---> 6 * 5!
def factorial(n):
#base case
if (n <= 1):
        return 1 # base case: 0! = 1! = 1 (1 is the multiplicative identity)
#recursive case
else:
return n * (factorial(n - 1))
factorial(6)
#Exponential
# 6 ^2 ---> 6 * 6 = 36
# 6 * 6 ^ 1 = 36
#In above case n = 6 and p = 2
def exponential(n, p):
if(p <= 0):
return 1 #mul identity
else:
return n * exponential(n, p-1)
exponential(2, 3)
###Output
_____no_output_____ |
S01 - Bootcamp and Binary Classification/SLU18 - Hyperparameter Tuning/Examples notebook.ipynb | ###Markdown
SLU18 - Hyperparameter tuning: Examples notebook --- 1 Load and prepare the data
###Code
import pandas as pd
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
cancer_data = load_breast_cancer()
X = pd.DataFrame(cancer_data["data"], columns=cancer_data["feature_names"])
y = cancer_data.target
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2,
random_state=0)
estimator = DecisionTreeClassifier()
###Output
_____no_output_____
###Markdown
2 Grid search
###Code
from sklearn.model_selection import GridSearchCV
parameters = {'max_depth': range(1, 10),
'max_features': range(1, X.shape[1])}
grid_search = GridSearchCV(estimator, parameters, cv=5, scoring="roc_auc")
grid_search.fit(X_train, y_train)
y_pred = grid_search.predict(X_test)
###Output
_____no_output_____
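###Markdown
(Optional) After fitting, the search object exposes the selected hyperparameters and the best cross-validated score, which are usually worth inspecting before predicting (attribute names as in scikit-learn):
###Code
# Inspect the hyperparameter combination chosen by the grid search
print(grid_search.best_params_)
print(grid_search.best_score_)
###Output
_____no_output_____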
###Markdown
3 Random search
###Code
from scipy.stats import randint
from sklearn.model_selection import RandomizedSearchCV
parameters_dist = {"max_depth": randint(1, 100),
"max_features": randint(1, X.shape[1]),
"class_weight": ["balanced", None]}
random_search = RandomizedSearchCV(estimator, parameters_dist, cv=5, n_iter=250,
random_state=0)
random_search.fit(X_train, y_train)
y_pred = random_search.predict(X_test)
###Output
_____no_output_____ |
notebooks/03_dcj_weddell_sea_ice.ipynb | ###Markdown
Sea ice extent: Weddell Sea region Data prepared by Caroline Holmes (BAS)
###Code
!pip install seaborn
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import datetime as dt
import xarray as xr
import pandas as pd
import os
###Output
_____no_output_____
###Markdown
Next, select a few years to plot, along with the colours to use for those lines
###Code
years_to_plot = [2013, 2016, 2020]
colors_for_lines = ["black", "red", "green"]
###Output
_____no_output_____
###Markdown
First, import the daily, mean, and standard deviation for the Weddell Sea region sea ice extent
###Code
weddell_daily = xr.open_dataset('obs_NASATeam_historical_1_siextentWeddelldaily.nc')
weddell_mean = xr.open_dataset('obs_NASATeam_historical_1_siextentWeddelldaily_mean_19912020.nc')
weddell_std = xr.open_dataset('obs_NASATeam_historical_1_siextentWeddelldaily_sd_19912020.nc')
###Output
_____no_output_____
###Markdown
Next, examine the properties of the daily dataset
###Code
weddell_daily
###Output
_____no_output_____
###Markdown
Plot of daily sea ice extent in the Weddell Sea
###Code
fig, ax = plt.subplots(figsize=(15,10))
weddell_daily['sea_ice_extent'].plot(x='time')
###Output
_____no_output_____
###Markdown
Climatological mean sea ice extent
###Code
fig, ax = plt.subplots(figsize=(10,7))
weddell_mean['sea_ice_extent'].plot(x='day_of_year')
###Output
_____no_output_____
###Markdown
Examine Antarctic-wide sea ice extent
###Code
# load antarctic climatology and daily values
antarctic_climatology = pd.read_csv('S_seaice_extent_climatology_1981-2010_v3.0.csv', skiprows=1)
antarctic_daily = pd.read_csv('S_seaice_extent_daily_v3.0.csv', skiprows=[1])
# remove whitespace from column names
antarctic_climatology.columns = antarctic_climatology.columns.str.replace(' ', '')
antarctic_daily.columns = antarctic_daily.columns.str.replace(' ', '')
# this creates the DataFrame that can be used to calculate day of year
df_date = pd.DataFrame({'year': antarctic_daily.Year.values,
'month': antarctic_daily.Month.values,
'day': antarctic_daily.Day.values})
# the .dt.dayofyear function calculates day of year
DOY = pd.to_datetime(df_date).dt.dayofyear
# insert day of year (DOY) as a new column
antarctic_daily.insert(0,"DOY",DOY)
###Output
_____no_output_____
###Markdown
Plot values for Weddell Sea Plotting prerequisites
###Code
df = weddell_daily.to_dataframe()
# helper function to add arrows and text
def annotate_years(years_to_plot, colors_for_lines, ax=None):
delta = 0
i = -1
for year in years_to_plot:
delta = delta + 20
i = i + 1
nowData=df[df.year==int(year)]
nowExtent=nowData['sea_ice_extent'].values
xy_x = min(50+delta,nowExtent.size)-1
xy_y = nowExtent[xy_x]
nowColor = str(colors_for_lines[i])
ax.annotate(year,
(xy_x,xy_y),
(xy_x-50,xy_y+2),
arrowprops=dict(facecolor=nowColor, shrink=0.05),
fontsize='xx-large',
color=nowColor)
return(ax)
# two standard deviations
x = weddell_mean.day_of_year
y1 = weddell_mean.sea_ice_extent - 2.0*weddell_std.sea_ice_extent
y2 = weddell_mean.sea_ice_extent + 2.0*weddell_std.sea_ice_extent
# one standard deviation
x = weddell_mean.day_of_year
y3 = weddell_mean.sea_ice_extent - weddell_std.sea_ice_extent
y4 = weddell_mean.sea_ice_extent + weddell_std.sea_ice_extent
# mean
ybar = weddell_mean.sea_ice_extent
# example of converting to dataframe, isolating a year
#df[df.year==2020].sea_ice_extent
# plot data
fig, ax = plt.subplots(figsize=(15,10))
sns.set_context("paper")
ax.fill_between(x, y1, y2, color='blue', alpha=.1)
ax.fill_between(x, y3, y4, color='purple', alpha=.1)
ax.plot(x, ybar, color='blue', linewidth=3.0)
# plot individual lines for individual years
year_count = -1
for nyear in years_to_plot:
year_count = year_count + 1
yline = df[df.year==nyear].sea_ice_extent.values
max_length = np.minimum(x.size,yline.size)
ax.plot(x[0:max_length], yline[0:max_length], color=colors_for_lines[year_count], linewidth=2.0)
# manual legend
purple_patch = mpatches.Patch(color='purple', label='1 standard deviation', alpha=.2)
blue_patch = mpatches.Patch(color='blue', label='2 standard deviations', alpha=.1)
plt.legend(handles=[blue_patch,purple_patch], fontsize=18)
# tick label font size
ax.tick_params(axis='x', labelsize=18)
ax.tick_params(axis='y', labelsize=18)
# title, xlabel, and ylabel
plt.title("Daily sea ice extent (millions of square kilometres), Weddell Sea", fontsize=20)
plt.xlabel("Day of the year", fontsize=18)
plt.ylabel("")
# limits of x and y axes
plt.xlim([1,365])
plt.ylim([0.5, 7.5])
# call helper function to annotate years
annotate_years(years_to_plot, colors_for_lines, ax=ax)
# customize (white, dark, whitegrid, darkgrid, ticks)
sns.set_style("darkgrid")
###Output
_____no_output_____
###Markdown
The year 2013 is interesting: it was a bit high in the summer and early spring, but a bit low in autumn and early winter. This is in contrast with the Antarctic-wide extent, which was anomalously high for most of 2013. As suspected, sea ice extent in the Weddell Sea does not necessarily behave in the same way as the Antarctic-wide sea ice extent.
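A rough numerical check of that statement (a sketch only; the day-of-year windows used for "summer" and "autumn" are approximate):
###Code
# Approximate 2013 anomaly relative to the 1991-2020 climatological mean (ybar)
extent_2013 = df[df.year == 2013]['sea_ice_extent'].values
n = min(len(extent_2013), len(ybar))
anomaly_2013 = extent_2013[:n] - ybar.values[:n]
print('Mean anomaly, days 1-60 (summer): %.2f' % anomaly_2013[:60].mean())
print('Mean anomaly, days 90-180 (autumn/early winter): %.2f' % anomaly_2013[90:180].mean())
###Output
_____no_output_____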
###Code
# save figure
fig.savefig('weddell_sea_ice_extent.pdf', format='pdf')
# it's helpful to run "pdfcrop" or a similar command line tool on this PDF afterwards, if available
###Output
_____no_output_____ |
ipynb_python_mec_optim/B02_subwayNYC.ipynb | ###Markdown
Min Cost Flow problem
###Code
#!conda config --add channels conda-forge
#!conda install shapely
#!pip install --user -I matplotlib
#!pip install --user -I geopandas
import gurobipy as grb
import pandas as pd
import numpy as np
import os
import scipy.sparse as sp
%matplotlib notebook
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
from shapely.geometry import Point, LineString
import geopandas as gpd
###Output
Traceback (most recent call last):
File "C:\Users\jmcgn\AppData\Roaming\Python\Python37\site-packages\matplotlib\cbook\__init__.py", line 216, in process
func(*args, **kwargs)
File "C:\Users\jmcgn\AppData\Roaming\Python\Python37\site-packages\matplotlib\animation.py", line 1465, in _stop
self.event_source.remove_callback(self._loop_delay)
AttributeError: 'NoneType' object has no attribute 'remove_callback'
###Markdown
NYC Subway network
###Code
thepath = os.path.join(os.getcwd(),'..')
arcs = pd.read_csv(os.path.join(thepath,'data_mec_optim/networks_subway/NYC/arcs.csv'), sep=',')#.sort_values(by=['route_id'])
nodes = pd.read_csv(os.path.join(thepath, 'data_mec_optim/networks_subway/NYC/nodes.csv'), sep=',')
###Output
_____no_output_____
###Markdown
Stations' characteristics are contained in the nodes dataframe:
###Code
nodes.head()
###Output
_____no_output_____
###Markdown
Routes between stations are contained in arcs dataframe:
###Code
arcs.head()
print(len(nodes))
print(len(arcs))
nb_nodes = len(nodes)
names_nodes = nodes['stop_name'] + ' ' + nodes['route_id']
arcs_list = [(i, j) for i, j in zip(arcs['from_stop_nb'], arcs['to_stop_nb'])]
Phi = -arcs['min_time_elapsed'].values
nb_arcs = len(Phi)
origin_node = 452  # Union Sq (1-based; we subtract 1 later because of Python's 0-indexing)
destination_node = 471 # 59 St
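# Nabla is the (nb_arcs x nb_nodes) arc-node incidence matrix: -1 at each arc's tail node, +1 at its head node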
Nabla = sp.csr_matrix(([-1 for i in range(nb_arcs)], (list(range(nb_arcs)), [i - 1 for i,j in arcs_list])), shape = (nb_arcs,nb_nodes)) + \
sp.csr_matrix(([1 for i in range(nb_arcs)], (list(range(nb_arcs)), [j - 1 for i,j in arcs_list])), shape = (nb_arcs,nb_nodes))
m=grb.Model('NYC Subway')
x = m.addMVar(shape=nb_arcs, name="x")
m.setObjective(Phi @ x, grb.GRB.MAXIMIZE)
# flow-balance right-hand side: -1 at the origin (source), +1 at the destination (sink); node numbers minus 1 for Python's 0-indexing
rhs = np.asarray([-1 if x==(origin_node - 1) else 1 if x==(destination_node - 1) else 0 for x in range(nb_nodes)])
m.addConstr(Nabla.T @ x == rhs, name="Constr")
m.optimize()
def intersection(lst1, lst2):
return list(set(lst1) & set(lst2))
i = origin_node - 1
path_list = [i]
step = 1
if m.status == grb.GRB.Status.OPTIMAL:
print('***Optimal solution***')
print('Minimum distance from', names_nodes[origin_node - 1], 'to',
names_nodes[destination_node - 1], '\n', m.objVal)
print('0 :', names_nodes[origin_node - 1], '(#%d)' % origin_node)
solution = x.X
eqpath = np.argwhere(solution > 0)[:, 0]
while i != destination_node - 1:
leavingi = np.argwhere(Nabla[:,i] == -1)[:,0]
a = intersection(list(leavingi), list(eqpath))[0]
j = np.argwhere(Nabla[a,:] == 1)[0][1]
print(step, ':', names_nodes[i], '(#%d)' % (i + 1), 'to', names_nodes[j], '(#%d)' % (j + 1))
step += 1
path_list.append(j)
i = j
# For reasons I do not understand
path_list = [x + 1 for x in path_list]
geometry_nodes = [Point(xy) for xy in zip(nodes['stop_lon'], nodes['stop_lat'])]
gdf_nodes = gpd.GeoDataFrame(nodes,geometry=geometry_nodes)
gdf_nodes.head()
arcs_coord_int = pd.merge(arcs[['from_stop_id', 'to_stop_id']].rename(index=str, columns={'from_stop_id': 'stop_id'}),
gdf_nodes[['stop_id', 'geometry']].rename(index=str, columns={'geometry': 'from_geometry'}),
on = 'stop_id').rename(index=str, columns={'stop_id': 'from_stop_id'})
arcs_coord = pd.merge(arcs_coord_int.rename(index=str, columns={'to_stop_id': 'stop_id'}),
gdf_nodes[['stop_id', 'geometry']].rename(index=str, columns={'geometry': 'to_geometry'}),
on = 'stop_id').rename(index=str, columns={'stop_id': 'to_stop_id'})
del arcs_coord_int
arcs_coord.head()
geometry_arcs = [LineString(xy) for xy in zip(arcs_coord['from_geometry'],arcs_coord['to_geometry'])]
gdf_arcs = gpd.GeoDataFrame(arcs,geometry=geometry_arcs)
gdf_arcs.head()
def animate(i):
label = 'timestep {0}'.format(i)
print(label)
# Update the line and the axes (with a new xlabel). Return a tuple of
# "artists" that have to be redrawn for this frame.
stop_id = gdf_arcs[gdf_arcs["to_stop_nb"]==path_list[i]]['to_stop_id'].values[0]
stop_to_plot = gdf_nodes[gdf_nodes["stop_id"]==stop_id]
ax.text(stop_to_plot['stop_lon'], stop_to_plot['stop_lat'],stop_to_plot['stop_name'].values[0],
size = 'medium', fontweight='bold')
stop_to_plot.plot(marker = 'o', color = 'green', markersize=50, ax=ax)
ax.set_xlabel(label)
return ax
fig, ax = plt.subplots(figsize=(10,10))
ax.set_xlim([-74.3, -73.7])
ax.set_ylim([40.5, 40.95])
ax.set_yticklabels([])
ax.set_xticklabels([])
gdf_arcs.plot(color = 'lightblue',ax=ax)
gdf_nodes.plot(marker = 'o', color = 'lightgreen', markersize=50, ax=ax)
anim = FuncAnimation(fig, animate, frames = np.arange(0, len(path_list)), interval = 2000, repeat = False)
plt.show()
###Output
_____no_output_____ |
Yandex ML Cup 2021 (Recsys)/gradient_boosting.ipynb | ###Markdown
Data preparation and augmentation
Reading the data from .csv
Some data (such as rubrics and features) are represented as strings of values. We convert them into lists of numbers.
###Code
to_list = lambda rubrics: [int(rubric) for rubric in str(rubrics).split(' ')]
def apply_to_columns(df, columns, func=to_list):
for column in columns:
df.loc[~df[column].isnull(), column] = df.loc[~df[column].isnull(), column].apply(func)
###Output
_____no_output_____
###Markdown
First of all, we will need the data on __users__ and __organisations__, as well as the __reviews__ themselves.
###Code
users = pd.read_csv('data/users.csv')
users['new_user_id'] = users.index
users.head()
test_users = pd.read_csv('data/test_users.csv')
test_users['user_id'].isin(users.user_id).all()
orgs = pd.read_csv('data/organisations.csv')
orgs['new_org_id'] = orgs.index
# create lists
columns = ['rubrics_id', 'features_id']
apply_to_columns(orgs, columns)
orgs.head()
# Create mappings
temp = users.drop('city', axis=1).to_numpy('uint64')
uid_to_new = dict(zip(temp[:, 0], temp[:, 1]))
new_to_uid = dict(zip(temp[:, 1], temp[:, 0]))
temp = orgs[['org_id', 'new_org_id']].to_numpy('uint64')
oid_to_new = dict(zip(temp[:, 0], temp[:, 1]))
new_to_oid = dict(zip(temp[:, 1], temp[:, 0]))
len(users) *len(orgs)
###Output
_____no_output_____
###Markdown
To avoid doing a __join__ every time we need to know which city an organisation or a user comes from, let's add this information to the reviews right away.
###Code
reviews = pd.read_csv('data/reviews.csv', low_memory=False)
# encode users ids as numeric
reviews = reviews.merge(users, on='user_id')
reviews = reviews.rename({'city': 'user_city'}, axis=1)
# # encode orgs ids as numeric
reviews = reviews.merge(orgs[['org_id', 'city', 'new_org_id']], on='org_id')
reviews = reviews.rename({'city': 'org_city'}, axis=1)
# # create lists
columns = ['aspects']
apply_to_columns(reviews, columns)
reviews['is_tourist'] = reviews['user_city'] != reviews['org_city']
reviews
###Output
_____no_output_____
###Markdown
Augmentation: preparation of new features
###Code
from itertools import chain
from collections import Counter
def get_feats_counts(reviews, id_col, feat_name):
"""Returns DataFrame with value counts of a features group with feat_name represented as a list in `reviews`
for each user or org. """
def tokenize(arr):
return Counter(list(chain(*arr)))
saved_idx = reviews.groupby(id_col)[feat_name].first().index
reviews = reviews[reviews[feat_name].notna()]
result = reviews.groupby(id_col)[feat_name]\
.apply(tokenize).unstack(level=1)\
.reindex(saved_idx).fillna(0)
result.columns = [feat_name + str(col) for col in result.columns]
return result
def get_stat_rating(reviews, func, id_col, feat_name='rating'):
"""Returns Series with stat function func applied to ratings either for users
(`id_col=='user_id'`) or for orgs (`id_col=='org_id'`)"""
return reviews.groupby(id_col)[feat_name].agg(func)#.replace(np.nan, 0)
# Sort by two columns due to Pandas sorting differently every time
reviews.sort_values(['ts', 'user_id'], inplace=True)
orgs_rubrics = get_feats_counts(orgs, 'org_id', 'rubrics_id')
orgs_features = get_feats_counts(orgs, 'org_id', 'features_id')
# Preset for experiments
#-----------------------
threshold_day = 1147
revs_for_FE = reviews[(reviews['ts'] < threshold_day)]
# Preset for final submisstion
#-----------------------
#threshold_day = reviews.ts.max()
#revs_for_FE = reviews.copy()
#-----------------------
revs_with_feats = revs_for_FE.merge(orgs[['org_id', 'rubrics_id', 'features_id', 'average_bill']], on='org_id')
user_rubrics = get_feats_counts(revs_with_feats, 'user_id', 'rubrics_id')
#user_features = get_feats_counts(revs_with_feats, 'user_id', 'features_id')
org_mean_rating = get_stat_rating(revs_for_FE, 'mean', 'org_id').rename('org_mean_rating')
org_median_rating = get_stat_rating(revs_for_FE, 'median', 'org_id').rename('org_median_rating')
org_tourists_count = get_stat_rating(revs_for_FE.query("is_tourist == True"),
'size', 'org_id').rename('org_tourists_count')
org_count_rating = get_stat_rating(revs_for_FE, 'size', 'org_id').rename('org_reviews_count')
user_mean_rating = get_stat_rating(revs_for_FE, 'mean', 'user_id').rename('user_mean_rating')
user_median_rating = get_stat_rating(revs_for_FE, 'median', 'user_id').rename('user_median_rating')
user_count_rating = get_stat_rating(revs_for_FE, 'size', 'user_id').rename('user_reviews_count')
user_mean_bill = get_stat_rating(
revs_with_feats, 'mean', 'user_id', feat_name='average_bill').rename('user_mean_bill')
user_median_bill = get_stat_rating(
revs_with_feats, 'median', 'user_id', feat_name='average_bill').rename('user_median_bill')
###Output
_____no_output_____
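###Markdown
A quick sanity check of the helpers above (purely illustrative toy data; the column names mirror the real frames, the ids are made up): `get_feats_counts` turns a list-valued feature column into per-id count columns, which is exactly how `orgs_rubrics` and `user_rubrics` are built.
###Code
# Hypothetical toy frame: two orgs, list-valued rubric ids
toy_orgs = pd.DataFrame({
    'org_id': [1, 1, 2],
    'rubrics_id': [[10, 11], [10], [12]],
})
toy_counts = get_feats_counts(toy_orgs, 'org_id', 'rubrics_id')
# expected: one row per org_id and columns like 'rubrics_id10', 'rubrics_id11', ...
toy_counts
###Output
_____no_output_____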
###Markdown
LightGBM Formation of the dataset with new features This snippet keeps only reviews with a rating of at least 4 and builds an ordered set of the most reviewed orgs in both cities over the 500 days preceding the end of the train period.
###Code
N_BEST_IN_CITY = 4500
reviews = reviews[reviews.rating >= 4]
#threshold_day = reviews.loc[reviews['is_tourist']].iloc[-15000]['ts']
#threshold_day = reviews['ts'].max()
non_eq_cities = reviews[reviews.user_city != reviews.org_city]
non_eq_cities = non_eq_cities.query('ts <= @threshold_day & ts >= @threshold_day - 500')
msk_orgs = non_eq_cities[non_eq_cities['org_city'] == 'msk']['org_id']
msk_orgs = msk_orgs.value_counts().index[:N_BEST_IN_CITY].to_list()
msk_orgs = np.array(msk_orgs, dtype='uint64')
spb_orgs = non_eq_cities[non_eq_cities['org_city'] == 'spb']['org_id']
spb_orgs = spb_orgs.value_counts().index[:N_BEST_IN_CITY].to_list()
spb_orgs = np.array(spb_orgs, dtype='uint64')
best_orgs = np.hstack([msk_orgs, spb_orgs])  # concatenate, not element-wise add, since both are arrays here
def supplement_sample(df, N_POOL = 100, N_NEGATIVE_SAMPLES = 100,
opposite_cities=True):
"""Supplements df with positive samples by N_NEGATIVE_SAMPLES drawn randomly from N_POOL
first best orgs of corresponding city"""
if opposite_cities:
_for_msk_user = spb_orgs
_for_spb_user = msk_orgs
else:
_for_msk_user = msk_orgs
_for_spb_user = spb_orgs
def choose(row):
arr = _for_msk_user if row['user_city'] == 'msk' else _for_spb_user
chosen = np.random.choice(arr[:N_POOL], size=N_NEGATIVE_SAMPLES, replace=False)
return np.setdiff1d(chosen, row['target'])
if 'org_id' in df.columns and 'rating' in df.columns:
users = df.drop(columns=['org_id', 'rating'])
else: users = df.copy()
if users['target'].isna().any():
users['target'] = users['target'].apply(lambda x: tuple(x) if not np.isnan(x).all() else tuple())
else:
users['target'] = users['target'].apply(tuple)
users = users.drop_duplicates()
users['org_ids'] = users.apply(choose, axis=1)
users.drop(columns=['user_city', 'target'], inplace=True)
user_ids = []
org_ids = []
for _, i in users.iterrows():
user_ids.extend([i.user_id] * len(i.org_ids))
org_ids.extend(i.org_ids)
final = pd.DataFrame({'user_id': user_ids, 'org_id': org_ids})
#print(users['rating'].to_list())
final['rating'] = 0
if opposite_cities: final['is_tourist'] = 1
else: final['is_tourist'] = 0
return final
#supplement_sample(rev_test)
def get_dataset(reviews, n_pool=100, n_neg_samples=100,
for_submission=False, opposite_cities=True):
"""Forms a dataset by combining positive user-org pairs and negative
and adding with user and org features"""
if for_submission:
X = supplement_sample(reviews,
N_POOL=n_pool, N_NEGATIVE_SAMPLES=n_neg_samples, opposite_cities=opposite_cities)
else:
X = pd.concat([
supplement_sample(reviews, N_POOL=n_pool, N_NEGATIVE_SAMPLES=n_neg_samples,
opposite_cities=opposite_cities),
reviews[['user_id', 'org_id', 'rating', 'is_tourist']]
], ignore_index=True)
#.merge(org_tourists_count, on='org_id', how='left')\
#.merge(org_count_rating, on='org_id', how='left')\
#
#
X = X\
.merge(user_count_rating, on='user_id', how='left')\
.merge(user_rubrics, on='user_id', how='left')\
.merge(orgs_rubrics, on='org_id', how='left')\
.merge(user_mean_bill, on='user_id', how='left')\
.merge(user_median_bill, on='user_id', how='left')\
.merge(org_mean_rating, on='org_id', how='left')\
.merge(org_median_rating, on='org_id', how='left')\
.merge(orgs[['org_id', 'average_bill', 'rating']]\
        .rename({'rating': 'org_default_rating'}, axis=1), on='org_id', how='left')\
.sort_values('user_id')
def reduce_rubrics(df):
temp = pd.DataFrame(index=df.index)
for rub in orgs_rubrics.columns:
temp[rub] = (df[rub + "_x"] > 0) * df[rub + "_y"]
return temp.sum(axis=1)
X['rubrics_coincidence'] = reduce_rubrics(X)
raw_sample = X[['user_id', 'org_id', 'rating']]
to_drop = [col for col in X.columns if "_x" in col]
X = X.drop(columns=to_drop)
y = X['rating']
ids = X.groupby('user_id')['user_id'].size()
X = X.drop(columns=['rating', 'user_id', 'org_id'])
return X, y, ids, raw_sample
np.random.seed(42)
###Output
_____no_output_____
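###Markdown
The negative-sampling step inside `supplement_sample` boils down to two NumPy calls; a minimal sketch with made-up ids (not part of the original notebook) shows the idea: draw candidates from the head of the popularity-ordered pool, then drop any org the user already liked.
###Code
# Hypothetical candidate pool and known positives, just to illustrate the sampling logic
toy_pool = np.array([101, 102, 103, 104, 105], dtype='uint64')     # popularity-ordered org ids
toy_positives = np.array([103], dtype='uint64')                    # orgs the user already visited
toy_drawn = np.random.choice(toy_pool[:4], size=3, replace=False)  # sample from the top of the pool
np.setdiff1d(toy_drawn, toy_positives)                             # negatives actually kept
###Output
_____no_output_____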
###Markdown
Modelling / Choosing most performant model
###Code
rev_train = reviews.loc[reviews['is_tourist'], ['user_id', 'org_id', 'rating', 'user_city', 'is_tourist']]
rev_test = rev_train.iloc[-15000:]
rev_test = rev_test[rev_test.user_id.isin(revs_for_FE.user_id)]
rev_train = rev_train.iloc[:-15000]
# Explicitly list known positives from the training period, for both train and test,
# to exclude them later in supplement_sample
_, train_positives = process_reviews(rev_train)
_, test_positives = process_reviews(rev_test)
all_positives = pd.merge(train_positives, test_positives, on='user_id', how='right')
all_positives['target'] = all_positives.apply(
lambda row: row['target_y'] + row['target_x']
if not np.isnan(row['target_x']).all() else row['target_y'],
axis=1)
all_positives.drop(columns=['target_x', 'target_y'], inplace=True)
rev_train = rev_train.merge(train_positives, on='user_id', how='left')
rev_test = rev_test.merge(all_positives, on='user_id', how='left')
N_NEGATIVE_SAMPLES = 100
N_POOL = 1000
N_TEST_POOL = 20
def choose_popular_orgs(reviews, n_popular=N_TEST_POOL):
return reviews[reviews.org_id.isin(
np.hstack([spb_orgs[:n_popular] , msk_orgs[:n_popular]]
))]
X_test, y_test, ids_test, X_raw_test = get_dataset(choose_popular_orgs(rev_test), N_TEST_POOL, N_TEST_POOL)
#X_train, y_train, ids_train, X_raw_train = get_dataset(choose_popular_orgs(rev_train), N_POOL, N_NEGATIVE_SAMPLES)
X_train, y_train, ids_train, X_raw_train = get_dataset(rev_train, N_POOL, N_NEGATIVE_SAMPLES)
X_raw_train.user_id.nunique()
"""
rev_train_same = reviews.loc[(~reviews['is_tourist']) & (reviews.ts < threshold_day)]\
[['user_id', 'org_id', 'rating', 'user_city', 'is_tourist']]
_, train_positives2 = process_reviews(rev_train_same)
rev_train_same = rev_train_same.merge(train_positives2, on='user_id', how='left')
X_train_same, y_train_same, ids_train_same, X_raw_train_same\
= get_dataset(
choose_popular_orgs(rev_train_same),
N_POOL, N_POOL, opposite_cities=False
)
X_train = pd.concat([X_train, X_train_same])
y_train = pd.concat([y_train, y_train_same])
ids_train = pd.concat([ids_train, ids_train_same])
X_raw_train = pd.concat([X_raw_train, X_raw_train_same])
X_raw_train_same.user_id.nunique()
weights = {1: 1., 0: 0.0}
weights_train = X_train['is_tourist'].apply(lambda x: weights[x])
"""
model = lgb.LGBMRanker(
objective='lambdarank',
random_state=34,
learning_rate = 0.0001,
#subsample=0.8, subsample_freq=5,
reg_alpha = 0.001,
#reg_lambda = 0.001,
#colsample_bytree = 0.8,
n_estimators = 200,
n_jobs = -1, first_metric_only=True
)
model.fit(X=X_train, y=y_train, group=ids_train,
eval_set=[(X_test, y_test)], eval_group=[ids_test],
#eval_set=[(X_train, y_train)], eval_group=[ids_train],
eval_metric=['map', 'average_precision'],
#sample_weight=weights_train,
eval_at=[20, 100], early_stopping_rounds=200
)
print(model.best_score_)
pd.DataFrame({ "feature": model.feature_name_, "importance": model.feature_importances_})\
.sort_values('importance', ascending=False)
###Output
_____no_output_____
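###Markdown
A note on the `group` argument used in `model.fit` above: LGBMRanker expects the rows of `X_train` to be grouped by query (here, by user), and `group` to hold the number of consecutive rows per query in the same order. The toy frame below (made-up values) mirrors how `ids_train` is computed inside `get_dataset`.
###Code
# Hypothetical layout: user 1 has three candidate orgs, user 2 has two
toy_rows = pd.DataFrame({
    'user_id': [1, 1, 1, 2, 2],
    'some_feature': [0.2, 0.1, 0.9, 0.4, 0.3],
})
toy_group_sizes = toy_rows.groupby('user_id')['user_id'].size()  # same recipe as ids_train
list(toy_group_sizes)  # -> [3, 2]
###Output
_____no_output_____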
###Markdown
Averaging predictions of a few copies of the same algorithm trained with different seeds
###Code
X_test2, y_test2, ids_test2, X_raw_test2 = get_dataset(rev_test)
different_preds = []
right_preds = []
for i in range(20):
model = lgb.LGBMRanker(
objective='lambdarank',
random_state=i,
learning_rate = 0.0001,
#subsample=0.8, subsample_freq=5,
reg_alpha = 0.002,
#reg_lambda = 0.1,
#colsample_bytree = 0.5,
n_estimators = 200,
n_jobs = -1, first_metric_only=True
)
X_train, y_train, ids_train, X_raw_train = get_dataset(rev_train, N_POOL, N_NEGATIVE_SAMPLES)
model.fit(X=X_train, y=y_train, group=ids_train,
eval_set=[(X_test, y_test)], eval_group=[ids_test],
#eval_set=[(X_train, y_train)], eval_group=[ids_train],
eval_metric=['map'], verbose=-1,
eval_at=[20, 100], early_stopping_rounds=None)
print(i, model.best_score_)
inds = X_raw_test.org_id.isin(
np.hstack([spb_orgs[:N_POOL], msk_orgs[:N_POOL]])
)
predicted_vals = model.predict(X_test[inds], raw_score=False)
different_preds.append(predicted_vals)
inds = X_raw_test2.org_id.isin(
np.hstack([spb_orgs[:N_TEST_POOL], msk_orgs[:N_TEST_POOL]])
)
predicted_vals = model.predict(X_test2[inds], raw_score=False)
right_preds.append(predicted_vals)
###Output
_____no_output_____
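###Markdown
The two blending variants below differ only in how the per-seed scores are combined; a toy example with made-up scores (not from the models above) shows the difference: per-user rank averaging ignores the score scale, plain summation keeps it.
###Code
# Two hypothetical models scoring the same three candidates of one user, on different scales
toy = pd.DataFrame({
    'user_id': [1, 1, 1],
    'pred0': [0.10, 0.30, 0.20],
    'pred1': [5.0, 1.0, 9.0],
})
by_rank = toy.set_index('user_id').groupby('user_id').rank().sum(axis=1)  # scale-free
by_sum = toy[['pred0', 'pred1']].sum(axis=1)                              # scale-sensitive
print(list(by_rank), list(by_sum))
###Output
_____no_output_____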
###Markdown
By ranking
###Code
inds = X_raw_test.org_id.isin(
np.hstack([spb_orgs[:N_POOL], msk_orgs[:N_POOL]])
)
predicted_vals = pd.DataFrame(
{f'pred{i}': different_preds[i] for i in range(len(different_preds))},
index=X_raw_test.user_id).groupby('user_id').rank().sum(axis=1).rename('prediction')
X_raw_test.loc[inds, 'prediction'] = predicted_vals.values
predictions = X_raw_test[inds]\
.sort_values(['user_id', 'prediction'], ascending=[True, False])\
.groupby('user_id')['org_id'].apply(lambda x: list(x[:20]))\
.rename('target').to_frame().reset_index()
_, y_true = process_reviews(rev_test)
_, trial = process_reviews(X_raw_test.query('rating >= 4'))
y_true_mod = trial.copy()
y_true_mod['target'] = y_true_mod.target.apply(
lambda arr: [x for x in arr if x in np.hstack([spb_orgs[:N_POOL], msk_orgs[:N_POOL]])]
)
y_true_mod = y_true_mod[y_true_mod.target.apply(lambda x: len(x) > 0)]
print("Performance if accounting only users who have positives among most popular \
places by these most polular places")
print_score(MNAP_N(y_true_mod, predictions))
inds = X_raw_test2.org_id.isin(
np.hstack([spb_orgs[:N_TEST_POOL], msk_orgs[:N_TEST_POOL]])
)
predicted_vals = pd.DataFrame(
{f'pred{i}': right_preds[i] for i in range(len(right_preds))},
index=X_raw_test2[inds].user_id).groupby('user_id').rank()\
.sum(axis=1).rename('prediction')
X_raw_test2.loc[inds, 'prediction'] = predicted_vals.values
predictions = X_raw_test2[inds]\
.sort_values(['user_id', 'prediction'], ascending=[True, False])\
.groupby('user_id')['org_id'].apply(lambda x: list(x[:20]))\
.rename('target').to_frame().reset_index()
_, y_true = process_reviews(rev_test)
print("Performance if accounting all users by all positive places")
print_score(MNAP_N(y_true, predictions))
###Output
Performance if accounting all users by all positive places
Score: 6.91
###Markdown
By summation
###Code
inds = X_raw_test.org_id.isin(
np.hstack([spb_orgs[:N_POOL], msk_orgs[:N_POOL]])
)
predicted_vals = pd.DataFrame(
{f'pred{i}': different_preds[i] for i in range(len(different_preds))},
index=X_raw_test.index).sum(axis=1).rename('prediction')
X_raw_test.loc[inds, 'prediction'] = predicted_vals
predictions = X_raw_test[inds]\
.sort_values(['user_id', 'prediction'], ascending=[True, False])\
.groupby('user_id')['org_id'].apply(lambda x: list(x[:20]))\
.rename('target').to_frame().reset_index()
_, y_true = process_reviews(rev_test)
_, trial = process_reviews(X_raw_test.query('rating >= 4'))
y_true_mod = trial.copy()
y_true_mod['target'] = y_true_mod.target.apply(
lambda arr: [x for x in arr if x in np.hstack([spb_orgs[:N_POOL], msk_orgs[:N_POOL]])]
)
y_true_mod = y_true_mod[y_true_mod.target.apply(lambda x: len(x) > 0)]
print("Performance if accounting only users who have positives among most popular \
places by these most polular places")
print_score(MNAP_N(y_true_mod, predictions))
inds = X_raw_test2.org_id.isin(
np.hstack([spb_orgs[:N_TEST_POOL], msk_orgs[:N_TEST_POOL]])
)
predicted_vals = pd.DataFrame(
{f'pred{i}': right_preds[i] for i in range(len(right_preds))},
index=X_raw_test2[inds].index).sum(axis=1).rename('prediction')
X_raw_test2.loc[inds, 'prediction'] = predicted_vals
predictions = X_raw_test2[inds]\
.sort_values(['user_id', 'prediction'], ascending=[True, False])\
.groupby('user_id')['org_id'].apply(lambda x: list(x[:20]))\
.rename('target').to_frame().reset_index()
_, y_true = process_reviews(rev_test)
print("Performance if accounting all users by all positive places")
print_score(MNAP_N(y_true, predictions))
###Output
Performance if accounting all users by all positive places
Score: 6.97
###Markdown
Performance metrics for non-averaged strategies
###Code
# Ensure we supply only most popular orgs to the test
inds = X_raw_test.org_id.isin(
np.hstack([spb_orgs[:N_POOL], msk_orgs[:N_POOL]])
)
predicted_vals = model.predict(X_test[inds], raw_score=False)
X_raw_test.loc[inds, 'prediction'] = predicted_vals
predictions = X_raw_test[inds]\
.sort_values(['user_id', 'prediction'], ascending=[True, False])\
.groupby('user_id')['org_id'].apply(lambda x: list(x[:20]))\
.rename('target').to_frame().reset_index()
_, y_true = process_reviews(rev_test)
_, trial = process_reviews(X_raw_test.query('rating >= 4'))
y_true_mod = trial.copy()
y_true_mod['target'] = y_true_mod.target.apply(
lambda arr: [x for x in arr if x in np.hstack([spb_orgs[:N_POOL], msk_orgs[:N_POOL]])]
)
y_true_mod = y_true_mod[y_true_mod.target.apply(lambda x: len(x) > 0)]
print("Performance if accounting only users who have positives among most popular \
places by these most polular places")
print_score(MNAP_N(y_true_mod, predictions))
X_test2, y_test2, ids_test2, X_raw_test2 = get_dataset(rev_test)
inds = X_raw_test2.org_id.isin(
np.hstack([spb_orgs[:N_TEST_POOL], msk_orgs[:N_TEST_POOL]])
)
predicted_vals = model.predict(X_test2[inds], raw_score=False)
X_raw_test2.loc[inds, 'prediction'] = predicted_vals
predictions = X_raw_test2[inds]\
.sort_values(['user_id', 'prediction'], ascending=[True, False])\
.groupby('user_id')['org_id'].apply(lambda x: list(x[:20]))\
.rename('target').to_frame().reset_index()
_, y_true = process_reviews(rev_test)
print("Performance if accounting all users by all positive places")
print_score(MNAP_N(y_true, predictions))
# Performance of simply recommending the 20 most popular places
X_val, y_true = process_reviews(rev_test)
X_val = X_val.merge(users, on='user_id', how='left')
choose = lambda x: spb_orgs[:20] if x['city'] == 'msk' else msk_orgs[:20]
X_val['target'] = X_val.apply(choose, axis=1)
X_val.drop(columns=['city', 'new_user_id'], inplace=True)
print_score(MNAP_N(y_true, X_val))
y_true_mod = y_true.copy()
y_true_mod['target'] = y_true.target.apply(
lambda arr: [x for x in arr if x in np.hstack([spb_orgs[:N_TEST_POOL], msk_orgs[:N_TEST_POOL]])]
)
y_true_mod = y_true_mod[y_true_mod.target.apply(lambda x: len(x) > 0)]
X_val_mod = X_val[X_val.user_id.isin(y_true_mod.user_id)]
print_score(MNAP_N(y_true_mod, X_val_mod))
# Performance of simply recommending the 20 most popular places
X_val, y_true = process_reviews(rev_test)
X_val = X_val.merge(users, on='user_id', how='left')
choose = lambda x: spb_orgs[:20] if x['city'] == 'msk' else msk_orgs[:20]
X_val['target'] = X_val.apply(choose, axis=1)
X_val.drop(columns=['city', 'new_user_id'], inplace=True)
print_score(MNAP_N(y_true, X_val))
y_true_mod = y_true.copy()
y_true_mod['target'] = y_true.target.apply(
lambda arr: [x for x in arr if x in np.hstack([spb_orgs[:100], msk_orgs[:100]])]
)
y_true_mod = y_true_mod[y_true_mod.target.apply(lambda x: len(x) > 0)]
X_val_mod = X_val[X_val.user_id.isin(y_true_mod.user_id)]
print_score(MNAP_N(y_true_mod, X_val_mod))
get_recall(y_true, X_val, size=20)
###Output
_____no_output_____
###Markdown
Make submission after training on full dataset Approach 1
###Code
rev_total = reviews.loc[reviews['is_tourist'], ['user_id', 'org_id', 'rating', 'user_city', 'is_tourist']]
_, train_positives = process_reviews(rev_total)
rev_total = rev_total.merge(train_positives, on='user_id', how='left')
N_NEGATIVE_SAMPLES = 100
N_POOL = 1000
N_TEST_POOL = 20
def choose_popular_orgs(reviews, n_popular=N_TEST_POOL):
return reviews[reviews.org_id.isin(
np.hstack([spb_orgs[:n_popular] , msk_orgs[:n_popular]]
))]
X_subm, y_subm, ids_subm, X_subm_raw = get_dataset(
test_users.merge(train_positives, on='user_id', how='left')\
.merge(users[['user_id', 'city']], on='user_id', how='left')\
.rename({"city": "user_city"}, axis=1),
N_TEST_POOL, N_TEST_POOL,for_submission=True)
different_preds = []
for i in range(5):
X_full, y_full, ids_full, X_full_raw = get_dataset(rev_total, N_POOL, N_NEGATIVE_SAMPLES)
model = lgb.LGBMRanker(
objective='lambdarank',
random_state=i,
learning_rate = 0.0001,
#subsample=0.8, subsample_freq=5,
reg_alpha = 0.001,
#reg_lambda = 0.1,
colsample_bytree = 0.8,
n_estimators = 200,
n_jobs = -1, first_metric_only=True
)
model.fit(
X=X_full, y=y_full, group=ids_full,
eval_set=[(X_full, y_full)], eval_group=[ids_full],
#X=X_train, y=y_train, group=ids_train,
#eval_set=[(X_test, y_test)], eval_group=[ids_test],
eval_metric=['map'], verbose=-1,
eval_at=[20, 100], early_stopping_rounds=None)
print(i, model.best_score_)
inds = X_subm_raw.org_id.isin(
np.hstack([spb_orgs[:N_TEST_POOL], msk_orgs[:N_TEST_POOL]])
)
predicted_vals = model.predict(X_subm[inds], raw_score=False)
different_preds.append(predicted_vals)
inds = X_subm_raw.org_id.isin(
np.hstack([spb_orgs[:N_TEST_POOL], msk_orgs[:N_TEST_POOL]])
)
predicted_vals = pd.DataFrame(
{f'pred{i}': different_preds[i] for i in range(len(different_preds))},
index=X_subm_raw.index).sum(axis=1).rename('prediction')
X_subm_raw.loc[inds, 'prediction'] = predicted_vals
predictions = X_subm_raw[inds]\
.sort_values(['user_id', 'prediction'], ascending=[True, False])\
.groupby('user_id')['org_id'].apply(lambda x: " ".join(map(str, list(x[:20]))))\
.rename('target').to_frame().reindex(test_users.user_id)
predictions.to_csv('submission11.csv')
###Output
0 defaultdict(<class 'collections.OrderedDict'>, {'training': OrderedDict([('map@20', 0.29501514180950916), ('map@100', 0.30932296432047374), ('ndcg@20', 0.37503559052929025), ('ndcg@100', 0.4598981667808404)])})
1 defaultdict(<class 'collections.OrderedDict'>, {'training': OrderedDict([('map@20', 0.28127071432375544), ('map@100', 0.2955597422642228), ('ndcg@20', 0.3548088504258281), ('ndcg@100', 0.4455742020366839)])})
2 defaultdict(<class 'collections.OrderedDict'>, {'training': OrderedDict([('map@20', 0.28576968732869285), ('map@100', 0.30001415823333977), ('ndcg@20', 0.3620459301700502), ('ndcg@100', 0.45039843697983484)])})
3 defaultdict(<class 'collections.OrderedDict'>, {'training': OrderedDict([('map@20', 0.28324467326714403), ('map@100', 0.29751947519117256), ('ndcg@20', 0.3550870684787132), ('ndcg@100', 0.44669506155618355)])})
4 defaultdict(<class 'collections.OrderedDict'>, {'training': OrderedDict([('map@20', 0.2842358908173525), ('map@100', 0.2983468865346625), ('ndcg@20', 0.3580821257545114), ('ndcg@100', 0.4479427634715357)])})
###Markdown
Approach 2
###Code
rev_total = reviews.loc[reviews['is_tourist'], ['user_id', 'org_id', 'rating', 'user_city']]
_, train_positives = process_reviews(rev_total)
rev_total = rev_total.merge(train_positives, on='user_id', how='left')
X_full, y_full, ids_full, X_full_raw = get_dataset(rev_total)
X_subm, y_subm, ids_subm, X_subm_raw = get_dataset(
test_users.merge(train_positives, on='user_id', how='left')\
.merge(users[['user_id', 'city']], on='user_id', how='left')\
.rename({"city": "user_city"}, axis=1),
for_submission=True)
final_model = lgb.LGBMRanker(
objective='lambdarank',
random_state=42,
learning_rate = 0.05,
n_estimators = 100,
n_jobs = -1
).fit(X=X_full, y=y_full, group=ids_full,
eval_set=[(X_full, y_full)], eval_group=[ids_full],
eval_metric=['map', 'average_precision'],
eval_at=[20, 100])
inds = X_subm_raw.org_id.isin(
np.hstack([spb_orgs[:20], msk_orgs[:20]])
)
predicted_vals = final_model.predict(X_subm[inds], raw_score=False)
X_subm_raw.loc[inds, 'prediction'] = predicted_vals
predictions = X_subm_raw[inds]\
.sort_values(['user_id', 'prediction'], ascending=[True, False])\
.groupby('user_id')['org_id'].apply(lambda x: " ".join(map(str, list(x[:20]))))\
.rename('target').to_frame().reindex(test_users.user_id)
predictions.to_csv('submission5.csv')
###Output
_____no_output_____
###Markdown
Some statistics
###Code
np.setdiff1d(rev_test.user_id.unique(), rev_train.user_id.unique()).size
X_raw_test.user_id.nunique()
(np.setdiff1d(X_raw_test.user_id.unique(), revs_for_FE.user_id.unique()).size,
X_raw_test.user_id.nunique())
(np.setdiff1d(test_users.user_id.unique(), reviews.user_id.unique()).size,
X_raw_test.user_id.nunique())
(np.setdiff1d(test_users.user_id.unique(), reviews.loc[reviews['is_tourist']].user_id.unique()).size,
test_users.user_id.nunique())
(np.setdiff1d(test_users.user_id.unique(), users.user_id.unique()).size,
users.user_id.nunique())
np.setdiff1d(X_raw_train.user_id.unique(), rev_train.user_id.unique()).size
###Output
_____no_output_____
###Markdown
LightFM preprocessing
###Code
from sklearn.model_selection import train_test_split
from lightfm.data import Dataset
from lightfm import LightFM
from lightfm.evaluation import precision_at_k, reciprocal_rank, recall_at_k
from time import ctime
rev_train, rev_test = train_test_split(
reviews[['new_user_id', 'new_org_id', 'rating']].drop_duplicates().to_numpy(dtype='uint64'),
test_size=0.1,
random_state=10)
rev_train = reviews.loc[~reviews['is_tourist'], ['new_user_id', 'new_org_id', 'rating']].to_numpy(dtype='uint64')
rev_test = reviews.loc[reviews['is_tourist'], ['new_user_id', 'new_org_id', 'rating']].to_numpy(dtype='uint64')
#.sample(frac=1, random_state=42).to_numpy(dtype='uint64')
rev_train = np.vstack([rev_train, rev_test[:-15000]])
rev_test = rev_test[-15000:]
from scipy.sparse import csr_matrix
feats = users.set_index('user_id')\
.merge(user_rubrics, on='user_id', how='left')\
.merge(user_count_rating, on='user_id', how='left')\
.merge(user_mean_bill, on='user_id', how='left')\
.merge(user_median_bill, on='user_id', how='left')\
.drop(columns=['city', 'new_user_id']).fillna(0)
users_feats_sparse = csr_matrix(feats.values)
feats = orgs.set_index('org_id')\
.merge(orgs_rubrics, on='org_id', how='left')\
.merge(org_count_rating, on='org_id', how='left')\
.merge(org_mean_rating, on='org_id', how='left')\
.merge(org_median_rating, on='org_id', how='left')\
.drop(columns=['city', 'new_org_id', 'rubrics_id', 'features_id'])\
.fillna(0)
orgs_feats_sparse = csr_matrix(feats.values)
#rev_train = reviews.loc[reviews['is_tourist'], ['new_user_id', 'new_org_id', 'rating']].to_numpy(dtype='uint64')
#rev_test = rev_train[-15000:]
#rev_train = rev_train[:-15000]
rev_train_pd = pd.DataFrame(rev_train, columns=['user_id', 'org_id', 'rating'])
rev_test_pd = pd.DataFrame(rev_test, columns=['user_id', 'org_id', 'rating'])
ds = Dataset()
ds.fit(users=users['new_user_id'], items=orgs['new_org_id'])
binary_test, ranked_test = ds.build_interactions(rev_test)
binary_train, ranked_train = ds.build_interactions(rev_train)
X_train, y_train = process_reviews(rev_train_pd)
X_test, y_test = process_reviews(rev_test_pd)
N_BEST_IN_CITY = 5000
threshold_day = reviews.loc[reviews['is_tourist']].iloc[-15000]['ts']
threshold_day
non_eq_cities = reviews[reviews.user_city != reviews.org_city]
non_eq_cities = non_eq_cities.query('ts <= @threshold_day & ts >= @threshold_day - 500')
msk_orgs = non_eq_cities[non_eq_cities['org_city'] == 'msk']['new_org_id']
msk_orgs = msk_orgs.value_counts().index[:N_BEST_IN_CITY].to_list()
spb_orgs = non_eq_cities[non_eq_cities['org_city'] == 'spb']['new_org_id']
spb_orgs = spb_orgs.value_counts().index[:N_BEST_IN_CITY].to_list()
best_orgs = msk_orgs + spb_orgs
###Output
_____no_output_____
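###Markdown
For reference, a minimal sketch (toy ids, not the real data) of what `Dataset.build_interactions` returns: a binary interactions matrix plus a weights matrix carrying the third element of each tuple, which is why the pair is unpacked as `binary_*` and `ranked_*` above.
###Code
# Hypothetical two users, three items
toy_ds = Dataset()
toy_ds.fit(users=[1, 2], items=[10, 20, 30])
toy_interactions, toy_weights = toy_ds.build_interactions([(1, 10, 5), (2, 30, 4)])
print(toy_interactions.shape, toy_weights.shape)  # both sparse matrices of shape (2, 3)
###Output
_____no_output_____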
###Markdown
LightFM
###Code
rank = 40
model = LightFM(no_components=rank, loss='warp', random_state=1)
model.fit(ranked_train, epochs=30, num_threads=42,
user_features=users_feats_sparse, item_features=orgs_feats_sparse
)
recall = recall_at_k(model, test_interactions=ranked_test,
train_interactions=ranked_train, k=1000)
recall.mean()
def get_predictions(X_test, model, y_train=None, n_best=20):
ranked_predictions = []
items = orgs.new_org_id.values
items = np.array(best_orgs)
items_msk = np.array(msk_orgs)
items_spb = np.array(spb_orgs)
for i in range(len(X_test)):
if X_test['city'][i] == 'msk': local_items = items_spb
else: local_items = items_msk
pred = model.predict(
int(X_test['user_id'][i]), local_items,
# user_features=users_feats_sparse, item_features=orgs_feats_sparse
)
positions = pred.argsort()[::-1]
#assert (orgs.new_org_id.values[positions] == positions).all(), 'Wrong'
#print(positions)
#print(X_test['city'][i], local_items[positions])
#print(pred[positions])
ranked_predictions.append({'target': local_items[positions]})
all_predictions = pd.DataFrame.from_records(ranked_predictions)
all_predictions['user_id'] = X_test['user_id'].values
all_predictions = all_predictions[['user_id', 'target']]
print(all_predictions)
if y_train is not None:
all_predictions = all_predictions.merge(y_train, on='user_id', how='left')
all_predictions['target'] = all_predictions.apply(
lambda row: np.setdiff1d(row['target_x'], row['target_y'], assume_unique=True), axis=1)
all_predictions['target'] = all_predictions['target'].apply(lambda x: x[:n_best])
return all_predictions[['user_id', 'target']]
test_users_with_locations = X_test.merge(
users, right_on='new_user_id', left_on='user_id', how='left').\
rename({'user_id_x': 'user_id'}, axis=1)
predictions = get_predictions(
X_test=test_users_with_locations, model=model, y_train=y_train, n_best=20)
test_users_with_locations = X_test.merge(
users, right_on='new_user_id', left_on='user_id', how='left').\
rename({'user_id_x': 'user_id'}, axis=1)
predictions = get_predictions(
X_test=test_users_with_locations, model=model, y_train=y_train, n_best=20)
print_score(MNAP_N(y_test, predictions))
print_score(MNAP_N(y_test, predictions))
###Output
Score: 6.68
###Markdown
Make submission on full dataset
###Code
rev_train = reviews.loc[:, ['new_user_id', 'new_org_id', 'rating']].to_numpy(dtype='uint64')
rev_train_pd = pd.DataFrame(rev_train, columns=['user_id', 'org_id', 'rating'])
ds = Dataset()
ds.fit(users=users['new_user_id'], items=orgs['new_org_id'])
binary_train, ranked_train = ds.build_interactions(rev_train)
X_train, y_train = process_reviews(rev_train_pd)
N_BEST_IN_CITY = 20
threshold_day = reviews.ts.max()
non_eq_cities = reviews[reviews.user_city != reviews.org_city]
non_eq_cities = non_eq_cities.query('ts <= @threshold_day & ts >= @threshold_day - 500')
msk_orgs = non_eq_cities[non_eq_cities['org_city'] == 'msk']['new_org_id']
msk_orgs = msk_orgs.value_counts().index[:N_BEST_IN_CITY].to_list()
spb_orgs = non_eq_cities[non_eq_cities['org_city'] == 'spb']['new_org_id']
spb_orgs = spb_orgs.value_counts().index[:N_BEST_IN_CITY].to_list()
best_orgs = msk_orgs + spb_orgs
rank = 40
model = LightFM(no_components=rank, loss='warp', random_state=42)
model.fit(ranked_train, epochs=30, num_threads=2)
submission_users = test_users.merge(users, how='left', on='user_id').\
rename({'user_id': 'old_user_id', 'new_user_id': 'user_id'}, axis=1)
predictions = get_predictions(
X_test=submission_users, model=model, y_train=y_train, n_best=20)
predictions['user_id'] = predictions['user_id'].apply(lambda x: new_to_uid[x])
predictions['target'] = predictions['target']\
.apply(lambda arr: ' '.join([str(new_to_oid[x]) for x in arr]))
assert (predictions.user_id == test_users.user_id).all(), 'Error'
predictions.to_csv('submission3.csv', index=None)
###Output
user_id target
0 17 [31670, 13349, 16392, 33679, 14632, 14305, 136...
1 64 [58227, 31670, 27618, 27630, 33679, 14632, 535...
2 90 [31670, 13349, 33679, 58227, 16392, 27630, 247...
3 101 [31670, 16392, 13349, 62072, 33679, 13638, 143...
4 912298 [20140, 11844, 2879, 30685, 64993, 17792, 4990...
... ... ...
16962 911919 [31670, 13349, 62072, 16392, 33679, 14372, 146...
16963 912000 [31670, 13349, 16392, 27618, 33679, 18187, 620...
16964 912046 [31670, 58227, 33679, 16392, 14632, 13638, 128...
16965 1252709 [11844, 30685, 64993, 49173, 2879, 20140, 1486...
16966 912235 [31670, 13349, 16392, 33679, 25219, 14372, 620...
[16967 rows x 2 columns]
###Markdown
Public score 4.92 N_most_popular
###Code
test_users_with_locations = X_test.merge(
users, right_on='new_user_id', left_on='user_id', how='left').\
rename({'user_id_x': 'user_id'}, axis=1)
choose = lambda x: spb_orgs if x['city'] == 'msk' else msk_orgs
target = test_users_with_locations.apply(choose, axis=1)
predictions = X_test.copy()
predictions['target'] = target
print_score(MNAP_N(y_test, predictions))
get_recall(y_test, predictions, size=100)
###Output
_____no_output_____
###Markdown
EDA
###Code
sns.displot(data=reviews, x='ts', height=8)
plt.title('Distribution of reviews by day')
plt.show()
last_reviews = reviews.groupby('org_id')['ts'].max().value_counts()
sns.scatterplot(data=last_reviews)
plt.title('Last reviews by day')
plt.show()
reviews[reviews.new_org_id.isin(spb_orgs + msk_orgs)].groupby('org_id')['ts'].max()
us_org_pairs = reviews.groupby(['user_id', 'org_id'])['rating'].count()
print(us_org_pairs[us_org_pairs > 1].size, 'number of non unique user-org pairs')
print(us_org_pairs.size, 'number of all unique user-org pairs')
print(us_org_pairs.max(), 'max number of reviews for the same org by one user')
print(reviews.query("org_city != user_city").shape, 'number of different cities user-org pairs')
reviews.query("org_city != user_city")['user_id'].nunique()
print(reviews.user_id.nunique())
print(reviews.query("org_city == 'msk'").org_id.nunique())
print(reviews.query("org_city == 'spb'").org_id.nunique())
non_eq_cities = reviews[reviews.user_city != reviews.org_city]
print(non_eq_cities.query("org_city == 'msk'").org_id.nunique())
print(non_eq_cities.query("org_city == 'spb'").org_id.nunique())
print(reviews.user_id.nunique())
print(reviews.query("org_city == 'msk'").user_id.nunique())
print(reviews.query("org_city == 'spb'").user_id.nunique())
non_eq_cities = reviews[reviews.user_city != reviews.org_city]
print(non_eq_cities.query("org_city == 'msk'").user_id.nunique())
print(non_eq_cities.query("org_city == 'spb'").user_id.nunique())
reviews.user_id.value_counts().describe()
# review-count distribution for users that appear in the test set (mirrors the all-users plot below)
reviews[reviews.user_id.isin(test_users.user_id)].user_id.value_counts().clip(upper=40).hist(bins=40, figsize=(20, 10))
reviews.user_id.value_counts().clip(upper=40).hist(bins=40, figsize=(20, 10))
###Output
_____no_output_____
###Markdown
Train-test split
###Code
def clear_df(df, suffixes=['_x', '_y'], inplace=True):
    '''
    clear_df(df, suffixes=['_x', '_y'], inplace=True)
        Drops from the input df every column that ends with one of the given suffixes.
        Parameters
        ----------
        df : pandas.DataFrame
        suffixes : Iterable, default=['_x', '_y']
            Suffixes of the columns to be dropped
        inplace : bool, default=True
            Whether to drop the columns in place or return a copy of the DataFrame.
        Returns
        -------
        pandas.DataFrame (optional)
            df with the columns dropped
    '''
def bad_suffix(column):
nonlocal suffixes
return any(column.endswith(suffix) for suffix in suffixes)
columns_to_drop = [col for col in df.columns if bad_suffix(col)]
return df.drop(columns_to_drop, axis=1, inplace=inplace)
def extract_unique(reviews, column):
    '''
    extract_unique(reviews, column)
        Extracts the unique values of a column in a DataFrame.
        Parameters
        ----------
        reviews : pandas.DataFrame
            pandas.DataFrame to extract the values from.
        column : str
            Column name in <reviews>.
        Returns
        -------
        pandas.DataFrame
            Contains a single named column with the unique values.
    '''
unique = reviews[column].unique()
return pd.DataFrame({column: unique})
def count_unique(reviews, column):
    '''
    count_unique(reviews, column)
        Extracts and counts the unique values of a column in a DataFrame.
        Parameters
        ----------
        reviews : pandas.DataFrame
            pandas.DataFrame to extract the values from.
        column : str
            Column name in <reviews>.
        Returns
        -------
        pandas.DataFrame
            Contains two columns: the unique values and their occurrence counts.
    '''
return reviews[column].value_counts().reset_index(name='count').rename({'index': column}, axis=1)
def filter_reviews(reviews, users=None, orgs=None):
    '''
    filter_reviews(reviews, users=None, orgs=None)
        Keeps only the reviews left by the given users on the given organisations.
        Parameters
        ----------
        users: pandas.DataFrame, default=None
            DataFrame with a <user_id> column.
            If None, no filtering is applied.
        orgs: pandas.DataFrame, default=None
            DataFrame with an <org_id> column.
            If None, no filtering is applied.
        Returns
        -------
        pandas.DataFrame
            The filtered set of reviews.
    '''
if users is not None:
reviews = reviews.merge(users, on='user_id', how='inner')
clear_df(reviews)
if orgs is not None:
reviews = reviews.merge(orgs, on='org_id', how='inner')
clear_df(reviews)
return reviews
def train_test_split(reviews, ts_start, ts_end=None):
    '''
    train_test_split(reviews, ts_start, ts_end=None)
        Splits the review sample into two parts: train and test.
        The test part keeps only reviews on organisations that appear in the train part.
        Parameters
        ----------
        reviews : pandas.DataFrame
            Reviews from reviews.csv with the required fields:
            <rating>, <ts>, <user_id>, <user_city>, <org_id>, <org_city>.
        ts_start : int
            First day of the test-set reviews (inclusive).
        ts_end : int, default=None
            Last day of the test-set reviews (inclusive).
            If None, ts_end == reviews['ts'].max().
        Returns
        -------
        splitting : tuple
            A tuple of two pandas.DataFrames with the same structure as reviews:
            the first holds the train reviews, the second the test reviews.
    '''
if not ts_end:
ts_end = reviews['ts'].max()
reviews_train = reviews[(reviews['ts'] < ts_start) | (reviews['ts'] > ts_end)]
reviews_test = reviews[(ts_start <= reviews['ts']) & (reviews['ts'] <= ts_end)]
    # 1. Keep only travellers' reviews of places they liked
reviews_test = reviews_test[reviews_test['rating'] >= 4.0]
reviews_test = reviews_test[reviews_test['user_city'] != reviews_test['org_city']]
    # 2. Keep in the test set only the users and orgs that appear in the train set
train_orgs = extract_unique(reviews_train, 'org_id')
train_users = extract_unique(reviews_train, 'user_id')
reviews_test = filter_reviews(reviews_test, orgs=train_orgs)
return reviews_train, reviews_test
def process_reviews(reviews):
    '''
    process_reviews(reviews)
        Extracts the test users and the target from a set of reviews.
        Parameters
        ----------
        reviews : pandas.DataFrame
            DataFrame with reviews, containing the columns <user_id> and <org_id>
        Returns
        -------
        X : pandas.DataFrame
            DataFrame with the same structure as test_users.csv
        y : pandas.DataFrame
            DataFrame with the columns <user_id> and <target>.
            <target> holds the list of org_id visited by the user.
    '''
y = reviews.groupby('user_id')['org_id'].apply(list).reset_index(name='target')
X = pd.DataFrame(y['user_id'])
return X, y
reviews['ts'].max()
###Output
_____no_output_____
###Markdown
The sample contains reviews spanning **1216** days in total. We hold out the reviews from the last **100** days as the test set.
###Code
train_reviews, test_reviews = train_test_split(reviews, 1116)
X_test, y_test = process_reviews(test_reviews)
###Output
_____no_output_____
###Markdown
Let's see how many unique users ended up in this test set:
###Code
len(X_test)
###Output
_____no_output_____
###Markdown
Metric The metric takes two DataFrames with the same structure as **y_test**. `print_score` multiplies the raw metric value by 100, just as in the contest. A similar implementation is used to score the **submission**.
###Code
def get_recall(y_true, predictions, size=20):
    '''
    Calculates recall at `size`
    Parameters
    ----------
    y_true : pd.DataFrame
        DataFrame with the columns <user_id> and <target>.
        <target> holds the list of org_id actually visited by the user.
    predictions : pd.DataFrame
        DataFrame with the columns <user_id> and <target>.
        <target> holds the list of org_id recommended to the user.
    Returns
    -------
    float
        The metric value.
    '''
y_true = y_true.rename({'target': 'y_true'}, axis='columns')
predictions = predictions.rename({'target': 'predictions'}, axis='columns')
merged = y_true.merge(predictions, left_on='user_id', right_on='user_id')
merged['intersection'] = merged.apply(
lambda row: np.intersect1d(row['y_true'], row['predictions'][:size]).size,
axis=1
)
merged['cardinality'] = merged['y_true'].apply(len)
merged['recall'] = merged['intersection'] / merged['cardinality']
return merged['recall'].mean()
def MNAP(size=20):
    '''
    MNAP(size=20)
        Creates the metric for <size> recommendations per user.
        Parameters
        ----------
        size : int, default=20
            Size of the recommendation list for each user
        Returns
        -------
        func(pd.DataFrame, pd.DataFrame) -> float
            A function that computes MNAP.
    '''
assert size >= 1, "Size must be greater than zero!"
def metric(y_true, predictions, size=size):
        '''
        metric(y_true, predictions, size=size)
            MNAP metric for the two sets <y_true> and <y_pred> (rows may come in any order).
            Parameters
            ----------
            y_true : pd.DataFrame
                DataFrame with the columns <user_id> and <target>.
                <target> holds the list of org_id actually visited by the user.
            predictions : pd.DataFrame
                DataFrame with the columns <user_id> and <target>.
                <target> holds the list of org_id recommended to the user.
            Returns
            -------
            float
                The metric value.
        '''
y_true = y_true.rename({'target': 'y_true'}, axis='columns')
predictions = predictions.rename({'target': 'predictions'}, axis='columns')
merged = y_true.merge(predictions, left_on='user_id', right_on='user_id')
def score(x, size=size):
            '''
            Helper function.
            '''
y_true = x[1][1]
predictions = x[1][2][:size]
weight = 0
inner_weights = [0]
for n, item in enumerate(predictions):
inner_weight = inner_weights[-1] + (1 if item in y_true else 0)
inner_weights.append(inner_weight)
for n, item in enumerate(predictions):
if item in y_true:
weight += inner_weights[n + 1] / (n + 1)
return weight / min(len(y_true), size)
return np.mean([score(row) for row in merged.iterrows()])
return metric
def print_score(score):
print(f"Score: {score*100.0:.2f}")
N = 20
MNAP_N = MNAP(N)
###Output
_____no_output_____
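###Markdown
A tiny worked example of the metric (hypothetical ids): user 1 gets its single relevant org at position 1 (AP = 1), user 2 gets no hits (AP = 0), so the mean is 0.5 and `print_score` should report 50.00.
###Code
toy_true = pd.DataFrame({'user_id': [1, 2], 'target': [[10], [20]]})
toy_pred = pd.DataFrame({'user_id': [1, 2], 'target': [[10, 99], [98, 99]]})
print_score(MNAP_N(toy_true, toy_pred))
###Output
_____no_output_____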
###Markdown
Approaches without machine learning Random N places Let's try recommending random places from the other city to each user.
###Code
spb_orgs = orgs[orgs['city'] == 'spb']['org_id']
msk_orgs = orgs[orgs['city'] == 'msk']['org_id']
test_users_with_locations = X_test.merge(users, on='user_id')
%%time
np.random.seed(1337)
choose = lambda x: np.random.choice(spb_orgs, N) if x['city'] == 'msk' else np.random.choice(msk_orgs, N)
target = test_users_with_locations.apply(choose, axis=1)
predictions = X_test.copy()
predictions['target'] = target
print_score(MNAP_N(y_test, predictions))
###Output
Score: 0.02
CPU times: user 2.2 s, sys: 59.9 ms, total: 2.26 s
Wall time: 2.22 s
###Markdown
N most popular places The previous approach obviously does a poor job of predicting which places a user will visit. Let's improve the strategy: recommend the most popular places, i.e. those with the largest number of reviews.
###Code
msk_orgs = train_reviews[(train_reviews['rating'] >= 4) & (train_reviews['org_city'] == 'msk')]['org_id']
msk_orgs = msk_orgs.value_counts().index[:N].to_list()
spb_orgs = train_reviews[(train_reviews['rating'] >= 4) & (train_reviews['org_city'] == 'spb')]['org_id']
spb_orgs = spb_orgs.value_counts().index[:N].to_list()
%%time
choose = lambda x: spb_orgs if x['city'] == 'msk' else msk_orgs
target = test_users_with_locations.apply(choose, axis=1)
predictions = X_test.copy()
predictions['target'] = target
print_score(MNAP_N(y_test, predictions))
###Output
Score: 4.21
CPU times: user 637 ms, sys: 9.89 ms, total: 647 ms
Wall time: 647 ms
###Markdown
Great, the metric improved a little. But this tactic is worth refining. N most popular places among tourists
###Code
tourist_reviews = train_reviews[train_reviews['rating'] >= 4.0]
# reviews left by tourists only
tourist_reviews = tourist_reviews[tourist_reviews['user_city'] != tourist_reviews['org_city']]
# pick the most popular places among tourists from Moscow and St. Petersburg
msk_orgs = tourist_reviews[tourist_reviews['org_city'] == 'msk']['org_id']
msk_orgs = msk_orgs.value_counts().index[:N].to_list()
spb_orgs = tourist_reviews[tourist_reviews['org_city'] == 'spb']['org_id']
spb_orgs = spb_orgs.value_counts().index[:N].to_list()
%%time
choose = lambda x: spb_orgs if x['city'] == 'msk' else msk_orgs
target = test_users_with_locations.apply(choose, axis=1)
predictions = X_test.copy()
predictions['target'] = target
print_score(MNAP_N(y_test, predictions))
###Output
Score: 6.40
CPU times: user 652 ms, sys: 5.35 ms, total: 657 ms
Wall time: 657 ms
###Markdown
The metric improved a little more. N / rubrics_count most popular places from each rubric
###Code
def extract_top_by_rubrics(reviews, N):
    '''
    extract_top_by_rubrics(reviews, N)
        Picks the most popular organisations per rubric, preserving the rubric distribution.
        Parameters
        ----------
        reviews : pd.DataFrame
            User reviews to build recommendations from.
        N : int
            Number of recommendations.
        Returns
        -------
        orgs_list : list
            List of selected organisations.
    '''
    # extract popular rubrics
reviews = reviews.merge(orgs, on='org_id')[['org_id', 'rubrics_id']]
rubrics = reviews.explode('rubrics_id').groupby('rubrics_id').size()
rubrics = (rubrics / rubrics.sum() * N).apply(round).sort_values(ascending=False)
    # print the list of rubrics in descending order of popularity
# print(
# pd.read_csv('data/rubrics.csv')
# .merge(rubrics.reset_index(), left_index=True, right_on='rubrics_id')
# .sort_values(by=0, ascending=False)[['rubric_id', 0]]
# )
    # extract popular organisations
train_orgs = reviews.groupby('org_id').size().reset_index(name='count').merge(orgs, on='org_id')
train_orgs = train_orgs[['org_id', 'count', 'rubrics_id']]
most_popular_rubric = lambda rubrics_id: max(rubrics_id, key=lambda rubric_id: rubrics[rubric_id])
train_orgs['rubrics_id'] = train_orgs['rubrics_id'].apply(most_popular_rubric)
orgs_by_rubrics = train_orgs.sort_values(by='count', ascending=False).groupby('rubrics_id')['org_id'].apply(list)
    # collect the most popular organisations of each rubric into a single list
orgs_list = []
for rubric_id, count in zip(rubrics.index, rubrics):
if rubric_id not in orgs_by_rubrics:
continue
orgs_list.extend(orgs_by_rubrics[rubric_id][:count])
return orgs_list
msk_orgs = extract_top_by_rubrics(tourist_reviews[tourist_reviews['org_city'] == 'msk'], N)
spb_orgs = extract_top_by_rubrics(tourist_reviews[tourist_reviews['org_city'] == 'spb'], N)
%%time
choose = lambda x: spb_orgs if x['city'] == 'msk' else msk_orgs
target = test_users_with_locations.apply(choose, axis=1)
predictions = X_test.copy()
predictions['target'] = target
print_score(MNAP_N(y_test, predictions))
###Output
Score: 5.77
CPU times: user 642 ms, sys: 5 ms, total: 647 ms
Wall time: 647 ms
###Markdown
Time for ML! Collaborative filtering Memory-based For this group of methods we need to explicitly build a __user-organisation__ matrix (__interaction matrix__), where the cell at row $i$ and column $j$ holds the rating that user $i$ gave to organisation $j$, or a missing value if no rating was given.
###Code
def reduce_reviews(reviews, min_user_reviews=5, min_org_reviews=13):
    '''
    reduce_reviews(reviews, min_user_reviews=5, min_org_reviews=13)
        Removes from the sample the users and organisations with fewer than <min_reviews> reviews in their home city.
        Keeps only tourists' reviews.
        Parameters
        ----------
        reviews : pandas.DataFrame
            Review sample with the required fields:
            <user_id>, <user_city>.
        min_user_reviews : int, default=5
            Minimum number of reviews a user must have to be included in the sample.
        min_org_reviews : int, default=13
            Minimum number of reviews an organisation must have to be included in the sample.
        Returns
        -------
        splitting : tuple
            A tuple of two sets.
            Each set contains 2 pandas.DataFrames:
            1. The reduced review sample
            2. The set of unique organisations
            The first set holds the DataFrames for reviews left in the home city, the second -
            for reviews left in the other city.
        users : pd.DataFrame
            The set of unique users in the sample
    '''
inner_reviews = reviews[reviews['user_city'] == reviews['org_city']]
outer_reviews = reviews[reviews['user_city'] != reviews['org_city']]
    # keep only tourists' reviews of their home city
tourist_users = extract_unique(outer_reviews, 'user_id')
inner_reviews = filter_reviews(inner_reviews, users=tourist_users)
    # keep only the users and orgs that have at least <min_reviews> reviews
top_users = count_unique(inner_reviews, 'user_id')
top_users = top_users[top_users['count'] >= min_user_reviews]
top_orgs = count_unique(inner_reviews, 'org_id')
top_orgs = top_orgs[top_orgs['count'] >= min_org_reviews]
inner_reviews = filter_reviews(inner_reviews, users=top_users, orgs=top_orgs)
outer_reviews = filter_reviews(outer_reviews, users=top_users)
# combine reviews
reviews = pd.concat([inner_reviews, outer_reviews])
users = extract_unique(reviews, 'user_id')
orgs = extract_unique(reviews, 'org_id')
return (
(
inner_reviews,
extract_unique(inner_reviews, 'org_id')
),
(
outer_reviews,
extract_unique(outer_reviews, 'org_id')
),
extract_unique(inner_reviews, 'user_id')
)
def create_mappings(df, column):
    '''
    create_mappings(df, column)
        Creates a mapping between the original keys and new ordinal ones.
        Parameters
        ----------
        df : pandas.DataFrame
            DataFrame with the data.
        column : str
            Name of the column holding the keys.
        Returns
        -------
        code_to_idx : dict
            Mapping: "original key" -> "new key".
        idx_to_code : dict
            Mapping: "new key" -> "original key".
    '''
code_to_idx = {}
idx_to_code = {}
for idx, code in enumerate(df[column].to_list()):
code_to_idx[code] = idx
idx_to_code[idx] = code
return code_to_idx, idx_to_code
def map_ids(row, mapping):
    '''
    Helper function.
    '''
return mapping[row]
def interaction_matrix(reviews, test_users, min_user_reviews=5, min_org_reviews=12):
    '''
    interaction_matrix(reviews, test_users, min_user_reviews=5, min_org_reviews=12)
        Builds a block interaction matrix (the layout is described in Returns).
        Parameters
        ----------
        reviews : pd.DataFrame
            User reviews for the interaction matrix.
        test_users : pd.DataFrame
            Users for whom predictions will be made.
        min_user_reviews : int, default=5
            Minimum number of reviews a user must have to be included in the matrix.
        min_org_reviews : int, default=12
            Minimum number of reviews an organisation must have to be included in the matrix.
        Returns
        -------
        InteractionMatrix : scipy.sparse.csr_matrix
            Matrix holding the ratings given by users.
            It is a block matrix of the form:
            ---------------------------------------------------
            | TRAIN USERS, INNER ORGS | TRAIN USERS, OUTER ORGS |
            |                         |                         |
            ---------------------------------------------------
            | TEST USERS, INNER ORGS  | TEST USERS, OUTER ORGS  |
            |                         |                         |
            ---------------------------------------------------
        splitting : tuple
            A tuple of two integers:
            1. Number of users in the train sample
            2. Number of organisations in the home region
        mappings : tuple
            A tuple of two tuples of two dicts:
            1. (idx_to_uid, uid_to_idx) - maps index to user_id
            2. (idx_to_oid, oid_to_idx) - maps index to org_id
    '''
info = reduce_reviews(train_reviews, min_user_reviews, min_org_reviews)
(inner_reviews, inner_orgs), (outer_reviews, outer_orgs), train_users = info
    # remove from the train sample the users that appear in the test sample
test_users = test_users[['user_id']]
train_users = (
pd.merge(train_users, test_users, indicator=True, how='outer')
.query('_merge=="left_only"')
.drop('_merge', axis=1)
)
inner_reviews = filter_reviews(inner_reviews, train_users)
outer_reviews = filter_reviews(outer_reviews, train_users)
    # keep the reviews left by the test users
test_reviews = filter_reviews(reviews, test_users, pd.concat([inner_orgs, outer_orgs]))
    # build the full set of mappings
all_users = pd.concat([train_users, test_users])
all_orgs = pd.concat([inner_orgs, outer_orgs])
uid_to_idx, idx_to_uid = create_mappings(all_users, 'user_id')
oid_to_idx, idx_to_oid = create_mappings(all_orgs, 'org_id')
    # assemble the interaction matrix
reviews = pd.concat([inner_reviews, outer_reviews, test_reviews])
I = reviews['user_id'].apply(map_ids, args=[uid_to_idx]).values
J = reviews['org_id'].apply(map_ids, args=[oid_to_idx]).values
values = reviews['rating']
interactions = sparse.coo_matrix(
(values, (I, J)),
shape=(len(all_users), len(all_orgs)),
dtype=np.float64
).tocsr()
return (
interactions,
(len(train_users), len(inner_orgs)),
(
(idx_to_uid, uid_to_idx),
(idx_to_oid, oid_to_idx)
)
)
###Output
_____no_output_____
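###Markdown
A toy illustration (made-up ids, assuming `scipy.sparse` is imported as `sparse`, as used above) of the interaction matrix being assembled: rows are users, columns are orgs, stored values are ratings, and every missing rating stays an implicit zero.
###Code
toy_I = np.array([0, 0, 1])           # user row indices
toy_J = np.array([0, 2, 1])           # org column indices
toy_vals = np.array([5.0, 4.0, 3.0])  # ratings
toy_matrix = sparse.coo_matrix((toy_vals, (toy_I, toy_J)), shape=(2, 3)).tocsr()
toy_matrix.toarray()
###Output
_____no_output_____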
###Markdown
ALS
###Code
%%time
import implicit
def make_predictions(interactions, X_test, N):
    '''
    make_predictions(interactions, X_test, N)
        Makes recommendations for the users in <X_test> based on the interaction matrix.
        Parameters
        ----------
        interactions : tuple
            The output of interaction_matrix: (sparse interaction matrix, sizes, mappings).
        X_test : pd.DataFrame
            Test users for whom recommendations are needed.
        N : int
            Number of recommendations per user.
        Returns
        -------
        predictions : pd.DataFrame
            DataFrame with the columns <user_id> and <target>.
            <target> holds the list of org_id recommended to the user.
    '''
predictions = X_test[['user_id']].copy()
predictions['target'] = pd.Series(dtype=object)
predictions = predictions.set_index('user_id')
interactions, (train_users_len, inner_orgs_len), mappings = interactions
(idx_to_uid, uid_to_idx), (idx_to_oid, oid_to_idx) = mappings
base_model = implicit.als.AlternatingLeastSquares(
factors=5,
iterations=75,
regularization=0.05,
random_state=42
)
base_model.fit(interactions.T)
orgs_to_filter = list(np.arange(inner_orgs_len))
recommendations = base_model.recommend_all(
interactions,
N=N,
filter_already_liked_items=True,
filter_items=orgs_to_filter,
show_progress=True
)
for user_id in tqdm(X_test['user_id'].values, leave=False):
predictions.loc[user_id, 'target'] = list(
map(
lambda org_idx: idx_to_oid[org_idx],
recommendations[uid_to_idx[user_id]]
)
)
return predictions.reset_index()
msk_interactions = interaction_matrix(
train_reviews[train_reviews['user_city'] == 'msk'],
test_users_with_locations[test_users_with_locations['city'] == 'msk'],
)
spb_interactions = interaction_matrix(
train_reviews[train_reviews['user_city'] == 'spb'],
test_users_with_locations[test_users_with_locations['city'] == 'spb'],
)
test_msk_users = test_users_with_locations[test_users_with_locations['city'] == 'msk']
test_spb_users = test_users_with_locations[test_users_with_locations['city'] == 'spb']
msk_predictions = make_predictions(msk_interactions, test_msk_users, N)
spb_predictions = make_predictions(spb_interactions, test_spb_users, N)
predictions = pd.concat([msk_predictions, spb_predictions])
%%time
print_score(MNAP_N(y_test, predictions))
###Output
Score: 0.85
CPU times: user 592 ms, sys: 12.3 ms, total: 604 ms
Wall time: 607 ms
###Markdown
Submission We pick the best method on validation, retrain it on the full sample, and predict for the test users. Without ML
###Code
tourist_reviews.query('ts >= 1216 - 500')
# reviews left by tourists only
tourist_reviews = reviews[reviews['rating'] >= 4.0]
tourist_reviews = tourist_reviews[tourist_reviews['user_city'] != tourist_reviews['org_city']]
tourist_reviews = tourist_reviews.query('ts >= 1216 - 500')
# pick the most popular places among tourists from Moscow and St. Petersburg
msk_orgs = tourist_reviews[tourist_reviews['org_city'] == 'msk']['org_id']
msk_orgs = msk_orgs.value_counts().index[:N].to_list()
spb_orgs = tourist_reviews[tourist_reviews['org_city'] == 'spb']['org_id']
spb_orgs = spb_orgs.value_counts().index[:N].to_list()
msk_orgs = str(' '.join(map(str, msk_orgs)))
spb_orgs = str(' '.join(map(str, spb_orgs)))
test_users = pd.read_csv('data/test_users.csv')
test_users['city'] = test_users.merge(users, on='user_id')['city']
choose = lambda x: spb_orgs if x['city'] == 'msk' else msk_orgs
target = test_users.apply(choose, axis=1)
predictions = test_users[['user_id']]
predictions['target'] = target
predictions.head()
predictions.to_csv('submission1.csv', index=None)
###Output
_____no_output_____
###Markdown
With ML
###Code
test_users = pd.read_csv('data/test_users.csv')
test_users = test_users.merge(users, on='user_id')
test_msk_users = test_users[test_users['city'] == 'msk'][['user_id', 'city']]
test_spb_users = test_users[test_users['city'] == 'spb'][['user_id', 'city']]
msk_interactions = interaction_matrix(
reviews[reviews['user_city'] == 'msk'],
test_msk_users
)
spb_interactions = interaction_matrix(
reviews[reviews['user_city'] == 'spb'],
test_spb_users
)
msk_predictions = make_predictions(msk_interactions, test_msk_users, N)
spb_predictions = make_predictions(spb_interactions, test_spb_users, N)
predictions = pd.concat([msk_predictions, spb_predictions])
predictions['target'] = predictions['target'].apply(lambda orgs: ' '.join(map(str, orgs)))
predictions.head()
predictions.to_csv('answers_ml.csv', index=None)
###Output
_____no_output_____ |
matplotlib/.ipynb_checkpoints/plottypes-checkpoint.ipynb | ###Markdown
GRAPH PLOTTING Graph plotting is a graphical representation of a dataset. Matplotlib is a very popular plotting library. %matplotlib inline is a magic command used for static graphs within the cell, so we don't need to write plt.show() every time.
###Code
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import style
%matplotlib inline
plt.style.available  # the list of styles available in the package
#syntax: plt.style.use('style_name')
data=np.random.randn(100)
plt.style.use('ggplot')
plt.plot(data)
plt.style.use('tableau-colorblind10')
plt.plot(data)
plt.style.use('fivethirtyeight')
plt.plot([1,2,3,4,5],[6,7,8,9,10])
plt.style.use('grayscale')
x=np.random.randn(100)
# plt.hist() draws a histogram, a type of bar plot that shows the frequency distribution of the data
plt.hist(x, linewidth=2, edgecolor='blue')
###Output
_____no_output_____
###Markdown
plt.hist(x, ...), where x is the sequence of data to bin.
###Code
plt.hist
pip install ipympl
###Output
Collecting ipympl
Downloading ipympl-0.8.7-py2.py3-none-any.whl (507 kB)
Note: you may need to restart the kernel to use updated packages.
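###Markdown
A small self-contained histogram example (made-up data) tying the notes above together: `bins` controls the number of buckets and `density=True` rescales the bar heights so the total area is 1.
###Code
sample = np.random.randn(1000)
plt.hist(sample, bins=30, density=True, edgecolor='black')
plt.title('Histogram of 1000 normal samples')
plt.show()
###Output
_____no_output_____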
|
LinearRegression1503.ipynb | ###Markdown
###Code
import numpy as np
import pandas as pd
df1=pd.read_csv('insurance.csv')
df1.columns
df1.head()
df2=df1[['age','bmi','charges']]
df2.head()
X=np.array(df2[['age','bmi']])
X.shape
y=np.array(df2['charges'])
y.shape
X1=np.append(X,np.ones((X.shape[0],1)),axis=1)
X1.shape
X1[1]
X2=np.array(np.ones((X.shape[0],1)))
X3=np.append(X2,X,axis=1)
X3.shape
X3[0:5]
theta=np.zeros((X3.shape[1],1))
theta
print(y.shape,X3.shape,theta.shape)
y=np.expand_dims(y,axis=1)
# Closed-form least squares fit via the normal equation: (X^T X)^{-1} X^T y
theta_normal_eq = np.linalg.inv(X3.T @ X3) @ (X3.T @ y)
theta_normal_eq
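# Alternative sketch (not part of the original notebook): batch gradient descent on the
# same least-squares objective. The learning rate is a guess for these unscaled features
# and may need tuning; the closed-form solution above is the reference answer.
def gradient_descent(X, y_col, lr=1e-5, n_iter=5000):
    theta_gd = np.zeros((X.shape[1], 1))
    m = X.shape[0]
    for _ in range(n_iter):
        grad = X.T @ (X @ theta_gd - y_col) / m  # gradient of the mean squared error / 2
        theta_gd -= lr * grad
    return theta_gd
# gradient_descent(X3, y)  # uncomment to compare with theta_normal_eq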
marker=10
"marker='{0}'".format(marker)
###Output
_____no_output_____ |
pytorch/tensor_tutorial1.ipynb | ###Markdown
Tensors=======Tensors behave almost exactly the same way in PyTorch as they do in Torch. Create a tensor of size (5 x 7) with uninitialized memory:
###Code
import torch
a = torch.FloatTensor(5, 7)
###Output
_____no_output_____
###Markdown
Initialize a tensor randomized with a normal distribution with mean=0, var=1:
###Code
a = torch.randn(5, 7)
print(a)
print(a.size())
###Output
_____no_output_____
###Markdown
Note``torch.Size`` is in fact a tuple, so it supports the same operations. Inplace / Out-of-place----------------------The first difference is that ALL operations on the tensor that operate in-place on it will have an ``_`` postfix. For example, ``add`` is the out-of-place version, and ``add_`` is the in-place version.
###Code
a.fill_(3.5)
# a has now been filled with the value 3.5
b = a.add(4.0)
# a is still filled with 3.5
# new tensor b is returned with values 3.5 + 4.0 = 7.5
print(a, b)
###Output
_____no_output_____
###Markdown
Some operations like ``narrow`` do not have in-place versions, and hence, ``.narrow_`` does not exist. Similarly, some operations like ``fill_`` do not have an out-of-place version, so ``.fill`` does not exist. Zero Indexing-------------Another difference is that Tensors are zero-indexed. (In Lua, tensors are one-indexed.)
###Code
b = a[0, 3] # select 1st row, 4th column from a
###Output
_____no_output_____
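###Markdown
Since ``narrow`` was mentioned above as an out-of-place-only operation, here is a short illustrative call (not from the original tutorial): ``narrow(dim, start, length)`` returns a view of ``length`` slices along ``dim``.
###Code
c = a.narrow(0, 0, 2)  # first two rows of a; shares storage with a
print(c.size())        # torch.Size([2, 7])
###Output
_____no_output_____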
###Markdown
Tensors can be also indexed with Python's slicing
###Code
b = a[:, 3:5] # selects all rows, 4th column and 5th column from a
###Output
_____no_output_____
###Markdown
No camel casing---------------The next small difference is that all functions are now NOT camelCase anymore. For example ``indexAdd`` is now called ``index_add_``
###Code
x = torch.ones(5, 5)
print(x)
z = torch.Tensor(5, 2)
z[:, 0] = 10
z[:, 1] = 100
print(z)
x.index_add_(1, torch.LongTensor([4, 0]), z)
print(x)
###Output
_____no_output_____
###Markdown
Numpy Bridge------------Converting a torch Tensor to a numpy array and vice versa is a breeze. The torch Tensor and numpy array will share their underlying memory locations, and changing one will change the other. Converting torch Tensor to numpy Array^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
###Code
a = torch.ones(5)
print(a)
b = a.numpy()
print(b)
a.add_(1)
print(a)
print(b) # see how the numpy array changed in value
###Output
_____no_output_____
###Markdown
Converting numpy Array to torch Tensor^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
###Code
import numpy as np
a = np.ones(5)
b = torch.from_numpy(a)
np.add(a, 1, out=a)
print(a)
print(b) # see how changing the np array changed the torch Tensor automatically
###Output
_____no_output_____
###Markdown
All the Tensors on the CPU except a CharTensor support converting to NumPy and back. CUDA Tensors------------CUDA Tensors are nice and easy in pytorch, and transferring a CUDA tensor from the CPU to GPU will retain its underlying type.
###Code
# let us run this cell only if CUDA is available
if torch.cuda.is_available():
# creates a LongTensor and transfers it
# to GPU as torch.cuda.LongTensor
a = torch.LongTensor(10).fill_(3).cuda()
print(type(a))
b = a.cpu()
# transfers it to CPU, back to
# being a torch.LongTensor
###Output
_____no_output_____ |
testing other features/randomwalk2d.ipynb | ###Markdown
discretize a point in a (3,3) matrix
###Code
# Imports used throughout this notebook (numpy, pyplot, math/random helpers, itertools)
import numpy as np
import matplotlib.pyplot as plt
from math import sin, cos, pi
from random import uniform
from itertools import product, repeat
def findquadrant(point,size):
y,x = point
halfsize = size/2
if x < -halfsize:
if y > halfsize: return [0,0]
if y < -halfsize: return [2,0]
return [1,0]
if x > halfsize:
if y > halfsize: return [0,2]
if y < -halfsize: return [2,2]
return [1,2]
if y > halfsize: return [0,1]
if y < -halfsize: return [2,1]
return [1,1]
def findStep(points, box):
tempo = 0
while check_howmany(points, box) < 0.05:
for i in points:
a = uniform(0, 2*pi)
vvar, hvar = V*dt*sin(a), V*dt*cos(a)
i[0] += vvar; i[1] += hvar
tempo += dt
return tempo
###Output
_____no_output_____
###Markdown
randomwalk each point for 1 day equivalent
###Code
def randomWalk(points, nonacessquadrants, time):
dt = 1
for atime in range(time):
for i in points:
a = uniform(0, 2*pi)
vvar, hvar = V*dt*sin(a), V*dt*cos(a)
i[0] += vvar; i[1] += hvar
#if findquadrant(i, size) in nonacessquadrants: i[0] -= 2*vvar
#if findquadrant(i, size) in nonacessquadrants: i[0] += 2*vvar; i[1] -= 2*hvar
#if findquadrant(i, size) in nonacessquadrants: i[0] -= 2*vvar
return points
###Output
_____no_output_____
###Markdown
make a grid from a scatter of many points
###Code
def gridify(somelist, size):
shape = (3,3)
grid = np.zeros(shape)
for point in somelist:
quadrant = findquadrant(point,size)
grid[quadrant[0]][quadrant[1]] += 1
grid = grid/grid.sum()
return np.array(grid)
V = 300/60 #meters per minute
dt = 1 #min
npoints = 40000
size = 68
def newpoints(n):
return np.array([[uniform(-size/2,size/2),uniform(-size/2,size/2)] for i in range(n)])
###Output
_____no_output_____
###Markdown
Find maximum time step without leaking mosquitos from the 3x3 grid
###Code
%%time
def MaxStep(box):
a = 0
for i in range(7):
a += findStep(newpoints(npoints), box)
    a = a/7  # average of the 7 runs above
for i in range(int(a),0, -1):
if 24*60 % i == 0: return i
return("deu ruim")
b= MaxStep(68.66)
a = 24*60/b
print(a)
###Output
6.0
CPU times: user 7min 14s, sys: 284 ms, total: 7min 14s
Wall time: 7min 15s
###Markdown
matrix generator for getting all possible combinations of the matrix; the bit positions map onto the grid as row 1: 0 | 3 | 5, row 2: 1 | X | 6, row 3: 2 | 4 | 7 (X is the centre cell)
###Code
%%time
allmatrices = list(product(*(repeat((0, 1), 8))))
print(len(allmatrices))
dictionary_matrix_to_num = {}
dict_num_to_weights = {}
nowalls = gridify(randomWalk(newpoints(npoints), [], MaxStep(68.66)), size)
avgcorner = (nowalls[0,0]+nowalls[2,2]+nowalls[2,0]+nowalls[0,2])/4
avgwall = (nowalls[1,0]+nowalls[0,1]+nowalls[2,1]+nowalls[1,2])/4
nowalls[0,0], nowalls[2,2], nowalls[2,0], nowalls[0,2] = [avgcorner for i in range(4)]
nowalls[1,0], nowalls[0,1], nowalls[2,1], nowalls[1,2] = [avgwall for i in range(4)]
print(nowalls)
for index, case in enumerate(allmatrices):
dictionary_matrix_to_num[case] = index
multiplier = np.ones((3,3))
if case[0] == 1: multiplier[0,0] = 0
if case[1] == 1: multiplier[1,0] = 0
if case[2] == 1: multiplier[2,0] = 0
if case[3] == 1: multiplier[0,1] = 0
if case[4] == 1: multiplier[2,1] = 0
if case[5] == 1: multiplier[0,2] = 0
if case[6] == 1: multiplier[1,2] = 0
if case[7] == 1: multiplier[2,2] = 0
if index%25 == 0: print(index, case)
dict_num_to_weights[index] = nowalls*multiplier/(nowalls*multiplier).sum()
a = dict_num_to_weights[145]
print(a)
plt.imshow(dict_num_to_weights[145])
plt.show()
import pickle as pkl
MyDicts = [dictionary_matrix_to_num, dict_num_to_weights]
pkl.dump( MyDicts, open( "myDicts.p", "wb" ) )
#to read the pickled dicts use:
# dictionary_matrix_to_num, dict_num_to_weights = pkl.load( open ("myDicts.p", "rb") )
a = [(1,2), (3,4)]
a, b = zip(*a)
a
###Output
_____no_output_____ |
python/PICSURE_API_101.ipynb | ###Markdown
PIC-SURE API use-case: Phenome-Wide analysis on Cure Sickle Cell data This is a tutorial notebook, aimed for the user to be quickly up and running with the python PIC-SURE API. It covers the main functionalities of the API. PIC-SURE python API What is PIC-SURE? -->Databases exposed through PIC-SURE API encompass a wide heterogeneity of architectures and data organizations underneath. PIC-SURE hide this complexity and expose the different databases in the same format, allowing researchers to focus on the analysis and medical insights, thus easing the process of reproducible sciences. More about PIC-SUREPIC-SURE stands for Patient-centered Information Commons: Standardized Unification of Research Elements. The API is available in two different programming languages, python and R, allowing investigators to query databases in the same way using any of those languages.PIC-SURE is a large project from which the R/python PIC-SURE API is only a brick. Among other things, PIC-SURE also offers a graphical user interface, allowing research scientist to get quick knowledge about variables and data available for a specific data source.The python API is actively developed by the Avillach-Lab at Harvard Medical School.GitHub repo:* https://github.com/hms-dbmi/pic-sure-python-adapter-hpds* https://github.com/hms-dbmi/pic-sure-python-client ------- Getting your own user-specific security token **Before running this notebook, please be sure to review the get_your_token.ipynb notebook. It contains explanation about how to get a security token, mandatory to access the databases.** Environment set-up Pre-requisite- python 3.6 or later (although earlier versions of Python 3 must work too)- pip: python package manager, already available in most system with a python interpreter installed ([pip installation instructions](https://pip.pypa.io/en/stable/installing/)) IPython magic commandThose two lines of code below do load the `autoreload` IPython extension. Although not necessary to execute the rest of the Notebook, it does enable to reload every dependency each time python code is executed, thus enabling to take into account changes in external file imported into this Notebook (e.g. user defined function stored in separate file), without having to manually reload libraries. Turns out very handy when developing interactively. More about [IPython Magic commands](https://ipython.readthedocs.io/en/stable/interactive/magics.html).
###Code
%load_ext autoreload
%autoreload 2
###Output
_____no_output_____
###Markdown
Installation of required python packages Using the pip package manager, we install the packages listed in the `requirements.txt` file.
###Code
!cat requirements.txt
import sys
!{sys.executable} -m pip install -r requirements.txt
###Output
_____no_output_____
###Markdown
Import all the external dependencies, as well as user-defined functions stored in the `python_lib` folder
###Code
import json
from pprint import pprint
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
import PicSureHpdsLib
import PicSureClient
from python_lib.utils import get_multiIndex_variablesDict, get_dic_renaming_vars, match_dummies_to_varNames, joining_variablesDict_onCol
from python_lib.HPDS_connection_manager import tokenManager
print("NB: This Jupyter Notebook has been written using PIC-SURE API following versions:\n- PicSureClient: 0.1.0\n- PicSureHpdsLib: 1.1.0\n")
print("The PIC-SURE API libraries versions you've been downloading are: \n- PicSureClient: {0}\n- PicSureHpdsLib: {1}".format(PicSureClient.__version__, PicSureHpdsLib.__version__))
###Output
_____no_output_____
###Markdown
Set up the options for displaying tables and plots in this Notebook
###Code
# Pandas DataFrame display options
pd.set_option("max.rows", 435)
# Matplotlib parameters options
fig_size = plt.rcParams["figure.figsize"]
# Prints: [8.0, 6.0]
fig_size[0] = 14
fig_size[1] = 8
plt.rcParams["figure.figsize"] = fig_size
font = {'weight' : 'bold',
'size' : 12}
plt.rc('font', **font)
###Output
_____no_output_____
###Markdown
Connecting to a PIC-SURE network Several pieces of information are needed to get access to data through the PIC-SURE API: a network URL, a resource id, and a user security token which is specific to a given URL + resource.
###Code
PICSURE_network_URL = "https://biodatacatalyst.integration.hms.harvard.edu/picsure"
PICSURE_network_URL = "https://curesc.hms.harvard.edu/picsure"
resource_id = "37663534-6161-3830-6264-323031316539"
token_file = "token.txt"
with open(token_file, "r") as f:
my_token = f.read()
client = PicSureClient.Client()
connection = client.connect(PICSURE_network_URL, my_token)
adapter = PicSureHpdsLib.Adapter(connection)
resource = adapter.useResource(resource_id)
###Output
_____no_output_____
###Markdown
Two objects are created here: a `connection` and a `resource` object, using respectively the `picsure` and `hpds` libraries. As we will only be using one single resource, **the `resource` object is actually the only one we will need to proceed with data analysis hereafter** (FYI, the `connection` object is useful to get access to different databases stored in different resources). It is connected to the specific data source ID we specified, and enables querying and retrieving data from this source.
###Code
resource.help()
###Output
_____no_output_____
###Markdown
For instance, this output tells us that this `resource` object has 2 methods, and it gives insights about their function. Using the *variables dictionary* Once the connection to the desired resource has been established, we first need to get a quick grasp of which variables are available in the database. To this end, we will use the `dictionary` method of the `resource` object. A `dictionary` instance offers the possibility to retrieve matching records according to a specific term, or to retrieve information about all available variables, using the `find()` method. For instance, looking for variables containing the term `Stroke` is done this way:
###Code
dictionary = resource.dictionary()
dictionary_search = dictionary.find("Stroke")
###Output
_____no_output_____
###Markdown
Objects created by the `dictionary.find` method expose the search result using 4 different methods: `.count()`, `.keys()`, `.entries()`, and `.DataFrame()`.
###Code
dictionary_search.DataFrame().sort_index()
###Output
_____no_output_____
###Markdown
**`.DataFrame()` enables to get the result of the dictionary search in a pandas DataFrame format** The dictionary provide various information about the variables, such as:- observationCount: number of entries with non-null value- categorical: type of the variables, True if categorical, False if continuous/numerical- min/max: only provided for non-categorical variables- HpdsDataType: 'phenotypes' or 'genotypes'. Currently, the API only expsoses'phenotypes' variablesHence, it enables to:* Use the various variables information as criteria for variable selection.* Use the row names of the DataFrame to get the actual variables names, to be used in the query, as shown below.
###Code
pprint({"Count": dictionary_search.count(),
"Keys": dictionary_search.keys()[0:5],
"Entries": dictionary_search.entries()[0:5]})
###Output
_____no_output_____
###Markdown
Variable names, as currently implemented in the API, are long and contain backslashes, which prevents copy-pasting them directly to select a variable name. However, using the dictionary to select variables can help to deal with this. Hence, one way to proceed is to retrieve the whole dictionary in the form of a pandas DataFrame, as below:
###Code
plain_variablesDict = resource.dictionary().find().DataFrame()
###Output
_____no_output_____
###Markdown
Indeed, using the `dictionary.find()` function without arguments returns every entry, as shown in the help documentation.
###Code
resource.dictionary().help()
plain_variablesDict.iloc[10:20,:]
###Output
_____no_output_____
###Markdown
Variable dictionary + pandas multiIndex Though the plain dictionary is helpful, we can use a simple user-defined function (`get_multiIndex_variablesDict`) to add a little more information and ease dealing with variable names. It takes advantage of pandas MultiIndex functionality [see pandas official documentation on this topic](https://pandas.pydata.org/pandas-docs/stable/user_guide/advanced.html). Although not an official feature of the API, such functionality illustrates how to quickly scan and select groups of related variables. Printing the 'multiIndexed' variable Dictionary allows us to quickly see the tree-like organisation of the variables. Moreover, original and simplified variable names are now stored respectively in the "varName" and "simplified_varName" columns.
###Code
variablesDict = get_multiIndex_variablesDict(plain_variablesDict)
variablesDict
# We limit the number of lines to be displayed for the future outputs
pd.set_option("max.rows", 50)
###Output
_____no_output_____
###Markdown
Below is a simple example to illustrate the ease of use of a multiIndex dictionary. Let's say we are interested in every variable pertaining to the "Medical history" and "Medication history" subcategories.
###Code
mask_study = variablesDict.index.get_level_values(0) == "CIBMTR - Cure Sickle Cell Disease"
mask_transplant = variablesDict.index.get_level_values(1) == "3 - Transplant related"
medication_history_variables = variablesDict.loc[mask_study & mask_transplant,:]
medication_history_variables
###Output
_____no_output_____
###Markdown
Although pretty simple, it can easily be combined with other filters to quickly select the necessary variables. Querying and retrieving data Besides the dictionary, the second cornerstone of the API is the `query` object. It is the entry point for retrieving data from the resource.
###Code
my_query = resource.query()
###Output
_____no_output_____
###Markdown
The query object has several methods that enable building a query - The `query.select().add()` method accepts variable names as a string or list of strings as argument, and will allow the query to return all variables included in the list, without any record (i.e. subjects/rows) subsetting. - The `query.require().add()` method accepts variable names as a string or list of strings as argument, and will allow the query to return all the variables passed, and only records that do not contain any null values for those variables. - The `query.anyof().add()` method accepts variable names as a string or list of strings as argument, and will allow the query to return all variables included in the list, and only records that contain at least one non-null value for those variables. - The `query.filter().add()` method accepts a single variable name as a string argument, plus additional values to filter on that given variable. The query will return this variable and only the records that match this filter. All those 4 methods can be combined when building a query; the records eventually returned by the query have to meet all the specified filters (a short, throw-away demonstration of `require()` and `anyof()` is included at the end of the next code cell).
###Code
# Selecting all variables from "CIBMTR - Cure Sickle Cell Disease" study
mask_study = variablesDict.index.get_level_values(0) == "CIBMTR - Cure Sickle Cell Disease"
varnames = variablesDict.loc[mask_study, "varName"]
# Getting variable names to filter query on
mask_age = variablesDict["simplified_varName"] == "Patient age at transplant years"
age_transplant = variablesDict.loc[mask_age, "varName"]
mask_stroke = variablesDict["simplified_varName"] == "Stroke post HCT"
stroke_post_HCT = variablesDict.loc[mask_stroke, "varName"]
values_stroke_post_HCT = variablesDict.loc[mask_stroke, "categoryValues"]
my_query.filter().add(age_transplant, max=18)
my_query.filter().add(stroke_post_HCT, values=values_stroke_post_HCT)
my_query.select().add(varnames)
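# Illustrative sketch (not used for the analysis below): the require() and anyof() methods
# described above work the same way; shown here on a throw-away query so the main query
# built above is left untouched.
demo_query = resource.query()
demo_query.require().add(age_transplant)   # only records with a non-null transplant age
demo_query.anyof().add(varnames)           # records with at least one non-null study variable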
###Output
_____no_output_____
###Markdown
Retrieving the data Once our query object is finally built, we use the `getResultsDataFrame()` method to retrieve the data corresponding to our query
###Code
query_result = my_query.getResultsDataFrame().set_index("Patient ID")
query_result.shape
query_result.head()
###Output
_____no_output_____ |
etl/steps/data/meadow/who/2021-07-01/ghe.ipynb | ###Markdown
WHO GHE (2021-07-01)
###Code
dest_dir = "/tmp/ghe_20210701"
from owid import walden, catalog # type: ignore
import tempfile
from zipfile import ZipFile
import os
import pandas as pd
from etl.steps.data.converters import convert_walden_metadata
###Output
_____no_output_____
###Markdown
1. Locate the dataset in Walden
###Code
raw_dataset = walden.Catalog().find_one("who", "2021-07-01", "ghe")
raw_dataset
###Output
_____no_output_____
###Markdown
2. Extract the zip file to a temporary directory
###Code
# TemporaryDirectory is used here only to obtain a unique path; the directory is removed
# when the context exits, so it is recreated right below so that it outlives the with-block.
with tempfile.TemporaryDirectory() as dirname:
pass
os.mkdir(dirname)
dirname
ZipFile(raw_dataset.local_path).extractall(dirname)
dirname
csv_file = os.path.join(dirname, "who_ghe", "_all_countries.csv")
###Output
_____no_output_____
###Markdown
3. Load the data frame and prune excess columns
###Code
df = pd.read_csv(csv_file)
df.iloc[:1].T
df.drop(["Unnamed: 0", "Unnamed: 0.1"], axis=1, inplace=True)
df.drop([col for col in df.columns if col.startswith("Sys_")], axis=1, inplace=True)
df.drop([col for col in df.columns if col.startswith("FL_")], axis=1, inplace=True)
df.columns = [col.lower() for col in df.columns]
df.drop("_recordid", axis=1, inplace=True)
df["country_code"] = df["country_code"].astype("category")
df["ghe_cause_title"] = df["ghe_cause_title"].astype("category")
df["sex_code"] = df["sex_code"].astype("category")
df["agegroup_code"] = df["agegroup_code"].astype("category")
df.iloc[0]
###Output
_____no_output_____
###Markdown
4. Save as a dataset
###Code
raw_dataset
ds = catalog.Dataset.create_empty(dest_dir)
ds.metadata = convert_walden_metadata(raw_dataset)
ds.save()
###Output
_____no_output_____
###Markdown
Add cause codes
###Code
ghe_causes = (
df[["ghe_cause_code", "ghe_cause_title"]]
.drop_duplicates()
.set_index("ghe_cause_code")
)
ghe_causes = catalog.Table(ghe_causes)
ghe_causes
ghe_causes.metadata = catalog.TableMeta(
short_name="ghe_causes",
title="GHE Cause Codes",
description="Integer codes for common GHE causes and their human readable names",
)
ds.add(ghe_causes)
###Output
_____no_output_____
###Markdown
Add estimates
###Code
df.drop("ghe_cause_code", axis=1, inplace=True)
df.head()
estimates = catalog.Table(df)
estimates.set_index(
["country_code", "year", "ghe_cause_title", "sex_code", "agegroup_code"],
inplace=True,
)
estimates.head()
estimates.metadata.short_name = "estimates"
estimates.metadata.description = "GHE estimated burden of disease"
ds.add(estimates)
###Output
_____no_output_____
###Markdown
Cleanup
###Code
import shutil
shutil.rmtree(dirname)
###Output
_____no_output_____ |
ob_work/Task3_681b3aebjson.ipynb | ###Markdown
Task 3: 681b3aeb.json
###Code
import json
import numpy as np
from IPython.display import Image
from pprint import pprint
###Output
_____no_output_____
###Markdown
For this task we always want to take the non-zero shapes in the grid and merge them into a single, completely filled 3x3 matrix (a sketch of one possible solver is included after the train/test inspection below)
###Code
Image("C://Users/oisin/Documents/College/PTAI/Assignment 3/ARC/ob_work/test_train_plots/681b3aeb.PNG")
###Output
_____no_output_____
###Markdown
Below is the matrix interpretation of the above
###Code
with open("C://Users/oisin/Documents/College/PTAI/Assignment 3/ARC/data/training/681b3aeb.json") as f:
data = json.load(f)
pprint(data)
train = data['train']
for i in train:
pprint(i)
print()
###Output
{'input': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 3, 3, 0, 0, 0, 0, 0, 0, 0],
[0, 3, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 3, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 7],
[0, 0, 0, 0, 0, 0, 0, 0, 7, 7],
[0, 0, 0, 0, 0, 0, 0, 0, 7, 7]],
'output': [[3, 3, 7], [3, 7, 7], [3, 7, 7]]}
{'input': [[0, 0, 0, 0, 0, 0, 0, 0, 4, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 4, 4],
[0, 0, 0, 6, 6, 6, 0, 0, 0, 0],
[0, 0, 0, 0, 6, 6, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 6, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
'output': [[6, 6, 6], [4, 6, 6], [4, 4, 6]]}
{'input': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 3, 0, 0, 0, 0, 0],
[0, 0, 0, 3, 3, 3, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 1, 0, 0, 0, 0, 0, 0]],
'output': [[1, 1, 1], [1, 3, 1], [3, 3, 3]]}
###Markdown
Train-test split below
###Code
train = data['train']
train = [i['input'] for i in train]
example = train[2]
for row in example:
pprint(row)
train = data['train']
train = [i['output'] for i in train]
example = train[2]
for row in example:
pprint(row)
test = data['test']
test = [i['output'] for i in test]
for i in test:
pprint(i)
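# Sketch of one possible solver for this task (our own assumption, not the notebook's
# final solution): crop each coloured shape to its bounding box, then search over offsets
# inside an empty 3x3 canvas until the two shapes tile it exactly.
from itertools import product as iproduct
def extract_shape(grid, colour):
    ys, xs = np.where(grid == colour)
    return grid[ys.min():ys.max() + 1, xs.min():xs.max() + 1]
def solve_681b3aeb(task_grid):
    g = np.array(task_grid)
    shapes = [extract_shape(g, c) for c in np.unique(g) if c != 0]
    offsets = [list(iproduct(range(4 - s.shape[0]), range(4 - s.shape[1]))) for s in shapes]
    for placement in iproduct(*offsets):
        canvas = np.zeros((3, 3), dtype=int)
        ok = True
        for s, (dy, dx) in zip(shapes, placement):
            window = canvas[dy:dy + s.shape[0], dx:dx + s.shape[1]]
            if np.any((window != 0) & (s != 0)):   # the two shapes would overlap here
                ok = False
                break
            canvas[dy:dy + s.shape[0], dx:dx + s.shape[1]] = np.where(s != 0, s, window)
        if ok and np.all(canvas != 0):             # every cell filled: valid merge
            return canvas.tolist()
    return None
# e.g. solve_681b3aeb(data['train'][0]['input']) should return [[3, 3, 7], [3, 7, 7], [3, 7, 7]]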
###Output
[[8, 8, 2], [8, 2, 2], [8, 8, 8]]
|
notebooks/Data_Source_Analysis/test-yfinance.ipynb | ###Markdown
YFinance - Tutorial
###Code
import yfinance as yf
import pyspark
from pyspark.sql import SparkSession
from pyspark.sql.types import *
import time
import seaborn as sns
###Output
_____no_output_____
###Markdown
Installation First, we will have to install YFinance by running the following command: - pip install yfinance With this library, there are no usage limits, so you can execute all the instructions you want. We do not recommend using numpy==1.19.4 because it throws some incompatibility exceptions with YFinance. We have also had some problems when using virtual environments such as anaconda.
###Code
#Getting information about a certain company
msft = yf.Ticker("MSFT")
#Historical information
msft.info
###Output
_____no_output_____
###Markdown
We get information ranging from the company zip code to average values over the last five years. As we have more data than we need, we will use the following fields, which show the essential information of a company. - Zip- Sector- FullTimeEmployees- LongBusinessSummary- City - Phone- State- Website- Industry- PreviousClose is the value at which the stock closed the previous period.
###Code
hist = msft.history(period="max")
hist
#Filtering Dataset by ticket and date
ticket = input("Introduzca los ticket de la empresa: ")
fecha_inicio = input("Introduzca la fecha de inicio (AAAA-MM-DD): ")
fecha_fin = input("Introduzca la fecha de fin (AAAA-MM-DD); ")
df = yf.download(ticket, start=fecha_inicio, end=fecha_fin)
msft = yf.Ticker(ticket)
company_inf = {"name": msft.info["longName"],
"sector": msft.info["sector"],
"fullTimeEmployees" : msft.info["fullTimeEmployees"],
"city": msft.info["city"],
"state": msft.info["state"],
"phone" : msft.info["phone"],
"webpage" : msft.info["website"],
"industry" : msft.info["industry"],
"previousClose" : msft.info["previousClose"]
}
company_inf
###Output
Enter the company ticker: GOOG
Enter the start date (YYYY-MM-DD): 2000-01-01
Enter the end date (YYYY-MM-DD): 2020-01-01
[*********************100%***********************] 1 of 1 completed
###Markdown
The dataset consists of several attributes defined below:- Date- Open is the price at which the financial security opens in the market when trading begins. It may or may not be different from the previous day's closing price.- High is the highest price at which a security, such as a stock, has traded during the time period.- Low is the lowest price at which a security, such as a stock, has traded during the time period.- Close is the final price at which it trades during regular market hours on any given day. The closing price is considered the most accurate valuation of a stock or other security until trading resumes on the next trading day.- Adjusted Close (Adj Close) amends a stock's closing price to reflect that stock's value after accounting for any corporate actions. It is often used when examining historical returns or doing a detailed analysis of past performance.- Volume is the amount of an asset or security that changes hands over some period of time, often over the course of a day. For instance, stock trading volume would refer to the number of shares of a security traded between its daily open and close. ref: https://www.investopedia.com/ We have detected that those values that are unknown for a certain date are NaN values, so they will be replaced by the arithmetic mean.
###Code
#Replacing nulls with the column means
if df.isnull().values.any():
    df = df.fillna(df.mean())
    print(df)
#Saving dataframe
df.to_csv('out.csv',index=True)
###Output
Open High Low Close Adj Close \
Date
2004-08-19 49.813286 51.835709 47.800831 49.982655 49.982655
2004-08-20 50.316402 54.336334 50.062355 53.952770 53.952770
2004-08-23 55.168217 56.528118 54.321388 54.495735 54.495735
2004-08-24 55.412300 55.591629 51.591621 52.239193 52.239193
2004-08-25 52.284027 53.798351 51.746044 52.802086 52.802086
... ... ... ... ... ...
2019-12-24 1348.500000 1350.260010 1342.780029 1343.560059 1343.560059
2019-12-26 1346.170044 1361.327026 1344.469971 1360.400024 1360.400024
2019-12-27 1362.989990 1364.530029 1349.310059 1351.890015 1351.890015
2019-12-30 1350.000000 1353.000000 1334.020020 1336.140015 1336.140015
2019-12-31 1330.109985 1338.000000 1329.084961 1337.020020 1337.020020
Volume
Date
2004-08-19 44871300
2004-08-20 22942800
2004-08-23 18342800
2004-08-24 15319700
2004-08-25 9232100
... ...
2019-12-24 347500
2019-12-26 667500
2019-12-27 1038400
2019-12-30 1050900
2019-12-31 961800
[3869 rows x 6 columns]
###Markdown
We use the describe() function to obtain summary statistics for the dataset retrieved from the user's inputs.
###Code
df.describe()
###Output
_____no_output_____
###Markdown
From this moment on, we will apply visualization techniques that can provide additional information on the dataset. (**IN PROCESS**)
###Code
sns.lineplot(data=df, x="Date", y=df["Close"]-df["Open"])
###Output
_____no_output_____
###Markdown
We can see that, as time goes by, the daily change in the company's value (close minus open) becomes larger.
###Code
spark = SparkSession.builder.appName('aggs').getOrCreate()
logger = spark._jvm.org.apache.log4j
logger.LogManager.getLogger("org").setLevel(logger.Level.WARN)
fields = [StructField("Date", StringType(), True),
StructField("Open", DoubleType(), True),
StructField("High", DoubleType(), True),
StructField("Low", DoubleType(), True),
StructField("Close", DoubleType(), True),
StructField("Adj Close", DoubleType(), True),
StructField("Volume", IntegerType(), True)]
schema = StructType(fields)
#Loading dataframe using spark
spark_df = spark \
.read \
.format("csv") \
.option("header","true")\
.schema(schema)\
.load("out.csv")
#Printing Schema
spark_df.printSchema()
spark_df.show()
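# Illustrative sketch (not part of the original notebook): a simple Spark aggregation,
# e.g. the average closing price and total traded volume per year; the year is sliced
# from the Date string column declared in the schema above.
from pyspark.sql import functions as F
yearly = (spark_df
          .withColumn("year", F.substring("Date", 1, 4))
          .groupBy("year")
          .agg(F.avg("Close").alias("avg_close"), F.sum("Volume").alias("total_volume"))
          .orderBy("year"))
yearly.show(5)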
sns.lineplot(data=df, x="Date", y=df.Close/df.Open)
###Output
_____no_output_____ |
TinyFacesGAN_implementation.ipynb | ###Markdown
Finding Tiny Faces in the Wild with Generative Adversarial Network: implementation in **keras with tensorflow backend**. Link to [the paper](https://openaccess.thecvf.com/content_cvpr_2018/papers/Bai_Finding_Tiny_Faces_CVPR_2018_paper.pdf)
###Code
# Code to Load Regions of Interest (ROI) i.e. Faces and Non Faces.
from __future__ import print_function
import os
import sys
import gzip
import json
import shutil
import zipfile
import requests
import subprocess
from tqdm import tqdm
from six.moves import urllib
def download_file_from_google_drive(fileid, path):
URL = "https://docs.google.com/uc?export=download"
session = requests.Session()
response = session.get(URL, params={'id': fileid}, stream=True)
token = get_confirm_token(response)
if token:
params = {'id': fileid, 'confirm': token}
response = session.get(URL, params=params, stream=True)
save_response_content(response, path)
def get_confirm_token(response):
for key, value in response.cookies.items():
if key.startswith('download_warning'):
return value
return None
def save_response_content(response, path):
CHUNK_SIZE = 32768
with open(path, "wb") as f:
for chunk in response.iter_content(CHUNK_SIZE):
if chunk:
f.write(chunk)
def download(url, dirpath):
filename = url.split('/')[-1]
filepath = os.path.join(dirpath, filename)
u = urllib.request.urlopen(url)
f = open(filepath, 'wb')
filesize = int(u.headers["Content-Length"])
print("Downloading: %s Bytes: %s" % (filename, filesize))
downloaded = 0
block_sz = 8192
status_width = 70
while True:
buf = u.read(block_sz)
if not buf:
print('')
break
else:
print('', end='\r')
downloaded += len(buf)
f.write(buf)
status = (("[%-" + str(status_width + 1) + "s] %3.2f%%") %
('=' * int(float(downloaded) / filesize * status_width) + '>', (downloaded * 100. / filesize * 8192)))
print(status, end='')
sys.stdout.flush()
f.close()
return filepath
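# Note: the three helpers below redefine (and therefore shadow) the versions defined above;
# the tqdm-based save_response_content is the one actually used when downloading.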
def download_file_from_google_drive(id, destination):
URL = "https://docs.google.com/uc?export=download"
session = requests.Session()
response = session.get(URL, params={ 'id': id }, stream=True)
token = get_confirm_token(response)
if token:
params = { 'id' : id, 'confirm' : token }
response = session.get(URL, params=params, stream=True)
save_response_content(response, destination)
def get_confirm_token(response):
for key, value in response.cookies.items():
if key.startswith('download_warning'):
return value
return None
def save_response_content(response, destination, chunk_size=32*1024):
total_size = int(response.headers.get('content-length', 0))
with open(destination, "wb") as f:
for chunk in tqdm(response.iter_content(chunk_size), total=total_size,
unit='B', unit_scale=True, desc=destination):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
def unzip(filepath):
print("Extracting: " + filepath)
dirpath = os.path.dirname(filepath)
with zipfile.ZipFile(filepath) as zf:
zf.extractall(dirpath)
os.remove(filepath)
def download_file(dirpath, filename, drive_id):
data_dir = 'ROI'
# if os.path.exists(os.path.join(dirpath, data_dir)):
# print('Found ROI - skip')
# return
#filename, drive_id = "WIDER_train.zip", "0B6eKvaijfFUDQUUwd21EckhUbWs"
save_path = os.path.join(dirpath, filename)
# if os.path.exists(save_path):
# print('[*] {} already exists'.format(save_path))
# else:
download_file_from_google_drive(drive_id, save_path)
zip_dir = ''
with zipfile.ZipFile(save_path) as zf:
zip_dir = zf.namelist()[0]
zf.extractall(dirpath)
os.remove(save_path)
os.rename(os.path.join(dirpath, zip_dir), os.path.join(dirpath, data_dir))
if __name__ == '__main__':
download_file('/', 'nthumbs.zip' ,'1r8rY_1f76yzNdYz9RKwOw5AhIXdma0kp')
download_file('/', 'thumbs.zip' ,'1XbkaHeY1sg5vYVn1nj1qThHH9xSG33mb')
download_file('/', 'LRthumbs.zip' ,'1yuCwXoVHCBx0A_TCNER696XMzAutHx-9')
download_file('/', 'LRnthumbs.zip' ,'1IFcxjsnG_aRNB8PLYjbRcDv53ivnc8vr')
nremove = !ls nthumbs | head -1
remove = !ls thumbs | head -1
!rm /content/thumbs/{remove[0]}
!rm /content/nthumbs/{nremove[0]}
import glob
import numpy as np
import cv2
from PIL import Image
fileListThumbs = glob.glob('thumbs/*.jpg')
fileListNotthumbs = glob.glob('nthumbs/*.jpg')
LRfileListThumbs = glob.glob('LRthumbs/*.jpg')
LRfileListNotthumbs = glob.glob('LRnthumbs/*.jpg')
thumbs = np.array([np.array(Image.open(fname)) for fname in fileListThumbs]) #All thumbs (18298) as numpy array
notThumbs = np.array([np.array(Image.open(fname)) for fname in fileListNotthumbs])
LRthumbs = np.array([np.array(Image.open(fname)) for fname in LRfileListThumbs]) #All LR thumbs (18298) as numpy array
LRnotThumbs = np.array([np.array(Image.open(fname)) for fname in LRfileListNotthumbs])
def normalization(X):
return X / 127.5 - 1 #To Bring pixel values in range [-1, 1]
def gen_batch(X, batch_size):
#X is numpy array of all files
while True:
idx = np.random.choice(X.shape[0], batch_size, replace=False) #Generates a random batch from the dataset
yield X[idx] #Return files with yield on the go
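# Illustrative usage sketch (not from the original notebook): draw one batch of
# high-resolution face crops and normalize it to [-1, 1] with the helpers above.
hr_face_gen = gen_batch(thumbs, 32)
hr_batch = normalization(next(hr_face_gen).astype(np.float32))
print(hr_batch.shape, hr_batch.min(), hr_batch.max())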
from __future__ import print_function, division
import scipy
from keras.layers import Input, Dense, Reshape, Flatten, Dropout, Concatenate
from keras.layers import BatchNormalization, Activation, ZeroPadding2D, Add, MaxPooling2D, Flatten
from keras.layers.advanced_activations import PReLU, LeakyReLU
from keras.layers.convolutional import UpSampling2D, Conv2D
from keras.models import Sequential, Model
from keras.optimizers import Adam, SGD
import keras.backend as K
import sys
import os
import datetime
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
from keras.applications.vgg19 import VGG19
channels=3
n_residual_blocks = 8
lr_shape=(12,12,channels)
hr_shape=(48,48,channels)
alpha = 0.001
beta = 0.01
def residual_block(layer_input, filters):
d = Conv2D(filters, kernel_size=3, strides=1, padding='same')(layer_input)
d = Activation('relu')(d)
d = BatchNormalization(momentum=0.9)(d)
d = Conv2D(filters, kernel_size=3, strides=1, padding='same')(d)
d = Activation('relu')(d)
d = BatchNormalization(momentum=0.9)(d)
d = Conv2D(filters, kernel_size=3, strides=1, padding='same')(d)
d = Activation('relu')(d)
d = BatchNormalization(momentum=0.9)(d)
d = Conv2D(filters, kernel_size=3, strides=1, padding='same')(d)
d = Activation('relu')(d)
d = BatchNormalization(momentum=0.9)(d)
d = Conv2D(filters, kernel_size=3, strides=1, padding='same')(d)
d = Activation('relu')(d)
d = BatchNormalization(momentum=0.9)(d)
d = Conv2D(filters, kernel_size=3, strides=1, padding='same')(d)
d = Activation('relu')(d)
d = BatchNormalization(momentum=0.9)(d)
d = Conv2D(filters, kernel_size=3, strides=1, padding='same')(d)
d = Activation('relu')(d)
d = BatchNormalization(momentum=0.9)(d)
d = Conv2D(filters, kernel_size=3, strides=1, padding='same')(d)
d = BatchNormalization(momentum=0.9)(d)
d = Add()([d, layer_input])
return d
def deconv2d(layer_input):
u = UpSampling2D(size=2)(layer_input)
u = Conv2D(256, kernel_size=3, strides=1, padding='same')(u)
u = Activation('relu')(u)
return u
img_lr = Input(shape=lr_shape)
# Pre-residual block
cpr1 = Conv2D(64, kernel_size=9, strides=1, padding='same')(img_lr)
cpr1 = Activation('relu')(cpr1)
# Propogate through residual blocks
r1 = residual_block(cpr1,64)
for _ in range(n_residual_blocks - 1):
r1 = residual_block(r1, 64)
# Post-residual block
cpr2 = Conv2D(64, kernel_size=3, strides=1, padding='same')(r1)
cpr2 = BatchNormalization(momentum=0.9)(cpr2)
cpr2 = Add()([cpr2, cpr1])
# Upsampling
u1 = deconv2d(cpr2)
u2 = deconv2d(u1)
inter_sr=Conv2D(channels, kernel_size=1, strides=1, padding='same')(u2)
##refinement network
# Pre-residual block
cpr3 = Conv2D(64, kernel_size=9, strides=1, padding='same')(inter_sr)
cpr3 = Activation('relu')(cpr3)
# Propogate through residual blocks
r2 = residual_block(cpr3,64)
for _ in range(n_residual_blocks - 1):
r2 = residual_block(r2, 64)
# Post-residual block
cpr4 = Conv2D(64, kernel_size=3, strides=1, padding='same')(r2)
cpr4 = BatchNormalization(momentum=0.9)(cpr4)
cpr5 = Conv2D(256, kernel_size=3, strides=1, padding='same')(cpr4)
cpr5 = BatchNormalization(momentum=0.9)(cpr5)
cpr6 = Conv2D(256, kernel_size=3, strides=1, padding='same')(cpr5)
cpr6 = BatchNormalization(momentum=0.9)(cpr6)
img_sr=Conv2D(channels, kernel_size=3, strides=1, padding='same')(cpr6)
generator=Model(img_lr, [inter_sr, img_sr])
generator.summary()
###Output
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_1 (InputLayer) (None, 12, 12, 3) 0
__________________________________________________________________________________________________
conv2d_1 (Conv2D) (None, 12, 12, 64) 15616 input_1[0][0]
__________________________________________________________________________________________________
activation_1 (Activation) (None, 12, 12, 64) 0 conv2d_1[0][0]
__________________________________________________________________________________________________
conv2d_2 (Conv2D) (None, 12, 12, 64) 36928 activation_1[0][0]
__________________________________________________________________________________________________
activation_2 (Activation) (None, 12, 12, 64) 0 conv2d_2[0][0]
__________________________________________________________________________________________________
batch_normalization_1 (BatchNor (None, 12, 12, 64) 256 activation_2[0][0]
__________________________________________________________________________________________________
conv2d_3 (Conv2D) (None, 12, 12, 64) 36928 batch_normalization_1[0][0]
__________________________________________________________________________________________________
activation_3 (Activation) (None, 12, 12, 64) 0 conv2d_3[0][0]
__________________________________________________________________________________________________
batch_normalization_2 (BatchNor (None, 12, 12, 64) 256 activation_3[0][0]
__________________________________________________________________________________________________
conv2d_4 (Conv2D) (None, 12, 12, 64) 36928 batch_normalization_2[0][0]
__________________________________________________________________________________________________
activation_4 (Activation) (None, 12, 12, 64) 0 conv2d_4[0][0]
__________________________________________________________________________________________________
batch_normalization_3 (BatchNor (None, 12, 12, 64) 256 activation_4[0][0]
__________________________________________________________________________________________________
conv2d_5 (Conv2D) (None, 12, 12, 64) 36928 batch_normalization_3[0][0]
__________________________________________________________________________________________________
activation_5 (Activation) (None, 12, 12, 64) 0 conv2d_5[0][0]
__________________________________________________________________________________________________
batch_normalization_4 (BatchNor (None, 12, 12, 64) 256 activation_5[0][0]
__________________________________________________________________________________________________
conv2d_6 (Conv2D) (None, 12, 12, 64) 36928 batch_normalization_4[0][0]
__________________________________________________________________________________________________
activation_6 (Activation) (None, 12, 12, 64) 0 conv2d_6[0][0]
__________________________________________________________________________________________________
batch_normalization_5 (BatchNor (None, 12, 12, 64) 256 activation_6[0][0]
__________________________________________________________________________________________________
conv2d_7 (Conv2D) (None, 12, 12, 64) 36928 batch_normalization_5[0][0]
__________________________________________________________________________________________________
activation_7 (Activation) (None, 12, 12, 64) 0 conv2d_7[0][0]
__________________________________________________________________________________________________
batch_normalization_6 (BatchNor (None, 12, 12, 64) 256 activation_7[0][0]
__________________________________________________________________________________________________
conv2d_8 (Conv2D) (None, 12, 12, 64) 36928 batch_normalization_6[0][0]
__________________________________________________________________________________________________
activation_8 (Activation) (None, 12, 12, 64) 0 conv2d_8[0][0]
__________________________________________________________________________________________________
batch_normalization_7 (BatchNor (None, 12, 12, 64) 256 activation_8[0][0]
__________________________________________________________________________________________________
conv2d_9 (Conv2D) (None, 12, 12, 64) 36928 batch_normalization_7[0][0]
__________________________________________________________________________________________________
batch_normalization_8 (BatchNor (None, 12, 12, 64) 256 conv2d_9[0][0]
__________________________________________________________________________________________________
add_1 (Add) (None, 12, 12, 64) 0 batch_normalization_8[0][0]
activation_1[0][0]
__________________________________________________________________________________________________
conv2d_10 (Conv2D) (None, 12, 12, 64) 36928 add_1[0][0]
__________________________________________________________________________________________________
activation_9 (Activation) (None, 12, 12, 64) 0 conv2d_10[0][0]
__________________________________________________________________________________________________
batch_normalization_9 (BatchNor (None, 12, 12, 64) 256 activation_9[0][0]
__________________________________________________________________________________________________
conv2d_11 (Conv2D) (None, 12, 12, 64) 36928 batch_normalization_9[0][0]
__________________________________________________________________________________________________
activation_10 (Activation) (None, 12, 12, 64) 0 conv2d_11[0][0]
__________________________________________________________________________________________________
batch_normalization_10 (BatchNo (None, 12, 12, 64) 256 activation_10[0][0]
__________________________________________________________________________________________________
conv2d_12 (Conv2D) (None, 12, 12, 64) 36928 batch_normalization_10[0][0]
__________________________________________________________________________________________________
activation_11 (Activation) (None, 12, 12, 64) 0 conv2d_12[0][0]
__________________________________________________________________________________________________
batch_normalization_11 (BatchNo (None, 12, 12, 64) 256 activation_11[0][0]
__________________________________________________________________________________________________
conv2d_13 (Conv2D) (None, 12, 12, 64) 36928 batch_normalization_11[0][0]
__________________________________________________________________________________________________
activation_12 (Activation) (None, 12, 12, 64) 0 conv2d_13[0][0]
__________________________________________________________________________________________________
batch_normalization_12 (BatchNo (None, 12, 12, 64) 256 activation_12[0][0]
__________________________________________________________________________________________________
conv2d_14 (Conv2D) (None, 12, 12, 64) 36928 batch_normalization_12[0][0]
__________________________________________________________________________________________________
activation_13 (Activation) (None, 12, 12, 64) 0 conv2d_14[0][0]
__________________________________________________________________________________________________
batch_normalization_13 (BatchNo (None, 12, 12, 64) 256 activation_13[0][0]
__________________________________________________________________________________________________
conv2d_15 (Conv2D) (None, 12, 12, 64) 36928 batch_normalization_13[0][0]
__________________________________________________________________________________________________
activation_14 (Activation) (None, 12, 12, 64) 0 conv2d_15[0][0]
__________________________________________________________________________________________________
batch_normalization_14 (BatchNo (None, 12, 12, 64) 256 activation_14[0][0]
__________________________________________________________________________________________________
conv2d_16 (Conv2D) (None, 12, 12, 64) 36928 batch_normalization_14[0][0]
__________________________________________________________________________________________________
activation_15 (Activation) (None, 12, 12, 64) 0 conv2d_16[0][0]
__________________________________________________________________________________________________
batch_normalization_15 (BatchNo (None, 12, 12, 64) 256 activation_15[0][0]
__________________________________________________________________________________________________
conv2d_17 (Conv2D) (None, 12, 12, 64) 36928 batch_normalization_15[0][0]
__________________________________________________________________________________________________
batch_normalization_16 (BatchNo (None, 12, 12, 64) 256 conv2d_17[0][0]
__________________________________________________________________________________________________
add_2 (Add) (None, 12, 12, 64) 0 batch_normalization_16[0][0]
add_1[0][0]
__________________________________________________________________________________________________
conv2d_18 (Conv2D) (None, 12, 12, 64) 36928 add_2[0][0]
__________________________________________________________________________________________________
activation_16 (Activation) (None, 12, 12, 64) 0 conv2d_18[0][0]
__________________________________________________________________________________________________
batch_normalization_17 (BatchNo (None, 12, 12, 64) 256 activation_16[0][0]
__________________________________________________________________________________________________
conv2d_19 (Conv2D) (None, 12, 12, 64) 36928 batch_normalization_17[0][0]
__________________________________________________________________________________________________
activation_17 (Activation) (None, 12, 12, 64) 0 conv2d_19[0][0]
__________________________________________________________________________________________________
batch_normalization_18 (BatchNo (None, 12, 12, 64) 256 activation_17[0][0]
__________________________________________________________________________________________________
conv2d_20 (Conv2D) (None, 12, 12, 64) 36928 batch_normalization_18[0][0]
__________________________________________________________________________________________________
activation_18 (Activation) (None, 12, 12, 64) 0 conv2d_20[0][0]
__________________________________________________________________________________________________
batch_normalization_19 (BatchNo (None, 12, 12, 64) 256 activation_18[0][0]
__________________________________________________________________________________________________
conv2d_21 (Conv2D) (None, 12, 12, 64) 36928 batch_normalization_19[0][0]
__________________________________________________________________________________________________
activation_19 (Activation) (None, 12, 12, 64) 0 conv2d_21[0][0]
__________________________________________________________________________________________________
batch_normalization_20 (BatchNo (None, 12, 12, 64) 256 activation_19[0][0]
__________________________________________________________________________________________________
conv2d_22 (Conv2D) (None, 12, 12, 64) 36928 batch_normalization_20[0][0]
__________________________________________________________________________________________________
activation_20 (Activation) (None, 12, 12, 64) 0 conv2d_22[0][0]
__________________________________________________________________________________________________
batch_normalization_21 (BatchNo (None, 12, 12, 64) 256 activation_20[0][0]
__________________________________________________________________________________________________
conv2d_23 (Conv2D) (None, 12, 12, 64) 36928 batch_normalization_21[0][0]
__________________________________________________________________________________________________
activation_21 (Activation) (None, 12, 12, 64) 0 conv2d_23[0][0]
__________________________________________________________________________________________________
batch_normalization_22 (BatchNo (None, 12, 12, 64) 256 activation_21[0][0]
__________________________________________________________________________________________________
conv2d_24 (Conv2D) (None, 12, 12, 64) 36928 batch_normalization_22[0][0]
__________________________________________________________________________________________________
activation_22 (Activation) (None, 12, 12, 64) 0 conv2d_24[0][0]
__________________________________________________________________________________________________
batch_normalization_23 (BatchNo (None, 12, 12, 64) 256 activation_22[0][0]
__________________________________________________________________________________________________
conv2d_25 (Conv2D) (None, 12, 12, 64) 36928 batch_normalization_23[0][0]
__________________________________________________________________________________________________
batch_normalization_24 (BatchNo (None, 12, 12, 64) 256 conv2d_25[0][0]
__________________________________________________________________________________________________
add_3 (Add) (None, 12, 12, 64) 0 batch_normalization_24[0][0]
add_2[0][0]
__________________________________________________________________________________________________
conv2d_26 (Conv2D) (None, 12, 12, 64) 36928 add_3[0][0]
__________________________________________________________________________________________________
activation_23 (Activation) (None, 12, 12, 64) 0 conv2d_26[0][0]
__________________________________________________________________________________________________
batch_normalization_25 (BatchNo (None, 12, 12, 64) 256 activation_23[0][0]
__________________________________________________________________________________________________
conv2d_27 (Conv2D) (None, 12, 12, 64) 36928 batch_normalization_25[0][0]
__________________________________________________________________________________________________
activation_24 (Activation) (None, 12, 12, 64) 0 conv2d_27[0][0]
__________________________________________________________________________________________________
batch_normalization_26 (BatchNo (None, 12, 12, 64) 256 activation_24[0][0]
__________________________________________________________________________________________________
conv2d_28 (Conv2D) (None, 12, 12, 64) 36928 batch_normalization_26[0][0]
__________________________________________________________________________________________________
activation_25 (Activation) (None, 12, 12, 64) 0 conv2d_28[0][0]
__________________________________________________________________________________________________
batch_normalization_27 (BatchNo (None, 12, 12, 64) 256 activation_25[0][0]
__________________________________________________________________________________________________
conv2d_29 (Conv2D) (None, 12, 12, 64) 36928 batch_normalization_27[0][0]
__________________________________________________________________________________________________
activation_26 (Activation) (None, 12, 12, 64) 0 conv2d_29[0][0]
__________________________________________________________________________________________________
batch_normalization_28 (BatchNo (None, 12, 12, 64) 256 activation_26[0][0]
__________________________________________________________________________________________________
conv2d_30 (Conv2D) (None, 12, 12, 64) 36928 batch_normalization_28[0][0]
__________________________________________________________________________________________________
activation_27 (Activation) (None, 12, 12, 64) 0 conv2d_30[0][0]
__________________________________________________________________________________________________
batch_normalization_29 (BatchNo (None, 12, 12, 64) 256 activation_27[0][0]
__________________________________________________________________________________________________
conv2d_31 (Conv2D) (None, 12, 12, 64) 36928 batch_normalization_29[0][0]
__________________________________________________________________________________________________
activation_28 (Activation) (None, 12, 12, 64) 0 conv2d_31[0][0]
__________________________________________________________________________________________________
batch_normalization_30 (BatchNo (None, 12, 12, 64) 256 activation_28[0][0]
__________________________________________________________________________________________________
conv2d_32 (Conv2D) (None, 12, 12, 64) 36928 batch_normalization_30[0][0]
__________________________________________________________________________________________________
activation_29 (Activation) (None, 12, 12, 64) 0 conv2d_32[0][0]
__________________________________________________________________________________________________
batch_normalization_31 (BatchNo (None, 12, 12, 64) 256 activation_29[0][0]
__________________________________________________________________________________________________
conv2d_33 (Conv2D) (None, 12, 12, 64) 36928 batch_normalization_31[0][0]
__________________________________________________________________________________________________
batch_normalization_32 (BatchNo (None, 12, 12, 64) 256 conv2d_33[0][0]
__________________________________________________________________________________________________
add_4 (Add) (None, 12, 12, 64) 0 batch_normalization_32[0][0]
add_3[0][0]
__________________________________________________________________________________________________
conv2d_34 (Conv2D) (None, 12, 12, 64) 36928 add_4[0][0]
__________________________________________________________________________________________________
activation_30 (Activation) (None, 12, 12, 64) 0 conv2d_34[0][0]
__________________________________________________________________________________________________
batch_normalization_33 (BatchNo (None, 12, 12, 64) 256 activation_30[0][0]
__________________________________________________________________________________________________
conv2d_35 (Conv2D) (None, 12, 12, 64) 36928 batch_normalization_33[0][0]
__________________________________________________________________________________________________
activation_31 (Activation) (None, 12, 12, 64) 0 conv2d_35[0][0]
__________________________________________________________________________________________________
batch_normalization_34 (BatchNo (None, 12, 12, 64) 256 activation_31[0][0]
__________________________________________________________________________________________________
conv2d_36 (Conv2D) (None, 12, 12, 64) 36928 batch_normalization_34[0][0]
__________________________________________________________________________________________________
activation_32 (Activation) (None, 12, 12, 64) 0 conv2d_36[0][0]
__________________________________________________________________________________________________
batch_normalization_35 (BatchNo (None, 12, 12, 64) 256 activation_32[0][0]
__________________________________________________________________________________________________
conv2d_37 (Conv2D) (None, 12, 12, 64) 36928 batch_normalization_35[0][0]
__________________________________________________________________________________________________
activation_33 (Activation) (None, 12, 12, 64) 0 conv2d_37[0][0]
__________________________________________________________________________________________________
batch_normalization_36 (BatchNo (None, 12, 12, 64) 256 activation_33[0][0]
__________________________________________________________________________________________________
conv2d_38 (Conv2D) (None, 12, 12, 64) 36928 batch_normalization_36[0][0]
__________________________________________________________________________________________________
activation_34 (Activation) (None, 12, 12, 64) 0 conv2d_38[0][0]
__________________________________________________________________________________________________
batch_normalization_37 (BatchNo (None, 12, 12, 64) 256 activation_34[0][0]
__________________________________________________________________________________________________
conv2d_39 (Conv2D) (None, 12, 12, 64) 36928 batch_normalization_37[0][0]
__________________________________________________________________________________________________
activation_35 (Activation) (None, 12, 12, 64) 0 conv2d_39[0][0]
__________________________________________________________________________________________________
batch_normalization_38 (BatchNo (None, 12, 12, 64) 256 activation_35[0][0]
__________________________________________________________________________________________________
conv2d_40 (Conv2D) (None, 12, 12, 64) 36928 batch_normalization_38[0][0]
__________________________________________________________________________________________________
activation_36 (Activation) (None, 12, 12, 64) 0 conv2d_40[0][0]
__________________________________________________________________________________________________
batch_normalization_39 (BatchNo (None, 12, 12, 64) 256 activation_36[0][0]
__________________________________________________________________________________________________
conv2d_41 (Conv2D) (None, 12, 12, 64) 36928 batch_normalization_39[0][0]
__________________________________________________________________________________________________
batch_normalization_40 (BatchNo (None, 12, 12, 64) 256 conv2d_41[0][0]
__________________________________________________________________________________________________
add_5 (Add) (None, 12, 12, 64) 0 batch_normalization_40[0][0]
add_4[0][0]
__________________________________________________________________________________________________
conv2d_42 (Conv2D) (None, 12, 12, 64) 36928 add_5[0][0]
__________________________________________________________________________________________________
activation_37 (Activation) (None, 12, 12, 64) 0 conv2d_42[0][0]
__________________________________________________________________________________________________
batch_normalization_41 (BatchNo (None, 12, 12, 64) 256 activation_37[0][0]
__________________________________________________________________________________________________
conv2d_43 (Conv2D) (None, 12, 12, 64) 36928 batch_normalization_41[0][0]
__________________________________________________________________________________________________
activation_38 (Activation) (None, 12, 12, 64) 0 conv2d_43[0][0]
__________________________________________________________________________________________________
batch_normalization_42 (BatchNo (None, 12, 12, 64) 256 activation_38[0][0]
__________________________________________________________________________________________________
conv2d_44 (Conv2D) (None, 12, 12, 64) 36928 batch_normalization_42[0][0]
__________________________________________________________________________________________________
activation_39 (Activation) (None, 12, 12, 64) 0 conv2d_44[0][0]
__________________________________________________________________________________________________
batch_normalization_43 (BatchNo (None, 12, 12, 64) 256 activation_39[0][0]
__________________________________________________________________________________________________
conv2d_45 (Conv2D) (None, 12, 12, 64) 36928 batch_normalization_43[0][0]
__________________________________________________________________________________________________
activation_40 (Activation) (None, 12, 12, 64) 0 conv2d_45[0][0]
__________________________________________________________________________________________________
batch_normalization_44 (BatchNo (None, 12, 12, 64) 256 activation_40[0][0]
__________________________________________________________________________________________________
conv2d_46 (Conv2D) (None, 12, 12, 64) 36928 batch_normalization_44[0][0]
__________________________________________________________________________________________________
activation_41 (Activation) (None, 12, 12, 64) 0 conv2d_46[0][0]
__________________________________________________________________________________________________
batch_normalization_45 (BatchNo (None, 12, 12, 64) 256 activation_41[0][0]
__________________________________________________________________________________________________
conv2d_47 (Conv2D) (None, 12, 12, 64) 36928 batch_normalization_45[0][0]
__________________________________________________________________________________________________
activation_42 (Activation) (None, 12, 12, 64) 0 conv2d_47[0][0]
__________________________________________________________________________________________________
batch_normalization_46 (BatchNo (None, 12, 12, 64) 256 activation_42[0][0]
__________________________________________________________________________________________________
conv2d_48 (Conv2D) (None, 12, 12, 64) 36928 batch_normalization_46[0][0]
__________________________________________________________________________________________________
activation_43 (Activation) (None, 12, 12, 64) 0 conv2d_48[0][0]
__________________________________________________________________________________________________
batch_normalization_47 (BatchNo (None, 12, 12, 64) 256 activation_43[0][0]
__________________________________________________________________________________________________
conv2d_49 (Conv2D) (None, 12, 12, 64) 36928 batch_normalization_47[0][0]
__________________________________________________________________________________________________
batch_normalization_48 (BatchNo (None, 12, 12, 64) 256 conv2d_49[0][0]
__________________________________________________________________________________________________
add_6 (Add) (None, 12, 12, 64) 0 batch_normalization_48[0][0]
add_5[0][0]
__________________________________________________________________________________________________
conv2d_50 (Conv2D) (None, 12, 12, 64) 36928 add_6[0][0]
__________________________________________________________________________________________________
activation_44 (Activation) (None, 12, 12, 64) 0 conv2d_50[0][0]
__________________________________________________________________________________________________
batch_normalization_49 (BatchNo (None, 12, 12, 64) 256 activation_44[0][0]
__________________________________________________________________________________________________
conv2d_51 (Conv2D) (None, 12, 12, 64) 36928 batch_normalization_49[0][0]
__________________________________________________________________________________________________
activation_45 (Activation) (None, 12, 12, 64) 0 conv2d_51[0][0]
__________________________________________________________________________________________________
batch_normalization_50 (BatchNo (None, 12, 12, 64) 256 activation_45[0][0]
__________________________________________________________________________________________________
conv2d_52 (Conv2D) (None, 12, 12, 64) 36928 batch_normalization_50[0][0]
__________________________________________________________________________________________________
activation_46 (Activation) (None, 12, 12, 64) 0 conv2d_52[0][0]
__________________________________________________________________________________________________
batch_normalization_51 (BatchNo (None, 12, 12, 64) 256 activation_46[0][0]
__________________________________________________________________________________________________
conv2d_53 (Conv2D) (None, 12, 12, 64) 36928 batch_normalization_51[0][0]
__________________________________________________________________________________________________
activation_47 (Activation) (None, 12, 12, 64) 0 conv2d_53[0][0]
__________________________________________________________________________________________________
batch_normalization_52 (BatchNo (None, 12, 12, 64) 256 activation_47[0][0]
__________________________________________________________________________________________________
conv2d_54 (Conv2D) (None, 12, 12, 64) 36928 batch_normalization_52[0][0]
__________________________________________________________________________________________________
activation_48 (Activation) (None, 12, 12, 64) 0 conv2d_54[0][0]
__________________________________________________________________________________________________
batch_normalization_53 (BatchNo (None, 12, 12, 64) 256 activation_48[0][0]
__________________________________________________________________________________________________
conv2d_55 (Conv2D) (None, 12, 12, 64) 36928 batch_normalization_53[0][0]
__________________________________________________________________________________________________
activation_49 (Activation) (None, 12, 12, 64) 0 conv2d_55[0][0]
__________________________________________________________________________________________________
batch_normalization_54 (BatchNo (None, 12, 12, 64) 256 activation_49[0][0]
__________________________________________________________________________________________________
conv2d_56 (Conv2D) (None, 12, 12, 64) 36928 batch_normalization_54[0][0]
__________________________________________________________________________________________________
activation_50 (Activation) (None, 12, 12, 64) 0 conv2d_56[0][0]
__________________________________________________________________________________________________
batch_normalization_55 (BatchNo (None, 12, 12, 64) 256 activation_50[0][0]
__________________________________________________________________________________________________
conv2d_57 (Conv2D) (None, 12, 12, 64) 36928 batch_normalization_55[0][0]
__________________________________________________________________________________________________
batch_normalization_56 (BatchNo (None, 12, 12, 64) 256 conv2d_57[0][0]
__________________________________________________________________________________________________
add_7 (Add) (None, 12, 12, 64) 0 batch_normalization_56[0][0]
add_6[0][0]
__________________________________________________________________________________________________
conv2d_58 (Conv2D) (None, 12, 12, 64) 36928 add_7[0][0]
__________________________________________________________________________________________________
activation_51 (Activation) (None, 12, 12, 64) 0 conv2d_58[0][0]
__________________________________________________________________________________________________
batch_normalization_57 (BatchNo (None, 12, 12, 64) 256 activation_51[0][0]
__________________________________________________________________________________________________
conv2d_59 (Conv2D) (None, 12, 12, 64) 36928 batch_normalization_57[0][0]
__________________________________________________________________________________________________
activation_52 (Activation) (None, 12, 12, 64) 0 conv2d_59[0][0]
__________________________________________________________________________________________________
batch_normalization_58 (BatchNo (None, 12, 12, 64) 256 activation_52[0][0]
__________________________________________________________________________________________________
conv2d_60 (Conv2D) (None, 12, 12, 64) 36928 batch_normalization_58[0][0]
__________________________________________________________________________________________________
activation_53 (Activation) (None, 12, 12, 64) 0 conv2d_60[0][0]
__________________________________________________________________________________________________
batch_normalization_59 (BatchNo (None, 12, 12, 64) 256 activation_53[0][0]
__________________________________________________________________________________________________
conv2d_61 (Conv2D) (None, 12, 12, 64) 36928 batch_normalization_59[0][0]
__________________________________________________________________________________________________
activation_54 (Activation) (None, 12, 12, 64) 0 conv2d_61[0][0]
__________________________________________________________________________________________________
batch_normalization_60 (BatchNo (None, 12, 12, 64) 256 activation_54[0][0]
__________________________________________________________________________________________________
conv2d_62 (Conv2D) (None, 12, 12, 64) 36928 batch_normalization_60[0][0]
__________________________________________________________________________________________________
activation_55 (Activation) (None, 12, 12, 64) 0 conv2d_62[0][0]
__________________________________________________________________________________________________
batch_normalization_61 (BatchNo (None, 12, 12, 64) 256 activation_55[0][0]
__________________________________________________________________________________________________
conv2d_63 (Conv2D) (None, 12, 12, 64) 36928 batch_normalization_61[0][0]
__________________________________________________________________________________________________
activation_56 (Activation) (None, 12, 12, 64) 0 conv2d_63[0][0]
__________________________________________________________________________________________________
batch_normalization_62 (BatchNo (None, 12, 12, 64) 256 activation_56[0][0]
__________________________________________________________________________________________________
conv2d_64 (Conv2D) (None, 12, 12, 64) 36928 batch_normalization_62[0][0]
__________________________________________________________________________________________________
activation_57 (Activation) (None, 12, 12, 64) 0 conv2d_64[0][0]
__________________________________________________________________________________________________
batch_normalization_63 (BatchNo (None, 12, 12, 64) 256 activation_57[0][0]
__________________________________________________________________________________________________
conv2d_65 (Conv2D) (None, 12, 12, 64) 36928 batch_normalization_63[0][0]
__________________________________________________________________________________________________
batch_normalization_64 (BatchNo (None, 12, 12, 64) 256 conv2d_65[0][0]
__________________________________________________________________________________________________
add_8 (Add) (None, 12, 12, 64) 0 batch_normalization_64[0][0]
add_7[0][0]
__________________________________________________________________________________________________
conv2d_66 (Conv2D) (None, 12, 12, 64) 36928 add_8[0][0]
__________________________________________________________________________________________________
batch_normalization_65 (BatchNo (None, 12, 12, 64) 256 conv2d_66[0][0]
__________________________________________________________________________________________________
add_9 (Add) (None, 12, 12, 64) 0 batch_normalization_65[0][0]
activation_1[0][0]
__________________________________________________________________________________________________
up_sampling2d_1 (UpSampling2D) (None, 24, 24, 64) 0 add_9[0][0]
__________________________________________________________________________________________________
conv2d_67 (Conv2D) (None, 24, 24, 256) 147712 up_sampling2d_1[0][0]
__________________________________________________________________________________________________
activation_58 (Activation) (None, 24, 24, 256) 0 conv2d_67[0][0]
__________________________________________________________________________________________________
up_sampling2d_2 (UpSampling2D) (None, 48, 48, 256) 0 activation_58[0][0]
__________________________________________________________________________________________________
conv2d_68 (Conv2D) (None, 48, 48, 256) 590080 up_sampling2d_2[0][0]
__________________________________________________________________________________________________
activation_59 (Activation) (None, 48, 48, 256) 0 conv2d_68[0][0]
__________________________________________________________________________________________________
conv2d_69 (Conv2D) (None, 48, 48, 3) 771 activation_59[0][0]
__________________________________________________________________________________________________
conv2d_70 (Conv2D) (None, 48, 48, 64) 15616 conv2d_69[0][0]
__________________________________________________________________________________________________
activation_60 (Activation) (None, 48, 48, 64) 0 conv2d_70[0][0]
__________________________________________________________________________________________________
conv2d_71 (Conv2D) (None, 48, 48, 64) 36928 activation_60[0][0]
__________________________________________________________________________________________________
activation_61 (Activation) (None, 48, 48, 64) 0 conv2d_71[0][0]
__________________________________________________________________________________________________
batch_normalization_66 (BatchNo (None, 48, 48, 64) 256 activation_61[0][0]
__________________________________________________________________________________________________
conv2d_72 (Conv2D) (None, 48, 48, 64) 36928 batch_normalization_66[0][0]
__________________________________________________________________________________________________
activation_62 (Activation) (None, 48, 48, 64) 0 conv2d_72[0][0]
__________________________________________________________________________________________________
batch_normalization_67 (BatchNo (None, 48, 48, 64) 256 activation_62[0][0]
__________________________________________________________________________________________________
conv2d_73 (Conv2D) (None, 48, 48, 64) 36928 batch_normalization_67[0][0]
__________________________________________________________________________________________________
activation_63 (Activation) (None, 48, 48, 64) 0 conv2d_73[0][0]
__________________________________________________________________________________________________
batch_normalization_68 (BatchNo (None, 48, 48, 64) 256 activation_63[0][0]
__________________________________________________________________________________________________
conv2d_74 (Conv2D) (None, 48, 48, 64) 36928 batch_normalization_68[0][0]
__________________________________________________________________________________________________
activation_64 (Activation) (None, 48, 48, 64) 0 conv2d_74[0][0]
__________________________________________________________________________________________________
batch_normalization_69 (BatchNo (None, 48, 48, 64) 256 activation_64[0][0]
__________________________________________________________________________________________________
conv2d_75 (Conv2D) (None, 48, 48, 64) 36928 batch_normalization_69[0][0]
__________________________________________________________________________________________________
activation_65 (Activation) (None, 48, 48, 64) 0 conv2d_75[0][0]
__________________________________________________________________________________________________
batch_normalization_70 (BatchNo (None, 48, 48, 64) 256 activation_65[0][0]
__________________________________________________________________________________________________
conv2d_76 (Conv2D) (None, 48, 48, 64) 36928 batch_normalization_70[0][0]
__________________________________________________________________________________________________
activation_66 (Activation) (None, 48, 48, 64) 0 conv2d_76[0][0]
__________________________________________________________________________________________________
batch_normalization_71 (BatchNo (None, 48, 48, 64) 256 activation_66[0][0]
__________________________________________________________________________________________________
conv2d_77 (Conv2D) (None, 48, 48, 64) 36928 batch_normalization_71[0][0]
__________________________________________________________________________________________________
activation_67 (Activation) (None, 48, 48, 64) 0 conv2d_77[0][0]
__________________________________________________________________________________________________
batch_normalization_72 (BatchNo (None, 48, 48, 64) 256 activation_67[0][0]
__________________________________________________________________________________________________
conv2d_78 (Conv2D) (None, 48, 48, 64) 36928 batch_normalization_72[0][0]
__________________________________________________________________________________________________
batch_normalization_73 (BatchNo (None, 48, 48, 64) 256 conv2d_78[0][0]
__________________________________________________________________________________________________
add_10 (Add) (None, 48, 48, 64) 0 batch_normalization_73[0][0]
activation_60[0][0]
__________________________________________________________________________________________________
conv2d_79 (Conv2D) (None, 48, 48, 64) 36928 add_10[0][0]
__________________________________________________________________________________________________
activation_68 (Activation) (None, 48, 48, 64) 0 conv2d_79[0][0]
__________________________________________________________________________________________________
batch_normalization_74 (BatchNo (None, 48, 48, 64) 256 activation_68[0][0]
__________________________________________________________________________________________________
conv2d_80 (Conv2D) (None, 48, 48, 64) 36928 batch_normalization_74[0][0]
__________________________________________________________________________________________________
activation_69 (Activation) (None, 48, 48, 64) 0 conv2d_80[0][0]
__________________________________________________________________________________________________
batch_normalization_75 (BatchNo (None, 48, 48, 64) 256 activation_69[0][0]
__________________________________________________________________________________________________
conv2d_81 (Conv2D) (None, 48, 48, 64) 36928 batch_normalization_75[0][0]
__________________________________________________________________________________________________
activation_70 (Activation) (None, 48, 48, 64) 0 conv2d_81[0][0]
__________________________________________________________________________________________________
batch_normalization_76 (BatchNo (None, 48, 48, 64) 256 activation_70[0][0]
__________________________________________________________________________________________________
conv2d_82 (Conv2D) (None, 48, 48, 64) 36928 batch_normalization_76[0][0]
__________________________________________________________________________________________________
activation_71 (Activation) (None, 48, 48, 64) 0 conv2d_82[0][0]
__________________________________________________________________________________________________
batch_normalization_77 (BatchNo (None, 48, 48, 64) 256 activation_71[0][0]
__________________________________________________________________________________________________
conv2d_83 (Conv2D) (None, 48, 48, 64) 36928 batch_normalization_77[0][0]
__________________________________________________________________________________________________
activation_72 (Activation) (None, 48, 48, 64) 0 conv2d_83[0][0]
__________________________________________________________________________________________________
batch_normalization_78 (BatchNo (None, 48, 48, 64) 256 activation_72[0][0]
__________________________________________________________________________________________________
conv2d_84 (Conv2D) (None, 48, 48, 64) 36928 batch_normalization_78[0][0]
__________________________________________________________________________________________________
activation_73 (Activation) (None, 48, 48, 64) 0 conv2d_84[0][0]
__________________________________________________________________________________________________
batch_normalization_79 (BatchNo (None, 48, 48, 64) 256 activation_73[0][0]
__________________________________________________________________________________________________
conv2d_85 (Conv2D) (None, 48, 48, 64) 36928 batch_normalization_79[0][0]
__________________________________________________________________________________________________
activation_74 (Activation) (None, 48, 48, 64) 0 conv2d_85[0][0]
__________________________________________________________________________________________________
batch_normalization_80 (BatchNo (None, 48, 48, 64) 256 activation_74[0][0]
__________________________________________________________________________________________________
conv2d_86 (Conv2D) (None, 48, 48, 64) 36928 batch_normalization_80[0][0]
__________________________________________________________________________________________________
batch_normalization_81 (BatchNo (None, 48, 48, 64) 256 conv2d_86[0][0]
__________________________________________________________________________________________________
add_11 (Add) (None, 48, 48, 64) 0 batch_normalization_81[0][0]
add_10[0][0]
__________________________________________________________________________________________________
conv2d_87 (Conv2D) (None, 48, 48, 64) 36928 add_11[0][0]
__________________________________________________________________________________________________
activation_75 (Activation) (None, 48, 48, 64) 0 conv2d_87[0][0]
__________________________________________________________________________________________________
batch_normalization_82 (BatchNo (None, 48, 48, 64) 256 activation_75[0][0]
__________________________________________________________________________________________________
conv2d_88 (Conv2D) (None, 48, 48, 64) 36928 batch_normalization_82[0][0]
__________________________________________________________________________________________________
activation_76 (Activation) (None, 48, 48, 64) 0 conv2d_88[0][0]
__________________________________________________________________________________________________
batch_normalization_83 (BatchNo (None, 48, 48, 64) 256 activation_76[0][0]
__________________________________________________________________________________________________
conv2d_89 (Conv2D) (None, 48, 48, 64) 36928 batch_normalization_83[0][0]
__________________________________________________________________________________________________
activation_77 (Activation) (None, 48, 48, 64) 0 conv2d_89[0][0]
__________________________________________________________________________________________________
batch_normalization_84 (BatchNo (None, 48, 48, 64) 256 activation_77[0][0]
__________________________________________________________________________________________________
conv2d_90 (Conv2D) (None, 48, 48, 64) 36928 batch_normalization_84[0][0]
__________________________________________________________________________________________________
activation_78 (Activation) (None, 48, 48, 64) 0 conv2d_90[0][0]
__________________________________________________________________________________________________
batch_normalization_85 (BatchNo (None, 48, 48, 64) 256 activation_78[0][0]
__________________________________________________________________________________________________
conv2d_91 (Conv2D) (None, 48, 48, 64) 36928 batch_normalization_85[0][0]
__________________________________________________________________________________________________
activation_79 (Activation) (None, 48, 48, 64) 0 conv2d_91[0][0]
__________________________________________________________________________________________________
batch_normalization_86 (BatchNo (None, 48, 48, 64) 256 activation_79[0][0]
__________________________________________________________________________________________________
conv2d_92 (Conv2D) (None, 48, 48, 64) 36928 batch_normalization_86[0][0]
__________________________________________________________________________________________________
activation_80 (Activation) (None, 48, 48, 64) 0 conv2d_92[0][0]
__________________________________________________________________________________________________
batch_normalization_87 (BatchNo (None, 48, 48, 64) 256 activation_80[0][0]
__________________________________________________________________________________________________
conv2d_93 (Conv2D) (None, 48, 48, 64) 36928 batch_normalization_87[0][0]
__________________________________________________________________________________________________
activation_81 (Activation) (None, 48, 48, 64) 0 conv2d_93[0][0]
__________________________________________________________________________________________________
batch_normalization_88 (BatchNo (None, 48, 48, 64) 256 activation_81[0][0]
__________________________________________________________________________________________________
conv2d_94 (Conv2D) (None, 48, 48, 64) 36928 batch_normalization_88[0][0]
__________________________________________________________________________________________________
batch_normalization_89 (BatchNo (None, 48, 48, 64) 256 conv2d_94[0][0]
__________________________________________________________________________________________________
add_12 (Add) (None, 48, 48, 64) 0 batch_normalization_89[0][0]
add_11[0][0]
__________________________________________________________________________________________________
conv2d_95 (Conv2D) (None, 48, 48, 64) 36928 add_12[0][0]
__________________________________________________________________________________________________
activation_82 (Activation) (None, 48, 48, 64) 0 conv2d_95[0][0]
__________________________________________________________________________________________________
batch_normalization_90 (BatchNo (None, 48, 48, 64) 256 activation_82[0][0]
__________________________________________________________________________________________________
conv2d_96 (Conv2D) (None, 48, 48, 64) 36928 batch_normalization_90[0][0]
__________________________________________________________________________________________________
activation_83 (Activation) (None, 48, 48, 64) 0 conv2d_96[0][0]
__________________________________________________________________________________________________
batch_normalization_91 (BatchNo (None, 48, 48, 64) 256 activation_83[0][0]
__________________________________________________________________________________________________
conv2d_97 (Conv2D) (None, 48, 48, 64) 36928 batch_normalization_91[0][0]
__________________________________________________________________________________________________
activation_84 (Activation) (None, 48, 48, 64) 0 conv2d_97[0][0]
__________________________________________________________________________________________________
batch_normalization_92 (BatchNo (None, 48, 48, 64) 256 activation_84[0][0]
__________________________________________________________________________________________________
conv2d_98 (Conv2D) (None, 48, 48, 64) 36928 batch_normalization_92[0][0]
__________________________________________________________________________________________________
activation_85 (Activation) (None, 48, 48, 64) 0 conv2d_98[0][0]
__________________________________________________________________________________________________
batch_normalization_93 (BatchNo (None, 48, 48, 64) 256 activation_85[0][0]
__________________________________________________________________________________________________
conv2d_99 (Conv2D) (None, 48, 48, 64) 36928 batch_normalization_93[0][0]
__________________________________________________________________________________________________
activation_86 (Activation) (None, 48, 48, 64) 0 conv2d_99[0][0]
__________________________________________________________________________________________________
batch_normalization_94 (BatchNo (None, 48, 48, 64) 256 activation_86[0][0]
__________________________________________________________________________________________________
conv2d_100 (Conv2D) (None, 48, 48, 64) 36928 batch_normalization_94[0][0]
__________________________________________________________________________________________________
activation_87 (Activation) (None, 48, 48, 64) 0 conv2d_100[0][0]
__________________________________________________________________________________________________
batch_normalization_95 (BatchNo (None, 48, 48, 64) 256 activation_87[0][0]
__________________________________________________________________________________________________
conv2d_101 (Conv2D) (None, 48, 48, 64) 36928 batch_normalization_95[0][0]
__________________________________________________________________________________________________
activation_88 (Activation) (None, 48, 48, 64) 0 conv2d_101[0][0]
__________________________________________________________________________________________________
batch_normalization_96 (BatchNo (None, 48, 48, 64) 256 activation_88[0][0]
__________________________________________________________________________________________________
conv2d_102 (Conv2D) (None, 48, 48, 64) 36928 batch_normalization_96[0][0]
__________________________________________________________________________________________________
batch_normalization_97 (BatchNo (None, 48, 48, 64) 256 conv2d_102[0][0]
__________________________________________________________________________________________________
add_13 (Add) (None, 48, 48, 64) 0 batch_normalization_97[0][0]
add_12[0][0]
__________________________________________________________________________________________________
conv2d_103 (Conv2D) (None, 48, 48, 64) 36928 add_13[0][0]
__________________________________________________________________________________________________
activation_89 (Activation) (None, 48, 48, 64) 0 conv2d_103[0][0]
__________________________________________________________________________________________________
batch_normalization_98 (BatchNo (None, 48, 48, 64) 256 activation_89[0][0]
__________________________________________________________________________________________________
conv2d_104 (Conv2D) (None, 48, 48, 64) 36928 batch_normalization_98[0][0]
__________________________________________________________________________________________________
activation_90 (Activation) (None, 48, 48, 64) 0 conv2d_104[0][0]
__________________________________________________________________________________________________
batch_normalization_99 (BatchNo (None, 48, 48, 64) 256 activation_90[0][0]
__________________________________________________________________________________________________
conv2d_105 (Conv2D) (None, 48, 48, 64) 36928 batch_normalization_99[0][0]
__________________________________________________________________________________________________
activation_91 (Activation) (None, 48, 48, 64) 0 conv2d_105[0][0]
__________________________________________________________________________________________________
batch_normalization_100 (BatchN (None, 48, 48, 64) 256 activation_91[0][0]
__________________________________________________________________________________________________
conv2d_106 (Conv2D) (None, 48, 48, 64) 36928 batch_normalization_100[0][0]
__________________________________________________________________________________________________
activation_92 (Activation) (None, 48, 48, 64) 0 conv2d_106[0][0]
__________________________________________________________________________________________________
batch_normalization_101 (BatchN (None, 48, 48, 64) 256 activation_92[0][0]
__________________________________________________________________________________________________
conv2d_107 (Conv2D) (None, 48, 48, 64) 36928 batch_normalization_101[0][0]
__________________________________________________________________________________________________
activation_93 (Activation) (None, 48, 48, 64) 0 conv2d_107[0][0]
__________________________________________________________________________________________________
batch_normalization_102 (BatchN (None, 48, 48, 64) 256 activation_93[0][0]
__________________________________________________________________________________________________
conv2d_108 (Conv2D) (None, 48, 48, 64) 36928 batch_normalization_102[0][0]
__________________________________________________________________________________________________
activation_94 (Activation) (None, 48, 48, 64) 0 conv2d_108[0][0]
__________________________________________________________________________________________________
batch_normalization_103 (BatchN (None, 48, 48, 64) 256 activation_94[0][0]
__________________________________________________________________________________________________
conv2d_109 (Conv2D) (None, 48, 48, 64) 36928 batch_normalization_103[0][0]
__________________________________________________________________________________________________
activation_95 (Activation) (None, 48, 48, 64) 0 conv2d_109[0][0]
__________________________________________________________________________________________________
batch_normalization_104 (BatchN (None, 48, 48, 64) 256 activation_95[0][0]
__________________________________________________________________________________________________
conv2d_110 (Conv2D) (None, 48, 48, 64) 36928 batch_normalization_104[0][0]
__________________________________________________________________________________________________
batch_normalization_105 (BatchN (None, 48, 48, 64) 256 conv2d_110[0][0]
__________________________________________________________________________________________________
add_14 (Add) (None, 48, 48, 64) 0 batch_normalization_105[0][0]
add_13[0][0]
__________________________________________________________________________________________________
conv2d_111 (Conv2D) (None, 48, 48, 64) 36928 add_14[0][0]
__________________________________________________________________________________________________
activation_96 (Activation) (None, 48, 48, 64) 0 conv2d_111[0][0]
__________________________________________________________________________________________________
batch_normalization_106 (BatchN (None, 48, 48, 64) 256 activation_96[0][0]
__________________________________________________________________________________________________
conv2d_112 (Conv2D) (None, 48, 48, 64) 36928 batch_normalization_106[0][0]
__________________________________________________________________________________________________
activation_97 (Activation) (None, 48, 48, 64) 0 conv2d_112[0][0]
__________________________________________________________________________________________________
batch_normalization_107 (BatchN (None, 48, 48, 64) 256 activation_97[0][0]
__________________________________________________________________________________________________
conv2d_113 (Conv2D) (None, 48, 48, 64) 36928 batch_normalization_107[0][0]
__________________________________________________________________________________________________
activation_98 (Activation) (None, 48, 48, 64) 0 conv2d_113[0][0]
__________________________________________________________________________________________________
batch_normalization_108 (BatchN (None, 48, 48, 64) 256 activation_98[0][0]
__________________________________________________________________________________________________
conv2d_114 (Conv2D) (None, 48, 48, 64) 36928 batch_normalization_108[0][0]
__________________________________________________________________________________________________
activation_99 (Activation) (None, 48, 48, 64) 0 conv2d_114[0][0]
__________________________________________________________________________________________________
batch_normalization_109 (BatchN (None, 48, 48, 64) 256 activation_99[0][0]
__________________________________________________________________________________________________
conv2d_115 (Conv2D) (None, 48, 48, 64) 36928 batch_normalization_109[0][0]
__________________________________________________________________________________________________
activation_100 (Activation) (None, 48, 48, 64) 0 conv2d_115[0][0]
__________________________________________________________________________________________________
batch_normalization_110 (BatchN (None, 48, 48, 64) 256 activation_100[0][0]
__________________________________________________________________________________________________
conv2d_116 (Conv2D) (None, 48, 48, 64) 36928 batch_normalization_110[0][0]
__________________________________________________________________________________________________
activation_101 (Activation) (None, 48, 48, 64) 0 conv2d_116[0][0]
__________________________________________________________________________________________________
batch_normalization_111 (BatchN (None, 48, 48, 64) 256 activation_101[0][0]
__________________________________________________________________________________________________
conv2d_117 (Conv2D) (None, 48, 48, 64) 36928 batch_normalization_111[0][0]
__________________________________________________________________________________________________
activation_102 (Activation) (None, 48, 48, 64) 0 conv2d_117[0][0]
__________________________________________________________________________________________________
batch_normalization_112 (BatchN (None, 48, 48, 64) 256 activation_102[0][0]
__________________________________________________________________________________________________
conv2d_118 (Conv2D) (None, 48, 48, 64) 36928 batch_normalization_112[0][0]
__________________________________________________________________________________________________
batch_normalization_113 (BatchN (None, 48, 48, 64) 256 conv2d_118[0][0]
__________________________________________________________________________________________________
add_15 (Add) (None, 48, 48, 64) 0 batch_normalization_113[0][0]
add_14[0][0]
__________________________________________________________________________________________________
conv2d_119 (Conv2D) (None, 48, 48, 64) 36928 add_15[0][0]
__________________________________________________________________________________________________
activation_103 (Activation) (None, 48, 48, 64) 0 conv2d_119[0][0]
__________________________________________________________________________________________________
batch_normalization_114 (BatchN (None, 48, 48, 64) 256 activation_103[0][0]
__________________________________________________________________________________________________
conv2d_120 (Conv2D) (None, 48, 48, 64) 36928 batch_normalization_114[0][0]
__________________________________________________________________________________________________
activation_104 (Activation) (None, 48, 48, 64) 0 conv2d_120[0][0]
__________________________________________________________________________________________________
batch_normalization_115 (BatchN (None, 48, 48, 64) 256 activation_104[0][0]
__________________________________________________________________________________________________
conv2d_121 (Conv2D) (None, 48, 48, 64) 36928 batch_normalization_115[0][0]
__________________________________________________________________________________________________
activation_105 (Activation) (None, 48, 48, 64) 0 conv2d_121[0][0]
__________________________________________________________________________________________________
batch_normalization_116 (BatchN (None, 48, 48, 64) 256 activation_105[0][0]
__________________________________________________________________________________________________
conv2d_122 (Conv2D) (None, 48, 48, 64) 36928 batch_normalization_116[0][0]
__________________________________________________________________________________________________
activation_106 (Activation) (None, 48, 48, 64) 0 conv2d_122[0][0]
__________________________________________________________________________________________________
batch_normalization_117 (BatchN (None, 48, 48, 64) 256 activation_106[0][0]
__________________________________________________________________________________________________
conv2d_123 (Conv2D) (None, 48, 48, 64) 36928 batch_normalization_117[0][0]
__________________________________________________________________________________________________
activation_107 (Activation) (None, 48, 48, 64) 0 conv2d_123[0][0]
__________________________________________________________________________________________________
batch_normalization_118 (BatchN (None, 48, 48, 64) 256 activation_107[0][0]
__________________________________________________________________________________________________
conv2d_124 (Conv2D) (None, 48, 48, 64) 36928 batch_normalization_118[0][0]
__________________________________________________________________________________________________
activation_108 (Activation) (None, 48, 48, 64) 0 conv2d_124[0][0]
__________________________________________________________________________________________________
batch_normalization_119 (BatchN (None, 48, 48, 64) 256 activation_108[0][0]
__________________________________________________________________________________________________
conv2d_125 (Conv2D) (None, 48, 48, 64) 36928 batch_normalization_119[0][0]
__________________________________________________________________________________________________
activation_109 (Activation) (None, 48, 48, 64) 0 conv2d_125[0][0]
__________________________________________________________________________________________________
batch_normalization_120 (BatchN (None, 48, 48, 64) 256 activation_109[0][0]
__________________________________________________________________________________________________
conv2d_126 (Conv2D) (None, 48, 48, 64) 36928 batch_normalization_120[0][0]
__________________________________________________________________________________________________
batch_normalization_121 (BatchN (None, 48, 48, 64) 256 conv2d_126[0][0]
__________________________________________________________________________________________________
add_16 (Add) (None, 48, 48, 64) 0 batch_normalization_121[0][0]
add_15[0][0]
__________________________________________________________________________________________________
conv2d_127 (Conv2D) (None, 48, 48, 64) 36928 add_16[0][0]
__________________________________________________________________________________________________
activation_110 (Activation) (None, 48, 48, 64) 0 conv2d_127[0][0]
__________________________________________________________________________________________________
batch_normalization_122 (BatchN (None, 48, 48, 64) 256 activation_110[0][0]
__________________________________________________________________________________________________
conv2d_128 (Conv2D) (None, 48, 48, 64) 36928 batch_normalization_122[0][0]
__________________________________________________________________________________________________
activation_111 (Activation) (None, 48, 48, 64) 0 conv2d_128[0][0]
__________________________________________________________________________________________________
batch_normalization_123 (BatchN (None, 48, 48, 64) 256 activation_111[0][0]
__________________________________________________________________________________________________
conv2d_129 (Conv2D) (None, 48, 48, 64) 36928 batch_normalization_123[0][0]
__________________________________________________________________________________________________
activation_112 (Activation) (None, 48, 48, 64) 0 conv2d_129[0][0]
__________________________________________________________________________________________________
batch_normalization_124 (BatchN (None, 48, 48, 64) 256 activation_112[0][0]
__________________________________________________________________________________________________
conv2d_130 (Conv2D) (None, 48, 48, 64) 36928 batch_normalization_124[0][0]
__________________________________________________________________________________________________
activation_113 (Activation) (None, 48, 48, 64) 0 conv2d_130[0][0]
__________________________________________________________________________________________________
batch_normalization_125 (BatchN (None, 48, 48, 64) 256 activation_113[0][0]
__________________________________________________________________________________________________
conv2d_131 (Conv2D) (None, 48, 48, 64) 36928 batch_normalization_125[0][0]
__________________________________________________________________________________________________
activation_114 (Activation) (None, 48, 48, 64) 0 conv2d_131[0][0]
__________________________________________________________________________________________________
batch_normalization_126 (BatchN (None, 48, 48, 64) 256 activation_114[0][0]
__________________________________________________________________________________________________
conv2d_132 (Conv2D) (None, 48, 48, 64) 36928 batch_normalization_126[0][0]
__________________________________________________________________________________________________
activation_115 (Activation) (None, 48, 48, 64) 0 conv2d_132[0][0]
__________________________________________________________________________________________________
batch_normalization_127 (BatchN (None, 48, 48, 64) 256 activation_115[0][0]
__________________________________________________________________________________________________
conv2d_133 (Conv2D) (None, 48, 48, 64) 36928 batch_normalization_127[0][0]
__________________________________________________________________________________________________
activation_116 (Activation) (None, 48, 48, 64) 0 conv2d_133[0][0]
__________________________________________________________________________________________________
batch_normalization_128 (BatchN (None, 48, 48, 64) 256 activation_116[0][0]
__________________________________________________________________________________________________
conv2d_134 (Conv2D) (None, 48, 48, 64) 36928 batch_normalization_128[0][0]
__________________________________________________________________________________________________
batch_normalization_129 (BatchN (None, 48, 48, 64) 256 conv2d_134[0][0]
__________________________________________________________________________________________________
add_17 (Add) (None, 48, 48, 64) 0 batch_normalization_129[0][0]
add_16[0][0]
__________________________________________________________________________________________________
conv2d_135 (Conv2D) (None, 48, 48, 64) 36928 add_17[0][0]
__________________________________________________________________________________________________
batch_normalization_130 (BatchN (None, 48, 48, 64) 256 conv2d_135[0][0]
__________________________________________________________________________________________________
conv2d_136 (Conv2D) (None, 48, 48, 256) 147712 batch_normalization_130[0][0]
__________________________________________________________________________________________________
batch_normalization_131 (BatchN (None, 48, 48, 256) 1024 conv2d_136[0][0]
__________________________________________________________________________________________________
conv2d_137 (Conv2D) (None, 48, 48, 256) 590080 batch_normalization_131[0][0]
__________________________________________________________________________________________________
batch_normalization_132 (BatchN (None, 48, 48, 256) 1024 conv2d_137[0][0]
__________________________________________________________________________________________________
conv2d_138 (Conv2D) (None, 48, 48, 3) 6915 batch_normalization_132[0][0]
==================================================================================================
Total params: 6,350,470
Trainable params: 6,332,806
Non-trainable params: 17,664
__________________________________________________________________________________________________
###Markdown
We employ VGG19 as our backbone network in the discriminator
###Code
vgg19 = VGG19(weights='imagenet', include_top=False, input_shape=(48,48,3))
vgg19.summary()
vgg19.layers
X = Flatten()(vgg19.layers[-2].output)
Fc_RorG = Dense(1, activation='sigmoid')(X)   # head 1: real vs. generated image
Fc_ForNF = Dense(1, activation='sigmoid')(X)  # head 2: face vs. non-face
# The discriminator has two parallel outputs.
trail_discriminator = Model(inputs=vgg19.input, outputs=[Fc_RorG, Fc_ForNF])
trail_discriminator.summary()
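# (Added sketch, not part of the original notebook.) Quick shape check of the two
# heads: a dummy batch of 48x48 RGB crops should yield two (batch, 1) sigmoid
# scores, one for real-vs-generated and one for face-vs-non-face. Assumes numpy
# is available as np, as in the rest of the notebook.
dummy_batch = np.random.rand(2, 48, 48, 3).astype('float32')
score_real_or_gen, score_face = trail_discriminator.predict(dummy_batch)
print(score_real_or_gen.shape, score_face.shape)  # expected: (2, 1) (2, 1)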
###Output
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_2 (InputLayer) (None, 48, 48, 3) 0
__________________________________________________________________________________________________
block1_conv1 (Conv2D) (None, 48, 48, 64) 1792 input_2[0][0]
__________________________________________________________________________________________________
block1_conv2 (Conv2D) (None, 48, 48, 64) 36928 block1_conv1[0][0]
__________________________________________________________________________________________________
block1_pool (MaxPooling2D) (None, 24, 24, 64) 0 block1_conv2[0][0]
__________________________________________________________________________________________________
block2_conv1 (Conv2D) (None, 24, 24, 128) 73856 block1_pool[0][0]
__________________________________________________________________________________________________
block2_conv2 (Conv2D) (None, 24, 24, 128) 147584 block2_conv1[0][0]
__________________________________________________________________________________________________
block2_pool (MaxPooling2D) (None, 12, 12, 128) 0 block2_conv2[0][0]
__________________________________________________________________________________________________
block3_conv1 (Conv2D) (None, 12, 12, 256) 295168 block2_pool[0][0]
__________________________________________________________________________________________________
block3_conv2 (Conv2D) (None, 12, 12, 256) 590080 block3_conv1[0][0]
__________________________________________________________________________________________________
block3_conv3 (Conv2D) (None, 12, 12, 256) 590080 block3_conv2[0][0]
__________________________________________________________________________________________________
block3_conv4 (Conv2D) (None, 12, 12, 256) 590080 block3_conv3[0][0]
__________________________________________________________________________________________________
block3_pool (MaxPooling2D) (None, 6, 6, 256) 0 block3_conv4[0][0]
__________________________________________________________________________________________________
block4_conv1 (Conv2D) (None, 6, 6, 512) 1180160 block3_pool[0][0]
__________________________________________________________________________________________________
block4_conv2 (Conv2D) (None, 6, 6, 512) 2359808 block4_conv1[0][0]
__________________________________________________________________________________________________
block4_conv3 (Conv2D) (None, 6, 6, 512) 2359808 block4_conv2[0][0]
__________________________________________________________________________________________________
block4_conv4 (Conv2D) (None, 6, 6, 512) 2359808 block4_conv3[0][0]
__________________________________________________________________________________________________
block4_pool (MaxPooling2D) (None, 3, 3, 512) 0 block4_conv4[0][0]
__________________________________________________________________________________________________
block5_conv1 (Conv2D) (None, 3, 3, 512) 2359808 block4_pool[0][0]
__________________________________________________________________________________________________
block5_conv2 (Conv2D) (None, 3, 3, 512) 2359808 block5_conv1[0][0]
__________________________________________________________________________________________________
block5_conv3 (Conv2D) (None, 3, 3, 512) 2359808 block5_conv2[0][0]
__________________________________________________________________________________________________
block5_conv4 (Conv2D) (None, 3, 3, 512) 2359808 block5_conv3[0][0]
__________________________________________________________________________________________________
flatten_2 (Flatten) (None, 4608) 0 block5_conv4[0][0]
__________________________________________________________________________________________________
dense_3 (Dense) (None, 1) 4609 flatten_2[0][0]
__________________________________________________________________________________________________
dense_4 (Dense) (None, 1) 4609 flatten_2[0][0]
==================================================================================================
Total params: 20,033,602
Trainable params: 20,033,602
Non-trainable params: 0
__________________________________________________________________________________________________
###Markdown
Applying binary cross-entropy to both parallel outputs of the discriminator lets us maximize the adversarial loss while minimizing the classification loss, with the relative importance of the two terms controlled by the loss weights alpha and beta.
###Code
trail_discriminator.compile(optimizer=Adam(lr=1e-3), loss=['binary_crossentropy', 'binary_crossentropy'], loss_weights=[alpha, beta])
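# (Hypothetical sanity check, assuming numpy is available as np and that alpha and
# beta were defined earlier in the notebook.) For a two-output model compiled with
# loss_weights, Keras reports [total_loss, head1_loss, head2_loss], where
# total_loss = alpha * head1_loss + beta * head2_loss.
dummy_x = np.random.rand(4, 48, 48, 3).astype('float32')
dummy_validity = np.ones((4, 1), dtype='float32')
dummy_face = np.ones((4, 1), dtype='float32')
total, adv_l, cls_l = trail_discriminator.test_on_batch(dummy_x, [dummy_validity, dummy_face])
print(total, alpha * adv_l + beta * cls_l)  # the two values should agree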
trail_discriminator.summary()
###Output
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_2 (InputLayer) (None, 48, 48, 3) 0
__________________________________________________________________________________________________
block1_conv1 (Conv2D) (None, 48, 48, 64) 1792 input_2[0][0]
__________________________________________________________________________________________________
block1_conv2 (Conv2D) (None, 48, 48, 64) 36928 block1_conv1[0][0]
__________________________________________________________________________________________________
block1_pool (MaxPooling2D) (None, 24, 24, 64) 0 block1_conv2[0][0]
__________________________________________________________________________________________________
block2_conv1 (Conv2D) (None, 24, 24, 128) 73856 block1_pool[0][0]
__________________________________________________________________________________________________
block2_conv2 (Conv2D) (None, 24, 24, 128) 147584 block2_conv1[0][0]
__________________________________________________________________________________________________
block2_pool (MaxPooling2D) (None, 12, 12, 128) 0 block2_conv2[0][0]
__________________________________________________________________________________________________
block3_conv1 (Conv2D) (None, 12, 12, 256) 295168 block2_pool[0][0]
__________________________________________________________________________________________________
block3_conv2 (Conv2D) (None, 12, 12, 256) 590080 block3_conv1[0][0]
__________________________________________________________________________________________________
block3_conv3 (Conv2D) (None, 12, 12, 256) 590080 block3_conv2[0][0]
__________________________________________________________________________________________________
block3_conv4 (Conv2D) (None, 12, 12, 256) 590080 block3_conv3[0][0]
__________________________________________________________________________________________________
block3_pool (MaxPooling2D) (None, 6, 6, 256) 0 block3_conv4[0][0]
__________________________________________________________________________________________________
block4_conv1 (Conv2D) (None, 6, 6, 512) 1180160 block3_pool[0][0]
__________________________________________________________________________________________________
block4_conv2 (Conv2D) (None, 6, 6, 512) 2359808 block4_conv1[0][0]
__________________________________________________________________________________________________
block4_conv3 (Conv2D) (None, 6, 6, 512) 2359808 block4_conv2[0][0]
__________________________________________________________________________________________________
block4_conv4 (Conv2D) (None, 6, 6, 512) 2359808 block4_conv3[0][0]
__________________________________________________________________________________________________
block4_pool (MaxPooling2D) (None, 3, 3, 512) 0 block4_conv4[0][0]
__________________________________________________________________________________________________
block5_conv1 (Conv2D) (None, 3, 3, 512) 2359808 block4_pool[0][0]
__________________________________________________________________________________________________
block5_conv2 (Conv2D) (None, 3, 3, 512) 2359808 block5_conv1[0][0]
__________________________________________________________________________________________________
block5_conv3 (Conv2D) (None, 3, 3, 512) 2359808 block5_conv2[0][0]
__________________________________________________________________________________________________
block5_conv4 (Conv2D) (None, 3, 3, 512) 2359808 block5_conv3[0][0]
__________________________________________________________________________________________________
flatten_2 (Flatten) (None, 4608) 0 block5_conv4[0][0]
__________________________________________________________________________________________________
dense_3 (Dense) (None, 1) 4609 flatten_2[0][0]
__________________________________________________________________________________________________
dense_4 (Dense) (None, 1) 4609 flatten_2[0][0]
==================================================================================================
Total params: 20,033,602
Trainable params: 20,033,602
Non-trainable params: 0
__________________________________________________________________________________________________
###Markdown
We will create a model with the generator and discriminator stacked, to train the generator!!
###Code
# High res. and low res. images
img_hr = Input(shape=hr_shape)
img_lr = Input(shape=lr_shape)
# Generate super resolution version from low resolution version of an image.
inter_sr, img_sr = generator(img_lr) #super-resolution : G1(ILR) , #refinement : G2(G1(ILR))
validity, face = trail_discriminator(img_sr)
GD_combined = Model([img_lr, img_hr], [validity, face, inter_sr, img_sr])
### there are 4 outputs from complete GAN model: 'validity' for adversarial loss, 'face' for classification loss, 'inter_sr' and 'img_sr' for pixel-wise loss.
### All these losses will be minimized to train the generator!!!
###Output
_____no_output_____
###Markdown
Before compiling the combined model we need to freeze the discriminator weights!!
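With these loss weights the generator update minimises $\alpha\,\mathrm{BCE}(\text{validity}) + \beta\,\mathrm{BCE}(\text{face}) + \mathrm{MSE}(G_1(I_{LR}), I_{HR}) + \mathrm{MSE}(G_2(G_1(I_{LR})), I_{HR})$, read off the compile call below, while the frozen discriminator only supplies gradients.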
###Code
trail_discriminator.trainable = False
GD_combined.compile(optimizer=Adam(lr=1e-3), loss=['binary_crossentropy', 'binary_crossentropy', 'mse', 'mse'],loss_weights=[alpha, beta, 1, 1])
GD_combined.summary()
###Output
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_6 (InputLayer) (None, 12, 12, 3) 0
_________________________________________________________________
model_1 (Model) [(None, 48, 48, 3), (None 6350470
_________________________________________________________________
model_3 (Model) [(None, 1), (None, 1)] 20033602
=================================================================
Total params: 26,384,072
Trainable params: 6,332,806
Non-trainable params: 20,051,266
_________________________________________________________________
###Markdown
The definition of the train function is incomplete, since our input image batches are not ready yet!!! But the model.train_on_batch calls are in place for training the discriminator and the generator!!
###Code
def train(epochs, batch_size=1):
start_time = datetime.datetime.now()
for epoch in range(epochs):
# ----------------------
# Train Discriminator
# ----------------------
# Sample images and their conditioning counterparts
# NOTE: how we will load the batch of data is yet to be figured out, so this line is just written as a representation of that task!!
imgs_hr, imgs_lr, y = load_data(batch_size) ##################IMPORTANT TO FEED#########################
# From low res. image generate high res. version
inter_sr, img_sr = generator.predict(imgs_lr)
valid = np.ones((batch_size,))
fake = np.zeros((batch_size,))
d_loss_real = trail_discriminator.train_on_batch(imgs_hr, [valid,y]) ### there are two outputs for discriminator and training will take place taking into account of both of them
d_loss_fake = trail_discriminator.train_on_batch(img_sr, [fake,y])
d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)
# ------------------
# Train Generator
# ------------------
# Sample images and their conditioning counterparts
imgs_hr, imgs_lr, y = load_data(batch_size) #######################IMPORTANT TO FEED#####################
# The generators want the discriminators to label the generated images as real
valid = np.ones((batch_size,))
# Train the generators
g_loss = GD_combined.train_on_batch([imgs_lr, imgs_hr], [valid, y, imgs_hr, imgs_hr])  # the last two targets are the HR ground truth for the two pixel-wise MSE losses
elapsed_time = datetime.datetime.now() - start_time
# Plot the progress
print ("%d time: %s" % (epoch, elapsed_time))
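# `load_data` is not defined anywhere in this notebook (see the NOTE above).
# A minimal illustrative stub is sketched here as an assumption, only to show
# the shapes the loop expects: HR images (batch, 48, 48, 3), LR images
# (batch, 12, 12, 3) and binary face labels (batch,), taken from the model
# summaries printed earlier. Replace it with the real data pipeline.
def load_data(batch_size):
    imgs_hr = np.random.rand(batch_size, 48, 48, 3).astype('float32')
    imgs_lr = np.random.rand(batch_size, 12, 12, 3).astype('float32')
    y = np.random.randint(0, 2, size=(batch_size,)).astype('float32')
    return imgs_hr, imgs_lr, y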
####start training
train(epochs,batch_size)
###Output
_____no_output_____ |
Lab5_BestWork.ipynb | ###Markdown
COMP 215 - LAB 5 Cellular automata Date: April 12, 2022. Code examples from [Think Complexity, 2nd edition](https://thinkcomplex.com). Copyright 2016 Allen Downey, [MIT License](http://opensource.org/licenses/MIT)
###Code
import os
if not os.path.exists('utils.py'):
!wget https://raw.githubusercontent.com/AllenDowney/ThinkComplexity2/master/notebooks/utils.py
%matplotlib inline
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import seaborn as sns
from utils import decorate
###Output
_____no_output_____
###Markdown
Zero-dimensional CA Here's a simple implementation of the 0-D CA I mentioned in the book, with one cell.
###Code
n = 10
x = np.zeros(n)
print(x)
###Output
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
###Markdown
To get the state of the cell in the next time step, we increment the current state mod 2.
###Code
x[1] = (x[0] + 1) % 2
x[1]
###Output
_____no_output_____
###Markdown
Filling in the rest of the array.
###Code
for i in range(2, n):
x[i] = (x[i-1] + 1) % 2
print(x)
###Output
[0. 1. 0. 1. 0. 1. 0. 1. 0. 1.]
###Markdown
So the behavior of this CA is simple: it blinks. One-dimensional CA Just as we used a 1-D array to show the state of a single cell over time, we'll use a 2-D array to show the state of a 1-D CA over time, with one column per cell and one row per timestep.
###Code
rows = 5
cols = 11
array = np.zeros((rows, cols), dtype=np.uint8)
array[0, 5] = 1
print(array)
###Output
[[0 0 0 0 0 1 0 0 0 0 0]
[0 0 0 0 0 0 0 0 0 0 0]
[0 0 0 0 0 0 0 0 0 0 0]
[0 0 0 0 0 0 0 0 0 0 0]
[0 0 0 0 0 0 0 0 0 0 0]]
###Markdown
To plot the array I use `plt.imshow`
###Code
def plot_ca(array):
plt.imshow(array, cmap='Blues', interpolation='none')
###Output
_____no_output_____
###Markdown
Here's what it looks like after we initialize the first row.
###Code
plot_ca(array)
###Output
_____no_output_____
###Markdown
And here's the function that fills in the next row. The rule for this CA is to take the sum of a cell and its two neighbors mod 2.
###Code
def step(array, i):
"""Compute row i of a CA.
"""
rows, cols = array.shape
row = array[i-1]
for j in range(1, cols):
elts = row[j-1:j+2]
array[i, j] = sum(elts) % 2
###Output
_____no_output_____
###Markdown
Here's the second row.
###Code
step(array, 1)
plot_ca(array)
###Output
_____no_output_____
###Markdown
And here's what it looks like with the rest of the cells filled in.
###Code
for i in range(1, rows):
step(array, i)
plot_ca(array)
###Output
_____no_output_____
###Markdown
For a simple set of rules, the behavior is more interesting than you might expect. **Exercise:** Modify this code to increase the number of rows and columns and see what this CA does after more time steps. Cross correlation. We can update the CA more quickly using "cross correlation". The cross correlation of an array, `a`, with a window, `w`, is a new array, `c`, where element `k` is $c_k = \sum_{n=0}^{N-1} a_{n+k} \cdot w_n$. In Python, we can compute element `k` like this:
###Code
def c_k(a, w, k):
"""Compute element k of the cross correlation of a and w.
"""
N = len(w)
return sum(a[k:k+N] * w)
###Output
_____no_output_____
###Markdown
To see how this works, I'll create an array:
###Code
N = 10
row = np.arange(N, dtype=np.uint8)
print(row)
###Output
[0 1 2 3 4 5 6 7 8 9]
###Markdown
And a window:
###Code
window = [1, 1, 1]
print(window)
###Output
[1, 1, 1]
###Markdown
With this window, each element of `c` is the sum of three neighbors in the array:
###Code
c_k(row, window, 0)
c_k(row, window, 1)
###Output
_____no_output_____
###Markdown
The following function computes the elements of `c` for all values of `k` where the window can overlap with the array:
###Code
def correlate(row, window):
"""Compute the cross correlation of a and w.
"""
cols = len(row)
N = len(window)
c = [c_k(row, window, k) for k in range(cols-N+1)]
return np.array(c)
c = correlate(row, window)
print(c)
###Output
[ 3 6 9 12 15 18 21 24]
###Markdown
This operation is useful in many domains, so libraries like NumPy usually provide an implementation. Here's the version from NumPy.
###Code
c = np.correlate(row, window, mode='valid')
print(c)
###Output
[ 3 6 9 12 15 18 21 24]
###Markdown
With `mode='valid'`, the NumPy version does the same thing as mine: it only computes the elements of `c` where the window overlaps with the array. A drawback of this mode is that the result is smaller than `array`. An alternative is `mode='same'`, which makes the result the same size as `array` by extending `array` with zeros on both sides. Here's the result:
###Code
c = np.correlate(row, window, mode='same')
print(c)
###Output
[ 1 3 6 9 12 15 18 21 24 17]
###Markdown
**Exercise 1 correlate (sliding dot product)** a) Write a version of correlate that returns the same result as np.correlate with mode='same'. b) Notice this “pads” the row with zeros at either end, which will create a bias at the edge of the automata. It would be ideal if the edges “wrapped”. Update your algorithm so it pads each end of the row with values from the opposite end, effectively connecting the two edges of the CA. c) Look up the documentation for the np.pad function and notice it has a “wrap” mode. Do a small experiment to see if np.pad(...., mode=”wrap”) works the same as your padding algorithm.
###Code
# Hint: use np.pad to add zeros at the beginning and end of `row`
def myCorrelate(row, window):
"""Compute the cross correlation of a and w.
"""
N = len(window)
padded_c = np.pad(row, (1,), 'constant', constant_values=(0,0))
cols = len(padded_c)
print(padded_c)
c = [c_k(padded_c, window, k) for k in range(cols-N+1)]
return np.array(c)
c = myCorrelate(row, window)
print(c)
#Excercise 1 (b,c)
#1,
def myExperimentalCorrelate(row, window):
"""Compute the cross correlation of a and w.
"""
N = len(window)
padded_c = np.pad(row, (1,), mode='wrap')
cols = len(padded_c)
print(padded_c)
c = [c_k(padded_c, window, k) for k in range(cols-N+1)]
return np.array(c)
c = myExperimentalCorrelate(row, window)
print(c)
#1c
#Results are similar to my correlate function, except for the beginning.
###Output
[9 0 1 2 3 4 5 6 7 8 9 0]
[10 3 6 9 12 15 18 21 24 17]
###Markdown
Update with correlate. Now we can use `np.correlate` to update the array. I'll start again with an array that contains one column for each cell and one row for each time step, and I'll initialize the first row with a single "on" cell in the middle:
###Code
rows = 5
cols = 11
array = np.zeros((rows, cols), dtype=np.uint8)
array[0, 5] = 1
print(array)
###Output
[[0 0 0 0 0 1 0 0 0 0 0]
[0 0 0 0 0 0 0 0 0 0 0]
[0 0 0 0 0 0 0 0 0 0 0]
[0 0 0 0 0 0 0 0 0 0 0]
[0 0 0 0 0 0 0 0 0 0 0]]
###Markdown
Now here's a version of `step` that uses `np.correlate`
###Code
def step2(array, i, window=[1,1,1]):
"""Compute row i of a CA.
"""
row = array[i-1]
c = np.correlate(row, window, mode='same')
array[i] = c % 2
###Output
_____no_output_____
###Markdown
And the result is the same.
###Code
for i in range(1, rows):
step2(array, i)
plot_ca(array)
###Output
_____no_output_____
###Markdown
The Cell1D object `Cell1D` encapsulates the code from the previous section.
###Code
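# `Cell1D` below calls a `make_table` helper that is not defined in this
# notebook's cells. A minimal sketch is added here as an assumption (based on
# the Think Complexity reference implementation) so the class can run: it
# turns the rule number into an 8-entry lookup table, lowest-order bit first,
# consistent with the `myMake_Table` solution for Exercise 2 further down.
def make_table(rule):
    rule = np.array([rule], dtype=np.uint8)
    return np.unpackbits(rule)[::-1]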
class Cell1D:
"""Represents a 1-D a cellular automaton"""
def __init__(self, rule, n, m=None):
"""Initializes the CA.
rule: integer
n: number of rows
m: number of columns
Attributes:
table: rule dictionary that maps from triple to next state.
array: the numpy array that contains the data.
next: the index of the next empty row.
"""
self.table = make_table(rule)
self.n = n
self.m = 2*n + 1 if m is None else m
self.array = np.zeros((n, self.m), dtype=np.int8)
self.next = 0
def start_single(self):
"""Starts with one cell in the middle of the top row."""
self.array[0, self.m//2] = 1
self.next += 1
def start_random(self, prop):
"""Start with random values in the top row."""
randoms = [0,1]
self.array[0] = np.array([int(np.random.choice(randoms, 1, p=[1-prop, prop])) for item in range(self.m)])  # prop is the probability that a cell starts 'on'
self.next += 1
print(self.array)
def start_string(self, s):
"""Start with values from a string of 1s and 0s."""
self.array[0] = np.array([int(x) for x in s])
self.next += 1
def loop(self, steps=1):
"""Executes the given number of time steps."""
for i in range(steps):
self.step()
def step(self):
"""Executes one time step by computing the next row of the array."""
a = self.array
i = self.next
window = [4, 2, 1]
c = np.correlate(a[i-1], window, mode='same')
a[i] = self.table[c]
self.next += 1
def draw(self, start=0, end=None):
"""Draws the CA using pyplot.imshow.
start: index of the first column to be shown
end: index of the last column to be shown
"""
a = self.array[:, start:end]
plt.imshow(a, cmap='Blues', alpha=0.7)
# turn off axis tick marks
plt.xticks([])
plt.yticks([])
###Output
_____no_output_____
###Markdown
The following function makes and draws a CA.
###Code
def draw_ca(rule, n=32):
"""Makes and draw a 1D CA with a given rule.
rule: int rule number
n: number of rows
"""
ca = Cell1D(rule, n)
ca.start_single()
ca.loop(n-1)
ca.draw()
###Output
_____no_output_____
###Markdown
Here's an example that runs a Rule 50 CA for 10 steps.
###Code
draw_ca(rule=50, n=10)
plt.show('figs/chap05-1')
###Output
_____no_output_____
###Markdown
Another example:
###Code
draw_ca(rule=150, n=5)
plt.show('figs/chap05-2')
###Output
_____no_output_____
###Markdown
And one more example showing recursive structure.
###Code
draw_ca(rule=18, n=64)
plt.show('figs/chap05-3')
###Output
_____no_output_____
###Markdown
Rule 30 generates a sequence of bits that is indistinguishable from random:
###Code
draw_ca(rule=30, n=100)
plt.show('figs/chap05-4')
###Output
_____no_output_____
###Markdown
And Rule 110 is Turing complete!
###Code
draw_ca(rule=110, n=100)
plt.show('figs/chap05-5')
###Output
_____no_output_____
###Markdown
Here's a longer run that has some spaceships.
###Code
np.random.seed(21)
n = 600
ca = Cell1D(rule=110, n=n)
ca.start_random(prop=0.5)
ca.loop(n-1)  # step through all rows of this 600-row CA
ca.draw()
plt.show('figs/chap05-6')
###Output
[[0 0 1 ... 1 0 1]
[0 0 0 ... 0 0 0]
[0 0 0 ... 0 0 0]
...
[0 0 0 ... 0 0 0]
[0 0 0 ... 0 0 0]
[0 0 0 ... 0 0 0]]
###Markdown
**Exercise 2 make_table** This function is effectively a decimal to binary conversion with 3 important constraints: a) the “bit string” output is a numpy.array of 0’s and 1’s; b) the length of the output bit string is fixed – it is always 8 bits long; c) the bit ordering appears “reversed” because of the way sequences are printed (with index 0 first), i.e., the lowest-order bit is at index 0 and the highest-order bit at index 7. Develop your own version of function make_table(rule) that adheres to the same constraints, but does not use numpy to unpack the bits. Hints: - do some examples with pencil-and-paper – you can “extract” bits from an integer with the modulus operator, %, and you can “pop” bits off an integer with the integer divide operator, //; - this is a list accumulator algorithm but is not easily written as a list comprehension – try a while loop; - pad the front of the bit-string with zeros to make it the right length – try: [0] * n
###Code
# Excercise 2 make_table
def myMake_Table(rule):
binString = ''
zero = 0
while rule >= 1:
binString = binString + str(rule%2)
rule = rule // 2
c = [int(item) for item in binString]
for i in range((abs(len(c) - 8))):
c.append(zero)
d = np.array(c)
return d
z = myMake_Table(50)
print(z)
###Output
[0 1 0 0 1 1 0 0]
###Markdown
**Exercise 3 start_random** The start_random method of Cell1D distributes on/off cells with a "uniform distribution", such that approx. half the cells are on and half off. Add a default parameter p=0.5, that distributes “on” cells with probability p, on [0..1]. E.g. when p=0.2, only approx. 20% of cells are randomly turned “on”. Hints: - this is neatly written as a list comprehension; - but needs to be returned as a np.array with dtype=np.uint8. Upgrade the draw_ca function so you can optionally pass in a value of p. If p is None (default value), draw_ca works as usual. If p is not None, then ca.start_random is called with the value of p.
###Code
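# A sketch, added as an assumption rather than part of the original submission,
# of the list-comprehension start_random hinted at in Exercise 3. It operates
# on a Cell1D instance (e.g. start_random_p(ca, p=0.2)) and turns each cell of
# the top row on with probability p, returning np.uint8 values.
def start_random_p(ca, p=0.5):
    ca.array[0] = np.array([1 if np.random.random() < p else 0
                            for _ in range(ca.m)], dtype=np.uint8)
    ca.next += 1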
def mydraw_ca(rule, n, p):
"""Makes and draw a 1D CA with a given rule.
rule: int rule number
n: number of rows
"""
ca = Cell1D(rule, n)
if p == 'none':
ca.start_single()
else:
ca.start_random(p)
ca.loop(n-1)
ca.draw()
mydraw_ca(110, 400, 'none')
###Output
_____no_output_____
###Markdown
**Exercise 4 CA experiment** Experiment with these basic CA to identify at least 1 example from each of Wolfram’s 4 CA “classes”: 1. rapidly converge to a uniform state. 2. rapidly converge to a repetitive or stable state. 3. generate non-repeating, random states. 4. generate chaos, with areas of repetitive or stable states, but also structures that interact in complex ways. Create a 2x2 grid of plots that shows an example of each class of CA side-by-side. Hint: plt.subplots(nrows=2, ncols=2) returns a 2x2 array of axes – see matplotlib docs. What does it mean that such a simple system can produce this range of behaviours? What lessons or conclusions might we draw about studying complex phenomena in the real world from our study of elementary cellular automata?
###Code
#Experiment 4 CA Experiment
# 1. Rule 0 will rapidly converge to a uniform state.
# 2. Rule 18 will converge to a repetitive state.
# 3. Rule 30 is a class 3 rule, generating non-repeating random states.
# 4. Rule 110 is a class 4 rule, generating stable and non-stable areas, with areas that appear like spaceships, interacting in complex ways along each time-step.
plt.subplot(2, 2, 1)
draw_ca(rule=0, n = 100)
plt.subplot(2,2,2)
draw_ca(rule=18, n = 100)
plt.subplot(2,2,3)
draw_ca(rule=30, n = 100)
plt.subplot(2,2,4)
draw_ca(rule=110, n = 200)
#This system producing this wide range of behaviours signifies that even a simple algorithm
#can produce complex results over time.
#We can see in the real world how simple living things, such as viruses or monocellular organisms, can
#be produced from a very simple pattern or 'seed.'
###Output
_____no_output_____ |
FormalExperiments/Experiment-Facebook.ipynb | ###Markdown
F(v) = int(node_map[node] == 1)
###Code
def node_fn(node):
return int(node_map[node]==1)
F_org = sum([node_fn(i) for i in G.nodes()])/G_no_nodes
print(F_org)
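# The block below estimates the error of the Metropolis-Hastings estimator:
# for each of `no_runs` independent walks it takes the squared deviation of the
# running estimate (apparently one value per unit of sampling budget, since it
# is plotted against budget below) from the true mean F_org, averages over the
# runs, and reports the square root normalised by F_org, i.e. a relative RMSE.
# The same pattern is repeated for the RDS, RDSRR and MHRR samplers.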
MSE_MH_t = 0
for ii in range(1,no_runs+1):
MSE_MH_t += (MH_sampling(G,max_B)-F_org)**2
MSE_MH = MSE_MH_t/(no_runs)
MSE_MH = np.sqrt(MSE_MH)/F_org
MSE_rds_t = 0
for ii in range(1,no_runs+1):
MSE_rds_t += (RDS_sampling(G,max_B)-F_org)**2
MSE_rds = MSE_rds_t/(no_runs)
MSE_rds = np.sqrt(MSE_rds)/F_org
MSE_rdsrr_t = 0
for ii in range(1,no_runs+1):
MSE_rdsrr_t += (RDSRR_sampling(G,max_B)-F_org)**2
MSE_rdsrr = MSE_rdsrr_t/(no_runs)
MSE_rdsrr = np.sqrt(MSE_rdsrr)/F_org
MSE_mhrr_t = 0
for ii in range(1,no_runs+1):
MSE_mhrr_t += (MHRR_sampling(G,max_B)-F_org)**2
MSE_mhrr = MSE_mhrr_t/(no_runs)
MSE_mhrr = np.sqrt(MSE_mhrr)/F_org
plt.figure(figsize=(10,8))
plt.plot(np.array(list(range(len(MSE_MH)))),MSE_MH,color='red',linewidth=1.5,label='MH')
plt.plot(np.array(list(range(len(MSE_mhrr)))),MSE_mhrr,color='blue',linewidth=1.5,label='MHRR')
plt.plot(np.array(list(range(len(MSE_rds)))),MSE_rds,color='black',linewidth=1.5,label='RDS')
plt.plot(np.array(list(range(len(MSE_rdsrr)))),MSE_rdsrr,color='purple',linewidth=1.5,label='RDSRR')
legend = plt.legend(loc='best', shadow=True, fontsize='xx-large')
legend.get_frame().set_facecolor('0.90')
for legobj in legend.legendHandles:
legobj.set_linewidth(2.5)
plt.ylim(top=2.5)
plt.grid()
###Output
_____no_output_____
###Markdown
F(v) = int(G.degree(node)>100)
###Code
def node_fn(node):
return int(G.degree(node)>100)
F_org = sum([node_fn(i) for i in G.nodes()])/G_no_nodes
print(F_org)
MSE_MH_t = 0
for ii in range(1,no_runs+1):
MSE_MH_t += (MH_sampling(G,max_B)-F_org)**2
MSE_MH = MSE_MH_t/(no_runs)
MSE_MH = np.sqrt(MSE_MH)/F_org
MSE_rds_t = 0
for ii in range(1,no_runs+1):
MSE_rds_t += (RDS_sampling(G,max_B)-F_org)**2
MSE_rds = MSE_rds_t/(no_runs)
MSE_rds = np.sqrt(MSE_rds)/F_org
MSE_rdsrr_t = 0
for ii in range(1,no_runs+1):
MSE_rdsrr_t += (RDSRR_sampling(G,max_B)-F_org)**2
MSE_rdsrr = MSE_rdsrr_t/(no_runs)
MSE_rdsrr = np.sqrt(MSE_rdsrr)/F_org
MSE_mhrr_t = 0
for ii in range(1,no_runs+1):
MSE_mhrr_t += (MHRR_sampling(G,max_B)-F_org)**2
MSE_mhrr = MSE_mhrr_t/(no_runs)
MSE_mhrr = np.sqrt(MSE_mhrr)/F_org
plt.figure(figsize=(10,8))
plt.plot(np.array(list(range(len(MSE_MH)))),MSE_MH,color='red',linewidth=1.5,label='MH')
plt.plot(np.array(list(range(len(MSE_mhrr)))),MSE_mhrr,color='blue',linewidth=1.5,label='MHRR')
plt.plot(np.array(list(range(len(MSE_rds)))),MSE_rds,color='black',linewidth=1.5,label='RDS')
plt.plot(np.array(list(range(len(MSE_rdsrr)))),MSE_rdsrr,color='purple',linewidth=1.5,label='RDSRR')
legend = plt.legend(loc='best', shadow=True, fontsize='xx-large')
legend.get_frame().set_facecolor('0.90')
for legobj in legend.legendHandles:
legobj.set_linewidth(2.5)
plt.ylim(top=2)
plt.grid()
###Output
_____no_output_____
###Markdown
F(v) = isprime(degree(v))
###Code
def node_fn(node):
return int(sympy.isprime(G.degree(node)))
F_org = sum([node_fn(i) for i in G.nodes()])/G_no_nodes
print(F_org)
MSE_MH_t = 0
for ii in range(1,no_runs+1):
MSE_MH_t += (MH_sampling(G,max_B)-F_org)**2
MSE_MH = MSE_MH_t/(no_runs)
MSE_MH = np.sqrt(MSE_MH)/F_org
MSE_rds_t = 0
for ii in range(1,no_runs+1):
MSE_rds_t += (RDS_sampling(G,max_B)-F_org)**2
MSE_rds = MSE_rds_t/(no_runs)
MSE_rds = np.sqrt(MSE_rds)/F_org
MSE_rdsrr_t = 0
for ii in range(1,no_runs+1):
MSE_rdsrr_t += (RDSRR_sampling(G,max_B)-F_org)**2
MSE_rdsrr = MSE_rdsrr_t/(no_runs)
MSE_rdsrr = np.sqrt(MSE_rdsrr)/F_org
MSE_mhrr_t = 0
for ii in range(1,no_runs+1):
MSE_mhrr_t += (MHRR_sampling(G,max_B)-F_org)**2
MSE_mhrr = MSE_mhrr_t/(no_runs)
MSE_mhrr = np.sqrt(MSE_mhrr)/F_org
plt.figure(figsize=(10,8))
plt.plot(np.array(list(range(len(MSE_MH)))),MSE_MH,color='red',linewidth=1.5,label='MH')
plt.plot(np.array(list(range(len(MSE_mhrr)))),MSE_mhrr,color='blue',linewidth=1.5,label='MHRR')
plt.plot(np.array(list(range(len(MSE_rds)))),MSE_rds,color='black',linewidth=1.5,label='RDS')
plt.plot(np.array(list(range(len(MSE_rdsrr)))),MSE_rdsrr,color='purple',linewidth=1.5,label='RDSRR')
plt.ylim(0,0.5)
legend = plt.legend(loc='best', shadow=True, fontsize='xx-large')
legend.get_frame().set_facecolor('0.90')
for legobj in legend.legendHandles:
legobj.set_linewidth(2.5)
plt.grid()
###Output
_____no_output_____
###Markdown
F(v) = random
###Code
fn_mapping = np.random.exponential(1,size=(G_no_nodes))
def node_fn(node):
return fn_mapping[node]
F_org = sum([node_fn(i) for i in G.nodes()])/G_no_nodes
print(F_org)
MSE_MH_t = 0
for ii in range(1,no_runs+1):
MSE_MH_t += (MH_sampling(G,max_B)-F_org)**2
MSE_MH = MSE_MH_t/(no_runs)
MSE_MH = np.sqrt(MSE_MH)/F_org
MSE_rds_t = 0
for ii in range(1,no_runs+1):
MSE_rds_t += (RDS_sampling(G,max_B)-F_org)**2
MSE_rds = MSE_rds_t/(no_runs)
MSE_rds = np.sqrt(MSE_rds)/F_org
MSE_rdsrr_t = 0
for ii in range(1,no_runs+1):
MSE_rdsrr_t += (RDSRR_sampling(G,max_B)-F_org)**2
MSE_rdsrr = MSE_rdsrr_t/(no_runs)
MSE_rdsrr = np.sqrt(MSE_rdsrr)/F_org
MSE_mhrr_t = 0
for ii in range(1,no_runs+1):
MSE_mhrr_t += (MHRR_sampling(G,max_B)-F_org)**2
MSE_mhrr = MSE_mhrr_t/(no_runs)
MSE_mhrr = np.sqrt(MSE_mhrr)/F_org
plt.figure(figsize=(10,8))
plt.plot(np.array(list(range(len(MSE_MH)))),MSE_MH,color='red',linewidth=1.5,label='MH')
plt.plot(np.array(list(range(len(MSE_mhrr)))),MSE_mhrr,color='blue',linewidth=1.5,label='MHRR')
plt.plot(np.array(list(range(len(MSE_rds)))),MSE_rds,color='black',linewidth=1.5,label='RDS')
plt.plot(np.array(list(range(len(MSE_rdsrr)))),MSE_rdsrr,color='purple',linewidth=1.5,label='RDSRR')
legend = plt.legend(loc='best', shadow=True, fontsize='xx-large')
legend.get_frame().set_facecolor('0.90')
for legobj in legend.legendHandles:
legobj.set_linewidth(2.5)
plt.ylim(0,0.3)
plt.grid()
###Output
_____no_output_____ |
.ipynb_checkpoints/python_version-checkpoint.ipynb | ###Markdown
This is very nice
###Code
import numpy as np
import sounddevice as sd
import matplotlib.pyplot as plt
# Convert a key number to a frequency in Hz; despite the parameter name, the
# formula uses piano-key numbering (key 49 = A4 = 440 Hz).
def make_note(midi):
    return 2**((midi-49) / 12) * 440
fs = 44100  # sample rate in Hz
# C minor triad: keys 52, 55, 59 -> C5, Eb5, G5
c_min = np.array([
    make_note(52),
    make_note(55),
    make_note(59)
])
# relative amplitude of each note
amp = np.array([
    1,
    .5,
    .8
])
secs = 2
x = np.arange(fs * secs) / fs                             # time axis in seconds
X = np.tile(x, c_min.size).reshape((c_min.size, x.size))  # one row per note
Y = np.sin(X.T * (2 * np.pi) * c_min) * amp               # one sine wave per note, scaled
y = Y.sum(axis=1)                                         # mix the notes
y /= y.max()                                              # scale so the peak amplitude is 1
plt.figure(figsize=(20,4))
plt.plot(y)
sd.play(y, fs)  # pass the sample rate explicitly so the pitch is correct
###Output
_____no_output_____ |
Code/CalculateandFit_TK.ipynb | ###Markdown
Figure 3.1: Temporal STA (TK) of the iP-RGC and mP-RGC.
###Code
plt.rcParams["font.size"] = 12
os.chdir('..')
data_folder = os.getcwd()+"\\Experimental_Data_Example\\" # Note: use an absolute path to the data on your computer instead.
dt = 1/60
cn = 9
annots = loadmat(data_folder+'OLED_Data\\merge_0224_cSTA_wf_3min_Q100', squeeze_me = True)
x = annots['bin_pos']
x = (x-np.mean(x))/np.std(x)
spike = annots['reconstruct_spikes'][cn-1]
rstate, _ = np.histogram(spike, np.arange(len(x)+1)*dt)
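# Spike-triggered average: cross-correlating the z-scored stimulus with the
# spike-count train and dividing by the number of spikes in each window
# averages the stimulus around every spike, giving the temporal kernel cSTA.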
cSTA = np.correlate(x, rstate, 'same')/ np.correlate(np.ones_like(x), rstate, 'same')
cSTA = cSTA[int(len(cSTA)/2):int(len(cSTA)/2-1/dt)-1:-1]
taxis = -np.arange(len(cSTA))*dt
OLEDtaxis = taxis
plt.plot(taxis, cSTA, 'b+:')
OLEDcSTA = cSTA
name_list = ['epsilon', 'gamma', 'omegastar', 'deltastar', 'tau_y', 'Dmp']
para_dict = {}
for l in range(len(name_list)):
para_dict[name_list[l]] = np.zeros(60)
#-------------------------------------
para_dict['error'] = np.zeros(60)
epsilon = 10. #1/sec
gamma = 25.
omegastar = 30.
deltastar = 0.
tau_y = 0.04
Dmp = 10.
popt,pcov = curve_fit(NGD2L_TK_AS, np.abs(taxis), cSTA,
p0 = [epsilon, gamma , omegastar, deltastar, tau_y, Dmp ],
bounds = ([0 , 0 , 0 , -np.pi/2 , 0 , 0 ],
[np.inf , np.inf, np.inf , np.pi/2 , 0.1 , np.inf ] ))
for l in range(len(popt)):
para_dict[name_list[l]][cn-1] = popt[l]
# print(popt)
fit_cSTA = NGD2L_TK_AS(np.abs(taxis), *popt).copy()
OLEDfit_cSTA = fit_cSTA
# para_dict['error'][cn-1] = sum((fit_cSTA_list[cn-1]-cSTA_list[cn-1])**2)
plt.plot(taxis, fit_cSTA, 'r-')
plt.xlabel(r'$\delta t$ (s)', fontsize = 20)
plt.ylabel('$\chi(\gamma, s; \delta t) = K(-\delta t)$ ', fontsize = 20)
plt.xlim([-0.6,0])
fig = plt.gcf()
ax = plt.gca()
np.savez(data_folder+'\\OLED_Data\\fitNGD2LASpara.npz', para_dict=para_dict)
dt = 0.01
cn = 53
annots = loadmat(data_folder+'LED_Data\\20200408_cSTA_sort_unit2', squeeze_me = True)
sampling_rate = 20000
TimeStamps = annots['TimeStamps']
x = annots['a_data'][0, int(TimeStamps[0]*sampling_rate):int(TimeStamps[1]*sampling_rate)+1]
x = ndimage.gaussian_filter1d(x, sigma=int(sampling_rate*dt/2), mode='reflect') / dt
x = x[::int(sampling_rate*dt)]
x = x.astype(float)
x = (x -np.mean(x))/np.std(x)
T=np.arange(len(x))*dt+dt
rstate,_ = np.histogram(annots['Spikes'][cn-1]-TimeStamps[0], np.append(0,T))
cSTA = np.correlate(x, rstate, 'same')/ np.correlate(np.ones_like(x), rstate, 'same')
cSTA = cSTA[int(len(cSTA)/2):int(len(cSTA)/2-1/dt)-1:-1]
taxis = -np.arange(len(cSTA))*dt
LEDtaxis = taxis
plt.plot(taxis, cSTA, 'b+:')
LEDcSTA = cSTA
name_list = ['epsilon', 'gamma', 'omegastar', 'deltastar', 'tau_y', 'Dmp']
para_dict = {}
for l in range(len(name_list)):
para_dict[name_list[l]] = np.zeros(60)
#-------------------------------------
para_dict['error'] = np.zeros(60)
epsilon = 10. #1/sec
gamma = 25.
omegastar = 30.
deltastar = 0.
tau_y = 0.04
Dmp = 10.
popt,pcov = curve_fit(NGD2L_TK_AS, np.abs(taxis), cSTA,
p0 = [epsilon, gamma , omegastar, deltastar, tau_y, Dmp ],
bounds = ([0 , 0 , 0 , -np.pi/2 , 0 , 0 ],
[np.inf , np.inf, np.inf , np.pi/2 , 0.1 , np.inf ] ))
for l in range(len(popt)):
para_dict[name_list[l]][cn] = popt[l]
# print(popt)
fit_cSTA = NGD2L_TK_AS(np.abs(taxis), *popt).copy()
LEDfit_cSTA = fit_cSTA
# para_dict['error'][cn] = sum((fit_cSTA_list[cn]-cSTA_list[cn])**2)
plt.plot(taxis, fit_cSTA, 'r-')
plt.xlabel(r'$\delta t$ (s)', fontsize = 20)
plt.ylabel('$\chi(\gamma, s; \delta t) = K(-\delta t)$ ', fontsize = 20)
plt.axhline(0, c='gray')
plt.legend( (r'measured $K_t(-\delta t)$', r'fitted $(K_{Delay}*K_w)(-\delta t)$'), fontsize = 16 )
plt.xlim([-0.6,0])
fig = plt.gcf()
fig.set_size_inches(10, 5)
###Output
_____no_output_____ |
notebooks/experiments/bitcoin/Bitcoin_Alpha_CVX_optimization.ipynb | ###Markdown
Imports
###Code
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import cvxpy as cp
import time
import collections
from typing import Dict
from typing import List
import pandas as pd
import numpy as np
import datetime
import matplotlib.pyplot as plt
import seaborn as sns
import networkx as nx
import imp
import os
import pickle as pk
%matplotlib inline
import sys
sys.path.insert(0, '../../../src/')
import network_utils
import utils
###Output
_____no_output_____
###Markdown
Helper functions
###Code
def reload():
imp.reload(network_utils)
imp.reload(utils)
def get_array_of_138(a):
r = a
if len(a) < 138:
r = np.array(list(a) + [0 for i in range(138 - len(a))])
return r
def get_matrix_stochastic(a):
a = a / np.sum(a)
return np.matrix(a)
###Output
_____no_output_____
###Markdown
Body
###Code
triad_map, triad_list = network_utils.generate_all_possible_sparse_triads()
unique_triad_num = len(triad_list)
transitives = []
for triad in triad_list:
transitives.append(network_utils.is_sparsely_transitive_balanced(triad))
transitives = np.array(transitives)
t = np.sum(transitives)
print('{} transitive and {} nontransitive.'.format(t, 138-t))
ch = []
for triad in triad_list:
ch.append(network_utils.is_sparsely_cartwright_harary_balanced(triad))
ch = np.array(ch)
t = np.sum(ch)
print('{} C&H balance and {} non C&H balance.'.format(t, 138-t))
cluster = []
for triad in triad_list:
cluster.append(network_utils.is_sparsely_clustering_balanced(triad))
cluster = np.array(cluster)
t = np.sum(cluster)
print('{} clustering balance and {} non C&H balance.'.format(t, 138-t))
###Output
93 transitive and 45 nontransitive.
24 C&H balance and 114 non C&H balance.
44 clustering balance and 94 non C&H balance.
###Markdown
Convex optimization problem
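In words, the cell below solves (reading directly off the constraints) $\min_{P} \sum_i \|P_i - P_{i-1}\|_2$ subject to $0 < P_i \le 1$, $P_i \mathbf{1} = \mathbf{1}$ (each $P_i$ row-stochastic), $r_i P_i = r_{i+1}$ and $\|r_{i+1} P_i - r_{i+1}\|_2 < \epsilon$: a sequence of slowly varying transition matrices that map each observed triad distribution to the next.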
###Code
loaded_d = utils.load_it('/home/omid/Downloads/DT/cvx_data_bitcoin_alpha_separated.pk')
obs = loaded_d['obs']
T = loaded_d['T']
obs_mat = []
for o in obs:
obs_mat.append(np.matrix(o))
obs_normalized = []
for o in obs:
obs_normalized.append(get_matrix_stochastic(o))
# l = l - 1 # one less than actual value
r = obs_normalized
l = len(T) # 66
test_numbers = 10
l
start_time = time.time()
n = 138
eps = 0.01
# lam1 = 0.5
errs = []
for test_number in np.arange(test_numbers, 0, -1):
P = [cp.Variable(n, n) for _ in range(l - test_number - 1)]
term1 = 0
for i in range(1, l - test_number - 1):
term1 += cp.norm2(P[i] - P[i - 1])
# term2 = 0
# for i in range(1, l - test_number - 1):
# term2 += cp.norm1(P[i] - P[i - 1])
objective = cp.Minimize(term1) # + term2 * lam1)
# Constraints.
constraints = []
for i in range(l - test_number - 1):
constraints += (
[0 < P[i],
P[i] <= 1,
P[i] * np.ones(n) == np.ones(n),
r[i] * P[i] == r[i + 1],
# r[i + 1] * P[i] == r[i + 1]])
cp.norm2(r[i + 1] * P[i] - r[i + 1]) < eps])
# Problem.
prob = cp.Problem(objective, constraints)
# Solving the problem.
res = prob.solve(cp.MOSEK)
err = np.linalg.norm(r[l - test_number] - (r[l - test_number - 1] * P[l - test_number - 2].value), 2)
errs.append(err)
duration = time.time() - start_time
print('It took :{} mins.'.format(round(duration/60, 2)))
print('Errors: {} +- {}'.format(round(np.mean(errs), 4), round(np.std(errs)), 6))
print(errs)
# Baselines.
mean_errs = []
for test_number in np.arange(test_numbers, 0, -1):
mean_err = np.linalg.norm(r[l - test_number] - np.mean(r[:l - test_number - 1], axis=0)[0], 2)
mean_errs.append(mean_err)
last_errs = []
for test_number in np.arange(test_numbers, 0, -1):
last_err = np.linalg.norm(r[l - test_number] - r[l - test_number - 1], 2)
last_errs.append(last_err)
# rnd_errs = []
# for test_number in np.arange(test_numbers, 0, -1):
# rnd_err = np.linalg.norm(r[l - test_number] - (1/138) * np.ones(138), 2)
# rnd_errs.append(rnd_err)
plt.plot(errs)
# plt.plot(rnd_errs)
plt.plot(mean_errs)
plt.plot(last_errs)
plt.legend(['Time-varying Markov Chains', 'Average', 'Last']);
# plt.legend(['Time-varying Markov Chains', 'Random', 'Average', 'Last']);
# # Saving the estimated transition matrices (P).
# estimated_matrices = []
# for i in range(len(P)):
# estimated_matrices.append(P[i].value)
# with open('estimated_matrices_alpha.pk', 'wb') as f:
# pk.dump(estimated_matrices, f)
# # Loading.
# with open('estimated_matrices_alpha.pk', 'rb') as f:
# estimated_matrices = pk.load(f)
errs
mean_errs
last_errs
sns.set(rc={'figure.figsize': (6, 4)})
diff = []
for i in range(1, l-2):
diff.append(np.linalg.norm(P[i].value - P[i-1].value))
plt.plot(diff);
sns.set(rc={'figure.figsize': (14, 6)})
legends = []
for i, transition_matrix in enumerate(P):
st_dist = network_utils.get_stationary_distribution(np.asarray(transition_matrix.value))
plt.plot(st_dist)
# legends.append(i)
# plt.legend(legends)
self_transitive_means = []
self_nontransitive_means = []
nontransitive_to_transitive_means = []
transitive_to_nontransitive_means = []
self_transitive_stds = []
self_nontransitive_stds = []
nontransitive_to_transitive_stds = []
transitive_to_nontransitive_stds = []
for matrix in P:
trans_matrix = matrix.value
probs = np.sum(trans_matrix[transitives, :][:, transitives], axis=1)
self_transitive_means.append(np.mean(probs))
self_transitive_stds.append(np.std(probs))
probs = np.sum(trans_matrix[~transitives, :][:, transitives], axis=1)
nontransitive_to_transitive_means.append(np.mean(probs))
nontransitive_to_transitive_stds.append(np.std(probs))
probs = np.sum(trans_matrix[~transitives, :][:, ~transitives], axis=1)
transitive_to_nontransitive_means.append(np.mean(probs))
transitive_to_nontransitive_stds.append(np.std(probs))
probs = np.sum(trans_matrix[transitives, :][:, ~transitives], axis=1)
self_nontransitive_means.append(np.mean(probs))
self_nontransitive_stds.append(np.std(probs))
plt.errorbar(x=np.arange(l-2), y=self_transitive_means, yerr=self_transitive_stds, fmt='r')
plt.errorbar(x=np.arange(l-2), y=nontransitive_to_transitive_means, yerr=nontransitive_to_transitive_stds, fmt='g')
plt.errorbar(x=np.arange(l-2), y=self_nontransitive_means, yerr=self_nontransitive_stds, fmt='b')
plt.errorbar(x=np.arange(l-2), y=transitive_to_nontransitive_means, yerr=transitive_to_nontransitive_stds, fmt='k')
plt.legend(['self transitive', 'nontransitive to transitive', 'self nontransitive', 'transitive to nontransitive']);
# plt.errorbar(x=np.arange(39), y=self_transitive_means) #, yerr=self_transitive_stds)
# plt.errorbar(x=np.arange(39), y=nontransitive_to_transitive_means) #, yerr=nontransitive_to_transitive_stds)
# plt.errorbar(x=np.arange(39), y=self_nontransitive_means) #, yerr=self_nontransitive_stds)
# plt.errorbar(x=np.arange(39), y=transitive_to_nontransitive_means) #, yerr=transitive_to_nontransitive_stds)
# plt.legend(['self transitive', 'nontransitive to transitive', 'self nontransitive', 'transitive to nontransitive']);
trans_matrix = P[-1].value
# trans_matrix= estimated_matrices[-1]
sns.set(rc={'figure.figsize': (6, 4)})
probs = np.sum(trans_matrix[transitives, :][:, transitives], axis=1)
plt.hist(probs)
print('Transition probability of "transitive to self": {} +- {}'.format(
round(np.mean(probs), 2), round(np.std(probs), 2)))
probs = np.sum(trans_matrix[~transitives, :][:, transitives], axis=1)
plt.hist(probs)
print('Transition probability of "not transitive to transitive": {} +- {}'.format(
round(np.mean(probs), 2), round(np.std(probs), 2)))
probs = np.sum(trans_matrix[~transitives, :][:, ~transitives], axis=1)
plt.hist(probs)
print('Transition probability of "not transitive to self": {} +- {}'.format(
round(np.mean(probs), 2), round(np.std(probs), 2)))
probs = np.sum(trans_matrix[transitives, :][:, ~transitives], axis=1)
plt.hist(probs)
print('Transition probability of "transitive to not transitive": {} +- {}'.format(
round(np.mean(probs), 2), round(np.std(probs), 2)))
plt.legend(['self balanced', 'unbalanced to balanced', 'self unbalanced', 'balanced to unbalanced'])
plt.xlabel('Probability')
plt.ylabel('#Triads');
sns.set(rc={'figure.figsize': (6, 4)})
probs = np.sum(trans_matrix[ch, :][:, ch], axis=1)
plt.hist(probs)
print('Transition probability of "C&H balance to self": {} +- {}'.format(
round(np.mean(probs), 2), round(np.std(probs), 2)))
probs = np.sum(trans_matrix[~ch, :][:, ch], axis=1)
plt.hist(probs)
print('Transition probability of "not C&H balance to C&H balance": {} +- {}'.format(
round(np.mean(probs), 2), round(np.std(probs), 2)))
probs = np.sum(trans_matrix[~ch, :][:, ~ch], axis=1)
plt.hist(probs)
print('Transition probability of "not C&H balance to self": {} +- {}'.format(
round(np.mean(probs), 2), round(np.std(probs), 2)))
probs = np.sum(trans_matrix[ch, :][:, ~ch], axis=1)
plt.hist(probs)
print('Transition probability of "C&H balance to not C&H balance": {} +- {}'.format(
round(np.mean(probs), 2), round(np.std(probs), 2)))
plt.legend(['self balanced', 'unbalanced to balanced', 'self unbalanced', 'balanced to unbalanced']);
sns.set(rc={'figure.figsize': (6, 4)})
probs = np.sum(trans_matrix[cluster, :][:, cluster], axis=1)
plt.hist(probs)
print('Transition probability of "clustering to self": {} +- {}'.format(
round(np.mean(probs), 2), round(np.std(probs), 2)))
probs = np.sum(trans_matrix[~cluster, :][:, cluster], axis=1)
plt.hist(probs)
print('Transition probability of "not clustering to clustering": {} +- {}'.format(
round(np.mean(probs), 2), round(np.std(probs), 2)))
probs = np.sum(trans_matrix[~cluster, :][:, ~cluster], axis=1)
plt.hist(probs)
print('Transition probability of "not clustering to self": {} +- {}'.format(
round(np.mean(probs), 2), round(np.std(probs), 2)))
probs = np.sum(trans_matrix[cluster, :][:, ~cluster], axis=1)
plt.hist(probs)
print('Transition probability of "clustering to not clustering": {} +- {}'.format(
round(np.mean(probs), 2), round(np.std(probs), 2)))
plt.legend(['self balanced', 'unbalanced to balanced', 'self unbalanced', 'balanced to unbalanced'])
plt.xlabel('Probability')
plt.ylabel('#Triads');
plt.savefig('country_clustering_transitionprobabilities.png');
###Output
Transition probability of "clustering to self": 0.91 +- 0.18
Transition probability of "not clustering to clustering": 0.72 +- 0.23
Transition probability of "not clustering to self": 0.28 +- 0.23
Transition probability of "clustering to not clustering": 0.09 +- 0.18
###Markdown
Specific triads transitions in different transition probability matrices
###Code
def print_those(from_triad, to_triad):
probs = []
for l in range(len(T)):
probs.append(
T[l][from_triad, to_triad])
print('{} +- {}'.format(np.mean(probs), np.std(probs)))
probs = []
for l in range(len(estimated_matrices)):
probs.append(
estimated_matrices[l][from_triad, to_triad])
print('{} +- {}\n'.format(np.mean(probs), np.std(probs)))
# transitivity balanced
print_those(from_triad=8, to_triad=22)
#classically balanced
print_those(from_triad=18, to_triad=33)
print_those(from_triad=15, to_triad=26)
print_those(from_triad=11, to_triad=37)
np.where(estimated_matrices[0] > 0.99)
reload()
utils.plot_box_plot_for_transitions(estimated_matrices[-1], transitives, 'Bitcoin Alpha')
# reload()
# utils.plot_box_plot_for_transitions(
# estimated_matrices[-1], transitives, 'Bitcoin_Alpha_transitivity', 'Bitcoin Alpha')
reload()
utils.plot_box_plot_for_transitions(
estimated_matrices[-1], transitives, False, 'Bitcoin_Alpha_transitivity')
# reload()
# utils.plot_box_plot_for_transitions(
# estimated_matrices[-1], transitives, 'Bitcoin_Alpha_clustering', 'Bitcoin Alpha')
reload()
utils.plot_box_plot_for_transitions(
estimated_matrices[-1], cluster, False, 'Bitcoin_Alpha_clustering')
reload()
utils.plot_box_plot_for_transitions(
estimated_matrices[-1], ch, False, 'Bitcoin_Alpha_classical')
trans_matrix = estimated_matrices[-1]
# trans_matrix = estimated_matrices[-1]
sns.set(rc={'figure.figsize': (6, 4)})
probs = np.sum(trans_matrix[transitives, :][:, transitives], axis=1)
plt.hist(probs)
print('Transition probability of "transitive to self": {} +- {}'.format(
round(np.mean(probs), 2), round(np.std(probs), 2)))
probs = np.sum(trans_matrix[~transitives, :][:, transitives], axis=1)
plt.hist(probs)
print('Transition probability of "not transitive to transitive": {} +- {}'.format(
round(np.mean(probs), 2), round(np.std(probs), 2)))
probs = np.sum(trans_matrix[transitives, :][:, ~transitives], axis=1)
plt.hist(probs)
print('Transition probability of "transitive to not transitive": {} +- {}'.format(
round(np.mean(probs), 2), round(np.std(probs), 2)))
probs = np.sum(trans_matrix[~transitives, :][:, ~transitives], axis=1)
plt.hist(probs)
print('Transition probability of "not transitive to self": {} +- {}'.format(
round(np.mean(probs), 2), round(np.std(probs), 2)))
plt.xlabel('Probability')
plt.ylabel('#Triads');
plt.legend(['balanced -> balanced', 'unbalanced -> balanced', 'balanced -> unbalanced', 'unbalanced -> unbalanced'])
# plt.title('(a)', weight='bold')
plt.savefig('BitcoinAlpha_transitivity_transitionprobabilities.pdf');
bins = 5
sns.set(rc={'figure.figsize': (6, 4)})
probs = np.sum(trans_matrix[transitives, :][:, transitives], axis=1)
sns.distplot(probs, bins=bins)
print('Transition probability of "transitive to self": {} +- {}'.format(
round(np.mean(probs), 2), round(np.std(probs), 2)))
probs = np.sum(trans_matrix[~transitives, :][:, transitives], axis=1)
sns.distplot(probs, bins=bins)
print('Transition probability of "not transitive to transitive": {} +- {}'.format(
round(np.mean(probs), 2), round(np.std(probs), 2)))
probs = np.sum(trans_matrix[transitives, :][:, ~transitives], axis=1)
sns.distplot(probs, bins=bins)
print('Transition probability of "transitive to not transitive": {} +- {}'.format(
round(np.mean(probs), 2), round(np.std(probs), 2)))
probs = np.sum(trans_matrix[~transitives, :][:, ~transitives], axis=1)
sns.distplot(probs, bins=bins)
print('Transition probability of "not transitive to self": {} +- {}'.format(
round(np.mean(probs), 2), round(np.std(probs), 2)))
plt.xlabel('Probability')
plt.ylabel('#Triads');
plt.legend([r'B $\rightarrow$ B', r'U $\rightarrow$ B', r'B $\rightarrow$ U', r'U $\rightarrow$ U'], loc='upper center')
plt.savefig('BitcoinAlpha_transitivity_transitionprobabilities_kde.pdf');
sns.set_style('white', rc={'figure.figsize': (6, 4)})
probs1 = np.sum(trans_matrix[transitives, :][:, transitives], axis=1)
probs2 = np.sum(trans_matrix[~transitives, :][:, transitives], axis=1)
probs3 = np.sum(trans_matrix[transitives, :][:, ~transitives], axis=1)
probs4 = np.sum(trans_matrix[~transitives, :][:, ~transitives], axis=1)
colors = ['#e66101', '#fdb863', '#b2abd2', '#5e3c99']
plt.hist([probs1, probs2, probs3, probs4], color=colors)
plt.xlabel('Probability')
plt.ylabel('#Triads');
plt.legend([r'B $\rightarrow$ B', r'U $\rightarrow$ B', r'B $\rightarrow$ U', r'U $\rightarrow$ U'], loc='upper center')
plt.savefig('BitcoinAlpha_transitivity_transitionprobabilities_binbeside.pdf');
def set_the_hatch(bars, hatch):
for patch in bars.patches:
if not patch.get_hatch():
patch.set_hatch(hatch)
sns.set_style('white', rc={'figure.figsize': (6, 4)})
ax = plt.gca()
bins = np.arange(0, 1, 0.05)
alpha = 1
# Define some hatches
hatches = ['-', '+', 'x', '\\', '*', 'o']
probs1 = np.sum(trans_matrix[transitives, :][:, transitives], axis=1)
probs2 = np.sum(trans_matrix[~transitives, :][:, transitives], axis=1)
probs3 = np.sum(trans_matrix[transitives, :][:, ~transitives], axis=1)
probs4 = np.sum(trans_matrix[~transitives, :][:, ~transitives], axis=1)
bars = sns.distplot(probs1, bins=bins, norm_hist=False, hist_kws={"linewidth": 3, "alpha": alpha})
set_the_hatch(bars, hatches[1])
bars = sns.distplot(probs2, bins=bins, norm_hist=False, hist_kws={"linewidth": 3, "alpha": alpha})
set_the_hatch(bars, hatches[4])
bars = sns.distplot(probs3, bins=bins, norm_hist=False, hist_kws={"linewidth": 3, "alpha": alpha})
set_the_hatch(bars, hatches[3])
bars = sns.distplot(probs4, bins=bins, norm_hist=False, hist_kws={"linewidth": 3, "alpha": alpha})
set_the_hatch(bars, hatches[5])
ax.set_xlim([0, 1])
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.xaxis.grid(b=True, which='major', linestyle='--')
ax.xaxis.grid(b=True, which='minor', linestyle=':')
ax.yaxis.grid(b=True, which='major', linestyle='--')
plt.tight_layout()
plt.xlabel('Probability')
plt.ylabel('#Transitions');
plt.legend([r'B $\rightarrow$ B', r'U $\rightarrow$ B', r'B $\rightarrow$ U', r'U $\rightarrow$ U'], loc='upper center');
plt.savefig('BitcoinAlpha_transitivity_transitionprobabilities_kde2.pdf');
sns.set_style('white', rc={'figure.figsize': (6, 4)})
ax = plt.gca()
bins = np.arange(0, 1, 0.05)
alpha = 0.5
# Define some hatches
hatches = ['-', '+', 'x', '\\', '*', 'o']
probs1 = np.sum(trans_matrix[transitives, :][:, transitives], axis=1)
probs2 = np.sum(trans_matrix[~transitives, :][:, transitives], axis=1)
probs3 = np.sum(trans_matrix[transitives, :][:, ~transitives], axis=1)
probs4 = np.sum(trans_matrix[~transitives, :][:, ~transitives], axis=1)
sns.distplot(probs1, bins=bins, norm_hist=False, hist_kws={"linewidth": 3, "alpha": alpha})
sns.distplot(probs2, bins=bins, norm_hist=False, hist_kws={"linewidth": 3, "alpha": alpha})
sns.distplot(probs3, bins=bins, norm_hist=False, hist_kws={"linewidth": 3, "alpha": alpha})
sns.distplot(probs4, bins=bins, norm_hist=False, hist_kws={"linewidth": 3, "alpha": alpha})
ax.set_xlim([0, 1])
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.xaxis.grid(b=True, which='major', linestyle='--')
ax.xaxis.grid(b=True, which='minor', linestyle=':')
ax.yaxis.grid(b=True, which='major', linestyle='--')
plt.tight_layout(pad=1.5)
plt.xlabel('Probability')
plt.ylabel('#Transitions');
plt.legend([r'B $\rightarrow$ B', r'U $\rightarrow$ B', r'B $\rightarrow$ U', r'U $\rightarrow$ U'], loc='upper center');
plt.savefig('BitcoinAlpha_transitivity_transitionprobabilities_kde.pdf');
###Output
/home/omid/.local/lib/python3.5/site-packages/scipy/stats/stats.py:1706: FutureWarning: Using a non-tuple sequence for multidimensional indexing is deprecated; use `arr[tuple(seq)]` instead of `arr[seq]`. In the future this will be interpreted as an array index, `arr[np.array(seq)]`, which will result either in an error or a different result.
return np.add.reduce(sorted[indexer] * weights, axis=axis) / sumval
|
notebooks/7. Augmenting Images.ipynb | ###Markdown
Augmenting Images
###Code
#Import the required libraries
import numpy as np
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.utils import np_utils
from keras.optimizers import SGD
from PIL import Image
import matplotlib.pyplot as plt
import scipy as sp
%matplotlib inline
path_to_data = ""
#Load the training and testing data
(X_train, y_train), (X_test, y_test) = mnist.load_data() #path_to_data)
# (X_train, y_train), (X_test, y_test) = mnist.load_data(path_to_data)
img_rows, img_cols = 28, 28
X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
#Seed for reproducibilty
np.random.seed(1338)
#test data
X_test = X_test.copy()
Y = y_test.copy()
#Converting the output to binary classification(Six=1,Not Six=0)
Y_test = Y == 6
Y_test = Y_test.astype(int)
#Selecting the 5918 examples where the output is 6
X_six = X_train[y_train == 6].copy()
Y_six = y_train[y_train == 6].copy()
#Selecting the examples where the output is not 6
X_not_six = X_train[y_train != 6].copy()
Y_not_six = y_train[y_train != 6].copy()
#Selecting 6000 random examples from the data that contains only the data where the output is not 6
random_rows = np.random.randint(0,X_six.shape[0],6000)
X_not_six = X_not_six[random_rows]
Y_not_six = Y_not_six[random_rows]
#Appending the data with output as 6 and data with output as not six
X_train = np.append(X_six,X_not_six)
#Reshaping the appended data to appropraite form
X_train = X_train.reshape(X_six.shape[0] + X_not_six.shape[0], 1, img_rows, img_cols)
#Appending the labels and converting the labels to binary classification(Six=1,Not Six=0)
Y_labels = np.append(Y_six,Y_not_six)
Y_train = Y_labels == 6
Y_train = Y_train.astype(int)
print(X_train.shape, Y_labels.shape, Y_test.shape, Y_test.shape)
#Converting the classes to its binary categorical form
nb_classes = 2
Y_train = np_utils.to_categorical(Y_train, nb_classes)
Y_test = np_utils.to_categorical(Y_test, nb_classes)
###Output
_____no_output_____
###Markdown
Rotating the images
###Code
#Initializing the array which will contain images rotated by 15 degrees anti clockwise
anti_X_train = sp.misc.imrotate(X_train[0].reshape(28,28), angle = 15)
anti_X_train = anti_X_train.reshape(1, 28,28)
#Initializing the array which will contain images rotated by 15 degrees clockwise
clock_X_train = sp.misc.imrotate(X_train[0].reshape(28,28), angle = -15)
clock_X_train = clock_X_train.reshape(1, 28,28)
%%time
#Performing clockwise and anticlockwise rotation for the rest of the images. Again reshaping needs to be done
#below for the same reason as described above
for i in range(1,len(X_train)):
rotate_anti = sp.misc.imrotate(X_train[i].reshape(28,28), angle = 15)
rotate_anti = rotate_anti.reshape(1, 28,28)
rotate_clock = sp.misc.imrotate(X_train[i].reshape(28,28), angle = -15)
rotate_clock = rotate_clock.reshape(1, 28,28)
#Appending the rotated images to the resoective arrays
anti_X_train = np.append(anti_X_train,rotate_anti,axis=0)
clock_X_train = np.append(clock_X_train,rotate_clock,axis=0)
#Displaying the original and rotated images
def image_compare(original,clockwise,anticlockwise):
original = original.reshape(28,28)
plt.figure(figsize=(20, 6))
ax = plt.subplot(1, 3, 1)
plt.imshow(original)
plt.xlabel('ORIGINAL')
plt.gray()
ax = plt.subplot(1, 3, 2)
plt.imshow(clockwise)
plt.xlabel('ROTATED CLOCKWISE')
plt.gray()
ax = plt.subplot(1, 3, 3)
plt.imshow(anticlockwise)
plt.xlabel('ROTATED ANTI-CLOCKWISE')
plt.gray()
plt.show()
image_compare(X_train[0],clock_X_train[0],anti_X_train[0])
image_compare(X_train[11100],clock_X_train[11100],anti_X_train[11100])
###Output
_____no_output_____
###Markdown
Exercise: Print some more digits and see how the rotation has happened (a possible answer is sketched at the top of the next cell).
###Code
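# A possible answer to the exercise above; the indices are arbitrary examples
# (an assumption), any valid row indices work. It reuses the image_compare
# helper defined earlier, before the arrays are flattened below.
for idx in [5, 500, 5000]:
    image_compare(X_train[idx], clock_X_train[idx], anti_X_train[idx])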
# Append the datasets to form the updated training dataset
print(X_train.shape, clock_X_train.shape, anti_X_train.shape)
X_train = X_train.reshape(len(X_train), 784)
anti_X_train = anti_X_train.reshape(len(anti_X_train), 784)
clock_X_train = clock_X_train.reshape(len(clock_X_train), 784)
print(X_train.shape, clock_X_train.shape, anti_X_train.shape)
rotated_X_train = np.concatenate((X_train, anti_X_train, clock_X_train), axis=0)
rotated_X_train.shape
rotated_Y_train = np.concatenate((Y_train, Y_train, Y_train), axis=0)
rotated_Y_train.shape
X_test = X_test.reshape(len(X_test), 784)
###Output
_____no_output_____
###Markdown
A simple MLP
###Code
sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
nb_epoch=50
%%time
model = Sequential()
model.add(Dense(512, input_shape=(784,)))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy',
optimizer=sgd,  # use the SGD instance with momentum defined above
metrics=['accuracy'])
model.fit(rotated_X_train, rotated_Y_train, batch_size=128, nb_epoch=nb_epoch,verbose=1,
validation_data=(X_test, Y_test))
score = model.evaluate(X_test, Y_test, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])
###Output
_____no_output_____ |
asn1/KNN/Knn.ipynb | ###Markdown
Importing Libraries
###Code
import pandas as pd
import numpy as np
from sklearn.neighbors import KNeighborsClassifier
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA as sklearnPCA
###Output
_____no_output_____
###Markdown
Class for building random data, splitting it in a given ratio, and plotting the data
###Code
class Build_data:
def __init__(self,n_data,features,classes,max_val,split):
self.train_data=np.array([])
self.test_data=np.array([])
self.data=np.array([])
self.n_data=n_data
self.features=features
self.classes=classes
self.max_val=max_val
self.split=split
# creating the dataset randomly
def create_dataset(self):
unique = 0
while unique!=self.n_data:
for i in range(self.n_data-unique):
# appending index
self.data=np.append(self.data,[i+1])
for j in range(self.features):
# appenind features
self.data=np.append(self.data,[np.random.randint(0,self.max_val+1)])
# appending class label
self.data=np.append(self.data,[np.random.randint(0,self.classes)])
# reshaping to make a matrix
self.data=np.reshape(self.data,(self.n_data,self.features+2))
# Conforming with the uniqueness of data
self._data = [list(x for x in set(tuple(x) for x in self.data))]
unique = len(self.data)
# preprocessing the dataset , splitting the dataset into test and train
def process_dataset(self):
# randomly shuffling the data
clone_data=np.random.permutation(self.data)
# splitting it ,80% to training set,remaining to test
self.train_data = clone_data[:int((len(self.data)+1)*self.split/100)]
self.test_data = clone_data[int(len(self.data)*self.split/100):]
def create_csv(self):
#saving the dataset ,test set , and train set to csv files
np.savetxt("dataset.csv",self.data,delimiter=",")
np.savetxt("train_data.csv",self.train_data, delimiter=",")
np.savetxt("test_data.csv",self.test_data,delimiter=",")
# Plotting the data , I have plotted the distance of the data point against its first feature
# no normalization was required as as the range value of all the features is same
def plot_data(self):
train_dist0=[]
train_label0=[]
train_dist1=[]
train_label1=[]
test_dist0=[]
test_label0=[]
test_dist1=[]
test_label1=[]
for train_dp in self.train_data:
if train_dp[7]==0:
train_label0.append(train_dp[1])
train_dist0.append((np.sum(train_dp[1:7]**2))**0.5)
else :
train_label1.append(train_dp[1])
train_dist1.append((np.sum(train_dp[1:7]**2))**0.5)
for test_dp in self.test_data:
if test_dp[7]==0:
test_label0.append(test_dp[1])
test_dist0.append((np.sum(test_dp[1:7]**2))**0.5)
else :
test_label1.append(test_dp[1])
test_dist1.append((np.sum(test_dp[1:7]**2))**0.5)
plt.figure(figsize=(9,7))
plt.plot(train_label0,train_dist0,'r.' ,label='Train label 0')
plt.plot(train_label1,train_dist1,'b.',label='Train label 1')
plt.plot(test_label0,test_dist0,'g.',label='Test label 0')
plt.plot(test_label1,test_dist1,'y.',label='Test label 1')
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
creating a class object and plotting the data
###Code
c1 = Build_data(1000,6,2,100,80)
c1.create_dataset()
c1.process_dataset()
c1.create_csv()
c1.plot_data()
###Output
_____no_output_____
###Markdown
Class where the custom Knn and the scikit-learn Knn are implemented
###Code
class Knn:
def __init__(self,k,features):
self.k=k
self.features=features
# training and testing Knn on the built dataset
def run(self):
# dist list array's first value is the distance of test dp from a train dp,
# 2nd value is 1 if the both the classes of both the points match otherwise it is 0
positive=0
train_data=np.genfromtxt('train_data.csv',delimiter=',')
test_data=np.genfromtxt('test_data.csv',delimiter=',')
# for each point in the test set , predicting its class
for test in test_data:
dist=np.array([[0,0]]) # appending 0 0 dummy array for later concatenating ,this is a programming trick , we dont use this value
true=0
x=test[1:self.features+1]
y=test[self.features+1]
# calculating the test dp distance form all the training data points
max_dist=0
n_elemnts=0
for train in train_data:
xt=train[1:self.features+1]
yt=train[self.features+1]
train_distance=np.sum((x-xt)**2) # calculating only the sum of squared differences of coordinates as it gives a similar measure of distance
if max_dist<train_distance: # after encountering 50 datapoint if we get a train point far away(wrt those 50 points) , ignore it
if n_elemnts<51:
n_elemnts=n_elemnts+1
max_dist = train_distance
dist=np.concatenate((dist,[[train_distance,int(y==yt)]]),axis=0)
else :
n_elemnts = n_elemnts+1
dist=np.concatenate((dist,[[train_distance,int(y==yt)]]),axis=0)
# sorting the distance array , the first value
dist=dist[np.argsort(dist[:, 0])]
# selecting the k best training datapoints based on distance
# and checking if majority of training points had the same class of test dp
for kn in range(1,self.k+1): # start from 1 as the 0th dimensional array is a dummy
if dist[kn][1]==1: # dist[kn][1]==1 if the knth nearest point have same label as test datapoint
true=true+1
# if majority have same class as test dp , then our prediction is correct otherwise wrong
if true>self.k-true:
positive=positive+1
elif true==self.k-true:
if np.random.randint(0,2) == 1:
positive=positive+1
# reinitializing the dist array for the remaining test dps
dist=np.array([[0,0]])
# calculating and returning the accuracy
accuracy=float(positive)/len(test_data)
return accuracy
# scikit implementation of knn
def scikit_run(self):
train_data = np.genfromtxt('train_data.csv',delimiter=',')
test_data = np.genfromtxt('test_data.csv',delimiter=',')
x_train = train_data[:, 1:self.features+1]
y_train = train_data[:,self.features+1]
x_test = test_data[:, 1:self.features+1]
y_test = test_data[:,self.features+1]
knn = KNeighborsClassifier(n_neighbors=self.k)
# fitting the model
knn.fit(x_train, y_train)
# predict the response
pred = knn.predict(x_test)
# evaluate accuracy
accuracy=knn.score(x_test, y_test)
return accuracy
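# A minimal vectorized alternative to Knn.run (a sketch, not used by the class above).
# It assumes the same CSV layout as the class: column 0 is an index, columns
# 1..features are coordinates, column features+1 is the 0/1 label, and it breaks
# 50/50 ties toward class 1 instead of tossing a coin.
def knn_accuracy_vectorized(k, features,
                            train_file='train_data.csv', test_file='test_data.csv'):
    train = np.genfromtxt(train_file, delimiter=',')
    test = np.genfromtxt(test_file, delimiter=',')
    x_train, y_train = train[:, 1:features + 1], train[:, features + 1]
    x_test, y_test = test[:, 1:features + 1], test[:, features + 1]
    # squared Euclidean distance between every test point and every training point
    d2 = ((x_test[:, None, :] - x_train[None, :, :]) ** 2).sum(axis=2)
    # indices of the k nearest training points for each test point
    nearest = np.argsort(d2, axis=1)[:, :k]
    votes = y_train[nearest].mean(axis=1)   # fraction of the k neighbours in class 1
    predictions = (votes >= 0.5).astype(float)
    return float(np.mean(predictions == y_test))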
###Output
_____no_output_____
###Markdown
Running the custom and scikit-learn KNN implementations
###Code
import time
my_accuracy=[]
scikit_accuracy=[]
my_runtime=[]
scikit_runtime=[]
k1=np.array([i for i in range(1,22)])
for k in range(1,22):
    example1=Knn(k,6)
    print("k = ", k)
    start = time.perf_counter()
    accuracy = example1.run()
    end = time.perf_counter()
    print("My accuracy = ", accuracy)
    my_accuracy += [accuracy]
    my_runtime += [end-start]
    print("My time = ", end-start)
    print("\n")
    start = time.perf_counter()
    accuracy = example1.scikit_run()
    end = time.perf_counter()
    print("Scikit accuracy = ", accuracy)
    scikit_accuracy += [accuracy]
    scikit_runtime += [end-start]
    print("Scikit Time = ", end-start)
    print("\n")
###Output
k = 1
My accuracy = 0.5
My time = 1.477181
Scikit accuracy = 0.5
Scikit Time = 0.01307
k = 2
My accuracy = 0.48
My time = 1.492728
Scikit accuracy = 0.47
Scikit Time = 0.012706
k = 3
My accuracy = 0.45
My time = 1.521324
Scikit accuracy = 0.45
Scikit Time = 0.0132
k = 4
My accuracy = 0.455
My time = 1.762708
Scikit accuracy = 0.435
Scikit Time = 0.013583
k = 5
My accuracy = 0.47
My time = 1.462436
Scikit accuracy = 0.47
Scikit Time = 0.013552
k = 6
My accuracy = 0.465
My time = 1.464596
Scikit accuracy = 0.435
Scikit Time = 0.01436
k = 7
My accuracy = 0.45
My time = 1.480391
Scikit accuracy = 0.45
Scikit Time = 0.013654
k = 8
My accuracy = 0.47
My time = 1.461039
Scikit accuracy = 0.46
Scikit Time = 0.01455
k = 9
My accuracy = 0.46
My time = 1.536168
Scikit accuracy = 0.46
Scikit Time = 0.031541
k = 10
My accuracy = 0.455
My time = 1.641023
Scikit accuracy = 0.45
Scikit Time = 0.01994
k = 11
My accuracy = 0.445
My time = 1.669256
Scikit accuracy = 0.45
Scikit Time = 0.014697
k = 12
My accuracy = 0.47
My time = 1.675602
Scikit accuracy = 0.425
Scikit Time = 0.015035
k = 13
My accuracy = 0.44
My time = 1.48201
Scikit accuracy = 0.44
Scikit Time = 0.016347
k = 14
My accuracy = 0.49
My time = 1.518797
Scikit accuracy = 0.44
Scikit Time = 0.015631
k = 15
My accuracy = 0.48
My time = 2.004459
Scikit accuracy = 0.48
Scikit Time = 0.015767
k = 16
My accuracy = 0.455
My time = 2.110559
Scikit accuracy = 0.455
Scikit Time = 0.0169
k = 17
My accuracy = 0.465
My time = 1.545139
Scikit accuracy = 0.465
Scikit Time = 0.017106
k = 18
My accuracy = 0.445
My time = 1.541602
Scikit accuracy = 0.46
Scikit Time = 0.016883
k = 19
My accuracy = 0.46
My time = 1.541004
Scikit accuracy = 0.46
Scikit Time = 0.017254
k = 20
My accuracy = 0.465
My time = 1.54495
Scikit accuracy = 0.465
Scikit Time = 0.022324
k = 21
My accuracy = 0.46
My time = 1.549506
Scikit accuracy = 0.465
Scikit Time = 0.017501
###Markdown
Plotting accuracy and runtime for the custom and scikit-learn KNN implementations
###Code
# Custom accuracy and runtime are shown in red, scikit-learn's in blue
plt.plot(k1,my_accuracy,'r',label='My accuracy')
plt.plot(k1,scikit_accuracy,'b',label='Scikit Accuracy')
plt.legend()
plt.show()
plt.plot(k1,my_runtime,'r',label='My Time')
plt.plot(k1,scikit_runtime,'b',label='Scikit Time')
plt.legend()
plt.show()
###Output
_____no_output_____ |
HousepriceAnalysis.ipynb | ###Markdown
Results
- Models used with hyperparameters: KNN regressor, linear regression, linear regression with SGD, Ridge, Lasso, ElasticNet, polynomial regression, SVM (simple and with rbf, poly, and sigmoid kernels), Decision Tree regression, two models with Pasting, two models with Bagging, Random Forest, AdaBoost (with decision tree), Gradient Boosting, Extra-Trees, XGBoost, Voting Regressor combining the top 5 models, Voting Regressor combining the least-correlated models, Stacking Regressor combining the top 5 models, Stacking Regressor combining the least-correlated models
- Best model parameters: 'learning_rate': 0.1, 'max_depth': 4, 'min_child_weight': 1, 'n_estimators': 150, 'subsample': 0.8 (XGBoost)
- Mean cross-validation score of best model: 0.8981992683459357 (XGBoost)
- Test score of best model: 0.8776048030903614 (XGBoost)
- Train score of best model: 0.979396879296572 (XGBoost)
- r2_score of best model: 0.8776048030903614 (XGBoost)
Data PreProcessing
###Code
from math import sqrt
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import scipy.stats as stats
pd.pandas.set_option('display.max_columns', None)
%matplotlib inline
###Output
_____no_output_____
###Markdown
Load Datasets
###Code
# load dataset
# your code here
data = pd.read_csv(r"C:\Users\Rahul\Downloads\houseprice.csv")
###Output
_____no_output_____
###Markdown
Types of variables
###Code
# we have an Id variable, that we should not use for predictions:
print('Number of House Id labels: ', len(data.Id.unique()))
print('Number of Houses in the Dataset: ', len(data))
###Output
Number of House Id labels: 1460
Number of Houses in the Dataset: 1460
###Markdown
Find categorical variables
###Code
# find categorical variables- hint data type = 'O'
categorical = [var for var in data.columns if data[var].dtype=='O']
print(f'There are {len(categorical)} categorical variables')
###Output
There are 43 categorical variables
###Markdown
Find temporal variables
###Code
# make a list of the numerical variables first (data type != 'O')
numerical = [var for var in data.columns if data[var].dtype!='O']
# list of variables that contain year information (variable name contains 'Yr' or 'Year')
year_vars = [var for var in numerical if 'Yr' in var or 'Year' in var]
year_vars
###Output
_____no_output_____
###Markdown
Find discrete variables. Discrete variables are identified here as numerical variables with fewer than 20 unique values.
###Code
# let's visualise the values of the discrete variables
discrete = [var for var in numerical if len(data[var].unique()) < 20 and var not in year_vars]
print(f'There are {len(discrete)} discrete variables')
###Output
There are 14 discrete variables
###Markdown
Continuous variables
###Code
# find continuous variables: numerical variables not in discrete and not in year_vars.
# Also remove the Id variable and the target variable SalePrice
# which are both also numerical
continuous = [var for var in numerical if var not in discrete and var not in [
'Id', 'SalePrice'] and var not in year_vars]
print('There are {} numerical and continuous variables'.format(len(numerical)))
###Output
There are 38 numerical and continuous variables
###Markdown
Separate train and test set
###Code
# Let's separate into train and test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(data.drop(['Id', 'SalePrice'], axis=1),
data['SalePrice'],
test_size=0.1,
random_state=0)
X_train.shape, X_test.shape
###Output
_____no_output_____
###Markdown
**Now we will move on and engineer the features of this dataset. The most important part for this course.** Create New Variables. Replace 'YearBuilt', 'YearRemodAdd', and 'GarageYrBlt' with the time elapsed since YrSold, i.e. YearBuilt = YrSold - YearBuilt, and similarly for 'YearRemodAdd' and 'GarageYrBlt'. After making this transformation, drop YrSold.
###Code
# function to calculate elapsed time
def elapsed_years(df, var):
# capture difference between year variable and
# year the house was sold
df[var] = df['YrSold'] - df[var]
return df
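# For example (illustrative values only): a house with YearBuilt = 1990 that was
# sold in YrSold = 2008 ends up with YearBuilt = 2008 - 1990 = 18, its age at sale.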
for var in ['YearBuilt', 'YearRemodAdd', 'GarageYrBlt']:
X_train = elapsed_years(X_train, var)
X_test = elapsed_years(X_test, var)
# drop YrSold
X_train.drop('YrSold', axis=1, inplace=True)
X_test.drop('YrSold', axis=1, inplace=True)
year_vars.remove('YrSold')
# capture the column names for use later in the notebook
final_columns = X_train.columns
final_columns
###Output
_____no_output_____
###Markdown
Feature Engineering Pipeline
###Code
# I will treat discrete variables as if they were categorical
# to treat discrete as categorical using Feature-engine
# we need to re-cast them as object
X_train[discrete] = X_train[discrete].astype('O')
X_test[discrete] = X_test[discrete].astype('O')
# import relevant modules for feature engineering
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from feature_engine import missing_data_imputers as mdi
from feature_engine import categorical_encoders as ce
from feature_engine.variable_transformers import YeoJohnsonTransformer
from sklearn.preprocessing import StandardScaler
from feature_engine.discretisers import DecisionTreeDiscretiser
house_preprocess = Pipeline([
# missing data imputation
('missing_ind', mdi.AddNaNBinaryImputer(
variables=['LotFrontage', 'MasVnrArea', 'GarageYrBlt'])),
('imputer_num', mdi.MeanMedianImputer(imputation_method='mean',
variables=['LotFrontage', 'MasVnrArea', 'GarageYrBlt'])),
('imputer_cat', mdi.CategoricalVariableImputer(variables=categorical)),
# categorical encoding
('rare_label_enc', ce.RareLabelCategoricalEncoder(
tol=0.01,n_categories=6, variables=categorical+discrete)),
('categorical_enc', ce.MeanCategoricalEncoder(variables = categorical + discrete)),
# Transforming Numerical Variables
('yjt', YeoJohnsonTransformer(variables = ['LotFrontage','MasVnrArea', 'GarageYrBlt'])),
# discretisation and encoding
('treeDisc', DecisionTreeDiscretiser(cv=2, scoring='neg_mean_squared_error',
regression=True,
param_grid={'max_depth': [1,2,3,4,5,6]})),
# feature Scaling
('scaler', StandardScaler()),
])
house_preprocess.fit(X_train,y_train)
# Apply Transformations
X_train=house_preprocess.transform(X_train)
X_test=house_preprocess.transform(X_test)
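# A quick sanity check (a sketch): after the pipeline, X_train and X_test are plain
# numpy arrays of scaled, fully numeric features, so only shapes and basic
# statistics are inspected here.
print('Transformed train shape:', X_train.shape)
print('Transformed test shape :', X_test.shape)
print('Feature means (first 5):', X_train.mean(axis=0)[:5].round(3))
print('Feature stds  (first 5):', X_train.std(axis=0)[:5].round(3))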
###Output
_____no_output_____
###Markdown
Regression Models- Tune different models one by one
###Code
# Train a linear regression model, report the coefficients and model performance
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score
from sklearn.metrics import r2_score
lr = LinearRegression().fit(X_train, y_train)
cv_scores = cross_val_score(lr, X_train, y_train)
y_pred_linear = lr.predict(X_test)
# Mean Cross validation Score
print("Mean Cross-validation scores: {}".format(cv_scores.mean()))
# Print Co-efficients
print("lr.coef_:", lr.coef_)
print("lr.intercept_:", lr.intercept_)
# Check test data set performance
print("LR train score: ", lr.score(X_train,y_train))
print('r2_score: ', r2_score(y_test,y_pred_linear))
# Train a KNN regressor model
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KNeighborsRegressor
knn_reg = KNeighborsRegressor()
knn_param_grid = {'n_neighbors' : range(1,20), 'p': [1,2], 'weights': ['distance','uniform']}
grid_knn = GridSearchCV(estimator = knn_reg, param_grid = knn_param_grid, cv=5, return_train_score=True, n_jobs= -1)
grid_knn.fit(X_train, y_train)
y_pred_knn = grid_knn.predict(X_test)
best_parameters_knn=grid_knn.best_params_
print('train score: ', grid_knn.score(X_train, y_train))
# Mean Cross Validation Score
print("Best Mean Cross-validation score: {:.2f}".format(grid_knn.best_score_))
print()
#find best parameters
print('KNN parameters: ', grid_knn.best_params_)
# Check test data set performance
print("KNN Test Performance: ", grid_knn.score(X_test,y_test))
print('r2_score: ', r2_score(y_test,y_pred_knn))
# Train a Ridge regression model, report the coefficients, the best parameters, and model performance
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import Ridge
ridge = Ridge()
#define a list of parameters
param_ridge = {'alpha':[0.001, 0.01, 0.1, 1, 10, 100, 200] }
grid_ridge = GridSearchCV(ridge, param_ridge, cv=6, return_train_score = True)
grid_ridge.fit(X_train, y_train)
y_pred_ridge = grid_ridge.predict(X_test)
# Mean Cross Validation Score
print("Best Mean Cross-validation score: {:.2f}".format(grid_ridge.best_score_))
print('train score: ', grid_ridge.score(X_train, y_train))
#find best parameters
print('Ridge parameters: ', grid_ridge.best_params_)
# print co-eff
print("Ridge.coef_:", grid_ridge.best_estimator_.coef_)
print("Ridge.intercept_:", grid_ridge.best_estimator_.intercept_)
# Check test data set performance
print("Ridge Test Performance: ", grid_ridge.score(X_test,y_test))
print('r2_score: ', r2_score(y_test,y_pred_ridge))
# Train a Lasso regression model, report the coefficients, the best parameters, and model performance
# YOUR CODE HERE
from sklearn.linear_model import Lasso
lasso = Lasso(random_state=0)
#define a list of parameters
param_lasso = {'alpha':[0.0001, 0.001, 0.01, 0.1, 1, 10, 100, 200] }
grid_lasso = GridSearchCV(lasso, param_lasso, cv=6, return_train_score = True)
grid_lasso.fit(X_train, y_train)
y_pred_lasso = grid_lasso.predict(X_test)
# Mean Cross Validation Score
print("Best Mean Cross-validation score: {:.2f}".format(grid_lasso.best_score_))
print('train score: ', grid_lasso.score(X_train, y_train))
#find best parameters
print('Lasso parameters: ', grid_lasso.best_params_)
# print co-eff
print("Lasso.coef_:", grid_lasso.best_estimator_.coef_)
print("Lasso.intercept_:", grid_lasso.best_estimator_.intercept_)
# Check test data set performance
print("Lasso Test Performance: ", grid_lasso.score(X_test,y_test))
print('r2_score: ', r2_score(y_test,y_pred_lasso))
# Train a ElasticNet regression model
from sklearn.linear_model import ElasticNet
elasticnet = ElasticNet(max_iter=10000, tol=0.6)
#define a list of parameters
param_elasticnet = {'alpha':[0.00001, 0.0001, 0.001, 0.01, 0.1, 1, 10, 100], 'l1_ratio' :[0.2,0.4,0.6,0.8]}
grid_elasticnet = GridSearchCV(elasticnet , param_elasticnet, cv=5, return_train_score = True)
grid_elasticnet.fit(X_train, y_train)
y_pred_elasticnet = grid_elasticnet.predict(X_test)
grid_elasticnet_train_score = grid_elasticnet.score(X_train, y_train)
grid_elasticnet_test_score = grid_elasticnet.score(X_test, y_test)
print('Training set score: ', grid_elasticnet_train_score)
print('Test score: ', grid_elasticnet_test_score)
#find best parameters
print('Best parameters: ', grid_elasticnet.best_params_)
print('Best cross-validation score:', grid_elasticnet.best_score_)
print('r2_score: ', r2_score(y_test,y_pred_elasticnet))
# Train a linear regression with SGD model
from sklearn.linear_model import SGDRegressor
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import MinMaxScaler
# create pipeline
reg_sgd_pipe = Pipeline([
# feature Scaling
('scaler', MinMaxScaler()),
# regression
('sgd_reg', SGDRegressor(max_iter=10000, tol = 1e-6))
])
param_sgd = {'sgd_reg__eta0':[0.01, 0.05, 0.1 ,0.5]}
grid_sgd = GridSearchCV(reg_sgd_pipe, param_sgd,cv=5, n_jobs=-1, return_train_score = True)
grid_sgd.fit(X_train, y_train)
y_pred_sgd = grid_sgd.predict(X_test)
grid_sgd_train_score = grid_sgd.score(X_train, y_train)
grid_sgd_test_score = grid_sgd.score(X_test, y_test)
print('Training set score: ', grid_sgd_train_score)
print('Test score: ', grid_sgd_test_score)
print("Best parameters: {}".format(grid_sgd.best_params_))
print("Best cross-validation score: {:.2f}".format(grid_sgd.best_score_))
print('r2_score: ', r2_score(y_test,y_pred_sgd))
#apply polynomial regression in pipeline
from sklearn.preprocessing import PolynomialFeatures
pipe_poly=Pipeline([
('polynomialfeatures', PolynomialFeatures()),
('scaler',MinMaxScaler()),
('ridge', Ridge())
])
#define a list of parameters
param_poly = {'polynomialfeatures__degree':range(1,3)}
#apply polynomial regression in pipeline
grid_poly = GridSearchCV(pipe_poly, param_poly,cv=5, n_jobs=-1, return_train_score = True)
grid_poly.fit(X_train, y_train)
y_pred_poly=grid_poly.predict(X_test)
print('train score: ', grid_poly.score(X_train, y_train))
# Mean Cross Validation Score
#print("Cross Validation training results", grid_poly.cv_results_['best_train_score'])
#print("Cross Validation testing results", grid_poly.cv_results_['best_test_score'])
#find best parameters
print('Poly parameters: ', grid_poly.best_params_)
print("Best cross-validation score: {:.4f}".format(grid_poly.best_score_))
# print the coefficients
print('Poly features: ', grid_poly.best_estimator_.named_steps['polynomialfeatures'].n_output_features_)
print('Coefficients: ', grid_poly.best_estimator_.named_steps['ridge'].coef_)
# Check test data set performance
print("Poly Performance Test : ", grid_poly.score(X_test,y_test))
print('R2 score: ', r2_score(y_test,y_pred_poly))
# Train a Decision Tree regression model
from sklearn.tree import DecisionTreeRegressor
dtree = DecisionTreeRegressor(random_state=0)
#define a list of parameters
param_dtree = {'max_depth': range(1,20), 'min_samples_leaf' : range(1,10), 'max_leaf_nodes': range(2,5)}
#apply grid search
grid_dtree = GridSearchCV(dtree, param_dtree, cv=5, return_train_score = True)
grid_dtree.fit(X_train, y_train)
y_pred_tree = grid_dtree.predict(X_test)
print('train score: ', grid_dtree.score(X_train, y_train))
# Mean Cross Validation Score
print("Best Mean Cross-validation score: {:.2f}".format(grid_dtree.best_score_))
print()
#find best parameters
print('Decision Tree parameters: ', grid_dtree.best_params_)
# Check test data set performance
print("Decision Tree Performance: ", grid_dtree.score(X_test,y_test))
print('R2 score: ', r2_score(y_test,y_pred_tree))
# Train a Linear SVM model
from sklearn.svm import LinearSVR,SVR
import warnings
lin_svr = LinearSVR()
param_grid_linearSVR = {'C' : [ 0.01, 0.1, 1, 10, 100, 1000]}
CV_linearSVR_class = GridSearchCV(estimator = lin_svr, param_grid = param_grid_linearSVR ,cv = 5, verbose = 1, n_jobs = -1, return_train_score = True)
GS_results_linearSVR = CV_linearSVR_class.fit(X_train, y_train)
y_pred_svr = GS_results_linearSVR.predict(X_test)
best_parameters_linearSVR_class = CV_linearSVR_class.best_params_
#find best parameters
print('SVM parameters: ', best_parameters_linearSVR_class)
print('train score: ', GS_results_linearSVR.score(X_train, y_train))
print("Best Mean Cross-validation score: {:.2f}".format(GS_results_linearSVR.best_score_))
# Check test data set performance
print("SVM Tree Performance: ", GS_results_linearSVR.score(X_test,y_test))
print('r2_score: ', r2_score(y_test,y_pred_svr))
# Train a Kernelized Support Vector Machine
svr_kernel = SVR(kernel = 'rbf')
#define a list of parameters
param_grid_svr = {'C': [0.1, 1, 10, 100, 1000, 10000],'gamma':[0.001, 0.01, 0.1, 1, 10, 100]}
#apply grid search
grid_svr_kernel = GridSearchCV(estimator = svr_kernel, param_grid = param_grid_svr, cv=5, n_jobs = -1, return_train_score = True)
grid_svr_kernel.fit(X_train, y_train)
y_pred_rbf = grid_svr_kernel.predict(X_test)
print('train score: ', grid_svr_kernel.score(X_train, y_train))
print("Best parameters: {}".format(grid_svr_kernel.best_params_))
print("Best Mean cross-validation score: {:.2f}".format(grid_svr_kernel.best_score_))
print("Performance: ", grid_svr_kernel.score(X_test,y_test))
print('r2_score: ', r2_score(y_test,y_pred_rbf))
svr_kernel = SVR(kernel = 'poly')
#define a list of parameters
param_grid_svr_P = {'C': [1, 10, 100,1000,10000],'degree':[1,3]}
#apply grid search
grid_svr_kernel_P = GridSearchCV(estimator = svr_kernel, param_grid = param_grid_svr_P, cv=5, n_jobs = -1, return_train_score = True)
grid_svr_kernel_P.fit(X_train, y_train)
y_pred_poly_P = grid_svr_kernel_P.predict(X_test)
print('train score: ', grid_svr_kernel_P.score(X_train, y_train))
print("Best parameters: {}".format(grid_svr_kernel_P.best_params_))
print("Best Mean cross-validation score: {:.2f}".format(grid_svr_kernel_P.best_score_))
print("Performance: ", grid_svr_kernel_P.score(X_test,y_test))
print('r2_score: ', r2_score(y_test,y_pred_poly_P))
svr_kernel = SVR(kernel = 'sigmoid')
#define a list of parameters
param_grid_svr_S = {'C': [1, 10, 100,1000,10000], 'gamma':[0.001, 0.01, 0.1, 1, 10, 100]}
#apply grid search
grid_svr_kernel_S = GridSearchCV(estimator = svr_kernel, param_grid = param_grid_svr_S, cv=5, n_jobs = -1, return_train_score = True)
grid_svr_kernel_S.fit(X_train, y_train)
y_pred_sigmoid = grid_svr_kernel_S.predict(X_test)
print('train score: ', grid_svr_kernel_S.score(X_train, y_train))
print("Best parameters: {}".format(grid_svr_kernel_S.best_params_))
print("Best Mean cross-validation score: {:.2f}".format(grid_svr_kernel_S.best_score_))
print("Performance: ", grid_svr_kernel_S.score(X_test,y_test))
print('r2_score: ', r2_score(y_test,y_pred_sigmoid))
###Output
train score: 0.7948480055794955
Best parameters: {'C': 10000, 'gamma': 0.001}
Best Mean cross-validation score: 0.76
Performance: 0.749803095245099
r2_score: 0.749803095245099
###Markdown
Tune Multiple Models with one GridSearch
###Code
model_gs = Pipeline([("regressor", LinearRegression())])
model_parm_gd = [
{ 'regressor': [LinearRegression()]},
{ 'regressor': [Ridge()],
'regressor__alpha':[0.001, 0.01, 0.1, 1, 10, 100,200] },
{ 'regressor': [Lasso(random_state=0)],
'regressor__alpha':[0.001, 0.01, 0.1, 1, 10, 100,200]},
]
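# The same grid can be extended with further estimators; a hypothetical extra entry
# (not searched here) for ElasticNet would look like:
# { 'regressor': [ElasticNet(random_state=0, max_iter=10000)],
#   'regressor__alpha': [0.01, 0.1, 1],
#   'regressor__l1_ratio': [0.2, 0.5, 0.8] },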
grid_search_house_pipe = GridSearchCV(model_gs, model_parm_gd)
grid_search_house_pipe.fit(X_train,y_train)
print(grid_search_house_pipe.best_params_)
# let's get the predictions
X_train_preds = grid_search_house_pipe.predict(X_train)
X_test_preds = grid_search_house_pipe.predict(X_test)
print("Best Mean Cross-validation score: {:.2f}".format(grid_search_house_pipe.best_score_))
# check model performance:
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
print('train mse: {}'.format(mean_squared_error(y_train, X_train_preds)))
print('train rmse: {}'.format(sqrt(mean_squared_error(y_train, X_train_preds))))
print('train r2: {}'.format(r2_score(y_train, X_train_preds)))
print()
print('test mse: {}'.format(mean_squared_error(y_test, X_test_preds)))
print('test rmse: {}'.format(sqrt(mean_squared_error(y_test, X_test_preds))))
print('test r2: {}'.format(r2_score(y_test, X_test_preds)))
###Output
train mse: 559886970.9352162
train rmse: 23661.93083700517
train r2: 0.9103295889443213
test mse: 871707753.5558221
test rmse: 29524.69734909779
test r2: 0.8731529205790172
###Markdown
Ensemble Models
###Code
# Train decision tree model with bagging
from sklearn.ensemble import BaggingRegressor
bag_dtree1 = BaggingRegressor(base_estimator=DecisionTreeRegressor(), bootstrap=True, random_state=0, oob_score=False)
bag_dtree1_param = {
'base_estimator__max_depth': range(1,10),
'max_samples': [0.8,1],
'n_estimators': [10,25,100]}
bag_dtree1_grid = GridSearchCV(bag_dtree1, bag_dtree1_param,cv=5, return_train_score=True, )
bag_dtree1_grid.fit(X_train,y_train)
y_pred = bag_dtree1_grid.predict(X_test)
print(f'Best Mean Cross Validation Score is {bag_dtree1_grid.best_score_}')
print(f'Best Mean Cross Validation Score is {bag_dtree1_grid.best_params_}')
print(f'Train score is {bag_dtree1_grid.score(X_train,y_train)}')
print(f'Test score is {bag_dtree1_grid.score(X_test,y_test)}')
print('r2_score: ', r2_score(y_test,y_pred))
bag_dtree2 = BaggingRegressor(DecisionTreeRegressor(max_depth= 7, max_leaf_nodes=5, min_samples_split= 3, splitter= 'random'), bootstrap=True, random_state=0, oob_score=False)
bag_dtree2_param = {
'max_samples': [0.8,1],
'n_estimators': [10,25,100]}
bag_dtree2_grid = GridSearchCV(bag_dtree2, bag_dtree2_param,cv=5, return_train_score=True, )
bag_dtree2_grid.fit(X_train,y_train)
y_pred = bag_dtree2_grid.predict(X_test)
print(f'Best Mean Cross Validation Score is {bag_dtree2_grid.best_score_}')
print(f'Best Mean Cross Validation Score is {bag_dtree2_grid.best_params_}')
print(f'Train score is {bag_dtree2_grid.score(X_train,y_train)}')
print(f'Test score is {bag_dtree2_grid.score(X_test,y_test)}')
print('r2_score: ', r2_score(y_test,y_pred))
import warnings
warnings.filterwarnings('ignore')
bag_lasso = BaggingRegressor(base_estimator=Lasso(), bootstrap=True, random_state=0, oob_score=False)
bag_lasso_param = {
'base_estimator__alpha': [0.01, 0.1, 1, 10, 100, 200],
'max_samples': [0.8,1],
'n_estimators': [10,25,100]}
bag_lasso_grid = GridSearchCV(bag_lasso, bag_lasso_param,cv=6, return_train_score=True, )
bag_lasso_grid.fit(X_train,y_train)
y_pred = bag_lasso_grid.predict(X_test)
print(f'Best Mean Cross Validation Score is {bag_lasso_grid.best_score_}')
print(f'Best Mean Cross Validation Score is {bag_lasso_grid.best_params_}')
print(f'Train score is {bag_lasso_grid.score(X_train,y_train)}')
print(f'Test score is {bag_lasso_grid.score(X_test,y_test)}')
print('r2_score: ', r2_score(y_test,y_pred))
# Train decision tree model with pasting
paste_dtree1 = BaggingRegressor(base_estimator=DecisionTreeRegressor(), bootstrap=False, random_state=0, oob_score=False)
paste_dtree1_param = {
'base_estimator__max_depth': range(1,10),
'max_samples': [0.8,1],
'n_estimators': [10,25,100]}
paste_dtree1_grid = GridSearchCV(paste_dtree1, paste_dtree1_param,cv=5, return_train_score=True, )
paste_dtree1_grid.fit(X_train,y_train)
y_pred = paste_dtree1_grid.predict(X_test)
print(f'Best Mean Cross Validation Score is {paste_dtree1_grid.best_score_}')
print(f'Best Mean Cross Validation Score is {paste_dtree1_grid.best_params_}')
print(f'Train score is {paste_dtree1_grid.score(X_train,y_train)}')
print(f'Test score is {paste_dtree1_grid.score(X_test,y_test)}')
print('r2_score: ', r2_score(y_test,y_pred))
paste_dtree2 = BaggingRegressor(DecisionTreeRegressor(max_depth= 7, max_leaf_nodes=5, min_samples_split= 3, splitter= 'random'), bootstrap=False, random_state=0, oob_score=False)
paste_dtree2_param = {
'max_samples': [0.8,1],
'n_estimators': [10,25,100]}
paste_dtree2_grid = GridSearchCV(paste_dtree2, paste_dtree2_param,cv=5, return_train_score=True, )
paste_dtree2_grid.fit(X_train,y_train)
y_pred = paste_dtree2_grid.predict(X_test)
print(f'Best Mean Cross Validation Score is {paste_dtree2_grid.best_score_}')
print(f'Best Mean Cross Validation Score is {paste_dtree2_grid.best_params_}')
print(f'Train score is {paste_dtree2_grid.score(X_train,y_train)}')
print(f'Test score is {paste_dtree2_grid.score(X_test,y_test)}')
print('r2_score: ', r2_score(y_test,y_pred))
paste_lasso = BaggingRegressor(base_estimator=Lasso(), bootstrap=False, random_state=0, oob_score=False)
paste_lasso_param = {
'base_estimator__alpha': [0.01, 0.1, 1, 10, 100, 200],
'max_samples': [0.8,1],
'n_estimators': [10,25,100]}
paste_lasso_grid = GridSearchCV(paste_lasso, paste_lasso_param,cv=6, return_train_score=True, )
paste_lasso_grid.fit(X_train,y_train)
y_pred = paste_lasso_grid.predict(X_test)
print(f'Best Mean Cross Validation Score is {paste_lasso_grid.best_score_}')
print(f'Best Mean Cross Validation Score is {paste_lasso_grid.best_params_}')
print(f'Train score is {paste_lasso_grid.score(X_train,y_train)}')
print(f'Test score is {paste_lasso_grid.score(X_test,y_test)}')
print('r2_score: ', r2_score(y_test,y_pred))
# Train a Random Forest model
from sklearn.ensemble import RandomForestRegressor
rfr =RandomForestRegressor(random_state=42)
rfr_param = {
'n_estimators': [200, 500],
'max_features': ['auto', 'sqrt', 'log2'],
'max_depth' : [2,4,5,6,7,8],
'criterion' :['mse', 'mae']
}
rfr_grid = GridSearchCV(rfr, rfr_param,cv=5, return_train_score=True, )
rfr_grid.fit(X_train,y_train)
y_pred = rfr_grid.predict(X_test)
print(f'Best Mean Cross Validation Score is {rfr_grid.best_score_}')
print(f'Best Mean Cross Validation Score is {rfr_grid.best_params_}')
print(f'Train score is {rfr_grid.score(X_train,y_train)}')
print(f'Test score is {rfr_grid.score(X_test,y_test)}')
print('r2_score: ', r2_score(y_test,y_pred))
# Train an ExtraTree model
from sklearn.ensemble import ExtraTreesRegressor
etr= ExtraTreesRegressor(random_state=42)
etr_param = {
'n_estimators': [200, 500],
'max_features': ['auto', 'sqrt', 'log2'],
'max_depth' : [2,4,5,6,7,8],
'criterion' :['mse', 'mae']
}
etr_grid = GridSearchCV(etr, etr_param,cv=5, return_train_score=True, )
etr_grid.fit(X_train,y_train)
y_pred = etr_grid.predict(X_test)
print(f'Best Mean Cross Validation Score is {etr_grid.best_score_}')
print(f'Best Mean Cross Validation Score is {etr_grid.best_params_}')
print(f'Train score is {etr_grid.score(X_train,y_train)}')
print(f'Test score is {etr_grid.score(X_test,y_test)}')
print('r2_score: ', r2_score(y_test,y_pred))
# Train an AdaBoost model
from sklearn.ensemble import AdaBoostRegressor
adr_dtree =AdaBoostRegressor(base_estimator=DecisionTreeRegressor(),random_state=42)
adr_dtree_param = {
'base_estimator__criterion' : ["mse", "mae"],
'base_estimator__splitter' : ["best", "random"],
'base_estimator__max_depth' : [2,4,6],
'n_estimators' : [100,150],
'learning_rate' : [0.5,1.0,2],
}
adr_dtree_grid = GridSearchCV(adr_dtree, adr_dtree_param,cv=5, return_train_score=True, )
adr_dtree_grid.fit(X_train,y_train)
y_pred = adr_dtree_grid.predict(X_test)
print(f'Best Mean Cross Validation Score is {adr_dtree_grid.best_score_}')
print(f'Best Mean Cross Validation Score is {adr_dtree_grid.best_params_}')
print(f'Train score is {adr_dtree_grid.score(X_train,y_train)}')
print(f'Test score is {adr_dtree_grid.score(X_test,y_test)}')
print('r2_score: ', r2_score(y_test,y_pred))
# Train a Gradient Boosting model
from sklearn.ensemble import GradientBoostingRegressor
gbr= GradientBoostingRegressor(random_state=42)
gbr_param = {
'max_depth' : [2,3,4],
'n_estimators' : [100,150],
'learning_rate' : [0.5,1.0,2],
}
gbr_grid = GridSearchCV(gbr, gbr_param,cv=5, return_train_score=True, )
gbr_grid.fit(X_train,y_train)
y_pred = gbr_grid.predict(X_test)
print(f'Best Mean Cross Validation Score is {gbr_grid.best_score_}')
print(f'Best Mean Cross Validation Score is {gbr_grid.best_params_}')
print(f'Train score is {gbr_grid.score(X_train,y_train)}')
print(f'Test score is {gbr_grid.score(X_test,y_test)}')
print('r2_score: ', r2_score(y_test,y_pred))
pip install xgboost
# Train a XGBoost model
from xgboost import XGBRegressor
xgbr= XGBRegressor(random_state=42,early_stopping_rounds=2)
xgbr_param = {
'max_depth' : [2,4,6],
'n_estimators' : [50,100,150],
'learning_rate' : [0.1,0.5,0.6,0.8],
'min_child_weight' : [1,3,5,7],
'subsample':[0.6,0.7,0.8,0.9,1]
}
xgbr_grid = GridSearchCV(xgbr, xgbr_param,cv=5, return_train_score=True, )
xgbr_grid.fit(X_train,y_train)
y_pred = xgbr_grid.predict(X_test)
print(f'Best Mean Cross Validation Score is {xgbr_grid.best_score_}')
print(f'Best Mean Cross Validation Score is {xgbr_grid.best_params_}')
print(f'Train score is {xgbr_grid.score(X_train,y_train)}')
print(f'Test score is {xgbr_grid.score(X_test,y_test)}')
print('r2_score: ', r2_score(y_test,y_pred))
###Output
Best Mean Cross Validation Score is 0.8981992683459357
Best Mean Cross Validation Score is {'learning_rate': 0.1, 'max_depth': 4, 'min_child_weight': 1, 'n_estimators': 150, 'subsample': 0.8}
Train score is 0.979396879296572
Test score is 0.8776048030903614
r2_score: 0.8776048030903614
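###Markdown
A minimal sketch (not run here) that refits the best XGBoost configuration reported above without repeating the grid search; the hyperparameter values are copied from the grid-search result and assumed to transfer unchanged.
###Code
# refit the best reported XGBoost configuration directly (values taken from the result above)
best_xgbr = XGBRegressor(random_state=42,
                         learning_rate=0.1,
                         max_depth=4,
                         min_child_weight=1,
                         n_estimators=150,
                         subsample=0.8)
best_xgbr.fit(X_train, y_train)
print('Refit train score:', best_xgbr.score(X_train, y_train))
print('Refit test score :', best_xgbr.score(X_test, y_test))
###Output
_____no_output_____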
###Markdown
Summary
###Code
regressors={'knn':grid_knn,
'lsvr':CV_linearSVR_class,
'ridge':grid_ridge,
'lasso':grid_lasso,
'elasticnet':grid_elasticnet,
'polynomial':grid_poly,
'linearsgd':grid_sgd,
'ksvr_R':grid_svr_kernel,
'ksvr_P':grid_svr_kernel_P,
'ksvr_S':grid_svr_kernel_S,
'dtree':grid_dtree,
'bag_dtree1':bag_dtree1_grid,
            'bag_dtree2':bag_dtree2_grid,
'bag_lasso':bag_lasso_grid,
'paste_dtree1': paste_dtree1_grid,
            'paste_dtree2':paste_dtree2_grid,
'paste_lasso': paste_lasso_grid,
'rfr': rfr_grid,
'etr': etr_grid,
'adr_dtree':adr_dtree_grid,
'gbr': gbr_grid,
'xgbr': xgbr_grid}
regressors.keys()
results_mean_std = []
for key, value in regressors.items():
mean = value.cv_results_['mean_test_score'][value.best_index_]
std=value.cv_results_['std_test_score'][value.best_index_]
results_mean_std.append({
"model": key,
"mean": mean,
"std": std
})
# Create a Pandas DataFrame with the mean+std results
accuracy_df = pd.DataFrame(results_mean_std, columns=['model', 'mean', 'std'])
# Show the accuracy dataframe
accuracy_df.sort_values(by=['mean'], inplace=True,ascending=False)
accuracy_df
# Create a prediction of all models on the test set
predictions_all = {}
for key, value in regressors.items():
# Get best estimator
best_model = value.best_estimator_
# Predict test labels
predictions = best_model.predict(X_test)
# Save predictions to a list
predictions_all[key] = predictions
# Create a DataFrame for the predictions
pred = pd.DataFrame(predictions_all)
# Plot a heatmap of all correlations for easier visualization
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(9,6))
g = sns.heatmap(pred.corr(), annot=True, cmap='coolwarm', ax=ax)
g.set_title('Correlation of the test set label prediction between models')
def get_redundant_pairs(df):
'''Get diagonal and lower triangular pairs of correlation matrix'''
pairs_to_drop = set()
cols = df.columns
for i in range(0, df.shape[1]):
for j in range(0, i+1):
pairs_to_drop.add((cols[i], cols[j]))
return pairs_to_drop
def get_top_abs_correlations(df, n=5):
au_corr = df.corr().abs().unstack()
labels_to_drop = get_redundant_pairs(df)
au_corr = au_corr.drop(labels=labels_to_drop).sort_values(ascending=True)
return au_corr[0:n]
print("Top least Correlations")
print(get_top_abs_correlations(pred, 5))
###Output
Top least Correlations
dtree gbr 0.813337
knn dtree 0.814900
ksvr_R dtree 0.822717
lsvr dtree 0.826213
ksvr_P dtree 0.828280
dtype: float64
###Markdown
Stacking
###Code
xgbr_grid.best_estimator_
# Voting top 5
from sklearn.ensemble import VotingRegressor
vrlf1 = VotingRegressor(estimators=
[('xgbr', xgbr_grid.best_estimator_),
('bag_lasso', bag_lasso_grid.best_estimator_),
('paste_lasso', paste_lasso_grid.best_estimator_),
('ridge', grid_ridge.best_estimator_),
('lasso', grid_lasso.best_estimator_),
])
vrlf1_param = {
'weights' : [[1,2,1.5,1,1], [1,1,2,1.5,1], [1,1,1,2,1.5], [1.5,1,1,1,2], [2,1.5,1,1,1]],
}
vrlf1_grid = GridSearchCV(vrlf1, vrlf1_param,cv=5, return_train_score=True, )
vrlf1_grid.fit(X_train,y_train)
y_pred = vrlf1_grid.predict(X_test)
print(f'Best Mean Cross Validation Score is {vrlf1_grid.best_score_}')
print(f'Best Mean Cross Validation Score is {vrlf1_grid.best_params_}')
print(f'Train score is {vrlf1_grid.score(X_train,y_train)}')
print(f'Test score is {vrlf1_grid.score(X_test,y_test)}')
print('r2_score: ', r2_score(y_test,y_pred))
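# A short sketch: VotingRegressor.transform returns one column of predictions per
# base estimator, which allows comparing each base model with the combined vote.
best_voter = vrlf1_grid.best_estimator_
individual_preds = best_voter.transform(X_test)
for (name, _), col in zip(best_voter.estimators, individual_preds.T):
    print(name, 'r2:', round(r2_score(y_test, col), 4))
print('combined r2:', round(r2_score(y_test, best_voter.predict(X_test)), 4))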
# Voting Least Correlated
vrlf2 = VotingRegressor(estimators=
[('dtree', grid_dtree.best_estimator_),
('knn', grid_knn.best_estimator_),
('gbr', gbr_grid.best_estimator_),
], )
vrlf2_param = {
'weights':[[1,2,3],[2,1,3],[3,2,1]],
}
vrlf2_grid = GridSearchCV(vrlf2, vrlf2_param,cv=5, return_train_score=True, )
vrlf2_grid.fit(X_train,y_train)
y_pred = vrlf2_grid.predict(X_test)
print(f'Best Mean Cross Validation Score is {vrlf2_grid.best_score_}')
print(f'Best Mean Cross Validation Score is {vrlf2_grid.best_params_}')
print(f'Train score is {vrlf2_grid.score(X_train,y_train)}')
print(f'Test score is {vrlf2_grid.score(X_test,y_test)}')
print('r2_score: ', r2_score(y_test,y_pred))
# Stacking top 5
from sklearn.ensemble import StackingRegressor
srlf1 = StackingRegressor(estimators=
[('xgbr', xgbr_grid.best_estimator_),
('bag_lasso', bag_lasso_grid.best_estimator_),
('paste_lasso', paste_lasso_grid.best_estimator_),
('ridge', grid_ridge.best_estimator_),
('lasso', grid_lasso.best_estimator_)
], final_estimator=XGBRegressor(random_state=42,early_stopping_rounds=2))
srlf1_param = {
    'final_estimator__learning_rate' : [0.1,0.2],
}
srlf1_grid = GridSearchCV(srlf1, srlf1_param,cv=5, return_train_score=True, )
srlf1_grid.fit(X_train,y_train)
y_pred = srlf1_grid.predict(X_test)
print(f'Best Mean Cross Validation Score is {srlf1_grid.best_score_}')
print(f'Best Mean Cross Validation Score is {srlf1_grid.best_params_}')
print(f'Train score is {srlf1_grid.score(X_train,y_train)}')
print(f'Test score is {srlf1_grid.score(X_test,y_test)}')
print('r2_score: ', r2_score(y_test,y_pred))
# Stacking Least Correlated
srlf2 = StackingRegressor(estimators=
[('dtree', grid_dtree.best_estimator_),
('knn', grid_knn.best_estimator_),
('gbr', gbr_grid.best_estimator_),
],
final_estimator=
XGBRegressor(random_state=42,early_stopping_rounds=2))
srlf2_param = {
'final_estimator__max_depth' : [2,6],
'final_estimator__n_estimators' : [50,150],
'final_estimator__learning_rate' : [0.1,0.6,0.8],
'final_estimator__min_child_weight' : [1,3,7],
'final_estimator__subsample':[0.6,0.9,1],
}
srlf2_grid = GridSearchCV(srlf2, srlf2_param,cv=5, return_train_score=True, )
srlf2_grid.fit(X_train,y_train)
y_pred = srlf2_grid.predict(X_test)
print(f'Best Mean Cross Validation Score is {srlf2_grid.best_score_}')
print(f'Best Mean Cross Validation Score is {srlf2_grid.best_params_}')
print(f'Train score is {srlf2_grid.score(X_train,y_train)}')
print(f'Test score is {srlf2_grid.score(X_test,y_test)}')
print('r2_score: ', r2_score(y_test,y_pred))
###Output
Best Mean Cross Validation Score is 0.8712330028119567
Best Mean Cross Validation Score is {'final_estimator__learning_rate': 0.1, 'final_estimator__max_depth': 2, 'final_estimator__min_child_weight': 7, 'final_estimator__n_estimators': 50, 'final_estimator__subsample': 0.6}
Train score is 0.9712463399874721
Test score is 0.8463499552765346
r2_score: 0.8463499552765346
|
Experiments/BERT_SentEmbed_Models.ipynb | ###Markdown
Imports
###Code
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import StratifiedKFold
from sklearn.preprocessing import *
from sklearn.model_selection import train_test_split
from sklearn.metrics import *
from sklearn import svm
from scipy import stats
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import joblib
###Output
_____no_output_____
###Markdown
Load Data
###Code
%cd '/content/drive/My Drive/IIITD/SEM-7/ML/ML Project/Code/Dataset'
X_train = joblib.load('X_train_cls')
X_test = joblib.load('X_test_cls')
y_train = joblib.load('y_train')
y_test = pd.read_csv('labels-levela.csv',index_col=0,header=None).to_numpy().ravel()
###Output
_____no_output_____
###Markdown
Standardize
###Code
# scaler = StandardScaler()
# X_train = scaler.fit_transform(X_train)
# X_test = scaler.fit_transform(X_test)
k_fold = StratifiedKFold(n_splits=5, random_state=0, shuffle=True)
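# The stratified splitter above can also drive a quick cross-validated baseline;
# a sketch (not one of the saved models) using logistic regression and macro-F1:
from sklearn.model_selection import cross_val_score
cv_scores = cross_val_score(LogisticRegression(max_iter=2000), X_train, y_train,
                            cv=k_fold, scoring='f1_macro')
print('CV f1 (macro): mean = {:.4f}, std = {:.4f}'.format(cv_scores.mean(), cv_scores.std()))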
###Output
_____no_output_____
###Markdown
Models Logistic Regression Training
###Code
log_classifier = LogisticRegression(max_iter=2000)
log_classifier = log_classifier.fit(X_train,y_train)
# joblib.dump(log_classifier,'logregression_BERT.model')
# log_classifier = joblib.load('logregression_BERT.model')
###Output
_____no_output_____
###Markdown
Prediction
###Code
y_pred_logistic = log_classifier.predict(X_test)
print(classification_report(y_test,y_pred_logistic))
plot = plot_confusion_matrix(log_classifier,X_test,y_test)
plot.ax_.set_title("Confusion Matrix (Logistic Regression)")
plt.show()
###Output
_____no_output_____
###Markdown
Naive Bayes Classifier Training
###Code
nb_classifier = GaussianNB()
nb_classifier = nb_classifier.fit(X_train,y_train)
# joblib.dump(nb_classifier,'naivebayes_BERT.model')
# nb_classifier = joblib.load('naivebayes_BERT.model')
###Output
_____no_output_____
###Markdown
Prediction
###Code
y_pred_nb = nb_classifier.predict(X_test)
print(classification_report(y_test,y_pred_nb))
plot = plot_confusion_matrix(nb_classifier,X_test,y_test)
plot.ax_.set_title("Confusion Matrix (Naive Bayes)")
plt.show()
###Output
_____no_output_____
###Markdown
Random Forest Classifier Training
###Code
rf_classifier = RandomForestClassifier()
rf_classifier = rf_classifier.fit(X_train,y_train)
# joblib.dump(rf_classifier,'randomforest_BERT.model')
# rf_classifier = joblib.load('randomforest_BERT.model')
###Output
_____no_output_____
###Markdown
Prediction
###Code
y_pred_rf = rf_classifier.predict(X_test)
print(classification_report(y_test,y_pred_rf))
plot = plot_confusion_matrix(rf_classifier,X_test,y_test)
plot.ax_.set_title("Confusion Matrix (Random Forest)")
plt.show()
###Output
_____no_output_____
###Markdown
SVM Training
###Code
svm_classifier = svm.SVC()
svm_classifier = svm_classifier.fit(X_train, y_train)
# joblib.dump(svm_classifier,'svm_BERT.model')
# svm_classifier = joblib.load('svm_BERT.model')
###Output
_____no_output_____
###Markdown
Prediction
###Code
y_pred_svm = svm_classifier.predict(X_test)
print(classification_report(y_test,y_pred_svm))
plot = plot_confusion_matrix(svm_classifier,X_test,y_test)
plot.ax_.set_title("Confusion Matrix (SVM)")
plt.show()
###Output
_____no_output_____
###Markdown
Artifical Neural Network
###Code
class NN:
def __init__(self,layers,activation,alpha):
self.n_layers = len(layers)
self.layers = layers
self.activation = activation
self.alpha = alpha
self.model = MLPClassifier(hidden_layer_sizes=self.layers,
activation=self.activation,
alpha=self.alpha,
max_iter=500)
def fit(self,X,y):
self.model = self.model.fit(X,y)
def predict(self,X):
return self.model.predict(X)
def loss(self):
return self.model.loss_
NN_classifier = NN(layers=[200,100,100,50],activation='relu',alpha=1e-4)
NN_classifier.fit(X_train,y_train)
y_pred_NN = NN_classifier.predict(X_test)
print(classification_report(y_test,y_pred_NN))
plot = plot_confusion_matrix(NN_classifier.model,X_test,y_test)
plot.ax_.set_title("Confusion Matrix (NN)")
plt.show()
###Output
_____no_output_____
###Markdown
Majority Voting
###Code
all_models = {'naive bayes': y_pred_nb,
'logistic regression': y_pred_logistic,
'random forest': y_pred_rf,
'SVM': y_pred_svm,
'NN':y_pred_NN}
for key_1 in all_models:
combination = []
model_rep = ''
for key_2 in all_models:
if key_1 != key_2:
combination.append(all_models[key_2])
model_rep += ' + ' + key_2
print(model_rep[3:])
y_pred_voting = stats.mode(np.array(combination))[0][0]
print('accuracy:',accuracy_score(y_test,y_pred_voting))
print('f1 (macro):',f1_score(y_test, y_pred_voting, average='macro'))
print()
###Output
logistic regression + random forest + SVM + NN
accuracy: 0.8186046511627907
f1 (macro): 0.7303511705685619
naive bayes + random forest + SVM + NN
accuracy: 0.8058139534883721
f1 (macro): 0.7152515489467162
naive bayes + logistic regression + SVM + NN
accuracy: 0.8232558139534883
f1 (macro): 0.7488935333169413
naive bayes + logistic regression + random forest + NN
accuracy: 0.8104651162790698
f1 (macro): 0.724184881522276
naive bayes + logistic regression + random forest + SVM
accuracy: 0.8162790697674419
f1 (macro): 0.7279679679679679
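###Markdown
For completeness, a short sketch (not run above) of a majority vote over all five classifiers at once, reusing the prediction arrays already computed.
###Code
# majority vote over all five models together (same mode-based voting as the loop above)
y_pred_all_vote = stats.mode(np.array(list(all_models.values())))[0][0]
print('accuracy:', accuracy_score(y_test, y_pred_all_vote))
print('f1 (macro):', f1_score(y_test, y_pred_all_vote, average='macro'))
###Output
_____no_output_____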
|
post_precessing/ModelSSE.ipynb | ###Markdown
Visualization of long-term SSE model outputs. Goal: identify SSE episodes based on slip rate and cut the model output (e.g. slip rate, shear traction) into small pieces named by event number. Input: model output binary data starting with "slipz1_" and ending with ".dat". Output: figures of maximum slip rate and final fault slip; snapshots of slip rate; pieces of data (slip rate, shear traction and final fault slip). Authorship: D. Li, 27.10.2021, email: [email protected]
###Code
# initialize and load modules
%matplotlib notebook
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.tri as tri
import pyproj
import scipy.io as sio
from scipy import spatial
# from cmcrameri import cm
from scipy.io import netcdf_file as netcdf
import matplotlib
matplotlib.rc('xtick', labelsize=9)
matplotlib.rc('ytick', labelsize=9)
print('finish module loading')
# plot max slip rate on the entire fault
# set folder, model name and appendix
# modelname = 'SSE2_2/'
# folder = '/Volumes/LINUX_GOFAR/guillimin/Model24_450/'
folder = '/Volumes/LINUX_GOFAR/Geosphere/Model24_all/'
modelname = 'seff2_9/'
appendix = '-h5_ef20_s25s41.dat'
# modelname = 'seff2_13/'
# maximum slip rate and SR at observation points.
fmaxv = np.loadtxt(folder + modelname +'maxv'+ appendix )
fmaxs1= np.loadtxt(folder + modelname +'maxv_150'+appendix )
fmaxs2= np.loadtxt(folder + modelname +'maxv_250'+appendix )
fmaxs3= np.loadtxt(folder + modelname +'maxv_300'+appendix )
fmaxs4= np.loadtxt(folder + modelname +'maxv_350'+appendix )
fmaxs5= np.loadtxt(folder + modelname +'maxv_400'+appendix )
fmaxs6= np.loadtxt(folder + modelname +'maxv_400'+appendix )
fmaxs7= np.loadtxt(folder + modelname +'maxv_50'+appendix )
fmaxs8= np.loadtxt(folder + modelname +'maxv_200'+appendix )
# set colormap
number = 10
cmap = plt.get_cmap('plasma_r')
colors = [cmap(i) for i in np.linspace(0, 1, number)]
colors = colors[1:]
# plot and save fig
plt.figure()
plt.plot(fmaxv[:,0],fmaxv[:,1],'-k')
# plt.plot(fmaxv[:,0],fmaxs1[:],color=colors[1])
plt.plot(fmaxv[:,0],fmaxs2[:],color=colors[2])
plt.plot(fmaxv[:,0],fmaxs3[:],color=colors[3])
plt.plot(fmaxv[:,0],fmaxs4[:],color=colors[4])
plt.plot(fmaxv[:,0],fmaxs5[:],color=colors[5])
plt.plot(fmaxv[:,0],fmaxs6[:],color=colors[6])
plt.plot(fmaxv[:,0],fmaxs7[:],color=colors[7])
plt.plot(fmaxv[:,0],fmaxs8[:],color=colors[8])
# plt.xlim((300,500))
plt.show()
outname = folder + modelname + 'maxv2'+'.png'
plt.savefig(outname,dpi=100,transparent=False)
# load geometry and mesh
vertex = np.loadtxt(folder + 'vertex2.txt')
connect = np.loadtxt(folder + 'cell3.txt')
# data1 = np.loadtxt(folder + '/vertex.txt')
# data2 = np.loadtxt(folder + '/cell_107200.txt')
# vertex = data1/1e3
# connect = data2-1
nvex = len(vertex[:,1])
ncell = len(connect[:,1])
miu = 30e+9;
coeff = 1e+6;
vpl = 41/1000/365/24/3600;
yrs = 365*24*3600 ;
print('load geometry and triangular mesh')
# create triangular mesh
xr = vertex[:,0]
yr = vertex[:,1]
triang = tri.Triangulation(xr,yr,connect)
bb = np.array([xr,yr])
print(bb.shape,ncell)
# set Cartesian-to-geographic projection, if necessary
# myproj = pyproj.Proj(proj='latlong', ellps='WGS84', datum='WGS84')
# lla = pyproj.Proj(proj='utm',zone='11N',ellps='WGS84', datum='WGS84')
# # trench = np.loadtxt('/import/deadlock-data/dli/Mexico/Launch_SeisSol/trench.txt')
# # epi = np.loadtxt('/import/schreck-data/dli/Mexico/Launch_Seissol/smallslab/2014EQ/2014Eq_USGS.txt')
# # aft = np.loadtxt('/import/schreck-data/dli/Mexico/Launch_Seissol/smallslab/2014EQ/2014Eq_aftershock.txt')
# # transform coordinates
# refer = [2211956.564907321, 2065452.360267957]
# xx = vertex[:,0]*1e3
# yy = vertex[:,1]*1e3
# # rotate
# theta = -65/180*np.pi
# x1 = np.cos(theta)*xx + np.sin(theta)*yy;
# y1 = -np.sin(theta)*xx + np.cos(theta)*yy;
# x2 = x1+refer[0]
# y2 = y1+refer[1]
# # project
# coords = pyproj.transform(lla, myproj, x2,y2, x2-x2, radians=False)
# xr = coords[0]
# yr = coords[1]
# triang = tri.Triangulation(xr,yr,connect)
# bb = np.array([xr,yr])
# print(bb.shape)
# print(npoint)
# find timing for SSE event
# folder = '/import/freenas-m-05-seissol/dli/Mexico/Mesh_all_depth/'
tfile = np.loadtxt(folder + modelname + '/t_sse'+ appendix);
# vpl = np.log10(42)
# data = np.where(fmaxs1 > 1*vpl)
dtsse = tfile[1:] - tfile[0:-1]
# identify individual events if separated by 15 days.
data = np.where(dtsse > 15/365)
T2=data[0]
neve = T2.shape[0]
print(neve)
T1 = np.append(0,T2[0:-1])
twin = np.array([T1,T2,T2-T1+1])
np.savetxt(folder + modelname + 't_sse.txt',twin.transpose())
print(twin)
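# A tiny illustration of the splitting rule above (synthetic times, in years):
# the gap 0.02 -> 0.10 is about 29 days, which exceeds the 15-day threshold,
# so np.where(gaps > 15/365) marks an episode boundary at index 2.
_t_demo = np.array([0.00, 0.01, 0.02, 0.10, 0.11])
print(np.where((_t_demo[1:] - _t_demo[:-1]) > 15/365))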
# read binary file and cut into files dataSR... and dataTau...
# folder = '/import/freenas-m-05-seissol/dli/Mexico/Mesh_all_depth/'
ncell = 110256
sfilename = folder + modelname + '/slipz1_sse'+appendix
sfile = open(sfilename,mode='rb')
discard = np.fromfile(sfile,count=1,dtype='int32')
# if startting point is not ieve=0
# discard = np.fromfile(sfile,count=2*twin[1,40]*ncell,dtype='<f8')
## extract shear traction dataTau
tfilename = folder + modelname + '/slipz1_tau'+appendix
tfile = open(tfilename,mode='rb')
discard = np.fromfile(tfile,count=1,dtype='int32')
# # if the starting point is not ieve=0
# discard = np.fromfile(tfile,count=2*twin[1,40]*ncell,dtype='<f8')
# loop over events and cut slip rate and shear traction into per-event binary files
for ieve in range(0,neve):
nbegin = twin[0,ieve]
nend = twin[1,ieve]
nlength= twin[2,ieve]*2
print(nbegin, nend, nlength)
if (twin[2,ieve] < 29) :
print('not applicable')
continue
else:
print(ieve)
rawdata0 = np.fromfile(sfile,count=nlength*ncell,dtype='<f8')
rawdata1 = np.fromfile(tfile,count=nlength*ncell,dtype='<f8')
sr = rawdata0[::2]
tau= rawdata1[::2]
outname = folder + modelname +'data/dataSR'+ str(ieve)+ '.bin'
outname1= folder + modelname + 'data/dataTau'+ str(ieve)+ '.bin'
f1 = open(outname,'wb+')
f2 = open(outname1,'wb+')
f1.write(bytearray(sr))
f2.write(bytearray(tau))
print('done '+ str(ieve))
sfile.close()
tfile.close()
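# A sketch of reading one of the per-event pieces written above back into a
# (ntime, ncell) array of log10 slip rate; it assumes the chosen event was long
# enough (>= 29 steps) to have been written.
ieve_check = 1  # hypothetical event number; pick one that passed the length check
piece = np.fromfile(folder + modelname + 'data/dataSR' + str(ieve_check) + '.bin',
                    dtype='<f8').reshape(-1, ncell)
print('dataSR%d: %d time steps x %d cells' % (ieve_check, piece.shape[0], piece.shape[1]))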
# read binary file
# folder = '/import/freenas-m-05-seissol/dli/Mexico/Mesh_all_depth/'
tfile = np.loadtxt(folder + modelname + '/t_sse'+appendix);
sfilename = folder + modelname + '/slipz1_sse'+appendix
sfile = open(sfilename,mode='rb')
discard = np.fromfile(sfile,count=1,dtype='int32')
# if startting point is not ieve=0
# discard = np.fromfile(sfile,count=twin[1,2]*2*ncell,dtype='<f8')
# begin to loop for plotting snaps of slip rate
for ieve in range(1,neve):
nbegin = twin[0,ieve]
nend = twin[1,ieve]
nlength= twin[2,ieve]*2
print(nbegin, nend, nlength)
rawdata0 = np.fromfile(sfile,count=nlength*ncell,dtype='<f8')
sr = rawdata0[0::2]
print(sr.shape[0]/ncell)
if (twin[2,ieve] < 29) :
print('not applicable')
continue
else:
        step = int(twin[2,ieve]/6)
pp = [twin[0,ieve]+step, twin[0,ieve]+2*step,twin[0,ieve]+3*step,
twin[0,ieve]+4*step,twin[0,ieve]+5*step,twin[1,ieve]-1]
print(pp)
yrs = 365
dt = tfile[pp] - tfile[pp[0]]
dt = dt*yrs
vcos = np.zeros((6,ncell))
#stress = np.zeros((6,ncell))
for i in range(0,6):
jj = pp[i] - twin[0,ieve]
vcos[i,:] = sr[jj*ncell-ncell:jj*ncell]
# stress[i,:]=0.5*sr0[jj*ncell-ncell:jj*ncell]+ 0.5*sr0[jj*ncell:jj*ncell+ncell]
srmax=-5
fig,([ax0,ax1,ax2],[ax3,ax4,ax5]) = plt.subplots(nrows=2,ncols=3,figsize=(7,4))
sc = ax0.tripcolor(triang,vcos[0,:], cmap='rainbow',shading='flat',vmin=-11,vmax=srmax)
# cl = fig.colorbar(sc,ax=ax0,shrink=0.75)
# ax0.set(xlim=(-102.5, -99),ylim=(16.5,19))
# ax0.plot(coast['ncst'][:,0],coast['ncst'][:,1],'-k',markersize=0.1)
# ax0.text(-102, 16.7, 'day '+str(np.floor(dt[0])),fontsize=12);
sc = ax1.tripcolor(triang,vcos[1,:], cmap='rainbow',shading='flat',vmin=-11,vmax=srmax)
# cl = fig.colorbar(sc,ax=ax1,shrink=0.75)
# ax1.set(xlim=(-102.5, -99),ylim=(16.5,19))
# ax1.plot(coast['ncst'][:,0],coast['ncst'][:,1],'-k',markersize=0.1)
# ax1.text(-102, 16.7, 'day '+str(np.floor(dt[1])),fontsize=9);
sc = ax2.tripcolor(triang,vcos[2,:], cmap='rainbow',shading='flat',vmin=-11,vmax=srmax)
# cl = fig.colorbar(sc,ax=ax1,shrink=0.75)
# ax2.set(xlim=(-102.5, -99),ylim=(16.5,19))
# ax2.plot(coast['ncst'][:,0],coast['ncst'][:,1],'-k',markersize=0.1)
# ax2.text(-102, 16.7, 'day '+str(np.floor(dt[2])),fontsize=9);
sc = ax3.tripcolor(triang,vcos[3,:], cmap='rainbow',shading='flat',vmin=-11,vmax=srmax)
# cl = fig.colorbar(sc,ax=ax1,shrink=0.75)
# ax3.set(xlim=(-102.5, -99),ylim=(16.5,19))
# ax3.plot(coast['ncst'][:,0],coast['ncst'][:,1],'-k',markersize=0.1)
# ax3.text(-102, 16.7, 'day '+str(np.floor(dt[3])),fontsize=9);
sc = ax4.tripcolor(triang,vcos[4,:], cmap='rainbow',shading='flat',vmin=-11,vmax=srmax)
# cl = fig.colorbar(sc,ax=ax1,shrink=0.75)
# ax4.set(xlim=(-102.5, -99),ylim=(16.5,19))
# ax4.plot(coast['ncst'][:,0],coast['ncst'][:,1],'-k',markersize=0.1)
# ax4.text(-102, 16.7, 'day '+str(np.floor(dt[4])),fontsize=9);
sc = ax5.tripcolor(triang,vcos[5,:], cmap='rainbow',shading='flat',vmin=-11,vmax=srmax)
# cl = fig.colorbar(sc,ax=ax1,shrink=0.75)
# ax5.set(xlim=(-102.5, -99),ylim=(16.5,19))
# ax5.plot(coast['ncst'][:,0],coast['ncst'][:,1],'-k',markersize=0.1)
# # ax5.text(-102, 16.7, 'day '+str(np.floor(dt[5])),fontsize=9)
# ax5.text(-102, 16.7, 'day '+str(np.floor(dt[5])),fontsize=9)
plt.show()
outname =folder + modelname + 'snapshots/snap_sr'+ str(ieve)+ '.png'
plt.savefig(outname,dpi=100,transparent=False)
sfile.close()
# calculate cumulative final slip and plot
# modelname = 'h90_N25_T2_May28/'
# folder = '/import/freenas-m-05-seissol/dli/Mexico/Mesh_all_depth/'
sfilename = folder + modelname + '/slipz1_sse'+appendix
tfile = np.loadtxt(folder + modelname + '/t_sse'+appendix);
sfile = open(sfilename,mode='rb')
discard = np.fromfile(sfile,count=1,dtype='int32')
# loop over events, integrate slip rate over time, and plot the cumulative (final) slip of each SSE
for ieve in range(0,neve):
nbegin = twin[0,ieve]
nend = twin[1,ieve]
nlength= twin[2,ieve]*2
print(nbegin, nend, nlength)
rawdata0 = np.fromfile(sfile,count=nlength*ncell,dtype='<f8')
sr = rawdata0[::2]
print(sr.shape[0]/ncell)
if (twin[2,ieve] < 29) :
print('not applicable')
continue
else:
        step = int(twin[2,ieve]/6)
pp = [twin[0,ieve]+step, twin[0,ieve]+2*step,twin[0,ieve]+3*step,
twin[0,ieve]+4*step,twin[0,ieve]+5*step,twin[1,ieve]-1]
dt = tfile[twin[0,ieve]+1:twin[1,ieve]] - tfile[twin[0,ieve]:twin[1,ieve]-1]
dt = dt*365*24*3600
slp = np.zeros((ncell))
for i in range(1,twin[2,ieve]-3):
slp = slp + (0.5*10**sr[i*ncell-ncell:i*ncell]+0.5*10**sr[i*ncell+ncell:i*ncell+2*ncell])*dt[i]
slp = slp*100
print(slp.max(),slp.min())
fig,ax0 = plt.subplots(nrows=1,ncols=1,figsize=(4,3))
sc = ax0.tripcolor(triang,slp, cmap='RdYlBu_r',shading='flat',vmin=0,vmax=25)
cl = fig.colorbar(sc,ax=ax0,shrink=0.75)
# ax0.set(xlim=(-102.5, -99),ylim=(16.5,19))
# ax0.plot(coast['ncst'][:,0],coast['ncst'][:,1],'-k',markersize=0.1)
plt.show()
outname = folder + modelname + 'snapshots/finalslip_'+str(ieve)+'.png'
plt.savefig(outname,dpi=100,transparent=False)
np.savetxt(folder + modelname + 'data/fault_slip_' + str(ieve)+'.txt',slp.transpose())
sfile.close()
## plot snapshot of slip rate for a single event
# print(ieve)
# print(sr.shape[0]/ncell)
# print(jj)
# print(i)
# ieve = 17
# nbegin = twin[0,ieve]
# nend = twin[1,ieve]
# nlength= twin[2,ieve]*2
# print(nbegin, nend, nlength)
# #rawdata0 = np.fromfile(sfile,count=nlength*ncell,dtype='<f8')
# #sr = rawdata0[::2]
# #print(sr.shape[0]/ncell)
# if ( twin[2,ieve] < 6) :
# continue
# else:
# step = np.int(twin[2,ieve]/6)
# pp = [twin[0,ieve]+step, twin[0,ieve]+2*step,twin[0,ieve]+3*step,
# twin[0,ieve]+4*step,twin[0,ieve]+5*step,twin[1,ieve]-1]
# print(pp)
# yrs = 365
# dt = tfile[pp] - tfile[pp[0]]
# dt = dt*yrs
# vcos = np.zeros((6,ncell))
# #stress = np.zeros((6,ncell))
# for i in range(0,6):
# jj = pp[i] - twin[0,ieve]
# vcos[i,:] = sr[jj*ncell-ncell:jj*ncell]
# # stress[i,:]=0.5*sr0[jj*ncell-ncell:jj*ncell]+ 0.5*sr0[jj*ncell:jj*ncell+ncell]
# fig,([ax0,ax1,ax2],[ax3,ax4,ax5]) = plt.subplots(nrows=2,ncols=3,figsize=(6.5,3))
# sc = ax0.tripcolor(triang,vcos[0,:], cmap='rainbow',shading='flat',vmin=-10,vmax=-6)
# # cl = fig.colorbar(sc,ax=ax0,shrink=0.75)
# ax0.set(xlim=(-102.5, -99),ylim=(16.5,19))
# ax0.plot(coast['ncst'][:,0],coast['ncst'][:,1],'-k',markersize=0.1)
# ax0.text(-102, 16.7, 'day '+str(np.floor(dt[0])),fontsize=12);
# sc = ax1.tripcolor(triang,vcos[1,:], cmap='rainbow',shading='flat',vmin=-10,vmax=-6)
# # cl = fig.colorbar(sc,ax=ax1,shrink=0.75)
# ax1.set(xlim=(-102.5, -99),ylim=(16.5,19))
# ax1.plot(coast['ncst'][:,0],coast['ncst'][:,1],'-k',markersize=0.1)
# ax1.text(-102, 16.7, 'day '+str(np.floor(dt[1])),fontsize=9);
# sc = ax2.tripcolor(triang,vcos[2,:], cmap='rainbow',shading='flat',vmin=-10,vmax=-6)
# # cl = fig.colorbar(sc,ax=ax1,shrink=0.75)
# ax2.set(xlim=(-102.5, -99),ylim=(16.5,19))
# ax2.plot(coast['ncst'][:,0],coast['ncst'][:,1],'-k',markersize=0.1)
# ax2.text(-102, 16.7, 'day '+str(np.floor(dt[2])),fontsize=9);
# sc = ax3.tripcolor(triang,vcos[3,:], cmap='rainbow',shading='flat',vmin=-10,vmax=-6)
# # cl = fig.colorbar(sc,ax=ax1,shrink=0.75)
# ax3.set(xlim=(-102.5, -99),ylim=(16.5,19))
# ax3.plot(coast['ncst'][:,0],coast['ncst'][:,1],'-k',markersize=0.1)
# ax3.text(-102, 16.7, 'day '+str(np.floor(dt[3])),fontsize=9);
# sc = ax4.tripcolor(triang,vcos[4,:], cmap='rainbow',shading='flat',vmin=-10,vmax=-6)
# # cl = fig.colorbar(sc,ax=ax1,shrink=0.75)
# ax4.set(xlim=(-102.5, -99),ylim=(16.5,19))
# ax4.plot(coast['ncst'][:,0],coast['ncst'][:,1],'-k',markersize=0.1)
# ax4.text(-102, 16.7, 'day '+str(np.floor(dt[4])),fontsize=9);
# sc = ax5.tripcolor(triang,vcos[5,:], cmap='rainbow',shading='flat',vmin=-10,vmax=-6)
# # cl = fig.colorbar(sc,ax=ax1,shrink=0.75)
# ax5.set(xlim=(-102.5, -99),ylim=(16.5,19))
# ax5.plot(coast['ncst'][:,0],coast['ncst'][:,1],'-k',markersize=0.1)
# # ax5.text(-102, 16.7, 'day '+str(np.floor(dt[5])),fontsize=9)
# ax5.text(-102, 16.7, 'day '+str(np.floor(dt[5])),fontsize=9)
# plt.show()
# outname = 'snap_sr'+ str(ieve)+ '.png'
# plt.savefig(outname,dpi=100,transparent=False)
# Make mapviews of variables: eff. normal stress, dc, a-b and a
# folder = '/import/freenas-m-05-seissol/dli/Mexico/Mesh_all_depth/'
# model = 'h90_N25_T1'
varfile = np.loadtxt(folder + modelname + '/vardep'+appendix)
depth = varfile[:,0]
eff = varfile[:,1]/10   # effective normal stress
dc = varfile[:,2]       # critical slip distance d_c
pab = varfile[:,3]      # a-b
pa = varfile[:,4]       # a
fig,([ax0,ax1],[ax3,ax4]) = plt.subplots(nrows=2,ncols=2,figsize=(6.5,4))
sc = ax0.tripcolor(triang,eff, cmap='rainbow',shading='flat')
cl = fig.colorbar(sc,ax=ax0,shrink=0.75)
# ax0.set(xlim=(-102.5, -99),ylim=(16.5,19))
# ax0.plot(coast['ncst'][:,0],coast['ncst'][:,1],'-k',markersize=0.1)
sc = ax1.tripcolor(triang,dc, cmap='viridis',shading='flat')
cl = fig.colorbar(sc,ax=ax1,shrink=0.75)
# ax1.set(xlim=(-102.5, -99),ylim=(16.5,19))
# ax1.plot(coast['ncst'][:,0],coast['ncst'][:,1],'-k',markersize=0.1)
sc = ax3.tripcolor(triang,pab, cmap='viridis',shading='flat')
cl = fig.colorbar(sc,ax=ax3,shrink=0.75)
# ax3.set(xlim=(-102.5, -99),ylim=(16.5,19))
# ax3.plot(coast['ncst'][:,0],coast['ncst'][:,1],'-k',markersize=0.1)
sc = ax4.tripcolor(triang,pa, cmap='viridis',shading='flat')
cl = fig.colorbar(sc,ax=ax4,shrink=0.75)
# ax4.set(xlim=(-102.5, -99),ylim=(16.5,19))
# ax4.plot(coast['ncst'][:,0],coast['ncst'][:,1],'-k',markersize=0.1)
outname = folder + modelname + 'vardep'+'.png'
plt.savefig(outname,dpi=100,transparent=False)  # save before show() so the saved figure is not blank
plt.show()
# folder = '/import/freenas-m-05-seissol/dli/Mexico/Mesh_all_depth/'
# sfilename = folder + modelname + '/slipz1-v-h90_N25_T2.dat'
# sfile = open(sfilename,mode='rb')
# discard = np.fromfile(sfile,count=1,dtype='int32')
# # if startting point is not ieve=0
# #discard = np.fromfile(sfile,count=2*twin[1,12]*ncell,dtype='<f8')
# # begin to loop for plotting snaps of slip rate
# for ieve in range(0,neve):
# nbegin = twin[0]
# nend = twin[1]
# nlength= twin[2]*2
# print(nbegin, nend, nlength)
# rawdata0 = np.fromfile(sfile,count=nlength*ncell,dtype='<f8')
# sr = rawdata0[::2]
# print(sr.shape[0]/ncell)
# if (twin[2] < 6) :
# pp = [twin[0]+1, twin[0]+1,twin[0]+2,twin[0]+3, twin[0]+3, twin[0]+3 ]
# print('not applicable')
# continue
# else:
# step = np.int(twin[2]/6)
# pp = [twin[0]+step, twin[0]+2*step,twin[0]+3*step,
# twin[0]+4*step,twin[0]+5*step,twin[1]-1]
# print(pp)
# yrs = 365
# dt = tfile[pp] - tfile[pp[0]]
# dt = dt*yrs
# vcos = np.zeros((6,ncell))
# #stress = np.zeros((6,ncell))
# for i in range(0,6):
# jj = pp[i] - twin[0]
# vcos[i,:] = sr[jj*ncell-ncell:jj*ncell]
# # stress[i,:]=0.5*sr0[jj*ncell-ncell:jj*ncell]+ 0.5*sr0[jj*ncell:jj*ncell+ncell]
# fig,([ax0,ax1,ax2],[ax3,ax4,ax5]) = plt.subplots(nrows=2,ncols=3,figsize=(6.5,3))
# sc = ax0.tripcolor(triang,vcos[0,:], cmap='viridis',shading='flat',vmin=-10,vmax=-1)
# # cl = fig.colorbar(sc,ax=ax0,shrink=0.75)
# ax0.set(xlim=(-102.5, -99),ylim=(16.5,19))
# ax0.plot(coast['ncst'][:,0],coast['ncst'][:,1],'-k',markersize=0.1)
# ax0.text(-102, 16.7, 'day '+str(np.floor(dt[0])),fontsize=12);
# sc = ax1.tripcolor(triang,vcos[1,:], cmap='viridis',shading='flat',vmin=-10,vmax=-1)
# # cl = fig.colorbar(sc,ax=ax1,shrink=0.75)
# ax1.set(xlim=(-102.5, -99),ylim=(16.5,19))
# ax1.plot(coast['ncst'][:,0],coast['ncst'][:,1],'-k',markersize=0.1)
# ax1.text(-102, 16.7, 'day '+str(np.floor(dt[1])),fontsize=9);
# sc = ax2.tripcolor(triang,vcos[2,:], cmap='viridis',shading='flat',vmin=-10,vmax=-1)
# # cl = fig.colorbar(sc,ax=ax1,shrink=0.75)
# ax2.set(xlim=(-102.5, -99),ylim=(16.5,19))
# ax2.plot(coast['ncst'][:,0],coast['ncst'][:,1],'-k',markersize=0.1)
# ax2.text(-102, 16.7, 'day '+str(np.floor(dt[2])),fontsize=9);
# sc = ax3.tripcolor(triang,vcos[3,:], cmap='viridis',shading='flat',vmin=-10,vmax=-1)
# # cl = fig.colorbar(sc,ax=ax1,shrink=0.75)
# ax3.set(xlim=(-102.5, -99),ylim=(16.5,19))
# ax3.plot(coast['ncst'][:,0],coast['ncst'][:,1],'-k',markersize=0.1)
# ax3.text(-102, 16.7, 'day '+str(np.floor(dt[3])),fontsize=9);
# sc = ax4.tripcolor(triang,vcos[4,:], cmap='viridis',shading='flat',vmin=-10,vmax=-1)
# # cl = fig.colorbar(sc,ax=ax1,shrink=0.75)
# ax4.set(xlim=(-102.5, -99),ylim=(16.5,19))
# ax4.plot(coast['ncst'][:,0],coast['ncst'][:,1],'-k',markersize=0.1)
# ax4.text(-102, 16.7, 'day '+str(np.floor(dt[4])),fontsize=9);
# sc = ax5.tripcolor(triang,vcos[5,:], cmap='viridis',shading='flat',vmin=-10,vmax=-1)
# # cl = fig.colorbar(sc,ax=ax1,shrink=0.75)
# ax5.set(xlim=(-102.5, -99),ylim=(16.5,19))
# ax5.plot(coast['ncst'][:,0],coast['ncst'][:,1],'-k',markersize=0.1)
# # ax5.text(-102, 16.7, 'day '+str(np.floor(dt[5])),fontsize=9)
# ax5.text(-102, 16.7, 'day '+str(np.floor(dt[5])),fontsize=9)
# plt.show()
# outname = 'snap_cosr'+ str(ieve)+ '.png'
# plt.savefig(outname,dpi=100,transparent=False)
# sfile.close()
# calculate cumulative final slip and plot of a single episode
# folder = '/import/freenas-m-05-seissol/dli/Mexico/Mesh_all_depth/'
# ieve = 17
# sfilename = folder + modelname + 'data/dataSR'+str(ieve)+'.bin'
# sfile = open(sfilename,mode='rb')
# tfile = np.loadtxt(folder + modelname + '/t_sse-'+appendix);
# # begin to loop for plotting snaps of slip rate
# nbegin = twin[0,ieve]
# nend = twin[1,ieve]
# nlength= twin[2,ieve]
# print(nbegin, nend, nlength)
# rawdata0 = np.fromfile(sfile,count=nlength*ncell,dtype='<f8')
# sr = rawdata0[:]
# print(sr.shape[0]/ncell)
# dt = tfile[twin[0,ieve]+1:twin[1,ieve]] - tfile[twin[0,ieve]:twin[1,ieve]-1]
# dt = dt*365*24*3600
# slp = np.zeros((ncell))
# for i in range(1,250):
# slp = slp + (0.5*10**sr[i*ncell-ncell:i*ncell]+0.5*10**sr[i*ncell+ncell:i*ncell+2*ncell])*dt[i]
# slp = slp*100
# print(slp.max(),slp.min())
# fig,ax0 = plt.subplots(nrows=1,ncols=1,figsize=(4,3))
# sc = ax0.tripcolor(triang,slp, cmap='RdYlBu_r',shading='flat',vmin=0,vmax=25)
# cl = fig.colorbar(sc,ax=ax0,shrink=0.75)
# ax0.set(xlim=(-102.5, -99),ylim=(16.5,19))
# ax0.plot(coast['ncst'][:,0],coast['ncst'][:,1],'-k',markersize=0.1)
# plt.show()
# outname = folder + modelname + 'snapshots/cumuslip_'+str(ieve)+'.png'
# plt.savefig(outname,dpi=100,transparent=False)
# sfile.close()
###Output
_____no_output_____ |
notebooks/Projeto_Agrupamento_Clientes_KMeans.ipynb | ###Markdown
Project: Customer analysis and clustering with ML. Author: Carla Edila Santos da Rosa Silveira. Contact: @gmail.com. Original version: [Felipe Santana](https://minerandodados.com.br/analise-e-agrupamento-de-clientes-com-machine-learning-k-means/). Technologies: Google Colab, Kaggle, CCSearch, BeFunky, Github, Python libraries. Dataset: Mall Customer Segmentation Data (target consumer profile). ["cell cluster 3" by Anthony Mattox](https://search.creativecommons.org/photos/822003f4-dc29-44bf-a39d-6cc773e66770) is licensed under CC BY-NC 2.0. Importing Python libraries
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import plotly as py
import plotly.graph_objs as go
from sklearn.cluster import KMeans
import warnings
import os
warnings.filterwarnings("ignore")
py.offline.init_notebook_mode(connected = True)
###Output
_____no_output_____
###Markdown
Loading the dataset
###Code
df = pd.read_csv('Mall_Customers.csv') # available at: https://www.kaggle.com/vjchoudhary7/customer-segmentation-tutorial-in-python#
###Output
_____no_output_____
###Markdown
Initial look at the data
###Code
df.head()
###Output
_____no_output_____
###Markdown
----------- Legend ----------- CustomerID = customer code; Gender = sex; Age = age; Annual Income (k$) = annual income (k = thousand); Spending Score (1-100) = spending score (1 = low spender, 100 = high spender). Descriptive Analysis
###Code
df.shape # number of rows and columns: (200, 5)
df.describe()
###Output
_____no_output_____
###Markdown
Data types
###Code
df.dtypes
###Output
_____no_output_____
###Markdown
Summary of null values per variable
###Code
df.isnull().sum()
###Output
_____no_output_____
###Markdown
Data visualization
###Code
plt.style.use('fivethirtyeight') # set the plotting style
plt.figure(1 , figsize = (15 , 6)) # inspect the distribution of the data
n = 0
for x in ['Age' , 'Annual Income (k$)' , 'Spending Score (1-100)']:
n += 1
plt.subplot(1 , 3 , n)
plt.subplots_adjust(hspace =0.5 , wspace = 0.5)
sns.distplot(df[x] , bins = 25)
plt.title('{} '.format(x))
plt.show()
###Output
_____no_output_____
###Markdown
Sample count by sex
###Code
plt.figure(1 , figsize = (15 , 5))
sns.countplot(y = 'Gender' , data = df)
plt.show()
###Output
_____no_output_____
###Markdown
Linear relationship: Age vs Annual Income
###Code
plt.figure(1 , figsize = (15 , 6))
for gender in ['Male' , 'Female']:
plt.scatter(x = 'Age' , y = 'Annual Income (k$)' , data = df[df['Gender'] == gender] ,
s = 200 , alpha = 0.5 , label = gender)
plt.xlabel('Age'), plt.ylabel('Annual Income (k$)')
plt.title('Idade vs Renda Anual')
plt.legend() # the data look mixed, with no well-defined trend.
plt.show() # annual income tends to decrease with age; the highest incomes are among younger customers.
###Output
_____no_output_____
###Markdown
Linear relationship: Annual Income vs Spending Score
###Code
plt.figure(1 , figsize = (15 , 6))
for gender in ['Male' , 'Female']:
plt.scatter(x = 'Annual Income (k$)',y = 'Spending Score (1-100)' ,
data = df[df['Gender'] == gender] ,s = 200 , alpha = 0.5 , label = gender)
plt.xlabel('Annual Income (k$)'), plt.ylabel('Spending Score (1-100)')
plt.title('Renda Anual vs Pontuação de Gastos')
plt.legend()
plt.show() # greatest concentration around 40-65 k$ of annual income and spending scores between 40 and 60 points.
###Output
_____no_output_____
###Markdown
Distribution of Age, Annual Income, and Spending Score by Sex
###Code
plt.figure(1 , figsize = (15 , 7))
n = 0
for cols in ['Age' , 'Annual Income (k$)' , 'Spending Score (1-100)']:
n += 1
plt.subplot(1 , 3 , n)
plt.subplots_adjust(hspace = 0.5 , wspace = 0.5)
sns.violinplot(x = cols , y = 'Gender' , data = df , palette = 'vlag')
sns.swarmplot(x = cols , y = 'Gender' , data = df)
plt.ylabel('Gender' if n == 1 else '')
plt.title('Idade, Renda Anual e Pontuação de Gastos por Sexo' if n == 2 else '')
plt.show() # the largest difference is in spending score, concentrated around 50-80 points for women.
###Output
_____no_output_____
###Markdown
Clustering the data with the K-Means algorithm
###Code
X2 = df[['Annual Income (k$)' , 'Spending Score (1-100)']].iloc[: , :].values # choose the number of clusters with the Elbow method (within-cluster sum of squared distances)
inertia = []
for n in range(1 , 11):
algorithm = (KMeans(n_clusters = n))
algorithm.fit(X2)
inertia.append(algorithm.inertia_)
plt.figure(1 , figsize = (15 ,6)) # plot settings
plt.plot(np.arange(1 , 11) , inertia , 'o')
plt.plot(np.arange(1 , 11) , inertia , '-' , alpha = 0.5)
plt.xlabel('Número de Clusters') , plt.ylabel('Soma das Distâncias Q intra Clusters')
plt.show() # as the number of clusters increases, the within-cluster sum of squared distances decreases.
# when the decrease becomes almost negligible, we have reached the optimal value of k (4 in this example).
###Output
_____no_output_____
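###Markdown
As a complementary, quantitative check on the value of k suggested by the Elbow curve, the average silhouette score can be computed for a few candidate values. The cell below is a minimal sketch: it assumes scikit-learn's `silhouette_score` and reuses the feature matrix `X2` built above (a higher average silhouette indicates better-separated clusters).
###Code
# Sketch: average silhouette score for a few candidate values of k.
from sklearn.metrics import silhouette_score
for k in range(2, 7):
    labels = KMeans(n_clusters=k).fit_predict(X2)
    print(k, round(silhouette_score(X2, labels), 3))
###Output
_____no_output_____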
###Markdown
Initializing and fitting KMeans with 4 clusters
###Code
algorithm = (KMeans(n_clusters = 4)) # the data will be split into 4 clusters (groups)
algorithm.fit(X2)
###Output
_____no_output_____
###Markdown
Visualizing the clusters and centroids
###Code
labels2 = algorithm.labels_
centroids2 = algorithm.cluster_centers_ # each cluster has a centroid (red point in the plot) from which the cluster is built
h = 0.02
x_min, x_max = X2[:, 0].min() - 1, X2[:, 0].max() + 1
y_min, y_max = X2[:, 1].min() - 1, X2[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = algorithm.predict(np.c_[xx.ravel(), yy.ravel()])
plt.figure(1 , figsize = (15 , 7) ) # plot settings
plt.clf()
Z2 = Z.reshape(xx.shape)
plt.imshow(Z2 , interpolation='nearest', extent=(xx.min(), xx.max(), yy.min(), yy.max()), cmap = plt.cm.Pastel2, aspect = 'auto', origin='lower')
plt.scatter( x = 'Annual Income (k$)' ,y = 'Spending Score (1-100)' , data = df , c = labels2 , s = 200 )
plt.scatter(x = centroids2[: , 0] , y = centroids2[: , 1] , s = 300 , c = 'red' , alpha = 0.5)
plt.ylabel('Pontuação de Gastos (1-100)') , plt.xlabel('Renda Anual (k$)')
plt.show()
###Output
_____no_output_____
###Markdown
Analyzing the clustered data
###Code
df["clusters"] = algorithm.labels_
df.head() # show the first 5 rows
###Output
_____no_output_____
###Markdown
Descriptive analysis of the clusters
###Code
df_group = df.drop(["CustomerID","Age"],axis=1).groupby("clusters") # drop unused columns and group by cluster
df_group.describe()
###Output
_____no_output_____ |
week-4-ridge-regression/ridge-regression-gd.ipynb | ###Markdown
Regression Week 4: Ridge Regression (gradient descent) In this notebook, you will implement ridge regression via gradient descent. You will:* Convert an SFrame into a Numpy array* Write a Numpy function to compute the derivative of the regression weights with respect to a single feature* Write gradient descent function to compute the regression weights given an initial weight vector, step size, tolerance, and L2 penalty Fire up graphlab create Make sure you have the latest version of GraphLab Create (>= 1.7)
###Code
import graphlab
###Output
_____no_output_____
###Markdown
Load in house sales dataDataset is from house sales in King County, the region where the city of Seattle, WA is located.
###Code
sales = graphlab.SFrame('kc_house_data.gl/')
###Output
[INFO] 1449114987 : INFO: (initialize_globals_from_environment:282): Setting configuration variable GRAPHLAB_FILEIO_ALTERNATIVE_SSL_CERT_FILE to C:\Users\linghao\AppData\Local\Dato\Dato Launcher\lib\site-packages\certifi\cacert.pem
1449114987 : INFO: (initialize_globals_from_environment:282): Setting configuration variable GRAPHLAB_FILEIO_ALTERNATIVE_SSL_CERT_DIR to
This non-commercial license of GraphLab Create is assigned to [email protected] and will expire on September 21, 2016. For commercial licensing options, visit https://dato.com/buy/.
[INFO] Start server at: ipc:///tmp/graphlab_server-12884 - Server binary: C:\Users\linghao\AppData\Local\Dato\Dato Launcher\lib\site-packages\graphlab\unity_server.exe - Server log: C:\Users\linghao\AppData\Local\Temp\graphlab_server_1449114987.log.0
[INFO] GraphLab Server Version: 1.7.1
###Markdown
If we want to do any "feature engineering" like creating new features or adjusting existing ones we should do this directly using the SFrames as seen in the first notebook of Week 2. For this notebook, however, we will work with the existing features. Import useful functions from previous notebook As in Week 2, we convert the SFrame into a 2D Numpy array. Copy and paste `get_numpy_data()` from the second notebook of Week 2.
###Code
import numpy as np # note this allows us to refer to numpy as np instead
def get_numpy_data(data_sframe, features, output):
data_sframe['constant'] = 1 # this is how you add a constant column to an SFrame
# add the column 'constant' to the front of the features list so that we can extract it along with the others:
features = ['constant'] + features # this is how you combine two lists
# select the columns of data_SFrame given by the features list into the SFrame features_sframe (now including constant):
features_sframe = data_sframe[features]
# the following line will convert the features_SFrame into a numpy matrix:
feature_matrix = features_sframe.to_numpy()
# assign the column of data_sframe associated with the output to the SArray output_sarray
output_sarray = data_sframe[output]
# the following will convert the SArray into a numpy array by first converting it to a list
output_array = output_sarray.to_numpy()
return(feature_matrix, output_array)
###Output
_____no_output_____
###Markdown
Also, copy and paste the `predict_output()` function to compute the predictions for an entire matrix of features given the matrix and the weights:
###Code
def predict_output(feature_matrix, weights):
    # assume feature_matrix is a numpy matrix containing the features as columns and weights is a corresponding numpy array
    # create the predictions vector with a single matrix-vector product using np.dot()
    predictions = np.dot(feature_matrix, weights)
    return(predictions)
###Output
_____no_output_____
###Markdown
Computing the Derivative We are now going to move to computing the derivative of the regression cost function. Recall that the cost function is the sum over the data points of the squared difference between an observed output and a predicted output, plus the L2 penalty term.```Cost(w)= SUM[ (prediction - output)^2 ]+ l2_penalty*(w[0]^2 + w[1]^2 + ... + w[k]^2).```Since the derivative of a sum is the sum of the derivatives, we can take the derivative of the first part (the RSS) as we did in the notebook for the unregularized case in Week 2 and add the derivative of the regularization part. As we saw, the derivative of the RSS with respect to `w[i]` can be written as: ```2*SUM[ error*[feature_i] ].```The derivative of the regularization term with respect to `w[i]` is:```2*l2_penalty*w[i].```Summing both, we get```2*SUM[ error*[feature_i] ] + 2*l2_penalty*w[i].```That is, the derivative for the weight for feature i is the sum (over data points) of 2 times the product of the error and the feature itself, plus `2*l2_penalty*w[i]`. **We will not regularize the constant.** Thus, in the case of the constant, the derivative is just twice the sum of the errors (without the `2*l2_penalty*w[0]` term). Recall that twice the sum of the product of two vectors is just twice the dot product of the two vectors. Therefore the derivative for the weight for feature_i is just two times the dot product between the values of feature_i and the current errors, plus `2*l2_penalty*w[i]`. With this in mind, complete the following derivative function which computes the derivative of the weight given the value of the feature (over all data points) and the errors (over all data points). To decide when we are dealing with the constant (so we don't regularize it) we added the extra parameter to the call `feature_is_constant` which you should set to `True` when computing the derivative of the constant and `False` otherwise.
###Code
def feature_derivative_ridge(errors, feature, weight, l2_penalty, feature_is_constant):
# If feature_is_constant is True, derivative is twice the dot product of errors and feature
if feature_is_constant:
derivative = 2 * np.dot(errors, feature)
# Otherwise, derivative is twice the dot product plus 2*l2_penalty*weight
else:
derivative = 2 * np.dot(errors, feature) + 2 * l2_penalty * weight
return derivative
###Output
_____no_output_____
###Markdown
To test your feature derivative, run the following:
###Code
(example_features, example_output) = get_numpy_data(sales, ['sqft_living'], 'price')
my_weights = np.array([1., 10.])
test_predictions = predict_output(example_features, my_weights)
errors = test_predictions - example_output # prediction errors
# next two lines should print the same values
print feature_derivative_ridge(errors, example_features[:,1], my_weights[1], 1, False)
print np.sum(errors*example_features[:,1])*2+20.
print ''
# next two lines should print the same values
print feature_derivative_ridge(errors, example_features[:,0], my_weights[0], 1, True)
print np.sum(errors)*2.
###Output
-5.65541667824e+13
-5.65541667824e+13
-22446749336.0
-22446749336.0
###Markdown
Gradient Descent Now we will write a function that performs gradient descent. The basic premise is simple. Given a starting point we update the current weights by moving in the negative gradient direction. Recall that the gradient is the direction of *increase* and therefore the negative gradient is the direction of *decrease* and we're trying to *minimize* a cost function. The amount by which we move in the negative gradient *direction* is called the 'step size'. We stop when we are 'sufficiently close' to the optimum. Unlike in Week 2, this time we will set a **maximum number of iterations** and take gradient steps until we reach this maximum number. If no maximum number is supplied, the maximum should be set to 100 by default. (Use default parameter values in Python.) With this in mind, complete the following gradient descent function below using your derivative function above. For each step in the gradient descent, we update the weight for each feature before computing our stopping criteria.
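Putting the derivative formulas above together, each iteration applies the update $w_j \leftarrow w_j - \eta\left(2\sum_i \mathrm{error}_i\,[\mathrm{feature}_j]_i + 2\cdot \mathtt{l2\_penalty}\cdot w_j\right)$ to every regularized weight, and $w_0 \leftarrow w_0 - \eta\cdot 2\sum_i \mathrm{error}_i$ to the unregularized constant, where $\eta$ is the step size.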
###Code
def ridge_regression_gradient_descent(feature_matrix, output, initial_weights, step_size, l2_penalty, max_iterations=100):
weights = np.array(initial_weights) # make sure it's a numpy array
#while not reached maximum number of iterations:
for _iter in range(max_iterations):
# compute the predictions based on feature_matrix and weights using your predict_output() function
predictions = predict_output(feature_matrix, weights)
# compute the errors as predictions - output
errors = predictions - output
for i in xrange(len(weights)): # loop over each weight
# Recall that feature_matrix[:,i] is the feature column associated with weights[i]
# compute the derivative for weight[i].
#(Remember: when i=0, you are computing the derivative of the constant!)
derivative = feature_derivative_ridge(errors, feature_matrix[:,i], weights[i], l2_penalty, bool(i == 0))
# subtract the step size times the derivative from the current weight
weights[i] -= step_size * derivative
return weights
###Output
_____no_output_____
###Markdown
Visualizing effect of L2 penalty The L2 penalty gets its name because it causes weights to have smaller L2 norms than they otherwise would. Let's see how large weights get penalized. Let us consider a simple model with 1 feature:
###Code
simple_features = ['sqft_living']
my_output = 'price'
###Output
_____no_output_____
###Markdown
Let us split the dataset into training set and test set. Make sure to use `seed=0`:
###Code
train_data,test_data = sales.random_split(.8,seed=0)
###Output
_____no_output_____
###Markdown
In this part, we will only use `'sqft_living'` to predict `'price'`. Use the `get_numpy_data` function to get a Numpy versions of your data with only this feature, for both the `train_data` and the `test_data`.
###Code
(simple_feature_matrix, output) = get_numpy_data(train_data, simple_features, my_output)
(simple_test_feature_matrix, test_output) = get_numpy_data(test_data, simple_features, my_output)
###Output
_____no_output_____
###Markdown
Let's set the parameters for our optimization:
###Code
initial_weights = np.array([0., 0.])
step_size = 1e-12
max_iterations=1000
###Output
_____no_output_____
###Markdown
First, let's consider no regularization. Set the `l2_penalty` to `0.0` and run your ridge regression algorithm to learn the weights of your model. Call your weights:`simple_weights_0_penalty`we'll use them later.
###Code
simple_weights_0_penalty = ridge_regression_gradient_descent(simple_feature_matrix, output, initial_weights, step_size, 0, max_iterations)
###Output
_____no_output_____
###Markdown
Next, let's consider high regularization. Set the `l2_penalty` to `1e11` and run your ridge regression algorithm to learn the weights of your model. Call your weights:`simple_weights_high_penalty`we'll use them later.
###Code
simple_weights_high_penalty = ridge_regression_gradient_descent(simple_feature_matrix, output, initial_weights, step_size, 1e11, max_iterations)
###Output
_____no_output_____
###Markdown
This code will plot the two learned models. (The blue line is for the model with no regularization and the red line is for the one with high regularization.)
###Code
import matplotlib.pyplot as plt
%matplotlib inline
plt.plot(simple_feature_matrix,output,'k.',
simple_feature_matrix,predict_output(simple_feature_matrix, simple_weights_0_penalty),'b-',
simple_feature_matrix,predict_output(simple_feature_matrix, simple_weights_high_penalty),'r-')
###Output
_____no_output_____
###Markdown
Compute the RSS on the TEST data for the following three sets of weights:1. The initial weights (all zeros)2. The weights learned with no regularization3. The weights learned with high regularizationWhich weights perform best?
###Code
predictions_1 = predict_output(simple_test_feature_matrix, initial_weights)
residuals_1 = [(predictions_1[i] - test_output[i]) ** 2 for i in range(len(predictions_1))]
print sum(residuals_1)
predictions_2 = predict_output(simple_test_feature_matrix, simple_weights_0_penalty)
residuals_2 = [(predictions_2[i] - test_output[i]) ** 2 for i in range(len(predictions_2))]
print sum(residuals_2)
predictions_3 = predict_output(simple_test_feature_matrix, simple_weights_high_penalty)
residuals_3 = [(predictions_3[i] - test_output[i]) ** 2 for i in range(len(predictions_3))]
print sum(residuals_3)
###Output
6.94642100914e+14
###Markdown
***QUIZ QUESTIONS***1. What is the value of the coefficient for `sqft_living` that you learned with no regularization, rounded to 1 decimal place? What about the one with high regularization?2. Comparing the lines you fit with the with no regularization versus high regularization, which one is steeper?3. What are the RSS on the test data for each of the set of weights above (initial, no regularization, high regularization)?
###Code
simple_weights_0_penalty
simple_weights_high_penalty
###Output
_____no_output_____
###Markdown
Running a multiple regression with L2 penalty Let us now consider a model with 2 features: `['sqft_living', 'sqft_living15']`. First, create Numpy versions of your training and test data with these two features.
###Code
model_features = ['sqft_living', 'sqft_living15'] # sqft_living15 is the average squarefeet for the nearest 15 neighbors.
my_output = 'price'
(feature_matrix, train_output) = get_numpy_data(train_data, model_features, my_output)
(test_feature_matrix, test_output) = get_numpy_data(test_data, model_features, my_output)
###Output
_____no_output_____
###Markdown
We need to re-inialize the weights, since we have one extra parameter. Let us also set the step size and maximum number of iterations.
###Code
initial_weights = np.array([0.0,0.0,0.0])
step_size = 1e-12
max_iterations = 1000
###Output
_____no_output_____
###Markdown
First, let's consider no regularization. Set the `l2_penalty` to `0.0` and run your ridge regression algorithm to learn the weights of your model. Call your weights:`multiple_weights_0_penalty`
###Code
multiple_weights_0_penalty = ridge_regression_gradient_descent(feature_matrix, train_output, initial_weights, step_size, 0, max_iterations)
###Output
_____no_output_____
###Markdown
Next, let's consider high regularization. Set the `l2_penalty` to `1e11` and run your ridge regression algorithm to learn the weights of your model. Call your weights:`multiple_weights_high_penalty`
###Code
multiple_weights_high_penalty = ridge_regression_gradient_descent(feature_matrix, train_output, initial_weights, step_size, 1e11, max_iterations)
###Output
_____no_output_____
###Markdown
Compute the RSS on the TEST data for the following three sets of weights:1. The initial weights (all zeros)2. The weights learned with no regularization3. The weights learned with high regularizationWhich weights perform best?
###Code
predictions_4 = predict_output(test_feature_matrix, initial_weights)
residuals_4 = [(predictions_4[i] - test_output[i]) ** 2 for i in range(len(predictions_4))]
print sum(residuals_4)
predictions_5 = predict_output(test_feature_matrix, multiple_weights_0_penalty)
residuals_5 = [(predictions_5[i] - test_output[i]) ** 2 for i in range(len(predictions_5))]
print sum(residuals_5)
predictions_6 = predict_output(test_feature_matrix, multiple_weights_high_penalty)
residuals_6 = [(predictions_6[i] - test_output[i]) ** 2 for i in range(len(predictions_6))]
print sum(residuals_6)
###Output
5.0040480058e+14
###Markdown
Predict the house price for the 1st house in the test set using the no regularization and high regularization models. (Remember that python starts indexing from 0.) How far is the prediction from the actual price? Which weights perform best for the 1st house?
###Code
first = test_data[0]
a, b, c= multiple_weights_0_penalty
p_0 = a + b * first['sqft_living'] + c * first['sqft_living15']
print p_0
d, e, f = multiple_weights_high_penalty
p_high = d + e * first['sqft_living'] + f * first['sqft_living15']
print p_high
first['price']
###Output
_____no_output_____
###Markdown
***QUIZ QUESTIONS***1. What is the value of the coefficient for `sqft_living` that you learned with no regularization, rounded to 1 decimal place? What about the one with high regularization?2. What are the RSS on the test data for each of the set of weights above (initial, no regularization, high regularization)? 3. We make prediction for the first house in the test set using two sets of weights (no regularization vs high regularization). Which weights make better prediction for that particular house?
###Code
multiple_weights_0_penalty
multiple_weights_high_penalty
###Output
_____no_output_____ |
freshman/matplotlib.ipynb | ###Markdown
Matplotlib Tutorial 楊家融 Radiologist Programmer
###Code
import numpy as np
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
Figure & Axes
###Code
fig = plt.figure()
ax = fig.add_subplot(111)
# Simple syntex
fig,ax = plt.subplots(1,1)
fig = plt.figure()
ax1 = fig.add_subplot(211)
ax2 = fig.add_subplot(212)
fig, axs = plt.subplots(2,1)
ax1 = axs[0]
ax2 = axs[1]
fig = plt.figure()
ax1 = fig.add_subplot(211)
ax2 = fig.add_subplot(212)
ax3 = fig.add_subplot(335)
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax4 = fig.add_axes([0.5, 0.5, 0.2, 0.2])
fig = plt.figure()
ax1 = fig.add_subplot(211)
ax2 = fig.add_subplot(212)
ax1.plot(np.random.rand(10))
fig = plt.figure()
ax1 = fig.add_subplot(211)
ax2 = fig.add_subplot(212)
ax2.set_xlim(-1, 12)
ax2.set_xlabel('sec')
ax2.set_ylabel('signal')
ax2.grid(True)
ax2.set_xticks(range(10))
ax2.plot(np.random.rand(10))
###Output
_____no_output_____
###Markdown
Another way (only for simple plots)
###Code
plt.plot(np.random.rand(10))
plt.title('Demo')
plt.xlim(-1, 12)
###Output
_____no_output_____
###Markdown
Colormap
###Code
data = np.random.rand(10, 10)
data = np.sort(data, axis=0)
data = np.sort(data, axis=1)
fig, ax = plt.subplots()
ax.pcolor(data)
fig, ax = plt.subplots()
ax.pcolor(data, cmap='plasma')
###Output
_____no_output_____
###Markdown
Using context manager as figure template
###Code
from contextlib import contextmanager
@contextmanager
def myplot(fig, subplot, size=5):
ax = fig.add_subplot(subplot)
ax.set_xlim(-1, 12)
ax.set_ylim(0, 1)
ax.set_xlabel('sec')
ax.set_ylabel('signal')
ax.grid(True)
ax.set_xticks(range(10))
# Plot the data
yield ax
title = ax.get_title()
ax.set_title(title)
fig = plt.figure(figsize=(12, 3))
fig.suptitle('Demo', fontsize=15)
blue = np.random.rand(10) * 0.7 + 0.1
orange = np.random.rand(10) * 0.3 + 0.6
with myplot(fig, 121) as ax:
ax.plot(blue, color='blue')
ax.set_title('blue')
with myplot(fig, 122) as ax:
ax.plot(blue, color='blue')
ax.plot(orange, color='orange')
ax.set_title('blue & orange')
with myplot(fig, 223) as ax:
ax.plot(orange, color='orange')
ax.set_title('orange')
###Output
_____no_output_____ |
writeups/reviews/2017-01-29.ipynb | ###Markdown
Parameter ranges
###Code
ggplot(results, aes(x=cov, y=var)) +
geom_point() +
scale_x_log10() +
scale_y_log10() +
theme_bw()
###Output
_____no_output_____
###Markdown
PI: Performance across $\pi$ at both 8x and 32x
###Code
pi.dat = results %>%
filter(cov==32 | cov==8) %>%
select(rho, metric, var, cov, seed) %>%
mutate(cov.f = as.factor(cov),
var.f = as.factor(var),
seed = as.factor(seed))
pi.dat.summ = pi.dat %>%
group_by(cov, metric, var) %>%
summarise(rho_av=mean(rho),
rho_sd=sd(rho),
rho_med=median(rho),
rho_25=quantile(rho, p=c(1/4)),
rho_75=quantile(rho, p=c(3/4))) %>%
mutate(cov.f = as.factor(cov),
var.f = as.factor(var))
str(pi.dat)
summary(pi.dat)
str(pi.dat.summ)
summary(pi.dat.summ)
p = ggplot(pi.dat, aes(x=var.f, y=rho, fill=metric)) +
geom_boxplot(aes(fill=metric)) +
xlab(expression(paste('Mean pairwise nucleotide divergence (', pi, ')'))) +
ylab(expression(paste("Spearman's ", rho, " +- SD"))) +
facet_wrap(~cov.f ) +
ylim(0, 1) +
theme_classic() +
theme(axis.text.x=element_text(angle = 45, hjust = 1, vjust=1),
legend.position = "bottom")
pdf("plots/pi_both_box.pdf", width=7, height = 3)
print(p)
dev.off()
svg("plots/pi_both_box.svg", width=7, height = 3)
print(p)
dev.off()
# print(p)
p = ggplot(pi.dat.summ, aes(x=var, y=rho_med)) +
geom_line(aes(linetype=metric)) +
geom_ribbon(aes(fill=metric, ymin=rho_25, ymax=rho_75), alpha=0.2) +
scale_x_log10() +
facet_wrap(~cov) +
xlab(expression(paste('Mean pairwise nucleotide divergence (', pi, ')'))) +
ylab(expression(paste("spearman's ", rho))) +
ylim(0, 1) +
theme_bw()
pdf("plots/pi_both_quartline.pdf", width=7, height = 3)
print(p)
dev.off()
svg("plots/pi_both_quartline.svg", width=7, height = 3)
print(p)
dev.off()
# print(p)
p = ggplot(filter(pi.dat, cov==8), aes(x=var.f, y=rho, fill=metric)) +
geom_boxplot(aes(fill=metric)) +
xlab(expression(paste('Mean pairwise nucleotide divergence (', pi, ')'))) +
ylab(expression(paste("Spearman's ", rho, " +- SD"))) +
ylim(0, 1) +
theme_bw() +
theme(axis.text.x=element_text(angle = 45, hjust = 1, vjust=1))
pdf("plots/pi_8x_box.pdf", width=4, height = 3)
print(p)
dev.off()
svg("plots/pi_8x_box.svg", width=4, height = 3)
print(p)
dev.off()
# print(p)
p = ggplot(filter(pi.dat.summ, cov==8), aes(x=var, y=rho_av, fill=metric)) +
geom_line(aes(linetype=metric)) +
geom_ribbon(aes(fill=metric, ymin=rho_av - rho_sd, ymax=rho_av + rho_sd), alpha=0.2) +
xlab(expression(paste('Mean pairwise nucleotide divergence (', pi, ')'))) +
ylab(expression(paste("Spearman's ", rho, " +- SD"))) +
ylim(0, 1) +
scale_x_log10() +
theme_bw()# +
#theme(axis.text.x=element_text(angle = 45, hjust = 1, vjust=1))
pdf("plots/pi_8x_avgsd.pdf", width=6, height = 3)
print(p)
dev.off()
svg("plots/pi_8x_avgsd.svg", width=6, height = 3)
print(p)
dev.off()
# print(p)
###Output
_____no_output_____
###Markdown
Coverage
###Code
cov.dat = results %>%
filter(var %in% c(0.002, 0.005, 0.01)) %>%
select(rho, metric, cov, var, seed) %>%
mutate(cov.f = as.factor(cov),
var.f = as.factor(var))
cov.dat.summ = cov.dat %>%
group_by(cov, metric, var) %>%
summarise(rho_av=mean(rho),
rho_sd=sd(rho),
rho_med=median(rho),
rho_25=quantile(rho, p=c(1/4)),
rho_75=quantile(rho, p=c(3/4))) %>%
mutate(cov.f = as.factor(cov),
var.f = as.factor(var))
p = ggplot(cov.dat, aes(x=cov.f, y=rho, fill=metric)) +
geom_boxplot(aes(fill=metric)) +
scale_fill_discrete(guide = guide_legend(title = "Metric")) +
xlab(expression(paste('Mean sequencing depth'))) +
ylab(expression(paste("Spearman's ", rho, " +- SD"))) +
facet_wrap(~var.f ) +
ylim(0, 1) +
theme_classic() +
theme(legend.position = "bottom")
pdf("plots/cov_all_box.pdf", width=7, height = 3)
print(p)
dev.off()
# print(p)
p = ggplot(filter(cov.dat, var==0.002), aes(x=cov.f, y=rho, fill=metric)) +
geom_boxplot(aes(fill=metric)) +
scale_fill_discrete(guide = guide_legend(title = "Metric")) +
xlab(expression(paste('Mean sequencing depth'))) +
ylab(expression(paste("Spearman's ", rho, " +- SD"))) +
ylim(0, 1) +
theme_classic() +
theme(axis.text.x=element_text(angle = 45, hjust = 1, vjust=1)) +
theme(legend.position = "bottom")
pdf("plots/cov_500_box.pdf", width=7, height = 3)
print(p)
dev.off()
# print(p)
p = ggplot(cov.dat.summ, aes(x=cov, y=rho_med)) +
geom_line(aes(linetype=metric)) +
geom_ribbon(aes(fill=metric, ymin=rho_25, ymax=rho_75), alpha=0.2) +
scale_x_log10() +
facet_wrap(~var) +
xlab(expression(paste('Mean sequencing depth'))) +
ylab(expression(paste("Spearman's ", rho))) +
ylim(0, 1) +
theme_bw()
pdf("plots/cov_all_avgsd.pdf", width=7, height = 3)
print(p)
dev.off()
# print(p)
p = ggplot(filter(cov.dat.summ, var==0.002), aes(x=cov, y=rho_av)) +
geom_line(aes(linetype=metric)) +
geom_ribbon(aes(fill=metric, ymin=rho_av-rho_sd, ymax=rho_av+rho_sd), alpha=0.2) +
scale_x_log10() +
xlab(expression(paste('Mean sequencing depth'))) +
ylab(expression(paste("Spearman's ", rho, " +- SD"))) +
ylim(0, 1) +
theme_bw()
pdf("plots/cov_500_avgsd.pdf", width=6, height = 3)
print(p)
dev.off()
svg("plots/cov_500_avgsd.svg", width=6, height = 3)
print(p)
dev.off()
# print(p)
###Output
_____no_output_____ |
all-data/census_data.ipynb | ###Markdown
Week 1: Census Data Analysis Notes About the PDB- Data is from the Census Planning Database (PDB) (full dataset is downloadable as a .csv).- The PDB contains data from both the 2010 decennial census and the 2010-2014 American Community Survey (ACS). Since the purpose of the ACS is to measure changing social and economic characteristics of the population, we primarily refer to ACS variables in this analysis.- PDB data is at the census tract or block group (which is more granular) level.- Variable names are explained here: https://api.census.gov/data/2016/pdb/blockgroup/variables.html, https://api.census.gov/data/2016/pdb/tract/variables.html Getting geographic information- Locations are given as State/County/Tract/BG codes. In order to interpret these as longitude/latitude coordinates, we need a mapping from block group/census tract to geography.- Census tract to longitude/latitude coordinates are available in the Census Tracts Gazetteer file (https://www.census.gov/geo/maps-data/data/gazetteer2017.html). This is what we use in this preliminary analysis.- Mappings from block group can be accessed by opening the relevant shapefiles in ArcGIS (http://gif.berkeley.edu/resources/arcgis_education_edition.html). - About Shapefiles: https://www2.census.gov/geo/pdfs/maps-data/data/tiger/tgrshp2017/TGRSHP2017_TechDoc_Ch2.pdf
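For reference, a tract-level GEOID concatenates the 2-digit state FIPS code, the 3-digit county FIPS code, and the 6-digit tract code; the short sketch below illustrates how the integer GEOID range used later for Alameda County (state 06, county 001) is formed.
###Code
# Sketch: tract GEOID = state FIPS (2 digits) + county FIPS (3 digits) + tract code (6 digits).
# For Alameda County (state 06, county 001) this yields the half-open range [6001000000, 6002000000).
state_fips, county_fips = 6, 1
geoid_min = int("{}{:03d}{:06d}".format(state_fips, county_fips, 0))
geoid_max = int("{}{:03d}{:06d}".format(state_fips, county_fips + 1, 0))
print(geoid_min, geoid_max)
###Output
_____no_output_____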
###Code
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
%pylab inline
import numpy as np
# Census Planning Database - Block Group
# full_pdb16_bg_df = pd.read_csv("raw-data/pdb2016_bg_v8_us.csv", encoding="ISO-8859-1")
# Census Planning Database - Census Tract
full_pdb16_tr_df = pd.read_csv("raw-data/pdb2016_tr_v8_us.csv", encoding="ISO-8859-1")
###Output
_____no_output_____
###Markdown
Alameda County
###Code
def df_for_county(county_name):
return full_pdb16_tr_df.loc[full_pdb16_tr_df['County_name'] == county_name]
alameda_tr_df = df_for_county("Alameda County")
# Importing longitude/latitude mappings
gaz_tracts_df = pd.read_csv("raw-data/2017_gaz_tracts_06.csv", encoding="ISO-8859-1")
def map_lat_long_geoid(geoid_min, geoid_max, df):
# Adds latitude and longitude columns to the dataframe.
# Modifies df in place.
lat_long_df = gaz_tracts_df[gaz_tracts_df['GEOID'] >= geoid_min]
lat_long_df = lat_long_df[lat_long_df['GEOID'] < geoid_max]
lat_long_df = lat_long_df[['GEOID', 'INTPTLAT', 'INTPTLONG ']]
num_tracts = len(lat_long_df) - 1
gidtr_lat, gidtr_long = {}, {}
for i in range(num_tracts):
geoid, lat, long = lat_long_df.iloc[i][0], lat_long_df.iloc[i][1], lat_long_df.iloc[i][2]
gidtr_lat[geoid], gidtr_long[geoid] = lat, long
df['Latitude'] = df['GIDTR'].map(gidtr_lat)
df['Longitude'] = df['GIDTR'].map(gidtr_long)
map_lat_long_geoid(6001000000, 6002000000, alameda_tr_df)
alameda_tr_df
# Some preliminary datasets
gender_alameda_tr_df = alameda_tr_df[['Latitude', 'Longitude', 'LAND_AREA', 'Tot_Population_ACS_10_14', 'pct_Females_ACS_10_14']]
ethnicity_alameda_tr_df = alameda_tr_df[['Latitude', 'Longitude', 'LAND_AREA', 'Tot_Population_ACS_10_14', 'NH_AIAN_alone_ACS_10_14', 'NH_Asian_alone_ACS_10_14', 'NH_Blk_alone_ACS_10_14', 'NH_NHOPI_alone_ACS_10_14', 'NH_SOR_alone_ACS_10_14', 'NH_White_alone_ACS_10_14']]
health_ins_alameda_tr_df = alameda_tr_df[['Latitude', 'Longitude', 'LAND_AREA', 'Tot_Population_ACS_10_14', 'No_Health_Ins_ACS_10_14', 'pct_No_Health_Ins_ACS_10_14', 'One_Health_Ins_ACS_10_14', 'pct_One_Health_Ins_ACS_10_14', 'pct_TwoPHealthIns_ACS_10_14', 'Two_Plus_Health_Ins_ACS_10_14']]
income_alameda_tr_df = alameda_tr_df[['Latitude', 'Longitude', 'LAND_AREA', 'Tot_Population_ACS_10_14', 'Med_HHD_Inc_ACS_10_14', 'Prs_Blw_Pov_Lev_ACS_10_14', 'PUB_ASST_INC_ACS_10_14']]
income_alameda_tr_df
gender_alameda_tr_df.to_csv('datasets/alameda/gender_alameda_tr.csv')
ethnicity_alameda_tr_df.to_csv('datasets/alameda/ethnicity_alameda_tr.csv')
health_ins_alameda_tr_df.to_csv('datasets/alameda/health_ins_alameda_tr.csv')
income_alameda_tr_df.to_csv('datasets/alameda/income_alameda_tr.csv')
###Output
_____no_output_____
###Markdown
Other Bay Area Counties
###Code
sanfrancisco_tr_df = df_for_county("San Francisco County")
map_lat_long_geoid(6075000000, 6076000000, sanfrancisco_tr_df)
gender_sanfrancisco_tr_df = sanfrancisco_tr_df[['Latitude', 'Longitude', 'LAND_AREA', 'Tot_Population_ACS_10_14', 'pct_Females_ACS_10_14']]
ethnicity_sanfrancisco_tr_df = sanfrancisco_tr_df[['Latitude', 'Longitude', 'LAND_AREA', 'Tot_Population_ACS_10_14', 'NH_AIAN_alone_ACS_10_14', 'NH_Asian_alone_ACS_10_14', 'NH_Blk_alone_ACS_10_14', 'NH_NHOPI_alone_ACS_10_14', 'NH_SOR_alone_ACS_10_14', 'NH_White_alone_ACS_10_14']]
health_ins_sanfrancisco_tr_df = sanfrancisco_tr_df[['Latitude', 'Longitude', 'LAND_AREA', 'Tot_Population_ACS_10_14', 'No_Health_Ins_ACS_10_14', 'pct_No_Health_Ins_ACS_10_14', 'One_Health_Ins_ACS_10_14', 'pct_One_Health_Ins_ACS_10_14', 'pct_TwoPHealthIns_ACS_10_14', 'Two_Plus_Health_Ins_ACS_10_14']]
income_sanfrancisco_tr_df = sanfrancisco_tr_df[['Latitude', 'Longitude', 'LAND_AREA', 'Tot_Population_ACS_10_14', 'Med_HHD_Inc_ACS_10_14', 'Prs_Blw_Pov_Lev_ACS_10_14', 'PUB_ASST_INC_ACS_10_14']]
gender_sanfrancisco_tr_df.to_csv('datasets/san-francisco/gender_sanfrancisco_tr.csv')
ethnicity_sanfrancisco_tr_df.to_csv('datasets/san-francisco/ethnicity_sanfrancisco_tr.csv')
health_ins_sanfrancisco_tr_df.to_csv('datasets/san-francisco/health_ins_sanfrancisco_tr.csv')
income_sanfrancisco_tr_df.to_csv('datasets/san-francisco/income_sanfrancisco_tr.csv')
sanmateo_tr_df = df_for_county("San Mateo County")
map_lat_long_geoid(6081000000, 6082000000, sanmateo_tr_df)
gender_sanmateo_tr_df = sanmateo_tr_df[['Latitude', 'Longitude', 'LAND_AREA', 'Tot_Population_ACS_10_14', 'pct_Females_ACS_10_14']]
ethnicity_sanmateo_tr_df = sanmateo_tr_df[['Latitude', 'Longitude', 'LAND_AREA', 'Tot_Population_ACS_10_14', 'NH_AIAN_alone_ACS_10_14', 'NH_Asian_alone_ACS_10_14', 'NH_Blk_alone_ACS_10_14', 'NH_NHOPI_alone_ACS_10_14', 'NH_SOR_alone_ACS_10_14', 'NH_White_alone_ACS_10_14']]
health_ins_sanmateo_tr_df = sanmateo_tr_df[['Latitude', 'Longitude', 'LAND_AREA', 'Tot_Population_ACS_10_14', 'No_Health_Ins_ACS_10_14', 'pct_No_Health_Ins_ACS_10_14', 'One_Health_Ins_ACS_10_14', 'pct_One_Health_Ins_ACS_10_14', 'pct_TwoPHealthIns_ACS_10_14', 'Two_Plus_Health_Ins_ACS_10_14']]
income_sanmateo_tr_df = sanmateo_tr_df[['Latitude', 'Longitude', 'LAND_AREA', 'Tot_Population_ACS_10_14', 'Med_HHD_Inc_ACS_10_14', 'Prs_Blw_Pov_Lev_ACS_10_14', 'PUB_ASST_INC_ACS_10_14']]
gender_sanmateo_tr_df.to_csv('datasets/san-mateo/gender_sanmateo_tr.csv')
ethnicity_sanmateo_tr_df.to_csv('datasets/san-mateo/ethnicity_sanmateo_tr.csv')
health_ins_sanmateo_tr_df.to_csv('datasets/san-mateo/health_ins_sanmateo_tr.csv')
income_sanmateo_tr_df.to_csv('datasets/san-mateo/income_sanmateo_tr.csv')
santaclara_tr_df = df_for_county("Santa Clara County")
map_lat_long_geoid(6085000000, 6086000000, santaclara_tr_df)
gender_santaclara_tr_df = santaclara_tr_df[['Latitude', 'Longitude', 'LAND_AREA', 'Tot_Population_ACS_10_14', 'pct_Females_ACS_10_14']]
ethnicity_santaclara_tr_df = santaclara_tr_df[['Latitude', 'Longitude', 'LAND_AREA', 'Tot_Population_ACS_10_14', 'NH_AIAN_alone_ACS_10_14', 'NH_Asian_alone_ACS_10_14', 'NH_Blk_alone_ACS_10_14', 'NH_NHOPI_alone_ACS_10_14', 'NH_SOR_alone_ACS_10_14', 'NH_White_alone_ACS_10_14']]
health_ins_santaclara_tr_df = santaclara_tr_df[['Latitude', 'Longitude', 'LAND_AREA', 'Tot_Population_ACS_10_14', 'No_Health_Ins_ACS_10_14', 'pct_No_Health_Ins_ACS_10_14', 'One_Health_Ins_ACS_10_14', 'pct_One_Health_Ins_ACS_10_14', 'pct_TwoPHealthIns_ACS_10_14', 'Two_Plus_Health_Ins_ACS_10_14']]
income_santaclara_tr_df = santaclara_tr_df[['Latitude', 'Longitude', 'LAND_AREA', 'Tot_Population_ACS_10_14', 'Med_HHD_Inc_ACS_10_14', 'Prs_Blw_Pov_Lev_ACS_10_14', 'PUB_ASST_INC_ACS_10_14']]
gender_santaclara_tr_df.to_csv('datasets/santa-clara/gender_santaclara_tr.csv')
ethnicity_santaclara_tr_df.to_csv('datasets/santa-clara/ethnicity_santaclara_tr.csv')
health_ins_santaclara_tr_df.to_csv('datasets/santa-clara/health_ins_santaclara_tr.csv')
income_santaclara_tr_df.to_csv('datasets/santa-clara/income_santaclara_tr.csv')
def write_datasets_for_county(county_name, dir_path):
# Gets the data for the county, maps the latitude/longitude coordinates,
# and writes the relevant datasets.
df = df_for_county(county_name)
state, county = df['State'].iloc[0], df['County'].iloc[0]
gidtr_min = int(str(state) + str(county).zfill(3) + '000000')
gidtr_max = int(str(state) + str(county + 1).zfill(3) + '000000')
map_lat_long_geoid(gidtr_min, gidtr_max, df)
gender = df[['Latitude', 'Longitude', 'LAND_AREA', 'Tot_Population_ACS_10_14', 'pct_Females_ACS_10_14']]
ethnicity = df[['Latitude', 'Longitude', 'LAND_AREA', 'Tot_Population_ACS_10_14', 'NH_AIAN_alone_ACS_10_14', 'NH_Asian_alone_ACS_10_14', 'NH_Blk_alone_ACS_10_14', 'NH_NHOPI_alone_ACS_10_14', 'NH_SOR_alone_ACS_10_14', 'NH_White_alone_ACS_10_14']]
health_ins = df[['Latitude', 'Longitude', 'LAND_AREA', 'Tot_Population_ACS_10_14', 'No_Health_Ins_ACS_10_14', 'pct_No_Health_Ins_ACS_10_14', 'One_Health_Ins_ACS_10_14', 'pct_One_Health_Ins_ACS_10_14', 'pct_TwoPHealthIns_ACS_10_14', 'Two_Plus_Health_Ins_ACS_10_14']]
income = df[['Latitude', 'Longitude', 'LAND_AREA', 'Tot_Population_ACS_10_14', 'Med_HHD_Inc_ACS_10_14', 'Prs_Blw_Pov_Lev_ACS_10_14', 'PUB_ASST_INC_ACS_10_14']]
gender.to_csv(dir_path + "gender_" + county_name.lower().replace(" ", "") + "_tr.csv")
ethnicity.to_csv(dir_path + "ethnicity_" + county_name.lower().replace(" ", "") + "_tr.csv")
health_ins.to_csv(dir_path + "health_ins_" + county_name.lower().replace(" ", "") + "_tr.csv")
income.to_csv(dir_path + "income_" + county_name.lower().replace(" ", "") + "_tr.csv")
write_datasets_for_county("Marin County", "datasets/marin/")
write_datasets_for_county("Contra Costa County", "datasets/contra-costa/")
write_datasets_for_county("Napa County", "datasets/napa/")
write_datasets_for_county("Sonoma County", "datasets/sonoma/")
write_datasets_for_county("Solano County", "datasets/solano/")
###Output
/Users/Helen/anaconda3/lib/python3.4/site-packages/IPython/kernel/__main__.py:17: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead
See the the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
/Users/Helen/anaconda3/lib/python3.4/site-packages/IPython/kernel/__main__.py:18: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead
See the the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
###Markdown
Pre-Process Census Data for Unknowns Contra Costa Ethnicity
###Code
pd.read_csv('census-datasets/contra-costa/ethnicity_contracostacounty_tr.csv')
set(pd.read_csv('census-datasets/alameda/poverty_level_alameda_tr_split.csv')['Variable'])
###Output
_____no_output_____
###Markdown
Health Ins
###Code
pd.read_csv('census-datasets/alameda/income_alameda_tr.csv')
set(pd.read_csv('census-datasets/alameda/poverty_level_alameda_tr_split.csv')['Variable'])
health_ins_binarized = pd.read_csv('census-datasets/alameda/health_ins_alameda_tr_split_binarized.csv')
health_ins_binarized[(health_ins_binarized['variable'] == 'One_Plus_Health_Ins')\
& (np.abs(health_ins_binarized['Latitude'] - 37.867) < 5e-4)]
health_ins = pd.read_csv('census-datasets/alameda/health_ins_alameda_tr.csv')
health_ins.shape
health_ins[np.abs(health_ins['Latitude'] - 37.867) < 5e-4]
health_ins_binarized.iloc[493]['Latitude'] - health_ins_binarized.iloc[855]['Latitude']
health_ins_binarized.shape
health_ins_binarized.iloc[3]
health_ins_binarized.iloc[[3, 363, 723, 1083]]
###Output
_____no_output_____
###Markdown
Add Two Health Ins to One Health Ins
###Code
health_ins_cleaned_df = pd.DataFrame.copy(health_ins_binarized.iloc[:720])
vals_to_add = np.zeros(shape=len(health_ins_cleaned_df,))
vals_to_add.shape
# add one and two health ins populations together for each tract
for i in np.arange(720, 1080):
vals_to_add[i - 360] = health_ins_binarized.iloc[i]['value']
health_ins_cleaned_df['value'] = health_ins_cleaned_df['value'] + vals_to_add
###Output
_____no_output_____
###Markdown
Split the Unknowns
###Code
unknown_pop_vals = np.array(health_ins_binarized.iloc[1080:]['value'])
vals_to_add = np.zeros(shape=len(health_ins_cleaned_df, ))
vals_to_add.shape
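# Split each tract's unknown-coverage count between its "no insurance" row (index i)
# and its "one-plus insurance" row (index i + 360), in proportion to the known counts.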
for i in range(360):
no_ins_pop = health_ins_cleaned_df.iloc[i]['value']
one_ins_pop = health_ins_cleaned_df.iloc[i + 360]['value']
frac_no_ins = no_ins_pop / (no_ins_pop + one_ins_pop)
frac_with_ins = 1.0 - frac_no_ins
vals_to_add[i] = np.round(frac_no_ins * unknown_pop_vals[i], decimals=0)
vals_to_add[360 + i] = np.round(frac_with_ins * unknown_pop_vals[i], decimals=0)
for i in range(360):
print(vals_to_add[i] + vals_to_add[360 + i] - unknown_pop_vals[i])
health_ins_cleaned_df['value'] = health_ins_cleaned_df['value'] + vals_to_add
health_ins_cleaned_df.shape
health_ins.shape
health_ins_cleaned_df.head()
health_ins_cleaned_df.to_csv('census-datasets/alameda/health_ins_binarized_unknown_removed.csv')
###Output
_____no_output_____
###Markdown
Race
###Code
race_split_df = pd.read_csv('census-datasets/alameda/ethnicity_alameda_tr_racial_split.csv')
race_split_df.head()
set(race_split_df['Variable'].values)
vals_to_add = np.zeros(shape=len(race_split_df) - 360,)
unknown_pop_vals = np.array(race_split_df['Value'].iloc[2160:].values)
unknown_pop_vals.shape
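# Allocate each tract's unknown-race count across the six known race categories
# in proportion to that tract's existing counts (a tract's category rows are 360 apart).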
for i in range(360):
indices = i + np.arange(0, 6) * 360
pop_values = np.array(race_split_df.iloc[indices]['Value'].values)
pop_fractions = pop_values / np.sum(pop_values)
to_add = np.round(pop_fractions * unknown_pop_vals[i])
for relative_index, true_index in enumerate(indices):
vals_to_add[true_index] = to_add[relative_index]
vals_to_add.shape
race_split_df_clean = pd.DataFrame.copy(race_split_df.iloc[:2160])
race_split_df_clean['Value'] = race_split_df_clean['Value'] + vals_to_add
race_split_df_clean['Value'] - race_split_df.iloc[:2160]['Value']
race_split_df_clean.to_csv('census-datasets/alameda/ethnicity_alameda_tr_split_unknown_removed.csv')
vals_to_add
np.arange(0, 6) * 360 + 359
5 + np.arange(0, 7) * 360
###Output
_____no_output_____ |
financial_models/Gold_Linear_Logistic_Regression_5.5+.ipynb | ###Markdown
Linear Regression
###Code
X = df_quake_gold[['dates', 'Mag', 'Lat', 'Long', 'Depth']]
y = df_quake_gold['Appr_Day_30']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.25, random_state=42)
print("Original shape:", X.shape, "\n")
print("X_train shape:", X_train.shape)
print("X_test shape:", X_test.shape)
print("y_train shape:", y_train.shape)
print("y_test shape:", y_test.shape)
model = LinearRegression()
linear_reg = model.fit(X_train, y_train)
lin_reg_score = linear_reg.score(X_train, y_train)
beta_0 = model.intercept_
beta_i = model.coef_[0]
print("Slope Coefficient: ", beta_i)
print("\nIntercept Value: ", beta_0)
print("\nCoefficients:")
for i in range(X.shape[1]):
print(X.columns[i], '\t', model.coef_[i])
y_test_predict = model.predict(X_test)
RMSE = np.sqrt(mean_squared_error(y_test, y_test_predict))
R2= r2_score(y_test, y_test_predict)
print("For Gold, Incident Mag >= 5.5 ({} incidents)".format(df_quake_gold.shape[0]))
print("Linear Regression Model score:", lin_reg_score)
print('\nLinear Regression Model Predictive Accuracy:')
print('RMSE is {}'.format(RMSE))
print('R^2 is {}'.format(R2))
###Output
For Gold, Incident Mag >= 5.5 (23510 incidents)
Linear Regression Model score: 0.0028840265748341087
Linear Regression Model Predictive Accuracy:
RMSE is 5.604158503260323
R^2 is 0.0009993130243650672
###Markdown
Logistic Regression
###Code
df = df_quake_gold
#encode object columns
object_columns = list(df.select_dtypes(include=['object']))
df[object_columns] = df[object_columns].apply(LabelEncoder().fit_transform)
print(df.info())
y = df['Appr_Day_30'].astype(str)
X = df[['dates', 'Mag', 'Lat', 'Long', 'Depth', 'magType', 'Place', 'Type', 'locationSource', 'magSource']]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
X_train.shape, X_test.shape, y_train.shape, y_test.shape
X_train.sample()
%%time
log_reg = LogisticRegression(multi_class='ovr',
solver='liblinear',
max_iter=100)
log_reg_fit = log_reg.fit(X_train, y_train)
log_reg
print("For Gold, Incident Mag >= 5.5 ({} incidents)".format(df_quake_gold.shape[0]))
print("Logistic Regression Model score:", log_reg_fit.score(X_train, y_train))
predictions = log_reg.predict(X_test)
print("Logistic Regression prediction accuracy:", accuracy_score(y_test, predictions))
log_reg.coef_[0]
###Output
_____no_output_____ |
notebooks/GateSynthesis.ipynb | ###Markdown
Quantum gate synthesisThis notebook works through the process used to produce the gate synthesis results presented in [*"Machine learning method for state preparation and gate synthesis on photonic quantum computers"*](https://iopscience.iop.org/article/10.1088/2058-9565/aaf59e/pdf). We use the continuous-variable (CV) quantum optical circuit package [Strawberry Fields](https://github.com/XanaduAI/strawberryfields), and in particular its TensorFlow backend, to perform quantum circuit optimization. By leveraging Tensorflow, we have access to a number of additional functionalities, including GPU integration, automatic gradient computation, built-in optimization algorithms, and other machine learning tools. Variational quantum circuits A key element of machine learning is optimization. We can use Tensorflow’s automatic differentiation tools to optimize the parameters of variational quantum circuits constructed using Strawberry Fields. In this approach, we fix a circuit architecture where the states, gates, and/or measurements may have learnable parameters $\vec{\theta}$ associated with them. We then define a loss function based on the output state of this circuit. In this case, we define a loss function such that the action of the variational quantum circuit is close to some specified target unitary. For more details on the TensorFlow backend in Strawberry Fields, please see the [Strawberry Fields documentation](http://strawberryfields.readthedocs.io/en/stable/tutorials/tutorial_machine_learning.html). For arbitrary gate synthesis using optimization, we need to make use of a quantum circuit with a layer structure that is **universal** - that is, by 'stacking' the layers, we can guarantee that we can produce *any* CV state with at-most polynomial overhead. Therefore, the architecture we choose must consist of layers with each layer containing parameterized Gaussian *and* non-Gaussian gates. **The non-Gaussian gates provide both the nonlinearity and the universality of the model.** To this end, we employ the CV quantum neural network architecture described below. Here,* $\mathcal{U}_i(\theta_i,\phi_i)$ is an N-mode linear optical interferometer composed of two-mode beamsplitters $BS(\theta,\phi)$ and single-mode rotation gates $R(\phi)=e^{i\phi\hat{n}}$,* $\mathcal{D}(\alpha_i)$ are single mode displacements in the phase space by complex value $\alpha_i$,* $\mathcal{S}(r_i, \phi_i)$ are single mode squeezing operations of magnitude $r_i$ and phase $\phi_i$, and* $\Phi(\lambda_i)$ is a single mode non-Gaussian operation, in this case chosen to be the Kerr interaction $\mathcal{K}(\kappa_i)=e^{i\kappa_i\hat{n}^2}$ of strength $\kappa_i$. Reference: Killoran, N., Bromley, T. R., Arrazola, J. M., Schuld, M., Quesada, N., & Lloyd, S. (2018). "Continuous-variable quantum neural networks." arXiv:1806.06871. Hyperparameters First, we must define the **hyperparameters** of our layer structure:* `cutoff`: the simulation Fock space truncation we will use in the optimization. The TensorFlow backend will perform numerical operations in this truncated Fock space when performing the optimization.* `depth`: The number of layer ansatz in our variational quantum circuit.
As a general rule, increasing the number of layers (and thus, the number of parameters we are optimizing over) increases the optimizer's chance of finding a reasonable local minimum in the optimization landscape.* `reps`: the number of steps in the optimization routine performing gradient descent. Some other optional hyperparameters include:* The standard deviation of initial parameters. Note that we make a distinction between the standard deviation of *passive* parameters (those that preserve photon number when changed, such as phase parameters), and *active* parameters (those that introduce or remove energy from the system when changed).
###Code
# Cutoff dimension
cutoff = 10
# gate cutoff
gate_cutoff = 4
# Number of layers
depth = 25
# Number of steps in optimization routine performing gradient descent
reps = 1000
# Standard deviation of initial parameters
passive_sd = 0.1
active_sd = 0.001
###Output
_____no_output_____
###Markdown
Note that, unlike in state learning, we must also specify a *gate cutoff* $d$. This restricts the target unitary to its action on a $d$-dimensional subspace of the truncated Fock space, where $d\leq D$ and $D$ is the overall simulation Fock basis cutoff. As a result, we restrict the gate synthesis optimization to only $d$ input-output relations.
The layer parameters $\vec{\theta}$
We use TensorFlow to create the variables corresponding to the gate parameters. Note that each variable has shape `[depth]`, with each individual element representing the gate parameter in layer $i$.
###Code
import tensorflow as tf
# squeeze gate
sq_r = tf.Variable(tf.random_normal(shape=[depth], stddev=active_sd))
sq_phi = tf.Variable(tf.random_normal(shape=[depth], stddev=passive_sd))
# displacement gate
d_r = tf.Variable(tf.random_normal(shape=[depth], stddev=active_sd))
d_phi = tf.Variable(tf.random_normal(shape=[depth], stddev=passive_sd))
# rotation gates
r1 = tf.Variable(tf.random_normal(shape=[depth], stddev=passive_sd))
r2 = tf.Variable(tf.random_normal(shape=[depth], stddev=passive_sd))
# kerr gate
kappa = tf.Variable(tf.random_normal(shape=[depth], stddev=active_sd))
###Output
_____no_output_____
###Markdown
For convenience, we store the TensorFlow variables representing the parameters in a list:
###Code
params = [r1, sq_r, sq_phi, r2, d_r, d_phi, kappa]
###Output
_____no_output_____
###Markdown
Now, we can create a function to define the $i$th layer, acting on qumode `q`. This allows us to simply call this function in a loop later on when we build our circuit.
###Code
# layer architecture
def layer(i, q):
Rgate(r1[i]) | q
Sgate(sq_r[i], sq_phi[i]) | q
Rgate(r2[i]) | q
Dgate(d_r[i], d_phi[i]) | q
Kgate(kappa[i]) | q
return q
###Output
_____no_output_____
###Markdown
Constructing the circuit
Now that we have defined our gate parameters and our layer structure, we can import Strawberry Fields and construct our variational quantum circuit. Note that, to ensure the TensorFlow backend computes the circuit symbolically, we specify `eval=False`.
###Code
import numpy as np
import strawberryfields as sf
from strawberryfields.ops import *
###Output
_____no_output_____
###Markdown
We must also specify the input states to the variational quantum circuit - these are the Fock states $\left|i\right\rangle$, $i=0,\dots,d-1$, allowing us to optimize the circuit parameters to learn the target unitary acting on all input Fock states within the $d$-dimensional subspace.
###Code
in_ket = np.zeros([gate_cutoff, cutoff])
np.fill_diagonal(in_ket, 1)
# Start SF Program
prog = sf.Program(1)
# Apply circuit of layers with corresponding depth
with prog.context as q:
Ket(in_ket) | q
for k in range(depth):
layer(k, q[0])
# Run engine
eng = sf.Engine("tf", backend_options={"cutoff_dim": cutoff, "batch_size": gate_cutoff})
state = eng.run(prog, run_options={"eval": False}).state
ket = state.ket()
###Output
_____no_output_____
###Markdown
Here, we use the `batch_size` argument to perform the optimization in parallel - each batch calculates the variational quantum circuit acting on a different input Fock state: $U(\vec{\theta})\left|{n}\right\rangle$. Note that the output state vector is an unevaluated tensor:
###Code
ket
###Output
_____no_output_____
###Markdown
Performing the optimization
$\newcommand{\ket}[1]{\left|#1\right\rangle}$ With the Strawberry Fields TensorFlow backend calculating the resulting state of the circuit symbolically, we can use TensorFlow to optimize the gate parameters to minimize the cost function we specify. With gate synthesis, we minimize the overlaps in the Fock basis between the target and learnt unitaries via the following cost function:
$$C(\vec{\theta}) = \sum_{i=0}^{d-1} \left| \langle i \mid V^\dagger U(\vec{\theta})\mid i\rangle - 1\right|,$$
where $V$ is the target unitary, $U(\vec{\theta})$ is the learnt unitary, and $d$ is the gate cutoff. (This matches the `cost` tensor defined below, which sums $|\mathrm{overlap}_i - 1|$ over the $d$ input states.) Note that this is a generalization of state preparation to more than one input-output relation. For our target unitary, let's use Strawberry Fields to generate a 4x4 random unitary:
###Code
from strawberryfields.utils import random_interferometer
target_unitary = np.identity(cutoff, dtype=np.complex128)
target_unitary[:gate_cutoff, :gate_cutoff] = random_interferometer(4)
###Output
_____no_output_____
###Markdown
This matches the gate cutoff of $d=4$ that we chose above when defining our hyperparameters. Using this target unitary, we calculate the cost function we would like to minimize. We must use TensorFlow functions to manipulate this data, as we are working with symbolic variables!
###Code
in_state = np.arange(gate_cutoff)
# extract action of the target unitary acting on
# the allowed input fock states.
target_kets = np.array([target_unitary[:, i] for i in in_state])
target_kets = tf.constant(target_kets, dtype=tf.complex64)
# overlaps
overlaps = tf.real(tf.einsum('bi,bi->b', tf.conj(target_kets), ket))
mean_overlap = tf.reduce_mean(overlaps)
# cost
cost = tf.reduce_sum(tf.abs(overlaps - 1))
###Output
_____no_output_____
###Markdown
Now that the cost function is defined, we can define and run the optimization. Below, we choose the Adam optimizer that is built into TensorFlow.
###Code
# Using Adam algorithm for optimization
optimiser = tf.train.AdamOptimizer()
min_cost = optimiser.minimize(cost)
# Begin Tensorflow session
session = tf.Session()
session.run(tf.global_variables_initializer())
###Output
_____no_output_____
###Markdown
We then loop over all repetitions, storing the cost and overlap values at each step so we can track how the optimization progresses.
###Code
overlap_progress = []
cost_progress = []
# Run optimization
for i in range(reps):
    # one repetition of the optimization
_, cost_val, overlaps_val, ket_val, params_val = session.run(
[min_cost, cost, overlaps, ket, params])
# calculate the mean overlap
# This gives us an idea of how the optimization is progressing
mean_overlap_val = np.mean(overlaps_val)
# store cost at each step
cost_progress.append(cost_val)
overlap_progress.append(overlaps_val)
# Prints progress at every 100 reps
if i % 100 == 0:
# print progress
print("Rep: {} Cost: {:.4f} Mean overlap: {:.4f}".format(i, cost_val, mean_overlap_val))
###Output
Rep: 0 Cost: 2.5749 Mean overlap: 0.3563
Rep: 100 Cost: 0.5427 Mean overlap: 0.8643
Rep: 200 Cost: 0.1412 Mean overlap: 0.9647
Rep: 300 Cost: 0.0594 Mean overlap: 0.9851
Rep: 400 Cost: 0.0360 Mean overlap: 0.9910
Rep: 500 Cost: 0.0235 Mean overlap: 0.9941
Rep: 600 Cost: 0.0168 Mean overlap: 0.9958
Rep: 700 Cost: 0.0117 Mean overlap: 0.9971
Rep: 800 Cost: 0.0079 Mean overlap: 0.9980
Rep: 900 Cost: 0.0050 Mean overlap: 0.9988
###Markdown
Results and visualisation Plotting the cost vs. optimization step:
###Code
from matplotlib import pyplot as plt
%matplotlib inline
plt.rcParams['font.family'] = 'serif'
plt.rcParams['font.sans-serif'] = ['Computer Modern Roman']
plt.style.use('default')
plt.plot(cost_progress)
plt.ylabel('Cost')
plt.xlabel('Step');
###Output
_____no_output_____
###Markdown
We can use matrix plots to plot the real and imaginary components of the target and learnt unitary.
###Code
learnt_unitary = ket_val.T[:gate_cutoff, :gate_cutoff]
target_unitary = target_unitary[:gate_cutoff, :gate_cutoff]
fig, ax = plt.subplots(1, 4, figsize=(7, 4))
ax[0].matshow(target_unitary.real, cmap=plt.get_cmap('Reds'))
ax[1].matshow(target_unitary.imag, cmap=plt.get_cmap('Greens'))
ax[2].matshow(learnt_unitary.real, cmap=plt.get_cmap('Reds'))
ax[3].matshow(learnt_unitary.imag, cmap=plt.get_cmap('Greens'))
ax[0].set_xlabel(r'$\mathrm{Re}(V)$')
ax[1].set_xlabel(r'$\mathrm{Im}(V)$')
ax[2].set_xlabel(r'$\mathrm{Re}(U)$')
ax[3].set_xlabel(r'$\mathrm{Im}(U)$');
###Output
_____no_output_____
###Markdown
Process fidelity
The process fidelity between the two unitaries is defined by
$$F_e = \left| \left\langle \Psi(V) \mid \Psi(U)\right\rangle\right|^2$$
where:
* $\left|\Psi(V)\right\rangle$ is the action of $V$ on one half of a maximally entangled state $\left|\phi\right\rangle$: $\left|\Psi(V)\right\rangle = (I\otimes V)\left|\phi\right\rangle$,
* $V$ is the target unitary,
* $U$ the learnt unitary.
###Code
I = np.identity(gate_cutoff)
phi = I.flatten()/np.sqrt(gate_cutoff)
psiV = np.kron(I, target_unitary) @ phi
psiU = np.kron(I, learnt_unitary) @ phi
np.abs(np.vdot(psiV, psiU))**2
###Output
_____no_output_____ |
Term1/Behavioral-Cloning/Model.ipynb | ###Markdown
Loading
###Code
from pandas import read_csv
import numpy as np
measurments = read_csv('data/driving_log.csv', usecols=[3]).values
C = measurments          # steering angles recorded with the center camera
L = measurments + 0.2    # left-camera frames: apply a fixed positive steering correction
R = measurments - 0.2    # right-camera frames: apply a fixed negative steering correction
#measurments = np.concatenate((C, L, R, -C, -L, -R), axis=0)
images_C = read_csv('data/driving_log.csv', usecols=[0]).values
images_L = read_csv('data/driving_log.csv', usecols=[1]).values
images_R = read_csv('data/driving_log.csv', usecols=[2]).values
#images_path = np.concatenate((images_C, images_L, images_R), axis=0)
%matplotlib inline
import matplotlib.pyplot as plt
from matplotlib.image import imread
fig = plt.figure(figsize=(20,10))
a=fig.add_subplot(1,3,1)
a.set_title('Left')
left = imread(images_L[0,0])
plt.imshow(left)
a=fig.add_subplot(1,3,2)
a.set_title('Center')
center = imread(images_C[0,0])
plt.imshow(center)
a=fig.add_subplot(1,3,3)
a.set_title('Right')
right = imread(images_R[0,0])
plt.imshow(right)
from sklearn.utils import shuffle
from matplotlib.image import imread
def genImages(idx, size):
    """Load rows [idx, idx+size) from the center/left/right cameras and augment by horizontal flipping.

    Flipped copies are paired with negated steering angles, so each chunk yields 6*size samples.
    """
    images = []
    for path in images_C[idx:idx+size]:
        fname = 'data/IMG/'+ path[0].split('/')[-1]
        images.append(imread(fname))
    for path in images_L[idx:idx+size]:
        fname = 'data/IMG/'+ path[0].split('/')[-1]
        images.append(imread(fname))
    for path in images_R[idx:idx+size]:
        fname = 'data/IMG/'+ path[0].split('/')[-1]
        images.append(imread(fname))
    images = np.array(images)
    images = np.concatenate((images, np.fliplr(images)), axis=0)
    measurments = np.concatenate((C[idx:idx+size], L[idx:idx+size], R[idx:idx+size],
                                 -C[idx:idx+size], -L[idx:idx+size], -R[idx:idx+size]), axis=0)
    X_train, Y_train = shuffle(images, measurments)
    return X_train, Y_train
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, Lambda, Cropping2D, MaxPooling2D
def model():
    # Convolutional regression network (similar to the NVIDIA end-to-end driving architecture)
    # that maps a camera frame to a single steering angle.
    model = Sequential()
    model.add(Lambda(lambda x: x/127.5 - 1., input_shape=(160, 320, 3)))  # normalize pixels to [-1, 1]
    model.add(Cropping2D(cropping=((70, 25), (0, 0))))                    # crop sky (top) and car hood (bottom)
    model.add(Conv2D(24, (5, 5), strides=(2, 2), activation='relu'))
    model.add(Conv2D(36, (5, 5), strides=(2, 2), activation='relu'))
    model.add(Conv2D(48, (5, 5), strides=(2, 2), activation='relu'))
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(Flatten())
    model.add(Dense(100, activation='relu'))
    model.add(Dropout(0.5))          # dropout between dense layers to limit overfitting
    model.add(Dense(50, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(10, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(1))              # single output: the steering angle
    model.compile('adam', "mse")     # Adam optimizer with mean-squared-error loss
    return model
from keras.callbacks import ReduceLROnPlateau, EarlyStopping, TensorBoard
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2,
patience=5, min_lr=0.001)
earlyStopping = EarlyStopping(monitor='val_loss', patience=10, verbose=0)
tensorBoard = TensorBoard(log_dir='./logs', histogram_freq=0, write_graph=True, write_images=True)
EPOCHS = 1
BATCH_SIZE = 600
model = model()
size = 2500
for i in range(10):
print('='*51)
print('='*24, i+1, '='*24)
print('='*51)
for idx in range(10):
X_train, Y_train = genImages(idx*size, size)
model.fit(X_train, Y_train, batch_size=BATCH_SIZE, epochs=EPOCHS, shuffle=True, validation_split=0.2)
model.save('model.h5')
###Output
_____no_output_____ |
docs/debug_notebooks/comparing beta for sesame, mprep and minfi (with default processing options)_2022-01-06.ipynb | ###Markdown
beta values: methylprep vs sesame
###Code
(mprep_ses_beta - ses_beta).hist(bins=200, range=[-1.0,1.0], figsize=(8,4))
plt.savefig(Path(FIGURES,'paper-methylprep-vs-sesame.png'), dpi=300, facecolor='w')
plt.grid(color='lightgray')# , linestyle='dotted') #, linewidth=0.7)
plt.show()
###Output
_____no_output_____
###Markdown
beta values: methylprep vs minfi
###Code
(mprep_minfi_beta - minfi_beta).hist(bins=200, range=[-1.0,1.0], figsize=(8,4))
plt.savefig(Path(FIGURES,'paper-methylprep-vs-minfi.png'), dpi=300, facecolor='w')
plt.show()
###Output
_____no_output_____
###Markdown
beta values: sesame vs minfi
###Code
(ses_beta - minfi_beta.sort_index()).hist(bins=200, range=[-1.0,1.0], figsize=(8,4))
plt.savefig(Path(FIGURES,'paper-sesame-vs-minfi.png'), dpi=300, facecolor='w')
plt.show()
###Output
_____no_output_____
###Markdown
mprep v160 CSV beta values --vs-- minfi betas
###Code
m_v160_samples = {}
for _csv in Path(m_v160, '3999356047').rglob('*'):
sample_name = '_'.join(Path(_csv).stem.split('_')[:2])
sample = pd.read_csv(_csv).set_index('IlmnID')
m_v160_samples[sample_name] = sample
mprep_s160 = m_v160_samples['3999356047_R01C01']['beta_value']
mprep_s160 = mprep_s160[ ~mprep_s160.index.str.startswith('rs') ]
(mprep_s160 - minfi_beta).hist(bins=200, range=[-1.0,1.0], figsize=(8,4))
plt.show()
###Output
_____no_output_____
###Markdown
mprep v160_ses CSV beta values (using --all) --vs-- sesame betas
###Code
m_v160_ses_samples = {}
for _csv in Path(m_v160_ses, '3999356047').rglob('*'):
sample_name = '_'.join(Path(_csv).stem.split('_')[:2])
sample = pd.read_csv(_csv).set_index('IlmnID')
m_v160_ses_samples[sample_name] = sample
mprep_s160_ses = m_v160_ses_samples['3999356047_R01C01']['beta_value']
mprep_s160_ses = mprep_s160_ses[ ~mprep_s160_ses.index.str.startswith('rs') ]
(mprep_s160_ses - ses_beta).hist(bins=200, range=[-1.0,1.0], figsize=(8,4))
plt.show()
## note that NA probes in sesame output are ignored:
mprep_s160_ses_no_NaN = mprep_s160_ses[ ses_beta.notna()]
(mprep_s160_ses_no_NaN - ses_beta).hist(bins=200, range=[-1.0,1.0], figsize=(8,4))
plt.show()
print(f"mprep 1.6 vs sesame avg {round((mprep_s160_ses_no_NaN - ses_beta).mean(),4)} ({round(100*(mprep_s160_ses_no_NaN - ses_beta).mean(),2)}%) sem {round((mprep_s160_ses_no_NaN - ses_beta).sem(),6)}")
print(f"mprep 1.6 vs minfi avg {round((mprep_s160 - minfi_beta).mean(),4)} ({round(100*(mprep_s160 - minfi_beta).mean(),2)}%) sem {round((mprep_s160 - minfi_beta).sem(),6)}")
print(f"sesame vs minfi {round((ses_beta - minfi_beta).mean(),4)} ({round(100*(ses_beta - minfi_beta).mean(),2)}%) sem {round((ses_beta - minfi_beta).sem(),6)}")
0.000064, 0.000061
###Output
_____no_output_____ |
.ipynb_checkpoints/data_load_example-checkpoint.ipynb | ###Markdown
goog-preemption-data Load data
###Code
import json
with open('data/data.json') as json_file:
all_gcp_data = json.load(json_file)
###Output
_____no_output_____
###Markdown
Simple description of a datapoint
* Each data point is stored under a random unique machine name, which also appears in all_gcp_data['key']['instance_data']['NAME'].
* There are two gcloud compute operation messages inside the all_gcp_data['key'] object:
  * the message generated at the time of instance creation: all_gcp_data['key']['insert'], and
  * the message generated at the time of instance shutdown: all_gcp_data['key']['compute.instances.preempted'] or all_gcp_data['key']['stop'].
* All the instance data such as machine type and zone are located in all_gcp_data['key']['instance_data'].
* The total runtime of an instance can be calculated from the timestamps inside the two operation messages. We have also pre-computed the runtime and placed it in all_gcp_data['key']['time_in_sec'] (a short sketch recomputing it appears after the example record below).
###Code
import pprint
pprint.pprint(all_gcp_data[list(all_gcp_data.keys())[0]])
###Output
{'compute.instances.preempted': {'endTime': '2019-03-25T13:09:53.235-07:00',
'id': '9124290691163523966',
'insertTime': '2019-03-25T13:09:53.235-07:00',
'kind': 'compute#operation',
'name': 'systemevent-1553544593235-584f0c99f5e89-f67cff7e-c0e27d8d',
'operationType': 'compute.instances.preempted',
'progress': 100,
'selfLink': 'https://www.googleapis.com/compute/v1/projects/first-220321/zones/us-central1-c/operations/systemevent-1553544593235-584f0c99f5e89-f67cff7e-c0e27d8d',
'startTime': '2019-03-25T13:09:53.235-07:00',
'status': 'DONE',
'statusMessage': 'Instance was preempted.',
'targetId': '7592324531544691860',
'targetLink': 'https://www.googleapis.com/compute/v1/projects/first-220321/zones/us-central1-c/instances/tjnepf1',
'user': 'system',
'zone': 'https://www.googleapis.com/compute/v1/projects/first-220321/zones/us-central1-c'},
'day_of_week': 'Monday',
'hr_of_day': 13,
'idle_vs_nonidle': 'non-idle',
'insert': {'endTime': '2019-03-25T12:23:13.052-07:00',
'id': '4696199543794209939',
'insertTime': '2019-03-25T12:22:36.614-07:00',
'kind': 'compute#operation',
'name': 'operation-1553541755673-584f0207d9d28-95457698-66cfd314',
'operationType': 'insert',
'progress': 100,
'selfLink': 'https://www.googleapis.com/compute/v1/projects/first-220321/zones/us-central1-c/operations/operation-1553541755673-584f0207d9d28-95457698-66cfd314',
'startTime': '2019-03-25T12:22:36.617-07:00',
'status': 'DONE',
'targetId': '7592324531544691860',
'targetLink': 'https://www.googleapis.com/compute/v1/projects/first-220321/zones/us-central1-c/instances/tjnepf1',
'user': '[email protected]',
'zone': 'https://www.googleapis.com/compute/v1/projects/first-220321/zones/us-central1-c'},
'instance_data': {'MACHINE_TYPE': 'n1-highcpu-16',
'NAME': 'tjnepf1',
'PREEMPTIBLE': 'true',
'ZONE': 'us-central1-c'},
'time_in_sec': 2836.621}
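###Markdown
The `time_in_sec` field described above can be reproduced directly from the two operation messages. The sketch below is illustrative only: the helper name `runtime_seconds` is not part of the dataset, it assumes the `python-dateutil` package is available for parsing the ISO-8601 timestamps, and it assumes a 'stop' message carries the same `insertTime` field as the preemption message shown above.
###Code
from dateutil import parser as dtparser

def runtime_seconds(record):
    # VM creation time comes from the 'insert' operation message
    start = dtparser.parse(record['insert']['insertTime'])
    # Shutdown time comes from either the preemption message or a normal 'stop' message
    end_key = 'compute.instances.preempted' if 'compute.instances.preempted' in record else 'stop'
    end = dtparser.parse(record[end_key]['insertTime'])
    return (end - start).total_seconds()

example_key = list(all_gcp_data.keys())[0]
# For the record printed above this should reproduce the stored 'time_in_sec' value
print(runtime_seconds(all_gcp_data[example_key]), all_gcp_data[example_key]['time_in_sec'])
###Output
_____no_output_____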
###Markdown
Example 01: Data Distribution across the days of the week
###Code
all_dates = [all_gcp_data[instance]['day_of_week'] for instance in all_gcp_data]
#date_names = list(set(all_dates))
date_names = ['Monday','Tuesday','Wednesday', 'Thursday', 'Friday','Saturday','Sunday']
date_freq={a:all_dates.count(a) for a in date_names}
print(date_freq)
%matplotlib inline
import matplotlib.pyplot as plt; plt.rcdefaults()
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('default')
plt.figure(figsize=(8,4))
y_pos = np.arange(len(date_names))
plt.bar(y_pos, date_freq.values(), align='center', alpha=0.8)
plt.xticks(y_pos, date_names)
plt.ylabel('VM count')
plt.title('Data Distribution across the days of the week')
plt.show()
###Output
_____no_output_____
###Markdown
Example 02: Data Distribution across the hours of the day
###Code
all_hrs = [all_gcp_data[instance]['hr_of_day'] for instance in all_gcp_data]
hr_names = list(set(all_hrs))
hr_freq={a:all_hrs.count(a) for a in hr_names}
print(hr_freq)
%matplotlib inline
import matplotlib.pyplot as plt; plt.rcdefaults()
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('default')
plt.figure(figsize=(8,4))
plt.bar(hr_freq.keys(), hr_freq.values(), align='center', alpha=0.8)
#plt.xticks(y_pos, date_names)
plt.ylabel('VM count')
plt.xlabel('0:00h to 24:00h')
plt.title('Data Distribution across the hours of the day')
plt.show()
###Output
_____no_output_____
###Markdown
Example 03: Data Distribution across the different VM types
###Code
all_vm_types = [all_gcp_data[instance]['instance_data']['MACHINE_TYPE'] for instance in all_gcp_data]
vm_names = list(set(all_vm_types))
vm_freq={a:all_vm_types.count(a) for a in vm_names}
print(vm_freq)
%matplotlib inline
import matplotlib.pyplot as plt; plt.rcdefaults()
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('default')
plt.figure(figsize=(12,4))
y_pos = np.arange(len(vm_names))
plt.bar(y_pos, vm_freq.values(), align='center', alpha=0.8)
plt.xticks(y_pos, vm_names, fontsize=7)
plt.ylabel('VM count')
plt.title('Data Distribution across the different VM types')
plt.show()
###Output
_____no_output_____
###Markdown
Example 04: Data Distribution across the different zones
###Code
all_zone_types = [all_gcp_data[instance]['instance_data']['ZONE'] for instance in all_gcp_data]
zone_names = list(set(all_zone_types))
zone_freq={a:all_zone_types.count(a) for a in zone_names}
print(zone_freq)
%matplotlib inline
import matplotlib.pyplot as plt; plt.rcdefaults()
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('default')
plt.figure(figsize=(10,4))
y_pos = np.arange(len(zone_names))
plt.bar(y_pos, zone_freq.values(), align='center', alpha=0.8)
plt.xticks(y_pos, zone_names)
plt.ylabel('VM count')
plt.title('Data Distribution across the different zones')
plt.show()
###Output
_____no_output_____ |
01-data-model/.ipynb_checkpoints/vector2d_soln-checkpoint.ipynb | ###Markdown
2-D Vectors
This notebook contains example code from [*Fluent Python*](http://shop.oreilly.com/product/0636920032519.do), by Luciano Ramalho.
Code by Luciano Ramalho, modified by Allen Downey.
MIT License: https://opensource.org/licenses/MIT
This example demonstrates how a user-defined type can emulate a numeric type by providing special methods. `Vector` represents a 2-D Euclidean vector:
###Code
from math import hypot
class Vector:
def __init__(self, x=0, y=0):
self.x = x
self.y = y
def __repr__(self):
return 'Vector(%r, %r)' % (self.x, self.y)
def __abs__(self):
return hypot(self.x, self.y)
def __bool__(self):
return bool(abs(self))
def __add__(self, other):
x = self.x + other.x
y = self.y + other.y
return Vector(x, y)
def __mul__(self, scalar):
return Vector(self.x * scalar, self.y * scalar)
###Output
_____no_output_____
###Markdown
Because `Vector` provides `__add__`, we can use the `+` operator to add Vectors.
###Code
v1 = Vector(2, 4)
v2 = Vector(2, 1)
v1 + v2
###Output
_____no_output_____
###Markdown
And because it provides `__abs__`, we can use the built-in function `abs`. For Euclidean vectors, the "absolute value" is the magnitude; for 2-D vectors, the magnitude is the hypotenuse of the two components:
###Code
v = Vector(3, 4)
abs(v)
###Output
_____no_output_____
###Markdown
`Vector` provides `__mul__`, so we can use the `*` operator.
###Code
v * 3
###Output
_____no_output_____
###Markdown
But `__mul__` only supports scalar multiplication.**Exercise** What happens if you try to multiply two vectors?
###Code
# Solution
v * v
###Output
_____no_output_____
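###Markdown
As an aside, `__mul__` as written only handles `vector * scalar`; the reflected form `3 * v` raises a `TypeError`, because `int` does not know how to multiply a `Vector` and no `__rmul__` is defined. Here is a minimal sketch of the fix (the subclass name `RVector` is introduced purely for illustration):
###Code
class RVector(Vector):
    def __rmul__(self, scalar):
        # Python falls back to this when int.__mul__ returns NotImplemented
        return self * scalar

3 * RVector(1, 2)
###Output
_____no_output_____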
###Markdown
`Vector` defines `__repr__`, which returns a string representation of the object:
###Code
repr(v)
###Output
_____no_output_____
###Markdown
Because `Vector` does not provide `__str__`, Python uses `__repr__`:
###Code
str(v)
###Output
_____no_output_____
###Markdown
So what's the difference? `str` is meant to return a human-readable representation of the object. `repr` should return a string that can be evaluated to re-create the object.If the same representation can perform both roles, you can just define `__repr__`. `Vector` implements `__bool__`, so it can be used in a context where it has to be converted to `boolean`:
###Code
if v:
print(v)
###Output
_____no_output_____
###Markdown
If the magnitude is 0, the Vector is considered `False`:
###Code
if Vector(0, 0):
print("Won't happen.")
###Output
_____no_output_____
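###Markdown
Returning to the `__repr__`/`__str__` distinction above: if you do want a separate human-readable form, you can define both. A small sketch (the subclass name `LabeledVector` is just for illustration):
###Code
class LabeledVector(Vector):
    def __str__(self):
        # Human-readable form used by print() and str(); repr() still uses Vector.__repr__
        return '(%g, %g)' % (self.x, self.y)

w = LabeledVector(3, 4)
repr(w), str(w)
###Output
_____no_output_____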
###Markdown
**Exercise** Create a class called `SubVector` that extends `Vector` and provides `__sub__`. Test that you can use the `-` operator with `SubVector`.What happens if you subtract a `Vector` from a `SubVector`? How about the other way around?
###Code
# Solution
class SubVector(Vector):
def __sub__(self, other):
x = self.x - other.x
y = self.y - other.y
return SubVector(x, y)
# Solution
v3 = SubVector(5, 6)
v4 = SubVector(7, 8)
v4 - v3
# Solution
v4 - v2
# Solution
v2 - v4
###Output
_____no_output_____ |
Studying Materials/Course 2 Regression/Ridge Regression/week-4-ridge-regression-assignment-1-blank.ipynb | ###Markdown
Regression Week 4: Ridge Regression (interpretation) In this notebook, we will run ridge regression multiple times with different L2 penalties to see which one produces the best fit. We will revisit the example of polynomial regression as a means to see the effect of L2 regularization. In particular, we will:* Use a pre-built implementation of regression (GraphLab Create) to run polynomial regression* Use matplotlib to visualize polynomial regressions* Use a pre-built implementation of regression (GraphLab Create) to run polynomial regression, this time with L2 penalty* Use matplotlib to visualize polynomial regressions under L2 regularization* Choose best L2 penalty using cross-validation.* Assess the final fit using test data.We will continue to use the House data from previous notebooks. (In the next programming assignment for this module, you will implement your own ridge regression learning algorithm using gradient descent.) Fire up graphlab create
###Code
import graphlab
###Output
_____no_output_____
###Markdown
Polynomial regression, revisited We build on the material from Week 3, where we wrote the function to produce an SFrame with columns containing the powers of a given input. Copy and paste the function `polynomial_sframe` from Week 3:
###Code
def polynomial_sframe(feature, degree):
sframe = graphlab.SFrame()
sframe['power_1'] = feature
if degree > 1:
for power in range(2, degree+1):
sframe['power_' + str(power)] = feature.apply(lambda x: x ** power)
return sframe
###Output
_____no_output_____
###Markdown
Let's use matplotlib to visualize what a polynomial regression looks like on the house data.
###Code
import matplotlib.pyplot as plt
%matplotlib inline
sales = graphlab.SFrame('kc_house_data.gl/')
###Output
_____no_output_____
###Markdown
As in Week 3, we will use the sqft_living variable. For plotting purposes (connecting the dots), you'll need to sort by the values of sqft_living. For houses with identical square footage, we break the tie by their prices.
###Code
sales = sales.sort(['sqft_living','price'])
###Output
_____no_output_____
###Markdown
Let us revisit the 15th-order polynomial model using the 'sqft_living' input. Generate polynomial features up to degree 15 using `polynomial_sframe()` and fit a model with these features. When fitting the model, use an L2 penalty of `1e-5`:
###Code
l2_small_penalty = 1e-5
###Output
_____no_output_____
###Markdown
Note: When we have so many features and so few data points, the solution can become highly numerically unstable, which can sometimes lead to strange unpredictable results. Thus, rather than using no regularization, we will introduce a tiny amount of regularization (`l2_penalty=1e-5`) to make the solution numerically stable. (In lecture, we discussed the fact that regularization can also help with numerical stability, and here we are seeing a practical example.)With the L2 penalty specified above, fit the model and print out the learned weights.Hint: make sure to add 'price' column to the new SFrame before calling `graphlab.linear_regression.create()`. Also, make sure GraphLab Create doesn't create its own validation set by using the option `validation_set=None` in this call.
###Code
poly15 = polynomial_sframe(sales['sqft_living'], degree=15)
my_features = poly15.column_names()
poly15['price'] = sales['price']
model15 = graphlab.linear_regression.create(poly15, target='price', l2_penalty=l2_small_penalty,
features=my_features, validation_set=None, verbose=False)
print("Model coefficients with degree = 15 and L2 = 1e-5")
print(model15.coefficients)
###Output
Model coefficients with degree = 15 and L2 = 1e-5
+-------------+-------+--------------------+--------+
| name | index | value | stderr |
+-------------+-------+--------------------+--------+
| (intercept) | None | 167924.857726 | nan |
| power_1 | None | 103.090951289 | nan |
| power_2 | None | 0.13460455096 | nan |
| power_3 | None | -0.000129071363752 | nan |
| power_4 | None | 5.18928955754e-08 | nan |
| power_5 | None | -7.77169299595e-12 | nan |
| power_6 | None | 1.71144842837e-16 | nan |
| power_7 | None | 4.51177958161e-20 | nan |
| power_8 | None | -4.78839816249e-25 | nan |
| power_9 | None | -2.33343499941e-28 | nan |
+-------------+-------+--------------------+--------+
[16 rows x 4 columns]
Note: Only the head of the SFrame is printed.
You can use print_rows(num_rows=m, num_columns=n) to print more rows and columns.
###Markdown
***QUIZ QUESTION: What's the learned value for the coefficient of feature `power_1`?*** 103.09 Observe overfitting Recall from Week 3 that the polynomial fit of degree 15 changed wildly whenever the data changed. In particular, when we split the sales data into four subsets and fit the model of degree 15, the result came out to be very different for each subset. The model had a *high variance*. We will see in a moment that ridge regression reduces such variance. But first, we must reproduce the experiment we did in Week 3. First, split the sales data into four subsets of roughly equal size and call them `set_1`, `set_2`, `set_3`, and `set_4`. Use the `.random_split` function and make sure you set `seed=0`.
###Code
(semi_split1, semi_split2) = sales.random_split(.5,seed=0)
(set_1, set_2) = semi_split1.random_split(0.5, seed=0)
(set_3, set_4) = semi_split2.random_split(0.5, seed=0)
###Output
_____no_output_____
###Markdown
Next, fit a 15th degree polynomial on `set_1`, `set_2`, `set_3`, and `set_4`, using 'sqft_living' to predict prices. Print the weights and make a plot of the resulting model.Hint: When calling `graphlab.linear_regression.create()`, use the same L2 penalty as before (i.e. `l2_small_penalty`). Also, make sure GraphLab Create doesn't create its own validation set by using the option `validation_set = None` in this call.
###Code
def make_plot_model(data, degree, l2_penalty=1e-5, feature='sqft_living'):
sframe = polynomial_sframe(data[feature], degree)
my_features = sframe.column_names()
sframe['price'] = data['price']
model = graphlab.linear_regression.create(sframe, target='price', features=my_features, l2_penalty=l2_penalty,
validation_set=None, verbose=False)
predictions = model.predict(sframe)
plt.plot(sframe['power_1'], sframe['price'], ".",
sframe['power_1'], predictions, "-")
print("Model Coeffients with degree: {}".format(degree))
print(model.coefficients.print_rows(num_rows=16))
return model
set_1_model = make_plot_model(set_1, 15)
set_2_model = make_plot_model(set_2, 15)
set_3_model = make_plot_model(set_3, 15)
set_4_model = make_plot_model(set_4, 15)
###Output
Model Coeffients with degree: 15
+-------------+-------+--------------------+-------------------+
| name | index | value | stderr |
+-------------+-------+--------------------+-------------------+
| (intercept) | None | -170240.034791 | 1417346.17184 |
| power_1 | None | 1247.59035088 | 8978.28059127 |
| power_2 | None | -1.2246091264 | 23.6158213076 |
| power_3 | None | 0.000555254626787 | 0.0340561499439 |
| power_4 | None | -6.38262361929e-08 | 2.98955350115e-05 |
| power_5 | None | -2.20215996475e-11 | 1.65791592065e-08 |
| power_6 | None | 4.81834697594e-15 | 5.63745618764e-12 |
| power_7 | None | 4.2146163248e-19 | 8.27510918329e-16 |
| power_8 | None | -7.99880749051e-23 | nan |
| power_9 | None | -1.32365907706e-26 | nan |
| power_10 | None | 1.60197797139e-31 | 5.0301150238e-27 |
| power_11 | None | 2.39904337326e-34 | 8.33599582107e-31 |
| power_12 | None | 2.33354505765e-38 | nan |
| power_13 | None | -1.79874055895e-42 | nan |
| power_14 | None | -6.02862682894e-46 | 3.25730885866e-43 |
| power_15 | None | 4.39472672531e-50 | 1.2200403476e-47 |
+-------------+-------+--------------------+-------------------+
[16 rows x 4 columns]
None
###Markdown
The four curves should differ from one another a lot, as should the coefficients you learned.***QUIZ QUESTION: For the models learned in each of these training sets, what are the smallest and largest values you learned for the coefficient of feature `power_1`?*** (For the purpose of answering this question, negative numbers are considered "smaller" than positive numbers. So -5 is smaller than -3, and -3 is smaller than 5 and so forth.) Smallest = -753.25 from model 3; Largest = 1247.59 from model 4. Ridge regression comes to the rescue Generally, whenever we see weights change so much in response to change in data, we believe the variance of our estimate to be large. Ridge regression aims to address this issue by penalizing "large" weights. (Weights of `model15` looked quite small, but they are not that small because 'sqft_living' input is in the order of thousands.) With the argument `l2_penalty=1e5`, fit a 15th-order polynomial model on `set_1`, `set_2`, `set_3`, and `set_4`. Other than the change in the `l2_penalty` parameter, the code should be the same as the experiment above. Also, make sure GraphLab Create doesn't create its own validation set by using the option `validation_set = None` in this call.
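For reference, the quantity being minimized when an L2 penalty is added is, in its standard form (GraphLab Create's internal scaling of the penalty term may differ slightly, and the intercept is typically not penalized),
$$\text{RSS}(w) + \lambda\|w\|_2^2 \;=\; \sum_{i=1}^{N}\left(y_i - w^\top h(x_i)\right)^2 + \lambda\sum_{j} w_j^2,$$
where $h(x_i)$ are the polynomial features and $\lambda$ is the `l2_penalty` argument. A larger $\lambda$ shrinks the learned coefficients toward zero, which is why the curves produced by the code below vary far less across the four subsets.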
###Code
set_1_model_large = make_plot_model(set_1, degree=15, l2_penalty=1e5)
set_2_model_large = make_plot_model(set_2, degree=15, l2_penalty=1e5)
set_3_model_large = make_plot_model(set_3, degree=15, l2_penalty=1e5)
set_4_model_large = make_plot_model(set_4, degree=15, l2_penalty=1e5)
###Output
Model Coeffients with degree: 15
+-------------+-------+-------------------+-------------------+
| name | index | value | stderr |
+-------------+-------+-------------------+-------------------+
| (intercept) | None | 513667.087087 | 1874267.58319 |
| power_1 | None | 1.91040938244 | 11872.6819173 |
| power_2 | None | 0.00110058029175 | 31.2290456676 |
| power_3 | None | 3.12753987879e-07 | 0.0450351079477 |
| power_4 | None | 5.50067886825e-11 | 3.95332017452e-05 |
| power_5 | None | 7.20467557825e-15 | 2.19239175825e-08 |
| power_6 | None | 8.24977249384e-19 | 7.45484878293e-12 |
| power_7 | None | 9.06503223498e-23 | 1.09428234243e-15 |
| power_8 | None | 9.95683160453e-27 | nan |
| power_9 | None | 1.10838127982e-30 | nan |
| power_10 | None | 1.25315224143e-34 | 6.65171410918e-27 |
| power_11 | None | 1.43600781402e-38 | 1.10233385827e-30 |
| power_12 | None | 1.662699678e-42 | nan |
| power_13 | None | 1.9398172453e-46 | nan |
| power_14 | None | 2.2754148577e-50 | 4.30739400403e-43 |
| power_15 | None | 2.67948784897e-54 | 1.61335467589e-47 |
+-------------+-------+-------------------+-------------------+
[16 rows x 4 columns]
None
###Markdown
These curves should vary a lot less, now that you applied a high degree of regularization.***QUIZ QUESTION: For the models learned with the high level of regularization in each of these training sets, what are the smallest and largest values you learned for the coefficient of feature `power_1`?*** (For the purpose of answering this question, negative numbers are considered "smaller" than positive numbers. So -5 is smaller than -3, and -3 is smaller than 5 and so forth.) Smallest = 1.91 for model 4; Largest = 2.59 for model 1. Selecting an L2 penalty via cross-validation Just like the polynomial degree, the L2 penalty is a "magic" parameter we need to select. We could use the validation set approach as we did in the last module, but that approach has a major disadvantage: it leaves fewer observations available for training. **Cross-validation** seeks to overcome this issue by using all of the training set in a smart way. We will implement a kind of cross-validation called **k-fold cross-validation**. The method gets its name because it involves dividing the training set into k segments of roughly equal size. Similar to the validation set method, we measure the validation error with one of the segments designated as the validation set. The major difference is that we repeat the process k times as follows:
* Set aside segment 0 as the validation set, fit a model on the rest of the data, and evaluate it on this validation set.
* Set aside segment 1 as the validation set, fit a model on the rest of the data, and evaluate it on this validation set.
* ...
* Set aside segment k-1 as the validation set, fit a model on the rest of the data, and evaluate it on this validation set.
After this process, we compute the average of the k validation errors, and use it as an estimate of the generalization error. Notice that all observations are used for both training and validation, as we iterate over segments of data. To estimate the generalization error well, it is crucial to shuffle the training data before dividing them into segments. GraphLab Create has a utility function for shuffling a given SFrame. We reserve 10% of the data as the test set and shuffle the remainder. (Make sure to use `seed=1` to get a consistent answer.)
###Code
(train_valid, test) = sales.random_split(.9, seed=1)
train_valid_shuffled = graphlab.toolkits.cross_validation.shuffle(train_valid, random_seed=1)
###Output
_____no_output_____
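###Markdown
For reference, the quantity we will use to compare penalty values is the average of the k per-segment validation errors,
$$\text{CV}(\lambda) \;=\; \frac{1}{k}\sum_{j=1}^{k}\text{RSS}_j(\lambda),$$
where $\text{RSS}_j(\lambda)$ is the residual sum of squares on segment $j$ for a model trained on the other $k-1$ segments with L2 penalty $\lambda$. This is exactly what the `k_fold_cross_validation` function implemented below returns.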
###Markdown
Once the data is shuffled, we divide it into equal segments. Each segment should receive `n/k` elements, where `n` is the number of observations in the training set and `k` is the number of segments. Since the segment 0 starts at index 0 and contains `n/k` elements, it ends at index `(n/k)-1`. The segment 1 starts where the segment 0 left off, at index `(n/k)`. With `n/k` elements, the segment 1 ends at index `(n*2/k)-1`. Continuing in this fashion, we deduce that the segment `i` starts at index `(n*i/k)` and ends at `(n*(i+1)/k)-1`. With this pattern in mind, we write a short loop that prints the starting and ending indices of each segment, just to make sure you are getting the splits right.
###Code
n = len(train_valid_shuffled)
k = 10 # 10-fold cross-validation
for i in xrange(k):
start = (n*i)/k
end = (n*(i+1))/k-1
print i, (start, end)
###Output
0 (0, 1938)
1 (1939, 3878)
2 (3879, 5817)
3 (5818, 7757)
4 (7758, 9697)
5 (9698, 11636)
6 (11637, 13576)
7 (13577, 15515)
8 (15516, 17455)
9 (17456, 19395)
###Markdown
Let us familiarize ourselves with array slicing with SFrame. To extract a continuous slice from an SFrame, use a colon in square brackets. For instance, the following cell extracts rows 0 to 9 of `train_valid_shuffled`. Notice that the first index (0) is included in the slice but the last index (10) is omitted.
###Code
train_valid_shuffled[0:10] # rows 0 to 9
###Output
_____no_output_____
###Markdown
Now let us extract individual segments with array slicing. Consider the scenario where we group the houses in the `train_valid_shuffled` dataframe into k=10 segments of roughly equal size, with starting and ending indices computed as above. Extract the fourth segment (segment 3) and assign it to a variable called `validation4`. For reference, the segment boundaries printed above are:
0 (0, 1938); 1 (1939, 3878); 2 (3879, 5817); 3 (5818, 7757); 4 (7758, 9697); 5 (9698, 11636); 6 (11637, 13576); 7 (13577, 15515); 8 (15516, 17455); 9 (17456, 19395)
###Code
validation4 = train_valid_shuffled[5818:7758]
###Output
_____no_output_____
###Markdown
To verify that we have the right elements extracted, run the following cell, which computes the average price of the fourth segment. When rounded to nearest whole number, the average should be $536,234.
###Code
print int(round(validation4['price'].mean(), 0))
###Output
536234
###Markdown
After designating one of the k segments as the validation set, we train a model using the rest of the data. To choose the remainder, we slice (0:start) and (end+1:n) of the data and paste them together. SFrame has `append()` method that pastes together two disjoint sets of rows originating from a common dataset. For instance, the following cell pastes together the first and last two rows of the `train_valid_shuffled` dataframe.
###Code
n = len(train_valid_shuffled)
first_two = train_valid_shuffled[0:2]
last_two = train_valid_shuffled[n-2:n]
print first_two.append(last_two)
###Output
+------------+---------------------------+-----------+----------+-----------+
| id | date | price | bedrooms | bathrooms |
+------------+---------------------------+-----------+----------+-----------+
| 2780400035 | 2014-05-05 00:00:00+00:00 | 665000.0 | 4.0 | 2.5 |
| 1703050500 | 2015-03-21 00:00:00+00:00 | 645000.0 | 3.0 | 2.5 |
| 4139480190 | 2014-09-16 00:00:00+00:00 | 1153000.0 | 3.0 | 3.25 |
| 7237300290 | 2015-03-26 00:00:00+00:00 | 338000.0 | 5.0 | 2.5 |
+------------+---------------------------+-----------+----------+-----------+
+-------------+----------+--------+------------+------+-----------+-------+------------+
| sqft_living | sqft_lot | floors | waterfront | view | condition | grade | sqft_above |
+-------------+----------+--------+------------+------+-----------+-------+------------+
| 2800.0 | 5900 | 1 | 0 | 0 | 3 | 8 | 1660 |
| 2490.0 | 5978 | 2 | 0 | 0 | 3 | 9 | 2490 |
| 3780.0 | 10623 | 1 | 0 | 1 | 3 | 11 | 2650 |
| 2400.0 | 4496 | 2 | 0 | 0 | 3 | 7 | 2400 |
+-------------+----------+--------+------------+------+-----------+-------+------------+
+---------------+----------+--------------+---------+-------------+
| sqft_basement | yr_built | yr_renovated | zipcode | lat |
+---------------+----------+--------------+---------+-------------+
| 1140 | 1963 | 0 | 98115 | 47.68093246 |
| 0 | 2003 | 0 | 98074 | 47.62984888 |
| 1130 | 1999 | 0 | 98006 | 47.55061236 |
| 0 | 2004 | 0 | 98042 | 47.36923712 |
+---------------+----------+--------------+---------+-------------+
+---------------+---------------+-----+
| long | sqft_living15 | ... |
+---------------+---------------+-----+
| -122.28583258 | 2580.0 | ... |
| -122.02177564 | 2710.0 | ... |
| -122.10144844 | 3850.0 | ... |
| -122.12606473 | 1880.0 | ... |
+---------------+---------------+-----+
[4 rows x 21 columns]
###Markdown
Extract the remainder of the data after *excluding* the fourth segment (segment 3) and assign the subset to `train4`. For reference, the segment boundaries are:
0 (0, 1938); 1 (1939, 3878); 2 (3879, 5817); 3 (5818, 7757); 4 (7758, 9697); 5 (9698, 11636); 6 (11637, 13576); 7 (13577, 15515); 8 (15516, 17455); 9 (17456, 19395)
###Code
train4 = train_valid_shuffled[0:5818].append(train_valid_shuffled[7758:])
###Output
_____no_output_____
###Markdown
To verify that we have the right elements extracted, run the following cell, which computes the average price of the data with fourth segment excluded. When rounded to nearest whole number, the average should be $539,450.
###Code
print int(round(train4['price'].mean(), 0))
###Output
539450
###Markdown
Now we are ready to implement k-fold cross-validation. Write a function that computes k validation errors by designating each of the k segments as the validation set. It accepts as parameters (i) `k`, (ii) `l2_penalty`, (iii) dataframe, (iv) name of output column (e.g. `price`) and (v) list of feature names. The function returns the average validation error using k segments as validation sets.* For each i in [0, 1, ..., k-1]: * Compute starting and ending indices of segment i and call 'start' and 'end' * Form validation set by taking a slice (start:end+1) from the data. * Form training set by appending slice (end+1:n) to the end of slice (0:start). * Train a linear model using training set just formed, with a given l2_penalty * Compute validation error using validation set just formed
###Code
import numpy as np

def k_fold_cross_validation(k, l2_penalty, data, output_name, features_list):
    n = len(data)
    validation_scores = []
    for i in xrange(k):
        # Segment i spans rows [start, end); these are the same boundaries printed earlier,
        # so the folds neither overlap nor drop the last few rows.
        start = (n * i) / k
        end = (n * (i + 1)) / k
        validation_set = data[start:end]
        # Training set: everything before the segment plus everything after it
        train_set = data[0:start].append(data[end:n])
        model = graphlab.linear_regression.create(train_set, target=output_name,
                                                  features=features_list, l2_penalty=l2_penalty,
                                                  validation_set=None, verbose=False)
        # Validation error for this fold: RSS on the held-out segment
        predictions = model.predict(validation_set)
        validation_scores.append(((predictions - validation_set[output_name]) ** 2).sum())
    # Average validation error across the k folds
    return sum(validation_scores) / k
###Output
_____no_output_____
###Markdown
Once we have a function to compute the average validation error for a model, we can write a loop to find the model that minimizes the average validation error. Write a loop that does the following:* We will again be aiming to fit a 15th-order polynomial model using the `sqft_living` input* For `l2_penalty` in [10^1, 10^1.5, 10^2, 10^2.5, ..., 10^7] (to get this in Python, you can use this Numpy function: `np.logspace(1, 7, num=13)`.) * Run 10-fold cross-validation with `l2_penalty`* Report which L2 penalty produced the lowest average validation error.Note: since the degree of the polynomial is now fixed to 15, to make things faster, you should generate polynomial features in advance and re-use them throughout the loop. Make sure to use `train_valid_shuffled` when generating polynomial features!
###Code
l2_penalties = np.logspace(1, 7, num=13)
poly_sframe = polynomial_sframe(train_valid_shuffled['sqft_living'], degree=15)
my_features = poly_sframe.column_names()
poly_sframe['price'] = train_valid_shuffled['price']
average_validation_scores = []
for l2_penalty in l2_penalties:
validation_score = k_fold_cross_validation(k=10, l2_penalty=l2_penalty, data=poly_sframe, output_name='price', features_list=my_features)
average_validation_scores.append( (l2_penalty, validation_score))
average_validation_scores = sorted(average_validation_scores, key=lambda x: x[1], reverse=False)
(average_validation_scores)
###Output
_____no_output_____
###Markdown
***QUIZ QUESTIONS: What is the best value for the L2 penalty according to 10-fold validation?***1000 You may find it useful to plot the k-fold cross-validation errors you have obtained to better understand the behavior of the method.
###Code
# Plot the l2_penalty values in the x axis and the cross-validation error in the y axis.
# Using plt.xscale('log') will make your plot more intuitive.
l2_values = [pair[0] for pair in average_validation_scores]
validation_values = [pair[1] for pair in average_validation_scores]
plt.plot(l2_values, validation_values, ".")
plt.xscale('log'); plt.xlabel("L2 penalty"); plt.ylabel("Average Validation Error");
###Output
_____no_output_____
###Markdown
Once you found the best value for the L2 penalty using cross-validation, it is important to retrain a final model on all of the training data using this value of `l2_penalty`. This way, your final model will be trained on the entire dataset.
###Code
poly_sframe = polynomial_sframe(train_valid_shuffled['sqft_living'], degree=15)
my_features = poly_sframe.column_names()
poly_sframe['price'] = train_valid_shuffled['price']
final_model = graphlab.linear_regression.create(poly_sframe, target='price', features=my_features,
l2_penalty=average_validation_scores[0][0], validation_set=None, verbose=True)
###Output
_____no_output_____
###Markdown
***QUIZ QUESTION: Using the best L2 penalty found above, train a model using all training data. What is the RSS on the TEST data of the model you learn with this L2 penalty? ***
###Code
test_poly_sframe = polynomial_sframe(test['sqft_living'], degree=15)
test_predictions = final_model.predict(test_poly_sframe)
RSS = ( (test_predictions - test['price']) ** 2).sum()
print('The final model RSS on the test data is {}'.format(str(RSS)))
###Output
The final model RSS on the test data is 1.28780855058e+14
|
lectures/02-ipython/Beyond Plain Python.ipynb | ###Markdown
IPython: beyond plain Python When executing code in IPython, all valid Python syntax works as-is, but IPython provides a number of features designed to make the interactive experience more fluid and efficient. First things first: running code, getting help In the notebook, to run a cell of code, hit `Shift-Enter`. This executes the cell and puts the cursor in the next cell below, or makes a new one if you are at the end. Alternately, you can use: - `Alt-Enter` to force the creation of a new cell unconditionally (useful when inserting new content in the middle of an existing notebook).- `Control-Enter` executes the cell and keeps the cursor in the same cell, useful for quick experimentation of snippets that you don't need to keep permanently.
###Code
print("Hi")
###Output
Hi
###Markdown
Getting help:
###Code
?
###Output
_____no_output_____
###Markdown
Typing `object_name?` will print all sorts of details about any object, including docstrings, function definition lines (for call arguments) and constructor details for classes.
###Code
import collections
collections.namedtuple?
collections.Counter??
*int*?
###Output
_____no_output_____
###Markdown
An IPython quick reference card:
###Code
%quickref
###Output
_____no_output_____
###Markdown
Tab completion Tab completion, especially for attributes, is a convenient way to explore the structure of any object you’re dealing with. Simply type `object_name.` to view the object’s attributes. Besides Python objects and keywords, tab completion also works on file and directory names.
###Code
collections.
###Output
_____no_output_____
###Markdown
The interactive workflow: input, output, history
###Code
2+10
_+10
###Output
_____no_output_____
###Markdown
You can suppress the storage and rendering of output if you append `;` to the last cell (this comes in handy when plotting with matplotlib, for example):
###Code
10+20;
_
###Output
_____no_output_____
###Markdown
The output is stored in `_N` and `Out[N]` variables:
###Code
_10 == Out[10]
###Output
_____no_output_____
###Markdown
Previous inputs are available, too:
###Code
In[11]
_i
%history -n 1-5
###Output
1: print("Hi")
2: ?
3:
import collections
collections.namedtuple?
4: collections.Counter??
5: *int*?
###Markdown
Accessing the underlying operating system
###Code
!pwd
files = !ls
print("My current directory's files:")
print(files)
!echo $files
!echo {files[0].upper()}
###Output
BEYOND PLAIN PYTHON.IPYNB
###Markdown
Note that all this is available even in multiline blocks:
###Code
import os
for i,f in enumerate(files):
if f.endswith('ipynb'):
!echo {"%02d" % i} - "{os.path.splitext(f)[0]}"
else:
print('--')
###Output
00 - Beyond Plain Python
01 - Index
02 - Notebook Basics
03 - Working With Markdown Cells
--
--
--
--
###Markdown
Beyond Python: magic functions The IPython 'magic' functions are a set of commands, invoked by prepending one or two `%` signs to their name, that live in a namespace separate from your normal Python variables and provide a more command-like interface. They take flags with `--` and arguments without quotes, parentheses or commas. The motivation behind this system is two-fold:
- To provide an orthogonal namespace for controlling IPython itself and exposing other system-oriented functionality.
- To expose a calling mode that requires minimal verbosity and typing while working interactively. Thus the inspiration taken from the classic Unix shell style for commands.
###Code
%magic
###Output
_____no_output_____
###Markdown
Line vs cell magics:
###Code
%timeit list(range(1000))
%%timeit -n 100000
x = list(range(100))
sum(x)
###Output
1.87 µs ± 53.8 ns per loop (mean ± std. dev. of 7 runs, 100000 loops each)
###Markdown
Line magics can be used even inside code blocks:
###Code
for i in range(1, 5):
size = i*100
print('size:', size, end=' ')
%timeit -n 100000 list(range(size))
###Output
size: 100 1.02 µs ± 38.1 ns per loop (mean ± std. dev. of 7 runs, 100000 loops each)
size: 200 1.4 µs ± 57.4 ns per loop (mean ± std. dev. of 7 runs, 100000 loops each)
size: 300 2.35 µs ± 39 ns per loop (mean ± std. dev. of 7 runs, 100000 loops each)
size: 400 3.82 µs ± 41.2 ns per loop (mean ± std. dev. of 7 runs, 100000 loops each)
###Markdown
Magics can do anything they want with their input, so it doesn't have to be valid Python:
###Code
%%bash
echo "My shell is:" $SHELL
echo "My disk usage is:"
df -h
###Output
My shell is: /usr/local/bin/bash
My disk usage is:
Filesystem Size Used Avail Capacity iused ifree %iused Mounted on
/dev/disk1 931Gi 501Gi 429Gi 54% 3005697 4291961582 0% /
devfs 192Ki 192Ki 0Bi 100% 664 0 100% /dev
map -hosts 0Bi 0Bi 0Bi 100% 0 0 100% /net
/dev/disk2s2 1.8Ti 517Gi 1.3Ti 28% 4360270 4290607009 0% /Volumes/Time Machine - Seagate Silver
###Markdown
Another interesting cell magic: create any file you want locally from the notebook:
###Code
%%writefile test.txt
This is a test file!
It can contain anything I want...
And more...
!cat test.txt
###Output
This is a test file!
It can contain anything I want...
And more...
###Markdown
Let's see what other magics are currently defined in the system:
###Code
%lsmagic
###Output
_____no_output_____
###Markdown
Running normal Python code: execution and errors Not only can you input normal Python code, you can even paste straight from a Python or IPython shell session:
###Code
>>> # Fibonacci series:
... # the sum of two elements defines the next
... a, b = 0, 1
>>> while b < 10:
... print(b)
... a, b = b, a+b
In [1]: for i in range(10):
...: print(i, end=' ')
...:
###Output
0 1 2 3 4 5 6 7 8 9
###Markdown
And when your code produces errors, you can control how they are displayed with the `%xmode` magic:
###Code
%%writefile mod.py
def f(x):
return 1.0/(x-1)
def g(y):
return f(y+1)
###Output
Overwriting mod.py
###Markdown
Now let's call the function `g` with an argument that would produce an error:
###Code
import mod
mod.g(0)
%xmode plain
mod.g(0)
%xmode verbose
mod.g(0)
###Output
Exception reporting mode: Verbose
###Markdown
The default `%xmode` is "context", which shows additional context but not all local variables. Let's restore that one for the rest of our session.
###Code
%xmode context
###Output
Exception reporting mode: Context
###Markdown
Running code in other languages with special `%%` magics
###Code
%%perl
@months = ("July", "August", "September");
print $months[0];
%%ruby
name = "world"
puts "Hello #{name.capitalize}!"
###Output
Hello World!
###Markdown
Raw Input in the notebook Since 1.0 the IPython notebook web application supports `raw_input`, which for example allows us to invoke the `%debug` magic in the notebook:
###Code
mod.g(0)
%debug
###Output
> [0;32m/Users/fperez/teach/berkeley/2017-stat159/stat159/lectures/02-ipython/mod.py[0m(3)[0;36mf[0;34m()[0m
[0;32m 1 [0;31m[0;34m[0m[0m
[0m[0;32m 2 [0;31m[0;32mdef[0m [0mf[0m[0;34m([0m[0mx[0m[0;34m)[0m[0;34m:[0m[0;34m[0m[0m
[0m[0;32m----> 3 [0;31m [0;32mreturn[0m [0;36m1.0[0m[0;34m/[0m[0;34m([0m[0mx[0m[0;34m-[0m[0;36m1[0m[0;34m)[0m[0;34m[0m[0m
[0m[0;32m 4 [0;31m[0;34m[0m[0m
[0m[0;32m 5 [0;31m[0;32mdef[0m [0mg[0m[0;34m([0m[0my[0m[0;34m)[0m[0;34m:[0m[0;34m[0m[0m
[0m
ipdb> q
###Markdown
Don't forget to exit your debugging session. Raw input can of course be used to ask for user input:
###Code
enjoy = input('Are you enjoying this tutorial? ')
print('enjoy is:', enjoy)
###Output
Are you enjoying this tutorial? yes!
enjoy is: yes!
###Markdown
Plotting in the notebook This magic configures matplotlib to render its figures inline:
###Code
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
x = np.linspace(0, 2*np.pi, 300)
y = np.sin(x**2)
plt.plot(x, y)
plt.title("A little chirp")
fig = plt.gcf() # let's keep the figure object around for later...
###Output
_____no_output_____
###Markdown
The IPython kernel/client model
###Code
%connect_info
###Output
{
"shell_port": 61750,
"iopub_port": 61751,
"stdin_port": 61752,
"control_port": 61753,
"hb_port": 61754,
"ip": "127.0.0.1",
"key": "7cf2d8a1-64149f698b1ca4929d44b73a",
"transport": "tcp",
"signature_scheme": "hmac-sha256",
"kernel_name": ""
}
Paste the above JSON into a file, and connect with:
$> jupyter <app> --existing <file>
or, if you are local, you can connect with just:
$> jupyter <app> --existing kernel-90b8bcf0-cc04-4407-b44f-22dbcb1d2bca.json
or even just:
$> jupyter <app> --existing
if this is the most recent Jupyter kernel you have started.
###Markdown
We can automatically connect a Qt Console to the currently running kernel with the `%qtconsole` magic, or by typing `ipython console --existing` in any terminal:
###Code
%qtconsole
###Output
_____no_output_____ |
petcircle.ipynb | ###Markdown
Just see the price for the kitty litter
###Code
src_data.product_name.unique()
src_data.head()
kitty_litter = src_data[src_data['product_name']=='rufus and coco wee kitty clumping corn litter'].copy()  # .copy() so the columns added below don't raise SettingWithCopyWarning
kitty_litter.head()
###Output
_____no_output_____
###Markdown
Get dollar per kilo
###Code
#kitty_litter['product_size'].str.replace('kg', '')
# Split e.g. '15kg' into its numeric part and its unit via the regex capture group
kitty_litter[['weight', 'unit']] = kitty_litter['product_size'].str.split('([a-z]+)', expand=True).iloc[:, 0:2]
kitty_litter['weight'] = kitty_litter['weight'].astype(float)
kitty_litter.info()
kitty_litter.head()
kitty_litter['dollar_per_kg'] = (kitty_litter.autodelivery_price / kitty_litter.weight)
kitty_litter
kitty_litter.autodelivery_price / kitty_litter.weight
###Output
_____no_output_____ |
Pandas/Test for stationarity of Google data 13-12-2016.ipynb | ###Markdown
https://www.quantstart.com/articles/Backtesting-a-Moving-Average-Crossover-in-Python-with-pandas Manual import
###Code
import datetime
import matplotlib.pyplot as plt
%matplotlib inline
import numpy as np
import pandas as pd
import os
import statsmodels
import statsmodels.api as sm
from statsmodels.tsa.stattools import coint, adfuller
###Output
_____no_output_____
###Markdown
Import using pandas_datareader
###Code
import pandas_datareader.data as web
import datetime
start = datetime.datetime(2010, 1, 1)
end = datetime.datetime(2013, 1, 27)
f = web.DataReader("F", 'google', start, end)
f['Close'].plot()
def check_for_stationarity(X, cutoff=0.01):
# H_0 in adfuller is unit root exists (non-stationary)
# We must observe significant p-value to convince ourselves that the series is stationary
pvalue = adfuller(X)[1]
if pvalue < cutoff:
print ('p-value = ' + str(pvalue) + ' The series ' + X.name +' is likely stationary.')
return True
else:
print( 'p-value = ' + str(pvalue) + ' The series ' + X.name +' is likely non-stationary.')
return False
check_for_stationarity(f['Close'])
###Output
p-value = 0.339765366476 The series Close is likely non-stationary.
|
02.Improving_Deep_Neural_Networks/Week1/1.Initialization/Initialization.ipynb | ###Markdown
Initialization Welcome to the first assignment of "Improving Deep Neural Networks". Training your neural network requires specifying an initial value of the weights. A well chosen initialization method will help learning. If you completed the previous course of this specialization, you probably followed our instructions for weight initialization, and it has worked out so far. But how do you choose the initialization for a new neural network? In this notebook, you will see how different initializations lead to different results. A well chosen initialization can: - Speed up the convergence of gradient descent - Increase the odds of gradient descent converging to a lower training (and generalization) error. To get started, run the following cell to load the packages and the planar dataset you will try to classify.
###Code
import numpy as np
import matplotlib.pyplot as plt
import sklearn
import sklearn.datasets
from init_utils import sigmoid, relu, compute_loss, forward_propagation, backward_propagation
from init_utils import update_parameters, predict, load_dataset, plot_decision_boundary, predict_dec
%matplotlib inline
plt.rcParams['figure.figsize'] = (7.0, 4.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# load image dataset: blue/red dots in circles
train_X, train_Y, test_X, test_Y = load_dataset()
###Output
_____no_output_____
###Markdown
You would like a classifier to separate the blue dots from the red dots. 1 - Neural Network model You will use a 3-layer neural network (already implemented for you). Here are the initialization methods you will experiment with: - *Zeros initialization* -- setting `initialization = "zeros"` in the input argument.- *Random initialization* -- setting `initialization = "random"` in the input argument. This initializes the weights to large random values. - *He initialization* -- setting `initialization = "he"` in the input argument. This initializes the weights to random values scaled according to a paper by He et al., 2015. **Instructions**: Please quickly read over the code below, and run it. In the next part you will implement the three initialization methods that this `model()` calls.
###Code
def model(X, Y, learning_rate = 0.01, num_iterations = 15000, print_cost = True, initialization = "he"):
"""
Implements a three-layer neural network: LINEAR->RELU->LINEAR->RELU->LINEAR->SIGMOID.
Arguments:
X -- input data, of shape (2, number of examples)
Y -- true "label" vector (containing 0 for red dots; 1 for blue dots), of shape (1, number of examples)
learning_rate -- learning rate for gradient descent
num_iterations -- number of iterations to run gradient descent
print_cost -- if True, print the cost every 1000 iterations
initialization -- flag to choose which initialization to use ("zeros","random" or "he")
Returns:
parameters -- parameters learnt by the model
"""
grads = {}
costs = [] # to keep track of the loss
m = X.shape[1] # number of examples
layers_dims = [X.shape[0], 10, 5, 1]
# Initialize parameters dictionary.
if initialization == "zeros":
parameters = initialize_parameters_zeros(layers_dims)
elif initialization == "random":
parameters = initialize_parameters_random(layers_dims)
elif initialization == "he":
parameters = initialize_parameters_he(layers_dims)
# Loop (gradient descent)
for i in range(0, num_iterations):
# Forward propagation: LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID.
a3, cache = forward_propagation(X, parameters)
# Loss
cost = compute_loss(a3, Y)
# Backward propagation.
grads = backward_propagation(X, Y, cache)
# Update parameters.
parameters = update_parameters(parameters, grads, learning_rate)
# Print the loss every 1000 iterations
if print_cost and i % 1000 == 0:
print("Cost after iteration {}: {}".format(i, cost))
costs.append(cost)
# plot the loss
plt.plot(costs)
plt.ylabel('cost')
plt.xlabel('iterations (per hundreds)')
plt.title("Learning rate =" + str(learning_rate))
plt.show()
return parameters
###Output
_____no_output_____
###Markdown
2 - Zero initialization There are two types of parameters to initialize in a neural network: - the weight matrices $(W^{[1]}, W^{[2]}, W^{[3]}, ..., W^{[L-1]}, W^{[L]})$ - the bias vectors $(b^{[1]}, b^{[2]}, b^{[3]}, ..., b^{[L-1]}, b^{[L]})$ **Exercise**: Implement the following function to initialize all parameters to zeros. You'll see later that this does not work well since it fails to "break symmetry", but let's try it anyway and see what happens. Use np.zeros((..,..)) with the correct shapes.
###Code
# GRADED FUNCTION: initialize_parameters_zeros
def initialize_parameters_zeros(layers_dims):
"""
Arguments:
layer_dims -- python array (list) containing the size of each layer.
Returns:
parameters -- python dictionary containing your parameters "W1", "b1", ..., "WL", "bL":
W1 -- weight matrix of shape (layers_dims[1], layers_dims[0])
b1 -- bias vector of shape (layers_dims[1], 1)
...
WL -- weight matrix of shape (layers_dims[L], layers_dims[L-1])
bL -- bias vector of shape (layers_dims[L], 1)
"""
parameters = {}
L = len(layers_dims) # number of layers in the network
for l in range(1, L):
### START CODE HERE ### (≈ 2 lines of code)
parameters['W' + str(l)] = np.zeros((layers_dims[l], layers_dims[l-1]))
parameters['b' + str(l)] = np.zeros((layers_dims[l], 1))
### END CODE HERE ###
return parameters
parameters = initialize_parameters_zeros([3,2,1])
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
###Output
W1 = [[0. 0. 0.]
[0. 0. 0.]]
b1 = [[0.]
[0.]]
W2 = [[0. 0.]]
b2 = [[0.]]
###Markdown
**Expected Output**: **W1** [[ 0. 0. 0.] [ 0. 0. 0.]] **b1** [[ 0.] [ 0.]] **W2** [[ 0. 0.]] **b2** [[ 0.]] Run the following code to train your model on 15,000 iterations using zeros initialization.
###Code
parameters = model(train_X, train_Y, initialization = "zeros")
print ("On the train set:")
predictions_train = predict(train_X, train_Y, parameters)
print ("On the test set:")
predictions_test = predict(test_X, test_Y, parameters)
###Output
Cost after iteration 0: 0.6931471805599453
Cost after iteration 1000: 0.6931471805599453
Cost after iteration 2000: 0.6931471805599453
Cost after iteration 3000: 0.6931471805599453
Cost after iteration 4000: 0.6931471805599453
Cost after iteration 5000: 0.6931471805599453
Cost after iteration 6000: 0.6931471805599453
Cost after iteration 7000: 0.6931471805599453
Cost after iteration 8000: 0.6931471805599453
Cost after iteration 9000: 0.6931471805599453
Cost after iteration 10000: 0.6931471805599455
Cost after iteration 11000: 0.6931471805599453
Cost after iteration 12000: 0.6931471805599453
Cost after iteration 13000: 0.6931471805599453
Cost after iteration 14000: 0.6931471805599453
###Markdown
The performance is really bad: the cost does not really decrease, and the algorithm performs no better than random guessing. Why? Let's look at the details of the predictions and the decision boundary:
###Code
print ("predictions_train = " + str(predictions_train))
print ("predictions_test = " + str(predictions_test))
plt.title("Model with Zeros initialization")
axes = plt.gca()
axes.set_xlim([-1.5,1.5])
axes.set_ylim([-1.5,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
###Output
_____no_output_____
###Markdown
The model is predicting 0 for every example. In general, initializing all the weights to zero results in the network failing to break symmetry. This means that every neuron in each layer will learn the same thing, and you might as well be training a neural network with $n^{[l]}=1$ for every layer, and the network is no more powerful than a linear classifier such as logistic regression. **What you should remember**: - The weights $W^{[l]}$ should be initialized randomly to break symmetry. - It is however okay to initialize the biases $b^{[l]}$ to zeros. Symmetry is still broken so long as $W^{[l]}$ is initialized randomly. 3 - Random initialization To break symmetry, let's initialize the weights randomly. Following random initialization, each neuron can then proceed to learn a different function of its inputs. In this exercise, you will see what happens if the weights are initialized randomly, but to very large values. **Exercise**: Implement the following function to initialize your weights to large random values (scaled by \*10) and your biases to zeros. Use `np.random.randn(..,..) * 10` for weights and `np.zeros((.., ..))` for biases. We are using a fixed `np.random.seed(..)` to make sure your "random" weights match ours, so don't worry if running your code several times always gives you the same initial values for the parameters.
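Before implementing it, a quick sanity check of the symmetry argument above may help; the cell below is an illustrative aside (not part of the original assignment) and only uses `numpy` and the helpers already imported from `init_utils` earlier in this notebook.
###Code
# Illustrative aside (not a graded cell): with all-zero weights the forward pass
# produces the exact same output for every example, so there is no example- or
# neuron-specific signal for gradient descent to exploit.
params_zero = initialize_parameters_zeros([2, 10, 5, 1])
X_demo = np.random.randn(2, 5)   # 5 toy inputs with 2 features each
a3_demo, _ = forward_propagation(X_demo, params_zero)
print(a3_demo)                   # every entry is sigmoid(0) = 0.5
###Output
_____no_output_____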
###Code
# GRADED FUNCTION: initialize_parameters_random
def initialize_parameters_random(layers_dims):
"""
Arguments:
layer_dims -- python array (list) containing the size of each layer.
Returns:
parameters -- python dictionary containing your parameters "W1", "b1", ..., "WL", "bL":
W1 -- weight matrix of shape (layers_dims[1], layers_dims[0])
b1 -- bias vector of shape (layers_dims[1], 1)
...
WL -- weight matrix of shape (layers_dims[L], layers_dims[L-1])
bL -- bias vector of shape (layers_dims[L], 1)
"""
    np.random.seed(3)               # This seed makes sure your "random" numbers will be the same as ours
parameters = {}
L = len(layers_dims) # integer representing the number of layers
for l in range(1, L):
### START CODE HERE ### (≈ 2 lines of code)
parameters['W' + str(l)] = np.random.randn(layers_dims[l], layers_dims[l-1]) * 10
parameters['b' + str(l)] = np.zeros((layers_dims[l], 1))
### END CODE HERE ###
return parameters
parameters = initialize_parameters_random([3, 2, 1])
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
###Output
W1 = [[ 17.88628473 4.36509851 0.96497468]
[-18.63492703 -2.77388203 -3.54758979]]
b1 = [[0.]
[0.]]
W2 = [[-0.82741481 -6.27000677]]
b2 = [[0.]]
###Markdown
**Expected Output**: **W1** [[ 17.88628473 4.36509851 0.96497468] [-18.63492703 -2.77388203 -3.54758979]] **b1** [[ 0.] [ 0.]] **W2** [[-0.82741481 -6.27000677]] **b2** [[ 0.]] Run the following code to train your model on 15,000 iterations using random initialization.
###Code
parameters = model(train_X, train_Y, initialization = "random")
print ("On the train set:")
predictions_train = predict(train_X, train_Y, parameters)
print ("On the test set:")
predictions_test = predict(test_X, test_Y, parameters)
###Output
C:\Users\bin_he4\Desktop\deeplearning.ai\02.Improving_Deep_Neural_Networks\Week1\1.Initialization\init_utils.py:145: RuntimeWarning: divide by zero encountered in log
logprobs = np.multiply(-np.log(a3),Y) + np.multiply(-np.log(1 - a3), 1 - Y)
C:\Users\bin_he4\Desktop\deeplearning.ai\02.Improving_Deep_Neural_Networks\Week1\1.Initialization\init_utils.py:145: RuntimeWarning: invalid value encountered in multiply
logprobs = np.multiply(-np.log(a3),Y) + np.multiply(-np.log(1 - a3), 1 - Y)
###Markdown
If you see "inf" as the cost after iteration 0, this is because of numerical roundoff; a more numerically sophisticated implementation would fix this. But this isn't worth worrying about for our purposes. Anyway, it looks like you have broken symmetry, and this gives better results than before. The model is no longer outputting all 0s.
###Code
print (predictions_train)
print (predictions_test)
plt.title("Model with large random initialization")
axes = plt.gca()
axes.set_xlim([-1.5,1.5])
axes.set_ylim([-1.5,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
###Output
_____no_output_____
###Markdown
**Observations**: - The cost starts very high. This is because with large random-valued weights, the last activation (sigmoid) outputs results that are very close to 0 or 1 for some examples, and when it gets that example wrong it incurs a very high loss for that example. Indeed, when $\log(a^{[3]}) = \log(0)$, the loss goes to infinity. - Poor initialization can lead to vanishing/exploding gradients, which also slows down the optimization algorithm. - If you train this network longer you will see better results, but initializing with overly large random numbers slows down the optimization. **In summary**: - Initializing weights to very large random values does not work well. - Hopefully initializing with small random values does better. The important question is: how small should these random values be? Let's find out in the next part! 4 - He initialization Finally, try "He Initialization"; this is named for the first author of He et al., 2015. (If you have heard of "Xavier initialization", this is similar except Xavier initialization uses a scaling factor for the weights $W^{[l]}$ of `sqrt(1./layers_dims[l-1])` where He initialization would use `sqrt(2./layers_dims[l-1])`.) **Exercise**: Implement the following function to initialize your parameters with He initialization. **Hint**: This function is similar to the previous `initialize_parameters_random(...)`. The only difference is that instead of multiplying `np.random.randn(..,..)` by 10, you will multiply it by $\sqrt{\frac{2}{\text{dimension of the previous layer}}}$, which is what He initialization recommends for layers with a ReLU activation.
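As a quick illustrative aside before the graded exercise (a sketch, not part of the original assignment), the cell below compares the spread of first-layer pre-activations under the "large" (x10) scaling and the He scaling; it only assumes `numpy` as imported above.
###Code
# Illustrative aside (not a graded cell): compare the spread of z = np.dot(W, X)
# for a layer with 2 inputs under the x10 scaling vs. the He scaling sqrt(2/2).
np.random.seed(1)
X_demo = np.random.randn(2, 1000)
W_large = np.random.randn(10, 2) * 10
W_he = np.random.randn(10, 2) * np.sqrt(2. / 2)
print("std of pre-activations, x10 init:", round(float(np.std(np.dot(W_large, X_demo))), 2))
print("std of pre-activations, He init: ", round(float(np.std(np.dot(W_he, X_demo))), 2))
###Output
_____no_output_____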
###Code
# GRADED FUNCTION: initialize_parameters_he
def initialize_parameters_he(layers_dims):
"""
Arguments:
layer_dims -- python array (list) containing the size of each layer.
Returns:
parameters -- python dictionary containing your parameters "W1", "b1", ..., "WL", "bL":
W1 -- weight matrix of shape (layers_dims[1], layers_dims[0])
b1 -- bias vector of shape (layers_dims[1], 1)
...
WL -- weight matrix of shape (layers_dims[L], layers_dims[L-1])
bL -- bias vector of shape (layers_dims[L], 1)
"""
np.random.seed(3)
parameters = {}
L = len(layers_dims) - 1 # integer representing the number of layers
for l in range(1, L + 1):
### START CODE HERE ### (≈ 2 lines of code)
parameters['W' + str(l)] = np.random.randn(layers_dims[l], layers_dims[l-1]) * (np.sqrt(2/layers_dims[l-1]))
parameters['b' + str(l)] = np.zeros((layers_dims[l], 1))
### END CODE HERE ###
return parameters
parameters = initialize_parameters_he([2, 4, 1])
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
###Output
W1 = [[ 1.78862847 0.43650985]
[ 0.09649747 -1.8634927 ]
[-0.2773882 -0.35475898]
[-0.08274148 -0.62700068]]
b1 = [[0.]
[0.]
[0.]
[0.]]
W2 = [[-0.03098412 -0.33744411 -0.92904268 0.62552248]]
b2 = [[0.]]
###Markdown
**Expected Output**: **W1** [[ 1.78862847 0.43650985] [ 0.09649747 -1.8634927 ] [-0.2773882 -0.35475898] [-0.08274148 -0.62700068]] **b1** [[ 0.] [ 0.] [ 0.] [ 0.]] **W2** [[-0.03098412 -0.33744411 -0.92904268 0.62552248]] **b2** [[ 0.]] Run the following code to train your model on 15,000 iterations using He initialization.
###Code
parameters = model(train_X, train_Y, initialization = "he")
print ("On the train set:")
predictions_train = predict(train_X, train_Y, parameters)
print ("On the test set:")
predictions_test = predict(test_X, test_Y, parameters)
plt.title("Model with He initialization")
axes = plt.gca()
axes.set_xlim([-1.5,1.5])
axes.set_ylim([-1.5,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
###Output
_____no_output_____ |
notebooks/wythoff_exp44.ipynb | ###Markdown
Analysis - exp44 - Consistency check. DQN parameters ('score', 'learning_rate', 'epsilon'): (0.878515854265969, 0.000222, 0.3)
###Code
import os
import csv
import numpy as np
import torch as th
from glob import glob
from pprint import pprint
import matplotlib
import matplotlib.pyplot as plt
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import seaborn as sns
sns.set(font_scale=1.5)
sns.set_style('ticks')
matplotlib.rcParams.update({'font.size': 16})
matplotlib.rc('axes', titlesize=16)
from notebook_helpers import load_params
from notebook_helpers import load_monitored
from notebook_helpers import join_monitored
from notebook_helpers import score_summary
def load_data(path, run_index=(0, 20)):
runs = range(run_index[0], run_index[1]+1)
exps = []
for r in runs:
file = os.path.join(path, "run_{}_monitor.csv".format(int(r)))
try:
mon = load_monitored(file)
except FileNotFoundError:
mon = None
exps.append(mon)
return exps
###Output
_____no_output_____
###Markdown
Load data
###Code
path = "/Users/qualia/Code/azad/data/wythoff/exp44/"
exp_44 = load_data(path, run_index=(1, 20))
print(len(exp_44))
pprint(exp_44[1].keys())
pprint(exp_44[1]['score'][:20])
###Output
dict_keys(['file', 'episode', 'loss', 'score'])
[0.5176074024639298,
0.5444599716439139,
0.552084476355806,
0.5556678005449596,
0.5655667838885988,
0.5744560847145985,
0.5744560847145985,
0.57947738371499,
0.5866348168275881,
0.5889060540977662,
0.590971852820893,
0.590971852820893,
0.5928395612555009,
0.5946332195759612,
0.5946332195759612,
0.5962611986138087,
0.6009997091464798,
0.6039829202763324,
0.6068526848794578,
0.6109913508678403]
###Markdown
Plots Timecourse
###Code
plt.figure(figsize=(6, 3))
for r, mon in enumerate(exp_44):
if mon is not None:
_ = plt.plot(mon['episode'], mon['score'], color='black')
_ = plt.ylim(0, 1)
_ = plt.ylabel("Optimal score")
_ = plt.tight_layout()
_ = plt.xlabel("Episode")
###Output
_____no_output_____
###Markdown
Histograms of final values
###Code
data = []
plt.figure(figsize=(6, 3))
for r, mon in enumerate(exp_44):
if mon is not None:
data.append(np.max(mon['score']))
_ = plt.hist(data, bins=5, range=(0,1), color='black')
_ = plt.xlabel("Max score")
_ = plt.ylabel("Count")
_ = plt.tight_layout()
data = []
plt.figure(figsize=(6, 3))
for r, mon in enumerate(exp_44):
if mon is not None:
data.append(np.mean(mon['score']))
_ = plt.hist(data, bins=5, range=(0,1), color='black')
_ = plt.xlabel("Mean score")
_ = plt.ylabel("Count")
_ = plt.tight_layout()
###Output
_____no_output_____ |
Lesson08/Activity16.ipynb | ###Markdown
Import the required Libraries
###Code
import numpy as np
from keras.applications.resnet50 import ResNet50
from keras.preprocessing import image
from keras.applications.resnet50 import preprocess_input
###Output
Using TensorFlow backend.
###Markdown
Initiate the Model
###Code
classifier=ResNet50()
print(classifier.summary())
###Output
WARNING:tensorflow:From C:\Users\RitZ\Anaconda3\lib\site-packages\tensorflow\python\framework\op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.
Instructions for updating:
Colocations handled automatically by placer.
Downloading data from https://github.com/fchollet/deep-learning-models/releases/download/v0.2/resnet50_weights_tf_dim_ordering_tf_kernels.h5
102858752/102853048 [==============================] - 126s 1us/step
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_1 (InputLayer) (None, 224, 224, 3) 0
__________________________________________________________________________________________________
conv1_pad (ZeroPadding2D) (None, 230, 230, 3) 0 input_1[0][0]
__________________________________________________________________________________________________
conv1 (Conv2D) (None, 112, 112, 64) 9472 conv1_pad[0][0]
__________________________________________________________________________________________________
bn_conv1 (BatchNormalization) (None, 112, 112, 64) 256 conv1[0][0]
__________________________________________________________________________________________________
activation_1 (Activation) (None, 112, 112, 64) 0 bn_conv1[0][0]
__________________________________________________________________________________________________
pool1_pad (ZeroPadding2D) (None, 114, 114, 64) 0 activation_1[0][0]
__________________________________________________________________________________________________
max_pooling2d_1 (MaxPooling2D) (None, 56, 56, 64) 0 pool1_pad[0][0]
__________________________________________________________________________________________________
res2a_branch2a (Conv2D) (None, 56, 56, 64) 4160 max_pooling2d_1[0][0]
__________________________________________________________________________________________________
bn2a_branch2a (BatchNormalizati (None, 56, 56, 64) 256 res2a_branch2a[0][0]
__________________________________________________________________________________________________
activation_2 (Activation) (None, 56, 56, 64) 0 bn2a_branch2a[0][0]
__________________________________________________________________________________________________
res2a_branch2b (Conv2D) (None, 56, 56, 64) 36928 activation_2[0][0]
__________________________________________________________________________________________________
bn2a_branch2b (BatchNormalizati (None, 56, 56, 64) 256 res2a_branch2b[0][0]
__________________________________________________________________________________________________
activation_3 (Activation) (None, 56, 56, 64) 0 bn2a_branch2b[0][0]
__________________________________________________________________________________________________
res2a_branch2c (Conv2D) (None, 56, 56, 256) 16640 activation_3[0][0]
__________________________________________________________________________________________________
res2a_branch1 (Conv2D) (None, 56, 56, 256) 16640 max_pooling2d_1[0][0]
__________________________________________________________________________________________________
bn2a_branch2c (BatchNormalizati (None, 56, 56, 256) 1024 res2a_branch2c[0][0]
__________________________________________________________________________________________________
bn2a_branch1 (BatchNormalizatio (None, 56, 56, 256) 1024 res2a_branch1[0][0]
__________________________________________________________________________________________________
add_1 (Add) (None, 56, 56, 256) 0 bn2a_branch2c[0][0]
bn2a_branch1[0][0]
__________________________________________________________________________________________________
activation_4 (Activation) (None, 56, 56, 256) 0 add_1[0][0]
__________________________________________________________________________________________________
res2b_branch2a (Conv2D) (None, 56, 56, 64) 16448 activation_4[0][0]
__________________________________________________________________________________________________
bn2b_branch2a (BatchNormalizati (None, 56, 56, 64) 256 res2b_branch2a[0][0]
__________________________________________________________________________________________________
activation_5 (Activation) (None, 56, 56, 64) 0 bn2b_branch2a[0][0]
__________________________________________________________________________________________________
res2b_branch2b (Conv2D) (None, 56, 56, 64) 36928 activation_5[0][0]
__________________________________________________________________________________________________
bn2b_branch2b (BatchNormalizati (None, 56, 56, 64) 256 res2b_branch2b[0][0]
__________________________________________________________________________________________________
activation_6 (Activation) (None, 56, 56, 64) 0 bn2b_branch2b[0][0]
__________________________________________________________________________________________________
res2b_branch2c (Conv2D) (None, 56, 56, 256) 16640 activation_6[0][0]
__________________________________________________________________________________________________
bn2b_branch2c (BatchNormalizati (None, 56, 56, 256) 1024 res2b_branch2c[0][0]
__________________________________________________________________________________________________
add_2 (Add) (None, 56, 56, 256) 0 bn2b_branch2c[0][0]
activation_4[0][0]
__________________________________________________________________________________________________
activation_7 (Activation) (None, 56, 56, 256) 0 add_2[0][0]
__________________________________________________________________________________________________
res2c_branch2a (Conv2D) (None, 56, 56, 64) 16448 activation_7[0][0]
__________________________________________________________________________________________________
bn2c_branch2a (BatchNormalizati (None, 56, 56, 64) 256 res2c_branch2a[0][0]
__________________________________________________________________________________________________
activation_8 (Activation) (None, 56, 56, 64) 0 bn2c_branch2a[0][0]
__________________________________________________________________________________________________
res2c_branch2b (Conv2D) (None, 56, 56, 64) 36928 activation_8[0][0]
__________________________________________________________________________________________________
bn2c_branch2b (BatchNormalizati (None, 56, 56, 64) 256 res2c_branch2b[0][0]
__________________________________________________________________________________________________
activation_9 (Activation) (None, 56, 56, 64) 0 bn2c_branch2b[0][0]
__________________________________________________________________________________________________
res2c_branch2c (Conv2D) (None, 56, 56, 256) 16640 activation_9[0][0]
__________________________________________________________________________________________________
bn2c_branch2c (BatchNormalizati (None, 56, 56, 256) 1024 res2c_branch2c[0][0]
__________________________________________________________________________________________________
add_3 (Add) (None, 56, 56, 256) 0 bn2c_branch2c[0][0]
activation_7[0][0]
__________________________________________________________________________________________________
activation_10 (Activation) (None, 56, 56, 256) 0 add_3[0][0]
__________________________________________________________________________________________________
res3a_branch2a (Conv2D) (None, 28, 28, 128) 32896 activation_10[0][0]
__________________________________________________________________________________________________
bn3a_branch2a (BatchNormalizati (None, 28, 28, 128) 512 res3a_branch2a[0][0]
__________________________________________________________________________________________________
activation_11 (Activation) (None, 28, 28, 128) 0 bn3a_branch2a[0][0]
__________________________________________________________________________________________________
res3a_branch2b (Conv2D) (None, 28, 28, 128) 147584 activation_11[0][0]
__________________________________________________________________________________________________
bn3a_branch2b (BatchNormalizati (None, 28, 28, 128) 512 res3a_branch2b[0][0]
__________________________________________________________________________________________________
activation_12 (Activation) (None, 28, 28, 128) 0 bn3a_branch2b[0][0]
__________________________________________________________________________________________________
res3a_branch2c (Conv2D) (None, 28, 28, 512) 66048 activation_12[0][0]
__________________________________________________________________________________________________
res3a_branch1 (Conv2D) (None, 28, 28, 512) 131584 activation_10[0][0]
__________________________________________________________________________________________________
bn3a_branch2c (BatchNormalizati (None, 28, 28, 512) 2048 res3a_branch2c[0][0]
__________________________________________________________________________________________________
bn3a_branch1 (BatchNormalizatio (None, 28, 28, 512) 2048 res3a_branch1[0][0]
__________________________________________________________________________________________________
add_4 (Add) (None, 28, 28, 512) 0 bn3a_branch2c[0][0]
bn3a_branch1[0][0]
__________________________________________________________________________________________________
activation_13 (Activation) (None, 28, 28, 512) 0 add_4[0][0]
__________________________________________________________________________________________________
res3b_branch2a (Conv2D) (None, 28, 28, 128) 65664 activation_13[0][0]
__________________________________________________________________________________________________
bn3b_branch2a (BatchNormalizati (None, 28, 28, 128) 512 res3b_branch2a[0][0]
__________________________________________________________________________________________________
activation_14 (Activation) (None, 28, 28, 128) 0 bn3b_branch2a[0][0]
__________________________________________________________________________________________________
res3b_branch2b (Conv2D) (None, 28, 28, 128) 147584 activation_14[0][0]
__________________________________________________________________________________________________
bn3b_branch2b (BatchNormalizati (None, 28, 28, 128) 512 res3b_branch2b[0][0]
__________________________________________________________________________________________________
activation_15 (Activation) (None, 28, 28, 128) 0 bn3b_branch2b[0][0]
__________________________________________________________________________________________________
res3b_branch2c (Conv2D) (None, 28, 28, 512) 66048 activation_15[0][0]
__________________________________________________________________________________________________
bn3b_branch2c (BatchNormalizati (None, 28, 28, 512) 2048 res3b_branch2c[0][0]
__________________________________________________________________________________________________
add_5 (Add) (None, 28, 28, 512) 0 bn3b_branch2c[0][0]
activation_13[0][0]
__________________________________________________________________________________________________
activation_16 (Activation) (None, 28, 28, 512) 0 add_5[0][0]
__________________________________________________________________________________________________
res3c_branch2a (Conv2D) (None, 28, 28, 128) 65664 activation_16[0][0]
__________________________________________________________________________________________________
bn3c_branch2a (BatchNormalizati (None, 28, 28, 128) 512 res3c_branch2a[0][0]
__________________________________________________________________________________________________
activation_17 (Activation) (None, 28, 28, 128) 0 bn3c_branch2a[0][0]
__________________________________________________________________________________________________
res3c_branch2b (Conv2D) (None, 28, 28, 128) 147584 activation_17[0][0]
__________________________________________________________________________________________________
bn3c_branch2b (BatchNormalizati (None, 28, 28, 128) 512 res3c_branch2b[0][0]
__________________________________________________________________________________________________
activation_18 (Activation) (None, 28, 28, 128) 0 bn3c_branch2b[0][0]
__________________________________________________________________________________________________
res3c_branch2c (Conv2D) (None, 28, 28, 512) 66048 activation_18[0][0]
__________________________________________________________________________________________________
bn3c_branch2c (BatchNormalizati (None, 28, 28, 512) 2048 res3c_branch2c[0][0]
__________________________________________________________________________________________________
add_6 (Add) (None, 28, 28, 512) 0 bn3c_branch2c[0][0]
activation_16[0][0]
__________________________________________________________________________________________________
activation_19 (Activation) (None, 28, 28, 512) 0 add_6[0][0]
__________________________________________________________________________________________________
res3d_branch2a (Conv2D) (None, 28, 28, 128) 65664 activation_19[0][0]
__________________________________________________________________________________________________
bn3d_branch2a (BatchNormalizati (None, 28, 28, 128) 512 res3d_branch2a[0][0]
__________________________________________________________________________________________________
activation_20 (Activation) (None, 28, 28, 128) 0 bn3d_branch2a[0][0]
__________________________________________________________________________________________________
res3d_branch2b (Conv2D) (None, 28, 28, 128) 147584 activation_20[0][0]
__________________________________________________________________________________________________
bn3d_branch2b (BatchNormalizati (None, 28, 28, 128) 512 res3d_branch2b[0][0]
__________________________________________________________________________________________________
activation_21 (Activation) (None, 28, 28, 128) 0 bn3d_branch2b[0][0]
__________________________________________________________________________________________________
res3d_branch2c (Conv2D) (None, 28, 28, 512) 66048 activation_21[0][0]
__________________________________________________________________________________________________
bn3d_branch2c (BatchNormalizati (None, 28, 28, 512) 2048 res3d_branch2c[0][0]
__________________________________________________________________________________________________
add_7 (Add) (None, 28, 28, 512) 0 bn3d_branch2c[0][0]
activation_19[0][0]
__________________________________________________________________________________________________
activation_22 (Activation) (None, 28, 28, 512) 0 add_7[0][0]
__________________________________________________________________________________________________
res4a_branch2a (Conv2D) (None, 14, 14, 256) 131328 activation_22[0][0]
__________________________________________________________________________________________________
bn4a_branch2a (BatchNormalizati (None, 14, 14, 256) 1024 res4a_branch2a[0][0]
__________________________________________________________________________________________________
activation_23 (Activation) (None, 14, 14, 256) 0 bn4a_branch2a[0][0]
__________________________________________________________________________________________________
res4a_branch2b (Conv2D) (None, 14, 14, 256) 590080 activation_23[0][0]
__________________________________________________________________________________________________
bn4a_branch2b (BatchNormalizati (None, 14, 14, 256) 1024 res4a_branch2b[0][0]
__________________________________________________________________________________________________
activation_24 (Activation) (None, 14, 14, 256) 0 bn4a_branch2b[0][0]
__________________________________________________________________________________________________
res4a_branch2c (Conv2D) (None, 14, 14, 1024) 263168 activation_24[0][0]
__________________________________________________________________________________________________
res4a_branch1 (Conv2D) (None, 14, 14, 1024) 525312 activation_22[0][0]
__________________________________________________________________________________________________
bn4a_branch2c (BatchNormalizati (None, 14, 14, 1024) 4096 res4a_branch2c[0][0]
__________________________________________________________________________________________________
bn4a_branch1 (BatchNormalizatio (None, 14, 14, 1024) 4096 res4a_branch1[0][0]
__________________________________________________________________________________________________
add_8 (Add) (None, 14, 14, 1024) 0 bn4a_branch2c[0][0]
bn4a_branch1[0][0]
__________________________________________________________________________________________________
activation_25 (Activation) (None, 14, 14, 1024) 0 add_8[0][0]
__________________________________________________________________________________________________
res4b_branch2a (Conv2D) (None, 14, 14, 256) 262400 activation_25[0][0]
__________________________________________________________________________________________________
bn4b_branch2a (BatchNormalizati (None, 14, 14, 256) 1024 res4b_branch2a[0][0]
__________________________________________________________________________________________________
activation_26 (Activation) (None, 14, 14, 256) 0 bn4b_branch2a[0][0]
__________________________________________________________________________________________________
res4b_branch2b (Conv2D) (None, 14, 14, 256) 590080 activation_26[0][0]
__________________________________________________________________________________________________
bn4b_branch2b (BatchNormalizati (None, 14, 14, 256) 1024 res4b_branch2b[0][0]
__________________________________________________________________________________________________
activation_27 (Activation) (None, 14, 14, 256) 0 bn4b_branch2b[0][0]
__________________________________________________________________________________________________
res4b_branch2c (Conv2D) (None, 14, 14, 1024) 263168 activation_27[0][0]
__________________________________________________________________________________________________
bn4b_branch2c (BatchNormalizati (None, 14, 14, 1024) 4096 res4b_branch2c[0][0]
__________________________________________________________________________________________________
add_9 (Add) (None, 14, 14, 1024) 0 bn4b_branch2c[0][0]
activation_25[0][0]
__________________________________________________________________________________________________
activation_28 (Activation) (None, 14, 14, 1024) 0 add_9[0][0]
__________________________________________________________________________________________________
res4c_branch2a (Conv2D) (None, 14, 14, 256) 262400 activation_28[0][0]
__________________________________________________________________________________________________
bn4c_branch2a (BatchNormalizati (None, 14, 14, 256) 1024 res4c_branch2a[0][0]
__________________________________________________________________________________________________
activation_29 (Activation) (None, 14, 14, 256) 0 bn4c_branch2a[0][0]
__________________________________________________________________________________________________
res4c_branch2b (Conv2D) (None, 14, 14, 256) 590080 activation_29[0][0]
__________________________________________________________________________________________________
bn4c_branch2b (BatchNormalizati (None, 14, 14, 256) 1024 res4c_branch2b[0][0]
__________________________________________________________________________________________________
activation_30 (Activation) (None, 14, 14, 256) 0 bn4c_branch2b[0][0]
__________________________________________________________________________________________________
res4c_branch2c (Conv2D) (None, 14, 14, 1024) 263168 activation_30[0][0]
__________________________________________________________________________________________________
bn4c_branch2c (BatchNormalizati (None, 14, 14, 1024) 4096 res4c_branch2c[0][0]
__________________________________________________________________________________________________
add_10 (Add) (None, 14, 14, 1024) 0 bn4c_branch2c[0][0]
activation_28[0][0]
__________________________________________________________________________________________________
activation_31 (Activation) (None, 14, 14, 1024) 0 add_10[0][0]
__________________________________________________________________________________________________
res4d_branch2a (Conv2D) (None, 14, 14, 256) 262400 activation_31[0][0]
__________________________________________________________________________________________________
bn4d_branch2a (BatchNormalizati (None, 14, 14, 256) 1024 res4d_branch2a[0][0]
__________________________________________________________________________________________________
activation_32 (Activation) (None, 14, 14, 256) 0 bn4d_branch2a[0][0]
__________________________________________________________________________________________________
res4d_branch2b (Conv2D) (None, 14, 14, 256) 590080 activation_32[0][0]
__________________________________________________________________________________________________
bn4d_branch2b (BatchNormalizati (None, 14, 14, 256) 1024 res4d_branch2b[0][0]
__________________________________________________________________________________________________
activation_33 (Activation) (None, 14, 14, 256) 0 bn4d_branch2b[0][0]
__________________________________________________________________________________________________
res4d_branch2c (Conv2D) (None, 14, 14, 1024) 263168 activation_33[0][0]
__________________________________________________________________________________________________
bn4d_branch2c (BatchNormalizati (None, 14, 14, 1024) 4096 res4d_branch2c[0][0]
__________________________________________________________________________________________________
add_11 (Add) (None, 14, 14, 1024) 0 bn4d_branch2c[0][0]
activation_31[0][0]
__________________________________________________________________________________________________
activation_34 (Activation) (None, 14, 14, 1024) 0 add_11[0][0]
__________________________________________________________________________________________________
res4e_branch2a (Conv2D) (None, 14, 14, 256) 262400 activation_34[0][0]
__________________________________________________________________________________________________
bn4e_branch2a (BatchNormalizati (None, 14, 14, 256) 1024 res4e_branch2a[0][0]
__________________________________________________________________________________________________
activation_35 (Activation) (None, 14, 14, 256) 0 bn4e_branch2a[0][0]
__________________________________________________________________________________________________
res4e_branch2b (Conv2D) (None, 14, 14, 256) 590080 activation_35[0][0]
__________________________________________________________________________________________________
bn4e_branch2b (BatchNormalizati (None, 14, 14, 256) 1024 res4e_branch2b[0][0]
__________________________________________________________________________________________________
activation_36 (Activation) (None, 14, 14, 256) 0 bn4e_branch2b[0][0]
__________________________________________________________________________________________________
res4e_branch2c (Conv2D) (None, 14, 14, 1024) 263168 activation_36[0][0]
__________________________________________________________________________________________________
bn4e_branch2c (BatchNormalizati (None, 14, 14, 1024) 4096 res4e_branch2c[0][0]
__________________________________________________________________________________________________
add_12 (Add) (None, 14, 14, 1024) 0 bn4e_branch2c[0][0]
activation_34[0][0]
__________________________________________________________________________________________________
activation_37 (Activation) (None, 14, 14, 1024) 0 add_12[0][0]
__________________________________________________________________________________________________
res4f_branch2a (Conv2D) (None, 14, 14, 256) 262400 activation_37[0][0]
__________________________________________________________________________________________________
bn4f_branch2a (BatchNormalizati (None, 14, 14, 256) 1024 res4f_branch2a[0][0]
__________________________________________________________________________________________________
activation_38 (Activation) (None, 14, 14, 256) 0 bn4f_branch2a[0][0]
__________________________________________________________________________________________________
res4f_branch2b (Conv2D) (None, 14, 14, 256) 590080 activation_38[0][0]
__________________________________________________________________________________________________
bn4f_branch2b (BatchNormalizati (None, 14, 14, 256) 1024 res4f_branch2b[0][0]
__________________________________________________________________________________________________
activation_39 (Activation) (None, 14, 14, 256) 0 bn4f_branch2b[0][0]
__________________________________________________________________________________________________
res4f_branch2c (Conv2D) (None, 14, 14, 1024) 263168 activation_39[0][0]
__________________________________________________________________________________________________
bn4f_branch2c (BatchNormalizati (None, 14, 14, 1024) 4096 res4f_branch2c[0][0]
__________________________________________________________________________________________________
add_13 (Add) (None, 14, 14, 1024) 0 bn4f_branch2c[0][0]
activation_37[0][0]
__________________________________________________________________________________________________
activation_40 (Activation) (None, 14, 14, 1024) 0 add_13[0][0]
__________________________________________________________________________________________________
res5a_branch2a (Conv2D) (None, 7, 7, 512) 524800 activation_40[0][0]
__________________________________________________________________________________________________
bn5a_branch2a (BatchNormalizati (None, 7, 7, 512) 2048 res5a_branch2a[0][0]
__________________________________________________________________________________________________
activation_41 (Activation) (None, 7, 7, 512) 0 bn5a_branch2a[0][0]
__________________________________________________________________________________________________
res5a_branch2b (Conv2D) (None, 7, 7, 512) 2359808 activation_41[0][0]
__________________________________________________________________________________________________
bn5a_branch2b (BatchNormalizati (None, 7, 7, 512) 2048 res5a_branch2b[0][0]
__________________________________________________________________________________________________
activation_42 (Activation) (None, 7, 7, 512) 0 bn5a_branch2b[0][0]
__________________________________________________________________________________________________
res5a_branch2c (Conv2D) (None, 7, 7, 2048) 1050624 activation_42[0][0]
__________________________________________________________________________________________________
res5a_branch1 (Conv2D) (None, 7, 7, 2048) 2099200 activation_40[0][0]
__________________________________________________________________________________________________
bn5a_branch2c (BatchNormalizati (None, 7, 7, 2048) 8192 res5a_branch2c[0][0]
__________________________________________________________________________________________________
bn5a_branch1 (BatchNormalizatio (None, 7, 7, 2048) 8192 res5a_branch1[0][0]
__________________________________________________________________________________________________
add_14 (Add) (None, 7, 7, 2048) 0 bn5a_branch2c[0][0]
bn5a_branch1[0][0]
__________________________________________________________________________________________________
activation_43 (Activation) (None, 7, 7, 2048) 0 add_14[0][0]
__________________________________________________________________________________________________
res5b_branch2a (Conv2D) (None, 7, 7, 512) 1049088 activation_43[0][0]
__________________________________________________________________________________________________
bn5b_branch2a (BatchNormalizati (None, 7, 7, 512) 2048 res5b_branch2a[0][0]
__________________________________________________________________________________________________
activation_44 (Activation) (None, 7, 7, 512) 0 bn5b_branch2a[0][0]
__________________________________________________________________________________________________
res5b_branch2b (Conv2D) (None, 7, 7, 512) 2359808 activation_44[0][0]
__________________________________________________________________________________________________
bn5b_branch2b (BatchNormalizati (None, 7, 7, 512) 2048 res5b_branch2b[0][0]
__________________________________________________________________________________________________
activation_45 (Activation) (None, 7, 7, 512) 0 bn5b_branch2b[0][0]
__________________________________________________________________________________________________
res5b_branch2c (Conv2D) (None, 7, 7, 2048) 1050624 activation_45[0][0]
__________________________________________________________________________________________________
bn5b_branch2c (BatchNormalizati (None, 7, 7, 2048) 8192 res5b_branch2c[0][0]
__________________________________________________________________________________________________
add_15 (Add) (None, 7, 7, 2048) 0 bn5b_branch2c[0][0]
activation_43[0][0]
__________________________________________________________________________________________________
activation_46 (Activation) (None, 7, 7, 2048) 0 add_15[0][0]
__________________________________________________________________________________________________
res5c_branch2a (Conv2D) (None, 7, 7, 512) 1049088 activation_46[0][0]
__________________________________________________________________________________________________
bn5c_branch2a (BatchNormalizati (None, 7, 7, 512) 2048 res5c_branch2a[0][0]
__________________________________________________________________________________________________
activation_47 (Activation) (None, 7, 7, 512) 0 bn5c_branch2a[0][0]
__________________________________________________________________________________________________
res5c_branch2b (Conv2D) (None, 7, 7, 512) 2359808 activation_47[0][0]
__________________________________________________________________________________________________
bn5c_branch2b (BatchNormalizati (None, 7, 7, 512) 2048 res5c_branch2b[0][0]
__________________________________________________________________________________________________
activation_48 (Activation) (None, 7, 7, 512) 0 bn5c_branch2b[0][0]
__________________________________________________________________________________________________
res5c_branch2c (Conv2D) (None, 7, 7, 2048) 1050624 activation_48[0][0]
__________________________________________________________________________________________________
bn5c_branch2c (BatchNormalizati (None, 7, 7, 2048) 8192 res5c_branch2c[0][0]
__________________________________________________________________________________________________
add_16 (Add) (None, 7, 7, 2048) 0 bn5c_branch2c[0][0]
activation_46[0][0]
__________________________________________________________________________________________________
activation_49 (Activation) (None, 7, 7, 2048) 0 add_16[0][0]
__________________________________________________________________________________________________
avg_pool (GlobalAveragePooling2 (None, 2048) 0 activation_49[0][0]
__________________________________________________________________________________________________
fc1000 (Dense) (None, 1000) 2049000 avg_pool[0][0]
==================================================================================================
Total params: 25,636,712
Trainable params: 25,583,592
Non-trainable params: 53,120
__________________________________________________________________________________________________
None
###Markdown
Load the Image
###Code
new_image= image.load_img('../Data/Prediction/test_image_2.jpeg', target_size=(224, 224))
new_image
###Output
_____no_output_____
###Markdown
Convert the image to an array
###Code
transformed_image= image.img_to_array(new_image)
transformed_image.shape
###Output
_____no_output_____
###Markdown
Expand the transformed image with a 4th (batch) dimension
###Code
transformed_image=np.expand_dims(transformed_image,axis=0)
transformed_image.shape
###Output
_____no_output_____
###Markdown
Preprocess the Image
###Code
transformed_image=preprocess_input(transformed_image)
transformed_image
###Output
_____no_output_____
###Markdown
Create a predictor variable
###Code
y_pred= classifier.predict(transformed_image)
y_pred
###Output
_____no_output_____
###Markdown
Check the shape of the array
###Code
y_pred.shape
###Output
_____no_output_____
###Markdown
Make the predictions
###Code
from keras.applications.resnet50 import decode_predictions
decode_predictions(y_pred,top=5)
###Output
_____no_output_____
###Markdown
Make the predictions in readable form
###Code
label = decode_predictions(y_pred)
# retrieve the most likely result, i.e. highest probability
decoded_label = label[0][0]
# print the classification
print('%s (%.2f%%)' % (decoded_label[1], decoded_label[2]*100 ))
###Output
African_elephant (69.69%)
|
Asia_Conflict_Temp_DataExploration.ipynb | ###Markdown
###Code
import pandas as pd
import numpy as np
conflict = pd.read_csv('/content/asia_conflicts[1].csv')
conflict.head()
conflict['country'].values
conflict = conflict[conflict.country != 'Thailand']
conflict
conflict.shape
conflict.isnull().sum()
conflict['location'].nunique()
conflict['event_date'].nunique()
conflict['year'].nunique()
#Conflict dataset runs from 2010-2018, contains long/lat
#Temp by City dataset runs from 1743-2013: No other viable datasets could be found for monthly temp by city from 2013-2018
#contains long/lat
#Braith conflict dataset runs from 1990-2001, contains date and long/lat
#Forgotten conflicts dataset runs from various dates, depending on location, to the present
#Asia conflicts has the day, month, and year for conflicts; can change the dt format, drop the day, keep month and year - supplement missing years
#from 2001-2010 with forgotten conflicts data. Add extra rows for months during ongoing conflict to supplement.
#Trim all datasets down by specific lat/long after rounding, and specific years after that.
temp = pd.read_csv('/content/GlobalLandTemperaturesByCity[1].csv')
temp.head()
temp.shape
temp['City'].nunique()
temp['Country'].nunique()
temp['dt'].nunique()
temp.tail()
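# Illustrative sketch of the trimming step described in the notes above (an aside,
# not the final merge): keep only temperature records from 2010 onwards so they
# overlap with the conflict years; rounding and matching on latitude/longitude would
# follow once both coordinate formats have been normalised.
temp_recent = temp[pd.to_datetime(temp['dt']).dt.year >= 2010]
temp_recent.shape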
import pandas as pd
Braith = pd.read_excel('/content/Braith_II2005_data.xls')
Braith.head()
Braith.tail()
###Output
_____no_output_____ |
tensorflow/sc17/cats/step_1_to_3.ipynb | ###Markdown
Performance Metric and Requirements================== **Author(s):** [email protected] Before we get started on data, we have to choose our project performance metric and decide the statistical testing criteria. We'll make use of the metric code we write here when we get to Step 6 (Training) and we'll use the criteria in Step 9 (Testing).
###Code
# Required libraries:
import numpy as np
import pandas as pd
import seaborn as sns
###Output
_____no_output_____
###Markdown
Performance Metric: Accuracy We've picked accuracy as our performance metric. Accuracy $= \frac{\text{correct predictions}}{\text{total predictions}}$
###Code
# Accuracy metric:
def get_accuracy(truth, predictions, threshold=0.5, roundoff=2):
"""
Args:
truth: can be Boolean (False, True), int (0, 1), or float (0, 1)
predictions: number between 0 and 1, inclusive
threshold: we convert predictions to 1s if they're above this value
roundoff: report accuracy to how many decimal places?
Returns:
accuracy: number correct divided by total predictions
"""
truth = np.array(truth) == (1|True)
predicted = np.array(predictions) >= threshold
matches = sum(predicted == truth)
accuracy = float(matches) / len(truth)
return round(accuracy, roundoff)
# Try it out:
acc = get_accuracy(truth=[0, False, 1], predictions=[0.2, 0.7, 0.6])
print 'Accuracy is ' + str(acc) + '.'
###Output
_____no_output_____
###Markdown
Compare Loss Function with Performance Metric
###Code
def get_loss(predictions, truth):
# Our methods will be using cross-entropy loss.
return -np.mean(truth * np.log(predictions) + (1 - truth) * np.log(1 - predictions))
# Simulate some situations:
loss = []
acc = []
for i in range(1000):
for n in [10, 100, 1000]:
p = np.random.uniform(0.01, 0.99, (1, 1))
y = np.random.binomial(1, p, (n, 1))
x = np.random.uniform(0.01, 0.99, (n, 1))
acc = np.append(acc, get_accuracy(truth=y, predictions=x, roundoff=6))
loss = np.append(loss, get_loss(predictions=x, truth=y))
df = pd.DataFrame({'accuracy': acc, 'cross-entropy': loss})
# Visualize with Seaborn
import seaborn as sns
%matplotlib inline
sns.regplot(x="accuracy", y="cross-entropy", data=df)
###Output
_____no_output_____
###Markdown
Hypothesis Testing Setup
###Code
# Testing setup:
SIGNIFICANCE_LEVEL = 0.05
TARGET_ACCURACY = 0.80
# Hypothesis test we'll use:
from statsmodels.stats.proportion import proportions_ztest
# Using standard notation for a one-sided test of one population proportion:
n = 100 # Example number of predictions
x = 95 # Example number of correct predictions
p_value = proportions_ztest(count=x, nobs=n, value=TARGET_ACCURACY, alternative='larger')[1]
if p_value < SIGNIFICANCE_LEVEL:
print 'Congratulations! Your model is good enough to build. It passes testing. Awesome!'
else:
print 'Too bad. Better luck next project. To try again, you need a pristine test dataset.'
###Output
_____no_output_____ |
Google Maps API/Google Maps JSON/pharmacie scraping - Find Place .ipynb | ###Markdown
Find Place
###Code
import requests  # needed for the HTTP request below; `api_key` is assumed to be defined earlier in the session
url = "https://maps.googleapis.com/maps/api/place/findplacefromtext/json?"
location = "33.589886, -7.603869"
# radius in metres
radius = 26000
# type of place to search for
place_type = "pharmacy"
language = "fr"
r = requests.get(url + '&locationbias=circle:20000@'+str(location) +'&radius='+str(radius) + '&input='+place_type+'&inputtype=textquery&language='+language+'&fields=name,geometry&key=' + api_key)
response = r.json()
###Output
_____no_output_____
###Markdown
Find the closest or first occurrence of the place
###Code
response
c = 0
for i in response['results']:
print(i['geometry']['location'])
print(i['name'])
c = c + 1
print("**********")
print(c)
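# Illustrative aside (not from the original notebook): pick the single result closest
# to the search centre, using the same 'results'/'geometry' structure iterated above
# and the haversine great-circle distance.
import math

def haversine_km(lat1, lon1, lat2, lon2):
    # great-circle distance between two (lat, lon) points in kilometres
    r = 6371.0
    dp = math.radians(lat2 - lat1)
    dl = math.radians(lon2 - lon1)
    a = math.sin(dp / 2) ** 2 + math.cos(math.radians(lat1)) * math.cos(math.radians(lat2)) * math.sin(dl / 2) ** 2
    return 2 * r * math.asin(math.sqrt(a))

center_lat, center_lon = 33.589886, -7.603869
closest = min(response['results'],
              key=lambda p: haversine_km(center_lat, center_lon,
                                         p['geometry']['location']['lat'],
                                         p['geometry']['location']['lng']))
print(closest['name'])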
###Output
_____no_output_____ |
collect_splits/2_meltome_atlas.ipynb | ###Markdown
Meltome atlas download doesn't work --> http://meltomeatlas.proteomics.wzw.tum.de:5003/ ProteomicsDB data is difficult to download --> https://www.proteomicsdb.org/ PRIDE FTP might contain data needed --> https://www.ebi.ac.uk/pride/archive/projects/PXD011929
###Code
import re
import json
import math
import matplotlib.pyplot as plt
from pathlib import Path
from pandas import read_csv, DataFrame, Series
from Bio import SeqIO
from Bio.SeqRecord import SeqRecord
from Bio.Seq import Seq
from sklearn.model_selection import train_test_split
from helpers import plot_data_statistics
# The UniProt accession regex, used later to extract the uniprot accession from the sequence identifiers
uniprot_accesson_regex = re.compile("([OPQ][0-9][A-Z0-9]{3}[0-9]|[A-NR-Z][0-9]([A-Z][A-Z0-9]{2}[0-9]){1,2})")
# Where RAW data is stored and where processed data will be deposited
data_path = Path('') / '..' / 'data' / 'meltome'
split_path = Path('') / '..' / 'splits' / 'meltome'
# There are two datasets: one for human, one for other spieces
cross_data_path = data_path / 'cross-species.csv'
human_data_path = data_path / 'human.csv'
# For the human data, we need to map gene names to UniProt accessions
# Then we use a TSV export from UniProt to map the sequence to the gene name
human_sequences_path = data_path / 'human_sequences.tsv'
# We also perform mmseqs2 clustering of the human sequences to then perform train/test splits
human_clusters_path = data_path / 'sequence_cluster_splits.csv'
cross_data = read_csv(cross_data_path)
human_data = read_csv(human_data_path)
# Human sequence data is taken from UniProt and is taken for mapping purposes
human_sequences_data = read_csv(human_sequences_path, sep='\t')
# Some data cleaning: remove samples with NaNs
cross_data.dropna(subset=['Protein_ID', 'run_name'], inplace=True)
# Let's have a peek in the "cross data", aka other organisms than human
cross_data[:3]
# There are multiple entries for the same protein ID as there are multiple temperature reads
# as well as, potentially, multiple cell lines
cross_data[cross_data['Protein_ID'].str.contains('C0H3V2') == True]
# Let's have a peek in the human data
human_data[:3]
# There are multiple entries for the same protein ID as there are multiple temperature reads
# as well as, potentially, multiple cell lines, measured multiple times...
human_data[human_data['gene_name'].str.contains('BRCA')]
# In turn, the melting point may be different, depending on the cell line (or even in the same cell line)
human_data[human_data['gene_name'].str.contains('BRCA')][
['cell_line_or_type', 'meltPoint']
].drop_duplicates()
# Human data comes with an additional "quan_norm_meltPoint"
human_data[human_data['gene_name'].str.contains('BRCA')][
['cell_line_or_type', 'quan_norm_meltPoint']
].drop_duplicates()
# If we put it all together
human_data[human_data['gene_name'].str.contains('BRCA')][
['cell_line_or_type', 'quan_norm_meltPoint', 'meltPoint']
].drop_duplicates()
###Output
_____no_output_____
###Markdown
Map gene names to UniProt identifiers for human and sequences to identifiers for cross_spieces
###Code
# Get all human proteins from SwissProt (https://www.uniprot.org/uniprot/?query=*&fil=organism%3A%22Homo+sapiens+%28Human%29+%5B9606%5D%22+AND+reviewed%3Ayes&sort=score)
# Include: primary gene name and sequence
# Download as TSV!
# It's time to merge sequence data with the gene names
# let's list the columns in the human_sequences data
for column in human_sequences_data.columns:
print(column)
gene_sequence_mapping = {}
for gene_name in human_data['gene_name'].unique():
elements = human_sequences_data[human_sequences_data['Gene names (primary )'] == gene_name].to_dict('records')
if len(elements) < 1:
elements = human_sequences_data[human_sequences_data['Gene names'].str.contains(gene_name) == True].to_dict('records')
if len(elements) > 0:
first_element = elements[0]
gene_sequence_mapping[gene_name] = {
'uniprotAccession': first_element['Entry'],
'sequence': first_element['Sequence'],
}
###Output
/Users/chdallago/miniconda3/envs/bio-benchmarks/lib/python3.8/site-packages/pandas/core/strings/accessor.py:101: UserWarning: This pattern has match groups. To actually get the groups, use str.extract.
return func(self, *args, **kwargs)
###Markdown
Create the raw data mixed split: aggregate all data and split itGroup by run_name and Protein_ID.The melting point (meltPoint) will be the same for each grouped itemThen, there are the channel, temperature and fold change Structure idea:In JSON format:```json{ proteinId: XX, uniprotAccession: ???, runName: YY, meltingPoint: ZZ, quantNormMeltingPoint: KK, origin: [human|cross_spieces] meltingBehaviour: [ { tempertaure: temp, fold_change: fchg, channel: channel }, { tempertaure: temp, fold_change: fchg, channel: channel }, ... ]}```
###Code
proteins = list()
# For cross_data
def add_group_to_proteins(group):
first_hit = group.iloc[0]
melting_behaviour = group[['temperature', 'channel', 'fold_change']].to_dict('records')
protein = {
'proteinId': first_hit['Protein_ID'],
'uniprotAccession': None,
'runName': first_hit['run_name'],
'meltingPoint': first_hit['meltPoint'],
'meltingBehaviour': melting_behaviour,
'origin': "cross_spieces"
}
uniprot_match = re.search(uniprot_accesson_regex, first_hit['Protein_ID'])
if uniprot_match:
protein['uniprotAccession'] = uniprot_match.group()
proteins.append(protein)
cross_data.groupby(['Protein_ID', 'run_name']).apply(add_group_to_proteins)
# Print all the UniProt Accessions in order to use the UniProt mapping service (https://www.uniprot.org/uploadlists/)
# to download all sequences in bulk
# Uncomment the following to list all possible UniProt accessions
# ";".join([protein['uniprotAccession'] for protein in proteins if protein['uniprotAccession']])
# this resulted in: 34251 out of 34253 UniProtKB AC/ID identifiers were successfully mapped to 34236 UniProtKB IDs in the table below.
sequences_dict = {}
for sequence in SeqIO.parse(data_path / "sequences.fasta", "fasta"):
sequences_dict[sequence.id.split('|')[1]] = str(sequence.seq)
for protein in proteins:
if protein['uniprotAccession']:
match = sequences_dict.get(protein['uniprotAccession'])
if match:
protein['sequence'] = match
# For human_data
def add_group_to_proteins(group):
first_hit = group.iloc[0]
melting_behaviour = group[['temperature', 'fold_change']].to_dict('records')
protein = {
'proteinId': first_hit['gene_name'],
'uniprotAccession': None,
'runName': first_hit['cell_line_or_type'],
'meltingPoint': first_hit['meltPoint'],
'quantNormMeltingPoint': first_hit['quan_norm_meltPoint'],
'meltingBehaviour': melting_behaviour,
'origin': "human"
}
uniprot_match = gene_sequence_mapping.get(first_hit['gene_name'])
if uniprot_match:
protein['uniprotAccession'] = uniprot_match['uniprotAccession']
protein['sequence'] = uniprot_match['sequence']
proteins.append(protein)
human_data.groupby(['gene_name', 'cell_line_or_type', 'meltPoint']).apply(add_group_to_proteins)
with open(split_path / "full_dataset.json", "w") as outfile:
json.dump(proteins, outfile)
protein_sequences = list()
for protein in proteins:
if protein.get('sequence') \
and protein.get('meltingPoint') \
and not math.isnan(protein.get('meltingPoint'))\
and protein.get('runName'):
protein_sequences.append(
SeqRecord(
Seq(protein.get('sequence')),
id=f"{protein.get('uniprotAccession')}_{'_'.join(protein.get('runName').split(' '))}",
description=f"MELTING_POINT={protein.get('meltingPoint')}"
)
)
SeqIO.write(protein_sequences, split_path / "full_dataset_sequences.fasta", "fasta")
###Output
_____no_output_____
###Markdown
After having run MMSeqs2 to cluster the sequences, we can read in the TSV file to split the dataset
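The clustering step itself was run outside this notebook with MMseqs2. As a rough, hedged illustration only — the exact MMseqs2 flags, output prefix, and the renaming that produced `meltome_PIDE20_clusters.tsv` are assumptions — the redundancy reduction at roughly 20% pairwise sequence identity could be driven from Python along these lines:
```python
# Illustrative sketch only: assumes the MMseqs2 binary is installed and on PATH.
# The real invocation and post-processing used for meltome_PIDE20_clusters.tsv may differ.
import subprocess
from pandas import read_csv

subprocess.run(
    ["mmseqs", "easy-cluster",
     str(split_path / "full_dataset_sequences.fasta"),  # FASTA written in the cell above
     "meltome_PIDE20", "tmp",
     "--min-seq-id", "0.2"],                            # ~20% pairwise identity threshold
    check=True,
)

# easy-cluster writes <prefix>_cluster.tsv with two tab-separated columns: representative, member
clusters = read_csv("meltome_PIDE20_cluster.tsv", sep="\t",
                    names=["cluster_representative", "cluster_component"])
```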
###Code
sequence_clusters = read_csv(split_path / "meltome_PIDE20_clusters.tsv", sep="\t")
sequence_clusters.drop_duplicates(inplace=True)
sequence_clusters[sequence_clusters.duplicated("cluster_component")]
cluster_representatives = sequence_clusters.cluster_representative.unique()
cluster_components = sequence_clusters.cluster_component.unique()
print(f"Theres {len(cluster_representatives)} cluster representatives and "
f"{len(cluster_components)} sequences in total.")
train, test = train_test_split(cluster_representatives, test_size=0.2, random_state=11)
# Turn train and test into sets (need to remove from them later)
train = set(train)
test = set(test)
possibilities = set(sequence_clusters.cluster_component.tolist())
clustered_set = list()
full_set = list()
mixed_set = list()
for protein in proteins:
if protein.get('sequence') \
and protein.get('meltingPoint') \
and not math.isnan(protein.get('meltingPoint'))\
and protein.get('runName'):
key = f"{protein.get('uniprotAccession')}_{'_'.join(protein.get('runName').split(' '))}"
if key in possibilities:
hits = sequence_clusters[sequence_clusters.cluster_component == key].values
cluster_rep_key = hits[0][0]
if cluster_rep_key in train:
full_set.append({
'sequence': protein.get('sequence'),
'target': protein.get('meltingPoint'),
'set': 'train'
})
mixed_set.append({
'sequence': protein.get('sequence'),
'target': protein.get('meltingPoint'),
'set': 'train'
})
elif cluster_rep_key in test:
full_set.append({
'sequence': protein.get('sequence'),
'target': protein.get('meltingPoint'),
'set': 'test'
})
possibilities.remove(key)
if key in train:
clustered_set.append({
'sequence': protein.get('sequence'),
'target': protein.get('meltingPoint'),
'set': 'train'
})
train.remove(key)
elif key in test:
clustered_set.append({
'sequence': protein.get('sequence'),
'target': protein.get('meltingPoint'),
'set': 'test'
})
mixed_set.append({
'sequence': protein.get('sequence'),
'target': protein.get('meltingPoint'),
'set': 'test'
})
test.remove(key)
# Turn dictionary into dataframe
mixed_set_df = DataFrame.from_records(mixed_set)
# Get 10% validation from training:
train_indices = mixed_set_df.query('set=="train"').index
_, val_indices = train_test_split(train_indices, test_size=0.1, random_state=11)
mixed_set_df.loc[val_indices, 'validation'] = True
mixed_set_df.to_csv(split_path / 'splits' / 'mixed_split.csv', index=False)
# Let's inspect the dataframe
display(mixed_set_df[:3])
# Plot statistics
plot_data_statistics(mixed_set_df, 'set', 'target')
###Output
_____no_output_____
###Markdown
Human data splits
###Code
# Pre-computed clusters from human sequences
# check out the notebook in helpers
human_clusters = read_csv(human_clusters_path)
def gene_to_sequence_set(row):
mapped_item = gene_sequence_mapping.get(row['gene_name'])
if not mapped_item:
return
cluster_component = human_clusters.query(
f"cluster_component == '{mapped_item['uniprotAccession']}'"
).iloc[0]
return Series({
'target': row['meltPoint'],
'gene_name': row['gene_name'],
'accession': mapped_item['uniprotAccession'],
'sequence': mapped_item['sequence'],
'set': cluster_component['set'],
'validation': cluster_component['validation'],
'cluster_representative': cluster_component['cluster_representative'],
})
# All averaged human data
human_proteins = human_data[['gene_name', 'meltPoint']].groupby('gene_name').mean()
# Add gene_name column
human_proteins['gene_name'] = human_proteins.index
# Map accession, sequence, set and val split to entry
human_proteins = human_proteins.apply(gene_to_sequence_set, axis=1)
# Drop rows where sequence could not be mapped
human_proteins.dropna(inplace=True)
# Plot statistics
plot_data_statistics(human_proteins, 'set', 'target')
# Write to CSV
human_proteins[
['sequence', 'target', 'set', 'validation']
].to_csv(split_path / 'splits' / 'human.csv', index=False)
# Only human data from one cell line (HepG2) in one experiment (the first in the set)
human_HepG2_proteins = human_data[
human_data.cell_line_or_type == "HepG2"
][
['gene_name', 'meltPoint']
].drop_duplicates(subset=['gene_name'], keep="first")
# Make sure that only unique names are in the gene name
assert(len(human_HepG2_proteins) == len(human_HepG2_proteins.gene_name.unique()))
# Map accession, sequence, set and val split to entry
human_HepG2_proteins = human_HepG2_proteins.apply(gene_to_sequence_set, axis=1)
# Drop rows where sequence could not be mapped
human_HepG2_proteins.dropna(inplace=True)
# Plot statistics
plot_data_statistics(human_HepG2_proteins, 'set', 'target')
# Write to CSV
human_HepG2_proteins[
['sequence', 'target', 'set', 'validation']
].to_csv(split_path / 'splits' / 'human_cell.csv', index=False)
###Output
The test set will be 19.08% of the data (1366 out of 7158 samples).
|
4. Convolutional Neural Networks/numpy_conv_net.ipynb | ###Markdown
Convolutional Neural Networks: Step by StepWelcome to Course 4's first assignment! In this assignment, you will implement convolutional (CONV) and pooling (POOL) layers in numpy, including both forward propagation and (optionally) backward propagation. **Notation**:- Superscript $[l]$ denotes an object of the $l^{th}$ layer. - Example: $a^{[4]}$ is the $4^{th}$ layer activation. $W^{[5]}$ and $b^{[5]}$ are the $5^{th}$ layer parameters.- Superscript $(i)$ denotes an object from the $i^{th}$ example. - Example: $x^{(i)}$ is the $i^{th}$ training example input. - Subscript $i$ denotes the $i^{th}$ entry of a vector. - Example: $a^{[l]}_i$ denotes the $i^{th}$ entry of the activations in layer $l$, assuming this is a fully connected (FC) layer. - $n_H$, $n_W$ and $n_C$ denote respectively the height, width and number of channels of a given layer. If you want to reference a specific layer $l$, you can also write $n_H^{[l]}$, $n_W^{[l]}$, $n_C^{[l]}$. - $n_{H_{prev}}$, $n_{W_{prev}}$ and $n_{C_{prev}}$ denote respectively the height, width and number of channels of the previous layer. If referencing a specific layer $l$, this could also be denoted $n_H^{[l-1]}$, $n_W^{[l-1]}$, $n_C^{[l-1]}$. We assume that you are already familiar with `numpy` and/or have completed the previous courses of the specialization. Let's get started! Updates If you were working on the notebook before this update...* The current notebook is version "v2a".* You can find your original work saved in the notebook with the previous version name ("v2") * To view the file directory, go to the menu "File->Open", and this will open a new tab that shows the file directory. List of updates* clarified example used for padding function. Updated starter code for padding function.* `conv_forward` has additional hints to help students if they're stuck.* `conv_forward` places code for `vert_start` and `vert_end` within the `for h in range(...)` loop; to avoid redundant calculations. Similarly updated `horiz_start` and `horiz_end`. **Thanks to our mentor Kevin Brown for pointing this out.*** `conv_forward` breaks down the `Z[i, h, w, c]` single line calculation into 3 lines, for clarity.* `conv_forward` test case checks that students don't accidentally use n_H_prev instead of n_H, use n_W_prev instead of n_W, and don't accidentally swap n_H with n_W* `pool_forward` properly nests calculations of `vert_start`, `vert_end`, `horiz_start`, and `horiz_end` to avoid redundant calculations.* `pool_forward' has two new test cases that check for a correct implementation of stride (the height and width of the previous layer's activations should be large enough relative to the filter dimensions so that a stride can take place). * `conv_backward`: initialize `Z` and `cache` variables within unit test, to make it independent of unit testing that occurs in the `conv_forward` section of the assignment.* **Many thanks to our course mentor, Paul Mielke, for proposing these test cases.** 1 - PackagesLet's first import all the packages that you will need during this assignment. - [numpy](www.numpy.org) is the fundamental package for scientific computing with Python.- [matplotlib](http://matplotlib.org) is a library to plot graphs in Python.- np.random.seed(1) is used to keep all the random function calls consistent. It will help us grade your work.
###Code
import numpy as np
import h5py
import matplotlib.pyplot as plt
%matplotlib inline
plt.rcParams['figure.figsize'] = (5.0, 4.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
%load_ext autoreload
%autoreload 2
np.random.seed(1)
###Output
The autoreload extension is already loaded. To reload it, use:
%reload_ext autoreload
###Markdown
2 - Outline of the AssignmentYou will be implementing the building blocks of a convolutional neural network! Each function you will implement will have detailed instructions that will walk you through the steps needed:- Convolution functions, including: - Zero Padding - Convolve window - Convolution forward - Convolution backward (optional)- Pooling functions, including: - Pooling forward - Create mask - Distribute value - Pooling backward (optional) This notebook will ask you to implement these functions from scratch in `numpy`. In the next notebook, you will use the TensorFlow equivalents of these functions to build the following model:**Note** that for every forward function, there is its corresponding backward equivalent. Hence, at every step of your forward module you will store some parameters in a cache. These parameters are used to compute gradients during backpropagation. 3 - Convolutional Neural NetworksAlthough programming frameworks make convolutions easy to use, they remain one of the hardest concepts to understand in Deep Learning. A convolution layer transforms an input volume into an output volume of different size, as shown below. In this part, you will build every step of the convolution layer. You will first implement two helper functions: one for zero padding and the other for computing the convolution function itself. 3.1 - Zero-PaddingZero-padding adds zeros around the border of an image: **Figure 1** : **Zero-Padding** Image (3 channels, RGB) with a padding of 2. The main benefits of padding are the following:- It allows you to use a CONV layer without necessarily shrinking the height and width of the volumes. This is important for building deeper networks, since otherwise the height/width would shrink as you go to deeper layers. An important special case is the "same" convolution, in which the height/width is exactly preserved after one layer. - It helps us keep more of the information at the border of an image. Without padding, very few values at the next layer would be affected by pixels as the edges of an image.**Exercise**: Implement the following function, which pads all the images of a batch of examples X with zeros. [Use np.pad](https://docs.scipy.org/doc/numpy/reference/generated/numpy.pad.html). Note if you want to pad the array "a" of shape $(5,5,5,5,5)$ with `pad = 1` for the 2nd dimension, `pad = 3` for the 4th dimension and `pad = 0` for the rest, you would do:```pythona = np.pad(a, ((0,0), (1,1), (0,0), (3,3), (0,0)), mode='constant', constant_values = (0,0))```
###Code
# GRADED FUNCTION: zero_pad
def zero_pad(X, pad):
"""
Pad with zeros all images of the dataset X. The padding is applied to the height and width of an image,
as illustrated in Figure 1.
Argument:
X -- python numpy array of shape (m, n_H, n_W, n_C) representing a batch of m images
pad -- integer, amount of padding around each image on vertical and horizontal dimensions
Returns:
X_pad -- padded image of shape (m, n_H + 2*pad, n_W + 2*pad, n_C)
"""
X_pad = np.pad(X, ((0, 0), (pad, pad), (pad, pad), (0, 0)), mode='constant', constant_values=(0, 0))
return X_pad
np.random.seed(1)
x = np.random.randn(4, 3, 3, 2)
x_pad = zero_pad(x, 2)
print ("x.shape =\n", x.shape)
print ("x_pad.shape =\n", x_pad.shape)
print ("x[1,1] =\n", x[1,1])
print ("x_pad[1,1] =\n", x_pad[1,1])
fig, axarr = plt.subplots(1, 2)
axarr[0].set_title('x')
axarr[0].imshow(x[0,:,:,0])
axarr[1].set_title('x_pad')
axarr[1].imshow(x_pad[0,:,:,0])
###Output
x.shape =
(4, 3, 3, 2)
x_pad.shape =
(4, 7, 7, 2)
x[1,1] =
[[ 0.90085595 -0.68372786]
[-0.12289023 -0.93576943]
[-0.26788808 0.53035547]]
x_pad[1,1] =
[[ 0. 0.]
[ 0. 0.]
[ 0. 0.]
[ 0. 0.]
[ 0. 0.]
[ 0. 0.]
[ 0. 0.]]
###Markdown
**Expected Output**:```x.shape = (4, 3, 3, 2)x_pad.shape = (4, 7, 7, 2)x[1,1] = [[ 0.90085595 -0.68372786] [-0.12289023 -0.93576943] [-0.26788808 0.53035547]]x_pad[1,1] = [[ 0. 0.] [ 0. 0.] [ 0. 0.] [ 0. 0.] [ 0. 0.] [ 0. 0.] [ 0. 0.]]``` 3.2 - Single step of convolution In this part, implement a single step of convolution, in which you apply the filter to a single position of the input. This will be used to build a convolutional unit, which: - Takes an input volume - Applies a filter at every position of the input- Outputs another volume (usually of different size) **Figure 2** : **Convolution operation** with a filter of 3x3 and a stride of 1 (stride = amount you move the window each time you slide) In a computer vision application, each value in the matrix on the left corresponds to a single pixel value, and we convolve a 3x3 filter with the image by multiplying its values element-wise with the original matrix, then summing them up and adding a bias. In this first step of the exercise, you will implement a single step of convolution, corresponding to applying a filter to just one of the positions to get a single real-valued output. Later in this notebook, you'll apply this function to multiple positions of the input to implement the full convolutional operation. **Exercise**: Implement conv_single_step(). [Hint](https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.sum.html). **Note**: The variable b will be passed in as a numpy array. If we add a scalar (a float or integer) to a numpy array, the result is a numpy array. In the special case when a numpy array contains a single value, we can cast it as a float to convert it to a scalar.
###Code
# GRADED FUNCTION: conv_single_step
def conv_single_step(a_slice_prev, W, b):
"""
Apply one filter defined by parameters W on a single slice (a_slice_prev) of the output activation
of the previous layer.
Arguments:
a_slice_prev -- slice of input data of shape (f, f, n_C_prev)
W -- Weight parameters contained in a window - matrix of shape (f, f, n_C_prev)
b -- Bias parameters contained in a window - matrix of shape (1, 1, 1)
Returns:
Z -- a scalar value, the result of convolving the sliding window (W, b) on a slice x of the input data
"""
# Element-wise product between a_slice_prev and W. Do not add the bias yet.
s = np.multiply(a_slice_prev, W)
# Sum over all entries of the volume s.
Z = np.sum(s)
# Add bias b to Z. Cast b to a float() so that Z results in a scalar value.
Z = Z + float(b)
return Z
np.random.seed(1)
a_slice_prev = np.random.randn(4, 4, 3)
W = np.random.randn(4, 4, 3)
b = np.random.randn(1, 1, 1)
Z = conv_single_step(a_slice_prev, W, b)
print("Z =", Z)
###Output
Z = -6.99908945068
###Markdown
**Expected Output**: **Z** -6.99908945068 3.3 - Convolutional Neural Networks - Forward passIn the forward pass, you will take many filters and convolve them on the input. Each 'convolution' gives you a 2D matrix output. You will then stack these outputs to get a 3D volume: **Exercise**: Implement the function below to convolve the filters `W` on an input activation `A_prev`. This function takes the following inputs:* `A_prev`, the activations output by the previous layer (for a batch of m inputs); * Weights are denoted by `W`. The filter window size is `f` by `f`.* The bias vector is `b`, where each filter has its own (single) bias. Finally you also have access to the hyperparameters dictionary which contains the stride and the padding. **Hint**: 1. To select a 2x2 slice at the upper left corner of a matrix "a_prev" (shape (5,5,3)), you would do:```pythona_slice_prev = a_prev[0:2,0:2,:]```Notice how this gives a 3D slice that has height 2, width 2, and depth 3. Depth is the number of channels. This will be useful when you will define `a_slice_prev` below, using the `start/end` indexes you will define.2. To define a_slice you will need to first define its corners `vert_start`, `vert_end`, `horiz_start` and `horiz_end`. This figure may be helpful for you to find out how each of the corner can be defined using h, w, f and s in the code below. **Figure 3** : **Definition of a slice using vertical and horizontal start/end (with a 2x2 filter)** This figure shows only a single channel. **Reminder**:The formulas relating the output shape of the convolution to the input shape is:$$ n_H = \lfloor \frac{n_{H_{prev}} - f + 2 \times pad}{stride} \rfloor +1 $$$$ n_W = \lfloor \frac{n_{W_{prev}} - f + 2 \times pad}{stride} \rfloor +1 $$$$ n_C = \text{number of filters used in the convolution}$$For this exercise, we won't worry about vectorization, and will just implement everything with for-loops. Additional Hints if you're stuck* You will want to use array slicing (e.g.`varname[0:1,:,3:5]`) for the following variables: `a_prev_pad` ,`W`, `b` Copy the starter code of the function and run it outside of the defined function, in separate cells. Check that the subset of each array is the size and dimension that you're expecting. * To decide how to get the vert_start, vert_end; horiz_start, horiz_end, remember that these are indices of the previous layer. Draw an example of a previous padded layer (8 x 8, for instance), and the current (output layer) (2 x 2, for instance). The output layer's indices are denoted by `h` and `w`. * Make sure that `a_slice_prev` has a height, width and depth.* Remember that `a_prev_pad` is a subset of `A_prev_pad`. Think about which one should be used within the for loops.
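As a quick sanity check of the output-shape formula before writing the loops, the dimensions of the test case further down (`A_prev` of shape (10, 5, 7, 4) with f = 3, pad = 1, stride = 2) work out as follows:
```python
# Numeric check of the CONV output-shape formula for the test case below
n_H_prev, n_W_prev, f, pad, stride = 5, 7, 3, 1, 2
n_H = int((n_H_prev - f + 2 * pad) / stride) + 1   # floor((5 - 3 + 2) / 2) + 1 = 3
n_W = int((n_W_prev - f + 2 * pad) / stride) + 1   # floor((7 - 3 + 2) / 2) + 1 = 4
print(n_H, n_W)                                    # -> 3 4
```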
###Code
# GRADED FUNCTION: conv_forward
def conv_forward(A_prev, W, b, hparameters):
"""
Implements the forward propagation for a convolution function
Arguments:
A_prev -- output activations of the previous layer,
numpy array of shape (m, n_H_prev, n_W_prev, n_C_prev)
W -- Weights, numpy array of shape (f, f, n_C_prev, n_C)
b -- Biases, numpy array of shape (1, 1, 1, n_C)
hparameters -- python dictionary containing "stride" and "pad"
Returns:
Z -- conv output, numpy array of shape (m, n_H, n_W, n_C)
cache -- cache of values needed for the conv_backward() function
"""
# Retrieve dimensions from A_prev's shape
(m, n_H_prev, n_W_prev, n_C_prev) = A_prev.shape
# Retrieve dimensions from W's shape
(f, f, n_C_prev, n_C) = W.shape
# Retrieve information from "hparameters"
stride = hparameters["stride"]
pad = hparameters["pad"]
# Compute the dimensions of the CONV output volume using the formula given above.
n_H = int((n_H_prev - f + 2 * pad) / stride) + 1
n_W = int((n_W_prev - f + 2 * pad) / stride) + 1
# Initialize the output volume Z with zeros.
Z = np.zeros((m, n_H, n_W, n_C))
# Create A_prev_pad by padding A_prev
A_prev_pad = zero_pad(A_prev, pad)
for i in range(m): # loop over the batch of training examples
a_prev_pad = A_prev_pad[i, ...] # Select ith training example's padded activation
for h in range(n_H): # loop over vertical axis of the output volume
vert_start = h * stride
vert_end = vert_start + f
for w in range(n_W): # loop over horizontal axis of the output volume
horiz_start = w * stride
horiz_end = horiz_start + f
for c in range(n_C): # loop over channels (= #filters) of the output volume
a_slice_prev = a_prev_pad[vert_start:vert_end, horiz_start:horiz_end, :]
weights = W[..., c]
biases = b[..., c]
Z[i, h, w, c] = conv_single_step(a_slice_prev, weights, biases)
# Making sure your output shape is correct
assert(Z.shape == (m, n_H, n_W, n_C))
# Save information in "cache" for the backprop
cache = (A_prev, W, b, hparameters)
return Z, cache
np.random.seed(1)
A_prev = np.random.randn(10,5,7,4)
W = np.random.randn(3,3,4,8)
b = np.random.randn(1,1,1,8)
hparameters = {"pad" : 1,
"stride": 2}
Z, cache_conv = conv_forward(A_prev, W, b, hparameters)
print("Z's mean =\n", np.mean(Z))
print("Z[3,2,1] =\n", Z[3,2,1])
print("cache_conv[0][1][2][3] =\n", cache_conv[0][1][2][3])
###Output
Z's mean =
0.692360880758
Z[3,2,1] =
[ -1.28912231 2.27650251 6.61941931 0.95527176 8.25132576
2.31329639 13.00689405 2.34576051]
cache_conv[0][1][2][3] =
[-1.1191154 1.9560789 -0.3264995 -1.34267579]
###Markdown
**Expected Output**:```Z's mean = 0.692360880758Z[3,2,1] = [ -1.28912231 2.27650251 6.61941931 0.95527176 8.25132576 2.31329639 13.00689405 2.34576051]cache_conv[0][1][2][3] = [-1.1191154 1.9560789 -0.3264995 -1.34267579]``` Finally, CONV layer should also contain an activation, in which case we would add the following line of code:```python Convolve the window to get back one output neuronZ[i, h, w, c] = ... Apply activationA[i, h, w, c] = activation(Z[i, h, w, c])```You don't need to do it here. 4 - Pooling layer The pooling (POOL) layer reduces the height and width of the input. It helps reduce computation, as well as helps make feature detectors more invariant to its position in the input. The two types of pooling layers are: - Max-pooling layer: slides an ($f, f$) window over the input and stores the max value of the window in the output.- Average-pooling layer: slides an ($f, f$) window over the input and stores the average value of the window in the output.These pooling layers have no parameters for backpropagation to train. However, they have hyperparameters such as the window size $f$. This specifies the height and width of the $f \times f$ window you would compute a *max* or *average* over. 4.1 - Forward PoolingNow, you are going to implement MAX-POOL and AVG-POOL, in the same function. **Exercise**: Implement the forward pass of the pooling layer. Follow the hints in the comments below.**Reminder**:As there's no padding, the formulas binding the output shape of the pooling to the input shape is:$$ n_H = \lfloor \frac{n_{H_{prev}} - f}{stride} \rfloor +1 $$$$ n_W = \lfloor \frac{n_{W_{prev}} - f}{stride} \rfloor +1 $$$$ n_C = n_{C_{prev}}$$
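A quick arithmetic check of these formulas against the two test cases further down (a 5x5 input with f = 3): stride 1 gives a 3x3 output and stride 2 gives a 2x2 output.
```python
# Sanity check of the no-padding pooling output-shape formula for a 5x5 input, f = 3
n_H_prev, f = 5, 3
for stride in (1, 2):
    n_H = int(1 + (n_H_prev - f) / stride)
    print("stride = " + str(stride) + " -> output height/width = " + str(n_H))  # 3, then 2
```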
###Code
# GRADED FUNCTION: pool_forward
def pool_forward(A_prev, hparameters, mode = "max"):
"""
Implements the forward pass of the pooling layer
Arguments:
A_prev -- Input data, numpy array of shape (m, n_H_prev, n_W_prev, n_C_prev)
hparameters -- python dictionary containing "f" and "stride"
mode -- the pooling mode you would like to use, defined as a string ("max" or "average")
Returns:
A -- output of the pool layer, a numpy array of shape (m, n_H, n_W, n_C)
cache -- cache used in the backward pass of the pooling layer, contains the input and hparameters
"""
# Retrieve dimensions from the input shape
(m, n_H_prev, n_W_prev, n_C_prev) = A_prev.shape
# Retrieve hyperparameters from "hparameters"
f = hparameters["f"]
stride = hparameters["stride"]
# Define the dimensions of the output
n_H = int(1 + (n_H_prev - f) / stride)
n_W = int(1 + (n_W_prev - f) / stride)
n_C = n_C_prev
# Initialize output matrix A
A = np.zeros((m, n_H, n_W, n_C))
for i in range(m): # loop over the training examples
for h in range(n_H): # loop on the vertical axis of the output volume
vert_start = h * stride
vert_end = vert_start + f
for w in range(n_W): # loop on the horizontal axis of the output volume
horiz_start = w * stride
horiz_end = horiz_start + f
for c in range (n_C): # loop over the channels of the output volume
a_prev_slice = A_prev[i, vert_start:vert_end, horiz_start:horiz_end, c]
# Compute the pooling operation on the slice.
if mode == "max":
A[i, h, w, c] = np.max(a_prev_slice)
elif mode == "average":
A[i, h, w, c] = np.mean(a_prev_slice)
# Store the input and hparameters in "cache" for pool_backward()
cache = (A_prev, hparameters)
# Making sure your output shape is correct
assert(A.shape == (m, n_H, n_W, n_C))
return A, cache
# Case 1: stride of 1
np.random.seed(1)
A_prev = np.random.randn(2, 5, 5, 3)
hparameters = {"stride" : 1, "f": 3}
A, cache = pool_forward(A_prev, hparameters)
print("mode = max")
print("A.shape = " + str(A.shape))
print("A =\n", A)
print()
A, cache = pool_forward(A_prev, hparameters, mode = "average")
print("mode = average")
print("A.shape = " + str(A.shape))
print("A =\n", A)
###Output
mode = max
A.shape = (2, 3, 3, 3)
A =
[[[[ 1.74481176 0.90159072 1.65980218]
[ 1.74481176 1.46210794 1.65980218]
[ 1.74481176 1.6924546 1.65980218]]
[[ 1.14472371 0.90159072 2.10025514]
[ 1.14472371 0.90159072 1.65980218]
[ 1.14472371 1.6924546 1.65980218]]
[[ 1.13162939 1.51981682 2.18557541]
[ 1.13162939 1.51981682 2.18557541]
[ 1.13162939 1.6924546 2.18557541]]]
[[[ 1.19891788 0.84616065 0.82797464]
[ 0.69803203 0.84616065 1.2245077 ]
[ 0.69803203 1.12141771 1.2245077 ]]
[[ 1.96710175 0.84616065 1.27375593]
[ 1.96710175 0.84616065 1.23616403]
[ 1.62765075 1.12141771 1.2245077 ]]
[[ 1.96710175 0.86888616 1.27375593]
[ 1.96710175 0.86888616 1.23616403]
[ 1.62765075 1.12141771 0.79280687]]]]
mode = average
A.shape = (2, 3, 3, 3)
A =
[[[[ -3.01046719e-02 -3.24021315e-03 -3.36298859e-01]
[ 1.43310483e-01 1.93146751e-01 -4.44905196e-01]
[ 1.28934436e-01 2.22428468e-01 1.25067597e-01]]
[[ -3.81801899e-01 1.59993515e-02 1.70562706e-01]
[ 4.73707165e-02 2.59244658e-02 9.20338402e-02]
[ 3.97048605e-02 1.57189094e-01 3.45302489e-01]]
[[ -3.82680519e-01 2.32579951e-01 6.25997903e-01]
[ -2.47157416e-01 -3.48524998e-04 3.50539717e-01]
[ -9.52551510e-02 2.68511000e-01 4.66056368e-01]]]
[[[ -1.73134159e-01 3.23771981e-01 -3.43175716e-01]
[ 3.80634669e-02 7.26706274e-02 -2.30268958e-01]
[ 2.03009393e-02 1.41414785e-01 -1.23158476e-02]]
[[ 4.44976963e-01 -2.61694592e-03 -3.10403073e-01]
[ 5.08114737e-01 -2.34937338e-01 -2.39611830e-01]
[ 1.18726772e-01 1.72552294e-01 -2.21121966e-01]]
[[ 4.29449255e-01 8.44699612e-02 -2.72909051e-01]
[ 6.76351685e-01 -1.20138225e-01 -2.44076712e-01]
[ 1.50774518e-01 2.89111751e-01 1.23238536e-03]]]]
###Markdown
** Expected Output**```mode = maxA.shape = (2, 3, 3, 3)A = [[[[ 1.74481176 0.90159072 1.65980218] [ 1.74481176 1.46210794 1.65980218] [ 1.74481176 1.6924546 1.65980218]] [[ 1.14472371 0.90159072 2.10025514] [ 1.14472371 0.90159072 1.65980218] [ 1.14472371 1.6924546 1.65980218]] [[ 1.13162939 1.51981682 2.18557541] [ 1.13162939 1.51981682 2.18557541] [ 1.13162939 1.6924546 2.18557541]]] [[[ 1.19891788 0.84616065 0.82797464] [ 0.69803203 0.84616065 1.2245077 ] [ 0.69803203 1.12141771 1.2245077 ]] [[ 1.96710175 0.84616065 1.27375593] [ 1.96710175 0.84616065 1.23616403] [ 1.62765075 1.12141771 1.2245077 ]] [[ 1.96710175 0.86888616 1.27375593] [ 1.96710175 0.86888616 1.23616403] [ 1.62765075 1.12141771 0.79280687]]]]mode = averageA.shape = (2, 3, 3, 3)A = [[[[ -3.01046719e-02 -3.24021315e-03 -3.36298859e-01] [ 1.43310483e-01 1.93146751e-01 -4.44905196e-01] [ 1.28934436e-01 2.22428468e-01 1.25067597e-01]] [[ -3.81801899e-01 1.59993515e-02 1.70562706e-01] [ 4.73707165e-02 2.59244658e-02 9.20338402e-02] [ 3.97048605e-02 1.57189094e-01 3.45302489e-01]] [[ -3.82680519e-01 2.32579951e-01 6.25997903e-01] [ -2.47157416e-01 -3.48524998e-04 3.50539717e-01] [ -9.52551510e-02 2.68511000e-01 4.66056368e-01]]] [[[ -1.73134159e-01 3.23771981e-01 -3.43175716e-01] [ 3.80634669e-02 7.26706274e-02 -2.30268958e-01] [ 2.03009393e-02 1.41414785e-01 -1.23158476e-02]] [[ 4.44976963e-01 -2.61694592e-03 -3.10403073e-01] [ 5.08114737e-01 -2.34937338e-01 -2.39611830e-01] [ 1.18726772e-01 1.72552294e-01 -2.21121966e-01]] [[ 4.29449255e-01 8.44699612e-02 -2.72909051e-01] [ 6.76351685e-01 -1.20138225e-01 -2.44076712e-01] [ 1.50774518e-01 2.89111751e-01 1.23238536e-03]]]]```
###Code
# Case 2: stride of 2
np.random.seed(1)
A_prev = np.random.randn(2, 5, 5, 3)
hparameters = {"stride" : 2, "f": 3}
A, cache = pool_forward(A_prev, hparameters)
print("mode = max")
print("A.shape = " + str(A.shape))
print("A =\n", A)
print()
A, cache = pool_forward(A_prev, hparameters, mode = "average")
print("mode = average")
print("A.shape = " + str(A.shape))
print("A =\n", A)
###Output
mode = max
A.shape = (2, 2, 2, 3)
A =
[[[[ 1.74481176 0.90159072 1.65980218]
[ 1.74481176 1.6924546 1.65980218]]
[[ 1.13162939 1.51981682 2.18557541]
[ 1.13162939 1.6924546 2.18557541]]]
[[[ 1.19891788 0.84616065 0.82797464]
[ 0.69803203 1.12141771 1.2245077 ]]
[[ 1.96710175 0.86888616 1.27375593]
[ 1.62765075 1.12141771 0.79280687]]]]
mode = average
A.shape = (2, 2, 2, 3)
A =
[[[[-0.03010467 -0.00324021 -0.33629886]
[ 0.12893444 0.22242847 0.1250676 ]]
[[-0.38268052 0.23257995 0.6259979 ]
[-0.09525515 0.268511 0.46605637]]]
[[[-0.17313416 0.32377198 -0.34317572]
[ 0.02030094 0.14141479 -0.01231585]]
[[ 0.42944926 0.08446996 -0.27290905]
[ 0.15077452 0.28911175 0.00123239]]]]
###Markdown
**Expected Output:** ```mode = maxA.shape = (2, 2, 2, 3)A = [[[[ 1.74481176 0.90159072 1.65980218] [ 1.74481176 1.6924546 1.65980218]] [[ 1.13162939 1.51981682 2.18557541] [ 1.13162939 1.6924546 2.18557541]]] [[[ 1.19891788 0.84616065 0.82797464] [ 0.69803203 1.12141771 1.2245077 ]] [[ 1.96710175 0.86888616 1.27375593] [ 1.62765075 1.12141771 0.79280687]]]]mode = averageA.shape = (2, 2, 2, 3)A = [[[[-0.03010467 -0.00324021 -0.33629886] [ 0.12893444 0.22242847 0.1250676 ]] [[-0.38268052 0.23257995 0.6259979 ] [-0.09525515 0.268511 0.46605637]]] [[[-0.17313416 0.32377198 -0.34317572] [ 0.02030094 0.14141479 -0.01231585]] [[ 0.42944926 0.08446996 -0.27290905] [ 0.15077452 0.28911175 0.00123239]]]]``` Congratulations! You have now implemented the forward passes of all the layers of a convolutional network. The remainder of this notebook is optional, and will not be graded. 5 - Backpropagation in convolutional neural networks (OPTIONAL / UNGRADED)In modern deep learning frameworks, you only have to implement the forward pass, and the framework takes care of the backward pass, so most deep learning engineers don't need to bother with the details of the backward pass. The backward pass for convolutional networks is complicated. If you wish, you can work through this optional portion of the notebook to get a sense of what backprop in a convolutional network looks like. When in an earlier course you implemented a simple (fully connected) neural network, you used backpropagation to compute the derivatives with respect to the cost to update the parameters. Similarly, in convolutional neural networks you can calculate the derivatives with respect to the cost in order to update the parameters. The backprop equations are not trivial and we did not derive them in lecture, but we will briefly present them below. 5.1 - Convolutional layer backward pass Let's start by implementing the backward pass for a CONV layer. 5.1.1 - Computing dA:This is the formula for computing $dA$ with respect to the cost for a certain filter $W_c$ and a given training example:$$ dA += \sum _{h=0} ^{n_H} \sum_{w=0} ^{n_W} W_c \times dZ_{hw} \tag{1}$$Where $W_c$ is a filter and $dZ_{hw}$ is a scalar corresponding to the gradient of the cost with respect to the output of the conv layer Z at the hth row and wth column (corresponding to the dot product taken at the ith stride left and jth stride down). Note that at each time, we multiply the the same filter $W_c$ by a different dZ when updating dA. We do so mainly because when computing the forward propagation, each filter is dotted and summed by a different a_slice. Therefore when computing the backprop for dA, we are just adding the gradients of all the a_slices. In code, inside the appropriate for-loops, this formula translates into:```pythonda_prev_pad[vert_start:vert_end, horiz_start:horiz_end, :] += W[:,:,:,c] * dZ[i, h, w, c]``` 5.1.2 - Computing dW:This is the formula for computing $dW_c$ ($dW_c$ is the derivative of one filter) with respect to the loss:$$ dW_c += \sum _{h=0} ^{n_H} \sum_{w=0} ^ {n_W} a_{slice} \times dZ_{hw} \tag{2}$$Where $a_{slice}$ corresponds to the slice which was used to generate the activation $Z_{ij}$. Hence, this ends up giving us the gradient for $W$ with respect to that slice. Since it is the same $W$, we will just add up all such gradients to get $dW$. 
In code, inside the appropriate for-loops, this formula translates into:```pythondW[:,:,:,c] += a_slice * dZ[i, h, w, c]``` 5.1.3 - Computing db:This is the formula for computing $db$ with respect to the cost for a certain filter $W_c$:$$ db = \sum_h \sum_w dZ_{hw} \tag{3}$$As you have previously seen in basic neural networks, db is computed by summing $dZ$. In this case, you are just summing over all the gradients of the conv output (Z) with respect to the cost. In code, inside the appropriate for-loops, this formula translates into:```pythondb[:,:,:,c] += dZ[i, h, w, c]```**Exercise**: Implement the `conv_backward` function below. You should sum over all the training examples, filters, heights, and widths. You should then compute the derivatives using formulas 1, 2 and 3 above.
###Code
def conv_backward(dZ, cache):
"""
Implement the backward propagation for a convolution function
Arguments:
dZ -- gradient of the cost with respect to the output of the conv layer (Z), numpy array of shape (m, n_H, n_W, n_C)
cache -- cache of values needed for the conv_backward(), output of conv_forward()
Returns:
dA_prev -- gradient of the cost with respect to the input of the conv layer (A_prev),
numpy array of shape (m, n_H_prev, n_W_prev, n_C_prev)
dW -- gradient of the cost with respect to the weights of the conv layer (W)
numpy array of shape (f, f, n_C_prev, n_C)
db -- gradient of the cost with respect to the biases of the conv layer (b)
numpy array of shape (1, 1, 1, n_C)
"""
# Retrieve information from "cache"
(A_prev, W, b, hparameters) = cache
# Retrieve dimensions from A_prev's shape
(m, n_H_prev, n_W_prev, n_C_prev) = A_prev.shape
# Retrieve dimensions from W's shape
(f, f, n_C_prev, n_C) = W.shape
# Retrieve information from "hparameters"
stride = hparameters["stride"]
pad = hparameters["pad"]
# Retrieve dimensions from dZ's shape
(m, n_H, n_W, n_C) = dZ.shape
# Initialize dA_prev, dW, db with the correct shapes
dA_prev = np.zeros((m, n_H_prev, n_W_prev, n_C_prev))
dW = np.zeros((f, f, n_C_prev, n_C))
db = np.zeros((1, 1, 1, n_C))
# Pad A_prev and dA_prev
A_prev_pad = zero_pad(A_prev, pad)
dA_prev_pad = zero_pad(dA_prev, pad)
for i in range(m): # loop over the training examples
# select ith training example from A_prev_pad and dA_prev_pad
a_prev_pad = A_prev_pad[i]
da_prev_pad = dA_prev_pad[i]
for h in range(n_H): # loop over vertical axis of the output volume
for w in range(n_W): # loop over horizontal axis of the output volume
for c in range(n_C): # loop over the channels of the output volume
# Find the corners of the current "slice"
vert_start = h * stride
vert_end = vert_start + f
horiz_start = w * stride
horiz_end = horiz_start + f
# Use the corners to define the slice from a_prev_pad
a_slice = a_prev_pad[vert_start:vert_end, horiz_start:horiz_end, :]
# Update gradients for the window and the filter's parameters using the code formulas given above
da_prev_pad[vert_start:vert_end, horiz_start:horiz_end, :] += W[:,:,:,c] * dZ[i, h, w, c]
dW[:,:,:,c] += a_slice * dZ[i, h, w, c]
db[:,:,:,c] += dZ[i, h, w, c]
        # Set the ith training example's dA_prev to the unpadded da_prev_pad (Hint: use X[pad:-pad, pad:-pad, :])
dA_prev[i, :, :, :] = da_prev_pad[pad:-pad, pad:-pad, :]
# Making sure your output shape is correct
assert(dA_prev.shape == (m, n_H_prev, n_W_prev, n_C_prev))
return dA_prev, dW, db
# We'll run conv_forward to initialize the 'Z' and 'cache_conv' variables,
# which we'll use to test the conv_backward function
np.random.seed(1)
A_prev = np.random.randn(10,4,4,3)
W = np.random.randn(2,2,3,8)
b = np.random.randn(1,1,1,8)
hparameters = {"pad" : 2,
"stride": 2}
Z, cache_conv = conv_forward(A_prev, W, b, hparameters)
# Test conv_backward
dA, dW, db = conv_backward(Z, cache_conv)
print("dA_mean =", np.mean(dA))
print("dW_mean =", np.mean(dW))
print("db_mean =", np.mean(db))
###Output
dA_mean = 1.45243777754
dW_mean = 1.72699145831
db_mean = 7.83923256462
###Markdown
** Expected Output: ** **dA_mean** 1.45243777754 **dW_mean** 1.72699145831 **db_mean** 7.83923256462 5.2 Pooling layer - backward passNext, let's implement the backward pass for the pooling layer, starting with the MAX-POOL layer. Even though a pooling layer has no parameters for backprop to update, you still need to backpropagation the gradient through the pooling layer in order to compute gradients for layers that came before the pooling layer. 5.2.1 Max pooling - backward pass Before jumping into the backpropagation of the pooling layer, you are going to build a helper function called `create_mask_from_window()` which does the following: $$ X = \begin{bmatrix}1 && 3 \\4 && 2\end{bmatrix} \quad \rightarrow \quad M =\begin{bmatrix}0 && 0 \\1 && 0\end{bmatrix}\tag{4}$$As you can see, this function creates a "mask" matrix which keeps track of where the maximum of the matrix is. True (1) indicates the position of the maximum in X, the other entries are False (0). You'll see later that the backward pass for average pooling will be similar to this but using a different mask. **Exercise**: Implement `create_mask_from_window()`. This function will be helpful for pooling backward. Hints:- [np.max()]() may be helpful. It computes the maximum of an array.- If you have a matrix X and a scalar x: `A = (X == x)` will return a matrix A of the same size as X such that:```A[i,j] = True if X[i,j] = xA[i,j] = False if X[i,j] != x```- Here, you don't need to consider cases where there are several maxima in a matrix.
###Code
def create_mask_from_window(x):
"""
Creates a mask from an input matrix x, to identify the max entry of x.
Arguments:
x -- Array of shape (f, f)
Returns:
mask -- Array of the same shape as window, contains a True at the position corresponding to the max entry of x.
"""
mask = (x == np.max(x))
return mask
np.random.seed(1)
x = np.random.randn(2,3)
mask = create_mask_from_window(x)
print('x = ', x)
print("mask = ", mask)
###Output
x = [[ 1.62434536 -0.61175641 -0.52817175]
[-1.07296862 0.86540763 -2.3015387 ]]
mask = [[ True False False]
[False False False]]
###Markdown
**Expected Output:** **x =**[[ 1.62434536 -0.61175641 -0.52817175] [-1.07296862 0.86540763 -2.3015387 ]] **mask =**[[ True False False] [False False False]] Why do we keep track of the position of the max? It's because this is the input value that ultimately influenced the output, and therefore the cost. Backprop is computing gradients with respect to the cost, so anything that influences the ultimate cost should have a non-zero gradient. So, backprop will "propagate" the gradient back to this particular input value that had influenced the cost. 5.2.2 - Average pooling - backward pass In max pooling, for each input window, all the "influence" on the output came from a single input value--the max. In average pooling, every element of the input window has equal influence on the output. So to implement backprop, you will now implement a helper function that reflects this.For example if we did average pooling in the forward pass using a 2x2 filter, then the mask you'll use for the backward pass will look like: $$ dZ = 1 \quad \rightarrow \quad dZ =\begin{bmatrix}1/4 && 1/4 \\1/4 && 1/4\end{bmatrix}\tag{5}$$This implies that each position in the $dZ$ matrix contributes equally to output because in the forward pass, we took an average. **Exercise**: Implement the function below to equally distribute a value dz through a matrix of dimension shape. [Hint](https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.ones.html)
###Code
def distribute_value(dz, shape):
"""
Distributes the input value in the matrix of dimension shape
Arguments:
dz -- input scalar
shape -- the shape (n_H, n_W) of the output matrix for which we want to distribute the value of dz
Returns:
a -- Array of size (n_H, n_W) for which we distributed the value of dz
"""
# Retrieve dimensions from shape
(n_H, n_W) = shape
# Compute the value to distribute on the matrix
average = dz / (n_H * n_W)
# Create a matrix where every entry is the "average" value
a = np.zeros(shape) + average
return a
a = distribute_value(2, (2,2))
print('distributed value =', a)
###Output
distributed value = [[ 0.5 0.5]
[ 0.5 0.5]]
###Markdown
**Expected Output**: distributed_value =[[ 0.5 0.5] [ 0.5 0.5]] 5.2.3 Putting it together: Pooling backward You now have everything you need to compute backward propagation on a pooling layer.**Exercise**: Implement the `pool_backward` function in both modes (`"max"` and `"average"`). You will once again use 4 for-loops (iterating over training examples, height, width, and channels). You should use an `if/elif` statement to see if the mode is equal to `'max'` or `'average'`. If it is equal to 'average' you should use the `distribute_value()` function you implemented above to create a matrix of the same shape as `a_slice`. Otherwise, the mode is equal to '`max`', and you will create a mask with `create_mask_from_window()` and multiply it by the corresponding value of dA.
###Code
def pool_backward(dA, cache, mode = "max"):
"""
Implements the backward pass of the pooling layer
Arguments:
dA -- gradient of cost with respect to the output of the pooling layer, same shape as A
cache -- cache output from the forward pass of the pooling layer, contains the layer's input and hparameters
mode -- the pooling mode you would like to use, defined as a string ("max" or "average")
Returns:
dA_prev -- gradient of cost with respect to the input of the pooling layer, same shape as A_prev
"""
# Retrieve information from cache
(A_prev, hparameters) = cache
# Retrieve hyperparameters
stride = hparameters["stride"]
f = hparameters["f"]
# Retrieve dimensions from A_prev's shape and dA's shape
m, n_H_prev, n_W_prev, n_C_prev = A_prev.shape
m, n_H, n_W, n_C = dA.shape
# Initialize dA_prev with zeros
dA_prev = np.zeros(A_prev.shape)
for i in range(m): # loop over the training examples
a_prev = A_prev[i]
for h in range(n_H): # loop on the vertical axis
for w in range(n_W): # loop on the horizontal axis
for c in range(n_C): # loop over the channels (depth)
                    # corners of the current "slice" scale with the stride
                    vert_start = h * stride
                    vert_end = vert_start + f
                    horiz_start = w * stride
                    horiz_end = horiz_start + f
# Compute the backward propagation in both modes.
if mode == "max":
a_prev_slice = a_prev[vert_start:vert_end, horiz_start:horiz_end, c]
mask = create_mask_from_window(a_prev_slice)
dA_prev[i, vert_start:vert_end, horiz_start:horiz_end, c] += np.multiply(mask, dA[i, h, w, c])
elif mode == "average":
da = dA[i, h, w, c]
shape = (f, f)
dA_prev[i, vert_start:vert_end, horiz_start:horiz_end, c] += distribute_value(da, shape)
# Making sure your output shape is correct
assert(dA_prev.shape == A_prev.shape)
return dA_prev
np.random.seed(1)
A_prev = np.random.randn(5, 5, 3, 2)
hparameters = {"stride" : 1, "f": 2}
A, cache = pool_forward(A_prev, hparameters)
dA = np.random.randn(5, 4, 2, 2)
dA_prev = pool_backward(dA, cache, mode = "max")
print("mode = max")
print('mean of dA = ', np.mean(dA))
print('dA_prev[1,1] = ', dA_prev[1,1])
print()
dA_prev = pool_backward(dA, cache, mode = "average")
print("mode = average")
print('mean of dA = ', np.mean(dA))
print('dA_prev[1,1] = ', dA_prev[1,1])
###Output
mode = max
mean of dA = 0.145713902729
dA_prev[1,1] = [[ 0. 0. ]
[ 5.05844394 -1.68282702]
[ 0. 0. ]]
mode = average
mean of dA = 0.145713902729
dA_prev[1,1] = [[ 0.08485462 0.2787552 ]
[ 1.26461098 -0.25749373]
[ 1.17975636 -0.53624893]]
|
data_processing/createSmallDataset-LSTM.ipynb | ###Markdown
Small Dataset for LSTMWe are going to use a small subset for testing the LSTM model.
###Code
import pandas as pd
import numpy as np
import torchaudio
import torchaudio.transforms as T
from tqdm.notebook import tqdm
import matplotlib.pyplot as plt
import librosa
import h5py
df = pd.read_csv("../datasets/AnimalSoundFull.csv")
df.head()
df_aves = df[df["class"] == "Aves"].reset_index(drop=True)
df_aves.head()
df_mammalia = df[df["class"] == "Mammalia"].reset_index(drop=True)
df_mammalia.head()
df_aves.shape, df_mammalia.shape
num_samples = 1000
np.random.seed(42)
df_small_aves = df_aves.sample(n=num_samples)
df_small_mammalia = df_mammalia.sample(n=num_samples)
df_small = pd.concat([df_small_aves, df_small_mammalia]).reset_index(drop=True).drop(columns=["identifier",
"species",
"genus",
"family",
"phylum"
])
df_small
def getMFCC(row, file):
y, sample_rate = librosa.load("../data/" + row.file_name)
MFCC = librosa.feature.mfcc(y=y, sr=sample_rate)
file.create_dataset(str(row.gbifID), data=MFCC)
return
tqdm.pandas(desc="Creating MFCC")
out_file = h5py.File("test_npz.h5", "w")
_ = df_small.progress_apply(getMFCC, file=out_file, axis=1)
out_file.close()
with h5py.File("test_npz.h5", "r") as f:
    print(f.keys())
    # iterate over the stored gbifID datasets while the file is still open
    for key in f.keys():
        print(key)
df_small.head()
#df_small.to_csv("../datasets/Aves-Mammalia.csv", index=False)
###Output
_____no_output_____ |
Wi19_content/DSMCER/L11_SVM_filled.ipynb | ###Markdown
From this article in [Scientific Reports](http://www.nature.com/articles/srep13285) Read in the data* elemental data: [https://raw.githubusercontent.com/UWDIRECT/UWDIRECT.github.io/master/Wi18_content/DSMCER/atomsradii.csv](https://raw.githubusercontent.com/UWDIRECT/UWDIRECT.github.io/master/Wi18_content/DSMCER/atomsradii.csv)* testing data: [https://raw.githubusercontent.com/UWDIRECT/UWDIRECT.github.io/master/Wi18_content/DSMCER/testing.csv](https://raw.githubusercontent.com/UWDIRECT/UWDIRECT.github.io/master/Wi18_content/DSMCER/testing.csv) Now, let's make a new classifier objectStart with [LinearSVC](http://scikit-learn.org/stable/modules/generated/sklearn.svm.LinearSVC.htmlsklearn.svm.LinearSVC) You can use the following function to see how your model is doing: You and your partner should determine: * Testing error rate* Training error rate Grab your code from the L9.MLIntro notebook! With remaining time go through the cell below and look at graphs of the decision boundary vs K. * See if you can use the graph to determine your **testing** error rate * Could you also use the graph to determine your **training** error rate? (_open ended_) This is code to visualize the decision boundary. Fix it up to use your classifier from above or better yet, try a nonlinear kernel and visualize that! Name your classifier `clf.predict` and this should just run.
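One hedged way to set this up (not the official solution): read both CSVs from the URLs above, fit a LinearSVC named `clf` on the `rWC`/`rCh` columns, and report each error rate as 1 − accuracy. It assumes `testing.csv` shares the `rWC`, `rCh`, and `Type` columns and that every test class also appears in training; integer-encoding the labels keeps `clf.predict` compatible with the plotting cell below.
```python
# A possible solution sketch (column names for testing.csv are assumed to match atomsradii.csv)
import pandas as pd
from sklearn.svm import LinearSVC

elements = pd.read_csv('https://raw.githubusercontent.com/UWDIRECT/UWDIRECT.github.io/'
                       'master/Wi18_content/DSMCER/atomsradii.csv')
testing = pd.read_csv('https://raw.githubusercontent.com/UWDIRECT/UWDIRECT.github.io/'
                      'master/Wi18_content/DSMCER/testing.csv')

# integer-encode the class labels so clf.predict returns numbers for pcolormesh below
levels, labels = pd.factorize(elements.Type)

clf = LinearSVC()
clf.fit(elements[['rWC', 'rCh']], levels)

train_error = 1 - clf.score(elements[['rWC', 'rCh']], levels)
test_error = 1 - clf.score(testing[['rWC', 'rCh']], labels.get_indexer(testing.Type))
print('Training error rate:', round(train_error, 3))
print('Testing error rate:', round(test_error, 3))
```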
###Code
# additional libraries we will use
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
# just for convenience and similarity with sklearn tutorial
# I am going to assign our X and Y data to specific vectors
# this is not strictly needed and you could use elements df for the whole thing!
X=elements[['rWC','rCh']]
#this is a trick to turn our strings (type of element / class) into unique
#numbers. Play with this in a separate cell and make sure you know wth is
#going on!
levels,labels=pd.factorize(elements.Type)
y=levels
#This determines levelspacing for our color map and the colors themselves
h=0.02
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
# in the sklearn tutorial two different weights are compared
# the decision between "uniform" and "distance" determines the probability
# weight. "uniform" is the version presented in class, you can change to
# distance
weights='uniform'
# Straight from the tutorial - quickly read and see if you know what these
# things are going - if you are < 5 min until end then you should skip this part
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = elements.rWC.min() - 0.1 , elements.rWC.max() + 0.1
y_min, y_max = elements.rCh.min() - 0.1 , elements.rCh.max() + 0.1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(figsize=(4,4));
plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
# This may be the 1st time you have seen how to color points by a 3rd vector
# In this case y ( see c=y in below statement ). This is very useful!
plt.scatter(X.rWC, X.rCh, c=y, cmap=cmap_bold)
# Set limits and labels
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xlabel('rWC')
plt.ylabel('rCh')
###Output
_____no_output_____ |
Knowledge Tracing/DKT/.ipynb_checkpoints/LIFE_Embedding_DKT_Compare-checkpoint.ipynb | ###Markdown
Auto-Encoding DKT for LIFE project (http://www.oxlifeproject.org) For the DKT model proposed, autoencoding was implemented in the hidden layers. Autoencoders are neural networks that use an unsupervised learning technique for the task of representation learning. A DKT autoencoder model would seek to learn a compressed knowledge representation from learners’ trajectories of learning task attempts. This is useful because creating a bottleneck in the neural network (i.e. making it undercomplete) forces the model to learn the most salient features of the original input features (student performance data). To avoid the model behaving like an identity function that duplicates the input features to the output features, regularisation is employed to force the DKT model to respond to unique statistical features of the input features. Additionally, a loss function may be employed to encourage the model to have other properties besides the ability to replicate features, i.e. being insensitive to memorising input features but sensitive enough to build a reconstruction of the data. Essentially, this usage of autoencoding achieves data denoising and dimensionality reduction. If it were a linear network (i.e. one whose hidden layers have no non-linear activation functions), the dimensionality reduction would be equivalent to Principal Components Analysis (PCA). Using a DKT autoencoder is technically applying self-supervised learning methods to student learning data to represent their knowledge. The best performing DKT model is posited to be a composite model that combines autoencoding of knowledge representation as continuous and high-dimensional together with prediction of students’ future performance (Srivastava et al., 2015; Sapountzi A. et al., 2018). Arguably, LSTMs’ mimicking of memory better supports knowledge tracing by accounting for learning ‘history’, based on the recency and outcome of completed learning tasks. This helps ensure that the conjunctive nature of the learning content is factored into the prediction of students’ future performance.
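To make the composite architecture concrete, here is a minimal sketch of how such an LSTM autoencoder with a prediction head could be wired up. This is illustrative only: the TensorFlow/Keras framework, layer sizes, regularisation strength, and one-hot (question, correctness) input encoding are assumptions, not the model actually trained on the LIFE data.
```python
# Minimal, hypothetical sketch of a composite DKT LSTM autoencoder (not the LIFE model itself)
import tensorflow as tf
from tensorflow.keras import layers, Model

n_questions = 50                 # assumed number of distinct questions
input_dim = 2 * n_questions      # one-hot of (question, correct/incorrect) per attempt
bottleneck = 32                  # undercomplete hidden size forces a compressed representation

inputs = layers.Input(shape=(None, input_dim))            # variable-length attempt sequences
encoded = layers.LSTM(bottleneck, return_sequences=True,
                      kernel_regularizer=tf.keras.regularizers.l2(1e-4))(inputs)

# reconstruction head (the autoencoding objective)
reconstructed = layers.TimeDistributed(
    layers.Dense(input_dim, activation='sigmoid'), name='reconstruction')(encoded)
# prediction head: probability of answering each question correctly on the next attempt
predicted = layers.TimeDistributed(
    layers.Dense(n_questions, activation='sigmoid'), name='prediction')(encoded)

model = Model(inputs, [reconstructed, predicted])
model.compile(optimizer='adam',
              loss={'reconstruction': 'binary_crossentropy',
                    'prediction': 'binary_crossentropy'},
              loss_weights={'reconstruction': 0.5, 'prediction': 1.0})
model.summary()
```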
###Code
life_play_data = pd.read_csv('D:/DPhil - University of Oxford/Reports/ELO/life_play_data.csv')
life_play_data.loc[life_play_data.Correct==1,['Feedback']] = -1
life_play_data.Feedback = life_play_data.Feedback + 1
life_play_data['UserId']=life_play_data.User.astype("category").cat.codes
#Convert time into grouped count data, with each unit representing 3 seconds
time_divisor = 3
life_play_data['Time_Counter'] = np.ceil(life_play_data.Time / time_divisor)
life_play_data.Time_Counter = life_play_data.Time_Counter.astype(int)
life_demographic_data = life_play_data[['User','UserId','Group']]
life_demographic_data = life_demographic_data.drop_duplicates(['User','UserId','Group'])
life_demographic_data.reset_index(drop=True,inplace=True)
perf_data = life_play_data.copy()
perf_data = perf_data[perf_data.Try==1]
user_scores = perf_data.groupby(['User','Session']).agg({"Correct": "sum", "Question" : 'count'})
user_scores.reset_index(inplace=True)
user_scores['Score'] = user_scores.Correct / user_scores.Question
user_scores.Score = user_scores.Score.apply(lambda x: math.floor(x*10))
user_scores.drop(['Correct','Question'],axis=1,inplace=True)
life_play_data = pd.merge(life_play_data,user_scores,how='left',on=['User','Session'])
#Use commonly recognised KT outcome of correct on first try
life_play_data['Correct_First_Try'] = 0
life_play_data.loc[(life_play_data.Try==1)&(life_play_data.Correct==1),['Correct_First_Try']]=1
life_play_grouping = life_play_data[['Session','UserId','Group']]
life_play_grouping = life_play_grouping.drop_duplicates(['UserId','Group'])
life_play_data.drop(['User',
'Correct',
'Cycle',
'Time',
'Try','SRL',
'Group',
'Complete_Plays'
,'Session_Complete'
#,'Gap'
],axis=1,inplace=True)
life_play_data.rename(columns={'Correct_First_Try':'Correct'
,'Time_Counter':'Time'
,'UserId':'User'
,'Gap':'Gap_Log'
},inplace=True)
life_play_data.rename(columns={'Correct_First_Try':'Correct',
'Time_Counter':'Time'
,'UserId':'User'
,'Gap_Type':'Gap'
},inplace=True)
life_play_data.head()
#Remember outcome is imbalanced, implications for AUCROC
life_play_data.Correct.value_counts(normalize=True)
perf_gap = life_play_data[['User','Session','Gap','Score']]
perf_gap = perf_gap.drop_duplicates(['User','Session','Gap','Score'])
perf_gap.Score = perf_gap.Score*10
perf_gap.rename(columns={'User':'UserId'},inplace=True)
perf_gap.head()
summary = pd.crosstab([perf_gap.Score],[perf_gap.Gap],
rownames=['Score'],
colnames=["Spacing"],
dropna=True,
margins=True)
summary
#Table above as percentages
summary = pd.crosstab([perf_gap.Score],[perf_gap.Gap],
rownames=['Score'],
colnames=["Spacing"],
dropna=True,
normalize=True,
margins=True)
summary
srl_data_mutinom = pd.read_csv('D:/DPhil - University of Oxford/Reports/SRL/mutinom_regress_data.csv')
srl_data_mutinom = srl_data_mutinom[['User','Cadre','Level','Experience','Age','SRL']]
srl_data_mutinom = pd.merge(srl_data_mutinom,life_demographic_data,how='inner',on='User')
srl_data_mutinom.UserId = srl_data_mutinom.UserId.astype(int)
users_with_demo = srl_data_mutinom.UserId.tolist()
srl_data_mutinom.head()
demo_perf = pd.merge(perf_gap,srl_data_mutinom,how='inner',on='UserId')
demo_perf.head()
###Output
_____no_output_____
###Markdown
Descriptive stats for learning sessions by spacing
###Code
table_gap = life_play_data[['User','Session','Gap']].drop_duplicates()
all_sessions = table_gap.shape[0]
print("All Sessions: "+str(table_gap.shape[0]))
print("None: "+str(table_gap[table_gap.Gap=='None'].shape[0]) + " ("+str(round(table_gap[table_gap.Gap=='None'].shape[0]/all_sessions,4)*100)+"%)")
print("<= 1 Hour: "+str(table_gap[table_gap.Gap=='<= 1 Hour'].shape[0]) + " ("+'{:0.2f}'.format((table_gap.User[table_gap.Gap=='<= 1 Hour'].shape[0]/all_sessions)*100)+"%)")
print("<= 1 Day: "+str(table_gap[table_gap.Gap=='<= 1 Day'].shape[0]) + " ("+'{:0.2f}'.format((table_gap.User[table_gap.Gap=='<= 1 Day'].shape[0]/all_sessions)*100)+"%)")
print("<= 1 Week: "+str(table_gap[table_gap.Gap=='<= 1 Week'].shape[0]) + " ("+'{:0.2f}'.format((table_gap.User[table_gap.Gap=='<= 1 Week'].shape[0]/all_sessions)*100)+"%)")
print("<= 1 Month: "+str(table_gap[table_gap.Gap=='<= 1 Month'].shape[0]) + " ("+'{:0.2f}'.format((table_gap.User[table_gap.Gap=='<= 1 Month'].shape[0]/all_sessions)*100)+"%)")
print("> 1 Month: "+str(table_gap[table_gap.Gap=='> 1 Month'].shape[0]) + " ("+'{:0.2f}'.format((table_gap.User[table_gap.Gap=='> 1 Month'].shape[0]/all_sessions)*100)+"%)")
table_forgetting_curves = life_play_data[['User','Session','Score','Gap']].drop_duplicates()
table_forgetting_curves['I'] = 1
table_forgetting_curves['Iteration'] = table_forgetting_curves.groupby('User').I.cumsum()
table_forgetting_curves.drop('I',axis=1,inplace=True)
table_forgetting_curves.Score = table_forgetting_curves.Score * 10
table_forgetting_curves = table_forgetting_curves[table_forgetting_curves.Gap !='None']
table_forgetting_curves.Iteration = table_forgetting_curves.Iteration -1
table_forgetting_curves = table_forgetting_curves[table_forgetting_curves.Iteration <7]
table_forgetting_curves.head()
plt.rc('xtick', labelsize=16)
plt.rc('ytick', labelsize=16)
axs = plt.subplots(nrows=1,ncols=1,figsize=(8,5))
axs = sns.lineplot(x="Iteration",
y="Score",
ci=None,
hue="Gap",
palette=sns.color_palette("bright", 5),
style="Gap",
markers=True,
markersize=10,
data=table_forgetting_curves)
axs.set_xlabel('Learning Session Count',fontsize=20)
axs.set_ylabel('Score (%)',fontsize=20)
axs.set_xticks(np.arange(1, 7, step=1))
axs.set_yticks(np.arange(30, 105, step=10))
handles, labels = axs.get_legend_handles_labels()
plt.legend(handles=handles[1:], labels=labels[1:],fontsize=16,ncol=3, bbox_to_anchor=(.95, -0.25))
for lh in axs.legend_.legendHandles:
lh._sizes = [100]
plt.show()
###Output
_____no_output_____
###Markdown
Users dropped due to missing data (based on sequence length)
###Code
count_attempts = (life_play_data.groupby('User').Question.count()).to_frame()
count_attempts.reset_index(inplace=True)
count_attempts
for i in range(3,11,1):
    print("Fewer than {} attempts: N={} ({:.2%})".format(
            i,
            count_attempts.User[count_attempts.Question < i].nunique(),
            count_attempts.User[count_attempts.Question < i].nunique()/count_attempts.User.nunique()
    ))
##New code
dataset = life_play_data.copy()
dataset.drop(['Session'],axis=1,inplace=True)
#Rearrange columns, make sure Correct is at the end
dataset = dataset[['User','Question','Feedback','Time','Gap','Opportunity','Score','Gap_Log','Correct']]
dataset.Gap = pd.Categorical(dataset.Gap.tolist(),categories=['None',
'<= 1 Hour',
'<= 1 Day',
'<= 1 Week',
'<= 1 Month',
'> 1 Month'])
dataset.Gap = dataset.Gap.cat.codes
dataset.tail()
dataset_autoencode = dataset.copy()# Create a dataset copy to be used for OneHotEncoding
#dataset_autoencode.loc[dataset_autoencode.Time > 29, ['Time']] = 30 #Make all time slots longer than 87 seconds, 90 seconds
#dataset_autoencode.loc[dataset_autoencode.Opportunity > 19, ['Opportunity']] = 20 #Make all opportunities greater than 19, 20
dataset_autoencode.tail()
#LAK dataset needs this
column_names = dataset.columns.tolist()
column_names.remove('Correct')
column_names.remove('Score')
column_names.remove('Gap_Log')
column_names.append('Gap_Log')
column_names.append('Score')
column_names.append('Correct')
dataset = dataset[column_names]
dataset.Score = dataset.Score * 10
dataset.tail()
users_max = (dataset.User.max()+1)
quiz_max = (dataset.Question.max()+1)
opportunity_max = (dataset.Opportunity.max()+1)
time_max = (dataset.Time.max()+1)
feedback_max = (dataset.Feedback.max()+1)
space_max = (dataset.Gap.max()+1)
def temporalize(features, labels, lookback): #Converts dataset to timeseries form, with sliding window of varying sizes
X = []
y_series = []
y_single = []
sets = len(features)-lookback + 1
for i in range(sets):
t = []
l = []
for j in range(0,lookback):
# Gather past records upto the lookback period
t.append(features[[(i+j)], :])
l.append(labels[[(i+j)]])
X.append(t)
y_single.append(labels[(i+lookback)-1])
y_series.append(l)
return X, y_series, y_single
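# Quick illustration (a sketch on toy data): with 5 rows and lookback=3,
# temporalize yields 5 - 3 + 1 = 3 overlapping windows of 3 consecutive rows,
# plus the matching label sequences and last-step labels.
_demo_features = np.arange(10).reshape(5, 2)
_demo_labels = np.array([0, 1, 0, 1, 1])
_demo_X, _demo_y_series, _demo_y_single = temporalize(_demo_features, _demo_labels, lookback=3)
print(len(_demo_X), len(_demo_y_series), len(_demo_y_single))  # -> 3 3 3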
def get_split_dataset(datasets,lookback,keepUserId):
global n_features
if keepUserId:
features = datasets.loc[:, ~(datasets.columns.isin(['Correct']))].values
else:
features = datasets.loc[:, ~(datasets.columns.isin(['User','Correct']))].values
labels = datasets.Correct.values
X, y_series, y_single = temporalize(features,labels,lookback)
X = np.array(X)
if keepUserId:
X = X.reshape(X.shape[0],lookback,n_features-1) #Just omit the Correct column
else:
X = X.reshape(X.shape[0],lookback,n_features-2) #Just omit the Correct & User column
y_series = np.array(y_series).reshape(np.array(y_series).shape[0],lookback,1)
y_single = np.array(y_single).reshape(np.array(y_single).shape[0],1)
return X, y_series, y_single
def get_datasets(dataset,
lookback,
single_label=False,
train_ratio = 0.8,
weighted=False,
learnerAgnostic=True,
keepUserId=False,
useSRLusersAsTest=False):
global users_with_demo
if useSRLusersAsTest: #Use dataset with known extra demographic details as the test dataset(~23% of the full dataset)
dataset_test = dataset[dataset.User.isin(users_with_demo)]
dataset_train = dataset[~dataset.User.isin(users_with_demo)]
learners_data_test = dataset_test.groupby(['User']).apply(get_split_dataset,lookback,keepUserId)
learners_data_train = dataset_train.groupby(['User']).apply(get_split_dataset,lookback,keepUserId)
X_test, y_test_series,y_test_last = zip(*learners_data_test)
X_train, y_train_series,y_train_last = zip(*learners_data_train)
X_test = [item for item in X_test if item.shape[0] > 0]
y_test_series = [item for item in y_test_series if item.shape[0] > 0]
y_test_last = [item for item in y_test_last if item.shape[0] > 0]
X_train = [item for item in X_train if item.shape[0] > 0]
y_train_series = [item for item in y_train_series if item.shape[0] > 0]
y_train_last = [item for item in y_train_last if item.shape[0] > 0]
X_test = np.vstack(X_test)
y_test_series = np.vstack(y_test_series)
y_test_last = np.vstack(y_test_last)
X_train = np.vstack(X_train)
y_train_series = np.vstack(y_train_series)
y_train_last = np.vstack(y_train_last)
return X_train, X_test, y_train_series, y_test_series
else:
learners_data = dataset.groupby(['User']).apply(get_split_dataset,lookback,keepUserId)
#Randomise learners to either test or train
summary_stats = pd.DataFrame()
for idx,items in learners_data.iteritems():
this_stat = pd.DataFrame({
'Learner':idx,
'seq_len':items[0].shape[0]
},index=[idx])
summary_stats = pd.concat([summary_stats,this_stat])
summary_stats = summary_stats[summary_stats.seq_len >0]
summary_stats['Learner']= range(0,summary_stats.shape[0])
summary_stats['weight'] = summary_stats.seq_len / summary_stats.seq_len.sum()
np.random.seed(1)
if weighted:
train_val_users = np.random.choice(summary_stats.Learner,
size=int(summary_stats.shape[0]*train_ratio), #default 80% of users to train
p=summary_stats.weight, #Weight by number of sequences per user
replace=False)
test_users = list(set(summary_stats.Learner.tolist()).difference(set(train_val_users.tolist())))
else:
train_val_users = np.random.choice(summary_stats.Learner,
size=int(summary_stats.shape[0]*train_ratio), #default 80% of users to train
replace=False)
test_users = list(set(summary_stats.Learner.tolist()).difference(set(train_val_users.tolist())))
features, labels, last_label = zip(*learners_data)
features_remain = [item for item in features if item.shape[0] > 0]
labels_remain = [item for item in labels if item.shape[0] > 0]
last_label_remain = [item for item in last_label if item.shape[0] > 0]
#Split the data for model training
if learnerAgnostic:
X = np.vstack(features_remain)
y = np.vstack(labels_remain)
y_single = np.vstack(last_label_remain)
if single_label:
X_train_val,X_test,y_train_val,y_test = train_test_split(X, y_single,test_size=(1.0-train_ratio),
shuffle=True,random_state=1)
else:
X_train_val,X_test,y_train_val,y_test = train_test_split(X, y,test_size=(1.0-train_ratio),
shuffle=True,random_state=1)
return X_train_val, X_test, y_train_val, y_test
else:
X_train_val = list(itemgetter(*train_val_users)(features_remain))
X_train_val = np.vstack(X_train_val)
X_test = list(itemgetter(*test_users)(features_remain))
X_test = np.vstack(X_test)
if single_label:
y_train_val = list(itemgetter(*train_val_users)(last_label_remain))
y_train_val = np.vstack(y_train_val)
y_test = list(itemgetter(*test_users)(last_label_remain))
y_test = np.vstack(y_test)
else:
y_train_val = list(itemgetter(*train_val_users)(labels_remain))
y_train_val = np.vstack(y_train_val)
y_test = list(itemgetter(*test_users)(labels_remain))
y_test = np.vstack(y_test)
return X_train_val, X_test, y_train_val, y_test
class PrintDot(Callback): #Keep track of progress
def on_epoch_end(self, epoch, logs):
if epoch % 400 == 0: print('')
if epoch % 20 == 0: print('.', end='')
es = EarlyStopping(monitor='val_loss',patience=4) #Avoid overfitting
cp = ModelCheckpoint(filepath="dkt_LIFE_AutoEncode_LS.h5", #Save best performing model
monitor='val_loss',
save_best_only=True,
verbose=0, mode='min') #val_loss should be minimised
###Output
_____no_output_____
###Markdown
Embedding Model (Many-to-One LSTM)
###Code
summary_stats = pd.DataFrame() #Keeps track of model performance over the different sequence lengths
#Ensure reproducibility
random.seed(1)
tf.random.set_seed(1)
for lookback in range(3,31,3):
tf.random.set_seed(1)
n_features = dataset.shape[1] #-1, used in get_split_dataset function
X_train_val, X_test, y_train_val, y_test = get_datasets(dataset,
lookback, #timesteps
single_label=False, #Outcome at last step only?
learnerAgnostic=False, #Mix sequences from learners in train&test
train_ratio=0.7,
weighted=False, #Weight ratio by individual learner data
keepUserId=True, #If you want to keep track of user features
useSRLusersAsTest=True)
last_x = int(lookback - 1)
X_train_val = X_train_val[:,:-1,:]
X_test = X_test[:,:-1,:]
y_seq_out_ls = (y_train_val[:,-1:,:]).reshape(y_train_val.shape[0],1) #Only predict the last step from this sequence
y_test_ls = (y_test[:,-1:,:]).reshape(y_test.shape[0],1) #Only predict the last step from this sequence
user_train = [item[:,0] for item in X_train_val]
quiz_train = [item[:,1] for item in X_train_val]
feedback_train = [item[:,2] for item in X_train_val]
time_train = [item[:,3] for item in X_train_val]
space_train = [item[:,4] for item in X_train_val]
opp_train = [item[:,5] for item in X_train_val]
user_test = [item[:,0] for item in X_test]
quiz_test = [item[:,1] for item in X_test]
feedback_test = [item[:,2] for item in X_test]
time_test = [item[:,3] for item in X_test]
space_test = [item[:,4] for item in X_test]
opp_test = [item[:,5] for item in X_test]
embeddings_train = [user_train,quiz_train,feedback_train,time_train,space_train,opp_train]
embeddings_test = [user_test,quiz_test,feedback_test,time_test,space_test,opp_test]
    # Each instance consists of six inputs: sequences of user, question, feedback, time, spacing and opportunity ids
user_id_input = Input(shape=(last_x,), name='user_id')
quiz_id_input = Input(shape=(last_x,), name='quiz_id')
feedback_id_input = Input(shape=(last_x,), name='feedback_id')
time_id_input = Input(shape=(last_x,), name='time_id')
space_id_input = Input(shape=(last_x,), name='space_id')
opp_id_input = Input(shape=(last_x,), name='opp_id')
user_embedded = Embedding(users_max,8,input_length=last_x, name='user_embedding')(user_id_input)
quiz_embedded = Embedding(quiz_max, 8,input_length=last_x, name='quiz_embedding')(quiz_id_input)
feedback_embedded = Embedding(feedback_max, 8,input_length=last_x, name='feedback_embedding')(feedback_id_input)
time_embedded = Embedding(time_max, 8,input_length=last_x, name='time_embedding')(time_id_input)
space_embedded = Embedding(space_max, 8,input_length=last_x, name='space_embedding')(space_id_input)
opp_embedded = Embedding(opportunity_max, 8,input_length=last_x, name='opp_embedding')(opp_id_input)
concatenated=Concatenate(name='concat_embeddings')([user_embedded,
quiz_embedded,
feedback_embedded,
time_embedded,
space_embedded,
opp_embedded])
out = LSTM(32,
activation='relu',
kernel_regularizer=l2(10e-4),
recurrent_regularizer=l2(10e-4),
return_sequences=False,
dropout=0.5,
recurrent_dropout=0.5,
name='lstm_layer_outer')(concatenated)
out = Dense(1, activation='sigmoid',name='prediction_layer')(out)
model = Model(
inputs = [user_id_input, quiz_id_input, feedback_id_input, time_id_input, space_id_input, opp_id_input],
outputs = out,
)
adam = tf.keras.optimizers.Adam(lr=0.0005)
model.compile(loss='binary_crossentropy', optimizer = adam, metrics=['accuracy'],name='DKT_Embed')
history = model.fit(
embeddings_train,
y_seq_out_ls,
epochs=1000,
batch_size=256,
shuffle=True,
validation_split = 0.5,
verbose=0,
callbacks=[PrintDot(),es,cp])
y_pred = model.predict(embeddings_test,batch_size=y_test.shape[0])
actual = y_test_ls.ravel()
pred = y_pred.ravel()
fpr_embed, tpr_embed, thresholds_embed = roc_curve(actual, pred)
auc_embed = auc(fpr_embed, tpr_embed)
bin_pred = [1 if p > 0.5 else 0 for p in pred]
accuracy_embed = accuracy_score(actual, bin_pred) #Accuracy score
precision_embed = precision_score(actual, bin_pred, average='weighted') #Precision score
recall_embed = recall_score(actual, bin_pred, average='weighted') #Recall score
    f1_embed = f1_score(actual, bin_pred, average='weighted') #F1 score
this_stat = pd.DataFrame({
'Lookback':lookback,
'AUC':auc_embed,
'Accuracy':accuracy_embed,
'Precision':precision_embed,
'Recall':recall_embed,
'F1':f1_embed,
'Brier_Score':brier_score_loss(actual,pred)
},index=[lookback])
summary_stats = pd.concat([summary_stats,this_stat])
print()
print()
print("Lookback: "+str(lookback))
print()
summary_stats.head()
###Output
....................
.
Lookback: 3
Analyses Samples: 17614
...................
Lookback: 6
Analyses Samples: 16087
..............
Lookback: 9
Analyses Samples: 14664
..............
Lookback: 12
Analyses Samples: 13302
................
Lookback: 15
Analyses Samples: 12029
.......
Lookback: 18
Analyses Samples: 10832
......
Lookback: 21
Analyses Samples: 9736
..........
Lookback: 24
Analyses Samples: 8751
.........
Lookback: 27
Analyses Samples: 7868
.....
Lookback: 30
Analyses Samples: 7083
###Markdown
Embedding Model (Many-to-Many LSTM)
###Code
summary_stats_series = pd.DataFrame() #Keeps track of model performance over the different sequence lengths
#Ensure reproducibility
random.seed(1)
tf.random.set_seed(1)
for lookback in range(3,31,3):
tf.random.set_seed(1)
n_features = dataset.shape[1] #-1, used in get_split_dataset function
X_train_val, X_test, y_train_val, y_test = get_datasets(dataset,
lookback, #timesteps
single_label=False, #Outcome at last step only?
learnerAgnostic=False, #Mix sequences from learners in train&test
train_ratio=0.7,
weighted=False, #Weight ratio by individual learner data
keepUserId=True, #If you want to keep track of user features
useSRLusersAsTest=True)
last_x = int(lookback - 1)
X_train_val = X_train_val[:,:-1,:]
X_test = X_test[:,:-1,:]
y_train_series = y_train_val[:,1:,:] #Predict outcome of subsequent steps
y_test_series = y_test[:,1:,:]#Predict outcome of subsequent steps
user_train = [item[:,0] for item in X_train_val]
quiz_train = [item[:,1] for item in X_train_val]
feedback_train = [item[:,2] for item in X_train_val]
time_train = [item[:,3] for item in X_train_val]
space_train = [item[:,4] for item in X_train_val]
opp_train = [item[:,5] for item in X_train_val]
user_test = [item[:,0] for item in X_test]
quiz_test = [item[:,1] for item in X_test]
feedback_test = [item[:,2] for item in X_test]
time_test = [item[:,3] for item in X_test]
space_test = [item[:,4] for item in X_test]
opp_test = [item[:,5] for item in X_test]
embeddings_train = [user_train,quiz_train,feedback_train,time_train,space_train,opp_train]
embeddings_test = [user_test,quiz_test,feedback_test,time_test,space_test,opp_test]
    # Each instance consists of six inputs: sequences of user, question, feedback, time, spacing and opportunity ids
user_id_input = Input(shape=(last_x,), name='user_id')
quiz_id_input = Input(shape=(last_x,), name='quiz_id')
feedback_id_input = Input(shape=(last_x,), name='feedback_id')
time_id_input = Input(shape=(last_x,), name='time_id')
space_id_input = Input(shape=(last_x,), name='space_id')
opp_id_input = Input(shape=(last_x,), name='opp_id')
user_embedded = Embedding(users_max,8,input_length=last_x, name='user_embedding')(user_id_input)
quiz_embedded = Embedding(quiz_max, 8,input_length=last_x, name='quiz_embedding')(quiz_id_input)
feedback_embedded = Embedding(feedback_max, 8,input_length=last_x, name='feedback_embedding')(feedback_id_input)
time_embedded = Embedding(time_max, 8,input_length=last_x, name='time_embedding')(time_id_input)
space_embedded = Embedding(space_max, 8,input_length=last_x, name='space_embedding')(space_id_input)
opp_embedded = Embedding(opportunity_max, 8,input_length=last_x, name='opp_embedding')(opp_id_input)
concatenated=Concatenate(name='concat_embeddings')([user_embedded,
quiz_embedded,
feedback_embedded,
time_embedded,
space_embedded,
opp_embedded])
lstm_layer = LSTM(32,
activation='relu',
kernel_regularizer=l2(10e-4),
recurrent_regularizer=l2(10e-4),
return_sequences=True,
dropout=0.5,
recurrent_dropout=0.5,
name='lstm_layer_outer')(concatenated)
out = Dense(1, activation='sigmoid',name='prediction_layer')(lstm_layer)
model = Model(
inputs = [user_id_input, quiz_id_input, feedback_id_input, time_id_input, space_id_input, opp_id_input],
outputs = out,
)
adam = tf.keras.optimizers.Adam(lr=0.0005)
model.compile(loss='binary_crossentropy', optimizer = adam, metrics=['accuracy'],name='DKT_Embed')
history = model.fit(
embeddings_train,
y_train_series,
epochs=1000,
batch_size=256,
shuffle=True,
validation_split = 0.5,
verbose=0,
callbacks=[PrintDot(),es,cp])
if lookback == 15:
best_lstm_model = model
best_history = history
tsne_inputs = embeddings_train
model_tsne = Model(inputs=[user_id_input,quiz_id_input,feedback_id_input,time_id_input,space_id_input,opp_id_input],
outputs=lstm_layer,name='tsne_AE')
y_pred = model.predict(embeddings_test,batch_size=y_test.shape[0])
for i in range(0,y_test_series.shape[1]):
actual = y_test_series[:,i,:].ravel()
pred = y_pred[:,i,:].ravel()
fpr_embed, tpr_embed, thresholds_embed = roc_curve(actual, pred)
auc_embed = auc(fpr_embed, tpr_embed)
bin_pred = [1 if p > 0.5 else 0 for p in pred]
accuracy_embed = accuracy_score(actual, bin_pred) #Accuracy score
precision_embed = precision_score(actual, bin_pred, average='weighted') #Precision score
recall_embed = recall_score(actual, bin_pred, average='weighted') #Recall score
        f1_embed = f1_score(actual, bin_pred, average='weighted') #F1 score
this_stat = pd.DataFrame({
'Lookback':lookback,
'Timestep':i,
'AUC':auc_embed,
'Accuracy':accuracy_embed,
'Precision':precision_embed,
'Recall':recall_embed,
'F1':f1_embed,
'Brier_Score':brier_score_loss(actual,pred)
},index=[lookback])
summary_stats_series = pd.concat([summary_stats_series,this_stat])
print()
print()
print("Lookback: "+str(lookback))
print("Analyses Samples: "+str(X_train_val.shape[0] + X_test.shape[0]))
print()
summary_stats_series.head()
# summarize history for accuracy
plt.plot(best_history.history['accuracy'])
plt.plot(best_history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
# summarize history for loss
plt.plot(best_history.history['loss'])
plt.plot(best_history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
plot_model(best_lstm_model,to_file='embedding_model.png',show_shapes=True)
best_lstm_model.summary()
series_perf = summary_stats_series.groupby('Lookback').last()
series_perf.reset_index(inplace=True)
ls_perf = summary_stats.copy()
ls_perf['Type']= 'Last Step'
series_perf['Type']='Series'
series_perf.drop(['Timestep'],axis=1,inplace=True)
model_performance = pd.concat([ls_perf,series_perf],axis=0)
model_performance.reset_index()
model_performance.drop(['Precision','Recall'],axis=1,inplace=True)
model_performance.head()
fig, axs = plt.subplots(nrows=2,ncols=2,figsize=(15,15))
filled_markers = ('o', 'v', '^', '<', '>', '8', 's', 'p', '*', 'h', 'H', 'D', 'd', 'P', 'X')
dash_styles = ["",
(4, 1.5),
(1, 1),
(3, 1, 1.5, 1),
(5, 1, 1, 1),
(5, 1, 2, 1, 2, 1),
(2, 2, 3, 1.5),
(1, 2.5, 3, 1.2)]
g = sns.lineplot(x="Lookback",y="AUC",hue="Type",
palette=sns.color_palette("bright", 2),
style="Type",
dashes=dash_styles,
markers=True,
data=model_performance,
ax=axs[0,0],legend="full",markersize=9,linewidth=1.5)
g.legend_.remove()
g.set(xticklabels=[])
axs[0,0].set_xlabel('')
axs[0,0].set_title("AUCROC",weight='bold').set_fontsize('14')
axs[0,0].set_yticks(np.arange(.775, .925, step=.025))
h = sns.lineplot(x="Lookback",y="Accuracy",hue="Type",
palette=sns.color_palette("bright", 2),
style="Type",
dashes=dash_styles,
markers=True,
data=model_performance,
ax=axs[0,1],legend="full",markersize=9,linewidth=1.5)
h.legend_.remove()
h.set(xticklabels=[])
axs[0,1].set_xlabel('')
axs[0,1].set_title("Accuracy",weight='bold').set_fontsize('14')
axs[0,1].set_yticks(np.arange(.725, .875, step=.025))
i = sns.lineplot(x="Lookback",y="F1",hue="Type",
palette=sns.color_palette("bright", 2),
style="Type",
dashes=dash_styles,
markers=True,
data=model_performance,
ax=axs[1,0],legend="full",markersize=9,linewidth=1.5)
i.legend_.remove()
axs[1,0].set_xlabel('Sliding Window Length')
axs[1,0].set_title("F1 Score",weight='bold').set_fontsize('14')
axs[1,0].set_yticks(np.arange(.7, .875, step=.025))
j = sns.lineplot(x="Lookback",y="Brier_Score",hue="Type",
palette=sns.color_palette("bright", 2),
style="Type",
dashes=dash_styles,
markers=True,
data=model_performance,
ax=axs[1,1],legend="full",markersize=9,linewidth=1.5)
j.legend_.remove()
axs[1,1].set_ylabel('Brier Score')
axs[1,1].set_xlabel('Sliding Window Length')
axs[1,1].set_title("Brier Score",weight='bold').set_fontsize('14')
axs[1,1].set_yticks(np.arange(.10, .25, step=.02))
fig.subplots_adjust(hspace = .1)
handles, labels = axs[1,1].get_legend_handles_labels()
plt.legend(handles=handles[1:], labels=labels[1:],fontsize=16,ncol=2, bbox_to_anchor=(0.25, -0.2))
plt.text(-4.0, 0.075, "Prediction Type", horizontalalignment='left', fontsize=16, color='black', weight='semibold')
plt.show()
def create_truncated_model(trained_model):
for i, layer in enumerate(model_tsne.layers):
model_tsne.layers[i].set_weights(trained_model.layers[i].get_weights())
model_tsne.compile(optimizer=adam, loss='binary_crossentropy',metrics=['accuracy'])
return model_tsne
truncated_model = create_truncated_model(best_lstm_model)
truncated_model.summary()
layer_pred = truncated_model.predict(embeddings_train,X_train_val.shape[0])
tsne = TSNE(n_components=2, init='pca',perplexity=50,n_iter=5000)
output_tsne = tsne.fit_transform(layer_pred[:,-1:,:].reshape(layer_pred.shape[0],layer_pred.shape[2]))
output_tsne.shape
outcome_vis = np.append(output_tsne,y_train_series[:,0,:],axis=1)
for i in range(1,y_train_series.shape[1]):
outcome_vis = np.append(outcome_vis,y_train_series[:,i,:],axis=1)
outcome_vis.shape
fig, axs = plt.subplots(nrows=2,ncols=2,figsize=(12,9))
fig.suptitle("2D Visualisation of LSTM layer(Timestep:12-15)",weight='bold').set_fontsize('18')
a = sns.scatterplot(x=outcome_vis[:,0]
,y=outcome_vis[:,1]
,hue=outcome_vis[:,12]
,style=outcome_vis[:,12]
,palette=sns.color_palette("bright", 2)
,markers=['.', '*']
,ax=axs[0,0]
,s=150)
a.legend_.remove()
axs[0,0].set_title("Step: 12",weight='bold').set_fontsize('18')
b = sns.scatterplot(x=outcome_vis[:,0]
,y=outcome_vis[:,1]
,hue=outcome_vis[:,13]
,style=outcome_vis[:,13]
,palette=sns.color_palette("bright", 2)
,markers=['.', '*']
,ax=axs[0,1]
,s=150)
b.legend_.remove()
axs[0,1].set_title("Step: 13",weight='bold').set_fontsize('18')
c = sns.scatterplot(x=outcome_vis[:,0]
,y=outcome_vis[:,1]
,hue=outcome_vis[:,14]
,style=outcome_vis[:,14]
,palette=sns.color_palette("bright", 2)
,markers=['.', '*']
,ax=axs[1,0]
,s=150)
c.legend_.remove()
axs[1,0].set_title("Step: 14",weight='bold').set_fontsize('18')
d = sns.scatterplot(x=outcome_vis[:,0]
,y=outcome_vis[:,1]
,hue=outcome_vis[:,15]
,style=outcome_vis[:,15]
,palette=sns.color_palette("bright", 2)
,markers=['.', '*']
,ax=axs[1,1]
,s=150)
axs[1,1].set_title("Step: 15",weight='bold').set_fontsize('18')
handles, labels = d.get_legend_handles_labels()
d.legend(handles=handles[0:], labels=('Wrong','Correct')
,fontsize=16
,ncol=3
,bbox_to_anchor=(0.30, -0.15)
,title='Predicted Outcome'
,title_fontsize='18')
for lh in d.legend_.legendHandles:
lh._sizes = [200]
fig.subplots_adjust(hspace = .35)
kc_embed_layer = best_lstm_model.get_layer('quiz_embedding')
weights_kc = kc_embed_layer.get_weights()[0]
weights_kc = weights_kc[[x.astype(int) for x in quiz_train]]
weights_kc = weights_kc.reshape(weights_kc.shape[0],-1)
output_tsne = tsne.fit_transform(weights_kc)
output_tsne.shape
last_quiz = np.array([x[-1] for x in quiz_train]).astype(int).reshape(-1,1)
outcome_vis = np.append(output_tsne,last_quiz,axis=1)
outcome_vis.shape
filled_markers = ('o', 'v', '^', '<', '>', '8', 's', 'p', '*', 'h', 'H', 'D', 'd', 'P', 'X')
fig, axs = plt.subplots(nrows=1,ncols=1,figsize=(12,9))
fig.suptitle("2D Visualisation of the KC encoding layer",weight='bold').set_fontsize('18')
axs = sns.scatterplot(x=outcome_vis[:,0]
,y=outcome_vis[:,1]
,hue=outcome_vis[:,2]
,style=outcome_vis[:,2]
,palette=sns.color_palette("bright", 10)
,markers=filled_markers
,s=120)
plt.legend(fontsize=16,ncol=5,bbox_to_anchor=(.90, -0.15),title='KCs',title_fontsize='18')
for lh in axs.legend_.legendHandles:
lh._sizes = [100]
plt.show()
###Output
_____no_output_____ |
2 - Webscraping.ipynb | ###Markdown
**Space X Falcon 9 First Stage Landing Prediction** Web scraping Falcon 9 and Falcon Heavy Launch Records from Wikipedia Estimated time needed: **40** minutes In this lab, you will be performing web scraping to collect Falcon 9 historical launch records from a Wikipedia page titled `List of Falcon 9 and Falcon Heavy launches` [https://en.wikipedia.org/wiki/List_of_Falcon\_9\_and_Falcon_Heavy_launches](https://en.wikipedia.org/wiki/List_of_Falcon\_9\_and_Falcon_Heavy_launches?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDS0321ENSkillsNetwork26802033-2021-01-01)  Falcon 9 first stage will land successfully  Several examples of an unsuccessful landing are shown here:  More specifically, the launch records are stored in an HTML table shown below:  Objectives: Web scrape Falcon 9 launch records with `BeautifulSoup`: * Extract a Falcon 9 launch records HTML table from Wikipedia * Parse the table and convert it into a Pandas data frame First, let's import the required packages for this lab
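As an aside (not part of the graded lab), pandas can often parse such Wikipedia tables directly, which makes a handy cross-check for the `BeautifulSoup` parsing below; the snapshot URL shown here is the same one defined later in this notebook.
###Code
# Optional cross-check sketch: let pandas parse the raw HTML tables directly
# (requires an HTML parser such as lxml to be installed). The lab below still
# builds the table manually so the noisy cells (reference links, line breaks,
# missing values) can be cleaned properly.
import pandas as pd

snapshot_url = ("https://en.wikipedia.org/w/index.php?title="
                "List_of_Falcon_9_and_Falcon_Heavy_launches&oldid=1027686922")
raw_tables = pd.read_html(snapshot_url)   # returns a list of DataFrames
print(len(raw_tables))                    # number of tables found on the page
raw_tables[2].head()                      # the third table holds the launch records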
###Code
!pip3 install beautifulsoup4
!pip3 install requests
import sys
import requests
from bs4 import BeautifulSoup
import re
import unicodedata
import pandas as pd
###Output
_____no_output_____
###Markdown
and we will provide some helper functions for you to process the web-scraped HTML table
###Code
def date_time(table_cells):
"""
    This function returns the date and time from the HTML table cell
    Input: the element of a table data cell
"""
return [data_time.strip() for data_time in list(table_cells.strings)][0:2]
def booster_version(table_cells):
"""
This function returns the booster version from the HTML table cell
    Input: the element of a table data cell
"""
out=''.join([booster_version for i,booster_version in enumerate( table_cells.strings) if i%2==0][0:-1])
return out
def landing_status(table_cells):
"""
This function returns the landing status from the HTML table cell
Input: the element of a table data cell extracts extra row
"""
out=[i for i in table_cells.strings][0]
return out
def get_mass(table_cells):
mass=unicodedata.normalize("NFKD", table_cells.text).strip()
if mass:
mass.find("kg")
new_mass=mass[0:mass.find("kg")+2]
else:
new_mass=0
return new_mass
def extract_column_from_header(row):
"""
    This function returns the column name from the HTML table header cell
    Input: the element of a table header cell
"""
if (row.br):
row.br.extract()
if row.a:
row.a.extract()
if row.sup:
row.sup.extract()
    column_name = ' '.join(row.contents)
    # Filter out digit and empty names
    if not(column_name.strip().isdigit()):
        column_name = column_name.strip()
        return column_name
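# Tiny usage illustration (a sketch, not required by the lab): the helpers can
# be exercised on a hand-made table cell before touching the real page.
_demo_cell = BeautifulSoup('<td>500 kg<sup class="reference">[9]</sup></td>', 'html.parser').td
print(get_mass(_demo_cell))  # expected: '500 kg'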
###Output
_____no_output_____
###Markdown
To keep the lab tasks consistent, you will be asked to scrape the data from a snapshot of the `List of Falcon 9 and Falcon Heavy launches` Wiki page updated on `9th June 2021`
###Code
static_url = "https://en.wikipedia.org/w/index.php?title=List_of_Falcon_9_and_Falcon_Heavy_launches&oldid=1027686922"
###Output
_____no_output_____
###Markdown
Next, request the HTML page from the above URL and get a `response` object TASK 1: Request the Falcon9 Launch Wiki page from its URL First, let's perform an HTTP GET method to request the Falcon9 Launch HTML page, as an HTTP response.
###Code
# use requests.get() method with the provided static_url
# assign the response to an object
response = requests.get(static_url)
###Output
_____no_output_____
###Markdown
Create a `BeautifulSoup` object from the HTML `response`
###Code
# Use BeautifulSoup() to create a BeautifulSoup object from a response text content
soup = BeautifulSoup(response.text)
###Output
_____no_output_____
###Markdown
Print the page title to verify if the `BeautifulSoup` object was created properly
###Code
# Use soup.title attribute
soup.title
###Output
_____no_output_____
###Markdown
TASK 2: Extract all column/variable names from the HTML table header Next, we want to collect all relevant column names from the HTML table header Let's try to find all tables on the wiki page first. If you need to refresh your memory about `BeautifulSoup`, please check the external reference link towards the end of this lab
###Code
# Use the find_all function in the BeautifulSoup object, with element type `table`
# Assign the result to a list called `html_tables`
html_tables = soup.find_all('table')
###Output
_____no_output_____
###Markdown
The third table is our target table; it contains the actual launch records.
###Code
# Let's print the third table and check its content
first_launch_table = html_tables[2]
print(str(first_launch_table)[:1000])
###Output
<table class="wikitable plainrowheaders collapsible" style="width: 100%;">
<tbody><tr>
<th scope="col">Flight No.
</th>
<th scope="col">Date and<br/>time (<a href="/wiki/Coordinated_Universal_Time" title="Coordinated Universal Time">UTC</a>)
</th>
<th scope="col"><a href="/wiki/List_of_Falcon_9_first-stage_boosters" title="List of Falcon 9 first-stage boosters">Version,<br/>Booster</a> <sup class="reference" id="cite_ref-booster_11-0"><a href="#cite_note-booster-11">[b]</a></sup>
</th>
<th scope="col">Launch site
</th>
<th scope="col">Payload<sup class="reference" id="cite_ref-Dragon_12-0"><a href="#cite_note-Dragon-12">[c]</a></sup>
</th>
<th scope="col">Payload mass
</th>
<th scope="col">Orbit
</th>
<th scope="col">Customer
</th>
<th scope="col">Launch<br/>outcome
</th>
<th scope="col"><a href="/wiki/Falcon_9_first-stage_landing_tests" title="Falcon 9 first-stage landing tests">Booster<br/>landing</a>
</th></tr>
<tr>
<th rowspan="2" scope="row" style="text-align:center;">1
</th>
<td>
###Markdown
You should be able to see the column names embedded in the table header elements `<th>` as follows: ```Flight No. | Date and time (UTC) | Version, Booster [b] | Launch site | Payload [c] | Payload mass | Orbit | Customer | Launch outcome | Booster landing``` Next, we just need to iterate through the `<th>` elements and apply the provided `extract_column_from_header()` to extract the column names one by one
###Code
column_names = []
# Apply find_all() function with `th` element on first_launch_table
# Iterate each th element and apply the provided extract_column_from_header() to get a column name
# Append the Non-empty column name (`if name is not None and len(name) > 0`) into a list called column_names
t = first_launch_table.find_all('th')
for element in t:
colName = extract_column_from_header(element)
if colName is not None and len(colName) > 0:
column_names.append(colName)
###Output
_____no_output_____
###Markdown
Check the extracted column names
###Code
print(column_names)
###Output
['Flight No.', 'Date and time ( )', 'Launch site', 'Payload', 'Payload mass', 'Orbit', 'Customer', 'Launch outcome']
###Markdown
TASK 3: Create a data frame by parsing the launch HTML tables We will create an empty dictionary with keys from the extracted column names in the previous task. Later, this dictionary will be converted into a Pandas dataframe
###Code
launch_dict= dict.fromkeys(column_names)
# Remove an irrelevant column
del launch_dict['Date and time ( )']
# Let's initialize the launch_dict with each value as an empty list
launch_dict['Flight No.'] = []
launch_dict['Launch site'] = []
launch_dict['Payload'] = []
launch_dict['Payload mass'] = []
launch_dict['Orbit'] = []
launch_dict['Customer'] = []
launch_dict['Launch outcome'] = []
# Added some new columns
launch_dict['Version Booster']=[]
launch_dict['Booster landing']=[]
launch_dict['Date']=[]
launch_dict['Time']=[]
###Output
_____no_output_____
###Markdown
Next, we just need to fill up the `launch_dict` with launch records extracted from table rows. Usually, HTML tables in Wiki pages are likely to contain unexpected annotations and other types of noise, such as reference links `B0004.1[8]`, missing values `N/A [e]`, inconsistent formatting, etc. To simplify the parsing process, we have provided an incomplete code snippet below to help you fill up the `launch_dict`. Please complete the TODOs in the following code snippet, or you can choose to write your own logic to parse all launch tables:
###Code
extracted_row = 0
#Extract each table
for table_number,table in enumerate(soup.find_all('table',"wikitable plainrowheaders collapsible")):
# get table row
for rows in table.find_all("tr"):
        #check to see if the first table heading is a number corresponding to a launch number
if rows.th:
if rows.th.string:
flight_number=rows.th.string.strip()
flag=flight_number.isdigit()
else:
flag=False
#get table element
row=rows.find_all('td')
        #if it is a number, save the cells in a dictionary
if flag:
extracted_row += 1
# Flight Number value
# TODO: Append the flight_number into launch_dict with key `Flight No.`
#print(flight_number)
datatimelist=date_time(row[0])
launch_dict['Flight No.'].append(extracted_row)
# Date value
# TODO: Append the date into launch_dict with key `Date`
date = datatimelist[0].strip(',')
#print(date)
launch_dict['Date'].append(date)
# Time value
# TODO: Append the time into launch_dict with key `Time`
time = datatimelist[1]
#print(time)
launch_dict['Time'].append(time)
# Booster version
# TODO: Append the bv into launch_dict with key `Version Booster`
bv=booster_version(row[1])
if not(bv):
bv=row[1].a.string
print(bv)
launch_dict['Version Booster'].append(bv)
# Launch Site
            # TODO: Append the launch_site into launch_dict with key `Launch site`
launch_site = row[2].a.string
#print(launch_site)
launch_dict['Launch site'].append(launch_site)
# Payload
# TODO: Append the payload into launch_dict with key `Payload`
payload = row[3].a.string
#print(payload)
launch_dict['Payload'].append(payload)
# Payload Mass
# TODO: Append the payload_mass into launch_dict with key `Payload mass`
payload_mass = get_mass(row[4])
#print(payload)
launch_dict['Payload mass'].append(payload_mass)
# Orbit
# TODO: Append the orbit into launch_dict with key `Orbit`
orbit = row[5].a.string
#print(orbit)
launch_dict['Orbit'].append(orbit)
# Customer
# TODO: Append the customer into launch_dict with key `Customer`
customer = row[6].a.string
#print(customer)
launch_dict['Customer'].append(customer)
# Launch outcome
# TODO: Append the launch_outcome into launch_dict with key `Launch outcome`
launch_outcome = list(row[7].strings)[0]
#print(launch_outcome)
launch_dict['Launch outcome'].append(launch_outcome)
# Booster landing
            # TODO: Append the booster_landing into launch_dict with key `Booster landing`
booster_landing = landing_status(row[8])
#print(booster_landing)
launch_dict['Booster landing'].append(booster_landing)
###Output
F9 v1.0B0003.1
F9 v1.0B0004.1
F9 v1.0B0005.1
F9 v1.0B0006.1
F9 v1.0B0007.1
F9 v1.1B1003
F9 v1.1
F9 v1.1
F9 v1.1
F9 v1.1
F9 v1.1
F9 v1.1
F9 v1.1
F9 v1.1
F9 v1.1
F9 v1.1
F9 v1.1
F9 v1.1
F9 v1.1
F9 FT
F9 v1.1
F9 FT
F9 FT
F9 FT
F9 FT
F9 FT
F9 FT
F9 FT
F9 FT
F9 FT
F9 FT
F9 FT♺
F9 FT
F9 FT
F9 FT
F9 FTB1029.2
F9 FT
F9 FT
F9 B4
F9 FT
F9 B4
F9 B4
F9 FTB1031.2
F9 B4
F9 FTB1035.2
F9 FTB1036.2
F9 B4
F9 FTB1032.2
F9 FTB1038.2
F9 B4
F9 B4B1041.2
F9 B4B1039.2
F9 B4
F9 B5B1046.1
F9 B4B1043.2
F9 B4B1040.2
F9 B4B1045.2
F9 B5
F9 B5B1048
F9 B5B1046.2
F9 B5
F9 B5B1048.2
F9 B5B1047.2
F9 B5B1046.3
F9 B5
F9 B5
F9 B5B1049.2
F9 B5B1048.3
F9 B5[268]
F9 B5
F9 B5B1049.3
F9 B5B1051.2
F9 B5B1056.2
F9 B5B1047.3
F9 B5
F9 B5
F9 B5B1056.3
F9 B5
F9 B5
F9 B5
F9 B5
F9 B5
F9 B5
F9 B5
F9 B5
F9 B5
F9 B5
F9 B5
F9 B5B1058.2
F9 B5
F9 B5B1049.6
F9 B5
F9 B5B1060.2
F9 B5B1058.3
F9 B5B1051.6
F9 B5
F9 B5
F9 B5
F9 B5
F9 B5 ♺
F9 B5 ♺
F9 B5 ♺
F9 B5 ♺
F9 B5
F9 B5B1051.8
F9 B5B1058.5
###Markdown
After you have filled in the parsed launch record values into `launch_dict`, you can create a dataframe from it.
###Code
df = pd.DataFrame({ key:pd.Series(value) for key, value in launch_dict.items() })
###Output
_____no_output_____ |
notebooks/Edit calc hourly base dev.ipynb | ###Markdown
Now, we want to replace the emissions values that we got from the ML model in the CAMD database. We use the following steps: 1. Match units to an ORISPL and UNIT ID (done in prep); 2. Check to see how many of the units in the Simple Net are in the CAMD data (some will be too small) (done in prep); 3. Add additional rows to the Simple Net data when the NYISO PTID corresponds to multiple UNIT IDs and divide the generation evenly among the units (done in prep); 4. Replace the data in `calc_hourly_base.csv`
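The cell below sketches an index-aligned variant of step 4 (a hedged sketch only: the column and variable names mirror the ones used in the cells that follow, and this is not the approach actually run in this notebook).
###Code
# Sketch only: overwrite emissions for one unit by aligning on an explicit
# (orispl_code, unitid, datetime) MultiIndex instead of relying on row order.
# This follows up on the NOTE further down about adding datetime as an index.
import pandas as pd

def update_unit_emissions(base_df, ml_df, idx, value_col='co2_mass (tons)'):
    key = ['orispl_code', 'unitid', 'datetime']
    indexed = base_df.set_index(key)
    hours = ml_df.columns[5:]
    unit_key = (ml_df.loc[idx, 'ORISPL'], ml_df.loc[idx, 'Unit ID'])
    rows = [unit_key + (h,) for h in hours]
    indexed.loc[rows, value_col] = ml_df.loc[idx, hours].values
    return indexed.reset_index()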
###Code
idx = 62
# Get the ORISPL and the Unit ID
egu_orispl = ml_co2.loc[idx].ORISPL
egu_unitid = ml_co2.loc[idx]['Unit ID']
print(f'ORISPL: {egu_orispl}\tUNIT ID:{egu_unitid}')
ml_co2.columns[5]
# Extract this ORISPL and UNIT ID from the base DataFrame
egu_df = base_df.loc[(base_df['orispl_code'] == egu_orispl) & (base_df['unitid'] == egu_unitid)]
# Extract the correct time window
egu_df = egu_df.loc[base_df['datetime'].isin(ml_co2.columns[5:])]
egu_df.head()
ml_co2.loc[idx, ml_co2.columns[5:]]
# Replace the CO2 emissions values
# NOTE: this is probably a dangerous way of doing this -- might be better to add the datetime as another index in the egu_df
egu_df['co2_mass (tons)'] = ml_co2.loc[idx, ml_co2.columns[5:]].values
# Replace the SO2 emissions values
egu_df['so2_mass (lbs)'] = ml_so2.loc[idx, ml_so2.columns[5:]].values
# Replace the NOx emissions values
egu_df['nox_mass (lbs)'] = ml_nox.loc[idx, ml_nox.columns[5:]].values
# Replace the load values
egu_df['gload (MW-hr)'] = ed_gen.loc[idx, ed_gen.columns[5:]].values
# Combine this new unit data back into the base_df
base_df.update(egu_df)
###Output
_____no_output_____
###Markdown
Check to make sure that the overwrite worked
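A numerical assertion along the lines of the sketch below can complement the visual check (it reuses the variables defined in the cells above; a plot of the differences should likewise be flat at zero).
###Code
# Sketch of a numerical check: the updated CO2 values for this unit and time
# window should match the ML predictions exactly.
import numpy as np

check = base_df.loc[(base_df['orispl_code'] == egu_orispl) & (base_df['unitid'] == egu_unitid)]
check = check.loc[check['datetime'].isin(ml_co2.columns[5:])]
assert np.allclose(check['co2_mass (tons)'].astype(float).values,
                   ml_co2.loc[idx, ml_co2.columns[5:]].astype(float).values)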
###Code
# tmp = base_df.loc[(base_df['orispl_code'] == egu_orispl) & (base_df['unitid'] == egu_unitid)]
# tmp = tmp.loc[base_df['datetime'].isin(ml_co2.columns[5:])]
# tmp
# This plot should be blank
# (tmp['co2_mass (tons)'] - ml_co2.loc[idx, ml_co2.columns[5:]]).plot()
###Output
_____no_output_____
###Markdown
Here's the algorithm in a loop
###Code
# Get the name of an individual EGU -- this is how units are identified in the NY Simple Net & the ML
for idx in ml_co2.index:
# Get the ORISPL and the Unit ID
egu_orispl = ml_co2.loc[idx].ORISPL
egu_unitid = ml_co2.loc[idx]['Unit ID']
print(f'Working on ORISPL: {egu_orispl}\tUNIT ID:{egu_unitid}')
# Extract this ORISPL and UNIT ID from the base DataFrame
egu_df = base_df.loc[(base_df['orispl_code'] == egu_orispl) & (base_df['unitid'] == egu_unitid)]
# Extract the correct time window
egu_df = egu_df.loc[base_df['datetime'].isin(ml_co2.columns[5:])]
if len(egu_df) == 0:
print('Warning: this unit was not found in the CAMD data... skipping')
else:
# Replace the CO2 emissions values
# NOTE: this is probably a dangerous way of doing this -- might be better to add the datetime as another index in the egu_df
egu_df['co2_mass (tons)'] = ml_co2.loc[idx, ml_co2.columns[5:]].values
# Replace the SO2 emissions values
egu_df['so2_mass (lbs)'] = ml_so2.loc[idx, ml_so2.columns[5:]].values
# Replace the NOx emissions values
egu_df['nox_mass (lbs)'] = ml_nox.loc[idx, ml_nox.columns[5:]].values
# Replace the load values
egu_df['gload (MW-hr)'] = ed_gen.loc[idx, ed_gen.columns[5:]].values
# Combine this new unit data back into the base_df
base_df.update(egu_df)
base_df.head()
# Save the updated dataset to a new CSV
base_df = base_df.drop(columns=['datetime'])
base_df.to_csv('updated_test_calc_hourly_base.csv', index=False)
# Now, test the prepemis.update_camd() function
in_emis_file = 'test_calc_hourly_base.csv'
co2_file = '../ny_emis/ml_output/pred_xg_co2.csv'
nox_file = '../ny_emis/ml_output/pred_xg_nox.csv'
so2_file = '../ny_emis/ml_output/pred_xg_so2.csv'
gen_file = '../ny_emis/ed_output/thermal_with_renewable_20160805_20160815.csv'
lu_file = '../ny_emis/ed_output/RGGI_to_NYISO.csv'
out_emis_file = 'updated_test_calc_hourly_base_v2.csv'
prepemis.update_camd(in_emis_file=in_emis_file, co2_file=co2_file,
nox_file=nox_file, so2_file=so2_file,
gen_file=gen_file, lu_file=lu_file,
out_emis_file=out_emis_file)
###Output
_____no_output_____ |
Data Analysis/Matplotlib/introduction-to-matplotlib.ipynb | ###Markdown
Introduction to Matplotlib
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
plt.plot()
plt.plot();
plt.plot()
plt.show()
plt.plot([1,2,3,4])
x = [1,2,3,4]
y = [11,22,33,44]
plt.plot(x,y);
# 1st method
fig = plt.figure() #creates figure
ax = fig.add_subplot() #adds some axes
plt.show()
# 2nd method
fig = plt.figure() #creates figure
ax = fig.add_axes([1,1,1,1])
ax.plot(x,y) #add some data
plt.show()
# 3rd method (to use)
fig,ax = plt.subplots()
ax.plot(x,[50,100,200,250]);# add some data
type(fig),type(ax)
###Output
_____no_output_____
###Markdown
Matplotlib example workflow
###Code
# 0. import matplotlib
%matplotlib inline
import matplotlib.pyplot as plt
# 1. prepare data
x = np.array([1,2,3,4])
y = np.array([11,22,33,44])
# 2. setup plot
fig,ax = plt.subplots(figsize=(10,10))
# 3. plot data
ax.plot(x,y)
# 4. customize plot
ax.set(title="Simple plot",xlabel="x-axis",ylabel="y-axis")
# save and show
fig.savefig("same-plot.png")
###Output
_____no_output_____
###Markdown
Making figures with NumPy arrays* Line plot* Scatter plot* Bar plot* Histogram* Subplot
###Code
import numpy as np
# Create some data
x = np.linspace(0,10,100)
x[:10]
# Plot data
fig, ax = plt.subplots()
ax.plot(x,x**2)
# same data
fig,ax = plt.subplots()
ax.scatter(x,np.exp(x))
# another scatter plot
fig, ax = plt.subplots()
ax.scatter(x,np.sin(x))
# make plot from dict
nut_butter_prices = {"Almond butter":10, "Peanut butter": 8,"Cashew butter":12}
fig,ax = plt.subplots()
ax.bar(nut_butter_prices.keys(),nut_butter_prices.values())
ax.set(title="Bhupend's Butter Store",ylabel="Price($)");
fig,ax = plt.subplots()
ax.barh(list(nut_butter_prices.keys()),list(nut_butter_prices.values()));
# make some data for histogram
x = np.random.randn(1000)
fig,ax=plt.subplots()
ax.hist(x);
###Output
_____no_output_____
###Markdown
Two options for subplots
###Code
# subplots option 1
fig,((ax1,ax2),(ax3,ax4)) = plt.subplots(nrows=2,ncols=2,figsize=(10,5))
# plot to each diff axis
ax1.plot(x,x/2)
ax2.scatter(np.random.random(10),np.random.random(10))
ax3.bar(nut_butter_prices.keys(),nut_butter_prices.values())
ax4.hist(np.random.random(100));
# subplots option 2
fig,ax = plt.subplots(nrows=2,ncols=2,figsize=(10,5))
# plot to each diff index
ax[0,0].plot(x,x/2)
ax[0,1].scatter(np.random.random(10),np.random.random(10))
ax[1,0].bar(nut_butter_prices.keys(),nut_butter_prices.values())
ax[1,1].hist(np.random.random(100));
###Output
_____no_output_____
###Markdown
Plotting from pandas DataFrames
###Code
import pandas as pd
# make dataframe
car_sales = pd.read_csv("car-sales.csv")
car_sales
ts = pd.Series(np.random.randn(1000),index=pd.date_range('1/1/2021',periods=1000))
ts = ts.cumsum()
ts.plot()
car_sales
car_sales["Price"] = car_sales["Price"].str.replace("[\$\,\.]","")
car_sales
# Remove the last two zeros
car_sales["Price"] = car_sales["Price"].str[:-2]
car_sales
car_sales["Sale Date"] = pd.date_range("1/1/2021",periods=len(car_sales))
car_sales
car_sales["Total Sales"] = car_sales["Price"].astype(int).cumsum()
car_sales
# plot total sales
car_sales.plot(x="Sale Date",y="Total Sales")
# price to int from str
car_sales["Price"] = car_sales["Price"].astype(int)
# plot scatter plot
car_sales.plot(x="Odometer (KM)",y="Price",kind="scatter")
# bar graph
x= np.random.rand(10,4)
# into dataframe
df = pd.DataFrame(x,columns=["a","b","c","d"])
df
df.plot.bar()
df.plot(kind="bar")
car_sales
car_sales.plot(x="Make",y="Odometer (KM)",kind="bar")
# histograms
car_sales["Odometer (KM)"].plot.hist()
car_sales["Odometer (KM)"].plot(kind="hist")
car_sales["Odometer (KM)"].plot.hist(bins=10)
# another dataset
heart_disease = pd.read_csv("heart-disease.csv")
heart_disease.head()
# histogram of age
heart_disease["age"].plot.hist(bins=10)
heart_disease.head()
heart_disease.plot.hist(figsize=(10,30),subplots=True);
###Output
_____no_output_____
###Markdown
* OO method
###Code
heart_disease.head()
over_50 = heart_disease[heart_disease["age"]>50]
over_50.head()
# Pandas plot method
over_50.plot(kind="scatter",x="age",y="chol",c="target",figsize=(10,6))
# OO method
fig, ax = plt.subplots(figsize=(10,6))
over_50.plot(kind="scatter",x="age",y="chol",c="target",ax=ax)
#ax.set_xlim([45,100])
over_50.target.values
# OO method from scratch
fig,ax = plt.subplots(figsize=(10,6))
# Plot the data
scatter = ax.scatter(x=over_50["age"],y=over_50["chol"],c=over_50["target"])
# Customize
ax.set(title="Heart Disease and Cholesterol Level",xlabel="Age",ylabel="Cholesterol")
# Legend
ax.legend(*scatter.legend_elements(),title="Target")
# Horizontal line
ax.axhline(over_50["chol"].mean(),ls="--")
over_50.head()
# subplot of chol, age, thalach
fig,(ax0,ax1) = plt.subplots(nrows=2,ncols=1,figsize=(10,10),sharex=True)
# ax0
# Add data
scatter = ax0.scatter(x=over_50["age"],y=over_50["chol"],c=over_50["target"])
# customize
ax0.set(title="Heart Disease and Cholesterol Levels",ylabel="Cholesterol")
# legend
ax0.legend(*scatter.legend_elements(),title="Target")
# meanline
ax0.axhline(y=over_50["chol"].mean(),ls="--")
# ax1
# add data
scatter = ax1.scatter(x=over_50["age"],y=over_50["thalach"],c=over_50["target"])
# customize
ax1.set(title="Heart Disease and Max Heart Rate",xlabel="Age",ylabel="Max Heart Rate")
# legend
ax1.legend(*scatter.legend_elements(),title="Target")
# meanline
ax1.axhline(y=over_50["thalach"].mean(),ls="--")
# fig title
fig.suptitle("Heart Disease Analysis",fontsize=16,fontweight="bold")
###Output
_____no_output_____
###Markdown
Customize Matplotlib plots and styling
###Code
# See the diff styles available
plt.style.available
car_sales.head()
car_sales["Price"].plot()
plt.style.use("seaborn-whitegrid")
car_sales["Price"].plot()
plt.style.use("seaborn")
car_sales["Price"].plot()
car_sales.plot(x="Odometer (KM)",y="Price",kind="scatter")
plt.style.use("ggplot")
car_sales["Price"].plot()
# random data
x = np.random.randn(10,4)
x
df = pd.DataFrame(x, columns=["a","b","c","d"])
df
ax = df.plot(kind="bar")
type(ax)
# customize using set
ax = df.plot(kind="bar")
ax.set(title="Random Number Bar Graph from dataframe",xlabel="Row number",ylabel="Random number")
ax.legend().set_visible(True)
# set the style
plt.style.use("seaborn-whitegrid")
# OO method from scratch
fig,ax = plt.subplots(figsize=(10,6))
# Plot the data
scatter = ax.scatter(x=over_50["age"],y=over_50["chol"],c=over_50["target"],cmap="winter")
# Customize
ax.set(title="Heart Disease and Cholesterol Level",xlabel="Age",ylabel="Cholesterol")
# Legend
ax.legend(*scatter.legend_elements(),title="Target")
# Horizontal line
ax.axhline(over_50["chol"].mean(),ls="--")
# customizing the y and x
# subplot of chol, age, thalach
fig,(ax0,ax1) = plt.subplots(nrows=2,ncols=1,figsize=(10,10),sharex=True)
# ax0
# Add data
scatter = ax0.scatter(x=over_50["age"],y=over_50["chol"],c=over_50["target"],cmap="winter")
# customize
ax0.set(title="Heart Disease and Cholesterol Levels",ylabel="Cholesterol")
#change x,y axis limit
ax0.set_xlim([50,80])
#ax0.set_ylim([60,200])
# legend
ax0.legend(*scatter.legend_elements(),title="Target")
# meanline
ax0.axhline(y=over_50["chol"].mean(),ls="--")
# ax1
# add data
scatter = ax1.scatter(x=over_50["age"],y=over_50["thalach"],c=over_50["target"],cmap="winter")
# customize
ax1.set(title="Heart Disease and Max Heart Rate",xlabel="Age",ylabel="Max Heart Rate")
#change x,y axis limit
ax1.set_xlim([50,80])
ax1.set_ylim([60,200])
# legend
ax1.legend(*scatter.legend_elements(),title="Target")
# meanline
ax1.axhline(y=over_50["thalach"].mean(),ls="--")
# fig title
fig.suptitle("Heart Disease Analysis",fontsize=16,fontweight="bold")
fig.savefig("heart-disease-analysis.png")
###Output
_____no_output_____ |
hepatitis data.ipynb | ###Markdown
The hepatitis dataset has 155 rows, which correspond to the number of patients, and 20 columns, corresponding to the features collected for each patient.
###Code
data.describe()
replacements = {'die': 0,
'live': 1,
'female': 0,
'male': 1}
data.replace(replacements, inplace = True)
###Output
_____no_output_____
###Markdown
The data types of the given dataset consist of object, boolean and float
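A quick way to confirm this is to inspect the dtypes directly (assuming the DataFrame is named `data`, as in the surrounding cells).
###Code
# Count how many columns fall under each dtype
data.dtypes.value_counts()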
###Code
die =len(data[data['class'] == 0])
live = len(data[data['class']== 1])
plt.figure(figsize=(5,5))
# Data to plot
labels = 'DIE','LIVE'
sizes = [die,live]
colors = ['red', 'lightgreen']
explode = (0.2, 0)
# Plot
plt.pie(sizes, explode=explode, labels=labels, colors=colors,
autopct='%1.1f%%', shadow=True)
plt.show()
###Output
_____no_output_____
###Markdown
In total, 20.65% of the patients died due to the disease, while 79.35% of the patients overcame the disease and survived.
###Code
male =len(data[data['sex'] == 0])
female = len(data[data['sex']==1])
plt.figure(figsize=(5,5))
labels = 'MALE','FEMALE'
sizes = [male,female]
colors = ['skyblue', 'yellowgreen']
explode = (0.2, 0)
plt.pie(sizes, explode=explode, labels=labels, colors=colors,
autopct='%1.1f%%', shadow=True)
plt.show()
###Output
_____no_output_____
###Markdown
About 89.7% of the patients affected by the disease are male and 10.3% are female.
###Code
plt.figure(figsize=(15,6))
sns.countplot(x='age',data = data, hue = 'steroid')
plt.show()
###Output
_____no_output_____
###Markdown
People in the age group of 22 to 34 consumed large amounts of steroids, with people aged 30 consuming the highest amount, followed by those aged 23, 28 and 51, who also consumed high amounts of steroids.
###Code
age = sns.distplot(data.age)
###Output
_____no_output_____
###Markdown
People in the age group of 20 to 50 were the most affected by the disease, with people around the age of 35 affected the most. In the above graph we can see that people aged between 30 and 34 had consumed large amounts of steroids, so the number of people affected by the disease also appears to be high in the same age group.
###Code
protime = sns.boxplot(data.protime)
###Output
_____no_output_____
###Markdown
The "prothrombin time" (protime) is one way of measuring how long it takes blood to form a clot, and it is measured in second. A normal protime indicates that a normal amount of blood-clotting protein is available.If protime is high it take more time to clot the blood. The maximum protime seems to be 100 and the median is 60.
###Code
sns.scatterplot(x= data.protime, y=data.age, palette = ['blue','red'], data=data)
plt.show()
###Output
_____no_output_____
###Markdown
Protime is high for people in the age group of 30 to 50.
###Code
sns.scatterplot(x= data.albumin, y=data.age,hue = data.sex, palette = ['blue','red'], data=data)
plt.show()
###Output
_____no_output_____
###Markdown
A low albumin level in patients with hepatitis can be a sign of advanced liver disease; males show a lower risk level compared to females. Females aged between 20 and 60 are more affected due to low albumin levels.
###Code
plt.figure(figsize=(15,12))
sns.heatmap(data.corr(), cmap='coolwarm',annot = True)
plt.show()
###Output
_____no_output_____ |
Make-a-bot.ipynb | ###Markdown
Make-a-bot Download the tweets of your favorite personality and build an RNN that tries to predict the next words.
###Code
#!conda install -v -c conda-forge tweepy
%reload_ext autoreload
%autoreload 2
%matplotlib inline
from fastai.text import *
from pathlib import Path
from tweet_dumper import get_all_tweets
tweetpath = Path('./tweets')
modelspath = Path('./models')
path = Path('./')
best_model_path = Path('./models/bestmodel30k')
# Download the Portuguese language model pre-trained on Wikipedia
!curl https://storage.googleapis.com/gde-dl-bsb/models/bestmodel30k.pth -o ../models/bestmodel30k.pth
# Download the vocabulary
!curl https://storage.googleapis.com/gde-dl-bsb/models/itos.pkl -o ../models/itos.pkl
###Output
_____no_output_____
###Markdown
Downloading the tweets. Enter your Twitter API access keys and set the Twitter profile name you would like to use to train the network. More information at https://developer.twitter.com/
###Code
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""
keys = [consumer_key,consumer_secret,access_key,access_secret]
screen_name = "jairbolsonaro"
get_all_tweets(screen_name, keys)
###Output
_____no_output_____
###Markdown
Preparing the data
###Code
def readtweets(d): return [' '.join(o.strip() for o in open(d).readlines())]
tweets = 'tweets/' + '{}_tweets.txt'.format(screen_name)
t = readtweets(tweets)
# keep 80% of the tweets for training and 20% for validation
perc80 = len(t) - len(t)//5
train_txt, valid_txt = [], []  # two separate lists, so the train and validation data do not alias each other
train_txt.append(t[0][:perc80])
valid_txt.append(t[0][perc80:])
bs = 64
train = TextList(list(train_txt), path=path);
valid = TextList(list(valid_txt), path=path);
src = ItemLists(path=path, train=train, valid=valid).label_for_lm()
data = src.databunch(bs=bs)
#tokenizador
tokenizer = Tokenizer(lang='pt', n_cpus=8)
#vocabulario
with modelspath.joinpath('itos.pkl').open('rb') as f:
itos = pickle.load(f)
vocab = Vocab(itos)
###Output
_____no_output_____
###Markdown
Transfer Learning
###Code
learn = language_model_learner(data, arch=AWD_LSTM, pretrained_fnames=(best_model_path,modelspath.joinpath('itos')))
learn.lr_find()
learn.recorder.plot()
learn.fit_one_cycle(1, max_lr=5e-2)
learn.fit_one_cycle(5, max_lr=5e-2)
learn.save('first-try')
###Output
_____no_output_____
###Markdown
Fine Tuning
###Code
learn.unfreeze()
learn.lr_find()
learn.recorder.plot()
learn.fit_one_cycle(5, slice(5e-3/(2.6**4),5e-3), moms=(0.8,0.7))
learn.save('fine-tuned-model')
###Output
_____no_output_____
###Markdown
Prediction
###Code
TEXT = ""
N_WORDS = 50
N_SENTENCES = 5
print("\n".join(learn.predict(TEXT, N_WORDS, temperature=0.75) for _ in range(N_SENTENCES)))
###Output
_____no_output_____
###Markdown
---- Load Examples. We can also load pre-trained bots. Some of the results are already available in the *examples* folder. Bolsobot
###Code
# Download the pre-trained Bolsonaro model
!curl https://www.dropbox.com/s/bn4i4x6jiinejzy/Bolsobot.pth -o ../models/Bolsobot.pth
learn.load('Bolsobot')
TEXT = "Grande dia!"
N_WORDS = 50
N_SENTENCES = 5
print("\n".join(learn.predict(TEXT, N_WORDS, temperature=0.75) for _ in range(N_SENTENCES)))
###Output
_____no_output_____
###Markdown
Robolavo
###Code
# Download the pre-trained Olavo de Carvalho model
!curl https://www.dropbox.com/s/yhsbw1yu9a0844x/olavobot-twitter.pth -o ../models/olavobot-twitter.pth
learn.load('olavobot-twitter')
TEXT = "A Terra é"
N_WORDS = 50
N_SENTENCES = 5
print("\n".join(learn.predict(TEXT, N_WORDS, temperature=0.75) for _ in range(N_SENTENCES)))
###Output
_____no_output_____ |
assets/all_html/2019_10_24_HW4.ipynb | ###Markdown
HW4 -- Sentiment and Lies STEP 1: Import the dataNOTE: May need to change delimiter based on the data file
###Code
import pandas as pd
df = pd.read_csv('deception_data_converted_final.csv', sep='\t')
df[:5]
###Output
_____no_output_____
###Markdown
STEP 2: Pull out the labels
###Code
def get_labels(row):
split_row = str(row).split(',')
lie = split_row[0]
sentiment = split_row[1]
return [lie, sentiment, split_row[2:]]
df['all'] = df.apply(lambda row: get_labels(row['lie,sentiment,review']), axis=1)
df[:5]
df['lie'] = df.apply(lambda row: row['all'][0][0], axis=1)
df[:5]
df['sentiment'] = df.apply(lambda row: row['all'][1][0], axis=1)
df[:5]
df['review'] = df.apply(lambda row: ''.join(row['all'][2]), axis=1)
df[:5]
clean_df = df.copy()
clean_df.drop(['lie,sentiment,review', 'all'], axis=1, inplace=True)
clean_df
###Output
_____no_output_____
###Markdown
STEP 3: Clean the data
###Code
def clean_rogue_characters(string):
exclude = ['\\',"\'",'"']
string = ''.join(string.split('\\n'))
string = ''.join(ch for ch in string if ch not in exclude)
return string
clean_df['review'] = clean_df['review'].apply( lambda x: clean_rogue_characters(x) )
clean_df['review'][0]
###Output
_____no_output_____
###Markdown
STEP 4: Export cleaned, formatted CSV
###Code
clean_df.to_csv('hw4_data.csv',index=False)
df = pd.read_csv('hw4_data.csv')
df[:5]
###Output
_____no_output_____
###Markdown
STEP 5: Split df into data sets LIE DFs
###Code
lie_df_f = df[df['lie'] == 'f']
lie_df_t = df[df['lie'] == 't']
###Output
_____no_output_____
###Markdown
SENTIMENT DFs
###Code
sent_df_n = df[df['sentiment'] == 'n']
sent_df_p = df[df['sentiment'] == 'p']
###Output
_____no_output_____
###Markdown
STEP 5b: Export to Corpus to run on current pipelines
###Code
def print_to_file(rating, review, num, title):
both = review
output_filename = str(rating) + '_'+ title +'_' + str(num) + '.txt'
outfile = open(output_filename, 'w')
outfile.write(both)
outfile.close()
def export_to_corpus(df, subj, title):
for num,row in enumerate(df['review']):
print_to_file(subj, row, num, title)
export_to_corpus(sent_df_n, 'neg', 'hw4_n')
export_to_corpus(sent_df_p, 'pos', 'hw4_p')
export_to_corpus(lie_df_f, 'false', 'hw4_f')
export_to_corpus(lie_df_t, 'true', 'hw4_t')
###Output
_____no_output_____ |
exploratory/connor_flu.ipynb | ###Markdown
Influenza Cases (including H1N1)
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import git
repo = git.Repo("./", search_parent_directories=True)
homedir = repo.working_dir
###Output
_____no_output_____
###Markdown
Influenza-Like Illness (ILI)
###Code
# Influenza Like Illness Dataset
# Weekly (not cumulative counts)
# Note the number of total patients vs the other datasets (order of magnitude more)
# And data source: From regular local physicians not clinical or public health labs
# Much tougher 2020 flu season than 2019 flu season
df = pd.read_csv(f"{homedir}/data/us/flu/cases/ILI_Ages.csv")
ili_2019 = df[(df["YEAR"] == 2019) & (df["WEEK"] <= 10)].sort_values(["WEEK"])
ili_2019
ili_2020 = df[(df["YEAR"] == 2020) & (df["WEEK"] <= 10)].sort_values(["WEEK"])
ili_2020
# Plot number of patients
# %UNWEIGHTED ILI, TOTAL PATIENTS
def plot_ili_quantity(quantity):
weeks = ili_2019["WEEK"].values
plt.plot(weeks, ili_2020[quantity].values, label="2020")
plt.plot(weeks, ili_2019[quantity].values, label="2019")
plt.xlabel("WEEKS")
plt.ylabel(quantity)
plt.legend()
plt.show()
plot_ili_quantity("TOTAL PATIENTS")
plot_ili_quantity("%UNWEIGHTED ILI")
plot_ili_quantity("ILITOTAL")
###Output
_____no_output_____
###Markdown
Age Grouping and Virus Strain
###Code
# Age and Virus Grouping Dataset
df2 = pd.read_csv(f"{homedir}/data/us/flu/cases/WHO_cases_age_groupings_virus_strains.csv")
df2.sample(10)
###Output
_____no_output_____
###Markdown
H1N1
###Code
# Comprehensive (until 2015) Dataset including 2009 H1N1 statistics
# Includes both Clinical and Public Health Labs
# Weekly statistics not cumulative
# Note that H1N1 epidemic defied seasonality
# Observe elevated Percent Positive cases
df3 = pd.read_csv(f"{homedir}/data/us/flu/cases/WHO_NREVSS_Combined_prior_to_2015_16.csv")
df3[df3["YEAR"] == 2009].sort_values(["WEEK"])
###Output
_____no_output_____
###Markdown
Clinical Lab Data
###Code
# 2015-2020 Clinical Lab Data
# Observe that this more recent data (> 2015) has many more reported specimens than before
# Because data reported from local clinics as opposed to public health labs
# Less granular in determining specific virus strain
df4 = pd.read_csv(f"{homedir}/data/us/flu/cases/WHO_NREVSS_Clinical_Labs.csv")
df4[df4["YEAR"] == 2020].sort_values(["WEEK"])
###Output
_____no_output_____
###Markdown
Public Health Lab Data
###Code
# 2015-2020 Public Health Lab Data
# More detailed than local clinical lab data, but less quantity
# Still have fairly large number of H1N1 cases
df5 = pd.read_csv(f"{homedir}/data/us/flu/cases/WHO_NREVSS_Public_Health_Labs.csv")
df5[df5["YEAR"] == 2020].sort_values(["WEEK"])
###Output
_____no_output_____
###Markdown
Deaths
###Code
# National Deaths from 2013 - 2020
# Note how it is reported in seasons (Week 40 of previous year to Week 20 of current year)
# And that the death numbers are cumulative, not weekly
# Rolling one-year death count, so numbers do go down (even when cumulative!!)
df6 = pd.read_csv(f"{homedir}/data/us/flu/deaths/national_pi_deaths_2013_2020.csv")
df6[df6["SEASON"] == "2019-20"]
# Statewide Deaths from 2012 - 2020
df7 = pd.read_csv(f"{homedir}/data/us/flu/deaths/statewide_pi_deaths_2012_2020.csv")
df7[(df7["SEASON"] == "2019-20") & (df7["SUB AREA"] == "California")]
###Output
_____no_output_____ |
module2-wrangle-ml-datasets/Axel_Corro_LS_DS13_232_assignment.ipynb | ###Markdown
Lambda School Data Science*Unit 2, Sprint 3, Module 1*--- Wrangle ML datasets- [ ] Continue to clean and explore your data. - [ ] For the evaluation metric you chose, what score would you get just by guessing?- [ ] Can you make a fast, first model that beats guessing?**We recommend that you use your portfolio project dataset for all assignments this sprint.****But if you aren't ready yet, or you want more practice, then use the New York City property sales dataset for today's assignment.** Follow the instructions below, to just keep a subset for the Tribeca neighborhood, and remove outliers or dirty data. [Here's a video walkthrough](https://youtu.be/pPWFw8UtBVg?t=584) you can refer to if you get stuck or want hints!- Data Source: [NYC OpenData: NYC Citywide Rolling Calendar Sales](https://data.cityofnewyork.us/dataset/NYC-Citywide-Rolling-Calendar-Sales/usep-8jbt)- Glossary: [NYC Department of Finance: Rolling Sales Data](https://www1.nyc.gov/site/finance/taxes/property-rolling-sales-data.page)
###Code
%%capture
import sys
# If you're on Colab:
if 'google.colab' in sys.modules:
DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Applied-Modeling/master/data/'
!pip install category_encoders==2.*
!pip install pandas-profiling==2.*
# If you're working locally:
else:
DATA_PATH = '../data/'
# Read New York City property sales data
import pandas as pd
df = pd.read_csv(DATA_PATH+'condos/NYC_Citywide_Rolling_Calendar_Sales.csv')
###Output
_____no_output_____
###Markdown
Your code starts here:
###Code
# Change column names: replace spaces with underscores
df.columns = df.columns.str.replace(" ","_")
df.columns
# Get Pandas Profiling Report
# from pandas_profiling import ProfileReport
# ProfileReport(df)
# Keep just the subset of data for the Tribeca neighborhood
# Check how many rows you have now. (Should go down from > 20k rows to 146)
tribeca = df[df['NEIGHBORHOOD']=='TRIBECA'].reset_index()
tribeca.head()
# Q. What's the date range of these property sales in Tribeca?
tribeca['SALE_DATE'].describe()
# Convert SALE_DATE (read in as strings) to datetime so the date range can be inspected;
# SALE_PRICE, which the Pandas Profiling Report also showed as strings, is cleaned further below
tribeca['SALE_DATE'] = pd.to_datetime(tribeca['SALE_DATE'],infer_datetime_format=True)
tribeca['SALE_DATE'].describe()[['first','last']]
# Q. What is the maximum SALE_PRICE in this dataset?
tribeca['SALE_PRICE'].head()
# Look at the row with the max SALE_PRICE
def clean_saleprice(content):
content = content.replace(" ","").replace("$","").replace(",","")
content = pd.to_numeric(content)
return content
tribeca['SALE_PRICE'] = tribeca['SALE_PRICE'].apply(clean_saleprice)
tribeca.sort_values(by='SALE_PRICE').tail(1)
# Get value counts of TOTAL_UNITS
# Q. How many property sales were for multiple units?
tribeca['TOTAL_UNITS'].value_counts()
# Keep only the single units
tribeca_singles = tribeca[tribeca['TOTAL_UNITS'] < 2]
# Q. Now what is the max sales price? How many square feet does it have?
tribeca_singles.sort_values(by='SALE_PRICE').tail(1)['GROSS_SQUARE_FEET']
# Q. How often did $0 sales occur in this subset of the data?
# There's a glossary here:
# https://www1.nyc.gov/site/finance/taxes/property-rolling-sales-data.page
# It says:
# A $0 sale indicates that there was a transfer of ownership without a
# cash consideration. There can be a number of reasons for a $0 sale including
# transfers of ownership from parents to children.
no_cash_transfers = tribeca[tribeca['SALE_PRICE'] == 0]
len(no_cash_transfers)
# Look at property sales for > 5,000 square feet
# Q. What is the highest square footage you see?
tribeca[tribeca['GROSS_SQUARE_FEET'] > 5000].sort_values(by='GROSS_SQUARE_FEET').tail(1)
# What are the building class categories?
# How frequently does each occur?
tribeca['BUILDING_CLASS_CATEGORY'] = tribeca['BUILDING_CLASS_CATEGORY'].str.replace(" ","_")
tribeca['BUILDING_CLASS_CATEGORY'].value_counts()
# Keep subset of rows:
# Sale price more than $0,
# Building class category = Condos - Elevator Apartments
# Check how many rows you have now. (Should be 106 rows.)
tribeca_condos = tribeca[(tribeca['SALE_PRICE'] != 0) & (tribeca['BUILDING_CLASS_CATEGORY'] == '13_CONDOS_-_ELEVATOR_APARTMENTS')]
tribeca_condos
# Make a Plotly Express scatter plot of GROSS_SQUARE_FEET vs SALE_PRICE
import plotly.express as px
px.scatter(tribeca_condos, x='GROSS_SQUARE_FEET',y='SALE_PRICE')
# Add an OLS (Ordinary Least Squares) trendline,
# to see how the outliers influence the "line of best fit"
px.scatter(tribeca_condos, x='GROSS_SQUARE_FEET',y='SALE_PRICE',trendline='ols')
# Look at sales for more than $35 million
# All are at 70 Vestry Street
# All but one have the same SALE_PRICE & SALE_DATE
# Was the SALE_PRICE for each? Or in total?
# Is this dirty data?
tribeca_condos[tribeca_condos['SALE_PRICE'] > 35000000]
# Make a judgment call:
# Keep rows where sale price was < $35 million
# Check how many rows you have now. (Should be down to 90 rows.)
tribeca_condos = tribeca_condos[tribeca_condos['SALE_PRICE'] < 35000000]
tribeca_condos
# Now that you've removed outliers,
# Look again at a scatter plot with OLS (Ordinary Least Squares) trendline
px.scatter(tribeca_condos, x='GROSS_SQUARE_FEET',y='SALE_PRICE',trendline='ols')
# Select these columns, then write to a csv file named tribeca.csv. Don't include the index.
tribeca_condos[['GROSS_SQUARE_FEET','SALE_PRICE']].to_csv('tribeca.csv', index=False)
###Output
_____no_output_____ |
homeworksMAT281/C2_machine_learning/02_analisis_supervisado_regresion/02_regresion.ipynb | ###Markdown
MAT281 - Regression Models. Class objectives: * Learn the basic concepts of regression models in Python. Contents: * [Regression models](c1) * [Examples in Python](c2) I.- Regression Models. Regression models are used to predict numerical values, for example, estimating the price of a house from its square meters. Among regression models, the most basic one (but by no means the least important) is the **linear regression model**. 1.1) Linear regression. The **linear regression model** assumes that $$\boldsymbol{Y} = \boldsymbol{X}\boldsymbol{\beta} + \boldsymbol{\epsilon},$$ where: * $\boldsymbol{X} = (x_1,...,x_n)^{T}$: explanatory variable * $\boldsymbol{Y} = (y_1,...,y_n)^{T}$: response variable * $\boldsymbol{\epsilon} = (\epsilon_1,...,\epsilon_n)^{T}$: error term, assumed to be normal, i.e. $\epsilon \sim \mathcal{N}( \boldsymbol{0},\Sigma)$. * $\boldsymbol{\beta} = (\beta_1,...,\beta_n)^{T}$: regression coefficients. The idea is to establish the relationship between the independent and dependent variables by fitting the best straight line through the points. 1.2) Error of a model. The **error** is the difference between the observed value and the predicted value, that is: $$e_{i}=y_{i}-\hat{y}_{i} $$<img src="https://www.jmp.com/en_hk/statistics-knowledge-portal/what-is-multiple-regression/fitting-multiple-regression-model/_jcr_content/par/styledcontainer_2069/par/lightbox_4130/lightboxImage.img.png/1548704005203.png" width="480" height="360" align="right"/> a) Ways to measure the error of a model. To measure the fit of a model, so-called **distance functions** or **metrics** are used. Among the many existing metrics we find: 1. **Absolute metrics**: absolute (unscaled) metrics measure the error without scaling the values. The most commonly used absolute metrics are: * **Mean Absolute Error** (MAE) $$\textrm{MAE}(y,\hat{y}) = \dfrac{1}{n}\sum_{t=1}^{n}\left | y_{t}-\hat{y}_{t}\right |$$ * **Mean Squared Error** (MSE): $$\textrm{MSE}(y,\hat{y}) =\dfrac{1}{n}\sum_{t=1}^{n}\left ( y_{t}-\hat{y}_{t}\right )^2$$ 2. **Percentage metrics**: percentage (scaled) metrics measure the error on a relative scale, i.e. they aim to bound the error between 0 and 1, where 0 means a perfect fit and 1 a poor fit. Note that percentage metrics can often take values greater than 1. The most commonly used percentage metrics are: * **Mean Absolute Percentage Error** (MAPE): $$\textrm{MAPE}(y,\hat{y}) = \dfrac{1}{n}\sum_{t=1}^{n}\left | \frac{y_{t}-\hat{y}_{t}}{y_{t}} \right |$$ * **Symmetric Mean Absolute Percentage Error** (sMAPE): $$\textrm{sMAPE}(y,\hat{y}) = \dfrac{1}{n}\sum_{t=1}^{n} \frac{\left |y_{t}-\hat{y}_{t}\right |}{(\left | y_{t} \right |+\left | \hat{y}_{t} \right |)/2}$$ b) R-squared and adjusted R-squared. The **coefficient of determination**, or **R-squared** ($r^2$), is a statistic used in the context of a statistical model whose main purpose is to predict future outcomes or test a hypothesis. It measures how well the model replicates the observed outcomes, i.e. the proportion of the variation in the outcomes that the model can explain. The value of $r^2$ usually lies between 0 and 1, where 0 indicates a poor fit and 1 corresponds to a perfect linear fit. This statistic is most often used for linear models. 
It is defined by the formula: $$r^2 = \dfrac{SS_{reg}}{SS_{tot}} = 1 - \dfrac{SS_{res}}{SS_{tot}},$$ where: * **$SS_{reg}$** (explained sum of squares, ESS): $\sum_{i}(\hat{y}-\bar{y})^2$ * **$SS_{res}$** (residual sum of squares, RSS): $\sum_{i}(y_{i}-\hat{y})^2 = \sum_{i}e_{i}^2$ * **$SS_{tot}$** (total sum of squares): $\sum_{i}(y_{i}-\bar{y})^2$, where $SS_{tot}=SS_{reg}+SS_{res}$. In general terms, $r^2$ is related to the fraction of unexplained variance (FVU), since the second term compares the unexplained variance (the variance of the model errors) with the total variance (of the data).<img src="https://upload.wikimedia.org/wikipedia/commons/thumb/8/86/Coefficient_of_Determination.svg/400px-Coefficient_of_Determination.svg.png" width="480" height="360" align="right"/> * The areas of the blue squares represent the squared residuals with respect to the average value ($SS_{tot}$). * The areas of the red squares represent the squared residuals with respect to the linear regression ($SS_{res}$). On the other hand, as more explanatory variables are added to the model, $r^2$ automatically increases; in other words, the more explanatory variables you add, the better the fit appears to be (a misleading argument). This is why the **adjusted $r^2$** is defined, a modification of $r^2$ that adjusts for the number of explanatory variables in the model ($p$) relative to the number of data points ($n$): $$r^2_{adjusted} = 1-(1-r^2)\dfrac{n-1}{n-p-1}$$ 1.3) Least squares method. The **least squares method** is an optimization method that looks for the best approximation by minimizing the squared residuals, that is, it seeks to solve: $$(P)\ \min \sum_{i=1}^n e_{i}^2 =\sum_{i=1}^n (y_{i}-f_{i}(x;\beta))^2 $$ For simple linear regression we look for a function $$f(x;\beta) = \beta_{0} + \beta_{1}x,$$ so the problem to solve is: $$(P)\ \min \sum_{i=1}^n e_{i}^2 =\dfrac{1}{n}\sum_{i=1}^{n}\left ( y_{i}-(\beta_{0} + \beta_{1}x_{i})\right )^2$$ This means that, for this problem, we must find the $\beta = (\beta_{0},\beta_{1})$ that minimize the optimization problem. In this case the solution is given by: $$\hat{\beta}_{1} = \dfrac{\sum(x-\bar{x})(y-\bar{y})}{\sum(x-\bar{x})^2}\ ; \ \hat{\beta}_{0} = \bar{y}-\hat{\beta}_{1} \bar{x} $$ The methodology for finding the parameters $\beta$ in the multiple linear regression case extends naturally from the simple linear regression model, and the solution is given by: $$\hat{\beta} = (X^{\top}X)^{-1}X^{\top}y$$ **IMPORTANT**: * Note that the $r^2$ coefficient works well in the world of linear regression. For the analysis of **non-linear models** this coefficient loses its interpretation. * The following [reference](http://reliawiki.org/index.php/Simple_Linear_Regression_Analysis) is left for understanding key concepts such as hypothesis tests, confidence intervals and p-values. These terms are essential for understanding the significance of the fit. * Many more metrics exist, but these are the most common ones. The file **metrics_regression.py** defines the metrics presented above, which will be useful later. II.- Examples in Python. a) Vehicles dataset (simple linear regression). The dataset `vehiculos_procesado.csv` contains detailed information about the components of different vehicles. 
The goal will be to predict **co2** using linear regression analysis.
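For reference, here is a minimal NumPy sketch of the main error metrics defined above; the notebook later imports its metrics from `metrics_regression.py`, so this is only an illustrative approximation of those functions, not that file.
###Code
# Illustrative sketch of the metrics defined above (not the actual metrics_regression.py)
import numpy as np

def mae(y, yhat):   return np.mean(np.abs(y - yhat))
def mse(y, yhat):   return np.mean((y - yhat) ** 2)
def mape(y, yhat):  return np.mean(np.abs((y - yhat) / y))
def smape(y, yhat): return np.mean(np.abs(y - yhat) / ((np.abs(y) + np.abs(yhat)) / 2))

def r2_adjusted(r2, n, p):
    # adjusted R-squared for n observations and p explanatory variables
    return 1 - (1 - r2) * (n - 1) / (n - p - 1)
###Output
_____no_output_____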
###Code
# librerias
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
pd.set_option('display.max_columns', 500) # Ver más columnas de los dataframes
# Ver gráficos de matplotlib en jupyter notebook/lab
%matplotlib inline
# load data
litros_por_galon = 3.78541
vehiculos = pd.read_csv(os.path.join('data', 'vehiculos_procesado.csv'))
vehiculos["consumo_litros_milla"] = litros_por_galon/ vehiculos.consumo
vehiculos.head()
# descripcion del conjunto de datos
vehiculos.describe()
# grafico de puntos
sns.set(rc={'figure.figsize':(10,8)})
sns.scatterplot(
x='consumo_litros_milla',
y='co2',
data=vehiculos,
)
plt.show()
###Output
_____no_output_____
###Markdown
For practical purposes, let us fit the model for the variables: * $Y$ = $co_{2}$ * $X$ = consumo_litros_milla. In other words, the regression problem with several regressors is simplified to a regression problem with a single regressor (also known as **simple linear regression**). The first thing we must do is split our data into a **training set** and a **test set**, but what are these sets? Concept of train set and test set: when training machine learning models, one set is needed to train the model and another set to evaluate it. That is why the data is split into two sets: * **Train set**: training set with which the machine learning algorithms are trained. * **Test set**: test set used to assess the reliability of the model, i.e. how good the fit is. What size should each set be? The answer depends strongly on the size of the dataset. As a rule of thumb, consider:
| number of rows | train set | test set |
|------------------------|-----------|----------|
| between 100-1,000 | 67% | 33% |
| between 1,000-100,000 | 80% | 20% |
| more than 100,000 | 99% | 1% |
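As a small worked example of the rule of thumb above (an illustrative helper, not part of the original material), the table can be encoded as a function:
###Code
# Illustrative helper encoding the rule-of-thumb table above
def suggest_test_size(n_rows):
    if n_rows <= 1_000:
        return 0.33
    elif n_rows <= 100_000:
        return 0.20
    return 0.01

print(suggest_test_size(len(vehiculos)))  # ~35k rows -> 0.2, the value used below
###Output
_____no_output_____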
###Code
from sklearn import datasets
from sklearn.model_selection import train_test_split
# import some data to play with
X = vehiculos[['consumo_litros_milla']] # we only take the first two features.
y = vehiculos['co2']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
# print rows train and test sets
print('Separando informacion:\n')
print('numero de filas data original : ',len(X))
print('numero de filas train set : ',len(X_train))
print('numero de filas test set : ',len(X_test))
###Output
Separando informacion:
numero de filas data original : 35539
numero de filas train set : 28431
numero de filas test set : 7108
###Markdown
To do this we must instantiate our model from some library. In this example, the library used to work with the different models will be **sklearn**, and the linear regression model is **LinearRegression**.
###Code
# importando el modelo de regresión lineal
from sklearn.linear_model import LinearRegression
model_rl = LinearRegression() # Creando el modelo.
###Output
_____no_output_____
###Markdown
Once the model has been instantiated, we must train it with the **X_train** and **y_train** data.
###Code
# ajustando el modelo
model_rl.fit(X_train, y_train)
###Output
_____no_output_____
###Markdown
We can inspect the model coefficients ($\beta_0$ and $\beta_1$):
###Code
# Lista de coeficientes B para cada X
beta_0 = round(model_rl.intercept_,2)
beta_1 = round(model_rl.coef_[0],2)
print(f"El mejor ajuste lineal viene dado por la recta: \n\n \
f(consumo_litros_milla) = {beta_0} + {beta_1}*consumo_litros_milla")
###Output
El mejor ajuste lineal viene dado por la recta:
f(consumo_litros_milla) = 2.59 + 2339.86*consumo_litros_milla
###Markdown
Once the model parameters have been computed, we can make predictions on the **X_test** set, whose values we denote $\hat{y} = f(X_{test})$, and compare them with the true values **y_test**.
###Code
# predicciones
Y_predict = model_rl.predict(X_test)
Y_predict
###Output
_____no_output_____
###Markdown
We can also plot the predictions:
###Code
# graficos con seaborn
beta_0 = model_rl.intercept_
beta_1 = model_rl.coef_[0]
x_range = np.arange(0.1,0.5,0.1)
df_plot = pd.DataFrame({'x':x_range,
'y_true':[beta_0 + beta_1*n for n in x_range]})
df = pd.DataFrame({'x':X['consumo_litros_milla'],
'y_true':y})
fig, ax = plt.subplots(figsize=(11, 8.5))
sns.scatterplot(x='x', y='y_true', data=df, ax=ax)
sns.lineplot(x='x', y='y_true', data=df_plot,ax=ax,color="red")
plt.xlabel('consumo_litros_milla')
plt.ylabel('co2')
plt.show()
###Output
_____no_output_____
###Markdown
Graphically, we can say that the model fits quite well, since the straight line (our fit) passes close to as many points as possible. On the other hand, there are numerical values that can also help convince us of this: the metrics defined earlier. To that end, we import the different metrics from the **metrics_regression.py** file and compute their values.
###Code
from metrics_regression import *
from sklearn.metrics import r2_score
# ejemplo
df_temp = pd.DataFrame(
{
'y':y_test,
'yhat': model_rl.predict(X_test)
}
)
df_metrics = summary_metrics(df_temp)
df_metrics['r2'] = round(r2_score(y_test, model_rl.predict(X_test)),4)
print('\nMetricas para el regresor consumo_litros_milla:\n')
print(df_metrics)
###Output
Metricas para el regresor consumo_litros_milla:
mae mse rmse mape maape wmape mmape smape r2
0 3.4876 137.0854 11.7083 0.0079 0.0079 0.0074 0.0079 0.008 0.9875
###Markdown
Based on the metrics and the plot, we can conclude that the fit is quite accurate. Let us now look at another example where a linear fit may be limited. With statsmodels: we now reproduce the same analysis with `statsmodels`.
###Code
import statsmodels.api as sm
model = sm.OLS(y_train, sm.add_constant(X_train))
results = model.fit()
print(results.summary())
###Output
OLS Regression Results
==============================================================================
Dep. Variable: co2 R-squared: 0.987
Model: OLS Adj. R-squared: 0.987
Method: Least Squares F-statistic: 2.187e+06
Date: Wed, 02 Sep 2020 Prob (F-statistic): 0.00
Time: 12:25:32 Log-Likelihood: -1.1056e+05
No. Observations: 28431 AIC: 2.211e+05
Df Residuals: 28429 BIC: 2.211e+05
Df Model: 1
Covariance Type: nonrobust
========================================================================================
coef std err t P>|t| [0.025 0.975]
----------------------------------------------------------------------------------------
const 2.5875 0.324 7.984 0.000 1.952 3.223
consumo_litros_milla 2339.8556 1.582 1478.820 0.000 2336.754 2342.957
==============================================================================
Omnibus: 11380.855 Durbin-Watson: 1.994
Prob(Omnibus): 0.000 Jarque-Bera (JB): 4754865.799
Skew: 0.577 Prob(JB): 0.00
Kurtosis: 66.344 Cond. No. 23.5
==============================================================================
Warnings:
[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.
###Markdown
b) Boston house prices dataset (multiple linear regression). In this example we will use the **Boston** dataset, which ships with **sklearn** and is ideal for practising linear regression; it contains house prices for several areas of the city of Boston.
###Code
# cargar datos
boston = datasets.load_boston()
# dejar en formato dataframe
boston_df = pd.DataFrame(boston.data, columns=boston.feature_names)
boston_df['TARGET'] = boston.target
boston_df.head() # estructura de nuestro dataset.
###Output
_____no_output_____
###Markdown
For practical purposes, let us fit the model for the variables: * $Y$ = TARGET (price) * $X$ = CRIM (per capita crime rate by town). The process is similar to the previous exercise, so we run all the statements at once.
###Code
# datos para la regresion lineal simple
X = boston_df[['CRIM']]
Y = boston_df["TARGET"]
# split dataset
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state = 2)
# ajustar el modelo
model_rl = LinearRegression() # Creando el modelo.
model_rl.fit(X_train, Y_train) # ajustando el modelo
# prediciones
Y_predict = model_rl.predict(X_test)
# graficar
# graficos con seaborn
beta_0 = model_rl.intercept_
beta_1 = model_rl.coef_[0]
x_range = [n for n in range(int(X['CRIM'].min()),
int(X['CRIM'].max()),
1)
]
df_plot = pd.DataFrame({'x':x_range,
'y_true':[beta_0 + beta_1*n for n in x_range]})
df = pd.DataFrame({'x':X['CRIM'],
'y_true':Y})
fig, ax = plt.subplots(figsize=(11, 8.5))
sns.scatterplot(x='x', y='y_true', data=df, ax=ax)
sns.lineplot(x='x', y='y_true', data=df_plot,ax=ax,color="red")
plt.xlabel('CRIM')
plt.ylabel('PRICE')
plt.show()
# ejemplo: boston df
df_temp = pd.DataFrame(
{
'y':Y_test,
'yhat': model_rl.predict(X_test)
}
)
df_metrics = summary_metrics(df_temp)
df_metrics['r2'] = round(r2_score(Y_test, model_rl.predict(X_test)),4)
print('\nMetricas para el regresor CRIM:')
df_metrics
###Output
Metricas para el regresor CRIM:
###Markdown
Based on the metrics and the plot, this fit does not capture the behaviour of the data. This can happen because: * the phenomenon follows a non-linear behaviour; * more regressors are needed to explain the phenomenon adequately. Let us fit the linear regression again, but now considering all the regressors:
###Code
# datos
X = boston.data
Y = boston_df["TARGET"]
n,p = boston.data.shape
# split dataset
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state = 2)
model_rl = LinearRegression() # Creando el modelo.
model_rl.fit(X_train, Y_train) # ajustando el modelo
# ejemplo: boston df
df_temp = pd.DataFrame(
{
'y':Y_test,
'yhat': model_rl.predict(X_test)
}
)
df_metrics = summary_metrics(df_temp)
# calcular r2 y r2 ajustado
r2 = round(r2_score(Y_test, model_rl.predict(X_test)),4)
df_metrics['r2'] = r2
df_metrics['r2_ajustado'] = 1-(1-r2)*(n-1)/(n-p-1)
print('\nMetricas para TODOS los regresores:')
df_metrics
###Output
Metricas para TODOS los regresores:
###Markdown
When the linear regression model is fitted with all the regressor variables, the error metrics decrease considerably, which implies an improvement in the model. One problem, unlike simple linear regression, is that the quality of the fit can no longer be inspected graphically, so we can only rely on the computed metrics. In addition, the following questions remain: * Does adding more regressors always make the linear regression model better? * What should be taken into account before adding another regressor variable to the linear regression model? * What happens if there are outliers? c) Other linear models. There are several linear models available in sklearn (see this [reference](https://scikit-learn.org/stable/modules/linear_model.html)) that we can use and compare against each other. Among the linear models we highlight the following: * [classic linear regression](https://en.wikipedia.org/wiki/Linear_regression): classic least squares regression. $$(P)\ \min \sum_{i=1}^n (y_{i}-f_{i}(x;\beta))^2 $$ * [lasso](https://en.wikipedia.org/wiki/Lasso_(statistics)): used when we have a large number of regressors and want to reduce the collinearity problem (that is, estimate the less relevant parameters as zero). $$(P)\ \min \sum_{i=1}^n (y_{i}-f_{i}(x;\beta))^2 + \lambda \sum_{j=1}^p |\beta_{j}| $$ * [ridge](https://en.wikipedia.org/wiki/Tikhonov_regularization): also helps reduce the collinearity problem, and additionally tries to make the coefficients more robust to outliers. $$(P)\ \min \sum_{i=1}^n (y_{i}-f_{i}(x;\beta))^2 + \lambda \sum_{j=1}^p \beta_{j}^2 $$ Since in sklearn the way regression models are trained, estimated and used for prediction follows the same structure, for practical purposes we also define, two cells below, a helper routine to compute the different metrics for each model. First, the short sketch in the next cell illustrates how the penalty weight $\lambda$ (called `alpha` in sklearn) shrinks the Lasso coefficients toward zero:
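The following cell is an illustrative aside (not part of the original comparison): it refits `Lasso` on the Boston data defined above with several values of `alpha` and counts how many coefficients are shrunk to exactly zero.
###Code
# Illustrative aside: effect of the penalty weight alpha on Lasso coefficients
import numpy as np
from sklearn import linear_model

for alpha in [0.01, 0.1, 1.0, 10.0]:
    lasso = linear_model.Lasso(alpha=alpha, max_iter=10000)
    lasso.fit(X, Y)  # X, Y: Boston data defined earlier in this notebook
    n_zero = int(np.sum(lasso.coef_ == 0.0))
    print(f'alpha={alpha:>5}: {n_zero} of {len(lasso.coef_)} coefficients shrunk to exactly 0')
###Output
_____no_output_____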
###Code
class SklearnRegressionModels:
def __init__(self,model,name_model):
self.model = model
self.name_model = name_model
@staticmethod
def test_train_model(X,y,n_size):
X_train, X_test, y_train, y_test = train_test_split(X, y,test_size=n_size , random_state=42)
return X_train, X_test, y_train, y_test
def fit_model(self,X,y,test_size):
X_train, X_test, y_train, y_test = self.test_train_model(X,y,test_size )
return self.model.fit(X_train, y_train)
def df_testig(self,X,y,test_size):
X_train, X_test, y_train, y_test = self.test_train_model(X,y,test_size )
model_fit = self.model.fit(X_train, y_train)
preds = model_fit.predict(X_test)
df_temp = pd.DataFrame(
{
'y':y_test,
'yhat': model_fit.predict(X_test)
}
)
return df_temp
def metrics(self,X,y,test_size):
df_temp = self.df_testig(X,y,test_size)
df_metrics = summary_metrics(df_temp)
df_metrics['model'] = self.name_model
return df_metrics
def parameters(self,X,y,test_size):
model_fit = self.fit_model(X,y,test_size)
list_betas = [
('beta_0',model_fit.intercept_)
]
betas = model_fit.coef_
for num, beta in enumerate(betas):
name_beta = f'beta_{num+1}'
list_betas.append((name_beta,round(beta,2)))
result = pd.DataFrame(
columns = ['coef','value'],
data = list_betas
)
result['model'] = self.name_model
return result
###Output
_____no_output_____
###Markdown
Now let us compare the metrics of the different models applied to the **Boston dataset**.
###Code
from sklearn import linear_model
# boston dataframe
X = boston.data
Y = boston_df["TARGET"]
reg_lineal = linear_model.LinearRegression()
reg_ridge = linear_model.Ridge(alpha=.5)
reg_lasso = linear_model.Lasso(alpha=0.1)
list_models =[
[reg_lineal,'lineal'],
[reg_ridge,'ridge'],
[reg_lasso,'lasso'],
]
frames_metrics = []
frames_coef = []
for model,name_models in list_models:
fit_model = SklearnRegressionModels( model,name_models)
frames_metrics.append(fit_model.metrics(X,Y,0.2))
frames_coef.append(fit_model.parameters(X,Y,0.2))
X
y
# juntar resultados: metricas
pd.concat(frames_metrics)
# juntar resultados: coeficientes
pd.concat(frames_coef)
###Output
_____no_output_____ |
Phase_02_Progress.ipynb | ###Markdown
Phase 2 progress. Data preprocessing: in this part, I convert the labels from strings to numeric values and plot a histogram of the text lengths.
###Code
import pandas as pd
import nltk
df = pd.read_csv('data/fake_or_real_news.csv', nrows=10000)
df.info()
df.drop('Unnamed: 0', inplace=True, axis=1)
label_trans = lambda i: 0 if i == 'FAKE' else 1
df.label = df.label.apply(label_trans)
df.head()
import matplotlib
%matplotlib inline
df['text'].str.len().plot(kind = 'hist', bins = 1000, figsize = (7,5))
###Output
_____no_output_____
###Markdown
Term documentThe contents in text are vary from each other. To normalize them, I transfer all the words into lowercase words and filter all the punctuations. And then tokenize them into a word list.
###Code
from string import punctuation
texts = df.text
mapping_table = {ord(char): u' ' for char in punctuation}
tokenized = [nltk.word_tokenize(review.translate(mapping_table)) for review in texts]
def clean_text(tokenized_list):
import string
sw = nltk.corpus.stopwords.words('english')
sw.append("“")
sw.append("”")
sw.append("’")
sw.append("‘")
sw.append("—")
new_list = [[token.lower() for token in tlist if token not in string.punctuation and token.lower() not in sw] for tlist in tokenized_list]
return new_list
cleaned = clean_text(tokenized)
from gensim.models import Doc2Vec, Word2Vec
from gensim.models.doc2vec import TaggedDocument
from nltk.corpus import reuters
tokenized_docs = [nltk.word_tokenize(reuters.raw(fileid)) for fileid in reuters.fileids()]
tagged_docs = [TaggedDocument(doc, tags=[idx]) for idx, doc in enumerate(cleaned)]
###Output
_____no_output_____
###Markdown
Vector transitionI use two ways to transit the words showed up in the word list that I created in last step: Word2Vec and Doc2Vec, and right now I can't tell which one is better.
###Code
word_model = Word2Vec(cleaned, size = 300, window = 5, min_count = 1, alpha = 0.025, iter=10, batch_words = 10000)
doc_model = Doc2Vec(size=300, window=5, min_count=5, dm = 1, iter=10)
doc_model.build_vocab(tagged_docs)
doc_model.train(tagged_docs, epochs=10, total_examples=doc_model.corpus_count)
import numpy as np
doc_vectors = []
for i in range(len(df)):
doc_vectors.append(doc_model.docvecs[i])
doc_vectors = np.asarray(doc_vectors)
doc_vectors.shape
word_vectors = np.zeros((len(df), 300))
for i in range(0, len(df)):
word_vectors[i] = 0
for word in cleaned[i]:
word_vectors[i] += word_model[word]
if len(cleaned[i]) != 0:
word_vectors[i] = word_vectors[i] / len(cleaned[i])
word_vectors.shape
###Output
/Users/lifesaver/miniconda3/lib/python3.6/site-packages/ipykernel_launcher.py:5: DeprecationWarning: Call to deprecated `__getitem__` (Method will be removed in 4.0.0, use self.wv.__getitem__() instead).
"""
###Markdown
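As a quick qualitative check (an optional sketch; any frequent token can be substituted), we can look at nearest neighbours in the word space and at the most similar documents in the document space once both models have been trained above:
###Code
# Optional sketch: qualitative inspection of the two embedding spaces trained above.
# 'trump' is only a guess for a frequent token in this news corpus; use any word known to be in the vocabulary.
sample_word = 'trump'
if sample_word in word_model.wv:
    print(word_model.wv.most_similar(sample_word, topn=5))   # words with similar contexts
print(doc_model.docvecs.most_similar(0, topn=3))              # documents closest to document 0
###Output
_____no_output_____
###Markdown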
Set splittingAs learned in first lecture, I split the set into testing set and training set, with 300 features selected, and the test size is set to one third.
###Code
x0 = word_vectors
x = doc_vectors
y = np.array(df['label'])
from sklearn.model_selection import train_test_split
seed = 2
test_size = 0.33
x0_train, x0_test, y0_train, y0_test = train_test_split(x0, y, test_size=test_size, random_state=seed)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=test_size, random_state=seed)
###Output
_____no_output_____
###Markdown
Logistic regression
###Code
import time
from sklearn.linear_model import LogisticRegression
LR0_model = LogisticRegression()
LR_model = LogisticRegression()
LR0_model = LR0_model.fit(x0_train, y0_train)
LR_model = LR_model.fit(x_train, y_train)
print("Word2Vec + LR:",LR0_model.score(x0_test, y0_test))
print("Doc2Vec + LR:",LR_model.score(x_test, y_test))
from sklearn.metrics import classification_report
target_names = ['FAKE', 'REAL']
y0_pred = LR0_model.predict(x0_test)
y_pred = LR_model.predict(x_test)
print("Word2Vec + LR")
print(classification_report(y0_test, y0_pred, target_names=target_names))
print("Doc2Vec + LR")
print(classification_report(y_test, y_pred, target_names=target_names))
from sklearn.metrics import *
LR0_result = [accuracy_score(y0_test, y0_pred), precision_score(y0_test, y0_pred), recall_score(y0_test, y0_pred),
f1_score(y0_test, y0_pred)]
LR_result = [accuracy_score(y_test, y_pred), precision_score(y_test, y_pred), recall_score(y_test, y_pred),
f1_score(y_test, y_pred)]
###Output
_____no_output_____
###Markdown
Random Forest
###Code
from sklearn.ensemble import RandomForestClassifier
RF0_model = RandomForestClassifier(n_estimators = 20, max_features=20, random_state=seed)
RF_model = RandomForestClassifier(n_estimators = 20, max_features=20, random_state=seed)
RF0_model = RF0_model.fit(x0_train, y0_train)
RF_model = RF_model.fit(x_train, y_train)
print("Word2Vec + RF:",RF0_model.score(x0_test, y0_test))
print("Doc2Vec + RF:",RF_model.score(x_test, y_test))
y0_pred = RF0_model.predict(x0_test)
y_pred = RF_model.predict(x_test)
print("Word2Vec + RF")
print(classification_report(y0_test, y0_pred, target_names=target_names))
print("Doc2Vec + RF")
print(classification_report(y_test, y_pred, target_names=target_names))
from sklearn.metrics import *
RF0_result = [accuracy_score(y0_test, y0_pred), precision_score(y0_test, y0_pred), recall_score(y0_test, y0_pred),
f1_score(y0_test, y0_pred)]
RF_result = [accuracy_score(y_test, y_pred), precision_score(y_test, y_pred), recall_score(y_test, y_pred),
f1_score(y_test, y_pred)]
###Output
_____no_output_____
###Markdown
XGboost
###Code
from xgboost import XGBClassifier
XG0_model = XGBClassifier(max_depth=7, learning_rate=0.2,
n_estimators=20, silent=True,
objective='binary:logistic', nthread=-1,
gamma=0, min_child_weight=1, max_delta_step=0,
subsample=1, colsample_bytree=1,
colsample_bylevel=1, reg_alpha=0,
reg_lambda=1, scale_pos_weight=1,
base_score=0.5, seed=0, missing=None)
XG_model = XGBClassifier(max_depth=7, learning_rate=0.2,
n_estimators=20, silent=True,
objective='binary:logistic', nthread=-1,
gamma=0, min_child_weight=1, max_delta_step=0,
subsample=1, colsample_bytree=1,
colsample_bylevel=1, reg_alpha=0,
reg_lambda=1, scale_pos_weight=1,
base_score=0.5, seed=0, missing=None)
XG0_model = XG0_model.fit(x0_train, y0_train)
XG_model = XG_model.fit(x_train, y_train)
print("Word2Vec + XG:",XG0_model.score(x0_test, y0_test))
print("Doc2Vec + XG:",XG_model.score(x_test, y_test))
y0_pred = XG0_model.predict(x0_test)
y_pred = XG_model.predict(x_test)
print("Word2Vec + XG")
print(classification_report(y0_test, y0_pred, target_names=target_names))
print("Doc2Vec + XG")
print(classification_report(y_test, y_pred, target_names=target_names))
from sklearn.metrics import *
XG0_result = [accuracy_score(y0_test, y0_pred), precision_score(y0_test, y0_pred), recall_score(y0_test, y0_pred),
f1_score(y0_test, y0_pred)]
XG_result = [accuracy_score(y_test, y_pred), precision_score(y_test, y_pred), recall_score(y_test, y_pred),
f1_score(y_test, y_pred)]
###Output
_____no_output_____
###Markdown
Results
###Code
Y1 = np.array(LR0_result[:4])
Y2 = np.array(RF0_result[:4])
Y3 = np.array(XG0_result[:4])
from matplotlib import pyplot as plt
plt.figure(figsize=(11,6))
n = 4
X = np.arange(n)+1
plt.xticks([1.3,2.3,3.3,4.3],[r'$Accuracy$', r'$Precision$', r'$Recall$',r'$F1-score$'])
plt.bar(X,Y1,width = 0.3,facecolor = 'lightskyblue',edgecolor = 'white',label='LR')
plt.bar(X+0.3,Y2,width = 0.3,facecolor = 'yellowgreen',edgecolor = 'white',label='RF')
plt.bar(X+0.6, Y3, width = 0.3,facecolor = 'coral',edgecolor = 'white',label='XG')
for x,y in zip(X,Y1):
plt.text(x, y, '%.2f' % y, ha='center', va= 'bottom')
for x,y in zip(X,Y2):
plt.text(x+0.3, y, '%.2f' % y, ha='center', va= 'bottom')
for x,y in zip(X,Y3):
plt.text(x+0.6, y, '%.2f' % y, ha='center', va= 'bottom')
plt.ylabel('Percentage')
plt.ylim(0,+1)
plt.legend()
plt.title('Word2Vec Result')
plt.show()
Y1 = np.array(LR_result[:4])
Y2 = np.array(RF_result[:4])
Y3 = np.array(XG_result[:4])
from matplotlib import pyplot as plt
plt.figure(figsize=(11,6))
n = 4
X = np.arange(n)+1
plt.xticks([1.3,2.3,3.3,4.3],[r'$Accuracy$', r'$Precision$', r'$Recall$',r'$F1-score$'])
plt.bar(X,Y1,width = 0.3,facecolor = 'lightskyblue',edgecolor = 'white',label='LR')
plt.bar(X+0.3,Y2,width = 0.3,facecolor = 'yellowgreen',edgecolor = 'white',label='RF')
plt.bar(X+0.6, Y3, width = 0.3,facecolor = 'coral',edgecolor = 'white',label='XG')
for x,y in zip(X,Y1):
plt.text(x, y, '%.2f' % y, ha='center', va= 'bottom')
for x,y in zip(X,Y2):
plt.text(x+0.3, y, '%.2f' % y, ha='center', va= 'bottom')
for x,y in zip(X,Y3):
plt.text(x+0.6, y, '%.2f' % y, ha='center', va= 'bottom')
plt.ylabel('Percentage')
plt.ylim(0,+1)
plt.legend()
plt.title('Doc2Vec Result')
plt.show()
###Output
_____no_output_____ |
R_Li_w9_assn.ipynb | ###Markdown
***Task 1 and 2***
###Code
#Creating .csv file, loading to GitHub repository and reading from GitHub to Jupyter
aa =pd.read_csv("https://raw.githubusercontent.com/ak47m1a1/DBDA-python/master/Tidying%20and%20Transforming%20Data%20111.csv")
aa
#delete one entire row with NaN value
aa1=aa.dropna(thresh=2)
aa1
#use ffill to replace NaN to previous name
aa2=aa1.fillna(method='ffill')
aa2
#change columns' names
aa2.columns=['Flights', 'O_D','Los Angeles','Phoenix','San Diego','San Francisco','Seattle']
aa2
#change index number
aa2.rename(index={3:2,4:3},inplace=True)
aa2
#using melt function change wide to long format
aa3=pd.melt(aa2,id_vars=['Flights','O_D'],value_vars=['Los Angeles','Phoenix','San Diego','San Francisco','Seattle'])
aa3
#change long format's columns' names
aa3.rename(columns={'variable':'Cities', 'value':'Numbers'},inplace=True)
aa3
###Output
_____no_output_____
###Markdown
***Task 3*** **For each city, which airline had the best on time performance?**
###Code
#ontime numbers' table
ontime=aa3[aa3['O_D']=='on time'][['Flights','Cities','Numbers']]
ontime
#adding column to ontime table
ontime['Percentage']= 'wait'
ontime
#total numbers table
total=aa3.groupby(['Flights','Cities']).sum()['Numbers'].to_frame()
total
#Adding one column of percentage to the total table
total['Percentage']='wait'
total
#Using merge function to merge ontime and total table
table=pd.merge(ontime,total,on=['Flights','Cities','Percentage'],how='outer', suffixes=('ontime','total'))
table
#using formula to calculate percentage number
table['Percentage']=(table.Numbersontime/table.Numberstotal)*100
table
###Output
_____no_output_____
###Markdown
*Conclusion* *From the above chart, we can see that Alaska has a better on-time percentage than Amwest in every city.* **Which airline had the best overall on time performance?**
###Code
#total numbers for two flights only
total1=total.groupby(['Flights']).sum()['Numbers'].to_frame()
total1
#total ontime numbers for two flights only
ontime1=ontime.groupby(['Flights']).sum()['Numbers'].to_frame()
ontime1
#merge and change columns' names
best=pd.merge(ontime1,total1,on=['Flights'],how='outer', suffixes=('ontime','total'))
best
#create new column of percentage and calculate the percentage
best['p']=(best.Numbersontime/best.Numberstotal)*100
best
###Output
_____no_output_____
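###Markdown
Before drawing the overall conclusion, it helps to look at how each airline's flight volume is distributed across cities (an exploratory sketch using the `total` table built above); a very uneven mix is the usual reason an aggregate ranking can differ from the per-city one.
###Code
# Exploratory sketch: flights per airline and city, from the totals computed above
volume = total.reset_index().pivot(index='Flights', columns='Cities', values='Numbers')
volume
###Output
_____no_output_____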
###Markdown
*Conclusion* *In contrast to the previous question, Amwest has a better overall on-time percentage than Alaska, even though Alaska is better in every individual city; this is only possible because the two airlines distribute their flights very differently across the cities (compare the flight-volume table above).* ***Task 4***
###Code
#copy original data
aa3
#using pivot function to convert long format to wide format
aa11=pd.pivot_table(aa3,values='Numbers',index=['Flights','O_D'],columns=['Cities'])
aa11
#Using unstack function to unstack original wide format table by Flights
aa11.unstack('Flights')
###Output
_____no_output_____ |
pan_genome/code/pan_genome_analysis.ipynb | ###Markdown
Objectives: 1. produce a gene occurrence figure to show the distribution of genes in the pan-genome among different genomes; 2. produce a gene presence/absence table; 3. produce a list of fasta files, each of which contains the protein sequences of one ortholog group.
###Code
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from Bio import SeqIO
import os
def process_OG_line(line):
# line is a line in output file of orthomcl. Each line is a ortholog group
# return og_id and genome list
cont = line.strip().split(':')
return cont[0].strip(),[item.split('_')[0] for item in cont[1].split()]
def load_genomes_in_each_cluster(clsterfile):
# input clsterfile: is a cluter file generated by orthomcl
# return a dictionary {OG_id:list(unique genomes)}
#
og_genoms = dict()
for line in open(clsterfile):
if ':' not in line: continue
og_id,genoms = process_OG_line(line)
og_genoms[og_id] = list(set(genoms))
print('Number of gene groups:',len(og_genoms))
return og_genoms
def calculate_gene_occurence(og_genoms):
# return a list which contains the gene occruency of each gene (group)
#
# 1.get total number of genomes
all_genomes = []
for genoms in og_genoms.values(): all_genomes+=genoms
tot_genome_num = float(len(set(all_genomes)))
print('Number of genomes:',tot_genome_num)
occrs = [len(genoms)/tot_genome_num for genoms in og_genoms.values()]
return occrs
def plot_gene_occurence(og_genoms,outname):
# produce a histogram
#
occrs = calculate_gene_occurence(og_genoms)
plt.figure(figsize=(4,3))
plt.hist(occrs,50)
plt.xlabel('Frequency of occurence')
plt.ylabel('Count')
plt.yscale('log')
plt.tight_layout()
plt.savefig(outname)
plt.show()
###Output
_____no_output_____
###Markdown
1. Gene occurrence
###Code
og_genoms = load_genomes_in_each_cluster('../data/orthomcl_output/orthomcl_clusters.txt')
plot_gene_occurence(og_genoms,'../figures/pan_genome_gene_occurence.pdf')
###Output
Number of gene groups: 233478
Number of genomes: 343.0
###Markdown
2. Gene presence/absence table. Index: species name. Columns: S288C genes (for orthogroups without an S288C gene, the ortholog-group ID is used). For orthogroups with multiple S288C genes, the first one encountered is used.
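As a usage sketch (assuming the `gene_pa_table.csv` written by the cell below already exists), the table can be loaded to count core genes, i.e. genes present in every genome, versus accessory genes:
###Code
# Usage sketch: run after gene_pa_table.csv has been produced by the next cell
import pandas as pd

pa = pd.read_csv('../data/orthomcl_output/gene_pa_table.csv', index_col=0)
n_genomes = pa.shape[0]
core = int((pa.sum(axis=0) == n_genomes).sum())   # genes present in all genomes
accessory = pa.shape[1] - core                    # genes missing from at least one genome
print('core genes:', core, 'accessory genes:', accessory)
###Output
_____no_output_____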
###Code
def load_idmap(idmapfile):
# idmapfile: orthomcl_SeqIDs_index.txt
gid2orgid = dict()
for line in open(idmapfile):
cont = line.strip().split(':')
gid2orgid[cont[0].strip()] = cont[1].strip()
print(list(gid2orgid.items())[:10])
print('Number of genes:',len(gid2orgid))
return gid2orgid
def produce_gene_pa_table(orthofile,idmapfile,outname):
# orthofile: orthomcl_clusters.txt
gid2orgid = load_idmap(idmapfile)
org_ref = dict() # {(org,ref_id):True}
orgs = dict()
refs = dict()
for group in open(orthofile):
cont = group.strip().split(':')
orthoid = cont[0].strip()
genes = [item for item in cont[1].split()]
# find one reference gene
ref = orthoid
for gene in genes:
org = gid2orgid[gene].split('@')[0]
orgs[org] = True
if org == 'Saccharomyces_cerevisiae':
ref = gid2orgid[gene].split('@')[1]
break
refs[ref] = True
for gene in genes:
org = gid2orgid[gene].split('@')[0]
org_ref[(org,ref)] = True
print('Number of gene pairs:',len(org_ref))
# produce a tsv file
index = list(orgs.keys())
index.sort()
columns = list(refs.keys())
columns.sort()
data = np.zeros((len(index),len(columns)))
for i,org in enumerate(index):
for j,ref in enumerate(columns):
if org_ref.get((org,ref),False): data[i,j] = 1
df = pd.DataFrame(data=data,index=index,columns=columns,dtype=int)
print(df.shape)
df.to_csv(outname)
def produce_one_fasta_for_each_gene_cluster(fafile,orthofile,idmapfile,outdir):
# load seqs
seqs = SeqIO.to_dict(SeqIO.parse(fafile,'fasta'))
gid2orgid = load_idmap(idmapfile)
for group in open(orthofile):
cont = group.strip().split(':')
orthoid = cont[0].strip()
genes = [item for item in cont[1].split()]
if len(genes)<2: continue
fhand = open(os.path.join(outdir,'{0}.fasta'.format(orthoid)),'w')
sub_seqs = [seqs[gid2orgid[gene]] for gene in genes]
SeqIO.write(sub_seqs,fhand,'fasta')
fhand.close()
produce_gene_pa_table('../data/orthomcl_output/orthomcl_clusters.txt',
'../data/orthomcl_output/orthomcl_SeqIDs_index.txt',
'../data/orthomcl_output/gene_pa_table.csv')
outdir = '../data/orthomcl_output/gene_clusters'
if not os.path.exists(outdir): os.mkdir(outdir)
produce_one_fasta_for_each_gene_cluster('../data/orthomcl_output/343taxa_proteins.fasta',
'../data/orthomcl_output/orthomcl_clusters.txt',
'../data/orthomcl_output/orthomcl_SeqIDs_index.txt',
outdir)
###Output
[('186_7100', 'yHMPu5000034631_Martiniozyma_abiesophila@Seq_7101'), ('272_4809', 'yHMPu5000035048_Barnettozyma_salicaria@Seq_4810'), ('84_6277', 'Metschnikowia_matae@Seq_6278'), ('74_1964', 'Metschnikowia_continentalis@Seq_1965'), ('49_2508', 'Kazachstania_naganishii@Seq_2509'), ('339_4404', 'yHMPu5000041862_Candida_golubevii@Seq_4405'), ('79_1102', 'Metschnikowia_hibisci@Seq_1103'), ('21_2590', 'Candida_infanticola@Seq_2591'), ('278_782', 'yHMPu5000035268_Wickerhamomyces_hampshirensis@Seq_783'), ('277_5417', 'yHMPu5000035261_Candida_ponderosae@Seq_5418')]
Number of genes: 2012541
|
examples/notebooks/34_add_points_from_xy.ipynb | ###Markdown
Uncomment the following line to install [leafmap](https://leafmap.org) if needed.
###Code
# !pip install leafmap
import leafmap
import pandas as pd
# leafmap.update_package()
###Output
_____no_output_____
###Markdown
Using a CSV file containing xy coordinates
###Code
m = leafmap.Map()
data = 'https://raw.githubusercontent.com/giswqs/leafmap/master/examples/data/us_cities.csv'
m.add_points_from_xy(data, x="longitude", y="latitude")
m
###Output
_____no_output_____
###Markdown
Using a Pandas DataFrame containing xy coordinates.
###Code
m = leafmap.Map()
data = 'https://raw.githubusercontent.com/giswqs/leafmap/master/examples/data/us_cities.csv'
df = pd.read_csv(data)
m.add_points_from_xy(df, x="longitude", y="latitude")
m
###Output
_____no_output_____
Project-Euler/ProjectEuler1.ipynb | ###Markdown
Project Euler: Problem 1 If we list all the natural numbers below 10 that are multiples of 3 or 5, we get 3, 5, 6 and 9. The sum of these multiples is 23.Find the sum of all the multiples of 3 or 5 below 1000.
###Code
result = 0
for i in range(0,1000):
if i % 3 == 0 or i % 5 == 0:
result = result + i
print("The sum of all the multiples of 3 or 5 below 1000 is " + str(result))
###Output
The sum of all the multiples of 3 or 5 below 1000 is 233168
###Markdown
Above: I use a for loop to run through all numbers i from 0 to 999 using range(0,1000), then an if statement with the modulo operator to check whether i is divisible by 3 or 5. If it is, I add i to the result.
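For comparison, a closed-form sketch using inclusion–exclusion and the arithmetic-series formula gives the same answer without looping:
###Code
# Closed-form sketch: sum of multiples of k below `limit` is k * m * (m + 1) / 2, with m = (limit - 1) // k
def sum_multiples_below(k, limit):
    m = (limit - 1) // k
    return k * m * (m + 1) // 2

# Inclusion-exclusion: multiples of 3, plus multiples of 5, minus multiples of 15 (counted twice)
total = (sum_multiples_below(3, 1000)
         + sum_multiples_below(5, 1000)
         - sum_multiples_below(15, 1000))
assert total == 233168  # matches the loop-based result above
###Output
_____no_output_____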
###Code
print(sum(i for i in range(1000) if i % 3 == 0 or i % 5 == 0))
# This cell will be used for grading, leave it at the end of the notebook.
###Output
_____no_output_____ |