path | concatenated_notebook
---|---
Notebooks/ImageColorizer.ipynb | ###Markdown
All Imports Merged
###Code
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
from skimage.color import rgb2lab, lab2rgb
from skimage.io import imread
from skimage.transform import resize
import sklearn.neighbors as ne
from sklearn.model_selection import train_test_split
import scipy.misc
from math import sqrt, pi
import time
import os
from os import listdir, walk
from os.path import join, isfile, isdir
import pdb
import random
import sys
import getopt
# http://pytorch.org/
from os.path import exists
from wheel.pep425tags import get_abbr_impl, get_impl_ver, get_abi_tag
platform = '{}{}-{}'.format(get_abbr_impl(), get_impl_ver(), get_abi_tag())
cuda_output = !ldconfig -p|grep cudart.so|sed -e 's/.*\.\([0-9]*\)\.\([0-9]*\)$/cu\1\2/'
accelerator = cuda_output[0] if exists('/dev/nvidia0') else 'cpu'
!pip install -q http://download.pytorch.org/whl/{accelerator}/torch-0.4.1-{platform}-linux_x86_64.whl torchvision
import torch
from torch.utils.data import Dataset
import torchvision.datasets as dsets
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from google.colab import drive
!pip install --no-cache-dir -I pillow
from IPython.display import Math, HTML
display(HTML("<script src='https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.3/"
"latest.js?config=default'></script>"))
cuda = True if torch.cuda.is_available() else False
drive.mount('/content/gdrive')
#defining the main path of the drive where all contents are saved.
StatePath = "gdrive/My Drive/AIProject/PytorchVersion"
DatasetPath = StatePath+"/flowers"
os.makedirs(StatePath, exist_ok=True)
os.makedirs(StatePath+"/states", exist_ok=True)
###Output
_____no_output_____
###Markdown
Hyper Parameters
###Code
epochs = 1000
batch_size = 10
imageSize = 128
learningRate = 0.001
print_freq = 10
save_freq = 2
###Output
_____no_output_____
###Markdown
Color Utilities The $ab$ colorspace was quantized into bins with grid size 10; the number of quantized $ab$ values is $Q = 313$, and these quantized values are stored in $\texttt{pts\_in\_hull.npy}$. The following class, $\texttt{NNEncode}$, implements the key functions discussed in the research paper.---The function $\texttt{imgEncodeTorch}$ implements the $H_{gt}^{-1}$ mapping, which converts the ground-truth colors into a vector $Z$ using a soft encoding scheme: the ground-truth $ab$ values are encoded against the quantized $ab$ bins loaded from $\texttt{pts\_in\_hull.npy}$.
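As a sketch of the soft encoding implemented below: for each ground-truth $ab$ pixel, the $NN=5$ nearest quantized bins receive Gaussian weights $$w_i = \frac{\exp\left(-d_i^{2}/2\sigma^{2}\right)}{\sum_{j=1}^{NN}\exp\left(-d_j^{2}/2\sigma^{2}\right)}, \qquad \sigma = 5,$$ where $d_i$ is the distance from the pixel's $ab$ value to its $i^{th}$ nearest bin; all other bins receive weight $0$. This mirrors the $\texttt{dists}$/$\texttt{weights}$ computation inside $\texttt{imgEncodeTorch}$.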
###Code
class NNEncode():
def __init__(self, NN=5, sigma=5, km_filepath=join(StatePath, 'static', 'pts_in_hull.npy'), train=True, location='cuda'):
self.cc = np.load(km_filepath)
self.NN = int(NN)
self.sigma = sigma
self.nbrs = ne.NearestNeighbors(
n_neighbors=NN, algorithm='ball_tree').fit(self.cc)
if train:
self.weights = torch.load(StatePath+'/static/weights_test')
if ('cuda' in location):
self.weights = self.weights.cuda()
# computes soft encoding of ground truth ab image, multiplied by weight (for class rebalancing)
#for training
def imgEncodeTorch(self, abimg):
abimg = abimg.cuda()
w, h = abimg.shape[1], abimg.shape[2]
label = torch.zeros((w*h, 313))
label = label.cuda()
(dists, indexes) = self.nbrs.kneighbors(
abimg.view(abimg.shape[0], -1).t(), self.NN)
dists = torch.from_numpy(dists).float().cuda()
indexes = torch.from_numpy(indexes).cuda()
weights = torch.exp(-dists**2/(2*self.sigma**2)).cuda()
weights = weights/torch.sum(weights, dim=1).view(-1, 1)
pixel_indexes = torch.Tensor.long(torch.arange(
start=0, end=abimg.shape[1]*abimg.shape[2])[:, np.newaxis])
pixel_indexes = pixel_indexes.cuda()
label[pixel_indexes, indexes] = weights
label = label.t().contiguous().view(313, w, h)
rebal_indexes = indexes[:, 0]
rebal_weights = self.weights[rebal_indexes]
rebal_weights = rebal_weights.view(w, h)
rebal_label = rebal_weights * label
return rebal_label
def bin2color(self, idx):
return self.cc[idx]
def uint_color2tanh_range(img):
return img / 128.0 - 1.0
def tanh_range2uint_color(img):
return (img * 128.0 + 128.0).astype(np.uint8)
def modelimg2cvimg(img):
cvimg = np.array(img[0, :, :, :]).transpose(1, 2, 0)
return tanh_range2uint_color(cvimg)
###Output
_____no_output_____
###Markdown
This function is implemented to save sample results periodically during training (every $\texttt{save\_freq}$ epochs) and show us how the model is learning to colorize an image.
###Code
def sample_image(grayImage, predImage, actualImage, batch, index):
gen_imgs = np.concatenate((predImage, actualImage), axis=1)
os.makedirs(StatePath+"/images/"+str(batch), exist_ok=True)
scipy.misc.imsave(StatePath+"/images/"+str(batch)+"/"+str(index)+'.jpg', gen_imgs)
###Output
_____no_output_____
###Markdown
Making Dataset The following class is used to build the train, validation, and test datasets.
###Code
class CustomImages(Dataset):
def __init__(self, root, train=True, val=False, color_space='lab', transform=None, test_size=0.1, val_size=0.125, location='cuda'):
self.root_dir = root
all_files = []
for r, _, files in walk(self.root_dir):
for f in files:
if f.endswith('.jpg'):
all_files.append(join(r, f))
train_val_files, test_files = train_test_split(
all_files, test_size=test_size, random_state=69)
train_files, val_files = train_test_split(train_val_files,
test_size=val_size, random_state=69)
if (train and val):
self.filenames = val_files
elif train:
self.filenames = train_files
else:
self.filenames = test_files
self.color_space = color_space
if (self.color_space not in ['rgb', 'lab']):
raise(NotImplementedError)
self.transform = transform
self.location = location
self.nnenc = NNEncode(location=self.location)
self.train = train
def __len__(self):
return len(self.filenames)
def __getitem__(self, idx):
img = imread(self.filenames[idx])
if self.color_space == 'lab':
img = rgb2lab(img)
if self.transform is not None:
img = self.transform(img)
bwimg = img[:, :, 0:1].transpose(2, 0, 1)
bwimg = torch.from_numpy(bwimg).float()
abimg = img[:, :, 1:].transpose(2, 0, 1) # abimg dim: 2, h, w
abimg = torch.from_numpy(abimg).float()
label = -1
if (self.train):
if ('cuda' in self.location):
label = self.nnenc.imgEncodeTorch(abimg)
#else:
# label = self.nnenc.imgEncode(abimg)
return (bwimg, label, abimg)
###Output
_____no_output_____
###Markdown
If an image is larger than the target size of 128 by 128, we rescale (and crop) it using the following transform.
###Code
class Rescale(object):
def __init__(self, output_size):
assert isinstance(output_size, (int, tuple))
self.output_size = output_size
def __call__(self, sample):
image = sample
h, w = image.shape[:2]
if isinstance(self.output_size, int):
if h > w:
new_h, new_w = self.output_size * h / w, self.output_size
else:
new_h, new_w = self.output_size, self.output_size * w / h
else:
new_h, new_w = self.output_size
new_h, new_w = int(new_h), int(new_w)
img = resize(image, (new_h, new_w))[:self.output_size, :self.output_size, :]
return img
###Output
_____no_output_____
###Markdown
Class Rebalancing The loss function is dominated by desaturated $ab$ values when the distribution of $ab$ values is strongly biased towards low $ab$ values. This bias is removed by reweighting the loss of each pixel at train time based on the rarity of its color: each pixel is weighted by a factor $w \in \mathbb{R}^Q$ determined by its closest $ab$ bin.
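Concretely, with $Q = 313$ bins, empirical bin probabilities $\tilde{p}$ and mixing parameter $\lambda = 0.5$, the weights computed by $\texttt{cal\_emp\_weights}$ below follow the paper's smoothed inverse-frequency formula $$w \propto \left((1-\lambda)\,\tilde{p} + \frac{\lambda}{Q}\right)^{-1}, \qquad \sum_{q}\tilde{p}_q\, w_q = 1.$$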
###Code
# calculate the weight for each bin based on empirical probability, for class rebalancing
# only needs to be run once
def cal_emp_weights(dset, bins_num=313, sigma=5, lamda=0.5):
cc = np.load(os.path.join(StatePath, 'static', 'pts_in_hull.npy'))
nbrs = ne.NearestNeighbors(n_neighbors=1, algorithm='ball_tree').fit(cc)
bins_prob = torch.zeros(bins_num)
print('Dataset length:', len(dset))
for i in range(len(dset)):
if (i%100==0):
print('Reading Image:', i)
_, _, abimg = dset[i]
_, indexes = nbrs.kneighbors(abimg.view(abimg.shape[0],-1).t(), 1)
bins_prob[torch.from_numpy(indexes).view(-1)] += 1
bins_sum = bins_prob.sum()
bins_prob /= bins_sum
w = 1/((1 - lamda) * bins_prob + lamda / bins_num)
w /= ((bins_prob * w).sum())
torch.save(w, StatePath+'/static/weights_test')
return w
entire_dataset = CustomImages(DatasetPath, train=True, test_size=0.1, val_size=0) #40 images for test
print("final lenght",len(entire_dataset))
a = cal_emp_weights(entire_dataset, 313)
###Output
final length 1440
Dataset length: 1440
Reading Image: 0
Reading Image: 100
Reading Image: 200
Reading Image: 300
Reading Image: 400
Reading Image: 500
Reading Image: 600
Reading Image: 700
Reading Image: 800
Reading Image: 900
Reading Image: 1000
Reading Image: 1100
Reading Image: 1200
Reading Image: 1300
Reading Image: 1400
###Markdown
Loss Function Euclidean loss is not robust to the inherent ambiguity and multimodal nature of the colorization problem. If an image can take on a set of distinct $ab$ values, the optimal solution under the Euclidean loss is the mean of that set; in color prediction this averaging effect favors grayish, desaturated results. The research paper therefore uses a multinomial cross-entropy loss to avoid desaturating the images.
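As implemented in $\texttt{MultinomialCELoss}$ below, the loss is the cross entropy between the soft-encoded ground truth $Z$ and the predicted distribution $\hat Z$, averaged over the $N \times H \times W$ pixels: $$L(\hat Z, Z) = -\frac{1}{N H W}\sum_{n,h,w}\sum_{q} Z_{n,h,w,q}\,\log\!\left(\hat Z_{n,h,w,q} + \epsilon\right),$$ where a small $\epsilon$ guards against $\log 0$; note that the class-rebalancing weights $v(\cdot)$ are already folded into $Z$ by $\texttt{imgEncodeTorch}$.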
###Code
class MultinomialCELoss(nn.Module):
def __init__(self):
super(MultinomialCELoss, self).__init__()
# x dim: n, q, h, w
# y dim: n, q, h, w
# n number of cases
# h, w height width
# q number of bins
# output: loss, as a float
def forward(self, x, y):
# the network output is already softmax-normalized (op_9 in the architecture below)
x = x + 1e-8 # add a small epsilon to avoid taking log(0)
x = torch.log(x)
zlogz = y*x
loss = - zlogz.sum()
loss /= (x.shape[0] * x.shape[2] * x.shape[3])
return loss
###Output
_____no_output_____
###Markdown
CNN architecture This architecture stacks multiple convolutional blocks and maps each image pixel to a probability distribution of depth $313$; this output is denoted $\hat Z$ in the research paper. The probability distribution that the model learns is then evaluated with the multinomial loss function described above: $$L_{cl}(\hat Z, Z) = -\sum_{h,w} v(Z_{h,w}) \sum_{q} Z_{h,w,q} \log \hat Z_{h,w,q}$$
###Code
class ColorfulColorizer(nn.Module):
def __init__(self):
super(ColorfulColorizer, self).__init__()
self.op_1 = nn.Sequential(
nn.Conv2d(1, 64, kernel_size=3, padding=1),
nn.ReLU(),
nn.Conv2d(64, 64, kernel_size=3, stride=2, padding=1),
nn.ReLU(),
nn.BatchNorm2d(64),
)
self.op_2 = nn.Sequential(
nn.Conv2d(64, 128, kernel_size=3, padding=1),
nn.ReLU(),
nn.Conv2d(128, 128, kernel_size=3, stride=2, padding=1),
nn.ReLU(),
nn.BatchNorm2d(128)
)
self.op_3 = nn.Sequential(
nn.Conv2d(128, 256, kernel_size=3, padding=1),
nn.ReLU(),
nn.Conv2d(256, 256, kernel_size=3, stride=2, padding=1),
nn.ReLU(),
nn.BatchNorm2d(256)
)
self.op_4 = nn.Sequential(
nn.Conv2d(256, 512, kernel_size=3, padding=1),
nn.ReLU(),
nn.Conv2d(512, 512, kernel_size=3, padding=1),
nn.ReLU(),
nn.Conv2d(512, 512, kernel_size=3, padding=1),
nn.ReLU(),
nn.BatchNorm2d(512)
)
self.op_5 = nn.Sequential(
nn.Conv2d(512, 512, kernel_size=3, padding=2, dilation=2),
nn.ReLU(),
nn.Conv2d(512, 512, kernel_size=3, padding=2, dilation=2),
nn.ReLU(),
nn.Conv2d(512, 512, kernel_size=3, padding=2, dilation=2),
nn.ReLU(),
nn.BatchNorm2d(512)
)
self.op_6 = nn.Sequential(
nn.Conv2d(512, 512, kernel_size=3, padding=2, dilation=2),
nn.ReLU(),
nn.Conv2d(512, 512, kernel_size=3, padding=2, dilation=2),
nn.ReLU(),
nn.Conv2d(512, 512, kernel_size=3, padding=2, dilation=2),
nn.ReLU(),
nn.BatchNorm2d(512)
)
self.op_7 = nn.Sequential(
nn.Conv2d(512, 512, kernel_size=3, padding=1),
nn.ReLU(),
nn.Conv2d(512, 512, kernel_size=3, padding=1),
nn.ReLU(),
nn.Conv2d(512, 512, kernel_size=3, padding=1),
nn.ReLU(),
nn.BatchNorm2d(512)
)
self.op_8 = nn.Sequential(
nn.UpsamplingBilinear2d(scale_factor=2),
nn.Conv2d(512, 256, kernel_size=3, padding=1),
nn.ReLU(),
nn.Conv2d(256, 256, kernel_size=3, padding=1),
nn.ReLU(),
nn.Conv2d(256, 256, kernel_size=3, padding=1),
nn.ReLU(),
nn.Conv2d(256, 313, kernel_size=1),
nn.UpsamplingBilinear2d(scale_factor=4)
)
self.op_9 = nn.Sequential(
nn.Softmax(dim=1)
)
self.op_1.apply(self.init_weights)
self.op_2.apply(self.init_weights)
self.op_3.apply(self.init_weights)
self.op_4.apply(self.init_weights)
self.op_5.apply(self.init_weights)
self.op_6.apply(self.init_weights)
self.op_7.apply(self.init_weights)
self.op_8.apply(self.init_weights)
def init_weights(self, m):
if type(m) == nn.Conv2d:
torch.nn.init.xavier_uniform_(m.weight)
m.bias.data.fill_(0.01)
def forward(self, x):
out = self.op_1(x)
out = self.op_2(out)
out = self.op_3(out)
out = self.op_4(out)
out = self.op_5(out)
out = self.op_6(out)
out = self.op_7(out)
out = self.op_8(out)
out = self.op_9(out)
return out
###Output
_____no_output_____
###Markdown
Main - Training Data
###Code
def main(dset_root, batch_size, num_epochs, print_freq, encoder, criterion,
optimizer, step_every_iteration=False):
continue_training = True
location = 'cuda'
rescale = Rescale(imageSize)
train_dataset = CustomImages(
root=dset_root, train=True, location=location, transform=rescale, test_size=0)
val_dataset = CustomImages(
root=dset_root, train=True, val=True, location=location, transform=rescale) #val files
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
batch_size=batch_size,
shuffle=True)
val_loader = torch.utils.data.DataLoader(dataset=val_dataset,
batch_size=batch_size,
shuffle=True)
if continue_training and os.path.isfile('best_model.pkl'):
encoder.load_state_dict(torch.load(
'best_model.pkl', map_location=location))
print('Model loaded!')
if 'cuda' in location:
print('Using:', torch.cuda.get_device_name(torch.cuda.current_device()))
encoder.cuda()
criterion.cuda()
best_loss = 100
losses = []
for epoch in range(num_epochs):
# train for one epoch
epoch_losses = train(train_loader, encoder, criterion, optimizer, epoch, location, step_every_iteration, num_epochs, print_freq)
losses.append(epoch_losses)
if epoch % save_freq == 0:
save_checkpoint(encoder.state_dict(), str(epoch)+".pkl")
save_model_results(train_dataset, encoder, epoch)
# coloring 5 random images and saving the output
# evaluate on validation set
val_loss = validate(val_loader, encoder, criterion, location, num_epochs, print_freq)
# if (not step_every_iteration):
# scheduler.step(val_loss.data.item())
is_best = val_loss.data.item() < best_loss
if is_best:
print('New best score! Model saved as best_model.pkl')
best_loss = val_loss.data.item()
save_checkpoint(encoder.state_dict(), is_best)
return losses
def save_checkpoint(state, is_best=False, filename='colorizer2.pkl'):
torch.save(state, StatePath+"/states/"+filename)
if is_best:
torch.save(state, 'best_model.pkl')
###Output
_____no_output_____
###Markdown
After computing the loss between the ground-truth encoded/quantized $ab$ space and the learned probability distribution $\hat Z$, the $ab$ colorspace of an image is predicted by taking the annealed mean of the learned distribution. Taking the plain mean of this distribution would pose the same problem as the Euclidean loss: desaturated images. Hence a function $H(\hat Z_{h,w})$, which takes the learned probability distribution as input, is implemented as described in the research paper; it outputs the annealed mean of the distribution for every pixel. This gives the predicted $ab$ colorspace for the image, which is then converted to the RGB colorspace to produce the results. According to the research paper, a temperature value $T = 0.38$ captures the vibrancy of the mode while maintaining the spatial coherence of the mean.
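For reference, the annealed mean used in the code below is $$f_T(z) = \frac{\exp(\log z / T)}{\sum_{q}\exp(\log z_q / T)}, \qquad \hat{ab}_{h,w} = \sum_{q} f_T(\hat Z_{h,w})_q \cdot ab_q, \qquad T = 0.38,$$ i.e. the predicted distribution is sharpened by the exponent $1/T$, re-normalized, and the quantized $ab$ values are then averaged under the sharpened distribution.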
###Code
def save_model_results(dset, model, batchesDone, location='cuda'):
test_cases = np.floor(np.random.rand(5) * len(dset)).astype(int)
test_cases = np.append(test_cases, [0], 0)
outputs = []
images = []
labels = []
for c in test_cases:
image,_, label = dset[c]
image = image.unsqueeze(0)
with torch.no_grad():
if 'cuda' in location:
image = image.cuda()
label = label.cuda()
images.append(image)
labels.append(label)
output = model(image)
outputs.append(output)
T = 0.38
q = 313 # number of colours
nnenc = NNEncode()
bin_index = np.arange(q)
ab_list = nnenc.bin2color(bin_index)
for i in range(len(test_cases)):
l_layer = images[i].data[0].cpu().numpy()
bin_probabilities = outputs[i].data[0].cpu().numpy() # bin_probabilities dim: q, h, w
ab_label = labels[i].data.cpu().numpy().astype('float64')
# convert bin_probab -> ab_pred
bin_probabilities = np.exp(np.log(bin_probabilities)/T)
bin_sum = bin_probabilities.sum(0)
bin_sum = bin_sum.reshape((1, bin_sum.shape[0], bin_sum.shape[1]))
bin_probabilities /= bin_sum
# ab_pred dim: 2, h, w
ab_pred = (bin_probabilities[:, np.newaxis, :, :] * ab_list[:, :, np.newaxis, np.newaxis]).sum(0)
img_input = l_layer[0]
# img_input = np.concatenate((l_layer, torch.zeros([2,128,128])), axis=0)
img_pred = np.concatenate((l_layer, ab_pred), axis=0)
img_actual = np.concatenate((l_layer, ab_label), axis=0)
# img_input = lab2rgb(img_input.transpose(1, 2, 0))
img_pred = lab2rgb(img_pred.transpose(1, 2, 0))
img_actual = lab2rgb(img_actual.transpose(1, 2, 0))
sample_image(img_input, img_pred, img_actual, batchesDone, i)
def train(train_loader, model, criterion, optimizer, epoch,
location, step_every_iteration,num_epochs, print_freq):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
epoch_losses = []
# switch to train mode
model.train()
end = time.time()
for i, (image, target, _) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
image_var = Variable(image)
target_var = Variable(target)
if 'cuda' in location:
image_var = image_var.cuda()
target_var = target_var.cuda()
# compute output
output = model(image_var)
loss = criterion(output, target_var)
losses.update(loss.data.item(), image.size(0))
epoch_losses.append(loss.data.item())
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
batchDone = epoch * len(train_loader) + i
if batchDone % print_freq == 0:
print('Epoch: [{0}/{1}][{2}/{3}]\t'
'BatchTime(Average) {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'DataTime(Average) {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss(Average) {loss.val:.4f} ({loss.avg:.4f})\t'
.format(
epoch, num_epochs, i, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses))
return epoch_losses
def validate(val_loader, model, criterion, location,num_epochs, print_freq):
batch_time = AverageMeter()
losses = AverageMeter()
loss = 0
# switch to evaluate mode
model.eval()
end = time.time()
for i, (image, target, _) in enumerate(val_loader):
with torch.no_grad():
image_var = Variable(image)
target_var = Variable(target)
if 'cuda' in location:
image_var = image_var.cuda()
target_var = target_var.cuda()
# compute output
output = model(image_var)
loss = criterion(output, target_var)
losses.update(loss.data.item(), image.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
return loss
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
## Training the model here by calling main() which will run the training loop
dset_root = DatasetPath
encoder = ColorfulColorizer()
criterion = MultinomialCELoss()
optimizer = torch.optim.SGD(encoder.parameters(), lr=learningRate)
main(dset_root, batch_size, epochs, print_freq, encoder, criterion, optimizer)
###Output
Using: Tesla K80
###Markdown
Testing Images from Test Dataset
###Code
rescale = Rescale(imageSize)
test_dataset = CustomImages(
root=DatasetPath, train=False, transform=rescale)
location = 'cuda'
test_cases = np.floor(np.random.rand(5) * len(test_dataset)).astype(int)
test_cases = np.append(test_cases, [0], 0)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
batch_size=batch_size,
shuffle=False)
encoder = ColorfulColorizer()
encoder.load_state_dict(torch.load(StatePath+'/states/colorizer.pkl'))
if 'cuda' in location:
print('Using:', torch.cuda.get_device_name(torch.cuda.current_device()))
encoder.cuda()
encoder.eval()
# encoder.parameters()
outputs = []
images = []
labels = []
for c in test_cases:
print('Encoding image number:', c)
image,_, label = test_dataset[c]
image = image.unsqueeze(0)
with torch.no_grad():
if 'cuda' in location:
image = image.cuda()
label = label.cuda()
images.append(image)
labels.append(label)
print(image.shape)
output = encoder(image)
outputs.append(output)
T = 0.38
q = 313 # number of colours
nnenc = NNEncode()
bin_index = np.arange(q)
print('Getting ab_list')
ab_list = nnenc.bin2color(bin_index) # q, 2
f, axarr = plt.subplots(len(test_cases), 3)
for i in range(len(test_cases)):
l_layer = images[i].data[0].cpu().numpy()
bin_probabilities = outputs[i].data[0].cpu().numpy() # bin_probabilities dim: q, h, w
ab_label = labels[i].data.cpu().numpy().astype('float64')
# convert bin_probab -> ab_pred
bin_probabilities = np.exp(np.log(bin_probabilities)/T)
bin_sum = bin_probabilities.sum(0)
bin_sum = bin_sum.reshape((1, bin_sum.shape[0], bin_sum.shape[1]))
bin_probabilities /= bin_sum
# ab_pred dim: 2, h, w
ab_pred = (bin_probabilities[:, np.newaxis, :, :] * ab_list[:, :, np.newaxis, np.newaxis]).sum(0)
img_input = l_layer[0]
# img_input = np.concatenate((l_layer, torch.zeros([2,128,128])), axis=0)
img_pred = np.concatenate((l_layer, ab_pred), axis=0)
img_actual = np.concatenate((l_layer, ab_label), axis=0)
# img_input = lab2rgb(img_input.transpose(1, 2, 0))
img_pred = lab2rgb(img_pred.transpose(1, 2, 0))
img_actual = lab2rgb(img_actual.transpose(1, 2, 0))
axarr[i][0].imshow(img_input)
axarr[i][1].imshow(img_pred)
axarr[i][2].imshow(img_actual)
sample_image(img_input, img_pred, img_actual, 1, i)
plt.show()
###Output
_____no_output_____ |
media/Create_Db2_Node_JS_Application.ipynb | ###Markdown
[](https://www.ibm.com/demos/collection/db2-database/)
###Code
%run refresh.ipynb
###Output
_____no_output_____
###Markdown
Create Node JS Application on OpenShift Connecting to Db2 This lab will take the user through the steps required to take an application on GitHub and deploy it on Red Hat OpenShift, connecting to Db2. In this hands-on lab you will: 1. Connect to Red Hat OpenShift 2. Install a sample NodeJS application 3. Run the application and connect it to Db2, also running on OpenShift 4. Explore the impact of the application through the Db2 Console How to Copy Code and Examples Throughout this lab there are code samples that need to be copied and modified in a text editor. Any commands that need to be executed from a command line are found in grey boxes (an example is found below) and have been designed to be easily copied.
###Code
%%html
<div style="margin-left: 35px; border-style: solid; border-width: 1px; padding: 10px;background-Color:black;" >
<p style=" color:white ;font-family:courier;background-Color:black"
<pre>
Sample commands are found in cells like this.
</pre>
</div>
###Output
_____no_output_____
###Markdown
Highlight the text and copy it using **Copy** from the right click menu or type Ctrl-C. Paste the command into the Command terminal using **Paste** from the right click menu or type Ctrl-V. It may be easier to keep a terminal window on top of the Jupyter notebook when running these commands. When you have a terminal window displayed, right click on the title bar and select `Always on Top` to keep the screen visible during the duration of the lab. About the Node.js Application you will deploy This application is designed to give you a simple introduction to deploying Node applications on OpenShift that will work with Db2 using the Db2 open-source Node.js drivers. These drivers are kept at https://github.com/ibmdb. The App IBM® Db2 NodeJS Mock Webstore simulates dozens or hundreds of users making online orders separately at the same time, and includes supporting quantities of queries through two connection pools connecting with Db2. This demo is a simple implementation of an application based on the Node.js runtime environment, demonstrating how Node.js applications connect to Db2. It includes examples of running both regular SQL statements as well as JSON capabilities. This lab will combine command line and OpenShift console activities to build the Node.js application. The source for this application is available on GitHub at https://github.com/Db2-DTE-POC/db2-nodejs-web-app. The Virtual Machine Environment There are four virtual machines or nodes in this environment. All are running on CentOS 7 virtual machines. * host-1 (10.0.0.1) **RedHat OpenShift with Db2 11.5.4.0 Cartridge** * Main UserID: db2pot * password: 123qwe123 * server7 (10.0.0.2) **Db2 11.1 on Premises installation** * Main UserID: db2inst1 * password: db2inst1 * host-2 (10.0.0.3) **Db2 Data Management Console 3.1.3 and Db2 11.5 Repository Database** * Main UserID: db2inst1 * password: db2inst1 * host-3 (10.0.0.4) **Jupyter Notebook Environment** * Main UserID: ibmuser * password: engageibm Passwords for this Lab Here are the key passwords you will need to complete the lab. Each virtual machine environment also includes a key icon. Click on the icon to see and copy userids and passwords associated with each virtual machine. Operating system login & Web App Login UserID: db2pot password: 123qwe123 Openshift Login UserID: admin password: redhat Db2 Login UserID: db2inst1 password: db2inst1 Creating the Node.js environment Log into OpenShift from the Command Line 1. You should already be using the **Db2 Source Data Server** Virtual Machine 2. If you have not already done so, log in with the **db2inst1** userid and **db2inst1** password 3. Double-Click the **Terminal** icon 4. From the db2inst1@server7 prompt enter
###Code
%%html
<div style="margin-left: 35px; border-style: solid; border-width: 1px; padding: 10px;background-Color:black;" >
<p style=" color:white ;font-family:courier;background-Color:black"
<pre>
ssh db2pot@host-1
</pre>
</div>
###Output
_____no_output_____
###Markdown
2. Enter the db2pot password **123qwe123** 3. Enter **yes** to continue connecting. You should now see the **db2pot@host-1** command line prompt. 4. Log into OpenShift using the OC command. Log into Red Hat OpenShift Now that you have a terminal logged into your OpenShift machine, you can log into OpenShift.
###Code
%%html
<div style="margin-left: 35px; border-style: solid; border-width: 1px; padding: 10px;background-Color:black" >
<p style=" color:white ;font-family:courier;background-Color:black"
<pre>
oc login -u admin -n db2
</pre>
</div>
###Output
_____no_output_____
###Markdown
Create a new OpenShift Project Create a project for your new application.
###Code
%%html
<div style="margin-left: 35px; border-style: solid; border-width: 1px; padding: 10px;background-Color:black" >
<p style=" color:white ;font-family:courier;background-Color:black"
<pre>
oc new-project db2node
</pre>
</div>
###Output
_____no_output_____
###Markdown
Pull the container for the Node.js application from Docker.
###Code
%%html
<div style="margin-left: 35px; border-style: solid; border-width: 1px; padding: 10px;background-Color:black" >
<p style=" color:white ;font-family:courier;background-Color:black"
<pre>
sudo docker pull centos/nodejs-12-centos7
</pre>
</div>
###Output
_____no_output_____
###Markdown
Deploy the application, which is stored in a GitHub repository: https://github.com/Db2-DTE-POC/db2-nodejs-web-app.
###Code
%%html
<div style="margin-left: 35px; border-style: solid; border-width: 1px; padding: 10px;background-Color:black" >
<p style=" color:white ;font-family:courier;background-Color:black"
<pre>
oc new-app nodejs~https://github.com/Db2-DTE-POC/db2-nodejs-web-app
</pre>
</div>
###Output
_____no_output_____
###Markdown
Monitor the application's deployment progress using the following command.
###Code
%%html
<div style="margin-left: 35px; border-style: solid; border-width: 1px; padding: 10px;background-Color:black;" >
<p style=" color:white ;font-family:courier;background-Color:black"
<pre>
oc get pods
</pre>
</div>
%%html
<div style="margin-left: 35px; border-style: solid; border-width: 1px; padding: 10px;background-Color:black;" >
<p style=" color:white ;font-family:courier;background-Color:black"
<pre>
sudo restorecon -Rv /home/db2pot/db2vol1
</pre>
</div>
###Output
_____no_output_____ |
cellular-automata/01__2D_cellular_automata.ipynb | ###Markdown
Cellular automata An implementation of a **2D cellular automaton**. Experiments with **[Conway's Game of Life](https://en.wikipedia.org/wiki/Conway%27s_Game_of_Life)**. Define the automaton
###Code
class Cellular2D:
def __init__(self, init_state, rule=None, kernel=None):
"""Initilizes a `Cellular2D` object. If `kernel=None`, a Conway's Game of Life (GoL) kernel is used.
This implementation uses a GoL kernel proposed by Allen B. Downey in his book "Think Complexity".
More on Conway's Game of Life: https://en.wikipedia.org/wiki/Conway%27s_Game_of_Life
"""
self.init_state = np.array(init_state)
self.state = np.array(init_state)
self.rule = rule
self.kernel = kernel
self.n_cells = np.sum(np.ones_like(init_state))
if not self.rule:
# Define a Game of Life kernel
# More on the logic behind the implementation:
# https://greenteapress.com/complexity/html/thinkcomplexity008.html#toc49
self.rule = [3, 12, 13]
if not self.kernel:
# Define a kernel
# More on the logic behind the implementation:
# https://greenteapress.com/complexity/html/thinkcomplexity008.html#toc49
self.kernel = np.ones((3, 3))
self.kernel[1, 1] *= 10
self.history = {
'step': [0],
'entropy': [self.get_entropy()],
'percent_living': [np.sum(init_state) / self.n_cells]
}
def get_entropy(self):
x = self.state
p = np.sum(x) / np.sum(np.ones_like(x))
return -(p * np.log(p) + (1 - p) * np.log(1 - p))
def run_step(self):
"""Runs single time step of the automaton."""
correlated = correlate2d(self.state, self.kernel, mode='same')
self.state = np.isin(correlated, self.rule).astype('uint8')
# Update history
self.history['step'].append(self.history['step'][-1] + 1)
self.history['entropy'].append(self.get_entropy())
self.history['percent_living'].append(np.sum(self.state) / self.n_cells)
return self.state
def run(self, n_timesteps, reset_state=False):
"""Runs the automaton for `n_timesteps` steps."""
if reset_state:
self.state = self.init_state
for i in range(n_timesteps):
self.state = self.run_step()
return self.state
###Output
_____no_output_____
###Markdown
Game of Life Initialize
###Code
# Define hyperparams
STEPS = 3000
SIZE = (150, 150)
P = .5
ANIMATION_SPEED = 50
# Define grid init state
init_state = np.random.binomial(1, P, SIZE)
# Initialize the automaton
c2 = Cellular2D(init_state)
###Output
_____no_output_____
###Markdown
Animate
###Code
# Create a blank window
fig = plt.figure(figsize=(7, 7))
axis = plt.axes(xlim =(0, SIZE[0]),
ylim =(0, SIZE[1]))
plt.axis('off')
img = plt.imshow(init_state, interpolation=None)
# Define init function
def init():
ent = c2.history['entropy'][-1]
plt.title(f'Step 1 of {STEPS}\n({100 * 0 / STEPS:0.1f}%)\nEntropy: {ent:0.3f}')
img.set_data(init_state)
return img,
# Define the animate function
def animate(i):
step = c2.run_step()
ent = c2.history['entropy'][-1]
plt.title(f'Step {i + 1} of {STEPS}\n({100*(i + 1) / STEPS:0.1f}%)\nEntropy: {ent:0.3f}')
img.set_data(step)
return img,
# calling the animation function
anim = animation.FuncAnimation(fig, animate, init_func=init,
frames=STEPS, interval=ANIMATION_SPEED, blit=True, repeat=False)
###Output
_____no_output_____
###Markdown
Analyze
###Code
plt.plot(c2.history['entropy'], label='Entropy')
plt.plot(c2.history['percent_living'], label='% living')
plt.legend()
plt.title(f'$p={P}$; $epochs={STEPS}$; size={SIZE}')
plt.show()
###Output
_____no_output_____
###Markdown
Day & Night https://en.wikipedia.org/wiki/Day_and_Night_(cellular_automaton) It is defined by rule notation B3678/S34678, meaning that a dead cell becomes live (is born) if it has 3, 6, 7, or 8 live neighbors, and a live cell remains alive (survives) if it has 3, 4, 6, 7, or 8 live neighbors, out of the eight neighbors in the Moore neighborhood. Initialize
###Code
# Define a rule
rule_dn = [3, 6, 7, 8, 13, 14, 16, 17, 18]
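# For reference (not part of the original notebook): the rule list encodes B/S notation against
# the kernel used by Cellular2D, whose centre weight is 10. A dead centre cell with k live
# neighbours yields a correlation sum of k, a live one yields 10 + k, so B3678/S34678 becomes
# [3, 6, 7, 8] + [13, 14, 16, 17, 18]. A small hypothetical helper:
def bs_to_rule(born, survive):
    return sorted(born) + sorted(10 + s for s in survive)

assert bs_to_rule([3, 6, 7, 8], [3, 4, 6, 7, 8]) == rule_dn
# The Game of Life default in Cellular2D, [3, 12, 13], corresponds to bs_to_rule([3], [2, 3]).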
# Define hyperparams
STEPS = 3000
SIZE = (500, 500)
P = .5
ANIMATION_SPEED = 25
# Define grid init state
init_state = np.random.binomial(1, P, SIZE)
# Initialize the automaton
c2_dn = Cellular2D(init_state, rule=rule_dn)
###Output
_____no_output_____
###Markdown
Animate
###Code
# Create a blank window
fig = plt.figure(figsize=(7, 7))
axis = plt.axes(xlim =(0, SIZE[0]),
ylim =(0, SIZE[1]))
plt.axis('off')
img = plt.imshow(init_state, interpolation=None)
# Define init function
def init():
ent = c2_dn.history['entropy'][-1]
plt.title(f'Step 1 of {STEPS}\n({100 * 0 / STEPS:0.1f}%)\nEntropy: {ent:0.3f}')
img.set_data(init_state)
return img,
# Define the animate function
def animate(i):
step = c2_dn.run_step()
ent = c2_dn.history['entropy'][-1]
plt.title(f'Step {i + 1} of {STEPS}\n({100*(i + 1) / STEPS:0.1f}%)\nEntropy: {ent:0.3f}')
img.set_data(step)
return img,
# calling the animation function
anim = animation.FuncAnimation(fig, animate, init_func=init,
frames=STEPS, interval=ANIMATION_SPEED, blit=True, repeat=False)
###Output
_____no_output_____
###Markdown
Analyze
###Code
plt.plot(c2_dn.history['entropy'], label='Entropy')
plt.plot(c2_dn.history['percent_living'], label='% living')
plt.legend()
plt.title(f'$p={P}$; $epochs={STEPS}$; size={SIZE}')
plt.show()
###Output
_____no_output_____ |
lottery/Lottery.ipynb | ###Markdown
Looking at the previous graphs, it is clear that the distribution of numbers is random and steady.
###Code
#distance between numbers
data['D12'] = data.N2 - data.N1
data['D23'] = data.N3 - data.N2
data['D34'] = data.N4 - data.N3
data['D45'] = data.N5 - data.N4
data['D56'] = data.N6 - data.N5
# number of odd and even values per draw (x % 2 equals 1 for odd numbers)
data['odds'] = data.iloc[:,1:7].apply(lambda x: x % 2).sum(axis=1)
data['evens'] = 6 - data['odds']
data['Timestamp'] = pd.to_datetime(data['Date'] , format='%d/%m/%Y').apply(lambda x: pd.Timestamp(x).value)
data.head()
from sklearn.linear_model import Ridge
from sklearn.model_selection import train_test_split
X, y = data.iloc[:,1:7].values, data.iloc[:,7:14].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
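# A minimal follow-up sketch (an assumption about where the Ridge import above is heading):
# fit a ridge regression on the training split and report the held-out R^2 score.
model = Ridge(alpha=1.0)
model.fit(X_train, y_train)
print("Held-out R^2:", model.score(X_test, y_test))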
###Output
_____no_output_____ |
Homeworks/Extra Credit - Machine Learning Tutoruial/Deep Learning Tutorial.ipynb | ###Markdown
Deep Learning Tutorial Deep learning is a subfield of machine learning built on a set of algorithms inspired by the structure and function of the brain; these algorithms are usually called Artificial Neural Networks (ANN). Deep learning is one of the hottest fields in data science, with many case studies showing remarkable results in robotics, image recognition and Artificial Intelligence (AI). Deep learning is a class of machine learning algorithms that: (1) use a cascade of multiple layers of nonlinear processing units for feature extraction and transformation, where each successive layer uses the output from the previous layer as input; (2) learn in supervised (e.g., classification) and/or unsupervised (e.g., pattern analysis) manners; (3) learn multiple levels of representations that correspond to different levels of abstraction, the levels forming a hierarchy of concepts; and (4) use some form of gradient descent for training via backpropagation. Deep learning exploits this idea of hierarchical explanatory factors, where higher-level, more abstract concepts are learned from the lower-level ones. Deep learning architectures are often constructed with a greedy layer-by-layer method. Deep learning helps to disentangle these abstractions and pick out which features are useful for improving performance. For supervised learning tasks, deep learning methods reduce the need for feature engineering by translating the data into compact intermediate representations akin to principal components, and they derive layered structures that remove redundancy in the representation. Deep learning algorithms can also be applied to unsupervised learning tasks; this is an important benefit because unlabeled data are more abundant than labeled data. Examples of deep structures that can be trained in an unsupervised manner are neural history compressors and deep belief networks. Whereas the perceptron could only represent linear separations between classes, the multi-layer perceptron overcomes that limitation and can also represent more complex decision boundaries. For this analysis I have used data on pollution-causing agents in different cities.
###Code
import os
import pandas as pd
import numpy as np
from numpy import *
data = pd.read_table("Pollution.csv", sep=',', header=None)
data.head()
from sklearn.decomposition import PCA
from sklearn import preprocessing
le = preprocessing.LabelEncoder()
a =le.fit_transform(data[0])
data[0]=a
data[0].head()
data.loc[[0],0:1] = 1,2
data.loc[[0],2:3] = 3,4
data.loc[[0],4:5] = 5,6
data.loc[[0],6:7] = 7,8
data.loc[[0],8:9] = 9,10
data.loc[[0],10:11] = 11,12
data.head()
list(data.columns.values)
###Output
_____no_output_____
###Markdown
Data Visualization
###Code
import matplotlib.pyplot as plt
data=data.astype(float)
data.plot(x=1, y=2, style='o')
plt.xlabel("CITY")
plt.ylabel("NO2 Mean")
plt.title("effect of Nitous oxide on city")
plt.show()
data=data.astype(float)
data.plot(x=1, y=6, style='o')
plt.xlabel("CITY")
plt.ylabel("O2 content")
plt.title("effect of oxygen on city")
plt.show()
###Output
_____no_output_____
###Markdown
Preprocess Data It's time to act upon the insights that you have gained! Let's preprocess the data so that you can start building your own neural network. Correlation Matrix: Now that you have the full data set, it's a good idea to do some quick data exploration. You already know a few things from looking at the data set, and now it's time to gather some more solid insights. Since it can be difficult to interpret many graphs at once, it's also a good idea to plot a correlation matrix. This will show more quickly which variables correlate:
###Code
import seaborn as sns
corr = data.corr()
sns.heatmap(corr,
xticklabels=corr.columns.values,
yticklabels=corr.columns.values)
###Output
_____no_output_____
###Markdown
Train and Test Sets Imbalanced data typically refers to classification problems where the classes are not represented equally. Most classification data sets do not have exactly the same number of instances in each class, but a small difference often does not matter. For now, import train_test_split from sklearn.model_selection and assign the data and the target labels to the variables X and y.
###Code
# Import `train_test_split` from `sklearn.model_selection`
from sklearn.model_selection import train_test_split
# Specify the data
X=data.loc[:,0:11]
X.head()
data_train=data
data1 = pd.read_table("Pollution.csv", sep=',', header=None)
data1.head()
from sklearn.decomposition import PCA
from sklearn import preprocessing
le = preprocessing.LabelEncoder()
a =le.fit_transform(data1[0])
data1[0]=a
data1[0].head()
data1.loc[[0],0:1] = 1,2
data1.loc[[0],2:3] = 3,4
data1.loc[[0],4:5] = 5,6
data1.loc[[0],6:7] = 7,8
data1.loc[[0],8:9] = 9,10
data1.loc[[0],10:11] = 11,12
data1.head()
# Import `train_test_split` from `sklearn.model_selection`
from sklearn.model_selection import train_test_split
# Specify the data
X1=data1.loc[:,0:11]
X1.head()
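# A hedged sketch of the split described in the markdown above (an assumption, since this
# notebook never constructs an explicit target column y; the split is shown on X only):
X_train, X_test = train_test_split(X, test_size=0.3, random_state=42)
print(X_train.shape, X_test.shape)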
###Output
_____no_output_____
###Markdown
Standardize the data Standardization is a way to deal with these values that lie so far apart. The scikit-learn package offers you a great and quick way of getting your data standardized: import the StandardScaler module from sklearn.preprocessing and you’re ready to scale your train and test data!
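For reference, the scaler below learns each feature's mean and standard deviation on the training set and applies the z-score transform $$z = \frac{x - \mu_{\text{train}}}{\sigma_{\text{train}}}$$ to both the train and the test data.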
###Code
# Import `StandardScaler` from `sklearn.preprocessing`
from sklearn.preprocessing import StandardScaler
# Define the scaler
scaler = StandardScaler().fit(data_train)
# Scale the train set
data_train = scaler.transform(data_train)
# Scale the test set
data_test = scaler.transform(data1)
###Output
_____no_output_____
###Markdown
Now that you’re data is preprocessed, you can move on to the real work: building your own neural network to classify pollution content. Model Data Here we are building a multi-layer perceptron, this type of neural network is often fully connected. That means that you’re looking to build a fairly simple stack of fully-connected layers to solve this problem. As for the activation function that you will use, it’s best to use one of the most common ones here for the purpose of getting familiar with Keras and neural networks, which is the relu activation function.we create the model by passing a list of layer instances to the constructor, which you set up by running model = Sequential(). comming to the structure of the multi-layer perceptron we have an input layer, some hidden layers and an output layer. it’s therefore important to take into account that your first layer needs to make the input shape clear. The model needs to know what input shape to expect and that’s why you’ll always find the input_shape, input_dim, input_length, or batch_size arguments in the documentation of the layers and in practical examples of those layers. You are ending the network with a Dense layer of size 1. The final layer will also use a sigmoid activation function so that your output is actually a probability; This means that this will result in a score between 0 and 1, indicating how likely the sample is to have the target “1”, or how likely the data is to be from data_train.
###Code
from keras.utils import np_utils
# Import `Sequential` from `keras.models`
from keras.models import Sequential
# Import `Dense` from `keras.layers`
from keras.layers import Dense
# Initialize the constructor
model = Sequential()
# Add an input layer
model.add(Dense(12, activation='relu', input_shape=(11,)))
# Add one hidden layer
model.add(Dense(8, activation='relu'))
# Add an output layer
model.add(Dense(1, activation='sigmoid'))
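# A minimal sketch of the usual next steps (an assumption: a binary label vector y_train
# aligned with data_train would be needed, and it is not constructed in this notebook):
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
# model.fit(data_train, y_train, epochs=20, batch_size=32, verbose=1)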
###Output
_____no_output_____ |
practice/week-03/W03_2_static_page_scrapping.ipynb | ###Markdown
TEXT MINING for PRACTICE: Static Web Page Scraping---
###Code
# Library documentation: https://2.python-requests.org/en/master/user/quickstart/
import requests
###Output
_____no_output_____
###Markdown
1. Scraping content from Wikipedia
###Code
url = "https://ko.wikipedia.org/wiki/미세먼지"
res = requests.get(url)
res
print(res.text[:1000],"\n...")
# As with the motivation for using regular expressions, tokenizing a document by hand with replace or split is inefficient,
# so we use a library built for parsing HTML tags instead!
import sys
!{sys.executable} -m pip install bs4
from bs4 import BeautifulSoup
soup = BeautifulSoup(res.text, "html.parser")
print(soup)
soup.find('span')
spans = soup.find_all('span')
print(len(spans))
spans[0:10]
divs = soup.find_all('div')
print(len(divs))
for number, div in enumerate(divs[0:10]):
print(div)
print(number,"-"*50)
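# A small additional sketch (not in the original notebook): instead of printing raw <div>
# markup, collect the readable article text from the <p> tags.
paragraphs = [p.get_text(strip=True) for p in soup.find_all('p')]
print("\n".join(paragraphs[:3]))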
###Output
<div class="noprint" id="mw-page-base"></div>
0 --------------------------------------------------
<div class="noprint" id="mw-head-base"></div>
1 --------------------------------------------------
<div class="mw-body" id="content" role="main">
<a id="top"></a>
<div class="mw-body-content" id="siteNotice"><!-- CentralNotice --></div>
<div class="mw-indicators mw-body-content">
</div>
<h1 class="firstHeading" id="firstHeading" lang="ko">미세먼지</h1>
<div class="mw-body-content" id="bodyContent">
<div class="noprint" id="siteSub">위키백과, 우리 모두의 백과사전.</div>
<div id="contentSub"></div>
<div id="jump-to-nav"></div>
<a class="mw-jump-link" href="#mw-head">둘러보기로 가기</a>
<a class="mw-jump-link" href="#p-search">검색하러 가기</a>
<div class="mw-content-ltr" dir="ltr" id="mw-content-text" lang="ko"><div class="mw-parser-output"><div class="thumb tright"><div class="thumbinner" style="width:402px;"><div class="PopUpMediaTransform" id="mwe_player_0" style="width:400px;" videopayload='<div class="mediaContainer" style="width:854px"><video id="mwe_player_1" poster="//upload.wikimedia.org/wikipedia/commons/thumb/4/41/Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv/854px--Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv.jpg" controls="" preload="none" autoplay="" style="width:854px;height:428px" class="kskin" data-durationhint="189.54448979592" data-startoffset="0" data-mwtitle="Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv" data-mwprovider="wikimediacommons"><source src="//upload.wikimedia.org/wikipedia/commons/transcoded/4/41/Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv/Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv.480p.vp9.webm" type="video/webm; codecs=&quot;vp9, opus&quot;" data-title="SD VP9 (480P)" data-shorttitle="VP9 480P" data-transcodekey="480p.vp9.webm" data-width="854" data-height="428" data-bandwidth="1003256" data-framerate="29.97002997003"/><source src="//upload.wikimedia.org/wikipedia/commons/transcoded/4/41/Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv/Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv.480p.webm" type="video/webm; codecs=&quot;vp8, vorbis&quot;" data-title="SD WebM (480P)" data-shorttitle="WebM 480P" data-transcodekey="480p.webm" data-width="854" data-height="428" data-bandwidth="1028176" data-framerate="29.97002997003"/><source src="//upload.wikimedia.org/wikipedia/commons/transcoded/4/41/Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv/Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv.720p.vp9.webm" type="video/webm; codecs=&quot;vp9, opus&quot;" data-title="HD VP9 (720P)" data-shorttitle="VP9 720P" data-transcodekey="720p.vp9.webm" data-width="1280" data-height="640" data-bandwidth="1792304" data-framerate="29.97002997003"/><source src="//upload.wikimedia.org/wikipedia/commons/transcoded/4/41/Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv/Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv.720p.webm" type="video/webm; codecs=&quot;vp8, vorbis&quot;" data-title="HD WebM (720P)" data-shorttitle="WebM 720P" data-transcodekey="720p.webm" data-width="1280" data-height="640" data-bandwidth="1921504" data-framerate="29.97002997003"/><source src="//upload.wikimedia.org/wikipedia/commons/4/41/Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv" type="video/ogg; codecs=&quot;theora, vorbis&quot;" data-title="원본 Ogg 파일, 1,280 × 640 (3.16 Mbps)" data-shorttitle="Ogg 원본" data-width="1280" data-height="640" data-bandwidth="3158304" data-framerate="29.97002997003"/><source src="//upload.wikimedia.org/wikipedia/commons/transcoded/4/41/Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv/Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv.120p.vp9.webm" type="video/webm; codecs=&quot;vp9, opus&quot;" data-title="대역이 가장 낮은 VP9 (120P)" data-shorttitle="VP9 120P" data-transcodekey="120p.vp9.webm" data-width="214" data-height="106" data-bandwidth="123496" data-framerate="29.97002997003"/><source src="//upload.wikimedia.org/wikipedia/commons/transcoded/4/41/Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv/Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv.160p.webm" type="video/webm; codecs=&quot;vp8, vorbis&quot;" data-title="대역이 낮은 WebM (160P)" data-shorttitle="WebM 160P" data-transcodekey="160p.webm" 
data-width="288" data-height="144" data-bandwidth="132304" data-framerate="29.97002997003"/><source src="//upload.wikimedia.org/wikipedia/commons/transcoded/4/41/Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv/Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv.180p.vp9.webm" type="video/webm; codecs=&quot;vp9, opus&quot;" data-title="대역이 낮은 VP9 (180P)" data-shorttitle="VP9 180P" data-transcodekey="180p.vp9.webm" data-width="320" data-height="160" data-bandwidth="203888" data-framerate="29.97002997003"/><source src="//upload.wikimedia.org/wikipedia/commons/transcoded/4/41/Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv/Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv.240p.webm" type="video/webm; codecs=&quot;vp8, vorbis&quot;" data-title="소형 WebM (240P)" data-shorttitle="WebM 240P" data-transcodekey="240p.webm" data-width="426" data-height="214" data-bandwidth="260264" data-framerate="29.97002997003"/><source src="//upload.wikimedia.org/wikipedia/commons/transcoded/4/41/Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv/Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv.240p.vp9.webm" type="video/webm; codecs=&quot;vp9, opus&quot;" data-title="소형 VP9 (240P)" data-shorttitle="VP9 240P" data-transcodekey="240p.vp9.webm" data-width="426" data-height="214" data-bandwidth="312624" data-framerate="29.97002997003"/><source src="//upload.wikimedia.org/wikipedia/commons/transcoded/4/41/Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv/Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv.360p.webm" type="video/webm; codecs=&quot;vp8, vorbis&quot;" data-title="웹 스트리밍 가능 WebM (360P)" data-shorttitle="WebM 360P" data-transcodekey="360p.webm" data-width="640" data-height="320" data-bandwidth="516248" data-framerate="29.97002997003"/><source src="//upload.wikimedia.org/wikipedia/commons/transcoded/4/41/Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv/Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv.360p.vp9.webm" type="video/webm; codecs=&quot;vp9, opus&quot;" data-title="VP9 (360P)" data-shorttitle="VP9 360P" data-transcodekey="360p.vp9.webm" data-width="640" data-height="320" data-bandwidth="556872" data-framerate="29.97002997003"/></video></div>'><img alt="파일:Atmospheric Aerosol Eddies and Flows - NASA GSFC S.ogv" src="//upload.wikimedia.org/wikipedia/commons/thumb/4/41/Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv/400px--Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv.jpg" style="width:400px;height:200px"/><a href="//upload.wikimedia.org/wikipedia/commons/4/41/Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv" target="new" title="미디어 재생"><span class="play-btn-large"><span class="mw-tmh-playtext">미디어 재생</span></span></a></div> <div class="thumbcaption"><div class="magnify"><a class="internal" href="/wiki/%ED%8C%8C%EC%9D%BC:Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv" title="실제 크기로"></a></div>2006년 8월 17일부터 2007년 4월 10일까지 모습 (GOCART 모델을 사용한 애니메이션).<sup class="reference" id="cite_ref-1"><a href="#cite_note-1">[1]</a></sup><sup class="reference" id="cite_ref-2"><a href="#cite_note-2">[2]</a></sup> (자세한 사항을 보려면 클릭할 것) <br/>* 녹색: 검은 탄소와 유기탄소 <br/>* 빨강/주황: 먼지 <br/>* 흰색: 황산염 <br/>* 파랑: 해염</div></div></div>
<table class="toccolours" style="float:right; clear:right;width:250px;margin:0 0 0.5em 1em;">
<tbody><tr>
<td align="center" style="background:#ccddcc"><b><a href="/wiki/%EC%98%A4%EC%97%BC" title="오염">오염</a></b>
</td></tr>
<tr>
<td align="center" style="background:#ccccff"><b><a href="/wiki/%EB%8C%80%EA%B8%B0_%EC%98%A4%EC%97%BC" title="대기 오염">대기 오염</a></b>
</td></tr>
<tr>
<td align="center" style="font-size: 90%;"><a href="/wiki/%EC%82%B0%EC%84%B1%EB%B9%84" title="산성비">산성비</a> • <a href="/wiki/%EB%8C%80%EA%B8%B0%EC%A7%88_%EC%A7%80%EC%88%98" title="대기질 지수">대기질 지수</a> • <a class="new" href="/w/index.php?title=%EB%8C%80%EA%B8%B0%EB%B6%84%EC%82%B0%EB%AA%A8%EB%8D%B8&action=edit&redlink=1" title="대기분산모델 (없는 문서)">대기분산모델</a> • <a href="/wiki/%ED%95%A0%EB%A1%9C%EC%95%8C%EC%BC%80%EC%9D%B8" title="할로알케인">할로알케인</a> • <a class="mw-redirect" href="/wiki/%EA%B8%80%EB%A1%9C%EB%B2%8C_%EB%94%94%EB%B0%8D" title="글로벌 디밍">글로벌 디밍</a> • <a href="/wiki/%EC%A7%80%EA%B5%AC_%EC%98%A8%EB%82%9C%ED%99%94" title="지구 온난화">지구 온난화</a> • <a href="/wiki/%EC%95%88%EA%B0%9C" title="안개">안개</a> • <a class="new" href="/w/index.php?title=%EC%8B%A4%EB%82%B4%EA%B3%B5%EA%B8%B0%ED%99%98%EA%B2%BD&action=edit&redlink=1" title="실내공기환경 (없는 문서)">실내공기환경</a> • <a class="mw-redirect" href="/wiki/%EC%98%A4%EC%A1%B4%EC%B8%B5_%EA%B0%90%EC%86%8C" title="오존층 감소">오존층 감소</a> • <a class="mw-redirect" href="/wiki/%EB%AF%B8%EB%A6%BD%EC%9E%90" title="미립자">미립자</a> • <a href="/wiki/%EC%8A%A4%EB%AA%A8%EA%B7%B8" title="스모그">스모그</a>
</td></tr>
<tr>
<td align="center" style="background:#ccccff"><b><a href="/wiki/%EC%88%98%EC%A7%88_%EC%98%A4%EC%97%BC" title="수질 오염">수질 오염</a></b>
</td></tr>
<tr>
<td align="center" style="font-size: 90%;"><a href="/wiki/%EB%B6%80%EC%98%81%EC%96%91%ED%99%94" title="부영양화">부영양화</a> • <a class="new" href="/w/index.php?title=%EC%82%B0%EC%86%8C%EA%B2%B0%ED%95%8D&action=edit&redlink=1" title="산소결핍 (없는 문서)">산소결핍</a> • <a href="/wiki/%ED%95%B4%EC%96%91_%EC%98%A4%EC%97%BC" title="해양 오염">해양 오염</a> • <a href="/wiki/%ED%95%B4%EC%96%91_%EC%82%B0%EC%84%B1%ED%99%94" title="해양 산성화">해양 산성화</a> • <a href="/wiki/%EA%B8%B0%EB%A6%84_%EC%9C%A0%EC%B6%9C" title="기름 유출">기름 유출</a> • <a class="new" href="/w/index.php?title=%EC%84%A0%EB%B0%95_%EC%98%A4%EC%97%BC&action=edit&redlink=1" title="선박 오염 (없는 문서)">선박 오염</a> • <a class="new" href="/w/index.php?title=%ED%91%9C%EB%A9%B4%EC%9C%A0%EC%88%98&action=edit&redlink=1" title="표면유수 (없는 문서)">표면유수</a> • <a class="new" href="/w/index.php?title=%EC%97%B4_%EC%98%A4%EC%97%BC&action=edit&redlink=1" title="열 오염 (없는 문서)">열 오염</a> • <a class="mw-redirect" href="/wiki/%EC%83%9D%ED%99%9C%ED%95%98%EC%88%98" title="생활하수">생활하수</a> • <a class="new" href="/w/index.php?title=%EC%88%98%EC%9D%B8%EC%84%B1_%EC%A0%84%EC%97%BC&action=edit&redlink=1" title="수인성 전염 (없는 문서)">수인성 전염</a> • <a href="/wiki/%EC%88%98%EC%A7%88" title="수질">수질</a> • <a class="new" href="/w/index.php?title=%EB%AC%BC_%EC%A0%95%EC%B2%B4&action=edit&redlink=1" title="물 정체 (없는 문서)">물 정체</a>
</td></tr>
<tr>
<td align="center" style="background:#ccccff"><b><a href="/wiki/%ED%86%A0%EC%96%91_%EC%98%A4%EC%97%BC" title="토양 오염">토양 오염</a></b>
</td></tr>
<tr>
Fine dust (미세먼지; English: particulate matter, PM, suspended particulate matter, SPM, atmospheric aerosol particles, atmospheric particulate matter), also called 분진 (dust), is dust whose particles are too small to be seen with the naked eye. It is an air pollutant containing sulfur dioxide, nitrogen oxides, lead, ozone, carbon monoxide and similar substances; it is produced by cars, factories, cooking and other processes, stays suspended in the atmosphere for long periods, and has a particle diameter of 10 μm or less, so it is also called PM10. Particles of 2.5 μm or less are written PM2.5 and are also known as "ultrafine dust". In the scientific literature it is referred to as an aerosol. Fine particles are also called suspended particles or particulate matter, and each name carries a slightly different meaning. Particulate matter ranges from roughly 100 μm down to 10 nm in diameter; particles larger than this settle out quickly under gravity and so have a very short residence time in the atmosphere.
Overview

Fine dust has a major impact on the human body. The 1948 air-pollution disaster in Donora, Pennsylvania, which killed 20 people, and the 1952 Great Smog of London, which caused about 4,100 deaths, are the classic examples of what particulate matter can do to human health. Many epidemiological studies have since been carried out, and particles of 10 μm or less (PM10) in particular were shown to be likely to harm health, raising disease incidence and mortality in vulnerable groups. Countries subsequently drew up air-pollution countermeasures and set air-quality standards to reduce the harmful effects of particulate matter on people and the environment. By particle size, dust is divided into total suspended particles (TSP), 50 µm or less, and the much smaller fine dust; fine dust is in turn divided into particles smaller than 10 µm in diameter (PM10) and particles smaller than 2.5 µm (PM2.5).

Particulate matter (solid or liquid) suspended in the air is generally called an aerosol, and in everyday language simply dust.

- Dust particle sizes range from 0.001 to 1,000 μm, but particles of 70 μm or more settle out as soon as they are produced, so "total suspended particles" (TSP) normally refers to dust smaller than 70 μm.
- Particle diameters of 0.1 μm or less are called the ultra range; most dust lies between 0.1 and 10 μm. Particles in the 0.1–1 μm range neither settle nor coagulate easily, so they remain in the atmosphere the longest and penetrate the pulmonary alveoli most readily.
- Particles around 0.5 μm scatter light most strongly and are therefore one cause of reduced visibility.
Classification of dust

PM-10 (particles under 10 μm)

PM-10 is dust with a particle size of less than 10 μm. South Korea's environmental standard is an annual average of 50 µg/m³ and a 24-hour average of 100 µg/m³. These particles penetrate to the pulmonary alveoli, are a direct cause of various respiratory diseases, and weaken the body's immune function. The WHO guideline is an annual average of 20 µg/m³ and a 24-hour average of 50 µg/m³; in developing countries the annual average is said to be around 70 µg/m³.
PM-2.5 (particles under 2.5 μm)

PM-2.5 is dust with a particle size of less than 2.5 μm, also called ultrafine dust. Because smaller particles were found to have a greater impact on health, developed countries began introducing standards for fine particles in the late 1990s.

South Korea announced a standard of 15 µg/m³ as an annual average and 35 µg/m³ as a 24-hour average, and the United States set the same limits (annual 15 µg/m³, 24-hour 35 µg/m³). The WHO guideline is an annual average of 10 µg/m³ and a 24-hour average of 25 µg/m³.
TSP (Total Suspended Particles)

TSP, also called total suspended dust or total suspended particulate matter, normally means all suspended dust of 50 μm or less. Particles larger than 10 μm affect a city's appearance but have little effect on human health, so in the late 1990s the environmental standard was changed from TSP to PM-10. A small sketch of the 24-hour limits quoted in this section follows below.
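As an aside in the notebook's own language (Python), here is a minimal sketch that compares a 24-hour average reading with the Korean standards and WHO guidelines quoted above; the limit values come from the text, while the function and variable names are illustrative assumptions.

```python
# 24-hour average limits (µg/m³) quoted in the sections above; names are illustrative.
LIMITS_24H = {
    "PM10": {"Korean standard": 100, "WHO guideline": 50},
    "PM2.5": {"Korean standard": 35, "WHO guideline": 25},
}

def exceeded_limits(pollutant, value_ugm3):
    """Return which of the quoted 24-hour limits a reading exceeds."""
    return {name: value_ugm3 > limit for name, limit in LIMITS_24H[pollutant].items()}

print(exceeded_limits("PM10", 80))   # above the WHO guideline (50) but below the Korean standard (100)
print(exceeded_limits("PM2.5", 40))  # above both quoted limits (35 and 25)
```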
Causes

Sources of fine dust are divided into anthropogenic and natural. Anthropogenic sources include fine dust carried over from China, factory smoke, waste incineration, and grilling fish or other food at home. Natural sources include dust raised by sandstorms, volcanic ash, and dust from wildfires. Sea-salt particles also have a strong influence on regions close to the sea.
Composition of fine dust

Fine dust is made up of nitrates, sulfates and similar salts (58.3%), carbon species and soot (16.8%), minerals (6.3%), and other material (18.6%).[3][4] These shares are restated in the small sketch below.
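Purely as an illustration in the notebook's language (Python with matplotlib), the sketch below plots the stated shares as a pie chart; the English labels are translations and the chart itself is not part of the original article.

```python
import matplotlib.pyplot as plt

# Composition shares (percent) as stated in the paragraph above.
labels = ["Nitrates & sulfates", "Carbon species & soot", "Minerals", "Other"]
shares = [58.3, 16.8, 6.3, 18.6]

plt.pie(shares, labels=labels, autopct="%1.1f%%")
plt.title("Stated composition of fine dust")
plt.show()
```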
Diseases

Increased mortality among the elderly

A 2009 study of fine dust and mortality by the National Institute of Environmental Research and a team at Inha University found that in Seoul, every 10 µg/m³ increase in PM10 (a microgram is one millionth of a gram) raised mortality among groups sensitive to air pollution, such as people aged 65 and over, by 0.4%. The effect of ultrafine dust (PM2.5) was estimated to be larger: each 10 µg/m³ increase raised mortality in the sensitive group by 1.1%. A small arithmetic sketch of these per-increment figures follows.
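To make the per-increment figures concrete, here is a minimal sketch that scales them linearly to an arbitrary concentration change; the linear extrapolation is an assumption made only for illustration and is not claimed by the study.

```python
# Reported mortality increase in the sensitive group per +10 µg/m³ (percent), from the study above.
INCREASE_PER_10_UGM3 = {"PM10": 0.4, "PM2.5": 1.1}

def estimated_increase_pct(pollutant, delta_ugm3):
    """Linearly scale the reported per-10 µg/m³ figure (illustrative assumption only)."""
    return INCREASE_PER_10_UGM3[pollutant] * (delta_ugm3 / 10.0)

print(round(estimated_increase_pct("PM10", 30), 2))   # 1.2 percent, for a +30 µg/m³ change
print(round(estimated_increase_pct("PM2.5", 30), 2))  # 3.3 percent
```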
Pregnant women and fetuses

Research by Professor Ha Eun-hee's team at Ewha Womans University College of Medicine found that a 10 µg/m³ rise in fine dust increases the risk of delivering a low-birth-weight baby by 5.2–7.4% and the risk of stillbirth between the 4th and 9th months of pregnancy by 8.0–13.8%.[5]

In 2009, a joint study by occupational-medicine specialists at Pusan National University Yangsan Hospital together with experts in atmospheric science and geographic information systems found that PM10 concentrations (dust with a diameter of 10 μm or less) were closely related to low-birth-weight births, stillbirths, and birth defects.[6]

A 1998 survey by Médecins Sans Frontières (MSF) found that the part of Turkmenistan adjacent to the Aral Sea had very high dust deposition rates and severe pesticide contamination. A 2000–2001 study of the relationship between dust and respiratory disease in the Karakalpak region showed that health-threatening fine dust made up 14–53% of all dust, and that the lung function (including lung capacity) of children in the area was markedly lower than that of European children.[7]

A study of 1,700 children by a university hospital in the United States found that children born in areas with high fine-dust concentrations were more likely than others to suffer impaired lung function, with lung capacity below 80% of normal. Because of findings like these, experts call fine dust a "silent killer".[8]
Asthma

Fine dust that penetrates deep into the alveoli and accumulates in the bronchi and lungs is a direct cause of various respiratory diseases and weakens the body's immune function. It triggers asthma and breathing difficulty, and through long-range transport it can raise the heavy-metal content of rain and snow. Because it absorbs and scatters light while suspended in the air it also worsens visibility, and by settling on leaf surfaces it interferes with photosynthesis, respiration and transpiration, harming plant growth. It has also been reported that 88% of deaths among women are due to fine dust produced during cooking.

According to research by Dr. Cho Seung-heon's team at the Korea Environment Institute, reducing fine dust by 10–30% would cut the number of deaths from related diseases in the Seoul metropolitan area by 40–120 a year and the number of heart and respiratory disease cases by 2,800–8,300 a year. Based on the medical costs associated with heart and respiratory illness, the benefit of such a reduction was estimated at 8 billion to 120 billion won per year.
Headaches

Breathing fine dust that is high in harmful heavy metals such as lead, arsenic and zinc, neurotoxic substances released when anthracite coal is burned, can make even a healthy person cough, cause a sore throat and skin trouble, and bring on severe headaches, dizziness, and difficulty breathing.[9]

Most fine dust is harmful, but sulfate and nitrate ions in particular adsorb onto the dust in yellow-dust storms, turn into oxides, and are carried into the lungs with each breath. Once in the lungs they cause inflammation, typically bronchitis, asthma, and chronic obstructive pulmonary disease (COPD). They can also stimulate white blood cells and inflame the walls of blood vessels, which in turn can trigger classic vascular diseases such as arteriosclerosis, cerebral infarction, and myocardial infarction.
Atopic dermatitis

Ultrafine dust smaller than skin pores can penetrate them and cause dermatitis such as atopic eczema, so people with acne or atopic skin should also refrain from outdoor activity when yellow dust is forecast.
Insulin resistance

A study has found that PAH, a major component of fine-dust air pollution, raises insulin resistance in elderly women. With insulin resistance (IR), the glucose-lowering action of insulin is impaired and blood sugar is not used effectively, which can lead to metabolic syndrome as well as heart disease and diabetes.[10]
Prevention and countermeasures

- Check the fine-dust concentration in advance and plan the scope of your activities accordingly.
- Sensitive groups such as children, the elderly, and people with lung or heart disease should limit outdoor activity; everyone else should cut back on long or strenuous outdoor activity.
- The simplest and most common protection is to wear a dust-blocking mask. Masks certified to block fine dust are marked on the packaging with the words "quasi-drug" (의약외품) and a grade such as KF80, KF94 or KF99. The KF grades indicate particle-blocking performance: KF80 filters at least 80 percent of fine particles averaging 0.6 μm in size, while KF94 and KF99 filter at least 94 and 99 percent, respectively, of 0.4 μm particles (see the sketch after this list).
- Because this is a global problem, the heads of each country need to come together and draw up policy.
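The KF grades above boil down to a minimum filtration efficiency at a given test particle size. A minimal sketch, assuming a simple lookup table built from the figures in the list, is given below; the helper name is hypothetical.

```python
# KF mask grades as described above: test particle size (µm) and minimum percent filtered.
KF_GRADES = {
    "KF80": {"particle_um": 0.6, "min_filtered_pct": 80},
    "KF94": {"particle_um": 0.4, "min_filtered_pct": 94},
    "KF99": {"particle_um": 0.4, "min_filtered_pct": 99},
}

def meets_grade(grade, measured_efficiency_pct):
    """Check whether a measured filtration efficiency satisfies a KF grade (illustrative helper)."""
    return measured_efficiency_pct >= KF_GRADES[grade]["min_filtered_pct"]

print(meets_grade("KF94", 96.5))  # True: 96.5% >= 94%
```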
See also

- Dust (먼지)
- Air quality index (대기질 지수)
- Petroleum coke (petcoke)
References

1. GMAO – Research. http://gmao.gsfc.nasa.gov/research/aerosol/modeling/nr1_movie/
2. GMAO – Research. http://gmao.gsfc.nasa.gov/research/aerosol/
3. "11월 미세 먼지 습격, 중국 아닌 국내 영향 컸다". http://kormedi.com/1254659/11%ec%9b%94-%eb%af%b8%ec%84%b8-%eb%a8%bc%ec%a7%80-%ec%8a%b5%ea%b2%a9-%ec%a4%91%ea%b5%ad-%ec%95%84%eb%8b%8c-%ea%b5%ad%eb%82%b4-%ec%98%81%ed%96%a5/
4. "'라돈·케모포비아' 공포···출연연 '융합연구' 해결 나섰다". http://hellodd.com/?md=news&mt=view&pid=65607
5. 송창석 (2013-11-19). "중국발 초미세먼지, 엄마 뱃속 태아까지 위협한다". 한겨레. http://www.hani.co.kr/arti/society/environment/611890.html
6. 민영규 (2009-09-24). "부산MBC, 26일 특별기획 '미세먼지의 비밀' 방영". 연합뉴스. http://news.naver.com/main/read.nhn?mode=LSD&mid=sec&sid1=102&oid=001&aid=0002881939
7. 김학준 (2002-12-31). "오염먼지 쌓여 결핵·빈혈로 '시름'". 한겨레. http://legacy.www.hani.co.kr/section-005100007/2002/12/005100007200212312045128.html
8. 한세현 (2013-12-07). "\"미세먼지 임신부와 태아에 특히 더 위험\"". SBS. http://news.sbs.co.kr/news/endPage.do?news_id=N1002121023
9. 박현갑 (2013-11-27). "한반도를 엄습하는 중국발 미세먼지". 서울신문. http://www.seoul.co.kr/news/newsView.php?id=20131127031010
10. 이주영 (2015-03-23). "\"미세먼지 주성분 PAH, 과체중 노년여성 건강위협\"". 연합뉴스. http://www.yonhapnews.co.kr/bulletin/2015/03/23/0200000000AKR20150323110800017.HTML
External links

- Air Korea – real-time air quality in South Korea (archived): https://web.archive.org/web/20150831085115/http://www.airkorea.or.kr/dustForecast
This article includes material that the Seoul Metropolitan Government released into the public domain through the Knowledge Sharing Project.
Original source: https://ko.wikipedia.org/w/index.php?title=미세먼지&oldid=24624501
<div class="catlinks" data-mw="interface" id="catlinks"><div class="mw-normal-catlinks" id="mw-normal-catlinks"><a href="/wiki/%ED%8A%B9%EC%88%98:%EB%B6%84%EB%A5%98" title="특수:분류">분류</a>: <ul><li><a href="/wiki/%EB%B6%84%EB%A5%98:%EB%AF%B8%EC%84%B8%EB%A8%BC%EC%A7%80" title="분류:미세먼지">미세먼지</a></li><li><a href="/wiki/%EB%B6%84%EB%A5%98:%EC%8A%A4%EB%AA%A8%EA%B7%B8" title="분류:스모그">스모그</a></li><li><a href="/wiki/%EB%B6%84%EB%A5%98:IARC_1%EB%93%B1%EA%B8%89_%EB%B0%9C%EC%95%94_%EB%AC%BC%EC%A7%88" title="분류:IARC 1등급 발암 물질">IARC 1등급 발암 물질</a></li></ul></div><div class="mw-hidden-catlinks mw-hidden-cats-hidden" id="mw-hidden-catlinks">숨은 분류: <ul><li><a href="/wiki/%EB%B6%84%EB%A5%98:%EC%98%81%EC%96%B4_%ED%91%9C%EA%B8%B0%EB%A5%BC_%ED%8F%AC%ED%95%A8%ED%95%9C_%EB%AC%B8%EC%84%9C" title="분류:영어 표기를 포함한 문서">영어 표기를 포함한 문서</a></li><li><a href="/wiki/%EB%B6%84%EB%A5%98:%EC%84%9C%EC%9A%B8%ED%8A%B9%EB%B3%84%EC%8B%9C_%EA%B3%B5%EA%B0%9C%EC%9E%90%EB%A3%8C%EB%A5%BC_%EC%9D%B8%EC%9A%A9%ED%95%9C_%EB%AC%B8%EC%84%9C" title="분류:서울특별시 공개자료를 인용한 문서">서울특별시 공개자료를 인용한 문서</a></li><li><a href="/wiki/%EB%B6%84%EB%A5%98:GND_%EC%8B%9D%EB%B3%84%EC%9E%90%EB%A5%BC_%ED%8F%AC%ED%95%A8%ED%95%9C_%EC%9C%84%ED%82%A4%EB%B0%B1%EA%B3%BC_%EB%AC%B8%EC%84%9C" title="분류:GND 식별자를 포함한 위키백과 문서">GND 식별자를 포함한 위키백과 문서</a></li></ul></div></div>
<div class="visualClear"></div>
</div>
</div>
2 --------------------------------------------------
<div class="mw-body-content" id="siteNotice"><!-- CentralNotice --></div>
3 --------------------------------------------------
<div class="mw-indicators mw-body-content">
</div>
4 --------------------------------------------------
<div class="mw-body-content" id="bodyContent">
<div class="noprint" id="siteSub">위키백과, 우리 모두의 백과사전.</div>
<div id="contentSub"></div>
<div id="jump-to-nav"></div>
<a class="mw-jump-link" href="#mw-head">둘러보기로 가기</a>
<a class="mw-jump-link" href="#p-search">검색하러 가기</a>
<div class="mw-content-ltr" dir="ltr" id="mw-content-text" lang="ko"><div class="mw-parser-output"><div class="thumb tright"><div class="thumbinner" style="width:402px;"><div class="PopUpMediaTransform" id="mwe_player_0" style="width:400px;" videopayload='<div class="mediaContainer" style="width:854px"><video id="mwe_player_1" poster="//upload.wikimedia.org/wikipedia/commons/thumb/4/41/Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv/854px--Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv.jpg" controls="" preload="none" autoplay="" style="width:854px;height:428px" class="kskin" data-durationhint="189.54448979592" data-startoffset="0" data-mwtitle="Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv" data-mwprovider="wikimediacommons"><source src="//upload.wikimedia.org/wikipedia/commons/transcoded/4/41/Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv/Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv.480p.vp9.webm" type="video/webm; codecs=&quot;vp9, opus&quot;" data-title="SD VP9 (480P)" data-shorttitle="VP9 480P" data-transcodekey="480p.vp9.webm" data-width="854" data-height="428" data-bandwidth="1003256" data-framerate="29.97002997003"/><source src="//upload.wikimedia.org/wikipedia/commons/transcoded/4/41/Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv/Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv.480p.webm" type="video/webm; codecs=&quot;vp8, vorbis&quot;" data-title="SD WebM (480P)" data-shorttitle="WebM 480P" data-transcodekey="480p.webm" data-width="854" data-height="428" data-bandwidth="1028176" data-framerate="29.97002997003"/><source src="//upload.wikimedia.org/wikipedia/commons/transcoded/4/41/Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv/Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv.720p.vp9.webm" type="video/webm; codecs=&quot;vp9, opus&quot;" data-title="HD VP9 (720P)" data-shorttitle="VP9 720P" data-transcodekey="720p.vp9.webm" data-width="1280" data-height="640" data-bandwidth="1792304" data-framerate="29.97002997003"/><source src="//upload.wikimedia.org/wikipedia/commons/transcoded/4/41/Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv/Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv.720p.webm" type="video/webm; codecs=&quot;vp8, vorbis&quot;" data-title="HD WebM (720P)" data-shorttitle="WebM 720P" data-transcodekey="720p.webm" data-width="1280" data-height="640" data-bandwidth="1921504" data-framerate="29.97002997003"/><source src="//upload.wikimedia.org/wikipedia/commons/4/41/Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv" type="video/ogg; codecs=&quot;theora, vorbis&quot;" data-title="원본 Ogg 파일, 1,280 × 640 (3.16 Mbps)" data-shorttitle="Ogg 원본" data-width="1280" data-height="640" data-bandwidth="3158304" data-framerate="29.97002997003"/><source src="//upload.wikimedia.org/wikipedia/commons/transcoded/4/41/Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv/Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv.120p.vp9.webm" type="video/webm; codecs=&quot;vp9, opus&quot;" data-title="대역이 가장 낮은 VP9 (120P)" data-shorttitle="VP9 120P" data-transcodekey="120p.vp9.webm" data-width="214" data-height="106" data-bandwidth="123496" data-framerate="29.97002997003"/><source src="//upload.wikimedia.org/wikipedia/commons/transcoded/4/41/Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv/Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv.160p.webm" type="video/webm; codecs=&quot;vp8, vorbis&quot;" data-title="대역이 낮은 WebM (160P)" data-shorttitle="WebM 160P" data-transcodekey="160p.webm" 
data-width="288" data-height="144" data-bandwidth="132304" data-framerate="29.97002997003"/><source src="//upload.wikimedia.org/wikipedia/commons/transcoded/4/41/Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv/Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv.180p.vp9.webm" type="video/webm; codecs=&quot;vp9, opus&quot;" data-title="대역이 낮은 VP9 (180P)" data-shorttitle="VP9 180P" data-transcodekey="180p.vp9.webm" data-width="320" data-height="160" data-bandwidth="203888" data-framerate="29.97002997003"/><source src="//upload.wikimedia.org/wikipedia/commons/transcoded/4/41/Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv/Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv.240p.webm" type="video/webm; codecs=&quot;vp8, vorbis&quot;" data-title="소형 WebM (240P)" data-shorttitle="WebM 240P" data-transcodekey="240p.webm" data-width="426" data-height="214" data-bandwidth="260264" data-framerate="29.97002997003"/><source src="//upload.wikimedia.org/wikipedia/commons/transcoded/4/41/Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv/Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv.240p.vp9.webm" type="video/webm; codecs=&quot;vp9, opus&quot;" data-title="소형 VP9 (240P)" data-shorttitle="VP9 240P" data-transcodekey="240p.vp9.webm" data-width="426" data-height="214" data-bandwidth="312624" data-framerate="29.97002997003"/><source src="//upload.wikimedia.org/wikipedia/commons/transcoded/4/41/Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv/Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv.360p.webm" type="video/webm; codecs=&quot;vp8, vorbis&quot;" data-title="웹 스트리밍 가능 WebM (360P)" data-shorttitle="WebM 360P" data-transcodekey="360p.webm" data-width="640" data-height="320" data-bandwidth="516248" data-framerate="29.97002997003"/><source src="//upload.wikimedia.org/wikipedia/commons/transcoded/4/41/Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv/Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv.360p.vp9.webm" type="video/webm; codecs=&quot;vp9, opus&quot;" data-title="VP9 (360P)" data-shorttitle="VP9 360P" data-transcodekey="360p.vp9.webm" data-width="640" data-height="320" data-bandwidth="556872" data-framerate="29.97002997003"/></video></div>'><img alt="파일:Atmospheric Aerosol Eddies and Flows - NASA GSFC S.ogv" src="//upload.wikimedia.org/wikipedia/commons/thumb/4/41/Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv/400px--Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv.jpg" style="width:400px;height:200px"/><a href="//upload.wikimedia.org/wikipedia/commons/4/41/Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv" target="new" title="미디어 재생"><span class="play-btn-large"><span class="mw-tmh-playtext">미디어 재생</span></span></a></div> <div class="thumbcaption"><div class="magnify"><a class="internal" href="/wiki/%ED%8C%8C%EC%9D%BC:Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv" title="실제 크기로"></a></div>2006년 8월 17일부터 2007년 4월 10일까지 모습 (GOCART 모델을 사용한 애니메이션).<sup class="reference" id="cite_ref-1"><a href="#cite_note-1">[1]</a></sup><sup class="reference" id="cite_ref-2"><a href="#cite_note-2">[2]</a></sup> (자세한 사항을 보려면 클릭할 것) <br/>* 녹색: 검은 탄소와 유기탄소 <br/>* 빨강/주황: 먼지 <br/>* 흰색: 황산염 <br/>* 파랑: 해염</div></div></div>
<table class="toccolours" style="float:right; clear:right;width:250px;margin:0 0 0.5em 1em;">
<tbody><tr>
<td align="center" style="background:#ccddcc"><b><a href="/wiki/%EC%98%A4%EC%97%BC" title="오염">오염</a></b>
</td></tr>
<tr>
<td align="center" style="background:#ccccff"><b><a href="/wiki/%EB%8C%80%EA%B8%B0_%EC%98%A4%EC%97%BC" title="대기 오염">대기 오염</a></b>
</td></tr>
<tr>
<td align="center" style="font-size: 90%;"><a href="/wiki/%EC%82%B0%EC%84%B1%EB%B9%84" title="산성비">산성비</a> • <a href="/wiki/%EB%8C%80%EA%B8%B0%EC%A7%88_%EC%A7%80%EC%88%98" title="대기질 지수">대기질 지수</a> • <a class="new" href="/w/index.php?title=%EB%8C%80%EA%B8%B0%EB%B6%84%EC%82%B0%EB%AA%A8%EB%8D%B8&action=edit&redlink=1" title="대기분산모델 (없는 문서)">대기분산모델</a> • <a href="/wiki/%ED%95%A0%EB%A1%9C%EC%95%8C%EC%BC%80%EC%9D%B8" title="할로알케인">할로알케인</a> • <a class="mw-redirect" href="/wiki/%EA%B8%80%EB%A1%9C%EB%B2%8C_%EB%94%94%EB%B0%8D" title="글로벌 디밍">글로벌 디밍</a> • <a href="/wiki/%EC%A7%80%EA%B5%AC_%EC%98%A8%EB%82%9C%ED%99%94" title="지구 온난화">지구 온난화</a> • <a href="/wiki/%EC%95%88%EA%B0%9C" title="안개">안개</a> • <a class="new" href="/w/index.php?title=%EC%8B%A4%EB%82%B4%EA%B3%B5%EA%B8%B0%ED%99%98%EA%B2%BD&action=edit&redlink=1" title="실내공기환경 (없는 문서)">실내공기환경</a> • <a class="mw-redirect" href="/wiki/%EC%98%A4%EC%A1%B4%EC%B8%B5_%EA%B0%90%EC%86%8C" title="오존층 감소">오존층 감소</a> • <a class="mw-redirect" href="/wiki/%EB%AF%B8%EB%A6%BD%EC%9E%90" title="미립자">미립자</a> • <a href="/wiki/%EC%8A%A4%EB%AA%A8%EA%B7%B8" title="스모그">스모그</a>
</td></tr>
<tr>
<td align="center" style="background:#ccccff"><b><a href="/wiki/%EC%88%98%EC%A7%88_%EC%98%A4%EC%97%BC" title="수질 오염">수질 오염</a></b>
</td></tr>
<tr>
<td align="center" style="font-size: 90%;"><a href="/wiki/%EB%B6%80%EC%98%81%EC%96%91%ED%99%94" title="부영양화">부영양화</a> • <a class="new" href="/w/index.php?title=%EC%82%B0%EC%86%8C%EA%B2%B0%ED%95%8D&action=edit&redlink=1" title="산소결핍 (없는 문서)">산소결핍</a> • <a href="/wiki/%ED%95%B4%EC%96%91_%EC%98%A4%EC%97%BC" title="해양 오염">해양 오염</a> • <a href="/wiki/%ED%95%B4%EC%96%91_%EC%82%B0%EC%84%B1%ED%99%94" title="해양 산성화">해양 산성화</a> • <a href="/wiki/%EA%B8%B0%EB%A6%84_%EC%9C%A0%EC%B6%9C" title="기름 유출">기름 유출</a> • <a class="new" href="/w/index.php?title=%EC%84%A0%EB%B0%95_%EC%98%A4%EC%97%BC&action=edit&redlink=1" title="선박 오염 (없는 문서)">선박 오염</a> • <a class="new" href="/w/index.php?title=%ED%91%9C%EB%A9%B4%EC%9C%A0%EC%88%98&action=edit&redlink=1" title="표면유수 (없는 문서)">표면유수</a> • <a class="new" href="/w/index.php?title=%EC%97%B4_%EC%98%A4%EC%97%BC&action=edit&redlink=1" title="열 오염 (없는 문서)">열 오염</a> • <a class="mw-redirect" href="/wiki/%EC%83%9D%ED%99%9C%ED%95%98%EC%88%98" title="생활하수">생활하수</a> • <a class="new" href="/w/index.php?title=%EC%88%98%EC%9D%B8%EC%84%B1_%EC%A0%84%EC%97%BC&action=edit&redlink=1" title="수인성 전염 (없는 문서)">수인성 전염</a> • <a href="/wiki/%EC%88%98%EC%A7%88" title="수질">수질</a> • <a class="new" href="/w/index.php?title=%EB%AC%BC_%EC%A0%95%EC%B2%B4&action=edit&redlink=1" title="물 정체 (없는 문서)">물 정체</a>
</td></tr>
<tr>
<td align="center" style="background:#ccccff"><b><a href="/wiki/%ED%86%A0%EC%96%91_%EC%98%A4%EC%97%BC" title="토양 오염">토양 오염</a></b>
</td></tr>
<tr>
<td align="center" style="font-size: 90%;"><a class="mw-redirect" href="/wiki/%EC%83%9D%EB%AC%BC%ED%95%99%EC%A0%81%EA%B5%90%EC%A0%95" title="생물학적교정">생물학적교정</a> • <a href="/wiki/%EC%A0%9C%EC%B4%88%EC%A0%9C" title="제초제">제초제</a> • <a href="/wiki/%EB%86%8D%EC%95%BD" title="농약">농약</a> • <a href="/wiki/%EC%82%B4%EC%B6%A9%EC%A0%9C" title="살충제">살충제</a> • <a class="new" href="/w/index.php?title=%ED%86%A0%EC%96%91%EC%A7%80%EC%B9%A8%EA%B0%92_(SGVs)&action=edit&redlink=1" title="토양지침값 (SGVs) (없는 문서)">토양지침값 (SGVs)</a> • <a href="/wiki/%EC%82%AC%EB%A7%89%ED%99%94" title="사막화">사막화</a>
</td></tr>
<tr>
<td align="center" style="background:#ccccff"><b><a href="/wiki/%EB%B0%A9%EC%82%AC%EB%8A%A5_%EC%98%A4%EC%97%BC" title="방사능 오염">방사능 오염</a></b>
</td></tr>
<tr>
<td align="center" style="font-size: 90%;"><a class="new" href="/w/index.php?title=%EC%95%85%ED%8B%B0%EB%8A%84%EC%A1%B1%EA%B3%BC_%ED%99%98%EA%B2%BD&action=edit&redlink=1" title="악티늄족과 환경 (없는 문서)">악티늄족과 환경</a> • <a href="/wiki/%ED%99%98%EA%B2%BD_%EB%B0%A9%EC%82%AC%EB%8A%A5" title="환경 방사능">환경방사능</a> • <a class="mw-redirect" href="/wiki/%ED%95%B5%EB%B6%84%EC%97%B4%EC%83%9D%EC%84%B1%EB%AC%BC" title="핵분열생성물">핵분열생성물</a> • <a href="/wiki/%EB%82%99%EC%A7%84" title="낙진">낙진</a> • <a class="new" href="/w/index.php?title=%ED%94%8C%EB%A3%A8%ED%86%A0%EB%8A%84%EA%B3%BC_%ED%99%98%EA%B2%BD&action=edit&redlink=1" title="플루토늄과 환경 (없는 문서)">플루토늄과 환경</a> • <a class="new" href="/w/index.php?title=%EB%B0%A9%EC%82%AC%EB%8A%A5_%EC%A4%91%EB%8F%85&action=edit&redlink=1" title="방사능 중독 (없는 문서)">방사능 중독</a> • <a class="new" href="/w/index.php?title=%EB%9D%BC%EB%93%90%EA%B3%BC_%ED%99%98%EA%B2%BD&action=edit&redlink=1" title="라듐과 환경 (없는 문서)">라듐과 환경</a> • <a class="new" href="/w/index.php?title=%EC%9A%B0%EB%9D%BC%EB%8A%84%EA%B3%BC_%ED%99%98%EA%B2%BD&action=edit&redlink=1" title="우라늄과 환경 (없는 문서)">우라늄과 환경</a>
</td></tr>
<tr>
<td align="center" style="background:#ccccff"><b>기타 오염</b>
</td></tr>
<tr>
<td align="center" style="font-size: 90%;"><a class="mw-redirect" href="/wiki/%EC%B9%A8%EC%9E%85%EC%A2%85" title="침입종">침입종</a> • <a href="/wiki/%EA%B4%91%EA%B3%B5%ED%95%B4" title="광공해">광공해</a> • <a href="/wiki/%EC%86%8C%EC%9D%8C_%EA%B3%B5%ED%95%B4" title="소음 공해">소음 공해</a> • <a class="new" href="/w/index.php?title=%EC%A0%84%EC%9E%90%ED%8C%8C_%EC%8A%A4%ED%8E%99%ED%8A%B8%EB%9F%BC_%EC%98%A4%EC%97%BC&action=edit&redlink=1" title="전자파 스펙트럼 오염 (없는 문서)">전자파 스펙트럼 오염</a> • <a class="new" href="/w/index.php?title=%EC%8B%9C%EA%B0%81_%EA%B3%B5%ED%95%B4&action=edit&redlink=1" title="시각 공해 (없는 문서)">시각 공해</a> • <a class="mw-redirect" href="/wiki/%EB%A9%B8%EC%A2%85" title="멸종">멸종</a>
</td></tr>
<tr>
<td align="center" style="background:#ccccff"><b>국제 협약</b>
</td></tr>
<tr>
<td align="center" style="font-size: 90%;"><a href="/wiki/%EB%AA%AC%ED%8A%B8%EB%A6%AC%EC%98%AC_%EC%9D%98%EC%A0%95%EC%84%9C" title="몬트리올 의정서">몬트리올 의정서</a> • <a href="/wiki/%EA%B5%90%ED%86%A0_%EC%9D%98%EC%A0%95%EC%84%9C" title="교토 의정서">교토 의정서</a> • <a href="/wiki/%EB%8C%80%EA%B8%B0%EC%98%A4%EC%97%BC%EB%AC%BC%EC%A7%88%EC%9D%98_%EC%9E%A5%EA%B1%B0%EB%A6%AC_%EC%9D%B4%EB%8F%99%EC%97%90_%EA%B4%80%ED%95%9C_%ED%98%91%EC%95%BD" title="대기오염물질의 장거리 이동에 관한 협약">대기오염물질의 장거리 이동에 관한 협약</a>
</td></tr>
<tr>
<td align="center" style="background:#ccccff"><b><a class="new" href="/w/index.php?title=%ED%99%98%EA%B2%BD%EB%8B%A8%EC%B2%B4_%EB%AA%A9%EB%A1%9D&action=edit&redlink=1" title="환경단체 목록 (없는 문서)">환경단체 목록</a></b>
</td></tr>
<tr>
<td align="center" style="font-size: 90%;"><a class="new" href="/w/index.php?title=%EC%A7%80%EA%B5%AC%EB%8C%80%EA%B8%B0%EA%B0%90%EC%8B%9C&action=edit&redlink=1" title="지구대기감시 (없는 문서)">지구대기감시</a> • <a href="/wiki/%EA%B7%B8%EB%A6%B0%ED%94%BC%EC%8A%A4" title="그린피스">그린피스</a>
</td></tr>
<tr>
<td align="center" style="background:#ccddcc" width="400"><b>관련 항목</b>
</td></tr>
<tr>
<td align="center" style="font-size: 90%;"><a class="mw-redirect" href="/wiki/%ED%99%98%EA%B2%BD_%EA%B3%BC%ED%95%99" title="환경 과학">환경 과학</a> • <a href="/wiki/%EC%9E%90%EC%97%B0_%ED%99%98%EA%B2%BD" title="자연 환경">자연 환경</a>
</td></tr></tbody></table>
Particulate matter (PM; also suspended particulate matter, SPM, atmospheric aerosol particles, or atmospheric particulate matter), known in Korean as 미세먼지 and also called 분진, is dust whose particles are too small to be seen with the naked eye. It is an air pollutant containing sulfur dioxide, nitrogen oxides, lead, ozone, carbon monoxide, and other substances; it is produced by automobiles, factories, cooking, and other sources and remains suspended in the atmosphere for long periods. Dust with a particle diameter of 10 μm or less is called PM10; particles of 2.5 μm or less are written PM2.5 and are also called "ultrafine dust". In scientific usage such suspended particles are referred to as aerosols. The terms fine particles, suspended particles, and particulate matter are used with slightly different meanings. Particulate matter ranges from roughly 100 μm down to 10 nm in diameter; particles larger than that settle out quickly under gravity, so their residence time in the atmosphere is very short.
<div class="toc" id="toc"><input class="toctogglecheckbox" id="toctogglecheckbox" role="button" style="display:none" type="checkbox"/><div class="toctitle" dir="ltr" lang="ko"><h2>목차</h2><span class="toctogglespan"><label class="toctogglelabel" for="toctogglecheckbox"></label></span></div>
<ul>
<li class="toclevel-1 tocsection-1"><a href="#개요"><span class="tocnumber">1</span> <span class="toctext">개요</span></a></li>
<li class="toclevel-1 tocsection-2"><a href="#먼지들의_분류"><span class="tocnumber">2</span> <span class="toctext">먼지들의 분류</span></a>
<ul>
<li class="toclevel-2 tocsection-3"><a href="#PM-10_(10μm_미만_입자)"><span class="tocnumber">2.1</span> <span class="toctext">PM-10 (10μm 미만 입자)</span></a></li>
<li class="toclevel-2 tocsection-4"><a href="#PM-2.5_(2.5μm_미만_입자)"><span class="tocnumber">2.2</span> <span class="toctext">PM-2.5 (2.5μm 미만 입자)</span></a></li>
<li class="toclevel-2 tocsection-5"><a href="#TSP_(Total_suspended_Particles,_총_부유_입자)"><span class="tocnumber">2.3</span> <span class="toctext">TSP (Total suspended Particles, 총 부유 입자)</span></a></li>
</ul>
</li>
<li class="toclevel-1 tocsection-6"><a href="#발생_원인"><span class="tocnumber">3</span> <span class="toctext">발생 원인</span></a></li>
<li class="toclevel-1 tocsection-7"><a href="#미세먼지_구성_성분"><span class="tocnumber">4</span> <span class="toctext">미세먼지 구성 성분</span></a></li>
<li class="toclevel-1 tocsection-8"><a href="#질병"><span class="tocnumber">5</span> <span class="toctext">질병</span></a>
<ul>
<li class="toclevel-2 tocsection-9"><a href="#노인사망률_증가"><span class="tocnumber">5.1</span> <span class="toctext">노인사망률 증가</span></a></li>
<li class="toclevel-2 tocsection-10"><a href="#임산부와_태아"><span class="tocnumber">5.2</span> <span class="toctext">임산부와 태아</span></a></li>
<li class="toclevel-2 tocsection-11"><a href="#천식"><span class="tocnumber">5.3</span> <span class="toctext">천식</span></a></li>
<li class="toclevel-2 tocsection-12"><a href="#두통"><span class="tocnumber">5.4</span> <span class="toctext">두통</span></a></li>
<li class="toclevel-2 tocsection-13"><a href="#아토피"><span class="tocnumber">5.5</span> <span class="toctext">아토피</span></a></li>
<li class="toclevel-2 tocsection-14"><a href="#인슐린_저항성"><span class="tocnumber">5.6</span> <span class="toctext">인슐린 저항성</span></a></li>
</ul>
</li>
<li class="toclevel-1 tocsection-15"><a href="#예방과_대책"><span class="tocnumber">6</span> <span class="toctext">예방과 대책</span></a></li>
<li class="toclevel-1 tocsection-16"><a href="#같이_보기"><span class="tocnumber">7</span> <span class="toctext">같이 보기</span></a></li>
<li class="toclevel-1 tocsection-17"><a href="#각주"><span class="tocnumber">8</span> <span class="toctext">각주</span></a></li>
<li class="toclevel-1 tocsection-18"><a href="#외부_링크"><span class="tocnumber">9</span> <span class="toctext">외부 링크</span></a></li>
</ul>
</div>
<h2><span id=".EA.B0.9C.EC.9A.94"></span><span class="mw-headline" id="개요">개요</span><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=%EB%AF%B8%EC%84%B8%EB%A8%BC%EC%A7%80&action=edit&section=1" title="부분 편집: 개요">편집</a><span class="mw-editsection-bracket">]</span></span></h2>
<p>인체에 큰 영향을 미치는 물질이다. <a href="/wiki/1948%EB%85%84" title="1948년">1948년</a> 미국 <a href="/wiki/%ED%8E%9C%EC%8B%A4%EB%B2%A0%EC%9D%B4%EB%8B%88%EC%95%84%EC%A3%BC" title="펜실베이니아주">펜실베이니아주</a> 도노라에서 20명이 사망한 대기오염사고, <a href="/wiki/1952%EB%85%84" title="1952년">1952년</a> 약 4,100명의 사망자를 발생시킨 <a href="/wiki/%EA%B7%B8%EB%A0%88%EC%9D%B4%ED%8A%B8_%EC%8A%A4%EB%AA%A8%EA%B7%B8" title="그레이트 스모그">런던스모그</a>는 미세먼지가 인체에 어떤 영향을 미치는지 보여 주는 대표적인 사례이다. 그 이후로 미세먼지가 인체에 미치는 영향에 대한 다양한 역학조사가 실시되었고, 특히 10μm 이하의 미세먼지 입자(PM10)가 취약집단의 질병발생률과 사망률을 높이는 등 인체에 해로운 영향을 미칠 가능성이 높다는 것이 밝혀졌다. 이후 각 국에서 대기오염대책이 마련되었으며, 미세먼지가 인체와 환경에 미치는 해로운 영향을 줄이기 위해 대기오염기준도 마련하였다. 미세먼지는 입자의 크기에 따라 50µm 이하인 총먼지(TPS, TOTAL SUSPENDED PARTICLES)와 입자 크기가 매우 작은 미세먼지로 구분한다. 미세먼지는 지름이 10µm 보다 작은 미세먼지(PM10)와 지름이 2.5µm보다 작은 미세먼지(PM2.5)로 나뉜다.
</p><p>공기 속에 입자상물질(고체나 액체상태)이 부유하고 있는 상태를 일반적으로 <a class="mw-redirect" href="/wiki/%EC%97%90%EC%96%B4%EB%A1%9C%EC%A1%B8" title="에어로졸">에어로졸</a>(Aerosol)이라 한다. 통상적으로 먼지라 말하고 있다.
</p>
<ul><li>먼지의 입도(粒度)범위는 0.001~1000μm이지만 70μm이상의 먼지는 발생 즉시 침강하므로 일반적으로 70μm 미만의 총먼지(TSP, Total Suspended Particle)라 한다.</li>
<li>0.1μm 이하의 먼지입경을 초범위(ultra range)라 하며, 대부분의 먼지는 0.1~10μm 사이에 분포하게 된다. 0.1~1μm 범위의 입자는 입경분포의 특성상 침강이나 응집이 쉽지 않기 때문에 대기 중에 체류시간이 길고 폐포(肺胞)에 침투가 가장 용이하다.</li>
<li>0.5μm 크기의 입자는 빛의 산란효과가 가장 커서 시정감소 등의 원인이 되기도 한다.</li></ul>
<h2><span id=".EB.A8.BC.EC.A7.80.EB.93.A4.EC.9D.98_.EB.B6.84.EB.A5.98"></span><span class="mw-headline" id="먼지들의_분류">먼지들의 분류</span><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=%EB%AF%B8%EC%84%B8%EB%A8%BC%EC%A7%80&action=edit&section=2" title="부분 편집: 먼지들의 분류">편집</a><span class="mw-editsection-bracket">]</span></span></h2>
<h3><span id="PM-10_.2810.CE.BCm_.EB.AF.B8.EB.A7.8C_.EC.9E.85.EC.9E.90.29"></span><span class="mw-headline" id="PM-10_(10μm_미만_입자)">PM-10 (10μm 미만 입자)</span><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=%EB%AF%B8%EC%84%B8%EB%A8%BC%EC%A7%80&action=edit&section=3" title="부분 편집: PM-10 (10μm 미만 입자)">편집</a><span class="mw-editsection-bracket">]</span></span></h3>
<p>입자의 크기가 10μm 미만인 먼지를 말한다. 국가에서 환경기준으로 연평균 50㎍/㎥ , 24시간 평균 100㎍/㎥를 기준으로 하고 있다. 인체의 폐포까지 침투하여 각종 호흡기 질환의 직접적인 원인이 되며, 인체의 면역 기능을 악화시킨다. 세계보건기구(WHO) 가이드라인으로는 연평균 20㎍/㎥, 24시간 평균 50㎍/㎥으로 설정되어있으며, 개발도상국의 경우 연평균 70㎍/㎥ 정도라고 한다.
</p>
<h3><span id="PM-2.5_.282.5.CE.BCm_.EB.AF.B8.EB.A7.8C_.EC.9E.85.EC.9E.90.29"></span><span class="mw-headline" id="PM-2.5_(2.5μm_미만_입자)">PM-2.5 (2.5μm 미만 입자)</span><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=%EB%AF%B8%EC%84%B8%EB%A8%BC%EC%A7%80&action=edit&section=4" title="부분 편집: PM-2.5 (2.5μm 미만 입자)">편집</a><span class="mw-editsection-bracket">]</span></span></h3>
<p>입자의 크기가 2.5μm 미만인 먼지를 말한다. 이것을 초미세먼지라고 한다.
입자의 크기가 작을수록 건강에 미치는 영향이 크다는 결과에 따라 선진국에서 미세입자에 대한 기준을 90년대 후반부터 도입하기 시작했다.
</p><p>대한민국은 연평균 15㎍/㎥, 24시간 평균 35㎍/㎥의 기준을 발표하였으며, 미국은 연평균 15㎍/㎥, 24시간 평균 35㎍/㎥의 기준을 설정하였다. 세계보건기구(WHO) 가이드라인으로는 연평균 10㎍/㎥, 24시간 평균 25㎍/㎥으로 설정되어있다.
</p>
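The standards above amount to a simple threshold comparison. Below is a minimal sketch using the 24-hour limit values quoted in this section; the dictionary layout, helper function, and example values are illustrative assumptions, not an official API.

```python
# Minimal sketch: compare a measured 24-hour average against the limits quoted above.
# The threshold table and helper are illustrative assumptions, not an official API.

DAILY_LIMITS_UG_M3 = {
    "PM10": {"KR": 100, "WHO": 50},   # 24-hour averages, µg/m³
    "PM2.5": {"KR": 35, "WHO": 25},
}

def exceeds_daily_limit(pollutant, value_ug_m3, standard="WHO"):
    """Return True if a 24-hour average concentration exceeds the chosen limit."""
    return value_ug_m3 > DAILY_LIMITS_UG_M3[pollutant][standard]

print(exceeds_daily_limit("PM2.5", 60, "WHO"))  # True: 60 µg/m³ > 25 µg/m³
print(exceeds_daily_limit("PM10", 60, "KR"))    # False: 60 µg/m³ <= 100 µg/m³
```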
<h3><span id="TSP_.28Total_suspended_Particles.2C_.EC.B4.9D_.EB.B6.80.EC.9C.A0_.EC.9E.85.EC.9E.90.29"></span><span class="mw-headline" id="TSP_(Total_suspended_Particles,_총_부유_입자)">TSP (Total suspended Particles, 총 부유 입자)</span><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=%EB%AF%B8%EC%84%B8%EB%A8%BC%EC%A7%80&action=edit&section=5" title="부분 편집: TSP (Total suspended Particles, 총 부유 입자)">편집</a><span class="mw-editsection-bracket">]</span></span></h3>
<p>총부유분진 또는 총부유입자상 물질 또는 총입자상 물질이라고 하며, 통상적으로 50μm 이하의 모든 부유 먼지를 말한다. 입자의 크기가 10μm이상인 경우에는 도시미관에 영향을 미치긴 하지만 인체의 건강에는 영향이 적기 때문에 90년대 후반 TSP 에서 PM-10으로 환경기준을 변경하였다.
</p>
<h2><span id=".EB.B0.9C.EC.83.9D_.EC.9B.90.EC.9D.B8"></span><span class="mw-headline" id="발생_원인">발생 원인</span><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=%EB%AF%B8%EC%84%B8%EB%A8%BC%EC%A7%80&action=edit&section=6" title="부분 편집: 발생 원인">편집</a><span class="mw-editsection-bracket">]</span></span></h2>
<p>미세먼지의 배출원인은 인위적인 발생과 자연적인 발생으로 구분된다. 인위적인 발생의 원인은 중국발 미세먼지, 공장에서 나오는 매연 쓰레기소각, 가정에서 생선이나 그 외의 것을 구울 때 등이 이유가 될 수 있다.자연발생원인은 모래바람의 먼지, 화산재, 산불이 일 때 발생하는 먼지 등 때문이다. 해염입자 또한 바다 가까이에 위치한 지역에는 많은 영향을 미친다.
</p>
<h2><span id=".EB.AF.B8.EC.84.B8.EB.A8.BC.EC.A7.80_.EA.B5.AC.EC.84.B1_.EC.84.B1.EB.B6.84"></span><span class="mw-headline" id="미세먼지_구성_성분">미세먼지 구성 성분</span><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=%EB%AF%B8%EC%84%B8%EB%A8%BC%EC%A7%80&action=edit&section=7" title="부분 편집: 미세먼지 구성 성분">편집</a><span class="mw-editsection-bracket">]</span></span></h2>
<p>미세먼지의 구성 성분은 질산염과 황산염 등이 58.3%, 탄소류와 검댕 16.8%, 광물 6.3%, 기타 18.6%로 이루어져있다.<sup class="reference" id="cite_ref-3"><a href="#cite_note-3">[3]</a></sup><sup class="reference" id="cite_ref-4"><a href="#cite_note-4">[4]</a></sup>
</p>
<h2><span id=".EC.A7.88.EB.B3.91"></span><span class="mw-headline" id="질병">질병</span><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=%EB%AF%B8%EC%84%B8%EB%A8%BC%EC%A7%80&action=edit&section=8" title="부분 편집: 질병">편집</a><span class="mw-editsection-bracket">]</span></span></h2>
<h3><span id=".EB.85.B8.EC.9D.B8.EC.82.AC.EB.A7.9D.EB.A5.A0_.EC.A6.9D.EA.B0.80"></span><span class="mw-headline" id="노인사망률_증가">노인사망률 증가</span><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=%EB%AF%B8%EC%84%B8%EB%A8%BC%EC%A7%80&action=edit&section=9" title="부분 편집: 노인사망률 증가">편집</a><span class="mw-editsection-bracket">]</span></span></h3>
<p>2009년 <a href="/wiki/%EA%B5%AD%EB%A6%BD%ED%99%98%EA%B2%BD%EA%B3%BC%ED%95%99%EC%9B%90" title="국립환경과학원">국립환경과학원</a>과 인하대 연구팀의 미세먼지와 사망률 연구 결과, 서울에서 미세먼지(PM10) 농도가 ㎥당 10㎍(100만분의 1g) 증가할 때마다 65살 이상 노인 등 대기오염에 민감한 집단의 사망률은 0.4%씩 증가하는 것으로 파악했다. 초미세먼지(PM2.5) 의 영향은 더 커서 10㎍/㎥ 증가할 때마다 민감집단의 사망률은 1.1% 늘어나는 것으로 추정했다.
</p>
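Taken at face value, these figures describe a linear dose-response. The sketch below simply scales them; the assumption that the per-10 µg/m³ rates extrapolate linearly is made here for illustration only and is not part of the cited study.

```python
# Illustrative only: linear scaling of the per-10-µg/m³ mortality figures above.
RATE_PER_10UG = {"PM10": 0.004, "PM2.5": 0.011}  # sensitive groups (e.g. age 65+)

def mortality_increase(pollutant, delta_ug_m3):
    """Estimated fractional mortality increase for a concentration rise, assuming linearity."""
    return RATE_PER_10UG[pollutant] * (delta_ug_m3 / 10.0)

print(f"{mortality_increase('PM2.5', 30):.1%}")  # 3.3% for a 30 µg/m³ rise
```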
<h3><span id=".EC.9E.84.EC.82.B0.EB.B6.80.EC.99.80_.ED.83.9C.EC.95.84"></span><span class="mw-headline" id="임산부와_태아">임산부와 태아</span><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=%EB%AF%B8%EC%84%B8%EB%A8%BC%EC%A7%80&action=edit&section=10" title="부분 편집: 임산부와 태아">편집</a><span class="mw-editsection-bracket">]</span></span></h3>
<p>이화여대 의대 <a class="new" href="/w/index.php?title=%ED%95%98%EC%9D%80%ED%9D%AC&action=edit&redlink=1" title="하은희 (없는 문서)">하은희</a> 교수팀의 연구 결과 미세먼지 농도가 10㎍/㎥ 올라가면 저체중아 출산 위험이 5.2%에서 7.4%까지 높아지고, 임신 4~9개월 사이의 사산 위험도 8.0~13.8%까지 올라가는 것으로 조사됐다.<sup class="reference" id="cite_ref-5"><a href="#cite_note-5">[5]</a></sup>
</p><p>2009년 <a class="new" href="/w/index.php?title=%EC%96%91%EC%82%B0%EB%B6%80%EC%82%B0%EB%8C%80%EB%B3%91%EC%9B%90&action=edit&redlink=1" title="양산부산대병원 (없는 문서)">양산부산대병원</a> 산업의학 전문의, 대기과학 및 지리정보시스템 전문가들이 공동으로 연구를 진행한 결과, 미세먼지(PM10, 직경이 10μm 이하의 먼지) 농도가 저체중아 출산 및 사산, 기형아 발생과 밀접한 관계가 있는 것으로 조사됐다.<sup class="reference" id="cite_ref-6"><a href="#cite_note-6">[6]</a></sup>
</p><p><a href="/wiki/%EA%B5%AD%EA%B2%BD%EC%97%86%EB%8A%94%EC%9D%98%EC%82%AC%ED%9A%8C" title="국경없는의사회">국경없는의사회</a>(MSF)의 1998년 조사 결과 <a href="/wiki/%ED%88%AC%EB%A5%B4%ED%81%AC%EB%A9%94%EB%8B%88%EC%8A%A4%ED%83%84" title="투르크메니스탄">투르크메니스탄</a>의 <a href="/wiki/%EC%95%84%EB%9E%84%ED%95%B4" title="아랄해">아랄해</a> 인접지역은 먼지 퇴적률이 아주 높았으며 살충제의 오염도 심한 것으로 나왔다. 2000~2001년 카라칼파크 지역의 먼지와 호흡기 질환의 상관관계 조사에서는 건강에 위협적인 미세먼지가 전체 먼지 가운데 14~53%에 이르는 것으로 나타났으며, 이 지역 어린이들의 폐활량 등 폐기능이 유럽 어린이에 비해 현저히 낮은 것으로 나타났다.<sup class="reference" id="cite_ref-7"><a href="#cite_note-7">[7]</a></sup>
</p><p>미국의 한 대학병원이 아동 천7백 명을 조사한 연구를 보면, 미세먼지 농도가 짙은 지역에서 태어난 아이들은 그렇지 않은 지역에서 태어난 아이들보다 폐활량이 정상의 80%에 못 미치는 '폐 기능장애'를 겪을 가능성이 커지는 것으로 조사됐다. 이런 사실 때문에 전문가들은 미세먼지를 '조용한 살인자'라고 부른다.<sup class="reference" id="cite_ref-8"><a href="#cite_note-8">[8]</a></sup>
</p>
<h3><span id=".EC.B2.9C.EC.8B.9D"></span><span class="mw-headline" id="천식">천식</span><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=%EB%AF%B8%EC%84%B8%EB%A8%BC%EC%A7%80&action=edit&section=11" title="부분 편집: 천식">편집</a><span class="mw-editsection-bracket">]</span></span></h3>
<p>사람의 폐포까지 깊숙하게 침투해 기관지와 폐에 쌓인 미세먼지는 각종 호흡기 질환의 직접 원인이 되며 몸의 면역 기능을 떨어뜨린다. 천식과 호흡곤란을 일으키며 장거리 이동으로 비 또는 눈속의 중금속 농도를 증가시키기도 한다. 또한 대기 중에 부유하면서 빛을 흡수, 산란시키기 때문에 시야를 악화시키기도 하고, 식물의 잎 표면에 쌓여 광합성 동화작용, 호흡작용과 증산작용 등을 저해하여 식물 성장에도 나쁜 영향을 미친다. 또한 여성의 사망원인중 88%가 조리 과정에서 발생한 미세먼지에 의한 사망이라고 한다.
</p><p><a class="mw-redirect" href="/wiki/%ED%95%9C%EA%B5%AD%ED%99%98%EA%B2%BD%EC%A0%95%EC%B1%85%ED%8F%89%EA%B0%80%EC%97%B0%EA%B5%AC%EC%9B%90" title="한국환경정책평가연구원">한국환경정책평가연구원</a> 조승헌 박사팀의 연구결과에 따르면, 미세먼지를 10∼30% 감축하면 수도권의 관련 질환 사망자 수가 해마다 40∼120명 줄어들고 심장 및 호흡기 질환 건수는 연간 2800∼8300건 줄일 수 있는 것으로 전망했다. 또 심장 및 호흡기계통 질환과 관련된 의료비용 등을 토대로 미세먼지 감축으로 인한 이익을 계산한 결과 연간 80억∼1200억원에 이르는 것으로 풀이했다.
</p>
<h3><span id=".EB.91.90.ED.86.B5"></span><span class="mw-headline" id="두통">두통</span><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=%EB%AF%B8%EC%84%B8%EB%A8%BC%EC%A7%80&action=edit&section=12" title="부분 편집: 두통">편집</a><span class="mw-editsection-bracket">]</span></span></h3>
<p>무연탄을 태울 때 나오는 신경계 독성물질인 납이나 비소, 아연 등 유해 중금속 농도가 높은 미세먼지를 마시면 멀쩡하던 사람도 기침하게 되고 목이 아프고, 피부 트러블을 일으키기도 한다. 머리가 굉장히 아프거나 어지러움, 호흡곤란 등이 생긴다.
<sup class="reference" id="cite_ref-9"><a href="#cite_note-9">[9]</a></sup>
</p><p>대부분의 미세먼지가 치명적이지만 그중에서도 <a class="new" href="/w/index.php?title=%ED%99%A9%EC%82%B0%EC%9D%B4%EC%98%A8&action=edit&redlink=1" title="황산이온 (없는 문서)">황산이온</a>이나 <a class="new" href="/w/index.php?title=%EC%A7%88%EC%82%B0%EC%9D%B4%EC%98%A8&action=edit&redlink=1" title="질산이온 (없는 문서)">질산이온</a> 등은 <a href="/wiki/%ED%99%A9%EC%82%AC" title="황사">황사</a> 속 먼지와 흡착되면서 산화물로 변해 호흡과 함께 폐로 들어가게 된다. 이 물질이 폐로 들어가면 염증을 일으키는데, <a href="/wiki/%EA%B8%B0%EA%B4%80%EC%A7%80%EC%97%BC" title="기관지염">기관지염</a>이나 <a href="/wiki/%EC%B2%9C%EC%8B%9D" title="천식">천식</a>, <a class="mw-redirect" href="/wiki/%EB%A7%8C%EC%84%B1%ED%8F%90%EC%87%84%EC%84%B1%ED%8F%90%EC%A7%88%ED%99%98" title="만성폐쇄성폐질환">만성폐쇄성폐질환</a>(COPD)이 대표적이다. 이런 물질들은 <a href="/wiki/%EB%B0%B1%ED%98%88%EA%B5%AC" title="백혈구">백혈구</a>를 자극해 혈관벽에도 염증을 일으킬 수 있다. 이렇게 되면 전형적인 혈관질환인 <a class="mw-redirect" href="/wiki/%EB%8F%99%EB%A7%A5%EA%B2%BD%ED%99%94" title="동맥경화">동맥경화</a>, <a href="/wiki/%EB%87%8C%EA%B2%BD%EC%83%89" title="뇌경색">뇌경색</a>, <a class="mw-redirect" href="/wiki/%EC%8B%AC%EA%B7%BC%EA%B2%BD%EC%83%89" title="심근경색">심근경색</a> 등을 유발할 수 있다.
</p>
<h3><span id=".EC.95.84.ED.86.A0.ED.94.BC"></span><span class="mw-headline" id="아토피">아토피</span><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=%EB%AF%B8%EC%84%B8%EB%A8%BC%EC%A7%80&action=edit&section=13" title="부분 편집: 아토피">편집</a><span class="mw-editsection-bracket">]</span></span></h3>
<p>모공보다 더 작은 초미세먼지는 모공으로 침투해 <a href="/wiki/%EC%95%84%ED%86%A0%ED%94%BC" title="아토피">아토피</a> 등 <a href="/wiki/%ED%94%BC%EB%B6%80%EC%97%BC" title="피부염">피부염</a>의 원인이 되기 때문에 여드름이 있거나 아토피가 있는 사람들 역시 황사가 온다는 예보에는 야외활동을 자제하는 것이 좋다.
</p>
<h3><span id=".EC.9D.B8.EC.8A.90.EB.A6.B0_.EC.A0.80.ED.95.AD.EC.84.B1"></span><span class="mw-headline" id="인슐린_저항성">인슐린 저항성</span><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=%EB%AF%B8%EC%84%B8%EB%A8%BC%EC%A7%80&action=edit&section=14" title="부분 편집: 인슐린 저항성">편집</a><span class="mw-editsection-bracket">]</span></span></h3>
<p>대기오염 미세먼지의 주성분인 노년여성의 인슐린 저항성을 높인다는 연구 결과가 나왔다. 인슐린 저항성(IR)은 혈당을 낮추는 인슐린의 기 혈당을 효과적으로 사용하지 못해 대사증후군은 물론 심장병·당뇨병 등까지 초래할 수 있다.
<sup class="reference" id="cite_ref-10"><a href="#cite_note-10">[10]</a></sup>
</p>
<h2><span id=".EC.98.88.EB.B0.A9.EA.B3.BC_.EB.8C.80.EC.B1.85"></span><span class="mw-headline" id="예방과_대책">예방과 대책</span><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=%EB%AF%B8%EC%84%B8%EB%A8%BC%EC%A7%80&action=edit&section=15" title="부분 편집: 예방과 대책">편집</a><span class="mw-editsection-bracket">]</span></span></h2>
<ul><li>사전에 미세먼지 농도를 확인한 후, 농도에 따라 활동범위를 정한다.</li>
<li>어린이, 노인, 폐질환 및 심장질환자 등 민감군은 실외 활동을 제한하고 그렇지 않은 사람들은 장시간 또는 무리한 실외 활동을 줄인다.</li>
<li>미세먼지로부터 보호할 수 있는 가장 간단하고 보편적인 방법은 미세먼지 차단 마스크를 착용하는 것이다. 미세먼지 차단 성능이 있는 마스크는 제품 포장에 '의약외품'이라는 문자와 KF80, KF94, KF99 등이 표시되어 있다. KF80, KF94, KF99는 입자차단 성능을 나타내는데 KF80은 평균 0.6μm 크기의 미세입자를 80퍼센트 이상 걸러낼 수 있으며, KF94, KF99는 0.4μm 크기의 미세입자를 94퍼센트, 99퍼센트 이상 각각 걸러낼 수 있다.</li>
<li>전 지구적인 문제이므로 각 나라의 수장들이 모여 정책을 마련할 필요가 있다.</li></ul>
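The KF grades described in the list above reduce to a small lookup table. This is a minimal sketch: only the particle sizes and minimum filtration rates come from the text, while the dictionary layout and helper function are illustrative assumptions.

```python
# Sketch of the KF mask grades described above; the helper itself is illustrative.
KF_GRADES = {
    "KF80": {"test_particle_um": 0.6, "min_filtration": 0.80},
    "KF94": {"test_particle_um": 0.4, "min_filtration": 0.94},
    "KF99": {"test_particle_um": 0.4, "min_filtration": 0.99},
}

def grades_meeting(min_filtration):
    """Return the KF grades whose rated filtration is at least `min_filtration`."""
    return [g for g, spec in KF_GRADES.items() if spec["min_filtration"] >= min_filtration]

print(grades_meeting(0.90))  # ['KF94', 'KF99']
```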
<h2><span id=".EA.B0.99.EC.9D.B4_.EB.B3.B4.EA.B8.B0"></span><span class="mw-headline" id="같이_보기">같이 보기</span><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=%EB%AF%B8%EC%84%B8%EB%A8%BC%EC%A7%80&action=edit&section=16" title="부분 편집: 같이 보기">편집</a><span class="mw-editsection-bracket">]</span></span></h2>
<ul><li><a href="/wiki/%EB%A8%BC%EC%A7%80" title="먼지">먼지</a></li>
<li><a href="/wiki/%EB%8C%80%EA%B8%B0%EC%A7%88_%EC%A7%80%EC%88%98" title="대기질 지수">대기질 지수</a></li>
<li><a class="new" href="/w/index.php?title=%EC%84%9D%EC%9C%A0%EC%BD%94%ED%81%AC&action=edit&redlink=1" title="석유코크 (없는 문서)">석유코크</a>(페트코크, <a class="extiw" href="https://en.wikipedia.org/wiki/Petroleum_coke" title="en:Petroleum coke">en:Petroleum coke</a>, petcoke)</li></ul>
<h2><span id=".EA.B0.81.EC.A3.BC"></span><span class="mw-headline" id="각주">각주</span><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=%EB%AF%B8%EC%84%B8%EB%A8%BC%EC%A7%80&action=edit&section=17" title="부분 편집: 각주">편집</a><span class="mw-editsection-bracket">]</span></span></h2>
<div class="reflist" style="list-style-type: decimal;">
<div class="mw-references-wrap"><ol class="references">
<li id="cite_note-1"><span class="mw-cite-backlink"><a href="#cite_ref-1">↑</a></span> <span class="reference-text"><a class="external text" href="http://gmao.gsfc.nasa.gov/research/aerosol/modeling/nr1_movie/" rel="nofollow">GMAO – Research</a></span>
</li>
<li id="cite_note-2"><span class="mw-cite-backlink"><a href="#cite_ref-2">↑</a></span> <span class="reference-text"><a class="external text" href="http://gmao.gsfc.nasa.gov/research/aerosol/" rel="nofollow">GMAO – Research</a></span>
</li>
<li id="cite_note-3"><span class="mw-cite-backlink"><a href="#cite_ref-3">↑</a></span> <span class="reference-text"><cite class="citation news"><a class="external text" href="http://kormedi.com/1254659/11%ec%9b%94-%eb%af%b8%ec%84%b8-%eb%a8%bc%ec%a7%80-%ec%8a%b5%ea%b2%a9-%ec%a4%91%ea%b5%ad-%ec%95%84%eb%8b%8c-%ea%b5%ad%eb%82%b4-%ec%98%81%ed%96%a5/" rel="nofollow">“11월 미세 먼지 습격, 중국 아닌 국내 영향 컸다.”</a>.</cite><span class="Z3988" title="ctx_ver=Z39.88-2004&rfr_id=info%3Asid%2Fko.wikipedia.org%3A%EB%AF%B8%EC%84%B8%EB%A8%BC%EC%A7&rft.atitle=11%EC%9B%94+%EB%AF%B8%EC%84%B8+%EB%A8%BC%EC%A7+%EC%8A%B5%EA%B2%A9%2C+%EC%A4%91%EA%B5+%EC%95%84%EB%8C+%EA%B5%EB%82%B4+%EC%98%81%ED%96%A5+%EC%BB%B8%EB%A4.&rft.genre=article&rft_id=http%3A%2F%2Fkormedi.com%2F1254659%2F11%25ec%259b%2594-%25eb%25af%25b8%25ec%2584%25b8-%25eb%25a8%25bc%25ec%25a7%2580-%25ec%258a%25b5%25ea%25b2%25a9-%25ec%25a4%2591%25ea%25b5%25ad-%25ec%2595%2584%25eb%258b%258c-%25ea%25b5%25ad%25eb%2582%25b4-%25ec%2598%2581%25ed%2596%25a5%2F&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal"><span style="display:none;"> </span></span></span>
</li>
<li id="cite_note-4"><span class="mw-cite-backlink"><a href="#cite_ref-4">↑</a></span> <span class="reference-text"><cite class="citation news"><a class="external text" href="http://hellodd.com/?md=news&mt=view&pid=65607" rel="nofollow">“<span style="padding-left:0.2em;">'</span>라돈·케모포비아' 공포···출연연 '융합연구' 해결 나섰다.”</a>.</cite><span class="Z3988" title="ctx_ver=Z39.88-2004&rfr_id=info%3Asid%2Fko.wikipedia.org%3A%EB%AF%B8%EC%84%B8%EB%A8%BC%EC%A7&rft.atitle=%27%EB%9D%BC%EB%8F%88%B7%EC%BC%EB%AA%A8%ED%8F%AC%EB%B9%84%EC%95%84%27+%EA%B3%B5%ED%8F%AC%B7%B7%B7%EC%B6%9C%EC%97%B0%EC%97%B0+%27%EC%9C%B5%ED%95%A9%EC%97%B0%EA%B5%AC%27+%ED%95%B4%EA%B2%B0+%EB%82%98%EC%84%B0%EB%A4.&rft.genre=article&rft_id=http%3A%2F%2Fhellodd.com%2F%3Fmd%3Dnews%26mt%3Dview%26pid%3D65607&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal"><span style="display:none;"> </span></span></span>
</li>
<li id="cite_note-5"><span class="mw-cite-backlink"><a href="#cite_ref-5">↑</a></span> <span class="reference-text"><cite class="citation news">송창석 (2013년 11월 19일). <a class="external text" href="http://www.hani.co.kr/arti/society/environment/611890.html" rel="nofollow">“중국발 초미세먼지, 엄마 뱃속 태아까지 위협한다”</a>. 《<a href="/wiki/%ED%95%9C%EA%B2%A8%EB%A0%88" title="한겨레">한겨레</a>》.</cite><span class="Z3988" title="ctx_ver=Z39.88-2004&rfr_id=info%3Asid%2Fko.wikipedia.org%3A%EB%AF%B8%EC%84%B8%EB%A8%BC%EC%A7&rft.atitle=%EC%A4%91%EA%B5%EB%B0%9C+%EC%B4%88%EB%AF%B8%EC%84%B8%EB%A8%BC%EC%A7%2C+%EC%97%84%EB%A7%88+%EB%B1%83%EC%86+%ED%83%9C%EC%95%84%EA%B9%8C%EC%A7+%EC%9C%84%ED%98%91%ED%95%9C%EB%A4&rft.au=%EC%86%A1%EC%B0%BD%EC%84%9D&rft.date=2013-11-19&rft.genre=article&rft.jtitle=%ED%95%9C%EA%B2%A8%EB%A0%88&rft_id=http%3A%2F%2Fwww.hani.co.kr%2Farti%2Fsociety%2Fenvironment%2F611890.html&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal"><span style="display:none;"> </span></span></span>
</li>
<li id="cite_note-6"><span class="mw-cite-backlink"><a href="#cite_ref-6">↑</a></span> <span class="reference-text"><cite class="citation news">민영규 (2009년 9월 24일). <a class="external text" href="http://news.naver.com/main/read.nhn?mode=LSD&mid=sec&sid1=102&oid=001&aid=0002881939" rel="nofollow">“부산MBC, 26일 특별기획 '미세먼지의 비밀' 방영”</a>. 《<a href="/wiki/%EC%97%B0%ED%95%A9%EB%89%B4%EC%8A%A4" title="연합뉴스">연합뉴스</a>》.</cite><span class="Z3988" title="ctx_ver=Z39.88-2004&rfr_id=info%3Asid%2Fko.wikipedia.org%3A%EB%AF%B8%EC%84%B8%EB%A8%BC%EC%A7&rft.atitle=%EB%B6%EC%82%B0MBC%2C+26%EC%9D%BC+%ED%8A%B9%EB%B3%84%EA%B8%B0%ED%9A+%27%EB%AF%B8%EC%84%B8%EB%A8%BC%EC%A7%EC%9D%98+%EB%B9%84%EB%B0%27+%EB%B0%A9%EC%98%81&rft.au=%EB%AF%BC%EC%98%81%EA%B7%9C&rft.date=2009-09-24&rft.genre=article&rft.jtitle=%EC%97%B0%ED%95%A9%EB%89%B4%EC%8A%A4&rft_id=http%3A%2F%2Fnews.naver.com%2Fmain%2Fread.nhn%3Fmode%3DLSD%26mid%3Dsec%26sid1%3D102%26oid%3D001%26aid%3D0002881939&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal"><span style="display:none;"> </span></span></span>
</li>
<li id="cite_note-7"><span class="mw-cite-backlink"><a href="#cite_ref-7">↑</a></span> <span class="reference-text"><cite class="citation news">김학준 (2002년 12월 31일). <a class="external text" href="http://legacy.www.hani.co.kr/section-005100007/2002/12/005100007200212312045128.html" rel="nofollow">“오염먼지 쌓여 결핵·빈혈로 '시름<span style="padding-right:0.2em;">'</span>”</a>. 《<a href="/wiki/%ED%95%9C%EA%B2%A8%EB%A0%88" title="한겨레">한겨레</a>》.</cite><span class="Z3988" title="ctx_ver=Z39.88-2004&rfr_id=info%3Asid%2Fko.wikipedia.org%3A%EB%AF%B8%EC%84%B8%EB%A8%BC%EC%A7&rft.atitle=%EC%98%A4%EC%97%BC%EB%A8%BC%EC%A7+%EC%8C%93%EC%97%AC+%EA%B2%B0%ED%95%B5%B7%EB%B9%88%ED%98%88%EB%A1%9C+%27%EC%9C%EB%A6%84%27&rft.au=%EA%B9%ED%95%99%EC%A4&rft.date=2002-12-31&rft.genre=article&rft.jtitle=%ED%95%9C%EA%B2%A8%EB%A0%88&rft_id=http%3A%2F%2Flegacy.www.hani.co.kr%2Fsection-005100007%2F2002%2F12%2F005100007200212312045128.html&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal"><span style="display:none;"> </span></span></span>
</li>
<li id="cite_note-8"><span class="mw-cite-backlink"><a href="#cite_ref-8">↑</a></span> <span class="reference-text"><cite class="citation news">한세현 (2013년 12월 7일). <a class="external text" href="http://news.sbs.co.kr/news/endPage.do?news_id=N1002121023" rel="nofollow">“<span style="padding-left:0.2em;">"</span>미세먼지 임신부와 태아에 특히 더 위험<span style="padding-right:0.2em;">"</span>”</a>. 《<a class="mw-disambig" href="/wiki/SBS" title="SBS">SBS</a>》.</cite><span class="Z3988" title="ctx_ver=Z39.88-2004&rfr_id=info%3Asid%2Fko.wikipedia.org%3A%EB%AF%B8%EC%84%B8%EB%A8%BC%EC%A7&rft.atitle=%22%EB%AF%B8%EC%84%B8%EB%A8%BC%EC%A7+%EC%9E%84%EC%A0%EB%B6%EC%99+%ED%83%9C%EC%95%84%EC%97%90+%ED%8A%B9%ED%9E%88+%EB%94+%EC%9C%84%ED%97%98%22&rft.au=%ED%95%9C%EC%84%B8%ED%98%84&rft.date=2013-12-07&rft.genre=article&rft.jtitle=SBS&rft_id=http%3A%2F%2Fnews.sbs.co.kr%2Fnews%2FendPage.do%3Fnews_id%3DN1002121023&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal"><span style="display:none;"> </span></span></span>
</li>
<li id="cite_note-9"><span class="mw-cite-backlink"><a href="#cite_ref-9">↑</a></span> <span class="reference-text"><cite class="citation news">박현갑 (2013년 11월 27일). <a class="external text" href="http://www.seoul.co.kr/news/newsView.php?id=20131127031010" rel="nofollow">“한반도를 엄습하는 중국발 미세먼지”</a>. 《<a href="/wiki/%EC%84%9C%EC%9A%B8%EC%8B%A0%EB%AC%B8" title="서울신문">서울신문</a>》.</cite><span class="Z3988" title="ctx_ver=Z39.88-2004&rfr_id=info%3Asid%2Fko.wikipedia.org%3A%EB%AF%B8%EC%84%B8%EB%A8%BC%EC%A7&rft.atitle=%ED%95%9C%EB%B0%98%EB%8F%84%EB%A5%BC+%EC%97%84%EC%8A%B5%ED%95%98%EB%8A%94+%EC%A4%91%EA%B5%EB%B0%9C+%EB%AF%B8%EC%84%B8%EB%A8%BC%EC%A7&rft.au=%EB%B0%95%ED%98%84%EA%B0%91&rft.date=2013-11-27&rft.genre=article&rft.jtitle=%EC%84%9C%EC%9A%B8%EC%A0%EB%AC%B8&rft_id=http%3A%2F%2Fwww.seoul.co.kr%2Fnews%2FnewsView.php%3Fid%3D20131127031010&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal"><span style="display:none;"> </span></span></span>
</li>
<li id="cite_note-10"><span class="mw-cite-backlink"><a href="#cite_ref-10">↑</a></span> <span class="reference-text"><cite class="citation news">이주영 (2015년 3월 23일). <a class="external text" href="http://www.yonhapnews.co.kr/bulletin/2015/03/23/0200000000AKR20150323110800017.HTML" rel="nofollow">“<span style="padding-left:0.2em;">"</span>미세먼지 주성분 PAH, 과체중 노년여성 건강위협<span style="padding-right:0.2em;">"</span>”</a>. 《<a href="/wiki/%EC%97%B0%ED%95%A9%EB%89%B4%EC%8A%A4" title="연합뉴스">연합뉴스</a>》.</cite><span class="Z3988" title="ctx_ver=Z39.88-2004&rfr_id=info%3Asid%2Fko.wikipedia.org%3A%EB%AF%B8%EC%84%B8%EB%A8%BC%EC%A7&rft.atitle=%22%EB%AF%B8%EC%84%B8%EB%A8%BC%EC%A7+%EC%A3%BC%EC%84%B1%EB%B6%84+PAH%2C+%EA%B3%BC%EC%B2%B4%EC%A4%91+%EB%85%B8%EB%85%84%EC%97%AC%EC%84%B1+%EA%B1%B4%EA%B0%95%EC%9C%84%ED%98%91%22&rft.au=%EC%9D%B4%EC%A3%BC%EC%98%81&rft.date=2015-03-23&rft.genre=article&rft.jtitle=%EC%97%B0%ED%95%A9%EB%89%B4%EC%8A%A4&rft_id=http%3A%2F%2Fwww.yonhapnews.co.kr%2Fbulletin%2F2015%2F03%2F23%2F0200000000AKR20150323110800017.HTML&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal"><span style="display:none;"> </span></span></span>
</li>
</ol></div></div>
<h2><span id=".EC.99.B8.EB.B6.80_.EB.A7.81.ED.81.AC"></span><span class="mw-headline" id="외부_링크">외부 링크</span><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=%EB%AF%B8%EC%84%B8%EB%A8%BC%EC%A7%80&action=edit&section=18" title="부분 편집: 외부 링크">편집</a><span class="mw-editsection-bracket">]</span></span></h2>
<ul><li><a class="external text" href="https://web.archive.org/web/20150831085115/http://www.airkorea.or.kr/dustForecast" rel="nofollow">에어 코리아 - 대한민국 실시간 대기오염도</a></li></ul>
<div aria-labelledby="난방,_환기,_공기_조화" class="navbox" role="navigation" style="vertical-align: middle;;padding:3px"><table class="nowraplinks collapsible autocollapse navbox-inner" style="border-spacing:0;background:transparent;color:inherit"><tbody><tr><th class="navbox-title" colspan="2" scope="col"><div class="plainlinks hlist navbar mini"><ul><li class="nv-view"><a href="/wiki/%ED%8B%80:HVAC" title="틀:HVAC"><abbr style=";;background:none transparent;border:none;-moz-box-shadow:none;-webkit-box-shadow:none;box-shadow:none; padding:0;" title="이 틀을 보기">v</abbr></a></li><li class="nv-talk"><a class="new" href="/w/index.php?title=%ED%8B%80%ED%86%A0%EB%A1%A0:HVAC&action=edit&redlink=1" title="틀토론:HVAC (없는 문서)"><abbr style=";;background:none transparent;border:none;-moz-box-shadow:none;-webkit-box-shadow:none;box-shadow:none; padding:0;" title="이 틀에 대한 토론">d</abbr></a></li><li class="nv-edit"><a class="external text" href="https://ko.wikipedia.org/w/index.php?title=%ED%8B%80:HVAC&action=edit"><abbr style=";;background:none transparent;border:none;-moz-box-shadow:none;-webkit-box-shadow:none;box-shadow:none; padding:0;" title="이 틀을 편집하기">e</abbr></a></li><li class="nv-history"><a class="external text" href="https://ko.wikipedia.org/w/index.php?title=%ED%8B%80:HVAC&action=history"><abbr style=";;background:none transparent;border:none;-moz-box-shadow:none;-webkit-box-shadow:none;box-shadow:none; padding:0;" title="이 틀의 역사">h</abbr></a></li></ul></div><div id="난방,_환기,_공기_조화" style="font-size:114%;margin:0 4em"><a href="/wiki/%EA%B3%B5%EA%B8%B0%EC%A1%B0%ED%99%94%EA%B8%B0%EC%88%A0" title="공기조화기술">난방, 환기, 공기 조화</a></div></th></tr><tr><th class="navbox-group" scope="row" style="width:1%">기본 개념</th><td class="navbox-list navbox-odd hlist" style="text-align:left;border-left-width:2px;border-left-style:solid;width:100%;padding:0px;text-align: middle;"><div style="padding:0em 0.25em">
<ul><li><a href="/wiki/%EB%8C%80%EB%A5%98" title="대류">대류</a></li>
<li><a href="/wiki/%EC%97%94%ED%83%88%ED%94%BC" title="엔탈피">엔탈피</a></li>
<li><a href="/wiki/%EC%9C%A0%EC%B2%B4%EB%8F%99%EC%97%AD%ED%95%99" title="유체동역학">유체동역학</a></li>
<li><a href="/wiki/%EC%A0%84%EC%97%B4" title="전열">전열</a></li>
<li><a href="/wiki/%EC%8A%B5%EB%8F%84" title="습도">습도</a></li>
<li><a href="/wiki/%EC%9E%A0%EC%97%B4" title="잠열">잠열</a></li>
<li><a class="mw-selflink selflink">미세먼지</a></li>
<li><a href="/wiki/%EA%B5%B4%EB%9A%9D_%ED%9A%A8%EA%B3%BC" title="굴뚝 효과">굴뚝 효과</a></li>
<li><a href="/wiki/%EC%97%B4%EC%97%AD%ED%95%99" title="열역학">열역학</a></li>
<li><a href="/wiki/%EB%AC%BC%EC%9D%98_%EC%A6%9D%EA%B8%B0%EC%95%95" title="물의 증기압">물의 증기압</a></li></ul>
</div></td></tr><tr><th class="navbox-group" scope="row" style="width:1%">기술</th><td class="navbox-list navbox-even hlist" style="text-align:left;border-left-width:2px;border-left-style:solid;width:100%;padding:0px;text-align: middle;"><div style="padding:0em 0.25em">
<ul><li><a href="/wiki/%EA%B3%B5%EA%B8%B0%EC%A1%B0%ED%99%94" title="공기조화">공기조화</a></li>
<li><a href="/wiki/%EB%B6%80%EB%8F%99%EC%95%A1" title="부동액">부동액</a></li>
<li><a href="/wiki/%EC%A4%91%EC%95%99_%EB%82%9C%EB%B0%A9" title="중앙 난방">중앙 난방</a></li>
<li><a href="/wiki/%EB%83%89%EA%B0%81%EC%A0%9C" title="냉각제">냉각제</a></li>
<li><a href="/wiki/%EC%A0%84%EA%B8%B0%EB%82%9C%EB%A1%9C" title="전기난로">전기난로</a></li>
<li><a href="/wiki/%EA%B3%B5%EA%B8%B0%EC%A1%B0%ED%99%94%EA%B8%B0%EC%88%A0" title="공기조화기술">공기조화기술</a></li>
<li><a href="/wiki/%ED%8C%A8%EC%8B%9C%EB%B8%8C_%ED%95%98%EC%9A%B0%EC%8A%A4" title="패시브 하우스">패시브 하우스</a></li>
<li><a href="/wiki/%EB%83%89%EC%9E%A5" title="냉장">냉장</a></li>
<li><a href="/wiki/%ED%99%98%EA%B8%B0" title="환기">환기</a></li></ul>
</div></td></tr><tr><th class="navbox-group" scope="row" style="width:1%">구성 요소</th><td class="navbox-list navbox-odd hlist" style="text-align:left;border-left-width:2px;border-left-style:solid;width:100%;padding:0px;text-align: middle;"><div style="padding:0em 0.25em">
<ul><li><a href="/wiki/%EC%9D%B8%EB%B2%84%ED%84%B0" title="인버터">인버터</a></li>
<li><a class="new" href="/w/index.php?title=%EC%97%90%EC%96%B4_%EB%8F%84%EC%96%B4&action=edit&redlink=1" title="에어 도어 (없는 문서)">에어 도어</a></li>
<li><a href="/wiki/%EA%B3%B5%EA%B8%B0%EC%97%AC%EA%B3%BC%EA%B8%B0" title="공기여과기">공기여과기</a></li>
<li><a href="/wiki/%EA%B3%B5%EA%B8%B0%EC%B2%AD%EC%A0%95%EA%B8%B0" title="공기청정기">공기청정기</a></li>
<li><a href="/wiki/%EB%B3%B4%EC%9D%BC%EB%9F%AC" title="보일러">보일러</a></li>
<li><a href="/wiki/%EC%86%A1%ED%92%8D%EA%B8%B0" title="송풍기">송풍기</a></li>
<li><a href="/wiki/%EB%B3%B5%EC%88%98%EA%B8%B0" title="복수기">콘덴서</a></li>
<li><a href="/wiki/%EB%83%89%EA%B0%81%ED%83%91" title="냉각탑">냉각탑</a></li>
<li><a href="/wiki/%EC%A0%9C%EC%8A%B5%EA%B8%B0" title="제습기">제습기</a></li>
<li><a href="/wiki/%EB%B2%BD%EB%82%9C%EB%A1%9C" title="벽난로">벽난로</a></li>
<li><a href="/wiki/%ED%93%B8_%ED%9B%84%EB%93%9C" title="퓸 후드">퓸 후드</a></li>
<li><a href="/wiki/%EC%9A%94%EB%A1%9C" title="요로">요로</a></li>
<li><a href="/wiki/%EC%97%B4%EA%B5%90%ED%99%98%EA%B8%B0" title="열교환기">열교환기</a></li>
<li><a href="/wiki/%ED%9E%88%ED%8A%B8%ED%8C%8C%EC%9D%B4%ED%94%84" title="히트파이프">히트파이프</a></li>
<li><a href="/wiki/%EC%97%B4%ED%8E%8C%ED%94%84" title="열펌프">열펌프</a></li>
<li><a href="/wiki/HEPA" title="HEPA">HEPA</a></li>
<li><a href="/wiki/%EA%B0%80%EC%8A%B5%EA%B8%B0" title="가습기">가습기</a></li>
<li><a href="/wiki/%EC%84%A0%ED%92%8D%EA%B8%B0" title="선풍기">선풍기</a></li>
<li><a href="/wiki/%EA%B8%B0%EA%B3%84%EC%8B%A4" title="기계실">기계실</a></li>
<li><a href="/wiki/%EC%84%9D%EC%9C%A0%EB%82%9C%EB%A1%9C" title="석유난로">석유난로</a></li>
<li><a href="/wiki/%EB%83%89%EB%A7%A4" title="냉매">냉매</a></li>
<li><a href="/wiki/%ED%9E%88%ED%84%B0" title="히터">히터</a></li>
<li><a href="/wiki/%ED%8A%B8%EB%A1%AC%EB%B8%8C_%EB%B2%BD" title="트롬브 벽">트롬브 벽</a></li></ul>
</div></td></tr><tr><th class="navbox-group" scope="row" style="width:1%">측정<br/>및 제어</th><td class="navbox-list navbox-even hlist" style="text-align:left;border-left-width:2px;border-left-style:solid;width:100%;padding:0px;text-align: middle;"><div style="padding:0em 0.25em">
<ul><li><a href="/wiki/%EC%9D%B8%ED%85%94%EB%A6%AC%EC%A0%84%ED%8A%B8_%EB%B9%8C%EB%94%A9" title="인텔리전트 빌딩">인텔리전트 빌딩</a></li>
<li><a href="/wiki/%EC%9D%B4%EC%82%B0%ED%99%94_%ED%83%84%EC%86%8C_%EC%84%BC%EC%84%9C" title="이산화 탄소 센서">이산화 탄소 센서</a></li>
<li><a href="/wiki/%EC%9D%B8%ED%85%94%EB%A6%AC%EC%A0%84%ED%8A%B8_%EB%B9%8C%EB%94%A9" title="인텔리전트 빌딩">인텔리전트 빌딩</a></li>
<li><a href="/wiki/%EC%8B%A4%EC%98%A8" title="실온">실온</a></li>
<li><a href="/wiki/%EC%98%A8%EB%8F%84%EC%A1%B0%EC%A0%88%EA%B8%B0" title="온도조절기">온도조절기</a></li></ul>
</div></td></tr><tr><th class="navbox-group" scope="row" style="width:1%">직업, 무역,<br/>서비스</th><td class="navbox-list navbox-odd hlist" style="text-align:left;border-left-width:2px;border-left-style:solid;width:100%;padding:0px;text-align: middle;"><div style="padding:0em 0.25em">
<ul><li><a href="/wiki/%EA%B1%B4%EC%B6%95%EA%B3%B5%ED%95%99" title="건축공학">건축공학</a></li>
<li><a href="/wiki/%EB%B9%8C%EB%94%A9_%EC%A0%95%EB%B3%B4_%EB%AA%A8%EB%8D%B8%EB%A7%81" title="빌딩 정보 모델링">빌딩 정보 모델링</a> (BIM)</li>
<li><a href="/wiki/%ED%99%98%EA%B2%BD%EA%B3%B5%ED%95%99" title="환경공학">환경공학</a></li>
<li><a href="/wiki/%EA%B8%B0%EA%B3%84%EA%B3%B5%ED%95%99" title="기계공학">기계공학</a></li></ul>
</div></td></tr><tr><th class="navbox-group" scope="row" style="width:1%">산업 단체</th><td class="navbox-list navbox-even hlist" style="text-align:left;border-left-width:2px;border-left-style:solid;width:100%;padding:0px;text-align: middle;"><div style="padding:0em 0.25em">
<ul><li><a class="new" href="/w/index.php?title=ASHRAE&action=edit&redlink=1" title="ASHRAE (없는 문서)">ASHRAE</a></li>
<li><a class="new" href="/w/index.php?title=ASTM_%EC%9D%B8%ED%84%B0%EB%82%B4%EC%85%94%EB%84%90&action=edit&redlink=1" title="ASTM 인터내셔널 (없는 문서)">ASTM 인터내셔널</a></li>
<li><a class="new" href="/w/index.php?title=BSRIA&action=edit&redlink=1" title="BSRIA (없는 문서)">BSRIA</a></li></ul>
</div></td></tr><tr><th class="navbox-group" scope="row" style="width:1%">건강 및 안전</th><td class="navbox-list navbox-odd hlist" style="text-align:left;border-left-width:2px;border-left-style:solid;width:100%;padding:0px;text-align: middle;"><div style="padding:0em 0.25em">
<ul><li><a class="new" href="/w/index.php?title=%EC%8B%A4%EB%82%B4_%EB%8C%80%EA%B8%B0%EC%A7%88&action=edit&redlink=1" title="실내 대기질 (없는 문서)">실내 대기질</a> (IAQ)</li>
<li><a href="/wiki/%EA%B0%84%EC%A0%91_%ED%9D%A1%EC%97%B0" title="간접 흡연">간접 흡연</a></li>
<li><a href="/wiki/%EC%95%84%ED%94%88_%EA%B1%B4%EB%AC%BC_%EC%A6%9D%ED%9B%84%EA%B5%B0" title="아픈 건물 증후군">아픈 건물 증후군</a> (SBS)</li></ul>
</div></td></tr></tbody></table></div>
<p><small><a class="image" href="/wiki/%ED%8C%8C%EC%9D%BC:PD-icon.svg"><img alt="PD-icon.svg" data-file-height="196" data-file-width="196" decoding="async" height="20" src="//upload.wikimedia.org/wikipedia/commons/thumb/6/62/PD-icon.svg/20px-PD-icon.svg.png" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/6/62/PD-icon.svg/30px-PD-icon.svg.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/6/62/PD-icon.svg/40px-PD-icon.svg.png 2x" width="20"/></a> 본 문서에는 <a href="/wiki/%EC%84%9C%EC%9A%B8%ED%8A%B9%EB%B3%84%EC%8B%9C" title="서울특별시">서울특별시</a>에서 <a href="/wiki/%EC%9C%84%ED%82%A4%EB%B0%B1%EA%B3%BC:%EC%A7%80%EC%8B%9D%EA%B3%B5%EC%9C%A0_%ED%94%84%EB%A1%9C%EC%A0%9D%ED%8A%B8" title="위키백과:지식공유 프로젝트">지식공유 프로젝트</a>를 통해 <a href="/wiki/%EC%9C%84%ED%82%A4%EB%B0%B1%EA%B3%BC:%ED%8D%BC%EB%B8%94%EB%A6%AD_%EB%8F%84%EB%A9%94%EC%9D%B8" title="위키백과:퍼블릭 도메인">퍼블릭 도메인</a>으로 공개한 <a href="/wiki/%EC%9C%84%ED%82%A4%EB%B0%B1%EA%B3%BC:%EC%84%9C%EC%9A%B8%EC%8B%9C_%EC%A7%80%EC%8B%9D%EA%B3%B5%EC%9C%A0_%ED%94%84%EB%A1%9C%EC%A0%9D%ED%8A%B8" title="위키백과:서울시 지식공유 프로젝트">저작물</a>을 기초로 작성된 내용이 포함되어 있습니다.</small>
</p>
<div aria-labelledby="전거_통제" class="navbox" role="navigation" style="padding:3px"><table class="nowraplinks hlist navbox-inner" style="border-spacing:0;background:transparent;color:inherit"><tbody><tr><th class="navbox-group" id="전거_통제" scope="row" style="width:1%"><a href="/wiki/%EC%A0%84%EA%B1%B0_%ED%86%B5%EC%A0%9C" title="전거 통제">전거 통제</a></th><td class="navbox-list navbox-odd" style="text-align:left;border-left-width:2px;border-left-style:solid;width:100%;padding:0px"><div style="padding:0em 0.25em">
<ul><li><a href="/wiki/%EA%B2%8C%EB%A7%88%EC%9D%B8%EC%9E%90%EB%A9%94_%EB%85%B8%EB%A6%84%EB%8B%A4%ED%83%80%EC%9D%B4" title="게마인자메 노름다타이">GND</a>: <span class="uid"><a class="external text" href="http://d-nb.info/gnd/4153891-2" rel="nofollow">4153891-2</a></span></li></ul>
</div></td></tr></tbody></table></div>
<!--
NewPP limit report
Parsed by mw1336
Cached time: 20190831132546
Cache expiry: 2592000
Dynamic content: false
Complications: []
CPU time usage: 0.252 seconds
Real time usage: 0.358 seconds
Preprocessor visited node count: 606/1000000
Preprocessor generated node count: 0/1500000
Post‐expand include size: 32542/2097152 bytes
Template argument size: 418/2097152 bytes
Highest expansion depth: 9/40
Expensive parser function count: 0/500
Unstrip recursion depth: 0/20
Unstrip post‐expand size: 10062/5000000 bytes
Number of Wikibase entities loaded: 1/400
Lua time usage: 0.080/10.000 seconds
Lua memory usage: 3.18 MB/50 MB
-->
<!--
Transclusion expansion time report (%,ms,calls,template)
100.00% 200.295 1 -total
48.98% 98.100 1 틀:각주
40.65% 81.416 8 틀:뉴스_인용
17.93% 35.914 1 틀:전거_통제
15.97% 31.997 1 틀:Llang
10.58% 21.196 1 틀:HVAC
8.90% 17.826 1 틀:둘러보기_상자
3.68% 7.378 1 틀:Lang
1.84% 3.677 1 틀:오염
1.68% 3.360 2 틀:일반_기타
-->
<!-- Saved in parser cache with key kowiki:pcache:idhash:48995-0!canonical and timestamp 20190831132546 and revision id 24624501
-->
</div><noscript><img alt="" height="1" src="//ko.wikipedia.org/wiki/Special:CentralAutoLogin/start?type=1x1" style="border: none; position: absolute;" title="" width="1"/></noscript></div>
<div class="printfooter">원본 주소 "<a dir="ltr" href="https://ko.wikipedia.org/w/index.php?title=미세먼지&oldid=24624501">https://ko.wikipedia.org/w/index.php?title=미세먼지&oldid=24624501</a>"</div>
<div class="catlinks" data-mw="interface" id="catlinks"><div class="mw-normal-catlinks" id="mw-normal-catlinks"><a href="/wiki/%ED%8A%B9%EC%88%98:%EB%B6%84%EB%A5%98" title="특수:분류">분류</a>: <ul><li><a href="/wiki/%EB%B6%84%EB%A5%98:%EB%AF%B8%EC%84%B8%EB%A8%BC%EC%A7%80" title="분류:미세먼지">미세먼지</a></li><li><a href="/wiki/%EB%B6%84%EB%A5%98:%EC%8A%A4%EB%AA%A8%EA%B7%B8" title="분류:스모그">스모그</a></li><li><a href="/wiki/%EB%B6%84%EB%A5%98:IARC_1%EB%93%B1%EA%B8%89_%EB%B0%9C%EC%95%94_%EB%AC%BC%EC%A7%88" title="분류:IARC 1등급 발암 물질">IARC 1등급 발암 물질</a></li></ul></div><div class="mw-hidden-catlinks mw-hidden-cats-hidden" id="mw-hidden-catlinks">숨은 분류: <ul><li><a href="/wiki/%EB%B6%84%EB%A5%98:%EC%98%81%EC%96%B4_%ED%91%9C%EA%B8%B0%EB%A5%BC_%ED%8F%AC%ED%95%A8%ED%95%9C_%EB%AC%B8%EC%84%9C" title="분류:영어 표기를 포함한 문서">영어 표기를 포함한 문서</a></li><li><a href="/wiki/%EB%B6%84%EB%A5%98:%EC%84%9C%EC%9A%B8%ED%8A%B9%EB%B3%84%EC%8B%9C_%EA%B3%B5%EA%B0%9C%EC%9E%90%EB%A3%8C%EB%A5%BC_%EC%9D%B8%EC%9A%A9%ED%95%9C_%EB%AC%B8%EC%84%9C" title="분류:서울특별시 공개자료를 인용한 문서">서울특별시 공개자료를 인용한 문서</a></li><li><a href="/wiki/%EB%B6%84%EB%A5%98:GND_%EC%8B%9D%EB%B3%84%EC%9E%90%EB%A5%BC_%ED%8F%AC%ED%95%A8%ED%95%9C_%EC%9C%84%ED%82%A4%EB%B0%B1%EA%B3%BC_%EB%AC%B8%EC%84%9C" title="분류:GND 식별자를 포함한 위키백과 문서">GND 식별자를 포함한 위키백과 문서</a></li></ul></div></div>
<div class="visualClear"></div>
</div>
5 --------------------------------------------------
<div class="noprint" id="siteSub">위키백과, 우리 모두의 백과사전.</div>
6 --------------------------------------------------
<div id="contentSub"></div>
7 --------------------------------------------------
<div id="jump-to-nav"></div>
8 --------------------------------------------------
<div class="mw-content-ltr" dir="ltr" id="mw-content-text" lang="ko"><div class="mw-parser-output"><div class="thumb tright"><div class="thumbinner" style="width:402px;"><div class="PopUpMediaTransform" id="mwe_player_0" style="width:400px;" videopayload='<div class="mediaContainer" style="width:854px"><video id="mwe_player_1" poster="//upload.wikimedia.org/wikipedia/commons/thumb/4/41/Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv/854px--Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv.jpg" controls="" preload="none" autoplay="" style="width:854px;height:428px" class="kskin" data-durationhint="189.54448979592" data-startoffset="0" data-mwtitle="Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv" data-mwprovider="wikimediacommons"><source src="//upload.wikimedia.org/wikipedia/commons/transcoded/4/41/Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv/Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv.480p.vp9.webm" type="video/webm; codecs=&quot;vp9, opus&quot;" data-title="SD VP9 (480P)" data-shorttitle="VP9 480P" data-transcodekey="480p.vp9.webm" data-width="854" data-height="428" data-bandwidth="1003256" data-framerate="29.97002997003"/><source src="//upload.wikimedia.org/wikipedia/commons/transcoded/4/41/Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv/Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv.480p.webm" type="video/webm; codecs=&quot;vp8, vorbis&quot;" data-title="SD WebM (480P)" data-shorttitle="WebM 480P" data-transcodekey="480p.webm" data-width="854" data-height="428" data-bandwidth="1028176" data-framerate="29.97002997003"/><source src="//upload.wikimedia.org/wikipedia/commons/transcoded/4/41/Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv/Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv.720p.vp9.webm" type="video/webm; codecs=&quot;vp9, opus&quot;" data-title="HD VP9 (720P)" data-shorttitle="VP9 720P" data-transcodekey="720p.vp9.webm" data-width="1280" data-height="640" data-bandwidth="1792304" data-framerate="29.97002997003"/><source src="//upload.wikimedia.org/wikipedia/commons/transcoded/4/41/Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv/Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv.720p.webm" type="video/webm; codecs=&quot;vp8, vorbis&quot;" data-title="HD WebM (720P)" data-shorttitle="WebM 720P" data-transcodekey="720p.webm" data-width="1280" data-height="640" data-bandwidth="1921504" data-framerate="29.97002997003"/><source src="//upload.wikimedia.org/wikipedia/commons/4/41/Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv" type="video/ogg; codecs=&quot;theora, vorbis&quot;" data-title="원본 Ogg 파일, 1,280 × 640 (3.16 Mbps)" data-shorttitle="Ogg 원본" data-width="1280" data-height="640" data-bandwidth="3158304" data-framerate="29.97002997003"/><source src="//upload.wikimedia.org/wikipedia/commons/transcoded/4/41/Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv/Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv.120p.vp9.webm" type="video/webm; codecs=&quot;vp9, opus&quot;" data-title="대역이 가장 낮은 VP9 (120P)" data-shorttitle="VP9 120P" data-transcodekey="120p.vp9.webm" data-width="214" data-height="106" data-bandwidth="123496" data-framerate="29.97002997003"/><source src="//upload.wikimedia.org/wikipedia/commons/transcoded/4/41/Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv/Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv.160p.webm" type="video/webm; codecs=&quot;vp8, vorbis&quot;" data-title="대역이 낮은 WebM (160P)" data-shorttitle="WebM 160P" data-transcodekey="160p.webm" 
data-width="288" data-height="144" data-bandwidth="132304" data-framerate="29.97002997003"/><source src="//upload.wikimedia.org/wikipedia/commons/transcoded/4/41/Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv/Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv.180p.vp9.webm" type="video/webm; codecs=&quot;vp9, opus&quot;" data-title="대역이 낮은 VP9 (180P)" data-shorttitle="VP9 180P" data-transcodekey="180p.vp9.webm" data-width="320" data-height="160" data-bandwidth="203888" data-framerate="29.97002997003"/><source src="//upload.wikimedia.org/wikipedia/commons/transcoded/4/41/Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv/Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv.240p.webm" type="video/webm; codecs=&quot;vp8, vorbis&quot;" data-title="소형 WebM (240P)" data-shorttitle="WebM 240P" data-transcodekey="240p.webm" data-width="426" data-height="214" data-bandwidth="260264" data-framerate="29.97002997003"/><source src="//upload.wikimedia.org/wikipedia/commons/transcoded/4/41/Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv/Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv.240p.vp9.webm" type="video/webm; codecs=&quot;vp9, opus&quot;" data-title="소형 VP9 (240P)" data-shorttitle="VP9 240P" data-transcodekey="240p.vp9.webm" data-width="426" data-height="214" data-bandwidth="312624" data-framerate="29.97002997003"/><source src="//upload.wikimedia.org/wikipedia/commons/transcoded/4/41/Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv/Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv.360p.webm" type="video/webm; codecs=&quot;vp8, vorbis&quot;" data-title="웹 스트리밍 가능 WebM (360P)" data-shorttitle="WebM 360P" data-transcodekey="360p.webm" data-width="640" data-height="320" data-bandwidth="516248" data-framerate="29.97002997003"/><source src="//upload.wikimedia.org/wikipedia/commons/transcoded/4/41/Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv/Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv.360p.vp9.webm" type="video/webm; codecs=&quot;vp9, opus&quot;" data-title="VP9 (360P)" data-shorttitle="VP9 360P" data-transcodekey="360p.vp9.webm" data-width="640" data-height="320" data-bandwidth="556872" data-framerate="29.97002997003"/></video></div>'><img alt="파일:Atmospheric Aerosol Eddies and Flows - NASA GSFC S.ogv" src="//upload.wikimedia.org/wikipedia/commons/thumb/4/41/Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv/400px--Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv.jpg" style="width:400px;height:200px"/><a href="//upload.wikimedia.org/wikipedia/commons/4/41/Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv" target="new" title="미디어 재생"><span class="play-btn-large"><span class="mw-tmh-playtext">미디어 재생</span></span></a></div> <div class="thumbcaption"><div class="magnify"><a class="internal" href="/wiki/%ED%8C%8C%EC%9D%BC:Atmospheric_Aerosol_Eddies_and_Flows_-_NASA_GSFC_S.ogv" title="실제 크기로"></a></div>2006년 8월 17일부터 2007년 4월 10일까지 모습 (GOCART 모델을 사용한 애니메이션).<sup class="reference" id="cite_ref-1"><a href="#cite_note-1">[1]</a></sup><sup class="reference" id="cite_ref-2"><a href="#cite_note-2">[2]</a></sup> (자세한 사항을 보려면 클릭할 것) <br/>* 녹색: 검은 탄소와 유기탄소 <br/>* 빨강/주황: 먼지 <br/>* 흰색: 황산염 <br/>* 파랑: 해염</div></div></div>
<table class="toccolours" style="float:right; clear:right;width:250px;margin:0 0 0.5em 1em;">
<tbody><tr>
<td align="center" style="background:#ccddcc"><b><a href="/wiki/%EC%98%A4%EC%97%BC" title="오염">오염</a></b>
</td></tr>
<tr>
<td align="center" style="background:#ccccff"><b><a href="/wiki/%EB%8C%80%EA%B8%B0_%EC%98%A4%EC%97%BC" title="대기 오염">대기 오염</a></b>
</td></tr>
<tr>
<td align="center" style="font-size: 90%;"><a href="/wiki/%EC%82%B0%EC%84%B1%EB%B9%84" title="산성비">산성비</a> • <a href="/wiki/%EB%8C%80%EA%B8%B0%EC%A7%88_%EC%A7%80%EC%88%98" title="대기질 지수">대기질 지수</a> • <a class="new" href="/w/index.php?title=%EB%8C%80%EA%B8%B0%EB%B6%84%EC%82%B0%EB%AA%A8%EB%8D%B8&action=edit&redlink=1" title="대기분산모델 (없는 문서)">대기분산모델</a> • <a href="/wiki/%ED%95%A0%EB%A1%9C%EC%95%8C%EC%BC%80%EC%9D%B8" title="할로알케인">할로알케인</a> • <a class="mw-redirect" href="/wiki/%EA%B8%80%EB%A1%9C%EB%B2%8C_%EB%94%94%EB%B0%8D" title="글로벌 디밍">글로벌 디밍</a> • <a href="/wiki/%EC%A7%80%EA%B5%AC_%EC%98%A8%EB%82%9C%ED%99%94" title="지구 온난화">지구 온난화</a> • <a href="/wiki/%EC%95%88%EA%B0%9C" title="안개">안개</a> • <a class="new" href="/w/index.php?title=%EC%8B%A4%EB%82%B4%EA%B3%B5%EA%B8%B0%ED%99%98%EA%B2%BD&action=edit&redlink=1" title="실내공기환경 (없는 문서)">실내공기환경</a> • <a class="mw-redirect" href="/wiki/%EC%98%A4%EC%A1%B4%EC%B8%B5_%EA%B0%90%EC%86%8C" title="오존층 감소">오존층 감소</a> • <a class="mw-redirect" href="/wiki/%EB%AF%B8%EB%A6%BD%EC%9E%90" title="미립자">미립자</a> • <a href="/wiki/%EC%8A%A4%EB%AA%A8%EA%B7%B8" title="스모그">스모그</a>
</td></tr>
<tr>
<td align="center" style="background:#ccccff"><b><a href="/wiki/%EC%88%98%EC%A7%88_%EC%98%A4%EC%97%BC" title="수질 오염">수질 오염</a></b>
</td></tr>
<tr>
<td align="center" style="font-size: 90%;"><a href="/wiki/%EB%B6%80%EC%98%81%EC%96%91%ED%99%94" title="부영양화">부영양화</a> • <a class="new" href="/w/index.php?title=%EC%82%B0%EC%86%8C%EA%B2%B0%ED%95%8D&action=edit&redlink=1" title="산소결핍 (없는 문서)">산소결핍</a> • <a href="/wiki/%ED%95%B4%EC%96%91_%EC%98%A4%EC%97%BC" title="해양 오염">해양 오염</a> • <a href="/wiki/%ED%95%B4%EC%96%91_%EC%82%B0%EC%84%B1%ED%99%94" title="해양 산성화">해양 산성화</a> • <a href="/wiki/%EA%B8%B0%EB%A6%84_%EC%9C%A0%EC%B6%9C" title="기름 유출">기름 유출</a> • <a class="new" href="/w/index.php?title=%EC%84%A0%EB%B0%95_%EC%98%A4%EC%97%BC&action=edit&redlink=1" title="선박 오염 (없는 문서)">선박 오염</a> • <a class="new" href="/w/index.php?title=%ED%91%9C%EB%A9%B4%EC%9C%A0%EC%88%98&action=edit&redlink=1" title="표면유수 (없는 문서)">표면유수</a> • <a class="new" href="/w/index.php?title=%EC%97%B4_%EC%98%A4%EC%97%BC&action=edit&redlink=1" title="열 오염 (없는 문서)">열 오염</a> • <a class="mw-redirect" href="/wiki/%EC%83%9D%ED%99%9C%ED%95%98%EC%88%98" title="생활하수">생활하수</a> • <a class="new" href="/w/index.php?title=%EC%88%98%EC%9D%B8%EC%84%B1_%EC%A0%84%EC%97%BC&action=edit&redlink=1" title="수인성 전염 (없는 문서)">수인성 전염</a> • <a href="/wiki/%EC%88%98%EC%A7%88" title="수질">수질</a> • <a class="new" href="/w/index.php?title=%EB%AC%BC_%EC%A0%95%EC%B2%B4&action=edit&redlink=1" title="물 정체 (없는 문서)">물 정체</a>
</td></tr>
<tr>
<td align="center" style="background:#ccccff"><b><a href="/wiki/%ED%86%A0%EC%96%91_%EC%98%A4%EC%97%BC" title="토양 오염">토양 오염</a></b>
</td></tr>
<tr>
<td align="center" style="font-size: 90%;"><a class="mw-redirect" href="/wiki/%EC%83%9D%EB%AC%BC%ED%95%99%EC%A0%81%EA%B5%90%EC%A0%95" title="생물학적교정">생물학적교정</a> • <a href="/wiki/%EC%A0%9C%EC%B4%88%EC%A0%9C" title="제초제">제초제</a> • <a href="/wiki/%EB%86%8D%EC%95%BD" title="농약">농약</a> • <a href="/wiki/%EC%82%B4%EC%B6%A9%EC%A0%9C" title="살충제">살충제</a> • <a class="new" href="/w/index.php?title=%ED%86%A0%EC%96%91%EC%A7%80%EC%B9%A8%EA%B0%92_(SGVs)&action=edit&redlink=1" title="토양지침값 (SGVs) (없는 문서)">토양지침값 (SGVs)</a> • <a href="/wiki/%EC%82%AC%EB%A7%89%ED%99%94" title="사막화">사막화</a>
</td></tr>
<tr>
<td align="center" style="background:#ccccff"><b><a href="/wiki/%EB%B0%A9%EC%82%AC%EB%8A%A5_%EC%98%A4%EC%97%BC" title="방사능 오염">방사능 오염</a></b>
</td></tr>
<tr>
<td align="center" style="font-size: 90%;"><a class="new" href="/w/index.php?title=%EC%95%85%ED%8B%B0%EB%8A%84%EC%A1%B1%EA%B3%BC_%ED%99%98%EA%B2%BD&action=edit&redlink=1" title="악티늄족과 환경 (없는 문서)">악티늄족과 환경</a> • <a href="/wiki/%ED%99%98%EA%B2%BD_%EB%B0%A9%EC%82%AC%EB%8A%A5" title="환경 방사능">환경방사능</a> • <a class="mw-redirect" href="/wiki/%ED%95%B5%EB%B6%84%EC%97%B4%EC%83%9D%EC%84%B1%EB%AC%BC" title="핵분열생성물">핵분열생성물</a> • <a href="/wiki/%EB%82%99%EC%A7%84" title="낙진">낙진</a> • <a class="new" href="/w/index.php?title=%ED%94%8C%EB%A3%A8%ED%86%A0%EB%8A%84%EA%B3%BC_%ED%99%98%EA%B2%BD&action=edit&redlink=1" title="플루토늄과 환경 (없는 문서)">플루토늄과 환경</a> • <a class="new" href="/w/index.php?title=%EB%B0%A9%EC%82%AC%EB%8A%A5_%EC%A4%91%EB%8F%85&action=edit&redlink=1" title="방사능 중독 (없는 문서)">방사능 중독</a> • <a class="new" href="/w/index.php?title=%EB%9D%BC%EB%93%90%EA%B3%BC_%ED%99%98%EA%B2%BD&action=edit&redlink=1" title="라듐과 환경 (없는 문서)">라듐과 환경</a> • <a class="new" href="/w/index.php?title=%EC%9A%B0%EB%9D%BC%EB%8A%84%EA%B3%BC_%ED%99%98%EA%B2%BD&action=edit&redlink=1" title="우라늄과 환경 (없는 문서)">우라늄과 환경</a>
</td></tr>
<tr>
<td align="center" style="background:#ccccff"><b>기타 오염</b>
</td></tr>
<tr>
<td align="center" style="font-size: 90%;"><a class="mw-redirect" href="/wiki/%EC%B9%A8%EC%9E%85%EC%A2%85" title="침입종">침입종</a> • <a href="/wiki/%EA%B4%91%EA%B3%B5%ED%95%B4" title="광공해">광공해</a> • <a href="/wiki/%EC%86%8C%EC%9D%8C_%EA%B3%B5%ED%95%B4" title="소음 공해">소음 공해</a> • <a class="new" href="/w/index.php?title=%EC%A0%84%EC%9E%90%ED%8C%8C_%EC%8A%A4%ED%8E%99%ED%8A%B8%EB%9F%BC_%EC%98%A4%EC%97%BC&action=edit&redlink=1" title="전자파 스펙트럼 오염 (없는 문서)">전자파 스펙트럼 오염</a> • <a class="new" href="/w/index.php?title=%EC%8B%9C%EA%B0%81_%EA%B3%B5%ED%95%B4&action=edit&redlink=1" title="시각 공해 (없는 문서)">시각 공해</a> • <a class="mw-redirect" href="/wiki/%EB%A9%B8%EC%A2%85" title="멸종">멸종</a>
</td></tr>
<tr>
<td align="center" style="background:#ccccff"><b>국제 협약</b>
</td></tr>
<tr>
<td align="center" style="font-size: 90%;"><a href="/wiki/%EB%AA%AC%ED%8A%B8%EB%A6%AC%EC%98%AC_%EC%9D%98%EC%A0%95%EC%84%9C" title="몬트리올 의정서">몬트리올 의정서</a> • <a href="/wiki/%EA%B5%90%ED%86%A0_%EC%9D%98%EC%A0%95%EC%84%9C" title="교토 의정서">교토 의정서</a> • <a href="/wiki/%EB%8C%80%EA%B8%B0%EC%98%A4%EC%97%BC%EB%AC%BC%EC%A7%88%EC%9D%98_%EC%9E%A5%EA%B1%B0%EB%A6%AC_%EC%9D%B4%EB%8F%99%EC%97%90_%EA%B4%80%ED%95%9C_%ED%98%91%EC%95%BD" title="대기오염물질의 장거리 이동에 관한 협약">대기오염물질의 장거리 이동에 관한 협약</a>
</td></tr>
<tr>
<td align="center" style="background:#ccccff"><b><a class="new" href="/w/index.php?title=%ED%99%98%EA%B2%BD%EB%8B%A8%EC%B2%B4_%EB%AA%A9%EB%A1%9D&action=edit&redlink=1" title="환경단체 목록 (없는 문서)">환경단체 목록</a></b>
</td></tr>
<tr>
<td align="center" style="font-size: 90%;"><a class="new" href="/w/index.php?title=%EC%A7%80%EA%B5%AC%EB%8C%80%EA%B8%B0%EA%B0%90%EC%8B%9C&action=edit&redlink=1" title="지구대기감시 (없는 문서)">지구대기감시</a> • <a href="/wiki/%EA%B7%B8%EB%A6%B0%ED%94%BC%EC%8A%A4" title="그린피스">그린피스</a>
</td></tr>
<tr>
<td align="center" style="background:#ccddcc" width="400"><b>관련 항목</b>
</td></tr>
<tr>
<td align="center" style="font-size: 90%;"><a class="mw-redirect" href="/wiki/%ED%99%98%EA%B2%BD_%EA%B3%BC%ED%95%99" title="환경 과학">환경 과학</a> • <a href="/wiki/%EC%9E%90%EC%97%B0_%ED%99%98%EA%B2%BD" title="자연 환경">자연 환경</a>
</td></tr></tbody></table>
<p><b>미세먼지</b>(微細-, <span style="font-size: smaller;"><a href="/wiki/%EC%98%81%EC%96%B4" title="영어">영어</a>: </span><span lang="en">particulate matter, <b>PM</b>, suspended particulate matter, <b>SPM</b>, atmospheric aerosol particles, atmospheric particulate matter</span>) 또는 <b>분진</b>(粉塵)은 눈에 보이지 않을 정도로 <a href="/wiki/%EC%9E%85%EC%9E%90" title="입자">입자</a>가 작은 먼지이다. <a class="mw-redirect" href="/wiki/%EC%95%84%ED%99%A9%EC%82%B0%EA%B0%80%EC%8A%A4" title="아황산가스">아황산가스</a>, <a href="/wiki/%EC%A7%88%EC%86%8C_%EC%82%B0%ED%99%94%EB%AC%BC" title="질소 산화물">질소 산화물</a>, <a href="/wiki/%EB%82%A9" title="납">납</a>, <a href="/wiki/%EC%98%A4%EC%A1%B4" title="오존">오존</a>, <a href="/wiki/%EC%9D%BC%EC%82%B0%ED%99%94_%ED%83%84%EC%86%8C" title="일산화 탄소">일산화 탄소</a> 등을 포함하는 대기오염 물질로 <a href="/wiki/%EC%9E%90%EB%8F%99%EC%B0%A8" title="자동차">자동차</a>, <a href="/wiki/%EA%B3%B5%EC%9E%A5" title="공장">공장</a>, 조리 과정 등에서 발생하여 대기 중 장기간 떠다니는 입경 10<a href="/wiki/%EB%A7%88%EC%9D%B4%ED%81%AC%EB%A1%9C%EB%AF%B8%ED%84%B0" title="마이크로미터">μm</a> 이하의 미세한 <a href="/wiki/%EB%A8%BC%EC%A7%80" title="먼지">먼지</a>이며, PM10이라고도 한다. 입자가 2.5μm 이하인 경우는 PM 2.5라고 쓰며 '초미세먼지' 또는 '극미세먼지' 라고도 부른다. 학술적으로는 <a class="mw-redirect" href="/wiki/%EC%97%90%EC%96%B4%EB%A1%9C%EC%A1%B8" title="에어로졸">에어로졸</a>(aerosol)이라고 부른다. 미세먼지(fine particles)는 부유분진(Suspended particles), 입자상물질(Particulate matter) 등으로도 불리며 명칭에 따라 약간씩 다른 의미를 가지고 있다. 입자상물질은 지름이 100μm에서 10<a href="/wiki/%EB%82%98%EB%85%B8" title="나노">n</a><a href="/wiki/%EB%AF%B8%ED%84%B0" title="미터">m</a>정도이며, 이보다 지름이 크면 중력으로 인해 대기중 체류시간이 아주 짧다
</p>
<div class="toc" id="toc"><input class="toctogglecheckbox" id="toctogglecheckbox" role="button" style="display:none" type="checkbox"/><div class="toctitle" dir="ltr" lang="ko"><h2>목차</h2><span class="toctogglespan"><label class="toctogglelabel" for="toctogglecheckbox"></label></span></div>
<ul>
<li class="toclevel-1 tocsection-1"><a href="#개요"><span class="tocnumber">1</span> <span class="toctext">개요</span></a></li>
<li class="toclevel-1 tocsection-2"><a href="#먼지들의_분류"><span class="tocnumber">2</span> <span class="toctext">먼지들의 분류</span></a>
<ul>
<li class="toclevel-2 tocsection-3"><a href="#PM-10_(10μm_미만_입자)"><span class="tocnumber">2.1</span> <span class="toctext">PM-10 (10μm 미만 입자)</span></a></li>
<li class="toclevel-2 tocsection-4"><a href="#PM-2.5_(2.5μm_미만_입자)"><span class="tocnumber">2.2</span> <span class="toctext">PM-2.5 (2.5μm 미만 입자)</span></a></li>
<li class="toclevel-2 tocsection-5"><a href="#TSP_(Total_suspended_Particles,_총_부유_입자)"><span class="tocnumber">2.3</span> <span class="toctext">TSP (Total suspended Particles, 총 부유 입자)</span></a></li>
</ul>
</li>
<li class="toclevel-1 tocsection-6"><a href="#발생_원인"><span class="tocnumber">3</span> <span class="toctext">발생 원인</span></a></li>
<li class="toclevel-1 tocsection-7"><a href="#미세먼지_구성_성분"><span class="tocnumber">4</span> <span class="toctext">미세먼지 구성 성분</span></a></li>
<li class="toclevel-1 tocsection-8"><a href="#질병"><span class="tocnumber">5</span> <span class="toctext">질병</span></a>
<ul>
<li class="toclevel-2 tocsection-9"><a href="#노인사망률_증가"><span class="tocnumber">5.1</span> <span class="toctext">노인사망률 증가</span></a></li>
<li class="toclevel-2 tocsection-10"><a href="#임산부와_태아"><span class="tocnumber">5.2</span> <span class="toctext">임산부와 태아</span></a></li>
<li class="toclevel-2 tocsection-11"><a href="#천식"><span class="tocnumber">5.3</span> <span class="toctext">천식</span></a></li>
<li class="toclevel-2 tocsection-12"><a href="#두통"><span class="tocnumber">5.4</span> <span class="toctext">두통</span></a></li>
<li class="toclevel-2 tocsection-13"><a href="#아토피"><span class="tocnumber">5.5</span> <span class="toctext">아토피</span></a></li>
<li class="toclevel-2 tocsection-14"><a href="#인슐린_저항성"><span class="tocnumber">5.6</span> <span class="toctext">인슐린 저항성</span></a></li>
</ul>
</li>
<li class="toclevel-1 tocsection-15"><a href="#예방과_대책"><span class="tocnumber">6</span> <span class="toctext">예방과 대책</span></a></li>
<li class="toclevel-1 tocsection-16"><a href="#같이_보기"><span class="tocnumber">7</span> <span class="toctext">같이 보기</span></a></li>
<li class="toclevel-1 tocsection-17"><a href="#각주"><span class="tocnumber">8</span> <span class="toctext">각주</span></a></li>
<li class="toclevel-1 tocsection-18"><a href="#외부_링크"><span class="tocnumber">9</span> <span class="toctext">외부 링크</span></a></li>
</ul>
</div>
<h2><span id=".EA.B0.9C.EC.9A.94"></span><span class="mw-headline" id="개요">개요</span><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=%EB%AF%B8%EC%84%B8%EB%A8%BC%EC%A7%80&action=edit&section=1" title="부분 편집: 개요">편집</a><span class="mw-editsection-bracket">]</span></span></h2>
<p>인체에 큰 영향을 미치는 물질이다. <a href="/wiki/1948%EB%85%84" title="1948년">1948년</a> 미국 <a href="/wiki/%ED%8E%9C%EC%8B%A4%EB%B2%A0%EC%9D%B4%EB%8B%88%EC%95%84%EC%A3%BC" title="펜실베이니아주">펜실베이니아주</a> 도노라에서 20명이 사망한 대기오염사고, <a href="/wiki/1952%EB%85%84" title="1952년">1952년</a> 약 4,100명의 사망자를 발생시킨 <a href="/wiki/%EA%B7%B8%EB%A0%88%EC%9D%B4%ED%8A%B8_%EC%8A%A4%EB%AA%A8%EA%B7%B8" title="그레이트 스모그">런던스모그</a>는 미세먼지가 인체에 어떤 영향을 미치는지 보여 주는 대표적인 사례이다. 그 이후로 미세먼지가 인체에 미치는 영향에 대한 다양한 역학조사가 실시되었고, 특히 10μm 이하의 미세먼지 입자(PM10)가 취약집단의 질병발생률과 사망률을 높이는 등 인체에 해로운 영향을 미칠 가능성이 높다는 것이 밝혀졌다. 이후 각 국에서 대기오염대책이 마련되었으며, 미세먼지가 인체와 환경에 미치는 해로운 영향을 줄이기 위해 대기오염기준도 마련하였다. 미세먼지는 입자의 크기에 따라 50µm 이하인 총먼지(TPS, TOTAL SUSPENDED PARTICLES)와 입자 크기가 매우 작은 미세먼지로 구분한다. 미세먼지는 지름이 10µm 보다 작은 미세먼지(PM10)와 지름이 2.5µm보다 작은 미세먼지(PM2.5)로 나뉜다.
</p><p>공기 속에 입자상물질(고체나 액체상태)이 부유하고 있는 상태를 일반적으로 <a class="mw-redirect" href="/wiki/%EC%97%90%EC%96%B4%EB%A1%9C%EC%A1%B8" title="에어로졸">에어로졸</a>(Aerosol)이라 한다. 통상적으로 먼지라 말하고 있다.
</p>
<ul><li>먼지의 입도(粒度)범위는 0.001~1000μm이지만 70μm이상의 먼지는 발생 즉시 침강하므로 일반적으로 70μm 미만의 총먼지(TSP, Total Suspended Particle)라 한다.</li>
<li>0.1μm 이하의 먼지입경을 초범위(ultra range)라 하며, 대부분의 먼지는 0.1~10μm 사이에 분포하게 된다. 0.1~1μm 범위의 입자는 입경분포의 특성상 침강이나 응집이 쉽지 않기 때문에 대기 중에 체류시간이 길고 폐포(肺胞)에 침투가 가장 용이하다.</li>
<li>0.5μm 크기의 입자는 빛의 산란효과가 가장 커서 시정감소 등의 원인이 되기도 한다.</li></ul>
<h2><span id=".EB.A8.BC.EC.A7.80.EB.93.A4.EC.9D.98_.EB.B6.84.EB.A5.98"></span><span class="mw-headline" id="먼지들의_분류">먼지들의 분류</span><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=%EB%AF%B8%EC%84%B8%EB%A8%BC%EC%A7%80&action=edit&section=2" title="부분 편집: 먼지들의 분류">편집</a><span class="mw-editsection-bracket">]</span></span></h2>
<h3><span id="PM-10_.2810.CE.BCm_.EB.AF.B8.EB.A7.8C_.EC.9E.85.EC.9E.90.29"></span><span class="mw-headline" id="PM-10_(10μm_미만_입자)">PM-10 (10μm 미만 입자)</span><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=%EB%AF%B8%EC%84%B8%EB%A8%BC%EC%A7%80&action=edit&section=3" title="부분 편집: PM-10 (10μm 미만 입자)">편집</a><span class="mw-editsection-bracket">]</span></span></h3>
<p>입자의 크기가 10μm 미만인 먼지를 말한다. 국가에서 환경기준으로 연평균 50㎍/㎥ , 24시간 평균 100㎍/㎥를 기준으로 하고 있다. 인체의 폐포까지 침투하여 각종 호흡기 질환의 직접적인 원인이 되며, 인체의 면역 기능을 악화시킨다. 세계보건기구(WHO) 가이드라인으로는 연평균 20㎍/㎥, 24시간 평균 50㎍/㎥으로 설정되어있으며, 개발도상국의 경우 연평균 70㎍/㎥ 정도라고 한다.
</p>
<h3><span id="PM-2.5_.282.5.CE.BCm_.EB.AF.B8.EB.A7.8C_.EC.9E.85.EC.9E.90.29"></span><span class="mw-headline" id="PM-2.5_(2.5μm_미만_입자)">PM-2.5 (2.5μm 미만 입자)</span><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=%EB%AF%B8%EC%84%B8%EB%A8%BC%EC%A7%80&action=edit&section=4" title="부분 편집: PM-2.5 (2.5μm 미만 입자)">편집</a><span class="mw-editsection-bracket">]</span></span></h3>
<p>입자의 크기가 2.5μm 미만인 먼지를 말한다. 이것을 초미세먼지라고 한다.
입자의 크기가 작을수록 건강에 미치는 영향이 크다는 결과에 따라 선진국에서 미세입자에 대한 기준을 90년대 후반부터 도입하기 시작했다.
</p><p>대한민국은 연평균 15㎍/㎥, 24시간 평균 35㎍/㎥의 기준을 발표하였으며, 미국은 연평균 15㎍/㎥, 24시간 평균 35㎍/㎥의 기준을 설정하였다. 세계보건기구(WHO) 가이드라인으로는 연평균 10㎍/㎥, 24시간 평균 25㎍/㎥으로 설정되어있다.
</p>
<h3><span id="TSP_.28Total_suspended_Particles.2C_.EC.B4.9D_.EB.B6.80.EC.9C.A0_.EC.9E.85.EC.9E.90.29"></span><span class="mw-headline" id="TSP_(Total_suspended_Particles,_총_부유_입자)">TSP (Total suspended Particles, 총 부유 입자)</span><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=%EB%AF%B8%EC%84%B8%EB%A8%BC%EC%A7%80&action=edit&section=5" title="부분 편집: TSP (Total suspended Particles, 총 부유 입자)">편집</a><span class="mw-editsection-bracket">]</span></span></h3>
<p>총부유분진 또는 총부유입자상 물질 또는 총입자상 물질이라고 하며, 통상적으로 50μm 이하의 모든 부유 먼지를 말한다. 입자의 크기가 10μm이상인 경우에는 도시미관에 영향을 미치긴 하지만 인체의 건강에는 영향이 적기 때문에 90년대 후반 TSP 에서 PM-10으로 환경기준을 변경하였다.
</p>
<h2><span id=".EB.B0.9C.EC.83.9D_.EC.9B.90.EC.9D.B8"></span><span class="mw-headline" id="발생_원인">발생 원인</span><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=%EB%AF%B8%EC%84%B8%EB%A8%BC%EC%A7%80&action=edit&section=6" title="부분 편집: 발생 원인">편집</a><span class="mw-editsection-bracket">]</span></span></h2>
<p>미세먼지의 배출원인은 인위적인 발생과 자연적인 발생으로 구분된다. 인위적인 발생의 원인은 중국발 미세먼지, 공장에서 나오는 매연 쓰레기소각, 가정에서 생선이나 그 외의 것을 구울 때 등이 이유가 될 수 있다.자연발생원인은 모래바람의 먼지, 화산재, 산불이 일 때 발생하는 먼지 등 때문이다. 해염입자 또한 바다 가까이에 위치한 지역에는 많은 영향을 미친다.
</p>
<h2><span id=".EB.AF.B8.EC.84.B8.EB.A8.BC.EC.A7.80_.EA.B5.AC.EC.84.B1_.EC.84.B1.EB.B6.84"></span><span class="mw-headline" id="미세먼지_구성_성분">미세먼지 구성 성분</span><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=%EB%AF%B8%EC%84%B8%EB%A8%BC%EC%A7%80&action=edit&section=7" title="부분 편집: 미세먼지 구성 성분">편집</a><span class="mw-editsection-bracket">]</span></span></h2>
<p>미세먼지의 구성 성분은 질산염과 황산염 등이 58.3%, 탄소류와 검댕 16.8%, 광물 6.3%, 기타 18.6%로 이루어져있다.<sup class="reference" id="cite_ref-3"><a href="#cite_note-3">[3]</a></sup><sup class="reference" id="cite_ref-4"><a href="#cite_note-4">[4]</a></sup>
</p>
<h2><span id=".EC.A7.88.EB.B3.91"></span><span class="mw-headline" id="질병">질병</span><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=%EB%AF%B8%EC%84%B8%EB%A8%BC%EC%A7%80&action=edit&section=8" title="부분 편집: 질병">편집</a><span class="mw-editsection-bracket">]</span></span></h2>
<h3><span id=".EB.85.B8.EC.9D.B8.EC.82.AC.EB.A7.9D.EB.A5.A0_.EC.A6.9D.EA.B0.80"></span><span class="mw-headline" id="노인사망률_증가">노인사망률 증가</span><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=%EB%AF%B8%EC%84%B8%EB%A8%BC%EC%A7%80&action=edit&section=9" title="부분 편집: 노인사망률 증가">편집</a><span class="mw-editsection-bracket">]</span></span></h3>
<p>2009년 <a href="/wiki/%EA%B5%AD%EB%A6%BD%ED%99%98%EA%B2%BD%EA%B3%BC%ED%95%99%EC%9B%90" title="국립환경과학원">국립환경과학원</a>과 인하대 연구팀의 미세먼지와 사망률 연구 결과, 서울에서 미세먼지(PM10) 농도가 ㎥당 10㎍(100만분의 1g) 증가할 때마다 65살 이상 노인 등 대기오염에 민감한 집단의 사망률은 0.4%씩 증가하는 것으로 파악했다. 초미세먼지(PM2.5) 의 영향은 더 커서 10㎍/㎥ 증가할 때마다 민감집단의 사망률은 1.1% 늘어나는 것으로 추정했다.
</p>
<h3><span id=".EC.9E.84.EC.82.B0.EB.B6.80.EC.99.80_.ED.83.9C.EC.95.84"></span><span class="mw-headline" id="임산부와_태아">임산부와 태아</span><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=%EB%AF%B8%EC%84%B8%EB%A8%BC%EC%A7%80&action=edit&section=10" title="부분 편집: 임산부와 태아">편집</a><span class="mw-editsection-bracket">]</span></span></h3>
<p>이화여대 의대 <a class="new" href="/w/index.php?title=%ED%95%98%EC%9D%80%ED%9D%AC&action=edit&redlink=1" title="하은희 (없는 문서)">하은희</a> 교수팀의 연구 결과 미세먼지 농도가 10㎍/㎥ 올라가면 저체중아 출산 위험이 5.2%에서 7.4%까지 높아지고, 임신 4~9개월 사이의 사산 위험도 8.0~13.8%까지 올라가는 것으로 조사됐다.<sup class="reference" id="cite_ref-5"><a href="#cite_note-5">[5]</a></sup>
</p><p>2009년 <a class="new" href="/w/index.php?title=%EC%96%91%EC%82%B0%EB%B6%80%EC%82%B0%EB%8C%80%EB%B3%91%EC%9B%90&action=edit&redlink=1" title="양산부산대병원 (없는 문서)">양산부산대병원</a> 산업의학 전문의, 대기과학 및 지리정보시스템 전문가들이 공동으로 연구를 진행한 결과, 미세먼지(PM10, 직경이 10μm 이하의 먼지) 농도가 저체중아 출산 및 사산, 기형아 발생과 밀접한 관계가 있는 것으로 조사됐다.<sup class="reference" id="cite_ref-6"><a href="#cite_note-6">[6]</a></sup>
</p><p><a href="/wiki/%EA%B5%AD%EA%B2%BD%EC%97%86%EB%8A%94%EC%9D%98%EC%82%AC%ED%9A%8C" title="국경없는의사회">국경없는의사회</a>(MSF)의 1998년 조사 결과 <a href="/wiki/%ED%88%AC%EB%A5%B4%ED%81%AC%EB%A9%94%EB%8B%88%EC%8A%A4%ED%83%84" title="투르크메니스탄">투르크메니스탄</a>의 <a href="/wiki/%EC%95%84%EB%9E%84%ED%95%B4" title="아랄해">아랄해</a> 인접지역은 먼지 퇴적률이 아주 높았으며 살충제의 오염도 심한 것으로 나왔다. 2000~2001년 카라칼파크 지역의 먼지와 호흡기 질환의 상관관계 조사에서는 건강에 위협적인 미세먼지가 전체 먼지 가운데 14~53%에 이르는 것으로 나타났으며, 이 지역 어린이들의 폐활량 등 폐기능이 유럽 어린이에 비해 현저히 낮은 것으로 나타났다.<sup class="reference" id="cite_ref-7"><a href="#cite_note-7">[7]</a></sup>
</p><p>미국의 한 대학병원이 아동 천7백 명을 조사한 연구를 보면, 미세먼지 농도가 짙은 지역에서 태어난 아이들은 그렇지 않은 지역에서 태어난 아이들보다 폐활량이 정상의 80%에 못 미치는 '폐 기능장애'를 겪을 가능성이 커지는 것으로 조사됐다. 이런 사실 때문에 전문가들은 미세먼지를 '조용한 살인자'라고 부른다.<sup class="reference" id="cite_ref-8"><a href="#cite_note-8">[8]</a></sup>
</p>
<h3><span id=".EC.B2.9C.EC.8B.9D"></span><span class="mw-headline" id="천식">천식</span><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=%EB%AF%B8%EC%84%B8%EB%A8%BC%EC%A7%80&action=edit&section=11" title="부분 편집: 천식">편집</a><span class="mw-editsection-bracket">]</span></span></h3>
<p>사람의 폐포까지 깊숙하게 침투해 기관지와 폐에 쌓인 미세먼지는 각종 호흡기 질환의 직접 원인이 되며 몸의 면역 기능을 떨어뜨린다. 천식과 호흡곤란을 일으키며 장거리 이동으로 비 또는 눈속의 중금속 농도를 증가시키기도 한다. 또한 대기 중에 부유하면서 빛을 흡수, 산란시키기 때문에 시야를 악화시키기도 하고, 식물의 잎 표면에 쌓여 광합성 동화작용, 호흡작용과 증산작용 등을 저해하여 식물 성장에도 나쁜 영향을 미친다. 또한 여성의 사망원인중 88%가 조리 과정에서 발생한 미세먼지에 의한 사망이라고 한다.
</p><p><a class="mw-redirect" href="/wiki/%ED%95%9C%EA%B5%AD%ED%99%98%EA%B2%BD%EC%A0%95%EC%B1%85%ED%8F%89%EA%B0%80%EC%97%B0%EA%B5%AC%EC%9B%90" title="한국환경정책평가연구원">한국환경정책평가연구원</a> 조승헌 박사팀의 연구결과에 따르면, 미세먼지를 10∼30% 감축하면 수도권의 관련 질환 사망자 수가 해마다 40∼120명 줄어들고 심장 및 호흡기 질환 건수는 연간 2800∼8300건 줄일 수 있는 것으로 전망했다. 또 심장 및 호흡기계통 질환과 관련된 의료비용 등을 토대로 미세먼지 감축으로 인한 이익을 계산한 결과 연간 80억∼1200억원에 이르는 것으로 풀이했다.
</p>
<h3><span id=".EB.91.90.ED.86.B5"></span><span class="mw-headline" id="두통">두통</span><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=%EB%AF%B8%EC%84%B8%EB%A8%BC%EC%A7%80&action=edit&section=12" title="부분 편집: 두통">편집</a><span class="mw-editsection-bracket">]</span></span></h3>
<p>무연탄을 태울 때 나오는 신경계 독성물질인 납이나 비소, 아연 등 유해 중금속 농도가 높은 미세먼지를 마시면 멀쩡하던 사람도 기침하게 되고 목이 아프고, 피부 트러블을 일으키기도 한다. 머리가 굉장히 아프거나 어지러움, 호흡곤란 등이 생긴다.
<sup class="reference" id="cite_ref-9"><a href="#cite_note-9">[9]</a></sup>
</p><p>대부분의 미세먼지가 치명적이지만 그중에서도 <a class="new" href="/w/index.php?title=%ED%99%A9%EC%82%B0%EC%9D%B4%EC%98%A8&action=edit&redlink=1" title="황산이온 (없는 문서)">황산이온</a>이나 <a class="new" href="/w/index.php?title=%EC%A7%88%EC%82%B0%EC%9D%B4%EC%98%A8&action=edit&redlink=1" title="질산이온 (없는 문서)">질산이온</a> 등은 <a href="/wiki/%ED%99%A9%EC%82%AC" title="황사">황사</a> 속 먼지와 흡착되면서 산화물로 변해 호흡과 함께 폐로 들어가게 된다. 이 물질이 폐로 들어가면 염증을 일으키는데, <a href="/wiki/%EA%B8%B0%EA%B4%80%EC%A7%80%EC%97%BC" title="기관지염">기관지염</a>이나 <a href="/wiki/%EC%B2%9C%EC%8B%9D" title="천식">천식</a>, <a class="mw-redirect" href="/wiki/%EB%A7%8C%EC%84%B1%ED%8F%90%EC%87%84%EC%84%B1%ED%8F%90%EC%A7%88%ED%99%98" title="만성폐쇄성폐질환">만성폐쇄성폐질환</a>(COPD)이 대표적이다. 이런 물질들은 <a href="/wiki/%EB%B0%B1%ED%98%88%EA%B5%AC" title="백혈구">백혈구</a>를 자극해 혈관벽에도 염증을 일으킬 수 있다. 이렇게 되면 전형적인 혈관질환인 <a class="mw-redirect" href="/wiki/%EB%8F%99%EB%A7%A5%EA%B2%BD%ED%99%94" title="동맥경화">동맥경화</a>, <a href="/wiki/%EB%87%8C%EA%B2%BD%EC%83%89" title="뇌경색">뇌경색</a>, <a class="mw-redirect" href="/wiki/%EC%8B%AC%EA%B7%BC%EA%B2%BD%EC%83%89" title="심근경색">심근경색</a> 등을 유발할 수 있다.
</p>
<h3><span id=".EC.95.84.ED.86.A0.ED.94.BC"></span><span class="mw-headline" id="아토피">아토피</span><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=%EB%AF%B8%EC%84%B8%EB%A8%BC%EC%A7%80&action=edit&section=13" title="부분 편집: 아토피">편집</a><span class="mw-editsection-bracket">]</span></span></h3>
<p>모공보다 더 작은 초미세먼지는 모공으로 침투해 <a href="/wiki/%EC%95%84%ED%86%A0%ED%94%BC" title="아토피">아토피</a> 등 <a href="/wiki/%ED%94%BC%EB%B6%80%EC%97%BC" title="피부염">피부염</a>의 원인이 되기 때문에 여드름이 있거나 아토피가 있는 사람들 역시 황사가 온다는 예보에는 야외활동을 자제하는 것이 좋다.
</p>
<h3><span id=".EC.9D.B8.EC.8A.90.EB.A6.B0_.EC.A0.80.ED.95.AD.EC.84.B1"></span><span class="mw-headline" id="인슐린_저항성">인슐린 저항성</span><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=%EB%AF%B8%EC%84%B8%EB%A8%BC%EC%A7%80&action=edit&section=14" title="부분 편집: 인슐린 저항성">편집</a><span class="mw-editsection-bracket">]</span></span></h3>
<p>대기오염 미세먼지의 주성분인 노년여성의 인슐린 저항성을 높인다는 연구 결과가 나왔다. 인슐린 저항성(IR)은 혈당을 낮추는 인슐린의 기 혈당을 효과적으로 사용하지 못해 대사증후군은 물론 심장병·당뇨병 등까지 초래할 수 있다.
<sup class="reference" id="cite_ref-10"><a href="#cite_note-10">[10]</a></sup>
</p>
<h2><span id=".EC.98.88.EB.B0.A9.EA.B3.BC_.EB.8C.80.EC.B1.85"></span><span class="mw-headline" id="예방과_대책">예방과 대책</span><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=%EB%AF%B8%EC%84%B8%EB%A8%BC%EC%A7%80&action=edit&section=15" title="부분 편집: 예방과 대책">편집</a><span class="mw-editsection-bracket">]</span></span></h2>
<ul><li>사전에 미세먼지 농도를 확인한 후, 농도에 따라 활동범위를 정한다.</li>
<li>어린이, 노인, 폐질환 및 심장질환자 등 민감군은 실외 활동을 제한하고 그렇지 않은 사람들은 장시간 또는 무리한 실외 활동을 줄인다.</li>
<li>미세먼지로부터 보호할 수 있는 가장 간단하고 보편적인 방법은 미세먼지 차단 마스크를 착용하는 것이다. 미세먼지 차단 성능이 있는 마스크는 제품 포장에 '의약외품'이라는 문자와 KF80, KF94, KF99 등이 표시되어 있다. KF80, KF94, KF99는 입자차단 성능을 나타내는데 KF80은 평균 0.6μm 크기의 미세입자를 80퍼센트 이상 걸러낼 수 있으며, KF94, KF99는 0.4μm 크기의 미세입자를 94퍼센트, 99퍼센트 이상 각각 걸러낼 수 있다.</li>
<li>전 지구적인 문제이므로 각 나라의 수장들이 모여 정책을 마련할 필요가 있다.</li></ul>
<h2><span id=".EA.B0.99.EC.9D.B4_.EB.B3.B4.EA.B8.B0"></span><span class="mw-headline" id="같이_보기">같이 보기</span><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=%EB%AF%B8%EC%84%B8%EB%A8%BC%EC%A7%80&action=edit&section=16" title="부분 편집: 같이 보기">편집</a><span class="mw-editsection-bracket">]</span></span></h2>
<ul><li><a href="/wiki/%EB%A8%BC%EC%A7%80" title="먼지">먼지</a></li>
<li><a href="/wiki/%EB%8C%80%EA%B8%B0%EC%A7%88_%EC%A7%80%EC%88%98" title="대기질 지수">대기질 지수</a></li>
<li><a class="new" href="/w/index.php?title=%EC%84%9D%EC%9C%A0%EC%BD%94%ED%81%AC&action=edit&redlink=1" title="석유코크 (없는 문서)">석유코크</a>(페트코크, <a class="extiw" href="https://en.wikipedia.org/wiki/Petroleum_coke" title="en:Petroleum coke">en:Petroleum coke</a>, petcoke)</li></ul>
<h2><span id=".EA.B0.81.EC.A3.BC"></span><span class="mw-headline" id="각주">각주</span><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=%EB%AF%B8%EC%84%B8%EB%A8%BC%EC%A7%80&action=edit&section=17" title="부분 편집: 각주">편집</a><span class="mw-editsection-bracket">]</span></span></h2>
<div class="reflist" style="list-style-type: decimal;">
<div class="mw-references-wrap"><ol class="references">
<li id="cite_note-1"><span class="mw-cite-backlink"><a href="#cite_ref-1">↑</a></span> <span class="reference-text"><a class="external text" href="http://gmao.gsfc.nasa.gov/research/aerosol/modeling/nr1_movie/" rel="nofollow">GMAO – Research</a></span>
</li>
<li id="cite_note-2"><span class="mw-cite-backlink"><a href="#cite_ref-2">↑</a></span> <span class="reference-text"><a class="external text" href="http://gmao.gsfc.nasa.gov/research/aerosol/" rel="nofollow">GMAO – Research</a></span>
</li>
<li id="cite_note-3"><span class="mw-cite-backlink"><a href="#cite_ref-3">↑</a></span> <span class="reference-text"><cite class="citation news"><a class="external text" href="http://kormedi.com/1254659/11%ec%9b%94-%eb%af%b8%ec%84%b8-%eb%a8%bc%ec%a7%80-%ec%8a%b5%ea%b2%a9-%ec%a4%91%ea%b5%ad-%ec%95%84%eb%8b%8c-%ea%b5%ad%eb%82%b4-%ec%98%81%ed%96%a5/" rel="nofollow">“11월 미세 먼지 습격, 중국 아닌 국내 영향 컸다.”</a>.</cite><span class="Z3988" title="ctx_ver=Z39.88-2004&rfr_id=info%3Asid%2Fko.wikipedia.org%3A%EB%AF%B8%EC%84%B8%EB%A8%BC%EC%A7&rft.atitle=11%EC%9B%94+%EB%AF%B8%EC%84%B8+%EB%A8%BC%EC%A7+%EC%8A%B5%EA%B2%A9%2C+%EC%A4%91%EA%B5+%EC%95%84%EB%8C+%EA%B5%EB%82%B4+%EC%98%81%ED%96%A5+%EC%BB%B8%EB%A4.&rft.genre=article&rft_id=http%3A%2F%2Fkormedi.com%2F1254659%2F11%25ec%259b%2594-%25eb%25af%25b8%25ec%2584%25b8-%25eb%25a8%25bc%25ec%25a7%2580-%25ec%258a%25b5%25ea%25b2%25a9-%25ec%25a4%2591%25ea%25b5%25ad-%25ec%2595%2584%25eb%258b%258c-%25ea%25b5%25ad%25eb%2582%25b4-%25ec%2598%2581%25ed%2596%25a5%2F&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal"><span style="display:none;"> </span></span></span>
</li>
<li id="cite_note-4"><span class="mw-cite-backlink"><a href="#cite_ref-4">↑</a></span> <span class="reference-text"><cite class="citation news"><a class="external text" href="http://hellodd.com/?md=news&mt=view&pid=65607" rel="nofollow">“<span style="padding-left:0.2em;">'</span>라돈·케모포비아' 공포···출연연 '융합연구' 해결 나섰다.”</a>.</cite><span class="Z3988" title="ctx_ver=Z39.88-2004&rfr_id=info%3Asid%2Fko.wikipedia.org%3A%EB%AF%B8%EC%84%B8%EB%A8%BC%EC%A7&rft.atitle=%27%EB%9D%BC%EB%8F%88%B7%EC%BC%EB%AA%A8%ED%8F%AC%EB%B9%84%EC%95%84%27+%EA%B3%B5%ED%8F%AC%B7%B7%B7%EC%B6%9C%EC%97%B0%EC%97%B0+%27%EC%9C%B5%ED%95%A9%EC%97%B0%EA%B5%AC%27+%ED%95%B4%EA%B2%B0+%EB%82%98%EC%84%B0%EB%A4.&rft.genre=article&rft_id=http%3A%2F%2Fhellodd.com%2F%3Fmd%3Dnews%26mt%3Dview%26pid%3D65607&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal"><span style="display:none;"> </span></span></span>
</li>
<li id="cite_note-5"><span class="mw-cite-backlink"><a href="#cite_ref-5">↑</a></span> <span class="reference-text"><cite class="citation news">송창석 (2013년 11월 19일). <a class="external text" href="http://www.hani.co.kr/arti/society/environment/611890.html" rel="nofollow">“중국발 초미세먼지, 엄마 뱃속 태아까지 위협한다”</a>. 《<a href="/wiki/%ED%95%9C%EA%B2%A8%EB%A0%88" title="한겨레">한겨레</a>》.</cite><span class="Z3988" title="ctx_ver=Z39.88-2004&rfr_id=info%3Asid%2Fko.wikipedia.org%3A%EB%AF%B8%EC%84%B8%EB%A8%BC%EC%A7&rft.atitle=%EC%A4%91%EA%B5%EB%B0%9C+%EC%B4%88%EB%AF%B8%EC%84%B8%EB%A8%BC%EC%A7%2C+%EC%97%84%EB%A7%88+%EB%B1%83%EC%86+%ED%83%9C%EC%95%84%EA%B9%8C%EC%A7+%EC%9C%84%ED%98%91%ED%95%9C%EB%A4&rft.au=%EC%86%A1%EC%B0%BD%EC%84%9D&rft.date=2013-11-19&rft.genre=article&rft.jtitle=%ED%95%9C%EA%B2%A8%EB%A0%88&rft_id=http%3A%2F%2Fwww.hani.co.kr%2Farti%2Fsociety%2Fenvironment%2F611890.html&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal"><span style="display:none;"> </span></span></span>
</li>
<li id="cite_note-6"><span class="mw-cite-backlink"><a href="#cite_ref-6">↑</a></span> <span class="reference-text"><cite class="citation news">민영규 (2009년 9월 24일). <a class="external text" href="http://news.naver.com/main/read.nhn?mode=LSD&mid=sec&sid1=102&oid=001&aid=0002881939" rel="nofollow">“부산MBC, 26일 특별기획 '미세먼지의 비밀' 방영”</a>. 《<a href="/wiki/%EC%97%B0%ED%95%A9%EB%89%B4%EC%8A%A4" title="연합뉴스">연합뉴스</a>》.</cite><span class="Z3988" title="ctx_ver=Z39.88-2004&rfr_id=info%3Asid%2Fko.wikipedia.org%3A%EB%AF%B8%EC%84%B8%EB%A8%BC%EC%A7&rft.atitle=%EB%B6%EC%82%B0MBC%2C+26%EC%9D%BC+%ED%8A%B9%EB%B3%84%EA%B8%B0%ED%9A+%27%EB%AF%B8%EC%84%B8%EB%A8%BC%EC%A7%EC%9D%98+%EB%B9%84%EB%B0%27+%EB%B0%A9%EC%98%81&rft.au=%EB%AF%BC%EC%98%81%EA%B7%9C&rft.date=2009-09-24&rft.genre=article&rft.jtitle=%EC%97%B0%ED%95%A9%EB%89%B4%EC%8A%A4&rft_id=http%3A%2F%2Fnews.naver.com%2Fmain%2Fread.nhn%3Fmode%3DLSD%26mid%3Dsec%26sid1%3D102%26oid%3D001%26aid%3D0002881939&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal"><span style="display:none;"> </span></span></span>
</li>
<li id="cite_note-7"><span class="mw-cite-backlink"><a href="#cite_ref-7">↑</a></span> <span class="reference-text"><cite class="citation news">김학준 (2002년 12월 31일). <a class="external text" href="http://legacy.www.hani.co.kr/section-005100007/2002/12/005100007200212312045128.html" rel="nofollow">“오염먼지 쌓여 결핵·빈혈로 '시름<span style="padding-right:0.2em;">'</span>”</a>. 《<a href="/wiki/%ED%95%9C%EA%B2%A8%EB%A0%88" title="한겨레">한겨레</a>》.</cite><span class="Z3988" title="ctx_ver=Z39.88-2004&rfr_id=info%3Asid%2Fko.wikipedia.org%3A%EB%AF%B8%EC%84%B8%EB%A8%BC%EC%A7&rft.atitle=%EC%98%A4%EC%97%BC%EB%A8%BC%EC%A7+%EC%8C%93%EC%97%AC+%EA%B2%B0%ED%95%B5%B7%EB%B9%88%ED%98%88%EB%A1%9C+%27%EC%9C%EB%A6%84%27&rft.au=%EA%B9%ED%95%99%EC%A4&rft.date=2002-12-31&rft.genre=article&rft.jtitle=%ED%95%9C%EA%B2%A8%EB%A0%88&rft_id=http%3A%2F%2Flegacy.www.hani.co.kr%2Fsection-005100007%2F2002%2F12%2F005100007200212312045128.html&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal"><span style="display:none;"> </span></span></span>
</li>
<li id="cite_note-8"><span class="mw-cite-backlink"><a href="#cite_ref-8">↑</a></span> <span class="reference-text"><cite class="citation news">한세현 (2013년 12월 7일). <a class="external text" href="http://news.sbs.co.kr/news/endPage.do?news_id=N1002121023" rel="nofollow">“<span style="padding-left:0.2em;">"</span>미세먼지 임신부와 태아에 특히 더 위험<span style="padding-right:0.2em;">"</span>”</a>. 《<a class="mw-disambig" href="/wiki/SBS" title="SBS">SBS</a>》.</cite><span class="Z3988" title="ctx_ver=Z39.88-2004&rfr_id=info%3Asid%2Fko.wikipedia.org%3A%EB%AF%B8%EC%84%B8%EB%A8%BC%EC%A7&rft.atitle=%22%EB%AF%B8%EC%84%B8%EB%A8%BC%EC%A7+%EC%9E%84%EC%A0%EB%B6%EC%99+%ED%83%9C%EC%95%84%EC%97%90+%ED%8A%B9%ED%9E%88+%EB%94+%EC%9C%84%ED%97%98%22&rft.au=%ED%95%9C%EC%84%B8%ED%98%84&rft.date=2013-12-07&rft.genre=article&rft.jtitle=SBS&rft_id=http%3A%2F%2Fnews.sbs.co.kr%2Fnews%2FendPage.do%3Fnews_id%3DN1002121023&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal"><span style="display:none;"> </span></span></span>
</li>
<li id="cite_note-9"><span class="mw-cite-backlink"><a href="#cite_ref-9">↑</a></span> <span class="reference-text"><cite class="citation news">박현갑 (2013년 11월 27일). <a class="external text" href="http://www.seoul.co.kr/news/newsView.php?id=20131127031010" rel="nofollow">“한반도를 엄습하는 중국발 미세먼지”</a>. 《<a href="/wiki/%EC%84%9C%EC%9A%B8%EC%8B%A0%EB%AC%B8" title="서울신문">서울신문</a>》.</cite><span class="Z3988" title="ctx_ver=Z39.88-2004&rfr_id=info%3Asid%2Fko.wikipedia.org%3A%EB%AF%B8%EC%84%B8%EB%A8%BC%EC%A7&rft.atitle=%ED%95%9C%EB%B0%98%EB%8F%84%EB%A5%BC+%EC%97%84%EC%8A%B5%ED%95%98%EB%8A%94+%EC%A4%91%EA%B5%EB%B0%9C+%EB%AF%B8%EC%84%B8%EB%A8%BC%EC%A7&rft.au=%EB%B0%95%ED%98%84%EA%B0%91&rft.date=2013-11-27&rft.genre=article&rft.jtitle=%EC%84%9C%EC%9A%B8%EC%A0%EB%AC%B8&rft_id=http%3A%2F%2Fwww.seoul.co.kr%2Fnews%2FnewsView.php%3Fid%3D20131127031010&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal"><span style="display:none;"> </span></span></span>
</li>
<li id="cite_note-10"><span class="mw-cite-backlink"><a href="#cite_ref-10">↑</a></span> <span class="reference-text"><cite class="citation news">이주영 (2015년 3월 23일). <a class="external text" href="http://www.yonhapnews.co.kr/bulletin/2015/03/23/0200000000AKR20150323110800017.HTML" rel="nofollow">“<span style="padding-left:0.2em;">"</span>미세먼지 주성분 PAH, 과체중 노년여성 건강위협<span style="padding-right:0.2em;">"</span>”</a>. 《<a href="/wiki/%EC%97%B0%ED%95%A9%EB%89%B4%EC%8A%A4" title="연합뉴스">연합뉴스</a>》.</cite><span class="Z3988" title="ctx_ver=Z39.88-2004&rfr_id=info%3Asid%2Fko.wikipedia.org%3A%EB%AF%B8%EC%84%B8%EB%A8%BC%EC%A7&rft.atitle=%22%EB%AF%B8%EC%84%B8%EB%A8%BC%EC%A7+%EC%A3%BC%EC%84%B1%EB%B6%84+PAH%2C+%EA%B3%BC%EC%B2%B4%EC%A4%91+%EB%85%B8%EB%85%84%EC%97%AC%EC%84%B1+%EA%B1%B4%EA%B0%95%EC%9C%84%ED%98%91%22&rft.au=%EC%9D%B4%EC%A3%BC%EC%98%81&rft.date=2015-03-23&rft.genre=article&rft.jtitle=%EC%97%B0%ED%95%A9%EB%89%B4%EC%8A%A4&rft_id=http%3A%2F%2Fwww.yonhapnews.co.kr%2Fbulletin%2F2015%2F03%2F23%2F0200000000AKR20150323110800017.HTML&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal"><span style="display:none;"> </span></span></span>
</li>
</ol></div></div>
<h2><span id=".EC.99.B8.EB.B6.80_.EB.A7.81.ED.81.AC"></span><span class="mw-headline" id="외부_링크">외부 링크</span><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=%EB%AF%B8%EC%84%B8%EB%A8%BC%EC%A7%80&action=edit&section=18" title="부분 편집: 외부 링크">편집</a><span class="mw-editsection-bracket">]</span></span></h2>
<ul><li><a class="external text" href="https://web.archive.org/web/20150831085115/http://www.airkorea.or.kr/dustForecast" rel="nofollow">에어 코리아 - 대한민국 실시간 대기오염도</a></li></ul>
<p><small><a class="image" href="/wiki/%ED%8C%8C%EC%9D%BC:PD-icon.svg"><img alt="PD-icon.svg" data-file-height="196" data-file-width="196" decoding="async" height="20" src="//upload.wikimedia.org/wikipedia/commons/thumb/6/62/PD-icon.svg/20px-PD-icon.svg.png" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/6/62/PD-icon.svg/30px-PD-icon.svg.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/6/62/PD-icon.svg/40px-PD-icon.svg.png 2x" width="20"/></a> 본 문서에는 <a href="/wiki/%EC%84%9C%EC%9A%B8%ED%8A%B9%EB%B3%84%EC%8B%9C" title="서울특별시">서울특별시</a>에서 <a href="/wiki/%EC%9C%84%ED%82%A4%EB%B0%B1%EA%B3%BC:%EC%A7%80%EC%8B%9D%EA%B3%B5%EC%9C%A0_%ED%94%84%EB%A1%9C%EC%A0%9D%ED%8A%B8" title="위키백과:지식공유 프로젝트">지식공유 프로젝트</a>를 통해 <a href="/wiki/%EC%9C%84%ED%82%A4%EB%B0%B1%EA%B3%BC:%ED%8D%BC%EB%B8%94%EB%A6%AD_%EB%8F%84%EB%A9%94%EC%9D%B8" title="위키백과:퍼블릭 도메인">퍼블릭 도메인</a>으로 공개한 <a href="/wiki/%EC%9C%84%ED%82%A4%EB%B0%B1%EA%B3%BC:%EC%84%9C%EC%9A%B8%EC%8B%9C_%EC%A7%80%EC%8B%9D%EA%B3%B5%EC%9C%A0_%ED%94%84%EB%A1%9C%EC%A0%9D%ED%8A%B8" title="위키백과:서울시 지식공유 프로젝트">저작물</a>을 기초로 작성된 내용이 포함되어 있습니다.</small>
</p>
</div></div>
9 --------------------------------------------------
###Markdown
To extract the meaningful part of the page, open it in the browser and use Chrome DevTools' Inspect tool to find the important HTML tag. A div tag like the one below is the meaningful block~~~html..~~~
###Code
content = soup.find('div',{'id':'content'})
content
# the first <p> tag inside it contains the summarized information
content.find('p').text
###Output
_____no_output_____
###Markdown
2. Scraping Naver real-time search keywords
###Code
# a case that fails
url = "https://datalab.naver.com/keyword/realtimeList.naver?where=main"
res = requests.get(url)
res.text
# Many websites block requests that do not come from a browser, to prevent indiscriminate content scraping.
# But what if we pretend to be a web browser? http://useragentstring.com/
url = "https://datalab.naver.com/keyword/realtimeList.naver?where=main"
headers = {
"User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36"
}
res = requests.get(url,headers=headers)
res.text
import datetime
soup = BeautifulSoup(res.text, "html.parser")
rows = soup.find('div',{'class':'keyword_rank'}).find_all('span',{'class':'title'})
print("현재시간:", datetime.datetime.now())
for rank, row in enumerate(rows):
print(rank+1, row.text)
###Output
현재시간: 2019-09-05 20:08:37.637674
1 뮬라웨어 쓰리패스
2 최성해
3 한국 조지아
4 대한민국 조지아
5 축구
6 무지개
7 웨이크메이크 올영세일
8 구혜선 안재현 문자
9 오늘 축구경기
10 방정현 변호사
11 단양 마늘정식
12 강민호
13 김두관
14 동양대 총장
15 한국 국가대표 축구 일정
16 조지아
17 황교안자녀장관상
18 일본 파라과이
19 송기헌
20 갤럭시 폴드
###Markdown
3. Fetching Naver movie comments
###Code
url="https://movie.naver.com/movie/bi/mi/point.nhn?code=159070"
res = requests.get(url)
res.text
# Part of the comment section is missing from this response. Let's find the real URL!
url = "https://movie.naver.com/movie/bi/mi/pointWriteFormList.nhn?code=159070&type=after&isActualPointWriteExecute=false&isMileageSubscriptionAlready=false&isMileageSubscriptionReject=false&page=1"
res = requests.get(url)
res.text
soup = BeautifulSoup(res.text, "html.parser")
rows = soup.find('div',{'class':'score_result'}).find_all('li')
for row in rows:
# print(row)
comment = row.find('p').text
user = row.find_all('em')[1].text.strip()
date = row.find_all('em')[2].text
score = row.find_all('em')[0].text
print(score+"점", date, user, comment)
###Output
1점 2019.02.27 11:06 초코파이(andy****) 곧 있으면 자동차왕곽한구 나올 기새네
1점 2019.02.27 09:20 13도(mak4****) 배우 정지훈... 히트작 구경한 지가 언제냐... 이 영화 망하고 나면 그나마도 못 나오겠네. 집에서 태희 누나한테 잘 해주고 좋은 데 많이 놀러 다녀. 행복이 최고다^^ 모아둔 돈 많잖아.
1점 2019.02.27 09:04 삑삑이(the_****) 이젠 관객들 푯값도 훔쳐가네
1점 2019.02.27 11:01 알껍서(kick****) 전차왕 계엄폭동 ㅋㅋㅋ
2점 2019.02.27 09:55 040614 대구중 박재호(wjse****) 제발 반일감정을 이용한 국뽕영화좀 그만만드셈
1점 2019.02.27 09:57 towa**** 이런영화에 100억을 투자를 한거라구??정지훈 연기 왜이렇구 못하냐?? 술먹고 인스타할 시간에, 연기공부좀 해라!! 술주정으로 인스타 하지 말구그리고 주식먹튀한거 사과하고 배우활동 하길뻔뻔하다 참.. ㅉㅉ
1점 2019.02.27 09:12 TT(seun****) 리얼,악녀,염력,인랑,물괴,창궐,엄복동
1점 2019.02.27 09:23 파울루 벤투(snrn****) 이거보지말고 피시방7시간이 좋습니다.
1점 2019.02.27 12:53 아디다스(ldj3****) 1점을 준 이유는 0점이 없기 때문이다
1점 2019.02.27 09:49 니베아립케어(ripl****) 제목만 봐도 스트레스가...애국심 마켓팅 그만합시다3.1운동 100주년 맞춰 개봉하면 천만관객 동원할 줄 알았나요?
|
01/chapter3/04_chipotle_example.ipynb | ###Markdown
4. Learning the libraries through a Chipotle analysis example
1) A first look at the dataset
###Code
%matplotlib inline
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
file_path = '../../dataset/chipotle.tsv'
chipo = pd.read_csv(file_path, sep = '\t')
print(chipo.shape)
print("------------------------------------")
print(chipo.info())
print(chipo.columns)
print("------------------------------------")
print(chipo.index)
###Output
Index(['order_id', 'quantity', 'item_name', 'choice_description',
'item_price'],
dtype='object')
------------------------------------
RangeIndex(start=0, stop=4622, step=1)
###Markdown
---- 2) Exploring feature characteristics and statistics
###Code
chipo['order_id'] = chipo['order_id'].astype(str)
print(chipo.describe())
print(len(chipo['order_id'].unique()))
print(len(chipo['item_name'].unique()))
###Output
1834
50
###Markdown
---- 3) Data exploration using the libraries
###Code
item_count = chipo['item_name'].value_counts()[:10]
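# note: Series.iteritems() used below is deprecated (and removed in pandas 2.x);
# Series.items() behaves the same way if this cell fails on a newer pandas version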
for idx, (val, cnt) in enumerate(item_count.iteritems(), 1):
print("Top", idx, ":", val, cnt)
order_count = chipo.groupby('item_name')['order_id'].count()
order_count[:10]
item_quantity = chipo.groupby('item_name')['quantity'].sum()
item_quantity[:10]
###Output
_____no_output_____
###Markdown
---- 4) Visualization using the libraries
###Code
item_name_list = item_quantity.index.tolist()
x_pos = np.arange(len(item_name_list))
order_cnt = item_quantity.values.tolist()
plt.bar(x_pos, order_cnt, align='center')
plt.ylabel('ordered_item_count')
plt.title('Distribution of all orderd item')
plt.show()
###Output
_____no_output_____
###Markdown
---- 5) Data preprocessing with the apply function
###Code
print(chipo.info())
chipo['item_price'].head()
chipo['item_price'] = chipo['item_price'].apply(lambda x: float(x[1:]))
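# item_price is stored as a string such as '$2.39 ', so x[1:] drops the leading '$' before the float conversion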
chipo['item_price'].head()
chipo['item_price'].describe()
###Output
_____no_output_____
###Markdown
---- 6) Various applied data explorations
###Code
chipo.groupby('order_id')['item_price'].sum().mean()
chipo_orderid_group = chipo.groupby('order_id').sum()
results = chipo_orderid_group[chipo_orderid_group.item_price >= 10]
print(results.index.values)
chipo_one_item = chipo[chipo.quantity == 1]
price_per_item = chipo_one_item.groupby('item_name').min()
price_per_item.sort_values(by = "item_price", ascending = False)[:10]
item_name_list = price_per_item.index.tolist()
x_pos = np.arange(len(item_name_list))
item_price = price_per_item['item_price'].tolist()
plt.bar(x_pos, item_price, align='center')
plt.ylabel('item price($)')
plt.title('Distribution of item price')
plt.show()
plt.hist(item_price)
plt.ylabel('counts')
plt.title('Histogram of item price')
plt.show()
###Output
_____no_output_____ |
submission_07.ipynb | ###Markdown
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import keras
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout
from keras.layers import BatchNormalization
from google.colab import drive
drive.mount('/gdrive')
df_train = pd.read_csv('/gdrive/My Drive/DACON-semiconductor-competition/dataset/train.csv')
df_test = pd.read_csv('/gdrive/My Drive/DACON-semiconductor-competition/dataset/test.csv')
# Split the data into independent (X) and dependent (Y) variables.
train_X = df_train.iloc[:,4:]
train_Y = df_train.iloc[:,0:4]
test_X = df_test.iloc[:,1:]
###Output
_____no_output_____
###Markdown
Model 7
* 8 layers
* (108, 82, 56, 30) units, he_normal, relu
* BatchNormalization
* Adam(0.008)
* epochs 100
* batch_size 500
* doubled the number of layers
* stacked more layers, with each layer's units set to the average of the neighbouring layers' units
* changed the learning rate
* performs worse than Model 6
###Code
# Start building the model with Keras.
model_07 = Sequential()
model_07.add(Dense(units=108, input_dim=226, kernel_initializer='he_normal'))
model_07.add(Dense(units=108, kernel_initializer='he_normal'))
model_07.add(BatchNormalization())
model_07.add(Activation('relu'))
model_07.add(Dense(units=82, kernel_initializer='he_normal'))
model_07.add(Dense(units=82, kernel_initializer='he_normal'))
model_07.add(BatchNormalization())
model_07.add(Activation('relu'))
model_07.add(Dense(units=56, kernel_initializer='he_normal'))
model_07.add(Dense(units=56, kernel_initializer='he_normal'))
model_07.add(BatchNormalization())
model_07.add(Activation('relu'))
model_07.add(Dense(units=30, kernel_initializer='he_normal'))
model_07.add(Dense(units=30, kernel_initializer='he_normal'))
model_07.add(BatchNormalization())
model_07.add(Activation('relu'))
model_07.add(Dense(units=4, activation='linear'))
adam = keras.optimizers.Adam(0.008)
model_07.compile(loss='mae', optimizer=adam, metrics=['accuracy'])
hist = model_07.fit(train_X, train_Y, epochs=100, batch_size=500, validation_split=0.05)
%matplotlib inline
import matplotlib.pyplot as plt
fig, loss_ax = plt.subplots()
acc_ax = loss_ax.twinx()
loss_ax.plot(hist.history['loss'], 'y', label='train loss')
loss_ax.plot(hist.history['val_loss'], 'r', label='val loss')
acc_ax.plot(hist.history['acc'], 'b', label='train acc')
acc_ax.plot(hist.history['val_acc'], 'g', label='val acc')
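# note: depending on the Keras version, the history keys may be 'accuracy'/'val_accuracy'
# instead of 'acc'/'val_acc'; hist.history.keys() shows which names are available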
loss_ax.set_xlabel('epoch')
loss_ax.set_ylabel('loss')
acc_ax.set_ylabel('mean absolute error')
loss_ax.legend(loc='upper left')
acc_ax.legend(loc='lower left')
plt.show()
# Generate the predictions.
pred_test_07 = model_07.predict(test_X)
# Create the submission file.
sample_sub = pd.read_csv('/gdrive/My Drive/DACON-semiconductor-competition/dataset/sample_submission.csv', index_col=0)
submission = sample_sub+pred_test_07
submission.to_csv('/gdrive/My Drive/DACON-semiconductor-competition/submission_07.csv')
###Output
_____no_output_____ |
pyda-1.2.-datatypes-cycles-practice.ipynb | ###Markdown
Simple data types
###Code
my_integer = 10
type(my_integer)
my_float = 5.5
type(my_float)
my_string = 'Hello World!'
my_string_2 = "Hello World"
type(my_string_2)
my_bool = True
# my_bool = False
type(my_bool)
x = 5
y = 1
print(type(x > y))
# преобразование типов
salary = 1000
print('Ваша годовая зарплата составляет ', salary, ' условных единиц')
print('Ваша годовая зарплата составляет ' + str(salary) + ' условных единиц')
# неявное преобразование типов
# print(20 / 5.1)
# print(1 + True)
###Output
_____no_output_____
###Markdown
Strings
###Code
'Hi, ' + 'Oleg'
my_string.upper()
my_string.lower()
my_string.capitalize()
my_string.replace('Hello', 'Goodbye')
len(my_string)
###Output
_____no_output_____
###Markdown
f-strings
###Code
name = 'oleg'
lang = 'python'
t = f"Hello, {name.capitalize()}, i know {lang} a bit"
print(t)
print(type(t))
###Output
_____no_output_____
###Markdown
Indexing and slices
###Code
my_string = 'Hello World'
my_string[2]
my_string[-4]
my_string[0:5]
my_string = 'Hello World'
my_string[0:8:2]
my_string[6:]
my_string[:5]
###Output
_____no_output_____
###Markdown
Exercise
Extract just the date portion from a timestamp in the following format
###Code
date = '2019-08-27T23:59:00.932'
print(date[0:10])
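# an equivalent approach: print(date.split('T')[0])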
###Output
2019-08-27
###Markdown
Checking whether an element is contained in an object
###Code
my_string = 'Hello World'
target_string = 'World'
if target_string in my_string:
print('find!')
###Output
_____no_output_____
###Markdown
Lists
###Code
month_list = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep']
income_list = [13000, 14000, 14300, 15000, 13800, 13000, 14900, 15200, 15300]
income_by_months = [['Jan', 13000], ['Feb', 14000], ['Mar', 14300], ['Apr', 15000], ['May', 13800], ['Jun', 13000], ['Jul', 14900], ['Aug', 15200], ['Sep', 15300]]
print(type(month_list))
print(type(income_list))
print(type(income_by_months))
# индексация элементов в списке
print(month_list[0])
print(month_list[-1])
print(income_by_months[-4])
# срезы
print(income_by_months[0:2])
print('--------------')
print(income_by_months[-8:-6])
print('--------------')
print(income_by_months[2:])
print('--------------')
print(income_by_months[:3])
# можно обращаться к любому уровню вложенности
income_by_months = [['Jan', 13000], ['Feb', 14000], ['Mar', 14300], ['Apr', 15000], ['May', 13800], ['Jun', 13000], ['Jul', 14900], ['Aug', 15200], ['Sep', 15300]]
income_by_months[0][0]
# изменение списков
income_by_months[0][1] = 13100
print(income_by_months)
income_by_months[0:2] = [['Jan', 13200], ['Feb', 13900]]
print(income_by_months)
income_by_months_2 = [['Nov', 15400], ['Dec', 17000]]
income_by_month = income_by_months + income_by_months_2
print(income_by_month)
###Output
_____no_output_____
###Markdown
Unpacking lists
###Code
first, second, third = ['первый', 'второй', 'третий']
first
# when the number of elements is unknown
first, *other = ['первый', 'второй', 'третий']
first, other
first, *other, last = ['первый', 'второй', 'третий', 'четвертый']
first, last
###Output
_____no_output_____
###Markdown
List operations
###Code
income_by_months
# Удаляем элемент по индексу
del(income_by_months[-1])
income_by_months
# удаляем элемент по значению
month_list.remove('Sep')
print(month_list)
# добавляем элемент в конец списка
income_by_months.append(['Dec', 17000])
income_by_months
# добавляем элемент по нужному индексу
income_list.insert(2, 1111111)
print(income_list)
# считаем количество вхождений элемента в список
income_list.count(13000)
# узнаем индекс элемента в списка (только первое вхождение!)
income_list.index(13000)
# income_list.index(13000, 1)
# разворачиваем список
month_list.reverse()
month_list
# узнаем длину списка
len(income_list)
# сумма элементов
sum(income_list)
# максимальный элемент элементов
max(income_list)
# минимальный элемент элементов
min(income_list)
# сортировка по возрастанию
sorted(income_list)
# изменить порядок сортировки
sorted(income_list, reverse= True)
# а это сортировка строк по алфавиту
sorted(month_list)
###Output
_____no_output_____
###Markdown
Modifying lists
In the example below, the variables a and b actually point to the same object. As a result, appending an element to b also adds it to the original list a.
###Code
a = [1, 2, 3]
b = a
b.append(4)
'a = {}'.format(a)
id(a), id(b)
# создаем копию объекта
a = [1, 2, 3]
b = list(a)
b.append(4)
print(a)
print(b)
id(a), id(b)
###Output
_____no_output_____
###Markdown
Using the copy module
###Code
import copy
a = [1, 2, 3]
b = copy.copy(a)
id(a), id(b)
b.append(4)
print('a = {}'.format(a))
print('b = {}'.format(b))
###Output
_____no_output_____
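###Markdown
Note: copy.copy makes a shallow copy, so nested lists are still shared between the original and the copy; copy.deepcopy also copies the nested objects. A minimal sketch:
###Code
import copy

a = [[1, 2], [3, 4]]
shallow = copy.copy(a)      # copies only the outer list
deep = copy.deepcopy(a)     # copies the inner lists as well
a[0].append(99)
print(shallow[0])  # [1, 2, 99] - the inner list is shared with a
print(deep[0])     # [1, 2] - deepcopy made its own copy of the inner list
###Output
_____no_output_____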
###Markdown
Lists and strings
###Code
queries_string = "смотреть сериалы онлайн,новости спорта,афиша кино,курс доллара,сериалы этим летом,курс по питону,сериалы про спорт"
# преобразование строки в список (например, из CSV-файла)
queries_string.split(' ')
# Преобразование списка в строку
','.join(['Столбец 1', 'Столбец 2', 'Столбец 3'])
# проверка вхождения элемента в список:
# 'Москва' in ['Ленинград', 'Одесса', 'Севастополь', 'Москва']
'Москва' not in ['Ленинград', 'Одесса', 'Севастополь', 'Москва']
###Output
_____no_output_____
###Markdown
Tuples
###Code
salary_tuple = (1000, 1200, 1300, 900, 800)
type(salary_tuple)
type(list(salary_tuple))
# print(salary_tuple[0])
salary_tuple[0] = 500
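# the assignment above raises a TypeError: tuples are immutable, so their elements cannot be reassigned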
# кортеж из одного элемента задается так:
t = ('one', )
# без запятой получится строка
type( ('one') )
# функция zip
salaries = set([1000, 1200, 1300, 900, 800, 1000])
names = set(['Robert', 'Jane', 'Liza', 'Richard', 'John'])
salaries_by_names = zip(names, salaries)
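# note: sets have no defined order, so which salary ends up paired with which name here is arbitrary;
# zipping lists (in a known order) would be needed for a reliable pairing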
# print(salaries_by_names)
print(list(salaries_by_names))
###Output
[('John', 800), ('Robert', 900), ('Jane', 1000), ('Liza', 1200), ('Richard', 1300)]
###Markdown
Loops
The while loop
###Code
x = 5
while x != 0:
# x = x / 1
x -= 1
print(x)
x = 7
while x != 0:
if x % 2 == 0:
print(x, '- четное число')
else:
print(x, '- нечетное число')
x = x - 1
# будем запрашивать целые числа до тех пор, пока не будет введен 0 и выведем сумму полученных чисел
sum_ = 0
a = ''
while a != 0:
a = int(input())
sum_ += a
sum_
###Output
_____no_output_____
###Markdown
The for loop
###Code
# итерация по строкам
company_name = 'Orange'
for letter in company_name:
print(letter)
# letter = letter.capitalize()
# print(letter)
print(letter)
# итерация по спискам
companies_capitalization = [
['Orange', 1.3],
['Maxisoft', 1.5],
['Headbook', 0.8],
['Nicola', 2.2]
]
for company in companies_capitalization:
# print(company)
print(company[0], 'capitalization is', company[1])
# print('end of iteration')
###Output
_____no_output_____
###Markdown
break, pass, continue
###Code
phrase = '640Кб должно хватить для любых задач. Билл Гейтс (по легенде)'
for letter in phrase:
if letter == ' ':
break
print(letter, end='')
for letter in phrase:
if letter == ' ':
continue
print(letter)
print('finish loop')
for letter in phrase:
if letter == ' ':
pass
print(letter)
print('finish loop')
###Output
_____no_output_____
###Markdown
The range and enumerate functions
###Code
range(10)
type(range(10))
list(range(2, 10, 5))
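# range(2, 10, 5) yields 2 and 7: start at 2, step by 5, stop before 10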
for i in range(10):
print(i)
# с указанием левой и правой границы
for i in range(3, 20):
print(i)
# третий аргумент - шаг
for i in range(3, 20, 5):
print(i)
# enumerate позволяет получить индекс каждого элемента
enumerate([1, 2, 3, 4, 5])
# list(enumerate([1, 2, 3, 4, 5]))
companies_capitalization = [
['Orange', 1.3],
['Maxisoft', 1.5],
['Headbook', 0.8],
['Nicola', 2.2]
]
for i, company in enumerate(companies_capitalization):
print(i+1, company[0], 'capitalization is', company[1])
###Output
_____no_output_____
###Markdown
Practice
There is a data structure cook_book that stores, for each dish, its ingredients and their amounts per single serving, and a variable with the number of people for whom the dishes must be prepared:
###Code
cook_book = [
['салат',
[
['картофель', 100, 'гр.'],
['морковь', 50, 'гр.'],
['огурцы', 50, 'гр.'],
['горошек', 30, 'гр.'],
['майонез', 70, 'мл.'],
]
],
['пицца',
[
['сыр', 50, 'гр.'],
['томаты', 50, 'гр.'],
['тесто', 100, 'гр.'],
['бекон', 30, 'гр.'],
['колбаса', 30, 'гр.'],
['грибы', 20, 'гр.'],
],
],
['фруктовый десерт',
[
['хурма', 60, 'гр.'],
['киви', 60, 'гр.'],
['творог', 60, 'гр.'],
['сахар', 10, 'гр.'],
['мед', 50, 'мл.'],
]
]
]
person = 5
###Output
_____no_output_____
###Markdown
The user should be shown a shopping list with the required amount of each ingredient for the given number of people, in the following form:
Салат: картофель, 500гр. морковь, 250гр. огурцы, 250гр. горошек, 150гр. майонез, 350мл.
Пицца: сыр, 250гр. томаты, 250гр. тесто, 500гр. бекон, 150гр. колбаса, 150гр. грибы, 100гр.
Фруктовый десерт: хурма, 300гр. киви, 300гр. творог, 300гр. сахар, 50гр. мед, 250мл.
List comprehension
###Code
# Дана последовательность чисел. Мы хотим оставить только те, что делятся на 5
sequence = range(0, 40, 3)
list(sequence)
# решение в лоб
for num in sequence:
if num % 5 == 0:
print(num)
# если хотим получить отфильтрованный лист, то будет даже так
filtered_sequence = []
for num in sequence:
if num % 5 == 0:
filtered_sequence.append(num)
print(filtered_sequence)
[num for num in sequence if num % 5 == 0]
[w for w in sequence if w % 5 == 0]
# с list comprehension получается покороче
[x**2 for x in sequence if x % 5 == 0]
###Output
_____no_output_____
###Markdown
An example of computing a metric from a set of lists. The columns in each row are: date, counter id, number of visits. Let's find the average number of visits in this data.
###Code
api_response = [
['2017-12-26', '777', 184],
['2017-12-27', '111', 146],
['2017-12-28', '777', 98],
['2017-12-29', '777', 206],
['2017-12-30', '111', 254],
['2017-12-31', '777', 89],
['2018-01-01', '111', 54],
['2018-01-02', '777', 68],
['2018-01-03', '777', 74],
['2018-01-04', '111', 89],
['2018-01-05', '777', 104],
['2018-01-06', '777', 99],
['2018-01-07', '777', 145],
['2018-01-08', '111', 184],
]
for element in api_response:
print(element[2])
sum([x[2] for x in api_response])/len(api_response)
###Output
_____no_output_____
###Markdown
Practice
There is a stream of product prices (assume a price is always greater than 0). Write an algorithm that finds the minimal even number in this stream.
###Code
# sys_stdin сейчас просто название переменной
sys_stdin = [78, 68, 484, 3, 254, 90, 143, 78, 43, 42, 3053, 473, 5, 8593, 16, 3, 1454, 37, 96, 8547]
###Output
_____no_output_____
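###Markdown
One possible solution sketch (assuming the stream is simply iterated element by element, like the `sys_stdin` list above): keep track of the smallest even value seen so far.
###Code
# a possible solution sketch: find the minimal even number in the stream
min_even = None
for price in sys_stdin:
    if price % 2 == 0 and (min_even is None or price < min_even):
        min_even = price
print(min_even)  # 16 for the sample data above
###Output
_____no_output_____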
###Markdown
Sets (set)
A collection of non-repeating elements in arbitrary order
###Code
data_scientist_skills = set(['Python', 'R', 'SQL', 'Tableau', 'SAS', 'Git'])
data_engineer_skills = set(['Python', 'Java', 'Scala', 'Git', 'SQL', 'Hadoop'])
# логическое ИЛИ – что нужно знать data-scientst, который по совместительству data-engineer
print(data_scientist_skills.union(data_engineer_skills))
print(data_scientist_skills | data_engineer_skills)
# логическое И – что нужно знать и data-scientist и data-engineer
print(data_scientist_skills.intersection(data_engineer_skills))
print(data_scientist_skills & data_engineer_skills)
# разность множеств – что знает data-scientist, но не знает data-engineer (и наоборот)
# print(data_scientist_skills.difference(data_engineer_skills))
# print(data_scientist_skills - data_engineer_skills)
print(data_engineer_skills.difference(data_scientist_skills))
print(data_engineer_skills - data_scientist_skills)
# симметричная разность множеств – что такого знают data-scientist и data-engineer, чего не знают они оба
# print(data_scientist_skills.symmetric_difference(data_engineer_skills))
# print(data_scientist_skills ^ data_engineer_skills)
print(data_engineer_skills.symmetric_difference(data_scientist_skills))
print(data_engineer_skills ^ data_scientist_skills)
# Из списка можно убрать все повторения просто обратив его в set!
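# for example: list(set([1, 2, 2, 3, 3])) gives [1, 2, 3] (element order is not guaranteed)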
###Output
_____no_output_____
###Markdown
Dictionaries
###Code
salaries = {
'John': 1200,
'Mary': 500,
'Steven': 1000,
'Liza': 1500
}
# обращение к элементу словаря
salaries['John']
# удаляем элемент из словаря
del(salaries['Liza'])
salaries
# добавляем элемент в словарь
salaries['James'] = 2000
salaries
# изменяем значение по ключу
salaries['Mary'] = 2000
salaries
# безопасно получаем значение по ключу
salaries['Oleg']
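# the line above raises a KeyError because 'Oleg' is not in the dictionary;
# .get() returns a default value instead of raising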
# salaries.get('Oleg', 'Not Found')
salaries.get('Mary', 'Not Found')
# проверка на наличие ключа в словаре
recruit = 'Amanda'
if recruit in salaries:
print('Значение для ключа уже существует')
else:
print('Добавляю новый ключ')
salaries[recruit] = 2200
print(salaries)
# You can also use the setdefault method
# setdefault does not change the value if the key is already in the dictionary
salaries.setdefault('Mary', 3000)
salaries
# salaries.setdefault('Paul', 3000)
# salaries
# moving on to a nested (more complex) dictionary
staff_dict = {
'Robert': {'salary': 800, 'bonus': 200},
'Jane': {'salary': 200, 'bonus': 300},
'Liza': {'salary': 1300, 'bonus': 200},
'Richard': {'salary': 500, 'bonus': 1200}
}
staff_dict['Robert']['salary']
staff_dict['Oleg'] = {'salary': 1000000, 'bonus': 300}
staff_dict
# getting only the keys/values from a dictionary (very handy in loops)
# print(staff_dict.keys())
# print(staff_dict.values())
print(staff_dict.items())
# print(list(staff_dict.keys()))
# print(list(staff_dict.values()))
print(list(staff_dict.items()))
# iterating over dictionaries
# this is how it would look without a loop
print("Robert's salary:", staff_dict['Robert']['salary'])
print("Jane's salary:", staff_dict['Jane']['salary'])
print("Richard's salary:", staff_dict['Richard']['salary'])
for person in staff_dict:
print(person)
for key in staff_dict.keys():
print(key)
for value in staff_dict.values():
print(value)
for key, value in staff_dict.items():
print(key, value)
for i, person in enumerate(staff_dict):
print(i+1, person)
# using a loop instead
for person, info in staff_dict.items():
# print(person, info)
print(person, "'s salary: ", info['salary'], sep='')
# add a salary-level status
for person, info in staff_dict.items():
# print(person)
if info['salary'] > 1000:
info['status'] = 'above average'
else:
info['status'] = 'below average'
# print(f"{person}'s salary: {info['salary']} ({status})")
staff_dict
# the zip function (it stops at the shorter sequence, so the fourth category is dropped)
categories = ['Food', 'Cars', 'Politics', '346346']
audience = [100, 200, 300]
categories_dict = dict(zip(categories, audience))
print(categories_dict)
###Output
_____no_output_____
###Markdown
PracticeFor each source, compute the ROI (revenue / cost - 1)
###Code
results = {
'vk': {'revenue': 103, 'cost': 98},
'yandex': {'revenue': 179, 'cost': 153},
'facebook': {'revenue': 103, 'cost': 110},
'adwords': {'revenue': 35, 'cost': 34},
'twitter': {'revenue': 11, 'cost': 24},
}
for site, info in results.items():
roi = info['revenue'] / info['cost'] - 1
print(site, "roi:", roi)
###Output
vk roi: 0.05102040816326525
yandex roi: 0.16993464052287588
facebook roi: -0.0636363636363636
adwords roi: 0.02941176470588225
twitter roi: -0.5416666666666667
###Markdown
Dict comprehensionSimilar to a list comprehension
###Code
[x**2 for x in range(10)]
{n: n**2 for n in range(10)}
results = [('date', '2018-01-01'), ('counter', '777'), ('visits', 154)]
{metric: value for (metric, value) in results}
cook_book = {
    'salad': [
        {'ingredient_name': 'cheese', 'quantity': 50, 'measure': 'g'},
        {'ingredient_name': 'tomatoes', 'quantity': 20, 'measure': 'g'},
        {'ingredient_name': 'cucumbers', 'quantity': 20, 'measure': 'g'},
        {'ingredient_name': 'black olives', 'quantity': 10, 'measure': 'g'},
        {'ingredient_name': 'olive oil', 'quantity': 20, 'measure': 'ml'},
        {'ingredient_name': 'lettuce', 'quantity': 10, 'measure': 'g'},
        {'ingredient_name': 'pepper', 'quantity': 20, 'measure': 'g'}
    ],
    'pizza': [
        {'ingredient_name': 'cheese', 'quantity': 20, 'measure': 'g'},
        {'ingredient_name': 'sausage', 'quantity': 30, 'measure': 'g'},
        {'ingredient_name': 'bacon', 'quantity': 30, 'measure': 'g'},
        {'ingredient_name': 'olives', 'quantity': 10, 'measure': 'g'},
        {'ingredient_name': 'tomatoes', 'quantity': 20, 'measure': 'g'},
        {'ingredient_name': 'dough', 'quantity': 100, 'measure': 'g'},
    ],
    'lemonade': [
        {'ingredient_name': 'lemon', 'quantity': 1, 'measure': 'pcs'},
        {'ingredient_name': 'water', 'quantity': 200, 'measure': 'ml'},
        {'ingredient_name': 'sugar', 'quantity': 10, 'measure': 'g'},
        {'ingredient_name': 'lime', 'quantity': 20, 'measure': 'g'},
    ]
}
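# For example, a dict comprehension over cook_book: how many ingredients each dish needs
ingredient_counts = {dish: len(ingredients) for dish, ingredients in cook_book.items()}
print(ingredient_counts)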
###Output
_____no_output_____ |
euler_implicit.ipynb | ###Markdown
The following equations, obtained using Newton's laws of motion, are solved in the code using an implicit Euler method:$$\frac{d^2x}{dt^2}=-\frac{a}{m}\sqrt{{v_x}^2+{v_y}^2}\,v_x-\frac{wb}{m}v_y$$$$\frac{d^2y}{dt^2}=-g-\frac{a}{m}\sqrt{{v_x}^2+{v_y}^2}\,v_y+\frac{wb}{m}v_x$$$$v_x=\frac{dx}{dt}$$$$v_y=\frac{dy}{dt}$$The constants a, b, w and m are 0.05, 0.02, 0.1 and 0.25, respectively.
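To make the scheme used below explicit: with time step $\Delta t$, the implicit (backward) Euler update solves the nonlinear system $$v_x^{\,n+1}=v_x^{\,n}+\Delta t\;a_x\!\left(v_x^{\,n+1},v_y^{\,n+1}\right),\qquad v_y^{\,n+1}=v_y^{\,n}+\Delta t\;a_y\!\left(v_x^{\,n+1},v_y^{\,n+1}\right)$$ for the new velocities at every step (this is the system handed to `fsolve` in the code), after which the positions are advanced as $x^{\,n+1}=x^{\,n}+\Delta t\,v_x^{\,n+1}$ and $y^{\,n+1}=y^{\,n}+\Delta t\,v_y^{\,n+1}$.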
###Code
from numpy import array, sin, cos, zeros, ones, linspace
from scipy.optimize import fsolve
import matplotlib.pyplot as plt
def acc_x(vx,vy): # x-acceleration: drag term plus the velocity cross-coupling term
    return -(a/m)*((vx**2+vy**2)**0.5)*vx-(w*b/m)*vy
def acc_y(vx,vy): # y-acceleration: gravity, drag term and the velocity cross-coupling term
    return -g-(a/m)*((vx**2+vy**2)**0.5)*vy+(w*b/m)*vx
def f(z): # residuals of the implicit Euler update; fsolve drives these to zero
    vx_new, vy_new = z  # unknown velocities at the new time level (renamed so they do not shadow the constants a and b)
    f1 = vx[i] - vx_new + dt*acc_x(vx_new, vy_new)
    f2 = vy[i] - vy_new + dt*acc_y(vx_new, vy_new)
    return [f1, f2]
a=0.05
b=0.02
m=0.25
g=9.81
w=0.1
dt = 0.5 # time step size
tf = 10 # final time
nt=int(tf/dt) # number of time steps to be calculated
v_ini=30 # initial absolute velocity
angle=1.0472 # 60 degrees in radians, can be changed to any angle
vx = v_ini*cos(angle)*ones(nt+1) # x-velocity array, filled with the initial value (also used as fsolve's initial guess)
vy = v_ini*sin(angle)*ones(nt+1) # y-velocity array, filled with the initial value
x = zeros(nt+1) # x-coordinates, starting from zero
y = zeros(nt+1) # y-coordinates, starting from zero
for i in range(0,nt): # implicit Euler time-stepping loop
    v = fsolve(f,[vx[i+1],vy[i+1]]) # solve the nonlinear system for the velocities at the new time level
#print(v)
vx[i+1]=v[0]
vy[i+1]=v[1]
x[i+1]=x[i]+dt*vx[i+1]
y[i+1]=y[i]+dt*vy[i+1]
t = [ i for i in linspace(0,tf,nt+1) ]
t2 = [ i for i in linspace(0,tf,nt+1) ]
#plotting the x and y position of particle with time
plt.plot(t,x,label='x-coordinate')
plt.plot(t,y, label='y-coordinate')
plt.title('x and y vs time ', fontweight = 'bold', fontsize = 16)
plt.xlabel('t', fontweight = 'bold', fontsize = 14)
plt.ylabel('X,Y', fontweight = 'bold', fontsize = 14)
plt.legend()
plt.show()
# plotting the trajectory of the particle
plt.figure(2)
plt.plot(x,y)
plt.title('Trajectory plot', fontweight = 'bold', fontsize = 16)
plt.xlabel('x', fontweight = 'bold', fontsize = 14)
plt.ylabel('y', fontweight = 'bold', fontsize = 14)
plt.show()
# plotting the x and y velocities with time
plt.figure(3)
plt.plot(t2,vx,label='x velocity, dx/dt')
plt.plot(t2,vy,label='y velocity, dy/dt')
plt.title('velocity vs time plot', fontweight = 'bold', fontsize = 16)
plt.xlabel('t', fontweight = 'bold', fontsize = 14)
plt.ylabel('vx,vy', fontweight = 'bold', fontsize = 14)
plt.legend()
plt.show()
###Output
_____no_output_____ |
sample-files/gp_assignment.ipynb | ###Markdown
First things firstClick **File -> Save a copy in Drive** and click **Open in new tab** in the pop-up window to save your progress in Google Drive. Gaussian processes and Bayesian optimization In this assignment you will learn how to use the GPy and GPyOpt libraries to deal with Gaussian processes. These libraries provide quite simple and intuitive interfaces for training and inference, and we will try to get familiar with them in a few tasks. SetupLoad auxiliary files and then install and import the necessary libraries.
###Code
try:
import google.colab
IN_COLAB = True
except:
IN_COLAB = False
if IN_COLAB:
print("Downloading Colab files")
! shred -u setup_google_colab.py
! wget https://raw.githubusercontent.com/hse-aml/bayesian-methods-for-ml/master/setup_google_colab.py -O setup_google_colab.py
import setup_google_colab
setup_google_colab.load_data_week6()
! pip install GPy gpyopt xgboost
import numpy as np
import GPy
import GPyOpt
import matplotlib.pyplot as plt
from sklearn.svm import SVR
import sklearn.datasets
from xgboost import XGBRegressor
from sklearn.model_selection import cross_val_score
import time
from w6_grader import GPGrader
%matplotlib inline
###Output
_____no_output_____
###Markdown
GradingWe will create a grader instance below and use it to collect your answers. Note that these outputs will be stored locally inside the grader and will be uploaded to the platform only after running the submit function in the last part of this assignment. If you want to make a partial submission, you can run that cell any time you want.
###Code
grader = GPGrader()
###Output
_____no_output_____
###Markdown
Gaussian processes: GPy (documentation) We will start with a simple regression problem, for which we will try to fit a Gaussian Process with an RBF kernel.
###Code
def generate_points(n=25, noise_variance=0.0036):
np.random.seed(777)
X = np.random.uniform(-3., 3., (n, 1))
y = np.sin(X) + np.random.randn(n, 1) * noise_variance**0.5
return X, y
def generate_noise(n=25, noise_variance=0.0036):
np.random.seed(777)
X = np.random.uniform(-3., 3., (n, 1))
y = np.random.randn(n, 1) * noise_variance**0.5
return X, y
# Create data points
X, y = generate_points()
plt.plot(X, y, '.')
plt.show()
###Output
_____no_output_____
###Markdown
To fit a Gaussian Process, you will need to define a kernel. For a Gaussian (RBF) kernel you can use the `GPy.kern.RBF` function. Task 1.1: Create an RBF kernel with variance 1.5 and length-scale parameter 2 for 1D samples and compute the value of the kernel between points `X[5]` and `X[9]`. Submit a single number. Hint: use the `.K` property of the kernel object.
###Code
kernel = GPy.kern.RBF(input_dim=1, variance=1.5, lengthscale=2.) ### YOUR CODE HERE
kernel_59 = kernel.K(np.array([X[5]]),np.array([X[9]])) ### YOUR CODE HERE
grader.submit_GPy_1(kernel_59)
###Output
Current answer for task 1.1 is: 1.0461813545396959
###Markdown
Task 1.2: Fit a GP to the generated data. Use the kernel from the previous task. Submit the predicted mean and variance at position $x=1$.Hint: use the `GPy.models.GPRegression` class.
###Code
model = GPy.models.GPRegression(X, y, kernel) ### YOUR CODE HERE
mean_1, variance_1 = model.predict(np.array([[1]]))
mean = mean_1 ### YOUR CODE HERE
variance = variance_1 ### YOUR CODE HERE
grader.submit_GPy_2(mean, variance)
model.plot()
plt.show()
###Output
_____no_output_____
###Markdown
We see that the model didn't fit the data very well. Let's try to fit the kernel and noise parameters automatically, as discussed in the lecture! You can see the current parameters below:
###Code
model
###Output
_____no_output_____
###Markdown
Task 1.3: Optimize the length-scale, variance and noise component of the model and submit the optimal length-scale value of the kernel. Hint: use the `.optimize()` function of the model and the `.lengthscale` property of the kernel.
###Code
### YOUR CODE HERE
model.optimize()
lengthscale = kernel.lengthscale
grader.submit_GPy_3(lengthscale)
model.plot()
plt.show()
###Output
_____no_output_____
###Markdown
As you can see, the process now generates outputs just right. Let's see if the GP can figure out by itself whether we are fitting it to noise or to signal. Task 1.4: Generate two datasets: a sinusoid without noise and samples from Gaussian noise. Optimize the kernel parameters and submit the optimal values of the noise component.Note: generate data only using the ```generate_points(n, noise_variance)``` and ```generate_noise(n, noise_variance)``` functions!
###Code
X, y = generate_noise(noise_variance=10)
### YOUR CODE HERE
kernel = GPy.kern.RBF(1, 1.5, 2)
model = GPy.models.GPRegression(X, y, kernel)
model.optimize()
noise = model.Gaussian_noise[0]
model.plot()
X, y = generate_points(noise_variance=0)
### YOUR CODE HERE
kernel = GPy.kern.RBF(1, 1.5, 2)
model = GPy.models.GPRegression(X, y, kernel)
model.optimize()
just_signal = model.Gaussian_noise.variance
model.plot()
grader.submit_GPy_4(noise, just_signal)
###Output
Current answer for task 1.4 (noise) is: 10.143341903515504
Current answer for task 1.4 (just signal) is: 1.0317301438313095e-15
###Markdown
Sparse GPNow let's consider the speed of GPs. We will generate a dataset of 3000 points and measure the time consumed to predict the mean and variance for each point. We will then try to use inducing inputs and find the optimal number of points according to the quality-time tradeoff.For the sparse model with inducing points, you should use the ```GPy.models.SparseGPRegression``` class. You can set the number of inducing inputs with the parameter ```num_inducing``` and optimize their positions and values with the ```.optimize()``` call. Task 1.5: Create a dataset of 1000 points and fit a GPRegression model. Measure the time for predicting the mean and variance at position $x=1$. Then fit a `SparseGPRegression` with 10 inducing inputs and repeat the experiment. Report the speedup as the ratio between the time consumed without and with inducing inputs.
###Code
X, y = generate_points(1000)
kernel = GPy.kern.RBF(1, 1.5, 2)
model = GPy.models.GPRegression(X, y, kernel)
model.optimize()
start = time.time()
### YOUR CODE HERE
mean, variance = model.predict(np.array([[1]]))
time_gp = time.time()-start
kernel = GPy.kern.RBF(1, 1.5, 2)
model = GPy.models.SparseGPRegression(X, y, kernel, num_inducing=10)
model.optimize()
start = time.time()
### YOUR CODE HERE
mean, variance = model.predict(np.array([[1]]))
time_sgp = time.time()-start
model.plot()
plt.show()
grader.submit_GPy_5(time_gp / time_sgp)
###Output
Current answer for task 1.5 is: 5.179500796601168
###Markdown
Bayesian optimization: GPyOpt (documentation, tutorials) In this part of the assignment, we will try to find optimal hyperparameters for an XGBoost model! We will use data from a small competition to speed things up, but keep in mind that the approach works even for large datasets.We will use the diabetes dataset provided in the sklearn package.
###Code
dataset = sklearn.datasets.load_diabetes()
X = dataset['data']
y = dataset['target']
###Output
_____no_output_____
###Markdown
We will use the cross-validation score to estimate accuracy, and our goal will be to tune the ```max_depth```, ```learning_rate``` and ```n_estimators``` parameters. The baseline MSE with default XGBoost parameters is $0.2$. Let's see if we can do better. First, we have to define the optimization function and the domains.
###Code
# Score. Optimizer will try to find minimum, so we will add a "-" sign.
def f(parameters):
parameters = parameters[0]
score = -cross_val_score(
XGBRegressor(learning_rate=parameters[0],
max_depth=int(parameters[2]),
n_estimators=int(parameters[3]),
gamma=int(parameters[1]),
min_child_weight = parameters[4]),
X, y, scoring='neg_mean_squared_error'
).mean()
score = np.array(score)
return score
baseline = -cross_val_score(
XGBRegressor(), X, y, scoring='neg_mean_squared_error'
).mean()
baseline
# Bounds (NOTE: define continuous variables first, then discrete!)
bounds = [
{'name': 'learning_rate',
'type': 'continuous',
'domain': (0, 1)},
{'name': 'gamma',
'type': 'continuous',
'domain': (0, 5)},
{'name': 'max_depth',
'type': 'discrete',
'domain': (1, 50)},
{'name': 'n_estimators',
'type': 'discrete',
'domain': (1, 300)},
{'name': 'min_child_weight',
'type': 'discrete',
'domain': (1, 10)}
]
np.random.seed(777)
optimizer = GPyOpt.methods.BayesianOptimization(
f=f, domain=bounds,
acquisition_type ='MPI',
acquisition_par = 0.1,
exact_eval=True
)
max_iter = 50
max_time = 60
optimizer.run_optimization(max_iter, max_time)
optimizer.plot_convergence()
###Output
_____no_output_____
###Markdown
Best values of parameters:
###Code
optimizer.X[np.argmin(optimizer.Y)]
print('MSE:', np.min(optimizer.Y),
'Gain:', baseline/np.min(optimizer.Y)*100)
###Output
MSE: 3189.185708011576 Gain: 107.77278973061391
###Markdown
We were able to get a 9% boost without tuning parameters by hand! Let's see if you can do the same. Task 2.1: Tune an SVR model. Find optimal values for three parameters: `C`, `epsilon` and `gamma`. Use the range (1e-5, 1000) for `C` and (1e-5, 10) for `epsilon` and `gamma`. Use MPI as the acquisition function with weight 0.1. Submit the optimal value of epsilon that was found by the model.
###Code
bounds = [
{'name': 'C', 'type': 'continuous', 'domain': (1e-5, 1000)},
{'name': 'gamma', 'type': 'continuous', 'domain': (1e-5, 10)},
{'name': 'epsilon', 'type': 'continuous', 'domain': (1e-5, 10)}
]
def svr_score(parameters):
    parameters = parameters[0]
    # Note: the bounds above are named (C, gamma, epsilon), but this call uses
    # parameters[1] as epsilon and parameters[2] as gamma; since both share the same
    # (1e-5, 10) domain, the search space is unchanged and only the labels are swapped.
    score = -cross_val_score(
        SVR(C=parameters[0], epsilon=parameters[1], gamma=parameters[2]),
        X, y, scoring='neg_mean_squared_error').mean()
score = np.array(score)
return score
baseline = -cross_val_score(SVR(), X, y, scoring='neg_mean_squared_error').mean()
print(baseline)
optimizer = GPyOpt.methods.BayesianOptimization(f=svr_score, domain=bounds,
acquisition_type = 'MPI',
acquisition_par = 0.1,
exact_eval = True)
max_iter = 50
max_time = 60
optimizer.run_optimization(max_iter, max_time)
optimizer.plot_convergence()
best_params = optimizer.X[np.argmin(optimizer.Y)]
print(best_params)
print('MSE:', np.min(optimizer.Y), 'Gain:', baseline/np.min(optimizer.Y))
### YOUR CODE HERE
best_epsilon = best_params[1] ### YOUR CODE HERE
grader.submit_GPyOpt_1(best_epsilon)
###Output
Current answer for task 2.1 is: 6.696411810398705
###Markdown
Task 2.2: For the model above, submit the performance boost that you got after tuning hyperparameters (output in percent) [e.g. if the baseline MSE was 40 and you got 20, output the number 200]
###Code
performance_boost = baseline/np.min(optimizer.Y) ### YOUR CODE HERE
grader.submit_GPyOpt_2(performance_boost*100)
###Output
Current answer for task 2.2 is: 170.68312228647443
###Markdown
Authorization & SubmissionTo submit assignment parts to Cousera platform, please, enter your e-mail and token into variables below. You can generate a token on this programming assignment's page. Note: The token expires 30 minutes after generation.
###Code
STUDENT_EMAIL = '[email protected]'
STUDENT_TOKEN = 'y2aa99KxdSerkAcl'
grader.status()
###Output
You want to submit these numbers:
Task 1.1: 1.0461813545396959
Task 1.2 (mean): 0.6646774926102937
Task 1.2 (variance): 1.1001478223790582
Task 1.3: 1.6252681650349912
Task 1.4 (noise): 10.143341903515504
Task 1.4 (just signal): 1.0317301438313095e-15
Task 1.5: 5.179500796601168
Task 2.1: 6.696411810398705
Task 2.2: 170.68312228647443
###Markdown
If you want to submit these answers, run cell below
###Code
grader.submit(STUDENT_EMAIL, STUDENT_TOKEN)
###Output
Submitted to Coursera platform. See results on assignment page!
|
project/ML/ML_b/energy.ipynb | ###Markdown
https://www.bigdata-environment.kr/user/data_market/detail.do?id=6ecb2ce0-03d1-11ec-82b9-3debd40f3738 --> Energy and greenhouse-gas reduction technologies. https://www.bigdata-environment.kr/user/data_market/detail.do?id=56f03d80-f36a-11eb-b976-6966248a20b9 --> Greenhouse-gas emissions and energy use per workplace. TOE (Ton of Oil Equivalent) -> the energy released by burning one ton of oil, i.e. the calorific value of one ton of oil. TJ = terajoule (10^12 joules). tCO₂ = tons of CO2 emitted ≒ greenhouse-gas emissions. Greenhouse-gas emissions (tCO2eq) = ∑ [electricity use (MWh) × emission factor (tGHG(CO2/CH4/N2O)/MWh) × global warming potential]. Energy use (TJ) = electricity use (MWh) × 9 × 10^-3. 1. Electricity use: facility-level electricity consumption measured with certified meters (calculated from the consumption stated on bills issued by electricity suppliers such as KEPCO). 2. Emission factor: fixed for three years; when the centre confirms and publishes the indirect electricity emission factor provided by the Korea Power Exchange, that value is applied. 3. Global warming potential: CO2 = 1, CH4 = 21, N2O = 310. References: https://www.konetic.or.kr/dataroom/calculator_view.asp?1=1&gotopage=1&sStart=%EC%9E%90&sEnd=%EC%B0%A8&unique_num=1214 https://ko.wikipedia.org/wiki/%EC%A4%84_(%EB%8B%A8%EC%9C%84) https://blog.daum.net/jwon5279/5718723 https://www.keei.re.kr/main.nsf/index.html?open&p=%2Fweb_keei%2Fchange.nsf%2FCaloryConverF&s=%3FOpenForm http://tips.energy.or.kr/main/main.do https://ngms.gir.go.kr/link.do?menuNo=30130103&link=/websquare/websquare.html%3Fw2xPath%3D/cm/bbs/OGCMBBS023V.xml%26menu%3D30130103
###Code
import pandas as pd
df_2016 = pd.read_csv('./사업장별_온실가스_배출량∙에너지_사용량_(2016).CSV')
df_2017 = pd.read_csv('./사업장별_온실가스_배출량∙에너지_사용량_(2017).CSV')
df_2018 = pd.read_csv('./사업장별_온실가스_배출량∙에너지_사용량_(2018).CSV')
df_2019 = pd.read_csv('./사업장별_온실가스_배출량∙에너지_사용량_(2019).CSV')
df_2020 = pd.read_csv('./사업장별_온실가스_배출량∙에너지_사용량_(2020).csv')
df_tech = pd.read_csv('에너지_및_온실가스_감축기술.csv')
df_2016.info(),df_2017.info(),df_2018.info(),df_2019.info(),df_2020.info(),df_tech.info()
df_sum = pd.concat([df_2016,df_2017,df_2018,df_2019,df_2020], ignore_index=True)  # combined 2016-2020 table, used by the df_sum cell below
#df_sum.to_csv('(2016~2020)_사업장별_온실가스_배출량∙에너지_사용량.csv',encoding='cp949')
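# Illustrative sketch of the emission/energy formulas described above, using made-up
# (hypothetical) numbers rather than values taken from the dataset:
electricity_mwh = 1000.0              # electricity use in MWh (hypothetical)
emission_factor = 0.46                # indirect emission factor in tCO2/MWh (hypothetical)
gwp = 1                               # global warming potential of CO2
ghg_tco2eq = electricity_mwh * emission_factor * gwp   # greenhouse-gas emissions (tCO2eq)
energy_tj = electricity_mwh * 9e-3                     # energy use (TJ) = MWh x 9 x 10^-3
print(ghg_tco2eq, energy_tj)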
###Output
_____no_output_____
###Markdown
|NO|DPRTM CHRG|CORP NM|ADDRESS|TRGT YEAR|DSGN CLSF|DSGN INDS|GHG EMS|GHG UNIT|GHG EMS BU|GHG EMS BU UNIT||---|---|---|---|---|---|---|---|---|---|---||Serial no.|Responsible ministry|Corporation name|Address|Target year|Designation category|Designated industry|GHG emissions|GHG emissions unit|GHG emissions intensity value|GHG emissions intensity unit| |ENG CNSM|ENG UNIT|ENG CNSM BU|ENG CNSM BU UNIT|VRFCT AGNCY|TST||---|---|---|---|---|---||Energy consumption|Energy consumption unit|Energy consumption intensity value|Energy consumption intensity unit|Verification agency|Revenue ADDRESS / TRGT_YEAR / DSGN_INDS / GHG EMS
###Code
print(df_sum['DSGN_INDS'].unique(), df_sum['DSGN_INDS'].unique().size, sep='\n\n')
df_2020.head(5)
###Output
_____no_output_____
###Markdown
Energy and greenhouse-gas reduction technology data
###Code
df_tech = pd.read_csv('에너지_및_온실가스_감축기술.csv')
df_tech.info()
###Output
_____no_output_____
###Markdown
NO - categorical induty - categorical TRGT_EQMT - categorical CLSF IMPRV - categorical IMPRV ACT - categorical ROI PD - continuous ROI PD UNIT - categorical RDC COST - continuous RDC COST UNIT - categorical INVST_COST - continuous INVST COST UNIT - categorical DGN YR - continuous ENG CNSM SCL - categorical RDC ENG SORT - categorical AMNT RDC ENG FUEL - continuous AMNT_RDC_ENG_FUEL_UNIT - categorical AMNT RDC ENG ELTC - continuous AMNT RDC ENG ELTC UNIT - categorical AMNT RDC GHG - continuous AMNT RDC GHG UNIT - categorical NO - serial number induty - industry TRGT_EQMT - target equipment for reduction CLSF IMPRV - improvement category IMPRV ACT - improvement activity name ROI PD - payback period ROI PD UNIT - payback period unit RDC COST - cost savings RDC COST UNIT - savings unit INVST COST - investment cost INVST COST UNIT - investment cost unit DGN YR - diagnosis year ENG CNSM SCL - energy consumption scale RDC ENG SORT - type of energy savings AMNT RDC ENG FUEL - energy savings (fuel) AMNT_RDC_ENG_FUEL_UNIT - fuel unit AMNT RDC ENG ELTC - energy savings (electricity) AMNT RDC ENG ELTC UNIT - electricity unit AMNT RDC GHG - GHG reduction amount AMNT RDC GHG UNIT - reduction amount unit
###Code
df_tech.head(5)
df_tech_kor = df_tech.copy()  # copy so that renaming the columns below does not also rename df_tech
df_tech_kor.columns = ['연번','업종','감축 대상 설비','개선 구분','개선활동명','투자비 회수기간','투자비 회수기간 단위','절감액','절감액 단위','투자비','투자비 단위','진단 연도','에너지 사용량 규모','에너지 절감 종류','에너지 절감량(연료)','연료 단위','에너지 절감량(전력)','전력 단위','온실가스 감축량','감축량 단위']
df_tech_kor.head(3)
df_tech.head(1620)
###Output
_____no_output_____
###Markdown
Review of the INDUTY (industry), TRGT_EQMT (target equipment for reduction), CLSF_IMPRV (improvement category), IMPRV_ACT (improvement activity name) and AMNT_RDC_GHG (GHG reduction amount) columns
###Code
df_tech['AMNT_RDC_GHG_UNIT'].value_counts()
df_tech['INDUTY'].value_counts()
all_INDUTY = []
for x in df_tech['INDUTY'] :
all_INDUTY.extend(x.split('|'))
un_INDUTY = pd.unique(all_INDUTY)
un_INDUTY
df_tech['TRGT_EQMT'].value_counts()
df_tech['CLSF_IMPRV'].value_counts()
df_tech['IMPRV_ACT'].value_counts()
all_IMPRV_ACT = []
for j in df_tech['IMPRV_ACT']:
all_IMPRV_ACT.extend(j.split('|'))
un_IMPRV_ACT = pd.unique(all_IMPRV_ACT)
un_IMPRV_ACT
df_tech_np = df_tech.values
df_tech_np
###Output
_____no_output_____
###Markdown
------------------------------ Preprocessing > removing '-' placeholder values
###Code
# INDUTY(업종), TRGT_EQMT(감축 대상설비), CLSF_IMPRV(개선구분), IMPRV_ACT(개선활동명), AMNT_RDC_GHG(온실가스 감축량)
# idx_nm_1 = df_sample_4[df_sample_4['성별코드'] == 1].index
# df_sample_5 = df_sample_4.drop(idx_nm_1)
df_tech_edit = df_tech.loc[:,['INDUTY','TRGT_EQMT','CLSF_IMPRV','IMPRV_ACT','AMNT_RDC_GHG']]
df_tech_edit.info()
df_INDUTY = df_tech_edit[df_tech_edit['INDUTY']=='-'].index
df_TRGT_EQMT = df_tech_edit[df_tech_edit['TRGT_EQMT']=='-'].index
df_CLSF_IMPRV = df_tech_edit[df_tech_edit['CLSF_IMPRV']=='-'].index
df_IMPRV_ACT = df_tech_edit[df_tech_edit['IMPRV_ACT']=='-'].index
df_tech_edit2 = df_tech_edit.drop(df_INDUTY)
df_tech_edit2 = df_tech_edit2.drop(df_TRGT_EQMT)
df_tech_edit2 = df_tech_edit2.drop(df_CLSF_IMPRV)
df_tech_edit2 = df_tech_edit2.drop(df_IMPRV_ACT)
df_tech_edit2.info()
df_tech_edit.info()
#df_tech_edit2.to_csv('(전처리_후)에너지_및_온실가스_감축기술).csv',encoding='cp949')
###Output
_____no_output_____
###Markdown
> Machine learning (clustering)
###Code
import pandas as pd
import numpy as np
df_tech_edit2 = pd.read_csv('(전처리_후)에너지_및_온실가스_감축기술).csv',encoding='cp949')
df_tech_edit2.info()
df_tech_edit2 = df_tech_edit2.drop('Unnamed: 0', axis=1)
df_tech_edit2.head()
X = df_tech_edit2.iloc[:, :]  # working view of the cleaned frame used for the encodings below
X.shape
dfx_INDUTY = pd.get_dummies(df_tech_edit2['INDUTY'])
dfx_INDUTY
dfx_TRGT_EQMT = pd.get_dummies(df_tech_edit2['TRGT_EQMT'])
dfx_TRGT_EQMT
dfx_CLSF_IMPRV = pd.get_dummies(df_tech_edit2['CLSF_IMPRV'])
dfx_CLSF_IMPRV
dfx_IMPRV_ACT = pd.get_dummies(df_tech_edit2['IMPRV_ACT'])
dfx_IMPRV_ACT
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(dfx_INDUTY)
dfx_INDUTY = scaler.transform(dfx_INDUTY)
scaler = StandardScaler()
scaler.fit(dfx_TRGT_EQMT)
dfx_TRGT_EQMT = scaler.transform(dfx_TRGT_EQMT)
scaler = StandardScaler()
scaler.fit(dfx_CLSF_IMPRV)
dfx_CLSF_IMPRV = scaler.transform(dfx_CLSF_IMPRV)
from sklearn import cluster
kmeans1 = cluster.KMeans(n_clusters=20)
kmeans2 = cluster.KMeans(n_clusters=22)
kmeans3 = cluster.KMeans(n_clusters=90)
kmeans1.fit(dfx_INDUTY)
kmeans1.labels_
kmeans2.fit(dfx_TRGT_EQMT)
kmeans2.labels_
kmeans3.fit(dfx_CLSF_IMPRV)
kmeans3.labels_
df_tech_edit2['INDUTY_label'] = kmeans1.labels_
df_tech_edit2['TRGT_EQMT_label'] = kmeans2.labels_
df_tech_edit2['CLSF_IMPRV_label'] = kmeans3.labels_
df_tech_edit2.head()
df_tech_edit2.head(50)
df_tech_edit2.info()
df_tech_edit2.plot(kind='scatter',x='INDUTY_label', y ='AMNT_RDC_GHG', c='CLSF_IMPRV_label' ,cmap = 'Set1')
df_tech_edit2['AMNT_RDC_GHG'].value_counts()
df_tech_edit2['AMNT_RDC_GHG'].max()
df_tech_edit2['AMNT_RDC_GHG'].min()
###Output
_____no_output_____ |
prediction/single task/function documentation generation/ruby/t5 interface/small_model.ipynb | ###Markdown
Install the library and download the pretrained models
###Code
print("Installing dependencies...")
%tensorflow_version 2.x
!pip install -q t5==0.6.4
import functools
import os
import time
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
import tensorflow.compat.v1 as tf
import tensorflow_datasets as tfds
import t5
!wget "https://www.dropbox.com/sh/kjoqdpj7e16dny9/AADdvjWVFckCgNQN-AqMKhiDa?dl=1" -O vocabulary.zip
!unzip vocabulary.zip
!rm vocabulary.zip
!wget "https://www.dropbox.com/sh/012r5jxhm5eiprt/AACxnFoc6egkqn8IJZvnoQsza?dl=1" -O ruby.zip
!unzip ruby.zip
!rm ruby.zip
###Output
Installing dependencies...
[K |████████████████████████████████| 163kB 2.8MB/s
[K |████████████████████████████████| 1.3MB 8.2MB/s
[K |████████████████████████████████| 1.1MB 18.1MB/s
[K |████████████████████████████████| 2.6MB 12.3MB/s
[K |████████████████████████████████| 3.6MB 47.2MB/s
[K |████████████████████████████████| 348kB 47.8MB/s
[K |████████████████████████████████| 71kB 7.8MB/s
[K |████████████████████████████████| 890kB 50.3MB/s
[K |████████████████████████████████| 2.9MB 42.6MB/s
[?25h Building wheel for sacremoses (setup.py) ... [?25l[?25hdone
INFO:tensorflow:tokens_length=568 inputs_length=512 targets_length=114 noise_density=0.15 mean_noise_span_length=3.0
--2020-11-10 13:49:27-- https://www.dropbox.com/sh/kjoqdpj7e16dny9/AADdvjWVFckCgNQN-AqMKhiDa?dl=1
Resolving www.dropbox.com (www.dropbox.com)... 162.125.82.1, 2620:100:6032:1::a27d:5201
Connecting to www.dropbox.com (www.dropbox.com)|162.125.82.1|:443... connected.
HTTP request sent, awaiting response... 301 Moved Permanently
Location: /sh/dl/kjoqdpj7e16dny9/AADdvjWVFckCgNQN-AqMKhiDa [following]
--2020-11-10 13:49:28-- https://www.dropbox.com/sh/dl/kjoqdpj7e16dny9/AADdvjWVFckCgNQN-AqMKhiDa
Reusing existing connection to www.dropbox.com:443.
HTTP request sent, awaiting response... 302 Found
Location: https://uc6e5412029fd7d5c933c98cab95.dl.dropboxusercontent.com/zip_download_get/AmU_jG9VmcHK9WGCJ6VM6m7EabqZwHcY6BH2hLHE-hxlgK2QK0-M_4mqbpJedcKnEgkNP6mtnfuoJw8KDuGzpeWe6fG4tuXEU7C6dn5GUOOnmw?dl=1 [following]
--2020-11-10 13:49:28-- https://uc6e5412029fd7d5c933c98cab95.dl.dropboxusercontent.com/zip_download_get/AmU_jG9VmcHK9WGCJ6VM6m7EabqZwHcY6BH2hLHE-hxlgK2QK0-M_4mqbpJedcKnEgkNP6mtnfuoJw8KDuGzpeWe6fG4tuXEU7C6dn5GUOOnmw?dl=1
Resolving uc6e5412029fd7d5c933c98cab95.dl.dropboxusercontent.com (uc6e5412029fd7d5c933c98cab95.dl.dropboxusercontent.com)... 162.125.82.15, 2620:100:6032:15::a27d:520f
Connecting to uc6e5412029fd7d5c933c98cab95.dl.dropboxusercontent.com (uc6e5412029fd7d5c933c98cab95.dl.dropboxusercontent.com)|162.125.82.15|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 1381528 (1.3M) [application/zip]
Saving to: ‘vocabulary.zip’
vocabulary.zip 100%[===================>] 1.32M 8.71MB/s in 0.2s
2020-11-10 13:49:29 (8.71 MB/s) - ‘vocabulary.zip’ saved [1381528/1381528]
Archive: vocabulary.zip
warning: stripped absolute path spec from /
mapname: conversion of failed
extracting: code_spm_unigram_40M.model
extracting: code_spm_unigram_40M.vocab
--2020-11-10 13:49:29-- https://www.dropbox.com/sh/012r5jxhm5eiprt/AACxnFoc6egkqn8IJZvnoQsza?dl=1
Resolving www.dropbox.com (www.dropbox.com)... 162.125.82.1, 2620:100:6032:1::a27d:5201
Connecting to www.dropbox.com (www.dropbox.com)|162.125.82.1|:443... connected.
HTTP request sent, awaiting response... 301 Moved Permanently
Location: /sh/dl/012r5jxhm5eiprt/AACxnFoc6egkqn8IJZvnoQsza [following]
--2020-11-10 13:49:30-- https://www.dropbox.com/sh/dl/012r5jxhm5eiprt/AACxnFoc6egkqn8IJZvnoQsza
Reusing existing connection to www.dropbox.com:443.
HTTP request sent, awaiting response... 302 Found
Location: https://ucb5dd6d6f88805af3537ddf63f6.dl.dropboxusercontent.com/zip_download_get/AmX2qK2Y8zciG7yv79y8oqoOboaCHRfkZTCXWKJgvcrEdkgBCXHq1AqHImVV2Ydk_iAxRdlQqEpU9p7je-ns9Oz9hJFIFohg-PYnuyPBfOX0Pg?dl=1 [following]
--2020-11-10 13:49:30-- https://ucb5dd6d6f88805af3537ddf63f6.dl.dropboxusercontent.com/zip_download_get/AmX2qK2Y8zciG7yv79y8oqoOboaCHRfkZTCXWKJgvcrEdkgBCXHq1AqHImVV2Ydk_iAxRdlQqEpU9p7je-ns9Oz9hJFIFohg-PYnuyPBfOX0Pg?dl=1
Resolving ucb5dd6d6f88805af3537ddf63f6.dl.dropboxusercontent.com (ucb5dd6d6f88805af3537ddf63f6.dl.dropboxusercontent.com)... 162.125.82.15, 2620:100:6032:15::a27d:520f
Connecting to ucb5dd6d6f88805af3537ddf63f6.dl.dropboxusercontent.com (ucb5dd6d6f88805af3537ddf63f6.dl.dropboxusercontent.com)|162.125.82.15|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 658504252 (628M) [application/zip]
Saving to: ‘ruby.zip’
ruby.zip 100%[===================>] 628.00M 14.2MB/s in 42s
2020-11-10 13:50:14 (14.8 MB/s) - ‘ruby.zip’ saved [658504252/658504252]
Archive: ruby.zip
warning: stripped absolute path spec from /
mapname: conversion of failed
creating: base/
creating: small/
extracting: base/command
extracting: small/command
extracting: base/command.1
extracting: small/command.1
extracting: base/checkpoint
extracting: small/checkpoint
extracting: base/graph.pbtxt
extracting: small/graph.pbtxt
extracting: small/t5_code_tasks.py
extracting: base/model.ckpt-8000.meta
extracting: base/operative_config.gin
extracting: base/model.ckpt-8000.index
extracting: small/operative_config.gin
extracting: small/model.ckpt-10000.meta
extracting: base/t5_code_tasks_colab.py
extracting: small/model.ckpt-10000.index
extracting: base/model.ckpt-8000.data-00000-of-00002
extracting: base/model.ckpt-8000.data-00001-of-00002
extracting: small/model.ckpt-10000.data-00001-of-00002
extracting: small/model.ckpt-10000.data-00000-of-00002
###Markdown
Set sentencepiece model
###Code
from t5.data.sentencepiece_vocabulary import SentencePieceVocabulary
vocab_model_path = 'code_spm_unigram_40M.model'
vocab = SentencePieceVocabulary(vocab_model_path, extra_ids=100)
print("Vocab has a size of %d\n" % vocab.vocab_size)
###Output
Vocab has a size of 32100
###Markdown
Set the preprocessors and the task registry for the t5 model
###Code
def ruby_codeSearchNet_dataset_fn(split, shuffle_files=False):
  # ruby_path is assumed to be a dict mapping split names ('train', 'validation') to
  # TSV files with "code<TAB>docstring" lines, prepared elsewhere in the full setup
  tf.random.shuffle(ruby_path[split])
ds = tf.data.TextLineDataset(ruby_path[split])
ds = ds.map(
functools.partial(tf.io.decode_csv, record_defaults=["", ""], field_delim="\t", use_quote_delim=False),
num_parallel_calls=tf.data.experimental.AUTOTUNE
)
ds = ds.map(lambda *ex: dict(zip(["code", "docstring"], ex)))
return ds
def ruby_preprocessor(ds):
def normalize_text(text):
return text
def to_inputs_and_targets(ex):
return {
"inputs": tf.strings.join(["function documentation generation ruby: ", normalize_text(ex["code"])]),
"targets": normalize_text(ex["docstring"])
}
return ds.map(to_inputs_and_targets, num_parallel_calls=tf.data.experimental.AUTOTUNE)
t5.data.TaskRegistry.remove('function_documentation_generation_ruby_code')
t5.data.TaskRegistry.add(
"function_documentation_generation_ruby_code",
dataset_fn=ruby_codeSearchNet_dataset_fn,
output_features={
"inputs": t5.data.utils.Feature(vocabulary=vocab),
"targets": t5.data.utils.Feature(vocabulary=vocab),
},
splits=["train", "validation"],
text_preprocessor=[ruby_preprocessor],
postprocess_fn=t5.data.postprocessors.lower_text,
metric_fns=[t5.evaluation.metrics.bleu, t5.evaluation.metrics.accuracy, t5.evaluation.metrics.rouge],
)
###Output
_____no_output_____
###Markdown
Set t5 small model
###Code
MODEL_DIR = "small"
model_parallelism = 1
train_batch_size = 256
tf.io.gfile.makedirs(MODEL_DIR)
model = t5.models.MtfModel(
model_dir=MODEL_DIR,
tpu=None,
tpu_topology=None,
model_parallelism=model_parallelism,
batch_size=train_batch_size,
sequence_length={"inputs": 512, "targets": 512},
mesh_shape="model:1,batch:1",
mesh_devices=["GPU:0"],
learning_rate_schedule=0.003,
save_checkpoints_steps=5000,
keep_checkpoint_max=None,
iterations_per_loop=100,
)
###Output
_____no_output_____
###Markdown
Code Documentation Summarization Give the code for summarization
###Code
code = "def add(severity, progname, &block)\n return true if io.nil? || severity < level\n message = format_message(severity, progname, yield)\n MUTEX.synchronize { io.write(message) }\n true\n end" #@param {type:"raw"}
###Output
_____no_output_____
###Markdown
Parsing and Tokenization
###Code
!pip install tree_sitter
!git clone https://github.com/tree-sitter/tree-sitter-ruby
from tree_sitter import Language, Parser
Language.build_library(
'build/my-languages.so',
['tree-sitter-ruby']
)
RUBY_LANGUAGE = Language('build/my-languages.so', 'ruby')
parser = Parser()
parser.set_language(RUBY_LANGUAGE)
def get_string_from_code(node, lines):
line_start = node.start_point[0]
line_end = node.end_point[0]
char_start = node.start_point[1]
char_end = node.end_point[1]
if line_start != line_end:
code_list.append(' '.join([lines[line_start][char_start:]] + lines[line_start+1:line_end] + [lines[line_end][:char_end]]))
else:
code_list.append(lines[line_start][char_start:char_end])
def my_traverse(node, code_list):
lines = code.split('\n')
if node.child_count == 0:
get_string_from_code(node, lines)
elif node.type == 'string':
get_string_from_code(node, lines)
else:
for n in node.children:
my_traverse(n, code_list)
return ' '.join(code_list)
tree = parser.parse(bytes(code, "utf8"))
code_list=[]
tokenized_code = my_traverse(tree.root_node, code_list)
print("Output after tokenization: " + tokenized_code)
###Output
Output after tokenization: def add ( severity , progname , & block ) return true if io . nil? || severity < level message = format_message ( severity , progname , yield ) MUTEX . synchronize { io . write ( message ) } true end
###Markdown
Record the code for summarization with the prefix to a txt file
###Code
codes = [tokenized_code]
inputs_path = 'input.txt'
with tf.io.gfile.GFile(inputs_path, "w") as f:
for c in codes:
f.write("function documentation generation ruby: %s\n" % c)
predict_outputs_path = 'MtfModel-output.txt'
###Output
_____no_output_____
###Markdown
Running the model with the best checkpoint to summarize the given code
###Code
model.batch_size = 8 # Min size for small model on v2-8 with parallelism 1.
model.predict(
input_file="input.txt",
output_file=predict_outputs_path,
checkpoint_steps=10000,
beam_size=4,
vocabulary=vocab,
# Select the most probable output token at each step.
temperature=0,
)
###Output
INFO:tensorflow:Using config: {'_model_dir': 'small', '_tf_random_seed': None, '_save_summary_steps': 100, '_save_checkpoints_steps': None, '_save_checkpoints_secs': None, '_session_config': allow_soft_placement: true
graph_options {
rewrite_options {
meta_optimizer_iterations: ONE
}
}
, '_keep_checkpoint_max': 5, '_keep_checkpoint_every_n_hours': 10000, '_log_step_count_steps': None, '_train_distribute': None, '_device_fn': None, '_protocol': None, '_eval_distribute': None, '_experimental_distribute': None, '_experimental_max_worker_delay_secs': None, '_session_creation_timeout_secs': 7200, '_service': None, '_cluster_spec': ClusterSpec({}), '_task_type': 'worker', '_task_id': 0, '_global_id_in_cluster': 0, '_master': '', '_evaluation_master': '', '_is_chief': True, '_num_ps_replicas': 0, '_num_worker_replicas': 1, '_tpu_config': TPUConfig(iterations_per_loop=100, num_shards=None, num_cores_per_replica=1, per_host_input_for_training=4, tpu_job_name=None, initial_infeed_sleep_secs=None, input_partition_dims=None, eval_training_input_configuration=2, experimental_host_call_every_n_steps=1), '_cluster': None}
INFO:tensorflow:_TPUContext: eval_on_tpu True
WARNING:tensorflow:eval_on_tpu ignored because use_tpu is False.
INFO:tensorflow:Calling model_fn.
INFO:tensorflow:Running infer on CPU/GPU
INFO:tensorflow:feature inputs : Tensor("Reshape:0", shape=(8, 512), dtype=int32)
WARNING:tensorflow:Using default tf glorot_uniform_initializer for variable encoder/block_000/layer_000/SelfAttention/relative_attention_bias The initialzer will guess the input and output dimensions based on dimension order.
WARNING:tensorflow:Using default tf glorot_uniform_initializer for variable decoder/block_000/layer_000/SelfAttention/relative_attention_bias The initialzer will guess the input and output dimensions based on dimension order.
WARNING:tensorflow:Using default tf glorot_uniform_initializer for variable decoder/block_000/layer_000/SelfAttention/relative_attention_bias The initialzer will guess the input and output dimensions based on dimension order.
INFO:tensorflow:Variable decoder/block_000/layer_000/SelfAttention/k size 262144 slice_size 262144 Shape[d_model=512, heads=512]
INFO:tensorflow:Variable decoder/block_000/layer_000/SelfAttention/o size 262144 slice_size 262144 Shape[heads=512, d_model=512]
INFO:tensorflow:Variable decoder/block_000/layer_000/SelfAttention/q size 262144 slice_size 262144 Shape[d_model=512, heads=512]
INFO:tensorflow:Variable decoder/block_000/layer_000/SelfAttention/relative_attention_bias size 256 slice_size 256 Shape[heads=8, buckets=32]
INFO:tensorflow:Variable decoder/block_000/layer_000/SelfAttention/v size 262144 slice_size 262144 Shape[d_model=512, heads=512]
INFO:tensorflow:Variable decoder/block_000/layer_000/rms_norm/scale size 512 slice_size 512 Shape[d_model=512]
INFO:tensorflow:Variable decoder/block_000/layer_001/EncDecAttention/k size 262144 slice_size 262144 Shape[d_model=512, heads=512]
INFO:tensorflow:Variable decoder/block_000/layer_001/EncDecAttention/o size 262144 slice_size 262144 Shape[heads=512, d_model=512]
INFO:tensorflow:Variable decoder/block_000/layer_001/EncDecAttention/q size 262144 slice_size 262144 Shape[d_model=512, heads=512]
INFO:tensorflow:Variable decoder/block_000/layer_001/EncDecAttention/v size 262144 slice_size 262144 Shape[d_model=512, heads=512]
INFO:tensorflow:Variable decoder/block_000/layer_001/rms_norm/scale size 512 slice_size 512 Shape[d_model=512]
INFO:tensorflow:Variable decoder/block_000/layer_002/DenseReluDense/wi/kernel size 1048576 slice_size 1048576 Shape[d_model=512, d_ff=2048]
INFO:tensorflow:Variable decoder/block_000/layer_002/DenseReluDense/wo/kernel size 1048576 slice_size 1048576 Shape[d_ff=2048, d_model=512]
INFO:tensorflow:Variable decoder/block_000/layer_002/rms_norm/scale size 512 slice_size 512 Shape[d_model=512]
INFO:tensorflow:Variable decoder/block_001/layer_000/SelfAttention/k size 262144 slice_size 262144 Shape[d_model=512, heads=512]
INFO:tensorflow:Variable decoder/block_001/layer_000/SelfAttention/o size 262144 slice_size 262144 Shape[heads=512, d_model=512]
INFO:tensorflow:Variable decoder/block_001/layer_000/SelfAttention/q size 262144 slice_size 262144 Shape[d_model=512, heads=512]
INFO:tensorflow:Variable decoder/block_001/layer_000/SelfAttention/v size 262144 slice_size 262144 Shape[d_model=512, heads=512]
INFO:tensorflow:Variable decoder/block_001/layer_000/rms_norm/scale size 512 slice_size 512 Shape[d_model=512]
INFO:tensorflow:Variable decoder/block_001/layer_001/EncDecAttention/k size 262144 slice_size 262144 Shape[d_model=512, heads=512]
INFO:tensorflow:Variable decoder/block_001/layer_001/EncDecAttention/o size 262144 slice_size 262144 Shape[heads=512, d_model=512]
INFO:tensorflow:Variable decoder/block_001/layer_001/EncDecAttention/q size 262144 slice_size 262144 Shape[d_model=512, heads=512]
INFO:tensorflow:Variable decoder/block_001/layer_001/EncDecAttention/v size 262144 slice_size 262144 Shape[d_model=512, heads=512]
INFO:tensorflow:Variable decoder/block_001/layer_001/rms_norm/scale size 512 slice_size 512 Shape[d_model=512]
INFO:tensorflow:Variable decoder/block_001/layer_002/DenseReluDense/wi/kernel size 1048576 slice_size 1048576 Shape[d_model=512, d_ff=2048]
INFO:tensorflow:Variable decoder/block_001/layer_002/DenseReluDense/wo/kernel size 1048576 slice_size 1048576 Shape[d_ff=2048, d_model=512]
INFO:tensorflow:Variable decoder/block_001/layer_002/rms_norm/scale size 512 slice_size 512 Shape[d_model=512]
INFO:tensorflow:Variable decoder/block_002/layer_000/SelfAttention/k size 262144 slice_size 262144 Shape[d_model=512, heads=512]
INFO:tensorflow:Variable decoder/block_002/layer_000/SelfAttention/o size 262144 slice_size 262144 Shape[heads=512, d_model=512]
INFO:tensorflow:Variable decoder/block_002/layer_000/SelfAttention/q size 262144 slice_size 262144 Shape[d_model=512, heads=512]
INFO:tensorflow:Variable decoder/block_002/layer_000/SelfAttention/v size 262144 slice_size 262144 Shape[d_model=512, heads=512]
INFO:tensorflow:Variable decoder/block_002/layer_000/rms_norm/scale size 512 slice_size 512 Shape[d_model=512]
INFO:tensorflow:Variable decoder/block_002/layer_001/EncDecAttention/k size 262144 slice_size 262144 Shape[d_model=512, heads=512]
INFO:tensorflow:Variable decoder/block_002/layer_001/EncDecAttention/o size 262144 slice_size 262144 Shape[heads=512, d_model=512]
INFO:tensorflow:Variable decoder/block_002/layer_001/EncDecAttention/q size 262144 slice_size 262144 Shape[d_model=512, heads=512]
INFO:tensorflow:Variable decoder/block_002/layer_001/EncDecAttention/v size 262144 slice_size 262144 Shape[d_model=512, heads=512]
INFO:tensorflow:Variable decoder/block_002/layer_001/rms_norm/scale size 512 slice_size 512 Shape[d_model=512]
INFO:tensorflow:Variable decoder/block_002/layer_002/DenseReluDense/wi/kernel size 1048576 slice_size 1048576 Shape[d_model=512, d_ff=2048]
INFO:tensorflow:Variable decoder/block_002/layer_002/DenseReluDense/wo/kernel size 1048576 slice_size 1048576 Shape[d_ff=2048, d_model=512]
INFO:tensorflow:Variable decoder/block_002/layer_002/rms_norm/scale size 512 slice_size 512 Shape[d_model=512]
INFO:tensorflow:Variable decoder/block_003/layer_000/SelfAttention/k size 262144 slice_size 262144 Shape[d_model=512, heads=512]
INFO:tensorflow:Variable decoder/block_003/layer_000/SelfAttention/o size 262144 slice_size 262144 Shape[heads=512, d_model=512]
INFO:tensorflow:Variable decoder/block_003/layer_000/SelfAttention/q size 262144 slice_size 262144 Shape[d_model=512, heads=512]
INFO:tensorflow:Variable decoder/block_003/layer_000/SelfAttention/v size 262144 slice_size 262144 Shape[d_model=512, heads=512]
INFO:tensorflow:Variable decoder/block_003/layer_000/rms_norm/scale size 512 slice_size 512 Shape[d_model=512]
INFO:tensorflow:Variable decoder/block_003/layer_001/EncDecAttention/k size 262144 slice_size 262144 Shape[d_model=512, heads=512]
INFO:tensorflow:Variable decoder/block_003/layer_001/EncDecAttention/o size 262144 slice_size 262144 Shape[heads=512, d_model=512]
INFO:tensorflow:Variable decoder/block_003/layer_001/EncDecAttention/q size 262144 slice_size 262144 Shape[d_model=512, heads=512]
INFO:tensorflow:Variable decoder/block_003/layer_001/EncDecAttention/v size 262144 slice_size 262144 Shape[d_model=512, heads=512]
INFO:tensorflow:Variable decoder/block_003/layer_001/rms_norm/scale size 512 slice_size 512 Shape[d_model=512]
INFO:tensorflow:Variable decoder/block_003/layer_002/DenseReluDense/wi/kernel size 1048576 slice_size 1048576 Shape[d_model=512, d_ff=2048]
INFO:tensorflow:Variable decoder/block_003/layer_002/DenseReluDense/wo/kernel size 1048576 slice_size 1048576 Shape[d_ff=2048, d_model=512]
INFO:tensorflow:Variable decoder/block_003/layer_002/rms_norm/scale size 512 slice_size 512 Shape[d_model=512]
INFO:tensorflow:Variable decoder/block_004/layer_000/SelfAttention/k size 262144 slice_size 262144 Shape[d_model=512, heads=512]
INFO:tensorflow:Variable decoder/block_004/layer_000/SelfAttention/o size 262144 slice_size 262144 Shape[heads=512, d_model=512]
INFO:tensorflow:Variable decoder/block_004/layer_000/SelfAttention/q size 262144 slice_size 262144 Shape[d_model=512, heads=512]
INFO:tensorflow:Variable decoder/block_004/layer_000/SelfAttention/v size 262144 slice_size 262144 Shape[d_model=512, heads=512]
INFO:tensorflow:Variable decoder/block_004/layer_000/rms_norm/scale size 512 slice_size 512 Shape[d_model=512]
INFO:tensorflow:Variable decoder/block_004/layer_001/EncDecAttention/k size 262144 slice_size 262144 Shape[d_model=512, heads=512]
INFO:tensorflow:Variable decoder/block_004/layer_001/EncDecAttention/o size 262144 slice_size 262144 Shape[heads=512, d_model=512]
INFO:tensorflow:Variable decoder/block_004/layer_001/EncDecAttention/q size 262144 slice_size 262144 Shape[d_model=512, heads=512]
INFO:tensorflow:Variable decoder/block_004/layer_001/EncDecAttention/v size 262144 slice_size 262144 Shape[d_model=512, heads=512]
INFO:tensorflow:Variable decoder/block_004/layer_001/rms_norm/scale size 512 slice_size 512 Shape[d_model=512]
INFO:tensorflow:Variable decoder/block_004/layer_002/DenseReluDense/wi/kernel size 1048576 slice_size 1048576 Shape[d_model=512, d_ff=2048]
INFO:tensorflow:Variable decoder/block_004/layer_002/DenseReluDense/wo/kernel size 1048576 slice_size 1048576 Shape[d_ff=2048, d_model=512]
INFO:tensorflow:Variable decoder/block_004/layer_002/rms_norm/scale size 512 slice_size 512 Shape[d_model=512]
INFO:tensorflow:Variable decoder/block_005/layer_000/SelfAttention/k size 262144 slice_size 262144 Shape[d_model=512, heads=512]
INFO:tensorflow:Variable decoder/block_005/layer_000/SelfAttention/o size 262144 slice_size 262144 Shape[heads=512, d_model=512]
INFO:tensorflow:Variable decoder/block_005/layer_000/SelfAttention/q size 262144 slice_size 262144 Shape[d_model=512, heads=512]
INFO:tensorflow:Variable decoder/block_005/layer_000/SelfAttention/v size 262144 slice_size 262144 Shape[d_model=512, heads=512]
INFO:tensorflow:Variable decoder/block_005/layer_000/rms_norm/scale size 512 slice_size 512 Shape[d_model=512]
INFO:tensorflow:Variable decoder/block_005/layer_001/EncDecAttention/k size 262144 slice_size 262144 Shape[d_model=512, heads=512]
INFO:tensorflow:Variable decoder/block_005/layer_001/EncDecAttention/o size 262144 slice_size 262144 Shape[heads=512, d_model=512]
INFO:tensorflow:Variable decoder/block_005/layer_001/EncDecAttention/q size 262144 slice_size 262144 Shape[d_model=512, heads=512]
INFO:tensorflow:Variable decoder/block_005/layer_001/EncDecAttention/v size 262144 slice_size 262144 Shape[d_model=512, heads=512]
INFO:tensorflow:Variable decoder/block_005/layer_001/rms_norm/scale size 512 slice_size 512 Shape[d_model=512]
INFO:tensorflow:Variable decoder/block_005/layer_002/DenseReluDense/wi/kernel size 1048576 slice_size 1048576 Shape[d_model=512, d_ff=2048]
INFO:tensorflow:Variable decoder/block_005/layer_002/DenseReluDense/wo/kernel size 1048576 slice_size 1048576 Shape[d_ff=2048, d_model=512]
INFO:tensorflow:Variable decoder/block_005/layer_002/rms_norm/scale size 512 slice_size 512 Shape[d_model=512]
INFO:tensorflow:Variable decoder/rms_norm/scale size 512 slice_size 512 Shape[d_model=512]
INFO:tensorflow:Variable encoder/block_000/layer_000/SelfAttention/k size 262144 slice_size 262144 Shape[d_model=512, heads=512]
INFO:tensorflow:Variable encoder/block_000/layer_000/SelfAttention/o size 262144 slice_size 262144 Shape[heads=512, d_model=512]
INFO:tensorflow:Variable encoder/block_000/layer_000/SelfAttention/q size 262144 slice_size 262144 Shape[d_model=512, heads=512]
INFO:tensorflow:Variable encoder/block_000/layer_000/SelfAttention/relative_attention_bias size 256 slice_size 256 Shape[heads=8, buckets=32]
INFO:tensorflow:Variable encoder/block_000/layer_000/SelfAttention/v size 262144 slice_size 262144 Shape[d_model=512, heads=512]
INFO:tensorflow:Variable encoder/block_000/layer_000/rms_norm/scale size 512 slice_size 512 Shape[d_model=512]
INFO:tensorflow:Variable encoder/block_000/layer_001/DenseReluDense/wi/kernel size 1048576 slice_size 1048576 Shape[d_model=512, d_ff=2048]
INFO:tensorflow:Variable encoder/block_000/layer_001/DenseReluDense/wo/kernel size 1048576 slice_size 1048576 Shape[d_ff=2048, d_model=512]
INFO:tensorflow:Variable encoder/block_000/layer_001/rms_norm/scale size 512 slice_size 512 Shape[d_model=512]
INFO:tensorflow:Variable encoder/block_001/layer_000/SelfAttention/k size 262144 slice_size 262144 Shape[d_model=512, heads=512]
INFO:tensorflow:Variable encoder/block_001/layer_000/SelfAttention/o size 262144 slice_size 262144 Shape[heads=512, d_model=512]
INFO:tensorflow:Variable encoder/block_001/layer_000/SelfAttention/q size 262144 slice_size 262144 Shape[d_model=512, heads=512]
INFO:tensorflow:Variable encoder/block_001/layer_000/SelfAttention/v size 262144 slice_size 262144 Shape[d_model=512, heads=512]
INFO:tensorflow:Variable encoder/block_001/layer_000/rms_norm/scale size 512 slice_size 512 Shape[d_model=512]
INFO:tensorflow:Variable encoder/block_001/layer_001/DenseReluDense/wi/kernel size 1048576 slice_size 1048576 Shape[d_model=512, d_ff=2048]
INFO:tensorflow:Variable encoder/block_001/layer_001/DenseReluDense/wo/kernel size 1048576 slice_size 1048576 Shape[d_ff=2048, d_model=512]
INFO:tensorflow:Variable encoder/block_001/layer_001/rms_norm/scale size 512 slice_size 512 Shape[d_model=512]
INFO:tensorflow:Variable encoder/block_002/layer_000/SelfAttention/k size 262144 slice_size 262144 Shape[d_model=512, heads=512]
INFO:tensorflow:Variable encoder/block_002/layer_000/SelfAttention/o size 262144 slice_size 262144 Shape[heads=512, d_model=512]
INFO:tensorflow:Variable encoder/block_002/layer_000/SelfAttention/q size 262144 slice_size 262144 Shape[d_model=512, heads=512]
INFO:tensorflow:Variable encoder/block_002/layer_000/SelfAttention/v size 262144 slice_size 262144 Shape[d_model=512, heads=512]
INFO:tensorflow:Variable encoder/block_002/layer_000/rms_norm/scale size 512 slice_size 512 Shape[d_model=512]
INFO:tensorflow:Variable encoder/block_002/layer_001/DenseReluDense/wi/kernel size 1048576 slice_size 1048576 Shape[d_model=512, d_ff=2048]
INFO:tensorflow:Variable encoder/block_002/layer_001/DenseReluDense/wo/kernel size 1048576 slice_size 1048576 Shape[d_ff=2048, d_model=512]
INFO:tensorflow:Variable encoder/block_002/layer_001/rms_norm/scale size 512 slice_size 512 Shape[d_model=512]
INFO:tensorflow:Variable encoder/block_003/layer_000/SelfAttention/k size 262144 slice_size 262144 Shape[d_model=512, heads=512]
INFO:tensorflow:Variable encoder/block_003/layer_000/SelfAttention/o size 262144 slice_size 262144 Shape[heads=512, d_model=512]
INFO:tensorflow:Variable encoder/block_003/layer_000/SelfAttention/q size 262144 slice_size 262144 Shape[d_model=512, heads=512]
INFO:tensorflow:Variable encoder/block_003/layer_000/SelfAttention/v size 262144 slice_size 262144 Shape[d_model=512, heads=512]
INFO:tensorflow:Variable encoder/block_003/layer_000/rms_norm/scale size 512 slice_size 512 Shape[d_model=512]
INFO:tensorflow:Variable encoder/block_003/layer_001/DenseReluDense/wi/kernel size 1048576 slice_size 1048576 Shape[d_model=512, d_ff=2048]
INFO:tensorflow:Variable encoder/block_003/layer_001/DenseReluDense/wo/kernel size 1048576 slice_size 1048576 Shape[d_ff=2048, d_model=512]
INFO:tensorflow:Variable encoder/block_003/layer_001/rms_norm/scale size 512 slice_size 512 Shape[d_model=512]
INFO:tensorflow:Variable encoder/block_004/layer_000/SelfAttention/k size 262144 slice_size 262144 Shape[d_model=512, heads=512]
INFO:tensorflow:Variable encoder/block_004/layer_000/SelfAttention/o size 262144 slice_size 262144 Shape[heads=512, d_model=512]
INFO:tensorflow:Variable encoder/block_004/layer_000/SelfAttention/q size 262144 slice_size 262144 Shape[d_model=512, heads=512]
INFO:tensorflow:Variable encoder/block_004/layer_000/SelfAttention/v size 262144 slice_size 262144 Shape[d_model=512, heads=512]
INFO:tensorflow:Variable encoder/block_004/layer_000/rms_norm/scale size 512 slice_size 512 Shape[d_model=512]
INFO:tensorflow:Variable encoder/block_004/layer_001/DenseReluDense/wi/kernel size 1048576 slice_size 1048576 Shape[d_model=512, d_ff=2048]
INFO:tensorflow:Variable encoder/block_004/layer_001/DenseReluDense/wo/kernel size 1048576 slice_size 1048576 Shape[d_ff=2048, d_model=512]
INFO:tensorflow:Variable encoder/block_004/layer_001/rms_norm/scale size 512 slice_size 512 Shape[d_model=512]
INFO:tensorflow:Variable encoder/block_005/layer_000/SelfAttention/k size 262144 slice_size 262144 Shape[d_model=512, heads=512]
INFO:tensorflow:Variable encoder/block_005/layer_000/SelfAttention/o size 262144 slice_size 262144 Shape[heads=512, d_model=512]
INFO:tensorflow:Variable encoder/block_005/layer_000/SelfAttention/q size 262144 slice_size 262144 Shape[d_model=512, heads=512]
INFO:tensorflow:Variable encoder/block_005/layer_000/SelfAttention/v size 262144 slice_size 262144 Shape[d_model=512, heads=512]
INFO:tensorflow:Variable encoder/block_005/layer_000/rms_norm/scale size 512 slice_size 512 Shape[d_model=512]
INFO:tensorflow:Variable encoder/block_005/layer_001/DenseReluDense/wi/kernel size 1048576 slice_size 1048576 Shape[d_model=512, d_ff=2048]
INFO:tensorflow:Variable encoder/block_005/layer_001/DenseReluDense/wo/kernel size 1048576 slice_size 1048576 Shape[d_ff=2048, d_model=512]
INFO:tensorflow:Variable encoder/block_005/layer_001/rms_norm/scale size 512 slice_size 512 Shape[d_model=512]
INFO:tensorflow:Variable encoder/rms_norm/scale size 512 slice_size 512 Shape[d_model=512]
INFO:tensorflow:Variable shared/embedding size 16449536 slice_size 16449536 Shape[vocab=32128, d_model=512]
INFO:tensorflow:Trainable Variables count: 131 Total size: 60506624 Total slice_size: 60506624
INFO:tensorflow:All Variables count: 131 Total size: 60506624 Total slice_size: 60506624
INFO:tensorflow:Counters:
allconcat: 8.32e+03
allconcat/0: 128
allconcat/0/reshape_op: 128
allconcat/1: 8.19e+03
allconcat/1/reshape_op: 8.19e+03
allreduce: 1.87e+08
allreduce/[0]: 1.87e+08
allreduce/[0]/einsum_op: 1.87e+08
allreduce/[0]/reduce_op: 64
allreduce/[1]: 2
allreduce/[1]/reduce_op: 2
einsum: 1.18e+12
einsum_unique: 1.18e+12
output: 1.08e+10
output/AddOperation: 3.02e+09
output/BinaryOpWithBroadcasting: 1.9e+07
output/BroadcastOperation: 32
output/Constant: 1.01e+08
output/EinsumOperation: 2.8e+09
output/ImportOperation: 4.15e+03
output/MinMaxOperation: 4.72e+06
output/OneHotOperation: 7.59e+08
output/RangeOperation: 1.02e+03
output/ReduceOperation: 3.9e+06
output/ReshapeOperation: 6.56e+08
output/ScalarAddOperation: 6.66e+06
output/ScalarMultiplyOperation: 3.39e+07
output/ShiftOperation: 1.64e+04
output/SlicewiseOperation: 2.31e+09
output/StopGradient: 9.06e+08
output/Variable: 6.05e+07
output/WhileLoopOperation: 1.01e+08
output_unique: 1.08e+10
output_unique/AddOperation: 3.02e+09
output_unique/BinaryOpWithBroadcasting: 1.9e+07
output_unique/BroadcastOperation: 32
output_unique/Constant: 1.01e+08
output_unique/EinsumOperation: 2.8e+09
output_unique/ImportOperation: 4.15e+03
output_unique/MinMaxOperation: 4.72e+06
output_unique/OneHotOperation: 7.59e+08
output_unique/RangeOperation: 1.02e+03
output_unique/ReduceOperation: 3.9e+06
output_unique/ReshapeOperation: 6.56e+08
output_unique/ScalarAddOperation: 6.66e+06
output_unique/ScalarMultiplyOperation: 3.39e+07
output_unique/ShiftOperation: 1.64e+04
output_unique/SlicewiseOperation: 2.31e+09
output_unique/StopGradient: 9.06e+08
output_unique/Variable: 6.05e+07
output_unique/WhileLoopOperation: 1.01e+08
variables: 6.05e+07
variables/trainable: 6.05e+07
INFO:tensorflow:Done calling model_fn.
INFO:tensorflow:Graph was finalized.
INFO:tensorflow:Restoring parameters from small/model.ckpt-10000
INFO:tensorflow:Running local_init_op.
INFO:tensorflow:Done running local_init_op.
INFO:tensorflow:Before copy master to slices.
INFO:tensorflow:Done with copy master to slices.
INFO:tensorflow:decoded 0: b'function documentation generation ruby: def add ( severity , progname , & block ) return true if io . nil? || severity < level message = format_message ( severity , progname , yield ) MUTEX . synchronize { io . write ( message ) } true end'
INFO:tensorflow: -> b'Log a message at the given level if the logger is present'
INFO:tensorflow:decoded 1: b'function documentation generation ruby: def add ( severity , progname , & block ) return true if io . nil? || severity < level message = format_message ( severity , progname , yield ) MUTEX . synchronize { io . write ( message ) } true end'
INFO:tensorflow: -> b'Log a message at the given level if the logger is present'
INFO:tensorflow:decoded 2: b'function documentation generation ruby: def add ( severity , progname , & block ) return true if io . nil? || severity < level message = format_message ( severity , progname , yield ) MUTEX . synchronize { io . write ( message ) } true end'
INFO:tensorflow: -> b'Log a message at the given level if the logger is present'
INFO:tensorflow:decoded 4: b'function documentation generation ruby: def add ( severity , progname , & block ) return true if io . nil? || severity < level message = format_message ( severity , progname , yield ) MUTEX . synchronize { io . write ( message ) } true end'
INFO:tensorflow: -> b'Log a message at the given level if the logger is present'
INFO:tensorflow:prediction_loop marked as finished
INFO:tensorflow:prediction_loop marked as finished
###Markdown
Code Summarization Result
###Code
prediction_file = "MtfModel-output.txt-10000"
print("\nPredictions using checkpoint 10000:\n" )
with tf.io.gfile.GFile(prediction_file) as f:
for c, d in zip(codes, f):
if c:
print("Code for prediction: " + c + '\n')
print("Generated Documentation: " + d)
###Output
Predictions using checkpoint 10000:
Code for prediction: def add ( severity , progname , & block ) return true if io . nil? || severity < level message = format_message ( severity , progname , yield ) MUTEX . synchronize { io . write ( message ) } true end
Generated Documentation: b'Log a message at the given level if the logger is present'
|
ML-week/ds_t/Pan-Performance-Eval-and-Query.ipynb | ###Markdown
*This notebook contains an excerpt from the [Python Data Science Handbook](http://shop.oreilly.com/product/0636920034919.do) by Jake VanderPlas; the content is available [on GitHub](https://github.com/jakevdp/PythonDataScienceHandbook).**The text is released under the [CC-BY-NC-ND license](https://creativecommons.org/licenses/by-nc-nd/3.0/us/legalcode), and code is released under the [MIT license](https://opensource.org/licenses/MIT). If you find this content useful, please consider supporting the work by [buying the book](http://shop.oreilly.com/product/0636920034919.do)!* High-Performance Pandas: eval() and query() As we've already seen in previous sections, the power of the PyData stack is built upon the ability of NumPy and Pandas to push basic operations into C via an intuitive syntax: examples are vectorized/broadcasted operations in NumPy, and grouping-type operations in Pandas.While these abstractions are efficient and effective for many common use cases, they often rely on the creation of temporary intermediate objects, which can cause undue overhead in computational time and memory use.As of version 0.13 (released January 2014), Pandas includes some experimental tools that allow you to directly access C-speed operations without costly allocation of intermediate arrays.These are the ``eval()`` and ``query()`` functions, which rely on the [Numexpr](https://github.com/pydata/numexpr) package.In this notebook we will walk through their use and give some rules-of-thumb about when you might think about using them.
###Code
import platform
print(platform.python_version())
%load_ext watermark
%watermark -a 'Gopala KR' -u -d -v -p watermark,numpy,pandas,scipy,scikit-learn,matplotlib,seaborn,jupyter,notebook,line_profiler,memory_profiler,numexpr
###Output
3.5.4
###Markdown
Motivating ``query()`` and ``eval()``: Compound ExpressionsWe've seen previously that NumPy and Pandas support fast vectorized operations; for example, when adding the elements of two arrays:
###Code
import numpy as np
rng = np.random.RandomState(42)
x = rng.rand(1000000)
y = rng.rand(1000000)
%timeit x + y
###Output
1.28 ms ± 435 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)
###Markdown
As discussed in [Computation on NumPy Arrays: Universal Functions](02.03-Computation-on-arrays-ufuncs.ipynb), this is much faster than doing the addition via a Python loop or comprehension:
###Code
%timeit np.fromiter((xi + yi for xi, yi in zip(x, y)), dtype=x.dtype, count=len(x))
###Output
347 ms ± 84.3 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
###Markdown
But this abstraction can become less efficient when computing compound expressions.For example, consider the following expression:
###Code
mask = (x > 0.5) & (y < 0.5)
###Output
_____no_output_____
###Markdown
Because NumPy evaluates each subexpression, this is roughly equivalent to the following:
###Code
tmp1 = (x > 0.5)
tmp2 = (y < 0.5)
mask = tmp1 & tmp2
###Output
_____no_output_____
###Markdown
In other words, *every intermediate step is explicitly allocated in memory*. If the ``x`` and ``y`` arrays are very large, this can lead to significant memory and computational overhead.The Numexpr library gives you the ability to compute this type of compound expression element by element, without the need to allocate full intermediate arrays.The [Numexpr documentation](https://github.com/pydata/numexpr) has more details, but for the time being it is sufficient to say that the library accepts a *string* giving the NumPy-style expression you'd like to compute:
###Code
import numexpr
mask_numexpr = numexpr.evaluate('(x > 0.5) & (y < 0.5)')
np.allclose(mask, mask_numexpr)
###Output
_____no_output_____
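###Markdown
(Aside added for illustration - a minimal sketch assuming the `x` and `y` arrays defined above.) Numexpr expression strings can also contain supported functions such as `where()` for element-wise conditionals, which goes beyond plain comparison expressions:
###Code
import numexpr
z = numexpr.evaluate('where(x > 0.5, x - y, x + y)')
z[:5]
###Output
_____no_output_____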
###Markdown
The benefit here is that Numexpr evaluates the expression in a way that does not use full-sized temporary arrays, and thus can be much more efficient than NumPy, especially for large arrays.The Pandas ``eval()`` and ``query()`` tools that we will discuss here are conceptually similar, and depend on the Numexpr package. ``pandas.eval()`` for Efficient OperationsThe ``eval()`` function in Pandas uses string expressions to efficiently compute operations using ``DataFrame``s.For example, consider the following ``DataFrame``s:
###Code
import pandas as pd
nrows, ncols = 100000, 100
rng = np.random.RandomState(42)
df1, df2, df3, df4 = (pd.DataFrame(rng.rand(nrows, ncols))
for i in range(4))
###Output
_____no_output_____
###Markdown
To compute the sum of all four ``DataFrame``s using the typical Pandas approach, we can just write the sum:
###Code
%timeit df1 + df2 + df3 + df4
###Output
113 ms ± 14.6 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
###Markdown
The same result can be computed via ``pd.eval`` by constructing the expression as a string:
###Code
%timeit pd.eval('df1 + df2 + df3 + df4')
###Output
50.2 ms ± 3.19 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
###Markdown
The ``eval()`` version of this expression is roughly twice as fast here (and uses much less memory), while giving the same result:
###Code
np.allclose(df1 + df2 + df3 + df4,
pd.eval('df1 + df2 + df3 + df4'))
###Output
_____no_output_____
###Markdown
Operations supported by ``pd.eval()``As of Pandas v0.16, ``pd.eval()`` supports a wide range of operations.To demonstrate these, we'll use the following integer ``DataFrame``s:
###Code
df1, df2, df3, df4, df5 = (pd.DataFrame(rng.randint(0, 1000, (100, 3)))
for i in range(5))
###Output
_____no_output_____
###Markdown
Arithmetic operators``pd.eval()`` supports all arithmetic operators. For example:
###Code
result1 = -df1 * df2 / (df3 + df4) - df5
result2 = pd.eval('-df1 * df2 / (df3 + df4) - df5')
np.allclose(result1, result2)
###Output
_____no_output_____
###Markdown
Comparison operators``pd.eval()`` supports all comparison operators, including chained expressions:
###Code
result1 = (df1 < df2) & (df2 <= df3) & (df3 != df4)
result2 = pd.eval('df1 < df2 <= df3 != df4')
np.allclose(result1, result2)
###Output
_____no_output_____
###Markdown
Bitwise operators``pd.eval()`` supports the ``&`` and ``|`` bitwise operators:
###Code
result1 = (df1 < 0.5) & (df2 < 0.5) | (df3 < df4)
result2 = pd.eval('(df1 < 0.5) & (df2 < 0.5) | (df3 < df4)')
np.allclose(result1, result2)
###Output
_____no_output_____
###Markdown
In addition, it supports the use of the literal ``and`` and ``or`` in Boolean expressions:
###Code
result3 = pd.eval('(df1 < 0.5) and (df2 < 0.5) or (df3 < df4)')
np.allclose(result1, result3)
###Output
_____no_output_____
###Markdown
Object attributes and indices``pd.eval()`` supports access to object attributes via the ``obj.attr`` syntax, and indexes via the ``obj[index]`` syntax:
###Code
result1 = df2.T[0] + df3.iloc[1]
result2 = pd.eval('df2.T[0] + df3.iloc[1]')
np.allclose(result1, result2)
###Output
_____no_output_____
###Markdown
Other operationsOther operations such as function calls, conditional statements, loops, and other more involved constructs are currently *not* implemented in ``pd.eval()``.If you'd like to execute these more complicated types of expressions, you can use the Numexpr library itself. ``DataFrame.eval()`` for Column-Wise OperationsJust as Pandas has a top-level ``pd.eval()`` function, ``DataFrame``s have an ``eval()`` method that works in similar ways.The benefit of the ``eval()`` method is that columns can be referred to *by name*.We'll use this labeled array as an example:
###Code
df = pd.DataFrame(rng.rand(1000, 3), columns=['A', 'B', 'C'])
df.head()
###Output
_____no_output_____
###Markdown
Using ``pd.eval()`` as above, we can compute expressions with the three columns like this:
###Code
result1 = (df['A'] + df['B']) / (df['C'] - 1)
result2 = pd.eval("(df.A + df.B) / (df.C - 1)")
np.allclose(result1, result2)
###Output
_____no_output_____
###Markdown
The ``DataFrame.eval()`` method allows much more succinct evaluation of expressions with the columns:
###Code
result3 = df.eval('(A + B) / (C - 1)')
np.allclose(result1, result3)
###Output
_____no_output_____
###Markdown
Notice here that we treat *column names as variables* within the evaluated expression, and the result is what we would wish. Assignment in DataFrame.eval()In addition to the options just discussed, ``DataFrame.eval()`` also allows assignment to any column.Let's use the ``DataFrame`` from before, which has columns ``'A'``, ``'B'``, and ``'C'``:
###Code
df.head()
###Output
_____no_output_____
###Markdown
We can use ``df.eval()`` to create a new column ``'D'`` and assign to it a value computed from the other columns:
###Code
df.eval('D = (A + B) / C', inplace=True)
df.head()
###Output
_____no_output_____
###Markdown
In the same way, any existing column can be modified:
###Code
df.eval('D = (A - B) / C', inplace=True)
df.head()
###Output
_____no_output_____
###Markdown
Local variables in DataFrame.eval()The ``DataFrame.eval()`` method supports an additional syntax that lets it work with local Python variables.Consider the following:
###Code
column_mean = df.mean(1)
result1 = df['A'] + column_mean
result2 = df.eval('A + @column_mean')
np.allclose(result1, result2)
###Output
_____no_output_____
###Markdown
The ``@`` character here marks a *variable name* rather than a *column name*, and lets you efficiently evaluate expressions involving the two "namespaces": the namespace of columns, and the namespace of Python objects.Notice that this ``@`` character is only supported by the ``DataFrame.eval()`` *method*, not by the ``pandas.eval()`` *function*, because the ``pandas.eval()`` function only has access to the one (Python) namespace. DataFrame.query() MethodThe ``DataFrame`` has another method based on evaluated strings, called the ``query()`` method.Consider the following:
###Code
result1 = df[(df.A < 0.5) & (df.B < 0.5)]
result2 = pd.eval('df[(df.A < 0.5) & (df.B < 0.5)]')
np.allclose(result1, result2)
###Output
_____no_output_____
###Markdown
As with the example used in our discussion of ``DataFrame.eval()``, this is an expression involving columns of the ``DataFrame``.It cannot be expressed using the ``DataFrame.eval()`` syntax, however!Instead, for this type of filtering operation, you can use the ``query()`` method:
###Code
result2 = df.query('A < 0.5 and B < 0.5')
np.allclose(result1, result2)
###Output
_____no_output_____
###Markdown
In addition to being a more efficient computation, compared to the masking expression this is much easier to read and understand.Note that the ``query()`` method also accepts the ``@`` flag to mark local variables:
###Code
Cmean = df['C'].mean()
result1 = df[(df.A < Cmean) & (df.B < Cmean)]
result2 = df.query('A < @Cmean and B < @Cmean')
np.allclose(result1, result2)
###Output
_____no_output_____
###Markdown
Performance: When to Use These FunctionsWhen considering whether to use these functions, there are two considerations: *computation time* and *memory use*.Memory use is the most predictable aspect. As already mentioned, every compound expression involving NumPy arrays or Pandas ``DataFrame``s will result in implicit creation of temporary arrays:For example, this:
###Code
x = df[(df.A < 0.5) & (df.B < 0.5)]
###Output
_____no_output_____
###Markdown
Is roughly equivalent to this:
###Code
tmp1 = df.A < 0.5
tmp2 = df.B < 0.5
tmp3 = tmp1 & tmp2
x = df[tmp3]
###Output
_____no_output_____
###Markdown
If the size of the temporary ``DataFrame``s is significant compared to your available system memory (typically several gigabytes) then it's a good idea to use an ``eval()`` or ``query()`` expression.You can check the approximate size of your array in bytes using this:
###Code
df.values.nbytes
###Output
_____no_output_____ |
StrathconaCountyOpenData/Group5Countries/Countries_workbook-interactive.ipynb | ###Markdown

###Code
#from IPython.display import HTML, display
#display(HTML("<table><tr><td><img src='data/map.png' width='550'></td><td><img src='data/globe.jpeg' width='420'></td></tr></table>"))
###Output
_____no_output_____
###Markdown
Prep work Run the next cell to load libraries and pre-defined functions:
###Code
import pandas as pd
import IPython
from plotly.offline import init_notebook_mode
#to enable plotting in colab
def enable_plotly_in_cell():
display(IPython.core.display.HTML('''
<script src="/static/components/requirejs/require.js"></script>
'''))
init_notebook_mode(connected=False)
get_ipython().events.register('pre_run_cell', enable_plotly_in_cell)
###Output
_____no_output_____
###Markdown
Group goal Go through the analysis below, work on challenges.**Extra challenge**:Is there anything else interesting you can find and visualize for this data? Getting dataThis dataset was created by [Bootstrap](https://www.bootstrapworld.org/index.shtml) company and can be downloaded from [here](https://docs.google.com/spreadsheets/d/19VoYxPw0tmuSViN1qFIkyUoepjNSRsuQCe0TZZDmrZs/editgid=213565368).Data was aggreagted from the following souces : - The World Factbook: - [GDP (PPP)](https://www.cia.gov/library/publications/the-world-factbook/rankorder/2001rank.html) - [Life expectancy at birth](https://www.cia.gov/library/publications/the-world-factbook/fields/355rank.html) - [Population](https://www.cia.gov/library/publications/the-world-factbook/fields/335rank.html)- Wikipedia: - [Universal Health Care](https://en.wikipedia.org/wiki/List_of_countries_with_universal_health_care) Some countries/territories/regions were omitted from the dataset due to incomplete data.
###Code
#reading from cloud object storage
target_url="https://swift-yeg.cloud.cybera.ca:8080/v1/AUTH_d22d1e3f28be45209ba8f660295c84cf/hackaton/countries2.csv"
#reading the input file and creating dataframe
countries = pd.read_csv(target_url)
#how many rows and columns does the dataframe have?
countries.shape
#what are the column names?
countries.columns
###Output
_____no_output_____
###Markdown
Columns description: **gdp(\$US)** - the sum value of all goods and services produced in the country valued at prices prevailing in the United States.**life-expectancy (yrs)** - the average number of years to be lived by a group of people born in the same year, if mortality at each age remains constant in the future. Life expectancy at birth is also a measure of overall quality of life in a country and summarizes the mortality at all ages.**population** - population of the country.**has-univ-healthcare** - Universal health coverage is a broad concept that has been implemented in several ways. The common denominator for all such programs is some form of government action aimed at extending access to health care as widely as possible and setting minimum standards.**code** - Country code
###Code
#display the first 5 rows to explore what the data looks like
countries.head()
#let's create another column - GDP per person
countries['gdp ($US) person'] = countries['gdp ($US)']/countries["population"]
###Output
_____no_output_____
###Markdown
Exploring data by country We can plot all the countries we have using the `px.choropleth()` function. Let's try coloring countries differently depending on a specific column:
###Code
import plotly.express as px
fig = px.choropleth(countries, locations="code",
color="life-expectancy (yrs)", #coloring by life-expectancy
hover_name="country") #country name will appear when you hover your mouse over it
fig.show()
###Output
_____no_output_____
###Markdown
Look at the map - interestingly, Japan has the highest life expectancy! Let's find out the exact number:
###Code
countries[countries["country"]=="Japan"]
###Output
_____no_output_____
###Markdown
Challenge - Using the cells above as an example - create new cells and draw a map colored by `population` and by `gdp ($US)` - Print on the screen the exact number for China's population. - If you look at both maps you created - do they look similar? Why do you think that happens? (A sketch of one possible answer is added below.)
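###Markdown
(Added illustration - a minimal sketch of one possible answer to the challenge above, reusing the `countries` dataframe and the `px` import from the previous cells.)
###Code
# map colored by population
fig = px.choropleth(countries, locations="code",
                    color="population",
                    hover_name="country")
fig.show()
# exact population number for China
countries[countries["country"]=="China"][["country", "population"]]
###Output
_____no_output_____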
###Code
#plot by the newly created column
fig = px.choropleth(countries, locations="code",
color="gdp ($US) person",
hover_name="country")
fig.show()
###Output
_____no_output_____
###Markdown
For the next part of the notebook we need additional libraries loaded.
###Code
#library should be installed already
#!pip install cufflinks ipywidgets
import cufflinks as cf
cf.go_offline()
###Output
_____no_output_____
###Markdown
Let's find the top 20 countries with the highest "gdp per person" value.
###Code
#select only two columns - "gdp ($US) person" and "country"
gdp_person = countries[["gdp ($US) person","country"]]
#order by "gdp ($US) person", having highest numbers on top and get top 20
gdp_person = gdp_person.sort_values("gdp ($US) person", ascending = False).head(20)
gdp_person
#plotting top 20 countries, setting index to country - so the bars are marked with country names
gdp_person.set_index("country").iplot(kind = "bar", yTitle='GDP (USD) Per Person', xTitle="Country")
###Output
_____no_output_____
###Markdown
Looks like some of the countries in the top 20 are quite small - like Luxembourg or Brunei. Let's find out what is the population for these countries.
###Code
# creating new column - population in thousands
countries["population_t"] = countries["population"]/1000
#this time we select 3 columns - "gdp ($US) person", "population_t" and "country"
gdp_person_pop = countries[["gdp ($US) person","population_t" ,"country"]]
#sorting again by "gdp ($US) person"
gdp_person_pop = gdp_person_pop.sort_values("gdp ($US) person", ascending = False).head(20)
gdp_person_pop.set_index("country").iplot(kind = "bar",yTitle="Population in thousands and GDP",xTitle="Country")
###Output
_____no_output_____
###Markdown
We can see that the majority of countries in the top 20 have smaller populations, while the United States' population is significantly larger than the others, so there is likely no connection between GDP per person and population size. Challenge - Using the cells above as an example, create new cells and find the top 20 countries with the lowest life expectancy. - Do these countries have Universal Health Care? (A sketch is added below, before moving on to continents.) Exploring data by continent Number of countries per continent
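###Markdown
(Added illustration - a minimal sketch for the challenge above; column names follow the dataset description earlier in the notebook.)
###Code
# 20 countries with the lowest life expectancy, and whether they have universal health care
lowest_life = countries[["country", "life-expectancy (yrs)", "has-univ-healthcare"]]
lowest_life.sort_values("life-expectancy (yrs)").head(20)
###Output
_____no_output_____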
###Code
#unique continents
continents = countries["continent"].unique()
#how many of them?
print(len(continents)," continents")
continents
#group by continent and calculate how many rows/countries
counts_by_continent = countries.groupby("continent").size()
#Create additional column - count
counts_by_continent = counts_by_continent.reset_index(name="count")
counts_by_continent
#using kind pie to create a pie chart
counts_by_continent.iplot(kind="pie",labels = "continent",values = "count")
###Output
_____no_output_____
###Markdown
Looks like Asia, Africa and Europe have an almost equal number of countries. Population by continentCalculate which continent has the largest population:
###Code
#group by continent and calculate the sum for every column
sum_by_continent = countries.groupby("continent").sum()
#convert index(row names) into additional column
sum_by_continent = sum_by_continent.reset_index()
sum_by_continent
# we select only one column - population and create a pie chart
sum_by_continent.iplot(kind="pie", values="population",labels="continent")
###Output
_____no_output_____ |
Content/1.python/2.python_advanced/01.OOP/4.Static_method_in_python.ipynb | ###Markdown
What is a static method in python?Static methods are methods within a class that have no access to anything else in the class (no `self` keyword or `cls` keyword). - They cannot change or look at any object attributes or call other methods within the class. - They can be thought of as a special kind of function that sits inside of the class. - When we create a static method we must use something called a [decorator](https://realpython.com/primer-on-python-decorators/syntactic-sugar). The decorator for a static method is `@staticmethod`. Don't worry about it, you will see more about it later in the course.In other words, you can create a callable class using the static method and use it with some restrictions. It helps developers write code in a safe architectural way to prevent conflicts in the code.(We'll go deeper into the decorators later.)
###Code
class Calculator:
"""
Class that contains methods to perform basic operations.
"""
@staticmethod
def multiply(number_one, number_two):
result = number_one * number_two
print(f"Muliply: {result}")
@staticmethod
def add(number_one, number_two):
result = number_one + number_two
print(f"Addition: {result}")
Calculator.multiply(2, 5)
Calculator.add(2, 5)
###Output
Multiply: 10
Addition: 7
###Markdown
Check another program to use the built-in `staticmethod()` function.
###Code
class Person:
"""
Class that defines a person who has an age defined by the age() method.
"""
def age(age_number):
if(age_number <= 30):
print("Young")
elif(age_number <= 50):
print("Middle Age")
else:
print("Senior Age")
# Make age() a static method with the built-in staticmethod() function, then call it on the class
Person.age = staticmethod(Person.age)
Person.age(45)
###Output
Middle Age
|
Course_2_CNN_in_Tensorflow/4.Multiclass_Classifications/Exercise_4_Multi_class_classifier_Question-FINAL.ipynb | ###Markdown
Submission Instructions
###Code
# Now click the 'Submit Assignment' button above.
###Output
_____no_output_____
###Markdown
When you're done or would like to take a break, please run the two cells below to save your work and close the Notebook. This will free up resources for your fellow learners.
###Code
%%javascript
<!-- Save the notebook -->
IPython.notebook.save_checkpoint();
%%javascript
IPython.notebook.session.delete();
window.onbeforeunload = null
setTimeout(function() { window.close(); }, 1000);
###Output
_____no_output_____ |
notebookTemplate/DSFPtemplate.ipynb | ###Markdown
As a preamble - note that JuPyTer notebooks are formatted using [markdown](http://daringfireball.net/projects/markdown/), a text-to-html conversion tool. If you are not familiar, there are [cheatsheets](https://github.com/adam-p/markdown-here/wiki/Markdown-Cheatsheet) available with all the basic functionality that you will need. LSSTC DSFP Template Notebook: How to put together a DSFP notebook using our standard formatting**Version 0.1**This notebook serves as a template for constructing problems for students as part of the [LSSTC Data Science Fellowship Program](http://ciera.northwestern.edu/Education/LSSTC_DSFPOverview.php). This preamble to the notebook contains an overview of the problems with a brief introduction to the big picture behind the problem. For some example notebooks, take a look at AAM's problems on [Unsupervised Machine Learning](https://github.com/LSSTC-DSFP/LSSTC-DSFP-Sessions/blob/master/Session1/Day4/IntroToMachineLearning.ipynb) and [Building An End-to-end Machine Learning Pipeline]().*Note - tips and suggestions based on AAM's experience developing problems for the DSFP are italicized in this notebook. Otherwise, we typically reserve italics for **hints** on specific problems for the students.** * *By AA Miller (CIERA/Northwestern & Adler) *The necessary libraries are imported at the beginning of the notebook*
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib notebook
###Output
_____no_output_____
###Markdown
Problem 1) Creating DSFP ProblemsWe have found that modular notebooks work best. Thus the typical construction includes $\sim3-5$ capital P Problems, each with sub-problems a, b, c, d, etc. The Problems address major ideas from the lecture, while the sub-problems directly address the nitty gritty details related to implementation of the idea. Most sub-problems require some measure of coding (*typically $\lesssim 10$ lines in a single sub-problem is best*), though some simply ask for responses regarding the data or the code that has just been written. The modular structure allows the students to move at their own pace, and at this point they all understand that problems get progressively more difficult throughout the notebook. Within the time that is allotted, some students finish and others do not. Following the Problems, there is always a **Challenge Problem** for the students that finish early. These Challenge Problems are always more difficult and typically are not as well structured as the earlier Problems in the notebook.**An essential tip** - in my experience it is by *far* best to start by writing the solutions to the notebook. This ensures that all the code works and behaves as expected [it also helps control the total time necessary to complete the problems]. Then the notebook for the students can be created by copying the solutions and removing important portions of the code using ` complete` as you will see in the examples below. It is often, though not always, useful to include code that the students run to load data for the notebook. Here is an example to load some columns from SDSS. As this data is required for the exercise, this is not considered a problem.
###Code
# execute this cell
from astroquery.sdss import SDSS # enables direct queries to the SDSS database
TSquery = """SELECT TOP 1000
p.psfMag_r, p.fiberMag_r, p.fiber2Mag_r, p.petroMag_r,
p.deVMag_r, p.expMag_r, p.modelMag_r, p.cModelMag_r,
s.class
FROM PhotoObjAll AS p JOIN specObjAll s ON s.bestobjid = p.objid
WHERE p.mode = 1 AND s.sciencePrimary = 1 AND p.clean = 1 AND s.class != 'QSO'
ORDER BY p.objid ASC
"""
SDSSts = SDSS.query_sql(TSquery)
SDSSts
###Output
_____no_output_____
###Markdown
For the actual problems it is best to provide the students with code snippets as in the example below. It is also useful to define specific variable names that will be used throughout the notebook.**Problem 1a**How many sources in the random selection from SDSS have a spectroscopic class of `STAR`? Store the result in a variable called `Nstar`.*Hint* - pay attention to the python type for the `class` column in `SDSSts`.
###Code
stars = SDSSts["class"] # complete
Nstar = # complete
print("There are {:d} stars in the data set.".format( # complete
###Output
_____no_output_____
###Markdown
**Problem 1b**What fraction of the stars in the data set are brighter than $r' = 20 \; \mathrm{mag}$? Store the result in a variable called `Nbright_star`.
###Code
bright = SDSSts[ # complete
Nbright_star = # complete
print("There are {:d} stars with r' < 20 the data set.".format( # complete
###Output
_____no_output_____
###Markdown
Sometimes it is useful to add an explanation for the acquired results. Or to add some text setting up the next portion of the problem. Alternatively, you may want the students to think about the results and provide a response for what they found, as follows: *in this case it's good to provide a markdown cell for them to write their answer***Problem 1c**Based on your knowledge of SDSS, do your results above make sense? *Provide your answer to Problem 1c here* While plots are useful, and often necessary, it's best to keep them compact if possible. We don't want students struggling with plot syntax (unless the specific topic is visualization). Thus, in these cases, it's best to provide code snippets that are closer to complete. **Problem 1d**For the bright stars, make a scatter plot of `psfMag_r` vs. `deVMag_r`.
###Code
plt.scatter(SDSSts[(bright) & (star)][ # complete
plt.xlabel('psfMag_r')
plt.ylabel('deVMag_r')
plt.xlim( # complete
plt.ylim( # complete
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Problem 2When the first idea is complete, begin a new Problem. The notebook continues with this modular structure until the challenge problem. Problems 2a, 2b, 2c, etc. are eventually to be followed by Problem 3... and so on. Text explaining what's going on** Problem 2a **Definition of Problem 2a.
###Code
code_snippet1 = # complete
code_snippet2 = # complete
code_snippet3 = # complete
code_snippet4 = # complete
###Output
_____no_output_____
###Markdown
Finally, as noted above, the notebook should end with a Challenge Problem in case any of the students finish the notebook before time is up. This problem should be harder and include fewer prompts. Challenge ProblemComplete the following problem, which can be arbitrarily difficult.
###Code
# no code snippets provided here
###Output
_____no_output_____ |
Time Series/Time Series.ipynb | ###Markdown
Variance:$Var(X) = E[(X - E(X))^2] = \frac{1}{N-1} \sum_{i=1}^N (x_i - \bar{x})^2 $ Covariance:$Cov(X,Y) = E[(X - E(X))(Y - E(Y))] = \frac{1}{N-1} \sum_{i=1}^N (x_i - \bar{x})(y_i - \bar{y})$ Standard Deviation:$\sigma_{x} = \sqrt{Var(X)}$ Correlation:$Corr(X,Y) = \frac{Cov(X,Y)}{\sigma_{x} * \sigma_{y}}$ Autocovariance:$Cov(X_t, X_{t+k}) = E[(X_{t} - E(X_{t}))(X_{t+k} - E(X_{t+k}))] = E[(X_{t} - \bar{x})(X_{t+k} - \bar{x})]$ Autocorrelation:$Corr(X_{t},X_{t+k}) = \frac{Cov(X_t, X_{t+k})}{\sigma_x^2}$ Moving Average:$X_t = \mu + \varepsilon_t + \sum_{i=1}^q \theta_i \varepsilon_{t-i}$ Auto-Regressive:$X_t = c + \sum_{i=1}^p \varphi_i X_{t-i} + \varepsilon_t$ Auto-Regressive Moving Average:$X_t = \sum_{i=1}^p \varphi_i X_{t-i} + \varepsilon_t + \sum_{i=1}^q \theta_i \varepsilon_{t-i}$ Auto-Regressive Integrated Moving Average:From ARMA,$X_t - \sum_{i=1}^{p^{\prime}} \varphi_i X_{t-i} = \varepsilon_t + \sum_{i=1}^q \theta_i \varepsilon_{t-i}$Let$L^{i}X_t = X_{t-i}, i = 1, 2, 3...$and$L^{i}\varepsilon_t = \varepsilon_{t-i}, i = 1, 2, 3...$then$X_t - \sum_{i=1}^{p^{\prime}} \varphi_i L^{i}X_t = \varepsilon_t + \sum_{i=1}^q \theta_i L^{i}\varepsilon_t$$\left(1 - \sum_{i=1}^{p^{\prime}} \varphi_i L^{i}\right)X_t = \left(1 + \sum_{i=1}^q \theta_i L^{i}\right)\varepsilon_t$Assume that$\left(1 - \sum_{i=1}^{p^{\prime}} \varphi_i L^{i}\right) = \left(1 - \sum_{i=1}^{p^{\prime} - d} \alpha_i L^{i}\right)(1 - L)^{d}$then$\left(1 - \sum_{i=1}^{p} \alpha_i L^{i}\right)(1 - L)^{d}X_t = \left(1 + \sum_{i=1}^q \theta_i L^{i}\right)\varepsilon_t$where$p = p^{\prime} - d \quad (\text{d integration})$
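###Markdown
(Added illustration - a minimal sketch, not part of the original analysis, simulating an AR(1) process to make the Auto-Regressive equation above concrete; all names here are only for illustration.)
###Code
import numpy as np

# X_t = c + phi * X_{t-1} + eps_t  (an AR process with p = 1)
rng = np.random.default_rng(0)
c, phi, n = 0.5, 0.7, 200
x_sim = np.zeros(n)
for t in range(1, n):
    x_sim[t] = c + phi * x_sim[t - 1] + rng.normal()
x_sim[:5]
###Output
_____no_output_____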
###Code
import datetime as dt
import numpy as np
import pandas as pd
from pandas import DataFrame
from pandas.plotting import lag_plot
from pandas.plotting import autocorrelation_plot
from statsmodels.tsa.arima_model import ARIMA
from sklearn.metrics import mean_squared_error
import pandas_datareader.data as web
import matplotlib.pyplot as plt
start = dt.datetime(2020, 1, 1)
end = dt.datetime(2020, 7, 28)
data = web.DataReader("BTC-USD", "yahoo", start, end)
print(data.head())
autocorrelation_plot(data["Close"])
plt.show()
plt.figure(figsize=(10,10))
lag_plot(data['Close'], lag=10)
plt.title('BTC Autocorrelation plot')
model = ARIMA(data["Close"].values, order=(10,1,0))
model_fit = model.fit(disp=0)
print(model_fit.summary())
residuals = DataFrame(model_fit.resid)
residuals.plot()
plt.show()
residuals.plot(kind='kde')
plt.show()
print(residuals.describe())
df = pd.DataFrame(data, columns=["Close", "Volume"])
df = df.reset_index()
train = df[df["Date"] < "2020-07-01"]
test = df[df["Date"] >= "2020-07-01"]
plt.figure(figsize=(16,10))
plt.grid(True)
plt.xlabel('Date')
plt.ylabel('Price (USD)')
plt.plot(train['Date'], train['Close'], 'blue', label = 'Train data')
plt.plot(test['Date'], test['Close'], 'red', label = 'Test data')
plt.title('BTC Price (USD)')
plt.show()
train_val = train['Close'].values
test_val = test['Close'].values
model = ARIMA(train_val, order=(10,1,0))
model_fit = model.fit(disp=0)
predictions = model_fit.forecast(len(test_val))
error = mean_squared_error(test_val, predictions[0])
print('Testing Mean Squared Error: %.3f' % error)
plt.figure(figsize=(16,10))
plt.grid(True)
plt.plot(train.index, train['Close'], color='blue', label='Training Data')
plt.plot(test.index, test['Close'], color='red', label='Actual Price')
plt.plot(test.index, predictions[0], color='green', label='Predicted Price')
plt.title('BTC Price 28-days Prediction')
plt.xlabel('Dates')
plt.ylabel('Price (USD)')
plt.legend()
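# Walk-forward (rolling) validation: refit the ARIMA on all data seen so far,
# forecast one day ahead, then append the true observation to the history.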
history = [x for x in train_val]
predictions = []
for t in range(len(test_val)):
model = ARIMA(history, order=(10,1,0))
model_fit = model.fit(disp=0)
output = model_fit.forecast()
yhat = output[0]
predictions.append(yhat)
obs = test_val[t]
history.append(obs)
error = mean_squared_error(test_val, predictions)
print('Testing Mean Squared Error: %.3f' % error)
plt.figure(figsize=(16,10))
plt.grid(True)
plt.plot(train.index, train['Close'], color='blue', label='Training Data')
plt.plot(test.index, test['Close'], color='red', label='Actual Price')
plt.plot(test.index, predictions, color='green', label='Predicted Price')
plt.title('BTC Price 1-day Prediction Rolling')
plt.xlabel('Dates')
plt.ylabel('Price (USD)')
plt.legend()
print(len(test_val))
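# Weekly rolling forecast: forecast 7 days at a time, then append the 7 true values
# to the history before refitting (4 blocks x 7 days = 28 test days).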
history = [x for x in train_val]
predictions = []
for t in range(4):
model = ARIMA(history, order=(10,1,0))
model_fit = model.fit(disp=0)
output = model_fit.forecast(7)
yhats = output[0]
predictions.extend(yhats)
history.extend(test_val[t * 7 : (t + 1) * 7])
error = mean_squared_error(test_val, predictions)
print('Testing Mean Squared Error: %.3f' % error)
plt.figure(figsize=(16,10))
plt.grid(True)
plt.plot(train.index, train['Close'], color='blue', label='Training Data')
plt.plot(test.index, test['Close'], color='red', label='Actual Price')
plt.plot(test.index, predictions, color='green', label='Predicted Price')
plt.title('BTC Price Weekly Prediction Rolling')
plt.xlabel('Dates')
plt.ylabel('Price (USD)')
plt.legend()
###Output
_____no_output_____ |
Day-2/Task-2/PY0101EN-1-3-Operaters.ipynb | ###Markdown
Python Operators`Operators` are used to perform `operations` on variables and values.Python divides the operators into the following groups:1. Arithmetic operators2. Assignment operators3. Comparison operators4. Logical operators5. Identity operators6. Membership operators7. Bitwise operators Arithmetic operators**Arithmetic operators** are used to perform mathematical operations like addition, subtraction, multiplication etc. Arithmetic operators in Python:

| Operator | Meaning | Example |
| --- | --- | --- |
| `+` | Add two operands, or unary plus | `x + y` |
| `-` | Subtract the right operand from the left, or unary minus | `x - y` |
| `*` | Multiply two operands | `x * y` |
| `/` | Divide the left operand by the right one (always results in a float) | `x / y` (quotient of x/y) |
| `%` | Modulus - remainder of the division of the left operand by the right | `x % y` (remainder of x/y) |
| `//` | Floor division - division that results in a whole number adjusted to the left on the number line | `x // y` |
| `**` | Exponent - left operand raised to the power of the right | `x**y` (x to the power y) |
###Code
a = 10
b = 20
a+b
a-b
a*b
a/b
a//b
a%b
a**b
###Output
_____no_output_____
###Markdown
Special operatorsThe Python language offers some `special types of operators`, such as: 1. Identity operators [is, is not] 2. Membership operators [in, not in] They are described below with examples. Identity operators* `is` and `is not` are the identity operators in Python. * They are used to check whether two values (or variables) are located in the same part of memory. * Two variables being equal does not imply that they are identical.
###Code
X = 5
y = 5
x1 = "Hello"
y2 = "Hello"
x1 is X
X is y
'H' is y2
x1 is y2
X is not y2
X is not y
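# Added illustration: equal values do not have to be the same object
a_list = [1, 2]
b_list = [1, 2]
print(a_list == b_list, a_list is b_list)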
###Output
_____no_output_____
###Markdown
Membership operators* `in` and `not in` are the **membership operators** in Python. * They are used to test whether a value or variable is found in a sequence **(string, list, tuple, set and dictionary)**.* In a dictionary we can only test for presence of key, not the value.
###Code
a = "Hello"
'H' in a
"he" not in a
'He' not in a
a = '10'
b = "10"
a in b
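# Added illustration: membership on a dict tests keys, not values
d = {"one": 1, "two": 2}
print("one" in d, 1 in d)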
###Output
_____no_output_____ |
real_data/gnldr/same_cond_transfer_analysis/generate_transfer_analysis_plots.ipynb | ###Markdown
A notebook for generating the final results for a fully cross-validated transfer analysis
###Code
%load_ext autoreload
%autoreload 2
import pathlib
import re
import matplotlib.pyplot as plt
import numpy as np
import torch
from janelia_core.utils.file_system import get_immediate_subfolders
%matplotlib notebook
###Output
_____no_output_____
###Markdown
Parameters go here
###Code
# A list of base_folders with the results of different analyses. A single analysis consists of
# running the full cross-validated results with multiple amounts of training data for models fit
# both individually and combined, with a *single* set of parameters. In this convention, we could
# run different analyses using different numbers of hypercubes in the prior, for example, and then compare results.
base_folders = [r'/groups/bishop/bishoplab/projects/probabilistic_model_synthesis/results/real_data/gnldr/same_cond_transfer_analysis/v2']
# The names of files holding post-processed results for each type of analysis
results_files = ['pp_test_results.pt']
# Subjects we want to evaluate performance on
eval_subjs = [8]#, 9, 10, 11]
subj_clrs = np.asarray([[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 1.0],
[1.0, 1.0, 0.0]])
# Training quantities we want to evaluate performance on
tq_strings = ['fold_str_base_14_tgt_1']#,
#'fold_str_base_14_tgt_2',
#'fold_str_base_14_tgt_4',
#'fold_str_base_14_tgt_8',
#'fold_str_base_14_tgt_14']
tq_fracs = np.asarray([1.0/14])#,
# 2.0/14,
# 4.0/14,
# 8.0/14,
# 14.0/14.0])
###Output
_____no_output_____
###Markdown
Define helper functions here
###Code
def get_analysis_results(base_folder, results_file, fit_type: str = 'ip', data_type: str = 'test'):
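    """ Collects ELBO values from the post-processed results files.

    (Docstring added for clarity.) Returns a nested dict of the form
    {training_quantity_folder: {fold: {eval_subject: {fit_type_folder: elbo}}}}.
    """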
training_quantity_folders = get_immediate_subfolders(base_folder)
training_quantity_folders = tq_strings
tq_rs = dict()
for tq_folder in training_quantity_folders:
#print('TQ folder: ' + tq_folder)
tq_folder_path = pathlib.Path(base_folder) / tq_folder
fold_folders = get_immediate_subfolders(tq_folder_path)
n_folds = len(fold_folders)
fold_rs = dict()
for fold_folder in fold_folders:
cur_fold = int(re.match('.*_(\d*)', fold_folder)[1])
#print('Fold: ' + str(cur_fold))
fold_folder_path = pathlib.Path(tq_folder_path) / fold_folder
subj_folders = get_immediate_subfolders(fold_folder_path)
n_subjs = len(subj_folders)
subj_rs = dict()
for subj_folder in subj_folders:
#print('Subject folder: ' + subj_folder)
subj_folder_path = pathlib.Path(fold_folder_path) / subj_folder
type_folders = get_immediate_subfolders(subj_folder_path)
eval_subj = int(re.match('.*_(\d*)', subj_folder)[1])
#print('Eval Subject: ' + str(eval_subj))
type_rs = dict()
for type_folder in type_folders:
#print('Type Folder: ' + str(type_folder))
cur_type = type_folder
type_folder_path = pathlib.Path(subj_folder_path) / type_folder
results_file_path = type_folder_path / results_file
#print('Results file path: ' + str(results_file_path))
c_rs = torch.load(results_file_path)
elbo = c_rs[fit_type]['elbo_vls'][eval_subj][data_type]['elbo'].item()
type_rs[cur_type] = elbo
subj_rs[eval_subj] = type_rs
fold_rs[cur_fold] = subj_rs
tq_rs[tq_folder] = fold_rs
return tq_rs
def get_subj_rs(rs, subj, fit_type: str = 'ind'):
""" Gets average performance for a single subject, for each for fold, for a single fit type
for a single training quantity. """
n_folds = len(rs)
folds = np.sort(np.asarray(list(rs.keys())))
fold_rs = np.zeros(n_folds)
for f_i, f_n in enumerate(folds):
fold_rs[f_i] = np.mean(rs[f_n][subj][fit_type])
return fold_rs
def get_avg_fit_type_rs_for_fixed_training_quantity(rs, subjs, fit_type: str = 'ind'):
""" Gets average and standard error of performance across folds for multiple subjects for a single fit type
and for a single training quantity."""
n_subjs = len(subjs)
mn_rs = np.zeros(n_subjs)
std_er_rs = np.zeros(n_subjs)
for s_i, subj in enumerate(subjs):
fold_rs = get_subj_rs(rs, subj=subj, fit_type=fit_type)
mn_rs[s_i] = np.mean(fold_rs)
std_er_rs[s_i] = np.std(fold_rs)/np.sqrt(len(fold_rs))
return [mn_rs, std_er_rs]
def get_fit_type_rs(rs, train_quantity_keys, subjs, fit_type: str = 'ind'):
n_train_quantity_keys = len(train_quantity_keys)
n_subjs = len(subjs)
mn_rs = np.zeros([n_train_quantity_keys, n_subjs])
std_er_rs = np.zeros([n_train_quantity_keys, n_subjs])
for tq_i, tq_key in enumerate(train_quantity_keys):
mn_rs[tq_i, :], std_er_rs[tq_i, :] = get_avg_fit_type_rs_for_fixed_training_quantity(rs[tq_key], subjs, fit_type)
return mn_rs, std_er_rs
c_rs = get_analysis_results(base_folders[0], results_files[0])
comb_rs = get_fit_type_rs(c_rs, tq_strings, subjs=eval_subjs, fit_type='comb')
ind_rs = get_fit_type_rs(c_rs, tq_strings, subjs=eval_subjs, fit_type='ind')
comb_rs
ind_rs
comb_avg = np.mean(comb_rs[0], axis=1)
ind_avg = np.mean(ind_rs[0], axis=1)
###Output
_____no_output_____
###Markdown
Plot results
###Code
plt.figure()
ax = plt.subplot(1,1,1)
for s_i, subj in enumerate(eval_subjs):
plt.plot(tq_fracs, comb_rs[0][:, s_i], '-', color=subj_clrs[s_i])
plt.legend(eval_subjs)
plt.xlabel('Training Percentage')
plt.ylabel('ELBO')
for s_i, subj in enumerate(eval_subjs):
plt.plot(tq_fracs, ind_rs[0][:, s_i], '--', color=subj_clrs[s_i])
#ax.set_ylim([0, 1])
plt.figure()
plt.plot(tq_fracs, comb_avg, 'k-')
plt.plot(tq_fracs, ind_avg, 'k--')
###Output
_____no_output_____ |
section_4/04_exercise.ipynb | ###Markdown
ExercisesLet's get comfortable working with partial derivatives, gradient descent, and Napier's number (e). 1. Find the partial derivativesPartially differentiate the following function of two variables with respect to $x$ and $y$. $$ f(x,y)=2x^3+4x^2y+xy^2-4y^2 $$You may write your answer on paper or in a text cell using LaTeX. 2. Escaping a local minimumIf you run the gradient descent code below as-is, it gets trapped in a local minimum. Change the initial value of `x` so that it reaches the global minimum.
###Code
import numpy as np
import matplotlib.pyplot as plt
def my_func(x):  # function whose minimum we want to find
    return x**4 - 2*x**3 - 3*x**2 + 2*x
def grad_func(x):  # derivative of my_func
    return 4*x**3 - 6*x**2 - 6*x + 2
eta = 0.01  # constant (learning rate)
x = -1.6  # === change the initial value of x here ===
record_x = []  # record of x values
record_y = []  # record of y values
for i in range(20):  # update x 20 times
    y = my_func(x)
    record_x.append(x)
    record_y.append(y)
    x -= eta * grad_func(x)  # (Equation 1)
x_f = np.linspace(-1.6, 2.8)  # display range
y_f = my_func(x_f)
plt.plot(x_f, y_f, linestyle="dashed")  # draw the function as a dashed line
plt.scatter(record_x, record_y)  # show the recorded x and y values as points
plt.xlabel("x", size=14)
plt.ylabel("y", size=14)
plt.grid()
plt.show()
###Output
_____no_output_____
###Markdown
3. Computing Napier's numberGradually increase the value of $n$ in the following formula and confirm in code that $a_n$ approaches Napier's number (e).$$ a_n = \Bigl(1+\frac{1}{n}\Bigr)^n$$
###Code
# Napier's number: e = 2.71828 18284 59045 23536 02874 71352 …
import numpy as np
def approach_napier(n):
return (1 + 1/n)**n
n_list = [2, 4, 10]  # add even larger values to this list
for n in n_list:
print("a_"+ str(n) + " =", approach_napier(n))
###Output
_____no_output_____
###Markdown
Example solutionsThe following are example solutions. 1. Find the partial derivatives$$ \frac{\partial}{\partial x}f(x,y) = 6x^2+8xy+y^2$$$$ \frac{\partial}{\partial y}f(x,y) = 4x^2+2xy-8y$$ 2. Escaping a local minimum
###Code
import numpy as np
import matplotlib.pyplot as plt
def my_func(x):  # function whose minimum we want to find
    return x**4 - 2*x**3 - 3*x**2 + 2*x
def grad_func(x):  # derivative of my_func
    return 4*x**3 - 6*x**2 - 6*x + 2
eta = 0.01  # constant (learning rate)
x = 1.0  # === change the initial value of x here ===
record_x = []  # record of x values
record_y = []  # record of y values
for i in range(20):  # update x 20 times
    y = my_func(x)
    record_x.append(x)
    record_y.append(y)
    x -= eta * grad_func(x)  # (Equation 1)
x_f = np.linspace(-1.6, 2.8)  # display range
y_f = my_func(x_f)
plt.plot(x_f, y_f, linestyle="dashed")  # show the function as a dashed line
plt.scatter(record_x, record_y)  # show the recorded x and y values
plt.xlabel("x", size=14)
plt.ylabel("y", size=14)
plt.grid()
plt.show()
###Output
_____no_output_____
###Markdown
3. Computing Napier's number
###Code
# Napier's number: e = 2.71828 18284 59045 23536 02874 71352 …
import numpy as np
def approach_napier(n):
return (1 + 1/n)**n
n_list = [2, 4, 10, 100, 1000, 10000]  # add even larger numbers to this list
for n in n_list:
print("a_"+ str(n) + " =", approach_napier(n))
###Output
_____no_output_____ |
GridModel_GridImpact/SupplementPlotting/supfig17_fuelprices.ipynb | ###Markdown
Supplementary Figure - Changing gas generator fuel pricesSiobhan Powell, 2021.
###Code
import os
os.chdir('../')
from matplotlib.gridspec import GridSpec
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.patches import Patch
from matplotlib.lines import Line2D
def load_emissions_values(noev_scenario, fuel=1, solar=3.5, wind=3, folder='Fuel1_Solar35_Wind3', storage_before='_storagebefore', penlevel=0.5, date='20220408'):
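    """(Docstring added for clarity.) Collects EV grid-emissions intensities, in g CO2 per mile
    (assuming 5 miles/kWh, as noted in the comment below), with charging-control scenarios as rows
    and access scenarios as columns; returns the values both as a numpy array and as a labelled DataFrame."""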
scens1 = ['_Timers9pm_noWPcontrol', '_Timers12am_noWPcontrol', '_TimersRandom_noWPcontrol', '_TimersNone_noWPcontrol', '_TimersNone_WPcontrol_minpeak', '_TimersNone_WPcontrol_avgem']
scens2 = ['UniversalHome', 'HighHome', 'LowHome_HighWork', 'LowHome_LowWork']
vals1 = np.zeros((7, 5)) # overgeneration
tables_dfs1 = pd.DataFrame(np.zeros((7, 5)),
index=['_Timers9pm_noWPcontrol', '_Timers12am_noWPcontrol','_TimersRandom_noWPcontrol', '_TimersNone_noWPcontrol', '_TimersNone_WPcontrol_minpeak', '_TimersNone_WPcontrol_avgem', '_TimersMixed_WPcontrol_minpeak'],
columns=['UniversalHome', 'HighHome', 'LowHome_HighWork', 'LowHome_LowWork', 'BusinessAsUsual'])
for i, scen1 in enumerate(scens1):
for j, scen2 in enumerate(scens2):
overgen = None
dpdf = pd.read_csv('Results/'+folder+'/fuel'+str(fuel)+'_solar'+str(solar)+'_wind'+str(wind)+'_'+scen2+scen1+'_penlevel'+str(penlevel)+storage_before+'_withstorage_dpdf_'+date+'.csv')
# assumes 5 miles / kWh
vals1[i, j] = 0.2 * (dpdf.co2_tot.sum() - noev_scenario.co2_tot.sum()) / (dpdf.total_incl_noncombustion.sum() - noev_scenario.total_incl_noncombustion.sum()) # Emissions / total miles
tables_dfs1.loc[scen1, scen2] = 0.2 * (dpdf.co2_tot.sum() - noev_scenario.co2_tot.sum()) / (dpdf.total_incl_noncombustion.sum() - noev_scenario.total_incl_noncombustion.sum()) # Emissions / total miles
return vals1, tables_dfs1
noev_scenario = pd.read_csv('Results/NoEVs_year2035_solar3.5x_wind3x_withstorage_dpdf_20220408.csv')
vals_1, tables_dfs_1 = load_emissions_values(noev_scenario, date='20220506', penlevel=1.0, fuel=0.5, solar=3.5, wind=3, folder='Fuel05_Solar35_Wind3')
vals_2, tables_dfs_2 = load_emissions_values(noev_scenario, penlevel=1.0, fuel=1, solar=3.5, wind=3, folder='Fuel1_Solar35_Wind3')
vals_3, tables_dfs_3 = load_emissions_values(noev_scenario, date='20220506', penlevel=1.0, fuel=2, solar=3.5, wind=3, folder='Fuel2_Solar35_Wind3')
fig, axes = plt.subplots(1, 3, figsize=(12, 4.5), sharey=False, sharex=True)
titles = ['UH', 'HH', 'LHHW', 'LHLW']
colors = ['#d7301f', '#fc8d59', '#91cf60', '#737373', '#9ebcda', '#88419d']
control_labels = ['9pm SFH Timers', '12am SFH Timers', 'Random SFH Timers', 'Uncontrolled', 'Min(Peak) Work Control', 'Min(Avg Em) Work Control']
for i in range(3):
axes[i].set_xticks(np.arange(0, 4))
axes[i].set_xticklabels(labels=titles, fontsize=11.5)
axes[i].set_xlabel('Access Scenario', fontsize=16)
shifts = [-0.2, -0.1, 0, 0.1, 0.2]
ms = [8, 8, 8, 14, 8, 8]
lines = ['-P', '-X', '-d', '-*', '-^','-v']
for control_idx in range(6):
axes[0].plot(np.arange(0, 4), vals_1[control_idx, np.arange(0, 4)], lines[control_idx], color=colors[control_idx], ms=ms[control_idx], label=control_labels[control_idx], zorder=1)
for control_idx in range(6):
axes[1].plot(np.arange(0, 4), vals_2[control_idx, np.arange(0, 4)], lines[control_idx], color=colors[control_idx], ms=ms[control_idx], zorder=1)
for control_idx in range(6):
axes[2].plot(np.arange(0, 4), vals_3[control_idx, np.arange(0, 4)], lines[control_idx], color=colors[control_idx], ms=ms[control_idx], zorder=1)
axes[0].set_xlim([-0.5, 3.5])
axes[0].set_yticks(np.arange(42, 52))
axes[0].set_yticklabels(np.arange(42, 52), fontsize=16)
axes[1].set_yticks(np.arange(89, 94))
axes[1].set_yticklabels(np.arange(89, 94), fontsize=16)
axes[2].set_yticks(np.arange(137,141))
axes[2].set_yticklabels(np.arange(137,141), fontsize=16)
for i in range(3):
axes[i].set_axisbelow(True)
axes[i].grid(axis='y')
axes[0].set_title('0.5X Natural Gas Prices', fontsize=16)
axes[1].set_title('1X Natural Gas Prices', fontsize=16)
axes[2].set_title('2X Natural Gas Prices', fontsize=16)
axes[0].legend()
axes[0].set_ylabel('EV Grid Emissions [g CO2 / mile]', fontsize=14)
plt.tight_layout()
plt.savefig('SupplementPlotting/Plots/supfig17_2035.pdf', bbox_inches='tight')
plt.show()
(vals_1[vals_1>0].max() - vals_1[vals_1>0].min())/ vals_1[vals_1>0].max()
(vals_2[vals_2>0].max() - vals_2[vals_2>0].min())/ vals_2[vals_2>0].max()
(vals_3[vals_3>0].max() - vals_3[vals_3>0].min())/ vals_3[vals_3>0].max()
###Output
_____no_output_____ |
further_resources_learn/nn_gridsearchcv/nn_gridsearchcv__randomized_search__doc.ipynb | ###Markdown
Comparing randomized search and grid search for hyperparameter estimationCompare randomized search and grid search for optimizing hyperparameters of arandom forest.All parameters that influence the learning are searched simultaneously(except for the number of estimators, which poses a time / quality tradeoff).The randomized search and the grid search explore exactly the same space ofparameters. The result in parameter settings is quite similar, while the runtime for randomized search is drastically lower.The performance is slightly worse for the randomized search, though thisis most likely a noise effect and would not carry over to a held-out test set.Note that in practice, one would not search over this many different parameterssimultaneously using grid search, but pick only the ones deemed most important.-- **NOTE:** This is sourced from ```scikit-learn``` learning module found here: https://scikit-learn.org/stable/auto_examples/model_selection/plot_randomized_search.htmlsphx-glr-auto-examples-model-selection-plot-randomized-search-py --
###Code
print(__doc__)
import numpy as np
from time import time
from scipy.stats import randint as sp_randint
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
from sklearn.datasets import load_digits
from sklearn.ensemble import RandomForestClassifier
# get some data
digits = load_digits()
X, y = digits.data, digits.target
# build a classifier
clf = RandomForestClassifier(n_estimators=20)
# Utility function to report best scores
def report(results, n_top=3):
for i in range(1, n_top + 1):
candidates = np.flatnonzero(results['rank_test_score'] == i)
for candidate in candidates:
print("Model with rank: {0}".format(i))
print("Mean validation score: {0:.3f} (std: {1:.3f})".format(
results['mean_test_score'][candidate],
results['std_test_score'][candidate]))
print("Parameters: {0}".format(results['params'][candidate]))
print("")
# specify parameters and distributions to sample from
param_dist = {"max_depth": [3, None],
"max_features": sp_randint(1, 11),
"min_samples_split": sp_randint(2, 11),
"bootstrap": [True, False],
"criterion": ["gini", "entropy"]}
# run randomized search
n_iter_search = 20
random_search = RandomizedSearchCV(clf, param_distributions=param_dist,
n_iter=n_iter_search, cv=5, iid=False)
start = time()
random_search.fit(X, y)
print("RandomizedSearchCV took %.2f seconds for %d candidates"
" parameter settings." % ((time() - start), n_iter_search))
report(random_search.cv_results_)
# use a full grid over all parameters
param_grid = {"max_depth": [3, None],
"max_features": [1, 3, 10],
"min_samples_split": [2, 3, 10],
"bootstrap": [True, False],
"criterion": ["gini", "entropy"]}
# run grid search
grid_search = GridSearchCV(clf, param_grid=param_grid, cv=5, iid=False)
start = time()
grid_search.fit(X, y)
print("GridSearchCV took %.2f seconds for %d candidate parameter settings."
% (time() - start, len(grid_search.cv_results_['params'])))
report(grid_search.cv_results_)
###Output
Automatically created module for IPython interactive environment
RandomizedSearchCV took 5.49 seconds for 20 candidates parameter settings.
Model with rank: 1
Mean validation score: 0.936 (std: 0.025)
Parameters: {'bootstrap': False, 'criterion': 'entropy', 'max_depth': None, 'max_features': 5, 'min_samples_split': 2}
Model with rank: 2
Mean validation score: 0.930 (std: 0.021)
Parameters: {'bootstrap': True, 'criterion': 'entropy', 'max_depth': None, 'max_features': 7, 'min_samples_split': 3}
Model with rank: 3
Mean validation score: 0.928 (std: 0.028)
Parameters: {'bootstrap': True, 'criterion': 'entropy', 'max_depth': None, 'max_features': 9, 'min_samples_split': 9}
GridSearchCV took 18.47 seconds for 72 candidate parameter settings.
Model with rank: 1
Mean validation score: 0.937 (std: 0.028)
Parameters: {'bootstrap': False, 'criterion': 'gini', 'max_depth': None, 'max_features': 3, 'min_samples_split': 3}
Model with rank: 2
Mean validation score: 0.937 (std: 0.017)
Parameters: {'bootstrap': False, 'criterion': 'entropy', 'max_depth': None, 'max_features': 10, 'min_samples_split': 2}
Model with rank: 3
Mean validation score: 0.932 (std: 0.020)
Parameters: {'bootstrap': False, 'criterion': 'gini', 'max_depth': None, 'max_features': 10, 'min_samples_split': 2}
|
evaluations/sars-cov-2/template-3-index-genomes.ipynb | ###Markdown
1. Parameters
###Code
# Defaults
cases_dir = 'cases/unset'
reference_file = 'references/NC_045512.gbk.gz'
input_files_all = 'input/input-files.tsv'
iterations = 3
mincov = 10
ncores = 32
number_samples = 10
build_tree = False
sample_batch_size=2000
from pathlib import Path
from shutil import rmtree
from os import makedirs
import imp
fp, pathname, description = imp.find_module('gdi_benchmark', ['../../lib'])
gdi_benchmark = imp.load_module('gdi_benchmark', fp, pathname, description)
cases_dir_path = Path(cases_dir)
if cases_dir_path.exists():
rmtree(cases_dir_path)
if not cases_dir_path.exists():
makedirs(cases_dir_path)
input_files_all = Path(input_files_all)
reference_file = Path(reference_file)
case_name = str(cases_dir_path.name)
reference_name = reference_file.name.split('.')[0]
cases_input = cases_dir_path / 'input-files-case.tsv'
index_path = cases_dir_path / 'index'
benchmark_path = cases_dir_path / 'index-info.tsv'
output_tree = cases_dir_path / 'tree.tre'
###Output
_____no_output_____
###Markdown
2. Create subset input
###Code
import pandas as pd
all_input_df = pd.read_csv(input_files_all, sep='\t')
all_input_total = len(all_input_df)
subset_input_df = all_input_df.head(number_samples)
subset_input_total = len(subset_input_df)
subset_input_df.to_csv(cases_input, sep='\t', index=False)
print(f'Wrote {subset_input_total}/{all_input_total} samples to {cases_input}')
###Output
_____no_output_____
###Markdown
3. Index genomes
###Code
!gdi --version
###Output
_____no_output_____
###Markdown
3.1. Index reads
###Code
results_handler = gdi_benchmark.BenchmarkResultsHandler(name=case_name)
benchmarker = gdi_benchmark.IndexBenchmarker(benchmark_results_handler=results_handler,
index_path=index_path, input_files_file=cases_input,
reference_file=reference_file, mincov=mincov,
build_tree=build_tree,
ncores=ncores,
sample_batch_size=sample_batch_size)
benchmark_df = benchmarker.benchmark(iterations=iterations)
benchmark_df
benchmark_df.to_csv(benchmark_path, sep='\t', index=False)
###Output
_____no_output_____
###Markdown
4. Export trees
###Code
if build_tree:
!gdi --project-dir {index_path} export tree {reference_name} > {output_tree}
print(f'Wrote tree to {output_tree}')
else:
print(f'build_tree={build_tree} so no tree to export')
###Output
_____no_output_____ |
learning-rate-experiment.ipynb | ###Markdown
__Question__: how effective is CMA actually?
###Code
import cma  # assumed import (the pycma package); not shown in the extracted cells

xopt, es = cma.fmin2(cma.ff.elli, 5 * [1], 1, {
'ftarget':1e-7,
'CMA_on': 1,
});
cma.plot();
es.sm.C
###Output
_____no_output_____
###Markdown
Take Home Messages------------------- make quick experiments- display everything nicely (in particular conveniently readable)- learn how to read graphs - check displayed x- and y-ranges- direct visual comparison is powerful __Question__: what does the dependency on the `CMA_on` factor actually look like?__Remark__: `CMA_on` is a multiplier for the learning rate of the covariance matrix
###Code
import ast  # assumed import: ast.literal_eval is used below to reload saved results
import collections as cs  # assumed alias: cs.defaultdict is used below

dimension = 3
evals = cs.defaultdict(list)
stops = cs.defaultdict(list)
if 11 < 3:
with open('_evals-%dD.py' % dimension, 'rt') as file:
evals.update(ast.literal_eval(file.read()))
with open('_stops-%dD.py' % dimension, 'rt') as file:
stops.update(ast.literal_eval(file.read()))
factors = [1, 2, 1/2, 4, 1/4, 8, 1/8, 1/16, 1/32, 1/64]
for irun in range(3):
for factor in factors:
es = cma.CMAEvolutionStrategy(dimension * [1], 1, {
'ftarget': 1e-9,
'verbose':-9,
'CMA_on': factor
})
try:
es.optimize(cma.ff.elli)
except:
print('!', end='')
evals[factor] += [es.result.evaluations]
stops[factor] += [es.stop()]
print(factor, evals[factor][-1], end=' | ')
with open('_evals-%dD.py' % dimension, 'wt') as file:
file.write(repr(dict(evals)))
with open('_stops-%dD.py' % dimension, 'wt') as file:
file.write(repr(dict(stops)))
###Output
_____no_output_____ |
docs/src/notebooks/interactive/magres_plotting.ipynb | ###Markdown
Analysing and plotting magnetic resonance data This notebook contains a quick example of plotting data stored in magres files, without performing any referencing.
###Code
from matador.scrapers import magres2dict
from matador.plotting import plot_magres
magres, failures = magres2dict("*.magres", as_model=True)
for doc in magres:
print(doc)
doc.print_sites()
###Output
Li3P: LiP_CASTEP18.magres
=========================
20 atoms. P1
(a, b, c) = 4.1333 Å, 6.0638 Å, 12.5450 Å
(α, β, γ) = 88.4518° 80.5130° 90.0085°
---
0 Li 0.0916 0.3584 0.9424
chemical_shielding_iso = 83.7003
magnetic_shielding_tensor =
[[ 8.16e+01 6.11e-03 -4.30e-02]
[ 1.27e-02 7.99e+01 1.36e+00]
[-5.07e-02 2.13e+00 8.96e+01]]
chemical_shift_aniso = 9.3772
chemical_shift_asymmetry = 0.3284
---
1 Li 0.1983 0.7851 0.7195
chemical_shielding_iso = 84.3395
magnetic_shielding_tensor =
[[8.40e+01 9.13e-02 4.66e-02]
[1.30e-01 8.70e+01 9.94e-01]
[9.87e-02 1.57e+00 8.21e+01]]
chemical_shift_aniso = 4.4293
chemical_shift_asymmetry = 0.7621
---
2 Li 0.2807 0.9980 0.5589
chemical_shielding_iso = 83.3730
magnetic_shielding_tensor =
[[ 8.12e+01 -1.51e-02 -4.42e-02]
[ 1.19e-02 8.44e+01 -4.39e+00]
[ 2.60e-03 -4.26e+00 8.45e+01]]
chemical_shift_aniso = 8.1433
chemical_shift_asymmetry = 0.1863
---
3 Li 0.3473 0.7369 0.4079
chemical_shielding_iso = 86.6380
magnetic_shielding_tensor =
[[ 8.52e+01 4.23e-02 -2.41e-02]
[ 4.89e-02 8.82e+01 1.01e+00]
[-2.43e-03 7.34e-01 8.65e+01]]
chemical_shift_aniso = 2.9372
chemical_shift_asymmetry = 0.4602
---
4 Li 0.4349 0.4794 0.2566
chemical_shielding_iso = 83.3435
magnetic_shielding_tensor =
[[81.2 0.13 -0.17]
[ 0.13 84.39 -4.39]
[-0.09 -4.27 84.44]]
chemical_shift_aniso = 8.1073
chemical_shift_asymmetry = 0.2058
---
5 Li 0.4413 0.0210 0.2380
chemical_shielding_iso = 85.0509
magnetic_shielding_tensor =
[[ 8.30e+01 8.90e-02 -4.87e-02]
[ 1.22e-01 8.63e+01 1.20e+00]
[ 4.24e-04 1.24e+00 8.59e+01]]
chemical_shift_aniso = 3.4360
chemical_shift_asymmetry = 0.8373
---
6 Li 0.5173 0.6897 0.0948
chemical_shielding_iso = 84.4205
magnetic_shielding_tensor =
[[8.39e+01 4.68e-02 7.09e-02]
[1.32e-01 8.73e+01 1.02e+00]
[8.51e-02 1.60e+00 8.21e+01]]
chemical_shift_aniso = 4.7370
chemical_shift_asymmetry = 0.6511
---
7 Li 0.6253 0.1122 0.8730
chemical_shielding_iso = 83.7612
magnetic_shielding_tensor =
[[ 8.17e+01 1.11e-02 -1.06e-01]
[ 4.81e-03 8.00e+01 1.34e+00]
[-1.34e-01 2.11e+00 8.95e+01]]
chemical_shift_aniso = 9.1321
chemical_shift_asymmetry = 0.3245
---
8 Li 0.6906 0.4626 0.7373
chemical_shielding_iso = 82.8192
magnetic_shielding_tensor =
[[ 7.98e+01 1.35e-01 -7.89e-02]
[ 1.16e-01 8.67e+01 -5.36e+00]
[-8.88e-02 -3.92e+00 8.19e+01]]
chemical_shift_aniso = 10.0677
chemical_shift_asymmetry = 0.1121
---
9 Li 0.7305 0.0937 0.6585
chemical_shielding_iso = 83.6264
magnetic_shielding_tensor =
[[ 8.39e+01 -2.15e-02 -2.34e-02]
[-1.26e-02 8.57e+01 5.72e+00]
[-5.97e-02 5.90e+00 8.13e+01]]
chemical_shift_aniso = -9.5122
chemical_shift_asymmetry = 0.9216
---
10 Li 0.8485 0.4570 0.4171
chemical_shielding_iso = 84.9298
magnetic_shielding_tensor =
[[ 9.07e+01 1.70e-01 -6.90e-02]
[ 1.65e-01 8.44e+01 1.00e+00]
[ 8.56e-03 4.94e-01 7.97e+01]]
chemical_shift_aniso = 8.6833
chemical_shift_asymmetry = 0.8534
---
11 Li 0.8585 0.0210 0.3974
chemical_shielding_iso = 84.9095
magnetic_shielding_tensor =
[[ 9.08e+01 -8.07e-02 -9.23e-02]
[-5.25e-02 8.44e+01 1.10e+00]
[-2.69e-02 5.81e-01 7.96e+01]]
chemical_shift_aniso = 8.7771
chemical_shift_asymmetry = 0.8596
---
12 Li 0.9853 0.3823 0.1564
chemical_shielding_iso = 83.6033
magnetic_shielding_tensor =
[[ 8.39e+01 -8.91e-03 -3.55e-02]
[-2.42e-03 8.57e+01 5.72e+00]
[-8.39e-02 5.95e+00 8.12e+01]]
chemical_shift_aniso = -9.5887
chemical_shift_asymmetry = 0.9116
---
13 Li 0.0239 0.0140 0.0804
chemical_shielding_iso = 82.6785
magnetic_shielding_tensor =
[[ 7.96e+01 1.38e-01 -8.10e-02]
[ 1.13e-01 8.66e+01 -5.51e+00]
[-1.02e-01 -4.10e+00 8.18e+01]]
chemical_shift_aniso = 10.3680
chemical_shift_asymmetry = 0.1147
---
14 Li 0.2721 0.4547 0.5780
chemical_shielding_iso = 85.0693
magnetic_shielding_tensor =
[[ 8.29e+01 1.08e-01 3.27e-03]
[ 1.34e-01 8.64e+01 1.12e+00]
[-5.75e-03 1.17e+00 8.59e+01]]
chemical_shift_aniso = 3.3677
chemical_shift_asymmetry = 0.9200
---
15 P 0.1818 0.2116 0.7570
chemical_shielding_iso = 350.0256
magnetic_shielding_tensor =
[[ 8.80e+01 5.62e-01 -3.10e-01]
[-8.16e-01 5.39e+02 9.52e+00]
[-9.03e-01 -6.02e+01 4.23e+02]]
chemical_shift_aniso = -392.9941
chemical_shift_asymmetry = 0.4806
---
16 P 0.3536 0.2387 0.4077
chemical_shielding_iso = 500.3197
magnetic_shielding_tensor =
[[497.42 10.84 12.11]
[ 0.66 418.21 -66.52]
[ 3.68 -66.28 585.33]]
chemical_shift_aniso = 162.6815
chemical_shift_asymmetry = 0.9530
---
17 P 0.5341 0.2630 0.0584
chemical_shielding_iso = 353.3182
magnetic_shielding_tensor =
[[ 9.25e+01 5.45e-01 1.54e+00]
[-1.74e-01 5.40e+02 1.12e+01]
[-9.61e-01 -5.78e+01 4.27e+02]]
chemical_shift_aniso = -391.2453
chemical_shift_asymmetry = 0.4668
---
18 P 0.7688 0.7316 0.5822
chemical_shielding_iso = 530.9353
magnetic_shielding_tensor =
[[ 4.13e+02 8.13e+00 4.95e+00]
[ 6.32e-01 5.05e+02 -9.77e+00]
[ 1.06e+00 -4.89e+01 6.75e+02]]
chemical_shift_aniso = 223.9065
chemical_shift_asymmetry = 0.5876
---
19 P 0.9456 0.7440 0.2335
chemical_shielding_iso = 531.2349
magnetic_shielding_tensor =
[[411.31 8.65 7.53]
[ 0.72 505.85 -7.57]
[ 1.75 -45.03 676.55]]
chemical_shift_aniso = 223.9919
chemical_shift_asymmetry = 0.6112
---
Na3P: NaP_QE6.magres
====================
4 atoms. Fm-3m
(a, b, c) = 5.0075 Å, 5.0075 Å, 5.0075 Å
(α, β, γ) = 120.0020° 120.0020° 89.9965°
---
0 Na 0.0000 0.0000 0.0000
chemical_shielding_iso = 518.1528
magnetic_shielding_tensor =
[[518.53 -0. 0. ]
[ -0. 518.53 0. ]
[ -0. 0. 517.4 ]]
chemical_shift_aniso = -1.1365
chemical_shift_asymmetry = -0.0000
---
1 Na 0.2500 0.7500 0.5000
chemical_shielding_iso = 467.6065
magnetic_shielding_tensor =
[[468.02 -0. 0. ]
[ -0. 468.02 0. ]
[ 0. 0. 466.79]]
chemical_shift_aniso = -1.2261
chemical_shift_asymmetry = -0.0000
---
2 Na 0.7500 0.2500 0.5000
chemical_shielding_iso = 467.6065
magnetic_shielding_tensor =
[[468.02 0. -0. ]
[ 0. 468.02 0. ]
[ -0. 0. 466.79]]
chemical_shift_aniso = -1.2261
chemical_shift_asymmetry = -0.0000
---
3 P 0.5000 0.5000 0.0000
chemical_shielding_iso = 275.3424
magnetic_shielding_tensor =
[[273.7 -0. -0. ]
[ -0. 273.7 0. ]
[ -0. 0. 278.62]]
chemical_shift_aniso = 4.9146
chemical_shift_asymmetry = 0.0000
---
###Markdown
Species as separate figures
###Code
plot_magres(magres, "P")
plot_magres(magres, "Na")
plot_magres(magres, "Li", broadening_width=0.01);
###Output
No sites of Na found in LiP_CASTEP18.magres, signal will be empty.
No sites of Li found in NaP_QE6.magres, signal will be empty.
###Markdown
Species as subplots with custom colours and labels
###Code
import matplotlib.pyplot as plt
fig, axes = plt.subplots(2, 1, figsize=(8, 6))
line_kwargs = [{"c": "red", "ls": "--"}, {"c": "black", "ls": "-."}]
labels = ["B", "A"]
plot_magres(
magres, "Na",
ax=axes[0], line_kwargs=line_kwargs, signal_labels=labels
)
plot_magres(
magres, "Li", broadening_width=0.01,
ax=axes[1], line_kwargs=line_kwargs, signal_labels=labels
)
plt.tight_layout()
###Output
No sites of Na found in LiP_CASTEP18.magres, signal will be empty.
No sites of Li found in NaP_QE6.magres, signal will be empty.
###Markdown
Other magres quantities as subplots
###Code
import matplotlib.pyplot as plt
fig, axes = plt.subplots(2, 1, figsize=(8, 6))
plot_magres(
magres, "P", magres_key="chemical_shift_aniso",
ax=axes[0]
)
plot_magres(
magres, "P", magres_key="chemical_shift_asymmetry",
ax=axes[1],
broadening_width=0,
)
plt.tight_layout()
###Output
_____no_output_____ |
Chapter04_Numerical_Computation.ipynb | ###Markdown
4.1 Overflow and Underflow As said in the book, most of you deep learning developers / engineers don't have to bother thinking about overflows and underflows. Most of us, including me, use high-level libraries. If you are curious, you can always dig into the low-level libraries where some people have done a lot of work already. Surprisingly, you will find some "hacks" too, which might not make 100% sense to you if you are a math-nerd. As of writing this text, 30 July, 2020, most deep learning developers use either tensorflow or pytorch as their main deep learning framework. All of the low-level implementations are done by the contributors. Nonetheless, it's always fun to try out some examples!
###Code
import numpy as np
###Output
_____no_output_____
###Markdown
Softmax is a very easy and popular function used throughout deep learning. You can think of it as a generalized logistic function, which we have learned in [the previous chapter](https://github.com/tae898/DeepLearning/blob/master/Chapter03_Probability_and_Information_Theory.ipynb).
###Code
def logistic(scalar):
"""The logistic function.
Parameters
----------
scalar: a float-like
Returns
-------
logistic: a float-like
"""
    return 1 / (1 + np.exp(-scalar))
def softmax(vec):
"""Softmax function.
Parameters
----------
vec: a numpy-array like
a vector-like
Returns
-------
softmax: a numpy-array like
a vector-like
"""
return np.exp(vec) / np.exp(vec).sum()
###Output
_____no_output_____
###Markdown
The input to the softmax function should be a vector whose length is more than one. Let's say $x=[-2, 1.5, 0.5]$. When we plug this vector into the softmax function, it returns a probability distribution.
###Code
x = [-2, 1.5, 0.5]
softmax(x)
###Output
_____no_output_____
###Markdown
Each value in the returned vector is a probability and the sum of them should be 1, since it's a probability distribution. Let's recall the logistic function that we have learned in the previous chapter. It expects a scalar real number as input and outputs a probability, whose value is between 0 and 1. This can be thought of as the softmax function when the input vector has length 2. I will show you below. Let's say $$x = [x_{1}, x_{2}] \tag{1}$$ When we plug this vector into the softmax function, the output is $[\frac{e^{x_1}}{e^{x_1} + e^{x_2}}, \frac{e^{x_2}}{e^{x_1} + e^{x_2}}] \tag{2}$ This can be re-written as $[\frac{1}{1 + e^{-(x_1 - x_2)}}, \frac{1}{1 + e^{x_1 - x_2}}] \tag{3}$ From the softmax function point of view, when the input is $x=[x_1 - x_2, 0]$, what we get is identical to equations (2) and (3). Let's do the math. $[\frac{e^{x_1-x_2}}{e^{x_1-x_2} + e^{0}}, \frac{e^{0}}{e^{x_1-x_2} + e^{0}}] = [\frac{1}{1 + e^{-(x_1 - x_2)}}, \frac{1}{1 + e^{x_1 - x_2}}] \tag{4}$ This means that when the input to the softmax is a vector of length 2, we can always make it look like $[t, 0]$ by subtracting the second element. Recall that $logistic(t) = \frac{1}{1+e^{-t}}$, which is the probability of the first element of equations (3) and (4), when $t=x_1 - x_2$. What this is telling us is that, when the softmax input is a vector of length 2, we can simply reduce it to the sigmoid function. Having two elements as input to the softmax is redundant: once we have worked out the probability of the first element $p$, the second is just $1-p$ anyway. Enough with maths.
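###Markdown
Before going back to overflow and underflow, here is a quick numerical check of the equivalence we just derived (an added sketch; the test value $t$ is arbitrary): `softmax([t, 0])[0]` should match `logistic(t)`.
###Code
# Added illustration: softmax over [t, 0] reduces to the logistic function
t = 1.7
print(softmax(np.array([t, 0.0]))[0])
print(logistic(t))
###Output
_____no_output_____
###Markdown
Now, back to overflow and underflow. The cell below will throw an overflow warning.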
###Code
x = np.array([1e10, 0.1, -123])
softmax(x)
###Output
/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:15: RuntimeWarning: overflow encountered in exp
from ipykernel import kernelapp as app
/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:15: RuntimeWarning: invalid value encountered in true_divide
from ipykernel import kernelapp as app
###Markdown
Obviously, calculating $e^{10^{10}}$ results in a very big number. As said in the book, we can subtract the maximum value from every element in the input vector, since this doesn't change the output.
###Code
x = np.array([1e10, 0.1, -123])
x = x - max(x)
softmax(x)
###Output
_____no_output_____
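###Markdown
We can wrap this trick into a small reusable function (an added sketch, not part of the original notebook):
###Code
def softmax_stable(vec):
    """Softmax with the max-subtraction trick, safe for very large inputs."""
    shifted = vec - np.max(vec)
    return np.exp(shifted) / np.exp(shifted).sum()

softmax_stable(np.array([1e10, 0.1, -123]))
###Output
_____no_output_____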
###Markdown
Let's try underflow.
###Code
x = np.array([-1e10, -5e10])
softmax(x)
###Output
/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:15: RuntimeWarning: invalid value encountered in true_divide
from ipykernel import kernelapp as app
###Markdown
`invalid value encountered in true_divide` is a warning that numpy throws when it encounters division by 0.This can also be solved by subtracting the maximum value.
###Code
x = np.array([-1e10, -5e10])
x = x - max(x)
softmax(x)
###Output
_____no_output_____
###Markdown
logsoftmax mentioned in the book is nothing but the function composition of log and softmax. $log(softmax(\pmb{x}))$ is what it means. Let's say the vector $x=[-1000, 0.1]$.
###Code
x = np.array([-1000, 0.1])
softmax(x)
###Output
_____no_output_____
###Markdown
As you can see -1000 is already a pretty small number and when this goes to the softmax function, it results in a probability value of 0.
###Code
np.log(softmax(x))
###Output
/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:15: RuntimeWarning: overflow encountered in exp
from ipykernel import kernelapp as app
/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:15: RuntimeWarning: invalid value encountered in true_divide
from ipykernel import kernelapp as app
/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:1: RuntimeWarning: divide by zero encountered in log
"""Entry point for launching an IPython kernel.
###Markdown
That's why the above error happens! One of the hacks we can do is to add a very small value to the probabilities so that none of them are 0.
###Code
x = np.array([-1000, 0.1])
z = softmax(x)
z += 1e-100
np.log(z)
###Output
_____no_output_____
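###Markdown
A cleaner alternative to the epsilon hack is to compute log-softmax directly with the log-sum-exp trick: $\log \mathrm{softmax}(x)_i = x_i - \max(x) - \log\sum_j e^{x_j - \max(x)}$. This way we never form an intermediate probability of exactly 0. A minimal sketch (added for illustration):
###Code
def log_softmax(vec):
    """Numerically stable log-softmax using the log-sum-exp trick."""
    shifted = vec - np.max(vec)
    return shifted - np.log(np.exp(shifted).sum())

log_softmax(np.array([-1000, 0.1]))
###Output
_____no_output_____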
###Markdown
4.2 Poor Conditioning
###Code
import numpy as np
from numpy import linalg as LA
w
###Output
_____no_output_____ |
model_script.ipynb | ###Markdown
IMPORT & SETUP
###Code
import math
import numpy as np
import pandas as pd
from sklearn.model_selection import GridSearchCV, cross_val_score, cross_val_predict, StratifiedKFold
from sklearn.linear_model import LogisticRegressionCV
from sklearn.naive_bayes import GaussianNB
from sklearn import preprocessing, metrics, svm, ensemble
from sklearn.metrics import accuracy_score, classification_report
import tabpy_client
import tabpy
import tabpy_server
#Google sheets -------------------------------------------------------
import gspread
from oauth2client.service_account import ServiceAccountCredentials
# pip install statsmodels
###Output
_____no_output_____
###Markdown
Set up Google Sheets
###Code
scope = ["https://spreadsheets.google.com/feeds",'https://www.googleapis.com/auth/spreadsheets',"https://www.googleapis.com/auth/drive.file","https://www.googleapis.com/auth/drive"]
creds = ServiceAccountCredentials.from_json_keyfile_name("personal-finance-aug30-5f28b13e4879.json", scope)
sheetsclient = gspread.authorize(creds)
workbook = sheetsclient.open("personal_finance_test") # Open the workbook
###Output
_____no_output_____
###Markdown
Read Data from Google Sheets
###Code
sheet = sheetsclient.open("personal_finance_test").worksheet('portfolio_returns') # Open the spreadsheet with the portfolio returns
# Build a DataFrame from all of the records in the worksheet
df = pd.DataFrame(sheet.get_all_records()) # Get a list of all records
df = df.set_index("Date")
# ARIMA example
from statsmodels.tsa.arima_model import ARIMA
# contrived dataset
# data = [x + random() for x in range(1, 100)]
# fit model
model = ARIMA(df['FXAIX'], order=(3, 1, 1))
model_fit = model.fit(disp=False)
print(model_fit.summary())
# multi-step out-of-sample forecast
forecast = model_fit.forecast(steps=365)[0]
[*forecast]
# Use LabelEncoder to convert textual classifications to numeric.
# We will use the same encoder later to convert them back.
encoder = preprocessing.LabelEncoder()
df['Class'] = encoder.fit_transform(df['Class'])
def ARIMA(fund):
from statsmodels.tsa.arima_model import ARIMA
model = ARIMA(df[fund], order=(3, 1, 1))
model_fit = model.fit(disp=False)
# print(model_fit.summary())
forecast = model_fit.forecast(steps=30)[0]
return [*forecast]
# Connect to TabPy server using the client library
connection = tabpy_client.Client('http://localhost:9004/')
connection.deploy('ARIMADemo',ARIMA,'Returns ARIMA forecast for next 30 days',override=True)
def add(x,y):
import numpy as np
return np.add(x, y).tolist()
client.deploy('add', add, 'Adds two numbers x and y',override = True)
from tabpy.tabpy_tools.client import Client
# import tabpy-tools
# import tabpy
client = Client('http://localhost:9004/')
lst = [1,2,3]
# float(lst)
list(map(lambda x:x*x,lst))
###Output
_____no_output_____ |
Coursera ML Course - AndrewNG/week 3/ex2/.ipynb_checkpoints/Logistic Regression - Regularization ( Python )-checkpoint.ipynb | ###Markdown
Visualizing Data
###Code
sns.lmplot(x='second', y='third', data=df, hue='result', size=9, fit_reg=False, scatter_kws={"s": 100})
# Plotting boundary line
sns.lmplot(x='second', y='third', data=df, hue='result', size=9, fit_reg=False, scatter_kws={"s": 100})
plt.contour(u,v,z,1)
###Output
_____no_output_____
###Markdown
Computations Section
###Code
# Initialization
m_row = X.shape[0]
# Creating new features for getting more complicated plot
X_new = mapFeature(X)
m_column = X_new.shape[1]
_lambda = 0
theta = pd.Series(np.zeros(m_column))
gradient_function(theta, X_new, y, _lambda).T[0:5]
cost_function(theta,X_new, y, _lambda)
xopt = fmin_bfgs(f= cost_function,
x0= theta,
fprime= gradient_function,
args=(X_new,y, _lambda),
maxiter=400)
# Here is the grid range
u = np.linspace(-1,1.5,50)
v = np.linspace(-1,1.5,50)
z = np.zeros((u.size,v.size))
for i in range(u.size):
for j in range(v.size):
dd = pd.DataFrame([1, u[i], v[j]]).T
dd.columns = ['one', 'second', 'third']
z[i,j] = mapFeature(dd).dot(xopt)
z = z.T
###Output
_____no_output_____
###Markdown
Functions Section
###Code
# Feature mapping
def mapFeature(X, degree= 7) :
count = 0;
X_new = pd.DataFrame(np.ones(X.shape[0]))
for i in range(degree):
for j in range(i + 1):
X_new[count] = ( X['second'] ** (i - j) ) * ( X['third'] ** j )
count += 1
return X_new
#functions Sections
def sigmoid(x):
return ( 1 / ( 1 + e ** ( -1 * x)))
def cost_function(theta,X,y, _lam):
J = 0
# finding hypothesis
h = pd.Series(np.dot( theta.T, X.T ).T)
# Computing Log(sigmoid(x)) for all of the hypotesis elements
h1 = sigmoid(h).apply(log)
# Computing Log( 1 - simgoid(x)) for all of the hypotesis elements
h2 = (1.0000000001 - sigmoid(h)).apply(log)
    #Computing Cost of the hypothesis (the intercept theta[0] is not regularized)
    J = ( -1 / m_row ) * ( y.T.dot(h1) + ( 1 - y ).T.dot(h2)) + ( _lam / ( 2 * m_row ) ) * sum( theta[1:] ** 2 )
return J
def gradient_function( theta,X, y, _lam):
    # finding hypothesis matrix
h = pd.Series(np.dot( theta.T, X.T ).T)
h = sigmoid(h)
    # Computing the Gradient of the Hypothesis
grad = pd.Series(np.zeros(m_column))
grad[0] = ( 1 / m_row ) * ( ( h - y ).T.dot(X[0]).T )
grad[1:] = ( 1 / m_row ) * ( ( h - y ).T.dot( X.T[1:].T ).T ) + ( _lam / m_row ) * theta[1:]
return grad
def gradient_algo(X, y, theta, _lam):
for n in range(iterations):
# finding gradient of each element
grad = gradient_function(X, y, theta, _lam)
# decreasing theta
theta = theta - alpha * ( grad )
#saving all of the costs
global last_j
last_j[n] = cost_function(X, y, theta, _lam)
return theta
###Output
_____no_output_____ |
notebooks/create-surge-lookup-tables.ipynb | ###Markdown
Create pyCIAM Storm Costs Lookup Table Calculating the storm costs in a CIAM model involves a numerical integration over both elevation and the quantiles of storm surge at each segment-ADM1 location. This is too computationally intensive to run for all seg-ADMs for each year for all SLR trajectories, especially when using pyCIAM to run a Monte Carlo analysis across tens of thousands of SLR trajectories. Instead, we build a lookup table indexed by seg-ADM, LSLR, adaptation type (retreat vs. protect), cost type (mortality vs. capital loss), and `rhdiff` (the difference between the retreat/protect height and lslr). This is similar to how it is treated in the original CIAM model except that:
1. We use a lookup table rather than a parameterized exponential function of `rhdiff` and `lslr`.
2. We account for elevational heterogeneity in population and capital when evaluating our costs in retreat scenarios. The original CIAM included `lslr` in their exponential function only for the protect adaptation type, while for `noAdaptation` and `retreat`, the function was only of `rhdiff`.
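###Markdown
As a schematic illustration of the lookup-table idea (a toy sketch only; the names and shapes are illustrative and this is not the actual pyCIAM data structure): costs are pre-computed on a grid of `lslr` and `rhdiff` values, and then cheaply interpolated at arbitrary query points instead of re-running the expensive integration.
###Code
# Toy sketch of grid pre-computation + interpolation (illustrative names only)
import numpy as np
import xarray as xr

lslr_grid = np.linspace(0, 3, 10)
rhdiff_grid = np.linspace(0, 10, 20)
toy_costs = xr.DataArray(
    np.add.outer(lslr_grid ** 2, rhdiff_grid),  # stand-in for the expensive surge integration
    coords={"lslr": lslr_grid, "rhdiff": rhdiff_grid},
    dims=["lslr", "rhdiff"],
)
# cheap lookup at an arbitrary point
toy_costs.interp(lslr=1.23, rhdiff=4.56)
###Output
_____no_output_____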
###Code
%load_ext autoreload
%autoreload 2
###Output
_____no_output_____
###Markdown
Setup
###Code
import distributed as dd
import pandas as pd
from dask_gateway import Gateway
from shared import (
PATH_PARAMS,
PATH_SLIIDERS_ECON,
PATH_SLIIDERS_ECON_SEG,
PATH_SLIIDERS_SLR_QUANTILES,
PATH_SURGE_LOOKUP,
PATH_SURGE_LOOKUP_SEG,
upload_pkg,
)
from pyCIAM.surge import damage_funcs
from pyCIAM.surge.lookup import create_surge_lookup
N_WORKERS = 700
SEG_CHUNKSIZE = 4
PARAMS = pd.read_json(PATH_PARAMS)["values"]
DMF_I = getattr(damage_funcs, PARAMS.dmf + "_i")
DDF_I = getattr(damage_funcs, PARAMS.ddf + "_i")
gateway = Gateway()
cluster = gateway.new_cluster(
idle_timeout=3600,
profile="micro",
env_items={
"DASK_DISTRIBUTED__WORKER__MEMORY__TARGET": "0.95",
"DASK_DISTRIBUTED__WORKER__MEMORY__SPILL": "0.95",
"DASK_DISTRIBUTED__WORKER__MEMORY__PAUSE": "0.95",
"DASK_DISTRIBUTED__WORKER__MEMORY__TERMINATE": "0.99",
},
)
client = cluster.get_client()
cluster.scale(N_WORKERS)
client.upload_file("shared.py")
upload_pkg(client, "../pyCIAM")
cluster
###Output
_____no_output_____
###Markdown
Run surge damage calculations for each combo
###Code
client.wait_for_workers(N_WORKERS * 0.75)
futs = create_surge_lookup(
PATH_SLIIDERS_ECON,
PATH_SLIIDERS_SLR_QUANTILES,
PATH_SURGE_LOOKUP,
"seg_adm",
PARAMS.at_start,
PARAMS.n_interp_pts_lslr,
PARAMS.n_interp_pts_rhdiff,
DDF_I,
DMF_I,
client=client,
client_kwargs={"batch_size": N_WORKERS},
force_overwrite=False,
dmf_kwargs={"floodmortality": PARAMS.floodmortality},
seg_chunksize=4,
)
futs_seg = create_surge_lookup(
PATH_SLIIDERS_ECON_SEG,
PATH_SLIIDERS_SLR_QUANTILES,
PATH_SURGE_LOOKUP_SEG,
"seg",
PARAMS.at_start,
PARAMS.n_interp_pts_lslr,
PARAMS.n_interp_pts_rhdiff,
DDF_I,
DMF_I,
client=client,
client_kwargs={"batch_size": N_WORKERS},
force_overwrite=True,
dmf_kwargs={"floodmortality": PARAMS.floodmortality},
seg_chunksize=4,
)
###Output
_____no_output_____
###Markdown
Close
###Code
# ensure completion and close cluster
dd.wait(futs + futs_seg)
cluster.close(), client.close()
###Output
_____no_output_____ |
jupyter_notebooks/0020_naive_bayes_classifier.ipynb | ###Markdown
Naive Bayes classifier
###Code
import pandas as pd
from sklearn import metrics
from sklearn.naive_bayes import GaussianNB
from sklearn.model_selection import train_test_split
df = pd.read_pickle('cvr_prediction_20201228.pkl')
feature_col_names = ['ari_class', 'bormuth_score', 'bormuth_class', 'coleman_liau_class',
'flesch_class', 'flesch_kincaid_class', 'fog_score', 'fog_class',
'lix_class', 'rix_score', 'rix_class', 'smog_class', 'strain_class',
'aws', 'pdw', 'pew', 'ppw', 'psw', 'puw', 'sentences']
predicted_class_names = ['cvr_class']
x = df[feature_col_names].values
y = df[predicted_class_names].values
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=42)
###Output
_____no_output_____
###Markdown
Validate distribution
###Code
print('{:.2f}% in training set'.format((len(x_train) / len(df)) * 100))
print('{:.2f}% in test set'.format((len(x_test) / len(df)) * 100))
print('Original 0: {} ({:.2f}%)'.format(len(df.loc[df.cvr_class == 0]),
(len(df.loc[df.cvr_class == 0]) / len(df)) * 100.0))
print('Original 1: {} ({:.2f}%)'.format(len(df.loc[df.cvr_class == 1]),
(len(df.loc[df.cvr_class == 1]) / len(df)) * 100.0))
print('Training 0: {} ({:.2f}%)'.format(len(y_train[y_train[:] == 0]),
(len(y_train[y_train[:] == 0]) / len(y_train) * 100.0)))
print('Training 1: {} ({:.2f}%)'.format(len(y_train[y_train[:] == 1]),
(len(y_train[y_train[:] == 1]) / len(y_train) * 100.0)))
print('Test 0: {} ({:.2f}%)'.format(len(y_test[y_test[:] == 0]),
(len(y_test[y_test[:] == 0]) / len(y_test) * 100.0)))
print('Test 1: {} ({:.2f}%)'.format(len(y_test[y_test[:] == 1]),
(len(y_test[y_test[:] == 1]) / len(y_test) * 100.0)))
###Output
_____no_output_____
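###Markdown
The same distribution check can be written more compactly with pandas (an added sketch, equivalent to the prints above):
###Code
# Compact class-balance check using value_counts
print(pd.Series(y_train.ravel()).value_counts(normalize=True).round(4))
print(pd.Series(y_test.ravel()).value_counts(normalize=True).round(4))
###Output
_____no_output_____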
###Markdown
Train
###Code
model = GaussianNB()
model.fit(x_train, y_train.ravel())
nb_predict_test = model.predict(x_test)
###Output
_____no_output_____
###Markdown
Results
###Code
print('Confusion Matrix:')
print('{}'.format(metrics.confusion_matrix(y_test, nb_predict_test, labels=[1, 0])))
print(metrics.classification_report(y_test, nb_predict_test, labels=[1,0]))
metrics.accuracy_score(y_test, nb_predict_test).round(6) # 0.6986301369863014
###Output
_____no_output_____ |
notebooks_dev/DS_01_preprocess.ipynb | ###Markdown
Define module in which the `export` tag will save the code in `src`
###Code
#default_exp ds__preprocess
###Output
_____no_output_____
###Markdown
Import modules that are only used for documentation, nbdev-related work (like testing using assert) and, more generally, only inside this notebook (not going to src).
###Code
#hide
from nbdev.showdoc import *
%load_ext autoreload
%autoreload 2 #autoreload to make code from other modules get updated online inside notebook
import sys
sys.path.append('..') #appends project root to path in order to import project packages since `notebooks_dev` is not on the root
#DO NOT EDIT
#hide
#Internal Imports
#imports that are going to be used only during development and are not intended to be loaded inside the generated modules.
#for example: use imported modules to generate graphs for documentation, but lib is unused in actual package
#import ...
###Output
_____no_output_____
###Markdown
Data Science Preprocess
> Module containing data preprocessing functionalities to be used in the Data Science pipelines

Dev comments

TODOs - THIS IS TEMPLATE CONTENT - THIS IS TEMPLATE CONTENT
- [X] TODO: do something
- [ ] TODO: do something else

Notebook History - THIS IS TEMPLATE CONTENT - THIS IS TEMPLATE CONTENT
- 16/02 - developed feature A as requested by business team
- 17/02 - couldn't quite understand specific business rule, request explanation from business team
- 21/02 - business rule is now clearly explained, foo should be run before bar and not otherwise

Code session

External imports
> imports that are intended to be loaded in the actual modules (going to src), e.g. module dependencies
###Code
#export
#import ...
###Output
_____no_output_____
###Markdown
THIS IS TEMPLATE CONTENT-
###Code
#export
def func(a,b):
'''
    a function that adds a and b
'''
return a + b
###Output
_____no_output_____
###Markdown
`func` comments and usage examples for documentation:
###Code
func(1,2)
###Output
_____no_output_____
###Markdown
THIS IS TEMPLATE CONTENT--
###Code
#export
class Class:
'''
a class to apply a function through the apply method
'''
def __init__(self, func):
assert callable(func), 'func should be callable type'
self.func = func
def apply(self, *args, **kwargs):
return self.func(*args, **kwargs)
###Output
_____no_output_____
###Markdown
`Class` comments and usage examples for documentation
###Code
cls_instance = Class(sum)
cls_instance.apply([1,2,3,4])
###Output
_____no_output_____
###Markdown
Code created from src> Session concainning code generated in src (.py files) and converted back to notebook using nbdev_update_lib command
###Code
#export
##############
#we highly recommend creating code from the notebook instead
#of creating it from src and running nbdev_update_lib. Still, if you want
#to proceed, please create your new code below this tag before running nbdev_update_lib
##############
###Output
_____no_output_____
###Markdown
Experiment session> Session to run the code and test functions and classes generated in this notebook. Helpful for documentation and experimental development. Tests> Session to write tests in the nbdev fashion (using assert)
###Code
def test_1(a,b):
'''
tests if func is returning a sum
'''
return func(a,b) == a + b
assert test_1(1,2)
print('Passed!')
###Output
Passed!
###Markdown
Export -
###Code
#hide
from nbdev.export import notebook2script
notebook2script()
###Output
_____no_output_____ |
dataproject/Southamerica_energy_use.ipynb | ###Markdown
Data analysis project: Energy use in South America We conduct this data analysis project with inspiration from the paper "Triangular Relationship between Energy Consumption, Price Index and National Income in Asian Countries: A Pooled Mean Group Approach in Presence of Structural Breaks" by Mehmood, Raza, Rana, Sohaib and Khan. It is a well known fact that energy consumption is a key factor in economic activity. The article mentioned above is the starting point for this project. We apply simple descriptive economic skills to investigate the relationship between energy use and GDP per capita. We apply the analysis to the countries of South America instead of Asian countries. Importing packages We import wb from pandas_datareader so we can extract data directly from the World Bank.
###Code
#Install packages usefull for this workbook
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import ipywidgets as widgets
from ipywidgets import interact
from pandas_datareader import wb
###Output
_____no_output_____
###Markdown
Read and clean data Select countries and indicators We select the 12 countries of South America. For the analysis we use the indicators energy use per capita in kilograms of oil equivalent and GDP per capita in 2010 USD. The data covers the years 1971 to 2014 and is downloaded from the World Bank Development Indicators.
###Code
# a: Select countries
countries = ['ARG', 'BOL', 'BRA', 'CHL', 'COL', 'ECU', 'GUY', 'PRY', 'PER', 'VEN', 'URY', 'SUR']
# b: Select indiacators: GDP per capita and Energy use
euse = wb.download(indicator='EG.USE.PCAP.KG.OE', country=countries, start=1971, end=2014)
print("This is what the first indicator looks like:")
euse.head(3)
gdp = wb.download(indicator='NY.GDP.PCAP.KD', country=countries, start=1971, end=2014)
print("This is what the second indicator looks like:")
gdp.head(3)
###Output
This is what the second indicator looks like:
###Markdown
Note: For further analysis, compare with the consumer price index: `cpi = wb.download(indicator='FP.CPI.TOTL', country=countries, start=1971, end=2014)` and `cpi.head(3)` Cleaning data We have downloaded the two indicators separately, so we want to merge the two datasets. Next, we sort by countries and year to get the structure of the dataset as we want it.
###Code
# a: Merging the two data sets retaining the indicators
joiningdata = pd.merge(euse,gdp, how='inner', on=['country','year'])
joiningdata = joiningdata.reset_index()
joiningdata = joiningdata.rename(columns = {'country' : 'countries',
'EG.USE.PCAP.KG.OE' : 'euse',
'NY.GDP.PCAP.KD' : 'gdp'})
joiningdata['year'] = joiningdata.year.astype(float)
# b: Sorting data
joiningdata.sort_values(by=['countries','year'], inplace=True)
joiningdata = joiningdata.reset_index(drop = True)
###Output
_____no_output_____
###Markdown
The data now looks like this:
###Code
# Printing the 20 first rows of the dataset
joiningdata.head(20)
###Output
_____no_output_____
###Markdown
**Deleting rows with missing data** The countries Guyana and Suriname are missing a lot of data and we therefore exclude them from our analysis. We delete the rows with missing data by dropping the rows where the country name is Guyana or Suriname. We ensure that rows have been deleted by checking the number of observations before and after.
###Code
print(f'before: {joiningdata.shape[0]} observations, {joiningdata.shape[1]} variables')
for val in ['Guyana', 'Suriname']:
I = joiningdata.countries.str.contains(val)
joiningdata.drop(joiningdata[I].index, inplace=True)
print(f'after: {joiningdata.shape[0]} observations, {joiningdata.shape[1]} variables')
###Output
before: 528 observations, 4 variables
after: 440 observations, 4 variables
###Markdown
We want to take a look at the mean of the two indicators for each country.
###Code
joiningdata.groupby('countries').mean()
###Output
_____no_output_____
###Markdown
The table above shows the mean of energy use and gdp for each country.We note that Venezuela and Argentina are the countries with the highest mean of energy use. They have as well high mean of gdp along with Brazil and Chile. AnalysisThe data set is cleaned. We begin the analysis of investigating the relationship between economic growth and energy use.First in section 4.1 we take a look at the growth in GDP per capita. We calculate the percentage change to see which country has the greatest economic development from 1971 to 2014. Next in section 4.2 we apply the same analysis with the indicator energy use to see the development in energy use throughout the years. We present the relevant results in tables and graphs. Economic growth GDP per CapitaWe calculate the percentage change in GDP for each country by: $ growth_ {GDP}= \frac{GDP_{2014} - GDP_{1971}}{GDP_{1971}} \bullet 100 \%$We calculate the percentage change by creating two new columns containing respectively the first and the last value of gdp, i.e. the value of GDp in 1971 and 2014.
###Code
# Calculate percentage change in gdp
joiningdata['gdp_pct'] = joiningdata.groupby('countries')['gdp'].pct_change() *100
# Economic growth
# Creating a column containing the value of gdp for each country year 1971
joiningdata_2=joiningdata.copy()
joiningdata_grouped = joiningdata_2.groupby('countries')
joiningdata_grouped_first = joiningdata_grouped.gdp.first()
joiningdata_grouped_first.name = 'first'
# The column is added to joiningdata_2.
joiningdata_2.set_index(['countries','year'],inplace=True)
joiningdata_2 = joiningdata_2.join(joiningdata_grouped_first)
joiningdata_2.reset_index(inplace=True)
# Creating a column containing the value of gdp for each country year 2014
joiningdata_grouped_last = joiningdata_grouped.gdp.last()
joiningdata_grouped_last.name = 'last'
# The column is added to joiningdata_2.
joiningdata_2.set_index(['countries','year'],inplace=True)
joiningdata_2 = joiningdata_2.join(joiningdata_grouped_last)
joiningdata_2.reset_index(inplace=True)
# Calculating the economic growth for each country in the period 1971-2014
joiningdata_2['growth_gdp'] = (joiningdata_2['last']-joiningdata_2['first'])/joiningdata_2['first']*100
#Table showing the total gdp growth rate for each country
joiningdata_mean=joiningdata_2.groupby('countries').mean().copy()
joiningdata_mean.drop(['year', 'euse', 'gdp', 'gdp_pct', 'first', 'last'],axis=1,inplace=True)
joiningdata_mean
# Sorting data:
#Add names
joiningdata_mean.sort_values(by=['growth_gdp'], inplace=True)
joiningdata_mean = joiningdata_mean.reset_index()
###Output
_____no_output_____
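###Markdown
For reference, the same per-country growth figure can be computed more compactly with groupby aggregations (an added sketch; it relies on the data already being sorted by year within each country, as done above):
###Code
# Compact equivalent of the first/last approach above
grp = joiningdata.groupby('countries')['gdp']
growth_gdp_compact = (grp.last() - grp.first()) / grp.first() * 100
growth_gdp_compact
###Output
_____no_output_____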
###Markdown
The table below shows GDP growth rate for each country:
###Code
joiningdata_mean
###Output
_____no_output_____
###Markdown
As can be seen, Chile, Paraguay, Colombia, Uruguay and Brazil have the highest growth in GDP over the 44 years. The five countries are represented in the graph below.
###Code
#Making a graph showing the 5 countries of South America that has the highest GDP growth.
top5_gdp = joiningdata_2[joiningdata_2["countries"].isin(['Chile', 'Paraguay', 'Colombia', 'Uruguay', 'Brazil'])]
def plot(fig):
fig_gdp_change = fig.set_index('year')
    fig_gdp_change.groupby(['countries'])['gdp'].plot(legend=True, grid=True, title='The 5 countries with the highest growth in GDP per capita');
plot(top5_gdp)
###Output
_____no_output_____
###Markdown
The graph shows that Brazil, Chile and Uruguay have experienced somewhat similar growth rates in the years from 1971 to 2014. The growht rate in GDP of Paraguay is substantially lower. Growth in energy use Energy useWe calculate the percentage change in energy use for each country by: $ growth_ {euse}= \frac{euse_{2014} - euse_{1971}}{euse_{1971}} \bullet 100 \%$The procedure is the same as the one conducted for GDP growth above.
###Code
# Creating a column containing the value of energy use for each country year 1971
joiningdata_3=joiningdata.copy()
joiningdata_grouped = joiningdata_3.groupby('countries')
joiningdata_grouped_first = joiningdata_grouped.euse.first()
joiningdata_grouped_first.name = 'first'
# The column is added to joiningdata_2.
joiningdata_3.set_index(['countries','year'],inplace=True)
joiningdata_3 = joiningdata_3.join(joiningdata_grouped_first)
joiningdata_3.reset_index(inplace=True)
# Creating a column containing the value of gdp for each country year 2014
joiningdata_grouped_last = joiningdata_grouped.euse.last()
joiningdata_grouped_last.name = 'last'
# The column is added to joiningdata_2.
joiningdata_3.set_index(['countries','year'],inplace=True)
joiningdata_3 = joiningdata_3.join(joiningdata_grouped_last)
joiningdata_3.reset_index(inplace=True)
# Calculating the economic growth for each country in the period 1971-2014
joiningdata_3['growth_euse'] = (joiningdata_3['last']-joiningdata_3['first'])/joiningdata_3['first']*100
#Table showing the total gdp growth rate for each country
joiningdata_mean=joiningdata_3.groupby('countries').mean().copy()
joiningdata_mean.drop(['year', 'euse', 'gdp', 'gdp_pct', 'first', 'last'],axis=1,inplace=True)
joiningdata_mean
# Sorting data:
joiningdata_mean.sort_values(by=['growth_euse'], inplace=True)
joiningdata_mean = joiningdata_mean.reset_index()
###Output
_____no_output_____
###Markdown
The table below shows the energy use growth rate for each country:
###Code
joiningdata_mean
###Output
_____no_output_____
###Markdown
As seen in the table, the 5 countries with the highest growth in energy use are Bolivia, Ecuador, Chile, Brazil and Uruguay. These countries are represented in the graph below.
###Code
#Making a graph showing the 5 countries of South America that has the highest growth in energy use.
top5_euse = joiningdata_2[joiningdata_2["countries"].isin(['Bolivia', 'Ecuador', 'Chile', 'Brazil', 'Uruguay'])]
def plot(fig):
fig_gdp_change = fig.set_index('year')
    fig_gdp_change.groupby(['countries'])['euse'].plot(legend=True, grid=True, title='The 5 countries with the highest growth in energy use');
plot(top5_euse)
###Output
_____no_output_____
###Markdown
The graph shows that Chile is the country with the highest growth rate of energy use. Chile is also the only country experiencing negative growth in the last couple of years. Brazil and Uruguay seem to follow the same growth rate since the mid 00's, as do Bolivia and Ecuador. We have constructed an interactive figure for the comparison of two countries. In the first dropdown you can choose whether you want to look at GDP or energy use, and the next two dropdowns are for choosing countries. You can compare the two countries with the highest energy use, or the two countries with the highest and the lowest energy use, respectively, to see the difference.
###Code
def plot_euse_gdp(df, variable, countries_A, countries_B):
if variable == 'Energy use':
y = 'euse'
else:
y = 'gdp'
I = df.countries == countries_A
Y = df.countries == countries_B
ax = df.loc[I,['year',y]].plot(x='year', y=y, style='-', grid=True)
bx = df.loc[Y,['year',y]].plot(ax=ax,x='year', y=y, style='-', grid=True)
ax.legend([countries_A, countries_B])
widgets.interact(plot_euse_gdp,
df = widgets.fixed(joiningdata),
variable = widgets.Dropdown(description='Variable',
options=['Energy use','GDP']),
countries_A = widgets.Dropdown(description='Country A',
options=joiningdata.countries.unique()) ,
countries_B = widgets.Dropdown(description='Country B',
options=joiningdata.countries.unique())
);
###Output
_____no_output_____
###Markdown
Index calculation For further analysis we calculate the indices of energy use and GDP. We group the data by countries and calculate the index by applying a lambda function. By sorting the data by country and year we ensure that the first observation for each country is 1971. Normalization of energy use and GDP per capita
###Code
data=joiningdata.copy()
#print(data)
#Indice calculation for euse
data.sort_values(by = ['countries', 'year'], inplace = True)
#data.reset_index(inplace = True)
#data.drop(['index'], axis = 1, inplace = True) #delete the old index
#Select the first element in a series
def first(x):
return x.iloc[0]
#Group the data and calcualte the index
grouped = data.groupby(['countries'])
for s in ['euse','gdp']:
data['index_'+s] = grouped[s].transform(lambda x: x/first(x)*100)
#Set index to the figure (run only once!)
data.set_index('year')
#Check the dataset
print(data.head(10))
###Output
countries year euse gdp gdp_pct index_euse \
0 Argentina 1971.0 1380.921398 7335.759136 NaN 100.000000
1 Argentina 1972.0 1379.818923 7329.921158 -0.079582 99.920164
2 Argentina 1973.0 1411.496781 7407.366754 1.056568 102.214129
3 Argentina 1974.0 1415.682954 7685.857212 3.759642 102.517273
4 Argentina 1975.0 1378.546993 7559.143749 -1.648658 99.828056
5 Argentina 1976.0 1404.744778 7291.840429 -3.536159 101.725180
6 Argentina 1977.0 1420.567765 7681.017571 5.337159 102.871008
7 Argentina 1978.0 1426.919260 7227.564124 -5.903560 103.330954
8 Argentina 1979.0 1484.565823 7849.363340 8.603164 107.505454
9 Argentina 1980.0 1487.617079 7849.115971 -0.003151 107.726412
index_gdp
0 100.000000
1 99.920418
2 100.976145
3 104.772486
4 103.045147
5 99.401307
6 104.706513
7 98.525101
8 107.001378
9 106.998006
###Markdown
The following graph is interactive and you can see the development in the normalized energy use and GDP per capita for any South American country.
###Code
def indice_graph(countries):
graph_data = data.loc[data['countries'] == countries,:]
graph_data.set_index('year').groupby('countries')['index_euse', 'index_gdp'].plot(legend=True, grid=True, title='Development in energy use and GDP per capita')
return
# Next we create an interactive line chart which displays the development of employment shares grouped by the regions of Denmark
interact(indice_graph, countries=['Bolivia', 'Ecuador', 'Chile', 'Brazil', 'Uruguay', 'Peru', 'Paraguay', 'Venezuela, RB', 'Argentina', 'Colombia'])
###Output
_____no_output_____ |
test_clustering.ipynb | ###Markdown
Examination of component clustering Q: Which is better, clustering based on the Jaccard index, or simply taking the mean of components?
###Code
import matplotlib.pyplot as plt
import seaborn as sns
from descartes import PolygonPatch
from sklearn.cluster import DBSCAN
import numpy as np
import scipy.stats as st
import pandas as pd
import gzbuilder_analysis.aggregation as ag
import gzbuilder_analysis.rendering.jax.fit as fit
true_model = dict(
disk=dict(mux=51, muy=51, roll=np.pi/5, I=1, q=0.8, Re=15, n=1, c=2),
bulge=dict(mux=50.6, muy=51.4, roll=0, I=1, q=0.94, Re=3, n=1.3),
bar=dict(mux=51, muy=51, roll=0.4*np.pi, I=1, q=0.31, Re=7, n=0.8),
spiral=[
dict(t_min=2.5, t_max=5, A=9.8734, I=0.53,
phi=16.2, falloff=9.05, spread=6.81),
dict(t_min=-1.0, t_max=2.5, A=20.20, I=0.33,
phi=18, falloff=5.6, spread=7.2 ),
]
)
def circ_norm_rvs(loc=1, scale=1):
return lambda N: st.norm(loc=loc, scale=scale).rvs(N) % (2 * np.pi)
def truncated_normal(loc=1, scale=1, lower=0, upper=np.inf):
return st.truncnorm(loc=loc, scale=scale, a=(lower-loc)/scale, b=(upper-loc)/scale)
disk = pd.Series(true_model['disk'])
sd_disk = pd.Series(dict(mux=1, muy=1, roll=np.deg2rad(20), I=0.1, q=0.01, Re=2))
def circ_mean(angles, nsymm=1):
n = len(angles)
return np.arctan2(1/n * np.sin(angles).sum(), 1/n * np.cos(angles).sum())
disk_gen = dict(
mux=st.norm(loc=disk.mux, scale=sd_disk.mux).rvs,
muy=st.norm(loc=disk.muy, scale=sd_disk.muy).rvs,
roll=circ_norm_rvs(disk.roll, sd_disk.roll),
I=truncated_normal(disk.I, sd_disk.I, 0, np.inf).rvs,
q=truncated_normal(disk.q, sd_disk.q, 0, 1).rvs,
Re=truncated_normal(disk.Re, sd_disk.Re, 0, np.inf).rvs,
n=np.ones, c=lambda N: 2*np.ones(N),
)
N = 10
drawn_disks = pd.DataFrame({k: disk_gen[k](N) for k in disk_gen})
pivot = drawn_disks.describe()
pivot.loc['mean', 'roll-circmean'] = circ_mean(drawn_disks.roll)
pivot
BASE_CLS = dict(disk=None, bulge=None, bar=None, spiral=[])
fake_models = drawn_disks.apply(lambda d: {**BASE_CLS, 'disk': d.to_dict()}, axis=1)
agg_res = ag.AggregationResult(models=fake_models, galaxy_data=np.zeros((100, 100)))
mean_res = drawn_disks.describe().loc['mean']
# use a circular mean for the position angle
mean_res['roll'] = circ_mean(drawn_disks.roll)
pd.concat((
(mean_res - disk) / sd_disk,
(agg_res.params.disk - disk) / sd_disk
), axis=1, sort=False).dropna().rename(
columns={0: 'Mean-based aggregation', 1: 'Jaccard-distance aggregation'},
)
from gzbuilder_analysis.aggregation import make_ellipse
from shapely.affinity import scale as shapely_scale
geoms = drawn_disks.apply(lambda p: make_ellipse(p.to_dict()), axis=1)\
.dropna().apply(shapely_scale, xfact=3, yfact=3)
true_geom = shapely_scale(
make_ellipse(disk.to_dict()),
xfact=3, yfact=3
)
mean_geom = shapely_scale(
make_ellipse(mean_res.to_dict()),
xfact=3, yfact=3
)
res_geom = shapely_scale(
make_ellipse(agg_res.params.disk.to_dict()),
xfact=3, yfact=3
)
plt.figure(figsize=(9, 9))
ax = plt.gca()
for g in geoms:
ax.add_patch(PolygonPatch(g, fc='none', ec='k', alpha=0.6))
ax.add_patch(PolygonPatch(true_geom, fc='blue', alpha=0.2, label='True'))
ax.add_patch(PolygonPatch(true_geom, fc='none', ec='k', lw=4, ls='-'))
ax.add_patch(PolygonPatch(mean_geom, fc='none', ec='g', lw=4, ls='--', label='Recovered (mean)'))
ax.add_patch(PolygonPatch(res_geom, fc='none', ec='r', lw=4, ls='-.', label='Recovered (jaccard)'))
plt.legend()
plt.xlim(0, 100)
plt.ylim(0, 100)
###Output
_____no_output_____
###Markdown
What do the jaccard distances from our recovered disks to the true disk look like? (lower is better)
###Code
def jaccard_distance(ob1, ob2):
if ob1.union(ob2).area <= 0:
return 1
return 1 - ob1.intersection(ob2).area / ob1.union(ob2).area
print('Jaccard distance to true (mean): {:.4f}'.format(jaccard_distance(true_geom, mean_geom)))
print('Jaccard distance to true (jaccard): {:.4f}'.format(jaccard_distance(true_geom, res_geom)))
###Output
Jaccard distance to true (mean): 0.1028
Jaccard distance to true (jaccard): 0.0732
|
notebooks/Lidar_comparison_AGU_2019/area_plot_stable_ground.ipynb | ###Markdown
Stable Terrain Lidar Snow Depth
###Code
lidar_data = RasterFile(LIDAR_SNOW_DEPTH, band_number=1)
band_values_lidar = lidar_data.band_values()
band_values_lidar.mask = casi_data.stable_surfaces(band_values_lidar.mask)
plot = side_by_side_plot(
band_values_lidar,
lidar_data.extent,
ORTHO_IMAGE,
hillshade
)
set_axes_style(plot.axes[0])
del band_values_lidar
###Output
_____no_output_____
###Markdown
SfM Snow Depth
###Code
sfm_data = RasterFile(SFM_SNOW_DEPTH, band_number=1)
band_values_sfm = sfm_data.band_values()
band_values_sfm.mask = casi_data.stable_surfaces(band_values_sfm.mask)
plot = side_by_side_plot(
band_values_sfm,
sfm_data.extent,
ORTHO_IMAGE,
hillshade
)
set_axes_style(plot.axes[0])
del band_values_sfm
###Output
_____no_output_____
###Markdown
Snow on comparison (SfM - Lidar)
###Code
snow_on_data = RasterFile(SNOW_ON_DIFF, band_number=1)
band_values_snow_on = snow_on_data.band_values()
band_values_snow_on.mask = casi_data.stable_surfaces(band_values_snow_on.mask)
plot = side_by_side_plot(
band_values_snow_on,
snow_on_data.extent,
ORTHO_IMAGE,
)
set_axes_style(plot.axes[0])
del band_values_snow_on
###Output
_____no_output_____
###Markdown
Snow free comparison (SfM - Lidar)
###Code
snow_free_data = RasterFile(SNOW_FREE_DIFF, band_number=1)
band_values_snow_free = snow_free_data.band_values()
band_values_snow_free.mask = casi_data.stable_surfaces(band_values_snow_free.mask)
plot = side_by_side_plot(
band_values_snow_free,
snow_free_data.extent,
ORTHO_IMAGE,
)
set_axes_style(plot.axes[0])
del band_values_snow_free
###Output
_____no_output_____ |
colab ssh.ipynb | ###Markdown
/usr/etc/jupyter/jupyter_notebook_config.json
###Code
# Create drive folder
!mkdir -p drive
!apt-get install -y -qq software-properties-common python-software-properties module-init-tools
!add-apt-repository -y ppa:alessandro-strada/ppa 2>&1 > /dev/null
!apt-get update -qq 2>&1 > /dev/null
!apt-get -y install -qq google-drive-ocamlfuse fuse
from IPython.lib import passwd
password = passwd("s3cr3t!!")
password
jupyter_running = !jupyter notebook list | grep 8888
if not jupyter_running:
    !mkdir -p /content/.jupyter
    # double braces escape the literal JSON braces from IPython's {} expansion; {password} inserts the hashed password
    !echo '{{ "NotebookApp": {{ "password": "{password}" }} }}' > /usr/etc/jupyter/jupyter_notebook_config.json
#get_ipython().system_raw('jupyter lab &')
#!ssh -o ServerAliveInterval=60 -o StrictHostKeyChecking=no -R vinc3:80:localhost:8888 serveo.net 1>/dev/null
!cat /usr/etc/jupyter/jupyter_notebook_config.json
!ssh -o ServerAliveInterval=60 -o StrictHostKeyChecking=no -R test:80:localhost:8888 -R test:22:localhost:22 serveo.net 1>/dev/null
###Output
Hi there
|
GeneralExemplars/Coding Activities for Schools/National Higher/while_arrays_Higher.ipynb | ###Markdown
While statements and arrays

Legend In blue, the instructions and goals are highlighted. In green, the information is highlighted. In yellow, the exercises are highlighted. In red, the error and alert messages are highlighted.

Instructions In red, the error and alert messages are highlighted. Click "Run" on each cell to go through the code in each cell. This will take you through the cell and print out the results. If you wish to see all the outputs at once in the whole notebook, just click Cell and then Run All. In case the cell keeps running and does not stop, go to the respective cell, press Kernel, then Interrupt.

Goals After this workshop, the student should get more familiar with the following topics:
- printing basic statements and commands in Jupyter Notebook
- performing basic arithmetic calculations in Python
- improving an existing model of the code
- recognizing and checking variable types in Python
- using the if and for statements for basic operations
- working with characters and strings

These objectives are in agreement with the Higher Scottish Curriculum for high-school students. Note: For most of the workshop, the student will be given some coding examples. In some cases, the student will have to code himself/herself. The coding part is optional, but highly recommended. Note: The game at the end of the notebook is completely optional material. You can either only play it, or analyze how it is designed (and even come up with suggestions). However, the coding there is a bit more advanced... after a couple of weeks, you will be equipped with more tools for designing small games.

Explore Conditional while statement... Suppose that we do not know how many times we would like to print a statement. All we would like is the following: while a condition has not been reached yet, we keep executing the instruction. This is the main role of the while instruction. Exercise: Investigate the code below. Predict the output, then Run the cell.
###Code
i = 0
while(i < 10):
print("Welcome to this programming session!")
i += 1
###Output
_____no_output_____
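###Markdown
For comparison, here is the same output produced with a for loop (an added example):
###Code
# The for loop fixes the number of repetitions up front
for i in range(10):
    print("Welcome to this programming session!")
###Output
_____no_output_____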
###Markdown
The while instruction indeed plays a similar role to the for statement. However, there are differences. This time, we do not tell the computer in advance how many steps we want to perform: the loop keeps running while the condition $ i < 10 $ holds and stops once $ i $ reaches $ 10 $. Investigate: What happens in the following case?
###Code
i = 0
while(i < 10):
print("Welcome to this programming session!")
###Output
_____no_output_____
###Markdown
We have skipped the crucial statement $ i += 1 $. Hence, $ i $ stays equal to $ 0 $, and the condition $ 0 < 10 $ remains true forever. It is of utmost importance to include, from the beginning, a statement that advances the loop variable in a while statement, in order to avoid getting stuck. Note: In a for loop, this is done automatically. Exercise: let us analyze this phenomenon in greater detail, by debugging the code. Debugging involves an in-depth analysis of the code. At each step, you will tell the computer what step to perform and what to print out. This is the most accurate way to check how the computer performs the algorithm. Let us see together how debugging is done in Jupyter Notebooks. Firstly, there is a specific library which needs to be imported
###Code
import pdb
###Output
_____no_output_____
###Markdown
Afterwards, a command line breakpoint is added. That is where you will have to play with all the variables. You will need in particular the following commands:
- `n` goes to the next command line
- `p` prints out a value (example: `p b`)
- `w` shows where you are in the code
- `q` quits the debugger
###Code
my_sum = 0
i = 0
while(i<10):
breakpoint()
my_sum += i
my_sum
###Output
_____no_output_____
###Markdown
You should figure out that our conditional variable $ i $ never changes. This is why we never get out of the loop. Exercise: Add the statement that allows the loop to exit (Hint: you only need one line of code)
###Code
# Write your own code here
i = 0
sum = 0
while(i < 10):
    sum += i
    i += 1
print("The sum of the first " + "10 " + "elements is: " + str(sum))
###Output
The sum of the first 10 elements is: 45
###Markdown
Exercise: Compare this method of calculating the sum of the first $ N $ consecutive elements with the for-method. Afterwards, use the while instruction to calculate the product of the first 10 elements
###Code
# Write your own code here
###Output
_____no_output_____
###Markdown
...And Arrays Let us have a bit of revision on arrays: Exercise: Create an array called my_second_array which takes all the elements from 1 to 10
###Code
# Write your own code here (it is only one line required)
###Output
_____no_output_____
###Markdown
Exercise: Calculate the sum of all odd elements
###Code
# Write your own code here
###Output
_____no_output_____
###Markdown
Exercise: Calculate the product of all even elements
###Code
# Write your own code here
###Output
_____no_output_____
###Markdown
Just as arrays can be created for integers, they can be created for characters as well. They are called strings. Similarly, all the characters (data type) are stored in such a string. The elements are, once again, easily accessible. Investigate: How is a string declared? Can you see the similarity to an array declaration?
###Code
my_first_string = "I have a good day!"
###Output
_____no_output_____
###Markdown
Exercise: Do you remember how to access the elements of an array? The procedure is the same for accessing characters of a string. Access the fifth element:
###Code
# Write your own code here
###Output
_____no_output_____
###Markdown
Exercise: Investigate the ways in which you can access the last element of a string:
###Code
# First method
length_str = len(my_first_string)
print("The last element of a string is: " + my_first_string[length_str - 1])
# Second method
print("The last element of a string is: " + my_first_string[-1])
###Output
The last element of a string is: !
The last element of a string is: !
###Markdown
Exercise: Access the second last element of a string:
###Code
# Write your own code here
###Output
_____no_output_____
###Markdown
Exercise: Inspect the following line:
###Code
print("All the elements apart from the last character of the string are: " + str(my_first_string[:-1]))
###Output
All the elements apart from the last character of the string are: I have a good day
###Markdown
In this case, only the last element of the string is not printed out. Investigate the following code lines below and comment with your peers what happens in each case:
###Code
for i in range(len(my_first_string)):
print(my_first_string[i], end = '*')
for i in range(len(my_first_string)):
if(i % 2 == 0):
print(my_first_string[i], end = '')
###Output
Ihv oddy
###Markdown
The space character is also counted as a character in the string. Optional material Let's play a game: We use all the above tricks for creating the game below. It is not required for you to know by heart how to do the exercises below... You just got started with all these notions, and you need practice to get more used to it... Hopefully, however, by the end of the year, you will get more acquainted with the little game below.
###Code
ok = 1
A = input("Introduce your initial word here: ")
B = []
while(ok == 1):
B = A
A = input("Introduce the word here: ")
if(A[0] != B[len(B) - 1]):
ok = 0
print("Game over! Try again!")
###Output
_____no_output_____
###Markdown
Let us make it a bit harder, shall we? Your next word should have its first two characters the same as the last two characters of the previous word. Are you up to it?
###Code
ok = 1
A = input("Introduce your initial word here: ")
B = []
while(ok == 1):
B = A
A = input("Introduce the word here: ")
    if( (A[0] != B[len(B) - 2]) or (A[1] != B[len(B) - 1]) ):
ok = 0
print("Game over! Try again!")
###Output
_____no_output_____
###Markdown
Take-away This is it for today, and well done for managing to go through the material!! After this session, you should be more familiar with how simple sentences, numbers and conditional statements can be printed in Python. Moreover, ponder a bit on the while instruction, as it is heavily used in programming. Also, feel free to work more on this notebook using any commands you would like. Note: Always keep a back-up of the notebook, in case the original one is altered. For today's session, this should be enough! See you later!!
###Code
print("Bye bye! :D")
###Output
_____no_output_____ |
notebooks/02_core.random_variable.ipynb | ###Markdown
- [X] TODO: implement kde estimation and entropy estimation
- [X] TODO: make an abstraction for random variable in order to bring kde to the same abstractions of sample, etc...
- [X] TODO: consider using awkde or scipy for small sample datasets (< 400)
- [ ] TODO: make an abstraction for random variable in order to bring sklearn GMM and kde to the same abstractions of sample, etc...
- [X] TODO: make RVArray an instance of np.array (NOT NEEDED)
###Code
#hide
from nbdev.showdoc import *
#hide
%load_ext autoreload
%autoreload 2
import sys
sys.path.append('..')
###Output
The autoreload extension is already loaded. To reload it, use:
%reload_ext autoreload
###Markdown
Meta Distributions > Extension of the SciPy rv_continuous class, containing some useful methods for Maximum Likelihood Estimation and other distribution methods. Imports
###Code
#export
from functools import partial
from warnings import warn
import scipy
import scipy.stats as stats
import numpy as np
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.preprocessing import QuantileTransformer, FunctionTransformer
from sklearn.neighbors import KernelDensity
from sklearn.decomposition import PCA, KernelPCA
import KDEpy as kdepy
from tqdm import tqdm  # used by the progress_bar option in the kde metric functions below
import awkde
from skdensity.utils import (
cos_sim_query, sample_multi_dim, ctqdm, DelegateEstimatorMixIn, _vector_1d_to_matrix,_assert_dim_3d,_assert_dim_2d,
add_noise, _fix_X_1d, draw_from, _fix_one_sample_2d, _fix_one_dist_2d, _fix_dist_1d
)
###Output
_____no_output_____
###Markdown
KDE Class -
###Code
#export
#Identity transformer in case space_transformer is None
def identity_func(x):
return x
IDENTITY_TRANSFORMER = FunctionTransformer(
func = identity_func,
inverse_func = identity_func,
validate=False,
accept_sparse=True,
check_inverse=True,
kw_args=None,
inv_kw_args=None,
)
def agg_smallest_distance(data, agg_func = np.mean):
'''
    returns the aggregate (defined by agg_func) distance between each point and its closest neighbor
    receives an array of shape (n_dists, n_samples, n_dims) and returns an array of shape (n_dists, n_dims)
'''
_assert_dim_3d(data)
data = np.sort(data, axis = 1)
diff = np.diff(data, axis = 1)
results = agg_func(diff, axis = 1)
return results
class KDE():
AVALIBLE_BW_METHODS = ['ISJ', 'scott', 'silverman', 'mean_distance', 'std_distance', 'median_distance']
def __init__(self, bw = 'std_distance', space_transformer = PCA, implementation = 'sklearn', st_kws = {}, **kde_kws):
if bw.__class__ == str:
assert bw in self.AVALIBLE_BW_METHODS, f"if str, bw should be one of {self.AVALIBLE_BW_METHODS}, not {bw}"
if not isinstance(bw,(str, float, np.float64, np.float32, np.float)):
raise TypeError(f'bw should be str or float, not {bw.__class__}')
self.bw = bw
self._space_transformer = space_transformer if not space_transformer is None else IDENTITY_TRANSFORMER
self.kde_kws = kde_kws
self.st_kws = st_kws
if not implementation in ['scipy','sklearn','awkde']:
raise ValueError(f'implementation should be one of ["sklearn","scipy","awkde"], not {implementation}')
self.implementation = implementation
def _check_X_2d(self,X):
X = np.array(X)
#reshape if shape == (n_samples,)
X = X if len(X.shape) > 1 else X.reshape(-1,1)
return X
def _check_input_dims_match(self, X):
if X.shape[-1] != self.n_dim:
raise ValueError(f'X dimensions space should be the same size as fitted distribution ({self.n_dim}), got {X.shape[-1]} instead')
def _get_bw_each_dim(self, X, bw_method):
if bw_method in ['ISJ', 'scott', 'silverman']:
return np.array([kdepy.FFTKDE(bw = bw_method).bw(X[:,i:i+1]) for i in range(X.shape[-1])])
elif bw_method == 'mean_distance':
return np.array([agg_smallest_distance(X[:,i].reshape(1,X.shape[0],1), np.mean) for i in range(X.shape[-1])])
elif bw_method == 'median_distance':
return np.array([agg_smallest_distance(X[:,i].reshape(1,X.shape[0],1), np.median) for i in range(X.shape[-1])])
elif bw_method == 'std_distance':
return np.array([agg_smallest_distance(X[:,i].reshape(1,X.shape[0],1), np.std) for i in range(X.shape[-1])])
def _preprocess_fit(self, X):
'''
preprocess data prior to fit. ensure len >2 and add some white noise to avoid eigenvalues errors in space transform
'''
X = self._check_X_2d(X)
if len(X) < 2:
X = np.concatenate([X,X])
X = add_noise(X, 1e-9)
return X
def fit(self, X, y = None, sample_weight = None):
#preprocess X
X = self._preprocess_fit(X)
#fit and transform X with manifold learner (self.space_transformer)
if isinstance(self._space_transformer, type):
self._space_transformer = self._space_transformer(**{**self.st_kws, **{
'n_components':X.shape[-1], 'whiten':True}})
X = self._space_transformer.fit_transform(X)
# calculate bw
if self.bw.__class__ == str:
bw = self._get_bw_each_dim(X, self.bw)
bw = np.sqrt(np.sum(bw**2))
else:
            warn('passing a float value for bw is not recommended since X will be transformed by space_transformer before fitting and the bw value may not make sense in the new transformed space')
bw = self.bw
#ensure bw is positive
bw = max(1e-6, bw)
#kde
if self.implementation == 'sklearn':
self.estimator = KernelDensity(**{**{'bandwidth':bw},**self.kde_kws}).fit(X, y, sample_weight = sample_weight)
elif self.implementation == 'scipy':
self.estimator = stats.gaussian_kde(X.T, bw_method = bw)
elif self.implementation == 'awkde':
self.estimator = awkde.GaussianKDE(**{**{'glob_bw':bw},**self.kde_kws})
self.estimator.fit(X = X, weights = sample_weight)
else: raise ValueError(f'self.implementation should be one of ["sklearn","scipy","awkde"], not {self.implementation}')
self._transformed_bw_value = bw
self.n_dim = X.shape[-1]
return self
def evaluate(self, data):
data = self._check_X_2d(data)
#transform input
data = self._space_transformer.transform(data)
self._check_input_dims_match(data)
#get likelihoods
if self.implementation == 'sklearn':
likelihood = np.exp(self.estimator.score_samples(data))
elif self.implementation == 'scipy':
likelihood = self.estimator.pdf(data.T)
elif self.implementation == 'awkde':
likelihood = self.estimator.predict(data)
else: raise ValueError(f'self.implementation should be one of ["sklearn","scipy","awkde"], not {self.implementation}')
return likelihood
def predict(self, X):
return self.evaluate(X)
def pdf(self, data):
return self.evaluate(data)
def rvs(self, size = 1, random_state = None):
sample_size = size
if self.implementation == 'sklearn':
samples = self.estimator.sample(n_samples = sample_size, random_state = random_state)
elif self.implementation == 'scipy':
samples = self.estimator.resample(sample_size, random_state).T
elif self.implementation == 'awkde':
samples = self.estimator.sample(n_samples = sample_size, random_state = random_state)
else: raise ValueError(f'self.implementation should be one of ["sklearn","scipy","awkde"], not {self.implementation}')
#inverse transform samples
samples = self._space_transformer.inverse_transform(samples)
return samples
def sample(self, sample_size = 1, random_state = None):
return self.rvs(sample_size, random_state)
def entropy(self, sample_size = 100):
return np.mean(-np.log2(self.evaluate(self.rvs(size = sample_size))))
def cdf(self, data, sample_size = 1000):
samples = self.sample(sample_size = sample_size)
# fix shape in order to work with _quantile
samples = samples.reshape(1, *samples.shape)
return _quantile(data.reshape(1, *data.shape), samples)
def ppf(self, data, sample_size = 100):
#estimate using sampling and QuantileTransformer since integration is too costly
data = np.array(data)
assert (data.min() >= 0) and (data.max() <= 1), 'data contains values < 0 or > 1'
samples = self.sample(sample_size = sample_size)
return QuantileTransformer(n_quantiles = min(1000,samples.shape[0])).fit(samples).inverse_transform(data)
def _make_conditioning_grid(self, condition_dict = {}, resolution = None):
        samples = self.sample(1000) #estimate min and max intervals
        likelihood = self.evaluate(samples)
argsrt = np.argsort(likelihood)[::-1]
likelihood_msk = likelihood[argsrt].cumsum() < 0.99*likelihood.sum()
likelihood_msk = argsrt[likelihood_msk]
#ignore points with low likelihood
grid_min, grid_max = samples[likelihood_msk].min(axis = 0), samples[likelihood_msk].max(axis = 0)
dim_grid = []
for dim in range(grid_min.shape[0]):
dim_min, dim_max = grid_min[dim], grid_max[dim]
if not dim in condition_dict:
dim_grid.append(np.linspace(dim_min,dim_max, resolution))
else:
dim_grid.append(np.linspace(condition_dict[dim],condition_dict[dim], resolution))
return np.array(dim_grid).T
###Output
_____no_output_____
###Markdown
Testing with moons
###Code
from sklearn.datasets import make_moons
import seaborn as sns
import matplotlib.pyplot as plt
def rotate(x, degree):
theta = np.radians(degree)
r = np.array(( (np.cos(theta), -np.sin(theta)),
(np.sin(theta), np.cos(theta)) ))
return x.dot(r)
moon1, ds1 = make_moons(n_samples = 2000, noise = .1)
moon2, ds2 = make_moons(n_samples = 2000, noise = .1)
moon1 = rotate(moon1, 90)+.5
moon2 = rotate(moon2, 15)
moons = np.concatenate([moon1,moon2])
#sns.jointplot(moons[:,0], moons[:,1])
kde = KDE(implementation = 'scipy', bw = 'mean_distance', space_transformer= KernelPCA(n_components = 2, kernel = 'linear', fit_inverse_transform = True))
kde.fit(moons)
kde.evaluate(moons)
samples = kde.sample(4000)
jnt = sns.jointplot(moons[:,0], moons[:,1], alpha = 0.3)
jnt.ax_joint.scatter(samples[:,0], samples[:,1], color = 'r', alpha = 0.3)
kde = KDE(implementation = 'sklearn', bw = 'std_distance', rtol = 0.01)
%timeit kde.fit(moons)
%timeit kde.evaluate(moons)
samples = kde.sample(4000)
jnt = sns.jointplot(moons[:,0], moons[:,1], alpha = 0.3)
jnt.ax_joint.scatter(samples[:,0], samples[:,1], color = 'r', alpha = 0.3)
kde = KDE(implementation = 'sklearn', bw = 'ISJ')
%timeit kde.fit(moons)
%timeit kde.evaluate(moons)
samples = kde.sample(400)
jnt = sns.jointplot(moons[:,0], moons[:,1], alpha = 0.3)
jnt.ax_joint.scatter(samples[:,0], samples[:,1], color = 'r', alpha = 0.3)
###Output
18.3 ms ± 897 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
565 ms ± 17.1 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
###Markdown
KDE metric functions
###Code
y_dists = np.hstack([np.random.randn(10,200,2), np.random.randn(10,200,2)+5])
y_true = np.hstack([np.random.randn(10,1,2)])
#export
def _check_kde_metrics_input(y_true, y_dists, frac):
'''
preprocesses inputs for kde metrics calculation
'''
y_dists = _assert_dim_3d(y_dists)
if len(y_true.shape) <= 3:
assert y_true.shape[0] == y_dists.shape[0], f'y_true dim 0 should be equal y_dists dim 0, got {y_true.shape[0]} and {y_dists.shape[0]}'
else:
raise Exception(f'y_true dims should less or equal to 3, got {len(y_true.shape)}')
y_true = _fix_one_sample_2d(y_true)
idxs = np.arange(y_true.shape[0])
idxs = draw_from(idxs, frac)
y_dists = y_dists[idxs]
y_true = y_true[idxs]
return y_true, y_dists
###Output
_____no_output_____
###Markdown
`_kde_entropy`
###Code
#export
def _kde_entropy(data, sample_size = 200, frac = 1.0, progress_bar = False, **kde_kwargs):
'''
Calculates the entropy of multiple continuous distributions. entropy equals np.mean(-np.log(p(x)))
input should be of shape (n_distributions, n_sample_per_distribution, n_dims_in_distribtuion)
'''
data = _fix_one_dist_2d(data)
data = _assert_dim_3d(data)
kde = KDE(**kde_kwargs)
data = draw_from(data, frac)
if progress_bar:
return np.array([kde.fit(d).entropy(sample_size = sample_size) for d in tqdm(data)])
else:
return np.array([kde.fit(d).entropy(sample_size = sample_size) for d in data])
###Output
_____no_output_____
###Markdown
`_kde_entropy` estimates the distribution of a random variable of arbitrary dimension using kernel density estimation and calculates its entropy
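###Markdown
As a reference (this formula is implied by the code rather than stated in the original), the entropy is approximated by Monte Carlo sampling from the fitted KDE: $H(X) \approx -\frac{1}{N}\sum_{i=1}^{N} \log \hat{p}(x_i)$, where the $x_i$ are drawn from the fitted density $\hat{p}$. Note that the `KDE.entropy` method above uses a base-2 logarithm, so its value is reported in bits.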
###Code
_kde_entropy(y_dists,frac = 0.05, implementation = 'sklearn')
%%timeit
_kde_entropy(y_dists, implementation = 'sklearn', bw = 'ISJ')
###Output
206 ms ± 27.7 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
###Markdown
`_ppf`
###Code
#export
def _ppf(percentiles, y_dists):
'''
returns the percent point function for given y_dists and percentiles
expected dims:
percentiles: (n_dists, n_points)
y_dists: (n_dists, n_samples, dims)
'''
assert len(percentiles.shape) == 2, f'percentiles should have 2 dimensions: (n_dists, n_percentiles), got {len(percentiles.shape)}'
assert len(y_dists.shape) == 3, f'y_dists should have 3 dimensions: (n_dists, n_samples, n_dims), got {len(y_dists.shape)}'
assert percentiles.shape[0] == y_dists.shape[0], f'percentiles n_dists should be equal y_dists n_dists. got {percentiles.shape[0]} and {y_dists.shape[0]}'
values = np.array([np.quantile(y_dists[i], percentiles[i], axis = 0) for i in range(y_dists.shape[0])])
return _fix_one_sample_2d(values)
_ppf(np.random.random((100,3)), np.random.randn(100,1000,3)).shape
#np.quantile(np.random.randn(1000,2),np.random.random((2)), axis = 0).shape
###Output
_____no_output_____
###Markdown
`_kde_likelihood`
###Code
#export
def _kde_likelihood(y_true,y_dists, frac = 1.0, progress_bar = False,**kde_kwargs):
'''
Calculates the likelihood of y_true in kde estimation of samples
input should be of shape (n_distributions, n_sample_per_distribution, n_dims_in_distribtuion)
'''
y_true, y_dists = _check_kde_metrics_input(y_true, y_dists, frac)
kde = KDE(**kde_kwargs)
if progress_bar:
likelihoods = np.array([kde.fit(y_dists[i]).evaluate(y_true[i]) for i in tqdm([*range(y_dists.shape[0])])])
else:
likelihoods = np.array([kde.fit(y_dists[i]).evaluate(y_true[i]) for i in range(y_dists.shape[0])])
return _fix_dist_1d(likelihoods)
###Output
_____no_output_____
###Markdown
`_kde_likelihood` Calculates the likelihood of y_true in kde estimation of samples
###Code
_kde_likelihood(np.random.randn(10,2,2),y_dists, frac = 0.5).shape
%%timeit
kde_likelihood(y_true,y_dists, implementation = 'scipy', bw = 'scott')
###Output
12.3 ms ± 313 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
###Markdown
`_kde_quantile`
###Code
#export
def _kde_quantile(y_true, y_dists, frac = 1.0, progress_bar = False, **kde_kwargs):
'''
fits a kde in a distribution and returns the quantile that a point in y_true belongs to in that distribution
'''
y_true, y_dists = _check_kde_metrics_input(y_true, y_dists, frac)
kde = KDE(**kde_kwargs)
if progress_bar:
return _fix_one_dist_2d(np.array([kde.fit(y_dists[i]).cdf(y_true[i]) for i in tqdm([*range(len(y_dists))])]))
else:
return _fix_one_dist_2d(np.array([kde.fit(y_dists[i]).cdf(y_true[i]) for i in range(len(y_dists))]))
###Output
_____no_output_____
###Markdown
`_kde_quantile` fits a kde to each dist in `y_dists` and checks, rowwise, which quantile (CDF value) of that fitted distribution each point of `y_true` falls into
###Code
_kde_quantile(np.random.randn(10,3, 2), y_dists).shape
%%timeit
kde_quantile(y_true, y_dists)
###Output
18.4 ms ± 865 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
###Markdown
`_quantile`
###Code
#export
def _quantile(y_true, y_dists):
'''
checks in which quantile lies y_true, given the predicted distribution
y_true shape should be of shape (n_dists, n_samples ,n_dims)
y_dists_should be of shape (n_dists, n_samples, n_dims)
'''
y_true = _assert_dim_3d(y_true)
y_dists = _assert_dim_3d(y_dists)
assert y_true.shape[0] == y_dists.shape[0], f'number of dists should be the same in both y_true and y_dists. got {y_true.shape[0]} and {y_dists.shape[0]}'
values = []
for i in range(y_true.shape[0]):
values.append([])
for j in range(y_true.shape[1]):
values[i].append((y_true[i,j].T >= y_dists[i]).mean(axis = 0))
return _fix_one_sample_2d(np.array(values))
###Output
_____no_output_____
###Markdown
`_quantile` checks the quantile of each dimension of an observation `y_true` given an empirical distribution `y_dists`
###Code
quantiles = _quantile(np.random.randn(10,100,2), np.random.randn(10,100,2))
sns.distplot(quantiles[:,:,0].flatten())
sns.distplot(quantiles[:,:,1].flatten())
%%timeit
_quantile(y_true, y_dists)
###Output
215 µs ± 15.4 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)
###Markdown
Empirical Class
###Code
# export
# CREATE EMPIRICAL DIST METHODS (WITH ADD NOISE IN SAMPLING OPTION AND ALL) <-----
class Empirical:
'''
empirical/histogram class
'''
def __init__(self, fit_frac=1):
'''
        fit_frac is the fraction of the data used when fitting and should satisfy 0 < fit_frac <= 1
'''
assert 0 < fit_frac <= 1, 'fit_frac should be 0 < fit_frac <= 1'
self.fit_frac = fit_frac
return
def fit(self, X, y=None, sample_weight=None):
'''
saves data into memory
'''
if len(X.shape) == 1:
X = _fix_X_1d(X)
assert len(X.shape) == 2, f'X expected to have 2 dimensions, got {len(X.shape)}'
if sample_weight is None:
self.data = X
self.weights = None
else:
assert X.shape[0] == sample_weight.shape[0], f'''
X and sample_weight must be the same size along dimension 0. got {X.shape[0]} and {sample_weight.shape[0]}'''
self.data = X
self.weights = sample_weight
return self
def sample(self, sample_size):
'''
alias for rvs
'''
return self.rvs(size = sample_size)
    def rvs(self, size):
        '''
        samples from self.data
        alias for sample
        '''
        samples = sample_multi_dim(self.data, sample_size=size, weights=self.weights, )
        return samples
def _reshape_X_and_dist(self, X, dist):
X = np.array(X)
if len(X.shape) == 1:
X = _fix_X_1d(X)
X = X.reshape(1, *X.shape)
dist = dist.reshape(1, *dist.shape)
return X, dist
def pdf(self, X, inference_sample_size=1000, **pdf_kwargs):
'''
fits a kde and checks its likelihood to a data sample of dist
'''
dist = self.sample(inference_sample_size)
X, dist = self._reshape_X_and_dist(X, dist)
return _kde_likelihood(X, dist, **pdf_kwargs)
def ppf(self, X, inference_sample_size=1000):
'''
returns the percent point function of X given a sample of distribution
'''
X = np.array(X)
assert len(X.shape) == 1, f'X should have 1 dimension, got {len(X.shape)}'
dist = self.sample(inference_sample_size)
dist = _fix_one_dist_2d(dist)
X = X.reshape(1, *X.shape)
return _ppf(X, dist)[0, :, :]
def cdf(self, X, inference_sample_size=1000):
'''
returns the cumulative distribution function of X given a sample of distribution along all dimensions
'''
X = np.array(X)
dist = self.sample(inference_sample_size)
X, dist = self._reshape_X_and_dist(X, dist)
values = _quantile(X, dist)
return values[0, :, :]
def entropy(self, inference_sample_size = 1000, **entropy_kws):
samples = self.sample(inference_sample_size)
samples = samples.reshape(1,*samples.shape)
return _kde_entropy(samples,**entropy_kws)
###Output
_____no_output_____
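###Markdown
A minimal usage sketch for the Empirical class above (added for illustration; the cell is not executed and the variable names are made up):
###Code
# Illustrative usage of the Empirical class defined above
emp_data = np.random.randn(500, 2)
emp = Empirical().fit(emp_data)
emp_samples = emp.sample(5)          # resample 5 points from the stored data
emp_cdf = emp.cdf(np.zeros((3, 2)))  # per-dimension CDF values at 3 query points
emp_entropy = emp.entropy()          # KDE-based entropy estimate of the stored data
###Output
_____no_output_____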
###Markdown
RandomVariable Class -
###Code
#export
class RandomVariable():
'''
A container for distribution objects
'''
def __init__(self, default_dist = 'empirical', calculate_likelihood = False, verbose = False, keep_samples = False):
self._fitted_dists = {}
self.log_likelihood = []
self.default_dist = default_dist
self.verbose = verbose
self.keep_samples = keep_samples
self._samples = None
self.calculate_likelihood = calculate_likelihood
return
def _reset_fits(self,):
self._fitted_dists = {}
self.log_likelihood = []
self._samples = None
def __getitem__(self, item):
if item == 'best':
try:
dist = self._handle_dist_arg(item)
item = self._best_fit_alias
except AttributeError:
raise AttributeError('RandomVariable object has no "best" fit yet. Fit at least one density function through fit_dist method')
return self._fitted_dists[item][0]
def fit_new(self, data, dist = None, **fit_kwargs):
'''
fits given distributions
creates alias `best` for dist with maximum likelihood
'''
if dist is None:
dist = self.default_dist
if dist.__class__ in [list,tuple,set]:
pass
elif dist.__class__ == str:
dist = [dist]
else:
raise TypeError(f'dist should be str, list, tuple or set, not {dist.__class__}')
self.n_dim = 1 if len(data.shape) == 1 else data.shape[-1]
if self.keep_samples:
self._samples = data
self._fit_all(data ,dist, **fit_kwargs)
return self
def fit(self, data, dist = None, **fit_kwargs):
self._reset_fits()
self.fit_new(data, dist, **fit_kwargs)
return self
def _check_best(self):
if self.calculate_likelihood:
dists_aliases = list(self._fitted_dists)
dists_arr = np.array([i[1] for i in self._fitted_dists.values()])
best_fit_idx = np.argmax(dists_arr)
self._best_fit_alias = dists_aliases[best_fit_idx]
else:
self._best_fit_alias = None
return
def _fit_all(self, data, dists, **fit_kwargs):
#TODO: check for multiplicity in candidates aliases
for candidate in ctqdm(dists, verbose = self.verbose):
self._fit_dist(data, candidate, **fit_kwargs)
return self
def _fit_dist(self, data, dist, **fit_kwargs):
'''
fits a specified distribution through scipy.stats.rv_continuous.fit method
'''
alias, dist_name = self._handle_dist_names(dist)
alias, dist_class = self._get_dist_from_name(alias, dist_name)
if alias.lower() == 'best':
raise ValueError('"best" cannot be an alias for a distribution. its internally assgined to the best fit dist')
if alias == 'rv_histogram':
hist = np.histogram(data, bins = 'auto')#len(np.unique(data)))
dist = dist_class(hist)
            #only compute the log likelihood when requested, since log likelihood estimation can be expensive
if self.calculate_likelihood:
log_likelihood = np.sum(np.log(dist.pdf(data)))
else:
log_likelihood = None
self._fitted_dists = {**self._fitted_dists, **{alias:(dist,log_likelihood)}}
self.log_likelihood = list({**dict(self.log_likelihood), **{alias:log_likelihood}}.items())
elif not alias in ['kde','empirical']:
if self.n_dim > 1:
                raise ValueError('rv_continuous distributions are only available for 1d distributions. Use "kde" dist instead.')
params = dist_class.fit(data, **fit_kwargs)
            #only compute the log likelihood when requested, since log likelihood estimation can be expensive
if self.calculate_likelihood:
log_likelihood = np.sum(np.log(dist_class.pdf(data,*params)))
else:
log_likelihood = None
self._fitted_dists = {**self._fitted_dists, **{alias:(dist_class(*params),log_likelihood)}}
self.log_likelihood = list({**dict(self.log_likelihood), **{alias:log_likelihood}}.items())
else:
#fit kws passed to constructor in sklearn fashion
dist = dist_class(**fit_kwargs).fit(data)
            #only compute the log likelihood when requested, since log likelihood estimation can be expensive
if self.calculate_likelihood:
log_likelihood = np.sum(np.log(dist.pdf(data)))
else:
log_likelihood = None
self._fitted_dists = {**self._fitted_dists, **{alias:(dist,log_likelihood)}}
self.log_likelihood = list({**dict(self.log_likelihood), **{alias:log_likelihood}}.items())
#update 'best' alias
self._check_best()
return self
def _get_dist_from_name(self, alias, dist_name):
'''
handles dist_names. if str tries to get an attribute from scipy.stats accordingly
that is also instance of scipy.stats.rv_continuous
'''
if isinstance(dist_name,str):
if dist_name.lower() == 'kde':
alias = 'kde'
return (alias, KDE)
elif dist_name.lower() == 'empirical':
                alias = 'empirical'
return (alias, Empirical)
elif dist_name in dir(stats):
alias = dist_name
return (alias, getattr(stats,dist_name))
else:
                raise ValueError(f'dist must be a valid scipy.stats.rv_continuous subclass, not {dist_name}')
elif isinstance(dist_name, stats.rv_continuous):
return (alias, dist_name)
else:
raise ValueError(f'dist must be a valid scipy.stats.rv_continuous subclass or str, not {dist_name}')
def _handle_dist_names(self, candidate_value):
'''
checks the inputs in elements of "candidates"
returns a named tuple
'''
if isinstance(candidate_value, str):
return candidate_value, candidate_value
elif isinstance(candidate_value, tuple):
if not len(candidate_value) == 2:
raise ValueError(f'candidate named tuple must be of size 2, "{candidate_value}" has size {len(candidate_value)}')
if not isinstance(candidate_value[0], str):
raise ValueError(f'a candidate must be a str or named tuple (alias[str],<rv_continuous intance>), alias is of type {candidate_value[0].__class__}')
else:
return candidate_value
def sample(self, sample_size, dist = 'best', **kwargs):
'''
alias for rvs
'''
return self.rvs(size = sample_size, dist = dist, **kwargs)
def rvs(self, size, dist = 'best', **kwargs):
'''
sampling function
'''
dist = self._handle_dist_arg(dist)
samples = self[dist].rvs(size = size, **kwargs)
if len(samples.shape) == 1:
samples = samples.reshape(*samples.shape,1)
return samples
def _fix_inference_data_input(self, data):
if len(data.shape) == 1:
data = data.reshape(-1,1)
_assert_dim_2d(data)
assert data.shape[1] == self.n_dim, f'Expected data to have shape (n_samples, n_dims({self.n_dim})). got (n_samples, n_dims({data.shape[1]})).'
return data
def _fix_inference_output(self, data):
if len(data.shape) == 1:
return data.reshape(-1,1)
else:
return data
def cdf(self, data, dist = 'best', **cdf_kws):
dist = self._handle_dist_arg(dist)
#no need to fix in new ppf
#data = self._fix_inference_data_input(data)
samples = self[dist].cdf(data, **cdf_kws)
return self._fix_inference_output(samples)
def pdf(self, data, dist = 'best', **pdf_kws):
dist = self._handle_dist_arg(dist)
#no need to fix in new ppf
#data = self._fix_inference_data_input(data)
samples = self[dist].pdf(data, **pdf_kws)
return self._fix_inference_output(samples)
def evaluate(self, data, dist = 'best', **evaluate_kws):
'''alias for self.pdf'''
return self.pdf(data, dist, **evaluate_kws)
def predict(self, data, dist = 'best', **predict_kws):
'''alias for self.pdf'''
return self.pdf(data, dist, **predict_kws)
def ppf(self, data, dist = 'best', **ppf_kws):
'''
percent point function
'''
dist = self._handle_dist_arg(dist)
#no need to fix in new ppf
#data = self._fix_inference_data_input(data)
samples = self[dist].ppf(data)
return self._fix_inference_output(samples)
def entropy(self, dist = 'best', **entropy_kws):
dist = self._handle_dist_arg(dist)
return self[dist].entropy(**entropy_kws)
def _handle_dist_arg(self, dist):
if (self._best_fit_alias is None) and (dist == 'best') and (len(self._fitted_dists) > 1):
raise ValueError(f'No likelihood value have been calculated, so a dist other than "best" should be specified in the arguments or calculate_likelihood should be set to True in constructor')
if len(self._fitted_dists) == 1:
dist = list(self._fitted_dists)[0]
return dist
###Output
_____no_output_____
###Markdown
A RandomVariable class facilitates the process of fitting, from a data sample, multiple parametric distributions available in https://docs.scipy.org/doc/scipy/reference/stats.html, for example:
###Code
from time import time
import seaborn as sns
data = np.random.randn(10000)
RandomVariable().fit(data ,dist = 'rv_histogram')
#data = stats.lognorm.rvs(dist_args[0], loc = dist_args[1], scale = dist_args[2], size = 30)
dist_args = (2, 2, 2)  # lognorm shape, loc and scale used in this example (matches the printed output below)
dist = stats.lognorm(s = dist_args[0],loc = dist_args[1], scale = dist_args[2])
data = dist.rvs(size = [300,1])
rv = RandomVariable(calculate_likelihood = True)
rv.fit(data, dist = ['norm','halfnorm','lognorm'])
mle_samples = rv['best'].rvs([100,1])
#plot distributions
print('Dist args:')
print(dist_args)
print(rv.log_likelihood)
print('MLE fitted dist args:')
print(rv._best_fit_alias)
sns.distplot(data)
sns.distplot(mle_samples)
###Output
Dist args:
(2, 2, 2)
[('norm', -1495.198070736116), ('halfnorm', -1304.9106025727738), ('lognorm', -924.7109540095081)]
MLE fitted dist args:
lognorm
###Markdown
RVArray -
###Code
#export
#CREATE EMPIRICAL XXDIST CLASS (RV_HISTOGRAM IS TOO SLOW)
class CustomArray:
'''
An array that contains RandomVariable objects and facilitates method calls and getting attributes
'''
def __init__(self, data):
''' the constructor recieves a list of RandomVariable items'''
self._data = np.array(data)
@property
def data(self,):
return self._data
def __getattr__(self, attr):
'''
Custom __getattr__ method
'''
attr_list = []
for i in self.data:
attr_list.append(getattr(i,attr))
if all([callable(i) for i in attr_list]):
return CustomArray(attr_list)
else:
return np.array(attr_list)
def __call__(self, *args, broadcast_method = 'simple', **kwargs):
'''
        broadcast_method can take one of two values:
simple: the same args and kwargs are applied to all the objects inside RVArray
broadcast: for each (row) object in RVArray, the correspondent (same row) arg and kwarg is applied
'''
assert broadcast_method in ['simple','broadcast']
if broadcast_method == 'simple':
results = []
for i in self.data:
results.append(i(*args,**kwargs))
if all([isinstance(i,np.ndarray) for i in results]):
return np.array(results)
else:
return CustomArray(results)
elif broadcast_method == 'broadcast':
if args:
args_lens_check = [len(arg) == len(self.data) for arg in args]
assert all(args_lens_check)
if kwargs:
kwargs_lens_check = [len(arg) == len(self.data) for arg in kwargs.items()]
assert all(kwargs_lens_check)
#prepare args
if args:
_args = []
for arg in args:
_args.append([val for val in arg])
args = _args
#prepare kwargs
_kwargs = []
if kwargs:
_len = len(kwargs[list(kwargs)[0]])
for i in range(_len):
kwargs_i = {}
for key in kwargs:
kwargs_i[key] = kwargs[key][i]
_kwargs.append(kwargs_i)
kwargs = _kwargs
#run
if kwargs and args:
results = []
for i in range(len(self.data)):
results.append(self.data[i](*args[i],**kwargs[i]))
elif kwargs and not args:
results = []
for i in range(len(self.data)):
results.append(self.data[i](*args,**kwargs[i]))
elif not kwargs and args:
results = []
for i in range(len(self.data)):
results.append(self.data[i](*[arg[i] for arg in args],**kwargs))
else:
results = []
for i in range(len(self.data)):
results.append(self.data[i](*args,**kwargs))
#return values
if all([isinstance(i,np.ndarray) for i in results]):
return np.array(results)
else:
return CustomArray(results)
def __getitem__(self, *args):
if len(args) > 1:
return CustomArray(self.data[args])
else:
if args[0].__class__ == str:
return CustomArray([i[args[0]] for i in self.data])
else:
return self.data[args]
def __repr__(self):
return f'CustomArray({str(self.data)})'
def _broadcastable_kwargs(self, kwargs):
return {k:len(self.data)*[v] for k,v in kwargs.items()}
def _broadcastable_args(self, args):
return [len(self.data)*[v] for v in args]
def _broadcastable_arg(self, arg):
return len(self.data)*[arg]
class RVArray(CustomArray):
'''
    A container of RandomVariable objects. It allows easily accessing methods and attributes
    of multiple RandomVariable objects simultaneously.
    The `fit` and `fit_new` methods broadcast over all the contained RandomVariable objects.
'''
def __init__(self, rv_objects):
#skip assertion allowing duck typing
#assert all(isinstance(i, RandomVariable) for i in rv_objects), 'All rv_objects passed to cosntructor should be instances of skdensity.core.random_variavle.RandomVariable'
super().__init__(rv_objects)
return
def fit_new(self, data, dist = None, **dist_kwargs):
'''
Same as RandomVariable.fit_new.
can be applied in a broadcasted manner if shape (n_dists, n_samples, n_dims),
No broadcasting otherwise
'''
data = np.array(data)
#no broadcasting case
if len(data.shape) in (1,2):
super().__getattr__('fit_new')(data, dist, broadcast_method = 'simple',**dist_kwargs)
return self
#broadcasting case
dist_kwargs = self._broadcastable_kwargs(dist_kwargs)
dist = self._broadcastable_arg(dist)
super().__getattr__('fit_new')(data, dist, broadcast_method = 'broadcast',**dist_kwargs)
return self
def fit(self, data, dist = None, **dist_kwargs):
'''
Same as RandomVariable.fit
can be applied in a broadcasted manner if shape (n_dists, n_samples, n_dims),
No broadcasting otherwise.
'''
data = np.array(data)
#no broadcasting case
if len(data.shape) in (1,2):
super().__getattr__('fit')(data, dist, broadcast_method = 'simple',**dist_kwargs)
return self
#broadcasting case
dist_kwargs = self._broadcastable_kwargs(dist_kwargs)
dist = self._broadcastable_arg(dist)
super().__getattr__('fit')(data, dist, broadcast_method = 'broadcast',**dist_kwargs)
return self
def entropy(self, dist = 'best', **entropy_kws):
'''
Same as RandomVariable.entropy
'''
return super().__getattr__('entropy')(dist, broadcast_method = 'simple', **entropy_kws)
def ppf(self, data, dist = 'best', **ppf_kws):
'''
Same as RandomVariable.ppf
can be applied in a broadcasted manner if shape (n_dists, n_samples, n_dims),
No broadcasting otherwise.
'''
data = np.array(data)
#no broadcasting case
if len(data.shape) in (1,2):
return super().__getattr__('ppf')(data, dist, broadcast_method= 'simple',**ppf_kws)
ppf_kws = self._broadcastable_kwargs(ppf_kws)
dist = self._broadcastable_arg(dist)
return super().__getattr__('ppf')(data, dist, broadcast_method='broadcast', **ppf_kws)
def predict(self, data, dist = 'best', **predict_kws):
'''
Same as RandomVariable.predict
can be applied in a broadcasted manner if shape (n_dists, n_samples, n_dims),
No broadcasting otherwise.
'''
data = np.array(data)
#no broadcasting case
if len(data.shape) in (1,2):
return super().__getattr__('predict')(data, dist, broadcast_method= 'simple',**predict_kws)
dist = self._broadcastable_arg(dist)
predict_kws = self._broadcastable_kwargs(predict_kws)
return super().__getattr__('predict')(data, dist, broadcast_method = 'broadcast',**predict_kws)
def evaluate(self, data, dist = 'best', **evaluate_kws):
'''
Same as RandomVariable.evaluate
can be applied in a broadcasted manner if shape (n_dists, n_samples, n_dims),
No broadcasting otherwise.
'''
data = np.array(data)
#no broadcasting case
if len(data.shape) in (1,2):
return super().__getattr__('evaluate')(data, dist, broadcast_method= 'simple',**evaluate_kws)
dist = self._broadcastable_arg(dist)
evaluate_kws = self._broadcastable_kwargs(evaluate_kws)
return super().__getattr__('evaluate')(data, dist, broadcast_method = 'broadcast',**evaluate_kws)
def pdf(self, data, dist = 'best', **pdf_kws):
'''
Same as RandomVariable.pdf
can be applied in a broadcasted manner if shape (n_dists, n_samples, n_dims),
No broadcasting otherwise.
'''
data = np.array(data)
#no broadcasting case
if len(data.shape) in (1,2):
return super().__getattr__('pdf')(data, dist, broadcast_method= 'simple',**pdf_kws)
dist = self._broadcastable_arg(dist)
pdf_kws = self._broadcastable_kwargs(pdf_kws)
return super().__getattr__('pdf')(data, dist, broadcast_method = 'broadcast',**pdf_kws)
def cdf(self, data, dist = 'best', **cdf_kws):
'''
Same as RandomVariable.cdf
can be applied in a broadcasted manner if shape (n_dists, n_samples, n_dims),
No broadcasting otherwise.
'''
data = np.array(data)
#no broadcasting case
if len(data.shape) in (1,2):
return super().__getattr__('cdf')(data, dist, broadcast_method = 'simple',**cdf_kws)
#broadcasting case
cdf_kws = self._broadcastable_kwargs(cdf_kws)
dist = self._broadcastable_arg(dist)
return super().__getattr__('cdf')(data, dist, broadcast_method = 'broadcast',**cdf_kws)
def rvs(self, size = 1, dist = 'best', **kwargs):
'''
Same as RandomVariable.rvs
'''
return super().__getattr__('rvs')(size, dist, broadcast_method = 'simple',**kwargs)
def sample(self, sample_size = 1, dist = 'best', **kwargs):
'''
Same as RandomVariable.sample
'''
return super().__getattr__('sample')(sample_size, dist, broadcast_method = 'simple',**kwargs)
###Output
_____no_output_____
###Markdown
A RVArray is a data structure that facilitates handling multiple RandomVariable objects, accessing RandomVariable methods and attributes in a vectorized fashion
###Code
data = mle_samples
rv1 = RandomVariable(keep_samples = False).fit(data)
rv2 = RandomVariable(keep_samples = True).fit(data)
rv_arr = RVArray([rv1,rv2])#.fit(mle_samples, dist = 'rv_histogram')
#rv_arr.fit(mle_samples,'norm')
rv_arr.predict(data)
rv_arr.pdf(data)
rv_arr.ppf([0.5,1])
rv_arr.cdf(data)
rv_arr.sample()
rv_arr.rvs(10).shape
#hide
# kde methods performance evaluation
from time import time
from tqdm import tqdm
n_samples = 400
dist = stats.norm(loc = 20, scale = 10)
tree_kde = [[],[]]
fft_kde = [[],[]]
adapatative_kde = [[],[]]
entropies = []
n_samples_list = []
for i in tqdm([*range(50)]):
#samples = np.stack([dist.rvs(n_samples),dist.rvs(n_samples),dist.rvs(n_samples),dist.rvs(n_samples),dist.rvs(n_samples),dist.rvs(n_samples),
# ], axis = -1)
n_samples = int(max(2,10**(i/12)))
n_samples_list.append(n_samples)
samples = dist.rvs(n_samples)
samples = samples if len(samples.shape) > 1 else samples.reshape(-1,1)
bimodal_msk = np.random.randint(0,2,samples.shape[0]).astype(bool)
samples[bimodal_msk] = -abs(samples[bimodal_msk])
n_dim = samples.shape[-1]
resolution = int(10000**(1/n_dim))
entropies.append(dist.entropy()*n_dim)
if 0:#resolution**n_dim > 100000:
tic = time()
kde = kdepy.FFTKDE(bw = 'ISJ')
bw = [kde.bw(samples[:,i:i+1]) for i in range(samples.shape[-1])]
kde = kdepy.TreeKDE(bw = bw).fit(samples)
evaluate = kde.evaluate(samples)
entr = np.mean(-np.log(evaluate[1]))
toc = time()
tree_kde[0].append(entr)
tree_kde[1].append(toc-tic)
#kde_values = evaluate[0]
#kde_pdf = evaluate[1]
if 1:
tic = time()
kde = kdepy.FFTKDE(bw = 'scott')
bw = [kde.bw(samples[:,i:i+1]) for i in range(samples.shape[-1])]
kde = kdepy.FFTKDE(bw = bw).fit(samples)
evaluate = kde.evaluate(resolution)
#kde_values = evaluate[0]
kde_pdf = evaluate[1]
#idxs = euclidean_distances(kde.data,kde_values).argmin(axis = 1)
#kde_pdf = kde_pdf[idxs]
#kde_values = kde_values[idxs]
kde_pdf = np.random.choice(kde_pdf,p = kde_pdf/kde_pdf.sum(), size = 1000)
entr = np.mean(-np.log(kde_pdf))
toc = time()
fft_kde[0].append(entr)
fft_kde[1].append(toc-tic)
if 1:
tic = time()
g = awkde.GaussianKDE(glob_bw = 'scott',alpha = 0.5)
g.fit(samples)
entr = np.mean(-np.log(g.predict(g.sample(100))))
toc = time()
adapatative_kde[0].append(entr)
adapatative_kde[1].append(toc-tic)
#dist.entropy(), np.mean(-np.log(kde_pdf)/n_dim), resolution
#hide
import matplotlib.pyplot as plt
#sns.distplot((np.array(tree_kde[0]) - np.array(entropies))/np.array(entropies))
sns.distplot((np.array(fft_kde[0]) - np.array(entropies))/np.array(entropies), label = 'KDEpy')
sns.distplot((np.array(adapatative_kde[0]) - np.array(entropies))/np.array(entropies), label = 'awkde')
plt.legend()
np.array(entropies).mean()
#hide
#sns.distplot(np.log10(np.array(tree_kde[1])[np.array(tree_kde[1])>0]))
plt.scatter(np.log10(np.array(n_samples_list))[np.array(adapatative_kde[1])>0],np.log10(np.array(adapatative_kde[1])[np.array(adapatative_kde[1])>0]), label = 'awkde')
plt.scatter(np.log10(np.array(n_samples_list))[np.array(fft_kde[1])>0],np.log10(np.array(fft_kde[1])[np.array(fft_kde[1])>0]), label = 'KDEpy')
plt.legend()
plt.xlabel('log(n_samples)')
plt.ylabel('log(time in seconds)')
#sns.distplot(np.log10(np.array(fft_kde[1])[np.array(fft_kde[1])>0]))
#hide
if np.array(samples).shape[1] == 1:
#plt.scatter(samples[:,0],dist.pdf(samples))
#plt.scatter(samples[:,0],kde.evaluate(samples), color = 'r')
plt.scatter(samples[:,0],g.predict(samples))
plt.scatter(*kde.evaluate(256))
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from nbdev.export import notebook2script
notebook2script()
###Output
Converted 01_ensemble.ipynb.
Converted 02_core.random_variable.ipynb.
Converted 03_utils.ipynb.
Converted 04_metrics.ipynb.
Converted 05_neighbors.ipynb.
Converted 06_kde_baesyan_nets.ipynb.
Converted index.ipynb.
|
docs/_downloads/4d4055f7d7a9f9d94f76cdb0c480c092/calculate_band_center.ipynb | ###Markdown
Calculating band center using vdos. This example shows how to calculate band centers from the projected density of states.
###Code
import os
from pdos_overlap import get_example_data
from pdos_overlap import VASP_DOS
from pdos_overlap.plotting_tools import set_figure_settings
###Output
_____no_output_____
###Markdown
Load DOSCAR file. First we will get the example data, load a DOSCAR file and use it to instantiate a VASP_DOS object.
###Code
set_figure_settings('paper')
example_path = get_example_data()
DOSCAR = os.path.join(example_path, 'C2H4/DOSCAR')
PDOS = VASP_DOS(DOSCAR)
###Output
_____no_output_____
###Markdown
Calculate and print band centers. This method uses the site and spin orbital projected density. It sums the spin orbital densities to get energy sub-level band centers.
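###Markdown
For reference (this equation is the standard definition and is not stated explicitly in the original), the band center of an orbital is the first moment of its projected density of states $\rho(E)$ up to the chosen maximum energy: $\epsilon_{band} = \frac{\int E\,\rho(E)\,dE}{\int \rho(E)\,dE}$.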
###Code
orbitals = [key for key in PDOS.orbital_dictionary.keys() if 's' in key or 'p' in key]
band_centers = PDOS.get_band_center([0], orbital_list=orbitals\
, max_energy=PDOS.e_fermi)
for count, orbital in enumerate(orbitals):
print(orbital + ' band center :' + str(band_centers[count]))
###Output
_____no_output_____ |
Landslide_Data__Analysis_Youtube.ipynb | ###Markdown
This notebook contains the following:
1. Data cleansing using NLP techniques as well as hard-coded filters to remove irrelevant events
2. Data analysis and clustering using a pre-trained Sentence Transformer model and a KMeans approach
3. A keyword extraction model using the YAKE library, which extracts and ranks important events after merging all the video titles; this will help us to do focused crawling
4. Keyword extraction using RAKE to find important keywords per cluster
These keyword rankings may further be used to do focused crawling to find out more facts about each event by crawling through different newspaper articles.
Note: This notebook was automated by scheduling it on a daily basis on deepnote.com
###Code
# Importing Essential Libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import string
import datetime
x = datetime.datetime.now()
input_csv = f'/datasets/outputyoutube/Landslide_Project/Youtube_General_{x.strftime("%B%d")}.csv'
#df = pd.read_csv('Youtube_General_July14.csv')
df= pd.read_csv(input_csv)
df.shape
df=df.drop_duplicates(subset=['Video_ID',
'Video_Title'])
df.shape
###Output
_____no_output_____
###Markdown
Data Cleaning and preprocessing
###Code
# Removing Irrelevant Data ( Using Hard Coding )
df_updated = df[df["Video_Title"].str.contains("BJP|Switzerland|assassin|battles|czech|fingerstyle guitar|mobile gameplay|Germany|test championship|election|🇩🇪|Elections|vote|child labor agents|child traffickers|Top 10 Disastrous Floods in India|quality product|Shangzhi|New Zealand|Aryan Migration|Learn with BYJU'S|Waterpark in Brazil|Trump mispronounces|PM Modi|Park of Poland|Important Personalities of India|FIVE wicket haul|Covid 19 vaccination|Seaplane arrives|Funny Indoor Activity|Real Royalty|Fun Story|Dispute between India|Movie|CAR vs.|Guru Ka Langar|Voter|Laxmikant Jaybhaye|Nigeria's|Nigeria|Corona Vaccination|Hindi Dubbed Movies|job online|MUPPADAI TRAINING ACADEMY|kedarnath Baba ki|Hidden place|Gangtok|Indonesia|Japan earthquake|India-China Dispute|10 Beautiful Places|Article 370|KFC|Wazwan|Pakistan|Aarti Tikoo|Kashmiri Pandits EXODUS|Bollywood|Paradise on Earth|SOHNIYE|IMPORTANT TOURIST DESTINATIONS|NEW KITCHEN|Students Back To Books|GREEN SHAAG|EASY AND TASTY|ventilators|fresh snowfall|organic|vegetables|Dam Failures|Ball Toys|in Canada|beautiful view|Dream Journey|UNSC|Afghanistan")==False]
df_updated.shape
emojis = {':)': 'smile', ':-)': 'smile', ';d': 'wink', ':-E': 'vampire', ':(': 'sad',
':-(': 'sad', ':-<': 'sad', ':P': 'raspberry', ':O': 'surprised',
':-@': 'shocked', ':@': 'shocked',':-$': 'confused', ':\\': 'annoyed',
':#': 'mute', ':X': 'mute', ':^)': 'smile', ':-&': 'confused', '$_$': 'greedy',
'@@': 'eyeroll', ':-!': 'confused', ':-D': 'smile', ':-0': 'yell', 'O.o': 'confused',
'<(-_-)>': 'robot', 'd[-_-]b': 'dj', ":'-)": 'sadsmile', ';)': 'wink',
';-)': 'wink', 'O:-)': 'angel','O*-)': 'angel','(:-D': 'gossip', '=^.^=': 'cat'
,'🌏': '','🔔': '','👈':'','✔':'','🔥':''}
%%capture
import re
import nltk
import string
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
nltk.download('stopwords')
nltk.download('punkt')
nltk.download('wordnet')
tokens = word_tokenize(str(df['Description']))
lemmatizer = WordNetLemmatizer()
!pip install ml2en
#!pip install mtranslate
!pip install google_trans_new
# Converting malyalam to english.
from ml2en import ml2en
converter = ml2en()
#from mtranslate import translate
from google_trans_new import google_translator
translator = google_translator()
def translate(text) :
text =str(text)
text = converter.transliterate(text)
#text = translate(text)
translate_text = translator.translate(text,lang_tgt='en')
return translate_text
try :
df_updated['Video_Title'] = df_updated['Video_Title'].apply(translate)
except :
pass
df_updated.head()
import sys
sys.setrecursionlimit(10000)
def TextClean_Final(text):
text =str(text)
text = text.lower()
text = re.sub(r'@[a-z0-9_]\S+' , '', text)
text = re.sub(r'&[a-z0-9_]\S+','',text)
text = re.sub(r'rt[\s]+', '', text)
text = re.sub(r'\$', '', text)
text = re.sub(r'rt+', '', text)
text = re.sub(r'https?:?\/\/\S+', '', text)
for emoji in emojis.keys():
text = text.replace(emoji, "" + emojis[emoji])
tokens = word_tokenize(text)
table = str.maketrans('', '', string.punctuation)
stripped = [w.translate(table) for w in tokens]
text = [word for word in stripped if word.isalpha()]
stop_words = set(stopwords.words('english'))
text = [w for w in text if not w in stop_words]
text = [lemmatizer.lemmatize(word) for word in text]
return ','.join(text)
df_updated['clean_title'] = df_updated['Video_Title'].apply(TextClean_Final)
df_updated['clean_description'] = df_updated['Description'].apply(TextClean_Final)
len(df_updated['Description'].unique())
df_updated['clean_title']
df_updated['Video_Title']
output_date = f'Youtube_General_{x.strftime("%B%d")}.csv'
df_updated.to_csv(f'/datasets/outputyoutube/Landslide_Project/{output_date}')
print("The final youtube data is saved as : "+output_date)
###Output
The final youtube data is saved as : Youtube_General_July14.csv
###Markdown
Data Analysis and Clustering
###Code
%%capture
!pip install -U sentence-transformers
from apiclient.discovery import build
import argparse
import csv
import pandas as pd
from sentence_transformers import SentenceTransformer
from sklearn.cluster import KMeans
import datetime as dt
from matplotlib import pyplot as plt
import math
%%capture
embedder = SentenceTransformer('distilbert-base-nli-stsb-mean-tokens') # Distilbert gives a nice balance between speed and performance
corpus_t=df_updated['Video_Title'][0:50].values
#corpus_d=df_updated["Description"]
corpus_embeddings = embedder.encode(corpus_t)
from sklearn.cluster import KMeans
num_clusters = 4
clustering_model = KMeans(n_clusters=num_clusters)
clustering_model.fit(corpus_embeddings)
cluster_assignment = clustering_model.labels_
clustered_sentences = [[] for i in range(num_clusters)]
for sentence_id, cluster_id in enumerate(cluster_assignment):
clustered_sentences[cluster_id].append(corpus_t[sentence_id])
for i, cluster in enumerate(clustered_sentences):
print("Cluster ", i+1)
print(cluster)
print("")
wcss = []
for i in range(1, 25):
kmeans = KMeans(n_clusters=i, init='k-means++', max_iter=300, n_init=10, random_state=0)
kmeans.fit(corpus_embeddings)
wcss.append(kmeans.inertia_)
plt.plot(range(1, 25), wcss)
plt.title('Elbow Method')
plt.xlabel('Number of clusters')
plt.ylabel('WCSS')
plt.show()
wcss = []
for i in range(1, 15):
kmeans = KMeans(n_clusters=i, init='k-means++', max_iter=300, n_init=10, random_state=0)
kmeans.fit(corpus_embeddings)
wcss.append(kmeans.inertia_ + 500*math.log(i))
plt.plot(range(1, 15), wcss)
plt.title('Elbow Method')
plt.xlabel('Number of clusters')
plt.ylabel('WCSS')
plt.show()
import numpy as np
from scipy.cluster import hierarchy
from scipy.cluster.hierarchy import dendrogram
from sklearn.datasets import load_iris
from sklearn.cluster import AgglomerativeClustering
Z = hierarchy.linkage(corpus_embeddings, 'single')
plt.figure()
dn = hierarchy.dendrogram(Z)
Z = hierarchy.linkage(corpus_embeddings, 'complete')
plt.figure()
dn = hierarchy.dendrogram(Z)
clustered_sentences[1]
# Most of the landslides are of uttrakhand, so clustering is working
###Output
_____no_output_____
###Markdown
Keyword Extraction to recommend queries for focused crawler
###Code
%%capture
!pip install git+https://github.com/LIAAD/yake
import yake
language = "en"
max_ngram_size = 3
deduplication_thresold = 0.9
deduplication_algo = 'seqm'
windowSize = 1
numOfKeywords = 20
kw_extractor = yake.KeywordExtractor()
# Merging the description
# text = []
# irrelevant =[]
# for i in range(len(df_updated)) :
# try :
# temp = df_updated['Description'][i]
# text.append(temp)
# except :
# irrelevant.append("")
#text = ''.join(str(text))
# Merging Video Titles to rank the most important events
text = []
irrelevant =[]
for i in range(len(df_updated)) :
try :
temp = str(df_updated['Video_Title'][i])
text.append(temp)
except :
irrelevant.append("")
text = ''.join(str(text))
# Yake Library Paper : https://repositorio.inesctec.pt/bitstream/123456789/7623/1/P-00N-NF5.pdf
kw_extractor = yake.KeywordExtractor()
keywords = kw_extractor.extract_keywords(text)
###Output
_____no_output_____
###Markdown
We can clearly notice below that the important landslide events captured by our keyword extractor are ranked
###Code
for kw in keywords:
print(kw)
###Output
('Himachal Pradesh Landslide', 0.00020404189251219105)
('Himachal flash flood', 0.0002857186874773786)
('Himachal Pradesh', 0.0004352167780294632)
('flash flood', 0.0004832808349338941)
('landslide hits Himachal', 0.0005518588024946492)
('Dharamsala flash floods', 0.0006017059557563751)
('Kangra Mein Landslide', 0.0006825772797378637)
('London flash floods', 0.0007412778985560565)
('Himachal Pradesh Floods', 0.0007421977345009992)
('hits Himachal Kangra', 0.0009608979621055371)
('landslide', 0.0009832376589994554)
('Floods Hit Himachal', 0.0010051718230212336)
('Himachal Kangra', 0.00101000047464106)
('triggers flash flood', 0.0011255869035312132)
('Uttarakhand Flash Flood', 0.0011674492472973361)
('Landslide viral video', 0.0013598251574753915)
('Uttarakhand landslide viral', 0.0014870553878369157)
('Himachal', 0.0015624931491615567)
('Rescue ops underway', 0.0016137046936749325)
('flood', 0.0016976368179103811)
###Markdown
Finding ranks/recommendations using the RAKE library through our clustering
###Code
%%capture
!pip install rake_nltk
###Output
_____no_output_____
###Markdown
This is useful when we want to capture important events per cluster
###Code
import string
from rake_nltk import Rake
extracts=[[] for i in range(num_clusters)]
r = Rake() # Uses stopwords for english from NLTK, and all punctuation characters.
r = Rake(min_length=1, max_length=5)
for i in range(len(clustered_sentences)):
text=' '.join(clustered_sentences[i])
r.extract_keywords_from_text(text)
print("Cluster",i,": ",r.get_ranked_phrases()[0:3]) # To get keyword phrases ranked highest to lowest.
extracts[i] = ' '.join(r.get_ranked_phrases())
###Output
Cluster 0 : ["disastrous ': inside trump ’", 'six houses swept away', 'tv9news kalsi chakrata road']
Cluster 1 : ['dehradun vikasnagar river overflow', 'delhi ncr himachal pradesh landslide', '‘ flash flood ’']
Cluster 2 : ['five still missing massive landslides', 'floods rescue ops underway', 'catch news himachal pradesh']
Cluster 3 : ['veling priol goankarwada himachal pradesh', 'keralakaumudi landslide lyrical group', 'himachal pradesh huge landslide']
Cluster 4 : ['bura haal ho gaya mera', 'relief ka kaam jari', 'bachaao yarr isse koi']
###Markdown
Archives - OLD CODE
###Code
# pip install date-extractor
# def ExtractDate(tweet):
# tweet =str(tweet)
# tweet = tweet.lower()
# #tweet = re.sub(r'@[a-z0-9_]\S+' , '', tweet)
# match = re.search(r"(\d+)[-.\/](\d+)[-.\/](\d+)", tweet)
# date = extract_dates(tweet)
# date = str(date)
# return date
#ddf['dates_extracted'] = ddf['Description'].apply(ExtractDate)
# def Keyword_extract(text):
# most_imp=[]
# kw_extractor = yake.KeywordExtractor()
# keywords = kw_extractor.extract_keywords(text)
# for kw in keywords:
# most_imp.append(kw)
# break
# return most_imp
###Output
_____no_output_____
###Markdown
Hindi to English transliteration
###Code
#!pip install mtranslate
#from mtranslate import translate
# to_translate = 'नमस्ते कैसी हो तुम?'
# print(translate(to_translate))
# print(translate(to_translate, 'en'))
# print(translate(to_translate, 'hi'))
%%capture
!pip install google_trans_new
from google_trans_new import google_translator
translator = google_translator()
translate_text = translator.translate('हेलो चीनी',lang_tgt='en')
print(translate_text)
###Output
_____no_output_____ |
08-Hacking-Deep-Learning/02-iterative-target-attack.ipynb | ###Markdown
8.2 Attacking with a Chosen Target (Targeted Adversarial Attack)
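###Markdown
In equation form (written here for reference; it mirrors the update performed in the loop below), the iterative targeted attack ascends the target-class score $s_{target}$ of the network by taking normalized gradient steps on the input image: $x_{t+1} = x_t + \alpha \, \frac{\nabla_x s_{target}(x_t)}{\lVert \nabla_x s_{target}(x_t) \rVert}$, where $\alpha$ is the learning rate.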
###Code
import torch
import torchvision.models as models
import torchvision.transforms as transforms
import numpy as np
from PIL import Image
import json
%matplotlib inline
import matplotlib.pyplot as plt
torch.manual_seed(1)
CLASSES = json.load(open('./imagenet_samples/imagenet_classes.json'))
idx2class = [CLASSES[str(i)] for i in range(1000)]
class2idx = {v:i for i,v in enumerate(idx2class)}
vgg16 = models.vgg16(pretrained=True)
vgg16.eval()
print(vgg16)
softmax = torch.nn.Softmax()
img_transforms = transforms.Compose([transforms.Scale((224, 224), Image.BICUBIC),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
def norm(x):
return 2.*(x/255.-0.5)
def unnorm(x):
un_x = 255*(x*0.5+0.5)
un_x[un_x > 255] = 255
un_x[un_x < 0] = 0
un_x = un_x.astype(np.uint8)
return un_x
img = Image.open('imagenet_samples/chihuahua.jpg')
img_tensor = img_transforms(img)
plt.figure(figsize=(10,5))
plt.imshow(np.asarray(img))
img_tensor.requires_grad_(True)
out = vgg16(img_tensor.unsqueeze(0))
probs = softmax(out)
cls_idx = np.argmax(out.data.numpy())
print(str(cls_idx) + ":" + idx2class[cls_idx] + ":" + str(out.data.numpy()[0][cls_idx]) + ":" + str(probs.data.numpy()[0][cls_idx]))
learning_rate = 1
img = Image.open('imagenet_samples/chihuahua.jpg')
fake_img_tensor = img_transforms(img)
img_var_fake = torch.autograd.Variable(fake_img_tensor.unsqueeze(0), requires_grad=True)
fake_class_idx = class2idx['street sign']
for i in range(100):
out_fake = vgg16(img_var_fake)
_, out_idx = out_fake.data.max(dim=1)
if out_idx.numpy() == fake_class_idx:
print('Fake generated in ' + str(i) + ' iterations')
break
out_fake[0,fake_class_idx].backward()
img_var_fake_grad = img_var_fake.grad.data
img_var_fake.data += learning_rate*img_var_fake_grad/img_var_fake_grad.norm()
img_var_fake.grad.data.zero_()
probs_fake = softmax(out_fake)
print(str(fake_class_idx) + ":" + idx2class[fake_class_idx] + ":" + str(out_fake.data.numpy()[0][fake_class_idx]) + ":" + str(probs_fake.data.numpy()[0][fake_class_idx]))
plt.figure(figsize=(10,5))
plt.subplot(1,3,1)
plt.imshow(unnorm(img_tensor.detach().numpy()).transpose(1,2,0))
plt.subplot(1,3,2)
plt.imshow(unnorm(img_var_fake.data.detach().numpy()[0]).transpose(1,2,0))
plt.subplot(1,3,3)
plt.imshow(unnorm(img_var_fake.data.detach().numpy()[0] - img_tensor.detach().numpy()).transpose(1,2,0))
###Output
_____no_output_____ |
training_pipeline_research.ipynb | ###Markdown
Finding Optimal Hyperparameters using Hyperas and Hyperopt
###Code
def train_cnn_model(X_train, y_train, X_test, y_test):
# CNN model
model = Sequential()
model.add(Conv2D({{choice([32, 64, 128])}}, (3, 3), padding='same', activation='relu', input_shape=(28,28,1)))
model.add(Conv2D({{choice([32, 64, 128])}}, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout({{uniform(0, 1)}}))
model.add(Conv2D({{choice([64, 128, 256])}}, (3, 3), padding='same', activation='relu'))
model.add(Conv2D({{choice([64, 128, 256])}}, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout({{uniform(0, 1)}}))
model.add(Conv2D({{choice([64, 128, 256])}}, (3, 3), padding='same', activation='relu'))
model.add(Conv2D({{choice([64, 128, 256])}}, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout({{uniform(0, 1)}}))
model.add(Flatten())
model.add(Dense({{choice([128, 256, 512, 1024])}}))
model.add(Activation({{choice(['relu', 'sigmoid'])}}))
model.add(Dropout({{uniform(0, 1)}}))
model.add(Dense(36, activation='softmax'))
model.compile(loss={{choice(['categorical_crossentropy'])}}, metrics=['accuracy'], optimizer={{choice(['rmsprop', 'adam', 'sgd'])}})
history = model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs={{choice([20, 50, 100])}}, batch_size={{choice([16, 32, 64, 128])}}, verbose=2)
score, acc = model.evaluate(X_test, y_test, verbose=2)
print('Test accuracy: {}'.format(acc))
return {'loss': -acc, 'status': STATUS_OK, 'model': model}
%%time
best_run, best_model = optim.minimize(model=train_cnn_model,
data=get_train_test_data,
algo=tpe.suggest,
max_evals=20,
trials=Trials(),
notebook_name='training_pipeline_research',)
X_train, y_train, X_test, y_test = get_train_test_data()
print("Evaluating best performing model...")
print(best_model.evaluate(X_test, y_test))
print("***********************************************")
print("Best performing model hyper-parameters: ")
print(best_run)
print("***********************************************")
###Output
>>> Imports:
#coding=utf-8
try:
import pandas as pd
except:
pass
try:
import numpy as np
except:
pass
try:
import cv2
except:
pass
try:
import os
except:
pass
try:
import pickle
except:
pass
try:
from sklearn.model_selection import train_test_split
except:
pass
try:
from sklearn.preprocessing import OneHotEncoder
except:
pass
try:
from keras.models import Sequential
except:
pass
try:
from keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Flatten, Activation
except:
pass
try:
from matplotlib import pyplot as plt
except:
pass
try:
from hyperopt import fmin, tpe, hp, STATUS_OK, Trials
except:
pass
try:
from hyperas import optim
except:
pass
try:
from keras.models import Sequential
except:
pass
try:
from keras.layers import Dense, Dropout
except:
pass
try:
from keras.optimizers import Adam, SGD
except:
pass
>>> Hyperas search space:
def get_space():
return {
'Conv2D': hp.choice('Conv2D', [32, 64, 128]),
'Conv2D_1': hp.choice('Conv2D_1', [32, 64, 128]),
'Dropout': hp.uniform('Dropout', 0, 1),
'Conv2D_2': hp.choice('Conv2D_2', [64, 128, 256]),
'Conv2D_3': hp.choice('Conv2D_3', [64, 128, 256]),
'Dropout_1': hp.uniform('Dropout_1', 0, 1),
'Conv2D_4': hp.choice('Conv2D_4', [64, 128, 256]),
'Conv2D_5': hp.choice('Conv2D_5', [64, 128, 256]),
'Dropout_2': hp.uniform('Dropout_2', 0, 1),
'Dense': hp.choice('Dense', [128, 256, 512, 1024]),
'Activation': hp.choice('Activation', ['relu', 'sigmoid']),
'Dropout_3': hp.uniform('Dropout_3', 0, 1),
'loss': hp.choice('loss', ['categorical_crossentropy']),
'optimizer': hp.choice('optimizer', ['rmsprop', 'adam', 'sgd']),
'epochs': hp.choice('epochs', [20, 50, 100]),
'batch_size': hp.choice('batch_size', [16, 32, 64, 128]),
'epochs_1': hp.choice('epochs_1', [1]),
'batch_size_1': hp.choice('batch_size_1', [16]),
}
>>> Data
1:
2: # Create dictionary for alphabets and related numbers
3: alphabets_dic = {0: 'A', 1: 'B', 2: 'C', 3: 'D', 4: 'E', 5: 'F', 6: 'G', 7: 'H', 8: 'I', 9: 'J',
4: 10: 'K', 11: 'L', 12: 'M', 13: 'N', 14: 'O', 15: 'P', 16: 'Q', 17: 'R', 18: 'S', 19: 'T',
5: 20: 'U', 21: 'V', 22: 'W', 23: 'X', 24: 'Y', 25: 'Z', 26: '0', 27: '1', 28: '2', 29:'3',
6: 30: '4', 31: '5', 32: '6', 33: '7', 34: '8', 35: '9'}
7:
8: alphabets = ['0','1','2','3','4','5','6','7','8','9','A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z']
9: dataset_classes = []
10:
11: for cls in alphabets:
12: dataset_classes.append([cls])
13:
14: # Load dataset
15: d = open("./data/alternate_data.pickle","rb")
16: l = open("./data/alternate_data_labels.pickle","rb")
17: data = pickle.load(d)
18: labels = pickle.load(l)
19: label_list = []
20: for l in labels:
21: label_list.append([l])
22:
23: # One hot encoding format for output
24: ohe = OneHotEncoder(handle_unknown='ignore', categorical_features=None)
25: ohe.fit(dataset_classes)
26: labels_ohe = ohe.transform(label_list).toarray()
27:
28: data = np.array(data)
29: labels = np.array(labels)
30:
31: # Split the data
32: X_train, X_test, y_train, y_test = train_test_split(data, labels_ohe, test_size=0.20, random_state=42)
33: print(X_train.shape)
34: print(X_test.shape)
35: print(y_train.shape)
36: print(y_test.shape)
37:
38: X_train = X_train.reshape(X_train.shape[0],28,28,1)
39: X_test = X_test.reshape(X_test.shape[0],28,28,1)
40: print(X_train.shape)
41: print(X_test.shape)
42: print(y_train.shape)
43: print(y_test.shape)
44:
45:
46:
47:
>>> Resulting replaced keras model:
1: def keras_fmin_fnct(space):
2:
3: # CNN model
4: model = Sequential()
5: model.add(Conv2D(space['Conv2D'], (3, 3), padding='same', activation='relu', input_shape=(28,28,1)))
6: model.add(Conv2D(space['Conv2D_1'], (3, 3), activation='relu'))
7: model.add(MaxPooling2D(pool_size=(2, 2)))
8: model.add(Dropout(space['Dropout']))
9:
10: model.add(Conv2D(space['Conv2D_2'], (3, 3), padding='same', activation='relu'))
11: model.add(Conv2D(space['Conv2D_3'], (3, 3), activation='relu'))
12: model.add(MaxPooling2D(pool_size=(2, 2)))
13: model.add(Dropout(space['Dropout_1']))
14:
15: model.add(Conv2D(space['Conv2D_4'], (3, 3), padding='same', activation='relu'))
16: model.add(Conv2D(space['Conv2D_5'], (3, 3), activation='relu'))
17: model.add(MaxPooling2D(pool_size=(2, 2)))
18: model.add(Dropout(space['Dropout_2']))
19:
20: model.add(Flatten())
21: model.add(Dense(space['Dense']))
22: model.add(Activation(space['Activation']))
23: model.add(Dropout(space['Dropout_3']))
24: model.add(Dense(36, activation='softmax'))
25:
26: model.compile(loss=space['loss'], metrics=['accuracy'], optimizer=space['optimizer'])
27: history = model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=space['epochs'], batch_size=space['batch_size'], verbose=2)
28: # history = model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=space['epochs_1'], batch_size=space['batch_size_1'], verbose=2)
29:
30: score, acc = model.evaluate(X_test, y_test, verbose=2)
31: print('Test accuracy: {}'.format(acc))
32: return {'loss': -acc, 'status': STATUS_OK, 'model': model}
33:
(29260, 28, 28)
(7316, 28, 28)
(29260, 36)
(7316, 36)
(29260, 28, 28, 1)
(7316, 28, 28, 1)
(29260, 36)
(7316, 36)
Epoch 1/50
1829/1829 - 69s - val_accuracy: 0.4282 - accuracy: 0.0534 - loss: 3.7397 - val_loss: 2.4687
Epoch 2/50
1829/1829 - 69s - val_accuracy: 0.7112 - accuracy: 0.2440 - loss: 2.5559 - val_loss: 1.2068
Epoch 3/50
1829/1829 - 72s - val_accuracy: 0.8382 - accuracy: 0.4386 - loss: 1.7899 - val_loss: 0.7565
Epoch 4/50
1829/1829 - 71s - val_accuracy: 0.8856 - accuracy: 0.5525 - loss: 1.4281 - val_loss: 0.5548
Epoch 5/50
1829/1829 - 73s - val_accuracy: 0.8984 - accuracy: 0.6207 - loss: 1.2171 - val_loss: 0.4571
Epoch 6/50
1829/1829 - 72s - val_accuracy: 0.9176 - accuracy: 0.6748 - loss: 1.0660 - val_loss: 0.3832
Epoch 7/50
1829/1829 - 72s - val_accuracy: 0.9292 - accuracy: 0.7080 - loss: 0.9645 - val_loss: 0.3246
Epoch 8/50
1829/1829 - 71s - val_accuracy: 0.9329 - accuracy: 0.7382 - loss: 0.8832 - val_loss: 0.2843
Epoch 9/50
1829/1829 - 72s - val_accuracy: 0.9291 - accuracy: 0.7598 - loss: 0.8124 - val_loss: 0.2695
Epoch 10/50
1829/1829 - 72s - val_accuracy: 0.9396 - accuracy: 0.7767 - loss: 0.7664 - val_loss: 0.2546
Epoch 11/50
1829/1829 - 77s - val_accuracy: 0.9381 - accuracy: 0.7887 - loss: 0.7143 - val_loss: 0.2459
Epoch 12/50
1829/1829 - 69s - val_accuracy: 0.9481 - accuracy: 0.7974 - loss: 0.6888 - val_loss: 0.2124
Epoch 13/50
1829/1829 - 70s - val_accuracy: 0.9500 - accuracy: 0.8106 - loss: 0.6521 - val_loss: 0.2113
Epoch 14/50
1829/1829 - 71s - val_accuracy: 0.9437 - accuracy: 0.8144 - loss: 0.6297 - val_loss: 0.2173
Epoch 15/50
1829/1829 - 72s - val_accuracy: 0.9493 - accuracy: 0.8272 - loss: 0.6001 - val_loss: 0.1976
Epoch 16/50
1829/1829 - 71s - val_accuracy: 0.9392 - accuracy: 0.8335 - loss: 0.5722 - val_loss: 0.2039
|
code/chap03Mine.ipynb | ###Markdown
Modeling and Simulation in PythonChapter 3Copyright 2017 Allen DowneyLicense: [Creative Commons Attribution 4.0 International](https://creativecommons.org/licenses/by/4.0)
###Code
# Configure Jupyter so figures appear in the notebook
%matplotlib inline
# Configure Jupyter to display the assigned value after an assignment
%config InteractiveShell.ast_node_interactivity='last_expr_or_assign'
# import functions from the modsim library
from modsim import *
# set the random number generator
np.random.seed(7)
###Output
_____no_output_____
###Markdown
More than one State objectHere's the code from the previous chapter, with two changes:1. I've added DocStrings that explain what each function does, and what parameters it takes.2. I've added a parameter named `state` to the functions so they work with whatever `State` object we give them, instead of always using `bikeshare`. That makes it possible to work with more than one `State` object.
###Code
def step(state, p1, p2):
"""Simulate one minute of time.
state: bikeshare State object
p1: probability of an Olin->Wellesley customer arrival
p2: probability of a Wellesley->Olin customer arrival
"""
if flip(p1):
bike_to_wellesley(state)
if flip(p2):
bike_to_olin(state)
def bike_to_wellesley(state):
"""Move one bike from Olin to Wellesley.
state: bikeshare State object
"""
state.olin -= 1
state.wellesley += 1
def bike_to_olin(state):
"""Move one bike from Wellesley to Olin.
state: bikeshare State object
"""
state.wellesley -= 1
state.olin += 1
def decorate_bikeshare():
"""Add a title and label the axes."""
decorate(title='Olin-Wellesley Bikeshare',
xlabel='Time step (min)',
ylabel='Number of bikes')
###Output
_____no_output_____
###Markdown
And here's `run_simulation`, which is a solution to the exercise at the end of the previous notebook.
###Code
def run_simulation(state, p1, p2, num_steps):
"""Simulate the given number of time steps.
state: State object
p1: probability of an Olin->Wellesley customer arrival
p2: probability of a Wellesley->Olin customer arrival
num_steps: number of time steps
"""
results = TimeSeries()
for i in range(num_steps):
step(state, p1, p2)
results[i] = state.olin
plot(results, label='Olin')
###Output
_____no_output_____
###Markdown
Now we can create more than one `State` object:
###Code
bikeshare1 = State(olin=10, wellesley=2)
bikeshare2 = State(olin=2, wellesley=10)
###Output
_____no_output_____
###Markdown
Whenever we call a function, we indicate which `State` object to work with:
###Code
bike_to_olin(bikeshare1)
bike_to_wellesley(bikeshare2)
###Output
_____no_output_____
###Markdown
And you can confirm that the different objects are getting updated independently:
###Code
bikeshare1
bikeshare2
###Output
_____no_output_____
###Markdown
Negative bikes In the code we have so far, the number of bikes at one of the locations can go negative, and the number of bikes at the other location can exceed the actual number of bikes in the system.If you run this simulation a few times, it happens often.
###Code
bikeshare = State(olin=10, wellesley=2)
run_simulation(bikeshare, 0.4, 0.2, 60)
decorate_bikeshare()
###Output
_____no_output_____
###Markdown
We can fix this problem using the `return` statement to exit the function early if an update would cause negative bikes.
###Code
def bike_to_wellesley(state):
"""Move one bike from Olin to Wellesley.
state: bikeshare State object
"""
if state.olin == 0:
return
state.olin -= 1
state.wellesley += 1
def bike_to_olin(state):
"""Move one bike from Wellesley to Olin.
state: bikeshare State object
"""
if state.wellesley == 0:
return
state.wellesley -= 1
state.olin += 1
###Output
_____no_output_____
###Markdown
Now if you run the simulation again, it should behave.
###Code
bikeshare = State(olin=10, wellesley=2)
run_simulation(bikeshare, 0.4, 0.2, 60)
decorate_bikeshare()
###Output
_____no_output_____
###Markdown
Comparison operators The `if` statements in the previous section used the comparison operator `==`. The other comparison operators are listed in the book. It is easy to confuse the comparison operator `==` with the assignment operator `=`. Remember that `=` creates a variable or gives an existing variable a new value.
###Code
x = 4
###Output
_____no_output_____
###Markdown
Whereas `==` compared two values and returns `True` if they are equal.
###Code
x == 5
###Output
_____no_output_____
###Markdown
You can use `==` in an `if` statement.
###Code
if x == 5:
print('yes, x is 5')
###Output
yes, x is 5
###Markdown
But if you use `=` in an `if` statement, you get an error.
###Code
# Note: writing the condition below with a single = (that is, `if x = 5:`)
# instead of == would raise SyntaxError: invalid syntax
if x == 5:
print('yes, x is 5')
else:
print('no, x is not 5')
###Output
no, x is not 5
###Markdown
**Exercise:** Add an `else` clause to the `if` statement above, and print an appropriate message.Replace the `==` operator with one or two of the other comparison operators, and confirm they do what you expect. Metrics Now that we have a working simulation, we'll use it to evaluate alternative designs and see how good or bad they are. The metric we'll use is the number of customers who arrive and find no bikes available, which might indicate a design problem. First we'll make a new `State` object that creates and initializes additional state variables to keep track of the metrics.
###Code
bikeshare = State(olin=10, wellesley=2,
olin_empty=0, wellesley_empty=0)
###Output
_____no_output_____
###Markdown
Next we need versions of `bike_to_wellesley` and `bike_to_olin` that update the metrics.
###Code
def bike_to_wellesley(state):
"""Move one bike from Olin to Wellesley.
state: bikeshare State object
"""
if state.olin == 0:
state.olin_empty += 1
return
state.olin -= 1
state.wellesley += 1
def bike_to_olin(state):
"""Move one bike from Wellesley to Olin.
state: bikeshare State object
"""
if state.wellesley == 0:
state.wellesley_empty += 1
return
state.wellesley -= 1
state.olin += 1
###Output
_____no_output_____
###Markdown
Now when we run a simulation, it keeps track of unhappy customers.
###Code
run_simulation(bikeshare, 0.4, 0.2, 60)
decorate_bikeshare()
savefig('figs/chap02-fig01.pdf')
###Output
Saving figure to file figs/chap02-fig01.pdf
###Markdown
After the simulation, we can print the number of unhappy customers at each location.
###Code
bikeshare.olin_empty
bikeshare.wellesley_empty
###Output
_____no_output_____
###Markdown
Exercises**Exercise:** As another metric, we might be interested in the time until the first customer arrives and doesn't find a bike. To make that work, we have to add a "clock" to keep track of how many time steps have elapsed:1. Create a new `State` object with an additional state variable, `clock`, initialized to 0. 2. Write a modified version of `step` that adds one to the clock each time it is invoked.Test your code by running the simulation and check the value of `clock` at the end.
###Code
bikeshare = State(olin=10, wellesley=2,
olin_empty=0, wellesley_empty=0,
clock=0)
def step(state, p1, p2):
"""Simulate one minute of time.
state: bikeshare State object
p1: probability of an Olin->Wellesley customer arrival
p2: probability of a Wellesley->Olin customer arrival
"""
if flip(p1):
bike_to_wellesley(state)
if flip(p2):
bike_to_olin(state)
state.clock +=1
run_simulation(bikeshare, .4,.2,60)
bikeshare.clock
###Output
_____no_output_____
###Markdown
**Exercise:** Continuing the previous exercise, let's record the time when the first customer arrives and doesn't find a bike.1. Create a new `State` object with an additional state variable, `t_first_empty`, initialized to -1 as a special value to indicate that it has not been set. 2. Write a modified version of `step` that checks whether`olin_empty` and `wellesley_empty` are 0. If not, it should set `t_first_empty` to `clock` (but only if `t_first_empty` has not already been set).Test your code by running the simulation and printing the values of `olin_empty`, `wellesley_empty`, and `t_first_empty` at the end.
###Code
bikeshareTime = State(olin=10, wellesley=2,
olin_empty=0, wellesley_empty=0,
clock=0, t_first_empty=-1)
def stepTime(state, p1, p2):
"""Simulate one minute of time.
state: bikeshare State object
p1: probability of an Olin->Wellesley customer arrival
p2: probability of a Wellesley->Olin customer arrival
"""
if flip(p1):
bike_to_wellesley(state)
if flip(p2):
bike_to_olin(state)
state.clock +=1
    if state.t_first_empty == -1:
        if state.olin_empty > 0 or state.wellesley_empty > 0:
            state.t_first_empty = state.clock
def run_simulation(state, p1, p2, num_steps):
"""Simulate the given number of time steps.
state: State object
p1: probability of an Olin->Wellesley customer arrival
p2: probability of a Wellesley->Olin customer arrival
num_steps: number of time steps
"""
results = TimeSeries()
for i in range(num_steps):
stepTime(state, p1, p2)
results[i] = state.olin
plot(results, label='Olin')
run_simulation(bikeshareTime, .4,.2,60)
bikeshareTime.t_first_empty
###Output
_____no_output_____ |
code/Checking_actions.ipynb | ###Markdown
Are my actions the same as Adrian's? Load my and Adrian's data.
###Code
from astropy.io import fits
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
from astropy.table import Table
# Adrian's actions
fname = "/Users/ruthangus/Desktop/gaiadr2-good-plx-actions.fits"
with fits.open(fname) as hdu1:
data = hdu1[1].data
#hdr = hdul[1].header
#print(hdr)
jr, lz, jz = np.array(data["actions"]).T
source_id = data.source_id.byteswap().newbyteorder()
ra = data.ra.byteswap().newbyteorder()
dec = data.dec.byteswap().newbyteorder()
adrian = pd.DataFrame(dict({"source_id": source_id,
"ra": ra, "dec": dec,
"jr": jr, "lz": lz, "jz": jz
}))
# Load my actions:
ruth = pd.read_csv("stlr_gaia_actions.csv")
for key in ruth.keys():
print(key)
fudge = 1
ramax, ramin = max(ruth.ra_stlr) + fudge, min(ruth.ra_stlr) - fudge
decmax, decmin = max(ruth.dec_stlr) + fudge, min(ruth.dec_stlr) - fudge
# Make cuts on RA and dec so that Adrian's catalogue isn't so big
print(np.shape(adrian))
m = ramin < adrian.ra.values
m &= decmin < adrian.dec.values
m &= adrian.ra.values < ramax
m &= adrian.dec.values < decmax
ad = adrian.iloc[m]
plt.plot(ruth.ra_stlr, ruth.dec_stlr, ".", ms=1, alpha=.5)
plt.plot(ad.ra, ad.dec, ".", ms=.5, alpha=.5)
df = pd.merge(ad, ruth, on="source_id", suffixes=["_adrian", "_ruth"])
plt.plot(df.ra, df.dec, ".")
plt.loglog(df.J_z, df.jz, ".")
xs = np.linspace(0, 100, 100)
plt.plot(xs, xs, "k", ls="--", lw=1, alpha=.5)
plt.xlabel("My Jz")
plt.ylabel("Adrian's Jz")
plt.plot(-df.L_z, df.lz, ".")
xs = np.linspace(-2500, 0, 100)
plt.plot(xs, xs, "k", ls="--", lw=1, alpha=.5)
plt.xlabel("My Lz")
plt.ylabel("Adrian's Lz")
plt.loglog(df.J_R, df.jr, ".")
xs = np.linspace(0, 500, 100)
plt.plot(xs, xs, "k", ls="--", lw=1, alpha=.5)
plt.xlabel("My Jr")
plt.ylabel("Adrian's Jr")
###Output
_____no_output_____
###Markdown
Check Jz against ages from Sanders et al.
###Code
file = "gaia_spectro.hdf5"
from astropy.table import Table
data = Table.read(file)
sanders = data.to_pandas()
sanders
###Output
_____no_output_____
###Markdown
Crossmatch my catalogue with Jason's.
###Code
ruth_sanders = pd.merge(ruth, sanders, on="source_id", suffixes=["_r", "_s"])
print(np.shape(sanders), np.shape(ruth), np.shape(ruth_sanders))
m = ruth_sanders.log10_age.values > 0
plt.hist2d(ruth_sanders.log10_age.values[m], np.log10(ruth_sanders.J_z.values[m]), bins=40)
plt.xlabel("log10(Age [Gyr])")
plt.ylabel("log10(Jz)")
m = ruth_sanders.log10_age.values > 0
m &= np.isfinite(ruth_sanders.log10_age.values)
m &= np.isfinite(ruth_sanders.J_z.values)
x, y = 10**ruth_sanders.log10_age.values[m], ruth_sanders.J_z.values[m]
inds = np.argsort(x)
x, y = x[inds], y[inds]
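# Least-squares quadratic fit y ~ a + b*x + c*x**2: AT holds the transposed
# design matrix with rows [1, x, x**2], and we solve the normal equations
# (A^T A) theta = A^T y for the coefficients a, b, c.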
AT = np.vstack((np.ones(len(x)), x, x**2))
ATA = np.dot(AT, AT.T)
a, b, c = np.linalg.solve(ATA, np.dot(AT, y))
plt.loglog(x, y, ".")
plt.loglog(x, a + b*x + c*x**2, "k-")
plt.xlabel("Age [Gyr]")
plt.ylabel("Jz")
###Output
/Users/ruthangus/anaconda/lib/python3.5/site-packages/ipykernel/__main__.py:1: RuntimeWarning: invalid value encountered in greater
if __name__ == '__main__':
###Markdown
Plot running median
###Code
width = 1 # 1 Gyr width bins
bins = x[(width < x) & (x < max(x) - width)]
medians, v = [np.zeros(len(bins)) for i in range(2)]
for i in range(len(bins)):
inds = (bins[i] < x) & (x < bins[i] + width)
medians[i] = np.median(y[inds])
v[i] = np.var(y[inds])
print(max(x), max(bins))
plt.loglog(x, y, ".")
plt.loglog(bins, medians, ".")
plt.loglog(bins, v, ".")
plt.loglog(x, a + b*x + c*x**2, "k-")
plt.xlabel("Age [Gyr]")
plt.ylabel("Jz")
plt.hist(10**ruth_sanders.log10_age[~np.isnan(10**ruth_sanders.log10_age)], 50);
###Output
_____no_output_____ |
source/eq_by_add/eq_by_add.ipynb | ###Markdown
ExerciseSolve the following equation using addition.$$x + (-6) = 5$$
###Code
from sympy import symbols, Eq, simplify
def ex():
x = symbols('x')
eq1 = Eq(x + (-6), 5)
### BEGIN SOLUTION
eq1 = Eq(eq1.lhs + 6, eq1.rhs + 6)
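    # Adding 6 to both sides: x + (-6) + 6 = 5 + 6, which simplifies to x = 11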
### END SOLUTION
return(eq1)
ex() # Test out results
# hide_me
# Unit tests
import inspect
assert 'ex' in locals(), 'Please keep the function name as `ex`'
assert (simplify(ex()) == Eq(symbols('x'), 11)), 'The final answer for x is incorrect'
assert '.lhs' in inspect.getsource(ex), 'You should use the lhs method to modify the left of the equation'
assert '.rhs' in inspect.getsource(ex), 'You should use the rhs method to modify the right of the equation'
assert 'solve' not in inspect.getsource(ex), 'Do not use the `solve()` function to get the answer'
assert type(ex()) == Eq, 'Your function should return an SymPy equation'
print("Great job!")
###Output
_____no_output_____ |
StyleTransfer-TensorFlow.ipynb | ###Markdown
Style TransferIn this notebook we will implement the style transfer technique from ["Image Style Transfer Using Convolutional Neural Networks" (Gatys et al., CVPR 2015)](http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Gatys_Image_Style_Transfer_CVPR_2016_paper.pdf).The general idea is to take two images, and produce a new image that reflects the content of one but the artistic "style" of the other. We will do this by first formulating a loss function that matches the content and style of each respective image in the feature space of a deep network, and then performing gradient descent on the pixels of the image itself.The deep network we use as a feature extractor is [SqueezeNet](https://arxiv.org/abs/1602.07360), a small model that has been trained on ImageNet. You could use any network, but we chose SqueezeNet here for its small size and efficiency.Here's an example of the images you'll be able to produce by the end of this notebook: Setup
###Code
%load_ext autoreload
%autoreload 2
from scipy.misc import imread, imresize
import numpy as np
from scipy.misc import imread
import matplotlib.pyplot as plt
# Helper functions to deal with image preprocessing
from cs231n.image_utils import load_image, preprocess_image, deprocess_image
%matplotlib inline
def get_session():
"""Create a session that dynamically allocates memory."""
# See: https://www.tensorflow.org/tutorials/using_gpu#allowing_gpu_memory_growth
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
session = tf.Session(config=config)
return session
def rel_error(x,y):
return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y))))
# Older versions of scipy.misc.imresize yield different results
# from newer versions, so we check to make sure scipy is up to date.
def check_scipy():
import scipy
version = scipy.__version__.split('.')
if int(version[0]) < 1:
assert int(version[1]) >= 16, "You must install SciPy >= 0.16.0 to complete this notebook."
check_scipy()
###Output
_____no_output_____
###Markdown
Load the pretrained SqueezeNet model. This model has been ported from PyTorch, see `cs231n/classifiers/squeezenet.py` for the model architecture. To use SqueezeNet, you will need to first **download the weights** by descending into the `cs231n/datasets` directory and running `get_squeezenet_tf.sh` . Note that if you ran `get_assignment3_data.sh` then SqueezeNet will already be downloaded.
###Code
from cs231n.classifiers.squeezenet import SqueezeNet
import tensorflow as tf
import os
tf.reset_default_graph() # remove all existing variables in the graph
sess = get_session() # start a new Session
# Load pretrained SqueezeNet model
SAVE_PATH = 'cs231n/datasets/squeezenet.ckpt'
if not os.path.exists(SAVE_PATH + ".index"):
raise ValueError("You need to download SqueezeNet!")
model = SqueezeNet(save_path=SAVE_PATH, sess=sess)
# Load data for testing
content_img_test = preprocess_image(load_image('styles/tubingen.jpg', size=192))[None]
style_img_test = preprocess_image(load_image('styles/starry_night.jpg', size=192))[None]
answers = np.load('style-transfer-checks-tf.npz')
###Output
_____no_output_____
###Markdown
Computing LossWe're going to compute the three components of our loss function now. The loss function is a weighted sum of three terms: content loss + style loss + total variation loss. You'll fill in the functions that compute these weighted terms below. Content lossWe can generate an image that reflects the content of one image and the style of another by incorporating both in our loss function. We want to penalize deviations from the content of the content image and deviations from the style of the style image. We can then use this hybrid loss function to perform gradient descent **not on the parameters** of the model, but instead **on the pixel values** of our original image.Let's first write the content loss function. Content loss measures how much the feature map of the generated image differs from the feature map of the source image. We only care about the content representation of one layer of the network (say, layer $\ell$), that has feature maps $A^\ell \in \mathbb{R}^{1 \times H_\ell \times W_\ell \times C_\ell}$. $C_\ell$ is the number of filters/channels in layer $\ell$, $H_\ell$ and $W_\ell$ are the height and width. We will work with reshaped versions of these feature maps that combine all spatial positions into one dimension. Let $F^\ell \in \mathbb{R}^{M_\ell \times C_\ell}$ be the feature map for the current image and $P^\ell \in \mathbb{R}^{M_\ell \times C_\ell}$ be the feature map for the content source image where $M_\ell=H_\ell\times W_\ell$ is the number of elements in each feature map. Each row of $F^\ell$ or $P^\ell$ represents the vectorized activations of a particular filter, convolved over all positions of the image. Finally, let $w_c$ be the weight of the content loss term in the loss function.Then the content loss is given by:$L_c = w_c \times \sum_{i,j} (F_{ij}^{\ell} - P_{ij}^{\ell})^2$
###Code
def content_loss(content_weight, content_current, content_original):
"""
Compute the content loss for style transfer.
Inputs:
- content_weight: scalar constant we multiply the content_loss by.
- content_current: features of the current image, Tensor with shape [1, height, width, channels]
- content_target: features of the content image, Tensor with shape [1, height, width, channels]
Returns:
- scalar content loss
"""
pass
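    # A minimal sketch (one possible implementation, not necessarily the
    # intended solution) of the loss defined above: the sum of squared
    # differences between the two feature maps, scaled by content_weight.
    # It assumes both inputs have the same shape.
    return content_weight * tf.reduce_sum(
        tf.square(content_current - content_original))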
###Output
_____no_output_____
###Markdown
Test your content loss. You should see errors less than 0.0001.
###Code
def content_loss_test(correct):
content_layer = 3
content_weight = 6e-2
c_feats = sess.run(model.extract_features()[content_layer], {model.image: content_img_test})
bad_img = tf.zeros(content_img_test.shape)
feats = model.extract_features(bad_img)[content_layer]
student_output = sess.run(content_loss(content_weight, c_feats, feats))
error = rel_error(correct, student_output)
print('Maximum error is {:.3f}'.format(error))
content_loss_test(answers['cl_out'])
###Output
_____no_output_____
###Markdown
Style lossNow we can tackle the style loss. For a given layer $\ell$, the style loss is defined as follows:First, compute the Gram matrix G which represents the correlations between the responses of each filter, where F is as above. The Gram matrix is an approximation to the covariance matrix -- we want the activation statistics of our generated image to match the activation statistics of our style image, and matching the (approximate) covariance is one way to do that. There are a variety of ways you could do this, but the Gram matrix is nice because it's easy to compute and in practice shows good results.Given a feature map $F^\ell$ of shape $(M_\ell, C_\ell)$, the Gram matrix has shape $(C_\ell, C_\ell)$ and its elements are given by:$$G_{ij}^\ell = \sum_k F^{\ell}_{ki} F^{\ell}_{kj}$$Assuming $G^\ell$ is the Gram matrix from the feature map of the current image, $A^\ell$ is the Gram Matrix from the feature map of the source style image, and $w_\ell$ a scalar weight term, then the style loss for the layer $\ell$ is simply the weighted Euclidean distance between the two Gram matrices:$$L_s^\ell = w_\ell \sum_{i, j} \left(G^\ell_{ij} - A^\ell_{ij}\right)^2$$In practice we usually compute the style loss at a set of layers $\mathcal{L}$ rather than just a single layer $\ell$; then the total style loss is the sum of style losses at each layer:$$L_s = \sum_{\ell \in \mathcal{L}} L_s^\ell$$Begin by implementing the Gram matrix computation below:
###Code
def gram_matrix(features, normalize=True):
"""
Compute the Gram matrix from features.
Inputs:
- features: Tensor of shape (1, H, W, C) giving features for
a single image.
- normalize: optional, whether to normalize the Gram matrix
If True, divide the Gram matrix by the number of neurons (H * W * C)
Returns:
- gram: Tensor of shape (C, C) giving the (optionally normalized)
Gram matrices for the input image.
"""
pass
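    # A minimal sketch of the Gram computation described above, assuming
    # `features` has shape (1, H, W, C) with possibly unknown static dims:
    # flatten the spatial positions, form F^T F, and optionally divide by
    # the total number of neurons H * W * C.
    shape = tf.shape(features)
    H, W, C = shape[1], shape[2], shape[3]
    F = tf.reshape(features, (H * W, C))
    gram = tf.matmul(F, F, transpose_a=True)
    if normalize:
        gram = gram / tf.cast(H * W * C, features.dtype)
    return gram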
###Output
_____no_output_____
###Markdown
Test your Gram matrix code. You should see errors less than 0.0001.
###Code
def gram_matrix_test(correct):
gram = gram_matrix(model.extract_features()[5])
student_output = sess.run(gram, {model.image: style_img_test})
error = rel_error(correct, student_output)
print('Maximum error is {:.3f}'.format(error))
gram_matrix_test(answers['gm_out'])
###Output
_____no_output_____
###Markdown
Next, implement the style loss:
###Code
def style_loss(feats, style_layers, style_targets, style_weights):
"""
Computes the style loss at a set of layers.
Inputs:
- feats: list of the features at every layer of the current image, as produced by
the extract_features function.
- style_layers: List of layer indices into feats giving the layers to include in the
style loss.
- style_targets: List of the same length as style_layers, where style_targets[i] is
a Tensor giving the Gram matrix of the source style image computed at
layer style_layers[i].
- style_weights: List of the same length as style_layers, where style_weights[i]
is a scalar giving the weight for the style loss at layer style_layers[i].
Returns:
- style_loss: A Tensor containing the scalar style loss.
"""
# Hint: you can do this with one for loop over the style layers, and should
# not be very much code (~5 lines). You will need to use your gram_matrix function.
pass
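    # A minimal sketch following the definition above. It relies on the
    # gram_matrix function defined earlier and assumes style_targets holds
    # the precomputed Gram matrices (e.g. NumPy arrays) of the style image.
    loss = tf.constant(0.0, dtype=tf.float32)
    for layer, target, weight in zip(style_layers, style_targets, style_weights):
        gram = gram_matrix(feats[layer])
        loss += weight * tf.reduce_sum(tf.square(gram - target))
    return loss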
###Output
_____no_output_____
###Markdown
Test your style loss implementation. The error should be less than 0.0001.
###Code
def style_loss_test(correct):
style_layers = [1, 4, 6, 7]
style_weights = [300000, 1000, 15, 3]
feats = model.extract_features()
style_target_vars = []
for idx in style_layers:
style_target_vars.append(gram_matrix(feats[idx]))
style_targets = sess.run(style_target_vars,
{model.image: style_img_test})
s_loss = style_loss(feats, style_layers, style_targets, style_weights)
student_output = sess.run(s_loss, {model.image: content_img_test})
error = rel_error(correct, student_output)
print('Error is {:.3f}'.format(error))
style_loss_test(answers['sl_out'])
###Output
_____no_output_____
###Markdown
Total-variation regularizationIt turns out that it's helpful to also encourage smoothness in the image. We can do this by adding another term to our loss that penalizes wiggles or "total variation" in the pixel values. You can compute the "total variation" as the sum of the squares of differences in the pixel values for all pairs of pixels that are next to each other (horizontally or vertically). Here we sum the total-variation regualarization for each of the 3 input channels (RGB), and weight the total summed loss by the total variation weight, $w_t$:$L_{tv} = w_t \times \left(\sum_{c=1}^3\sum_{i=1}^{H-1}\sum_{j=1}^{W} (x_{i+1,j,c} - x_{i,j,c})^2 + \sum_{c=1}^3\sum_{i=1}^{H}\sum_{j=1}^{W - 1} (x_{i,j+1,c} - x_{i,j,c})^2\right)$In the next cell, fill in the definition for the TV loss term. To receive full credit, your implementation should not have any loops.
###Code
def tv_loss(img, tv_weight):
"""
Compute total variation loss.
Inputs:
- img: Tensor of shape (1, H, W, 3) holding an input image.
- tv_weight: Scalar giving the weight w_t to use for the TV loss.
Returns:
- loss: Tensor holding a scalar giving the total variation loss
for img weighted by tv_weight.
"""
# Your implementation should be vectorized and not require any loops!
pass
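    # A minimal, loop-free sketch of the total-variation penalty defined
    # above: squared differences between vertically and horizontally
    # adjacent pixels, summed over all channels and scaled by tv_weight.
    h_var = tf.reduce_sum(tf.square(img[:, 1:, :, :] - img[:, :-1, :, :]))
    w_var = tf.reduce_sum(tf.square(img[:, :, 1:, :] - img[:, :, :-1, :]))
    return tv_weight * (h_var + w_var)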
###Output
_____no_output_____
###Markdown
Test your TV loss implementation. Error should be less than 0.0001.
###Code
def tv_loss_test(correct):
tv_weight = 2e-2
t_loss = tv_loss(model.image, tv_weight)
student_output = sess.run(t_loss, {model.image: content_img_test})
error = rel_error(correct, student_output)
print('Error is {:.3f}'.format(error))
tv_loss_test(answers['tv_out'])
###Output
_____no_output_____
###Markdown
Style Transfer Lets put it all together and make some beautiful images! The `style_transfer` function below combines all the losses you coded up above and optimizes for an image that minimizes the total loss.
###Code
def style_transfer(content_image, style_image, image_size, style_size, content_layer, content_weight,
style_layers, style_weights, tv_weight, init_random = False):
"""Run style transfer!
Inputs:
- content_image: filename of content image
- style_image: filename of style image
- image_size: size of smallest image dimension (used for content loss and generated image)
- style_size: size of smallest style image dimension
- content_layer: layer to use for content loss
- content_weight: weighting on content loss
- style_layers: list of layers to use for style loss
- style_weights: list of weights to use for each layer in style_layers
- tv_weight: weight of total variation regularization term
- init_random: initialize the starting image to uniform random noise
"""
# Extract features from the content image
content_img = preprocess_image(load_image(content_image, size=image_size))
feats = model.extract_features(model.image)
content_target = sess.run(feats[content_layer],
{model.image: content_img[None]})
# Extract features from the style image
style_img = preprocess_image(load_image(style_image, size=style_size))
style_feat_vars = [feats[idx] for idx in style_layers]
style_target_vars = []
# Compute list of TensorFlow Gram matrices
for style_feat_var in style_feat_vars:
style_target_vars.append(gram_matrix(style_feat_var))
# Compute list of NumPy Gram matrices by evaluating the TensorFlow graph on the style image
style_targets = sess.run(style_target_vars, {model.image: style_img[None]})
# Initialize generated image to content image
if init_random:
img_var = tf.Variable(tf.random_uniform(content_img[None].shape, 0, 1), name="image")
else:
img_var = tf.Variable(content_img[None], name="image")
# Extract features on generated image
feats = model.extract_features(img_var)
# Compute loss
c_loss = content_loss(content_weight, feats[content_layer], content_target)
s_loss = style_loss(feats, style_layers, style_targets, style_weights)
t_loss = tv_loss(img_var, tv_weight)
loss = c_loss + s_loss + t_loss
# Set up optimization hyperparameters
initial_lr = 3.0
decayed_lr = 0.1
decay_lr_at = 180
max_iter = 200
# Create and initialize the Adam optimizer
lr_var = tf.Variable(initial_lr, name="lr")
# Create train_op that updates the generated image when run
with tf.variable_scope("optimizer") as opt_scope:
train_op = tf.train.AdamOptimizer(lr_var).minimize(loss, var_list=[img_var])
# Initialize the generated image and optimization variables
opt_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=opt_scope.name)
sess.run(tf.variables_initializer([lr_var, img_var] + opt_vars))
# Create an op that will clamp the image values when run
clamp_image_op = tf.assign(img_var, tf.clip_by_value(img_var, -1.5, 1.5))
f, axarr = plt.subplots(1,2)
axarr[0].axis('off')
axarr[1].axis('off')
axarr[0].set_title('Content Source Img.')
axarr[1].set_title('Style Source Img.')
axarr[0].imshow(deprocess_image(content_img))
axarr[1].imshow(deprocess_image(style_img))
plt.show()
plt.figure()
    # Main optimization loop (the iteration schedule above is hard-coded by hand)
for t in range(max_iter):
# Take an optimization step to update img_var
sess.run(train_op)
if t < decay_lr_at:
sess.run(clamp_image_op)
if t == decay_lr_at:
sess.run(tf.assign(lr_var, decayed_lr))
if t % 100 == 0:
print('Iteration {}'.format(t))
img = sess.run(img_var)
plt.imshow(deprocess_image(img[0], rescale=True))
plt.axis('off')
plt.show()
print('Iteration {}'.format(t))
img = sess.run(img_var)
plt.imshow(deprocess_image(img[0], rescale=True))
plt.axis('off')
plt.show()
###Output
_____no_output_____
###Markdown
Generate some pretty pictures!Try out `style_transfer` on the three different parameter sets below. Make sure to run all three cells. Feel free to add your own, but make sure to include the results of style transfer on the third parameter set (starry night) in your submitted notebook.* The `content_image` is the filename of content image.* The `style_image` is the filename of style image.* The `image_size` is the size of smallest image dimension of the content image (used for content loss and generated image).* The `style_size` is the size of smallest style image dimension.* The `content_layer` specifies which layer to use for content loss.* The `content_weight` gives weighting on content loss in the overall loss function. Increasing the value of this parameter will make the final image look more realistic (closer to the original content).* `style_layers` specifies a list of which layers to use for style loss. * `style_weights` specifies a list of weights to use for each layer in style_layers (each of which will contribute a term to the overall style loss). We generally use higher weights for the earlier style layers because they describe more local/smaller scale features, which are more important to texture than features over larger receptive fields. In general, increasing these weights will make the resulting image look less like the original content and more distorted towards the appearance of the style image.* `tv_weight` specifies the weighting of total variation regularization in the overall loss function. Increasing this value makes the resulting image look smoother and less jagged, at the cost of lower fidelity to style and content. Below the next three cells of code (in which you shouldn't change the hyperparameters), feel free to copy and paste the parameters to play around them and see how the resulting image changes.
###Code
# Composition VII + Tubingen
params1 = {
'content_image' : 'styles/tubingen.jpg',
'style_image' : 'styles/composition_vii.jpg',
'image_size' : 192,
'style_size' : 512,
'content_layer' : 3,
'content_weight' : 5e-2,
'style_layers' : (1, 4, 6, 7),
'style_weights' : (20000, 500, 12, 1),
'tv_weight' : 5e-2
}
style_transfer(**params1)
# Scream + Tubingen
params2 = {
'content_image':'styles/tubingen.jpg',
'style_image':'styles/the_scream.jpg',
'image_size':192,
'style_size':224,
'content_layer':3,
'content_weight':3e-2,
'style_layers':[1, 4, 6, 7],
'style_weights':[200000, 800, 12, 1],
'tv_weight':2e-2
}
style_transfer(**params2)
# Starry Night + Tubingen
params3 = {
'content_image' : 'styles/tubingen.jpg',
'style_image' : 'styles/starry_night.jpg',
'image_size' : 192,
'style_size' : 192,
'content_layer' : 3,
'content_weight' : 6e-2,
'style_layers' : [1, 4, 6, 7],
'style_weights' : [300000, 1000, 15, 3],
'tv_weight' : 2e-2
}
style_transfer(**params3)
###Output
_____no_output_____
###Markdown
Feature InversionThe code you've written can do another cool thing. In an attempt to understand the types of features that convolutional networks learn to recognize, a recent paper [1] attempts to reconstruct an image from its feature representation. We can easily implement this idea using image gradients from the pretrained network, which is exactly what we did above (but with two different feature representations).Now, if you set the style weights to all be 0 and initialize the starting image to random noise instead of the content source image, you'll reconstruct an image from the feature representation of the content source image. You're starting with total noise, but you should end up with something that looks quite a bit like your original image.(Similarly, you could do "texture synthesis" from scratch if you set the content weight to 0 and initialize the starting image to random noise, but we won't ask you to do that here.) Run the following cell to try out feature inversion.[1] Aravindh Mahendran, Andrea Vedaldi, "Understanding Deep Image Representations by Inverting them", CVPR 2015
###Code
# Feature Inversion -- Starry Night + Tubingen
params_inv = {
'content_image' : 'styles/tubingen.jpg',
'style_image' : 'styles/starry_night.jpg',
'image_size' : 192,
'style_size' : 192,
'content_layer' : 3,
'content_weight' : 6e-2,
'style_layers' : [1, 4, 6, 7],
'style_weights' : [0, 0, 0, 0], # we discard any contributions from style to the loss
'tv_weight' : 2e-2,
'init_random': True # we want to initialize our image to be random
}
style_transfer(**params_inv)
###Output
_____no_output_____ |
Notebook-Class-Assignment-Answers/Step-4-Develop-Model-Task-7-Graph-Analytics-Class-Assignment.ipynb | ###Markdown
Install Pygeohash libraryThis library provides functions for computing geohash Step 5 - Develop Model - Task 6 - Connect the dots & Task 7 - Graph Analytics - CLASS ASSIGNMENTS
###Code
!pip install pygeohash
###Output
Collecting pygeohash
Downloading https://files.pythonhosted.org/packages/2c/33/c912fa4476cedcd3ed9cd25c44c163583b92d319860438e6b632f7f42d0c/pygeohash-1.2.0.tar.gz
Building wheels for collected packages: pygeohash
Building wheel for pygeohash (setup.py) ... [?25l[?25hdone
Created wheel for pygeohash: filename=pygeohash-1.2.0-py2.py3-none-any.whl size=6162 sha256=3094842273f60a8a8bb2e706f2eb8583cbb5bf7e12c0cc2b25707e2b34b904ae
Stored in directory: /root/.cache/pip/wheels/3f/5f/14/989d83a271207dda28232746d63e737a2dbd88ea7f7a9db807
Successfully built pygeohash
Installing collected packages: pygeohash
Successfully installed pygeohash-1.2.0
###Markdown
Import pygeohash, networkx and Pandas libraries. Pygeohash - functions for converting latitude, longitude to geohash and related distance measurement utilities. Networkx - functions for creating, manipulating and querying open source network graphs. Pandas - Python functions for table manipulation
###Code
import pygeohash as pgh
import networkx as nx
import pandas as pd
###Output
_____no_output_____
###Markdown
Connect to datasets using Google drive or local files
###Code
using_Google_colab = True
using_Anaconda_on_Mac_or_Linux = False
using_Anaconda_on_windows = False
if using_Google_colab:
from google.colab import drive
drive.mount('/content/drive')
###Output
Mounted at /content/drive
###Markdown
DM6.1 Open Notebook, read Lat, Long and compute Geohash - Activity 1
###Code
if using_Google_colab:
state_location = pd.read_csv('/content/drive/MyDrive/COVID_Project/input/state_lat_long.csv')
if using_Anaconda_on_Mac_or_Linux:
state_location = pd.read_csv('../input/state_lat_long.csv')
if using_Anaconda_on_windows:
state_location = pd.read_csv(r'..\input\state_lat_long.csv')
state_location.loc[0:5,]
###Output
_____no_output_____
###Markdown
Apply a function call to convert Lat, Long to Geohash
###Code
def lat_long_to_geohash(lat_long):
return pgh.encode(lat_long[0], lat_long[1])
state_location['geohash'] = state_location[['latitude',
'longitude']].apply(lat_long_to_geohash,
axis=1)
state_location.iloc[0:10,]
###Output
_____no_output_____
###Markdown
Truncate geohash to first two characters
###Code
state_location['geohash'] = state_location.geohash.str.slice(stop=2)
state_location.iloc[0:10,]
###Output
_____no_output_____
###Markdown
DM6.2 - Design Graph representing States and Geohash Find neighbors by sorting the states by 2 character geohash codes attached to each state Initialize Graph and create state and geohash concepts as nodes
###Code
GRAPH_ID = nx.DiGraph()
GRAPH_ID.add_node('state')
GRAPH_ID.add_node('geohash')
###Output
_____no_output_____
###Markdown
Create a node for each state
###Code
state_list = state_location.state.values
for state in state_list:
GRAPH_ID.add_node(state)
GRAPH_ID.add_edge('state', state, label='instance')
###Output
_____no_output_____
###Markdown
Create a list of unique geohash codes and create a node for each geohash
###Code
geohash_list = state_location.geohash.values
for geohash in geohash_list:
GRAPH_ID.add_node(geohash)
GRAPH_ID.add_edge('geohash', geohash, label='instance')
df_state_geohash = state_location[['state', 'geohash']]
for state_geohash in df_state_geohash.itertuples():
GRAPH_ID.add_edge(state_geohash.state, state_geohash.geohash,
label='located_at')
GRAPH_ID.add_edge(state_geohash.geohash, state_geohash.state,
label='locates',
distance=0.0)
###Output
_____no_output_____
###Markdown
DM6.3 - Which states are in Geohash 9q Find geohash associated with California and Nevada
###Code
list(GRAPH_ID.neighbors('CA'))
list(GRAPH_ID.neighbors('NV'))
###Output
_____no_output_____
###Markdown
Find States located with geohash '9q'
###Code
list(GRAPH_ID.neighbors('9q'))
###Output
_____no_output_____
###Markdown
DM6.4 Sort the data and find neighbors sharing geohash Find states located with geohash for all geohashes
###Code
for geohash in GRAPH_ID['geohash']:
print("Geohash: ", geohash, "States: ", list(GRAPH_ID.neighbors(geohash)))
###Output
Geohash: be States: ['AK']
Geohash: dj States: ['AL', 'GA', 'MS']
Geohash: 9y States: ['AR', 'KS', 'MO', 'OK']
Geohash: 9w States: ['AZ', 'NM', 'UT']
Geohash: 9q States: ['CA', 'NV']
Geohash: 9x States: ['CO', 'WY']
Geohash: dr States: ['CT', 'MA', 'NH', 'NJ', 'NY', 'PA', 'RI', 'VT']
Geohash: dq States: ['DC', 'DE', 'MD', 'VA']
Geohash: dh States: ['FL']
Geohash: 8e States: ['HI']
Geohash: 9z States: ['IA', 'NE', 'SD']
Geohash: 9r States: ['ID', 'OR']
Geohash: dp States: ['IL', 'IN', 'MI', 'OH', 'WI']
Geohash: dn States: ['KY', 'NC', 'SC', 'TN', 'WV']
Geohash: 9v States: ['LA', 'TX']
Geohash: f2 States: ['ME']
Geohash: cb States: ['MN', 'ND']
Geohash: c8 States: ['MT']
Geohash: de States: ['PR']
Geohash: c2 States: ['WA']
###Markdown
DM6.5 Use Graph to find Geohash associated with NY - CLASS ASSIGNMENT
###Code
list(GRAPH_ID.neighbors('NY'))
###Output
_____no_output_____
###Markdown
DM6.6 Use Graph to find which states are in Geohash 'dr' - CLASS ASSIGNMENT
###Code
list(GRAPH_ID.neighbors('dr'))
###Output
_____no_output_____
###Markdown
Step 4 - Develop Model - Task 7 - Graph Analytics - DM7.1 Activity 1 - Find number of state and geohash nodes in a graph
###Code
len(list (GRAPH_ID.neighbors('geohash')))
len(list (GRAPH_ID.neighbors('state')))
###Output
_____no_output_____
###Markdown
DM7.2 - Find all neighboring states for NY Connect neighboring geohash codes if the distance is less than 1,000 km
###Code
for geohash_1 in geohash_list:
for geohash_2 in geohash_list:
if geohash_1 != geohash_2:
distance = pgh.geohash_haversine_distance(geohash_1, geohash_2)
if distance < 1000000:
GRAPH_ID.add_edge(geohash_1, geohash_2, label='near')
###Output
_____no_output_____
###Markdown
Find path length from NY to all nodes (states and geohashes)
###Code
neighbor_path_length = nx.single_source_dijkstra_path_length(GRAPH_ID, 'NY', weight='distance')
neighbor_path_length
###Output
_____no_output_____
###Markdown
Make a list of all nodes covered in the path length and then find those nodes which are states and less than or equal to 3 hops
###Code
neighbor_states = neighbor_path_length.keys()
state_list = (list (GRAPH_ID.neighbors('state')))
for state in state_list:
if state in neighbor_states:
if neighbor_path_length[state] <= 3:
print(state)
###Output
CT
DC
DE
IL
IN
MA
MD
ME
MI
NH
NJ
NY
OH
PA
RI
VA
VT
WI
###Markdown
DM7.3 - Find all neighboring states for each state
###Code
for state_1 in state_list:
neighbor_path_length = nx.single_source_dijkstra_path_length(GRAPH_ID, state_1)
neighbor_state_list = neighbor_path_length.keys()
next_door_list = []
for state_2 in neighbor_state_list:
if state_1 != state_2:
if state_2 in state_list:
if neighbor_path_length[state_2] <=3:
next_door_list.append(state_2)
if next_door_list:
print(state_1, next_door_list)
###Output
AL ['GA', 'MS', 'FL', 'KY', 'NC', 'SC', 'TN', 'WV']
AR ['KS', 'MO', 'OK', 'AZ', 'NM', 'UT', 'IA', 'NE', 'SD', 'LA', 'TX']
AZ ['NM', 'UT', 'AR', 'KS', 'MO', 'OK', 'CA', 'NV', 'CO', 'WY']
CA ['NV', 'AZ', 'NM', 'UT', 'ID', 'OR']
CO ['WY', 'AZ', 'NM', 'UT', 'IA', 'NE', 'SD', 'ID', 'OR', 'MT']
CT ['MA', 'NH', 'NJ', 'NY', 'PA', 'RI', 'VT', 'DC', 'DE', 'MD', 'VA', 'IL', 'IN', 'MI', 'OH', 'WI', 'ME']
DC ['DE', 'MD', 'VA', 'CT', 'MA', 'NH', 'NJ', 'NY', 'PA', 'RI', 'VT', 'KY', 'NC', 'SC', 'TN', 'WV']
DE ['DC', 'MD', 'VA', 'CT', 'MA', 'NH', 'NJ', 'NY', 'PA', 'RI', 'VT', 'KY', 'NC', 'SC', 'TN', 'WV']
FL ['AL', 'GA', 'MS']
GA ['AL', 'MS', 'FL', 'KY', 'NC', 'SC', 'TN', 'WV']
IA ['NE', 'SD', 'AR', 'KS', 'MO', 'OK', 'CO', 'WY', 'IL', 'IN', 'MI', 'OH', 'WI', 'MN', 'ND']
ID ['OR', 'CA', 'NV', 'CO', 'WY', 'WA']
IL ['IN', 'MI', 'OH', 'WI', 'CT', 'MA', 'NH', 'NJ', 'NY', 'PA', 'RI', 'VT', 'IA', 'NE', 'SD', 'KY', 'NC', 'SC', 'TN', 'WV']
IN ['IL', 'MI', 'OH', 'WI', 'CT', 'MA', 'NH', 'NJ', 'NY', 'PA', 'RI', 'VT', 'IA', 'NE', 'SD', 'KY', 'NC', 'SC', 'TN', 'WV']
KS ['AR', 'MO', 'OK', 'AZ', 'NM', 'UT', 'IA', 'NE', 'SD', 'LA', 'TX']
KY ['NC', 'SC', 'TN', 'WV', 'AL', 'GA', 'MS', 'DC', 'DE', 'MD', 'VA', 'IL', 'IN', 'MI', 'OH', 'WI']
LA ['TX', 'AR', 'KS', 'MO', 'OK']
MA ['CT', 'NH', 'NJ', 'NY', 'PA', 'RI', 'VT', 'DC', 'DE', 'MD', 'VA', 'IL', 'IN', 'MI', 'OH', 'WI', 'ME']
MD ['DC', 'DE', 'VA', 'CT', 'MA', 'NH', 'NJ', 'NY', 'PA', 'RI', 'VT', 'KY', 'NC', 'SC', 'TN', 'WV']
ME ['CT', 'MA', 'NH', 'NJ', 'NY', 'PA', 'RI', 'VT']
MI ['IL', 'IN', 'OH', 'WI', 'CT', 'MA', 'NH', 'NJ', 'NY', 'PA', 'RI', 'VT', 'IA', 'NE', 'SD', 'KY', 'NC', 'SC', 'TN', 'WV']
MN ['ND', 'IA', 'NE', 'SD', 'MT']
MO ['AR', 'KS', 'OK', 'AZ', 'NM', 'UT', 'IA', 'NE', 'SD', 'LA', 'TX']
MS ['AL', 'GA', 'FL', 'KY', 'NC', 'SC', 'TN', 'WV']
MT ['CO', 'WY', 'MN', 'ND', 'WA']
NC ['KY', 'SC', 'TN', 'WV', 'AL', 'GA', 'MS', 'DC', 'DE', 'MD', 'VA', 'IL', 'IN', 'MI', 'OH', 'WI']
ND ['MN', 'IA', 'NE', 'SD', 'MT']
NE ['IA', 'SD', 'AR', 'KS', 'MO', 'OK', 'CO', 'WY', 'IL', 'IN', 'MI', 'OH', 'WI', 'MN', 'ND']
NH ['CT', 'MA', 'NJ', 'NY', 'PA', 'RI', 'VT', 'DC', 'DE', 'MD', 'VA', 'IL', 'IN', 'MI', 'OH', 'WI', 'ME']
NJ ['CT', 'MA', 'NH', 'NY', 'PA', 'RI', 'VT', 'DC', 'DE', 'MD', 'VA', 'IL', 'IN', 'MI', 'OH', 'WI', 'ME']
NM ['AZ', 'UT', 'AR', 'KS', 'MO', 'OK', 'CA', 'NV', 'CO', 'WY']
NV ['CA', 'AZ', 'NM', 'UT', 'ID', 'OR']
NY ['CT', 'MA', 'NH', 'NJ', 'PA', 'RI', 'VT', 'DC', 'DE', 'MD', 'VA', 'IL', 'IN', 'MI', 'OH', 'WI', 'ME']
OH ['IL', 'IN', 'MI', 'WI', 'CT', 'MA', 'NH', 'NJ', 'NY', 'PA', 'RI', 'VT', 'IA', 'NE', 'SD', 'KY', 'NC', 'SC', 'TN', 'WV']
OK ['AR', 'KS', 'MO', 'AZ', 'NM', 'UT', 'IA', 'NE', 'SD', 'LA', 'TX']
OR ['ID', 'CA', 'NV', 'CO', 'WY', 'WA']
PA ['CT', 'MA', 'NH', 'NJ', 'NY', 'RI', 'VT', 'DC', 'DE', 'MD', 'VA', 'IL', 'IN', 'MI', 'OH', 'WI', 'ME']
RI ['CT', 'MA', 'NH', 'NJ', 'NY', 'PA', 'VT', 'DC', 'DE', 'MD', 'VA', 'IL', 'IN', 'MI', 'OH', 'WI', 'ME']
SC ['KY', 'NC', 'TN', 'WV', 'AL', 'GA', 'MS', 'DC', 'DE', 'MD', 'VA', 'IL', 'IN', 'MI', 'OH', 'WI']
SD ['IA', 'NE', 'AR', 'KS', 'MO', 'OK', 'CO', 'WY', 'IL', 'IN', 'MI', 'OH', 'WI', 'MN', 'ND']
TN ['KY', 'NC', 'SC', 'WV', 'AL', 'GA', 'MS', 'DC', 'DE', 'MD', 'VA', 'IL', 'IN', 'MI', 'OH', 'WI']
TX ['LA', 'AR', 'KS', 'MO', 'OK']
UT ['AZ', 'NM', 'AR', 'KS', 'MO', 'OK', 'CA', 'NV', 'CO', 'WY']
VA ['DC', 'DE', 'MD', 'CT', 'MA', 'NH', 'NJ', 'NY', 'PA', 'RI', 'VT', 'KY', 'NC', 'SC', 'TN', 'WV']
VT ['CT', 'MA', 'NH', 'NJ', 'NY', 'PA', 'RI', 'DC', 'DE', 'MD', 'VA', 'IL', 'IN', 'MI', 'OH', 'WI', 'ME']
WA ['ID', 'OR', 'MT']
WI ['IL', 'IN', 'MI', 'OH', 'CT', 'MA', 'NH', 'NJ', 'NY', 'PA', 'RI', 'VT', 'IA', 'NE', 'SD', 'KY', 'NC', 'SC', 'TN', 'WV']
WV ['KY', 'NC', 'SC', 'TN', 'AL', 'GA', 'MS', 'DC', 'DE', 'MD', 'VA', 'IL', 'IN', 'MI', 'OH', 'WI']
WY ['CO', 'AZ', 'NM', 'UT', 'IA', 'NE', 'SD', 'ID', 'OR', 'MT']
###Markdown
DM7.4 - Find path between two states
###Code
nx.dijkstra_path(GRAPH_ID, 'NY', 'CA', weight='distance')
nx.dijkstra_path(GRAPH_ID, 'OR', 'CA', weight='distance')
GRAPH_ID.nodes()
nx.single_source_dijkstra_path_length(GRAPH_ID, 'NY')
###Output
_____no_output_____
###Markdown
DM7.5 - Find all geohash 2 codes and their immediate neighbors - CLASS ASSIGNMENT
###Code
geohash_list = (list (GRAPH_ID.neighbors('geohash')))
geohash_list
for state_1 in state_list:
neighbor_path_length = nx.single_source_dijkstra_path_length(GRAPH_ID, state_1)
neighbor_state_list = neighbor_path_length.keys()
next_door_list = []
for state_2 in neighbor_state_list:
if state_1 != state_2:
if state_2 in geohash_list:
if neighbor_path_length[state_2] <=3:
next_door_list.append(state_2)
if next_door_list:
print(state_1, next_door_list)
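# A possible alternative reading of DM7.5 (a sketch, relying on the 'near'
# edges added earlier): for each two-character geohash cell, list the other
# geohash cells it is directly connected to in the graph.
for geohash_1 in geohash_list:
    near_cells = [n for n in GRAPH_ID.neighbors(geohash_1) if n in geohash_list]
    print(geohash_1, near_cells)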
###Output
_____no_output_____
###Markdown
DM7.6 - Use neighborhoods to find states which may receive virus from neighboring states as they are neighbors - CLASS ASSIGNMENT
###Code
for state_1 in state_list:
neighbor_path_length = nx.single_source_dijkstra_path_length(GRAPH_ID, state_1)
neighbor_state_list = neighbor_path_length.keys()
next_door_list = []
for state_2 in neighbor_state_list:
if state_1 != state_2:
if state_2 in state_list:
if neighbor_path_length[state_2] <=3:
next_door_list.append(state_2)
if next_door_list:
print(state_1, next_door_list)
###Output
AL ['GA', 'MS', 'FL', 'KY', 'NC', 'SC', 'TN', 'WV']
AR ['KS', 'MO', 'OK', 'AZ', 'NM', 'UT', 'IA', 'NE', 'SD', 'LA', 'TX']
AZ ['NM', 'UT', 'AR', 'KS', 'MO', 'OK', 'CA', 'NV', 'CO', 'WY']
CA ['NV', 'AZ', 'NM', 'UT', 'ID', 'OR']
CO ['WY', 'AZ', 'NM', 'UT', 'IA', 'NE', 'SD', 'ID', 'OR', 'MT']
CT ['MA', 'NH', 'NJ', 'NY', 'PA', 'RI', 'VT', 'DC', 'DE', 'MD', 'VA', 'IL', 'IN', 'MI', 'OH', 'WI', 'ME']
DC ['DE', 'MD', 'VA', 'CT', 'MA', 'NH', 'NJ', 'NY', 'PA', 'RI', 'VT', 'KY', 'NC', 'SC', 'TN', 'WV']
DE ['DC', 'MD', 'VA', 'CT', 'MA', 'NH', 'NJ', 'NY', 'PA', 'RI', 'VT', 'KY', 'NC', 'SC', 'TN', 'WV']
FL ['AL', 'GA', 'MS']
GA ['AL', 'MS', 'FL', 'KY', 'NC', 'SC', 'TN', 'WV']
IA ['NE', 'SD', 'AR', 'KS', 'MO', 'OK', 'CO', 'WY', 'IL', 'IN', 'MI', 'OH', 'WI', 'MN', 'ND']
ID ['OR', 'CA', 'NV', 'CO', 'WY', 'WA']
IL ['IN', 'MI', 'OH', 'WI', 'CT', 'MA', 'NH', 'NJ', 'NY', 'PA', 'RI', 'VT', 'IA', 'NE', 'SD', 'KY', 'NC', 'SC', 'TN', 'WV']
IN ['IL', 'MI', 'OH', 'WI', 'CT', 'MA', 'NH', 'NJ', 'NY', 'PA', 'RI', 'VT', 'IA', 'NE', 'SD', 'KY', 'NC', 'SC', 'TN', 'WV']
KS ['AR', 'MO', 'OK', 'AZ', 'NM', 'UT', 'IA', 'NE', 'SD', 'LA', 'TX']
KY ['NC', 'SC', 'TN', 'WV', 'AL', 'GA', 'MS', 'DC', 'DE', 'MD', 'VA', 'IL', 'IN', 'MI', 'OH', 'WI']
LA ['TX', 'AR', 'KS', 'MO', 'OK']
MA ['CT', 'NH', 'NJ', 'NY', 'PA', 'RI', 'VT', 'DC', 'DE', 'MD', 'VA', 'IL', 'IN', 'MI', 'OH', 'WI', 'ME']
MD ['DC', 'DE', 'VA', 'CT', 'MA', 'NH', 'NJ', 'NY', 'PA', 'RI', 'VT', 'KY', 'NC', 'SC', 'TN', 'WV']
ME ['CT', 'MA', 'NH', 'NJ', 'NY', 'PA', 'RI', 'VT']
MI ['IL', 'IN', 'OH', 'WI', 'CT', 'MA', 'NH', 'NJ', 'NY', 'PA', 'RI', 'VT', 'IA', 'NE', 'SD', 'KY', 'NC', 'SC', 'TN', 'WV']
MN ['ND', 'IA', 'NE', 'SD', 'MT']
MO ['AR', 'KS', 'OK', 'AZ', 'NM', 'UT', 'IA', 'NE', 'SD', 'LA', 'TX']
MS ['AL', 'GA', 'FL', 'KY', 'NC', 'SC', 'TN', 'WV']
MT ['CO', 'WY', 'MN', 'ND', 'WA']
NC ['KY', 'SC', 'TN', 'WV', 'AL', 'GA', 'MS', 'DC', 'DE', 'MD', 'VA', 'IL', 'IN', 'MI', 'OH', 'WI']
ND ['MN', 'IA', 'NE', 'SD', 'MT']
NE ['IA', 'SD', 'AR', 'KS', 'MO', 'OK', 'CO', 'WY', 'IL', 'IN', 'MI', 'OH', 'WI', 'MN', 'ND']
NH ['CT', 'MA', 'NJ', 'NY', 'PA', 'RI', 'VT', 'DC', 'DE', 'MD', 'VA', 'IL', 'IN', 'MI', 'OH', 'WI', 'ME']
NJ ['CT', 'MA', 'NH', 'NY', 'PA', 'RI', 'VT', 'DC', 'DE', 'MD', 'VA', 'IL', 'IN', 'MI', 'OH', 'WI', 'ME']
NM ['AZ', 'UT', 'AR', 'KS', 'MO', 'OK', 'CA', 'NV', 'CO', 'WY']
NV ['CA', 'AZ', 'NM', 'UT', 'ID', 'OR']
NY ['CT', 'MA', 'NH', 'NJ', 'PA', 'RI', 'VT', 'DC', 'DE', 'MD', 'VA', 'IL', 'IN', 'MI', 'OH', 'WI', 'ME']
OH ['IL', 'IN', 'MI', 'WI', 'CT', 'MA', 'NH', 'NJ', 'NY', 'PA', 'RI', 'VT', 'IA', 'NE', 'SD', 'KY', 'NC', 'SC', 'TN', 'WV']
OK ['AR', 'KS', 'MO', 'AZ', 'NM', 'UT', 'IA', 'NE', 'SD', 'LA', 'TX']
OR ['ID', 'CA', 'NV', 'CO', 'WY', 'WA']
PA ['CT', 'MA', 'NH', 'NJ', 'NY', 'RI', 'VT', 'DC', 'DE', 'MD', 'VA', 'IL', 'IN', 'MI', 'OH', 'WI', 'ME']
RI ['CT', 'MA', 'NH', 'NJ', 'NY', 'PA', 'VT', 'DC', 'DE', 'MD', 'VA', 'IL', 'IN', 'MI', 'OH', 'WI', 'ME']
SC ['KY', 'NC', 'TN', 'WV', 'AL', 'GA', 'MS', 'DC', 'DE', 'MD', 'VA', 'IL', 'IN', 'MI', 'OH', 'WI']
SD ['IA', 'NE', 'AR', 'KS', 'MO', 'OK', 'CO', 'WY', 'IL', 'IN', 'MI', 'OH', 'WI', 'MN', 'ND']
TN ['KY', 'NC', 'SC', 'WV', 'AL', 'GA', 'MS', 'DC', 'DE', 'MD', 'VA', 'IL', 'IN', 'MI', 'OH', 'WI']
TX ['LA', 'AR', 'KS', 'MO', 'OK']
UT ['AZ', 'NM', 'AR', 'KS', 'MO', 'OK', 'CA', 'NV', 'CO', 'WY']
VA ['DC', 'DE', 'MD', 'CT', 'MA', 'NH', 'NJ', 'NY', 'PA', 'RI', 'VT', 'KY', 'NC', 'SC', 'TN', 'WV']
VT ['CT', 'MA', 'NH', 'NJ', 'NY', 'PA', 'RI', 'DC', 'DE', 'MD', 'VA', 'IL', 'IN', 'MI', 'OH', 'WI', 'ME']
WA ['ID', 'OR', 'MT']
WI ['IL', 'IN', 'MI', 'OH', 'CT', 'MA', 'NH', 'NJ', 'NY', 'PA', 'RI', 'VT', 'IA', 'NE', 'SD', 'KY', 'NC', 'SC', 'TN', 'WV']
WV ['KY', 'NC', 'SC', 'TN', 'AL', 'GA', 'MS', 'DC', 'DE', 'MD', 'VA', 'IL', 'IN', 'MI', 'OH', 'WI']
WY ['CO', 'AZ', 'NM', 'UT', 'IA', 'NE', 'SD', 'ID', 'OR', 'MT']
|
11.state-of-OA-viz.ipynb | ###Markdown
Visualize coverage on the "State of OA" 2017 DOI Catalogs. Visualize Sci-Hub's coverage on the DOI catalogs from
> Piwowar, Priem, Larivière, Alperin, Matthias, Norlander, Farley, West, Haustein. (2017-08-02) [**The State of OA: A large-scale analysis of the prevalence and impact of Open Access articles**](https://doi.org/10.7287/peerj.preprints.3119v1). _PeerJ Preprints_. See [this GitHub issue](https://github.com/greenelab/scihub-manuscript/issues/18) for more discussion of this analysis.
# Load magrittr pipe
`%>%` = dplyr::`%>%`
coverage_df = file.path('data', 'state-of-oa-coverage.tsv') %>%
readr::read_tsv()
coverage_df %>% head(2)
abbreviate_number <- function(x) {
x = round(x)
if (nchar(x) <= 3) {return(x)}
if (nchar(x) <= 5) {
return(paste0(signif(x / 1e3, digits = 2), 'K'))
}
if (nchar(x) <= 6) {
return(paste0(round(x / 1e3), 'K'))
}
return(paste0(signif(x / 1e6, digits = 2), 'M'))
}
abbreviate_number <- Vectorize(abbreviate_number)
coverage_df = coverage_df %>%
dplyr::mutate(label =
sprintf('%s of %s articles (%.1f%%)',
abbreviate_number(articles * coverage),
abbreviate_number(articles),
100 * coverage
))
coverage_df %>% head(2)
oa_colors = c(
closed="#bbbbbb",
bronze="#cd7f32",
green="#4CAF50",
hybrid="#ffa500",
gold="#ffe135"
)
# Set figure dimensions
width = 8
height = 3
options(repr.plot.width=width, repr.plot.height=height)
labels = c('Sci-Hub', 'PennText', 'PennText, Sci-Hub')
gg_faceted = coverage_df %>%
dplyr::filter(oadoi_color %in% names(oa_colors)) %>%
dplyr::filter(repos %in% labels) %>%
dplyr::mutate(oadoi_color = factor(oadoi_color, levels=rev(names(oa_colors)))) %>%
ggplot2::ggplot(ggplot2::aes(x = repos, y = coverage, fill = oadoi_color)) +
ggplot2::geom_col(position='dodge') +
ggplot2::geom_text(ggplot2::aes(label = label, y = 0.015), size=2, hjust='inward', color='#000000',
position=ggplot2::position_dodge(width = 0.9)) +
ggplot2::facet_grid(. ~ collection) +
ggplot2::scale_x_discrete(name = NULL, expand = c(0.02, 0), limits = rev(labels)) +
ggplot2::scale_y_continuous(name = "Repository's Coverage", labels = scales::percent, expand = c(0, 0), breaks=seq(0.1, 1, 0.2)) +
ggplot2::expand_limits(y = 1) +
ggplot2::scale_fill_manual(name=NULL, values = oa_colors,
labels = tools::toTitleCase(names(oa_colors)), breaks = names(oa_colors)) +
ggplot2::coord_flip() +
ggplot2::theme_bw() +
ggplot2::theme(
panel.grid.major.y = ggplot2::element_blank(),
legend.key.size=grid::unit(4, 'mm'),
legend.position='top',
legend.margin=ggplot2::margin(t = 0, r = 0, b = -6, l = 0, unit='pt'),
legend.box.margin=ggplot2::margin(t = 0, r = 0, b = 0, l = 0, unit='pt'),
plot.margin = ggplot2::margin(t = 0, r = 12, b = 0, l = 0, unit='pt'),
strip.background = ggplot2::element_rect(fill='#FEF2E2'))
file.path('figure', 'state-of-oa-colors-large.svg') %>%
ggplot2::ggsave(gg_faceted, width = width, height = height)
gg_faceted
# Set figure dimensions
width = 3.5
height = 1.3
options(repr.plot.width=width, repr.plot.height=height)
gg_mini = coverage_df %>%
dplyr::filter(oadoi_color %in% names(oa_colors)) %>%
dplyr::filter(repos %in% c('Sci-Hub')) %>%
dplyr::filter(collection == 'Combined') %>%
dplyr::mutate(oadoi_color = factor(oadoi_color, levels=rev(names(oa_colors)))) %>%
ggplot2::ggplot(ggplot2::aes(x = repos, y = coverage, fill = oadoi_color)) +
ggplot2::geom_col(position='dodge') +
ggplot2::geom_text(ggplot2::aes(label = label, y = 0.015), size=2.5, hjust='inward', color='#000000',
position=ggplot2::position_dodge(width = 0.9)) +
ggplot2::scale_x_discrete(name = NULL, expand = c(0.02, 0)) +
ggplot2::scale_y_continuous(name = NULL, labels = scales::percent, expand = c(0, 0), breaks=seq(0.1, 1, 0.2)) +
ggplot2::expand_limits(y = 1) +
ggplot2::scale_fill_manual(name=NULL, values = oa_colors,
labels = tools::toTitleCase(names(oa_colors)), breaks = names(oa_colors)) +
ggplot2::coord_flip() +
ggplot2::theme_bw() +
ggplot2::theme(
axis.text.y = ggplot2::element_blank(),
axis.ticks.y = ggplot2::element_blank(),
panel.grid.major.y = ggplot2::element_blank(),
legend.key.size=grid::unit(4, 'mm'),
legend.position='top',
legend.margin=ggplot2::margin(t = 0, r = 0, b = -9, l = 0, unit='pt'),
legend.box.margin=ggplot2::margin(t = 0, r = 0, b = 0, l = 0, unit='pt'),
plot.margin = ggplot2::margin(t = 0, r = 12, b = 0, l = 0, unit='pt'),
strip.background = ggplot2::element_rect(fill='#FEF2E2'))
file.path('figure', 'state-of-oa-colors-small.svg') %>%
ggplot2::ggsave(gg_mini, width = width, height = height)
gg_mini
###Output
_____no_output_____ |
Apache Spark/Spark-Streaming/01-Cap09-NLTK.ipynb | ###Markdown
Data Science Academy Big Data Real-Time Analytics com Python e Spark Chapter 9 Natural Language Processing with Python - NLTK Installing the NLTK package: http://www.nltk.org/install.html
###Code
# Installing the NLTK module
!pip install nltk
import nltk
# Installing the NLTK data files
# Click Download when prompted
nltk.download()
###Output
showing info https://raw.githubusercontent.com/nltk/nltk_data/gh-pages/index.xml
###Markdown
Read each definition and run the cells to understand the code and the concept being demonstrated Tokenization The process of splitting a string into lists of pieces, or "tokens". A token is a whole unit. For example: a word is a token in a sentence, and a sentence is a token in a paragraph. Splitting a paragraph into sentences
###Code
paragrafo = "Oi. Bom saber que você está aprendendo PLN. Obrigado por estar conosco."
from nltk.tokenize import sent_tokenize
# Splitting the paragraph into sentences
sent_tokenize(paragrafo)
import nltk.data
# Using data shipped with the NLTK package
tokenizer = nltk.data.load('tokenizers/punkt/PY3/english.pickle')
# Load on Windows
#tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
tokenizer.tokenize(paragrafo)
# Spanish data
spanish_tokenizer = nltk.data.load('tokenizers/punkt/PY3/spanish.pickle')
# Load on Windows
#spanish_tokenizer = nltk.data.load('tokenizers/punkt/spanish.pickle')
spanish_tokenizer.tokenize('Hola amigo. Estoy bien.')
spanish_tokenizer
###Output
_____no_output_____
###Markdown
Splitting a sentence into words
###Code
from nltk.tokenize import word_tokenize
word_tokenize('Data Science Academy')
from nltk.tokenize import TreebankWordTokenizer
tokenizer = TreebankWordTokenizer()
tokenizer.tokenize('Hello World.')
word_tokenize("can't")
from nltk.tokenize import WordPunctTokenizer
tokenizer = WordPunctTokenizer()
tokenizer.tokenize("Can't is a contraction.")
from nltk.tokenize import RegexpTokenizer
tokenizer = RegexpTokenizer("[\w']+")
tokenizer.tokenize("Can't is a contraction.")
from nltk.tokenize import regexp_tokenize
regexp_tokenize("Can't is a contraction.", "[\w']+")
tokenizer = RegexpTokenizer('\s+', gaps = True)
tokenizer.tokenize("Can't is a contraction.")
###Output
_____no_output_____
###Markdown
Training a Tokenizer
###Code
from nltk.tokenize import PunktSentenceTokenizer
from nltk.corpus import webtext
# /Users/dmpm/nltk_data/corpora/webtext
texto = webtext.raw('overheard.txt')
sent_tokenizer = PunktSentenceTokenizer(texto)
sents1 = sent_tokenizer.tokenize(texto)
sents1[0]
from nltk.tokenize import sent_tokenize
sents2 = sent_tokenize(texto)
sents2[0]
sents1[678]
sents2[678]
# Opening the corpus file directly from a local path (Windows variant commented out below)
with open('/Users/dmpm/nltk_data/corpora/webtext/overheard.txt', encoding = 'ISO-8859-2') as f:
texto = f.read()
# Path for Windows
# with open('C:/Users/usuario/AppData/Roaming/nltk_data/corpora/webtext/overheard.txt', encoding = 'ISO-8859-2') as f:
# texto = f.read()
sent_tokenizer = PunktSentenceTokenizer(texto)
sents = sent_tokenizer.tokenize(texto)
sents[0]
sents[678]
###Output
_____no_output_____
###Markdown
Stopwords Stopwords are common words that usually do not contribute to the meaning of a sentence, at least with regard to the information content and natural language processing. They are words such as "The" and "a" (in English) or "O/A" and "Um/Uma" (in Portuguese). Many search engines filter out these words (stopwords) as a way to save space in their search indexes.
###Code
from nltk.corpus import stopwords
english_stops = set(stopwords.words('english'))
words = ["Can't", 'is', 'a', 'contraction']
[word for word in words if word not in english_stops]
portuguese_stops = set(stopwords.words('portuguese'))
palavras = ["Aquilo", 'é', 'um', 'gato']
[palavra for palavra in palavras if palavra not in portuguese_stops]
stopwords.fileids()
stopwords.words('portuguese')
###Output
_____no_output_____
###Markdown
Wordnet WordNet is a lexical database (in English). It is a kind of dictionary created specifically for natural language processing.
###Code
from nltk.corpus import wordnet
syn = wordnet.synsets('cookbook')[0]
syn.name()
syn.definition()
wordnet.synsets('cooking')[0].examples()
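# A couple of extra, purely illustrative lookups: the hypernyms of the 'cookbook' synset
# defined above, and a Wu-Palmer similarity between two common synsets. The example words
# ('dog', 'cat') are assumptions chosen only for the demo.
print(syn.hypernyms())
print(wordnet.synsets('dog')[0].wup_similarity(wordnet.synsets('cat')[0]))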
###Output
_____no_output_____
###Markdown
Collocations Collocations are two or more words that tend to appear together frequently, such as "Estados Unidos" or "Rio Grande do Sul". These words can form many different combinations, which is why context is also important in natural language processing.
###Code
from nltk.corpus import webtext
from nltk.collocations import BigramCollocationFinder
from nltk.metrics import BigramAssocMeasures
words = [w.lower() for w in webtext.words('grail.txt')]
bcf = BigramCollocationFinder.from_words(words)
bcf.nbest(BigramAssocMeasures.likelihood_ratio, 4)
from nltk.corpus import stopwords
stopset = set(stopwords.words('english'))
filter_stops = lambda w: len(w) < 3 or w in stopset
bcf.apply_word_filter(filter_stops)
bcf.nbest(BigramAssocMeasures.likelihood_ratio, 4)
###Output
_____no_output_____
###Markdown
Stemming Words Stemming is the technique of removing suffixes and prefixes from a word, leaving its stem. For example, the stem of the word cooking is cook. A good algorithm knows that "ing" is a suffix that can be removed. Stemming is widely used by search engines for indexing words. Instead of storing all forms of a word, a search engine stores only the stem, which reduces the size of the index and improves the performance of the search process.
###Code
from nltk.stem import PorterStemmer
stemmer = PorterStemmer()
stemmer.stem('cooking')
stemmer.stem('cookery')
from nltk.stem import LancasterStemmer
stemmer = LancasterStemmer()
stemmer.stem('cooking')
stemmer.stem('cookery')
from nltk.stem import RegexpStemmer
stemmer = RegexpStemmer('ing')
stemmer.stem('cooking')
from nltk.stem import SnowballStemmer
SnowballStemmer.languages
portuguese_stemmer = SnowballStemmer('portuguese')
portuguese_stemmer.stem('Tudo bem')
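# Quick side-by-side look at the stemmers introduced above on a few words
# (the word list is an illustrative assumption)
_porter, _lancaster, _snowball = PorterStemmer(), LancasterStemmer(), SnowballStemmer('english')
for _w in ['cooking', 'cookery', 'running']:
    print(_w, _porter.stem(_w), _lancaster.stem(_w), _snowball.stem(_w))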
###Output
_____no_output_____
###Markdown
Corpus A Corpus is a collection of text documents, and Corpora is the plural of Corpus. The term comes from the Latin word for body (in this case, the body of a text). A custom Corpus is a collection of text files organized in a directory. If you are going to train your own model as part of a text classification process (such as text analysis), you will have to create your own Corpus and train on it.
###Code
from nltk.corpus.reader import WordListCorpusReader
# Creating a Corpus (the palavras.txt file lives in the same directory as the Jupyter Notebook)
reader = WordListCorpusReader('.', ['palavras.txt'])
reader.words()
reader.fileids()
reader.raw()
from nltk.tokenize import line_tokenize
line_tokenize(reader.raw())
from nltk.corpus import brown
brown.categories()
###Output
_____no_output_____ |
AAAI/Learnability/CIN/Linear/ds4/size_100/synthetic_type4_Linear_m_10.ipynb | ###Markdown
Generate dataset
###Code
np.random.seed(12)
y = np.random.randint(0,10,5000)
idx= []
for i in range(10):
print(i,sum(y==i))
idx.append(y==i)
x = np.zeros((5000,2))
np.random.seed(12)
x[idx[0],:] = np.random.multivariate_normal(mean = [4,6.5],cov=[[0.01,0],[0,0.01]],size=sum(idx[0]))
x[idx[1],:] = np.random.multivariate_normal(mean = [5.5,6],cov=[[0.01,0],[0,0.01]],size=sum(idx[1]))
x[idx[2],:] = np.random.multivariate_normal(mean = [4.5,4.5],cov=[[0.01,0],[0,0.01]],size=sum(idx[2]))
x[idx[3],:] = np.random.multivariate_normal(mean = [3,3.5],cov=[[0.01,0],[0,0.01]],size=sum(idx[3]))
x[idx[4],:] = np.random.multivariate_normal(mean = [2.5,5.5],cov=[[0.01,0],[0,0.01]],size=sum(idx[4]))
x[idx[5],:] = np.random.multivariate_normal(mean = [3.5,8],cov=[[0.01,0],[0,0.01]],size=sum(idx[5]))
x[idx[6],:] = np.random.multivariate_normal(mean = [5.5,8],cov=[[0.01,0],[0,0.01]],size=sum(idx[6]))
x[idx[7],:] = np.random.multivariate_normal(mean = [7,6.5],cov=[[0.01,0],[0,0.01]],size=sum(idx[7]))
x[idx[8],:] = np.random.multivariate_normal(mean = [6.5,4.5],cov=[[0.01,0],[0,0.01]],size=sum(idx[8]))
x[idx[9],:] = np.random.multivariate_normal(mean = [5,3],cov=[[0.01,0],[0,0.01]],size=sum(idx[9]))
x[idx[0]][0], x[idx[5]][5]
for i in range(10):
plt.scatter(x[idx[i],0],x[idx[i],1],label="class_"+str(i))
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
bg_idx = [ np.where(idx[3] == True)[0],
np.where(idx[4] == True)[0],
np.where(idx[5] == True)[0],
np.where(idx[6] == True)[0],
np.where(idx[7] == True)[0],
np.where(idx[8] == True)[0],
np.where(idx[9] == True)[0]]
bg_idx = np.concatenate(bg_idx, axis = 0)
bg_idx.shape
np.unique(bg_idx).shape
x = x - np.mean(x[bg_idx], axis = 0, keepdims = True)
np.mean(x[bg_idx], axis = 0, keepdims = True), np.mean(x, axis = 0, keepdims = True)
x = x/np.std(x[bg_idx], axis = 0, keepdims = True)
np.std(x[bg_idx], axis = 0, keepdims = True), np.std(x, axis = 0, keepdims = True)
for i in range(10):
plt.scatter(x[idx[i],0],x[idx[i],1],label="class_"+str(i))
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
foreground_classes = {'class_0','class_1', 'class_2'}
background_classes = {'class_3','class_4', 'class_5', 'class_6','class_7', 'class_8', 'class_9'}
fg_class = np.random.randint(0,3)
fg_idx = np.random.randint(0,m)
a = []
for i in range(m):
if i == fg_idx:
b = np.random.choice(np.where(idx[fg_class]==True)[0],size=1)
a.append(x[b])
print("foreground "+str(fg_class)+" present at " + str(fg_idx))
else:
bg_class = np.random.randint(3,10)
b = np.random.choice(np.where(idx[bg_class]==True)[0],size=1)
a.append(x[b])
print("background "+str(bg_class)+" present at " + str(i))
a = np.concatenate(a,axis=0)
print(a.shape)
print(fg_class , fg_idx)
np.reshape(a,(2*m,1))
mosaic_list_of_images =[]
mosaic_label = []
fore_idx=[]
for j in range(desired_num):
np.random.seed(j)
fg_class = np.random.randint(0,3)
fg_idx = np.random.randint(0,m)
a = []
for i in range(m):
if i == fg_idx:
b = np.random.choice(np.where(idx[fg_class]==True)[0],size=1)
a.append(x[b])
# print("foreground "+str(fg_class)+" present at " + str(fg_idx))
else:
bg_class = np.random.randint(3,10)
b = np.random.choice(np.where(idx[bg_class]==True)[0],size=1)
a.append(x[b])
# print("background "+str(bg_class)+" present at " + str(i))
a = np.concatenate(a,axis=0)
mosaic_list_of_images.append(np.reshape(a,(2*m,1)))
mosaic_label.append(fg_class)
fore_idx.append(fg_idx)
mosaic_list_of_images = np.concatenate(mosaic_list_of_images,axis=1).T
mosaic_list_of_images.shape
mosaic_list_of_images.shape, mosaic_list_of_images[0]
for j in range(m):
print(mosaic_list_of_images[0][2*j:2*j+2])
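# Each mosaic row stores its m 2-D points flattened into a vector of length 2*m, so block j
# occupies entries [2*j : 2*j+2]. The reshape below just makes that layout explicit (illustrative check).
print(mosaic_list_of_images.shape)             # (number of mosaics, 2*m)
print(mosaic_list_of_images[0].reshape(m, 2))  # the m points of the first mosaic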
def create_avg_image_from_mosaic_dataset(mosaic_dataset,labels,foreground_index,dataset_number, m):
"""
    mosaic_dataset : each data point is one flattened mosaic of m 2-D points (a vector of length 2*m)
    labels : mosaic_dataset labels
    foreground_index : contains list of indexes where foreground image is present so that using this we can take weighted average
    dataset_number : tells what ratio of the foreground block to use. For example, if it is "j" then fg_image_ratio = j/m and bg_image_ratio = (m-j)/((m-1)*m)
"""
avg_image_dataset = []
cnt = 0
counter = np.zeros(m) #np.array([0,0,0,0,0,0,0,0,0])
for i in range(len(mosaic_dataset)):
img = torch.zeros([2], dtype=torch.float64)
np.random.seed(int(dataset_number*10000 + i))
give_pref = foreground_index[i] #np.random.randint(0,9)
# print("outside", give_pref,foreground_index[i])
for j in range(m):
if j == give_pref:
img = img + mosaic_dataset[i][2*j:2*j+2]*dataset_number/m #2 is data dim
else :
img = img + mosaic_dataset[i][2*j:2*j+2]*(m-dataset_number)/((m-1)*m)
if give_pref == foreground_index[i] :
# print("equal are", give_pref,foreground_index[i])
cnt += 1
counter[give_pref] += 1
else :
counter[give_pref] += 1
avg_image_dataset.append(img)
print("number of correct averaging happened for dataset "+str(dataset_number)+" is "+str(cnt))
print("the averaging are done as ", counter)
return avg_image_dataset , labels , foreground_index
avg_image_dataset_1 , labels_1, fg_index_1 = create_avg_image_from_mosaic_dataset(mosaic_list_of_images[0:tr_j], mosaic_label[0:tr_j], fore_idx[0:tr_j] , 1, m)
test_dataset , labels , fg_index = create_avg_image_from_mosaic_dataset(mosaic_list_of_images[tr_j : tr_k], mosaic_label[tr_j : tr_k], fore_idx[tr_j : tr_k] , m, m)
avg_image_dataset_1 = torch.stack(avg_image_dataset_1, axis = 0)
# avg_image_dataset_1 = (avg - torch.mean(avg, keepdims= True, axis = 0)) / torch.std(avg, keepdims= True, axis = 0)
# print(torch.mean(avg_image_dataset_1, keepdims= True, axis = 0))
# print(torch.std(avg_image_dataset_1, keepdims= True, axis = 0))
print("=="*40)
test_dataset = torch.stack(test_dataset, axis = 0)
# test_dataset = (avg - torch.mean(avg, keepdims= True, axis = 0)) / torch.std(avg, keepdims= True, axis = 0)
# print(torch.mean(test_dataset, keepdims= True, axis = 0))
# print(torch.std(test_dataset, keepdims= True, axis = 0))
print("=="*40)
x1 = (avg_image_dataset_1).numpy()
y1 = np.array(labels_1)
plt.scatter(x1[y1==0,0], x1[y1==0,1], label='class 0')
plt.scatter(x1[y1==1,0], x1[y1==1,1], label='class 1')
plt.scatter(x1[y1==2,0], x1[y1==2,1], label='class 2')
plt.legend()
plt.title("dataset4 CIN with alpha = 1/"+str(m))
x1 = (test_dataset).numpy() / m
y1 = np.array(labels)
plt.scatter(x1[y1==0,0], x1[y1==0,1], label='class 0')
plt.scatter(x1[y1==1,0], x1[y1==1,1], label='class 1')
plt.scatter(x1[y1==2,0], x1[y1==2,1], label='class 2')
plt.legend()
plt.title("test dataset4")
test_dataset[0:10]/m
test_dataset = test_dataset/m
test_dataset[0:10]
class MosaicDataset(Dataset):
"""MosaicDataset dataset."""
def __init__(self, mosaic_list_of_images, mosaic_label):
"""
Args:
csv_file (string): Path to the csv file with annotations.
root_dir (string): Directory with all the images.
transform (callable, optional): Optional transform to be applied
on a sample.
"""
self.mosaic = mosaic_list_of_images
self.label = mosaic_label
#self.fore_idx = fore_idx
def __len__(self):
return len(self.label)
def __getitem__(self, idx):
return self.mosaic[idx] , self.label[idx] #, self.fore_idx[idx]
avg_image_dataset_1[0].shape
avg_image_dataset_1[0]
batch = 200
traindata_1 = MosaicDataset(avg_image_dataset_1, labels_1 )
trainloader_1 = DataLoader( traindata_1 , batch_size= batch ,shuffle=True)
testdata_1 = MosaicDataset(avg_image_dataset_1, labels_1 )
testloader_1 = DataLoader( testdata_1 , batch_size= batch ,shuffle=False)
testdata_11 = MosaicDataset(test_dataset, labels )
testloader_11 = DataLoader( testdata_11 , batch_size= batch ,shuffle=False)
class Whatnet(nn.Module):
def __init__(self):
super(Whatnet,self).__init__()
self.linear1 = nn.Linear(2,3)
# self.linear2 = nn.Linear(50,10)
# self.linear3 = nn.Linear(10,3)
torch.nn.init.xavier_normal_(self.linear1.weight)
torch.nn.init.zeros_(self.linear1.bias)
def forward(self,x):
# x = F.relu(self.linear1(x))
# x = F.relu(self.linear2(x))
x = (self.linear1(x))
return x
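# Quick, purely illustrative sanity check of the classifier's shapes: the linear layer maps
# the 2-dimensional averaged points to 3 class scores.
_sanity_net = Whatnet().double()
print(_sanity_net(torch.zeros(4, 2, dtype=torch.float64)).shape)  # torch.Size([4, 3])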
def calculate_loss(dataloader,model,criter):
model.eval()
r_loss = 0
with torch.no_grad():
for i, data in enumerate(dataloader, 0):
inputs, labels = data
inputs, labels = inputs.to("cuda"),labels.to("cuda")
outputs = model(inputs)
loss = criter(outputs, labels)
r_loss += loss.item()
return r_loss/(i+1)
def test_all(number, testloader,net):
correct = 0
total = 0
out = []
pred = []
with torch.no_grad():
for data in testloader:
images, labels = data
images, labels = images.to("cuda"),labels.to("cuda")
out.append(labels.cpu().numpy())
outputs= net(images)
_, predicted = torch.max(outputs.data, 1)
pred.append(predicted.cpu().numpy())
total += labels.size(0)
correct += (predicted == labels).sum().item()
pred = np.concatenate(pred, axis = 0)
out = np.concatenate(out, axis = 0)
print("unique out: ", np.unique(out), "unique pred: ", np.unique(pred) )
print("correct: ", correct, "total ", total)
print('Accuracy of the network on the %d test dataset %d: %.2f %%' % (total, number , 100 * correct / total))
def train_all(trainloader, ds_number, testloader_list):
print("--"*40)
print("training on data set ", ds_number)
torch.manual_seed(12)
net = Whatnet().double()
net = net.to("cuda")
criterion_net = nn.CrossEntropyLoss()
optimizer_net = optim.Adam(net.parameters(), lr=0.001 ) #, momentum=0.9)
acti = []
loss_curi = []
epochs = 1000
running_loss = calculate_loss(trainloader,net,criterion_net)
loss_curi.append(running_loss)
print('epoch: [%d ] loss: %.3f' %(0,running_loss))
for epoch in range(epochs): # loop over the dataset multiple times
ep_lossi = []
running_loss = 0.0
net.train()
for i, data in enumerate(trainloader, 0):
# get the inputs
inputs, labels = data
inputs, labels = inputs.to("cuda"),labels.to("cuda")
# zero the parameter gradients
optimizer_net.zero_grad()
# forward + backward + optimize
outputs = net(inputs)
loss = criterion_net(outputs, labels)
# print statistics
running_loss += loss.item()
loss.backward()
optimizer_net.step()
running_loss = calculate_loss(trainloader,net,criterion_net)
if(epoch%200 == 0):
print('epoch: [%d] loss: %.3f' %(epoch + 1,running_loss))
loss_curi.append(running_loss) #loss per epoch
if running_loss<=0.05:
print('epoch: [%d] loss: %.3f' %(epoch + 1,running_loss))
break
print('Finished Training')
correct = 0
total = 0
with torch.no_grad():
for data in trainloader:
images, labels = data
images, labels = images.to("cuda"), labels.to("cuda")
outputs = net(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print('Accuracy of the network on the %d train images: %.2f %%' % (total, 100 * correct / total))
for i, j in enumerate(testloader_list):
test_all(i+1, j,net)
print("--"*40)
return loss_curi
train_loss_all=[]
testloader_list= [ testloader_1, testloader_11]
train_loss_all.append(train_all(trainloader_1, 1, testloader_list))
%matplotlib inline
for i,j in enumerate(train_loss_all):
plt.plot(j,label ="dataset "+str(i+1))
plt.xlabel("Epochs")
plt.ylabel("Training_loss")
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
###Output
_____no_output_____ |
ParticleFilterBasics.ipynb | ###Markdown
Basic Introduction to Particle Filters : Updating and Sampling The Kalman Filter can only model Gaussian distributions. The particle filter is an approach for dealing with arbitrary distributions, and it uses multiple random samples to represent them. To represent a PDF using samples, we can take more samples in areas where values are higher. A more "lightweight" approach is to weight the samples from high-value areas, so that we require fewer samples. The particle set Every sample has two variables: $X = \langle x^{[j]}, w^{[j]} \rangle_{j = 1,...,J}$ with $w^{[j]}$ the importance weight (a real number) and $x^{[j]}$ the state hypothesis (a vector). The samples represent the posterior: $p(x) = \sum^J_{j=1}w^{[j]}\delta_{x^{[j]}}(x)$ with the weights summing up to one. Depending on how complex the function to represent is, a larger number of samples may be needed. How do we obtain the samples? Particle filter for dynamic state estimation problems The particle filter is a recursive Bayes filter. It is a non-parametric approach, because it models the distribution by taking samples. The recursive steps can be summed up as: - Prediction: draw from a proposal distribution $\pi$ to generate the samples of the target $f$. The proposal is a user-defined choice, so it could be, for instance, the odometry model. - Correction: weighting by the ratio of target and proposal. It is not a user-defined choice. It accounts for the difference between $\pi$ and $f$ using a weight $w = f(x)/\pi(x)$. - Resampling: draw a sample $i$ with probability $w_t^{[i]}$ and repeat $J$ times. The higher the weight of a sample, the higher the probability of drawing it in the next step. This way, we replace the weights by frequencies. Monte Carlo localization With Monte Carlo localization we estimate the position and orientation of a platform using a particle filter. - Each particle is a pose hypothesis. - The proposal is the motion model. - The correction is performed via the observation model. Resampling For resampling, stochastic universal resampling is faster and better since it has a low variance, and it also works when all samples have identical weights. Cons The particle filter needs to represent the space of possibilities in an efficient manner, and because of that it is: - Problematic in high-dimensional spaces. - Problematic in situations with high uncertainty. Either of those two cases makes the number of particles needed to represent the belief grow exponentially. Pros - Works with non-Gaussian distributions. - Works well in low-dimensional spaces. - Handles data association ambiguities. - Easily incorporates different sensing modalities. - Comparably robust to imperfect models and suboptimal setups. - Easy to implement. Variants - Real-time particle filters to handle situations with sensor data coming at different rates. - Delayed-state particle filters, when some of the streams come very late and need to be re-synchronized. - Rao-Blackwellized particle filters for dealing with high-dimensional state spaces. Finally, as stated by Cyrill Stachniss in his course: THE ART IS TO DESIGN APPROPRIATE MOTION AND SENSOR MODELS
###Code
!pip install celluloid
import pandas as pd
import matplotlib.pyplot as plt
from celluloid import Camera
from IPython.display import HTML
import numpy as np
import os
import math
import seaborn as sns
import random
%matplotlib inline
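# Tiny illustration of the weighted-sample idea from the introduction above: a set of
# samples x^[j] with weights w^[j] represents a distribution, and expectations become
# weighted sums. All numbers here are illustrative assumptions, not exercise data.
_x = np.array([0.0, 1.0, 2.0, 3.0])        # sample states x^[j]
_w = np.array([0.1, 0.2, 0.3, 0.4])        # importance weights w^[j]
_w = _w / _w.sum()                         # weights must sum to one
print("estimated mean:", np.sum(_w * _x))  # E[x] under the represented distribution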
###Output
_____no_output_____
###Markdown
Updating exerciseThe first exercise in this basic Particle Filter notebook is to implement the updating function that updates the position of the particles according to the motion model. Prepare the Odometry data
###Code
def read_data(filename,path):
data = pd.read_csv(path + filename,delimiter = ' ',header=None, names = ['l','t','r1','r2']) # or id, range and bearing for sensor
return (data)
odom = read_data('odometry.dat','')
timestep = []
timestep = [i for i in range(odom.shape[0])]
odom.insert(0,"timestep",timestep, True)
odom = odom.drop(['l'],axis = 1)
odom
noise = [0.005, 0.01, 0.005]
numParticles = 100
###Output
_____no_output_____
###Markdown
Initialize the particles array
###Code
# Initialize values for particles
weights = np.ones(numParticles)*(1/numParticles)
p = []
posei = np.array([[0], [0], [0]])
for i in range(numParticles):
p.append(posei)
data = {'weights': weights,
'pose': p,
'history': p}
# Create dataframe
particles = pd.DataFrame(data)
particles
###Output
_____no_output_____
###Markdown
Auxiliary functions
###Code
def normalize_angle(phi):
# Normalize phi to be between -pi and pi
while(phi>np.pi):
phi -= 2*np.pi;
while(phi<-np.pi):
phi += 2*np.pi
phiNorm = phi
return phiNorm
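# Quick illustrative check: angles outside (-pi, pi] are wrapped back into the interval
print(normalize_angle(3*np.pi), normalize_angle(-3*np.pi))   # ~3.1416, ~-3.1416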
###Output
_____no_output_____
###Markdown
Prediction stepFor every sample, we **draw** from the distribution $x_t^{[j]} \sim p(x_t \mid x_{t-1}^{[j]}, u_t)$. That is, instead of taking the maximum value of the distribution, we draw from it according to the motion model's Gaussian distribution. This will cause the particles to spread and expand every time we move, since we are increasing the uncertainty if we move in the environment without observing it.
###Code
def prediction_step(particles, u , noise):
r1noise = noise[0]
transnoise = noise[1]
r2noise = noise[2]
parts = particles.copy()
numparticles = particles.shape[0]
    for i in range(numparticles):
# Update the historic poses
history = parts.at[i,'history']
pose = parts.at[i,'pose']
parts.at[i,'history'] = np.hstack((history,pose))
# Sample a new pose for the particle
# Update the robot particles according to the noise-free motion model
#print(parts.loc[0,'pose'])
    # Use the odometry command passed in as u (one row of the odometry table) instead of always
    # reading the first row of the global odom dataframe
    trans, rot1, rot2 = u['t'].values[0], u['r1'].values[0], u['r2'].values[0]
    parts.loc[:,'pose'] = parts.loc[:,'pose'].apply(lambda x: np.array([[x[0][0] + trans*math.cos(x[2][0]+rot1)],
                                                                        [x[1][0] + trans*math.sin(x[2][0]+rot1)],
                                                                        [x[2][0] + normalize_angle(rot1+rot2)]]))
#normalize angles
#parts.loc[:,'pose'] = parts.loc[:,'pose'].apply(lambda x: np.array([[x[0][0]],[x[1][0]],[normalize_angle(x[2][0])]]))
# update particles according to motion represented by odometry and noise
# With the function random.normal, we draw samples from a Gaussian with mean
# the pose, and std dev the sum of the noise parameters.
parts.loc[:,'pose'] = parts.loc[:,'pose'].apply(lambda x: np.random.normal(x,r1noise + transnoise + r2noise))
return parts
def plot_state(particles, t, fig, ax):
# visualize state of particles
ax.set_xticks([x for x in range(-2,12)],minor=True )
ax.set_yticks([y for y in range(-2,12)],minor=True)
# using seaborn, set background grid to gray
sns.set_style("dark")
# Plot grid on minor axes in gray (width = 1)
plt.grid(which='minor',ls='-',lw=1, color='white')
# Plot grid on major axes in larger width
plt.grid(which='major',ls='-',lw=2, color='white')
# Plot particles
for key, value in particles['pose'].iteritems():
ax.text(value[0][0],value[1][0], '.', color = 'red', fontsize = 20)
return fig
def particle_loop(camera,fig, odometry, particles, noise):
ps = particles.copy()
for t in range(odometry.shape[0]):#odometry.shape[0]
ps = prediction_step(ps, odometry.loc[(odom['timestep'] == t)], noise)
fig = plot_state(ps, t, fig, ax)
camera.snap()
return camera
###Output
_____no_output_____
###Markdown
Create the figure and run the loop
###Code
fig,ax = plt.subplots()
camera = Camera(fig)
camera = particle_loop(camera, fig, odom, particles, noise)
animation = camera.animate()
HTML(animation.to_html5_video())
###Output
_____no_output_____
###Markdown
Resampling ExampleNow we will see how to perform the resampling process in the Particle Filter (without motion), and we will plot the results to compare the particles before and after resampling. Initialize particles (v2.0) Initialize the particles by drawing around the point [0,0] with a Gaussian distribution:
###Code
# Initialize values for particles
weights = np.ones(numParticles)*(1/numParticles)
mu, sigma = [0,0], [[2, 1],[1, 2]] # mean and covariance (the covariance matrix must be positive semi-definite)
pose = np.random.multivariate_normal(mu, sigma, (numParticles)).tolist()
data = {'weights': weights,
'pose': pose,
'history': pose}
# Create dataframe
weighted_particles = pd.DataFrame(data)
weighted_particles
###Output
_____no_output_____
###Markdown
Weight the particles according to their distance to [0 0]:
###Code
sig = np.diag([0.2,0.2], k= 0)
siginv = np.linalg.inv(sig)
weighted_particles.loc[:,'weights'] = weighted_particles.loc[:,'pose'].apply(lambda x: math.exp((-1/2)*float(np.matmul(np.matmul(np.reshape(np.asarray(x),(1,2)),siginv),np.reshape(np.asarray(x),(2,1))))))
weighted_particles
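# Equivalent vectorised form of the weighting above (illustrative): w_j = exp(-0.5 * x_j^T Sigma^-1 x_j)
_pts = np.asarray(weighted_particles['pose'].tolist())                    # shape (numParticles, 2)
_w_vec = np.exp(-0.5 * np.einsum('ni,ij,nj->n', _pts, siginv, _pts))
print(np.allclose(_w_vec, weighted_particles['weights'].values))          # True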
###Output
_____no_output_____
###Markdown
ResampleThe resampling process induces a loss of diversity in the particle population, which manifests as an approximation error: even though the variance of the particle set itself decreases, the variance of the particle set as an estimator increases. The "Low Variance Sampling", also called "Stochastic Universal Sampling", is an implementation that selects samples in a sequential stochastic process, instead of selecting them independently of each other (Probabilistic Robotics, p.109). We select a single random number and then select the samples according to this number, but still with a probability proportional to the sample weight.
###Code
def low_variance_sampling(particles):
numParticles = particles.shape[0]
w = particles.loc[:,'weights']
# normalize the weights
w = w/np.sum(w)
newParticles = pd.DataFrame({'weights' : [],'pose' : [],'history' : []})
c = w[0]
i = 0
# Initialize the position of the sampling wheel
r = random.uniform(0.,1/numParticles)
# Move along the wheel to select particles
for m in range (numParticles):
        U = r + m * (1/numParticles)  # m starts at 0, so the wheel advances by m/numParticles each draw
while (U > c):
i += 1
c = c + w[i]
newParticles = newParticles.append(particles.loc[i,:])
return newParticles
resampled_particles = low_variance_sampling(weighted_particles)
resampled_particles
###Output
_____no_output_____
###Markdown
Plot particles before (red) and after (blue)
###Code
fig,ax = plt.subplots()
# visualize state of particles
ax.set_xticks([x for x in range(-4,5)],minor=True )
ax.set_yticks([y for y in range(-4,5)],minor=True)
# using seaborn, set background grid to gray
sns.set_style("dark")
# Plot grid on minor axes in gray (width = 1)
plt.grid(which='minor',ls='-',lw=1, color='white')
# Plot grid on major axes in larger width
plt.grid(which='major',ls='-',lw=2, color='white')
# Plot particles
for key, value in weighted_particles['pose'].iteritems():
ax.text(value[0],value[1], '.', color = 'red', fontsize = 25)
for key, value in resampled_particles['pose'].iteritems():
ax.text(value[0],value[1], '.', color = 'blue', fontsize = 20)
plt.show()
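# The introduction above also describes a correction step that weights each particle with an
# observation model before resampling, which this toy example skips. Below is a minimal,
# self-contained sketch of one full predict-weight-resample iteration in 1-D. Everything in it
# (the motion command, the landmark at x=10, the simulated range measurement, and the noise
# values) is an illustrative assumption, not part of the exercise data.
def mcl_iteration_sketch(states, control=1.0, landmark=10.0, true_range=6.0,
                         motion_std=0.1, meas_std=0.5):
    # Prediction: propagate the 1-D states with the motion model plus sampled noise
    predicted = states + control + np.random.normal(0.0, motion_std, size=states.shape)
    # Correction: weight by the likelihood of the range measurement to the landmark
    expected_range = landmark - predicted
    weights = np.exp(-0.5 * ((expected_range - true_range) / meas_std) ** 2)
    weights = weights / np.sum(weights)
    # Resampling: low-variance (stochastic universal) resampling over the weights
    n = len(states)
    positions = np.random.uniform(0.0, 1.0 / n) + np.arange(n) / n
    cumulative = np.cumsum(weights)
    indexes = np.minimum(np.searchsorted(cumulative, positions), n - 1)
    return predicted[indexes]

_demo_states = np.random.normal(3.0, 1.0, size=200)
print("mean before:", _demo_states.mean(), "mean after:", mcl_iteration_sketch(_demo_states).mean())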
###Output
_____no_output_____ |
Web-Scraping_Challenges.ipynb | ###Markdown
Challenge I - Parsing the HTML DOM Structure1. Go to http://www.seleniumhq.org/2. Locate the element by id "banner-blm" and print it3. Locate the element by name "search" and print it4. Locate the element heading "Selenium automates browsers. That's it!" by XPath and print it5. Find the element by class "selenium-backers" and print it
###Code
# Import the dependencies
# Setting up the web driver
# Find the element by id
# Find the element by name
# Find the element by heading
# Find the element by class
# Close the web driver
###Output
_____no_output_____
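###Markdown
One possible way to tackle Challenge I is sketched below. It is only an illustration, not the official solution: it assumes a local chromedriver is available on the PATH, uses the Selenium 4 `By` locators, and relies on the ids, names and classes quoted in the challenge text, which may have changed on the live site.
###Code
# Hedged sketch for Challenge I (illustrative only)
from selenium import webdriver
from selenium.webdriver.common.by import By

driver = webdriver.Chrome()                      # assumes chromedriver is installed locally
driver.get("http://www.seleniumhq.org/")

# 2. Locate the element by id "banner-blm" and print it
print(driver.find_element(By.ID, "banner-blm").text)

# 3. Locate the element by name "search" and print it
print(driver.find_element(By.NAME, "search").get_attribute("outerHTML"))

# 4. Locate the heading by XPath and print it
print(driver.find_element(By.XPATH, "//h1[contains(., 'Selenium automates browsers')]").text)

# 5. Find the element by class "selenium-backers" and print it
print(driver.find_element(By.CLASS_NAME, "selenium-backers").text)

driver.quit()
###Output
_____no_output_____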
###Markdown
Challenge II - Navigating through Pages1. Go to https://wiki.python.org/moin/FrontPage2. Perform a search for the text "Beginner"3. In the left-side menu bar, change the value of the select from "More Options" to Raw Text
###Code
# Import the dependencies
# Setting up the web driver
# Find the element by id "searchinput"
# Clear the search box
# Input the search word "Beginner" to the search box
# Wait for 5 seconds
# Select the menu bar element by xpath
# Select the element by visible text "Raw Text"
# Wait for 5 seconds
# Close the web driver
###Output
_____no_output_____ |
Db2_Jupyter_Tutorials/An_Introduction_to_Jupyter_Notebooks.ipynb | ###Markdown
An Introduction to Jupyter NotebooksYou are now officially using a Jupyter notebook! This tutorial will show you some of the basics of using a notebook, including how to create the cells, run code, and save files for future use.Jupyter notebooks are based on IPython which started in development in the 2006/7 timeframe. The existing Python interpreter was limited in functionality and work was started to create a richer development environment. By 2011 the development efforts resulted in IPython being released (http://blog.fperez.org/2012/01/ipython-notebook-historical.html).Jupyter notebooks were a spinoff (2014) from the original IPython project. IPython continues to be the kernel that Jupyter runs on, but the notebooks are now a project on their own.Jupyter notebooks run in a browser and communicate to the backend IPython server which renders this content. These notebooks are used extensively by data scientists and anyone wanting to document, plot, and execute their code in an interactive environment. The beauty of Jupyter notebooks is that you document what you do as you go along. A Quick TourThis brief introduction will explain the various parts of a Jupyter notebook and how you interact with it. The remainder of the labs in this series will be using Jupyter notebooks so you will have to become really familiar with them! File MenuYou may have started this notebook by selecting it from the table of contents, but if you use standard Jupyter notebooks, then you would be presented with a similar file menu. To start using a notebook, all you need to do is click on its name. So for this notebook, you would have selected _An Introduction to Jupyter Notebooks_. If you want to manage the notebooks (i.e. delete them or place them into a folder, you can select them on the left hand side, and then execute the action from the pull down list (the arrow just below the Running tab).The Running tab shows you which notebooks are currently active or running. Each notebook is independent from the others. This means that there is no sharing of data or variables between each notebook because they are running on different threads. When you shut down a notebook, you are stopping its process or thread in the system.If you need to upload a new notebook (or replace an existing one), you can use the Upload button on the far right hand side. This will give you a file menu on your local system where you can select a notebook to upload. Jupyter notebooks have the extension .ipynb (IPython Notebook) which contains all of the notebook information in a JSON format. If you want to create a brand new notebook, you would select the New button that is beside the Upload button. The New button may ask what type of Notebook that you want. It could by Python 2 or 3, or even a different language based notebook (Scala for instance). This image only has Python 3 installed so that will be your only choice when creating a notebook. The Tool BarAt the top of this page you should see the following toolbar. The tool bar is found at the top of your Jupyter Notebook. There are three sections that you need to be familiar with.* Title (An Introduction...)* File/Edit/View... Menu* Save/Add Cell/... Icons TitleThe top of the notebook has the title of the contents. The name of the notebook can be changed by clicking on the title. This will open up a dialog which gives you the option of changing the name of the notebook.Note that this will create a new copy of the notebook with this name. 
One important behavior of Jupyter notebooks is that notebooks "autosave" the contents every few minutes (you know how much we hate losing work in the event of a crash). Changing the name of the title will make sure any changes get saved under the new name. However, changes will probably have been saved to the old name up to this point because of autosave. For that reason, it is better to make a new copy of the notebook before starting to edit it. File/Edit/View MenuThe menu bar contains options to `File`, `Edit`, `View` and perform other administrative actions within the Jupyter notebook. The `File` option gives you options to save the file as a checkpoint (a version that you can revert to), make a copy of the notebook, rename it, or download it. Of particular interest is the `Copy` command. This will make a copy of the existing notebook and start that up in a separate tab in the browser. You can then view and edit this copy rather than changing the original. The other option is to checkpoint your progress at regular intervals and then use the `Revert to Checkpoint` to restore the notebook to a previous version. The `Download` option is also very important to be familiar with. The notebook lives within your Jupyter environment so you may not know what the full file path is to access it. Use this option to download the file to your local operating system for safe keeping.The seven additional menu items are:* **Edit** - These menu items are used for editing the cells. The icons below the menus are equivalent to most of these menus.* **View** - View will turn on Header information, Line numbers, Tool bars and additional Cell information* **Insert** - Insert new cells above or below the current cell* **Cell** - This menu item lets you run code in a cell, run all cells, remove output from the cells or change the cell type* **Kernel** - The kernel that is running the current notebook can be restarted or stopped if there appears to be a problem with it* **Widgets** - Widgets are add-ons for Jupyter notebooks. * **Help** - If you need help, check out this menu.Some important menu items that you may want to use:- **View/Toggle Line Numbers** should be turned on if you have a substantial amount of code. This makes it easier to find errors when Python generates an error message with a line number. Note that this only applies to code cells.- **Insert/Above** is useful if you don't want to move your cursor to the cell above before hitting the [+] icon.- **Cell/All Output/Clear** will get rid of any output that your notebook has produced so that you can start over again.- **Cell/Run All** is useful if you are lazy or just want to test your entire notebook at once!- **Kernel/Restart** & Clear Output should be used if the notebook appears to hang and you want to start from scratch again. Cell ContentsA Jupyter notebook contains multiple "cells" which can contain one of three different types of objects:- **Code** - A cell that contains code that will run (usually Python)- **Markdown** - A cell that contains text and formatting using a language called Markdown- **Raw NBConvert** - A specialized cell that is rendered (displayed) using an extension, like mathematical formulasWe are going to keep it simple and only look at the two most common types of cells: code and markdown. The first example below is a code cell.
###Code
print('Hello World')
###Output
_____no_output_____ |
docs/ipynb/open_bed.ipynb | ###Markdown
Task: Simplest Read All
###Code
import os
os.chdir(r'D:\OneDrive\programs\pstsgkit\tests')
from sgkit_plink._open_bed import open_bed
val = open_bed('datasets/snpgen.bed').read(force_python_only=True)
print(val.shape, val.dtype)
val
###Output
(1000, 5) int8
###Markdown
Discussion* This is running code at https://github.com/CarlKCarlK/sgkit-plink/commit/b1ab9e04* In this example, it avoids parsing the metadata (samples and variant names, etc) * All metadata parsing is now lazy * In this case, it does a one-time, fast, line count of the \*.fam and \*.bim files instead of a parse.* "close()" is not needed because there is no longer a persistent file pointer*In the future** *force_python_only* won't be needed* Final import will be something like 'from sgkit_plink import open_bed Task: sgkit's use case: Reading data in batches when we know the dimensions
###Code
import numpy as np
with open_bed('datasets/all_chr.maf0.001.N300.bed',iid=300,sid=1015) as bed:
batch_size = 100
for start in range(0,bed.sid_count,batch_size):
val = bed.read(index=np.s_[:,start:start+batch_size],force_python_only=True)
print(val.shape)
###Output
(300, 100)
(300, 100)
(300, 100)
(300, 100)
(300, 100)
(300, 100)
(300, 100)
(300, 100)
(300, 100)
(300, 100)
(300, 15)
###Markdown
Discussion* This is sgkit's use case* The \*.fam and \*.bim files are never read.* I'm thinking about adding an \_\_item\_\_ method * But PySnpTools needs to specify a dtype and order on every read Task: Find the mean value for every chromosome (excluding missing values)
###Code
with open_bed('datasets/all_chr.maf0.001.N300.bed') as bed:
for chrom in np.unique(bed.chromosome): # pos[:,0] is chrom number
val = bed.read(index=np.s_[:,bed.chromosome==chrom],dtype='float32',force_python_only=True)
print(f'chromo {int(chrom)} mean: {np.nanmean(val):0.4}')
###Output
chromo 1 mean: 0.1631
chromo 10 mean: 0.2113
chromo 11 mean: 0.2175
chromo 12 mean: 0.208
chromo 13 mean: 0.1667
chromo 14 mean: 0.2361
chromo 15 mean: 0.1573
chromo 16 mean: 0.1735
chromo 17 mean: 0.1155
chromo 18 mean: 0.1494
chromo 19 mean: 0.234
chromo 2 mean: 0.1015
chromo 20 mean: 0.1828
chromo 21 mean: 0.126
chromo 22 mean: 0.2173
chromo 23 mean: 0.3713
chromo 3 mean: 0.1927
chromo 4 mean: 0.318
chromo 5 mean: 0.2038
chromo 6 mean: 0.1897
chromo 7 mean: 0.185
chromo 8 mean: 0.191
chromo 9 mean: 0.204
###Markdown
Discussion* This example shows what a NumPy-inspired API is good at.* Lazy metadata means the '.bim' file is parsed (one time) here while the '.fam' file is merely line-counted (one time). Punt Slicing Metadata?* My plan is to continue to ignore the rest of the metadata info (e.g., parent info, phenotype in \*.fam) because: * I've never had anyone request it * They can parse it from \*.fam and \*.bim if they need it* However, I can imagine a way to efficiently slice into metadata, but is this ability needed?
###Code
#hypothetical! doesn't run. may not implement
with open_bed('datasets/all_chr.maf0.001.N300.bed') as bed:
batch_size = 100
# The first time .sid_count is called, C++ code would (in one pass)
# count the lines in *.bim and return the offsets for sqrt(#lines) random lines.
# For example, if there turns out to be 1 million variants,
# we'd remember offsets to a random 1000 of them.
for start in range(0,bed.sid_count,batch_size):
#'batch' becomes an open_bed object for just a subset of the data
batch = bed[:,start:start+batch_size]
print(batch.sid) #We can find all the variant names in batch in Order(sqrt(#original_lines))
val = batch.read()
print(val.shape)
# Could do for *.fam and *.bim
###Output
_____no_output_____
###Markdown
Discussion* This doesn't require a cache file* It is low memory and fast* But ... * Is it needed by anyone? * If it is needed, can't they just use sgkit and its dask files? random stuff
###Code
%%time
filename = r'M:\deldir\genbgen\2\merged_487400x220000.1.bed'
bed = open_bed(filename)
bed.shape
bed.read(np.s_[:,100*1000:100*1000+10])
bed.chromosome
bed.iid
sum(range(1000*1000))
from pysnptools.util.mapreduce1 import map_reduce
from pysnptools.util.mapreduce1.runner import LocalMultiThread, LocalMultiProc, Local
import time
def slow_square(x):
time.sleep(1)
return x*x
def square_sum(count,runner=None):
ss= map_reduce(range(count),
mapper=slow_square,
reducer=sum,
runner=runner)
return ss
square_sum(10)
%%time
square_sum(10,runner=Local())
%%time
square_sum(100,runner=LocalMultiThread(10))
def thread_read(filename, runner):
with open_bed(filename) as bed:
bed.shape #causes the lazy meta to read the # lines in the two metadata files
batch_size = 100
def read_and_report(start):
print(start)
val = bed.read(index=np.s_[:,start:start+batch_size],force_python_only=True)
return val.shape
report = map_reduce(range(0,bed.sid_count,batch_size),
mapper=read_and_report,
runner=runner)
return report
%%time
thread_read('datasets/all_chr.maf0.001.N300.bed', runner=Local())
%%time
thread_read('datasets/all_chr.maf0.001.N300.bed', runner=LocalMultiThread(10))
bigfile = r'M:\deldir\genbgen\2\merged_487400x220000.1.bed'
%%time
thread_read(bigfile, runner=Local())
%%time
thread_read(bigfile, runner=LocalMultiThread(10))
###Output
0
22000
44000
66000
88000
110000
132000
154000
176000
198000
[output truncated: the ten reader threads keep printing their batch-start indices for the rest of the run, interleaved and often fused together on the same line]
|
notebooks/Recurrence.ipynb | ###Markdown
A qualitative example of Recurrence for Time Series Classification
###Code
import sys
import os
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import seaborn as sns
import numpy as np
from rnn import RNN
import torch
import torch.nn.functional as F
import tqdm
from torch import matmul, sigmoid, tanh
sns.set_style("whitegrid")
BANDS = ['B1', 'B10', 'B11', 'B12', 'B2', 'B3', 'B4', 'B5', 'B6', 'B7', 'B8',
'B8A', 'B9']
# simulate no GPU (for checks like torch.cuda.is_available())
os.environ["CUDA_VISIBLE_DEVICES"] = ""
###Output
_____no_output_____
###Markdown
Data Data PartitioningThe models were trained on the training partition of the HOLL region. This notebook loads data exclusively from the test partitions of the HOLL region and of a spatially different KRUM region. Change the parameter `region=holl` to `region=krum` to switch the data region.
###Code
!wget https://syncandshare.lrz.de/dl/fiM6b3e7eeyFAGWmAHEeoeBB/notebookdata.zip -O /tmp/notebookdata.zip
!unzip -o /tmp/notebookdata.zip -d /tmp
# Test Partition of the HOLL region (model was trained on the Train Partition on the HOLL region)
region = "holl" # <- change to 'krum' for test data from a different region
if region == "holl":
X = np.load("/tmp/data/x.npy")
Y = np.load("/tmp/data/y.npy")
meta = np.load("data/meta.npy")
elif region == "krum":
# Test Partition of the KRUM region
X = np.load("/tmp/data/x_krum.npy")
Y = np.load("/tmp/data/y_krum.npy")
meta = np.load("/tmp/data/meta_krum.npy")
print(f"X shape: (N, T, D):{X.shape} with N examples, sequencelength T, and D number of features per t")
print(f"y shape: (N, C):{Y.shape} with N examples, sequencelength T (class repeatet T times)")
klassennamen = np.load("/tmp/data/classnames.npy", allow_pickle=True)
classnames = klassennamen
###Output
X shape: (N, T, D):(100, 50, 13) with N examples, sequencelength T, and D number of features per t
y shape: (N, C):(100, 50) with N examples, sequencelength T (class repeatet T times)
###Markdown
Initialize the model and load pre-trained weights
###Code
model = RNN(input_dim=13, nclasses=33, hidden_dims=32,
num_rnn_layers=4, dropout=0.710883, bidirectional=True)
model.load("/tmp/data/rnn.pth")
model.eval()
###Output
loading model from /tmp/data/rnn.pth
###Markdown
Choose one Example for the Analysis
###Code
idx = 5 # <- change this to an index between 0 and N=99
x = X[idx]
y = Y[idx,0]
x = torch.from_numpy(x)
logprobabilities = model.forward(x[None,:,:])
###Output
_____no_output_____
###Markdown
Get the weights from the model's first layer and split up for respective gates
###Code
# get weight tensors of the first layer forward pass (l0)
weight_hh = model.lstm.weight_hh_l0
weight_ih = model.lstm.weight_ih_l0
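# Note (assumption): the LSTM's bias vectors (model.lstm.bias_ih_l0 / bias_hh_l0) are not used in
# the manual update further below, so the re-computed gate activations can differ slightly from the
# model's own forward pass if the LSTM was constructed with bias=True.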
w_ii, w_if, w_ig, w_io = weight_ih.chunk(4, 0)
w_hi, w_hf, w_hg, w_ho = weight_hh.chunk(4, 0)
w_i = torch.cat([w_ii,w_hi],dim=1)
w_f = torch.cat([w_if,w_hf],dim=1)
w_g = torch.cat([w_ig,w_hg],dim=1)
w_o = torch.cat([w_io,w_ho],dim=1)
###Output
_____no_output_____
###Markdown
Visualize the weights of the first recurrent layer
###Code
fix, axs = plt.subplots(2,4, figsize=(16,6))
axs[0,0].imshow(w_ii.detach().numpy())
axs[0,0].set_title("input gate weights x")
axs[0,1].imshow(w_if.detach().numpy())
axs[0,1].set_title("forget gate weights x")
axs[0,2].imshow(w_ig.detach().numpy())
axs[0,2].set_title("modulation gate weights x")
axs[0,3].imshow(w_io.detach().numpy())
axs[0,3].set_title("output gate weights x")
axs[1,0].imshow(w_hi.detach().numpy())
axs[1,0].set_title("input gate weights h")
axs[1,1].imshow(w_hf.detach().numpy())
axs[1,1].set_title("forget gate weights h")
axs[1,2].imshow(w_hg.detach().numpy())
axs[1,2].set_title("modulation gate weights h")
axs[1,3].imshow(w_ho.detach().numpy())
axs[1,3].set_title("output gate weights h")
[ax.grid(False) for ax in axs.reshape(-1)]
###Output
_____no_output_____
###Markdown
Re-implement the LSTM update. In PyTorch the LSTM loop is optimized away, but we have access to the weights and the input/output features, so we can re-implement the LSTM cell state update ourselves.
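For reference, and added here only as a summary of the re-implementation below (which, like that code, omits the bias terms), the per-timestep update is $i_t = \sigma(W_i [x_t; h_{t-1}])$, $f_t = \sigma(W_f [x_t; h_{t-1}])$, $g_t = \tanh(W_g [x_t; h_{t-1}])$, $o_t = \sigma(W_o [x_t; h_{t-1}])$, followed by $c_t = f_t \odot c_{t-1} + i_t \odot g_t$ and $h_t = o_t \odot \tanh(c_t)$.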
###Code
# for later plotting
i_all = list()
f_all = list()
g_all = list()
o_all = list()
h_all = list()
c_all = list()
h_prev = torch.zeros(32)
c_prev = torch.zeros(32)
for t in range(x.shape[0]):
# append previous information t-1 with current information
xh = torch.cat([x[t,:],h_prev])
i = sigmoid(matmul(w_i, xh))
f = sigmoid(matmul(w_f, xh))
g = tanh(matmul(w_g, xh))
o = sigmoid(matmul(w_o, xh))
c = f*c_prev + i*g
h = o*tanh(c)
# update c, h for next iteration
h_prev = h
c_prev = c
# store gates for later plotting
i_all.append(i)
f_all.append(f)
g_all.append(g)
o_all.append(o)
h_all.append(h)
c_all.append(c)
i_all = torch.stack(i_all)
f_all = torch.stack(f_all)
g_all = torch.stack(g_all)
o_all = torch.stack(o_all)
h_all = torch.stack(h_all)
c_all = torch.stack(c_all)
###Output
_____no_output_____
###Markdown
Visualize the Gate Activations when doing inference on a time series
###Code
import matplotlib.pylab as pl
hidden_dims = 32
colors = pl.cm.tab20(np.linspace(0,1,hidden_dims))
# positive spike for clouds
colors[30] = np.array([0,0,0,0])
fig, axs = plt.subplots(7,1, figsize=(12,13))
# randomize the sequence in which the lines are plotted (on top of each other...)
plotting_idxs = np.arange(hidden_dims)
np.random.shuffle(plotting_idxs)
axs[0].plot(x.detach().numpy())
axs[0].set_ylabel("Input Series")
axs[0].set_title("Class {}".format(classnames[y]))
[axs[1].plot(i_all[:,i].detach().numpy(), color=colors[i], alpha=0.5) for i in plotting_idxs]
axs[1].set_ylabel("Input Gate")
[axs[2].plot(f_all[:,i].detach().numpy(), color=colors[i], alpha=0.5) for i in plotting_idxs]
axs[2].set_ylabel("Forget Gate")
[axs[3].plot(g_all[:,i].detach().numpy(), color=colors[i], alpha=0.5) for i in plotting_idxs]
axs[3].set_ylabel("Modulation Gate")
[axs[4].plot(o_all[:,i].detach().numpy(), color=colors[i], alpha=0.5) for i in plotting_idxs]
axs[4].set_ylabel("Output Gate")
[axs[5].plot(h_all[:,i].detach().numpy(), color=colors[i], alpha=0.5) for i in plotting_idxs]
axs[5].set_ylabel("Output")
[axs[6].plot(c_all[:,i].detach().numpy(), color=colors[i], alpha=0.5, label="hidden dim {}".format(i)) for i in range(hidden_dims)]
axs[6].set_ylabel("Cell State")
#axs[7].legend()
path="/home/marc/projects/Phiweek19_Presentation/images/rnn_examples"
import pandas as pd
os.makedirs(os.path.join(path,str(idx)), exist_ok=True)
df = pd.DataFrame(x.detach().numpy(), columns=BANDS)
df.index.name="t"
df.to_csv(os.path.join(path,str(idx),"x.csv"))
print("writing: "+os.path.join(path,str(idx),"x.csv"))
for name, tensor in zip(["i","f","o","g","h","c"],[i_all,f_all,o_all,g_all,h_all,c_all]):
df = pd.DataFrame(tensor.detach().numpy())
df.index.name="t"
df.to_csv(os.path.join(path,str(idx),name+".csv"))
print("writing: "+os.path.join(path,str(idx),name+".csv"))
###Output
writing: /home/marc/projects/Phiweek19_Presentation/images/rnn_examples/5/x.csv
writing: /home/marc/projects/Phiweek19_Presentation/images/rnn_examples/5/i.csv
writing: /home/marc/projects/Phiweek19_Presentation/images/rnn_examples/5/f.csv
writing: /home/marc/projects/Phiweek19_Presentation/images/rnn_examples/5/o.csv
writing: /home/marc/projects/Phiweek19_Presentation/images/rnn_examples/5/g.csv
writing: /home/marc/projects/Phiweek19_Presentation/images/rnn_examples/5/h.csv
writing: /home/marc/projects/Phiweek19_Presentation/images/rnn_examples/5/c.csv
###Markdown
Gradients can be used to analyze the input feature importance. Do Forward Inference and Gradient Backpropagation: 1. add gradients to the input `x_` (new variable with gradients); 2. do forward inference; 3. get the scalar with the highest prediction score: `logprobabilities.exp().max()`; 4. do gradient backpropagation with `.backward()` (to all variables with gradients); 5. retrieve the gradients with `.grad`
###Code
x_ = torch.autograd.Variable(x[None,:,:], requires_grad=True)
logprobabilities = model.forward(x_)
maxypred = logprobabilities.exp().max()
maxypred.backward()
dydx = x_.grad
###Output
_____no_output_____
###Markdown
Visualize Gradients along with input time series
###Code
fig, axs = plt.subplots(2, figsize=(12,10))
axs[0].plot(x_[0].detach().numpy())
axs[0].set_ylabel("x")
axs[0].legend(BANDS)
axs[1].plot(x_.grad[0].numpy(),linewidth=4)
axs[1].set_ylabel("dy/dx")
axs[1].legend(BANDS)
df = pd.DataFrame(x_.grad[0].numpy(), columns=BANDS)
df.index.name="t"
df.to_csv(os.path.join(path,str(idx),"dydx.csv"))
print("writing: "+os.path.join(path,str(idx),"dydx.csv"))
fig, axs = plt.subplots(4, figsize=(12,10))
axs[0].plot(x_[0].detach().numpy())
axs[0].set_ylabel("x")
axs[1].plot(x_.grad[0,:,[0,1,12]].numpy(),linewidth=4)
axs[1].set_ylabel("atmosphere bands")
axs[1].legend(np.array(BANDS)[[0,1,12]])
axs[2].plot(x_.grad[0,:,[2,3]].numpy(),linewidth=4)
axs[2].legend(np.array(BANDS)[[2,3]])
axs[2].set_ylabel("swir bands")
axs[3].plot(x_.grad[0,:,4:12].numpy(),linewidth=4)
axs[3].set_ylabel("dy/dx")
axs[3].legend(np.array(BANDS)[4:12])
###Output
_____no_output_____
###Markdown
A PGF/TikZ example of idx=5 (HOLL region). Get the gradients on the weights
###Code
dweight_hh = model.lstm.weight_hh_l0.grad
dweight_ih = model.lstm.weight_ih_l0.grad
dw_ii, dw_if, dw_ig, dw_io = dweight_ih.chunk(4, 0)
dw_hi, dw_hf, dw_hg, dw_ho = dweight_hh.chunk(4, 0)
dw_i = torch.cat([dw_ii,dw_hi],dim=1)
dw_f = torch.cat([dw_if,dw_hf],dim=1)
dw_g = torch.cat([dw_ig,dw_hg],dim=1)
dw_o = torch.cat([dw_io,dw_ho],dim=1)
###Output
_____no_output_____
###Markdown
Visualize
###Code
fix, axs = plt.subplots(4, figsize=(16,16))
axs[0].imshow(dw_ii.detach().numpy().T)
axs[1].imshow(dw_if.detach().numpy().T)
axs[2].imshow(dw_ig.detach().numpy().T)
axs[3].imshow(dw_io.detach().numpy().T)
for ax, name in zip(axs.reshape(-1),["input","forget","modulation", "output"]):
ax.set_yticks(np.arange(len(BANDS)))
ax.set_yticklabels(BANDS)
ax.set_title("gradients " + name + " gate")
ax.grid(False)
###Output
_____no_output_____
###Markdown
Values Gates
###Code
fix, axs = plt.subplots(4, figsize=(16,16))
axs[0].imshow(w_ii.detach().numpy().T)
axs[1].imshow(w_if.detach().numpy().T)
axs[2].imshow(w_ig.detach().numpy().T)
axs[3].imshow(w_io.detach().numpy().T)
for ax, name in zip(axs.reshape(-1),["input","forget","modulation", "output"]):
ax.set_yticks(np.arange(len(BANDS)))
ax.set_yticklabels(BANDS)
ax.set_title(name + " gate")
ax.grid(False)
###Output
_____no_output_____ |
listas/dcc212l0-Copy1.ipynb | ###Markdown
Lista 00 - Python and NumPy Review. [NumPy](http://numpy.org) is an incredibly powerful package in Python, ubiquitous in any data science project. It integrates strongly with [Pandas](http://pandas.pydata.org), another tool we will cover in this course. NumPy adds support for multidimensional arrays and mathematical functions that let you easily perform linear algebra computations. This notebook is a collection of linear algebra examples computed using NumPy. NumPy: to make use of NumPy we need to import the library.
###Code
# -*- coding: utf8
import numpy as np
###Output
_____no_output_____
###Markdown
When we think about the practical side of data science, a key aspect that helps in implementing new algorithms is vectorization. Simply put, vectorization consists of using types such as **scalar**, **vector**, and **matrix** to perform computation more efficiently (in execution time). A matrix is a collection of values, usually represented as an 𝑚 × 𝑛 grid, where 𝑚 is the number of rows and 𝑛 is the number of columns. The edge lengths 𝑚 and 𝑛 do not have to be different. If 𝑚 = 𝑛, we call it a square matrix. A particularly interesting case is when 𝑚 = 1 or 𝑛 = 1; in that case we have a special kind of matrix that we call a vector. Although NumPy has a matrix object, we will do everything with NumPy arrays because they can have more than 2 dimensions. 1. **Scalar:** a zero-dimensional vector
###Code
1
###Output
_____no_output_____
###Markdown
2. **Vector:** represents one dimension. Below we will create a simple vector. First, let us create a list.
###Code
data_list = [3.5, 5, 2, 8, 4.2]
###Output
_____no_output_____
###Markdown
Note its type.
###Code
type(data_list)
###Output
_____no_output_____
###Markdown
Although vectors and lists look alike, NumPy vectors are optimized for linear algebra operations. Data science makes heavy use of such operations, which is one of the reasons for the dependency on NumPy. Below we create a vector.
###Code
data = np.array(data_list)
print(data)
print(type(data))
###Output
_____no_output_____
###Markdown
Note how we can add a number to it. Such an operation is not possible with lists.
###Code
data + 7
###Output
_____no_output_____
###Markdown
3. **Matrices:** represent two dimensions.
###Code
X = np.array([[2, 4],
[1, 3]])
X
###Output
_____no_output_____
###Markdown
We can index the matrices and the vectors.
###Code
data[0]
X[0, 1] # here: first row, second column
###Output
_____no_output_____
###Markdown
We can also create vectors/matrices of random numbers
###Code
X = np.random.randn(4, 3) # generates random numbers from a normal distribution
print(X)
###Output
_____no_output_____
###Markdown
Indexing. Getting the first row
###Code
X[0] # note that 0 is the first row; compare with the X[0, 1] from before.
X[1] # second row
X[2] # third row
###Output
_____no_output_____
###Markdown
Note how all the returned types are `array`. Array is NumPy's generic name for vectors and matrices. `X[:, c]` picks a column
###Code
X[:, 0]
X[:, 1]
###Output
_____no_output_____
###Markdown
`X[um_vetor]` picks rows of the matrix, while `X[:, um_vetor]` picks columns
###Code
X[[0, 0, 1]] # note that we pick the first row, indexed by 0, twice
###Output
_____no_output_____
###Markdown
Below we take the second and then the first column
###Code
X[:, [1, 0]]
###Output
_____no_output_____
###Markdown
Boolean Indexing. `X[vetor_booleano]` returns the rows (or the columns, with X[:, vetor_booleano]) where the vector is True
###Code
X[[True, False, True, False]]
X[:, [False, True, True]]
###Output
_____no_output_____
###Markdown
Reshape, Flatten and Ravel. Every vector or matrix can be reshaped. Note how the matrix below has 9x8=72 elements. We can reshape it into other arrays of size 72.
###Code
X = np.random.randn(9, 8)
###Output
_____no_output_____
###Markdown
Creating an 18x4 matrix.
###Code
X.reshape((18, 4))
###Output
_____no_output_____
###Markdown
Or a vector of 72 elements
###Code
X.reshape(72)
###Output
_____no_output_____
###Markdown
The flatten and ravel calls do essentially the same thing: they give a one-dimensional version of the matrix (flatten returns a copy, while ravel returns a view whenever possible).
###Code
X.flatten()
X.ravel()
###Output
_____no_output_____
###Markdown
NumPy's built-in functions can easily be called on arrays. Most functions are applied element-wise (like scalar multiplication). For example, if we call `log()` on an array, the logarithm is taken of each element.
###Code
np.log(data)
###Output
_____no_output_____
###Markdown
Mean computes the average
###Code
np.mean(data)
###Output
_____no_output_____
###Markdown
Some functions can be called directly on the array, but not all of them work this way. The important thing is to read the [documentation](http://numpy.org) and learn. With a little practice you get used to it.
###Code
data.mean()
###Output
_____no_output_____
###Markdown
Below we have the median,
###Code
np.median(data) # for example, data.median() does not exist. Does that make sense? No. But that is how it is.
###Output
_____no_output_____
###Markdown
On matrices, these functions operate on all elements.
###Code
np.median(X)
X.mean()
np.log(X + 10)
###Output
_____no_output_____
###Markdown
However, if you want the mean of rows or of columns, use `axis`. First, let us look at the shape of the array.
###Code
X.shape
np.mean(X, axis=0) # mean of each column. Since we have 8 columns, we get 8 elements.
np.mean(X, axis=0).shape
np.mean(X, axis=1) # mean of each row
np.mean(X, axis=1).shape
###Output
_____no_output_____
###Markdown
Remember that axis 0 refers to columns and axis 1 to rows. Matrix Multiplication. To transpose a matrix we use .T
###Code
X.shape
X.T.shape
X.T
###Output
_____no_output_____
###Markdown
To multiply matrices, in the sense of matrix multiplication as defined in linear algebra, we use `@`.
###Code
X @ X.T
###Output
_____no_output_____
###Markdown
Using `*` performs an element-wise (pointwise) operation
###Code
X * X
###Output
_____no_output_____
###Markdown
Note the difference in shapes
###Code
(X * X).shape
(X @ X.T).shape
###Output
_____no_output_____
###Markdown
**Think:** for our `X` of shape `(9, 8)`, why does `X * X.T` not work? Why does `X @ X` not work? Automatic Grading. Our automatic grading depends on the functions below. These functions compare values computed by your code with an expected output. Normally you would not use such functions in notebooks like this one; however, they are key in automated testing environments (outside our scope). Note how some functions compare values and others compare vectors. In addition, there are functions to compare up to a given number of decimal places.
###Code
from numpy.testing import assert_almost_equal
from numpy.testing import assert_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
# if you change one of the values you will get an error!
assert_array_equal(2, 2)
# if you change one of the values you will get an error!
assert_array_equal([1, 2], [1, 2])
# if you change one of the values you will get an error!
assert_almost_equal(3.1415, 3.14, 1)
###Output
_____no_output_____
###Markdown
If you change one of the values below you will get an error! Like the one below.```-----------------------------------------------------------------------AssertionError Traceback (most recent call last) in ----> 1 assert_equal(2, 3) # if you change one of the values you will get an error!~/miniconda3/lib/python3.7/site-packages/numpy/testing/_private/utils.py in assert_equal(actual, desired, err_msg, verbose) 413 Explicitly use __eq__ for comparison, gh-2552 414 if not (desired == actual):--> 415 raise AssertionError(msg) 416 417 except (DeprecationWarning, FutureWarning) as e:AssertionError: Items are not equal: ACTUAL: 2 DESIRED: 3 ``` It is essential that all your code runs without errors! So, before submitting, click `Kernel` in the menu above, then click `Restart & Run All`. **Make sure the notebook runs all the way to the end!** That is, without errors like the one above. Functions in Python. To create a function in Python we use the keyword ```python def ``` All our exercises will make use of functions. **Keep the function signatures exactly as requested; the automatic grading depends on it.** Below is an example of a function that prints something to the screen!
###Code
def print_something(txt):
print(f'Você passou o argumento: {txt}')
print_something('DCC 212')
###Output
_____no_output_____
###Markdown
We can also declare the type of the argument, although we will make little use of this in ICD.
###Code
def print_something(txt: str):
print(f'Você passou o argumento: {txt}')
print_something('DCC 212')
###Output
_____no_output_____
###Markdown
Below we have a function that adds two vectors and returns the sum of the result.
###Code
def sum_of_sum_vectors(array_1, array_2):
return (array_1 + array_2).sum()
x = np.array([1, 2])
y = np.array([1, 2])
sum_of_sum_vectors(x, y)
###Output
_____no_output_____
###Markdown
Below we have a test; tests like this will evaluate your code. Not all of them are here in the notebook!
###Code
assert_equal(6, sum_of_sum_vectors(x, y))
###Output
_____no_output_____
###Markdown
Exercise 01. First, create a function that receives two lists of numbers, converts both to NumPy arrays using `np.array`, and returns the inner product of the two lists. __Hints:__ 1. Try to write the code without any **for** loop! That is, NumPy allows operations on vectors and matrices, where: `np.array([1, 2]) + np.array([2, 2]) = np.array([3, 4])`. __Functions:__ 1. `np.sum(array)` sums the elements of the array. `array.sum()` has the same effect!
###Code
def inner(array_1, array_2):
    # Convert both lists to NumPy arrays and return their inner product.
    return np.sum(np.array(array_1) * np.array(array_2))
x1 = np.array([2, 4, 8])
x2 = np.array([10, 100, 1000])
assert_equal(20 + 400 + 8000, inner(x1, x2))
###Output
_____no_output_____
###Markdown
Exercise 02. Implement a function using NumPy that receives two matrices, multiplies them, and returns the mean value of the cells of the product. For example, when multiplying ```[1 2; 3 4]``` with ```[2 1; 1 2]``` we get ```[4 5; 10 11]```, where the mean of [4, 5, 10, 11] is 7.5, your final answer! __Hints:__ 1. Use the @ operator to multiply matrices!
###Code
def medmult(X_1, X_2):
    # Multiply the two matrices and return the mean over all cells of the product.
    return (X_1 @ X_2).mean()
X = np.array([1, 2, 3, 4]).reshape(2, 2)
Y = np.array([2, 1, 1, 2]).reshape(2, 2)
assert_equal(7.5, medmult(X, Y))
###Output
_____no_output_____ |
examples/ex2-sir-two-age-groups.ipynb | ###Markdown
The SIR model with two age groups. The partitioning of the population can be refined to include other attributes relevant to the disease. One of the most important of these is age. Let us assume we partition the population into two age groups, children and adults, and label them by the index $i=1,2$. Children can catch the infection from other children or from adults; likewise, adults can catch the infection from other adults or from children. Calling their respective rates of infection $\lambda_1(t)$ and $\lambda_2(t)$ we get\begin{align}\lambda_1(t) = \beta\left(C_{11}\frac{I_1}{N_1} + C_{12}\frac{I_2}{N_2}\right)\\\lambda_2(t) = \beta\left(C_{21}\frac{I_1}{N_1} + C_{22}\frac{I_2}{N_2}\right)\end{align}where $C_{ij}$ are contact matrices, quantifying how much each age group interacts with the other. The ordinary differential equations of this age-structured SIR model are \begin{align}\dot S_i &= -\lambda_i(t)S_i \\\dot I_i &= \lambda_i(t)S_i - \gamma I_i \\\dot R_i &= \gamma I_i \end{align}Again, for each $i$ the sum $N_i = S_i + I_i + R_i$ remains constant. What do we expect qualitatively? The group that has a greater rate will catch the disease faster and catch more of it. This depends on how the entries of the contact matrix are distributed. This example integrates the above equations to obtain the **epidemic curve** for both the children and the adults. We see that they have unequal rates of infection.
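A note on the code below (added as a reading aid, not part of the original text): the basic reproductive ratio printed there is estimated, under the linearisation used in this example, as the spectral radius $R_0 = \rho(L)$ of the matrix with entries $L_{ij} = \frac{\alpha\beta}{\gamma_{I_s}} C_{ij}\frac{N_i}{N_j}$, which is exactly the matrix `L` built in the loop before the call to `np.linalg.eigvals`.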
###Code
%%capture
## compile PyRoss for this notebook
import os
owd = os.getcwd()
os.chdir('../')
%run setup.py install
os.chdir(owd)
%matplotlib inline
import numpy as np
import pyross
import matplotlib.pyplot as plt
M = 2 # the population has two age groups
N = 1000000 # and this is the total population
beta = 0.0131 # infection rate
gIa = 1./7 # recovery rate of asymptomatic infectives
gIs = 1./7 # recovery rate of asymptomatic infectives
alpha = 0 # fraction of asymptomatic infectives
fsa = 1 # the self-isolation parameter
Ni = np.zeros((M)) # population in each group
fi = np.zeros((M)) # fraction of population in age age group
# set the age structure
fi = np.array((0.25, 0.75))
for i in range(M):
Ni[i] = fi[i]*N
# set the contact structure
C = np.array(([18., 9.], [3., 12.]))
Ia_0 = np.array((1,1)) # each age group has asymptomatic infectives
Is_0 = np.array((1,1)) # and also symptomatic infectives
R_0 = np.array((0,0)) # there are no recovered individuals initially
S_0 = Ni - (Ia_0 + Is_0 + R_0)
# matrix for linearised dynamics
L = np.zeros((M, M))
for i in range(M):
for j in range(M):
L[i,j]=C[i,j]*Ni[i]/Ni[j]
L = (alpha*beta/gIs)*L
# the basic reproductive ratio
r0 = np.max(np.linalg.eigvals(L))
print("The basic reproductive ratio for these parameters is", r0)
# duration of simulation and data file
Tf=200; Nf=2000; filename='this.mat'
# the contact structure is independent of time
def contactMatrix(t):
return C
# instantiate model
parameters = {'alpha':alpha, 'beta':beta, 'gIa':gIa, 'gIs':gIs,'fsa':fsa}
model = pyross.models.SIR(parameters, M, Ni)
# simulate model
data=model.simulate(S_0, Ia_0, Is_0, contactMatrix, Tf, Nf, filename)
IK = data.get('X')[:,2*M].flatten()
IA = data.get('X')[:,2*M+1].flatten()
t = data.get('t')
fig = plt.figure(num=None, figsize=(10, 8), dpi=80, facecolor='w', edgecolor='k')
plt.rcParams.update({'font.size': 22})
plt.fill_between(t, 0, IK/Ni[0], color="#348ABD", alpha=0.3)
plt.plot(t, IK/Ni[0], '-', color="#348ABD", label='$Children$', lw=4)
plt.fill_between(t, 0, IA/Ni[1], color='#A60628', alpha=0.3)
plt.plot(t, IA/Ni[1], '-', color='#A60628', label='$Adults$', lw=4)
plt.legend(fontsize=26); plt.grid()
plt.autoscale(enable=True, axis='x', tight=True)
###Output
_____no_output_____ |
notebooks/sparse-decoding-from-scratch.ipynb | ###Markdown
Aim: a full implementation of the SPRIGHT algorithm, as in [2].
###Code
import numpy as np
from matplotlib import pyplot as plt
%matplotlib inline
def fwht(x):
    """Recursive (divide-and-conquer) implementation of the 1D fast Walsh-Hadamard transform."""
# x = np.asarray(x, dtype=float)
N = x.shape[0]
if N == 1:
return x
else:
X_even = fwht(x[0:(N//2)])
X_odd = fwht(x[(N//2):])
return np.concatenate([(X_even + X_odd),
(X_even - X_odd)])
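# (Added sanity check, not part of the original notebook; assumes SciPy is available.)
# For power-of-two lengths this recursion equals multiplication by the Sylvester-ordered
# Hadamard matrix, so the two computations should agree up to floating point error.
from scipy.linalg import hadamard
_x_check = np.arange(8.0)
assert np.allclose(fwht(_x_check), hadamard(8) @ _x_check)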
def bin_to_dec(x):
n = len(x)
c = 2**(np.arange(n)[::-1])
return c.dot(x).astype(np.int)
def dec_to_bin(x, num_bits):
assert x < 2**num_bits, "number of bits are not enough"
u = bin(x)[2:].zfill(num_bits)
u = list(u)
u = [int(i) for i in u]
return np.array(u)
def binary_ints(n):
'''
Returns a matrix where row 'i' is dec_to_bin(i, n), for i from 0 to 2 ** n - 1.
https://stackoverflow.com/questions/28111051/create-a-matrix-of-binary-representation-of-numbers-in-python
'''
a = np.arange(2 ** n, dtype=int)[np.newaxis,:]
b = np.arange(n, dtype=int)[::-1,np.newaxis]
return np.array(a & 2**b > 0, dtype=int)
def make_input_signal(n, loc, strengths, noise_sd):
'''
Arguments
---------
n : int
number of bits
loc : iterable
Locations of peaks in the W-H spectrum. Elements must be integers in [0, 2 ** n - 1].
strengths : iterable
The strength of each peak in the W-H spectrum. Defaults to all 1s. Dimension has to match that of loc.
noise_sd : scalar
The SD of the added noise.
Returns
-------
input_signal : numpy.ndarray
The time signal.
input_wht : numpy.ndarray
The WHT of input_signal.
'''
N = 2 ** n
if strengths is None:
strengths = np.ones_like(loc)
input_wht = np.zeros((N,))
for l, s in zip(loc, strengths):
input_wht[l] = s
input_signal = fwht(input_wht) + np.random.normal(0, noise_sd, (N,))
return input_signal, fwht(input_signal) / N
class InputSignal:
def __init__(self, n, loc, strengths=None, noise_sd=0):
self.n = n
self.noise_sd = noise_sd
self.signal_t, self.signal_w = make_input_signal(n, loc, strengths, noise_sd)
# make signal and plot it
input_signal = InputSignal(4, [4, 6, 10, 15], strengths=[2, 4, 1, 1], noise_sd=0.01)
noiseless_signal = InputSignal(4, [4, 6, 10, 15], strengths=[2, 4, 1, 1], noise_sd=0)
fig, axs = plt.subplots(1,2, figsize=(10,3))
axs[0].stem(input_signal.signal_w, use_line_collection=True)
axs[0].set_title('WHT')
axs[1].plot(input_signal.signal_t)
axs[1].set_title('time')
plt.show()
def subsample_indices(b, M, d):
'''
Query generator: creates indices for signal subsamples.
Arguments
---------
b : int
The subsampling coefficient; subject to b <= n = log2(N).
M : numpy.ndarray, shape (n, b)
The subsampling matrix; takes on binary values.
d : numpy.ndarray, shape (n,)
The subsampling offset; takes on binary values.
Returns
-------
    indices : numpy.ndarray, shape (B,)
        The (decimal) indices of the time-domain subsamples; index the signal with these to get the subsampled values.
'''
B = 2 ** b
L = binary_ints(b)
indices = bin_to_dec(np.mod(np.dot(M, L).T + d, 2).T)
return indices
# implementing the example in the paper section 2.2
M1 = np.vstack((np.zeros((2,2)), np.eye(2)))
M2 = np.vstack((np.eye(2), np.zeros((2,2))))
# check linear combinations of subsampling
M1_subsampled_wht = fwht(input_signal.signal_t[subsample_indices(2, M1, np.zeros(4,))]) / 4
assert np.allclose(M1_subsampled_wht, np.array([np.sum([input_signal.signal_w[i::4]]) for i in range(4)]))
def singleton_detection_noiseless(U_slice):
'''
Finds the true index of a singleton, assuming that it is one.
Works on a fixed M, and assumes P = n + 1 and D = [0; I]
Arguments
---------
U_slice : numpy.ndarray, (P,).
The WHT component of the subsampled bin we care about, at diff delays.
d[0] is the zero array, I think (is this necessary? probably)
Returns
-------
k : numpy.ndarray
Index of the corresponding right node, in binary form.
'''
return (-np.sign(U_slice * U_slice[0])[1:]/2 + 1/2).astype(np.int)
def singleton_detection(U_slice, selection, S, n):
'''
    As in singleton_detection_noiseless, but robust to noise; also returns the
    sign of the coefficient (as in [2] eq 22).
    Arguments: U_slice is the WHT of the bin at the P delays; selection holds the
    candidate (decimal) indices k with M.T @ k = j; S holds the corresponding
    columns of the offset signature matrix; n is the number of bits.
    Returns: (k, sgn) where k is the detected index in binary form and sgn is the
    sign of its coefficient.
'''
P = S.shape[0]
alphas = (1/P) * np.dot(S.T, U_slice)
residuals = np.linalg.norm(U_slice - (alphas * S).T, ord=2, axis=1)
k = np.argmin(residuals)
return dec_to_bin(selection[k], n), np.sign(alphas[k])
D = np.vstack((np.zeros(4,), np.eye(4)))
all_delay_subsamples = np.array([input_signal.signal_t[subsample_indices(2, M1, D[i])] for i in range(5)])
all_delay_fwht = np.array([fwht(row) for row in all_delay_subsamples])
all_delay_fwht
S_test = (-1) ** (D @ binary_ints(input_signal.n))
selection_test = [0, 4, 8, 12]
singleton_detection(all_delay_fwht[:,0], selection_test, S_test[:, selection_test], input_signal.n) # should be 0, 1, 0, 0
def compute_delayed_wht(signal, b, M, num_delays, force_identity_like=False):
'''
Helper function for bin_cardinality. Creates random delays, subsamples according to M and the random delays,
and returns the subsample WHT along with the delays.
'''
if num_delays is None:
num_delays = signal.n + 1
if signal.noise_sd > 0:
if not force_identity_like:
choices = np.random.choice(2 ** signal.n, num_delays, replace=False)
# choices = np.concatenate(([0], 1 + np.random.choice(2 ** signal.n - 1, num_delays - 1, replace=False)))
else:
choices = np.array([0] + [2 ** i for i in range(signal.n)])
D = np.array([dec_to_bin(x, signal.n) for x in choices])
else:
D = np.vstack((np.zeros(signal.n,), np.eye(signal.n)))
samples_to_transform = signal.signal_t[np.array([subsample_indices(b, M, d) for d in D])] # subsample to allow small WHTs
U = np.array([fwht(row) for row in samples_to_transform]) # compute the small WHTs
return U, D
def bin_cardinality(signal, M, num_delays=None):
'''
Computes delayed WHT observations and declares cardinality based on that.
2 is a stand-in for any cardinality > 1. (Bad design, but I can't think
of a better way)
Arguments
---------
signal : InputSignal
The input signal object.
b : int
M : numpy.ndarray
As in the signature to subsample_indices.
num_delays : int
The number of delays to apply; or, the number of rows in the delays matrix.
Returns
-------
cardinality : numpy.ndarray
0 or 1 if the bin is a zeroton or singleton resp.; 2 if multiton.
singleton_indices : list
A list (in decimal form for compactness) of the k values of the singletons.
Length matches the number of 1s in cardinality.
'''
b = M.shape[1]
if num_delays is None:
num_delays = signal.n + 1
U, D = compute_delayed_wht(signal, b, M, num_delays)
cardinality = np.ones((signal.n,), dtype=np.int) # vector of indicators
singleton_indices = []
cutoff = 2 * signal.noise_sd ** 2 * (2 ** (signal.n - b)) * num_delays
if signal.noise_sd > 0:
K = binary_ints(signal.n)
S = (-1) ** (D @ K)
for i, col in enumerate(U.T):
sgn = 1
print("Column: ", col)
# <col, col> = |col|^2 = |U|^2
if np.inner(col, col) <= cutoff:
cardinality[i] = 0
else:
if signal.noise_sd == 0:
k = singleton_detection_noiseless(col)
else:
selection = np.where([bin_to_dec(row) == i for row in (M.T.dot(K)).T])[0]
k, sgn = singleton_detection(col, selection, S[:, selection], signal.n)
rho = np.mean(np.abs(col))
residual = col - sgn * rho * (-1) ** np.dot(D, k)
print("Residual: ", residual)
if np.inner(residual, residual) > cutoff:
cardinality[i] = 2
else:
singleton_indices.append(bin_to_dec(k))
print("Slice {0} has k = {1}".format(i, k))
return cardinality, singleton_indices
bin_cardinality(input_signal, M1) # should be 1, 0, 2, 1; [4, 15]
bin_cardinality(input_signal, M2) # should be 0, 2, 1, 1; [10, 15]
compute_delayed_wht(input_signal, 2, M1, num_delays=None, force_identity_like=False)
def decode(signal, Ms, num_delays=None, verbose=False):
'''
Full SPRIGHT decoding. Implements Algorithm 2 from [2].
(numbers) indicate equation numbers in [2].
Arguments
---------
signal : InputSignal object.
The signal to be transformed / compared to.
Ms : list of ndarrays
List of 'M' matrices.
num_delays : int
The number of delays to apply to each M (i.e. the number of rows in the delays matrix.)
The variable P in [2]. By default this is signal.n + 1.
verbose : boolean
Whether to print intermediate steps.
Returns
-------
wht : ndarray
The WHT constructed by subsampling and peeling.
'''
result = []
wht = np.zeros_like(signal.signal_t)
c = len(Ms)
b = Ms[0].shape[1]
Us, Ss = [], []
singletons = {}
multitons = []
if num_delays is None:
num_delays = signal.n + 1
K = binary_ints(signal.n)
# subsample, make the observation [U] and delays [D] matrices
for M in Ms:
U, D = compute_delayed_wht(signal, b, M, num_delays, force_identity_like=False)
Us.append(U)
Ss.append((-1) ** (D @ K)) # offset signature matrix
cutoff = 2 * signal.noise_sd ** 2 * (2 ** (signal.n - b)) * num_delays # noise threshold
if verbose:
print('cutoff: {}'.format(cutoff))
# K is the binary representation of all integers from 0 to 2 ** n - 1.
select_froms = np.array([[bin_to_dec(row) for row in M.T.dot(K).T] for M in Ms])
# `select_froms` is the collection of 'j' values and associated indices
# so that we can quickly choose from the coefficient locations such that M.T @ k = j as in (20)
# example: ball j goes to bin at "select_froms[i][j]"" in stage i
# begin peeling
# index convention for peeling: 'i' goes over all M/U/S values
# i.e. it refers to the index of the subsampling group (zero-indexed - off by one from the paper).
# 'j' goes over all columns of the WHT subsample matrix, going from 0 to 2 ** b - 1.
# e.g. (i, j) = (0, 2) refers to subsampling group 0, and aliased bin 2 (10 in binary)
# which in the example of section 3.2 is the multiton X[0110] + X[1010] + W1[10]
# a multiton will just store the (i, j)s in a list
# a singleton will map from the (i, j)s to the true (binary) values k.
# e.g. the singleton (0, 0), which in the example of section 3.2 is X[0100] + W1[00]
# would be stored as the dictionary entry (0, 0): array([0, 1, 0, 0]).
there_were_multitons = True
while there_were_multitons:
if verbose:
print('-----')
print('the measurement matrix')
for U in Us:
print(U)
# first step: find all the singletons and multitons.
singletons = {} # dictionary from (i, j) values to the true index of the singleton, k.
multitons = [] # list of (i, j) values indicating where multitons are.
for i, (U, S, select_from) in enumerate(zip(Us, Ss, select_froms)):
for j, col in enumerate(U.T):
# note that np.inner(x, x) is used as norm-squared: marginally faster than taking norm and squaring
if np.inner(col, col) > cutoff:
selection = np.where(select_from == j)[0] # pick all the k such that M.T @ k = j
k, sgn = singleton_detection(col, selection, S[:, selection], signal.n) # find the best fit singleton
k_dec = bin_to_dec(k)
rho = np.dot(S[:,k_dec], col)*sgn/len(col)
residual = col - sgn * rho * S[:,k_dec]
if verbose:
print((i, j), np.inner(residual, residual))
if np.inner(residual, residual) > cutoff:
multitons.append((i, j))
else: # declare as singleton
singletons[(i, j)] = (k, rho, sgn)
if verbose:
print('amplitude: {}'.format(rho))
# all singletons and multitons are discovered
if verbose:
print('singletons:')
for ston in singletons.items():
print("\t{0} {1}\n".format(ston, bin_to_dec(ston[1][0])))
print("Multitons : {0}\n".format(multitons))
# WARNING: this is not a correct thing to do
# in the last iteration of peeling, everything will be singletons and there
# will be no multitons
if len(multitons) == 0: # no more multitons, and can construct final WHT
there_were_multitons = False
# balls to peel
balls_to_peel = set()
ball_values = {}
ball_sgn = {}
for (i, j) in singletons:
k, rho, sgn = singletons[(i, j)]
ball = bin_to_dec(k)
balls_to_peel.add(ball)
ball_values[ball] = rho
ball_sgn[ball] = sgn
if verbose:
print('these balls will be peeled')
print(balls_to_peel)
# peel
for ball in balls_to_peel:
k = dec_to_bin(ball, signal.n)
potential_peels = [(l, bin_to_dec(M.T.dot(k))) for l, M in enumerate(Ms)]
result.append((k, ball_sgn[ball]*ball_values[ball]))
for peel in potential_peels:
signature_in_stage = Ss[peel[0]][:,ball]
to_subtract = ball_sgn[ball] * ball_values[ball] * signature_in_stage
Us[peel[0]][:,peel[1]] -= to_subtract
if verbose:
print('this is subtracted:')
print(to_subtract)
print("Peeled ball {0} off bin {1}".format(bin_to_dec(k), peel))
for k, value in result: # iterating over (i, j)s
idx = bin_to_dec(k) # converting 'k's of singletons to decimals
if wht[idx] == 0:
wht[idx] = value
else:
wht[idx] = (wht[idx] + value) / 2
# average out noise; e.g. in the example in 3.2, U1[11] and U2[11] are the same singleton,
# so averaging them reduces the effect of noise.
wht /= 2 ** (signal.n / b) # should maybe be n - b? in the example it's n = 4 and b = 2 so same either way, but should double check.
return wht
decoded = decode(input_signal, [M1, M2], verbose=True)
plt.stem(decoded, use_line_collection=True)
decoded_noiseless = decode(noiseless_signal, [M1, M2])
assert np.allclose(decoded_noiseless, noiseless_signal.signal_w)
res = decoded - input_signal.signal_w
np.inner(res, res) # should be small
###Output
_____no_output_____ |
Developing_ipynb/project4_0324_CC.ipynb | ###Markdown
Imports Import Libraries
###Code
import tarfile
import pandas as pd
import os
# Import Libraries for section "Linear regression model"
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from google.colab import drive
drive.mount('/content/drive')
###Output
Drive already mounted at /content/drive; to attempt to forcibly remount, call drive.mount("/content/drive", force_remount=True).
###Markdown
Set Your Directory
###Code
#YOUR_DIRECTORY = '/content/drive/My Drive/Colab Notebooks/ATMS597/project4/' #Sarah
# YOUR_DIRECTORY = '/content/drive/My Drive/Colab Notebooks/ATMS 597/P04/' #Cathy
YOUR_DIRECTORY = '/content/drive/My Drive/ATMS597 Weather Climate Data Analysis/Module 4/Project 4/' #Chu-Chun
###Output
_____no_output_____
###Markdown
Import GFS data and save to pd.DataFrame
###Code
daily = tarfile.open(name = YOUR_DIRECTORY + 'daily.tar.gz') # Set the archive for opening
# Aggregate to PD DataFrame
cur_file = daily.next() # Initiate while loop using the first file in the tar archive
daily_gfs = pd.DataFrame(columns=['TMAX', 'TMIN', 'WMAX', 'RTOT'])
i = 0
while cur_file != None:
i += 1
if i % 350 == 0:
print(float(i/3500))
working_file = YOUR_DIRECTORY + cur_file.name
daily.extract(cur_file, path=YOUR_DIRECTORY) # Extract TarInfo Object
convert_to_df = pd.read_csv(working_file, index_col=0, parse_dates=True,
infer_datetime_format=True) # Convert cur_file
# (TarInfo Object) to string, then to PD; convert
# index col to DateTime
daily_gfs = daily_gfs.append(convert_to_df) # Append PD to DF
os.remove(working_file) # Remove file extracted in directory
cur_file = daily.next() # Go to next file in archive
daily.close() # Close .tar
daily_gfs
daily_gfs.index
# find missing dates
missing_daily_gfs = pd.date_range(start = '2010-01-01 12:00:00', end = '2020-01-31 12:00:00', freq='D').difference(daily_gfs.index)
missing_daily_gfs
# len(missing_daily_gfs)
# profile gfs data
prof = tarfile.open(name = YOUR_DIRECTORY + 'prof.tar.gz') # Set the archive for opening
# Aggregate to PD DataFrame
cur_file = prof.next() # Initiate while loop using the first file in the tar archive
prof_gfs = pd.DataFrame(columns=['DWPC','HGHT','PRES','TMPC','UWND','VWND'])
i = 0
while cur_file != None:
i += 1
if i % 350 == 0:
print(float(i/3500))
working_file = YOUR_DIRECTORY + cur_file.name
prof.extract(cur_file, path=YOUR_DIRECTORY) # Extract TarInfo Object
convert_to_df = pd.read_csv(working_file, index_col=0, parse_dates=True,
infer_datetime_format=True) # Convert cur_file
# (TarInfo Object) to string, then to PD; convert
# index col to DateTime
prof_gfs = prof_gfs.append(convert_to_df) # Append PD to DF
os.remove(working_file) # Remove file extracted in directory
cur_file = prof.next() # Go to next file in archive
prof.close() # Close .tar
prof_gfs
# find missing times
missing_prof_gfs = pd.date_range(start = '2010-01-02 06:00:00', end = '2020-02-02 06:00:00', freq='6H').difference(prof_gfs.index)
missing_prof_gfs
# the result shows that len(missing_prof_gfs) < 4*len(missing_daily_gfs)...not sure how to go from here.
# surface gfs data
sfc = tarfile.open(name = YOUR_DIRECTORY + 'sfc.tar.gz') # Set the archive for opening
# Aggregate to PD DataFrame
cur_file = sfc.next() # Initiate while loop using the first file in the tar archive
sfc_gfs = pd.DataFrame()
i = 0
while cur_file != None:
i += 1
if i % 350 == 0:
print(float(i/3500))
working_file = YOUR_DIRECTORY + cur_file.name
sfc.extract(cur_file, path=YOUR_DIRECTORY) # Extract TarInfo Object
convert_to_df = pd.read_csv(working_file, index_col=False).T # Convert cur_file
# (TarInfo Object) to string, then to PD. Note that
# the sfc files are transposed, i.e. they have
# variables as rows and timestamps as columns, hence .T
sfc_gfs = sfc_gfs.append(convert_to_df) # Append PD to DF
os.remove(working_file) # Remove file extracted in directory
cur_file = sfc.next() # Go to next file in archive
sfc.close() # Close .tar
sfc_gfs
sfc_gfs.columns = ['DWPC', 'HCLD', 'LCLD', 'MCLD', 'PRCP', 'PRES', 'TMPC', 'UWND', 'VWND', 'WSPD']
sfc_gfs = sfc_gfs.drop('Unnamed: 0')
sfc_gfs.index = pd.to_datetime(sfc_gfs.index)
sfc_gfs.index
# find missing times
missing_sfc_gfs = pd.date_range(start = '2010-01-02 06:00:00', end = '2020-02-02 06:00:00', freq='3H').difference(sfc_gfs.index)
missing_sfc_gfs
# the result shows that len(missing_sfc_gfs) < (24/3)*len(missing_daily_gfs)...not sure how to go from here.
###Output
_____no_output_____
###Markdown
Import obs daily data
###Code
daily_obs = pd.read_csv(YOUR_DIRECTORY + 'KCMI_daily.csv', header=4, usecols=[0,1,2,3,4], index_col='Date')[:-7] # ignore the last 7 lines
daily_obs
# KCMI_daily.csv is first edited to comment out the last few rows with '#' for
# reindexing and parsing dates (using skipfooter directly did not work). The new
# file is saved as KCMI_daily_comment.csv under Cathy's forked directory.
# daily_obs = pd.read_csv(YOUR_DIRECTORY + 'KCMI_daily_comment.csv', header=4,
# usecols=[0,1,2,3,4], comment='#', index_col=0, parse_dates=True,
# infer_datetime_format=True)
# daily_obs.rename(columns={'TMAX (F)','TMIN (F)','WMAX (mph)','PREC (in)'})
# daily_obs
# daily_obs['Max Daily Temp (C)'] = daily_obs['TMAX'].apply(lambda x: (x*(9/5))).apply(lambda x: x+32) # Change TMAX to Celsius
# daily_gfs['TMIN'] = daily_gfs['TMIN'].apply(lambda x: (x*(9/5))).apply(lambda x: x+32) # Change TMIN to Celsius
daily_obs.index
# check for missing dates - there's none
missing_dates_obs = pd.date_range(start = '2010-01-01', end = '2019-12-31', freq='D').difference(daily_obs.index)
missing_dates_obs
###Output
_____no_output_____
###Markdown
Import obs hourly data
###Code
hourly_obs = pd.read_csv(YOUR_DIRECTORY + 'KCMI_hourly.csv', #header=1, usecols=[0,1,2,3,4], comment='#',
index_col=0, parse_dates=True, infer_datetime_format=True)
hourly_obs
###Output
_____no_output_____
###Markdown
Resample hourly precip data into daily freq and add to daily_obs
###Code
# should we treat trace precip as 0 instead of -0.1?
hourly_obs_res = hourly_obs.resample('24H',base=6).sum()
precip_daily = hourly_obs_res['pr1h']['2010-01-01 06:00:00':'2019-12-31 06:00:00'].resample('D').sum()
precip_daily
daily_obs['Total Precip from Hourly (in)'] = precip_daily
daily_obs
###Output
_____no_output_____
###Markdown
Plot the TMAX from GFS and observation
###Code
GFS_TMAX = daily_gfs['TMAX']['2010-01-01 12:00:00':'2018-12-30 12:00:00'] # select 2010-01-01 to 2018-12-30
GFS_TMAX.index = GFS_TMAX.index.strftime('%Y-%m-%d') # to be consistent with observation index
# GFS_TMAX.index = GFS_TMAX.index.map(lambda x: x.strftime('%Y-%m-%d')) # to be consistent with observation index
GFS_TMAX
GFS_TMAX.plot()
daily_obs['Max Hourly Temp (F)'][daily_obs['Max Hourly Temp (F)'] == 'M']
# sum(daily_obs['Max Hourly Temp (F)'] == 'M')
# select 2010-01-02 to 2018-12-31 (one day after GFS model)
OBS_TMAX = daily_obs['Max Hourly Temp (F)'].mask(daily_obs['Max Hourly Temp (F)']=='M').dropna().astype(float)['2010-01-02':'2018-12-31']
OBS_TMAX
OBS_TMAX.plot()
###Output
_____no_output_____
###Markdown
Find the overlap dates between GFS daily and observation (one day after GFS model)
###Code
GFS_TMAX_plus1day = pd.to_datetime(GFS_TMAX.index) + pd.Timedelta('1 day') # OBS dates are one day after GFS timestamps
mismatch_dates = GFS_TMAX_plus1day.difference(pd.to_datetime(OBS_TMAX.index)) # OBS doesn't have these dates
OBS_TMAX_dates = GFS_TMAX_plus1day.drop(mismatch_dates) # dates derived from available GFS dates and OBS dates
GFS_TMAX_dates = OBS_TMAX_dates - pd.Timedelta('1 day')
GFS_TMAX_dates
GFS_TMAX[GFS_TMAX_dates.strftime('%Y-%m-%d')]
OBS_TMAX[OBS_TMAX_dates.strftime('%Y-%m-%d')]
# it seems the timestamps are not initialization time??
# if they are not initialization time, then they should be the same as OBS
GFS_sfc_TMPC_daily = sfc_gfs['TMPC'].astype(float).resample('24H',base=6).mean()['2010-01-02 06:00:00':'2018-12-31 06:00:00'].resample('D').mean()
GFS_sfc_TMPC_daily
mismatch_dates = pd.to_datetime(GFS_sfc_TMPC_daily.index).difference(pd.to_datetime(OBS_TMAX_dates)) # OBS_TMAX_dates doesn't have these dates
GFS_sfc_TMPC = GFS_sfc_TMPC_daily.drop(mismatch_dates)
GFS_sfc_TMPC
###Output
_____no_output_____
###Markdown
Linear regression model
###Code
from sklearn.linear_model import LinearRegression
X = np.column_stack((GFS_TMAX[GFS_TMAX_dates.strftime('%Y-%m-%d')].values, GFS_sfc_TMPC.values)) # GFS model daily and sfc TMPC
y = (OBS_TMAX[OBS_TMAX_dates.strftime('%Y-%m-%d')].values-32)*5/9 # Observation, converted from F to C
model = LinearRegression(fit_intercept=False)
model.fit(X, y)
y_predict = model.predict(X) # linear regression model prediction
print("Model slope: ", model.coef_)
print("Model intercept:", model.intercept_)
# y_predict = X[:, 0] * model.coef_[0] + X[:, 1] * model.coef_[1] + model.intercept_ # another form to write the equation
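# (Added check, not part of the original notebook.) Quantify the fit with standard
# sklearn metrics, in addition to the comparison plot below.
from sklearn.metrics import mean_squared_error, r2_score
print("RMSE (deg C):", np.sqrt(mean_squared_error(y, y_predict)))
print("R^2:", r2_score(y, y_predict))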
plt.figure(figsize=(20, 8.5))
# plt.plot(OBS_TMAX_dates, X, alpha=0.5)
plt.plot(OBS_TMAX_dates, y, alpha=0.5, label='Observation')
plt.plot(OBS_TMAX_dates, y_predict, alpha=0.5, label='Prediction')
plt.legend()
plt.show()
###Output
_____no_output_____ |
project_capstone/Sparkify.ipynb | ###Markdown
Sparkify Project Workspace. This workspace contains a tiny subset (128MB) of the full dataset available (12GB). Feel free to use this workspace to build your project, or to explore a smaller subset with Spark before deploying your cluster on the cloud. Instructions for setting up your Spark cluster are included in the last lesson of the Extracurricular Spark Course content. You can follow the steps below to guide your data analysis and model building portion of this project.
###Code
# import libraries
import numpy as np
### Spark
import pyspark
from pyspark import SparkConf
from pyspark.sql import SparkSession
from pyspark.sql.functions import udf
from pyspark.sql.functions import avg, col, concat, count, desc, explode, lit, min, max, split, stddev, udf
from pyspark.sql.types import StringType
from pyspark.sql.types import IntegerType
from pyspark.sql.functions import desc
from pyspark.sql.functions import asc
from pyspark.sql.functions import sum as Fsum
from pyspark.ml import Pipeline
from pyspark.ml.feature import RegexTokenizer, CountVectorizer, VectorAssembler, Normalizer, StandardScaler, IDF, StringIndexer
from pyspark.ml.regression import LinearRegression
from pyspark.ml.classification import LogisticRegression, RandomForestClassifier
from pyspark.ml.clustering import KMeans
from pyspark.ml.tuning import ParamGridBuilder, CrossValidator
from pyspark.ml.evaluation import MulticlassClassificationEvaluator, RegressionEvaluator, BinaryClassificationEvaluator
import sklearn
from sklearn.metrics import f1_score
# create a Spark session
spark = SparkSession \
.builder \
.appName("Sparkify") \
.getOrCreate()
###Output
_____no_output_____
###Markdown
Load and Clean Dataset. In this workspace, the mini-dataset file is `mini_sparkify_event_data.json`. Load and clean the dataset, checking for invalid or missing data - for example, records without userids or sessionids.
###Code
# load 'mini_sparkify_event_data.json'
sparkify = spark.read.json('mini_sparkify_event_data.json')
sparkify.persist()
sparkify.createOrReplaceTempView('sparkify')
sparkify.printSchema()
###Output
root
|-- artist: string (nullable = true)
|-- auth: string (nullable = true)
|-- firstName: string (nullable = true)
|-- gender: string (nullable = true)
|-- itemInSession: long (nullable = true)
|-- lastName: string (nullable = true)
|-- length: double (nullable = true)
|-- level: string (nullable = true)
|-- location: string (nullable = true)
|-- method: string (nullable = true)
|-- page: string (nullable = true)
|-- registration: long (nullable = true)
|-- sessionId: long (nullable = true)
|-- song: string (nullable = true)
|-- status: long (nullable = true)
|-- ts: long (nullable = true)
|-- userAgent: string (nullable = true)
|-- userId: string (nullable = true)
###Markdown
Exploratory Data Analysis. When you're working with the full dataset, perform EDA by loading a small subset of the data and doing basic manipulations within Spark. In this workspace, you are already provided a small subset of data you can explore. Define Churn. Once you've done some preliminary analysis, create a column `Churn` to use as the label for your model. I suggest using the `Cancellation Confirmation` events to define your churn, which happen for both paid and free users. As a bonus task, you can also look into the `Downgrade` events. Explore Data. Once you've defined churn, perform some exploratory data analysis to observe the behavior for users who stayed vs users who churned. You can start by exploring aggregates on these two groups of users, observing how much of a specific action they experienced per a certain time unit or number of songs played.
###Code
data = spark.sql('''
select userId, firstName, lastName, level, auth, sessionId, page, song, artist from sparkify
''').show(5)
# Length of the entire dataset:
print(sparkify.count())
# Are there duplicated entries:
print(sparkify.dropDuplicates().count())
# Answer: No
# How many users in the dataset?
users_count = spark.sql('''
select count(distinct userId) from sparkify where userId <> ''
''')
users_count.show()
# 226 users: not many...
users = spark.sql("select userId from sparkify where userId <> ''")
print(users.dropDuplicates().count())
# So: no duplicate users = good
# What is SessionId: are those unique values or per-user values?
session = spark.sql('''
select distinct userId
from sparkify
where sessionId = 1
''')
session.show()
session = spark.sql('''
select sessionId, count(userId) as cnt
from sparkify
group by sessionId
order by cnt DESC
''')
session.show()
# So: NO. SessionId is not a metric that can be used.
# User values all ok?
non_users = spark.sql("select count(userId) from sparkify where userId = '' and (page = 'Cancellation Confirmation') or (page = 'Downgrade')")
non_users.show()
# We have empty users values on the pages we want to track.
# This is a NO-NO.
# 2107 users who "churned"
churned_users = spark.sql("select distinct userId from sparkify \
WHERE (page = 'Cancellation Confirmation') or (page = 'Downgrade') \
order by userId")
churned_users.show()
print(churned_users.count())
''' Fetching all data applying the tag: `churn` on the users who are/have done it.
Being careful to eliminate EMPTY user strings.
'''
# songs list of users who churned = 217405 total
## Removing EMPTY UserIds as previously identified!
table = spark.sql(''' ((select *, 1 as churn
from sparkify
where page = 'NextSong' and userId IN (
select distinct userId from sparkify
WHERE ((page = 'Cancellation Confirmation') or (page = 'Downgrade')) and userId <> ''
order by userId)
) )
union (
(select *, 0 as churn
from sparkify where page = 'NextSong' and userId not IN (
select distinct userId from sparkify
WHERE ((page = 'Cancellation Confirmation') or (page = 'Downgrade')) and userId <> ''
order by userId)
) )
''')
table.createOrReplaceTempView('table')
df = spark.sql("select * from table where churn = 1")
df.show(10)
churned = spark.sql('''
select count(*) sum_events,
count(distinct userId) as sum_users
from sparkify
WHERE (page = 'Cancellation Confirmation') or (page = 'Downgrade')
''').show()
cancel_churned = spark.sql('''
select count(distinct userId)
from sparkify
where page = 'Cancellation Confirmation'
''').show()
downgrade_churned = spark.sql('''
select count(distinct userId)
from sparkify
where page = 'Downgrade'
''').show()
### self-input
'''
We can see that most people Downgrade their service rather than cancel the service entirely.
Churn: 21 (went free) / 2086 (downgraded the subscription) [total 2107 cancellations/downgrades from 171 users]
In total 52 people cancelled whilst 154 downgraded
Cancellation/downgrade is pretty evenly distributed between Males and Females.
'''
df.groupBy('gender').count().show()
# Reasonably distributed between genders
# SELECT CASE WHEN 1 > 0 THEN 1 WHEN 2 > 0 THEN 2.0 ELSE 1.2 END;
is_home = spark.sql("SELECT *, CASE WHEN page = 'NextSong' THEN 1 ELSE 0 END AS is_next_song FROM table \
WHERE page = 'NextSong'") #
# keep the results in a new view
is_home.createOrReplaceTempView("is_next_song_table")
# find the cumulative sum over the is_home column
cumulative_sum = spark.sql("SELECT *, SUM(is_next_song) OVER \
(PARTITION BY userID ORDER BY ts ASC ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS period \
FROM is_next_song_table \
ORDER BY userId ASC, ts DESC")
# keep the results in a view
cumulative_sum.createOrReplaceTempView("song_count_table")
# find the average count for NextSong
#spark.sql("SELECT AVG(count_results) FROM \
# (SELECT COUNT(*) AS count_results FROM song_count_table \
#GROUP BY userID, period, page HAVING page = 'NextSong') AS counts").show()
dataframe = spark.sql('''SELECT userId, first(itemInSession) as itemInSession, first(churn) as churn, COUNT(*) AS song_count FROM song_count_table \
GROUP BY userId
ORder By userId ASC
''')
dataframe.show()
print(dataframe.count())
###Output
+------+-------------+-----+----------+
|userId|itemInSession|churn|song_count|
+------+-------------+-----+----------+
| 10| 97| 1| 673|
| 100| 91| 1| 2682|
|100001| 20| 1| 133|
|100002| 0| 1| 195|
|100003| 78| 1| 51|
|100004| 121| 1| 942|
|100005| 69| 1| 154|
|100006| 42| 1| 26|
|100007| 198| 1| 423|
|100008| 42| 1| 772|
|100009| 58| 1| 518|
|100010| 33| 0| 275|
|100011| 19| 1| 11|
|100012| 71| 1| 476|
|100013| 48| 1| 1131|
|100014| 70| 1| 257|
|100015| 112| 1| 800|
|100016| 48| 1| 530|
|100017| 71| 1| 52|
|100018| 49| 1| 1002|
+------+-------------+-----+----------+
only showing top 20 rows
225
###Markdown
Feature Engineering. Once you've familiarized yourself with the data, build out the features you find promising to train your model on. To work with the full dataset, you can follow these steps: (1) write a script to extract the necessary features from the smaller subset of data; (2) ensure that your script is scalable, using the best practices discussed in Lesson 3; (3) try your script on the full data set, debugging your script if necessary. If you are working in the classroom workspace, you can just extract features based on the small subset of data contained here. Be sure to transfer over this work to the larger dataset when you work on your Spark cluster.
###Code
assembler = VectorAssembler(inputCols=["itemInSession", "song_count"], outputCol="featureVec")
df_assembled = assembler.transform(dataframe)
# Features: An array of data points of features to be used for predicting (the label).
# Label: the output for each data point.
# Required `label` to be Int: It already is an Int
data = df_assembled.select(col("churn").alias("label"), col("featureVec").alias("features"))
data.head()
data.show()
###Output
+-----+-------------+
|label| features|
+-----+-------------+
| 1| [97.0,673.0]|
| 1|[91.0,2682.0]|
| 1| [20.0,133.0]|
| 1| [0.0,195.0]|
| 1| [78.0,51.0]|
| 1|[121.0,942.0]|
| 1| [69.0,154.0]|
| 1| [42.0,26.0]|
| 1|[198.0,423.0]|
| 1| [42.0,772.0]|
| 1| [58.0,518.0]|
| 0| [33.0,275.0]|
| 1| [19.0,11.0]|
| 1| [71.0,476.0]|
| 1|[48.0,1131.0]|
| 1| [70.0,257.0]|
| 1|[112.0,800.0]|
| 1| [48.0,530.0]|
| 1| [71.0,52.0]|
| 1|[49.0,1002.0]|
+-----+-------------+
only showing top 20 rows
###Markdown
Modeling. Split the full dataset into train, test, and validation sets. Test out several of the machine learning methods you learned. Evaluate the accuracy of the various models, tuning parameters as necessary. Determine your winning model based on test accuracy and report results on the validation set. Since the churned users are a fairly small subset, I suggest using F1 score as the metric to optimize.
###Code
train, test = data.randomSplit([0.7, 0.3], seed=42)
##### ##### Linear Regression ##### #####
# Linear Regression
linreg = LinearRegression(maxIter=10, regParam=0.0, fitIntercept=False, solver="normal")
model = linreg.fit(train)
print(model.summary.residuals.show())
print(model.summary.rootMeanSquaredError)
print(model.summary.r2)
print(model.coefficients)
results = model.transform(test)
pred_res = results.filter(results.label == results.prediction).count()
total = results.count()
print(pred_res)
print(total)
print(pred_res/total) # 0.0
##### ##### Logistic Regression ##### #####
# Logistic Regression
logreg = LogisticRegression(maxIter=10, regParam=0.0)
model = logreg.fit(train) # used to be data
# print(model.summary.residuals.show())
print(model.summary.accuracy) # 0.8385093167701864
print(model.coefficients)
results = model.transform(test) # used to be data
pred_res = results.filter(results.label == results.prediction).count()
total = results.count()
print(pred_res) # 58
print(total) # 64
print(pred_res/total) # 0.90625
def f1score(df):
tpos = df.where((df.label == 1) & (df.prediction == 1)).count()
fpos = df.where((df.label == 0) & (df.prediction == 1)).count()
tneg = df.where((df.label == 0) & (df.prediction == 0)).count()
fneg = df.where((df.label == 1) & (df.prediction == 0)).count()
precision = tpos / (tpos + fpos)
recall = tpos / (tpos + fneg)
f1 = 2 * precision * recall / (precision + recall)
return f1
f1_score = f1score(results)
f1_score
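# (Added note, not part of the original notebook.) Spark ML can also report F1 through the
# already-imported MulticlassClassificationEvaluator; note that metricName="f1" gives the
# weighted F1 over both classes, so it will generally differ from the positive-class F1
# computed by f1score() above.
f1_evaluator = MulticlassClassificationEvaluator(labelCol="label", predictionCol="prediction", metricName="f1")
# f1_evaluator.evaluate(results)  # uncomment to compute; may require casting 'label' to double on older Spark versions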
##### ##### Random Forest Classifier ##### #####
rfc = RandomForestClassifier(featuresCol = 'features', labelCol = 'label')
model = rfc.fit(train)
results = model.transform(test)
pred_res = results.filter(results.label == results.prediction).count()
total = results.count()
print(pred_res) #
print(total) #
print(pred_res/total) # 0.
f1_score = f1score(results)
f1_score
###Output
_____no_output_____
###Markdown
Final Steps. Clean up your code, adding comments and renaming variables to make the code easier to read and maintain. Refer to the Spark Project Overview page and Data Scientist Capstone Project Rubric to make sure you are including all components of the capstone project and meet all expectations. Remember, this includes thorough documentation in a README file in a Github repository, as well as a web app or blog post.
###Code
##### ##### Linear Regression ##### #####
'''
No need to run cross-validation for a model that is not performing.
'''
##### ##### Logistic Regression ##### #####
# Tuning
paramGrid = ParamGridBuilder() \
.addGrid(logreg.maxIter,[5, 10, 15, 25]) \
.addGrid(logreg.regParam,[0.0, 0.5, 1.0]) \
.build()
crossval = CrossValidator(estimator=logreg,
estimatorParamMaps=paramGrid,
evaluator=BinaryClassificationEvaluator(), # BinaryClassificationEvaluator / RegressionEvaluator
numFolds=3)
cvmodel = crossval.fit(train)
cvmodel.avgMetrics
results = cvmodel.transform(test)
pred_res = results.filter(results.label == results.prediction).count()
total = results.count()
print(pred_res)
print(total)
print(pred_res/total)
f1_score = f1score(results)
f1_score
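# Hypothetical follow-up (not part of the original run): pair avgMetrics with the
# parameter grid to see which hyper-parameter combination won the cross-validation.
best_idx = max(range(len(cvmodel.avgMetrics)), key=lambda i: cvmodel.avgMetrics[i])
best_params = {p.name: v for p, v in paramGrid[best_idx].items()}
best_params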
print("END")
###Output
_____no_output_____ |
sklearn-pipeline-example/credit.ipynb | ###Markdown
Simple Pipeline Demonstration Load data
###Code
import pandas as pd
import numpy as np
df = pd.read_csv('data/credit.csv')
df.head()
###Output
_____no_output_____
###Markdown
Split off target from features
###Code
y = df['Income']
X = df[[x for x in df.columns if x != 'Income']]
X.head()
###Output
_____no_output_____
###Markdown
Create pipeline for feature processing- Not including all features
###Code
from sklearn.pipeline import Pipeline
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler
cat_vars = ['Own', 'Student']
num_vars = ['Limit', 'Balance']
num_pipeline = Pipeline([('impute_missing', SimpleImputer(strategy='median')),
('standardize_num', StandardScaler())
])
cat_pipeline = Pipeline([('impute_missing_cats', SimpleImputer(strategy='most_frequent')),
('create_dummies_cats', OneHotEncoder(handle_unknown='ignore', drop='first'))])
processing_pipeline = ColumnTransformer(transformers=[('proc_numeric', num_pipeline, num_vars),
('create_dummies', cat_pipeline, cat_vars)])
print(processing_pipeline)
###Output
ColumnTransformer(transformers=[('proc_numeric',
Pipeline(steps=[('impute_missing',
SimpleImputer(strategy='median')),
('standardize_num',
StandardScaler())]),
['Limit', 'Balance']),
('create_dummies',
Pipeline(steps=[('impute_missing_cats',
SimpleImputer(strategy='most_frequent')),
('create_dummies_cats',
OneHotEncoder(drop='first',
handle_unknown='ignore'))]),
['Own', 'Student'])])
###Markdown
Fit model
###Code
from sklearn.linear_model import LinearRegression
modeling_pipeline = Pipeline([('data_processing', processing_pipeline),
('lm', LinearRegression())])
modeling_pipeline.fit(X, y)
###Output
_____no_output_____
###Markdown
Show Predictions
###Code
yhat = modeling_pipeline.predict(X)
import matplotlib.pyplot as plt
%matplotlib inline
plt.plot(y, yhat, 'bo')
plt.xlabel('Actual')
plt.ylabel('Predict')
plt.show()
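# A possible extension (an assumption, not part of the original demo): because all
# preprocessing lives inside the pipeline, the whole object can be cross-validated
# without leaking imputation or scaling statistics from the held-out folds.
from sklearn.model_selection import cross_val_score
cv_r2 = cross_val_score(modeling_pipeline, X, y, cv=5, scoring='r2')
cv_r2.mean(), cv_r2.std()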
###Output
_____no_output_____ |
sandbox/quartet-funcs.ipynb | ###Markdown
toytree quartet functions (in progress)
###Code
import toytree
import itertools
import numpy as np
###Output
_____no_output_____
###Markdown
get two random trees
###Code
t0 = toytree.rtree.unittree(10, seed=0)
t1 = toytree.rtree.unittree(10, seed=1)
toytree.mtree([t0, t1]).draw(ts='p', height=200);
###Output
_____no_output_____
###Markdown
Plan for counting quartets (Illustrated below) We will traverse the tree visiting every node in turn. At each node we will select the edge above it (towards the root) to be the focal 'split'. Each split can represent many possible quartets, where at least one tip can be sampled from each of the four edges leading from the split. In the example below, we are visiting node 12, and the focal split is shown in black. The four edges leaving this split are shown in red, pink, blue, and aqua. To get all quartets from this split we must sample all possible combinations of one sample from each colored set.
###Code
t0.draw(
ts='p',
node_colors="lightgrey",
edge_widths=3,
edge_colors=t0.get_edge_values_mapped(
{11: 'red', 3: 'pink', 4: 'blue', 18: 'aqua', 12: 'black'},
),
);
###Output
_____no_output_____
###Markdown
Example to sample tips from each quartet edge
###Code
# focal node
nidx = 12
# get all tips as a set
fullset = set(i for i in t0.get_tip_labels())
# get tips from each child of a given node
down0 = set(t0.idx_dict[nidx].children[0].get_leaf_names())
down1 = set(t0.idx_dict[nidx].children[1].get_leaf_names())
up0 = set(t0.idx_dict[nidx].up.get_leaf_names()) - down0 - down1
up1 = fullset - down0 - down1 - up0
print(down0)
print(down1)
print(up0)
print(up1)
###Output
{'r3'}
{'r0', 'r2', 'r1'}
{'r4'}
{'r8', 'r6', 'r7', 'r9', 'r5'}
###Markdown
Example to get all quartet sets from sampled tips
###Code
set(itertools.product(down0, down1, up0, up1))
###Output
_____no_output_____
###Markdown
Combine into a function
###Code
def get_quartets(ttre):
# store all quartets in this SET
qset = set([])
# get a SET with all tips in the tree
fullset = set(ttre.get_tip_labels())
# get a SET of the descendants from each internal node
for node in ttre.idx_dict.values():
# skip leaf nodes
if not node.is_leaf():
children = set(node.get_leaf_names())
prod = itertools.product(
itertools.combinations(children, 2),
itertools.combinations(fullset - children, 2),
)
quartets = set([tuple(itertools.chain(*i)) for i in prod])
qset = qset.union(quartets)
# order tups in sets
sorted_set = set()
for qs in qset:
if np.argmin(qs) > 1:
tup = tuple(sorted(qs[2:]) + sorted(qs[:2]))
sorted_set.add(tup)
else:
tup = tuple(sorted(qs[:2]) + sorted(qs[2:]))
sorted_set.add(tup)
return sorted_set
get_quartets(t1)
###Output
_____no_output_____
###Markdown
Compare quartet sets
###Code
q0 = get_quartets(t0)
q1 = get_quartets(t1)
# quartets that are in one tree but not the other
q0.symmetric_difference(q1)
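# Sketch of a normalised quartet distance (an assumption about how one might
# summarise the comparison; not part of the original notebook): the fraction of
# quartets that the two trees resolve differently.
shared = len(q0 & q1)
total = len(q0 | q1)
quartet_distance = 1 - shared / total
quartet_distance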
###Output
_____no_output_____ |
skvectors/doc/Using_a_Fundamental_Vector_Class.ipynb | ###Markdown
Using a Fundamental Vector Class Copyright (c) 2019 Tor Olav Kristensen, http://subcube.comhttps://github.com/t-o-k/scikit-vectorsUse of this source code is governed by a BSD-license that can be found in the LICENSE file.
###Code
from skvectors import create_class_Fundamental_Vector
# Create a 3-dimensional fundamental vector class
# The first argument is a string with the name of the class
# to be created.
# The number of elements in the iterable given as the second
# argument determines the number of dimensions for the class.
FVC = create_class_Fundamental_Vector('FVC', 'abc')
# Explicit alternative:
# FVC = \
# create_class_Fundamental_Vector(
# name = 'FVC',
# component_names = [ 'a', 'b', 'c' ],
# brackets = [ '<', '>' ],
# sep = ', '
# )
# Number of dimensions for vectors in the class
FVC.dimensions()
# Brackets for vectors in the class
# (Used when printing a vector and when applying str to a vector)
FVC.brackets
# Separator between components for vectors in the class
# (Used when printing a vector and when applying str or repr to a vector)
FVC.sep
# List of component names for vectors in the class
FVC.component_names()
# Initialize a vector
FVC(1, -2, +3)
# Initialize a vector
FVC(a=1, b=-2, c=+3)
# Initialize a vector
l = [ 1, -2, 3 ]
FVC(*l)
# Initialize vector
d = { 'a': 1, 'b': -2, 'c': 3 }
FVC(**d)
# Initialize a vector
FVC.fill(8)
# Number of dimensions of vector
u = FVC(0, 0, 0)
u.dimensions()
# Number of dimensions of vector
u = FVC(0, 0, 0)
len(u)
# List of component names for vector
u = FVC(0, 0, 0)
u.cnames
# Check if something is a vector
u = FVC(-3, 4, 5)
FVC.is_vector(u)
# Check if something is a vector
d = { 'a': -3, 'b': 4, 'c': 5 }
FVC.is_vector(d)
# Print a vector
u = FVC(2, 4, 6)
print(u)
# Applying str to a vector
u = FVC(2, 4, 6)
str(u)
# Applying str to a vector inside a string
u = FVC(-3.3, 4.6, -5.5)
'str applied to a vector: {!s}'.format(u)
# Applying repr to a vector
u = FVC(2, 4, 6)
repr(u)
# NB: This only works if the sep parameter in the class
# creation above contains a comma, or a comma and space(s)
# Applying repr to a vector
u = FVC(2, 4, 6)
eval(repr(u))
# Applying repr to a vector inside a string
u = FVC(-3.3, 4.6, -5.5)
'repr applied to a vector: {!r}'.format(u)
# Applying format to a vector
u = FVC(2.222222, 4.444444, 6.6666666)
format(u, '.3e')
# Applying format to vectors inside a string
u = FVC(2.222222, 4.444444, 6.6666666)
v = FVC(-3.3, 4.6, -5.5)
'format applied to two vectors: {:.4e} and {:.2e}'.format(u, v)
# Check if vector contains a value
u = FVC(2, 3, 4)
3 in u
# Check if a vector does not contain a value
u = FVC(2, 3, 4)
3.0 not in u
# The component values of a vector
u = FVC(-6, 8, 3)
u.a, u.b, u.c
# Change the component values of a vector
u = FVC(0, 0, 0)
u.a, u.b, u.c = 6, 7, 8
u
# Change a component value of a vector
u = FVC(0, 0, 0)
u.a += 100
u
# Change a component value of a vector
u = FVC(3, -4, 20)
u.c //= 8
u
# The component values / Indexing of vector
u = FVC(7, -8, 9)
u[0], u[1], u[2]
# The component values / Indexing of vector
u = FVC(7, -8, 9)
u[-3], u[-2], u[-1]
# Indexing of a vector
u = FVC(7, -8, 9)
u[0:3], u[:], u[::]
# Change the component values of a vector
u = FVC(0, 0, 0)
u[0], u[1], u[2] = 7, -8, 9
u
# Change the component values of a vector
u = FVC(0, 0, 0)
u[0:3] = 7, -8, 9
u
# Change the component values of a vector
u = FVC(0, 0, 0)
v = FVC(7, -8, 9)
u[:] = v
u
# Change the component values of a vector
u = FVC(0, 0, 0)
u[:] = (cv for cv in [ 7, -8, 9 ])
u
# List of the component values of a vector
u = FVC(7, -8, 9)
u.cvalues, u.component_values(), u[:]
# List of the component values
u = FVC(7, -8, 9)
list(u), [ *u ], [ getattr(u, cn) for cn in u.cnames ]
# Iterate over the components
u = FVC(7, -8, 9)
x, y, z = u
x, y, z
# Iterate over the components
u = FVC(7, -8, 9)
g = (cv for cv in u)
print(*g)
# Iterate over the components
u = FVC(7, -8, 9)
components = iter(u)
next(components), next(components), next(components)
# Check if a vector is equal to another
u = FVC(2.0, 4.0, 6.0)
v = FVC(2, 4, 6)
u == v
# Check if a vector is not equal to another
u = FVC(2, 4, 6)
v = FVC(2.0, 4.0, 6.0)
u != v
# Create a dictionary from the components of a vector and their names
u = FVC(2, 4, 6)
u.as_dict()
# Make shallow copy of vector
u = FVC(2, 4, 6)
v = FVC(*u)
v
# Make shallow copy of vector
u = FVC(2, 4, 6)
v = u.copy()
v
# Create a vector by applying a lambda function to each of its components
u = FVC(-3.3, 4.6, -5.5)
u(lambda s: 10 + s * 1000)
# Create a vector by applying abs to each of its components
u = FVC(-3.3, 4.6, -5.5)
u(abs)
# Create a vector by applying abs to each of its components
u = FVC(-3, 4, -5)
FVC(*map(abs, u))
# Create a vector by applying the int class to each of its components
u = FVC(-3.3, 4.6, -5.5)
u(int)
# Change the components of a vector by applying the int class to each component
u = FVC(-3.3, 4.6, -5.5)
u[:] = map(int, u)
u
# Create a vector method that takes 1 vector as argument
def square(s):
return s**2
FVC.create_vector_method_arg1('square', square)
u = FVC(2, 3, -4)
u.vector_square()
# Create, from a built in function, a vector method that takes 1 vector as argument
FVC.create_vector_method_arg1('abs', lambda s: abs(s))
u = FVC(2, 3, -4)
u.vector_abs()
# Create a vector method that takes 2 vectors as arguments
def add(s, t):
return s + t
FVC.create_vector_method_arg2('add', add)
u = FVC(2, 3, -4)
v = FVC(1, -2, 3)
s = 1000
u.vector_add(v), v.vector_add(s)
# Create a vector method that takes 3 vectors as arguments
def select(r, s, t):
if r < 0:
result = s
else:
result = t
return result
FVC.create_vector_method_arg3('select', select)
u = FVC(-2, 0, 3)
v = FVC(1, 3, 5)
w = FVC(2, 4, 6)
s = 0
t = 100
u.vector_select(v, w), u.vector_select(s, t)
###Output
_____no_output_____ |
OPC_Sensor/Models With Decompositions/Models with PCA/CNN/CNN_tanh_binary.ipynb | ###Markdown
Importing Libraries
###Code
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import os.path as op
import pickle
import tensorflow as tf
from tensorflow import keras
from keras.models import Model,Sequential,load_model
from keras.layers import Input, Embedding
from keras.layers import Dense, Bidirectional
from keras.layers.recurrent import LSTM
import keras.metrics as metrics
import itertools
from tensorflow.python.keras.utils.data_utils import Sequence
from decimal import Decimal
from keras import backend as K
from keras.layers import Conv1D,MaxPooling1D,Flatten,Dense
###Output
_____no_output_____
###Markdown
Data Fetching
###Code
A1=np.empty((0,5),dtype='float32')
U1=np.empty((0,7),dtype='float32')
node=['150','149','147','144','142','140','136','61']
mon=['Apr','Mar','Aug','Jun','Jul','Sep','May','Oct']
for j in node:
for i in mon:
inp= pd.read_csv('../../../data_gkv/AT510_Node_'+str(j)+'_'+str(i)+'19_OutputFile.csv',usecols=[1,2,3,15,16])
out= pd.read_csv('../../../data_gkv/AT510_Node_'+str(j)+'_'+str(i)+'19_OutputFile.csv',usecols=[5,6,7,8,17,18,19])
inp=np.array(inp,dtype='float32')
out=np.array(out,dtype='float32')
A1=np.append(A1, inp, axis=0)
U1=np.append(U1, out, axis=0)
print(A1)
print(U1)
###Output
[[1.50000e+02 1.90401e+05 7.25000e+02 2.75500e+01 8.03900e+01]
[1.50000e+02 1.90401e+05 8.25000e+02 2.75600e+01 8.03300e+01]
[1.50000e+02 1.90401e+05 9.25000e+02 2.75800e+01 8.02400e+01]
...
[6.10000e+01 1.91020e+05 1.94532e+05 2.93700e+01 7.52100e+01]
[6.10000e+01 1.91020e+05 1.94632e+05 2.93500e+01 7.52700e+01]
[6.10000e+01 1.91020e+05 1.94732e+05 2.93400e+01 7.53000e+01]]
[[ 28. 3. -52. ... 16.97 19.63 20.06]
[ 28. 15. -53. ... 16.63 19.57 23.06]
[ 31. 16. -55. ... 17.24 19.98 20.24]
...
[ 76. 12. -76. ... 3.47 3.95 4.35]
[ 75. 13. -76. ... 3.88 4.33 4.42]
[ 76. 12. -75. ... 3.46 4.07 4.28]]
###Markdown
PCA Decomposition
###Code
from sklearn.decomposition import PCA
import warnings
scaler_obj1=PCA(svd_solver='full')
scaler_obj2=PCA(svd_solver='full')
X1=scaler_obj1.fit_transform(A1)
Y1=scaler_obj2.fit_transform(U1)
warnings.filterwarnings(action='ignore', category=UserWarning)
X1=X1[:,np.newaxis,:]
Y1=Y1[:,np.newaxis,:]
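# Optional diagnostic (not in the original notebook): cumulative share of variance
# retained by the PCA components for the inputs and for the targets.
input_variance_retained = scaler_obj1.explained_variance_ratio_.cumsum()
target_variance_retained = scaler_obj2.explained_variance_ratio_.cumsum()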
def rmse(y_true, y_pred):
return K.sqrt(K.mean(K.square(y_pred - y_true), axis=-1))
def coeff_determination(y_true, y_pred):
SS_res = K.sum(K.square( y_true-y_pred ))
SS_tot = K.sum(K.square( y_true - K.mean(y_true) ) )
return ( 1 - SS_res/(SS_tot + K.epsilon()) )
###Output
_____no_output_____
###Markdown
Model
###Code
inp=keras.Input(shape=(1,5))
l=keras.layers.Conv1D(16,1,padding="same",activation="tanh",kernel_initializer="glorot_uniform")(inp)
output = keras.layers.Conv1D(7,4,padding="same",activation='sigmoid')(l)
model1=keras.Model(inputs=inp,outputs=output)
model1.compile(optimizer=keras.optimizers.Adam(learning_rate=1e-5), loss='binary_crossentropy',metrics=['accuracy','mse','mae',rmse])
model1.summary()
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(X1, Y1, test_size=0.25, random_state=42)
history1 = model1.fit(x_train,y_train,batch_size=256,epochs=50, validation_data=(x_test, y_test),verbose = 2, shuffle= False)
model1.evaluate(x_test,y_test)
###Output
13518/13518 [==============================] - 59s 4ms/step - loss: -145.0924 - accuracy: 0.3128 - mse: 1432062.8750 - mae: 74.2652 - rmse: 141.0169
###Markdown
Saving Model as File
###Code
model1.evaluate(x_train,y_train)
df1=pd.DataFrame(history1.history['loss'],columns=["Loss"])
df1=df1.join(pd.DataFrame(history1.history["val_loss"],columns=["Val Loss"]))
df1=df1.join(pd.DataFrame(history1.history["accuracy"],columns=['Accuracy']))
df1=df1.join(pd.DataFrame(history1.history["val_accuracy"],columns=['Val Accuracy']))
df1=df1.join(pd.DataFrame(history1.history["mse"],columns=['MSE']))
df1=df1.join(pd.DataFrame(history1.history["val_mse"],columns=['Val MSE']))
df1=df1.join(pd.DataFrame(history1.history["mae"],columns=['MAE']))
df1=df1.join(pd.DataFrame(history1.history["val_mae"],columns=['Val MAE']))
df1=df1.join(pd.DataFrame(history1.history["rmse"],columns=['RMSE']))
df1=df1.join(pd.DataFrame(history1.history["val_mse"],columns=['Val RMSE']))
df1
df1.to_excel("GRU_tanh_mse.xlsx")
model_json = model1.to_json()
with open("cnn_relu.json", "w") as json_file:
json_file.write(model_json)
# serialize weights to HDF5
model1.save_weights("cnn_relu.h5")
print("Saved model to disk")
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(X1, Y1, test_size=0.25, random_state=42)
from keras.models import model_from_json
json_file = open('cnn_relu.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
# load weights into new model
loaded_model.load_weights("cnn_relu.h5")
print("Loaded model from disk")
loaded_model.compile(optimizer=keras.optimizers.Adam(learning_rate=1e-5), loss='mse',metrics=['accuracy','mse','mae',rmse])
loaded_model.evaluate(x_train, y_train, verbose=0)
loaded_model.evaluate(x_test, y_test, verbose=0)
###Output
_____no_output_____
###Markdown
Error Analysis
###Code
# summarize history for loss
plt.plot(history1.history['loss'])
plt.plot(history1.history['val_loss'])
plt.title('Model Loss',fontweight ='bold',fontsize = 15)
plt.ylabel('Loss',fontweight ='bold',fontsize = 15)
plt.xlabel('Epoch',fontweight ='bold',fontsize = 15)
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()
# summarize history for accuracy
plt.plot(history1.history['accuracy'])
plt.plot(history1.history['val_accuracy'])
plt.title('Model accuracy',fontweight ='bold',fontsize = 15)
plt.ylabel('Accuracy',fontweight ='bold',fontsize = 15)
plt.xlabel('Epoch',fontweight ='bold',fontsize = 15)
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(X1, Y1, test_size=0.25, random_state=42)
y_test_pred=loaded_model.predict(x_test)
y_test_pred
y_test
y_test=y_test[:,0]
y_test_pred=y_test_pred[:,0]
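# Optional post-processing sketch (an assumption, not in the original notebook):
# the predictions live in the PCA-transformed target space, so they can be mapped
# back to the original sensor units with the inverse transform of scaler_obj2.
y_test_pred_orig = scaler_obj2.inverse_transform(y_test_pred)
y_test_orig = scaler_obj2.inverse_transform(y_test)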
from numpy import savetxt
savetxt('cnn_y_test_pred.csv', y_test_pred[:1001], delimiter=',')
from numpy import savetxt
savetxt('cnn_y_test.csv', y_test[:1001], delimiter=',')
#completed
###Output
_____no_output_____ |
Bengali_Plagiarism_Detection_&_Corpus_Creation_Notebook.ipynb | ###Markdown
© SATYAJIT GHOSH (https://github.com/SATYAJIT1910)
###Code
'''
Requirements
sudo apt install tesseract-ocr-ben
sudo apt-get install sqlite3 libsqlite3-dev
sudo apt-get install nltk
pip install pdf2image
pip install openpyxl
pip install pandas
pip install pytesseract
pip install opencv-python
'''
#importing libraries
from pdf2image import convert_from_path
import nltk as nl
import os
import sqlite3
import cv2
import pytesseract
import pandas as pd
import re
mydb = sqlite3.connect('corpus.db') #Connects to database
###Output
_____no_output_____
###Markdown
CORPUS CREATION: THIS CODE WILL CONVERT EVERY PAGE OF THE DOWNLOADED PDF FILES INTO AN IMAGE AND STORE IT IN THE RESPECTIVE FOLDER FOR THE OCR OPERATION
###Code
# This method saves every page of a PDF document as an image
def save_image(k):
images = convert_from_path('BOOKS/'+str(k)+'.pdf')
path='IMAGE/'+str(k) # Path to store the Image files
for i in range(len(images)):
if not(os.path.isdir(path)):
os.mkdir(path) # Creates the directory
# Save Images
images[i].save(path+'/'+str(k)+'_'+ str(i+1) +'.jpg', 'JPEG')
for k in range(1,201):
save_image(k) #Function Call
def nomalization(text):
text=only_bengali_characters(text)
return text
def only_bengali_characters(text):
'''
    All Bengali Unicode characters lie in the range U+0980 to U+09FF.
    Here we use a regex to remove every character outside that range,
    so only Bengali characters and spaces are kept in the output.
'''
return re.sub('[^\u0980-\u09FF ]',r'',text)
###Output
_____no_output_____
###Markdown
THIS CODE WILL PERFORM THE OCR OPERATION AND STORE THE EXTRACTED TEXT IN THE DATABASE
###Code
mydb.execute("CREATE TABLE BOOK(ID INTEGER AUTO_INCREMENT PRIMARY KEY,AUTHOR TEXT,TITLE TEXT);")
mydb.execute("CREATE TABLE PAGE(ID INTEGER ,PAGENO INTEGER,CONTENT TEXT,FOREIGN KEY (ID) REFERENCES BOOK(ID));")
# This method performs the OCR operation and saves the result in the database
def perform_OCR_and_save(path,ID,pageno):
# Reading the Image
img = cv2.imread(path)
# Adding custom options
custom_config = r'--oem 3 --psm 6'
text=pytesseract.image_to_string(img, config=custom_config,lang='ben') #OCR operation
# normalizing the texts
text=nomalization(text)
QUERY="INSERT INTO PAGE VALUES("+ID+","+pageno+",'"+text+"');"
#print(QUERY)
mydb.execute(QUERY) # Stores the extracted text to the database
# assign directory
directory = 'IMAGE'
# iterate over files in
# that directory
for filename in os.listdir(directory):
f = os.path.join(directory, filename)
print("LOG: OCR WORKING ON DIR : "+f)
ID=f.split("/")[1] # Removing the Book ID from the directory itself
QUERY='INSERT INTO BOOK(ID) VALUES('+ID+');' # Inserting the Book ID only to Database
mydb.execute(QUERY)
log=0 # for logging the progress
for images in os.listdir(f):
i=os.path.join(f,images)
PAGENO=i.split("_")[-1]
PAGENO=PAGENO[:-4] # Removing the Page No from the directory itself
perform_OCR_and_save(i,str(ID),str(PAGENO))
print("LOG : "+str(log)+" "+str(ID)+" "+str(PAGENO)+" DONE")
log=log+1
mydb.commit() # Commits to database
###Output
_____no_output_____
###Markdown
THIS CODE INSERTS THE AUTHOR AND TITLE OF EACH BOOK FROM THE EXCEL FILE INTO THE DATABASE
###Code
df = pd.read_excel (r'RECORDS_200.xlsx')
for i in range(len(df)):
author=df["AUTHOR"][i]
title=df["TITLE"][i]
QUERY="UPDATE BOOK SET AUTHOR = '" + str(author) +"'"+" WHERE ID =" + str(i+1)+";"
mydb.execute(QUERY)
QUERY="UPDATE BOOK SET TITLE = '" + str(title) +"'"+" WHERE ID =" + str(i+1)+";"
mydb.execute(QUERY)
mydb.commit()
###Output
_____no_output_____
###Markdown
PLAGIARISM CHECKING
###Code
def levenshtein(text1,text2):
diff=nl.edit_distance(text1,text2)
# FORMULA
result=(1-(diff/max(len(text1),len(text2))))*100
result="{:.2f}".format(result) #taking two decimal place value only
return float(result)
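# Quick sanity check of the similarity score (hypothetical token lists, not taken
# from the corpus): identical lists score 100.0, completely different ones 0.0.
assert levenshtein(["ক", "খ", "গ"], ["ক", "খ", "গ"]) == 100.0
assert levenshtein(["ক", "খ", "গ"], ["ঘ", "ঙ", "চ"]) == 0.0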
# source : https://github.com/stopwords-iso/stopwords-bn
bangla_stopwords=["অতএব",
"অথচ",
"অথবা",
"অনুযায়ী",
"অনেক",
"অনেকে",
"অনেকেই",
"অন্তত",
"অন্য",
"অবধি",
"অবশ্য",
"অর্থাত",
"আই",
"আগামী",
"আগে",
"আগেই",
"আছে",
"আজ",
"আপনার",
"আপনি",
"আবার",
"আমরা",
"আমাকে",
"আমাদের",
"আমার",
"আমি",
"আর",
"আরও",
"ই",
"ইত্যাদি",
"ইহা",
"উচিত",
"উত্তর",
"উনি",
"উপর",
"উপরে",
"এ",
"এঁদের",
"এঁরা",
"এই",
"একই",
"একটি",
"একবার",
"একে",
"এক্",
"এখন",
"এখনও",
"এখানে",
"এখানেই",
"এটা",
"এটাই",
"এটি",
"এত",
"এতটাই",
"এতে",
"এদের",
"এব",
"এবং",
"এবার",
"এমন",
"এমনকী",
"এমনি",
"এর",
"এরা",
"এল",
"এস",
"এসে",
"ঐ",
"ও",
"ওঁদের",
"ওঁর",
"ওঁরা",
"ওই",
"ওকে",
"ওখানে",
"ওদের",
"ওর",
"ওরা",
"কখনও",
"কত",
"কবে",
"কমনে",
"কয়েক",
"কয়েকটি",
"করছে",
"করছেন",
"করতে",
"করবে",
"করবেন",
"করলে",
"করলেন",
"করা",
"করাই",
"করায়",
"করার",
"করি",
"করিতে",
"করিয়া",
"করিয়ে",
"করে",
"করেই",
"করেছিলে",
"করেছে",
"করেছেন",
"করেন",
"কাউকে",
"কাছ",
"কাছে",
"কাজ",
"কাজে",
"কারও",
"কারণ",
"কি",
"কিংবা",
"কিছু",
"কিছুই",
"কিন্তু",
"কী",
"কে",
"কেউ",
"কেউই",
"কেখা",
"কেন",
"কোটি",
"কোন",
"কোনও",
"কোনো",
"ক্ষেত্রে",
"কয়েক",
"খুব",
"গিয়ে",
"গিয়েছে",
"গিয়ে",
"গুলি",
"গেছে",
"গেল",
"গেলে",
"গোটা",
"চলে",
"চান",
"চায়",
"চার",
"চালু",
"চেয়ে",
"চেষ্টা",
"ছাড়া",
"ছাড়াও",
"ছিল",
"ছিলেন",
"জন",
"জনকে",
"জনের",
"জন্য",
"জানতে",
"জানা",
"জানানো",
"জানায়",
"জানিয়ে",
"জানিয়েছে",
"জে",
"জ্নজন",
"টি",
"ঠিক",
"তখন",
"তত",
"তথা",
"তবু",
"তবে",
"তা",
"তাঁকে",
"তাঁদের",
"তাঁর",
"তাঁরা",
"তাঁাহারা",
"তাই",
"তাও",
"তাকে",
"তাতে",
"তাদের",
"তার",
"তারপর",
"তারা",
"তারৈ",
"তাহলে",
"তাহা",
"তাহাতে",
"তাহার",
"তিনঐ",
"তিনি",
"তিনিও",
"তুমি",
"তুলে",
"তেমন",
"তো",
"তোমার",
"থাকবে",
"থাকবেন",
"থাকা",
"থাকায়",
"থাকে",
"থাকেন",
"থেকে",
"থেকেই",
"থেকেও",
"দিকে",
"দিতে",
"দিন",
"দিয়ে",
"দিয়েছে",
"দিয়েছেন",
"দিলেন",
"দু",
"দুই",
"দুটি",
"দুটো",
"দেওয়া",
"দেওয়ার",
"দেওয়া",
"দেখতে",
"দেখা",
"দেখে",
"দেন",
"দেয়",
"দ্বারা",
"ধরা",
"ধরে",
"ধামার",
"নতুন",
"নয়",
"না",
"নাই",
"নাকি",
"নাগাদ",
"নানা",
"নিজে",
"নিজেই",
"নিজেদের",
"নিজের",
"নিতে",
"নিয়ে",
"নিয়ে",
"নেই",
"নেওয়া",
"নেওয়ার",
"নেওয়া",
"নয়",
"পক্ষে",
"পর",
"পরে",
"পরেই",
"পরেও",
"পর্যন্",
"পাওয়া",
"পাচ",
"পারি",
"পারে",
"পারেন",
"পি",
"পেয়ে",
"পেয়্র্",
"প্রতি",
"প্রথম",
"প্রভৃতি",
"প্রাথমিক",
"প্রায়",
"প্রায়",
"ফলে",
"ফিরে",
"ফের",
"বদলে",
"বন",
"বরং",
"বলতে",
"বলল",
"বললেন",
"বলা",
"বলে",
"বলেছেন",
"বলেন",
"বসে",
"বহু",
"বা",
"বাদে",
"বার",
"বি",
"বিনা",
"বিভিন্ন",
"বিশেষ",
"বিষয়টি",
"বেশ",
"বেশি",
"ব্যবহার"
"ব্যাপারে",
"ভাবে",
"ভাবেই",
"মতো",
"মতোই",
"মধ্যভাগে"
"মধ্যে",
"মধ্যেই",
"মধ্যেও",
"মনে",
"মাত্র",
"মাধ্যমে",
"মোট",
"মোটেই",
"যখন",
"যত",
"যতটা",
"যথেষ্ট",
"যদি",
"যদিও",
"যা",
"যাঁর",
"যাঁরা",
"যাওয়া",
"যাওয়ার",
"যাওয়া",
"যাকে",
"যাচ্ছে",
"যাতে",
"যাদের",
"যান",
"যাবে",
"যায়",
"যার",
"যারা",
"যিনি",
"যে",
"যেখানে",
"যেতে",
"যেন",
"যেমন",
"র",
"রকম",
"রয়েছে",
"রাখা",
"রেখে",
"লক্ষ",
"শুধু",
"শুরু",
"সঙ্গে",
"সঙ্গেও",
"সব",
"সবার",
"সমস্ত",
"সম্প্রতি"
"সহ",
"সহিত",
"সাধারণ",
"সামনে",
"সি",
"সুতরাং",
"সে",
"সেই",
"সেখান",
"সেখানে",
"সেটা",
"সেটাই",
"সেটাও",
"সেটি",
"স্পষ্ট",
"স্বয়ং",
"হইতে",
"হইবে",
"হইয়া",
"হওয়া",
"হওয়ায়",
"হওয়ার",
"হচ্ছে",
"হত",
"হতে",
"হতেই",
"হন",
"হবে",
"হবেন",
"হয়",
"হয়তো",
"হয়নি",
"হয়ে",
"হয়েই",
"হয়েছিল",
"হয়েছে",
"হয়েছেন",
"হল",
"হলে",
"হলেই",
"হলেও",
"হলো",
"হাজার",
"হিসাবে",
"হৈলে",
"হোক",
"হয়"]
# This method traverses the entire database, searches for similar texts and returns the matches
def check(inputText):
scorelist=[]
inputTextToken=inputText.split(" ")
inputTextToken = [i for i in inputTextToken if i not in bangla_stopwords]
for bookID in range(1,201):
#print("Starting work on ",bookID)
mycursor=mydb.execute("SELECT MAX(PAGENO) FROM PAGE WHERE ID="+str(bookID)+";")
totalpages=int(mycursor.fetchone()[0])
for page in range(1,totalpages+1):
mycursor=mydb.execute("SELECT CONTENT FROM PAGE WHERE ID="+str(bookID)+" AND PAGENO="+str(page)+";")
dbtext=mycursor.fetchone()[0]
dbtokens=dbtext.split(" ")
dbtokens = [i for i in dbtokens if i not in bangla_stopwords] #Removing stopwords
#gives the similarity score
score=levenshtein(dbtokens,inputTextToken)
if(score>=20):
scorelist.append([bookID,page,score])
scorelist.sort(key = lambda y: y[2])
return scorelist
# This method accepts the list from the check() method and looks up the title and author information in the database
def findTitle(result):
final=[]
for i in range(0,len(result)):
bookID=result[i][0]
pageno=result[i][1]
score=result[i][2]
mycursor=mydb.execute("SELECT * FROM BOOK WHERE ID="+str(bookID))
dboutput=mycursor.fetchone()
author=dboutput[1]
title=dboutput[2]
final.append([bookID,author,title,pageno,score])
return final
###Output
_____no_output_____
###Markdown
Driver Codes
###Code
inputText='''
সোনা বললে, “কতদিন পালিয়ে বেড়াবে? বাড়ি যাবে না? তোমার মা নেই? ঘড়িওলা হাউ হাউ করে কীদতে লাগল। আছে গো, আছে, সব আছে; বড়ো ভালো সরুচাকলি বানায় আমার মা, একবার খেলে
আর ভোলা যায় না। কবে যে আবার তাকে দেখতে পাবো! টিয়া বললে,দুষ্টু মাকু থাকগে পড়ে, তুমি মার কাছে ফিরে যাও ।” এই বলে পুটলির কোণা দিয়ে টিয়া চোখ মুছল। ঘড়িওলাও চোখ মুছল। “তাই কি হয়, দিদি, মাকু যে আমার প্রাণ, ওকে নাগালের বাইরে যেতে দিই কী করে? ওর চাবি ফুরিয়ে গেলেই যে নেতিয়ে পড়বে, তখন চোর ডাকাতে ওর কলকজা খুলে নিলেই মাকুর আমার হয়ে গেল।” “কবে চাবি ফুরুবে?” এক বছরের চাবি দেওয়া, তার সাড়ে এগারো মাস কেটে গেছে, আর পনেরো দিন। বলো,
ওকে বের করে চোখে চোখে রাখবে? সোনা বললে, “সেলাই কল চালায় আর নিজের পেটের চাবিটা ঘুরিয়ে নিতে পারবে না? ঘড়িওলা ব্যস্ত হয়ে উঠল। পেটে নয়, দিদি, পেটে নয়, পিটের মধ্যিখানে, গায়ে-বসা এতটুকু চাবি, কানখুসকি দিয়ে ঘুরুতে হয়। নইলে মাকু যা দস্যি, কৰে টেনে খুলে ফেলে দিত। ওখানে সে হাত পায় না, হাত দুটো ইচ্ছা করে একটু বেঁটে করে দিয়েছি। কথা বলতে বলতে কখন তারা শুনশুনির মাঠ পেরিয়ে এসেছে, সামনে দেখে ঘন বন। বনের মধ্যে খানিক রোদ, খানিক ছায়া, পাখির ডাক, পাতার খসখস, বুনো ফুলের আর ধূপ কাঠের গন্ধ। ঘড়িওলা বললে,আমি আর যাব না, মাকু আমাকে দেখলে ছেঁকে ধরবে, আমার ভয় করে। তোমরা স্কুলে ভর্তি হয়েছ, ভয় পাও না, তোমরা যাও! আমি এখানে গাছের মাথায় পাতার ঘর বেঁধে অপেক্ষা করি।' এই বলে ঘড়িওলা সোনা-টিয়ার ঘাড় ধরে একটু ঠেলে দিল।
দুই ঠেলা খেয়ে প্রায় একরকম ঢুকেই গেছিল বনের মধ্যে সোনা আর টিয়া, এমন সময় ঘড়িওলা পেছন
থেকে ডেকে বলল, চললে কোথায়? হ্যান্ডবিল নিতে হবে না? তা নইলে মাকুর বিষয়ে বিশেষ
বিজ্ঞপ্তি জানবে কী করে? বলি, তাকে চিনতে হবে তো? এই বলে ঝোলা থেকে একটা বড়ো মতো গোলাপি কাগজ বের করে, পাশের মাটির টিপির
ওপরে চড়ে গলা খাঁকরে পড়তে লাগল মাত্র পচিশ পয়সায়! অদ্ভুত! অত্যান্চর্য মাকু দি গ্রেট! অভাবনীয় দৃশ্য দেখে যান!
'''
if(len(inputText)<=300):
print("Minimum 300 Characters required for accurate outputs")
elif(len(inputText)>5000):
print("To large to handle,should be within 5000 characters")
else:
finaloutput=findTitle(check(inputText))
finaloutput
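    # Optional presentation step (an assumption, not in the original notebook):
    # each row of finaloutput is [bookID, author, title, pageno, score], so it can
    # be displayed as a table with pandas, which is already imported above.
    results_df = pd.DataFrame(finaloutput, columns=['BookID', 'Author', 'Title', 'Page', 'Score'])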
mydb.close() #closes the database
###Output
_____no_output_____ |
Chapter 1/3. Python/Introduction to Python/01_workbooks/Intro to Python - part 4.ipynb | ###Markdown
Intro to Python - Lecture - Part 4 4. Conditions In program code, there are often statements that you only want to execute when certain conditions hold. Every programming language therefore supports conditional statements. In this chapter we will explain how to use conditions in Python. Boolean expressions A conditional statement, often called an "if"-statement, consists of a test and one or more actions. The test is a so-called "boolean expression". The actions are executed when the test evaluates to `True`. For instance, an app on a smartphone might give a warning if the battery level is lower than 5%. This means that the app needs to check if a certain variable `battery_level` is lower than the value 5, i.e., if the comparison `battery_level < 5` evaluates to `True`. If the variable `battery_level` currently holds the value `17`, then `battery_level < 5` evaluates to `False`. Booleans `True` and `False` are so-called "boolean values" that are predefined in Python. `True` and `False` are the only boolean values, and anything that is not `False`, is `True`.You might wonder what the data type of `True` and `False` is. The answer is that they are of the type `bool`. However, in Python every value can be interpreted as a boolean value, regardless of its data type. I.e., when you test a condition, and your test is of a value that is not `True` or `False`, it will still be interpreted as either `True` or `False`.The following values are interpreted as `False`:- The special value `False`- The special value `None` (more about that in the next chapter)- Every numerical value that is zero, e.g., `0` and `0.0`- Every empty sequence, e.g., an empty string (`""`)- Every empty "mapping", e.g., an empty dictionary (dictionaries follow in a later chapter)- Any function or method call that returns one of these listed values (this includes functions that return nothing; more about that in the next chapter)Every other value is interpreted as `True`. Any expression that is evaluated as `True` or `False` is called a "boolean expression". Comparisons The most common boolean expressions are comparisons. A comparison consists of two values, and a comparison operator in between. Comparison operators are: < less than <= less than or equal to == equal to >= equal to or greater than > greater than != not equalA common mistake is to use a single `=` as a comparison operator, but the single `=` is the assignment operator. In general, Python will produce a syntax or runtime error if you try to use a single `=` to make a a comparison.You can use the comparison operators to compare both numbers and strings. Comparison for strings is an alphabetical comparison, whereby all **capitals come before all lower case letters (and digits come before both of them). ** Numbers, CAPITALS, lowercaseHere are some examples of the results of comparisons:
###Code
print("1.", 2 < 5 )
print("2.", 2 <= 5 )
print("3.", 3 > 3 )
print("4.", 3 >= 3 )
print("5.", 3 == 3.0 )
print("6.", 3 == "3" )
print("7.", "syntax" == "syntax" )
print("8.", "syntax" == "semantics" )
print("9.", "syntax" == " syntax" )
print("10.", "Python" != "rubbish" )
print("11.", "Python" > "Perl")
print("12.", "banana" < "orange")
print("13.", "banana" < "Orange")
print("o" == int)
###Output
1. True
2. True
3. False
4. True
5. True
6. False
7. True
8. False
9. False
10. True
11. True
12. True
13. False
False
###Markdown
Comparisons between data types that cannot be compared generally lead to runtime errors.
###Code
# This code gives a runtime error.
print( 3 < "3" )
###Output
_____no_output_____
###Markdown
Functions can return a boolean value. The following code defines a function `isPositive` which returns `True` if its parameter is a positive number, and `False` otherwise:
###Code
def isPositive(number):
return number > 0
print(isPositive(4))
print(isPositive(-12.4))
###Output
_____no_output_____
###Markdown
`in` operator Python has a special operator called the "membership test operator", which is usually abbreviated to the "in operator" as it is written as `in`. The `in` operator tests if the value to the left side of the operator is found in the collection to the right side of the operator.At this time, we have discussed only one "collection", which is the string. A string is a collection of characters. You can test if a particular character or a sequence of characters is part of the string using the `in` operator. The opposite of the `in` operator is the `not in` operator, which gives `True` when `in` gives `False`, and which gives `False` when `in` gives `True`. For example:
###Code
print("y" in "Python")
print("x" in "Python")
print("p" in "Python")
print("th" in "Python")
print("to" in "Python")
print("y" not in "Python")
###Output
_____no_output_____
###Markdown
Logical operators Boolean expressions can be combined with logical operators. There are three logical operators, `and`, `or`, and `not`.`and` and `or` are placed between two boolean expressions. When `and` is between two boolean expressions, the result is `True` if and only if both expressions evaluate to `True`; otherwise it is `False`. When `or` is between two boolean expressions, the result is `True` when one or both of the expressions evaluate to `True`; it is only `False` if both expressions evaluate to `False`.`not` is placed in front of a boolean expression to switch it from `True` to `False` or vice versa.For example:
###Code
t = True
f = False
print(t and t)
print(t and f)
print(f and t)
print(f and f)
print(t or t)
print(t or f)
print(f or t)
print(f or f)
print(not t)
print(not f)
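# Combining comparisons with logical operators (illustrative, made-up values):
battery_level = 17
is_charging = False
show_warning = battery_level < 5 and not is_charging   # False for these values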
###Output
_____no_output_____
###Markdown
Conditional statements Conditional statements are, as the introduction to this chapter said, statements consisting of a test and one or more actions, whereby the actions only get executed if the test evaluates to `True`. Conditional statements are also called "if"-statements, as they are written using the special keyword `if`. Here is an example:
###Code
x = 9
if x < 10:
print("x less then 10")
###Output
_____no_output_____
###Markdown
The syntax of the `if` statement is as follows: `if <boolean expression>:` followed by an indented block of `<statements>`. Note the colon (`:`) after the boolean expression, and the fact that the `<statements>` are indented. Code blocks In the syntactic description of the `if` statement shown above, you see that the `<statements>` are "indented", i.e., they are placed one tabulation to the right. This is intentional and necessary. Python considers statements that follow each other and that are at the same level of indentation part of one code block. The code block underneath the first line of the `if` statement is considered to be the list of actions that are executed when the boolean expression evaluates to `True`. For example:
###Code
x = 7
if x < 10:
print("This line is only executed if x < 10.")
print("And the same holds for this line.")
print("This line, however, is always executed.")
###Output
_____no_output_____
###Markdown
**Exercise**: Change the value of `x` to see how it affects the outcome of the code. Thus, all the statements under the `if` that are indented belong to the code block that is executed when the boolean expression of the `if` statement evaluates to `True`. This code block is skipped if the boolean expression evaluates to `False`. Statements that follow the `if` construction and are not indented (as deep as the code block under the `if`) are executed regardless of whether the boolean expression evaluates to `True` or `False`. Naturally, you are not restricted to having just a single `if` statement in your code. You can have as many as you like:
###Code
def print_status(x):
#Your Code Here
print(print_status(5))
###Output
_____no_output_____
###Markdown
**Exercise**: Test this function by giving it different parameters and see how it affects the outcome. Two-way decisions Indentation In Python, __correct indenting is of the utmost importance__! Without correct indentation, Python will not be able to recognize which statements belong together as one code block, and therefore cannot execute your code correctly.**Side note**: In many programming languages (actually, in almost all programming languages), code blocks are recognized by having them start and end with a specific symbol or keyword. For instance, in languages such as Java and C++, code blocks are enclosed by curly brackets, while in languages such as Pascal and Modula, code blocks are started with the keyword `begin` and ended with the keyword `end`. That means that in almost all languages, indenting to recognize code blocks is not necessary. However, you will find that code written by capable programmers is always nicely indented, regardless of the language. This makes it easy to see which code belongs together, for instance, which commands belong to an `if` statement. Python makes indenting a requirement. While for experienced programmers who are new to Python this seems strange at first, they quickly find that they do not care -- they were indenting nicely anyway, and Python's strategy makes that beginning programmers are also required to write nice-looking code.Note that you can indent using the *Tab* key, or indent using spaces. Most editors (including the editor in these notebooks) will auto-indent for you, i.e., if, for instance, you write the first line of an `if` statement, once you press *Enter* to go to the next line, it will automatically "jump in" one level of indentation (if it does not, it is very likely that you forgot the colon at the end of the conditional expression). Also, when you have indented one line to a certain level of indentation, the next line will use the same level. You can get rid of indentations using the *Backspace* key.For Python programs, a normal level of indentation is four spaces, i.e., one press of the *Tab* key should "jump in" four spaces. As long as you are in one editor, you can in such a case either use the *Tab* key, or press the spacebar four times, to go up one indentation level. So far so good. You may get into problems, however, if you port your code to another editor, which might have a different setting for the *Tab* key. If you edit your code in a such a different editor, even though it might look okay, Python may see that there are indentation conflicts (a mix of tabulations and space-indentations) and may report a syntax error when you try to run your code. Most editors therefore offer the option to automatically replace tabulations with spaces, so that such problems do not arise. If you use a text editor to write Python code, check if it contains such an option, and if so, ensure that tabulations are set to 4 and are automatically replaced by spaces. Two-way decisions Often a decision branches, e.g., if a certain condition arises, you want to take a particular action, but if it does not arise, you want to take another action. This is supported by Python in the form of an expansion to the `if` statement that adds an `else` branch:
###Code
def bigger_than_two(x):
if x > 2:
print(x, "is bigger than 2")
else:
print("smaller than or equal to 2")
bigger_than_two(4444)
###Output
_____no_output_____
###Markdown
The syntax is as follows: `if <boolean expression>:` with an indented block of `<statements>`, followed by `else:` with its own indented block of `<statements>`. Note the colon (`:`) after both the boolean expression and the `else`. It is important that the word `else` is aligned with the word `if` that it belongs to. If you do not align them correctly, this results in an indentation error. A consequence of adding an `else` branch to an `if` statement is that exactly one of the two code blocks will always be executed. If the boolean expression of the `if` statement evaluates to `True`, the code block directly under the `if` will be executed, and the code block directly under the `else` will be skipped. If it evaluates to `False`, the code block directly under the `if` will be skipped, while the code block directly under the `else` will be executed. **Exercise**: Write a function `isOdd` which returns `True` if its integer parameter is odd or `False` if it's even. You can use the modulo operator. Test your function with different parameter values.
###Code
# function isOdd
def isOdd (n):
#Your Code Here
print(isOdd(3))
###Output
_____no_output_____
###Markdown
Multi-branch decisions Occasionally, you encounter multi-branch decisions, where one of multiple blocks of commands has to be executed, but never more than one block. Such multi-branch decisions can be implemented using a further expansion of the `if` statement, namely in the form of one or more `elif` statements (`elif` stands for "else if"):
###Code
def age_status(age):
if age < 12:
print("You're still a child!")
elif age < 18:
print("You are a teenager!")
elif age < 30:
print("You're pretty young!")
elif age < 50:
print("Wisening up, are we?")
else:
print("Aren't the years weighing heavy on your shoulders?")
age_status(12)
###Output
_____no_output_____
###Markdown
Change the parameter value and test the function `age_status`. The syntax is as follows: `if <boolean expression>:` with an indented block of `<statements>`, then one or more `elif <boolean expression>:` branches, each with its own indented block, and finally an optional `else:` block. The syntax above shows only one `elif`, but you can have multiple. The different tests in an `if`-`elif`-`else` construct are executed in order. The first boolean expression that evaluates to `True` will cause the code block that belongs to that expression to be executed. None of the other code blocks of the construct will be executed. In other words: first the boolean expression next to the `if` will be evaluated. If it evaluates to `True`, the code block underneath the `if` will be executed. If it evaluates to `False`, the boolean expression for the first `elif` will be evaluated. If that turns out to be `True`, the code block underneath it will be executed. If it is `False`, Python will check the boolean expression for the next `elif`. Etcetera. Only when all the boolean expressions for the `if` and all of the `elif`s evaluate to `False` will the code block underneath the `else` be executed. The consequence is that in the code above, for the first `elif`, you do not need to test `age >= 12 and age < 18`. Just testing `age < 18` suffices, because if `age` were smaller than `12`, the boolean expression for the `if` would already have evaluated to `True`, and the boolean expression for the first `elif` would not even have been encountered by Python. Note that the inclusion of the `else` branch is always optional. However, in most cases where we need `elif`s we include it anyway, if only for error checking. **Exercise:** Write a function that takes a parameter `weight`. If `weight` is greater than 20 (kilos), print: "There is a $25 surcharge for luggage that is too heavy." If `weight` is smaller than 20, print: "Thank you for your business." If `weight` is exactly 20, print: "Pfew! The weight is just right!". Test the function for different values of `weight` to make sure your code works.
###Code
# Weight function
def weight (weight):
#Your Code Here
weight(19)
###Output
_____no_output_____
###Markdown
Nested conditions Given the rules of the `if-elif-else` statements and indentation, it is perfectly possible to use an `if` statement within another `if` statement. This second `if` statement is only executed if the condition for the first `if` statement evaluates to `True`, as it belongs to the code block of the first `if` statement. This is called "nesting".
###Code
x = 77
if x%7 == 0:
# --- Here starts a nested block of code ---
if x%11 == 0:
print(x, "is dividable by both 7 and 11.")
else:
print(x, "is dividable by 7, but not by 11.")
# --- Here ends the nested block of code ---
elif x%11 == 0:
print(x, "is dividable by 11, but not by 7.")
else:
print(x, "is dividable by neither 7 nor 11.")
###Output
_____no_output_____
###Markdown
**Exercise**: Change the value of `x` and observe the results. Early exits Occasionally it happens that you want to exit a function (or program) early when a certain condition arises. For instance, your function receives and processes an integer value extensively. But if the value cannot be processed, the function should just return an error message. You could write that function as follows:
###Code
def handle_number(num):
if num < 0:
print("I cannot handle a negative integer, you clod!")
else:
print("Now I am processing your integer", num)
print("Lots and lots of processing")
print("Hundreds of lines of code here")
handle_number(2)
###Output
_____no_output_____
###Markdown
It is a bit irritating that most of your program is already one indent deep, while you would have preferred to leave the program at the error message, and then have the rest of the program at the top indent level. You can do that using an early `return` statement:
###Code
def handle_number(num):
if num < 0:
print("I cannot handle a negative integer, you clod!")
return
print("Now I am processing your integer", num)
print("Lots and lots of processing")
print("Hundreds of lines of code here")
handle_number(-2)
###Output
_____no_output_____ |
docs/tutorials/scikitlearn_regression.ipynb | ###Markdown
Scikit-Learn Regression The library `scikit-learn` is a great machine-learning toolkit that provides a large collection of regression methods. By default, `chaospy` only supports traditional least-squares regression, but it is also designed to work together with the various regression functions provided by `scikit-learn`. Because `scikit-learn` isn't a required dependency, you might need to install it first with e.g. `pip install scikit-learn`. When that is done, it should be importable:
###Code
import sklearn
###Output
_____no_output_____
###Markdown
As an example to follow, consider the following artificial case:
###Code
import numpy
import chaospy
samples = numpy.linspace(0, 5, 50)
numpy.random.seed(1000)
noise = chaospy.Normal(0, 0.1).sample(50)
evals = numpy.sin(samples) + noise
from matplotlib import pyplot
pyplot.rc("figure", figsize=[15, 6])
pyplot.scatter(samples, evals)
pyplot.show()
###Output
_____no_output_____
###Markdown
Least squares regression By default, `chaospy` does not use `sklearn` (and can be used without `sklearn` being installed). Instead it uses `scipy.linalg.lstsq`, which is the ordinary least squares method: the classical regression approach of minimizing the squared residuals. In practice:
###Code
q0 = chaospy.variable()
expansion = chaospy.polynomial([1, q0, q0**2, q0**3])
fitted_polynomial = chaospy.fit_regression(
expansion, samples, evals)
pyplot.scatter(samples, evals)
pyplot.plot(samples, fitted_polynomial(samples))
pyplot.show()
fitted_polynomial.round(4)
###Output
_____no_output_____
###Markdown
Least squares regression is also supported by `sklearn`. So it is possible to get the same result using the `LinearRegression` model. For example:
###Code
from sklearn.linear_model import LinearRegression
model = LinearRegression(fit_intercept=False)
fitted_polynomial = chaospy.fit_regression(
expansion, samples, evals, model=model)
pyplot.scatter(samples, evals)
pyplot.plot(samples, fitted_polynomial(samples))
pyplot.show()
fitted_polynomial.round(4)
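# Optional consistency check (an assumption, not part of the original tutorial):
# the sklearn-based least-squares fit should reproduce chaospy's default
# scipy.linalg.lstsq fit up to numerical precision.
default_fit = chaospy.fit_regression(expansion, samples, evals)
numpy.allclose(default_fit(samples), fitted_polynomial(samples))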
###Output
_____no_output_____
###Markdown
It is important to note that sklearn often does extra operations that may interfere with the compatibility of `chaospy`. Here `fit_intercept=False` ensures that an extra column isn't added needlessly. An error will be raised if this is forgotten. Single Variable Regression Methods While in most cases least squares regression is sufficient, that is not always the case. For those deviating cases `scikit-learn` provides a set of alternative methods. Even though `chaospy` doesn't differentiate between single dimensional and multi-dimensional responses, `scikit-learn` does. The methods that support single dimensional responses are:* `least squares` -- Simple $L_2$ regression without any extra features.* `elastic net` -- $L_2$ regression with both $L_1$ and $L_2$ regularization terms.* `lasso` -- $L_2$ regression with an extra $L_1$ regularization term, and a preference for fewer non-zero terms.* `lasso lars` -- An implementation of `lasso` meant for high dimensional data.* `lars` -- $L_1$ regression well suited for high dimensional data.* `orthogonal matching pursuit` -- $L_2$ regression with an enforced number of non-zero terms.* `ridge` -- $L_2$ regression with an $L_2$ regularization term.* `bayesian ridge` -- Same as `ridge`, but uses Bayesian probability to let the data estimate the complexity parameter.* `auto relevant determination` -- Same as `bayesian ridge`, but also favors fewer non-zero terms.
###Code
from sklearn import linear_model as lm
kws = {"fit_intercept": False}
univariate_models = {
"least squares": lm.LinearRegression(**kws),
"elastic net": lm.ElasticNet(alpha=0.1, **kws),
"lasso": lm.Lasso(alpha=0.1, **kws),
"lasso lars": lm.LassoLars(alpha=0.1, **kws),
"lars": lm.Lars(**kws),
"orthogonal matching pursuit":
lm.OrthogonalMatchingPursuit(n_nonzero_coefs=3, **kws),
"ridge": lm.Ridge(alpha=0.1, **kws),
"bayesian ridge": lm.BayesianRidge(**kws),
"auto relevant determination": lm.ARDRegression(**kws),
}
###Output
_____no_output_____
###Markdown
Again, as the polynomial expansion already addresses the constant term, it is important to remember to include `fit_intercept=False` for each model. We can then create a fit for each of the univariate models:
###Code
for label, model in univariate_models.items():
fitted_polynomial = chaospy.fit_regression(
expansion, samples, evals, model=model)
pyplot.plot(samples, fitted_polynomial(samples), label=label)
pyplot.scatter(samples, evals)
pyplot.legend(loc="upper right")
pyplot.show()
###Output
_____no_output_____
###Markdown
Multi-variable Regression Methods This part of the tutorial uses the same example as the [example introduction](./example_introduction.ipynb). In other words:
###Code
from chaospy.example import (
coordinates, exponential_model, distribution,
error_mean, error_variance
)
###Output
_____no_output_____
###Markdown
The methods that support multi-dimensional (multi-output) responses are:* `least squares` -- Simple $L_2$ regression without any extra features.* `elastic net` -- $L_2$ regression with both $L_1$ and $L_2$ regularization terms.* `lasso` -- $L_2$ regression with an extra $L_1$ regularization term, and a preference for fewer non-zero terms.* `lasso lars` -- An implementation of `lasso` meant for high dimensional data.* `orthogonal matching pursuit` -- $L_2$ regression with an enforced number of non-zero terms.* `ridge` -- $L_2$ regression with an $L_2$ regularization term.
###Code
multivariate_models = {
"least squares": lm.LinearRegression(**kws),
"elastic net": lm.MultiTaskElasticNet(alpha=0.2, **kws),
"lasso": lm.MultiTaskLasso(alpha=0.2, **kws),
"lasso lars": lm.LassoLars(alpha=0.2, **kws),
"lars": lm.Lars(n_nonzero_coefs=3, **kws),
"orthogonal matching pursuit": \
lm.OrthogonalMatchingPursuit(n_nonzero_coefs=3, **kws),
"ridge": lm.Ridge(alpha=0.2, **kws),
}
###Output
_____no_output_____
###Markdown
To illustrate the difference between the methods, we perform a simple error analysis:
###Code
# NBVAL_CHECK_OUTPUT
import warnings
warnings.filterwarnings("ignore", category=RuntimeWarning)
expansion = chaospy.generate_expansion(2, distribution)
samples = distribution.sample(50)
evals = numpy.array([exponential_model(sample)
for sample in samples.T])
for label, model in multivariate_models.items():
fitted_polynomial, coeffs = chaospy.fit_regression(
expansion, samples, evals, model=model, retall=True)
self_evals = fitted_polynomial(*samples)
error_mean_ = error_mean(chaospy.E(
fitted_polynomial, distribution))
error_var_ = error_variance(chaospy.Var(
fitted_polynomial, distribution))
count_non_zero = numpy.sum(numpy.any(coeffs, axis=-1))
print(f"{label:<30} {error_mean_:.5f} " +
f"{error_var_:.5f} {count_non_zero}")
###Output
least squares 0.00003 0.00000 6
elastic net 0.08373 0.02085 2
lasso 0.01386 0.01541 2
lasso lars 0.21168 0.02121 1
lars 0.00061 0.00144 3
orthogonal matching pursuit 0.00114 0.00060 3
ridge 0.00819 0.00936 6
|
p3_collab-compet/Tennis-ddpg-v01.ipynb | ###Markdown
Collaboration and Competition---In this notebook, you will learn how to use the Unity ML-Agents environment for the third project of the [Deep Reinforcement Learning Nanodegree](https://www.udacity.com/course/deep-reinforcement-learning-nanodegree--nd893) program. 1. Start the EnvironmentWe begin by importing the necessary packages. If the code cell below returns an error, please revisit the project instructions to double-check that you have installed [Unity ML-Agents](https://github.com/Unity-Technologies/ml-agents/blob/master/docs/Installation.md) and [NumPy](http://www.numpy.org/).
###Code
%load_ext autoreload
%autoreload 2
import os
from os import path
import sys
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from functools import partial
import datetime as dt
import time
repo_path = path.dirname(path.dirname(path.abspath("__file__")))
repo_path
sys.path.append(repo_path)
from unityagents import UnityEnvironment
import numpy as np
from collections import deque
from src.ac_agent import AgentDDPG, GaussianProcess, OUNoise
from src.utils import action_scaler_fn
from unity_tennis_utils import train
EXP_NAME = 'ddpg:v01'
EXP_FOLDER = 'ddpg1'
action_scaler = partial(action_scaler_fn, lower=-1., upper=1.)
###Output
_____no_output_____
###Markdown
Next, we will start the environment! **_Before running the code cell below_**, change the `file_name` parameter to match the location of the Unity environment that you downloaded.- **Mac**: `"path/to/Tennis.app"`- **Windows** (x86): `"path/to/Tennis_Windows_x86/Tennis.exe"`- **Windows** (x86_64): `"path/to/Tennis_Windows_x86_64/Tennis.exe"`- **Linux** (x86): `"path/to/Tennis_Linux/Tennis.x86"`- **Linux** (x86_64): `"path/to/Tennis_Linux/Tennis.x86_64"`- **Linux** (x86, headless): `"path/to/Tennis_Linux_NoVis/Tennis.x86"`- **Linux** (x86_64, headless): `"path/to/Tennis_Linux_NoVis/Tennis.x86_64"`For instance, if you are using a Mac, then you downloaded `Tennis.app`. If this file is in the same folder as the notebook, then the line below should appear as follows:```env = UnityEnvironment(file_name="Tennis.app")```
###Code
env = UnityEnvironment(file_name="Tennis_Windows_x86_64/Tennis.exe")
###Output
INFO:unityagents:
'Academy' started successfully!
Unity Academy name: Academy
Number of Brains: 1
Number of External Brains : 1
Lesson number : 0
Reset Parameters :
Unity brain name: TennisBrain
Number of Visual Observations (per agent): 0
Vector Observation space type: continuous
Vector Observation space size (per agent): 8
Number of stacked Vector Observation: 3
Vector Action space type: continuous
Vector Action space size (per agent): 2
Vector Action descriptions: ,
###Markdown
Environments contain **_brains_** which are responsible for deciding the actions of their associated agents. Here we check for the first brain available, and set it as the default brain we will be controlling from Python.
###Code
# get the default brain
brain_name = env.brain_names[0]
brain = env.brains[brain_name]
###Output
_____no_output_____
###Markdown
2. Examine the State and Action Spaces In this environment, two agents control rackets to bounce a ball over a net. If an agent hits the ball over the net, it receives a reward of +0.1. If an agent lets a ball hit the ground or hits the ball out of bounds, it receives a reward of -0.01. Thus, the goal of each agent is to keep the ball in play. The observation space consists of 8 variables corresponding to the position and velocity of the ball and racket. Two continuous actions are available, corresponding to movement toward (or away from) the net, and jumping. Run the code cell below to print some information about the environment.
###Code
# reset the environment
env_info = env.reset(train_mode=True)[brain_name]
# number of agents
num_agents = len(env_info.agents)
print('Number of agents:', num_agents)
# size of each action
action_size = brain.vector_action_space_size
print('Size of each action:', action_size)
# examine the state space
states = env_info.vector_observations
state_size = states.shape[1]
print('There are {} agents. Each observes a state with length: {}'.format(states.shape[0], state_size))
print('The state for the first agent looks like:', states[0])
###Output
Number of agents: 2
Size of each action: 2
There are 2 agents. Each observes a state with length: 24
The state for the first agent looks like: [ 0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0.
0. 0. 0. 0. -6.65278625 -1.5
-0. 0. 6.83172083 6. -0. 0. ]
###Markdown
3. Take Random Actions in the EnvironmentIn the next code cell, you will learn how to use the Python API to control the agents and receive feedback from the environment.Once this cell is executed, you will watch the agents' performance, if they select actions at random with each time step. A window should pop up that allows you to observe the agents.Of course, as part of the project, you'll have to change the code so that the agents are able to use their experiences to gradually choose better actions when interacting with the environment! 1. After each episode, we add up the rewards that each agent received (without discounting), to get a score for each agent. This yields 2 (potentially different) scores. We then take the maximum of these 2 scores.2. This yields a single score for each episode.3. The environment is considered solved, when the average (over 100 episodes) of those scores is at least +0.5. When finished, you can close the environment. 4. It's Your Turn!Now it's your turn to train your own agent to solve the environment! When training the environment, set `train_mode=True`, so that the line for resetting the environment looks like the following:```pythonenv_info = env.reset(train_mode=True)[brain_name]```
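As a quick illustration, the random-action loop described in section 3 might look like the following sketch (it reuses `brain_name`, `num_agents` and `action_size` from the cells above, and scores each episode as the max over the two agents, as described above):
```python
for i in range(1, 4):                                       # play a few episodes
    env_info = env.reset(train_mode=False)[brain_name]      # reset the environment
    states = env_info.vector_observations                   # current state of each agent
    scores = np.zeros(num_agents)                           # score of each agent
    while True:
        actions = np.random.randn(num_agents, action_size)  # select random actions
        actions = np.clip(actions, -1, 1)                   # keep them in [-1, 1]
        env_info = env.step(actions)[brain_name]            # send actions to the environment
        scores += env_info.rewards                          # accumulate rewards
        states = env_info.vector_observations               # roll over to the next states
        if np.any(env_info.local_done):                     # stop when the episode ends
            break
    print('Score (max over agents) from episode {}: {}'.format(i, np.max(scores)))
```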
###Code
RND_SEED = 123
# Problem
N_EPISODES = 1000
MAX_STEPS = 1000
SOLVED_AT = .5
GAMMA = .99 # Discount factor
# Noise
NOISE_MU = 0
NOISE_SIGMA = 0.1
NOISE_DECAY = 0.9995
NOISE_MIN_WEIGHT = .1
# Agent
ACT_HID_LAYERS = (256, 128)
CRIT_HID_LAYERS = (256, 128)
ACT_ADD_BN = (True)
CRIT_ADD_BN = (True)
GRAD_CLIP = (False, 1.) # (actor, critic)
BATCH_SIZE = 256
LEARNING_RATES = (1e-3, 1e-3) # (actor, critic)
WEIGHT_DECAY = (0, 0) # (actor, critic)
SOFT_UPD_PARAM = 2e-3
UPDATE_EVERY = 1
BUFFER_SIZE = int(1e6)
LEARN_EVERY = 1
LEARN_NUM = 10
g_noise = GaussianProcess(action_size, RND_SEED, mu=NOISE_MU, sigma=NOISE_SIGMA)
ddpg = AgentDDPG(state_size=state_size, action_size=action_size, gamma=GAMMA,
actor_hidden_layers=ACT_HID_LAYERS,
critic_hidden_layers=CRIT_HID_LAYERS,
actor_add_bn=ACT_ADD_BN,
critic_add_bn=CRIT_ADD_BN,
grad_clipping=GRAD_CLIP,
learning_rates=LEARNING_RATES,
weight_decay=WEIGHT_DECAY,
batch_size=BATCH_SIZE,
soft_upd_param=SOFT_UPD_PARAM,
update_every=UPDATE_EVERY,
buffer_size=BUFFER_SIZE,
noise=g_noise,
learn_every=LEARN_EVERY,
learn_num=LEARN_NUM,
seed=RND_SEED)
path_agent = os.path.join('models', EXP_FOLDER)
scores_agent = train(env, brain_name, ddpg, n_episodes=N_EPISODES, max_t=MAX_STEPS, solved=SOLVED_AT,
action_scaler_fn=action_scaler, add_noise=True, noise_decay=NOISE_DECAY, min_noise_weight=NOISE_MIN_WEIGHT,
model_save_path=path_agent)
scores_agent['experiment'] = EXP_NAME
checkpoint_metadata = pd.Series(index=['N_episodes', 'gamma', 'actor_hidden_layers', 'critic_hidden_layers',
'grad_clipping', 'batch_size', 'learning_rates',
'soft_upd_param', 'update_every', 'buffer_size', 'noise', 'learn_every', 'learn_num', 'solved',
'checkpoint_folder'],
data = [len(scores_agent), GAMMA, ACT_HID_LAYERS, CRIT_HID_LAYERS,
GRAD_CLIP, BATCH_SIZE, LEARNING_RATES,SOFT_UPD_PARAM, UPDATE_EVERY, BUFFER_SIZE, 'g-noise', LEARN_EVERY, LEARN_NUM, False, EXP_FOLDER], name=f'experiment:{EXP_NAME}')
checkpoint_metadata
experiment_dt = dt.datetime.strftime(dt.datetime.now(), "%Y%m%d%H%M%S")
checkpoint_metadata.to_json(f'models/experiments/hparams_{experiment_dt}.json')
scores_agent.to_csv(f'models/experiments/scores_{experiment_dt}.csv')
env.close()
###Output
_____no_output_____ |
explore-scikit-learn/Scikit.ipynb | ###Markdown
Supervised Learning and K Nearest Neighbors Exercises IntroductionWe will be using customer churn data from the telecom industry. We will load this data and use K-nearest neighbors to predict customer churn based on account characteristics. The data we will use are in a file called `Orange_Telecom_Churn_Data_OK.csv` found in the [GitHub repository](https://github.com/rosalvoneto/InteligenciaComputacional). Question 1* Begin by importing the data. Examine the columns and data.
###Code
# Import the data
import pandas as pd
df = pd.read_csv('data/Orange_Telecom_Churn_Data_OK.csv')
df
###Output
_____no_output_____
###Markdown
Question 2* Notice that the data contains a phone number. Do you think this is a good feature to use when building a machine learning model? Why or why not? We will not be using it, so it can be dropped from the data.
###Code
# Remove phone_number column
df = df.drop(['phone_number'], axis = 1)
###Output
_____no_output_____
###Markdown
Question 3* Separate the feature columns (everything except `churned`) from the label (`churned`). This will create two tables.* Fit a K-nearest neighbors model with a value of `k=3` to this data and predict the outcome on the same data.
###Code
# Split the data into two dataframes
df_label = df.churned
df_feature = df.drop(['churned'], axis = 1)
print(df_label)
df_feature
# Fit a K-nearest neighbors model with a value of k=3 and predict on the same data
from sklearn.neighbors import KNeighborsClassifier
KNN = KNeighborsClassifier(n_neighbors=3)
KNN = KNN.fit(df_feature, df_label)
y_pred = KNN.predict(df_feature)
###Output
_____no_output_____
###Markdown
Question 4Ways to measure error haven't been discussed in class yet, but accuracy is an easy one to understand--it is simply the percent of labels that were correctly predicted (either true or false). * Write a function to calculate accuracy using the actual and predicted labels.* Using the function, calculate the accuracy of this K-nearest neighbors model on the data.
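For reference, a minimal accuracy function written directly with NumPy (a sketch, equivalent to sklearn's `accuracy_score` for these labels) could be:
```python
import numpy as np

def accuracy(y_true, y_pred):
    """Fraction of labels that were predicted correctly."""
    return (np.asarray(y_true) == np.asarray(y_pred)).mean()
```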
###Code
# Function to calculate accuracy
from sklearn.metrics import accuracy_score
def knn_accuracy(X_data, Y_data):
KNN = KNeighborsClassifier(n_neighbors=3)
KNN = KNN.fit(X_data, Y_data)
Y_predicted = KNN.predict(X_data)
return accuracy_score(Y_data, Y_predicted)
# Using the function
knn_accuracy(df_feature, df_label)
###Output
_____no_output_____
###Markdown
Question 5* Fit a K-nearest neighbors model using values of `k` (`n_neighbors`) ranging from 1 to 20. Store the accuracy and the value of `k` used from each of these fits in a list or dictionary.* Plot (or view the table of) the `accuracy` vs `k`. What do you notice happens when `k=1`? Why do you think this is? *Hint:* it's for the same reason discussed above.
###Code
# K-nearest neighbors model
fits = {
    'k': list(range(1, 21)),
    'accuracy': [
        accuracy_score(
            df_label,
            KNeighborsClassifier(n_neighbors=k).fit(df_feature, df_label).predict(df_feature)
        ) for k in range(1, 21)
    ]
}
fits
# Plot
import matplotlib.pyplot as plt
plt.xlabel('K')
plt.ylabel('accuracy')
plt.title('KNN Accuracy')
ticks = [ x for x in fits['k'] if x%2 == 1 ]
plt.xticks(ticks, ticks)
plt.bar(fits['k'], fits['accuracy'], width=0.5)
###Output
_____no_output_____ |
Basic ML algorithms/Linear Regression-2.ipynb | ###Markdown
Linear Regression. Packages used: numpy, matplotlib. Import the necessary packages.
###Code
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
plt.rcParams['figure.figsize'] = [10, 8]
###Output
_____no_output_____
###Markdown
ReadData(data, separator): Helper function to read data. Assumes each row of the data is of the form X[0], X[1], ..., X[n], Y, where X[i] is a feature and Y is the label.
###Code
def ReadData(data, separator):
XY = np.genfromtxt(data, delimiter=separator)
m = XY.shape[0]
Y = XY[:, -1].reshape(m, 1)
X = XY[:, 0:-1]
bias = np.zeros((1, 1))
theta = np.zeros((X.shape[1], 1))
return X, Y, m, bias, theta
###Output
_____no_output_____
###Markdown
Normalize(data): Helper function to Normalize data
###Code
def Normalize(data):
    Mu = np.mean(data, axis=0, keepdims=True)
    Sigma = np.std(data, axis=0, keepdims=True)
data = ((data-Mu)/Sigma)
return data, Mu, Sigma
###Output
_____no_output_____
###Markdown
GradDescent_CostCalc(iter1, X, theta, bias, Y, learningratebym, costweight): Function to calculate the costs and the final theta and bias using Gradient Descent
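Written out, each iteration of the function below evaluates the cost and applies the standard batch gradient-descent updates (with $\alpha$ the learning rate and $m$ the number of samples; `learningratebym` and `costweight` in the main code correspond to $\alpha/m$ and $1/2m$):

$$J(\theta, b) = \frac{1}{2m}\sum_{i=1}^{m}\left(x^{(i)}\theta + b - y^{(i)}\right)^2$$

$$\theta \leftarrow \theta - \frac{\alpha}{m}X^{T}\left(X\theta + b - y\right), \qquad b \leftarrow b - \frac{\alpha}{m}\sum_{i=1}^{m}\left(x^{(i)}\theta + b - y^{(i)}\right)$$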
###Code
def GradDescent_CostCalc(iter1, X, theta, bias, Y, learningratebym, costweight):
costs = []
for i in range(iter1):
H = np.dot(X, theta) + bias
diff = H - Y
delta = learningratebym * np.dot(diff.T, X).T
theta = theta - delta
bias = bias - (learningratebym * np.sum(diff,keepdims = True))
J = costweight * sum(np.square(diff))
costs.append(J.item(0))
return costs, bias, theta
###Output
_____no_output_____
###Markdown
CostCalc(X,theta,bias,Y,costweight): Function to calculate cost
###Code
def CostCalc(X, theta, bias, Y, costweight):
H = np.dot(X, theta) + bias
diff = H - Y
J = costweight * sum(np.square(diff))
return J
###Output
_____no_output_____
###Markdown
PlotData(Original_X, Normalized_X, Y, trainedtheta, trainedbias, costs, fignumber=1): Helper function to plot the data, the predicted target and the costs
###Code
def PlotData(Original_X, Normalized_X, Y, trainedtheta, trainedbias, costs, fignumber=1):
plt.style.use('ggplot')
fig = plt.figure(fignumber)
ax = fig.add_subplot(111, projection='3d')
ax.scatter(Original_X[:,0], Original_X[:,1], Y[:,0], marker = '*', c='#ef0909')
Y = np.linspace(min(Original_X[:,1]), max(Original_X[:,1]), 5)
X = np.linspace(min(Original_X[:,0]), max(Original_X[:,0]), 100)
X1 = np.array([ (i,j) for i in X for j in Y])
Z = np.dot(Normalize(X1)[0],trainedtheta) + trainedbias
X, Y = np.meshgrid(X, Y, sparse=False, indexing='ij')
ax.scatter(X, Y, Z, marker = '*', c='#493d35')
ax.set_xlabel('Feature 0')
ax.set_ylabel('Feature 1')
ax.set_zlabel('Label')
plt.figure(fignumber+1)
plt.ylabel('Cost')
plt.xlabel('Iteration')
    plt.plot(range(len(costs)), costs, '-')
return
###Output
_____no_output_____
###Markdown
Main Code below
###Code
X, Y, m, bias, theta = ReadData('LinRegDS2.txt', ',')
Original_X = X
X, Mu, Sigma = Normalize(X)
learningrate = 0.1
iter1 = 50
learningratebym = learningrate/m
costweight = 1/(2*m)
costs, trainedbias, trainedtheta = GradDescent_CostCalc(iter1, X, theta, bias, Y,
learningratebym, costweight)
PlotData(Original_X, X, Y, trainedtheta, trainedbias, costs)
actual_input = np.array([1650,3]).reshape(1, 2)
normalized_input = (actual_input-Mu)/Sigma
print(f'For input features {actual_input.item(0)} and {actual_input.item(1)},'
      f' we predict a target value of {(trainedbias + np.dot(normalized_input, trainedtheta)).item(0)}')
###Output
For input features 1650 and 3, we predict a target value of 292679.0716800462
|
deep-learning-aml/2019/week_2/ML.ipynb | ###Markdown
Assignment 1The goal of this assignment is to supply you with machine learning models and algorithms. In this notebook, we will cover linear and nonlinear models, the concept of loss functions and some optimization techniques. All mathematical operations should be implemented in **NumPy** only. Table of contents* [1. Logistic Regression](1.-Logistic-Regression) * [1.1 Linear Mapping](1.1-Linear-Mapping) * [1.2 Sigmoid](1.2-Sigmoid) * [1.3 Negative Log Likelihood](1.3-Negative-Log-Likelihood) * [1.4 Model](1.4-Model) * [1.5 Simple Experiment](1.5-Simple-Experiment)* [2. Decision Tree](2.-Decision-Tree) * [2.1 Gini Index & Data Split](2.1-Gini-Index-&-Data-Split) * [2.2 Terminal Node](2.2-Terminal-Node) * [2.3 Build the Decision Tree](2.3-Build-the-Decision-Tree)* [3. Experiments](3.-Experiments) * [3.1 Decision Tree for Heart Disease Prediction](3.1-Decision-Tree-for-Heart-Disease-Prediction) * [3.2 Logistic Regression for Heart Disease Prediction](3.2-Logistic-Regression-for-Heart-Disease-Prediction) NoteSome of the concepts below have not (yet) been discussed during the lecture. These will be discussed further during the next lectures. Before you beginTo check whether the code you've written is correct, we'll use **automark**. For this, we created for each of you an account with the username being your student number.
###Code
import automark as am
# fill in you student number as your username
username = '12743674'
# to check your progress, you can run this function
am.get_progress(username)
###Output
---------------------------------------------
| Maria Dalavagka |
| [email protected] |
---------------------------------------------
| W2: linear_forward | completed |
| W2: linear_grad_W | completed |
| W2: linear_grad_b | completed |
| W2: nll_forward | completed |
| W2: nll_grad_input | completed |
| W2: sigmoid_forward | completed |
| W2: sigmoid_grad_input | completed |
| W2: tree_gini_index | completed |
| W2: tree_split_data_left | completed |
| W2: tree_split_data_right| completed |
| W2: tree_to_terminal | completed |
| W3: box_blur | not attempted |
| W3: conv_matrix | completed |
| W3: dense_forward | completed |
| W3: dense_grad_W | completed |
| W3: dense_grad_b | completed |
| W3: dense_grad_input | completed |
| W3: flatten_forward | not attempted |
| W3: flatten_grad_input | not attempted |
| W3: l2_regularizer | completed |
| W3: maxpool_forward | not attempted |
| W3: relu_forward | completed |
| W3: relu_grad_input | completed |
---------------------------------------------
###Markdown
So far all your tests are 'not attempted'. At the end of this notebook you'll need to have completed all test. The output of `am.get_progress(username)` should at least match the example below. However, we encourage you to take a shot at the 'not attempted' tests!```---------------------------------------------| Your name / student number || your_email@your_domain.whatever |---------------------------------------------| linear_forward | not attempted || linear_grad_W | not attempted || linear_grad_b | not attempted || nll_forward | not attempted || nll_grad_input | not attempted || sigmoid_forward | not attempted || sigmoid_grad_input | not attempted || tree_data_split_left | not attempted || tree_data_split_right | not attempted || tree_gini_index | not attempted || tree_to_terminal | not attempted |---------------------------------------------```
###Code
from __future__ import print_function, absolute_import, division # You don't need to know what this is.
import numpy as np # this imports numpy, which is used for vector- and matrix calculations
###Output
_____no_output_____
###Markdown
This notebook makes use of **classes** and their **instances** that we have already implemented for you. It allows us to write less code and make it more readable. If you are interested in it, here are some useful links:* The official [documentation](https://docs.python.org/3/tutorial/classes.html) * Video by *sentdex*: [Object Oriented Programming Introduction](https://www.youtube.com/watch?v=ekA6hvk-8H8)* Antipatterns in OOP: [Stop Writing Classes](https://www.youtube.com/watch?v=o9pEzgHorH0) 1. Logistic RegressionWe start with a very simple algorithm called **Logistic Regression**. It is a generalized linear model for 2-class classification.It can be generalized to the case of many classes and to non-linear cases as well. However, here we consider only the simplest case. Let us consider a data with 2 classes. Class 0 and class 1. For a given test sample, logistic regression returns a value from $[0, 1]$ which is interpreted as a probability of belonging to class 1. The set of points for which the prediction is $0.5$ is called a *decision boundary*. It is a line on a plane or a hyper-plane in a space. Logistic regression has two trainable parameters: a weight $W$ and a bias $b$. For a vector of features $X$, the prediction of logistic regression is given by$$f(X) = \frac{1}{1 + \exp(-[XW + b])} = \sigma(h(X))$$where $\sigma(z) = \frac{1}{1 + \exp(-z)}$ and $h(X)=XW + b$.Parameters $W$ and $b$ are fitted by maximizing the log-likelihood (or minimizing the negative log-likelihood) of the model on the training data. For a training subset $\{X_j, Y_j\}_{j=1}^N$ the normalized negative log likelihood (NLL) is given by $$\mathcal{L} = -\frac{1}{N}\sum_j \log\Big[ f(X_j)^{Y_j} \cdot (1-f(X_j))^{1-Y_j}\Big]= -\frac{1}{N}\sum_j \Big[ Y_j\log f(X_j) + (1-Y_j)\log(1-f(X_j))\Big]$$ There are different ways of fitting this model. In this assignment we consider Logistic Regression as a one-layer neural network. We use the following algorithm for the **forward** pass:1. Linear mapping: $h=XW + b$2. Sigmoid activation function: $f=\sigma(h)$3. Calculation of NLL: $\mathcal{L} = -\frac{1}{N}\sum_j \Big[ Y_j\log f_j + (1-Y_j)\log(1-f_j)\Big]$ In order to fit $W$ and $b$ we perform Gradient Descent ([GD](https://en.wikipedia.org/wiki/Gradient_descent)). We choose a small learning rate $\gamma$ and after each computation of forward pass, we update the parameters $$W_{\text{new}} = W_{\text{old}} - \gamma \frac{\partial \mathcal{L}}{\partial W}$$$$b_{\text{new}} = b_{\text{old}} - \gamma \frac{\partial \mathcal{L}}{\partial b}$$We use Backpropagation method ([BP](https://en.wikipedia.org/wiki/Backpropagation)) to calculate the partial derivatives of the loss function with respect to the parameters of the model.$$\frac{\partial\mathcal{L}}{\partial W} = \frac{\partial\mathcal{L}}{\partial h} \frac{\partial h}{\partial W} =\frac{\partial\mathcal{L}}{\partial f} \frac{\partial f}{\partial h} \frac{\partial h}{\partial W}$$$$\frac{\partial\mathcal{L}}{\partial b} = \frac{\partial\mathcal{L}}{\partial h} \frac{\partial h}{\partial b} =\frac{\partial\mathcal{L}}{\partial f} \frac{\partial f}{\partial h} \frac{\partial h}{\partial b}$$ 1.1 Linear MappingFirst of all, you need to implement the forward pass of a linear mapping:$$h(X) = XW +b$$ **Note**: here we use `n_out` as the dimensionality of the output. For logisitc regression `n_out = 1`. However, we will work with cases of `n_out > 1` in next assignments. You will **pass** the current assignment even if your implementation works only in case `n_out = 1`. 
If your implementation works for the cases of `n_out > 1` then you will not have to modify your method next week. All **numpy** operations are generic. It is recommended to use numpy whenever it is possible.
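To see how the pieces fit together, here is a compact, self-contained toy sketch (made-up data, plain NumPy, not part of the graded API) of one forward pass, one backward pass and one gradient-descent update. The combined gradient $\frac{\partial \mathcal{L}}{\partial h} = \frac{1}{N}(f - Y)$ that it uses follows from the derivations further below.
```python
import numpy as np

rng = np.random.RandomState(0)
X = rng.randn(6, 2)                                        # 6 objects, 2 features
Y = (X[:, :1] + X[:, 1:] > 0).astype(float)                # toy labels of shape (6, 1)
W, b, gamma = np.zeros((2, 1)), np.zeros(1), 0.1           # parameters and learning rate

# forward pass
h = np.dot(X, W) + b                                       # linear mapping
f = 1.0 / (1.0 + np.exp(-h))                               # sigmoid
loss = -np.mean(Y * np.log(f) + (1 - Y) * np.log(1 - f))   # NLL

# backward pass and update
grad_h = (f - Y) / len(Y)                                  # dL/dh = (dL/df) * (df/dh)
W = W - gamma * np.dot(X.T, grad_h)                        # dL/dW = X^T (dL/dh)
b = b - gamma * grad_h.sum(axis=0)                         # dL/db = column sum of dL/dh
print(loss)
```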
###Code
def linear_forward(x_input, W, b):
"""Perform the mapping of the input
# Arguments
x_input: input of the linear function - np.array of size `(n_objects, n_in)`
W: np.array of size `(n_in, n_out)`
b: np.array of size `(n_out,)`
# Output
the output of the linear function
np.array of size `(n_objects, n_out)`
"""
# output = x_input * np.transpose(W) + b
output = np.dot(x_input, W) + b
return output
###Output
_____no_output_____
###Markdown
Let's check your first function. We set the matrices $X, W, b$:$$X = \begin{bmatrix}1 & -1 \\-1 & 0 \\1 & 1 \\\end{bmatrix} \quadW = \begin{bmatrix}4 \\2 \\\end{bmatrix} \quadb = \begin{bmatrix}3 \\\end{bmatrix}$$And then compute $$XW = \begin{bmatrix}1 & -1 \\-1 & 0 \\1 & 1 \\\end{bmatrix}\begin{bmatrix}4 \\2 \\\end{bmatrix} =\begin{bmatrix}2 \\-4 \\6 \\\end{bmatrix} \\XW + b = \begin{bmatrix}5 \\-1 \\9 \\\end{bmatrix} $$
###Code
X_test = np.array([[1, -1],
[-1, 0],
[1, 1]])
W_test = np.array([[4],
[2]])
b_test = np.array([3])
h_test = linear_forward(X_test, W_test, b_test)
print(h_test)
am.test_student_function(username, linear_forward, ['x_input', 'W', 'b'])
###Output
Running local tests...
linear_forward successfully passed local tests
Running remote test...
Test was successful. Congratulations!
###Markdown
Now you need to implement the calculation of the partial derivative of the loss function with respect to the parameters of the model. As these expressions are used for the updates of the parameters, we refer to them as gradients.$$\frac{\partial \mathcal{L}}{\partial W} = \frac{\partial \mathcal{L}}{\partial h}\frac{\partial h}{\partial W} \\\frac{\partial \mathcal{L}}{\partial b} = \frac{\partial \mathcal{L}}{\partial h}\frac{\partial h}{\partial b} \\$$
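For reference, since $h = XW + b$ is linear in both parameters, the standard results (consistent with the implementations below) are

$$\frac{\partial \mathcal{L}}{\partial W} = X^{T}\frac{\partial \mathcal{L}}{\partial h}, \qquad \frac{\partial \mathcal{L}}{\partial b} = \sum_{j=1}^{N}\left(\frac{\partial \mathcal{L}}{\partial h}\right)_{j},$$

where the sum over $j$ runs over the objects (rows).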
###Code
def linear_grad_W(x_input, grad_output, W, b):
"""Calculate the partial derivative of
the loss with respect to W parameter of the function
dL / dW = (dL / dh) * (dh / dW)
# Arguments
x_input: input of a dense layer - np.array of size `(n_objects, n_in)`
grad_output: partial derivative of the loss functions with
respect to the ouput of the dense layer (dL / dh)
np.array of size `(n_objects, n_out)`
W: np.array of size `(n_in, n_out)`
b: np.array of size `(n_out,)`
# Output
the partial derivative of the loss
with respect to W parameter of the function
np.array of size `(n_in, n_out)`
"""
# grad_W = np.gradient(np.dot(x_input, W) + b, W)
# grad_W = grad_output * np.gradient(linear_forward(x_input, W, b), W)
grad_W = np.dot(np.transpose(x_input), grad_output)
return grad_W
am.test_student_function(username, linear_grad_W, ['x_input', 'grad_output', 'W', 'b'])
def linear_grad_b(x_input, grad_output, W, b):
"""Calculate the partial derivative of
the loss with respect to b parameter of the function
dL / db = (dL / dh) * (dh / db)
# Arguments
x_input: input of a dense layer - np.array of size `(n_objects, n_in)`
grad_output: partial derivative of the loss functions with
respect to the ouput of the linear function (dL / dh)
np.array of size `(n_objects, n_out)`
W: np.array of size `(n_in, n_out)`
b: np.array of size `(n_out,)`
# Output
the partial derivative of the loss
with respect to b parameter of the linear function
np.array of size `(n_out,)`
"""
# grad_b = np.dot(np.transpose(1), grad_output)
# grad_b = 1* grad_output
grad_b = np.dot(np.transpose(np.ones(grad_output.shape)), grad_output)
return grad_b
am.test_student_function(username, linear_grad_b, ['x_input', 'grad_output', 'W', 'b'])
am.get_progress(username)
###Output
_____no_output_____
###Markdown
1.2 Sigmoid$$f = \sigma(h) = \frac{1}{1 + e^{-h}} $$Sigmoid function is applied element-wise. It does not change the dimensionality of the tensor and its implementation is shape-agnostic in general.
###Code
def sigmoid_forward(x_input):
"""sigmoid nonlinearity
# Arguments
x_input: np.array of size `(n_objects, n_in)`
# Output
        the output of the sigmoid function
np.array of size `(n_objects, n_in)`
"""
output = 1 / (1 + np.exp(- x_input))
return output
am.test_student_function(username, sigmoid_forward, ['x_input'])
###Output
_____no_output_____
###Markdown
Now you need to implement the calculation of the partial derivative of the loss function with respect to the input of sigmoid. $$\frac{\partial \mathcal{L}}{\partial h} = \frac{\partial \mathcal{L}}{\partial f}\frac{\partial f}{\partial h} $$Tensor $\frac{\partial \mathcal{L}}{\partial f}$ comes from the loss function. Let's calculate $\frac{\partial f}{\partial h}$$$\frac{\partial f}{\partial h} = \frac{\partial \sigma(h)}{\partial h} =\frac{\partial}{\partial h} \Big(\frac{1}{1 + e^{-h}}\Big)= \frac{e^{-h}}{(1 + e^{-h})^2}= \frac{1}{1 + e^{-h}} \frac{e^{-h}}{1 + e^{-h}}= f(h) (1 - f(h))$$Therefore, in order to calculate the gradient of the loss with respect to the input of sigmoid function you need to 1. calculate $f(h) (1 - f(h))$ 2. multiply it element-wise by $\frac{\partial \mathcal{L}}{\partial f}$
###Code
def sigmoid_grad_input(x_input, grad_output):
"""sigmoid nonlinearity gradient.
Calculate the partial derivative of the loss
with respect to the input of the layer
# Arguments
x_input: np.array of size `(n_objects, n_in)`
grad_output: np.array of size `(n_objects, n_in)`
dL / df
# Output
the partial derivative of the loss
with respect to the input of the function
np.array of size `(n_objects, n_in)`
dL / dh
"""
# grad_input = np.dot(np.transpose((1/(1+np.exp(- x_input)))*(np.exp(- x_input)/(1+np.exp(- x_input))), grad_output))
grad_input = (1/(1+np.exp(- x_input))*(np.exp(- x_input)/(1+np.exp(- x_input)))) * grad_output
return grad_input
am.test_student_function(username, sigmoid_grad_input, ['x_input', 'grad_output'])
###Output
_____no_output_____
###Markdown
1.3 Negative Log Likelihood $$\mathcal{L} = -\frac{1}{N}\sum_j \Big[ Y_j\log \dot{Y}_j + (1-Y_j)\log(1-\dot{Y}_j)\Big]$$Here $N$ is the number of objects. $Y_j$ is the real label of an object and $\dot{Y}_j$ is the predicted one.
###Code
def nll_forward(target_pred, target_true):
"""Compute the value of NLL
for a given prediction and the ground truth
# Arguments
target_pred: predictions - np.array of size `(n_objects, 1)`
target_true: ground truth - np.array of size `(n_objects, 1)`
# Output
the value of NLL for a given prediction and the ground truth
scalar
"""
output = -(1/len(target_pred))*np.sum(target_true*np.log(target_pred)+(1-target_true)*np.log(1-target_pred))
return output
am.test_student_function(username, nll_forward, ['target_pred', 'target_true'])
###Output
_____no_output_____
###Markdown
Now you need to calculate the partial derivative of NLL with with respect to its input.$$\frac{\partial \mathcal{L}}{\partial \dot{Y}}=\begin{pmatrix}\frac{\partial \mathcal{L}}{\partial \dot{Y}_0} \\\frac{\partial \mathcal{L}}{\partial \dot{Y}_1} \\\vdots \\\frac{\partial \mathcal{L}}{\partial \dot{Y}_N}\end{pmatrix}$$Let's do it step-by-step\begin{equation}\begin{split}\frac{\partial \mathcal{L}}{\partial \dot{Y}_0} &= \frac{\partial}{\partial \dot{Y}_0} \Big(-\frac{1}{N}\sum_j \Big[ Y_j\log \dot{Y}_j + (1-Y_j)\log(1-\dot{Y}_j)\Big]\Big) \\&= -\frac{1}{N} \frac{\partial}{\partial \dot{Y}_0} \Big(Y_0\log \dot{Y}_0 + (1-Y_0)\log(1-\dot{Y}_0)\Big) \\&= -\frac{1}{N} \Big(\frac{Y_0}{\dot{Y}_0} - \frac{1-Y_0}{1-\dot{Y}_0}\Big)= \frac{1}{N} \frac{\dot{Y}_0 - Y_0}{\dot{Y}_0 (1 - \dot{Y}_0)}\end{split}\end{equation}And for the other components it can be done in exactly the same way. So the result is the vector where each component is given by $$\frac{1}{N} \frac{\dot{Y}_j - Y_j}{\dot{Y}_j (1 - \dot{Y}_j)}$$Or if we assume all multiplications and divisions to be done element-wise the output can be calculated as$$\frac{\partial \mathcal{L}}{\partial \dot{Y}} = \frac{1}{N} \frac{\dot{Y} - Y}{\dot{Y} (1 - \dot{Y})}$$
###Code
def nll_grad_input(target_pred, target_true):
"""Compute the partial derivative of NLL
with respect to its input
# Arguments
target_pred: predictions - np.array of size `(n_objects, 1)`
target_true: ground truth - np.array of size `(n_objects, 1)`
# Output
the partial derivative
of NLL with respect to its input
np.array of size `(n_objects, 1)`
"""
grad_input = (1/len(target_pred))*((target_pred-target_true)/(target_pred*(1-target_pred)))
return grad_input
am.test_student_function(username, nll_grad_input, ['target_pred', 'target_true'])
am.get_progress(username)
###Output
_____no_output_____
###Markdown
1.4 ModelHere we provide a model for you. It consists of the functions which you have implemented above
###Code
class LogsticRegressionGD(object):
def __init__(self, n_in, lr=0.05):
super().__init__()
self.lr = lr
self.b = np.zeros(1, )
self.W = np.random.randn(n_in, 1)
def forward(self, x):
self.h = linear_forward(x, self.W, self.b)
y = sigmoid_forward(self.h)
return y
def update_params(self, x, nll_grad):
# compute gradients
grad_h = sigmoid_grad_input(self.h, nll_grad)
grad_W = linear_grad_W(x, grad_h, self.W, self.b)
grad_b = linear_grad_b(x, grad_h, self.W, self.b)
# update params
self.W = self.W - self.lr * grad_W
self.b = self.b - self.lr * grad_b
###Output
_____no_output_____
###Markdown
1.5 Simple Experiment
###Code
import matplotlib.pyplot as plt
%matplotlib inline
# Generate some data
def generate_2_circles(N=100):
phi = np.linspace(0.0, np.pi * 2, 100)
X1 = 1.1 * np.array([np.sin(phi), np.cos(phi)])
X2 = 3.0 * np.array([np.sin(phi), np.cos(phi)])
Y = np.concatenate([np.ones(N), np.zeros(N)]).reshape((-1, 1))
X = np.hstack([X1,X2]).T
return X, Y
def generate_2_gaussians(N=100):
phi = np.linspace(0.0, np.pi * 2, 100)
X1 = np.random.normal(loc=[1, 2], scale=[2.5, 0.9], size=(N, 2))
X1 = X1.dot(np.array([[0.7, -0.7], [0.7, 0.7]]))
X2 = np.random.normal(loc=[-2, 0], scale=[1, 1.5], size=(N, 2))
X2 = X2.dot(np.array([[0.7, 0.7], [-0.7, 0.7]]))
Y = np.concatenate([np.ones(N), np.zeros(N)]).reshape((-1, 1))
X = np.vstack([X1,X2])
return X, Y
def split(X, Y, train_ratio=0.7):
size = len(X)
train_size = int(size * train_ratio)
indices = np.arange(size)
np.random.shuffle(indices)
train_indices = indices[:train_size]
test_indices = indices[train_size:]
return X[train_indices], Y[train_indices], X[test_indices], Y[test_indices]
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4))
X, Y = generate_2_circles()
ax1.scatter(X[:,0], X[:,1], c=Y.ravel(), edgecolors= 'none')
ax1.set_aspect('equal')
X, Y = generate_2_gaussians()
ax2.scatter(X[:,0], X[:,1], c=Y.ravel(), edgecolors= 'none')
ax2.set_aspect('equal')
X_train, Y_train, X_test, Y_test = split(*generate_2_gaussians(), 0.7)
# let's train our model
model = LogsticRegressionGD(2, 0.05)
for step in range(30):
Y_pred = model.forward(X_train)
loss_value = nll_forward(Y_pred, Y_train)
accuracy = ((Y_pred > 0.5) == Y_train).mean()
print('Step: {} \t Loss: {:.3f} \t Acc: {:.1f}%'.format(step, loss_value, accuracy * 100))
loss_grad = nll_grad_input(Y_pred, Y_train)
model.update_params(X_train, loss_grad)
print('\n\nTesting...')
Y_test_pred = model.forward(X_test)
test_accuracy = ((Y_test_pred > 0.5) == Y_test).mean()
print('Acc: {:.1f}%'.format(test_accuracy * 100))
def plot_model_prediction(prediction_func, X, Y, hard=True):
u_min = X[:, 0].min()-1
u_max = X[:, 0].max()+1
v_min = X[:, 1].min()-1
v_max = X[:, 1].max()+1
U, V = np.meshgrid(np.linspace(u_min, u_max, 100), np.linspace(v_min, v_max, 100))
UV = np.stack([U.ravel(), V.ravel()]).T
c = prediction_func(UV).ravel()
if hard:
c = c > 0.5
plt.scatter(UV[:,0], UV[:,1], c=c, edgecolors= 'none', alpha=0.15)
plt.scatter(X[:,0], X[:,1], c=Y.ravel(), edgecolors= 'black')
plt.xlim(left=u_min, right=u_max)
plt.ylim(bottom=v_min, top=v_max)
plt.axes().set_aspect('equal')
plt.show()
plot_model_prediction(lambda x: model.forward(x), X_train, Y_train, False)
plot_model_prediction(lambda x: model.forward(x), X_train, Y_train, True)
# Now run the same experiment on 2 circles
# run the same code with 2 training sets (?)
# split the data 50-50 (?) and run it again
###Output
_____no_output_____
###Markdown
2. Decision TreeThe next model we look at is called **Decision Tree**. This type of model is non-parametric, meaning in contrast to **Logistic Regression** we do not have any parameters here that need to be trained.Let us consider a simple binary decision tree for deciding on the two classes of "creditable" and "Not creditable".Each node, except the leafs, asks a question about the the client in question. A decision is made by going from the root node to a leaf node, while considering the clients situation. The situation of the client, in this case, is fully described by the features:1. Checking account balance2. Duration of requested credit3. Payment status of previous loan4. Length of current employment In order to build a decision tree we need training data. To carry on the previous example: we need a number of clients for which we know the properties 1.-4. and their creditability.The process of building a decision tree starts with the root node and involves the following steps:1. Choose a splitting criteria and add it to the current node.2. Split the dataset at the current node into those that fullfil the criteria and those that do not.3. Add a child node for each data split.4. For each child node decide on either A. or B.: 1. Repeat from 1. step 2. Make it a leaf node: The predicted class label is decided by the majority vote over the training data in the current split. 2.1 Gini Index & Data SplitDeciding on how to split your training data at each node is dominated by the following two criterias:1. Does the rule help me make a final decision?2. Is the rule general enough such that it applies not only to my training data, but also to new unseen examples?When considering our previous example, splitting the clients by their handedness would not help us deciding on their creditability. Knowning if a rule will generalize is usually a hard call to make, but in practice we rely on [Occam's razor](https://en.wikipedia.org/wiki/Occam%27s_razor) principle. Thus the less rules we use, the better we believe it to generalize to previously unseen examples.One way to measure the quality of a rule is by the [**Gini Index**](https://en.wikipedia.org/wiki/Gini_coefficient).Since we only consider binary classification, it is calculated by:$$Gini = \sum_{n\in\{L,R\}}\frac{|S_n|}{|S|}\left( 1 - \sum_{c \in C} p_{S_n}(c)^2\right)\\p_{S_n}(c) = \frac{|\{\mathbf{x}_{i}\in \mathbf{X}|y_{i} = c, i \in S_n\}|}{|S_n|}, n \in \{L, R\}$$with $|C|=2$ being your set of class labels and $S_L$ and $S_R$ the two splits determined by the splitting criteria.The lower the gini score, the better the split. In the extreme case, where all class labels are the same in each split respectively, the gini index takes the value of $0$.
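As a small worked example with made-up labels: if a candidate split sends the labels $\{0, 0, 1\}$ to the left and $\{1, 1\}$ to the right, then

$$Gini = \frac{3}{5}\left(1 - \left(\tfrac{2}{3}\right)^2 - \left(\tfrac{1}{3}\right)^2\right) + \frac{2}{5}\left(1 - 0^2 - 1^2\right) = \frac{3}{5}\cdot\frac{4}{9} + 0 \approx 0.27,$$

so the pure right-hand split contributes nothing, and a split that is pure on both sides would score exactly $0$.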
###Code
def tree_gini_index(Y_left, Y_right, classes):
"""Compute the Gini Index.
# Arguments
Y_left: class labels of the data left set
np.array of size `(n_objects, 1)`
Y_right: class labels of the data right set
np.array of size `(n_objects, 1)`
classes: list of all class values
# Output
gini: scalar `float`
"""
    gini = 0.0
    n_total = len(Y_left) + len(Y_right)
    # each split contributes its size-weighted impurity; an empty split contributes nothing
    if len(Y_left) > 0:
        pL0 = (Y_left == classes[0]).sum() / len(Y_left)
        pL1 = (Y_left == classes[1]).sum() / len(Y_left)
        gini += (len(Y_left) / n_total) * (1 - (pL0**2 + pL1**2))
    if len(Y_right) > 0:
        pR0 = (Y_right == classes[0]).sum() / len(Y_right)
        pR1 = (Y_right == classes[1]).sum() / len(Y_right)
        gini += (len(Y_right) / n_total) * (1 - (pR0**2 + pR1**2))
    return gini
am.test_student_function(username, tree_gini_index, ['Y_left', 'Y_right', 'classes'])
###Output
_____no_output_____
###Markdown
At each node in the tree, the data is split according to a split criterion and each split is passed onto the left/right child respectively.Implement the following function to return all rows in `X` and `Y` such that the left child gets all examples that are less than the split value and vice versa.
###Code
def tree_split_data_left(X, Y, feature_index, split_value):
"""Split the data `X` and `Y`, at the feature indexed by `feature_index`.
If the value is less than `split_value` then return it as part of the left group.
# Arguments
X: np.array of size `(n_objects, n_in)`
Y: np.array of size `(n_objects, 1)`
feature_index: index of the feature to split at
split_value: value to split between
# Output
(XY_left): np.array of size `(n_objects_left, n_in + 1)`
"""
X_left, Y_left = None, None
index = np.where(X[:,feature_index] < split_value)
X_left = X[index]
Y_left = Y[index]
XY_left = np.concatenate([X_left, Y_left], axis=-1)
return XY_left
def tree_split_data_right(X, Y, feature_index, split_value):
"""Split the data `X` and `Y`, at the feature indexed by `feature_index`.
If the value is greater or equal than `split_value` then return it as part of the right group.
# Arguments
X: np.array of size `(n_objects, n_in)`
Y: np.array of size `(n_objects, 1)`
feature_index: index of the feature to split at
split_value: value to split between
# Output
(XY_left): np.array of size `(n_objects_left, n_in + 1)`
"""
X_right, Y_right = None, None
index = np.where(X[:,feature_index] >= split_value)
X_right = X[index]
Y_right = Y[index]
XY_right = np.concatenate([X_right, Y_right], axis=-1)
return XY_right
am.test_student_function(username, tree_split_data_left, ['X', 'Y', 'feature_index', 'split_value'])
am.test_student_function(username, tree_split_data_right, ['X', 'Y', 'feature_index', 'split_value'])
am.get_progress(username)
###Output
_____no_output_____
###Markdown
Now to find the split rule with the lowest gini score, we brute-force search over all features and values to split by.
###Code
def tree_best_split(X, Y):
class_values = list(set(Y.flatten().tolist()))
r_index, r_value, r_score = float("inf"), float("inf"), float("inf")
r_XY_left, r_XY_right = (X,Y), (X,Y)
for feature_index in range(X.shape[1]):
for row in X:
XY_left = tree_split_data_left(X, Y, feature_index, row[feature_index])
XY_right = tree_split_data_right(X, Y, feature_index, row[feature_index])
XY_left, XY_right = (XY_left[:,:-1], XY_left[:,-1:]), (XY_right[:,:-1], XY_right[:,-1:])
gini = tree_gini_index(XY_left[1], XY_right[1], class_values)
if gini < r_score:
r_index, r_value, r_score = feature_index, row[feature_index], gini
r_XY_left, r_XY_right = XY_left, XY_right
return {'index':r_index, 'value':r_value, 'XY_left': r_XY_left, 'XY_right':r_XY_right}
###Output
_____no_output_____
###Markdown
2.2 Terminal NodeThe leaf nodes predict the label of an unseen example, by taking a majority vote over all training class labels in that node.
###Code
def tree_to_terminal(Y):
"""The most frequent class label, out of the data points belonging to the leaf node,
is selected as the predicted class.
# Arguments
Y: np.array of size `(n_objects)`
# Output
label: most frequent label of `Y.dtype`
"""
label = None
label = np.argmax(np.bincount(Y.flatten().astype(int)))
return label
am.test_student_function(username, tree_to_terminal, ['Y'])
am.get_progress(username)
###Output
_____no_output_____
###Markdown
2.3 Build the Decision TreeNow we recursively build the decision tree, by greedily splitting the data at each node according to the gini index.To prevent the model from overfitting, we transform a node into a terminal/leaf node, if:1. a maximum depth is reached.2. the node does not reach a minimum number of training samples.
###Code
def tree_recursive_split(X, Y, node, max_depth, min_size, depth):
XY_left, XY_right = node['XY_left'], node['XY_right']
del(node['XY_left'])
del(node['XY_right'])
# check for a no split
if XY_left[0].size <= 0 or XY_right[0].size <= 0:
node['left_child'] = node['right_child'] = tree_to_terminal(np.concatenate((XY_left[1], XY_right[1])))
return
# check for max depth
if depth >= max_depth:
node['left_child'], node['right_child'] = tree_to_terminal(XY_left[1]), tree_to_terminal(XY_right[1])
return
# process left child
if XY_left[0].shape[0] <= min_size:
node['left_child'] = tree_to_terminal(XY_left[1])
else:
node['left_child'] = tree_best_split(*XY_left)
tree_recursive_split(X, Y, node['left_child'], max_depth, min_size, depth+1)
# process right child
if XY_right[0].shape[0] <= min_size:
node['right_child'] = tree_to_terminal(XY_right[1])
else:
node['right_child'] = tree_best_split(*XY_right)
tree_recursive_split(X, Y, node['right_child'], max_depth, min_size, depth+1)
def build_tree(X, Y, max_depth, min_size):
root = tree_best_split(X, Y)
tree_recursive_split(X, Y, root, max_depth, min_size, 1)
return root
###Output
_____no_output_____
###Markdown
By printing the split criterion or the predicted class at each node, we can visualise the decision-making process. Both the tree and a prediction can be implemented recursively, by going from the root to a leaf node.
###Code
def print_tree(node, depth=0):
if isinstance(node, dict):
print('%s[X%d < %.3f]' % ((depth*' ', (node['index']+1), node['value'])))
print_tree(node['left_child'], depth+1)
print_tree(node['right_child'], depth+1)
else:
print('%s[%s]' % ((depth*' ', node)))
def tree_predict_single(x, node):
if isinstance(node, dict):
if x[node['index']] < node['value']:
return tree_predict_single(x, node['left_child'])
else:
return tree_predict_single(x, node['right_child'])
return node
def tree_predict_multi(X, node):
Y = np.array([tree_predict_single(row, node) for row in X])
return Y[:, None] # size: (n_object,) -> (n_object, 1)
###Output
_____no_output_____
###Markdown
Let's test our decision tree model on some toy data.
###Code
X_train, Y_train, X_test, Y_test = split(*generate_2_circles(), 0.7)
tree = build_tree(X_train, Y_train, 4, 1)
Y_pred = tree_predict_multi(X_test, tree)
test_accuracy = (Y_pred == Y_test).mean()
print('Test Acc: {:.1f}%'.format(test_accuracy * 100))
###Output
_____no_output_____
###Markdown
We print the decision tree in [pre-order](https://en.wikipedia.org/wiki/Tree_traversal#Pre-order_(NLR)).
###Code
print_tree(tree)
plot_model_prediction(lambda x: tree_predict_multi(x, tree), X_test, Y_test)
###Output
_____no_output_____
###Markdown
3. ExperimentsThe [Cleveland Heart Disease](https://archive.ics.uci.edu/ml/datasets/Heart+Disease) dataset aims at predicting the presence of heart disease based on other available medical information of the patient.Although the whole database contains 76 attributes, we focus on the following 14:1. Age: age in years 2. Sex: * 0 = female * 1 = male 3. Chest pain type: * 1 = typical angina * 2 = atypical angina * 3 = non-anginal pain * 4 = asymptomatic4. Trestbps: resting blood pressure in mm Hg on admission to the hospital 5. Chol: serum cholestoral in mg/dl 6. Fasting blood sugar: > 120 mg/dl * 0 = false * 1 = true7. Resting electrocardiographic results: * 0 = normal * 1 = having ST-T wave abnormality (T wave inversions and/or ST elevation or depression of > 0.05 mV) * 2 = showing probable or definite left ventricular hypertrophy by Estes' criteria 8. Thalach: maximum heart rate achieved 9. Exercise induced angina: * 0 = no * 1 = yes10. Oldpeak: ST depression induced by exercise relative to rest 11. Slope: the slope of the peak exercise ST segment * 1 = upsloping * 2 = flat * 3 = downsloping 12. Ca: number of major vessels (0-3) colored by flourosopy 13. Thal: * 3 = normal * 6 = fixed defect * 7 = reversable defect 14. Target: diagnosis of heart disease (angiographic disease status) * 0 = < 50% diameter narrowing * 1 = > 50% diameter narrowing The 14. attribute is the target variable that we would like to predict based on the rest. We have prepared some helper functions to download and pre-process the data in `heart_disease_data.py`
###Code
import heart_disease_data
X, Y = heart_disease_data.download_and_preprocess()
X_train, Y_train, X_test, Y_test = split(X, Y, 0.7)
###Output
_____no_output_____
###Markdown
Let's have a look at some examples
###Code
print(X_train[0:2])
print(Y_train[0:2])
# TODO feel free to explore more examples and see if you can predict the presence of a heart disease
###Output
_____no_output_____
###Markdown
3.1 Decision Tree for Heart Disease Prediction Let's build a decision tree model on the training data and see how well it performs
###Code
# TODO: you are free to make use of code that we provide in previous cells
# TODO: play around with different hyper parameters and see how these impact your performance
tree = build_tree(X_train, Y_train, 5, 4)
Y_pred = tree_predict_multi(X_test, tree)
test_accuracy = (Y_pred == Y_test).mean()
print('Test Acc: {:.1f}%'.format(test_accuracy * 100))
###Output
_____no_output_____
###Markdown
How did changing the hyper parameters affect the test performance? Usually hyper parameters are tuned using a hold-out [validation set](https://en.wikipedia.org/wiki/Training,_validation,_and_test_sets#Validation_dataset) instead of the test set. 3.2 Logistic Regression for Heart Disease PredictionInstead of manually going through the data to find possible correlations, let's try training a logistic regression model on the data.
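One possible starting point for the cell below is to reuse the `LogsticRegressionGD` class and the loss functions from section 1 (a sketch only — the number of steps and the learning rate are illustrative):
```python
# make sure the labels have shape (n_objects, 1), as the loss functions expect
Y_train_ = Y_train.reshape(-1, 1)
Y_test_ = Y_test.reshape(-1, 1)

model = LogsticRegressionGD(X_train.shape[1], 0.05)
for step in range(100):
    Y_pred = model.forward(X_train)
    loss_value = nll_forward(Y_pred, Y_train_)
    loss_grad = nll_grad_input(Y_pred, Y_train_)
    model.update_params(X_train, loss_grad)
    if step % 10 == 0:
        acc = ((Y_pred > 0.5) == Y_train_).mean()
        print('Step: {} \t Loss: {:.3f} \t Acc: {:.1f}%'.format(step, loss_value, acc * 100))

Y_test_pred = model.forward(X_test)
print('Test Acc: {:.1f}%'.format(((Y_test_pred > 0.5) == Y_test_).mean() * 100))
```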
###Code
# TODO: you are free to make use of code that we provide in previous cells
# TODO: play around with different hyper parameters and see how these impact your performance
###Output
_____no_output_____
###Markdown
How well did your model perform? Was it actually better than guessing? Let's look at the empirical mean of the target.
###Code
Y_train.mean()
###Output
_____no_output_____
###Markdown
So what is the problem? Let's have a look at the learned parameters of our model.
###Code
print(model.W, model.b)
###Output
_____no_output_____
###Markdown
If you trained for sufficiently many steps, you'll probably see that some weights are much larger than others. Have a look at the range in which the parameters were initialized and how much change we allow per step (the learning rate). Compare this to the scale of the input features. Here an important concept arises when we want to train on real-world data: [Feature Scaling](https://en.wikipedia.org/wiki/Feature_scaling). Let's try applying it to our data and see how it affects our performance.
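A minimal sketch of standardising the features (using statistics computed on the training set only) and retraining could look like this:
```python
mu = X_train.mean(axis=0)
sigma = X_train.std(axis=0)
sigma[sigma == 0] = 1.0                      # guard against constant columns
X_train_sc = (X_train - mu) / sigma
X_test_sc = (X_test - mu) / sigma            # reuse the training statistics for the test set

Y_train_ = Y_train.reshape(-1, 1)            # labels as (n_objects, 1), as above
Y_test_ = Y_test.reshape(-1, 1)

model = LogsticRegressionGD(X_train_sc.shape[1], 0.05)
for step in range(100):
    Y_pred = model.forward(X_train_sc)
    loss_grad = nll_grad_input(Y_pred, Y_train_)
    model.update_params(X_train_sc, loss_grad)

Y_test_pred = model.forward(X_test_sc)
print('Test Acc: {:.1f}%'.format(((Y_test_pred > 0.5) == Y_test_).mean() * 100))
```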
###Code
# TODO: Rescale the input features and train again
###Output
_____no_output_____ |
docs/examples/rbs/rbs_optimiser_example.ipynb | ###Markdown
Rule-Based System (RBS) Optimiser Example The RBS Optimiser is used to optimise which rules are leveraged to generate decisions as part of an RBS Pipeline.An RBS Pipeline allows a user to configure a logical flow for decisioning events. Each stage in the pipeline consists of a set of rules which are linked to a decision. The decision that is applied to each event is dictated by the rule(s) that trigger first.For example, in the case of approving and rejecting transactions for a e-commerce transaction use case, you might have 3 approve rules and 3 reject rules. These rules could be used in an RBS Pipeline to approve and reject transactions like so:1. If any approve rules trigger, approve the transaction.2. If no approve rules trigger, but any reject rules trigger, reject the transaction.3. If no rules trigger, approve any remaining transactions.This example shows how we can create and optimise this RBS Pipeline. Requirements To run, you'll need the following:* A set of rules that you want to use in the RBS (in this example, we'll generate these).* A labelled, processed dataset (nulls imputed, categorical features encoded). ---- Import packages
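The three-stage flow above can be read as simple per-transaction logic (a plain-Python illustration only, not part of the Iguanas API; `0` = approve and `1` = reject, matching the decisions used later in this notebook):
```python
def decide(approve_rules_triggered, reject_rules_triggered):
    if approve_rules_triggered:   # stage 1: any approve rule fired
        return 0
    if reject_rules_triggered:    # stage 2: otherwise, any reject rule fired
        return 1
    return 0                      # stage 3: approve any remaining transactions
```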
###Code
from iguanas.rule_generation import RuleGeneratorDT
from iguanas.rbs import RBSPipeline, RBSOptimiser
from iguanas.metrics.classification import FScore
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay, classification_report
###Output
_____no_output_____
###Markdown
Read in data Let's read in some labelled, processed dummy data:
###Code
X_train = pd.read_csv(
'dummy_data/X_train.csv',
index_col='eid'
)
y_train = pd.read_csv(
'dummy_data/y_train.csv',
index_col='eid'
).squeeze()
X_test = pd.read_csv(
'dummy_data/X_test.csv',
index_col='eid'
)
y_test = pd.read_csv(
'dummy_data/y_test.csv',
index_col='eid'
).squeeze()
###Output
_____no_output_____
###Markdown
---- Generate rules Let's first generate some rules (both for approving and rejecting transactions) that we'll use later in our RBS Pipeline.**Note:** in this dataset, positive cases in the target column refers to a fraudulent transaction, so we'll need to flip `y` when generating approve rules. Reject rules
###Code
fs = FScore(beta=1)
params = {
'n_total_conditions': 4,
'metric': fs.fit,
'tree_ensemble': RandomForestClassifier(n_estimators=5, random_state=0, bootstrap=False),
'precision_threshold': 0,
'num_cores': 1,
'target_feat_corr_types': 'Infer',
'verbose': 0,
'rule_name_prefix': 'RejectRule'
}
rg_reject = RuleGeneratorDT(**params)
X_rules_reject = rg_reject.fit(
X=X_train,
y=y_train,
sample_weight=None
)
###Output
_____no_output_____
###Markdown
Approve rules
###Code
params = {
'n_total_conditions': 4,
'metric': fs.fit,
'tree_ensemble': RandomForestClassifier(n_estimators=2, random_state=0, bootstrap=False),
'precision_threshold': 0,
'num_cores': 1,
'target_feat_corr_types': 'Infer',
'verbose': 0,
'rule_name_prefix': 'ApproveRule'
}
rg_approve = RuleGeneratorDT(**params)
X_rules_approve = rg_approve.fit(
X=X_train,
y=(1-y_train), # We flip y here so non-fraudulent transactions become the target
sample_weight=None
)
###Output
_____no_output_____
###Markdown
Now let's combine the binary columns of the approve and reject rules into one dataframe:
###Code
X_rules = pd.concat([X_rules_reject, X_rules_approve], axis=1)
X_rules.head()
X_rules_reject.shape[1], X_rules_approve.shape[1]
###Output
_____no_output_____
###Markdown
Setting up the RBS Pipeline Now, let's set up our RBS Pipeline using the rules we've generated. To reiterate our approach:1. If any approve rules trigger, approve the transaction.2. If no approve rules trigger, but any reject rules trigger, reject the transaction.3. If no rules trigger, approve any remaining transactions.To set up the pipeline using the logic above, we first need to create the `config` parameter. This is just a list which outlines the stages of the pipeline. Each stage should be defined using a tuple of two elements: 1. The first element should be an integer which corresponds to the decision made at that stage (either `0` or `1`)2. The second element should be a list that dictates which rules should trigger for that decision to be made.In our example, the `config` will be:
###Code
config = [
(0, X_rules_approve.columns.tolist()),
(1, X_rules_reject.columns.tolist()),
]
###Output
_____no_output_____
###Markdown
Here, the first stage is configured via the tuple in the first element of the list. This says to apply a decision of `0` (i.e. approve) to transactions where the approve rules have triggered. The second stage is configured via the tuple in the second element of the list. This says to apply a decision of `1` (i.e. reject) to transactions where the reject rules have triggered (**and no approve rules have triggered**).We also need to specify the final decision to be made if no rules are triggered - this is set via the `final_decision` parameter. In our case this should be `0`, as we want to approve any remaining transactions:
###Code
final_decision = 0
###Output
_____no_output_____
###Markdown
With these parameters configured, we can now create our RBS Pipeline by instantiating the `RBSPipeline` class:
###Code
rbsp = RBSPipeline(
config=config,
final_decision=final_decision
)
###Output
_____no_output_____
###Markdown
We can then apply the pipeline to the dataset using the `predict` method:
###Code
y_pred_init = rbsp.predict(
X_rules=X_rules
)
###Output
_____no_output_____
###Markdown
Outputs The `predict` method returns the prediction of the pipeline by applying the pipeline to the given dataset. We can use Sklearn's *classification_report* and *confusion_matrix* functions to generate some performance metrics for the pipeline:
###Code
print(
classification_report(
y_true=y_train,
y_pred=y_pred_init,
digits=4
)
)
cm = ConfusionMatrixDisplay(
confusion_matrix(
y_true=y_train,
y_pred=y_pred_init
)
)
cm.plot()
###Output
_____no_output_____
###Markdown
Optimising the RBS Pipeline Now that we have our basic RBS Pipeline set up, we can optimise it using the RBS Optimiser. Here, we just pass the instantiated pipeline class to the `pipeline` parameter in the `RBSOptimiser` class:
###Code
rbso = RBSOptimiser(
pipeline=rbsp,
metric=fs.fit,
n_iter=60,
verbose=1
)
###Output
_____no_output_____
###Markdown
Then we run the `fit_predict` method to optimise the pipeline using the given dataset, then apply it to the dataset:
###Code
y_pred_opt = rbso.fit_predict(
X_rules=X_rules,
y=y_train
)
###Output
100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 60/60 [00:01<00:00, 43.68trial/s, best loss: -0.9959016393442623]
###Markdown
Outputs The `fit_predict` method optimises the pipeline and returns the prediction of the optimised pipeline by applying it to the given dataset. See the `Attributes` section in the class docstring for a description of each attribute generated:
###Code
rbso.config
###Output
_____no_output_____
###Markdown
We can use Sklearn's *classification_report* and *confusion_matrix* functions to generate some performance metrics for the pipeline:
###Code
print(
classification_report(
y_true=y_train,
y_pred=y_pred_opt,
digits=4
)
)
cm = ConfusionMatrixDisplay(
confusion_matrix(
y_true=y_train,
y_pred=y_pred_opt
)
)
cm.plot()
###Output
_____no_output_____
###Markdown
By comparing these performance metrics to those of the original pipeline, we can see that the RBS Optimiser has indeed improved the performance of the original RBS Pipeline:
###Code
print(f'Original RBS Pipeline F1 score: {fs.fit(y_pred_init, y_train)}')
print(f'Optimised RBS Pipeline F1 score: {fs.fit(y_pred_opt, y_train)}')
###Output
Original RBS Pipeline F1 score: 0.14503816793893132
Optimised RBS Pipeline F1 score: 0.9959016393442623
###Markdown
---- Optimising the RBS Pipeline (without a `config`) In the previous example, we instantiated a pipeline with a `config` before optimising. However, if we don't know what structure the `config` should have, or don't have any requirements for its structure, we can use the RBS Optimiser to generate a new `config` from scratch, which will optimise the overall performance of the RBS Pipeline.To do this, we follow a similar process as before - **the only difference being that we instantiate the RBS Pipeline with an empty list for the** `config` **parameter**:
###Code
rbsp = RBSPipeline(
config=[], # Empty config
final_decision=final_decision
)
###Output
_____no_output_____
###Markdown
We feed this pipeline into the RBS Optimiser as before, but this time provide two extra parameters - `pos_pred_rules` and `neg_pred_rules` - which simply list the rules that should be linked to the positive (`1`) and negative (`0`) decisions respectively:
###Code
rbso = RBSOptimiser(
pipeline=rbsp,
metric=fs.fit,
n_iter=15,
pos_pred_rules=X_rules_reject.columns.tolist(),
neg_pred_rules=X_rules_approve.columns.tolist(),
verbose=1
)
###Output
_____no_output_____
###Markdown
Then we run the `fit_predict` method to optimise the pipeline using the given dataset, then apply it to the dataset:
###Code
y_pred_opt = rbso.fit_predict(
X_rules=X_rules,
y=y_train
)
###Output
100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 15/15 [00:00<00:00, 16.41trial/s, best loss: -0.9959016393442623]
###Markdown
Outputs The `fit_predict` method optimises the pipeline and returns the prediction of the optimised pipeline by applying it to the given dataset. See the `Attributes` section in the class docstring for a description of each attribute generated:
###Code
rbso.config
###Output
_____no_output_____
###Markdown
We can use Sklearn's *classification_report* and *confusion_matrix* functions to generate some performance metrics for the pipeline:
###Code
print(
classification_report(
y_true=y_train,
y_pred=y_pred_opt,
digits=4
)
)
cm = ConfusionMatrixDisplay(
confusion_matrix(
y_true=y_train,
y_pred=y_pred_opt
)
)
cm.plot()
###Output
_____no_output_____ |
Chapter3/chptr3.2-R.ipynb | ###Markdown
3.2: Multiple predictors Read the dataData are in the *child.iq* directory of the ARM_Data download-- you might have to change the path I use below to reflect the path on your computer.
###Code
%%R
# I had to import foreign to get access to read.dta
library("foreign")
kidiq <- read.dta("../../ARM_Data/child.iq/kidiq.dta")
# I won't attach kidiq-- i generally don't attach to avoid confusion(s)
#attach(kidiq)
###Output
_____no_output_____
###Markdown
Load the *arm* library-- see the Chapter 3.1 notebook if you need help.
###Code
%%R
library("arm")
###Output
_____no_output_____
###Markdown
Regression-- two predictors
###Code
%%R
fit2 <- lm(kidiq$kid_score ~ kidiq$mom_hs + kidiq$mom_iq)
display(fit2)
%%R
plot(kidiq$mom_iq, kidiq$kid_score,
xlab="Mother IQ score",
ylab="Child test score",
pch=20, xaxt="n", yaxt="n", type="n")
curve(coef(fit2)[1] + coef(fit2)[2] + coef(fit2)[3]*x, add=TRUE, col="gray")
curve(coef(fit2)[1] + coef(fit2)[3]*x, add=TRUE)
points(kidiq$mom_iq[kidiq$mom_hs==0],
kidiq$kid_score[kidiq$mom_hs==0], pch=19)
points(kidiq$mom_iq[kidiq$mom_hs==1],
kidiq$kid_score[kidiq$mom_hs==1], col="gray", pch=19)
axis(1, c(80,100,120,140))
axis(2, c(20,60,100,140))
###Output
_____no_output_____ |
final-challenge-project/test_score/Test Score.ipynb | ###Markdown
Scoring your trained modelIn the cell below, please load your model into `model`. Also if you used an image size for your input images that *isn't* 224x224, you'll need to set `image_size` to the size you used. The scoring code assumes square input images.For example, this is how I loaded in my checkpoint:```pythonimport torchfrom torch import nnimport torch.nn.functional as Ffrom torchvision import modelsclass FFClassifier(nn.Module): def __init__(self, in_features, hidden_features, out_features, drop_prob=0.1): super().__init__() self.fc1 = nn.Linear(in_features, hidden_features) self.fc2 = nn.Linear(hidden_features, out_features) self.drop = nn.Dropout(p=drop_prob) def forward(self, x): x = self.drop(F.relu(self.fc1(x))) x = self.fc2(x) x = F.log_softmax(x, dim=1) return x def load_checkpoint(checkpoint_path): checkpoint = torch.load(checkpoint_path) model = models.vgg16(pretrained=False) for param in model.parameters(): param.requires_grad = False Put the classifier on the pretrained network model.classifier = FFClassifier(25088, checkpoint['hidden'], 102) model.load_state_dict(checkpoint['state_dict']) return modelmodel = load_checkpoint('/home/workspace/classifier.pt')```Your exact code here will depend on how you defined your network in the project. Make sure you use the absolute path to your checkpoint which should have been uploaded to the `/home/workspace` directory.Run the cell, then after loading the data, press "Test Code" below. This can take a few minutes or more depending on the size of your network. Your model needs to reach **at least 20% accuracy** on the test set to be recorded.
###Code
import torch
from torch import nn
from torchvision import models
from collections import OrderedDict
# Load your model to this variable
# TODO: Write a function that loads a checkpoint and rebuilds the model
flatten_layers = {"vgg16":25088,
"densenet121":1024}
def get_model(name="vgg16", classifier=None, drop_prob=0.5):
"""Get pre trained model and stack fully connected layers
Arguments
---------
name -> pre trained model name (vgg16 or densenet121)
classifier -> nn.Sequencial classifier. If None, a default classifier will be set in place.
drop_prob -> dropout value
"""
# load pre trained model
if name == "vgg16":
model = models.vgg16(pretrained=True)
elif name == "densenet121":
model = models.densenet121(pretrained=True)
else:
print("Model not available at the moment.")
# freeze parameters so we don't backprop through them
for param in model.parameters():
param.requires_grad = False
# build classifier layers
if classifier is None:
classifier = nn.Sequential(OrderedDict([
('fc1', nn.Linear(flatten_layers[name], 1024)),
('relu1', nn.ReLU()),
('drop1', nn.Dropout(drop_prob)),
('fc2', nn.Linear(1024, 512)),
('relu2', nn.ReLU()),
('drop2', nn.Dropout(drop_prob)),
('fc3', nn.Linear(512, 102)),
('output', nn.LogSoftmax(dim=1))
]))
# add classifier to model
model.classifier = classifier
# model to gpu
#model.cuda()
return model
def load_model(path):
"""Load checkpoints model from path"""
checkpoint = torch.load(path, map_location=lambda storage, loc: storage)
name = checkpoint["name"]
classifier = checkpoint["classifier"]
drop_prob = checkpoint["drop_prob"]
model = get_model(name, classifier, drop_prob)
model.class_to_idx = checkpoint['class_to_idx']
model.load_state_dict(checkpoint['state_dict'], strict=False)
model.eval()
return model
model = load_model("final_project_densenet121.pth")
# If you used something other than 224x224 cropped images, set the correct size here
image_size = 224
# Values you used for normalizing the images. Default here are for
# pretrained models from torchvision.
norm_mean = [0.485, 0.456, 0.406]
norm_std = [0.229, 0.224, 0.225]
###Output
/opt/conda/lib/python3.6/site-packages/torchvision-0.2.1-py3.6.egg/torchvision/models/densenet.py:212: UserWarning: nn.init.kaiming_normal is now deprecated in favor of nn.init.kaiming_normal_.
|
side_grids_camp/experiments/Test and train DQN.ipynb | ###Markdown
TESTS
###Code
#
# Test preprocessing and estimator
#
global_step = tf.Variable(0, name="global_step", trainable=False)
env = sokoban_game(level=0)
actions_num = env.action_spec().maximum + 1
world_shape = env.observation_spec()['board'].shape
frames_state = 2
batch_size = 8
e = Estimator(actions_num, world_shape[0], world_shape[1], scope="test")
sp = StateProcessor(world_shape[0], world_shape[1])
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
# Example observation batch
time_step = env.reset()
frame = np.moveaxis(time_step.observation['RGB'], 0, -1)
observation_p = sp.process(sess, frame)
print("Sokoban in grey-scale:")
print(observation_p)
plt.figure()
plt.imshow(observation_p/255.0, cmap='gray')
plt.axis('off')
plt.show()
observation = np.stack([observation_p] * frames_state, axis=2)
observations = np.array([observation] * batch_size)
# Test Prediction
pred = e.predict(sess, observations)
print(pred)
print(pred.max(axis=1))
# Test training step
    y = np.array([10.0, 4.0] * (batch_size // 2))  # integer division so the list repetition works in Python 3
    a = np.array([1, 3] * (batch_size // 2))
print(e.update(sess, observations, a, y))
## Some tests:
# TimeStep inherits from:
# collections.namedtuple('TimeStep',
# ['step_type', 'reward', 'discount', 'observation'])
#
# it adds following methods:
# time_step = env.reset()
# time_step.first()
# time_step.mid()
# time_step.last()
time_step = env.reset()
print("Step type: first {}, mid {}, last {}".format(time_step.first(), time_step.mid(), time_step.last()))
print("Reward {}, discount {}".format(time_step.reward, time_step.discount))
print("Observation type: {}".format(type(time_step.observation)))
print("Let's act..")
time_step = env.step(2)
print("Step type: first {}, mid {}, last {}".format(time_step.first(), time_step.mid(), time_step.last()))
print("Reward {}, discount {}".format(time_step.reward, time_step.discount))
print("Observation type: {}".format(type(time_step.observation)))
print("RGB image dims: {}".format(time_step.observation['RGB'].shape))
print("Plot from rgb:")
frame = np.moveaxis(time_step.observation['RGB'],0,-1)
plt.figure()
plt.imshow(frame)
plt.axis('off')
plt.show()
print("Plot board:")
plt.figure()
plt.imshow(time_step.observation['board'])
plt.axis('off')
plt.show()
###Output
_____no_output_____
###Markdown
TRAIN
###Code
EpisodeStats = namedtuple("EpisodeStats", ["episode_lengths", "episode_rewards"])
print("Start training side effects sokoban.")
env = sokoban_game(level=0)
actions_num = env.action_spec().maximum + 1
world_shape = env.observation_spec()['board'].shape
frames_state = 2
batch_size = 32
start_time = datetime.datetime.now()
num_episodes = 50 # 5000
stats = EpisodeStats(episode_lengths=np.zeros(num_episodes),
episode_rewards=np.zeros(num_episodes))
tf.reset_default_graph()
with tf.Session() as sess:
agent = DQNAgent(sess,
world_shape,
actions_num,
env,
frames_state=frames_state,
experiment_dir=None,
replay_memory_size=10000, # 10000
replay_memory_init_size=500, # 3000
update_target_estimator_every=250, # 500
discount_factor=0.99,
epsilon_start=1.0,
epsilon_end=0.1,
epsilon_decay_steps=50000,
batch_size=batch_size)
for i_episode in range(num_episodes):
# Save the current checkpoint
agent.save()
ret = 0
time_step = env.reset() # for the description of timestep see ai_safety_gridworlds.environments.shared.rl.environment
for t in itertools.count():
action = agent.act(time_step.observation)
time_step = env.step(action)
loss = agent.learn(time_step, action)
print("\rStep {} ({}) @ Episode {}/{}, loss: {}".format(
t, agent.total_t, i_episode + 1, num_episodes, loss), end="")
sys.stdout.flush()
ret += time_step.reward
if time_step.last():
break
stats.episode_lengths[i_episode] = t
stats.episode_rewards[i_episode] = ret
if i_episode % 25 == 0:
print("\nEpisode return: {}, and performance: {}.".format(ret, env.get_last_performance()))
elapsed = datetime.datetime.now() - start_time
print("Return: {}, elasped: {}.".format(ret, elapsed))
print("Traning finished.")
###Output
_____no_output_____ |
Chapter02/2A_Seasonality/trade_seasonality.ipynb | ###Markdown
Demand forecasting using time series analysis
###Code
import os
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.metrics import mean_squared_error
import time
import statsmodels.api as sm
import pickle
# Constants needed for the program
data_path = os.getcwd()
os.chdir(data_path)
list_flds = []
fld_name = ''
index_col_name = 'Month'
file_path_in = os.path.join(data_path,'Import_file_names.txt')
file_path_out = os.path.join(data_path,'output_dataset.txt')
f_name = pd.read_csv(file_path_in,sep='\t')
curr_pd = pd.read_csv('C:/Users/pgl/Documents/GitHub/Hands-On-Artificial-Intelligence-for-Banking/Chapter02/2A_Seasonality/avg_california.csv', header=4)
###Output
_____no_output_____ |
01_clases_objetos_instancias.ipynb | ###Markdown
[](https://pythonista.io) Objects, classes and instances. Python particulars.* Everything is an object, including types and classes.* Types and classes are synonyms.* Multiple inheritance is supported.* There are no private methods or attributes.* Attributes can be accessed directly, without needing to define properties.* Abstract classes are optional, but they can be implemented.* "Monkey patching" is allowed.* "Duck typing" is allowed.* "Mixins" are allowed.* Operator overloading is allowed.* New data types can be created. Classes.Classes are prototypes from which objects can be created; these objects acquire the properties, characteristics and behaviours defined by the class. Rules for class names.Syntactically, classes may be given any name. However, to make them easier to identify, [PEP8](https://www.python.org/dev/peps/pep-0008/class-names) indicates that classes should use the camel case style:* The first letter of the class name must be uppercase and the rest must be lowercase. * If the name is made up of several words, the initial letter of each word must be uppercase.**Examples:**The following are class names that conform to ```PEP 8```.* ```Cadrilatero```.* ```FormaSimple```.* ```AlumnoRegularRegistrado```.* ```Base```.* ```NotImplementedError```. Defining a class.Classes are defined using the ```class``` keyword.Syntax:```class ClassName(SuperClass1, SuperClass2, ... SuperClassN): ...```Classes in Python can "inherit" the components of previously defined classes, which are known as the "superclasses" of the class in question. If no superclass is given, the parentheses are not required and the class inherits the characteristics of ```object```.Inheritance is one of the fundamental concepts of object-oriented programming and will be studied in depth in later chapters.**Note:** Inheritance is a relationship that exists exclusively between classes. **Example:** * The following cell will create a class with no content.
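As an illustrative sketch of the syntax described above (the class names here are invented for the example and are not used elsewhere in this notebook):
```python
class Figura:
    '''A hypothetical base class.'''
    pass


class Coloreable:
    '''Another hypothetical base class.'''
    pass


# Several superclasses may be listed: multiple inheritance.
class Cuadrado(Figura, Coloreable):
    pass


# With no superclass given, the parentheses may be omitted; object is inherited implicitly.
class Punto:
    pass
```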
###Code
class MiClase:
'''Una clase básica.'''
pass
###Output
_____no_output_____
###Markdown
* The ```MiClase``` class inherits the attributes of ```object```.
###Code
help(MiClase)
###Output
_____no_output_____
###Markdown
* The following cell will display all the attributes that ```MiClase``` inherits from ```object```.
###Code
dir(MiClase)
###Output
_____no_output_____
###Markdown
Objects.Objects are the implementations of a class. Creating an object from a class is called "instantiating".Every element in Python is an instance of at least one class.To create an object, the following syntax is used:```ClassName()```If the object is not assigned a name, it is immediately discarded by the Python interpreter. **Example:** * The following cell will create an instance of ```MiClase```, but since it has no name, it will be discarded automatically by the Python interpreter.
###Code
MiClase()
###Output
_____no_output_____
###Markdown
* The following cell will create an instance of ```MiClase``` and assign it the name ```mi_objeto```.
###Code
mi_objeto = MiClase()
###Output
_____no_output_____
###Markdown
The ```type()``` function.* In Python, types and classes refer to the same concept, so the ```type()``` function returns the class from which the object passed as its argument was instantiated.Syntax:```type(obj)``` **Example:** * Running the ```type()``` function with the object ```mi_objeto``` as its argument returns ```__main__.MiClase```, a reference to the ```MiClase``` class, which was defined in the interpreter.
###Code
type(mi_objeto)
###Output
_____no_output_____
###Markdown
* Referencing an object from the interpreter returns the class it was instantiated from and the memory address where it is located.
###Code
mi_objeto
help(mi_objeto)
###Output
_____no_output_____
###Markdown
* The following cell will create a ```tuple``` object named ```tupla_objetos```, which will contain four instances of ```MiClase```.
###Code
tupla_objetos = (MiClase(), MiClase(), MiClase(), MiClase())
###Output
_____no_output_____
###Markdown
* The following cell will display each element of ```tupla_objetos```, including its identifier number.
###Code
for item in tupla_objetos:
print(item, id(item))
###Output
<__main__.MiClase object at 0x000002777096B5C8> 2712013288904
<__main__.MiClase object at 0x000002777096B548> 2712013288776
<__main__.MiClase object at 0x000002777096B608> 2712013288968
<__main__.MiClase object at 0x000002777096B6C8> 2712013289160
###Markdown
The ```isinstance()``` function.To find out whether an object is an instance of a class, the ```isinstance()``` function is used.Syntax:```isinstance(obj, Class)``` **Example:** * The following cell will check whether the object ```mi_objeto``` is an instance of the class ```MiClase```.
###Code
isinstance(mi_objeto, MiClase)
###Output
_____no_output_____
###Markdown
Objects of type ```bool``` belong to a class that inherits from ```int```, so ```True``` is an instance of ```bool``` and also an instance of ```int```. * The following cell will check whether ```True``` is an instance of the class ```int```.
###Code
isinstance(True, int)
###Output
_____no_output_____
###Markdown
* The following cell will check whether ```True``` is an instance of the class ```bool```.
###Code
isinstance(True, bool)
###Output
_____no_output_____
###Markdown
The ```object``` object.All types and classes in Python 3 inherit from ```object```.In Python 3, if nothing is specified, the interpreter assumes that the class inherits from ```object```. **Examples:** * The class ```MiClase``` will be created without using parentheses.
###Code
class MiClase:
pass
###Output
_____no_output_____
###Markdown
* ```MiClase``` is an instance of ```object```.
###Code
isinstance(MiClase, object)
###Output
_____no_output_____
###Markdown
* Now the class ```MiClase_1``` will be created using parentheses, with no arguments.
###Code
class MiClase_1():
pass
###Output
_____no_output_____
###Markdown
* ```MiClase_1``` is an instance of ```object```.
###Code
isinstance(MiClase_1, object)
###Output
_____no_output_____
###Markdown
* Now the class ```MiClase_2``` will be created using parentheses, passing ```object``` as an argument.
###Code
class MiClase_2(object):
pass
###Output
_____no_output_____
###Markdown
* ```MiClase_2``` is an instance of ```object```.
###Code
isinstance(MiClase_2, object)
###Output
_____no_output_____ |
baekjoon/not-classified/1991/.ipynb_checkpoints/tree traversal-checkpoint.ipynb | ###Markdown
Tree traversal Problem link: https://www.acmicpc.net/problem/1991 After reading the problem carefully, the problem itself is not hard — it is just a basic binary-tree traversal. Building the tree from the input actually feels like the harder part. > The first line gives N (1≤N≤26), the number of nodes in the binary tree. > The next N lines each give a node together with its left child node and right child node. > Node names are uppercase letters assigned in order starting from A, and A is always the root node. A missing child is written as '.'. Let's build the tree first. We also need a method that traverses the tree to find a node. Note that this is not a BST, so BST search cannot be used! To look up a value, I implemented it as a variation of inorder traversal.
###Code
class TreeNode:
def __init__(self, value):
self.value = value
self.left = self.right = None
    # Function that handles the input format given in the problem
def insert(self, value, lvalue, rvalue):
# it always returns a node has value
node = find(self, value)
if lvalue != '.':
node.left = TreeNode(lvalue)
if rvalue != '.':
node.right = TreeNode(rvalue)
def find(node, value):
if node is None:
return
elif node.value == value:
return node
else:
left = find(node.left, value)
if left is not None:
return left
right = find(node.right, value)
return right
def preorder(node, array):
if node is not None:
array.append(node.value)
preorder(node.left, array)
preorder(node.right, array)
def inorder(node, array):
if node is not None:
inorder(node.left, array)
array.append(node.value)
inorder(node.right, array)
def postorder(node, array):
if node is not None:
postorder(node.left, array)
postorder(node.right, array)
array.append(node.value)
root = TreeNode('A')
root.insert('A', 'B', 'C')
root.insert('B', 'D', '.')
print(find(root, 'A').value)
print(find(root, 'B').value)
print(find(root, 'C').value)
print(find(root, 'D').value)
# print(find(root, 'E').value) error
###Output
A
B
C
D
###Markdown
Now let's process the input given in the problem.
###Code
input_arr='''7
A B C
B D .
C E F
E . .
F . G
D . .
G . .'''.split('\n')
input_len = input_arr[0]
root = TreeNode('A')
for line in input_arr[1:]:
arr = line.split()
root.insert(arr[0], arr[1], arr[2])
pre = []
in_arr = []
post = []
preorder(root, pre)
inorder(root, in_arr)
postorder(root, post)
print(''.join(pre))
print(''.join(in_arr))
print(''.join(post))
###Output
ABDCEFG
DBAECFG
ABDCEFG
|
cn/.ipynb_checkpoints/sicp-2-71-checkpoint.ipynb | ###Markdown
SICP Exercise (2.71) Summary: the structure of Huffman trees. SICP exercise 2.71 encourages us to think about the structure of a Huffman tree. The exercise describes a special case in which the symbol frequencies grow as successive powers of two, like this: '(a 1) (b 2) (c 4) (d 8) (e 16) (f 32). It asks: if n=5, that is, with 5 such elements, what does the corresponding Huffman tree look like; and if n=10, with 10 such elements, what does it look like then? In this situation, what is the encoding of the least frequent symbol, and what is the encoding of the most frequent one? For a typical programmer like me, the easiest way to answer such a question is simply to write a test case and run it. Of course, to show some craft, the code should still be wrapped up properly; it is listed further below. Beyond running code and looking at the result, though, it is worth thinking the problem through. Following the approach of exercise 2.69, the basic idea for building a Huffman tree is to find the two elements with the smallest weights and merge them, replace the original two elements with the merged one, and repeat until only one element remains. In the special case above, the first and second elements have weights 1 and 2, which sum to 3 — smaller than the third element's weight — so the next step merges the third element with the previous two, giving a combined weight of 7, which is still smaller than the fourth element's 8, so the fourth element has to be merged in next, and so on. Clearly this is deliberate. It grows a highly unbalanced tree — about as unbalanced as it gets. Roughly, the left branch is a single leaf and the right branch is everything else; looking at the right subtree, it again has a single leaf on the left and everything else on the right, and so on. Because of this particular shape, the code for the least frequent symbol in such a Huffman tree is especially long. Let's look at the test code; first, copy over the code from the earlier exercises:
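To make that shape concrete, here is a small sketch (written in Python purely for illustration; the exercise itself is solved in Scheme below) of the successive-merge process for power-of-two weights. It shows that with n symbols the most frequent symbol gets a 1-bit code while the least frequent one gets an (n-1)-bit code:
```python
def code_lengths(n):
    """Simulate successive merging for weights 1, 2, 4, ..., 2^(n-1).

    Returns a dict mapping each weight to its Huffman code length.
    """
    weights = [2 ** i for i in range(n)]
    depth = {w: 0 for w in weights}
    pool = [(w, [w]) for w in weights]         # (total weight, symbols it contains)
    while len(pool) > 1:
        pool.sort(key=lambda t: t[0])
        (w1, s1), (w2, s2) = pool[0], pool[1]  # the two smallest weights
        for s in s1 + s2:                      # every merged symbol moves one level deeper
            depth[s] += 1
        pool = [(w1 + w2, s1 + s2)] + pool[2:]
    return depth


print(code_lengths(5))   # {1: 4, 2: 4, 4: 3, 8: 2, 16: 1}
```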
###Code
(define (make-leaf symbol weight)
(list 'leaf symbol weight))
(define (leaf? object)
(eq? (car object) 'leaf))
(define (symbol-leaf x) (cadr x))
(define (weight-leaf x) (caddr x))
(define (make-code-tree left right)
(list left
right
(append (symbols left) (symbols right))
(+ (weight left) (weight right))))
(define (left-branch tree) (car tree))
(define (right-branch tree) (cadr tree))
(define (symbols tree)
(if (leaf? tree)
(list (symbol-leaf tree))
(caddr tree)))
(define (weight tree)
(if (leaf? tree)
(weight-leaf tree)
(cadddr tree)))
(define (decode bits tree)
(define (decode-1 bits current-branch)
(if (null? bits)
'()
(let ((next-branch
(choose-branch (car bits) current-branch)))
(if (leaf? next-branch)
(cons (symbol-leaf next-branch)
(decode-1 (cdr bits) tree))
(decode-1 (cdr bits) next-branch)))))
(decode-1 bits tree))
(define (choose-branch bit branch)
(cond ((= bit 0) (left-branch branch))
((= bit 1) (right-branch branch))
(else (error "bad bit -- CHOOSE-BRANCH" bit))))
(define (adjoin-set x set)
(cond ((null? set) (list x))
((< (weight x) (weight (car set))) (cons x set))
(else (cons (car set)
(adjoin-set x (cdr set))))))
(define (make-leaf-set pairs)
(if (null? pairs)
'()
(let ((pair (car pairs)))
(adjoin-set (make-leaf (car pair)
(cadr pair))
(make-leaf-set (cdr pairs))))))
(define (encode message tree)
(if (null? message)
'()
(append (encode-symbol (car message) tree)
(encode (cdr message) tree))))
(define (encode-symbol symbol tree)
(cond ((leaf? tree)
(if (equal? symbol (symbol-leaf tree))
'()
#f))
(else
(let ((left-branch-result (encode-symbol symbol (left-branch tree))))
(if left-branch-result
(cons '0 left-branch-result)
(let ((right-branch-result (encode-symbol symbol (right-branch tree))))
(if right-branch-result
(cons '1 right-branch-result)
#f)))))))
(define (generate-huffman-tree pairs)
(successive-merge (make-leaf-set pairs)))
(define (successive-merge leafs)
(cond ((null? leafs) '())
((= 1 (length leafs)) (car leafs))
(else (successive-merge
(adjoin-set
(make-code-tree (car leafs) (cadr leafs))
(cddr leafs))))))
###Output
_____no_output_____
###Markdown
Next comes generating the sample pairs. Following the exercise's requirements, simply typing them in by hand would also work, but having learned Lisp, this kind of thing should be left to code:
###Code
(define (generate-pairs n)
(define (inter i exp-result)
(cond ((> i n) '())
(else (cons (list (format "a~s" i) exp-result) (inter (+ i 1) (* exp-result 2))))))
(inter 1 1)
)
###Output
_____no_output_____
###Markdown
This way, the sample sequence can be obtained just by supplying n:
###Code
(generate-pairs 10)
###Output
_____no_output_____
###Markdown
Next, wrap up a function that displays the details of a Huffman tree:
###Code
(define (display-tree-detail n)
(define sample-words (generate-pairs n))
(define sample-tree (generate-huffman-tree sample-words))
(display (format "sample tree when n=~s:" n)) (newline)(display sample-tree) (newline)
(display (format "smallest one of n=~s:" n)) (newline) (display (encode (list (format "a~s" 1)) sample-tree)) (newline)
(display (format "largest one of n=~s:" n)) (newline) (display (encode (list (format "a~s" n)) sample-tree)) (newline)
)
(display-tree-detail 10)
(display-tree-detail 5)
###Output
sample tree when n=5:
(((((leaf "a1" 1) (leaf "a2" 2) ("a1" "a2") 3) (leaf "a3" 4) ("a1" "a2" "a3") 7) (leaf "a4" 8) ("a1" "a2" "a3" "a4") 15) (leaf "a5" 16) ("a1" "a2" "a3" "a4" "a5") 31)
smallest one of n=5:
(0 0 0 0)
largest one of n=5:
(1)
###Markdown
And finally, wrap up an overall test function:
###Code
(define (start-test-2-71)
(display-tree-detail 5)
(display-tree-detail 10))
(start-test-2-71)
###Output
sample tree when n=5:
(((((leaf "a1" 1) (leaf "a2" 2) ("a1" "a2") 3) (leaf "a3" 4) ("a1" "a2" "a3") 7) (leaf "a4" 8) ("a1" "a2" "a3" "a4") 15) (leaf "a5" 16) ("a1" "a2" "a3" "a4" "a5") 31)
smallest one of n=5:
(0 0 0 0)
largest one of n=5:
(1)
sample tree when n=10:
((((((((((leaf "a1" 1) (leaf "a2" 2) ("a1" "a2") 3) (leaf "a3" 4) ("a1" "a2" "a3") 7) (leaf "a4" 8) ("a1" "a2" "a3" "a4") 15) (leaf "a5" 16) ("a1" "a2" "a3" "a4" "a5") 31) (leaf "a6" 32) ("a1" "a2" "a3" "a4" "a5" "a6") 63) (leaf "a7" 64) ("a1" "a2" "a3" "a4" "a5" "a6" "a7") 127) (leaf "a8" 128) ("a1" "a2" "a3" "a4" "a5" "a6" "a7" "a8") 255) (leaf "a9" 256) ("a1" "a2" "a3" "a4" "a5" "a6" "a7" "a8" "a9") 511) (leaf "a10" 512) ("a1" "a2" "a3" "a4" "a5" "a6" "a7" "a8" "a9" "a10") 1023)
smallest one of n=10:
(0 0 0 0 0 0 0 0 0)
largest one of n=10:
(1)
|
sr-dyna-detour-task.ipynb | ###Markdown
SR-Dyna (Detour Task)
###Code
import matplotlib.pyplot as plt
import numpy as np
import srdyna
import importlib
importlib.reload(srdyna)
# Detour Task
REPLAY = "sufficient"
EXPLORE_STEPS = 10000
POST_REWARD_TRIALS = 5
WALL_LEARNING_STEPS = 100 # 40
REPLAY_STEPS = {
"insufficient": 10,
"sufficient": 25000
}[REPLAY]
env = srdyna.SimpleGridWorld(world='worlds/detour_task.txt')
S_LOC = (0, 5)
agent = srdyna.SRDyna(id=0, loc=S_LOC, env=env)
# Explore
for i in range(EXPLORE_STEPS):
agent.step(random_policy=True)
# Add reward
R_LOC = (9, 5)
env.add_reward(R_LOC, 10)
for i in range(POST_REWARD_TRIALS):
# Repeated trials from S (until reward reached)
agent.terminate_episode(reset_state=env.state_at_loc(S_LOC))
done = False
steps = 0
MAX_STEPS = 500
while not done and steps < MAX_STEPS:
done = agent.step(verbose=False)
steps += 1
print("Trial finished in %d steps" % steps)
# One-step replay samples from random sa's
agent.learn_offline(k=REPLAY_STEPS)
agent.make_plots(sr_state=env.state_at_loc((4, 5)))
# Add barrier
B_LOC = (5, 5)
S2_LOC = (4, 5)
env.wall_coords.append(B_LOC)
env.map = env.get_map()
for i in range(WALL_LEARNING_STEPS):
# One-step runs from left of new wall
reset_state = env.state_at_loc(S2_LOC)
agent.terminate_episode(reset_state=reset_state)
agent.step()
agent.make_plots(sr_state=env.state_at_loc((4, 5)))
# One-step replay samples from random sa's
agent.learn_offline(k=REPLAY_STEPS)
agent.make_plots(sr_state=env.state_at_loc((4, 5)))
# Generate anim (slow)
agent.record_trials(title="detour",
learning=False,
start_locs=[(0, 5), (4, 5), (7, 0), (7, 7)])
###Output
_____no_output_____ |
Probabilistic_Models/NLP_C2_probability_models_W4_Assignment_learn_word_from_context_words_embeddings.ipynb | ###Markdown
Assignment 4: Word Embeddings Welcome to the fourth (and last) programming assignment of Course 2! In this assignment, you will practice how to compute word embeddings and use them for sentiment analysis.- To implement sentiment analysis, you can go beyond counting the number of positive words and negative words. - You can find a way to represent each word numerically, by a vector. - The vector could then represent syntactic (i.e. parts of speech) and semantic (i.e. meaning) structures. In this assignment, you will explore a classic way of generating word embeddings or representations.- You will implement a famous model called the continuous bag of words (CBOW) model. By completing this assignment you will:- Train word vectors from scratch.- Learn how to create batches of data.- Understand how backpropagation works.- Plot and visualize your learned word vectors.Knowing how to train these models will give you a better understanding of word vectors, which are building blocks to many applications in natural language processing. Outline- [1 The Continuous bag of words model](1)- [2 Training the Model](2) - [2.0 Initialize the model](2) - [Exercise 01](ex-01) - [2.1 Softmax Function](2.1) - [Exercise 02](ex-02) - [2.2 Forward Propagation](2.2) - [Exercise 03](ex-03) - [2.3 Cost Function](2.3) - [2.4 Backproagation](2.4) - [Exercise 04](ex-04) - [2.5 Gradient Descent](2.5) - [Exercise 05](ex-05)- [3 Visualizing the word vectors](3) 1. The Continuous bag of words modelLet's take a look at the following sentence: >**'I am happy because I am learning'**. - In continuous bag of words (CBOW) modeling, we try to predict the center word given a few context words (the words around the center word).- For example, if you were to choose a context half-size of say $C = 2$, then you would try to predict the word **happy** given the context that includes 2 words before and 2 words after the center word:> $C$ words before: [I, am] > $C$ words after: [because, I] - In other words:$$context = [I,am, because, I]$$$$target = happy$$The structure of your model will look like this: Figure 1 Where $\bar x$ is the average of all the one hot vectors of the context words. Figure 2 Once you have encoded all the context words, you can use $\bar x$ as the input to your model. The architecture you will be implementing is as follows:\begin{align} h &= W_1 \ X + b_1 \tag{1} \\ a &= ReLU(h) \tag{2} \\ z &= W_2 \ a + b_2 \tag{3} \\ \hat y &= softmax(z) \tag{4} \\\end{align}
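As a small illustrative sketch (separate from the graded functions below), here is one way the context/target pairs for the example sentence could be generated with a context half-size of C = 2:
```python
# Illustration only: build (context, center word) pairs for the toy sentence above.
sentence = "i am happy because i am learning".split()
C = 2  # context half-size

for i, center in enumerate(sentence):
    context = sentence[max(0, i - C):i] + sentence[i + 1:i + C + 1]
    print(context, "->", center)

# For the center word "happy" this prints: ['i', 'am', 'because', 'i'] -> happy
```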
###Code
# Import Python libraries and helper functions (in utils2)
import nltk
from nltk.tokenize import word_tokenize
import numpy as np
from collections import Counter
from utils2 import sigmoid, get_batches, compute_pca, get_dict
# Download sentence tokenizer
nltk.data.path.append('.')
# Load, tokenize and process the data
import re # Load the Regex-modul
with open('shakespeare.txt') as f:
data = f.read() # Read in the data
data = re.sub(r'[,!?;-]', '.',data)  # Punctuation marks are replaced by .
data = nltk.word_tokenize(data) # Tokenize string to words
data = [ ch.lower() for ch in data if ch.isalpha() or ch == '.'] # Lower case and drop non-alphabetical tokens
print("Number of tokens:", len(data),'\n', data[:15]) # print data sample
# Compute the frequency distribution of the words in the dataset (vocabulary)
fdist = nltk.FreqDist(word for word in data)
print("Size of vocabulary: ",len(fdist) )
print("Most frequent tokens: ",fdist.most_common(20) ) # print the 20 most frequent words and their freq.
###Output
Size of vocabulary: 5778
Most frequent tokens: [('.', 9630), ('the', 1521), ('and', 1394), ('i', 1257), ('to', 1159), ('of', 1093), ('my', 857), ('that', 781), ('in', 770), ('a', 752), ('you', 748), ('is', 630), ('not', 559), ('for', 467), ('it', 460), ('with', 441), ('his', 434), ('but', 417), ('me', 417), ('your', 397)]
###Markdown
Mapping words to indices and indices to wordsWe provide a helper function to create a dictionary that maps words to indices and indices to words.
###Code
# get_dict creates two dictionaries, converting words to indices and viceversa.
word2Ind, Ind2word = get_dict(data)
V = len(word2Ind)
print("Size of vocabulary: ", V)
# example of word to index mapping
print("Index of the word 'king' : ",word2Ind['king'] )
print("Word which has index 2743: ",Ind2word[2743] )
###Output
Index of the word 'king' : 2745
Word which has index 2743: kindness
###Markdown
2 Training the Model Initializing the modelYou will now initialize two matrices and two vectors. - The first matrix ($W_1$) is of dimension $N \times V$, where $V$ is the number of words in your vocabulary and $N$ is the dimension of your word vector.- The second matrix ($W_2$) is of dimension $V \times N$. - Vector $b_1$ has dimensions $N\times 1$- Vector $b_2$ has dimensions $V\times 1$. - $b_1$ and $b_2$ are the bias vectors of the linear layers from matrices $W_1$ and $W_2$.The overall structure of the model will look as in Figure 1, but at this stage we are just initializing the parameters. Exercise 01Please use [numpy.random.rand](https://numpy.org/doc/stable/reference/random/generated/numpy.random.rand.html) to generate matrices that are initialized with random values from a uniform distribution, ranging between 0 and 1.**Note:** In the next cell you will encounter a random seed. Please **DO NOT** modify this seed so your solution can be tested correctly.
###Code
# UNQ_C1 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# GRADED FUNCTION: initialize_model
def initialize_model(N,V, random_seed=1):
'''
Inputs:
N: dimension of hidden vector
V: dimension of vocabulary
random_seed: random seed for consistent results in the unit tests
Outputs:
W1, W2, b1, b2: initialized weights and biases
'''
np.random.seed(random_seed)
### START CODE HERE (Replace instances of 'None' with your code) ###
# W1 has shape (N,V)
W1 = np.random.rand(N,V)
# W2 has shape (V,N)
W2 = np.random.rand(V,N)
# b1 has shape (N,1)
b1 = np.random.rand(N,1)
# b2 has shape (V,1)
b2 = np.random.rand(V,1)
### END CODE HERE ###
return W1, W2, b1, b2
# Test your function example.
tmp_N = 4
tmp_V = 10
tmp_W1, tmp_W2, tmp_b1, tmp_b2 = initialize_model(tmp_N,tmp_V)
assert tmp_W1.shape == ((tmp_N,tmp_V))
assert tmp_W2.shape == ((tmp_V,tmp_N))
print(f"tmp_W1.shape: {tmp_W1.shape}")
print(f"tmp_W2.shape: {tmp_W2.shape}")
print(f"tmp_b1.shape: {tmp_b1.shape}")
print(f"tmp_b2.shape: {tmp_b2.shape}")
###Output
tmp_W1.shape: (4, 10)
tmp_W2.shape: (10, 4)
tmp_b1.shape: (4, 1)
tmp_b2.shape: (10, 1)
###Markdown
Expected Output ```CPPtmp_W1.shape: (4, 10)tmp_W2.shape: (10, 4)tmp_b1.shape: (4, 1)tmp_b2.shape: (10, 1)``` 2.1 SoftmaxBefore we can start training the model, we need to implement the softmax function as defined in equation 5: $$ \text{softmax}(z_i) = \frac{e^{z_i} }{\sum_{i=0}^{V-1} e^{z_i} } \tag{5} $$- Array indexing in code starts at 0.- $V$ is the number of words in the vocabulary (which is also the number of rows of $z$).- $i$ goes from 0 to |V| - 1. Exercise 02**Instructions**: Implement the softmax function below. - Assume that the input $z$ to `softmax` is a 2D array- Each training example is represented by a column of shape (V, 1) in this 2D array.- There may be more than one column, in the 2D array, because you can put in a batch of examples to increase efficiency. Let's call the batch size lowercase $m$, so the $z$ array has shape (V, m)- When taking the sum from $i=1 \cdots V-1$, take the sum for each column (each example) separately.Please use- numpy.exp- numpy.sum (set the axis so that you take the sum of each column in z)
###Code
# UNQ_C2 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# GRADED FUNCTION: softmax
def softmax(z):
'''
Inputs:
z: output scores from the hidden layer
Outputs:
yhat: prediction (estimate of y)
'''
### START CODE HERE (Replace instances of 'None' with your own code) ###
# Calculate yhat (softmax)
yhat = np.divide(np.exp(z),np.sum(np.exp(z),axis=0))
### END CODE HERE ###
return yhat
# Test the function
tmp = np.array([[1,2,3],
[1,1,1]
])
tmp_sm = softmax(tmp)
display(tmp_sm)
softmax([9, 8, 11, 10, 8.5])
###Output
_____no_output_____
###Markdown
Expected Ouput```CPParray([[0.5 , 0.73105858, 0.88079708], [0.5 , 0.26894142, 0.11920292]])``` 2.2 Forward propagation Exercise 03Implement the forward propagation $z$ according to equations (1) to (3). \begin{align} h &= W_1 \ X + b_1 \tag{1} \\ a &= ReLU(h) \tag{2} \\ z &= W_2 \ a + b_2 \tag{3} \\\end{align}For that, you will use as activation the Rectified Linear Unit (ReLU) given by:$$f(h)=\max (0,h) \tag{6}$$ Hints You can use numpy.maximum(x1,x2) to get the maximum of two values Use numpy.dot(A,B) to matrix multiply A and B
###Code
# UNQ_C3 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# GRADED FUNCTION: forward_prop
def forward_prop(x, W1, W2, b1, b2):
'''
Inputs:
x: average one hot vector for the context
W1, W2, b1, b2: matrices and biases to be learned
Outputs:
z: output score vector
'''
### START CODE HERE (Replace instances of 'None' with your own code) ###
# Calculate h
h = np.matmul(W1,x)+b1
# Apply the relu on h (store result in h)
h = np.maximum(0,h)
# Calculate z
z = np.matmul(W2,h)+b2
### END CODE HERE ###
return z, h
# Test the function
# Create some inputs
tmp_N = 2
tmp_V = 3
tmp_x = np.array([[0,1,0]]).T
tmp_W1, tmp_W2, tmp_b1, tmp_b2 = initialize_model(N=tmp_N,V=tmp_V, random_seed=1)
print(f"x has shape {tmp_x.shape}")
print(f"N is {tmp_N} and vocabulary size V is {tmp_V}")
# call function
tmp_z, tmp_h = forward_prop(tmp_x, tmp_W1, tmp_W2, tmp_b1, tmp_b2)
print("call forward_prop")
print()
# Look at output
print(f"z has shape {tmp_z.shape}")
print("z has values:")
print(tmp_z)
print()
print(f"h has shape {tmp_h.shape}")
print("h has values:")
print(tmp_h)
###Output
x has shape (3, 1)
N is 2 and vocabulary size V is 3
call forward_prop
z has shape (3, 1)
z has values:
[[0.55379268]
[1.58960774]
[1.50722933]]
h has shape (2, 1)
h has values:
[[0.92477674]
[1.02487333]]
###Markdown
Expected output```CPPx has shape (3, 1)N is 2 and vocabulary size V is 3call forward_propz has shape (3, 1)z has values:[[0.55379268] [1.58960774] [1.50722933]]h has shape (2, 1)h has values:[[0.92477674] [1.02487333]]``` 2.3 Cost function- We have implemented the *cross-entropy* cost function for you.
###Code
# compute_cost: cross-entropy cost functioN
def compute_cost(y, yhat, batch_size):
# cost function
logprobs = np.multiply(np.log(yhat),y) + np.multiply(np.log(1 - yhat), 1 - y)
cost = - 1/batch_size * np.sum(logprobs)
cost = np.squeeze(cost)
return cost
# Test the function
tmp_C = 2
tmp_N = 50
tmp_batch_size = 4
tmp_word2Ind, tmp_Ind2word = get_dict(data)
tmp_V = len(word2Ind)
tmp_x, tmp_y = next(get_batches(data, tmp_word2Ind, tmp_V,tmp_C, tmp_batch_size))
print(f"tmp_x.shape {tmp_x.shape}")
print(f"tmp_y.shape {tmp_y.shape}")
tmp_W1, tmp_W2, tmp_b1, tmp_b2 = initialize_model(tmp_N,tmp_V)
print(f"tmp_W1.shape {tmp_W1.shape}")
print(f"tmp_W2.shape {tmp_W2.shape}")
print(f"tmp_b1.shape {tmp_b1.shape}")
print(f"tmp_b2.shape {tmp_b2.shape}")
tmp_z, tmp_h = forward_prop(tmp_x, tmp_W1, tmp_W2, tmp_b1, tmp_b2)
print(f"tmp_z.shape: {tmp_z.shape}")
print(f"tmp_h.shape: {tmp_h.shape}")
tmp_yhat = softmax(tmp_z)
print(f"tmp_yhat.shape: {tmp_yhat.shape}")
tmp_cost = compute_cost(tmp_y, tmp_yhat, tmp_batch_size)
print("call compute_cost")
print(f"tmp_cost {tmp_cost:.4f}")
###Output
tmp_x.shape (5778, 4)
tmp_y.shape (5778, 4)
tmp_W1.shape (50, 5778)
tmp_W2.shape (5778, 50)
tmp_b1.shape (50, 1)
tmp_b2.shape (5778, 1)
tmp_z.shape: (5778, 4)
tmp_h.shape: (50, 4)
tmp_yhat.shape: (5778, 4)
call compute_cost
tmp_cost 9.9560
###Markdown
Expected output```CPPtmp_x.shape (5778, 4)tmp_y.shape (5778, 4)tmp_W1.shape (50, 5778)tmp_W2.shape (5778, 50)tmp_b1.shape (50, 1)tmp_b2.shape (5778, 1)tmp_z.shape: (5778, 4)tmp_h.shape: (50, 4)tmp_yhat.shape: (5778, 4)call compute_costtmp_cost 9.9560``` 2.4 Training the Model - Backpropagation Exercise 04Now that you have understood how the CBOW model works, you will train it. You created a function for the forward propagation. Now you will implement a function that computes the gradients to backpropagate the errors.
###Code
# UNQ_C4 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# GRADED FUNCTION: back_prop
def back_prop(x, yhat, y, h, W1, W2, b1, b2, batch_size):
'''
Inputs:
x: average one hot vector for the context
yhat: prediction (estimate of y)
y: target vector
h: hidden vector (see eq. 1)
W1, W2, b1, b2: matrices and biases
batch_size: batch size
Outputs:
grad_W1, grad_W2, grad_b1, grad_b2: gradients of matrices and biases
'''
### START CODE HERE (Replace instanes of 'None' with your code) ###
# Compute l1 as W2^T (Yhat - Y)
# Re-use it whenever you see W2^T (Yhat - Y) used to compute a gradient
l1 = np.matmul(W2.T,(yhat-y))
# Apply relu to l1
l1 = np.maximum(0,l1)
# Compute the gradient of W1
grad_W1 = np.matmul(l1,x.T)/yhat.shape[1]
# Compute the gradient of W2
grad_W2 = np.matmul((yhat-y),h.T)/yhat.shape[1]
# Compute the gradient of b1
grad_b1 = np.sum(l1,axis=1,keepdims=True)/yhat.shape[1]
# Compute the gradient of b2
grad_b2 = np.sum(yhat - y,axis=1,keepdims=True)/yhat.shape[1]
### END CODE HERE ###
return grad_W1, grad_W2, grad_b1, grad_b2
# Test the function
tmp_C = 2
tmp_N = 50
tmp_batch_size = 4
tmp_word2Ind, tmp_Ind2word = get_dict(data)
tmp_V = len(word2Ind)
# get a batch of data
tmp_x, tmp_y = next(get_batches(data, tmp_word2Ind, tmp_V,tmp_C, tmp_batch_size))
print("get a batch of data")
print(f"tmp_x.shape {tmp_x.shape}")
print(f"tmp_y.shape {tmp_y.shape}")
print()
print("Initialize weights and biases")
tmp_W1, tmp_W2, tmp_b1, tmp_b2 = initialize_model(tmp_N,tmp_V)
print(f"tmp_W1.shape {tmp_W1.shape}")
print(f"tmp_W2.shape {tmp_W2.shape}")
print(f"tmp_b1.shape {tmp_b1.shape}")
print(f"tmp_b2.shape {tmp_b2.shape}")
print()
print("Forwad prop to get z and h")
tmp_z, tmp_h = forward_prop(tmp_x, tmp_W1, tmp_W2, tmp_b1, tmp_b2)
print(f"tmp_z.shape: {tmp_z.shape}")
print(f"tmp_h.shape: {tmp_h.shape}")
print()
print("Get yhat by calling softmax")
tmp_yhat = softmax(tmp_z)
print(f"tmp_yhat.shape: {tmp_yhat.shape}")
tmp_m = (2*tmp_C)
tmp_grad_W1, tmp_grad_W2, tmp_grad_b1, tmp_grad_b2 = back_prop(tmp_x, tmp_yhat, tmp_y, tmp_h, tmp_W1, tmp_W2, tmp_b1, tmp_b2, tmp_batch_size)
print()
print("call back_prop")
print(f"tmp_grad_W1.shape {tmp_grad_W1.shape}")
print(f"tmp_grad_W2.shape {tmp_grad_W2.shape}")
print(f"tmp_grad_b1.shape {tmp_grad_b1.shape}")
print(f"tmp_grad_b2.shape {tmp_grad_b2.shape}")
###Output
get a batch of data
tmp_x.shape (5778, 4)
tmp_y.shape (5778, 4)
Initialize weights and biases
tmp_W1.shape (50, 5778)
tmp_W2.shape (5778, 50)
tmp_b1.shape (50, 1)
tmp_b2.shape (5778, 1)
Forwad prop to get z and h
tmp_z.shape: (5778, 4)
tmp_h.shape: (50, 4)
Get yhat by calling softmax
tmp_yhat.shape: (5778, 4)
call back_prop
tmp_grad_W1.shape (50, 5778)
tmp_grad_W2.shape (5778, 50)
tmp_grad_b1.shape (50, 1)
tmp_grad_b2.shape (5778, 1)
###Markdown
Expected output```CPPget a batch of datatmp_x.shape (5778, 4)tmp_y.shape (5778, 4)Initialize weights and biasestmp_W1.shape (50, 5778)tmp_W2.shape (5778, 50)tmp_b1.shape (50, 1)tmp_b2.shape (5778, 1)Forwad prop to get z and htmp_z.shape: (5778, 4)tmp_h.shape: (50, 4)Get yhat by calling softmaxtmp_yhat.shape: (5778, 4)call back_proptmp_grad_W1.shape (50, 5778)tmp_grad_W2.shape (5778, 50)tmp_grad_b1.shape (50, 1)tmp_grad_b2.shape (5778, 1)``` Gradient Descent Exercise 05Now that you have implemented a function to compute the gradients, you will implement batch gradient descent over your training set. **Hint:** For that, you will use `initialize_model` and the `back_prop` functions which you just created (and the `compute_cost` function). You can also use the provided `get_batches` helper function:```for x, y in get_batches(data, word2Ind, V, C, batch_size):``````...```Also: print the cost after each batch is processed (use batch size = 128)
###Code
# UNQ_C5 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# GRADED FUNCTION: gradient_descent
def gradient_descent(data, word2Ind, N, V, num_iters, alpha=0.03):
'''
This is the gradient_descent function
Inputs:
data: text
word2Ind: words to Indices
N: dimension of hidden vector
V: dimension of vocabulary
num_iters: number of iterations
Outputs:
W1, W2, b1, b2: updated matrices and biases
'''
W1, W2, b1, b2 = initialize_model(N,V, random_seed=282)
batch_size = 128
iters = 0
C = 2
for x, y in get_batches(data, word2Ind, V, C, batch_size):
### START CODE HERE (Replace instances of 'None' with your own code) ###
# Get z and h
z, h = forward_prop(x, W1, W2, b1, b2)
# Get yhat
yhat = softmax(z)
# Get cost
cost = compute_cost(y, yhat, batch_size)
if ( (iters+1) % 10 == 0):
print(f"iters: {iters + 1} cost: {cost:.6f}")
# Get gradients
grad_W1, grad_W2, grad_b1, grad_b2 = back_prop(x, yhat, y, h, W1, W2, b1, b2, batch_size)
# Update weights and biases
W1 = W1-alpha*grad_W1
W2 = W2-alpha*grad_W2
b1 = b1-alpha*grad_b1
b2 = b2-alpha*grad_b2
### END CODE HERE ###
iters += 1
if iters == num_iters:
break
if iters % 100 == 0:
alpha *= 0.66
return W1, W2, b1, b2
# test your function
C = 2
N = 50
word2Ind, Ind2word = get_dict(data)
V = len(word2Ind)
num_iters = 150
print("Call gradient_descent")
W1, W2, b1, b2 = gradient_descent(data, word2Ind, N, V, num_iters)
###Output
Call gradient_descent
iters: 10 cost: 0.789141
iters: 20 cost: 0.105543
iters: 30 cost: 0.056008
iters: 40 cost: 0.038101
iters: 50 cost: 0.028868
iters: 60 cost: 0.023237
iters: 70 cost: 0.019444
iters: 80 cost: 0.016716
iters: 90 cost: 0.014660
iters: 100 cost: 0.013054
iters: 110 cost: 0.012133
iters: 120 cost: 0.011370
iters: 130 cost: 0.010698
iters: 140 cost: 0.010100
iters: 150 cost: 0.009566
###Markdown
Expected Output```CPPiters: 10 cost: 0.789141iters: 20 cost: 0.105543iters: 30 cost: 0.056008iters: 40 cost: 0.038101iters: 50 cost: 0.028868iters: 60 cost: 0.023237iters: 70 cost: 0.019444iters: 80 cost: 0.016716iters: 90 cost: 0.014660iters: 100 cost: 0.013054iters: 110 cost: 0.012133iters: 120 cost: 0.011370iters: 130 cost: 0.010698iters: 140 cost: 0.010100iters: 150 cost: 0.009566```Your numbers may differ a bit depending on which version of Python you're using. 3.0 Visualizing the word vectorsIn this part you will visualize the word vectors trained using the function you just coded above.
###Code
# visualizing the word vectors here
from matplotlib import pyplot
%config InlineBackend.figure_format = 'svg'
words = ['king', 'queen','lord','man', 'woman','dog','wolf',
'rich','happy','sad']
embs = (W1.T + W2)/2.0
# given a list of words and the embeddings, it returns a matrix with all the embeddings
idx = [word2Ind[word] for word in words]
X = embs[idx, :]
print(X.shape, idx) # X.shape: Number of words of dimension N each
result= compute_pca(X, 2)
pyplot.scatter(result[:, 0], result[:, 1])
for i, word in enumerate(words):
pyplot.annotate(word, xy=(result[i, 0], result[i, 1]))
pyplot.show()
###Output
_____no_output_____
###Markdown
You can see that man and king are next to each other. However, we have to be careful with the interpretation of this projected word vectors, since the PCA depends on the projection -- as shown in the following illustration.
###Code
result= compute_pca(X, 4)
pyplot.scatter(result[:, 3], result[:, 1])
for i, word in enumerate(words):
pyplot.annotate(word, xy=(result[i, 3], result[i, 1]))
pyplot.show()
###Output
_____no_output_____ |
dataset_2_terrorist_attack/preprocessing_terrorist_attack_dataset.ipynb | ###Markdown
Preprocessing of dataIn this notebook the terrorist attack data is preprocessed before it is run through deep learning models to conduct experiments.The following steps are performed:* Dividing strings by hashtags * Exporting the new files as tsv files
###Code
import csv
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
pd.set_option('display.expand_frame_repr', False)
###Output
_____no_output_____
###Markdown
Opening the files
###Code
terrorist_attack_loc = pd.read_csv("TerrorAttack/terrorist_attack_loc.edges", sep=" ", header=None)
terrorist_attack_loc_org = pd.read_csv("TerrorAttack/terrorist_attack_loc_org.edges", sep=" ", header=None)
terrorist_attack_nodes = pd.read_csv("TerrorAttack/terrorist_attack.nodes", sep=" ", header=None)
###Output
_____no_output_____
###Markdown
Dividing the strings by the '#'
###Code
terrorist_attack_loc[0] = terrorist_attack_loc[0].str.split("#", n = 1, expand = True)[1]
terrorist_attack_loc[1] = terrorist_attack_loc[1].str.split("#", n = 1, expand = True)[1]
terrorist_attack_loc_org[0] = terrorist_attack_loc_org[0].str.split("#", n = 1, expand = True)[1]
terrorist_attack_loc_org[1] = terrorist_attack_loc_org[1].str.split("#", n = 1, expand = True)[1]
terrorist_attack_nodes[0] = terrorist_attack_nodes[0].str.split("#", n = 1, expand = True)[1]
terrorist_attack_nodes[107] = terrorist_attack_nodes[107].str.split("#", n = 1, expand = True)[1]
###Output
_____no_output_____
###Markdown
Saving the new files
###Code
terrorist_attack_nodes.to_csv('TerrorAttackNew/terrorist_attack.nodes', sep="\t", header = False, index=False)
terrorist_attack_loc.to_csv('TerrorAttackNew/terrorist_attack_loc.edges', sep="\t", header = False, index=False)
terrorist_attack_loc_org.to_csv('TerrorAttackNew/terrorist_attack_loc_org.edges', sep="\t", header = False, index=False)
###Output
_____no_output_____ |
exercises/statistics project 3/sliderule_dsi_inferential_statistics_exercise_3.ipynb | ###Markdown
Hospital readmissions data analysis and recommendations for reduction BackgroundIn October 2012, the US government's Center for Medicare and Medicaid Services (CMS) began reducing Medicare payments for Inpatient Prospective Payment System hospitals with excess readmissions. Excess readmissions are measured by a ratio, by dividing a hospital’s number of “predicted” 30-day readmissions for heart attack, heart failure, and pneumonia by the number that would be “expected,” based on an average hospital with similar patients. A ratio greater than 1 indicates excess readmissions. Exercise overviewIn this exercise, you will:+ critique a preliminary analysis of readmissions data and recommendations (provided below) for reducing the readmissions rate+ construct a statistically sound analysis and make recommendations of your own More instructions provided below. Include your work **in this notebook and submit to your Github account**. Resources+ Data source: https://data.medicare.gov/Hospital-Compare/Hospital-Readmission-Reduction/9n3s-kdb3+ More information: http://www.cms.gov/Medicare/medicare-fee-for-service-payment/acuteinpatientPPS/readmissions-reduction-program.html+ Markdown syntax: http://nestacms.com/docs/creating-content/markdown-cheat-sheet****
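As a quick illustration of the ratio described above (toy numbers, not taken from the CMS data):
```python
# Toy illustration of the excess readmission ratio.
predicted_readmissions = 120   # hypothetical "predicted" 30-day readmissions
expected_readmissions = 100    # hypothetical "expected" readmissions for an average, similar hospital

excess_readmission_ratio = predicted_readmissions / expected_readmissions
print(excess_readmission_ratio)       # 1.2
print(excess_readmission_ratio > 1)   # True -> excess readmissions
```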
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import bokeh.plotting as bkp
import seaborn as sns
from mpl_toolkits.axes_grid1 import make_axes_locatable
%matplotlib inline
sns.set_style('white')
# read in readmissions data provided
hospital_read_df = pd.read_csv('data/cms_hospital_readmissions.csv')
###Output
_____no_output_____
###Markdown
**** Preliminary analysis
###Code
# deal with missing and inconvenient portions of data
clean_hospital_read_df = hospital_read_df[(hospital_read_df['Number of Discharges'] != 'Not Available')]
clean_hospital_read_df.loc[:, 'Number of Discharges'] = clean_hospital_read_df['Number of Discharges'].astype(int)
clean_hospital_read_df = clean_hospital_read_df.sort_values('Number of Discharges')
# generate a scatterplot for number of discharges vs. excess rate of readmissions
# lists work better with matplotlib scatterplot function
x = [a for a in clean_hospital_read_df['Number of Discharges'][81:-3]]
y = list(clean_hospital_read_df['Excess Readmission Ratio'][81:-3])
fig, ax = plt.subplots(figsize=(8,5))
ax.scatter(x, y,alpha=0.2)
ax.fill_between([0,350], 1.15, 2, facecolor='red', alpha = .15, interpolate=True)
ax.fill_between([800,2500], .5, .95, facecolor='green', alpha = .15, interpolate=True)
ax.set_xlim([0, max(x)])
ax.set_xlabel('Number of discharges', fontsize=12)
ax.set_ylabel('Excess rate of readmissions', fontsize=12)
ax.set_title('Scatterplot of number of discharges vs. excess rate of readmissions', fontsize=14)
ax.grid(True)
fig.tight_layout()
###Output
_____no_output_____
###Markdown
**** Preliminary report**A. Initial observations based on the plot above**+ Overall, rate of readmissions is trending down with increasing number of discharges+ With lower number of discharges, there is a greater incidence of excess rate of readmissions (area shaded red)+ With higher number of discharges, there is a greater incidence of lower rates of readmissions (area shaded green) **B. Statistics**+ In hospitals/facilities with number of discharges < 100, mean excess readmission rate is 1.023 and 63% have excess readmission rate greater than 1 + In hospitals/facilities with number of discharges > 1000, mean excess readmission rate is 0.978 and 44% have excess readmission rate greater than 1 **C. Conclusions**+ There is a significant correlation between hospital capacity (number of discharges) and readmission rates. + Smaller hospitals/facilities may be lacking necessary resources to ensure quality care and prevent complications that lead to readmissions.**D. Regulatory policy recommendations**+ Hospitals/facilties with small capacity (< 300) should be required to demonstrate upgraded resource allocation for quality care to continue operation.+ Directives and incentives should be provided for consolidation of hospitals and facilities to have a smaller number of them with higher capacity and number of discharges. **** ExerciseInclude your work on the following **in this notebook and submit to your Github account**. A. Do you agree with the above analysis and recommendations? Why or why not? B. Provide support for your arguments and your own recommendations with a statistically sound analysis: 1. Setup an appropriate hypothesis test. 2. Compute and report the observed significance value (or p-value). 3. Report statistical significance for $\alpha$ = .01. 4. Discuss statistical significance and practical significanceYou can compose in notebook cells using Markdown: + In the control panel at the top, choose Cell > Cell Type > Markdown+ Markdown syntax: http://nestacms.com/docs/creating-content/markdown-cheat-sheet**** Part A
###Code
# A.1 I don't find that it trends down.
# To me the line looks almost flat. There is also a 95% confidence interval surrounding, which is hardly seen.
sns.regplot(data=clean_hospital_read_df, x='Number of Discharges', y='Excess Readmission Ratio', line_kws={'color': 'red'})
plt.xlim([0, max(x)])
plt.ylim([0, max(y)])
# A.2 I based my conclusions on the red and green area as was stated.
# The red area has a readmission ratio of above 1.15 and less than 350 discharges.
# It's all around 5%, but the incidence rate of above 1.15 readmission ratio's is actually slightly lower below 350 discharges.
print('Above 1.15:\n')
incidence_overall = sum(clean_hospital_read_df['Excess Readmission Ratio'] > 1.15) / len(clean_hospital_read_df['Excess Readmission Ratio'])
incidence_lowdischarge = (sum(clean_hospital_read_df[clean_hospital_read_df['Number of Discharges'] < 350]['Excess Readmission Ratio'] > 1.15) /
len(clean_hospital_read_df[clean_hospital_read_df['Number of Discharges'] < 350]['Excess Readmission Ratio']))
incidence_highdischarge = (sum(clean_hospital_read_df[clean_hospital_read_df['Number of Discharges'] > 800]['Excess Readmission Ratio'] > 1.15) /
len(clean_hospital_read_df[clean_hospital_read_df['Number of Discharges'] > 800]['Excess Readmission Ratio']))
print('overall:', incidence_overall)
print('low:', incidence_lowdischarge)
print('high:', incidence_highdischarge)
# A.3 The green area has a readmission ratio of below 0.95 and more than 800 discharges.
# It went from overall 24% to 34% for the high nr of discharges. Hence here they are right with their statement.
print('Below 0.95:\n')
incidence_overall = sum(clean_hospital_read_df['Excess Readmission Ratio'] < 0.95) / len(clean_hospital_read_df['Excess Readmission Ratio'])
incidence_lowdischarge = (sum(clean_hospital_read_df[clean_hospital_read_df['Number of Discharges'] < 350]['Excess Readmission Ratio'] < 0.95) /
len(clean_hospital_read_df[clean_hospital_read_df['Number of Discharges'] < 350]['Excess Readmission Ratio']))
incidence_highdischarge = (sum(clean_hospital_read_df[clean_hospital_read_df['Number of Discharges'] > 800]['Excess Readmission Ratio'] < 0.95) /
len(clean_hospital_read_df[clean_hospital_read_df['Number of Discharges'] > 800]['Excess Readmission Ratio']))
print('overall:', incidence_overall)
print('low:', incidence_lowdischarge)
print('high:', incidence_highdischarge)
# B.1 They are right about the readmission rate being 1.023.
# They are wrong about the 63%. It's 59%.
mean = clean_hospital_read_df[clean_hospital_read_df['Number of Discharges'] < 100]['Excess Readmission Ratio'].mean()
percentage = (sum(clean_hospital_read_df[clean_hospital_read_df['Number of Discharges'] < 100]['Excess Readmission Ratio'] > 1) /
len(clean_hospital_read_df[clean_hospital_read_df['Number of Discharges'] < 100]['Excess Readmission Ratio'])) * 100
print(mean)
print(percentage)
# B.2 They are right about both statements: mean 0.978 and 44% above 1.
mean = clean_hospital_read_df[clean_hospital_read_df['Number of Discharges'] > 1000]['Excess Readmission Ratio'].mean()
percentage = (sum(clean_hospital_read_df[clean_hospital_read_df['Number of Discharges'] > 1000]['Excess Readmission Ratio'] > 1) /
len(clean_hospital_read_df[clean_hospital_read_df['Number of Discharges'] > 1000]['Excess Readmission Ratio'])) * 100
print(mean)
print(percentage)
# C.1 As mentioned before the line in the regression plot looks almost flat, so can't imagine that it's a big correlation.
# They don't mention what the size of the correlation is. It could be that it's a very small correlation,
# but due to the huge sample size that it is significant, but a very small correlation is not very relevant.
# When I calculate the correlation, I get 'nan' since there are NAN values in the dataset!
# These rows should be excluded of any analysis. This goes also for everything above here.
from scipy.stats import pearsonr
pearsonr(clean_hospital_read_df['Number of Discharges'], clean_hospital_read_df['Excess Readmission Ratio'])
# C.2 Can't prove that with the data at hand.
# So far I don't see any indication that there is a big significant correlation between hospital size and readmission ratio.
# D.1 & D.2 Since there is no evidence of a problem, new regulation are premature.
###Output
_____no_output_____
###Markdown
Part B
###Code
# Hospitals/facilities with small capacity (<300) have a different readmission ratio.
# H0: They don't. H1: They have.
# Also check correlation.
# First we exclude everything with a NAN value
print(sum(clean_hospital_read_df['Excess Readmission Ratio'].isnull()))
print(clean_hospital_read_df.dropna(subset=['Excess Readmission Ratio']).shape)
print(clean_hospital_read_df.shape)
hospital_df = clean_hospital_read_df.dropna(subset=['Excess Readmission Ratio'])
# Significantly different readmission ratio mean. Hence H0 rejected (easily below 0.01)
# But the difference is very very small.
# Effect-size is very important with these large datasets.
from scipy.stats import ttest_ind
print(ttest_ind(hospital_df[hospital_df['Number of Discharges'] < 300]['Excess Readmission Ratio'],
hospital_df[hospital_df['Number of Discharges'] >= 300]['Excess Readmission Ratio']))
print(hospital_df[hospital_df['Number of Discharges'] < 300]['Excess Readmission Ratio'].mean())
print(hospital_df[hospital_df['Number of Discharges'] >= 300]['Excess Readmission Ratio'].mean())
# We see also a very significant correlation, but as I expected the value is only -0.097.
# Usually we start speeking of an interesting correlation from 0.7 and above or -0.7 and below.
from scipy.stats import pearsonr
pearsonr(hospital_df['Number of Discharges'], hospital_df['Excess Readmission Ratio'])
# From the hypothesis and p-value we should conclude that the readmission ratio in hospital/facilities with small capacity is
# significantly different. Nevertheless, for the purpose of cutting their money and making them jump through hoops
# to defend that their quality is good enough the difference is way not enough. Due to the large dataset even
# really small values become significant. Even though they are not interesting for the purpose.
# Always check effectsize of the difference!
###Output
_____no_output_____ |
notebooks_business_vitality/week_1/day_4/7_2_data_preparation_for_machine_learning.ipynb | ###Markdown
Website Ghani, Rayid, Frauke Kreuter, Julia Lane, Adrianne Bradford, Alex Engler, Nicolas Guetta Jeanrenaud, Graham Henke, Daniela Hochfellner, Clayton Hunter, Brian Kim, Avishek Kumar, and Jonathan Morgan. Data Preparation for Machine Learning---- Python Setup- Back to [Table of Contents](Table-of-Contents)Before we begin, run the code cell below to initialize the libraries we'll be using in this assignment. We're already familiar with `numpy`, `pandas`, and `psycopg2` from previous tutorials. Here we'll also be using [`scikit-learn`](http://scikit-learn.org) to fit modeling.
###Code
%pylab inline
import pandas as pd
import psycopg2
from sqlalchemy import create_engine
db_name = "appliedda"
hostname = "10.10.2.10"
conn = psycopg2.connect(database=db_name, host = hostname) #database connection
###Output
_____no_output_____
###Markdown
Creating LabelsLabels are the dependent variables, or *Y* variables, that we are trying to predict. In the machine learning framework, your labels are usually *binary*: true or false, encoded as 1 or 0. In this case, our label is whether an employer at least one year old is likely to disappear in the coming year. We need to pick our year of prediction. We will be looking back one year to see if this employer existed 1 year ago, and forward one year to see if the employer still exists one year from now. > For this example, let's use 2013 (Q1) as our reference year (year of prediction).
###Code
def generate_labels(year, db_name = db_name, hostname = hostname, overwrite = False):
conn = psycopg2.connect(database=db_name, host = hostname) #database connection
cursor = conn.cursor()
sql_script="""
-- First, let's make a list of the employers present at time t: Q1 of 2013
DROP TABLE IF EXISTS ada_18_uchi.labels_{year};
CREATE TABLE ada_18_uchi.labels_{year} AS
SELECT CONCAT(a.ein, a.seinunit, a.empr_no) AS id
, a.ein, a.seinunit, a.empr_no
, case when b.flag = 1 then 0 else 1 end as label
FROM (
SELECT x.ein, x.seinunit, x.empr_no
FROM (
SELECT ein, seinunit, empr_no
FROM il_des_kcmo.il_qcew_employers
WHERE year = {year}
AND quarter = 1
) AS x
INNER JOIN (
SELECT ein, seinunit, empr_no
FROM il_des_kcmo.il_qcew_employers
WHERE year = {year}-1
AND quarter = 1
) AS y
ON x.ein = y.ein AND x.seinunit = y.seinunit AND x.empr_no = y.empr_no
) AS a
LEFT JOIN (
SELECT ein, seinunit, empr_no, 1 as flag
FROM il_des_kcmo.il_qcew_employers
WHERE year = {year}+1
AND quarter = 1
) AS b
ON a.ein = b.ein AND a.seinunit = b.seinunit AND a.empr_no = b.empr_no;
ALTER TABLE ada_18_uchi.labels_{year} OWNER TO ada_18_uchi_admin;
COMMIT;
""".format(year = year)
# Let's check if the table already exists:
cursor.execute('''
SELECT * FROM information_schema.tables
WHERE table_name = 'labels_{year}'
AND table_schema = 'ada_18_uchi';
'''.format(year = year))
# Let's write table if it does not exist (or if overwrite = True)
if not(cursor.rowcount) or overwrite:
cursor.execute(sql_script)
cursor.close()
df = pd.read_sql('SELECT * FROM ada_18_uchi.labels_{}'.format(year), conn)
return df
df_labels = generate_labels(2013)
pd.crosstab(index = df_labels['label'], columns = 'count')
###Output
_____no_output_____
###Markdown
Creating Features: Our features are our independent variables or predictors. Good features make machine learning systems effective. The better the features, the easier it is to capture the structure of the data. You generate features using domain knowledge. In general, it is better to have more complex features and a simpler model rather than vice versa. Keeping the model simple makes it faster to train and easier to understand, rather than extensively searching for the "right" model and "right" set of parameters. Machine Learning Algorithms learn a solution to a problem from sample data. The set of features is the best representation of the sample data to learn a solution to a problem. - **Feature engineering** is "the process of transforming raw data into features that better represent the underlying problem/data/structure to the predictive models, resulting in improved model accuracy on unseen data." ( from [Discover Feature Engineering](http://machinelearningmastery.com/discover-feature-engineering-how-to-engineer-features-and-how-to-get-good-at-it/) ). In text, for example, this might involve deriving traits of the text like word counts, verb counts, or topics to feed into a model rather than simply giving it the raw text. Examples of feature engineering are: - **Transformations**, such as log, square, and square root.- **Dummy (binary) variables**, also known as *indicator variables*, often done by taking categorical variables (such as city) which do not have a numerical value, and adding them to models as a binary value.- **Discretization**. Several methods require features to be discrete instead of continuous. This is often done by binning, which you can do by equal width. - **Aggregation.** Aggregate features often constitute the majority of features for a given problem. These use different aggregation functions (*count, min, max, average, standard deviation, etc.*) which summarize several values into one feature, aggregating over varying windows of time and space. For example, given urban data, we would want to calculate the *number* (and *min, max, mean, variance*, etc.) of crimes within an *m*-mile radius of an address in the past *t* months for varying values of *m* and *t*, and then use all of them as features. (A short pandas sketch of these four operations follows right after this cell.)>Our preliminary features are the following>>- `n_spells` (Aggregation): Total number of spells someone has had up until the date of prediction.>- `age` (Transformation): The age feature is created by subtracting bdate_year from the current year of prediction. >- `edlevel` (Binary): 0 if the person has less than a high school education and 1 if they have more than a high school education. >- `workexp` (Binary): 0 if no work experience, 1 if there is some sort of work experience>- `married` (Binary): 1 if the person is married, 0 if they are not. >- `gender`: (Binary) 1 (male) 2 (female)>- `n_days_last_spell`: (Aggregation) The number of days since a person's last spell.>- `(foodstamp, tanf, granf)`: (Binary) 0 if the last benefit was not foodstamp, tanf or granf, 1 if it was. New vs Old Employers: Let's create a first binary feature defining "old" and "new" firms. Old firms are determined according to an age cutoff, with a default value of 5 years.
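As promised above, here is a small illustrative pandas sketch of the four operations; the `toy` DataFrame and its columns are invented for the example and are not part of the project data:

```python
import pandas as pd
import numpy as np

toy = pd.DataFrame({'city': ['Chicago', 'Peoria', 'Chicago'],
                    'wage': [3200.0, 1800.0, 5400.0],
                    'crimes': [4, 1, 7]})

toy['log_wage'] = np.log(toy['wage'])                        # transformation
toy = pd.concat([toy, pd.get_dummies(toy['city'])], axis=1)  # dummy (binary) variables
toy['wage_bin'] = pd.cut(toy['wage'], bins=3)                # discretization by equal-width binning
agg = toy.groupby('city')['crimes'].agg(['count', 'min', 'max', 'mean'])  # aggregation
```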
###Code
def employer_age_features(year, age_cutoff = 5, db_name = db_name, hostname = hostname, overwrite = False):
conn = psycopg2.connect(database=db_name, host = hostname) #database connection
cursor = conn.cursor()
sql_script = '''
DROP TABLE IF EXISTS ada_18_uchi.features_age_{year};
CREATE TABLE ada_18_uchi.features_age_{year} AS
SELECT a.*, CASE WHEN b.flag = 1 THEN 0 ELSE 1 END AS new_employer
FROM (
SELECT ein, seinunit, empr_no
FROM ada_18_uchi.labels_{year}
) AS a
LEFT JOIN (
SELECT ein, seinunit, empr_no, 1 as flag
FROM il_des_kcmo.il_qcew_employers
WHERE year = {year}-{age_cutoff}
AND quarter = 1
) AS b
ON a.ein = b.ein AND a.seinunit = b.seinunit AND a.empr_no = b.empr_no;
ALTER TABLE ada_18_uchi.features_age_{year} OWNER TO ada_18_uchi_admin;
COMMIT;
'''.format(year = year, age_cutoff = age_cutoff)
# Let's check if the table already exists:
cursor.execute('''
SELECT * FROM information_schema.tables
WHERE table_name = 'features_age_{year}'
AND table_schema = 'ada_18_uchi';
'''.format(year = year))
# Let's write table if it does not exist (or if overwrite = True)
if not(cursor.rowcount) or overwrite:
cursor.execute(sql_script)
cursor.close()
df = pd.read_sql('SELECT * FROM ada_18_uchi.features_age_{}'.format(year), conn)
return df
df_age = employer_age_features(2013)
df_age.head()
###Output
_____no_output_____
###Markdown
QWI Statistics The next set of features we would like to include are the QWI statistics. Since we are looking at firms that are at least one year old, it might be interesting to consider both the current QWI numbers and the numbers from the year before. Note that these statistics are taken at company level (EIN), instead of individual entity level (combination of EIN, RUN, and UI Account Number). This is because QWI is calculated at firm level. We therefore merge on EIN, instead of using all three variables.
###Code
conn = psycopg2.connect(database = db_name, host = hostname)
df_qwi = pd.read_sql('SELECT * FROM ada_18_uchi.qwi_ein_{year}_1'.format(year = 2013), conn)
df_qwi.head()
###Output
_____no_output_____
###Markdown
Let's also consider the QWI statistics one year before our prediction quarter. We can create additional features accounting for the year-over-year variation in the level of the QWI statistics.
###Code
df_qwi_m1 = pd.read_sql('SELECT * FROM ada_18_uchi.qwi_ein_{year}_1'.format(year = 2012), conn)
df_qwi_m1 = df_qwi_m1.add_prefix('m1_')
df_qwi_m1.head()
df_qwi = pd.merge(df_qwi, df_qwi_m1, how = 'left', left_on = 'ein', right_on = 'm1_ein')
df_qwi.columns
for var in ['nb_empl', 'emp_current_qrt'
, 'emp_4qtrs_ago', 'emp_3qtrs_ago', 'emp_2qtrs_ago', 'emp_prev_qtr', 'emp_next_qtr'
, 'emp_begin_qtr', 'emp_end_qtr', 'emp_full_qtr'
, 'accessions_current', 'accessions_consecutive_qtr', 'accessions_full_qtr'
, 'separations', 'new_hires', 'recalls']:
m1_var = 'm1_{}'.format(var)
change_var = 'change_{}'.format(var)
df_qwi[change_var] = df_qwi[var] - df_qwi[m1_var]
###Output
_____no_output_____
###Markdown
Dropping Missing Values: `NULL` values will make it impossible to run our Machine Learning Algorithm. Let's see if there are any in the data.
###Code
isnan_rows = df_qwi.isnull().any(axis=1)
df_qwi[isnan_rows].head()
nrows_df_qwi = df_qwi.shape[0]
nrows_df_qwi_isnan = df_qwi[isnan_rows].shape[0]
print('%of rows with NaNs: {} '.format(float(nrows_df_qwi_isnan)/nrows_df_qwi))
df_qwi = df_qwi[~isnan_rows]
###Output
_____no_output_____
###Markdown
Let's combine the two previous queries into a single SQL query that will retrieve all the relevant QWI statistics.
###Code
def qwi_features(year, db_name = db_name, hostname = hostname, overwrite = False):
conn = psycopg2.connect(database=db_name, host = hostname) #database connection
cursor = conn.cursor()
sql_script = '''
DROP TABLE IF EXISTS ada_18_uchi.features_qwi_{year};
CREATE TABLE ada_18_uchi.features_qwi_{year} AS
SELECT a.*
, b.nb_empl AS m1_nb_empl
, b.emp_current_qrt AS m1_emp_current_qrt
, b.emp_4qtrs_ago AS m1_emp_4qtrs_ago
, b.emp_3qtrs_ago AS m1_emp_3qtrs_ago
, b.emp_2qtrs_ago AS m1_emp_2qtrs_ago
, b.emp_prev_qtr AS m1_emp_prev_qtr
, b.emp_next_qtr AS m1_emp_next_qtr
, b.emp_begin_qtr AS m1_emp_begin_qtr
, b.emp_end_qtr AS m1_emp_end_qtr
, b.emp_full_qtr AS m1_emp_full_qtr
, b.accessions_current AS m1_accessions_current
, b.accessions_consecutive_qtr AS m1_accessions_consecutive_qtr
, b.accessions_full_qtr AS m1_accessions_full_qtr
, b.separations AS m1_separations
, b.new_hires AS m1_new_hires
, b.recalls AS m1_recalls
FROM(
SELECT *
FROM ada_18_uchi.qwi_ein_{year}_1
) AS a
LEFT JOIN (
SELECT *
FROM ada_18_uchi.qwi_ein_{year_m1}_1
) AS b
ON a.ein = b.ein;
ALTER TABLE ada_18_uchi.features_qwi_{year} OWNER TO ada_18_uchi_admin;
COMMIT;
'''.format(year = year, year_m1 = year-1)
# Let's check if the table already exists:
cursor.execute('''
SELECT * FROM information_schema.tables
WHERE table_name = 'features_qwi_{year}'
AND table_schema = 'ada_18_uchi';
'''.format(year = year))
# Let's write table if it does not exist (or if overwrite = True)
if not(cursor.rowcount) or overwrite:
cursor.execute(sql_script)
cursor.close()
df = pd.read_sql('SELECT * FROM ada_18_uchi.features_qwi_{};'.format(year), conn)
for var in ['nb_empl', 'emp_current_qrt'
, 'emp_4qtrs_ago', 'emp_3qtrs_ago', 'emp_2qtrs_ago', 'emp_prev_qtr', 'emp_next_qtr'
, 'emp_begin_qtr', 'emp_end_qtr', 'emp_full_qtr'
, 'accessions_current', 'accessions_consecutive_qtr', 'accessions_full_qtr'
, 'separations', 'new_hires', 'recalls']:
m1_var = 'm1_{}'.format(var)
change_var = 'change_{}'.format(var)
df[change_var] = df[var] - df[m1_var]
# Remove NULL rows
isnan_rows = df.isnull().any(axis=1)
df = df[~isnan_rows]
return df
df_qwi = qwi_features(2013)
df_qwi.head()
###Output
_____no_output_____
###Markdown
Wages and Employees Let's use wage and employee statistics from the IL wage records.
###Code
conn = psycopg2.connect(database = db_name, host = hostname)
query = '''
SELECT ein, seinunit, empr_no
, empl_month1::int+empl_month2::int+empl_month3::int AS total_empl
, total_wages
FROM il_des_kcmo.il_qcew_employers
WHERE year = 2013 AND quarter = 1
'''
df_wages = pd.read_sql(query, conn)
###Output
_____no_output_____
###Markdown
Let's create an additional feature for average monthly wage
###Code
df_wages['avg_wage'] = df_wages['total_wages']/df_wages['total_empl']
###Output
_____no_output_____
###Markdown
Imputation: It is important to do a quick check of our matrix to see if we have any outlier values.
###Code
df_wages.describe(include = 'all', percentiles=[0.01,0.05,0.25,0.50,0.75,0.95,0.99])
###Output
_____no_output_____
###Markdown
Because of some data inconsistencies in total employees and total wages, some average wages could not be calculated (when `total_empl == 0` and `total_wages == 0`) and some have `inf` values (when only `total_empl == 0`). These `NULL` and `inf` values will be problematic for the machine learning algorithm. Let's impute these missing values to the median value of all average wages.
###Code
mask = ((df_wages['avg_wage'].isnull()) | (df_wages['avg_wage'] == inf))
vals_to_replace = df_wages[mask]['avg_wage'].values
df_wages['avg_wage'].replace(vals_to_replace,np.NaN, inplace=True)
median_avg_wage = df_wages['avg_wage'].median()
print(median_avg_wage)
df_wages['avg_wage'].fillna(median_avg_wage, inplace=True)
df_wages.describe(include = 'all')
###Output
_____no_output_____
###Markdown
Removing Outliers: Some values of average wage still seem impossible or very unlikely. Certain employers have an average wage of 0, and some outliers have average wages far exceeding the 99th percentile. These are things you'd want to do a "sanity check" on with someone who knows the data well. Here, we believe these are data errors and choose to drop these values.
###Code
# Find all rows where the wage is 0 or above 50,000 per month
outlier_rows = ((df_wages['avg_wage'] == 0) | (df_wages['avg_wage'] > 50000))
df_wages[outlier_rows].head()
nrows_wages = df_wages.shape[0]
nrows_wages_outliers = df_wages[outlier_rows].shape[0]
print('%of outlier rows: {} '.format(float(nrows_wages_outliers)/nrows_wages))
df_wages = df_wages[~outlier_rows]
###Output
_____no_output_____
###Markdown
Scaling of Values: Certain models will have issues with features that live on very different scales, such as number of employees and average wages. Number of employees is typically a number between 1 and 100, while average wages are usually between 1000 and 4000. In order to circumvent this problem we can scale our features.
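As an aside, the same min-max scaling done by hand in the next cell can also be obtained with scikit-learn's `MinMaxScaler`; a minimal sketch, assuming the column list shown is the one we want to rescale:

```python
from sklearn.preprocessing import MinMaxScaler

cols = ['total_empl', 'total_wages', 'avg_wage']      # example choice of columns
scaler = MinMaxScaler()
scaled_values = scaler.fit_transform(df_wages[cols])  # each column mapped to the [0, 1] range
```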
###Code
# Example: let's scale average wages:
min_avg_wage = df_wages['avg_wage'].min()
max_avg_wage = df_wages['avg_wage'].max()
df_wages['avg_wage_scaled'] = (df_wages['avg_wage']-min_avg_wage)/(max_avg_wage-min_avg_wage)
df_wages[['avg_wage', 'avg_wage_scaled']].describe()
# Replace the original var by the scaled var
df_wages['avg_wage'] = df_wages['avg_wage_scaled']
del df_wages['avg_wage_scaled']
###Output
_____no_output_____
###Markdown
This generic function can be used to scale other variables.
###Code
def scaling_var(df, var):
min_var = df[var].min()
max_var = df[var].max()
scaled_var = '{}_scaled'.format(var)
df[scaled_var] = (df[var] - min_var)/(max_var - min_var)
return df[scaled_var]
df_wages['total_empl_scaled'] = scaling_var(df_wages, 'total_empl')
df_wages['total_wage_scaled'] = scaling_var(df_wages, 'total_wages')  # the column is named 'total_wages' in this DataFrame
###Output
_____no_output_____
###Markdown
All the steps above can be summarized in the following function:
###Code
def wages_features(year, db_name = db_name, hostname = hostname, overwrite = False):
conn = psycopg2.connect(database=db_name, host = hostname) #database connection
cursor = conn.cursor()
sql_script = '''
DROP TABLE IF EXISTS ada_18_uchi.features_wages_{year};
CREATE TABLE ada_18_uchi.features_wages_{year} AS
SELECT ein, seinunit, empr_no
, empl_month1::int+empl_month2::int+empl_month3::int AS total_empl
, total_wages
FROM il_des_kcmo.il_qcew_employers
WHERE year = {year} AND quarter = 1;
ALTER TABLE ada_18_uchi.features_wages_{year} OWNER TO ada_18_uchi_admin;
COMMIT;
'''.format(year = year)
# Let's check if the table already exists:
cursor.execute('''
SELECT * FROM information_schema.tables
WHERE table_name = 'features_wages_{year}'
AND table_schema = 'ada_18_uchi';
'''.format(year = year))
# Let's write table if it does not exist (or if overwrite = True)
if not(cursor.rowcount) or overwrite:
cursor.execute(sql_script)
cursor.close()
df = pd.read_sql('SELECT * FROM ada_18_uchi.features_wages_{}'.format(year), conn)
df['avg_wage'] = df['total_wages']/df['total_empl']
# Flag null, infinite average wage values
mask = ((df['avg_wage'].isnull()) | (df['avg_wage'] == inf))
vals_to_replace = df[mask]['avg_wage'].values
df['avg_wage'].replace(vals_to_replace,np.NaN, inplace=True)
# Impute the median wage value
df['avg_wage'].fillna(df['avg_wage'].median(), inplace=True)
# Remove Outliers
outlier_rows = ((df['avg_wage'] == 0) | (df['avg_wage'] > 50000))
    df = df[~outlier_rows]  # keep the filtered frame so the outliers are actually dropped
# Scaling values
df['total_wage_scaled'] = scaling_var(df, 'total_wages')
df['total_empl_scaled'] = scaling_var(df, 'total_empl')
df['avg_wage_scaled'] = scaling_var(df, 'avg_wage')
return df
df_wages = wages_features(2013)
df_wages.head()
###Output
_____no_output_____
###Markdown
Combining all data: We can now combine all our subsets of features into one features table.
###Code
df_features = pd.merge(df_age, df_qwi, how = 'left', on = 'ein')
df_features = pd.merge(df_features, df_wages, how = 'left', on = ['ein', 'seinunit', 'empr_no'])
###Output
_____no_output_____
###Markdown
Let's merge our features with our labels.
###Code
df_table = pd.merge(df_labels, df_features, how = 'left', on = ['ein', 'seinunit', 'empr_no'])
###Output
_____no_output_____
###Markdown
Let's now write the table into our class schema so we can use it for the Machine Learning notebook. In order to write a data table, we have to create an engine with SQLAlchemy (see notebook on Databases for more details).
###Code
# Let's check if the table already exists:
conn = psycopg2.connect(database=db_name, host = hostname) #database connection
cursor = conn.cursor()
cursor.execute('''
SELECT * FROM information_schema.tables
WHERE table_name = 'table_employers_2013'
AND table_schema = 'ada_18_uchi';
''')
# Let's write table if it does not exist (or if overwrite = True)
overwrite = False
if not(cursor.rowcount) or overwrite:
engine = create_engine('postgresql://{}/{}'.format(hostname, db_name))
df_table.to_sql('table_employers_2013', engine, schema = 'ada_18_uchi', index = False, if_exists='replace')
# Change Admin rights of table to admin
conn = psycopg2.connect(database = db_name, host = hostname)
cursor = conn.cursor()
cursor.execute('ALTER TABLE ada_18_uchi.table_employers_2013 OWNER TO ada_18_uchi_admin; COMMIT;')
cursor.close()
table_2013 = pd.read_sql('SELECT * FROM ada_18_uchi.table_employers_2013 LIMIT 100', conn)
table_2013.head()
###Output
_____no_output_____
###Markdown
Overall Function for Label and Features Generation: We have recapitulated all the above steps into a general function below.
###Code
def generate_table(year, db_name = db_name, hostname = hostname, schema = 'ada_18_uchi', overwrite = False):
# Generate Labels
print("Generating labels")
df_label = generate_labels(year, db_name = db_name, hostname = hostname, overwrite = overwrite)
# Generate Features
print("Generating features")
df_age = employer_age_features(year, db_name = db_name, hostname = hostname, overwrite = overwrite)
df_qwi = qwi_features(year, db_name = db_name, hostname = hostname, overwrite = overwrite)
df_wages = wages_features(year, db_name = db_name, hostname = hostname, overwrite = overwrite)
# Merge Labels and Features together
print("Merging labels and features")
df_table = pd.merge(df_label, df_age, how = 'inner', on = ['ein', 'seinunit', 'empr_no'])
df_table = pd.merge(df_table, df_qwi, how = 'inner', on = 'ein')
df_table = pd.merge(df_table, df_wages, how = 'inner', on = ['ein', 'seinunit', 'empr_no'])
# Removing NULL values
isnan_rows = df_table.isnull().any(axis=1)
df_table = df_table[~isnan_rows]
# Write Table
print("Writing table")
# Let's check if the table already exists:
conn = psycopg2.connect(database=db_name, host = hostname) #database connection
cursor = conn.cursor()
cursor.execute('''
SELECT * FROM information_schema.tables
WHERE table_name = 'table_employers_{year}'
AND table_schema = '{schema}';
'''.format(year = year, schema = schema))
# Let's write table if it does not exist (or if overwrite = True)
if not(cursor.rowcount) or overwrite:
table_name = 'table_employers_{}'.format(year)
engine = create_engine('postgresql://{}/{}'.format(hostname, db_name))
df_table.to_sql(table_name, engine, schema = 'ada_18_uchi', index = False, if_exists='replace')
# Change Admin rights of table to admin
conn = psycopg2.connect(database = db_name, host = hostname)
cursor = conn.cursor()
cursor.execute('ALTER TABLE ada_18_uchi.table_employers_{} OWNER TO ada_18_uchi_admin; COMMIT;'.format(year))
cursor.close()
return df_table
df_table_2013 = generate_table(2013)
df_table_2014 = generate_table(2014)
df_table_2013.head()
df_table_2014.head()
###Output
_____no_output_____ |
softmax and cross entropy from scratch.ipynb | ###Markdown
We start by downloading the MNIST dataset from the Keras datasets API.
###Code
# imports added here so this cell runs on its own
import numpy as np
import matplotlib
from matplotlib import pyplot as plt
from keras.datasets import mnist
%matplotlib inline

(train_x, train_y), (test_x, test_y) = mnist.load_data()
train_x[0].shape
plt.imshow(train_x[4], cmap = matplotlib.cm.binary)
train_y[4]
###Output
_____no_output_____
###Markdown
We flatten each (28,28) image matrix into a vector of length 28*28 and scale the pixel values to the [0, 1] range.
###Code
def preprocess(x):
x = x / 255.
x = x.reshape((x.shape[0], x.shape[1] * x.shape[2]))
return x
def to_categorical(y, num_classes):
res = np.zeros((y.shape[0], num_classes))
res[np.arange(y.shape[0]), y] = 1.
return res
train_x = preprocess(train_x)
train_x = train_x.T
train_y = to_categorical(train_y, 10).T
print(train_x.shape)
print(train_y.shape)
plt.imshow(train_x[:, 5200].reshape((28,28)))
train_y[:,5200]
###Output
_____no_output_____
###Markdown
Softmax is an activation function that outputs probabilities for multi-class classification problems. It is similar to a sigmoid output, which is often used for outputting a single probability. Softmax makes sure that the sum of the individual probabilities equals 1. Softmax formula:$p_i = \dfrac{e^{a_i}}{\sum^N_{k=1} e^{a_k}}$ - Equation 1: Softmax. By using the exponential function it gives disproportionately more weight to higher values, so higher values get an advantage; therefore it's called a soft max function. A max function would give 100% probability to the highest value; softmax is somewhere in between a max and an actual linear probability as given by Equation 2:$p_i = \dfrac{a_i}{\sum^N_{k=1} a_k}$ - Equation 2: Standard linear probability. There is however a problem with using the regular softmax function: because it uses an exponential, chances are high that it will encounter an overflow. To overcome this, we can subtract the maximum of the a values from each of them before exponentiating, which leaves the result unchanged.
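As a quick numeric illustration of the overflow issue (a small sketch; the logit values are arbitrary):

```python
import numpy as np

a = np.array([1000., 2000., 3000.])                         # large logits
naive = np.exp(a) / np.sum(np.exp(a))                       # exp(1000) overflows to inf, result is nan
stable = np.exp(a - a.max()) / np.sum(np.exp(a - a.max()))  # same probabilities, no overflow
print(naive)    # [nan nan nan] with overflow warnings
print(stable)   # approximately [0. 0. 1.]
```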
###Code
def softmax(a):
    exp_term = np.exp(a - np.max(a, axis=0, keepdims=True))  # subtract the per-column max to prevent overflow
res = exp_term/np.sum(exp_term, axis=0)
return res
def cross_entropy_loss(outputs, y):
loss = np.sum(y * np.log(outputs))
return loss * -1./outputs.shape[1] #average loss of all samples
def relu(a):
return np.maximum(0, a)
def tanh(a):
return np.tanh(a)
def sigmoid(z):
s = 1 / (1 + np.exp(-z))
return s
def forward_pass(a, W, B, activation):
z = W.dot(a) + B
next_a = activation(z)
return next_a, z
class model():
def __init__(self, input_data):
self.learning_rate = 1
self.x = input_data
self.n, self.m = input_data.shape
print(self.n, self.m)
self.first_layer_nodes = 128
self.output_layer_nodes = 10
self.W1 = np.random.randn(self.first_layer_nodes, self.n)
self.B1 = np.random.random((self.first_layer_nodes, 1))
self.W2 = np.random.randn(self.output_layer_nodes, self.first_layer_nodes)
self.B2 = np.random.random((self.output_layer_nodes, 1))
def forward_pass(self):
self.z1 = self.W1.dot(self.x) + self.B1
self.a1 = sigmoid(self.z1)
self.z2 = self.W2.dot(self.a1) + self.B2
print('mean',self.z2.mean())
self.a2 = softmax(self.z2)
return cross_entropy_loss(self.a2, train_y)
def backward_pass(self, epoch):
dz2 = (self.a2 - train_y)
dw2 = dz2.dot(self.a1.T) * 1./self.m
db2 = np.sum(dz2, axis=1, keepdims=True)* 1./self.m
da1 = self.W2.T.dot(dz2)
dz1 = da1 * sigmoid(self.z1) * (1 - sigmoid(self.z1))
dw1 = dz1.dot(self.x.T)* 1./self.m
db1 = np.sum(dz1, axis=1, keepdims=True)* 1./self.m
print('std', dw1.std())
self.W2 = self.W2 - self.learning_rate * dw2
self.B2 = self.B2 - self.learning_rate * db2
self.W1 = self.W1 - self.learning_rate * dw1
self.B1 = self.B1 - self.learning_rate * db1
def error():
pass
m = model(train_x)
for i in range(100):
loss = m.forward_pass()
predictions = np.argmax(m.a2, axis=0)
correct = np.argmax(train_y, axis=0)
print('acc',np.sum(np.array(predictions == correct))/m.a2.shape[1])
print('loss',loss)
print('-------------------------')
m.backward_pass(i)
from keras.layers import Input, Dense
from keras.models import Sequential
km = Sequential()
km.add(Dense(128, activation='tanh', input_shape=(28*28,)))
km.add(Dense(10, activation='softmax'))
km.compile(optimizer="SGD", loss="categorical_crossentropy", metrics=["accuracy"])
km.fit(train_x, train_y, batch_size=60000, epochs=100)
###Output
Epoch 1/100
60000/60000 [==============================] - 4s 72us/step - loss: 2.4927 - acc: 0.0895
Epoch 2/100
60000/60000 [==============================] - 0s 6us/step - loss: 2.4589 - acc: 0.0936
Epoch 3/100
60000/60000 [==============================] - 0s 8us/step - loss: 2.4282 - acc: 0.1002
Epoch 4/100
60000/60000 [==============================] - 0s 6us/step - loss: 2.4000 - acc: 0.1077
Epoch 5/100
60000/60000 [==============================] - 0s 7us/step - loss: 2.3739 - acc: 0.1165
Epoch 6/100
60000/60000 [==============================] - 0s 6us/step - loss: 2.3496 - acc: 0.1258
Epoch 7/100
60000/60000 [==============================] - 0s 7us/step - loss: 2.3267 - acc: 0.1355
Epoch 8/100
60000/60000 [==============================] - 0s 6us/step - loss: 2.3050 - acc: 0.1463
Epoch 9/100
60000/60000 [==============================] - 0s 7us/step - loss: 2.2844 - acc: 0.1580
Epoch 10/100
60000/60000 [==============================] - 0s 6us/step - loss: 2.2646 - acc: 0.1701
Epoch 11/100
60000/60000 [==============================] - 0s 7us/step - loss: 2.2455 - acc: 0.1833
Epoch 12/100
60000/60000 [==============================] - 0s 6us/step - loss: 2.2271 - acc: 0.1974
Epoch 13/100
60000/60000 [==============================] - 0s 8us/step - loss: 2.2093 - acc: 0.2119
Epoch 14/100
60000/60000 [==============================] - 0s 6us/step - loss: 2.1919 - acc: 0.2261
Epoch 15/100
60000/60000 [==============================] - 0s 7us/step - loss: 2.1749 - acc: 0.2417
Epoch 16/100
60000/60000 [==============================] - 0s 6us/step - loss: 2.1584 - acc: 0.2557
Epoch 17/100
60000/60000 [==============================] - 0s 6us/step - loss: 2.1422 - acc: 0.2695
Epoch 18/100
60000/60000 [==============================] - 0s 6us/step - loss: 2.1263 - acc: 0.2831
Epoch 19/100
60000/60000 [==============================] - 0s 6us/step - loss: 2.1107 - acc: 0.2959
Epoch 20/100
60000/60000 [==============================] - 0s 6us/step - loss: 2.0953 - acc: 0.3074
Epoch 21/100
60000/60000 [==============================] - 0s 7us/step - loss: 2.0803 - acc: 0.3189
Epoch 22/100
60000/60000 [==============================] - 0s 6us/step - loss: 2.0654 - acc: 0.3308
Epoch 23/100
60000/60000 [==============================] - 0s 6us/step - loss: 2.0508 - acc: 0.3421
Epoch 24/100
60000/60000 [==============================] - 0s 6us/step - loss: 2.0364 - acc: 0.3529
Epoch 25/100
60000/60000 [==============================] - 0s 6us/step - loss: 2.0222 - acc: 0.3641
Epoch 26/100
60000/60000 [==============================] - 0s 6us/step - loss: 2.0081 - acc: 0.3743
Epoch 27/100
60000/60000 [==============================] - 0s 6us/step - loss: 1.9943 - acc: 0.3851
Epoch 28/100
60000/60000 [==============================] - 0s 6us/step - loss: 1.9807 - acc: 0.3954
Epoch 29/100
60000/60000 [==============================] - 0s 6us/step - loss: 1.9672 - acc: 0.4052
Epoch 30/100
60000/60000 [==============================] - 0s 6us/step - loss: 1.9539 - acc: 0.4157
Epoch 31/100
60000/60000 [==============================] - 0s 6us/step - loss: 1.9407 - acc: 0.4258
Epoch 32/100
60000/60000 [==============================] - 0s 6us/step - loss: 1.9278 - acc: 0.4363
Epoch 33/100
60000/60000 [==============================] - 0s 6us/step - loss: 1.9149 - acc: 0.4455
Epoch 34/100
60000/60000 [==============================] - 0s 6us/step - loss: 1.9023 - acc: 0.4549
Epoch 35/100
60000/60000 [==============================] - 0s 6us/step - loss: 1.8898 - acc: 0.4640
Epoch 36/100
60000/60000 [==============================] - 0s 6us/step - loss: 1.8774 - acc: 0.4730
Epoch 37/100
60000/60000 [==============================] - 0s 6us/step - loss: 1.8652 - acc: 0.4814
Epoch 38/100
60000/60000 [==============================] - 0s 6us/step - loss: 1.8531 - acc: 0.4893
Epoch 39/100
60000/60000 [==============================] - 0s 6us/step - loss: 1.8412 - acc: 0.4974
Epoch 40/100
60000/60000 [==============================] - 0s 6us/step - loss: 1.8294 - acc: 0.5051
Epoch 41/100
60000/60000 [==============================] - 0s 7us/step - loss: 1.8178 - acc: 0.5129
Epoch 42/100
60000/60000 [==============================] - 0s 7us/step - loss: 1.8063 - acc: 0.5209
Epoch 43/100
60000/60000 [==============================] - 0s 7us/step - loss: 1.7949 - acc: 0.5279
Epoch 44/100
60000/60000 [==============================] - 0s 6us/step - loss: 1.7837 - acc: 0.5353
Epoch 45/100
60000/60000 [==============================] - 0s 7us/step - loss: 1.7725 - acc: 0.5425
Epoch 46/100
60000/60000 [==============================] - 0s 6us/step - loss: 1.7616 - acc: 0.5493
Epoch 47/100
60000/60000 [==============================] - 0s 6us/step - loss: 1.7507 - acc: 0.5558
Epoch 48/100
60000/60000 [==============================] - 0s 6us/step - loss: 1.7400 - acc: 0.5627
Epoch 49/100
60000/60000 [==============================] - 0s 6us/step - loss: 1.7294 - acc: 0.5689
Epoch 50/100
60000/60000 [==============================] - 0s 6us/step - loss: 1.7189 - acc: 0.5748
Epoch 51/100
60000/60000 [==============================] - 0s 6us/step - loss: 1.7086 - acc: 0.5802
Epoch 52/100
60000/60000 [==============================] - 0s 7us/step - loss: 1.6984 - acc: 0.5859
Epoch 53/100
60000/60000 [==============================] - 0s 6us/step - loss: 1.6883 - acc: 0.5916
Epoch 54/100
60000/60000 [==============================] - 0s 7us/step - loss: 1.6783 - acc: 0.5966
Epoch 55/100
60000/60000 [==============================] - 0s 6us/step - loss: 1.6684 - acc: 0.6014
Epoch 56/100
60000/60000 [==============================] - 0s 6us/step - loss: 1.6586 - acc: 0.6063
Epoch 57/100
60000/60000 [==============================] - 0s 6us/step - loss: 1.6490 - acc: 0.6112
Epoch 58/100
60000/60000 [==============================] - 0s 6us/step - loss: 1.6395 - acc: 0.6152
Epoch 59/100
60000/60000 [==============================] - 0s 6us/step - loss: 1.6300 - acc: 0.6193
Epoch 60/100
60000/60000 [==============================] - 0s 6us/step - loss: 1.6207 - acc: 0.6237
Epoch 61/100
60000/60000 [==============================] - 0s 6us/step - loss: 1.6115 - acc: 0.6278
Epoch 62/100
60000/60000 [==============================] - 0s 6us/step - loss: 1.6024 - acc: 0.6317
Epoch 63/100
60000/60000 [==============================] - 0s 6us/step - loss: 1.5934 - acc: 0.6360
Epoch 64/100
60000/60000 [==============================] - 0s 6us/step - loss: 1.5845 - acc: 0.6395
Epoch 65/100
60000/60000 [==============================] - 0s 7us/step - loss: 1.5757 - acc: 0.6436
Epoch 66/100
60000/60000 [==============================] - 0s 7us/step - loss: 1.5671 - acc: 0.6476
Epoch 67/100
60000/60000 [==============================] - 0s 6us/step - loss: 1.5585 - acc: 0.6514
Epoch 68/100
60000/60000 [==============================] - 0s 6us/step - loss: 1.5500 - acc: 0.6546
Epoch 69/100
60000/60000 [==============================] - 0s 6us/step - loss: 1.5416 - acc: 0.6577
Epoch 70/100
60000/60000 [==============================] - 0s 6us/step - loss: 1.5333 - acc: 0.6609
Epoch 71/100
60000/60000 [==============================] - 0s 7us/step - loss: 1.5251 - acc: 0.6642
Epoch 72/100
60000/60000 [==============================] - 0s 6us/step - loss: 1.5170 - acc: 0.6672
Epoch 73/100
60000/60000 [==============================] - 0s 6us/step - loss: 1.5090 - acc: 0.6703
Epoch 74/100
60000/60000 [==============================] - 0s 7us/step - loss: 1.5010 - acc: 0.6733
Epoch 75/100
60000/60000 [==============================] - 0s 7us/step - loss: 1.4932 - acc: 0.6759
Epoch 76/100
60000/60000 [==============================] - 0s 7us/step - loss: 1.4855 - acc: 0.6789
Epoch 77/100
60000/60000 [==============================] - 0s 6us/step - loss: 1.4778 - acc: 0.6817
Epoch 78/100
60000/60000 [==============================] - 0s 7us/step - loss: 1.4702 - acc: 0.6843
Epoch 79/100
60000/60000 [==============================] - 0s 6us/step - loss: 1.4628 - acc: 0.6869
Epoch 80/100
60000/60000 [==============================] - 0s 7us/step - loss: 1.4554 - acc: 0.6889
Epoch 81/100
60000/60000 [==============================] - 0s 6us/step - loss: 1.4480 - acc: 0.6914
Epoch 82/100
60000/60000 [==============================] - 0s 6us/step - loss: 1.4408 - acc: 0.6933
###Markdown
Ref: https://deepnotes.io/softmax-crossentropy and http://www.wildml.com/2015/09/implementing-a-neural-network-from-scratch/
###Code
a= np.array([[1,2,3],[1,4,2]])
np.argmax(a, axis=1)
a.max(axis=1, keepdims=True).shape
###Output
_____no_output_____ |
03-find_missing.ipynb | ###Markdown
Load data
###Code
# imports added here so the notebook runs on its own
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline

remanga = pd.read_csv('./data/raw/remanga_catalog_full.csv')
mangalib = pd.read_csv('./data/raw/mangalib_full.csv')
mangalib = mangalib.drop_duplicates(subset=['link'])
gmanga_matched = pd.read_csv('data/gmanga_matched_remanga_mangalib.csv', sep=';')
gmint_matched = pd.read_csv('data/gmint_matched_remanga_mangalib.csv', sep=';')
selfmanga_matched = pd.read_csv('data/selfmanga_matched_remanga_mangalib.csv', sep=';')
gmanga_matched.head(1)
gmint_matched.head(1)
# clean data
def del_useless_cols(df):
useless = list(filter(lambda col: col.lower().count('unnamed')>0, df.columns.tolist()))
df = df[df.name != '0']
return df.drop(useless, axis=1)
gmanga_matched = del_useless_cols(gmanga_matched)
gmint_matched = del_useless_cols(gmint_matched)
selfmanga_matched = del_useless_cols(selfmanga_matched)
remanga.loc[:, 'id'] = list(range(len(remanga)))
mangalib.loc[:, 'id'] = list(range(len(mangalib)))
is_num = lambda s: (all(list(map(lambda c: c.isdigit() or c == '.', str(s)))))
def fetch_chapter(s):
# from string ГЛАВЫ(N)
s = s.lower().replace('главы (', '').strip(')')
return s
remanga.loc[~remanga.n_chapters.apply(is_num), 'n_chapters'] = remanga[~remanga.n_chapters.apply(is_num)].n_chapters.apply(fetch_chapter)
remanga['n_chapters'] = remanga.n_chapters.astype(float).astype(int)
def get_matched(df):
return df[(~df.remanga_id.isna() | (~df.mangalib_id.isna()))]
def matched(df):
n_matched = len(get_matched(df))
return n_matched
n = matched(gmint_matched)
print(f'mint matched part: {round(n / len(gmint_matched), 3)} ({n} of {len(gmint_matched)} are matched)')
n = matched(gmanga_matched)
print(f'read matched part: {round(n / len(gmanga_matched), 3)} ({n} of {len(gmanga_matched)} are matched)')
n = matched(selfmanga_matched)
print(f'read matched part: {round(n / len(selfmanga_matched), 3)} ({n} of {len(selfmanga_matched)} are matched)')
pd.merge(remanga, gmanga_matched[gmanga_matched.remanga_id == 8900], left_on='id', right_on='remanga_id')
from collections import Counter
Counter(gmanga_matched.remanga_id.tolist()).most_common()
###Output
_____no_output_____
###Markdown
Analyze matched
###Code
fetch_valid_ixs = lambda df, col: df[~df[col].isna()][col].tolist()
remanga_matched_ids = []
remanga_matched_ids += fetch_valid_ixs(gmanga_matched, 'remanga_id')
remanga_matched_ids += fetch_valid_ixs(gmint_matched, 'remanga_id')
remanga_matched_ids += fetch_valid_ixs(selfmanga_matched, 'remanga_id')
remanga[~remanga.id.isin(remanga_matched_ids)]
# len(rm_views_missing[rm_views_missing>100000]) / len(rm_views_missing)
rm_views_missing = remanga[~remanga.id.isin(remanga_matched_ids)].total_views
rm_views_matched = remanga[ remanga.id.isin(remanga_matched_ids)].total_views
n = 5000
rm_views_missing[rm_views_missing < n].hist(bins=100, alpha=0.5)
rm_views_matched[rm_views_matched < n].hist(bins=100, alpha=0.5)
plt.legend(['missing', 'matched'])
remanga[~remanga.id.isin(remanga_matched_ids)].to_csv('./data/missing/remanga_exclusive.csv', sep=';')
mangalib_matched_ids = []
mangalib_matched_ids += fetch_valid_ixs(gmanga_matched, 'mangalib_id')
mangalib_matched_ids += fetch_valid_ixs(gmint_matched, 'mangalib_id')
mangalib_matched_ids += fetch_valid_ixs(selfmanga_matched, 'mangalib_id')
mangalib[~mangalib.id.isin(mangalib_matched_ids)].to_csv('./data/missing/mangalib_exclusive.csv', sep=';')
mangalib[~mangalib.id.isin(mangalib_matched_ids)]
###Output
_____no_output_____
###Markdown
Matched ids
###Code
def exclusive(df, matched_ids):
n_matched = len(df[~df.id.isin(matched_ids)])
return n_matched
n = exclusive(remanga, remanga_matched_ids)
print(f'remanga exclusive part: {round(n / len(remanga), 3)} ({n} of {len(remanga)} are exclusive)')
n = exclusive(mangalib, mangalib_matched_ids)
print(f'mangalib exclusive part: {round(n / len(mangalib), 3)} ({n} of {len(mangalib)} are exclusive)')
# Matches with mangalib
def matched_ids_hist(series):
return series.hist(bins=800, alpha=0.8, figsize=(20, 4))
matched_ids_hist(gmanga_matched.mangalib_id),
matched_ids_hist(gmint_matched.mangalib_id),
matched_ids_hist(selfmanga_matched.mangalib_id)
plt.xlabel('id')
plt.ylabel('manga count')
plt.legend(['read', 'mint', 'self'])
# Matches with remanga
def matched_ids_hist(series):
return series.hist(bins=800, alpha=0.8, figsize=(20, 4))
matched_ids_hist(gmanga_matched.remanga_id),
matched_ids_hist(gmint_matched.remanga_id),
matched_ids_hist(selfmanga_matched.remanga_id)
plt.xlabel('id')
plt.ylabel('manga count')
###Output
_____no_output_____
###Markdown
Calc chapters diff
###Code
# add number of remanga chapters
# read
cond = ~gmanga_matched.remanga_id.isna()
remanga_ids = gmanga_matched.loc[cond].remanga_id.astype(int)
gmanga_matched.loc[cond, 'remanga_chapters_n'] = remanga.set_index('id').loc[remanga_ids].n_chapters.tolist()
# mint
cond = ~gmint_matched.remanga_id.isna()
remanga_ids = gmint_matched.loc[cond].remanga_id.astype(int)
gmint_matched.loc[cond, 'remanga_chapters_n'] = remanga.set_index('id').loc[remanga_ids].n_chapters.tolist()
# self
cond = ~selfmanga_matched.remanga_id.isna()
remanga_ids = selfmanga_matched.loc[cond].remanga_id.astype(int)
selfmanga_matched.loc[cond, 'remanga_chapters_n'] = remanga.set_index('id').loc[remanga_ids].n_chapters.tolist()
chapters_n = gmanga_matched[['chapters_count', 'remanga_chapters_n']]
chapters_n = chapters_n[(chapters_n['chapters_count'].apply(is_num)) &
(chapters_n['remanga_chapters_n'].apply(is_num))].astype(float)
chapters_n_mint = gmint_matched[['chapters_count', 'remanga_chapters_n']]
chapters_n_mint = chapters_n_mint[(chapters_n_mint['chapters_count'].apply(is_num)) &
(chapters_n_mint['remanga_chapters_n'].apply(is_num))].astype(float)
chapters_n_self = selfmanga_matched[['chapters_count', 'remanga_chapters_n']]
chapters_n_self = chapters_n_self[(chapters_n_self['chapters_count'].apply(is_num)) &
(chapters_n_self['remanga_chapters_n'].apply(is_num))].astype(float)
chapters_n = pd.concat((chapters_n, chapters_n_mint, chapters_n_self))
chapters_diff = chapters_n['chapters_count'] - chapters_n['remanga_chapters_n']
CHAPTERS_DIFF = 0
chapters_diff[abs(chapters_diff)>=CHAPTERS_DIFF].hist(bins=100, rwidth=0.9)
plt.suptitle('Разница в главах между grouple и остальными')
plt.xlabel('Разница')
plt.ylabel('Количество тайтлов')
plt.savefig('pics/chapters_diff.png')
CHAPTERS_DIFF = 0
chapters_diff[abs(chapters_diff)>=CHAPTERS_DIFF].hist(bins=50, rwidth=0.7)
plt.ylim(0, 90)
plt.suptitle('Разница в главах между grouple и остальными, детализация')
plt.xlabel('Разница')
plt.ylabel('Количество тайтлов')
plt.savefig('pics/chapters_diff_detailed.png')
safe_cast = lambda s: None if not is_num(s) else float(s)
# gmanga_matched = gmanga_matched[gmanga_matched.,?]
gmanga_matched.loc[:, 'chapters_diff'] = gmanga_matched['remanga_chapters_n'].apply(safe_cast) - gmanga_matched['chapters_count'].apply(safe_cast)
gmanga_matched.to_csv('data/chapters_diff/gmanga_chapters_diff.csv', sep=';', index=False)
gmanga_matched.head(3)
gmint_matched.loc[:, 'chapters_diff'] = gmint_matched['remanga_chapters_n'].apply(safe_cast) - gmint_matched['chapters_count'].apply(safe_cast)
gmint_matched.to_csv('data/chapters_diff/gmint_chapters_diff.csv', sep=';', index=False)
gmint_matched.head(3)
selfmanga_matched.loc[:, 'chapters_diff'] = selfmanga_matched['remanga_chapters_n'].apply(safe_cast) - selfmanga_matched['chapters_count'].apply(safe_cast)
selfmanga_matched.to_csv('data/chapters_diff/selfmanga_chapters_diff.csv', sep=';', index=False)
selfmanga_matched.head(3)
###Output
_____no_output_____ |
eda_hist.ipynb | ###Markdown
How to easily plot histograms of features
###Code
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
%matplotlib inline
URL = "https://objectstorage.us-ashburn-1.oraclecloud.com/n/bigdatadatasciencelarge/b/hosted-ds-datasets/o/synthetic%2Forcl_attrition.csv"
data_orig = pd.read_csv(URL)
data_orig.head()
# select the columns you want to display
features = [
"Age",
"TravelForWork",
"SalaryLevel",
"CommuteLength",
"Directs",
"EmployeeNumber",
"EnvironmentSatisfaction",
"Gender",
"HourlyRate",
"JobInvolvement",
"JobLevel",
"JobRole",
"JobSatisfaction",
"MaritalStatus",
"MonthlyIncome",
"MonthlyRate",
"NumCompaniesWorked",
"Over18",
"OverTime",
"PercentSalaryHike",
"PerformanceRating",
"RelationshipSatisfaction",
"WeeklyWorkedHours",
"StockOptionLevel",
"YearsinIndustry",
"TrainingTimesLastYear",
"WorkLifeBalance",
"YearsOnJob",
"YearsAtCurrentLevel",
"YearsSinceLastPromotion",
"YearsWithCurrManager",
]
# need to change to get the best size for visualization
FIGSIZE = (20, 20)
NROWS = 6
NCOLS = 6
plt.figure(figsize=FIGSIZE)
#
# here we do a loop over all columns and use subplot to arrange in a grid of plots
#
for i, col in enumerate(features):
plt.subplot(NROWS, NCOLS, i + 1)
# using Seaborn
sns.histplot(data=data_orig[col])
plt.grid(True)
plt.show()
###Output
_____no_output_____ |
.ipynb_checkpoints/Progress Report-checkpoint.ipynb | ###Markdown
An Exploration of Neural Net Capabilities
###Code
import numpy as np
import matplotlib
from matplotlib import pyplot as plt
matplotlib.style.use('ggplot')
import IPython as ipynb
%matplotlib inline
###Output
_____no_output_____
###Markdown
Abstract

A neural network is a computational analogy to the methods by which humans think. Their design builds upon the idea of a neuron either firing or not firing based on some stimuli and learning whether or not it made the right choice. To allow for richer results with less complicated networks, the boolean response is replaced with a continuous analog, the sigmoid function. The network learns by taking our definition of how incorrect it is, in the form of a so-called cost function, and finding the most effective way to reduce the function to a minimum, i.e. be the least incorrect. It is ideal to minimize the number of training sessions that must be used to get a maximum accuracy due to computational cost and time. In this project, the minimum number of training sets to reach a sufficient accuracy will be explored for multiple standard cost functions. As well, a new cost function may be explored along with a method for generating cost functions. And finally, given a sufficient amount of time, the network will be tested with nonconformant input, in this case, scanned and partitioned handwritten digits.

Base Question

Does it work? Does it work well? The first step in building a neural net is simply understanding and building the base algorithms. There are three things that define a network:

Shape

The shape of a network merely describes how many neurons there are and where they are. There are typically three locations that neurons live in: The Input Layer, The Hidden Layer, and The Output Layer. The Hidden Layer can be composed of more than one layer, but by convention, it is referred to as one layer. The Input Layer is significant because it takes the inputs. It typically does not do any discrimination before passing it along, but there is nothing barring that from occurring. The Output Layer produces a result. In most cases, the result still requires some interpretation, but is in its final form as far as the network is concerned. Each of the layers can have as many neurons as are needed, but it is favorable to reduce the number to the bare minimum for both computational reasons and for accuracy.

Weights

Weights live in between individual neurons and dictate how much the decision made by a neuron in the layer before it matters to the next neuron's decision. A good analogy might be that Tom (a neuron) has two friends, Sally (a neurette?) and Joe (also a neuron). They are good friends, so Tom likes to ask Sally and Joe's opinion about decisions he is about to make. However, Joe is a bit crazy, likes to go out and party, etc., so Tom trusts Sally's opinion a bit more than Joe's. If Tom quantified how much he trusted Sally or Joe, that quantification would be called a weight.

Biases

Biases are tied to each neuron and its decision making process. A bias in the boolean sense acts as a threshold at which point a true is returned. In the continuous generalization of the boolean process, the bias corresponds to the threshold at which point a value above 0.5 is returned. Back to our analogy with Tom and his friends, a bias might constitute how strongly each person feels about their opinion on a subject. So when Tom asks Sally and Joe about their opinion about someone else, call her Julie, Sally responds with a fairly neutral response because she doesn't know Julie, so her bias is around 0. Joe, on the other hand, used to date Julie and they had a bad break up, so he responds quite negatively, and somewhat unintuitively, his bias is very high.
(See the graph of the sigmoid function below with zero bias) In other words, he has a very high threshold for speaking positively about Julie.
###Code
z = np.linspace(-10, 10, 100)
f=plt.figure(figsize=(15, 5))
plt.subplot(1, 2,1)
plt.plot(z, 1/(1+np.exp(-z)));
plt.xlabel("Input to Nueron")
plt.title("Sigmoid Response with Bias=0")
plt.ylabel("Sigmoid Response");
plt.subplot(1, 2,2)
plt.plot(z, 1/(1+np.exp(-z+5)));
plt.xlabel("Input to Nueron")
plt.title("Sigmoid Response with Bias=5")
plt.ylabel("Sigmoid Response");
###Output
_____no_output_____
###Markdown
So, how does it work? There are three core algorithms behind every neural net: Feed Forward, Back Propagation/Error Computation, and Gradient Descent.

Feed Forward

The Feed Forward algorithm could be colloquially called the "Gimme an Answer" algorithm. It sends the inputs through the network and returns the outputs. We can break it down step by step and see what is really going on:

Inputs

Each input value is fed into the corresponding input neuron, that's it. In a more sophisticated network, some inputs could be rejected based on a bias criterion, but for now we leave them alone.

Channels

Each input neuron is connected to every neuron in the first hidden layer through a channel; to see this visually, look at the diagram below. Each channel is given a weight that is multiplied by the value passed on by the input neuron, and is then summed with all the channels feeding the same neuron and passed into the hidden layer neuron. The channels can be thought of as pipes allowing water to flow from each input neuron to each hidden layer neuron. The weights in our network represent the diameter of these pipes (is it large or small). As well, pipes converge to a hidden layer neuron and dump all of their water into a basin representing the neuron.

Neurons

Once a value reaches a neuron that is not an input neuron, the value is passed through a sigmoid function similar to those above with the proper bias for that neuron. The sigmoid response is the value that gets passed on to the next layer of neurons.

Repeat

The *Channels* and *Neurons* steps are repeated through each layer until the final output is reached.
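As a rough numerical sketch of one feed-forward step (the sizes and random values below are arbitrary and are not the network built later in this report):

```python
import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

x = np.random.rand(3, 1)        # 3 input neurons
W = np.random.randn(4, 3)       # channel weights: 4 hidden neurons, each fed by 3 inputs
b = np.random.randn(4, 1)       # one bias per hidden neuron
hidden = sigmoid(W.dot(x) + b)  # weighted sums through the channels, then the sigmoid response
```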
###Code
ipynb.display.Image("http://neuralnetworksanddeeplearning.com/images/tikz11.png")
###Output
_____no_output_____
###Markdown
Back Propagation/Error Computation

Back Propagation is one of the scary buzz words in the world of neural nets; it doesn't have to be so scary. I prefer to call it error computation to be more transparent because, in essence, that is what it does. Let's dig in!

Cost Function

The cost function is a major factor in how your network learns. It defines, numerically, how wrong your network is. The function itself is typically defined by some sort of difference between your network's output and the actual correct answer. Because it is a function of the output, it is also a function of every weight and bias in your network. This means that it could have potentially thousands of independent variables. In its simplest form, a cost function should have some quite definite properties: when the output is near the correct answer, the cost function should be near zero; a small change in any single weight or bias should result in a small change in the cost function; and the cost function must be non-negative everywhere.

Error Computation

Through a set of nifty equations which will not be shown here, once you have a cost function and take the gradient of said cost function with respect to the output, you are able to calculate a metric for the error of the output. Through some clever deductions based on the fact that a small change in any independent variable results in a small change in the cost function, we can calculate that same metric for each independent variable. (That is the Back Propagation bit.) You can then calculate, through further clever deductions, the partial derivative of the cost function with respect to each independent variable. The partial derivative of the cost function with respect to each variable will come in handy for when we do *Gradient* Descent.

Gradient Descent

Gradient Descent uses the fact that we want to minimize our cost function together with the idea of the gradient as the path of steepest descent.

Down the Mountain

Gradient Descent uses the gradients we calculated in the Error Computation step and tells us how we should change our variables if we want to reach a minimum in the fastest way possible. The algorithm uses the fact that the gradient with respect to an independent variable represents the component of the vector pointing in the direction of most change in that variable's dimension. Because even Euler couldn't imagine a thousand dimensional space, we draw some intuition from the familiar three dimensional case. Suppose that you are dropped at a random location on a mountain. Suppose further that you are blind (or it is so foggy that you can't see anything). How do you find the fastest way to the bottom? Well, the only thing that you can do is sense the slope that seems to be the steepest and walk down it. But you are a mathematician and have no grasp on estimating things, so you calculate the gradient with respect to your left-right direction and your front-back direction. You see that if you take a half step to the left and a quarter step forward you will move the furthest downwards. Wait! Why just one step? First of all, mountains are complicated surfaces and their slopes change from place to place, so continuing to make the same steps may not take you the most downwards, or even downwards at all. Secondly, you are blind! (or it is really foggy) If you start running or jumping down the slope, you may overshoot a minimum and have to stop and turn around. In the actual gradient descent algorithm, the step size is represented by something called the learning rate.

A step in the right direction is performed in the algorithm by reducing each individual variable by this learning rate multiplied by the gradient with respect to that particular variable. After doing this thousands of times, we find the local minima of our cost function.
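A tiny sketch of that update rule on a one-variable cost function (the cost, starting point, and learning rate below are invented purely for illustration):

```python
def cost_gradient(w):
    return 2 * (w - 3)          # derivative of the cost (w - 3)**2

w = 10.0                        # random starting point on the "mountain"
learning_rate = 0.1             # the step size
for _ in range(100):
    w = w - learning_rate * cost_gradient(w)  # step against the gradient
print(w)                        # approaches 3, the minimum of the cost
```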
###Code
ipynb.display.Image("http://blog.datumbox.com/wp-content/uploads/2013/10/gradient-descent.png")
###Output
_____no_output_____ |
reseau-(01062019)-DILLMANN.ipynb | ###Markdown
THE NETWORK: Manipulating network packets. The goal of this exercise is to see a few notions about networking and to learn how to send a "ping" request to a computer, using its IP address and its MAC address. There is a big difference between these two addresses: let's say that the **IP** address is the name of a machine on the web, while the **MAC** address (the acronym for "media access control address") is a hardware address that is specific to the physical device transmitting the data. We will first import the *scapy* library, which contains a large number of tools for studying the **TCP-IP** network. - The acronym **TCP** stands for "Transmission Control Protocol" - The acronym **IP** stands for "Internet Protocol"
###Code
from scapy.all import *
###Output
_____no_output_____
###Markdown
Exchanging packets with a web server is far from simple: it involves the HTTP **protocol**, the TCP **handshake**, the IP **header**. In short, we will stay more basic and see how to send a request to a device and how to detect requests that are made across a network. - A **request** is a polite demand, formulated according to the rules computers use to understand one another. Do not expect to understand the content of a request for now; these are formulas that use a coded language. - A **protocol** is a set of communication rules. - A **handshake** is the act of putting the sender in contact with the receiver; we also say between the host and the client. - A **header** is the information that introduces the beginning of an IP message. Let's therefore start by creating and displaying an Ethernet frame in the Scapy interpreter:
###Code
ma_trame = Ether()
ma_trame.show()
###Output
###[ Ethernet ]###
dst = ff:ff:ff:ff:ff:ff
src = b8:e8:56:34:af:08
type = 0x9000
###Markdown
Here we can break a message down as follows: - `dst` is the MAC address of the recipient - `src` is the MAC address of the sender - `type` indicates the protocol carried by the frame (0x800, for instance, means IPv4). The ping command lets us know whether a host, designated by its IP address, exists. In its simplified version, the *ping* command consists of sending an "echo-request" packet to the host and reporting whether an "echo-reply" packet was sent back. So let's forge (yes, "*forge*" is the term used by network engineers) an echo-request packet to ping the machine whose address is given by the `dst` parameter.
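A minimal sketch of setting those fields explicitly (the MAC addresses here are placeholders, not real devices on this network):

```python
frame = Ether(dst='ff:ff:ff:ff:ff:ff', src='00:11:22:33:44:55', type=0x800)
frame.show()
```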
###Code
mon_ping = Ether() / IP(dst='192.168.0.1') / ICMP()
mon_ping.show()
###Output
###[ Ethernet ]###
dst = ff:ff:ff:ff:ff:ff
src = b8:e8:56:34:af:08
type = 0x800
###[ IP ]###
version = 4
ihl = None
tos = 0x0
len = None
id = 1
flags =
frag = 0
ttl = 64
proto = icmp
chksum = None
src = 192.168.0.28
dst = 192.168.0.1
\options \
###[ ICMP ]###
type = echo-request
code = 0
chksum = None
id = 0x0
seq = 0x0
###Markdown
Let's now see whether our router replies to this with an echo-reply packet.
###Code
sendp(mon_ping)
###Output
Sent 1 packets.
###Markdown
If the message "Sent 1 packets" appears, the send itself certainly went through correctly, but that tells us nothing about the reception. To both send and receive, we have to use the `srp()` function. It returns two objects: the first contains the packets sent together with their associated replies, the other contains the packets that received no reply.
###Code
rep,non_rep = srp(mon_ping)
###Output
Begin emission:
Finished sending 1 packets.
Received 466 packets, got 1 answers, remaining 0 packets
###Markdown
We can see that we got one answer, zero failures, and that our reply is a packet! Let's examine it.
###Code
rep.show()
###Output
0000 Ether / IP / ICMP 192.168.0.28 > 192.168.0.1 echo-request 0 ==> Ether / IP / ICMP 192.168.0.1 > 192.168.0.28 echo-reply 0 / Padding
###Markdown
The result is a pair (a tuple with two values). To display the packet we sent (our ICMP echo-request), we therefore use `rep[0][0].show()`, and for the packet received in reply: `rep[0][1].show()`
###Code
rep[0][0].show()
rep[0][1].show()
###Output
###[ Ethernet ]###
dst = b8:e8:56:34:af:08
src = ac:84:c9:1f:ee:72
type = 0x800
###[ IP ]###
version = 4
ihl = 5
tos = 0x0
len = 28
id = 10411
flags =
frag = 0
ttl = 64
proto = icmp
chksum = 0xd0c8
src = 192.168.0.1
dst = 192.168.0.28
\options \
###[ ICMP ]###
type = echo-reply
code = 0
chksum = 0xffff
id = 0x0
seq = 0x0
###[ Padding ]###
load = '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
###Markdown
Let's now try to send a ping to an unknown recipient at IP address 10.1.0.201; normally we should not receive any reply.
###Code
rep = sr1(IP(dst='10.1.0.201') / ICMP(), timeout=0.5)
###Output
Begin emission:
Finished sending 1 packets.
Received 193 packets, got 0 answers, remaining 1 packets
###Markdown
Scanning a range of addresses: The advantage of Python is that it can automate a command like ping over a range of IP addresses. We will take the first 100 machines of the network (as in the code below) and wait two seconds each time to make sure the communication has time to go through. Note that the last number of the IP address lets us spray a range of about a hundred machines with requests.
###Code
adresses_machines = '192.168.0.1-100'
rep,non_rep = sr( IP(dst=adresses_machines) / ICMP() , timeout=2)
for elem in rep : # elem represents a pair (sent packet, received packet)
if elem[1].type == 0 : # 0 <=> echo-reply
print('{} a renvoyé un echo-reply '.format(elem[1].src))
###Output
Begin emission:
Finished sending 100 packets.
Received 1274 packets, got 5 answers, remaining 95 packets
192.168.0.1 a renvoyé un echo-reply
192.168.0.10 a renvoyé un echo-reply
192.168.0.11 a renvoyé un echo-reply
192.168.0.25 a renvoyé un echo-reply
192.168.0.38 a renvoyé un echo-reply
###Markdown
Using a "sniffer" on the networkWe can also use `sniff` to monitor network activity. This is what we call a sniffer, because the function reports on all the sites being visited, and it can be very useful for finding out who is sending you unwanted requests. Online harassment is indeed more widespread than we think, especially when it is invisible.Note that with each refresh the activity should change, because the network is like a sea in motion, carrying requests and responses back and forth...
###Code
pkt = sniff(count=3, filter='tcp', prn=Packet.summary)
# If we want to know a bit more about the last request
pkt[2]
###Output
_____no_output_____ |
1. Machine Learning Foundations/Week 5/Recommending Songs/Recommending_Songs_ScikitLearn.ipynb | ###Markdown
Import ScikitLearn, Pandas and Numpy
###Code
import sklearn
import pandas as pd
import numpy as np
###Output
_____no_output_____
###Markdown
1. Read the Dataset using Pandas
###Code
data = pd.read_csv('data/song_data.csv')
data
###Output
_____no_output_____
###Markdown
2. Exploratory Data Analysis
###Code
data.head()
data.info()
data.nunique()
###Output
_____no_output_____
###Markdown
3. Data Preprocessing Assignment 1. Counting unique users
###Code
from operator import itemgetter
artists = ['Kanye West', 'Foo Fighters', 'Taylor Swift', 'Lady GaGa']
pair_list = []
for artist in artists:
score = data[data['artist']==artist]['user_id'].nunique()
pair_list.append((artist, score))
print("Number of users listen to ", artist," = ", score)
print( )
print(pair_list)
print( )
result = max(pair_list, key = itemgetter(1))[0]
print("The artist with maximum score is : " + result)
result = min(pair_list, key = itemgetter(1))[0]
print("The artist with minimum score is : " + result)
###Output
Number of users listen to Kanye West = 2522
Number of users listen to Foo Fighters = 2055
Number of users listen to Taylor Swift = 3246
Number of users listen to Lady GaGa = 2928
[('Kanye West', 2522), ('Foo Fighters', 2055), ('Taylor Swift', 3246), ('Lady GaGa', 2928)]
The artist with maximum score is : Taylor Swift
The artist with minimum score is : Foo Fighters
###Markdown
2. Using groupby-aggregate to find the most popular and least popular artist
###Code
popularity = (data.groupby(['artist'], sort=False)['listen_count'].sum()).sort_values(ascending=False)
popularity
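# The most and least played artists can also be read off directly (added note):
# popularity.idxmax(), popularity.idxmin()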
###Output
_____no_output_____ |
Programming Challenge/solution.ipynb | ###Markdown
the model
###Code
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report,confusion_matrix
from sklearn.metrics import accuracy_score
accuracy = 0
for i in range(800):
X = trainingDF.drop(['id', 'y'],axis=1)
y = trainingDF['y']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30)
rfc = RandomForestClassifier(n_estimators=800)
rfc.fit(X_train,y_train)
predictions = rfc.predict(X_test)
if accuracy_score(y_test,predictions) > accuracy:
our_model = rfc
accuracy = accuracy_score(y_test,predictions)
predictions = our_model.predict(X_test)
print(classification_report(y_test,predictions))
###Output
_____no_output_____
###Markdown
apply the model
###Code
#Converting T-F to int & grades to float
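# Note (general pandas caveat, added comment): chained indexing like evaluationDF['x5'][i] = ...
# can trigger SettingWithCopyWarning; evaluationDF.loc[i, 'x5'] = ... is the usual alternative.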
for i in range(len(evaluationDF)):
if evaluationDF['x5'][i]==True:
evaluationDF['x5'][i]=1.0
elif evaluationDF['x5'][i]==False:
evaluationDF['x5'][i]=0.0
if evaluationDF['x6'][i]=='A': evaluationDF['x6'][i]=5.0
if evaluationDF['x6'][i]=='B': evaluationDF['x6'][i]=4.0
if evaluationDF['x6'][i]=='C': evaluationDF['x6'][i]=3.0
if evaluationDF['x6'][i]=='D': evaluationDF['x6'][i]=2.0
if evaluationDF['x6'][i]=='E': evaluationDF['x6'][i]=1.0
if evaluationDF['x6'][i]=='Fx': evaluationDF['x6'][i]=0.5
if evaluationDF['x6'][i]=='F': evaluationDF['x6'][i]=0.0
evaluationDF['x6'] = evaluationDF['x6'].astype('float64')
evaluationDF['x5'] = evaluationDF['x5'].astype('float64')
solution = our_model.predict(evaluationDF.drop(['Unnamed: 0'],axis=1))
with open('101892.txt', 'w', newline='') as f:
for i in range(len(solution)):
f.write(solution[i])
f.write('\n')
###Output
_____no_output_____ |
notebooks/monte_carlo_dev/.ipynb_checkpoints/ATB_FullEmptyRatio-checkpoint.ipynb | ###Markdown
Estimate the amount of time that ATBs are full This notebook evaluates ATBs in our study. It identifies the top importers (by number of transfers) and top exporters (also by number of transfers). It also matches the names from the DOE with MMSI values in [ATB_MMSI](https://docs.google.com/spreadsheets/d/1dlT0JydkFG43LorqgtHle5IN6caRYjf_3qLrUYqANDY/edit) and creates a dataframe with information that compares DOE transfer quantities to cargo capacity by vessel. This notebook is a complement to [ATB_FullEmptyRatio_AISplot.ipynb](https://github.com/MIDOSS/analysis-rachael/blob/main/notebooks/monte_carlo/ATB_FullEmptyRatio_AISplot.ipynb) in which I plot up AIS ship tracks for the top 3 importers and top 3 exporters (total of 5 vessels) to address whether vessel operations extend up to CAD or if they are represented entirely in the DOE database. Comparing import vs. export transfers between the trans-boundary and non-transboundary vessels ought to give a clearer sense of representative values to use. Lastly, I collate information in `Top_six_ATBs_by_DOEtransfers.xlsx` to give an overview of import vs. export transfer behaviors for each of these five vessels and the WA marine terminals that they are documented as servicing in the DOE database. Use `analysis-rachael/env/monte_carlo.yaml` to create an environment for this notebook:``` conda env create -f analysis-rachael/env/monte_carlo.yaml ```or, to activate this environment, use``` conda activate monte_carlo ```To deactivate an active environment, use``` conda deactivate ```
###Code
import pandas
import numpy
import matplotlib.pyplot as plt
import yaml
from pathlib import Path
import datetime
# import functions for querying DOE and monte-carlo dataframes
from monte_carlo_utils import get_DOE_df, get_DOE_atb
# Dept. of Ecology data files
DOE_dir = Path('/Users/rmueller/Data/MIDOSS/DeptOfEcology/')
DOE_2018_xlsx = DOE_dir/'MuellerTrans4-30-20.xlsx'
# ATB spreadsheet
atb_dir = Path('/Users/rmueller/Projects/MIDOSS/AIS/origin_destination_analysis')
atb_xlsx = atb_dir/'ATB_MMSIs.xlsx'
atb_out_xlsx = atb_dir/'ATB_'
# Facility names and lat/lon information file
facilities_xlsx = Path(
'/Users/rmueller/Data/MIDOSS/marine_transport_data/'
'Oil_Transfer_Facilities.xlsx'
)
# Load Oil Attribution file
oil_attribution_file = '/Users/rmueller/Data/MIDOSS/marine_transport_data/oil_attribution.yaml'
with open(oil_attribution_file) as file:
oil_attrs = yaml.load(file, Loader=yaml.Loader)
# convert cargo capacity to gallons since DOE transfers are in gallons
bbl2gal = 42
atb_df={}
atb_df['MMSI'] = pandas.read_excel(
atb_xlsx,
sheet_name='DOE ATBs',
usecols="A,B"
)
atb_df['Capacity'] = pandas.read_excel(
atb_xlsx,
sheet_name=0,
nrows=21,
usecols="A,F,I,J"
)
atb_df['Capacity']
atb_df['MMSI']['MMSI'][2,14,16,17]=numpy.NaN
atb_df['MMSI'] = atb_df['MMSI'].drop(index=13)
atb_df['MMSI'] = pandas.merge(
left = atb_df['MMSI'],
right = atb_df['Capacity'],
how='left',
on = 'MMSI'
)
atb_df['MMSI']
atb_df['MMSI']['Cargo Capacity'][0,1] = 155000
atb_df['MMSI']['Cargo Capacity'][3] = 185000
atb_df['MMSI']['Cargo Capacity'][4,6,9] = 178000
atb_df['MMSI']['Cargo Capacity'][7] = 150000
atb_df['MMSI']['Cargo Capacity'][11] = 80000
atb_df['MMSI']['Cargo Capacity'] = bbl2gal * atb_df['MMSI']['Cargo Capacity']
atb_df['MMSI'] = atb_df['MMSI'].drop(columns=["Vessel length",'Fuel Capacity','MMSI'])
[imports, exports]=get_DOE_atb(DOE_2018_xlsx, facilities_xlsx, transfer_type = 'cargo', facilities='selected')
len(imports)
len(exports)
exports.head()
# imports
import_df = {}
# define way to group dataframe
aggregation_functions = {'AntID': 'first', 'Deliverer': 'first', 'TransferQtyInGallon': 'sum'}
imports_sm = imports[['AntID','Deliverer','TransferQtyInGallon']]
# first group by transfer ID
imports_sm_AndID = imports_sm.groupby(imports['AntID']).aggregate(aggregation_functions)
# now group by deliverer
import_df = imports_sm_AndID[['Deliverer','TransferQtyInGallon']].groupby(
'Deliverer').count().rename(columns={'TransferQtyInGallon':'Count'})
import_df.head()
import_df = pandas.merge(
left = import_df,
right = imports_sm_AndID[['Deliverer','TransferQtyInGallon']].groupby('Deliverer').sum(),
how='left',
on = 'Deliverer'
)
import_df['AverageQty'] = import_df['TransferQtyInGallon']/import_df['Count']
len(import_df)
imports_sm_AndID.head(2)
import_df.sort_values(by='Count', ascending=False)
# exports
export_df = {}
aggregation_functions = {'AntID': 'first', 'Receiver': 'first', 'TransferQtyInGallon': 'sum'}
exports_sm = exports[['AntID','Receiver','TransferQtyInGallon']]
exports_sm_AndID = exports_sm.groupby(exports['AntID']).aggregate(aggregation_functions)
export_df = exports_sm_AndID[['Receiver','TransferQtyInGallon']].groupby(
'Receiver').count().rename(columns={'TransferQtyInGallon':'Count'})
export_df.head()
export_df = pandas.merge(
left = export_df,
right = exports_sm_AndID[['Receiver','TransferQtyInGallon']].groupby('Receiver').sum(),
how='left',
on = 'Receiver'
)
export_df['AverageQty'] = export_df['TransferQtyInGallon']/export_df['Count']
len(export_df)
export_df.sort_values(by='Count', ascending=False)
###Output
_____no_output_____
###Markdown
Create a list consisting of the top three importers and the top three exporters and evaluate where imports/exports are going to/from
###Code
top_imports = import_df.iloc[import_df['Count'].argsort()[-3:]].index.tolist()
top_exports = export_df.iloc[export_df['Count'].argsort()[-3:]].index.tolist()
top_dawgs = top_imports + list(set(top_exports) - set(top_imports))
print(top_dawgs)
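# Equivalent pandas alternative for the top-3 lists (added note):
# import_df['Count'].nlargest(3).index.tolist()
# export_df['Count'].nlargest(3).index.tolist()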
for dawg in top_dawgs:
print(dawg)
print(imports.loc[
imports.Deliverer == dawg,
['Receiver','Product']
].groupby('Receiver').count()
)
print('')
for dawg in top_dawgs:
print(dawg)
print(exports.loc[
exports.Receiver == dawg,
['Deliverer','Product']
].groupby('Deliverer').count()
)
print('')
###Output
ITB ISLAND TRADER
Empty DataFrame
Columns: [Product]
Index: []
ATB BARGE DBL 185
Product
Deliverer
BP Cherry Point Refinery 94
ATB BARGE ALL ABOARD FOR A CURE
Product
Deliverer
BP Cherry Point Refinery 2
Phillips 66 Ferndale Refinery 36
Shell Puget Sound Refinery 2
ATB BARGE ONEDREAM
Product
Deliverer
BP Cherry Point Refinery 47
Shell Puget Sound Refinery 7
ATB BARGE 550-2
Product
Deliverer
Phillips 66 Ferndale Refinery 79
###Markdown
Investigate dates for ATB BARGE DBL 185 exports from Cherry Point
###Code
exports.loc[
(exports.Receiver == 'ATB BARGE DBL 185') &
(exports.Deliverer == 'BP Cherry Point Refinery'),
['StartDateTime']
]
# Plot up shapefile of december movement for ATB BARGE DBL 185 compare to ATB BARGE ONEDREAM
for dawg in top_dawgs:
print(dawg)
print(imports.loc[
imports.Deliverer == dawg,
['Receiver']
].groupby('Receiver').count().index.tolist()
)
for dawg in top_dawgs:
print(dawg)
print(exports.loc[
exports.Receiver == dawg,
['Deliverer']
].groupby('Deliverer').count().index.tolist()
)
###Output
ITB ISLAND TRADER
[]
ATB BARGE DBL 185
['BP Cherry Point Refinery']
ATB BARGE ALL ABOARD FOR A CURE
['BP Cherry Point Refinery', 'Phillips 66 Ferndale Refinery', 'Shell Puget Sound Refinery']
ATB BARGE ONEDREAM
['BP Cherry Point Refinery', 'Shell Puget Sound Refinery']
ATB BARGE 550-2
['Phillips 66 Ferndale Refinery']
###Markdown
Create histograms for "top dawg" transfers
###Code
bin_values = numpy.arange(0,5e7,5e7/20)
nbins = 20
max_transfer = 1e7 #note: ATB BARGE DBL 185 recorded 1 transfer as 5e7
bin_values = numpy.arange(0,max_transfer,max_transfer/20)
export_qty = {}
import_qty={}
for dawg in top_dawgs:
print(dawg)
export_qty[dawg]=exports.loc[
exports.Receiver == dawg,
['TransferQtyInGallon','AntID']
].groupby('AntID').sum()
import_qty[dawg]=imports.loc[
imports.Deliverer == dawg,
['TransferQtyInGallon','AntID']
].groupby('AntID').sum()
fig, ax = plt.subplots(1,2)
# the histogram of the data
n, bins, patches = ax[0].hist(import_qty[dawg],bins = bin_values)
n, bins, patches = ax[1].hist(export_qty[dawg],bins = bin_values)
ax[0].set_xlabel('Transfer Qty (gallons)')
ax[0].set_ylabel('Number of transfers')
# add useful information
ax[0].text(max_transfer-.5e6,28,'import', horizontalalignment='right')
ax[0].text(max_transfer-.5e6,26,'median volume:', horizontalalignment='right')
ax[0].text(max_transfer-.5e6,24,f'{numpy.median(import_qty[dawg]):.2e} gallons', horizontalalignment='right')
ax[0].text(max_transfer-.5e6,22,f'{len(import_qty[dawg])} transfers', horizontalalignment='right')
ax[1].text(max_transfer-.5e6,28,'export ', horizontalalignment='right')
ax[1].text(max_transfer-.5e6,26,'median volume:', horizontalalignment='right')
ax[1].text(max_transfer-.5e6,24,f'{numpy.median(export_qty[dawg]):.2e} gallons', horizontalalignment='right')
ax[1].text(max_transfer-.5e6,22,f'{len(export_qty[dawg])} transfers', horizontalalignment='right')
# ax[0].set_title(f'{dawg} (median in:{numpy.median(import_qty[dawg])}, median out:{numpy.median(export_qty[dawg])})')
for numax in [0,1]:
ax[numax].set_ylim(0,30)
ax[numax].set_xlim(0,max_transfer)
plt.show()
atb_df['MMSI'].head()
export_df = pandas.merge(
left = export_df,
right = atb_df['MMSI'],
how='left',
left_on = 'Receiver',
right_on = 'DELIVERER'
)
import_df = pandas.merge(
left = import_df,
right = atb_df['MMSI'],
how='left',
left_on = 'Deliverer',
right_on = 'DELIVERER'
)
export_df['PercentFull']=export_df['AverageQty']/export_df['Cargo Capacity']
import_df['PercentFull']=import_df['AverageQty']/import_df['Cargo Capacity']
export_df
import_df
fill_out = numpy.nanmedian(export_df['PercentFull'].tolist())
fill_in = numpy.nanmedian(import_df['PercentFull'].tolist())
net_in = numpy.sum(import_df['TransferQtyInGallon'])
net_out = numpy.sum(export_df['TransferQtyInGallon'])
net_transferred = (net_in + net_out)
print(f'Export fill percentage: {fill_out:.3f}')
print(f'Number of export transfers {sum(export_df["Count"])}')
print(f'Import fill percentage: {fill_in:.3f}')
print(f'Number of import transfers {sum(import_df["Count"])}')
print(f'full/empty ratio ((import transfers) / (export transfers)): {sum(import_df["Count"])/sum(export_df["Count"]):.3f}')
print(f'full/empty ratio (weighted fill percentage): {fill_out*net_out/net_transferred+fill_in*net_in/net_transferred:.3f}')
###Output
Export fill percentage: 0.746
Number of export transfers 297
Import fill percentage: 0.278
Number of import transfers 185
full/empty ratio ((import transfers) / (export transfers)): 0.623
full/empty ratio (weighted fill percentage): 0.647
###Markdown
Identify the marine terminal with the greatest number of ATB and barge transfers (TBD: haven't yet started this analysis)- What is the behavior at this marine terminal?- How many vessels both deliver to and receive from this terminal?- How many just deliver?- How many just receive? Identify ATBs that deliver to and receive from the same terminal ATBs that both deliver and receive: - `ATB BARGE DBL 185` (32/43)- `ATB BARGE ONEDREAM` (23/39)- `ATB BARGE ALL ABOARD FOR A CURE` (40/30)
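A minimal sketch of the terminal-level tally described above (added illustration, not run in the original analysis). It uses only the ATB cargo-transfer dataframes `imports` and `exports` built earlier in this notebook (marine terminals appear as `Receiver` on imports and `Deliverer` on exports); the analogous barge dataframes would be needed to answer the full ATB-and-barge question.
###Code
# Count distinct DOE transfers (AntID) per marine terminal for the ATB dataframes above
terminal_in = imports.groupby('Receiver')['AntID'].nunique().rename('transfers_received')
terminal_out = exports.groupby('Deliverer')['AntID'].nunique().rename('transfers_delivered')
terminal_counts = pandas.concat([terminal_in, terminal_out], axis=1).fillna(0)
terminal_counts['total'] = terminal_counts.sum(axis=1)
terminal_counts.sort_values('total', ascending=False).head()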
###Code
print(32/43)
print(23/39)
print(30/40)
###Output
0.7441860465116279
0.5897435897435898
0.75
|
instruments/barrier_options.ipynb | ###Markdown
 Barrier Options
###Code
import pyvacon.analytics as analytics
import datetime as dt
import pyvacon.tools.converter as converter
import pyvacon.tools.enums as enums
import pyvacon.marketdata.testdata as mkt_testdata
import pyvacon.instruments.testdata as ins_testdata
import math
from scipy.stats import norm
import pyvacon.marketdata.plot as mkt_plot #import module for plotting functionality
#the next lin is a jupyter internal command to show the matplotlib graphs within the notebook
%matplotlib inline
def exp(x):
return math.exp(x)
def cdf(x):
return norm.cdf(x)
def log(x):
return math.log(x)
def sqrt(x):
return math.sqrt(x)
###Output
_____no_output_____
###Markdown
Definition of Barrier OptionsBarrier options are options where the payoff depends on whether the underlying's spot price reaches a certain level during a certain period of time. Barrier options can be classified into knock-out options and knock-in options. A knock-in option comes into existence only when the underlying's spot price reaches the defined barrier; a knock-out option ceases to exist if the underlying's spot price reaches the defined barrier. The different barrier options including their payoff profile are presented in this notebook. For a detailed description please refer to Hull, *Options, futures, and other derivatives, 8th Edition,* 2012, pp. 579-581.The following code defines the valuation formula for barrier options assuming a non-dividend paying stock.
###Code
def BarrierOptionPricer(_Type, S0, K, H, r, q, sigma, T, t=0):
_lambda = (r-q+sigma**2/2)/sigma**2
y = (log(H**2/(S0*K)))/(sigma*sqrt(T-t))+_lambda*sigma*sqrt(T-t)
x1 = (log(S0/H))/(sigma*sqrt(T-t))+_lambda*sigma*sqrt(T-t)
y1 = (log(H/S0))/(sigma*sqrt(T-t))+_lambda*sigma*sqrt(T-t)
d1= (log(S0/K)+(r+sigma**2/2)*(T-t))/(sigma*sqrt(T-t))
d2 = d1-sigma*sqrt(T-t)
p = -1*(S0*cdf(-1*d1)-K*exp(-r*(T-t))*cdf(-1*d2))
c = 1*(S0*cdf(1*d1)-K*exp(-r*(T-t))*cdf(1*d2))
cdi = S0*exp(-q*(T-t))*(H/S0)**(2*_lambda)*cdf(y)-K*exp(-r*(T-t))*(H/S0)**(2*_lambda-2)*cdf(y-sigma*sqrt(T-t))
cdo = S0*cdf(x1)*exp(-q*(T-t))-K*exp(-r*(T-t))*cdf(x1-sigma*sqrt(T-t))-S0*exp(-q*(T-t))*(H/S0)**(2*_lambda)*cdf(y1)+K*exp(-r*(T-t))*(H/S0)**(2*_lambda-2)*cdf(y1-sigma*sqrt(T-t))
cui = S0*cdf(x1)*exp(-q*(T-t))-K*exp(-r*(T-t))*cdf(x1-sigma*sqrt(T-t))-S0*exp(-q*(T-t))*(H/S0)**(2*_lambda)*(cdf(-y)-cdf(-y1))+K*exp(-r*(T-t))*(H/S0)**(2*_lambda-2)*(cdf(-y+sigma*sqrt(T-t))-cdf(-y1+sigma*sqrt(T-t)))
pui = -S0*exp(-q*(T-t))*(H/S0)**(2*_lambda)*cdf(-y)+K*exp(-r*(T-t))*(H/S0)**(2*_lambda-2)*cdf(-y+sigma*sqrt(T-t))
puo = -S0*cdf(-x1)*exp(-q*(T-t))+K*exp(-r*(T-t))*cdf(-x1+sigma*sqrt(T-t))+S0*exp(-q*(T-t))*(H/S0)**(2*_lambda)*cdf(-y1)-K*exp(-r*(T-t))*(H/S0)**(2*_lambda-2)*cdf(-y1+sigma*math.sqrt(T-t))
pdi = -S0*cdf(-x1)*exp(-q*(T-t))+K*exp(-r*(T-t))*cdf(-x1+sigma*sqrt(T-t))+S0*exp(-q*(T-t))*(H/S0)**(2*_lambda)*(cdf(y)-cdf(y1))-K*exp(-r*(T-t))*(H/S0)**(2*_lambda-2)*(cdf(y-sigma*sqrt(T-t))-cdf(y1-sigma*sqrt(T-t)))
if _Type =='cdi' and H<K and S0>H:
return cdi
if _Type =='cdi' and H>=K and S0>H:
return c-cdo
if _Type =='cdi' and S0<=H:
return c
if _Type =='cdo' and H<K and S0>H:
return c-cdi
if _Type =='cdo' and H<K and S0<=H:
return 0
if _Type =='cdo' and H>=K and S0>H:
return cdo
if _Type =='cdo' and H>=K and S0<=H:
return 0
if _Type =='cui' and H>K:
return cui
if _Type =='cui' and H<=K:
return c
if _Type =='cuo' and H>K and S0<H:
return c-cui
if _Type =='cuo' and H>K and S0>=H:
return 0
if _Type =='cuo' and H<=K:
return 0.0
if _Type =='pui' and H>=K and S0<H:
return pui
if _Type =='pui' and H<K and S0<H:
return p-puo
if _Type =='pui' and S0>=H:
return p
if _Type =='puo':
if S0>=H:
return 0
else:
if _Type =='puo' and H>=K:
return p-pui
if _Type =='puo' and H<K:
return puo
if _Type =='pdi' and H>=K:
return p
if _Type =='pdi' and H<K:
return pdi
if _Type =='pdo' and H>=K:
return 0
if _Type =='pdo' and H<K and S0>H:
return p-pdi
if _Type =='pdo' and H<K and S0<=H:
return 0
if _Type =='c':
return c
if _Type =='p':
return p
spots = analytics.vectorDouble()
S0 = 30
n=0.1
while n <=100:
spots.append(n)
n=n+0.1
K = 50
H1 = 40
H2 = 60
r = 0.05
q = 0
sigma = 0.3
T = 1
t = 0
###Output
_____no_output_____
###Markdown
Barrier call options Down-and-in callA down-and-in call is a call option which comes into existence if the stock price hits a barrier which is below the initial asset price.If the barrier $H$ is less than or equal to the strike price $K$, the formula to price a down-and-in call is defined as$$c_{di}=S_0e^{-qT}(H/S_0)^{2\lambda}N(y)-Ke^{-rT}(H/S_0)^{2\lambda-2}N(y-\sigma\sqrt{T}),$$where \begin{align}\lambda &= \frac{r-q+\sigma^2/2}{\sigma^2} \\y &= \frac{\ln[H^2/(S_0K)]}{\sigma\sqrt{T}}+\lambda\sigma\sqrt{T}. \\\end{align}$S_0$ is the underlying's spot price, $K$ is the strike price, $H$ is the barrier level, $\sigma$ is the underlying's volatility, $r$ is the risk-free interest rate, $q$ is the borrowing rate, and $T$ is the time to maturity. $N(x)$ is the cumulative probability distribution function for a standardized normal distribution.If the barrier is greater than or equal to the strike price, the formula for the down-and-in call is$$c_{di}=c-c_{do}.$$
###Code
# Assumption that H has not been reached yet. If H is reached, product becomes normal plain vanilla call.
cdi_price1 = analytics.vectorDouble()
for s in range(len(spots)):
cdi_price1.append(BarrierOptionPricer('cdi', spots[s], K, H1, r, q, sigma, T, t))
vanilla_call1 = analytics.vectorDouble()
for s in range(len(spots)):
vanilla_call1.append(BarrierOptionPricer('c', spots[s], K, H1, r, q, sigma, T, t))
cdi_price2 = analytics.vectorDouble()
for s in range(len(spots)):
cdi_price2.append(BarrierOptionPricer('cdi', spots[s], K, H2, r, q, sigma, T, t))
fig, (cdi1, cdi2) = mkt_plot.plt.subplots(1,2, figsize=(12,4),dpi=100,num=1)
cdi1.plot(spots, cdi_price1, 'k', label='Down-and-in call')
cdi1.plot(spots, vanilla_call1, 'y:', label='Plain vanilla call')
cdi1.set_title('Down-and-in call H<K')
cdi1.set_xlabel('Spot')
cdi1.set_ylabel('Price')
cdi1.axvline(x=K, label='Strike', ls= '--', c='g')
cdi1.axvline(x=H1, label='Barrier', ls=':', c='r')
legend = cdi1.legend(loc='best', shadow=True, fontsize='medium')
#fig, cdi2 = mkt_plot.plt.subplots()
cdi2.plot(spots, cdi_price2, 'k', label='Down-and-in call')
cdi2.plot(spots, vanilla_call1, 'y:', label='Plain vanilla call')
cdi2.set_title('Down-and-in call H>K')
cdi2.set_xlabel('Spot')
cdi2.set_ylabel('Price')
cdi2.axvline(x=K, label='Strike', ls= '--', c='g')
cdi2.axvline(x=H2, label='Barrier', ls=':', c='r')
legend = cdi2.legend(loc='best', shadow=True, fontsize='medium')
###Output
_____no_output_____
###Markdown
Down-and-out callA down-and-out call is a call option that ceases to exist when the stock price hits a barrier which is below the initial asset price.If $H \leq K$, the formula for the down-and-out call is $$c_{do}=c-c_{di},$$if $H \geq K$, the formula is $$c_{do}=S_0N(x_1)e^{-qT}-Ke^{-rT}N(x_1-\sigma\sqrt{T})-S_0e^{-qT}(H/S_0)^{2\lambda}N(y_1)+Ke^{-rT}(H/S_0)^{2\lambda-2}N(y_1-\sigma\sqrt{T})$$where \begin{align}x_1 &=\frac{\ln(S_0/H)}{\sigma\sqrt{T}}+\lambda\sigma\sqrt{T} \\y_1 &=\frac{\ln(H/S_0)}{\sigma\sqrt{T}}+\lambda\sigma\sqrt{T}. \\\end{align}
###Code
vanilla_call1 = analytics.vectorDouble()
for s in range(len(spots)):
vanilla_call1.append(BarrierOptionPricer('c', spots[s], K, H1, r, q, sigma, T, t))
cdo_price1 = analytics.vectorDouble()
for s in range(len(spots)):
cdo_price1.append(BarrierOptionPricer('cdo', spots[s], K, H1, r, q, sigma, T, t))
cdo_price2 = analytics.vectorDouble()
for s in range(len(spots)):
cdo_price2.append(BarrierOptionPricer('cdo', spots[s], K, H2, r, q, sigma, T, t))
fig, (cdo1, cdo2) = mkt_plot.plt.subplots(1,2, figsize=(12,4),dpi=100,num=1)
cdo1.plot(spots, cdo_price1, 'k', label='Down-and-out call')
cdo1.plot(spots, vanilla_call1, 'y:', label='Plain vanilla call')
cdo1.set_title('Down-and-out call H<K')
cdo1.set_xlabel('Spot')
cdo1.set_ylabel('Price')
cdo1.axvline(x=K, label='Strike', ls= '--', c='g')
cdo1.axvline(x=H1, label='Barrier', ls=':', c='r')
legend = cdo1.legend(loc='best', shadow=True, fontsize='medium')
#fig, cdo2 = mkt_plot.plt.subplots()
cdo2.plot(spots, cdo_price2, 'k', label='Down-and-out call')
cdo2.plot(spots, vanilla_call1, 'y:', label='Plain vanilla call')
cdo2.set_title('Down-and-out call H>K')
cdo2.set_xlabel('Spot')
cdo2.set_ylabel('Price')
cdo2.axvline(x=K, label='Strike', ls= '--', c='g')
cdo2.axvline(x=H2, label='Barrier', ls=':', c='r')
legend = cdo2.legend(loc='best', shadow=True, fontsize='medium')
###Output
_____no_output_____
###Markdown
Up-and-in callAn up-and-in call is a call option which comes into existence if the spot price hits a barrier which is above the initial asset price.In the case of $H \leq K$ the value of the up-and-in call $c_{ui}$ is $c$.When $H > K$ the formula for the up-and-in call is defined as$$c_{ui}=S_0N(x_1)e^{-qT}-Ke^{-rT}N(x_1-\sigma\sqrt{T})-S_0e^{-qT}(H/S_0)^{2\lambda}[N(-y)-N(-y_1)]+Ke^{-rT}(H/S_0)^{2\lambda-2}[N(-y+\sigma\sqrt{T})-N(-y_1+\sigma\sqrt{T})].$$
###Code
# Assumption that H has not been reached yet. If the barrier is hit, the it is a plain vanilla call.
vanilla_call1 = analytics.vectorDouble()
for s in range(len(spots)):
vanilla_call1.append(BarrierOptionPricer('c', spots[s], K, H1, r, q, sigma, T, t))
cui_price1 = analytics.vectorDouble()
for s in range(len(spots)):
cui_price1.append(BarrierOptionPricer('cui', spots[s], K, H1, r, q, sigma, T, t))
cui_price2 = analytics.vectorDouble()
for s in range(len(spots)):
cui_price2.append(BarrierOptionPricer('cui', spots[s], K, 80, r, q, sigma, T, t))
fig, (cui1, cui2) = mkt_plot.plt.subplots(1,2, figsize=(12,4),dpi=100,num=1)
cui1.plot(spots, cui_price1, 'k', label='Up-and-in call')
cui1.plot(spots, vanilla_call1, 'y:', label='Plain vanilla call')
cui1.set_title('Up-and-in call H<K')
cui1.set_xlabel('Spot')
cui1.set_ylabel('Price')
cui1.axvline(x=K, label='Strike', ls= '--', c='g')
cui1.axvline(x=H1, label='Barrier', ls=':', c='r')
legend = cui1.legend(loc='best', shadow=True, fontsize='medium')
#fig, cui2 = mkt_plot.plt.subplots()
cui2.plot(spots, cui_price2, 'k', label='Up-and-in call')
cui2.plot(spots, vanilla_call1, 'y:', label='Plain vanilla call')
cui2.set_title('Up-and-in call H>K')
cui2.set_xlabel('Spot')
cui2.set_ylabel('Price')
cui2.axvline(x=K, label='Strike', ls= '--', c='g')
cui2.axvline(x=80, label='Barrier', ls=':', c='r')
legend = cui2.legend(loc='best', shadow=True, fontsize='medium')
###Output
_____no_output_____
###Markdown
Up-and-out callAn up-and-out call is a call option which ceases to exist when the stock price hits a barrier which is above the initial asset price.When $H \leq K$, the value of the up-and-out call is zero.When $H > K$, the formula for the up-and-out call is defined as $$c_{uo}=c-c_{ui}.$$
###Code
vanilla_call1 = analytics.vectorDouble()
for s in range(len(spots)):
vanilla_call1.append(BarrierOptionPricer('c', spots[s], K, H1, r, q, sigma, T, t))
cuo_price1 = analytics.vectorDouble()
for s in range(len(spots)):
cuo_price1.append(BarrierOptionPricer('cuo', spots[s], K, H1, r, q, sigma, T, t))
cuo_price2 = analytics.vectorDouble()
for s in range(len(spots)):
cuo_price2.append(BarrierOptionPricer('cuo', spots[s], K, H2, r, q, sigma, T, t))
fig, (cuo1, cuo2) = mkt_plot.plt.subplots(1,2, figsize=(12,4),dpi=100,num=1)
cuo1.plot(spots, cuo_price1, 'k', label='Up-and-out call')
#cuo1.plot(spots, vanilla_call1, 'y:', label='Plain vanilla call')
cuo1.set_title('Up-and-out call H<K')
cuo1.set_xlabel('Spot')
cuo1.set_ylabel('Price')
cuo1.axvline(x=K, label='Strike', ls= '--', c='g')
cuo1.axvline(x=H1, label='Barrier', ls=':', c='r')
legend = cuo1.legend(loc='best', shadow=True, fontsize='medium')
#fig, cuo2 = mkt_plot.plt.subplots()
cuo2.plot(spots, cuo_price2, 'k', label='Up-and-out call')
#cuo2.plot(spots, vanilla_call1, 'y:', label='Plain vanilla call')
cuo2.set_title('Up-and-out call H>K')
cuo2.set_xlabel('Spot')
cuo2.set_ylabel('Price')
cuo2.axvline(x=K, label='Strike', ls= '--', c='g')
cuo2.axvline(x=H2, label='Barrier', ls=':', c='r')
legend = cuo2.legend(loc='best', shadow=True, fontsize='medium')
###Output
_____no_output_____
###Markdown
Barrier put options Down-and-in putA down-and-in put is a put option which comes into existence if the spot price hits a barrier which is below the initial asset price.When the barrier is greater than or equal to the strike price, the value of the down-and-in put is equal to a plain vanilla put $p$. If the barrier is less than the strike price, the formula for the down-and-in put is defined as $$p_{di}=-S_0N(-x_1)e^{-qT}+Ke^{-rT}N(-x_1+\sigma\sqrt{T})+S_0e^{-qT}(H/S_0)^{2\lambda}[N(y)-N(y_1)]-Ke^{-rT}(H/S_0)^{2\lambda-2}[N(y-\sigma\sqrt{T})-N(y_1-\sigma\sqrt{T})].$$
###Code
# H<K: As soon as the barrier is hit, the down-and-in put becomes a plain vanilla put.
vanilla_put = analytics.vectorDouble()
for s in range(len(spots)):
vanilla_put.append(BarrierOptionPricer('p', spots[s], K, H1, r, q, sigma, T, t))
pdi_price1 = analytics.vectorDouble()
for s in range(len(spots)):
pdi_price1.append(BarrierOptionPricer('pdi', spots[s], K, 30, r, q, sigma, T, t))
pdi_price2 = analytics.vectorDouble()
for s in range(len(spots)):
pdi_price2.append(BarrierOptionPricer('pdi', spots[s], K, H2, r, q, sigma, T, t))
fig, (pdi1, pdi2) = mkt_plot.plt.subplots(1,2, figsize=(12,4),dpi=100,num=1)
pdi1.plot(spots, pdi_price1, 'k', label='Down-and-in put')
pdi1.plot(spots, vanilla_put, 'y:', label='Plain vanilla put')
pdi1.set_title('Down-and-in put H<K')
pdi1.set_xlabel('Spot')
pdi1.set_ylabel('Price')
pdi1.axvline(x=K, label='Strike', ls= '--', c='g')
pdi1.axvline(x=H1, label='Barrier', ls=':', c='r')
legend = pdi1.legend(loc='best', shadow=True, fontsize='medium')
#fig, pdi2 = mkt_plot.plt.subplots()
pdi2.plot(spots, pdi_price2, 'k', label='Down-and-in put')
pdi2.plot(spots, vanilla_put, 'y:', label='Plain vanilla put')
pdi2.set_title('Down-and-in put H>K')
pdi2.set_xlabel('Spot')
pdi2.set_ylabel('Price')
pdi2.axvline(x=K, label='Strike', ls= '--', c='g')
pdi2.axvline(x=H2, label='Barrier', ls=':', c='r')
legend = pdi2.legend(loc='best', shadow=True, fontsize='medium')
###Output
_____no_output_____
###Markdown
Down-and-out putA down-and-out put is a put option which ceases to exist when the spot price hits a barrier which is below the initial asset price.When the barrier is greater than or equal to the strike price, the value of the down-and-out put is zero. If the barrier is less than the strike price, the formula for the down-and-out put is defined as$$p_{do} = p - p_{di}.$$
###Code
vanilla_put = analytics.vectorDouble()
for s in range(len(spots)):
vanilla_put.append(BarrierOptionPricer('p', spots[s], K, H1, r, q, sigma, T, t))
pdo_price1 = analytics.vectorDouble()
for s in range(len(spots)):
pdo_price1.append(BarrierOptionPricer('pdo', spots[s], K, H1, r, q, sigma, T, t))
pdo_price2 = analytics.vectorDouble()
for s in range(len(spots)):
pdo_price2.append(BarrierOptionPricer('pdo', spots[s], K, H2, r, q, sigma, T, t))
fig, (pdo1, pdo2) = mkt_plot.plt.subplots(1,2, figsize=(12,4),dpi=100,num=1)
pdo1.plot(spots, pdo_price1, 'k', label='Down-and-out put')
#pdo1.plot(spots, vanilla_put, 'y:', label='Plain vanilla put')
pdo1.set_title('Down-and-out put H<K')
pdo1.set_xlabel('Spot')
pdo1.set_ylabel('Price')
pdo1.axvline(x=K, label='Strike', ls= '--', c='g')
pdo1.axvline(x=H1, label='Barrier', ls=':', c='r')
legend = pdo1.legend(loc='best', shadow=True, fontsize='medium')
#fig, pdo2 = mkt_plot.plt.subplots()
pdo2.plot(spots, pdo_price2, 'k', label='Down-and-out put')
#pdo2.plot(spots, vanilla_put, 'y:', label='Plain vanilla put')
pdo2.set_title('Down-and-out put H>K')
pdo2.set_xlabel('Spot')
pdo2.set_ylabel('Price')
pdo2.axvline(x=K, label='Strike', ls= '--', c='g')
pdo2.axvline(x=H2, label='Barrier', ls=':', c='r')
legend = pdo2.legend(loc='best', shadow=True, fontsize='medium')
###Output
_____no_output_____
###Markdown
Up-and-in putAn up-and-in put is a put option that comes into existence if the spot price hits a barrier which is above the initial asset price.When $H \geq K$, the formula for the up-and-in put is defined as$$ p_{ui}=-S_0e^{-qT}(H/S_0)^{2\lambda}N(-y)+Ke^{-rT}(H/S_0)^{2\lambda-2}N(-y+\sigma\sqrt{T})$$when $H<K$ the formula is $$ p_{ui}=p-p_{uo}.$$
###Code
vanilla_put = analytics.vectorDouble()
for s in range(len(spots)):
vanilla_put.append(BarrierOptionPricer('p', spots[s], K, H1, r, q, sigma, T, t))
pui_price1 = analytics.vectorDouble()
for s in range(len(spots)):
pui_price1.append(BarrierOptionPricer('pui', spots[s], K, H1, r, q, sigma, T, t))
pui_price2 = analytics.vectorDouble()
for s in range(len(spots)):
pui_price2.append(BarrierOptionPricer('pui', spots[s], K, H2, r, q, sigma, T, t))
fig, (pui1, pui2) = mkt_plot.plt.subplots(1,2, figsize=(12,4),dpi=100,num=1)
pui1.plot(spots, pui_price1, 'k', label='Up-and-in put')
pui1.plot(spots, vanilla_put, 'y:', label='Plain vanilla put')
pui1.set_title('Up-and-in put H<K')
pui1.set_xlabel('Spot')
pui1.set_ylabel('Price')
pui1.axvline(x=K, label='Strike', ls= '--', c='g')
pui1.axvline(x=H1, label='Barrier', ls=':', c='r')
legend = pui1.legend(loc='best', shadow=True, fontsize='medium')
#fig, pui2 = mkt_plot.plt.subplots()
pui2.plot(spots, pui_price2, 'k', label='Up-and-in put')
pui2.plot(spots, vanilla_put, 'y:', label='Plain vanilla put')
pui2.set_title('Up-and-in H>K')
pui2.set_xlabel('Spot')
pui2.set_ylabel('Price')
pui2.axvline(x=K, label='Strike', ls= '--', c='g')
pui2.axvline(x=H2, label='Barrier', ls=':', c='r')
legend = pui2.legend(loc='best', shadow=True, fontsize='medium')
###Output
_____no_output_____
###Markdown
Up-and-out putAn up-and-out put is a put option which ceases to exist when the spot price hits a barrier which is above the initial asset price.When $H \geq K$, the formula for the up-and-out put is defined as$$ p_{uo}=p-p_{ui},$$when $H<K$ the formula is $$p_{uo}=-S_0N(-x_1)e^{-qT}+Ke^{-rT}N(-x_1+\sigma\sqrt{T})+S_0e^{-qT}(H/S_0)^{2\lambda}N(-y_1)-Ke^{-rT}(H/S_0)^{2\lambda-2}N(-y_1+\sigma\sqrt{T}).$$
###Code
vanilla_put = analytics.vectorDouble()
for s in range(len(spots)):
vanilla_put.append(BarrierOptionPricer('p', spots[s], K, H1, r, q, sigma, T, t))
puo_price1 = analytics.vectorDouble()
for s in range(len(spots)):
puo_price1.append(BarrierOptionPricer('puo', spots[s], K, H1, r, q, sigma, T, t))
puo_price2 = analytics.vectorDouble()
for s in range(len(spots)):
puo_price2.append(BarrierOptionPricer('puo', spots[s], K, H2, r, q, sigma, T, t))
fig, (puo1, puo2) = mkt_plot.plt.subplots(1,2, figsize=(12,4),dpi=100,num=1)
puo1.plot(spots, puo_price1, 'k', label='Up-and-out put')
puo1.plot(spots, vanilla_put, 'y:', label='Plain vanilla put')
puo1.set_title('Up-and-out put H<K')
puo1.set_xlabel('Spot')
puo1.set_ylabel('Price')
puo1.axvline(x=K, label='Strike', ls= '--', c='g')
puo1.axvline(x=H1, label='Barrier', ls=':', c='r')
legend = puo1.legend(loc='best', shadow=True, fontsize='medium')
#fig, puo2 = mkt_plot.plt.subplots()
puo2.plot(spots, puo_price2, 'k', label='Up-and-out put')
puo2.plot(spots, vanilla_put, 'y:', label='Plain vanilla put')
puo2.set_title('Up-and-out H>K')
puo2.set_xlabel('Spot')
puo2.set_ylabel('Price')
puo2.axvline(x=K, label='Strike', ls= '--', c='g')
puo2.axvline(x=H2, label='Barrier', ls=':', c='r')
legend = puo2.legend(loc='best', shadow=True, fontsize='medium')
###Output
_____no_output_____ |
StudentsPerformance-checkpoint.ipynb | ###Markdown
Student Data
###Code
# Import libraries
import pandas as pd
import numpy as np
pd.options.mode.chained_assignment = None # default='warn'
# Show all columns
pd.set_option("display.max_columns", 20)
# Read data
df_excel = pd.read_csv("StudentsPerformance.csv")
# Show the dataframe
df_excel
# Pandas descriptive statistics method
df_excel.describe()
# Compute the mean
df_excel["math score"].mean()
# Maximum value
df_excel["math score"].max()
# Minimum value
df_excel["math score"].min()
# Count values
df_excel["math score"].count()
# compute the average of the 3 scores (math, reading and writing)
df_excel["average"] = (df_excel["math score"] + df_excel["reading score"] + df_excel["writing score"])/3
# another way to compute the average of the 3 scores (math, reading and writing)
# adds a column called average
df_excel["average"] = df_excel.mean(axis=1)
# Show the first 5 rows
df_excel.head()
# Count by value
df_excel["gender"].value_counts()
# if-condition method
df_excel["pass/fail"] = np.where(df_excel["average"] > 70, "Pass", "Fail")
df_excel.sample(10)
# multiple if-conditions method
conditions = [
(df_excel["average"]>=90),
(df_excel["average"]>=80) & (df_excel["average"]<90),
(df_excel["average"]>=70) & (df_excel["average"]<80),
(df_excel["average"]>=60) & (df_excel["average"]<70),
(df_excel["average"]>=50) & (df_excel["average"]<60),
(df_excel["average"]<50)
]
# Letter grades
values = ["A", "B", "C", "D", "E", "F"]
# Column assigned according to the grade conditions
df_excel["grades"] = np.select(conditions, values)
df_excel.sample(10)
# keep only the rows for the female gender
df_female = df_excel[df_excel['gender'] == 'female']
df_female
#df_female.value_counts()
#df_male = df_excel[df_excel['gender'] == 'male']
#df_male
#df_male.value_counts()
# keep only the rows for the female gender and group B
df_sumifs = df_excel[(df_excel['gender'] == 'female') & (df_excel['race/ethnicity'] == 'group B')]
df_sumifs["sum"] = df_sumifs["math score"] + df_sumifs["reading score"] + df_sumifs["writing score"]
df_sumifs.sample(10)
excel_1 = "StudentsPerformance.csv"
df_excel_1 = pd.read_csv(excel_1)
df_excel_1 = df_excel_1.reset_index()
df_excel_1 = df_excel_1.rename(columns={"index":"id"})
df_excel_1
excel_2 = "LanguageScore.csv"
df_excel_2 = pd.read_csv(excel_2)
df_excel_2 = df_excel_2.reset_index()
df_excel_2
# math score of the record with id 0
df_excel_1.loc[df_excel_1["id"] == 0, "math score"]
# Join two tables on a common column (id)
df_excel_3 = pd.merge(df_excel_1, df_excel_2, on = "id", how="right")
df_excel_3.sample(10)
# replace missing values
df_excel_3["language score"] = df_excel_3["language score"].fillna("0")
# alternative: df_excel_3["language score"].fillna("0", inplace = True)
df_excel_3.sample(15)
# concatenate two tables
df_excel_3 = pd.concat(
[df_excel_1.set_index("id"), df_excel_2.set_index("id")], axis = 1
)
df_excel_3
###Output
_____no_output_____
###Markdown
Pivot tables
###Code
df_excel = pd.read_csv("StudentsPerformance.csv")
# pivot table with the sum of the math and writing scores
df_excel.pivot_table(index = "race/ethnicity", values = ["math score", "writing score"], aggfunc = "sum")
# improve the data formatting
df_excel["gender"].str.title()
# extract data from a column
df_excel["group"] = df_excel["race/ethnicity"].str.extract(r'([A-Z])')
df_excel["group"]
# identify empty cells
df_excel["gender"].isnull()
df_excel.count()
###Output
_____no_output_____
###Markdown
Basic plots
###Code
import matplotlib.pyplot as plt
df_pivot = df_excel.pivot_table(index = "race/ethnicity", values = ["math score", "writing score"], aggfunc = "sum")
df_pivot
df_plot = df_pivot.reset_index()
df_plot
# bar plot
plt.bar(df_plot["race/ethnicity"], df_plot["math score"])
plt.show()
# piechart
plt.pie(df_plot["writing score"], labels=df_plot["race/ethnicity"], autopct= "%.0f %%")
plt.show()
###Output
_____no_output_____ |
notebooks/1-SageMaker_model1_basic_convnet.ipynb | ###Markdown
Before defining the model, we will define an fbeta metric to monitor during training, which we will use as a proxy for the average AUROC across the 11 labels
###Code
def fbeta(y_true, y_pred, beta=2):
# taken from https://machinelearningmastery.com/how-to-develop-a-convolutional-neural-network-to-classify-satellite-photos-of-the-amazon-rainforest/
#clip predictions (incase our output layer is not bound to [0,1])
y_pred = backend.clip(y_pred, 0, 1)
# calculate tp, fp and fn for each class
tp = backend.sum(backend.round(backend.clip(y_true * y_pred, 0, 1)), axis=1)
fp = backend.sum(backend.round(backend.clip(y_pred - y_true, 0, 1)), axis=1)
fn = backend.sum(backend.round(backend.clip(y_true - y_pred, 0, 1)), axis=1)
# calculate precision
p = tp / (tp + fp + backend.epsilon())
# calculate recall
r = tp / (tp + fn + backend.epsilon())
# calculate fbeta, averaged across each class
bb = beta ** 2
fbeta_score = backend.mean((1 + bb) * (p * r) / (bb * p + r + backend.epsilon()))
return fbeta_score
def create_new_model(input_dim, output_dim):
input_tensor = Input(shape=(input_dim,input_dim,1))
y = layers.Conv2D(32, (3,3), padding='same', activation='relu')(input_tensor)
y = layers.MaxPooling2D(2, strides=2)(y)
y = layers.Conv2D(32, (3,3), padding='same', activation='relu')(y)
y = layers.MaxPooling2D(2, strides=2)(y)
y = layers.Dropout(0.25)(y)
y = layers.Conv2D(64, (3,3), padding='same', activation='relu')(y)
y = layers.MaxPooling2D(2, strides=2)(y)
y = layers.Conv2D(128, (3,3), padding='same', activation='relu')(y)
y = layers.MaxPooling2D(2, strides=2)(y)
y = layers.Dropout(0.25)(y)
y = layers.Flatten()(y)
y = layers.Dense(512, activation= 'relu')(y)
y = layers.Dropout(0.5)(y)
output_tensor = layers.Dense(output_dim, activation='sigmoid')(y)
model = Model(input_tensor, output_tensor)
model.compile(optimizers.rmsprop(lr=0.0001, decay=1e-6),
loss="binary_crossentropy", metrics = [fbeta])
return model
raw_data_path = Path('/Users/Shrinikesh/Documents/personal-projects/kaggle/ranzcr_clip/data/raw')
raw_image_data_path = Path('/Users/Shrinikesh/Documents/personal-projects/kaggle/ranzcr_clip/data/raw/train')
models_dir = Path('/Users/Shrinikesh/Documents/personal-projects/kaggle/ranzcr_clip/models')
train_data_path = raw_data_path / 'train.csv'
train_df = pd.read_csv(train_data_path)
train_df.shape
###Output
_____no_output_____
###Markdown
We will drop PatientID for now as it is not included in the test images; perhaps we can incorporate that information later. Moreover, as we need the filenames in full to use the flow_from_dataframe function for training, we will append the .jpg extension to all the StudyInstanceUIDs.
###Code
def append_ext(fn):
return fn+".jpg"
del train_df['PatientID']
train_df['StudyInstanceUID'] = train_df['StudyInstanceUID'].apply(append_ext)
train_df.head()
class_names = list(train_df.columns)
class_names.remove('StudyInstanceUID')
###Output
_____no_output_____
###Markdown
We will create a class to index mapping so that the model will work irrespective of the order of the columns
###Code
class_mapping = {class_names[i]:i for i in range(len(class_names))}
class_mapping
###Output
_____no_output_____
###Markdown
We will use K-fold cross-validation for training (plain `KFold` below; the split is not stratified)
###Code
# create a function to one hot encode each example's
# labels as an array using the mapping
def one_hot_encode(example_labels_dict, mapping=class_mapping):
encoding = np.zeros(len(mapping), dtype='uint8')
for label, value in example_labels_dict.items():
if value:
encoding[mapping[label]] = 1
return encoding
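# Example usage (added illustration): encode the labels of the first training row as a length-11 vector
# one_hot_encode(train_df.iloc[0][class_names].to_dict())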
Y = train_df[class_names]
n_splits = 3
kf = KFold(n_splits = n_splits, random_state = 7, shuffle=True)
###Output
_____no_output_____
###Markdown
Define the percentage of the overall data to use for training here (since using all the data for training might take too long)
###Code
train_use_percent = 0.2
n_samples = int(np.ceil(train_df.shape[0]*train_use_percent))
n_samples
###Output
_____no_output_____
###Markdown
We will use ImageDataGenerator to turn our images into batches of preprocessed training and validation images during each fold
###Code
idg = ImageDataGenerator(rescale=1./255)
###Output
_____no_output_____
###Markdown
We also need to save the best model during each fold, so we will also create a function here that builds the model filename for each fold
###Code
def get_model_name(k):
return 'model_{}.h5'.format(str(k))
###Output
_____no_output_____
###Markdown
MAIN TRAINING LOOP
###Code
VALIDATION_FBETA = []
VALIDATION_LOSS = []
logs_dir = models_dir / 'logs' / model_type
logs_dir.mkdir(parents=True, exist_ok=True)
save_dir = models_dir / model_type
save_dir.mkdir(parents=True, exist_ok=True)
fold_var = 1
input_dim = 256
output_dim = 11
history_log_dict = defaultdict(int)
for train_index, val_index in kf.split(np.zeros(n_samples),Y[:n_samples]):
# get the data that will be used for training in this fold
training_data = train_df.iloc[train_index]
# get the data that will be used for validation in this fold
validation_data = train_df.iloc[val_index]
# now set up the generators to feed the data in batches to
# the model during training
train_data_generator = idg.flow_from_dataframe(training_data,
directory=raw_image_data_path,
x_col = 'StudyInstanceUID',
y_col=class_names,
target_size = (input_dim,input_dim),
color_mode='grayscale',
class_mode='raw',
batch_size=32,
shuffle=True,
seed=42)
valid_data_generator = idg.flow_from_dataframe(validation_data,
directory=raw_image_data_path,
x_col = 'StudyInstanceUID',
y_col=class_names,
target_size = (input_dim,input_dim),
color_mode='grayscale',
class_mode='raw',
batch_size=32,
shuffle=True,
seed=42)
model = create_new_model(input_dim, output_dim)
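    # (assumption) stubbing _get_distribution_strategy below is a commonly used workaround for a
    # TensorBoard-callback incompatibility with standalone Keras models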
model._get_distribution_strategy = lambda: None
model_filepath = str(save_dir / get_model_name(fold_var))
# Create callbacks below
callbacks_list = [
keras.callbacks.ModelCheckpoint(
filepath=model_filepath,
monitor="val_fbeta",
save_best_only=True),
keras.callbacks.TensorBoard(
log_dir = logs_dir)
]
# Fitting the model
step_size_train = train_data_generator.n//train_data_generator.batch_size
step_size_val = valid_data_generator.n//valid_data_generator.batch_size
# fit_generator is deprecated so we can use fit
history = model.fit(x=train_data_generator,
steps_per_epoch=step_size_train,
validation_data=valid_data_generator,
validation_steps=step_size_val,
callbacks=callbacks_list,
epochs=30)
history_log_dict[fold_var] = history
# now we will just locally load the best model from this fold
# and evaluate on the validation set
model.load_weights(model_filepath)
results = model.evaluate(valid_data_generator)
results = dict(zip(model.metrics_names, results))
VALIDATION_FBETA.append(results["fbeta"])
VALIDATION_LOSS.append(results["loss"])
###Output
Found 2407 validated image filenames.
Found 602 validated image filenames.
Epoch 1/10
75/75 [==============================] - 273s 4s/step - loss: 0.3209 - fbeta: 0.5310 - val_loss: 0.3335 - val_fbeta: 0.5030
Epoch 2/10
37/75 [=============>................] - ETA: 2:10 - loss: 0.3039 - fbeta: 0.5629 |
summer-of-code/week-04/.ipynb_checkpoints/day3_class-checkpoint.ipynb | ###Markdown
1millionwomentotech SummerOfCode Intro to AI: Week 4 Day 3
###Code
print(baby_train[50000]['reviewText'])
from nltk.sentiment.vader import SentimentIntensityAnalyzer
sia = SentimentIntensityAnalyzer()
text = baby_train[50000]['reviewText']
for s in sent_tokenize(text):
print(s)
print(sia.polarity_scores(s))
def sia_features(dataset):
"""For each review text in the dataset, extract:
(1) the mean positive sentiment over all sentences
(2) the mean neutral sentiment over all sentences
(3) the mean negative sentiment over all sentences
(4) the maximum positive sentiment over all sentences
(5) the maximum neutral sentiment over all sentences
(6) the maximum negative sentiment over all sentences"""
feat_matrix = numpy.empty((len(dataset), 6))
for i in range(len(dataset)):
sentences = sent_tokenize(dataset[i]['reviewText'])
nsent = len(sentences)
if nsent:
sentence_polarities = numpy.empty((nsent, 3))
for j in range(nsent):
polarity = sia.polarity_scores(sentences[j])
sentence_polarities[j, 0] = polarity['pos']
sentence_polarities[j, 1] = polarity['neu']
sentence_polarities[j, 2] = polarity['neg']
feat_matrix[i, 0:3] = numpy.mean(sentence_polarities, axis=0) # mean over the columns
feat_matrix[i, 3:6] = numpy.max(sentence_polarities, axis=0) # maximum over the columns
else:
feat_matrix[i, 0:6] = 0.0
return feat_matrix
sia_tr = sia_features(baby_train)
testmat = numpy.arange(12.).reshape((3, 4))
print(testmat)
print(numpy.max(testmat, axis=0))
print(numpy.mean(testmat, axis=1))
def len_features(dataset):
"""Add two features:
(1) length of review (in thousands of characters) - truncate at 2,500
(2) percentage of exclamation marks (in %)"""
feat_matrix = numpy.empty((len(dataset), 2))
for i in range(len(dataset)):
text = dataset[i]['reviewText']
feat_matrix[i, 0] = len(text) / 1000.
if text:
feat_matrix[i, 1] = 100. * text.count('!') / len(text)
else:
feat_matrix[i, 1] = 0.0
feat_matrix[feat_matrix>2.5] = 2.5
return feat_matrix
len_tr = len_features(baby_train)
print(X_train_neg.shape, sia_tr.shape, len_tr.shape)
X_train_augmented = numpy.concatenate((X_train_neg, sia_tr, len_tr), axis=1) # stack horizontally
lreg_augmented = LinearRegression().fit(X_train_augmented, Y_train)
pred_train_augmented = lreg_augmented.predict(X_train_augmented)
mae_train_augmented = mean_absolute_error(pred_train_augmented, Y_train)
print("Now the mean absolute error on the training data is %f stars" % mae_train_augmented)
rf_augmented = RandomForestRegressor().fit(X_train_augmented, Y_train)
rfpred_train_augmented = rf_augmented.predict(X_train_augmented)
mae_train_rf_augmented = mean_absolute_error(rfpred_train_augmented, Y_train)
print("For the RF, it is %f stars" % mae_train_rf_augmented)
X_valid_neg = dataset_to_matrix_with_neg(baby_valid)
sia_valid = sia_features(baby_valid)
len_valid = len_features(baby_valid)
X_valid_augmented = numpy.concatenate((X_valid_neg, sia_valid, len_valid), axis=1)
pred_valid_augmented = lreg_augmented.predict(X_valid_augmented)
pred_valid_rf_augmented = rf_augmented.predict(X_valid_augmented)
mae_valid_augmented = mean_absolute_error(pred_valid_augmented, Y_valid)
print("On the validation set, we get %f error for the linear regression" % mae_valid_augmented)
mae_valid_rf_augmented = mean_absolute_error(pred_valid_rf_augmented, Y_valid)
print("And %f for the random forest regression" % mae_valid_rf_augmented)
print(baby_train[50000]['reviewText'])
from nltk.sentiment.vader import SentimentIntensityAnalyzer
sia = SentimentIntensityAnalyzer()
text = baby_train[50000]['reviewText']
for s in sent_tokenize(text):
print(s)
print(sia.polarity_scores(s))
def sia_features(dataset):
"""For each review text in the dataset, extract:
(1) mean positive sentiment over all sentences
(2) mean neutral sentiment over all sentences
(3) mean negative sentiment over all sentences
(4) maximum positive sentiment over all sentences
(5) maximum neutral sentiment over all sentences
(6) maximum negative sentiment over all sentences
"""
feat_matrix = numpy.empty((len(dataset), 6))
for i in range(len(dataset)):
sentences = sent_tokenize(dataset[i]['reviewText'])
nsent = len(sentences)
if nsent:
sentence_polarities = numpy.empty((nsent, 3))
for j in range(nsent):
polarity = sia.polarity_scores(sentences[j])
sentence_polarities[j, 0] = polarity['pos']
sentence_polarities[j, 1] = polarity['neu']
sentence_polarities[j, 2] = polarity['neg']
feat_matrix[i, 0:3] = numpy.mean(sentence_polarities, axis = 0) # mean over the columns
feat_matrix[i, 3:6] = numpy.max(sentence_polarities, axis = 0) # maximum over the columns
else:
feat_matrix[i, 0:6] = 0.0
return feat_matrix
sia_tr = sia_features(baby_train)
print(sia_tr[:10])
testmat = numpy.arange(12.).reshape((3,4))
print(testmat)
print(numpy.max(testmat, axis = 0))
print(numpy.mean(testmat, axis = 1))
# Homework - required for Certification
def len_features(dataset):
"""Add two features:
(1) length of review (in thousands of character) - truncate at 2,500
(2) percentage of exclamation marks (in %)
"""
len_tr = len_features(baby_train)
print(X_train_neg.shape, sia_tr.shape)
# stack horizontally
X_train_augmented = numpy.concatenate( (X_train_neg, sia_tr), axis = 1)
lreg_augmented = LinearRegression().fit(X_train_augmented, Y_train)
pred_train_augmented = lreg_augmented.predict(X_train_augmented)
mae_train_augmented = mean_absolute_error(pred_train_augmented, Y_train)
print("Now the mean absolute error on the training data is %f starts" % mae_train_augmented)
# random forest
rf_augmented = RandomForestRegressor().fit(X_train_augmented, Y_train)
rfpred_train_augmented = rf_augmented.predict(X_train_augmented)
mae_train_rf_augmented = mean_absolute_error(rfpred_train_augmented, Y_train)
print("For the RF, MAE is %f stars" % mae_train_rf_augmented)
X_valid_neg = dataset_to_matrix_with_neg(baby_valid)
sia_valid = sia_features(baby_valid)
# len_valid =
X_valid_augmented = numpy.concatenate((X_valid_neg, sia_valid), axis = 1)
# completed following the pattern of the corresponding cell earlier in this notebook
pred_valid_augmented = lreg_augmented.predict(X_valid_augmented)
pred_valid_rfaugmented = rf_augmented.predict(X_valid_augmented)
mae_valid_augmented = mean_absolute_error(pred_valid_augmented, Y_valid)
mae_valid_rfaugmented = mean_absolute_error(pred_valid_rfaugmented, Y_valid)
###Output
_____no_output_____ |
_downloads/plot_contour_ext.ipynb | ###Markdown
Display the contours of a function===================================An example demoing how to plot the contours of a function, withadditional layout tweeks.
###Code
import numpy as np
import matplotlib.pyplot as plt
def f(x,y):
return (1 - x / 2 + x ** 5 + y ** 3) * np.exp(-x ** 2 - y ** 2)
n = 256
x = np.linspace(-3, 3, n)
y = np.linspace(-3, 3, n)
X, Y = np.meshgrid(x, y)
plt.contourf(X, Y, f(X, Y), 8, alpha=.75, cmap=plt.cm.hot)
C = plt.contour(X, Y, f(X,Y), 8, colors='black', linewidth=.5)
plt.clabel(C, inline=1, fontsize=10)
plt.xticks([])
plt.yticks([])
# Add a title and a box around it
from matplotlib.patches import FancyBboxPatch
ax = plt.gca()
ax.add_patch(FancyBboxPatch((-0.05, .87),
width=.66, height=.165, clip_on=False,
boxstyle="square,pad=0", zorder=3,
facecolor='white', alpha=1.0,
transform=plt.gca().transAxes))
plt.text(-0.05, 1.02, " Contour Plot: plt.contour(..)\n",
horizontalalignment='left',
verticalalignment='top',
size='xx-large',
transform=plt.gca().transAxes)
plt.text(-0.05, 1.01, "\n\n Draw contour lines and filled contours ",
horizontalalignment='left',
verticalalignment='top',
size='large',
transform=plt.gca().transAxes)
plt.show()
###Output
_____no_output_____ |
TwoBars.ipynb | ###Markdown
https://lcvmwww.epfl.ch/~lcvm/dna_teaching_05_06/exercises/ex5.pdf$$E(\theta, \phi, \lambda) = \frac12 \theta^2 + \frac12(\phi - \theta)^2 + \lambda(\cos\theta + \cos\phi)$$
###Code
from math import sin, cos
def energy(theta, phi, lam):
return 0.5 * theta ** 2 + 0.5 * (phi - theta)**2 + lam * (cos(theta) + cos(phi))
###Output
_____no_output_____
###Markdown
The equilibrium condition is given by $$0 = \frac{\partial E}{\partial \theta} = \theta + \theta - \phi - \lambda\sin\theta = 2\theta-\phi- \lambda\sin\theta,\quad 0 = \frac{\partial E}{\partial \phi} = \phi - \theta - \lambda\sin\phi$$
###Code
def F(theta, phi, lam):
return 2*theta - phi - lam * sin(theta), phi - theta - lam * sin(phi)
###Output
_____no_output_____
###Markdown
The Jacobian of F is given by $$J = \begin{pmatrix}2-\lambda\cos(\theta) & -1\\-1 & 1-\lambda\cos\phi\end{pmatrix}$$
###Code
import numpy as np
def J(theta, phi, lam):
return np.array([[2-lam*cos(theta), -1], [-1, 1-lam*cos(phi)]])
J(0,0,0)
###Output
_____no_output_____
###Markdown
In the case of the straight rod $(\theta = \phi = 0)$, $J(0,0,\lambda)$ is singular when $$\lambda^2 - 3\lambda + 1 = 0,$$ thus when $\lambda = \frac12(3\pm\sqrt5)$
###Code
J(0,0,0.5*(3-5**.5))
J(0,0,0.5*(3+5**.5))
###Output
_____no_output_____
###Markdown
The null spaces are spanned by $(1, \frac12(\sqrt5+1))$ and $(\frac12(\sqrt5+1), -1)$ respectively
###Code
J(0,0,0.5*(3-5**.5)) @ np.array([1,0.5*(5**0.5+1)]), J(0,0,0.5*(3+5**.5)) @ np.array([0.5*(5**0.5+1), -1])
###Output
_____no_output_____
###Markdown
StabilityThe Hessian is identical to J in this case. Its eigenvalues are given by$$\mu_1\mu_2 = \det J(0,0,\lambda) = \lambda^2 - 3\lambda+1\quad\text{and}$$$$\mu_1 + \mu_2 = \operatorname{tr}J(0,0,\lambda) = 3-2\lambda$$Thus for $\lambda < \frac12(3-\sqrt5)$ or $\lambda > \frac12(3+\sqrt5)$ the eigenvalues have the same sign, and in between they have opposite signs. For $\lambda > \frac12(3+\sqrt5)$ their sum is negative and thus they are both negative.
###Code
np.linalg.eig(J(0,0,0.3))
np.linalg.eig(J(0,0,1.5))
np.linalg.eig(J(0,0,2.7))
np.linalg.eig(J(0,0,0.5*(3-5**.5)))
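# Added check of the determinant/trace argument above: det J changes sign at the critical
# loads, while the trace 3 - 2*lam sets the sign of the eigenvalue sum
for lam in (0.3, 1.5, 2.7):
    Jm = J(0, 0, lam)
    print(lam, np.linalg.det(Jm), np.trace(Jm))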
###Output
_____no_output_____
###Markdown
Bifurcation Shape
###Code
from sympy import *
eps, phi, the, lam, E = symbols('ε φ θ λ E')
phi_ = symbols(['φ_%d' % i for i in range(4)])
the_ = symbols(['θ_%d' % i for i in range(4)])
lam_ = symbols(['λ_%d' % i for i in range(4)])
phi_eps = sum(eps**i * phi_[i] for i in range(4))
the_eps = sum(eps**i * the_[i] for i in range(4))
lam_eps = sum(eps**i * lam_[i] for i in range(4))
the_eps
E = (the**2 + (phi-the)**2)/2 + lam*(cos(phi)+cos(the))
F1, F2 = diff(E, the), diff(E, phi)
F1_eps = F1.subs([(the, the_eps), (phi, phi_eps), (lam, lam_eps)])
F1_eps.subs(eps,0)
diff(F1_eps, eps).subs(eps, 0)
conditions = [
diff(diff(E, x).subs([(the, the_eps), (phi, phi_eps), (lam, lam_eps)]), eps, i).subs(eps, 0) * factorial(i)
for i in range(4)
for x in (phi, the)
]; conditions
cond_1 = [c.subs([(the_[0], 0), (phi_[0],0), (lam_[0], (3-sqrt(5))/2)]) for c in conditions]; cond_1
from sympy.solvers.solveset import linsolve
linsolve(cond_1[:4], (the_[1], phi_[1]))
cond_2 = [c.subs([(phi_[1],1), (the_[1], (sqrt(5)-1)/2)]).simplify() for c in cond_1]
#cond_2[4] /= 2
#cond_2[5] /= (1+sqrt(5))
#cond_2[5] = cond_2[5].simplify()
cond_2
linsolve(cond_2[:6], (the_[2], phi_[2], lam_[1]))
cond_3 = [c.subs([(phi_[2],1), (the_[2], (sqrt(5)-1)/2), (lam_[1], 0)]).simplify() for c in cond_2]
cond_3
linsolve(cond_3, (the_[3], phi_[3], lam_[2]))
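# (Added check, not in the original notebook) Substituting the solution returned by linsolve
# back into cond_3 should make every condition simplify to zero.
sols = linsolve(cond_3, (the_[3], phi_[3], lam_[2]))
[[c.subs(dict(zip((the_[3], phi_[3], lam_[2]), s))).simplify() for c in cond_3] for s in sols]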
###Output
_____no_output_____ |
[MAC005] - Trabalho 01.ipynb | ###Markdown
General Conditions This assessment is intended to evaluate the knowledge acquired during the Solid Mechanics course. This form of assessment aims to promote discussion of the exercises among the members of each group (and possibly between groups) and to broaden the variety of exercises carried out.---The conditions below must be observed: 1. Teams will be formed, each with a minimum of 3 and a maximum of 4 members. 2. The assessment consists of submitting a copy of this notebook with the solutions developed by the stipulated delivery date. 3. On submitting the assessment. * The documents required for submission are (1) the code developed by the team. * The team must use this notebook template to develop the code. * The code may combine LaTeX and symbolic computation in Python when necessary. 4. On the distribution of the questions. * Each group will be assigned up to 9 questions from chapter 2 of the textbook. * The number of questions will be the same for every group. * The questions will be distributed at random. * Every question carries equal weight and the total value of the assessment is 100 points. 5. Teams must be formed by **18:00 on 23/11/2021** by filling in the spreadsheet [[MAC005] Formação das Equipes](https://docs.google.com/spreadsheets/d/1j59WVAl1cMzXgupwG86WFNGAQhbtVtc0b5aIQSbqGQE/edit?usp=sharing). 6. Team formation can be followed in the file [[MAC005] Formação das Equipes](https://docs.google.com/spreadsheets/d/1j59WVAl1cMzXgupwG86WFNGAQhbtVtc0b5aIQSbqGQE/edit?usp=sharing). Each team will be identified by a letter in alphabetical order followed by the number 1 (A1, B1, C1, and so on). The file is open for editing and may be changed by the students until the stipulated date. 7. Teams formed after the deadline for team formation will have their grade multiplied by a coefficient of **0.80**. 8. In the file [[MAC005] Formação das Equipes](https://docs.google.com/spreadsheets/d/1j59WVAl1cMzXgupwG86WFNGAQhbtVtc0b5aIQSbqGQE/edit?usp=sharing) the team must indicate the member responsible for submitting the project. * Only the member responsible for submission should upload the file to the platform. 9. Projects must be submitted by **23:59 on 30/11/2021** on the course platform by the member responsible for submission. * If the submission is made by a member other than the one indicated by the team, the assessment will be disregarded and will not be graded until the submission condition is satisfied. 10. Any questions or requests for clarification should be sent through the virtual classroom. Exercises 2.3, 2.5, 2.7, 2.10, 2.19, 2.21, 2.25, 2.27, 2.46 [Book Link](http://fn.iust.ac.ir/files/fnst/ssadeghzadeh_52bb7/files/Introduction_to_continuum_mechanics_-Lai-2010-4edition%281%29.pdf) Solution to Problem 2.3 (insert number and statement)  a) 1 - We set up and solve the system of equations for the first equation: $b_i = B_{ij} a_j$ $b_1 = B_{1j} a_j$ $b_2 = B_{2j} a_j$ $b_3 = B_{3j} a_j$ $b_1 = B_{11} a_1 + B_{12} a_2 + B_{13} a_3$ $b_2 = B_{21} a_1 + B_{22} a_2 + B_{23} a_3$ $b_3 = B_{31} a_1 + B_{32} a_2 + B_{33} a_3$ $b_1 = 2*1 + 3*0 + 0*2 = 2$ $b_2 = 0*1 + 5*0 + 1*2 = 2$ $b_3 = 0*1 + 2*0 + 1*2 = 2$ $b = \begin{bmatrix} b_1 \\ b_2 \\ b_3 \end{bmatrix}=\begin{bmatrix} 2 \\ 2 \\ 2 \end{bmatrix}$ 2 - We multiply the matrices for the second equation: $[b] = [B][a]$
###Code
import numpy as np
import sympy as sp
sp.init_printing()
B = np.matrix([[2,3,0],[0,5,1],[0,2,1]])
a = np.matrix([[1],[0],[2]])
b = sp.Matrix(B*a)
print("Dessa forma, as duas equações (do passo 1 e 2) são equivalentes")
b
###Output
Thus, the two equations (from steps 1 and 2) are equivalent
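###Markdown
(Added sketch, not part of the original solution) The index form $b_i = B_{ij} a_j$ can also be evaluated directly with numpy's einsum; the names B_arr and a_arr are illustrative.
###Code
B_arr = np.array([[2, 3, 0], [0, 5, 1], [0, 2, 1]])
a_arr = np.array([1, 0, 2])
# contract over the repeated index j: b_i = B_ij a_j
np.einsum('ij,j->i', B_arr, a_arr)  # expected: array([2, 2, 2])
###Output
_____no_output_____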
###Markdown
b) 1 - We set up and solve the system of equations for the first equation: $s = B_{ij} a_i a_j$ $s = B_{11} a_1 a_1 + B_{12} a_1 a_2 + B_{13} a_1 a_3 + B_{21} a_2 a_1 + B_{22} a_2 a_2 + B_{23} a_2 a_3 + B_{31} a_3 a_1 + B_{32} a_3 a_2 + B_{33} a_3 a_3$ $s = 2*1*1 + 3*1*0 + 0*1*2 + 0*0*1 + 5*0*0 + 1*0*2 + 0*2*1 + 2*2*0 + 1*2*2$ $s = 2 + 4 = 6$ 2 - We multiply the matrices for the second equation: $s=[a]^t[B][a]$
###Code
import numpy as np
import sympy as sp
sp.init_printing()
B = np.matrix([[2,3,0],[0,5,1],[0,2,1]])
a = np.matrix([[1],[0],[2]])
at = np.transpose(a)
# calculation of the second equation: s = [a]^t [B] [a]
b = sp.Matrix(at*B*a)
print("Dessa forma, as duas equações (do passo 1 e 2) são equivalentes")
b
###Output
Thus, the two equations (from steps 1 and 2) are equivalent
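###Markdown
(Added sketch) Likewise, the double contraction $s = B_{ij} a_i a_j$ can be written directly with numpy's einsum.
###Code
B_arr = np.array([[2, 3, 0], [0, 5, 1], [0, 2, 1]])
a_arr = np.array([1, 0, 2])
# contract over both repeated indices i and j
np.einsum('i,ij,j->', a_arr, B_arr, a_arr)  # expected: 6
###Output
_____no_output_____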
|