path (string, lengths 7-265) | concatenated_notebook (string, lengths 46-17M)
---|---|
Data8.1x/lec01.ipynb | ###Markdown
Visualization
###Code
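# cumulative number of mentions of each Little Women character, chapter by chapter
# (assumes `chapters` holds the book's chapter texts, as defined in an earlier cell)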
Table().with_columns(
'Jo', np.char.count(chapters, 'Jo'),
'Meg', np.char.count(chapters, 'Meg'),
'Amy', np.char.count(chapters, 'Amy'),
'Beth', np.char.count(chapters, 'Beth'),
'Laurie', np.char.count(chapters, 'Laurie')
).cumsum().plot()
###Output
_____no_output_____
###Markdown
Visualizing chapter length vs number of periods
###Code
Table().with_columns([
'Chapter Length', [len(c) for c in chapters],
'Number of Periods', np.char.count(chapters, '.'),
]).scatter('Number of Periods')
###Output
_____no_output_____ |
toronto-restaurant-locatoins.ipynb | ###Markdown
Library Imports:
###Code
import pandas as pd
###Output
_____no_output_____
###Markdown
Step 1: Collect Neighborhood Data for the City of Toronto by Scraping the Table from Wikipedia
###Code
# link to wiki:
link = 'https://en.wikipedia.org/wiki/List_of_postal_codes_of_Canada:_M'
# Loading data and pre-processing
postal_df = pd.read_html(link)[0]
postal_df = postal_df[postal_df['Borough'] != 'Not assigned']
postal_df = postal_df.reset_index().drop('index', axis = 1)
# lambda function to replace
replace = lambda s : s.replace(' /', ',')
postal_df['Neighborhood'] = postal_df['Neighborhood'].apply(replace)
postal_df.head()
###Output
_____no_output_____
###Markdown
Loading coordinates from a CSV file
###Code
geo_df = pd.read_csv('Geospatial_Coordinates.csv')
geo_df.rename(columns = {'Postal Code' : 'Postal code'}, inplace = True)
geo_df.head()
###Output
_____no_output_____
###Markdown
Merge the Dataframes:
###Code
to_data = postal_df.merge(geo_df, left_on = 'Postal code', right_on = 'Postal code')
to_data.head(10)
###Output
_____no_output_____
###Markdown
Step 2: Collecting Venue Data from Foursquare API: More Library Imports
###Code
import numpy as np
import requests
###Output
_____no_output_____
###Markdown
Define Foursquare API Credentials:
###Code
CLIENT_ID = 'T5PXJE5TAI2JDSTEJDY2TQR2EQRML2AHDEBBRN0NC2LWBYYR' # your Foursquare ID
CLIENT_SECRET = '1TFUQOLNAGXPYILNGZ3L3O3GBBTNVHAQUCHRCRZTSU1TFFMM' # your Foursquare Secret
VERSION = '20180605'
print('Your credentials:')
print('CLIENT_ID: ' + CLIENT_ID)
print('CLIENT_SECRET:' + CLIENT_SECRET)
###Output
Your credentials:
CLIENT_ID: T5PXJE5TAI2JDSTEJDY2TQR2EQRML2AHDEBBRN0NC2LWBYYR
CLIENT_SECRET:1TFUQOLNAGXPYILNGZ3L3O3GBBTNVHAQUCHRCRZTSU1TFFMM
###Markdown
Function to get Nearby Venues:
###Code
def getNearbyVenues(names, latitudes, longitudes, LIMIT = 100, radius=500):
venues_list=[]
for name, lat, lng in zip(names, latitudes, longitudes):
print(name)
# create the API request URL
url = 'https://api.foursquare.com/v2/venues/explore?&client_id={}&client_secret={}&v={}&ll={},{}&radius={}&limit={}'.format(
CLIENT_ID,
CLIENT_SECRET,
VERSION,
lat,
lng,
radius,
LIMIT)
# make the GET request
results = requests.get(url).json()["response"]['groups'][0]['items']
# return only relevant information for each nearby venue
venues_list.append([(
name,
lat,
lng,
v['venue']['name'],
v['venue']['location']['lat'],
v['venue']['location']['lng'],
v['venue']['categories'][0]['name']) for v in results])
nearby_venues = pd.DataFrame([item for venue_list in venues_list for item in venue_list])
nearby_venues.columns = ['Neighborhood',
'Neighborhood Latitude',
'Neighborhood Longitude',
'Venue',
'Venue Latitude',
'Venue Longitude',
'Venue Category']
return(nearby_venues)
###Output
_____no_output_____
###Markdown
Request for Nearby Venues from Foursquare API and store them into a dataframe:
###Code
to_venues = getNearbyVenues(names=to_data['Neighborhood'],
latitudes=to_data['Latitude'],
longitudes=to_data['Longitude']
)
to_venues.head()
to_venues.shape
###Output
_____no_output_____
###Markdown
Step 3: Data Analysis
###Code
# 20 most frequent venue categories
to_venues['Venue Category'].value_counts().head(20)
# lambda function to check for restaurant
check_rest = lambda s : 'Restaurant' in s
# dataframe only contains restaurant
rest_df = to_venues[to_venues['Venue Category'].apply(check_rest)]
rest_df.head()
rest_df.shape
# Create a new column: 1 if the venue is a restaurant, 0 otherwise
to_venues['Restaurant'] = to_venues['Venue Category'].apply(check_rest)*1
to_venues.head()
###Output
_____no_output_____
###Markdown
dataframe showing the count of restaurants grouped by Neighborhood
###Code
rest_counts = to_venues.groupby('Neighborhood')['Restaurant'].sum()
rest_counts = pd.DataFrame(rest_counts).reset_index()
rest_counts.head()
###Output
_____no_output_____
###Markdown
Showing the Locations with Most Restaurants
###Code
rest_counts.sort_values(by = 'Restaurant', ascending = False).head(10)
###Output
_____no_output_____
###Markdown
So it might be suggested that these locations are good candidates for opening restaurants.
###Code
# Take the locations
to_locs = to_venues[['Neighborhood', 'Neighborhood Latitude', 'Neighborhood Longitude']]
to_locs.drop_duplicates(subset='Neighborhood', keep="first", inplace = True)
to_locs.head()
# Add latitude and longitude to dataframe
rest_counts = pd.merge(to_locs, rest_counts)
rest_counts.rename(columns = {'Neighborhood Latitude': 'Latitude', 'Neighborhood Longitude': 'Longitude'}, inplace = True)
rest_counts.head()
###Output
_____no_output_____
###Markdown
Step 4: Plot the Restaurants in a map to Visualize Hotspots: More Library Imports:
###Code
from geopy.geocoders import Nominatim
import folium
from folium import plugins
###Output
_____no_output_____
###Markdown
Get Latitude & Longitude of Toronto:
###Code
address = 'Toronto, TO'
geolocator = Nominatim(user_agent="to_explorer")
location = geolocator.geocode(address)
latitude = location.latitude
longitude = location.longitude
print('The geographical coordinates of Toronto are {}, {}.'.format(latitude, longitude))
###Output
The geographical coordinates of Toronto are 43.6534817, -79.3839347.
###Markdown
Plot the number of Restaurants in Map:
###Code
toronto_map = folium.Map(location = [latitude, longitude], zoom_start = 11)
# instantiate a mark cluster object for the incidents in the dataframe
incidents = plugins.MarkerCluster().add_to(toronto_map)
# loop through the dataframe and add each data point to the mark cluster
for lat, lng, label, in zip(rest_counts.Latitude, rest_counts.Longitude, rest_counts.Restaurant):
folium.Marker(
location=[lat, lng],
icon=None,
#popup=label,
).add_to(incidents)
# display map
toronto_map
###Output
_____no_output_____
###Markdown
So we can identify the hotspot locations from the plot. The **Yellow Zone** is more likely to host a Restaurant. Step 5: Data Preparation for Machine Learning Models. One-hot Encoding of the Venues vs. 267 Categories
###Code
# one hot encoding
toronto_onehot = pd.get_dummies(to_venues[['Venue Category']], prefix="", prefix_sep="")
# add neighborhood column back to dataframe
toronto_onehot['Neighborhood'] = to_venues['Neighborhood']
# move neighborhood column to the first column
fixed_columns = [toronto_onehot.columns[-1]] + list(toronto_onehot.columns[:-1])
toronto_onehot = toronto_onehot[fixed_columns]
toronto_onehot.head()
###Output
_____no_output_____
###Markdown
Group By Neighborhood and Sum
###Code
# Group by Neighborhood and Sum
toronto_grouped = toronto_onehot.groupby('Neighborhood').sum().reset_index()
toronto_grouped.head()
###Output
_____no_output_____
###Markdown
Collect the top 5 most frequently occurring venues per neighborhood in a list
###Code
num_top_venues = 5
top_venues = []
for hood in toronto_grouped['Neighborhood']:
temp = toronto_grouped[toronto_grouped['Neighborhood'] == hood].T.reset_index()
temp.columns = ['venue','freq']
temp = temp.iloc[1:]
temp['freq'] = temp['freq'].astype(float)
temp = temp.round({'freq': 2})
venues = temp.sort_values('freq', ascending = False).head(num_top_venues)['venue'].tolist()
top_venues.extend(venues)
top_venues = np.unique(np.asarray(top_venues))
venues = pd.Series(top_venues)
venues.head()
###Output
_____no_output_____
###Markdown
Filter out restaurants from the venues. We filter out the columns containing any information about restaurants, since the presence of a restaurant is exactly what we want the classifier to predict.
###Code
# lambda function to filter out the not restaurants:
check_notrest = lambda s : not 'Restaurant' in s
features = venues[venues.apply(check_notrest)].tolist()
features.insert(0, 'Neighborhood')
features[0:10]
###Output
_____no_output_____
###Markdown
This part takes all the features. It will be used for the clustering model.
###Code
all_features = venues.tolist()
all_features.insert(0, 'Neighborhood')
###Output
_____no_output_____
###Markdown
Step 6: Create a Machine Learning Classifier to Predict the Likelihood of a Restaurant. More Library Imports:
###Code
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import jaccard_similarity_score, log_loss, f1_score
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
import seaborn as sns
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
Dataframe with 108 Features
###Code
feat_df = toronto_grouped[features]
feat_df.head()
rest_counts.head()
###Output
_____no_output_____
###Markdown
Merge to create final dataframe
###Code
df = pd.merge(rest_counts, feat_df)
# move restaurant column to the back
rest = df.pop('Restaurant')
df['Restaurant'] = rest
df.head()
###Output
_____no_output_____
###Markdown
Correlation. Positive Correlation:
###Code
# Taking correlation and using features with corr score > + 0.20
corr_values = pd.DataFrame(df.corr()['Restaurant']).reset_index()
corr_values.sort_values(by = 'Restaurant', ascending = False).head(10)
###Output
_____no_output_____
###Markdown
Negative Correlation:
###Code
corr_values.sort_values(by = 'Restaurant', ascending = True).head(10)
###Output
_____no_output_____
###Markdown
Negative correlation values are insignificant, so we can ignore them. Seaborn Regression Plots:
###Code
sns.regplot(data = df, x = 'Café', y = 'Restaurant')
plt.show()
sns.regplot(data = df, x = 'Coffee Shop', y = 'Restaurant')
plt.show()
sns.regplot(data = df, x = 'Hotel', y = 'Restaurant')
plt.show()
sns.regplot(data = df, x = 'Gastropub', y = 'Restaurant')
plt.show()
sns.regplot(data = df, x = 'Gym', y = 'Restaurant')
plt.show()
###Output
_____no_output_____
###Markdown
Taking the important features: those having a positive correlation value > 0.20
###Code
imp_features = corr_values[corr_values['Restaurant'] > 0.20]['index'].tolist()
# discarding the feature 'Restaurant', since it is our target
imp_features.remove('Restaurant')
###Output
_____no_output_____
###Markdown
Taking Important Feature values for the Model:
###Code
X = df[imp_features].values
y = df['Restaurant'].values
print(X.shape)
print(y.shape)
###Output
(93, 37)
(93,)
###Markdown
Data Normalization:
###Code
ss = StandardScaler()
ss.fit(X)
X = ss.fit_transform(X)
# Converting y into a binary feature 1 = Restaurant Exists, 0 = Does not exist
y = y.astype(np.bool)*1
###Output
_____no_output_____
###Markdown
Train-Test Split:
###Code
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 42)
model_lr = LogisticRegression()
model_knn = KNeighborsClassifier(n_neighbors = 5)
model_dt = DecisionTreeClassifier(max_depth = 5)
model_svc = SVC(kernel = 'rbf')
models = [model_lr, model_knn, model_dt, model_svc]
model_names = ['Logistic Regression', 'kNN', 'Decision Tree', 'SVM']
for model, model_name in zip(models, model_names):
model.fit(X_train, y_train)
y_hat = model.predict(X_test)
print('---------------')
print(model_name)
print('Training score: ' , model.score(X_train, y_train))
print('Testing score: ', model.score(X_test, y_test))
print('Jaccard index: ', jaccard_similarity_score(y_test, y_hat))
print('Log loss: ' , log_loss(y_test, y_hat))
print('f1 Score: ', f1_score(y_test, y_hat))
model_lr.predict(X_train)
model_lr.predict_proba(X_train)[:10]
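# (sketch) scoring a hypothetical new candidate location with the fitted model:
# build a 1 x len(imp_features) row of venue counts in the same column order as imp_features,
# scale it with the already-fitted StandardScaler, and read off [P(no restaurant), P(restaurant)].
# 'Coffee Shop' below is only an assumed example feature name.
new_location = np.zeros((1, len(imp_features)))
if 'Coffee Shop' in imp_features:
    new_location[0, imp_features.index('Coffee Shop')] = 3
print(model_lr.predict_proba(ss.transform(new_location)))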
###Output
_____no_output_____
###Markdown
The **logistic regression model** performs well on the test data, so we can use it to predict the probability of a restaurant at a location from the 37 features we took. Step 7: Clustering. More Library Imports:
###Code
from sklearn.cluster import KMeans
import matplotlib.cm as cm
import matplotlib.colors as colors
###Output
_____no_output_____
###Markdown
Preparing the data. Taking all the features, including those of restaurants, for clustering, since clustering is unsupervised.
###Code
all_feat_df = toronto_grouped[all_features]
rest_counts.rename(columns = {'Restaurant':'RestCnt'}, inplace = True)
df = pd.merge(rest_counts, all_feat_df)
rest = df.pop('RestCnt')
df['RestCnt'] = rest
df.head()
###Output
_____no_output_____
###Markdown
Taking correlation and using features with corr score > + 0.20
###Code
corr_values = pd.DataFrame(df.corr()['RestCnt']).reset_index()
features = corr_values[corr_values['RestCnt'] > 0.20]['index'].tolist()
features.remove('RestCnt')
###Output
_____no_output_____
###Markdown
Taking data values
###Code
X = df[features].values
X.shape
###Output
_____no_output_____
###Markdown
Normalizing:
###Code
ss = StandardScaler()
ss.fit(X)
X = ss.fit_transform(X)
###Output
_____no_output_____
###Markdown
Clustering:
###Code
# set number of clusters
kclusters = 5
# run k-means clustering
kmeans = KMeans(n_clusters=kclusters, random_state=0).fit(X)
# check cluster labels generated for each row in the dataframe
kmeans.labels_[0:10]
df.insert(3, 'Cluster', kmeans.labels_)
df.head()
###Output
_____no_output_____
###Markdown
Create Cluster Map
###Code
# create map
map_clusters = folium.Map(location=[latitude, longitude], zoom_start=11)
# set color scheme for the clusters
x = np.arange(kclusters)
ys = [i + x + (i*x)**2 for i in range(kclusters)]
colors_array = cm.rainbow(np.linspace(0, 1, len(ys)))
rainbow = [colors.rgb2hex(i) for i in colors_array]
# add markers to the map
markers_colors = []
for lat, lon, poi, cluster in zip(df['Latitude'], df['Longitude'], df['Neighborhood'], df['Cluster']):
label = folium.Popup(str(poi) + ' Cluster ' + str(cluster), parse_html=True)
folium.CircleMarker(
[lat, lon],
radius=5,
popup=label,
color=rainbow[cluster-1],
fill=True,
fill_color=rainbow[cluster-1],
fill_opacity=0.7).add_to(map_clusters)
map_clusters
df.groupby(by = 'Cluster').mean()
###Output
_____no_output_____
###Markdown
Taking the mean number of Restaurants for Each Cluster:
###Code
df.groupby(by = 'Cluster').mean()['RestCnt']
###Output
_____no_output_____
###Markdown
**We can conclude that cluster 2 has a lower chance of hosting a Restaurant, while the other clusters have a good chance.** Counting the number of entries per cluster:
###Code
df['Cluster'].value_counts()
###Output
_____no_output_____ |
uvxy_daily.ipynb | ###Markdown
data.rate.plot()
plt.axhline(data.rate.mean(), color='red', linestyle='--')
###Code
def zscore(series):
print(series.mean(), series.std())  # show the mean and the sample std (ddof=1) for reference
return (series - series.mean()) / np.std(series)  # note: standardizes with the population std (np.std, ddof=0)
z_score = zscore(data.rate)
z_score.plot(figsize=(8,8))
plt.axhline(z_score.mean())
plt.axhline(1.0, color='red')
plt.axhline(-1.0, color='green')
plt.show()
data['z_score'] = z_score
sns.distplot(z_score)
statistical(z_score)
show_col = ['Date', 'Close_uvxy', 'gains_uvxy', 'Close_spy', 'gains_spy', 'rate','z_score']
data[show_col].tail(20)
# data[(data['gains_spy']>0) & (data['z_score']<0)][show_col]
# data['ma5'] = data.z_score.rolling(window=2, center=False).mean()
# data['rate_ma5'] = data.rate.rolling(window=2, center=False).mean()
# data[200:300]
ax = data[['Close_uvxy']].plot(figsize=(16,8))
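# 'buy' markers: days where SPY gained and the UVXY z-score is below -0.1, overlaid on the UVXY close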
buy = data[['Date','Close_uvxy','z_score','gains_spy']].copy()
buy.loc[~((buy['gains_spy']>0) & (buy['z_score']<-0.1)), ['Close_uvxy']] = 0
buy = buy[buy['Close_uvxy'] != 0]
# buy.plot.scatter(ax=ax,x=buy.index ,y='Close_uvxy',marker='^',color='r')
buy[['Close_uvxy']].plot(ax=ax,marker='^',color='r',linewidth=0)
print(buy.tail(10))
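# 'sell' markers: days where the UVXY z-score is at or below -2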
sell = data[['Date','Close_uvxy','z_score','gains_spy']].copy()
sell.loc[sell['z_score']>-2, ['Close_uvxy']] = 0
sell = sell[sell['Close_uvxy'] != 0]
sell[['Close_uvxy']].plot(ax=ax,marker='>',color='black',linewidth=0)
###Output
Date Close_uvxy z_score gains_spy
182 2021-07-22 28.840000 -0.657901 0.209407
183 2021-07-23 28.680000 -0.272529 1.028805
198 2021-08-13 23.020000 -0.273983 0.181982
199 2021-08-16 23.139999 -0.323607 0.235465
202 2021-08-19 28.030001 -1.089272 0.154833
205 2021-08-24 23.500000 -0.134636 0.158738
230 2021-09-29 24.530001 -0.667618 0.168314
242 2021-10-15 18.170000 -0.335526 0.761581
244 2021-10-19 17.370001 -0.104209 0.771487
249 2021-10-26 16.490000 -0.685137 0.090002
|
performance_on_text_data.ipynb | ###Markdown
Notebook which focuses on the text dataset and the performance comparison of algorithms on it
###Code
from IPython.core.display import display, HTML
display(HTML('<style>.container {width:100% !important;}</style>'))
%matplotlib notebook
import matplotlib.pyplot as plt
import numpy as np
import torch
import nmf.mult
import nmf.pgrad
import nmf.nesterov
import nmf_torch.mult
import nmf_torch.pgrad
import nmf_torch.nesterov
import nmf_torch.norms
import matplotlib
import pickle
from read_data.reading import read_reuters21578, HashTfidfVectoriser
from performance.performance_eval_func import get_random_lowrank_matrix, get_time_ratio,\
compare_performance, plot_performance_dict,\
torch_algo_wrapper, plot_errors_dict,\
plot_ratios_gpu_algo, plot_ratios_cpu_gpu, plot_ratios_cpu_algo,\
plot_differences_gpu_algo, plot_differences_cpu_gpu, plot_differences_cpu_algo
vectorizer=HashTfidfVectoriser(12000)
reuters_data = read_reuters21578("data/reuters21578", vectorizer=vectorizer)
algo_dict_to_test = {
"mult": nmf.mult.factorize_Fnorm,
"pgrad": nmf.pgrad.factorize_Fnorm_subproblems,
"nesterov": nmf.nesterov.factorize_Fnorm,
"mult_torch": torch_algo_wrapper(nmf_torch.mult.factorize_Fnorm,
device="cuda"),
"pgrad_torch": torch_algo_wrapper(nmf_torch.pgrad.factorize_Fnorm_subproblems,
device="cuda"),
"nesterov_torch": torch_algo_wrapper(nmf_torch.nesterov.factorize_Fnorm,
device="cuda")
}
###Output
_____no_output_____
###Markdown
Performance evaluation
###Code
errors_dict = pickle.load(open("text_data_errors_dict.pkl","rb"))
inner_dim = 120
shape = reuters_data.shape
W_init = np.random.rand(shape[0], inner_dim) * 0.001
H_init = np.random.rand(inner_dim, shape[1]) * 0.001
errors_dict = compare_performance(reuters_data.toarray(),
inner_dim, time_limit=1000,
W_init=W_init, H_init=H_init,
algo_dict_to_test=algo_dict_to_test)
pickle.dump(errors_dict, open("text_data_errors_dict.pkl","wb"))
###Output
_____no_output_____
###Markdown
Graphs of time ratio
###Code
nmbytes = 1975.52
shape = (21578, 12000)
inner_dim = 120
shape = reuters_data.shape
inner_dim = 120
nmbytes = reuters_data.toarray().nbytes / 2**20
f, axes = plt.subplots(2, 2, figsize=(10, 10),
gridspec_kw=gridspec_kw)
f.suptitle("Text data represented by {0} matrix, {2:.2f} MB \n Factorization of size {1}".format(shape, inner_dim,
nmbytes))
plot_errors_dict(errors_dict, axes[0, 0], log=True, title="Cost function", x_lbl="time [s]")
plot_ratios_cpu_gpu(errors_dict, axes[0, 1])
plot_ratios_gpu_algo(errors_dict, axes[1, 0:], selected_algs=["mult_torch", "pgrad_torch", "nesterov_torch"])
axes[0, 1].set_title("CPU / GPU comparison")
axes[1, 0].set_title("multiplicative / gradient\nalgorithms comparison")
axes[1, 1].set_title("Nesterov / projected\n gradient algorithms comparison")
###Output
_____no_output_____
###Markdown
Demonstration
###Code
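# factorize the hashed tf-idf matrix into W (documents x 120 latent topics) and H (120 topics x hashed terms) using the Nesterov NMF routine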
W, H, errors = nmf.nesterov.factorize_Fnorm(reuters_data.toarray(), 120, max_steps=20,
epsilon=0, record_errors=True)
for topic_id in range(H.shape[0]):
hashes = cols[topic_id, :3]
words = ["({})".format("|".join(vectorizer.words_by_hash(h))) for h in hashes]
print(topic_id, *words)
text_id_of_interest = 160
text_vector_of_interest = W[text_id_of_interest, :]
idxs = np.argsort(np.linalg.norm(W[:, :] - text_vector_of_interest, axis=1))
print("Text corresponding to 160th row:")
print(vectorizer.last_data[idxs[0]])
print()
print("Text corresponding to the closest to the 160th row in the space of latent topic")
print(vectorizer.last_data[idxs[1]])
print()
print("Text corresponding to the second closest to the 160th row in the space of latent topic")
print(vectorizer.last_data[idxs[2]])
print()
###Output
_____no_output_____ |
docs/running/interaction/to_hdf.ipynb | ###Markdown
Example to_hdf calls. Initialize the simulation with the `tardis_example.yml` configuration file.
###Code
from tardis.io.config_reader import Configuration
from tardis.simulation import Simulation
# Must have the tardis_example folder in the working directory.
config_fname = 'tardis_example/tardis_example.yml'
config = Configuration.from_yaml(config_fname)
simulation = Simulation.from_config(config)
###Output
[[1mtardis.plasma.standard_plasmas[0m][[1;37mINFO[0m ] Reading Atomic Data from tardis_example/kurucz_cd23_chianti_H_He.h5 ([1mstandard_plasmas.py[0m:72)
tardis.plasma.standard_plasmas - [1;37mINFO[0m - Reading Atomic Data from tardis_example/kurucz_cd23_chianti_H_He.h5
tardis.atomic - INFO - Read Atom Data with UUID=5ca3035ca8b311e3bb684437e69d75d7 and MD5=21095dd25faa1683f4c90c911a00c3f8
[[1mtardis.plasma.base [0m][[1;34mDEBUG[0m ] Updating modules in the following order: ([1mbase.py[0m:197)
tardis.plasma.base - [1;34mDEBUG[0m - Updating modules in the following order:
[[1mtardis.montecarlo.base[0m][[1;34mDEBUG[0m ] Electron scattering switched on ([1mbase.py[0m:393)
tardis.montecarlo.base - [1;34mDEBUG[0m - Electron scattering switched on
[[1mpy.warnings [0m][[1;33mWARNING[0m] /home/vaibhav/anaconda2/lib/python2.7/site-packages/astropy/units/equivalencies.py:74: RuntimeWarning: divide by zero encountered in double_scalars
(si.m, si.Hz, lambda x: _si.c.value / x),
([1mequivalencies.py[0m:74)
py.warnings - [1;33mWARNING[0m - /home/vaibhav/anaconda2/lib/python2.7/site-packages/astropy/units/equivalencies.py:74: RuntimeWarning: divide by zero encountered in double_scalars
(si.m, si.Hz, lambda x: _si.c.value / x),
###Markdown
Run the simulation.
###Code
simulation.run()
###Output
[[1mtardis.simulation.base[0m][[1;37mINFO[0m ] Starting iteration 1/5 ([1mbase.py[0m:196)
tardis.simulation.base - [1;37mINFO[0m - Starting iteration 1/5
[[1mtardis.simulation.base[0m][[1;37mINFO[0m ] Luminosity emitted = 8.10360e+42 erg / s Luminosity absorbed = 2.67587e+42 erg / s Luminosity requested = 1.07688e+43 erg / s ([1mbase.py[0m:273)
tardis.simulation.base - [1;37mINFO[0m - Luminosity emitted = 8.10360e+42 erg / s Luminosity absorbed = 2.67587e+42 erg / s Luminosity requested = 1.07688e+43 erg / s
[[1mtardis.simulation.base[0m][[1;37mINFO[0m ] Plasma stratification:
t_rad next_t_rad w next_w
Shell
0 9967.488442 10074.800724 0.400392 0.452516
5 9893.293062 10103.998786 0.211205 0.202916
10 9820.194102 10007.439423 0.142695 0.130205
15 9748.167438 9820.947309 0.104556 0.096257
([1mbase.py[0m:264)
tardis.simulation.base - [1;37mINFO[0m - Plasma stratification:
t_rad next_t_rad w next_w
Shell
0 9967.488442 10074.800724 0.400392 0.452516
5 9893.293062 10103.998786 0.211205 0.202916
10 9820.194102 10007.439423 0.142695 0.130205
15 9748.167438 9820.947309 0.104556 0.096257
[[1mtardis.simulation.base[0m][[1;37mINFO[0m ] t_inner 9974.969 K -- next t_inner 10736.934 K ([1mbase.py[0m:266)
tardis.simulation.base - [1;37mINFO[0m - t_inner 9974.969 K -- next t_inner 10736.934 K
[[1mtardis.plasma.base [0m][[1;34mDEBUG[0m ] Updating modules in the following order: ([1mbase.py[0m:197)
tardis.plasma.base - [1;34mDEBUG[0m - Updating modules in the following order:
[[1mtardis.simulation.base[0m][[1;37mINFO[0m ] Starting iteration 2/5 ([1mbase.py[0m:196)
tardis.simulation.base - [1;37mINFO[0m - Starting iteration 2/5
[[1mtardis.simulation.base[0m][[1;37mINFO[0m ] Luminosity emitted = 1.08633e+43 erg / s Luminosity absorbed = 3.60272e+42 erg / s Luminosity requested = 1.07688e+43 erg / s ([1mbase.py[0m:273)
tardis.simulation.base - [1;37mINFO[0m - Luminosity emitted = 1.08633e+43 erg / s Luminosity absorbed = 3.60272e+42 erg / s Luminosity requested = 1.07688e+43 erg / s
[[1mtardis.simulation.base[0m][[1;37mINFO[0m ] Plasma stratification:
t_rad next_t_rad w next_w
Shell
0 10074.800724 10480.642737 0.452516 0.485511
5 10103.998786 10542.429569 0.202916 0.203942
10 10007.439423 10413.108276 0.130205 0.127795
15 9820.947309 10179.669303 0.096257 0.094281
([1mbase.py[0m:264)
tardis.simulation.base - [1;37mINFO[0m - Plasma stratification:
t_rad next_t_rad w next_w
Shell
0 10074.800724 10480.642737 0.452516 0.485511
5 10103.998786 10542.429569 0.202916 0.203942
10 10007.439423 10413.108276 0.130205 0.127795
15 9820.947309 10179.669303 0.096257 0.094281
[[1mtardis.simulation.base[0m][[1;37mINFO[0m ] t_inner 10736.934 K -- next t_inner 10713.534 K ([1mbase.py[0m:266)
tardis.simulation.base - [1;37mINFO[0m - t_inner 10736.934 K -- next t_inner 10713.534 K
[[1mtardis.plasma.base [0m][[1;34mDEBUG[0m ] Updating modules in the following order: ([1mbase.py[0m:197)
tardis.plasma.base - [1;34mDEBUG[0m - Updating modules in the following order:
[[1mtardis.simulation.base[0m][[1;37mINFO[0m ] Starting iteration 3/5 ([1mbase.py[0m:196)
tardis.simulation.base - [1;37mINFO[0m - Starting iteration 3/5
[[1mtardis.simulation.base[0m][[1;37mINFO[0m ] Luminosity emitted = 1.08260e+43 erg / s Luminosity absorbed = 3.52022e+42 erg / s Luminosity requested = 1.07688e+43 erg / s ([1mbase.py[0m:273)
tardis.simulation.base - [1;37mINFO[0m - Luminosity emitted = 1.08260e+43 erg / s Luminosity absorbed = 3.52022e+42 erg / s Luminosity requested = 1.07688e+43 erg / s
[[1mtardis.simulation.base[0m][[1;37mINFO[0m ] Plasma stratification:
t_rad next_t_rad w next_w
Shell
0 10480.642737 10711.113330 0.485511 0.493541
5 10542.429569 10819.470987 0.203942 0.199112
10 10413.108276 10633.892704 0.127795 0.125270
15 10179.669303 10359.259776 0.094281 0.092971
([1mbase.py[0m:264)
tardis.simulation.base - [1;37mINFO[0m - Plasma stratification:
t_rad next_t_rad w next_w
Shell
0 10480.642737 10711.113330 0.485511 0.493541
5 10542.429569 10819.470987 0.203942 0.199112
10 10413.108276 10633.892704 0.127795 0.125270
15 10179.669303 10359.259776 0.094281 0.092971
[[1mtardis.simulation.base[0m][[1;37mINFO[0m ] t_inner 10713.534 K -- next t_inner 10699.352 K ([1mbase.py[0m:266)
tardis.simulation.base - [1;37mINFO[0m - t_inner 10713.534 K -- next t_inner 10699.352 K
[[1mtardis.plasma.base [0m][[1;34mDEBUG[0m ] Updating modules in the following order: ([1mbase.py[0m:197)
tardis.plasma.base - [1;34mDEBUG[0m - Updating modules in the following order:
[[1mtardis.simulation.base[0m][[1;37mINFO[0m ] Starting iteration 4/5 ([1mbase.py[0m:196)
tardis.simulation.base - [1;37mINFO[0m - Starting iteration 4/5
[[1mtardis.simulation.base[0m][[1;37mINFO[0m ] Luminosity emitted = 1.07886e+43 erg / s Luminosity absorbed = 3.48159e+42 erg / s Luminosity requested = 1.07688e+43 erg / s ([1mbase.py[0m:273)
tardis.simulation.base - [1;37mINFO[0m - Luminosity emitted = 1.07886e+43 erg / s Luminosity absorbed = 3.48159e+42 erg / s Luminosity requested = 1.07688e+43 erg / s
[[1mtardis.simulation.base[0m][[1;37mINFO[0m ] Plasma stratification:
t_rad next_t_rad w next_w
Shell
0 10711.113330 10866.508115 0.493541 0.489288
5 10819.470987 11017.147737 0.199112 0.192830
10 10633.892704 10795.422555 0.125270 0.121775
15 10359.259776 10499.976778 0.092971 0.090524
([1mbase.py[0m:264)
tardis.simulation.base - [1;37mINFO[0m - Plasma stratification:
t_rad next_t_rad w next_w
Shell
0 10711.113330 10866.508115 0.493541 0.489288
5 10819.470987 11017.147737 0.199112 0.192830
10 10633.892704 10795.422555 0.125270 0.121775
15 10359.259776 10499.976778 0.092971 0.090524
[[1mtardis.simulation.base[0m][[1;37mINFO[0m ] t_inner 10699.352 K -- next t_inner 10694.430 K ([1mbase.py[0m:266)
tardis.simulation.base - [1;37mINFO[0m - t_inner 10699.352 K -- next t_inner 10694.430 K
[[1mtardis.plasma.base [0m][[1;34mDEBUG[0m ] Updating modules in the following order: ([1mbase.py[0m:197)
tardis.plasma.base - [1;34mDEBUG[0m - Updating modules in the following order:
[[1mtardis.simulation.base[0m][[1;37mINFO[0m ] Starting iteration 5/5 ([1mbase.py[0m:196)
tardis.simulation.base - [1;37mINFO[0m - Starting iteration 5/5
[[1mtardis.simulation.base[0m][[1;37mINFO[0m ] Luminosity emitted = 1.07897e+43 erg / s Luminosity absorbed = 3.45406e+42 erg / s Luminosity requested = 1.07688e+43 erg / s ([1mbase.py[0m:273)
tardis.simulation.base - [1;37mINFO[0m - Luminosity emitted = 1.07897e+43 erg / s Luminosity absorbed = 3.45406e+42 erg / s Luminosity requested = 1.07688e+43 erg / s
[[1mtardis.simulation.base[0m][[1;37mINFO[0m ] Simulation finished in 5 iterations and took 50.01 s ([1mbase.py[0m:223)
tardis.simulation.base - [1;37mINFO[0m - Simulation finished in 5 iterations and took 50.01 s
###Markdown
You can now use the `to_hdf` method to save properties to an HDF file. Parameters: `file_path`: path where the HDF file should be stored (required); `path`: path inside the HDF store under which to store the elements (optional); `name`: name of the group inside the HDF store under which properties will be saved (optional).
###Code
simulation.to_hdf('/tmp/full_example.hdf')
#simulation.to_hdf(file_path='/tmp/full_example.hdf', path='/', name='simulation')
###Output
[[1mpy.warnings [0m][[1;33mWARNING[0m] /home/vaibhav/anaconda2/lib/python2.7/site-packages/pandas/core/generic.py:1299: PerformanceWarning:
your performance may suffer as PyTables will pickle object types that it cannot
map directly to c-types [inferred_type->mixed,key->block0_values] [items->[0]]
return pytables.to_hdf(path_or_buf, key, self, **kwargs)
([1mpytables.py[0m:2675)
py.warnings - [1;33mWARNING[0m - /home/vaibhav/anaconda2/lib/python2.7/site-packages/pandas/core/generic.py:1299: PerformanceWarning:
your performance may suffer as PyTables will pickle object types that it cannot
map directly to c-types [inferred_type->mixed,key->block0_values] [items->[0]]
return pytables.to_hdf(path_or_buf, key, self, **kwargs)
[[1mpy.warnings [0m][[1;33mWARNING[0m] /home/vaibhav/anaconda2/lib/python2.7/site-packages/pandas/core/generic.py:1299: PerformanceWarning:
your performance may suffer as PyTables will pickle object types that it cannot
map directly to c-types [inferred_type->mixed,key->values] [items->None]
return pytables.to_hdf(path_or_buf, key, self, **kwargs)
([1mpytables.py[0m:2675)
py.warnings - [1;33mWARNING[0m - /home/vaibhav/anaconda2/lib/python2.7/site-packages/pandas/core/generic.py:1299: PerformanceWarning:
your performance may suffer as PyTables will pickle object types that it cannot
map directly to c-types [inferred_type->mixed,key->values] [items->None]
return pytables.to_hdf(path_or_buf, key, self, **kwargs)
###Markdown
Open the stored HDF file with pandas and print its structure.
###Code
import pandas as pd
data = pd.HDFStore('/tmp/full_example.hdf')
print data
###Output
<class 'pandas.io.pytables.HDFStore'>
File path: /tmp/full_example.hdf
/simulation/model/homologous_density/density_0 series (shape->[21])
/simulation/model/homologous_density/scalars series (shape->[1])
/simulation/model/scalars series (shape->[1])
/simulation/model/t_radiative series (shape->[20])
/simulation/model/v_inner series (shape->[20])
/simulation/model/v_outer series (shape->[20])
/simulation/model/w series (shape->[20])
/simulation/plasma/abundance frame (shape->[6,20])
/simulation/plasma/atomic_mass series (shape->[6])
/simulation/plasma/beta_rad series (shape->[20])
/simulation/plasma/beta_sobolev frame (shape->[29224,20])
/simulation/plasma/density series (shape->[20])
/simulation/plasma/electron_densities series (shape->[20])
/simulation/plasma/excitation_energy series (shape->[4439])
/simulation/plasma/f_lu series (shape->[29224])
/simulation/plasma/g series (shape->[4439])
/simulation/plasma/g_electron series (shape->[20])
/simulation/plasma/general_level_boltzmann_factor frame (shape->[4439,20])
/simulation/plasma/ion_number_density frame (shape->[94,20])
/simulation/plasma/ionization_data frame (shape->[88,1])
/simulation/plasma/j_blues frame (shape->[29224,20])
/simulation/plasma/level_boltzmann_factor frame (shape->[4439,20])
/simulation/plasma/level_number_density frame (shape->[4439,20])
/simulation/plasma/levels frame (shape->[1,1])
/simulation/plasma/lines frame (shape->[29224,12])
/simulation/plasma/lines_lower_level_index series (shape->[29224])
/simulation/plasma/lines_upper_level_index series (shape->[29224])
/simulation/plasma/metastability series (shape->[4439])
/simulation/plasma/nu series (shape->[29224])
/simulation/plasma/number_density frame (shape->[6,20])
/simulation/plasma/partition_function frame (shape->[94,20])
/simulation/plasma/phi frame (shape->[88,20])
/simulation/plasma/scalars series (shape->[1])
/simulation/plasma/selected_atoms series (shape->[6])
/simulation/plasma/stimulated_emission_factor frame (shape->[29224,20])
/simulation/plasma/t_electrons series (shape->[20])
/simulation/plasma/t_rad series (shape->[20])
/simulation/plasma/tau_sobolevs frame (shape->[29224,20])
/simulation/plasma/transition_probabilities frame (shape->[87672,20])
/simulation/plasma/w series (shape->[20])
/simulation/plasma/wavelength_cm series (shape->[29224])
/simulation/runner/j_estimator series (shape->[20])
/simulation/runner/last_interaction_in_nu series (shape->[500000])
/simulation/runner/last_interaction_type series (shape->[500000])
/simulation/runner/last_line_interaction_in_id series (shape->[500000])
/simulation/runner/last_line_interaction_out_id series (shape->[500000])
/simulation/runner/last_line_interaction_shell_id series (shape->[500000])
/simulation/runner/montecarlo_virtual_luminosity series (shape->[10000])
/simulation/runner/nu_bar_estimator series (shape->[20])
/simulation/runner/output_energy series (shape->[500000])
/simulation/runner/output_nu series (shape->[500000])
/simulation/runner/packet_luminosity series (shape->[500000])
/simulation/runner/spectrum/_frequency series (shape->[10001])
/simulation/runner/spectrum/luminosity series (shape->[10000])
/simulation/runner/spectrum_reabsorbed/_frequency series (shape->[10001])
/simulation/runner/spectrum_reabsorbed/luminosity series (shape->[10000])
/simulation/runner/spectrum_virtual/_frequency series (shape->[10001])
/simulation/runner/spectrum_virtual/luminosity series (shape->[10000])
###Markdown
Access `model.homologous_density.density_0` under simulation, which is a one-dimensional array
###Code
print data['/simulation/model/homologous_density/density_0']
###Output
0 1970.527174
1 13.360318
2 10.146658
3 7.786621
4 6.033444
5 4.717122
6 3.718946
7 2.954982
8 2.365191
9 1.906156
10 1.546154
11 1.261789
12 1.035646
13 0.854653
14 0.708918
15 0.590901
16 0.494811
17 0.416168
18 0.351490
19 0.298047
20 0.253691
dtype: float64
###Markdown
Scalars are stored in a `scalars` `pandas.Series` for every module. For example, to access `model.t_inner` under simulation, one would need to do the following. Note: Quantities are always stored as their SI values.
###Code
print data['/simulation/model/scalars']['t_inner']
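# (aside) the stored value is the plain SI number; if an astropy quantity is wanted again,
# the unit can be re-attached manually, e.g. (assuming astropy units are available):
# from astropy import units as u
# t_inner = data['/simulation/model/scalars']['t_inner'] * u.K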
###Output
10694.430019
###Markdown
Breakdown of the various to_hdf methods. Every module in TARDIS has its own `to_hdf` method responsible for storing its own data to an HDF file. Plasma: The following call will store every plasma property to `/tmp/plasma_output.hdf` under `/parent/plasma`
###Code
simulation.plasma.to_hdf('/tmp/plasma_output.hdf', path='parent')
import pandas
with pandas.HDFStore('/tmp/plasma_output.hdf') as data:
print data
###Output
<class 'pandas.io.pytables.HDFStore'>
File path: /tmp/plasma_output.hdf
/parent/plasma/abundance frame (shape->[6,20])
/parent/plasma/atomic_mass series (shape->[6])
/parent/plasma/beta_rad series (shape->[20])
/parent/plasma/beta_sobolev frame (shape->[29224,20])
/parent/plasma/density series (shape->[20])
/parent/plasma/electron_densities series (shape->[20])
/parent/plasma/excitation_energy series (shape->[4439])
/parent/plasma/f_lu series (shape->[29224])
/parent/plasma/g series (shape->[4439])
/parent/plasma/g_electron series (shape->[20])
/parent/plasma/general_level_boltzmann_factor frame (shape->[4439,20])
/parent/plasma/ion_number_density frame (shape->[94,20])
/parent/plasma/ionization_data frame (shape->[88,1])
/parent/plasma/j_blues frame (shape->[29224,20])
/parent/plasma/level_boltzmann_factor frame (shape->[4439,20])
/parent/plasma/level_number_density frame (shape->[4439,20])
/parent/plasma/levels frame (shape->[1,1])
/parent/plasma/lines frame (shape->[29224,12])
/parent/plasma/lines_lower_level_index series (shape->[29224])
/parent/plasma/lines_upper_level_index series (shape->[29224])
/parent/plasma/metastability series (shape->[4439])
/parent/plasma/nu series (shape->[29224])
/parent/plasma/number_density frame (shape->[6,20])
/parent/plasma/partition_function frame (shape->[94,20])
/parent/plasma/phi frame (shape->[88,20])
/parent/plasma/scalars series (shape->[1])
/parent/plasma/selected_atoms series (shape->[6])
/parent/plasma/stimulated_emission_factor frame (shape->[29224,20])
/parent/plasma/t_electrons series (shape->[20])
/parent/plasma/t_rad series (shape->[20])
/parent/plasma/tau_sobolevs frame (shape->[29224,20])
/parent/plasma/transition_probabilities frame (shape->[87672,20])
/parent/plasma/w series (shape->[20])
/parent/plasma/wavelength_cm series (shape->[29224])
###Markdown
Plasma's `to_hdf` method can also accept a `collection` parameter which can specify which types of plasma properties will be stored. For example if we wanted to only store Input plasma properties, we would do the following:
###Code
from tardis.plasma.properties.base import Input
simulation.plasma.to_hdf('/tmp/plasma_input_output.hdf', collection=[Input])
import pandas
with pandas.HDFStore('/tmp/plasma_input_output.hdf') as data:
print data
###Output
<class 'pandas.io.pytables.HDFStore'>
File path: /tmp/plasma_input_output.hdf
/plasma/abundance frame (shape->[6,20])
/plasma/density series (shape->[20])
/plasma/scalars series (shape->[1])
/plasma/t_rad series (shape->[20])
/plasma/w series (shape->[20])
###Markdown
Model: The following call will store properties of the `Radial1DModel` to `/tmp/model_output.hdf` under `/model`.
###Code
simulation.model.to_hdf('/tmp/model_output.hdf')
###Output
_____no_output_____
###Markdown
MontecarloRunner: The following call will store properties of the `MontecarloRunner` to `/tmp/runner_output.hdf` under `/runner`.
###Code
simulation.runner.to_hdf('/tmp/runner_output.hdf')
import pandas
with pandas.HDFStore('/tmp/runner_output.hdf') as data:
print data
###Output
<class 'pandas.io.pytables.HDFStore'>
File path: /tmp/runner_output.hdf
/runner/j_estimator series (shape->[20])
/runner/last_interaction_in_nu series (shape->[500000])
/runner/last_interaction_type series (shape->[500000])
/runner/last_line_interaction_in_id series (shape->[500000])
/runner/last_line_interaction_out_id series (shape->[500000])
/runner/last_line_interaction_shell_id series (shape->[500000])
/runner/montecarlo_virtual_luminosity series (shape->[10000])
/runner/nu_bar_estimator series (shape->[20])
/runner/output_energy series (shape->[500000])
/runner/output_nu series (shape->[500000])
/runner/packet_luminosity series (shape->[500000])
/runner/spectrum/_frequency series (shape->[10001])
/runner/spectrum/luminosity series (shape->[10000])
/runner/spectrum_reabsorbed/_frequency series (shape->[10001])
/runner/spectrum_reabsorbed/luminosity series (shape->[10000])
/runner/spectrum_virtual/_frequency series (shape->[10001])
/runner/spectrum_virtual/luminosity series (shape->[10000])
|
notebooks/Field Line Trace 3D.ipynb | ###Markdown
Observatory Locations
###Code
# Source: http://www.polar.umd.edu/data_archive/mag_obs.txt
# CGM coordinates at 100 km altitude, as of 1997
# South CGM Pole: Lat. = -74.15, Long. = 126.14
# ----------------------------------------------------------------------------
# AGO Date Established: Geographic CGM
# Lat. Lon. Lat. Lon. L UT-MLT
# ----------------------------------------------------------------------------
# P1 Jan 1994 S 83.86 E 129.61 S 80.14 E 16.87 34.1 3:44
# P2 Dec 1992 S 85.67 E 313.62 S 69.84 E 19.33 8.4 3:29
# P3 Jan 1995 S 82.75 E 28.59 S 71.80 E 40.25 10.3 2:02
# P4 Jan 1994 S 82.01 E 96.76 S 80.00 E 41.64 33.2 1:59
# P5 Jan 1996 S 77.24 E 123.52 S 86.74 E 29.46 309.2 2:52
# P6 Jan 1997 S 69.51 E 130.03 S 84.92 E 215.39 127.5 14:26
# South Pole Station S 90.00 E 000.00 S 74.02 E 18.35 13.2 3:35
# McMurdo Station S 77.85 E 166.67 S 79.94 E 326.97 32.8 6:57
# Halley Bay S 75.50 E 333.40 S 61.56 E 29.01 4.4 2:43
# Davis S 68.58 E 77.97 S 74.58 E 100.14 14.2 22:00
# HAARP N 62.41 E 214.88 N 63.12 E 267.25 4.9 10:55
# Iqaluit, NWT N 63.75 E 291.47 N 72.09 E 14.53 11.8 4:07
# Sondre Stromfjord N 67.02 E 309.28 N 73.35 E 41.48 12.2 2:16
# Kilpisjarvi N 69.06 E 20.74 N 65.85 E 104.52 6.0 21:15
###Output
_____no_output_____
###Markdown
Convert Lat-Lon to GSM
###Code
obs_lat, obs_lon, name = -85.67, 313.62, 'AGO 2'
ut = 0 # Hours 0-24
ps = geopack.recalc(ut)
# From CartoPy Example: https://scitools.org.uk/cartopy/docs/latest/gallery/aurora_forecast.html#sphx-glr-gallery-aurora-forecast-py
fig = plt.figure(figsize=[12, 12])
projection = ccrs.Orthographic(180, -90)
ax = fig.add_subplot(1, 1, 1, projection=projection)
ax.coastlines(zorder=3)
ax.stock_img()
ax.gridlines()
pxx,pyy = projection.transform_point(obs_lon,obs_lat,ccrs.Geodetic())
ax.scatter([pxx],[pyy],marker='*',facecolor='y',edgecolor='k',s=500)
plt.annotate(name, xy=(pxx,pyy),xytext=(12, 0),va='center',textcoords='offset points',
fontsize='xx-large',fontweight='bold')
ax.set_extent([0, 359.9999, -90, -60],crs=ccrs.PlateCarree())
plt.show()
lat_rad = np.deg2rad(obs_lat)
lon_rad = np.deg2rad(obs_lon)
# Convert Geodetic to Geocentric Spherical
r, theta_rad = geopack.geodgeo(0,lat_rad,1)
# Convert Geocentric Spherical to Geocentric Cartesian
x_gc,y_gc,z_gc = geopack.sphcar(1,theta_rad,lon_rad,1)
print('GC: ', x_gc,y_gc,z_gc,' R=',np.sqrt(x_gc**2+y_gc**2+z_gc**2))
# Convert Geocentric Cartesian to GSM
x_gsm,y_gsm,z_gsm = geopack.geogsm(x_gc,y_gc,z_gc,1)
print('GSM: ', x_gsm,y_gsm,z_gsm,' R=',np.sqrt(x_gsm**2+y_gsm**2+z_gsm**2))
###Output
GC: 0.05243494986401025 -0.05502364924549361 -0.9971073031807899 R= 1.0
GSM: 0.3429509938404858 0.2501689605554522 -0.9054281346404184 R= 1.0
###Markdown
Field Line Trace
###Code
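# trace the field line from the GSM ground point using the IGRF internal field plus the T89 external model (parmod sets the T89 activity level); integration stops at rlim Earth radii or when the line returns to radius r0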
x,y,z,xx,yy,zz = geopack.trace(x_gsm,y_gsm,z_gsm,dir=-1,rlim=100,r0=.99999,parmod=2,exname='t89',inname='igrf',maxloop=10000)
ax=setup_fig()
ax.plot(xx,zz)
plt.show()
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# Make data
u = np.linspace(0, 2 * np.pi, 100)
v = np.linspace(0, np.pi, 100)
x = 1 * np.outer(np.cos(u), np.sin(v))
y = 1 * np.outer(np.sin(u), np.sin(v))
z = 1 * np.outer(np.ones(np.size(u)), np.cos(v))
# Plot the surface
ax.plot_surface(x, y, z, color='b')
ax.plot(xx,yy,zz)
plt.show()
###Output
_____no_output_____ |
week_5/Monte_carlo.ipynb | ###Markdown
Monte Carlo Methods. In this notebook, you will write your own implementations of many Monte Carlo (MC) algorithms. While we have provided some starter code, you are welcome to erase these hints and write your code from scratch. Part 0: Explore BlackjackEnv. We begin by importing the necessary packages.
###Code
import sys
import gym
import numpy as np
from collections import defaultdict
from plot_utils import plot_blackjack_values, plot_policy
###Output
_____no_output_____
###Markdown
Use the code cell below to create an instance of the [Blackjack](https://github.com/openai/gym/blob/master/gym/envs/toy_text/blackjack.py) environment.
###Code
env = gym.make('Blackjack-v0')
###Output
_____no_output_____
###Markdown
Each state is a 3-tuple of: - the player's current sum $\in \{0, 1, \ldots, 31\}$, - the dealer's face-up card $\in \{1, \ldots, 10\}$, and - whether or not the player has a usable ace (`no` $=0$, `yes` $=1$). The agent has two potential actions: ```STICK = 0, HIT = 1```. Verify this by running the code cell below.
###Code
print(env.observation_space)
print(env.action_space)
###Output
_____no_output_____
###Markdown
Execute the code cell below to play Blackjack with a random policy. (_The code currently plays Blackjack three times - feel free to change this number, or to run the cell multiple times. The cell is designed for you to get some experience with the output that is returned as the agent interacts with the environment._)
###Code
for i_episode in range(3):
state = env.reset()
while True:
print(state)
action = env.action_space.sample()
state, reward, done, info = env.step(action)
if done:
print('End game! Reward: ', reward)
print('You won :)\n') if reward > 0 else print('You lost :(\n')
break
###Output
_____no_output_____
###Markdown
Part 1: MC PredictionIn this section, you will write your own implementation of MC prediction (for estimating the action-value function). We will begin by investigating a policy where the player _almost_ always sticks if the sum of her cards exceeds 18. In particular, she selects action `STICK` with 80% probability if the sum is greater than 18; and, if the sum is 18 or below, she selects action `HIT` with 80% probability. The function `generate_episode_from_limit_stochastic` samples an episode using this policy. The function accepts as **input**:- `bj_env`: This is an instance of OpenAI Gym's Blackjack environment.It returns as **output**:- `episode`: This is a list of (state, action, reward) tuples (of tuples) and corresponds to $(S_0, A_0, R_1, \ldots, S_{T-1}, A_{T-1}, R_{T})$, where $T$ is the final time step. In particular, `episode[i]` returns $(S_i, A_i, R_{i+1})$, and `episode[i][0]`, `episode[i][1]`, and `episode[i][2]` return $S_i$, $A_i$, and $R_{i+1}$, respectively.
###Code
def generate_episode_from_limit_stochastic(bj_env):
episode = []
state = bj_env.reset()
while True:
probs = [0.8, 0.2] if state[0] > 18 else [0.2, 0.8]
action = np.random.choice(np.arange(2), p=probs)
next_state, reward, done, info = bj_env.step(action)
episode.append((state, action, reward))
state = next_state
if done:
break
return episode
###Output
_____no_output_____
###Markdown
Execute the code cell below to play Blackjack with the policy. (*The code currently plays Blackjack three times - feel free to change this number, or to run the cell multiple times. The cell is designed for you to gain some familiarity with the output of the `generate_episode_from_limit_stochastic` function.*)
###Code
for i in range(3):
print(generate_episode_from_limit_stochastic(env))
###Output
_____no_output_____
###Markdown
Now, you are ready to write your own implementation of MC prediction. Feel free to implement either first-visit or every-visit MC prediction; in the case of the Blackjack environment, the techniques are equivalent. Your algorithm has four arguments:- `env`: This is an instance of an OpenAI Gym environment.- `num_episodes`: This is the number of episodes that are generated through agent-environment interaction.- `generate_episode`: This is a function that returns an episode of interaction.- `gamma`: This is the discount rate. It must be a value between 0 and 1, inclusive (default value: `1`). The algorithm returns as output:- `Q`: This is a dictionary (of one-dimensional arrays) where `Q[s][a]` is the estimated action value corresponding to state `s` and action `a`.
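As a reminder, the quantity being averaged for each visited state-action pair is the sampled discounted return $G_t = \sum_{k=0}^{T-t-1} \gamma^{k} R_{t+k+1}$; `Q[s][a]` is then the average of the returns observed on visits to the pair $(s, a)$.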
###Code
def mc_prediction_q(env, num_episodes, generate_episode, gamma=1.0):
# initialize empty dictionaries of arrays
returns_sum = defaultdict(lambda: np.zeros(env.action_space.n))
N = defaultdict(lambda: np.zeros(env.action_space.n))
Q = defaultdict(lambda: np.zeros(env.action_space.n))
# loop over episodes
for i_episode in range(1, num_episodes+1):
# monitor progress
if i_episode % 1000 == 0:
print("\rEpisode {}/{}.".format(i_episode, num_episodes), end="")
sys.stdout.flush()
## TODO: complete the function
return Q
###Output
_____no_output_____
###Markdown
Use the cell below to obtain the action-value function estimate $Q$. We have also plotted the corresponding state-value function.To check the accuracy of your implementation, compare the plot below to the corresponding plot in the solutions notebook **Monte_Carlo_Solution.ipynb**.
###Code
# obtain the action-value function
Q = mc_prediction_q(env, 500000, generate_episode_from_limit_stochastic)
# obtain the corresponding state-value function
V_to_plot = dict((k,(k[0]>18)*(np.dot([0.8, 0.2],v)) + (k[0]<=18)*(np.dot([0.2, 0.8],v))) \
for k, v in Q.items())
# plot the state-value function
plot_blackjack_values(V_to_plot)
###Output
_____no_output_____
###Markdown
Part 2: MC Control. In this section, you will write your own implementation of constant-$\alpha$ MC control. Your algorithm has four arguments:- `env`: This is an instance of an OpenAI Gym environment.- `num_episodes`: This is the number of episodes that are generated through agent-environment interaction.- `alpha`: This is the step-size parameter for the update step.- `gamma`: This is the discount rate. It must be a value between 0 and 1, inclusive (default value: `1`). The algorithm returns as output:- `Q`: This is a dictionary (of one-dimensional arrays) where `Q[s][a]` is the estimated action value corresponding to state `s` and action `a`.- `policy`: This is a dictionary where `policy[s]` returns the action that the agent chooses after observing state `s`. (_Feel free to define additional functions to help you to organize your code._)
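For instance, one helper you might define here is epsilon-greedy action selection over the current `Q` estimate. A minimal sketch (assuming `Q` and `nA` as in the starter code below; `epsilon` is an exploration rate that you would choose and decay yourself):

```python
import numpy as np

def epsilon_greedy_probs(Q_s, epsilon, nA):
    """Action probabilities that favour argmax(Q_s) with probability 1 - epsilon + epsilon/nA."""
    probs = np.ones(nA) * epsilon / nA
    probs[np.argmax(Q_s)] += 1 - epsilon
    return probs

# usage inside the episode-generation loop (sketch):
# action = np.random.choice(np.arange(nA), p=epsilon_greedy_probs(Q[state], epsilon, nA))
```

Each sampled return $G_t$ then feeds the constant-$\alpha$ update $Q(S_t, A_t) \leftarrow Q(S_t, A_t) + \alpha\,(G_t - Q(S_t, A_t))$.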
###Code
def mc_control(env, num_episodes, alpha, gamma=1.0):
nA = env.action_space.n
# initialize empty dictionary of arrays
Q = defaultdict(lambda: np.zeros(nA))
# loop over episodes
for i_episode in range(1, num_episodes+1):
# monitor progress
if i_episode % 1000 == 0:
print("\rEpisode {}/{}.".format(i_episode, num_episodes), end="")
sys.stdout.flush()
## TODO: complete the function (generate episodes with an epsilon-greedy policy and apply the constant-alpha update to Q)
# greedy policy with respect to the current Q estimate, so the function returns cleanly
policy = dict((k, np.argmax(v)) for k, v in Q.items())
return policy, Q
###Output
_____no_output_____
###Markdown
Use the cell below to obtain the estimated optimal policy and action-value function. Note that you should fill in your own values for the `num_episodes` and `alpha` parameters.
###Code
# obtain the estimated optimal policy and action-value function
policy, Q = mc_control(env, 500000, 0.02)  # example values for num_episodes and alpha; feel free to change them
###Output
_____no_output_____
###Markdown
Next, we plot the corresponding state-value function.
###Code
# obtain the corresponding state-value function
V = dict((k,np.max(v)) for k, v in Q.items())
# plot the state-value function
plot_blackjack_values(V)
###Output
_____no_output_____
###Markdown
Finally, we visualize the policy that is estimated to be optimal.
###Code
# plot the policy
plot_policy(policy)
###Output
_____no_output_____ |
MoSeka/Pooling_util.ipynb | ###Markdown
functions below are NOT USED in training
###Code
def pool_forward_orig(A_prev, hparameters, mode = "max"):
"""
Implements the forward pass of the pooling layer
Arguments:
A_prev -- Input data, numpy array of shape (m, n_H_prev, n_W_prev, n_C_prev)
hparameters -- python dictionary containing "f" and "stride"
mode -- the pooling mode you would like to use, defined as a string ("max" or "average")
Returns:
A -- output of the pool layer, a numpy array of shape (m, n_H, n_W, n_C)
cache -- cache used in the backward pass of the pooling layer, contains the input and hparameters
"""
# Retrieve dimensions from the input shape
(m, n_H_prev, n_W_prev, n_C_prev) = A_prev.shape
# Retrieve hyperparameters from "hparameters"
f = hparameters["f"]
stride = hparameters["stride"]
# Define the dimensions of the output
n_H = int(1 + (n_H_prev - f) / stride)
n_W = int(1 + (n_W_prev - f) / stride)
n_C = n_C_prev
# Initialize output matrix A
A = np.zeros((m, n_H, n_W, n_C))
for i in range(m): # loop over the training examples
for h in range(n_H): # loop on the vertical axis of the output volume
for w in range(n_W): # loop on the horizontal axis of the output volume
for c in range (n_C): # loop over the channels of the output volume
# Find the corners of the current "slice"
vert_start = h*stride
vert_end = vert_start+f
horiz_start = w*stride
horiz_end = horiz_start+f
# Use the corners to define the current slice on the ith training example of A_prev, channel c.
a_prev_slice = A_prev[i, vert_start:vert_end, horiz_start:horiz_end, c]
# Compute the pooling operation on the slice. Use an if statment to differentiate the modes.
if mode == "max":
A[i, h, w, c] = np.max(a_prev_slice)
elif mode == "average":
A[i, h, w, c] = np.average(a_prev_slice)
# Store the input and hparameters in "cache" for pool_backward()
cache = (A_prev, hparameters)
# Making sure your output shape is correct
assert(A.shape == (m, n_H, n_W, n_C))
return A, cache
def create_mask_from_window(x):
"""
Creates a mask from an input matrix x, to identify the max entry of x.
Arguments:
x -- Array of shape (f, f)
Returns:
mask -- Array of the same shape as window, contains a True at the position corresponding to the max entry of x.
"""
mask = x==np.max(x)
return mask
def distribute_value(dz, shape):
"""
Distributes the input value in the matrix of dimension shape
Arguments:
dz -- input scalar
shape -- the shape (n_H, n_W) of the output matrix for which we want to distribute the value of dz
Returns:
a -- Array of size (n_H, n_W) for which we distributed the value of dz
"""
(n_H, n_W) = shape
a = np.ones(shape) * dz/n_H/n_W
return a
def pool_backward_orig(dA, cache, mode = "max"):
"""
Implements the backward pass of the pooling layer
Arguments:
dA -- gradient of cost with respect to the output of the pooling layer, same shape as A
cache -- cache output from the forward pass of the pooling layer, contains the layer's input and hparameters
mode -- the pooling mode you would like to use, defined as a string ("max" or "average")
Returns:
dA_prev -- gradient of cost with respect to the input of the pooling layer, same shape as A_prev
"""
# Retrieve information from cache
(A_prev, hparameters) = cache
# Retrieve hyperparameters from "hparameters"
stride = hparameters["stride"]
f = hparameters["f"]
# Retrieve dimensions from A_prev's shape and dA's shape
m, n_H_prev, n_W_prev, n_C_prev = A_prev.shape
m, n_H, n_W, n_C = dA.shape
# Initialize dA_prev with zeros
dA_prev = np.zeros((m, n_H_prev, n_W_prev, n_C_prev))
for i in range(m): # loop over the training examples
# select training example from A_prev
a_prev = A_prev[i, :, :, :]
for h in range(n_H): # loop on the vertical axis
for w in range(n_W): # loop on the horizontal axis
for c in range(n_C): # loop over the channels (depth)
# Find the corners of the current "slice"
vert_start = h*stride
vert_end = vert_start+f
horiz_start = w*stride
horiz_end = horiz_start+f
# Compute the backward propagation in both modes.
if mode == "max":
# Use the corners and "c" to define the current slice from a_prev (≈1 line)
a_prev_slice = a_prev[vert_start:vert_end, horiz_start:horiz_end, c]
# Create the mask from a_prev_slice (≈1 line)
mask = create_mask_from_window(a_prev_slice)
# Set dA_prev to be dA_prev + (the mask multiplied by the correct entry of dA) (≈1 line)
dA_prev[i, vert_start: vert_end, horiz_start: horiz_end, c] += np.multiply(mask, dA[i, h, w, c])
elif mode == "average":
# Get the value a from dA
da = dA[i, h, w, c]
# Define the shape of the filter as fxf
shape = (f, f)
# Distribute it to get the correct slice of dA_prev. i.e. Add the distributed value of da.
dA_prev[i, vert_start: vert_end, horiz_start: horiz_end, c] += distribute_value(da, shape)
# Making sure your output shape is correct
assert(dA_prev.shape == A_prev.shape)
return dA_prev
###Output
_____no_output_____ |
aoe2de_data_analysis.ipynb | ###Markdown
Regression, Classification and Clustering Machine Learning Algorithms for Age of Empires II Definitive Edition Dataset* Trying to predict the Winner in Ranked Random Match in the Regression Problem* Winner prediction in Ranked Random Match in the Classification Problem* Players segmentation in Ranked Random Match in the Clustering Problem
###Code
from google.colab import drive
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import zscore
from scipy.stats.mstats import winsorize
import seaborn as sns
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import MinMaxScaler
from sklearn.decomposition import PCA
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.model_selection import GridSearchCV, train_test_split, cross_val_score
from sklearn.neighbors import KNeighborsRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
import xgboost as xgb
from sklearn import metrics
from sklearn.metrics import mean_absolute_error, confusion_matrix
import statsmodels.api as sm
drive.mount('/gdrive')
%cd /gdrive
root = '/gdrive/My Drive/AoE2_DE_Datasets/'
###Output
Mounted at /gdrive
/gdrive
###Markdown
1. Statistics of The Age of Empires II DE. 1.1 Data
###Code
dataSetRanked = pd.read_csv(root + "rankedrm.csv", encoding = 'ISO-8859-1')
dataSetRanked.head()
dataSetMatchPlayers = pd.read_csv(root + "match_players.csv")
dataSetMatchPlayers.head()
dataSetMatches = pd.read_csv(root + "matches.csv")
dataSetMatches.head()
###Output
_____no_output_____
###Markdown
1.2 Statistics of Civilizations
###Code
dataSetMatchPlayers = pd.concat([dataSetMatchPlayers.iloc[:, 2], dataSetMatchPlayers.iloc[:, 4], dataSetMatchPlayers.iloc[:, 6]], axis = 1, join = "inner")
dataSetMatchPlayers = dataSetMatchPlayers.sample(n = int(len(dataSetMatchPlayers) * 0.001), random_state = 1)
dataSetMatchPlayers.info()
dataSetMatchPlayers["rating"] = dataSetMatchPlayers["rating"].replace(np.nan, dataSetMatchPlayers["rating"].median())
dataSetMatchPlayers.isnull().sum() * 100 / dataSetMatchPlayers.shape[0]
dataSetMatchPlayers["winner"] = dataSetMatchPlayers["winner"].astype(int)
sns.pairplot(dataSetMatchPlayers, hue = "civ")
def pieDonut(dataFrame, column):
fig, ax = plt.subplots(figsize=(10, 10), subplot_kw = dict(aspect = "equal"))
data = dataFrame[column].value_counts()
dataLabels = data.keys()
wedges, texts = ax.pie(data, wedgeprops = dict(width = 0.5), startangle = -40)
bbox_props = dict(boxstyle = "square,pad=0.3", fc = "w", ec = "k", lw = 0.72)
kw = dict(arrowprops = dict(arrowstyle = "-"),
bbox = bbox_props, zorder = 0, va = "center")
for i, p in enumerate(wedges):
if i < 10:
ang = (p.theta2 - p.theta1)/2. + p.theta1
y = np.sin(np.deg2rad(ang))
x = np.cos(np.deg2rad(ang))
horizontalalignment = {-1: "right", 1: "left"}[int(np.sign(x))]
connectionstyle = "angle,angleA=0,angleB={}".format(ang)
kw["arrowprops"].update({"connectionstyle": connectionstyle})
ax.annotate(dataLabels[i], xy = (x, y), xytext = (1.35 * np.sign(x), 1.4 * y),
horizontalalignment = horizontalalignment, **kw)
else:
break
ax.set_title(column)
plt.show()
civMatches = dataSetMatchPlayers["civ"].groupby(dataSetMatchPlayers["civ"]).count().sort_values(ascending = False)
civMatches
plt.figure(figsize=(10, 10))
plt.barh(dataSetMatchPlayers.groupby("civ")["civ"].count().index, dataSetMatchPlayers.groupby("civ")["civ"].count())
plt.title("civ")
plt.show()
print(dataSetMatchPlayers["civ"].groupby(dataSetMatchPlayers["civ"]).count().sort_values(ascending = False) / len(dataSetMatchPlayers["civ"]))
pieDonut(dataSetMatchPlayers, "civ")
civWins = dataSetMatchPlayers["winner"].loc[dataSetMatchPlayers['winner'] == 1].groupby(dataSetMatchPlayers["civ"]).count().sort_values(ascending = False)
civWinRate = civWins / civMatches
civWinRate.sort_values(ascending = False)
###Output
_____no_output_____
###Markdown
1.3 Statistics of Maps
###Code
dataSetMatches = dataSetMatches.iloc[:, 3:10].sample(n = int(len(dataSetMatches) * 0.001), random_state = 1)
dataSetMatches.info()
dataSetMatches.isnull().sum() * 100 / dataSetMatches.shape[0]
dataSetMatches["average_rating"] = dataSetMatches["average_rating"].replace(np.nan, dataSetMatches["average_rating"].median())
numericalColumnsDSM = ["patch", "average_rating", "num_players"]
categoricalColumnsDSM = ["ladder", "map", "map_size", "server"]
sns.pairplot(dataSetMatches, hue = "map")
plt.figure(figsize=(40,30))
counter = 1
for i in categoricalColumnsDSM[:3]:
plt.subplot(3, 4, counter)
plt.barh(dataSetMatches.groupby(i)[i].count().index, dataSetMatches.groupby(i)[i].count())
plt.title(i)
counter += 1
print(dataSetMatches["map"].groupby(dataSetMatches["map"]).count().sort_values(ascending = False) / len(dataSetMatches["map"]))
pieDonut(dataSetMatches, "map")
for i in dataSetMatches["map_size"].unique():
print("-" * 10 + i + "-" * 10)
mapsWithSize = dataSetMatches["map_size"].loc[dataSetMatches["map_size"] == i].groupby(dataSetMatches["map"]).count().sort_values(ascending = False)
print(mapsWithSize)
###Output
----------tiny----------
map
arabia 908
arena 247
four_lakes 165
golden_pit 117
hideout 103
acropolis 74
megarandom 57
golden_swamp 49
serengeti 45
kilimanjaro 44
hill_fort 43
gold_rush 39
mediterranean 34
valley 33
nomad 29
mountain_pass 24
ghost_lake 20
islands 17
continental 15
bog_islands 13
socotra 12
black_forest 12
cenotes 12
baltic 8
migration 7
steppe 7
nile_delta 6
archipelago 5
alpine_lakes 4
mongolia 3
fortress 3
hamburger 2
ravines 2
team_islands 2
water_nomad 1
Name: map_size, dtype: int64
----------large----------
map
arabia 45
arena 40
lombardia 15
black_forest 12
oasis 12
hideout 11
megarandom 10
hill_fort 8
golden_swamp 7
scandinavia 6
golden_pit 4
valley 3
nomad 3
yucatan 3
land_nomad 3
gold_rush 3
cenotes 3
rivers 2
socotra 2
team_islands 2
mediterranean 1
mongolia 1
ghost_lake 1
four_lakes 1
fortress 1
serengeti 1
steppe 1
acropolis 1
Name: map_size, dtype: int64
----------medium----------
map
arabia 139
arena 61
lombardia 39
oasis 29
black_forest 27
hideout 25
megarandom 24
nomad 24
scandinavia 17
hill_fort 16
land_nomad 13
valley 11
golden_pit 10
golden_swamp 9
ghost_lake 7
team_islands 6
gold_rush 6
mongolia 5
steppe 5
cenotes 4
wolf_hill 4
yucatan 3
mediterranean 3
four_lakes 3
migration 2
rivers 2
serengeti 2
socotra 2
fortress 2
budapest 2
mountain_ridge 1
islands 1
highland 1
baltic 1
alpine_lakes 1
Name: map_size, dtype: int64
----------normal----------
map
arabia 74
arena 46
lombardia 21
hideout 17
black_forest 14
scandinavia 10
nomad 10
megarandom 10
land_nomad 9
team_islands 9
hill_fort 8
golden_swamp 7
oasis 7
golden_pit 6
gold_rush 4
yucatan 4
ghost_lake 4
four_lakes 3
socotra 3
valley 3
mongolia 2
mediterranean 2
wolf_hill 2
mountain_ridge 1
pacific_islands 1
rivers 1
fortress 1
steppe 1
highland 1
ravines 1
Name: map_size, dtype: int64
###Markdown
1.4 Statistics of Servers
###Code
servers = dataSetMatches["server"].groupby(dataSetMatches["server"]).count().sort_values(ascending = False)
servers
plt.figure(figsize=(10, 10))
plt.barh(dataSetMatches.groupby("server")["server"].count().index, dataSetMatches.groupby("server")["server"].count())
plt.title("server")
plt.show()
print(servers / len(dataSetMatches["server"]))
pieDonut(dataSetMatches, "server")
###Output
_____no_output_____
###Markdown
2. Data Preprocessing for Ranked Random Matches Dataset
2.1 Data Cleaning
###Code
dataSetRanked = pd.concat([dataSetRanked.iloc[:, 7], dataSetRanked.iloc[:, 14] ,dataSetRanked.iloc[:, 18:21] ,dataSetRanked.iloc[:, 22]], axis = 1, join = "inner")
dataSetRanked = dataSetRanked.sample(n = int(len(dataSetRanked) * 0.01), random_state = 1)
dataSetRanked.info()
dataSetRanked
###Output
_____no_output_____
###Markdown
2.1.1 Missing Values
###Code
dataSetRanked["rating.win"] = dataSetRanked["rating.win"].replace(np.nan, dataSetRanked["rating.win"].median())
dataSetRanked["rating.lose"] = dataSetRanked["rating.lose"].replace(np.nan, dataSetRanked["rating.lose"].median())
import time, datetime
def convertTime(t):
h, m, s = map(int, t.split(':'))
return (h * 60 + m) * 60 + s
dataSetRanked["duration"] = dataSetRanked["duration"].apply(convertTime)
winners = pd.concat([dataSetRanked.iloc[:, 0], dataSetRanked.iloc[:, 2], dataSetRanked.iloc[:, 4:]], axis = 1)
winners = winners.rename({"rating.win" : "rating", "map_type.name" : "map", "civ.win.name" : "civ"}, axis = 1)
# label every row coming from the winning side with 1 (assigning a bare Series here would misalign on the sampled index)
winners["winner"] = 1.0
losers = pd.concat([dataSetRanked.iloc[:, 1], dataSetRanked.iloc[:, 2:4], dataSetRanked.iloc[:, 5]], axis = 1)
losers = losers.rename({"rating.lose" : "rating", "map_type.name" : "map", "civ.lose.name" : "civ"}, axis = 1)
# label every row coming from the losing side with 0
losers["winner"] = 0.0
#test = dataSetRanked['rating.win'].append(dataSetRanked['rating.lose']).reset_index(drop = True)
trainSet = pd.concat([winners, losers])
trainSet
trainSet.info()
trainSet.nunique()
numericalColumns = ["rating", "duration"]
categoricalColumns = ["map", "civ", "winner"]
###Output
_____no_output_____
###Markdown
2.1.2 Outlier
2.1.2.1 Outlier (Boxplot)
###Code
plt.figure(figsize=(20,20))
counter = 1
for i in numericalColumns:
plt.subplot(3,4,counter)
plt.boxplot(trainSet[i], whis=10)
plt.title(f"{i} (whis=10)")
counter += 1
plt.show()
###Output
_____no_output_____
###Markdown
2.1.2.2 Outlier (Winsorize)
###Code
trainSet["duration"] = winsorize(trainSet["duration"], (0, 0.02))
plt.figure(figsize = (5, 5))
plt.title("Winsorized data(duration)")
plt.boxplot(trainSet["duration"], whis = 10)
plt.show()
###Output
_____no_output_____
###Markdown
2.2 Data Exploration
2.2.1 Continuous Variable Visualization
###Code
plt.figure(figsize=(20,20))
counter = 1
for i in numericalColumns:
plt.subplot(3,4,counter)
plt.hist(trainSet[i])
plt.title(i)
counter += 1
plt.show()
plt.figure(figsize=(5,5))
sns.heatmap(trainSet[["rating", "duration"]].corr(), square=True, annot=True, linewidths=.5, vmin=0, vmax=1, cmap='viridis')
###Output
_____no_output_____
###Markdown
2.2.2 Categorical Variable Visualization
###Code
plt.figure(figsize=(30,22))
counter = 1
for i in categoricalColumns:
plt.subplot(3, 4, counter)
plt.barh(trainSet.groupby(i)[i].count().index, trainSet.groupby(i)[i].count())
plt.title(i)
counter += 1
plt.show()
###Output
_____no_output_____
###Markdown
3. Feature Engineering
###Code
featuresDF = pd.get_dummies(trainSet, columns = categoricalColumns)
###Output
_____no_output_____
###Markdown
3.1 Standard Scaling
###Code
scaler = StandardScaler()
for i in numericalColumns:
featuresDF[i] = scaler.fit_transform(featuresDF[[i]])
plt.figure(figsize=(20,20))
counter = 1
for i in numericalColumns:
plt.subplot(3, 4, counter)
plt.scatter(featuresDF[i], featuresDF[i])
plt.title(i)
plt.xlabel("value")
plt.ylabel("Standard Scaling Value")
counter += 1
plt.show()
featuresDF.shape
###Output
_____no_output_____
###Markdown
3.2 Dimension Reduction
###Code
corrMatrix = featuresDF.corr().abs()
upper = corrMatrix.where(np.triu(np.ones(corrMatrix.shape), k=1).astype(bool))
toDrop = [column for column in upper.columns if any(upper[column] > 0.90)]
featuresDF = featuresDF.drop(toDrop, axis=1)
featuresDF.shape
featuresDF
###Output
_____no_output_____
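###Markdown
For reference, it can be useful to see which one-hot columns the correlation filter discarded; a quick look using the ``toDrop`` list built above (a sketch, nothing here is re-fit):
###Code
# Columns removed because they were >0.90 correlated with an earlier column
print(len(toDrop), "columns dropped")
print(toDrop[:10])
###Output
_____no_output_____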
###Markdown
4. Regression Problem
4.1 Train-Test Split
###Code
Y = featuresDF["winner_0.0"]
X = featuresDF.drop("winner_0.0", axis = 1)
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, train_size = 0.2, random_state = 42)
###Output
_____no_output_____
###Markdown
4.2 Grid Search
###Code
#Decision Tree
DTC_Regressor = DecisionTreeRegressor()
DTC_params= {"criterion":["squared_error"], "min_samples_split": [2, 3, 4], "min_samples_leaf": [1, 2, 4], "max_depth": [None, 3, 5]}
DTC_grid_cv = GridSearchCV(DTC_Regressor, param_grid=DTC_params, cv=4)
DTC_grid_cv.fit(X_train, Y_train)
#Random Forest
rf_Regressor = RandomForestRegressor()
rf_params= {"n_estimators":[70, 100, 130], "min_samples_leaf": [1, 2, 4], "min_samples_split": [2, 5, 10], "n_jobs": [-1]}
rf_grid_cv = GridSearchCV(rf_Regressor, param_grid=rf_params, cv=4)
rf_grid_cv.fit(X_train, Y_train)
#KNN
KNN_Regressor = KNeighborsRegressor()
KNN_params= {"n_neighbors":[3,4,5,6,7], "n_jobs": [-1], "weights":["uniform", "distance"]}
KNN_grid_cv = GridSearchCV(KNN_Regressor, param_grid=KNN_params, cv=4)
KNN_grid_cv.fit(X_train, Y_train)
print("-" * 60 + "Decision Tree" + "-" * 60)
print("Best parameters : ", DTC_grid_cv.best_params_)
print("Best scores : ", DTC_grid_cv.best_score_)
print("-" * 60 + "Random Forest" + "-" * 60)
print("Best parameters : ", rf_grid_cv.best_params_)
print("Best scores : ", rf_grid_cv.best_score_)
print("-" * 60 + "KNN" + "-" * 70)
print("Best parameters : ", KNN_grid_cv.best_params_)
print("Best scores : ", KNN_grid_cv.best_score_)
###Output
------------------------------------------------------------Decision Tree------------------------------------------------------------
Best parameters : {'criterion': 'squared_error', 'max_depth': 3, 'min_samples_leaf': 1, 'min_samples_split': 4}
Best scores : -0.07748270638447752
------------------------------------------------------------Random Forest------------------------------------------------------------
Best parameters : {'min_samples_leaf': 4, 'min_samples_split': 10, 'n_estimators': 100, 'n_jobs': -1}
Best scores : -0.12366443388840581
------------------------------------------------------------KNN----------------------------------------------------------------------
Best parameters : {'n_jobs': -1, 'n_neighbors': 7, 'weights': 'uniform'}
Best scores : -0.14583244353330604
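###Markdown
All of the best scores above are negative. GridSearchCV scores regressors with R² by default, so a negative value means the model does worse than a constant prediction of the mean; a quick baseline comparison makes that explicit (a sketch using scikit-learn's DummyRegressor on the same split):
###Code
from sklearn.dummy import DummyRegressor
# R² of always predicting the training-fold mean is roughly 0 by construction,
# so any model scoring at or below this is not learning a useful signal
baseline = DummyRegressor(strategy="mean")
print(cross_val_score(baseline, X_train, Y_train, cv=4).mean())
###Output
_____no_output_____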
###Markdown
Regression models do not work here: all of the best cross-validated scores above are negative, i.e. worse than a constant baseline, so we treat winner prediction as a classification problem instead.
5. Classification Problem
5.1 Model Training
5.1.1 Grid search
###Code
from sklearn.naive_bayes import BernoulliNB
from sklearn.tree import DecisionTreeClassifier
from xgboost import XGBClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import GridSearchCV
#Decision Tree
DTC_Classifier = DecisionTreeClassifier()
DTC_params= {"criterion":["entropy"], "min_samples_split": [2, 3, 4], "min_samples_leaf": [1, 2, 4]}
DTC_grid_cv = GridSearchCV(DTC_Classifier, param_grid=DTC_params, cv=4)
DTC_grid_cv.fit(X_train, Y_train)
#XGBOOST
xgb_Classifier = XGBClassifier()
xgb_params= {"min_child_weight": [1, 5, 10], "max_depth": [3,4], "colsample_bytree": [0.6, 1.0], "learning_rate":[0.1, 0.3], "n_jobs": [-1]}
xgb_grid_cv = GridSearchCV(xgb_Classifier, param_grid=xgb_params, cv=4)
xgb_grid_cv.fit(X_train, Y_train)
#Random Forest
rf_Classifier = RandomForestClassifier()
rf_params= {"n_estimators":[90, 100, 110], "min_samples_leaf": [1, 2, 4], "min_samples_split": [2, 5, 10], "n_jobs": [-1]}
rf_grid_cv = GridSearchCV(rf_Classifier, param_grid=rf_params, cv=4)
rf_grid_cv.fit(X_train, Y_train)
#KNN
KNN_Classifier = KNeighborsClassifier()
KNN_params= {"n_neighbors":[3,4,5,6,7], "n_jobs": [-1]}
KNN_grid_cv = GridSearchCV(KNN_Classifier, param_grid=KNN_params, cv=4)
KNN_grid_cv.fit(X_train, Y_train)
print("-" * 60 + "Decision Tree" + "-" * 60)
print("Best parameters : ", DTC_grid_cv.best_params_)
print("Best scores : ", DTC_grid_cv.best_score_)
print("-" * 60 + "XGBOOST" + "-" * 66)
print("Best parameters : ", xgb_grid_cv.best_params_)
print("Best scores : ", xgb_grid_cv.best_score_)
print("-" * 60 + "Random Forest" + "-" * 60)
print("Best parameters : ", rf_grid_cv.best_params_)
print("Best scores : ", rf_grid_cv.best_score_)
print("-" * 60 + "KNN" + "-" * 70)
print("Best parameters : ", KNN_grid_cv.best_params_)
print("Best scores : ", KNN_grid_cv.best_score_)
###Output
------------------------------------------------------------Decision Tree------------------------------------------------------------
Best parameters : {'criterion': 'entropy', 'min_samples_leaf': 4, 'min_samples_split': 4}
Best scores : 0.5143306379155436
------------------------------------------------------------XGBOOST------------------------------------------------------------------
Best parameters : {'colsample_bytree': 1.0, 'learning_rate': 0.1, 'max_depth': 3, 'min_child_weight': 1, 'n_jobs': -1}
Best scores : 0.561769991015274
------------------------------------------------------------Random Forest------------------------------------------------------------
Best parameters : {'min_samples_leaf': 4, 'min_samples_split': 5, 'n_estimators': 110, 'n_jobs': -1}
Best scores : 0.5736522911051213
------------------------------------------------------------KNN----------------------------------------------------------------------
Best parameters : {'n_jobs': -1, 'n_neighbors': 7}
Best scores : 0.5379829290206649
###Markdown
5.1.2 Training
###Code
BNB_Classifier = BernoulliNB()
BNB_Classifier.fit(X_train, Y_train)
DTC_Classifier = DecisionTreeClassifier(criterion = 'entropy', min_samples_leaf = 4, min_samples_split = 4)
DTC_Classifier.fit(X_train, Y_train)
xgb = XGBClassifier(colsample_bytree = 1.0, learning_rate = 0.1, max_depth = 3, min_child_weight = 1, n_jobs = -1)
xgb.fit(X_train, Y_train)
rf = RandomForestClassifier(min_samples_leaf = 4, min_samples_split = 5, n_estimators = 110, n_jobs = -1)
rf.fit(X_train,Y_train)
KNN_Classifier = KNeighborsClassifier(n_jobs = -1, n_neighbors = 7)
KNN_Classifier.fit(X_train, Y_train)
###Output
_____no_output_____
###Markdown
5.2 Model Evaluation
###Code
models = []
models.append(('Naive Bayes Classifier', BNB_Classifier))
models.append(('Decision Tree Classifier', DTC_Classifier))
models.append(('XGBOOST', xgb))
models.append(('Random Forest', rf))
models.append(('KNN_Classifier', KNN_Classifier))
from sklearn import metrics
for i, v in models:
scores = cross_val_score(v, X_train, Y_train, cv = 4)
accuracy = metrics.accuracy_score(Y_train, v.predict(X_train))
classification = metrics.classification_report(Y_train, v.predict(X_train))
print(f"----------------------------- {i} Model Evaluation -----------------------------")
print(f"\nCross Validation Mean Score:\n {scores.mean()}\n")
print(f"\n Model Accuracy:\n {accuracy}\n")
print(f"\n Classification report:\n\n {classification}")
###Output
----------------------------- Naive Bayes Classifier Model Evaluation -----------------------------
Cross Validation Mean Score:
0.5308625336927224
Model Accuracy:
0.6327014218009479
Classification report:
precision recall f1-score support
0 0.61 0.49 0.54 189
1 0.64 0.75 0.69 233
accuracy 0.63 422
macro avg 0.63 0.62 0.62 422
weighted avg 0.63 0.63 0.63 422
----------------------------- Decision Tree Classifier Model Evaluation -----------------------------
Cross Validation Mean Score:
0.5191374663072776
Model Accuracy:
0.8009478672985783
Classification report:
precision recall f1-score support
0 0.74 0.86 0.79 189
1 0.87 0.76 0.81 233
accuracy 0.80 422
macro avg 0.80 0.81 0.80 422
weighted avg 0.81 0.80 0.80 422
----------------------------- XGBOOST Model Evaluation -----------------------------
Cross Validation Mean Score:
0.561769991015274
Model Accuracy:
0.7914691943127962
Classification report:
precision recall f1-score support
0 0.87 0.62 0.73 189
1 0.75 0.93 0.83 233
accuracy 0.79 422
macro avg 0.81 0.78 0.78 422
weighted avg 0.81 0.79 0.78 422
----------------------------- Random Forest Model Evaluation -----------------------------
Cross Validation Mean Score:
0.5712713387241689
Model Accuracy:
0.7322274881516587
Classification report:
precision recall f1-score support
0 0.81 0.53 0.64 189
1 0.70 0.90 0.79 233
accuracy 0.73 422
macro avg 0.75 0.71 0.71 422
weighted avg 0.75 0.73 0.72 422
----------------------------- KNN_Classifier Model Evaluation -----------------------------
Cross Validation Mean Score:
0.5379829290206649
Model Accuracy:
0.6729857819905213
Classification report:
precision recall f1-score support
0 0.66 0.54 0.60 189
1 0.68 0.78 0.72 233
accuracy 0.67 422
macro avg 0.67 0.66 0.66 422
weighted avg 0.67 0.67 0.67 422
###Markdown
5.3 Model Test Accuracy
###Code
def confusionMatrix(i, cm):
fig, ax = plt.subplots(figsize = (5, 5))
sns.heatmap(cm, linewidths = 1, annot = True, ax = ax, fmt = 'g')
ax.set_xlabel('Predicted labels')
ax.set_ylabel('True labels')
ax.set_title(f'{i} Confusion Matrix')
for i, v in models:
accuracy = metrics.accuracy_score(Y_test, v.predict(X_test))
confusion_matrix = metrics.confusion_matrix(Y_test, v.predict(X_test))
classification = metrics.classification_report(Y_test, v.predict(X_test))
print(f"----------------------------- {i} Model Test Results -----------------------------")
print(f"\n Model Accuracy:\n {accuracy}\n")
print(f"\n Classification report:\n\n {classification}")
confusionMatrix(i, confusion_matrix)
###Output
----------------------------- Naive Bayes Classifier Model Test Results -----------------------------
Model Accuracy:
0.4940828402366864
Classification report:
precision recall f1-score support
0 0.52 0.37 0.43 877
1 0.48 0.63 0.54 813
accuracy 0.49 1690
macro avg 0.50 0.50 0.49 1690
weighted avg 0.50 0.49 0.49 1690
----------------------------- Decision Tree Classifier Model Test Results -----------------------------
Model Accuracy:
0.4834319526627219
Classification report:
precision recall f1-score support
0 0.50 0.49 0.50 877
1 0.46 0.47 0.47 813
accuracy 0.48 1690
macro avg 0.48 0.48 0.48 1690
weighted avg 0.48 0.48 0.48 1690
----------------------------- XGBOOST Model Test Results -----------------------------
Model Accuracy:
0.508284023668639
Classification report:
precision recall f1-score support
0 0.54 0.35 0.43 877
1 0.49 0.68 0.57 813
accuracy 0.51 1690
macro avg 0.52 0.51 0.50 1690
weighted avg 0.52 0.51 0.50 1690
----------------------------- Random Forest Model Test Results -----------------------------
Model Accuracy:
0.4911242603550296
Classification report:
precision recall f1-score support
0 0.52 0.27 0.35 877
1 0.48 0.73 0.58 813
accuracy 0.49 1690
macro avg 0.50 0.50 0.47 1690
weighted avg 0.50 0.49 0.46 1690
----------------------------- KNN_Classifier Model Test Results -----------------------------
Model Accuracy:
0.47633136094674555
Classification report:
precision recall f1-score support
0 0.49 0.37 0.42 877
1 0.47 0.59 0.52 813
accuracy 0.48 1690
macro avg 0.48 0.48 0.47 1690
weighted avg 0.48 0.48 0.47 1690
###Markdown
6. Clustering Problem
6.1 Elbow Method
###Code
from sklearn.cluster import KMeans
wcss = []
for i in range(1, 10):
kmeans = KMeans(n_clusters = i, init = 'k-means++', random_state = 42)
kmeans.fit(featuresDF)
wcss.append(kmeans.inertia_)
plt.figure(figsize = (16, 8))
plt.title('The Elbow Method')
plt.plot(range(1, 10), wcss, 'bx-')
plt.xlabel('Number of clusters')
plt.ylabel('WCSS')
plt.show()
###Output
_____no_output_____
###Markdown
6.2 Silhouette Score
###Code
from sklearn.metrics import silhouette_score
silhouetteScores = []
for i in range(2, 10):
km = KMeans(n_clusters = i, random_state = 42)
c = km.fit_predict(featuresDF)
silhouetteScores.append(silhouette_score(featuresDF, c))
plt.figure(figsize = (16, 8))
plt.title('Silhouette Scores')
plt.bar(range(2,10), silhouetteScores)
plt.xlabel('Number of clusters', fontsize = 20)
plt.ylabel('S(i)', fontsize = 20)
plt.show()
###Output
_____no_output_____
###Markdown
6.3 Model Building
###Code
# Training a predicting using K-Means Algorithm.
kmeans = KMeans(n_clusters = 2, random_state = 42).fit(featuresDF)
pred = kmeans.predict(featuresDF)
# Appending those cluster value into main dataframe (without standard-scalar)
featuresDF['cluster'] = pred + 1 # preprocessed data
trainSet['cluster'] = pred + 1 # old data
featuresDF["cluster"].value_counts()
###Output
_____no_output_____
###Markdown
6.4 Cluster Interpretation
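A compact first look at the two clusters is to compare their average rating, duration and win rate on the un-scaled data (a small sketch using ``trainSet``, see also the histograms below):
###Code
# Mean rating, duration and win rate per cluster
trainSet.groupby("cluster")[["rating", "duration", "winner"]].mean()
###Output
_____no_output_____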
###Code
for i in trainSet:
if (i == "cluster"):
continue
g = sns.FacetGrid(trainSet, col = "cluster", hue = "cluster", palette = "Set2")
g.map(plt.hist, i, bins = 52, ec = "k")
g.set_xticklabels(rotation = 90, color = 'black')
g.set_yticklabels(color = 'black')
g.set_xlabels(size = 15, color = 'black')
g.set_titles(size = 15, color = 'black', fontweight = "bold")
g.fig.set_figheight(5)
plt.gcf().set_size_inches(15, 15)
###Output
_____no_output_____ |
Code/Updated__New_Notebook_Blancpain.ipynb | ###Markdown
DMML TEAM BLANCPAIN
Import some libraries
###Code
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import collections as mc
%load_ext autoreload
%autoreload 2
import pandas as pd
import seaborn as sns
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
sns.set_style("white")
from sklearn.model_selection import train_test_split
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder, OneHotEncoder, StandardScaler
from sklearn.linear_model import LogisticRegressionCV, LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn import datasets
from sklearn.metrics import confusion_matrix, accuracy_score, precision_score, recall_score, f1_score
from sklearn.utils.multiclass import unique_labels
from sklearn.base import TransformerMixin
from sklearn.pipeline import Pipeline
import string
from spacy.lang.en.stop_words import STOP_WORDS
from spacy.lang.en import English
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix, accuracy_score, precision_score, recall_score, f1_score
from sklearn.feature_extraction.text import CountVectorizer,TfidfVectorizer
from sklearn.base import TransformerMixin
from sklearn.pipeline import Pipeline
import spacy
from gensim.models import Word2Vec
!pip install autocorrect
from autocorrect import Speller
spell = Speller(lang='en')
%matplotlib inline
sns.set_style("dark")
# Hide warnings
import warnings
warnings.filterwarnings('ignore')
###Output
Collecting autocorrect
[?25l Downloading https://files.pythonhosted.org/packages/a0/71/eb8c1f83439dfe6cbe1edb03be1f1110b242503b61950e7c292dd557c23e/autocorrect-2.2.2.tar.gz (621kB)
[K |████████████████████████████████| 624kB 5.7MB/s
[?25hBuilding wheels for collected packages: autocorrect
Building wheel for autocorrect (setup.py) ... [?25l[?25hdone
Created wheel for autocorrect: filename=autocorrect-2.2.2-cp36-none-any.whl size=621491 sha256=e55c833e93347a128f8792927527fb7699ce77f8e9483a6037477daa1f537eb9
Stored in directory: /root/.cache/pip/wheels/b4/0b/7d/98268d64c8697425f712c897265394486542141bbe4de319d6
Successfully built autocorrect
Installing collected packages: autocorrect
Successfully installed autocorrect-2.2.2
###Markdown
Data Importation
###Code
df_train = pd.read_csv('https://raw.githubusercontent.com/sfrancey/Real-or-Not-NLP-with-Disaster-Tweets_Team_Blancpain/main/Data/training_data.csv')
# Import df_train with emoji standardization
#df_train = pd.read_csv('https://raw.githubusercontent.com/sfrancey/Real-or-Not-NLP-with-Disaster-Tweets_Team_Blancpain/main/Data/df_train_emoji.csv')
df_test = pd.read_csv('https://raw.githubusercontent.com/sfrancey/Real-or-Not-NLP-with-Disaster-Tweets_Team_Blancpain/main/Data/test_data.csv')
df_sample = pd.read_csv('https://raw.githubusercontent.com/sfrancey/Real-or-Not-NLP-with-Disaster-Tweets_Team_Blancpain/main/Data/sample_submission.csv')
###Output
_____no_output_____
###Markdown
Data Visualization
Training Set
###Code
df_train
print("There are {0} rows and {1} columns in the train dataset.".format(df_train.shape[0],df_train.shape[1]))
###Output
There are 6471 rows and 5 columns in the train dataset.
###Markdown
Test Set
###Code
df_test
print("There are {0} rows and {1} columns in the test dataset.".format(df_test.shape[0],df_test.shape[1]))
df_sample.head()
###Output
_____no_output_____
###Markdown
Handle missing values (fill them with a placeholder)
###Code
df_train.fillna("Unknown", inplace = True)
df_test.fillna("Unknown", inplace = True)
###Output
_____no_output_____
###Markdown
Base Rate
###Code
base_rate = max(len(df_train[df_train["target"] == 0]) / len(df_train), len(df_train[df_train["target"] == 1]) / len(df_train))
print("The base rate for this problem is :", base_rate)
###Output
The base rate for this problem is : 0.5719363313243703
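###Markdown
Equivalently, the class balance (and hence the base rate, i.e. the accuracy of always predicting the majority class) can be read off directly from the label distribution (sketch):
###Code
# Proportion of each class in the training labels; the larger share is the base rate
df_train["target"].value_counts(normalize=True)
###Output
_____no_output_____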
###Markdown
Processing
###Code
#import emojis library
!pip install emot
import re
from emot.emo_unicode import UNICODE_EMO, EMOTICONS
# convert text emoticons (e.g. ":)") into descriptive words
def convert_emoticons_text(dataFrame):
    n_rows = dataFrame.shape[0]  # avoid shadowing the built-in len
    i = 0
    while i < n_rows:
for emot in EMOTICONS:
dataFrame.iloc[i] = re.sub(u'('+emot+')', "_".join(EMOTICONS[emot].replace(",","").split()), dataFrame.iloc[i])
i+=1
return dataFrame.head(20)
# convert unicode emojis into descriptive words
def convert_emoticons_emoji(dataFrame):
    n_rows = dataFrame.shape[0]  # avoid shadowing the built-in len
    i = 0
    while i < n_rows:
for emot in UNICODE_EMO:
dataFrame.iloc[i] = dataFrame.iloc[i].replace(emot, "_".join(UNICODE_EMO[emot].replace(",","").replace(":","").split()))
i+=1
return dataFrame.head(20)
#clean smileys in text
#convert_emoticons_text(df_train['text'])
#convert_emoticons_emoji(df_train['text'])
#export new dataframe to csv file. We will re use it in our code by importing it at the beginning of our notebook
#from google.colab import drive
#drive.mount("drive", force_remount=True)
#df_train_emoji_csv =df_train.to_csv("df_train_emoji.csv")
#!cp df_train_emoji.csv "drive/My Drive/Database_emoji"
df_train["keyword_text"] = df_train['keyword'] + ' ' + df_train['text']
df_train["keyword_text"] = df_train["keyword_text"].astype(str)
df_test["keyword_text"] = df_test['keyword'] + ' ' + df_test['text']
df_test["keyword_text"] = df_test["keyword_text"].astype(str)
keyword_list = ['panicking',
'suicide%20bombing',
'body%20bags',
'blight',
'armageddon',
'stretcher',
'razed',
'ruin',
'wreckage',
'derailment',
'bombing',
'nuclear%20disaster',
'panic',
'obliteration',
'blazing',
'typhoon',
'outbreak',
'wrecked',
'oil%20spill',
'rescuers',
'debris',
'wildfire',
'aftershock',
'meltdown',
'bloody',
'traumatised',
'electrocute',
'smoke',
'screaming',
'body%20bag',
'suicide%20bomber',
'blew%20up']
def clean_keword_text(row):
if row['keyword'] in keyword_list:
return row['keyword'] + ' ' + row['text']
else:
return row['text']
df_train["keyword_cleaned_text"] = df_train.apply(clean_keword_text, axis=1)
df_test["keyword_cleaned_text"] = df_test.apply(clean_keword_text, axis=1)
df_train.head()
###Output
_____no_output_____
###Markdown
Processing function
###Code
# Load English language model
sp = spacy.load('en_core_web_sm')
# Create a list of punctuation marks
punctuations = string.punctuation
# Create a list of stopwords
stop_words = spacy.lang.en.stop_words.STOP_WORDS
# Create a list of isolated letters
lonely_letters = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']
# Create a list of further elements that could be deleted to improve the model
to_be_deleted = ['http','html','@']
def tokenize_function(text):
# Create token object, which is used to create documents with linguistic annotations.
sp_obj = sp(text)
# Lemmatize each token and convert each token into lowercase
mytokens = [ word.lemma_.lower().strip() if word.lemma_ != "-PRON-" else word.lower_ for word in sp_obj ]
# Correcting spelling mistakes
#mytokens = [ spell(str(word)) for word in sp_obj]
## Remove stop words
#mytokens = [ word for word in mytokens if word not in stop_words]
# Remove punctuation
#mytokens = [ word for word in mytokens if word not in punctuations]
# Remove isolated letters
#mytokens = [ word for word in mytokens if word not in lonely_letters]
# Delete further elements
#mytokens = [ word for word in mytokens if all(char not in word for char in to_be_deleted)]
return mytokens
tokenize_function("j'aime le machine learning788")
###Output
_____no_output_____
###Markdown
Create ngrams on text feature
###Code
import gensim.downloader
import multiprocessing
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
def eval_pipeline(df_train, df_test, tokenize_function, target_col='keyword', model_type='randomForest', feature_engineering='tfidf'):
''' Eval pipeline '''
# Make copies to avoid any changes in the main datasets
train = df_train.copy()
test = df_test.copy()
# Define the model
if model_type == 'logisticRegression':
model = LogisticRegression(solver='lbfgs', #LogisticRegressionCV
#cv=2,
max_iter=1000,
random_state=50,
n_jobs=multiprocessing.cpu_count())
elif model_type == 'randomForest':
model = RandomForestClassifier(n_estimators=200,
max_depth=8,
random_state=50,
n_jobs=multiprocessing.cpu_count())
elif model_type == 'knn':
        model = KNeighborsClassifier(n_jobs=multiprocessing.cpu_count())  # KNeighborsClassifier has no random_state parameter
# Define embeddings and model
if feature_engineering == 'tfidf':
feature_vector = TfidfVectorizer(tokenizer=tokenize_function)
model = Pipeline([('vectorizer', feature_vector),
('classifier', model)])
elif feature_engineering == 'w2v':
# Function that uses w2v embeddings and compute the mean value of all the of w2v vectors of all the words
def document_vector(doc):
"""Create document vectors by averaging word vectors. Remove out-of-vocabulary words."""
tokens = tokenize_function(doc)
doc = [word for word in tokens if word in feature_vector.wv.vocab]
return np.mean(feature_vector[doc], axis=0)
# Create corpus for w2v
sentences = []
for text in [list(train[target_col]), list(test[target_col])]:
for string in text:
sentences.append(tokenize_function(string))
# Create w2v embeddings
feature_vector = gensim.models.Word2Vec(sentences,
size=100,
window=10,
min_count=1,
iter=20,
seed=50,
workers=multiprocessing.cpu_count()
)
train[target_col] = train[target_col].apply(document_vector)
test[target_col] = test[target_col].apply(document_vector)
# Split dataset
df_train_eval, df_validation = train_test_split(train, test_size=0.2, random_state = 50)
y_train_eval = df_train_eval[["target"]]
y_validation = df_validation[["target"]]
X1_train_eval = list(df_train_eval[target_col])
X1_validation = list(df_validation[target_col])
# Fit data
model.fit(X1_train_eval, y_train_eval)
# Eval data
train_accuracy_score = accuracy_score(y_train_eval, model.predict(X1_train_eval))
print(f"The training accuracy is : {train_accuracy_score}")
validation_accuracy_score = accuracy_score(y_validation, model.predict(X1_validation))
print(f"The validation accuracy is : {validation_accuracy_score}")
# Retrain on the full dataset
y_train = train[["target"]]
X1_train = list(train[target_col])
X1_test = list(test[target_col])
# Create submission file
model.fit(X1_train, y_train)
submission = pd.DataFrame(model.predict(X1_test), columns=["target"])
submission.to_csv(f'submission_{target_col}_{feature_engineering}_model_{model_type}_test2.csv', index=False)
###Output
_____no_output_____
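###Markdown
As written, the TF-IDF branch of ``eval_pipeline`` uses unigrams only. To actually include the n-grams this section refers to, the vectorizer inside the pipeline could be given an ``ngram_range`` (the choice of ``(1, 2)``, unigrams plus bigrams, is illustrative here, not something tuned); a small standalone sketch of the effect on vocabulary size:
###Code
# Illustration only: adding bigrams grows the feature space considerably
demo_vec = TfidfVectorizer(tokenizer=tokenize_function, ngram_range=(1, 2))
demo_matrix = demo_vec.fit_transform(df_train["text"].head(200))
print(demo_matrix.shape)
###Output
_____no_output_____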
###Markdown
With "Keyword" feature
###Code
eval_pipeline(df_train, df_test, tokenize_function, target_col='keyword', model_type='randomForest', feature_engineering='tfidf')
eval_pipeline(df_train, df_test, tokenize_function, target_col='keyword', model_type='randomForest', feature_engineering='w2v')
eval_pipeline(df_train, df_test, tokenize_function, target_col='keyword', model_type='logisticRegression', feature_engineering='tfidf')
eval_pipeline(df_train, df_test, tokenize_function, target_col='keyword', model_type='logisticRegression', feature_engineering='w2v')
###Output
_____no_output_____
###Markdown
With "Text" Feature
###Code
eval_pipeline(df_train, df_test, tokenize_function, target_col='text', model_type='randomForest', feature_engineering='tfidf')
eval_pipeline(df_train, df_test, tokenize_function, target_col='text', model_type='randomForest', feature_engineering='w2v')
eval_pipeline(df_train, df_test, tokenize_function, target_col='text', model_type='logisticRegression', feature_engineering='tfidf')
eval_pipeline(df_train, df_test, tokenize_function, target_col='text', model_type='logisticRegression', feature_engineering='w2v')
###Output
_____no_output_____
###Markdown
With "Text" and "Keyword" features
###Code
eval_pipeline(df_train, df_test, tokenize_function, target_col='keyword_text', model_type='randomForest', feature_engineering='tfidf')
eval_pipeline(df_train, df_test, tokenize_function, target_col='keyword_text', model_type='randomForest', feature_engineering='w2v')
eval_pipeline(df_train, df_test, tokenize_function, target_col='keyword_text', model_type='logisticRegression', feature_engineering='tfidf')
eval_pipeline(df_train, df_test, tokenize_function, target_col='keyword_text', model_type='logisticRegression', feature_engineering='w2v')
###Output
_____no_output_____
###Markdown
With "Text" and "Keyword" cleaned
###Code
eval_pipeline(df_train, df_test, tokenize_function, target_col='keyword_cleaned_text', model_type='randomForest', feature_engineering='tfidf')
eval_pipeline(df_train, df_test, tokenize_function, target_col='keyword_cleaned_text', model_type='randomForest', feature_engineering='w2v')
eval_pipeline(df_train, df_test, tokenize_function, target_col='keyword_cleaned_text', model_type='logisticRegression', feature_engineering='tfidf')
eval_pipeline(df_train, df_test, tokenize_function, target_col='keyword_cleaned_text', model_type='logisticRegression', feature_engineering='w2v')
###Output
_____no_output_____ |
proyecto1.ipynb | ###Markdown
Project 1 - SIMARGL
Sergio Marchena - 16387
###Code
import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
# load the datasets
df1 = pd.read_csv('dataset-part1.csv')
df2 = pd.read_csv('dataset-part2.csv')
###Output
_____no_output_____
###Markdown
exploratory analysis
###Code
# first view
df1.head()
# first view
df2.head()
# merge the dataframes in order to work only with one
frames = [df1, df2]
df = pd.concat(frames, ignore_index=True)
df
# summary of the variables in the df
df.info()
# check for null entries
df.isnull().sum()
# check for NaN entries
df.isna().sum()
###Output
_____no_output_____
###Markdown
pre-processing
###Code
# non numerical variables to deal with:
# 1. 'DST_TO_SRC_SECOND_BYTES'
# 2. 'IPV4_DST_ADDR'
# 3. 'IPV4_SRC_ADDR'
# 4. 'PROTOCOL_MAP'
# 5. 'SRC_TO_DST_SECOND_BYTES'
# 6. 'L7_PROTO_NAME'
# 7. 'LABEL'
# 1. 'DST_TO_SRC_SECOND_BYTES'
df['DST_TO_SRC_SECOND_BYTES'].describe()
# 569243 unique values, too many to encode, dropping it
df.drop('DST_TO_SRC_SECOND_BYTES', axis=1, inplace=True)
df.head(5)
# 2. 'IPV4_DST_ADDR'
df['IPV4_DST_ADDR'].describe()
# 440887 unique values, too many to encode, dropping it
df.drop('IPV4_DST_ADDR', axis=1, inplace=True)
df.head(5)
# 3. 'IPV4_SRC_ADDR'
df['IPV4_SRC_ADDR'].describe()
# 440887 unique values, too many to encode, dropping it
df.drop('IPV4_SRC_ADDR', axis=1, inplace=True)
df.head(5)
# 4. 'PROTOCOL_MAP'
df['PROTOCOL_MAP'].describe()
# 5 unique values, encoding it
# encoding 'PROTOCOL_MAP' to numerical values
df['PROTOCOL_MAP'].value_counts()
# tcp = 1
# udp = 2
# icmp = 3
# ipv6-icmp = 4
# gre = 5
df['PROTOCOL_MAP'] = df['PROTOCOL_MAP'].replace(to_replace='tcp', value=1)
df['PROTOCOL_MAP'] = df['PROTOCOL_MAP'].replace(to_replace='udp', value=2)
df['PROTOCOL_MAP'] = df['PROTOCOL_MAP'].replace(to_replace='icmp', value=3)
df['PROTOCOL_MAP'] = df['PROTOCOL_MAP'].replace(to_replace='ipv6-icmp', value=4)
df['PROTOCOL_MAP'] = df['PROTOCOL_MAP'].replace(to_replace='gre', value=5)
df['PROTOCOL_MAP'].value_counts()
# successfuly encoded!
# 5. 'SRC_TO_DST_SECOND_BYTES'
df['SRC_TO_DST_SECOND_BYTES'].describe()
# 3160289 unique values, too many to encode, dropping it
df.drop('SRC_TO_DST_SECOND_BYTES', axis=1, inplace=True)
df.head(5)
# 5. 'L7_PROTO_NAME'
df['L7_PROTO_NAME'].describe(), df['L7_PROTO_NAME'].value_counts()
df_test = df[['L7_PROTO_NAME', 'FIREWALL_EVENT']]
labels, unique = pd.factorize(df_test['L7_PROTO_NAME'])
df_test2 = pd.DataFrame(labels, columns = ['new_L7_PROTO_NAME'])
df_test2
# Substituting with numerical value (457 uniques)
df['L7_PROTO_NAME'] = df_test2['new_L7_PROTO_NAME']
df
# 6. 'LABEL'
df['LABEL'].value_counts()
# **TARGET VALUE**
# 4 unique values, encoding them:
# Normal flow = 1
# SYN Scan - aggressive = 2
# Denial of Service R-U-Dead-Yet = 3
# Denial of Service Slowloris = 4
df['LABEL'] = df['LABEL'].replace(to_replace='Normal flow', value=1)
df['LABEL'] = df['LABEL'].replace(to_replace='SYN Scan - aggressive', value=2)
df['LABEL'] = df['LABEL'].replace(to_replace='Denial of Service R-U-Dead-Yet', value=3)
df['LABEL'] = df['LABEL'].replace(to_replace='Denial of Service Slowloris', value=4)
df['LABEL'].value_counts()
# last checkup of the data
df.info()
# 46 numerical varibales variables
# ready to go to selection
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 12207873 entries, 0 to 12207872
Data columns (total 46 columns):
# Column Dtype
--- ------ -----
0 BIFLOW_DIRECTION int64
1 DIRECTION int64
2 FIREWALL_EVENT int64
3 FIRST_SWITCHED int64
4 FLOW_ACTIVE_TIMEOUT int64
5 FLOW_DURATION_MICROSECONDS int64
6 FLOW_DURATION_MILLISECONDS int64
7 FLOW_END_MILLISECONDS int64
8 FLOW_END_SEC int64
9 FLOW_ID int64
10 FLOW_INACTIVE_TIMEOUT int64
11 FLOW_START_MILLISECONDS int64
12 FLOW_START_SEC int64
13 FRAME_LENGTH int64
14 IN_BYTES int64
15 IN_PKTS int64
16 L4_DST_PORT int64
17 L4_SRC_PORT int64
18 LAST_SWITCHED int64
19 MAX_IP_PKT_LEN int64
20 MIN_IP_PKT_LEN int64
21 OOORDER_IN_PKTS int64
22 OOORDER_OUT_PKTS int64
23 OUT_BYTES int64
24 OUT_PKTS int64
25 PROTOCOL int64
26 PROTOCOL_MAP int64
27 RETRANSMITTED_IN_BYTES int64
28 RETRANSMITTED_IN_PKTS int64
29 RETRANSMITTED_OUT_BYTES int64
30 RETRANSMITTED_OUT_PKTS int64
31 TCP_FLAGS int64
32 TCP_WIN_MAX_IN int64
33 TCP_WIN_MAX_OUT int64
34 TCP_WIN_MIN_IN int64
35 TCP_WIN_MIN_OUT int64
36 TCP_WIN_MSS_IN int64
37 TCP_WIN_MSS_OUT int64
38 TCP_WIN_SCALE_IN int64
39 TCP_WIN_SCALE_OUT int64
40 SRC_TOS int64
41 DST_TOS int64
42 L7_PROTO_NAME int64
43 SAMPLING_INTERVAL int64
44 TOTAL_FLOWS_EXP int64
45 LABEL int64
dtypes: int64(46)
memory usage: 4.2 GB
###Markdown
feature selection
###Code
# Pearson Correlation
#plt.figure(figsize=(25,25))
cor = df.corr()
#sns.heatmap(cor, annot=True, cmap=plt.cm.Reds)
#plt.show()
# correlation with output variable
cor_target = abs(cor['LABEL'])
# selecting highly correlated features
relevant_features = cor_target[cor_target>0.45]
relevant_features
highCorrDf = df[['FLOW_DURATION_MICROSECONDS', 'FLOW_DURATION_MILLISECONDS', 'PROTOCOL_MAP','TCP_FLAGS',
'TCP_WIN_SCALE_OUT']]
highCorrDf
target = df['LABEL']
target
###Output
_____no_output_____
###Markdown
train/test splitting
###Code
# we will separate the data in train (55%), validation (15%) and test (30%) = 100%
from sklearn import metrics, model_selection, tree
# first split (train 55% and test 45%)
x_train, x_val_test, y_train, y_val_test = model_selection.train_test_split(highCorrDf, target, test_size = 0.45, random_state=321)
# second split: 33% and 67%.
x_val, x_test, y_val, y_test = model_selection.train_test_split(x_val_test, y_val_test, test_size = 0.67, random_state=123)
###Output
_____no_output_____
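###Markdown
A quick check that the two-step split really yields roughly 55% / 15% / 30% of the data (0.45 × 0.33 ≈ 0.15 and 0.45 × 0.67 ≈ 0.30); a small sketch:
###Code
# Fraction of the full dataset in each split
n = len(highCorrDf)
print(len(x_train) / n, len(x_val) / n, len(x_test) / n)
###Output
_____no_output_____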
###Markdown
implementation no. 1: decision tree classifier
###Code
clf = tree.DecisionTreeClassifier()
clf = clf.fit(x_train,y_train)
target_pred = clf.predict(x_val)
###Output
_____no_output_____
###Markdown
metrics for implementation no. 1 (validation set: 15% of the data)
###Code
print('validation data:')
print()
h = metrics.accuracy_score(y_val, target_pred)
print('accuracy: %0.2f' % (h*100), '%')
print('-------------------------------------------------------')
print('confusion matrix \n',metrics.confusion_matrix(y_val, target_pred))
print('-------------------------------------------------------')
print(metrics.classification_report(y_val, target_pred, target_names=['Normal Flow', 'SYN', 'RUDY', 'Slowloris']))
print('-------------------------------------------------------')
###Output
validation data:
accuracy: 91.73 %
-------------------------------------------------------
confusion matrix
[[849305 109274 9414 8228]
[ 3437 367099 13 3]
[ 8395 23 326344 2867]
[ 5401 8 2784 120274]]
-------------------------------------------------------
precision recall f1-score support
Normal Flow 0.98 0.87 0.92 976221
SYN 0.77 0.99 0.87 370552
RUDY 0.96 0.97 0.97 337629
Slowloris 0.92 0.94 0.93 128467
accuracy 0.92 1812869
macro avg 0.91 0.94 0.92 1812869
weighted avg 0.93 0.92 0.92 1812869
-------------------------------------------------------
###Markdown
metrics for implementation no. 1 (test set: 30% of the data)
###Code
target_pred_test = clf.predict(x_test)
print('test data:')
print('-------------------------------------------------------')
h = metrics.accuracy_score(y_test, target_pred_test)
print('accuracy: %0.2f' % (h*100), '%')
print('-------------------------------------------------------')
print('confusion matrix \n',metrics.confusion_matrix(y_test, target_pred_test))
print('-------------------------------------------------------')
print(metrics.classification_report(y_test, target_pred_test, target_names=['Normal Flow', 'SYN', 'RUDY', 'Slowloris']))
print('-------------------------------------------------------')
fpr, tpr, thresholds = metrics.roc_curve(y_test, target_pred_test, pos_label=2)
metrics.auc(fpr, tpr)
plt.plot(fpr, tpr)
###Output
_____no_output_____
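###Markdown
Note that ``roc_curve`` with ``pos_label=2`` and hard class predictions only describes one class against the rest. For a multi-class summary, one option is a one-vs-rest ROC-AUC computed from predicted probabilities (a sketch; it relies on ``predict_proba``, which the fitted decision tree ``clf`` exposes):
###Code
# Macro-averaged one-vs-rest ROC-AUC on the test set
proba = clf.predict_proba(x_test)
print(metrics.roc_auc_score(y_test, proba, multi_class="ovr", average="macro"))
###Output
_____no_output_____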
###Markdown
implementation no. 2: KNN classifier
###Code
# scaler
from sklearn.preprocessing import StandardScaler
#sc = StandardScaler()
#X_train = sc.fit_transform(x_train)
#X_test = sc.transform(x_val)
# knn
from sklearn.neighbors import KNeighborsClassifier
classifier = KNeighborsClassifier(n_neighbors = 7)
classifier.fit(x_train, y_train)
y_pred = classifier.predict(x_val)
ac = metrics.accuracy_score(y_val,y_pred)
print('validation data: ')
print('-------------------------------------------------------')
print('accuracy: %0.2f' % (100*ac),'%')
print('-------------------------------------------------------')
print('confusion matrix \n',metrics.confusion_matrix(y_val, y_pred))
print('-------------------------------------------------------')
print(metrics.classification_report(y_val, y_pred, target_names=['Normal Flow', 'SYN', 'RUDY', 'Slowloris']))
print('-------------------------------------------------------')
y_pred_test = classifier.predict(x_test)
ac = metrics.accuracy_score(y_test,y_pred_test)
print('test data: ')
print('-------------------------------------------------------')
print('accuracy: %0.2f' % (100*ac),'%')
print('-------------------------------------------------------')
print('confusion matrix \n',metrics.confusion_matrix(y_test, y_pred_test))
print('-------------------------------------------------------')
print(metrics.classification_report(y_test, y_pred_test, target_names=['Normal Flow', 'SYN', 'RUDY', 'Slowloris']))
print('-------------------------------------------------------')
cm = metrics.confusion_matrix(y_test,y_pred_test)
cm
fpr, tpr, thresholds = metrics.roc_curve(y_test, y_pred_test, pos_label=2)
metrics.auc(fpr, tpr)
plt.plot(fpr,tpr)
###Output
_____no_output_____ |
docs/notebooks/BiologicalConstraints.ipynb | ###Markdown
Biological Constraints
The default RNN network has all-to-all connectivity and allows units to have both excitatory and inhibitory connections. However, this does not reflect the biology we know. PsychRNN includes a framework for easily specifying biological constraints on the model. This example will introduce the different options for biological constraints included in PsychRNN:
- Dale Ratio
- Autapses
- Connectivity
- Fixed Weights
###Code
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import Normalize
%matplotlib inline
# ---------------------- Import the package ---------------------------
from psychrnn.tasks.perceptual_discrimination import PerceptualDiscrimination
from psychrnn.backend.models.basic import Basic
# ---------------------- Set up a basic model ---------------------------
pd = PerceptualDiscrimination(dt = 10, tau = 100, T = 2000, N_batch = 128)
network_params = pd.get_task_params() # get the params passed in and defined in pd
network_params['name'] = 'model' # name the model uniquely if running multiple models in unison
network_params['N_rec'] = 50 # set the number of recurrent units in the model
# -------------------- Set up variables that will be useful later -------
N_in = network_params['N_in']
N_rec = network_params['N_rec']
N_out = network_params['N_out']
###Output
_____no_output_____
###Markdown
This function will plot the colormap of the weights
###Code
def plot_weights(weights, title=""):
cmap = plt.set_cmap('RdBu_r')
img = plt.matshow(weights, norm=Normalize(vmin=-.5, vmax=.5))
plt.title(title)
plt.colorbar()
###Output
_____no_output_____
###Markdown
Biologically Unconstrained
###Code
basicModel = Basic(network_params) # instantiate a basic vanilla RNN we will compare to later on
weights = basicModel.get_weights()
plot_weights(weights['W_rec'])
basicModel.destruct()
###Output
_____no_output_____
###Markdown
Dale Ratio
Dale's Principle states that a neuron releases the same set of neurotransmitters at each of its synapses ([Eccles et al., 1954](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC1365877/pdf/jphysiol01404-0118.pdf)). Since neurotransmitters tend to be either excitatory or inhibitory, theorists have taken this to mean that each neuron has exclusively either excitatory or inhibitory synapses ([Song et al., 2016](https://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1004792); [Rajan and Abbott, 2006](https://pubmed.ncbi.nlm.nih.gov/17155583/)).
To set the dale ratio, simply set ``network_params['dale_ratio']`` equal to the proportion of total recurrent neurons that should be excitatory. The remainder will be inhibitory.
The dale ratio can be combined with any other parameter settings except for ``network_params['initializer']``, in which case the dale ratio needs to be passed directly into the [initializer](../apidoc/backend.rst#module-psychrnn.backend.initializations) being used. The dale ratio is not enforced if [LSTM](../apidoc/backend.rst#psychrnn.backend.models.lstm.LSTM) is used as the RNN implementation.
Once the model is instantiated it can be trained and tested as demonstrated in [Simple Example](PerceptualDiscrimination.ipynb).
###Code
dale_network_params = network_params.copy()
dale_network_params['name'] = 'dales_model'
dale_network_params['dale_ratio'] = .8
daleModel = Basic(dale_network_params)
weights = daleModel.get_weights()
plot_weights(weights['W_rec'])
daleModel.destruct()
###Output
_____no_output_____
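###Markdown
A quick numerical sanity check of the constraint (a sketch: it assumes ``get_weights()`` returns the Dale-constrained effective recurrent matrix, which is what the plot above suggests):
###Code
# With dale_ratio = .8, about 80% of units should have purely non-negative outgoing weights.
# Depending on the row/column convention used for W_rec, one of these fractions should be close to 0.8.
W = weights['W_rec']
print("all-non-negative columns:", np.mean(np.all(W >= 0, axis=0)))
print("all-non-negative rows:   ", np.mean(np.all(W >= 0, axis=1)))
###Output
_____no_output_____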
###Markdown
Autapses
To disallow autapses (self connections), simply set ``network_params['autapses'] = False``.
The autapses parameter can be combined with any other parameter settings except for ``network_params['initializer']``, in which case the boolean for autapses needs to be passed directly into the [initializer](../apidoc/backend.rst#module-psychrnn.backend.initializations) being used. Autapses are not enforced if [LSTM](../apidoc/backend.rst#psychrnn.backend.models.lstm.LSTM) is used as the RNN implementation.
Once the model is instantiated it can be trained and tested as demonstrated in [Simple Example](PerceptualDiscrimination.ipynb).
###Code
autapses_network_params = network_params.copy()
autapses_network_params['name'] = 'autapses_model'
autapses_network_params['autapses'] = False
autapsesModel = Basic(autapses_network_params)
weights = autapsesModel.get_weights()
plot_weights(weights['W_rec'])
###Output
_____no_output_____
###Markdown
Notice the white line on the diagonal (self-connections) above, where the weights are 0.
###Code
autapsesModel.destruct()
###Output
_____no_output_____
###Markdown
Connectivity
The brain is not all-to-all connected, so it can be useful to restrict and structure the connectivity of our RNNs.
The input_connectivity, recurrent_connectivity, and output_connectivity parameters allow us to do just that. Any subset of them can be combined with any other parameter settings except for ``network_params['initializer']``, in which case the connectivity matrices need to be passed directly into the [initializer](../apidoc/backend.rst#module-psychrnn.backend.initializations) being used. Connectivity is not enforced if [LSTM](../apidoc/backend.rst#psychrnn.backend.models.lstm.LSTM) is used as the RNN implementation.
Once the model is instantiated it can be trained and tested as demonstrated in [Simple Example](PerceptualDiscrimination.ipynb).
###Code
modular_network_params = network_params.copy()
modular_network_params['name'] = 'modular_model'
# Set connectivity matrices to the default -- fully connected
input_connectivity = np.ones((N_rec, N_in))
rec_connectivity = np.ones((N_rec, N_rec))
output_connectivity = np.ones((N_out, N_rec))
# Specify certain connections to disallow. This can be done with input and output connectivity matrices as well
rec_connectivity[2*(N_rec//5):4*(N_rec//5),:2*(N_rec//5)] = 0
rec_connectivity[:2*(N_rec//5),2*(N_rec//5):4*(N_rec//5)] = 0
###Output
_____no_output_____
###Markdown
Plot the recurrent connectivity matrix
###Code
plot_weights(rec_connectivity, "recurrent connectivity")
###Output
_____no_output_____
###Markdown
Specify the connectivity matrices in ``network_params``.
###Code
modular_network_params['input_connectivity'] = input_connectivity
modular_network_params['rec_connectivity'] = rec_connectivity
modular_network_params['output_connectivity'] = output_connectivity
modularModel = Basic(modular_network_params)
weights = modularModel.get_weights()
plot_weights(weights['W_rec'])
modularModel.destruct()
###Output
_____no_output_____
###Markdown
Fixed Weights
Some parts of the brain we may assume to be less plastic than others. Alternatively, we may want to specify particular weights within the model and train the rest of them around those.
The fixed_weights parameter for the [train()](../apidoc/backend.rst#psychrnn.backend.rnn.RNN.train) function allows us to do this.
Instantiate the model
###Code
fixed_network_params = network_params.copy()
fixed_network_params['name'] = 'fixed_model'
fixedModel = Basic(fixed_network_params) # instantiate a basic vanilla RNN we will compare to later on
###Output
_____no_output_____
###Markdown
Plot the model weights before training
###Code
weights = fixedModel.get_weights()
plot_weights(weights['W_rec'])
# Set fixed weight matrices to the default -- fully trainable
W_in_fixed = np.zeros((N_rec,N_in))
W_rec_fixed = np.zeros((N_rec,N_rec))
W_out_fixed = np.zeros((N_out, N_rec))
# Specify certain weights to fix.
W_rec_fixed[N_rec//5*4:, :4*N_rec//5] = 1
W_rec_fixed[:4*N_rec//5, N_rec//5*4:] = 1
# Specify the fixed weights parameters in train_params
train_params = {}
train_params['fixed_weights'] = {
'W_in': W_in_fixed,
'W_rec': W_rec_fixed,
'W_out': W_out_fixed
}
losses, initialTime, trainTime = fixedModel.train(pd, train_params)
###Output
Iter 1280, Minibatch Loss= 0.177185
Iter 2560, Minibatch Loss= 0.107636
Iter 3840, Minibatch Loss= 0.099301
Iter 5120, Minibatch Loss= 0.085224
Iter 6400, Minibatch Loss= 0.082593
Iter 7680, Minibatch Loss= 0.079836
Iter 8960, Minibatch Loss= 0.080765
Iter 10240, Minibatch Loss= 0.079680
Iter 11520, Minibatch Loss= 0.072564
Iter 12800, Minibatch Loss= 0.067365
Iter 14080, Minibatch Loss= 0.040751
Iter 15360, Minibatch Loss= 0.052333
Iter 16640, Minibatch Loss= 0.046463
Iter 17920, Minibatch Loss= 0.031513
Iter 19200, Minibatch Loss= 0.033700
Iter 20480, Minibatch Loss= 0.033375
Iter 21760, Minibatch Loss= 0.035751
Iter 23040, Minibatch Loss= 0.041844
Iter 24320, Minibatch Loss= 0.038133
Iter 25600, Minibatch Loss= 0.023348
Iter 26880, Minibatch Loss= 0.027589
Iter 28160, Minibatch Loss= 0.019354
Iter 29440, Minibatch Loss= 0.022398
Iter 30720, Minibatch Loss= 0.020543
Iter 32000, Minibatch Loss= 0.013847
Iter 33280, Minibatch Loss= 0.017195
Iter 34560, Minibatch Loss= 0.019519
Iter 35840, Minibatch Loss= 0.020920
Iter 37120, Minibatch Loss= 0.016392
Iter 38400, Minibatch Loss= 0.019325
Iter 39680, Minibatch Loss= 0.015266
Iter 40960, Minibatch Loss= 0.031248
Iter 42240, Minibatch Loss= 0.023118
Iter 43520, Minibatch Loss= 0.015399
Iter 44800, Minibatch Loss= 0.018544
Iter 46080, Minibatch Loss= 0.021445
Iter 47360, Minibatch Loss= 0.012260
Iter 48640, Minibatch Loss= 0.017937
Iter 49920, Minibatch Loss= 0.020652
Optimization finished!
###Markdown
Plot the weights after training:
###Code
weights = fixedModel.get_weights()
plot_weights(weights['W_rec'])
fixedModel.destruct()
###Output
_____no_output_____
###Markdown
Unfortunately, it's hard to see visually whether the weights actually stayed fixed or not. To make it more apparent, we will set all of the fixed weights to the same value, the average of their previous value.
###Code
weights['W_rec'][N_rec//5*4:, :4*N_rec//5] = np.mean(weights['W_rec'][N_rec//5*4:, :4*N_rec//5])
weights['W_rec'][:4*N_rec//5, N_rec//5*4:] = np.mean(weights['W_rec'][:4*N_rec//5, N_rec//5*4:])
###Output
_____no_output_____
###Markdown
Now we build a new model that loads the modified ``weights`` from the previous model.
###Code
fixed_network_params = network_params.copy()
fixed_network_params['name'] = 'fixed_model_clearer'
for key, value in weights.items():
fixed_network_params[key] = value
fixedModelClearer = Basic(fixed_network_params) # instantiate an RNN loading the revised weights from the previous model
###Output
_____no_output_____
###Markdown
Plot the model weights before training
###Code
weights = fixedModelClearer.get_weights()
plot_weights(weights['W_rec'])
losses, initialTime, trainTime = fixedModelClearer.train(pd, train_params)
###Output
Iter 1280, Minibatch Loss= 0.050554
Iter 2560, Minibatch Loss= 0.024552
Iter 3840, Minibatch Loss= 0.021128
Iter 5120, Minibatch Loss= 0.028251
Iter 6400, Minibatch Loss= 0.019927
Iter 7680, Minibatch Loss= 0.016723
Iter 8960, Minibatch Loss= 0.013385
Iter 10240, Minibatch Loss= 0.016600
Iter 11520, Minibatch Loss= 0.020957
Iter 12800, Minibatch Loss= 0.012375
Iter 14080, Minibatch Loss= 0.019829
Iter 15360, Minibatch Loss= 0.020301
Iter 16640, Minibatch Loss= 0.019600
Iter 17920, Minibatch Loss= 0.017423
Iter 19200, Minibatch Loss= 0.010484
Iter 20480, Minibatch Loss= 0.014385
Iter 21760, Minibatch Loss= 0.017793
Iter 23040, Minibatch Loss= 0.009582
Iter 24320, Minibatch Loss= 0.014552
Iter 25600, Minibatch Loss= 0.010809
Iter 26880, Minibatch Loss= 0.012337
Iter 28160, Minibatch Loss= 0.017401
Iter 29440, Minibatch Loss= 0.012895
Iter 30720, Minibatch Loss= 0.016758
Iter 32000, Minibatch Loss= 0.011036
Iter 33280, Minibatch Loss= 0.007268
Iter 34560, Minibatch Loss= 0.008717
Iter 35840, Minibatch Loss= 0.014370
Iter 37120, Minibatch Loss= 0.012818
Iter 38400, Minibatch Loss= 0.021543
Iter 39680, Minibatch Loss= 0.011174
Iter 40960, Minibatch Loss= 0.010043
Iter 42240, Minibatch Loss= 0.015098
Iter 43520, Minibatch Loss= 0.012391
Iter 44800, Minibatch Loss= 0.011706
Iter 46080, Minibatch Loss= 0.015107
Iter 47360, Minibatch Loss= 0.012814
Iter 48640, Minibatch Loss= 0.009676
Iter 49920, Minibatch Loss= 0.009720
Optimization finished!
###Markdown
Plot the model weights after training. Now it is clear that the weights haven't changed.
###Code
weights = fixedModelClearer.get_weights()
plot_weights(weights['W_rec'])
fixedModelClearer.destruct()
###Output
_____no_output_____ |
visualize_mesh.ipynb | ###Markdown
Visualize MeshThis notebook provides a demo of how to generate SMPL mesh from 3D skeletons of MUGL
###Code
import numpy as np
import ipyvolume as ipv
import h5py
import os
from smplx import SMPL
import pickle
import torch
import torch.nn as nn
from model import *
from rotation.rotation import rot6d_to_rotmat, batch_rigid_transform
from torch.autograd import Variable
skeleton = np.load('./files/skeleton.npy')
viewpoint = np.load('./files/viewpoint.npy')
beta = torch.tensor([-0.1474, 0.0632, 0.7616, 2.9261, 0.3609, 0.2267, -0.3828, 0.3000,
0.5667, 0.0230])
latent_dim = 352
device = torch.device('cuda:0')
num_class = 120
main_path = "/ssd_scratch/cvit/debtanu.gupta/"
smpl = SMPL('./files',batch_size=32)
with open("./files/SMPL_NEUTRAL.pkl",'rb') as f:
smpl_data = pickle.load(f,encoding='latin1')
parent_array = list(smpl_data['kintree_table'][0][:24])
parent_array[0] = -1
def infer(model, label,v=0):
'''
this function generates samples
model: model object as input
label: class label
v: viewpoint
'''
model.eval()
# z = torch.randn(6, latent_dim).to(device).float()
# y = np.repeat(np.arange(3),2)
# y = np.arange(num_class)
y = np.repeat(label,10)
# rot_list = []
# for i in y:
# idx = np.where(label==i)
# rot_lbl = rot[idx]
# rand = np.random.randint(rot_lbl.shape[0])
# rot_list.append(rot_lbl[rand])
rot = np.repeat(viewpoint[v:v+1], 10, axis=0)
# rot = torch.tensor(rot[:,0,:]).to(device).float()
rot = rot[:,0,:].reshape((rot.shape[0],48,6))
rot = rot[:,0,:]
rot = torch.tensor(rot).to(device).float()
label = np.zeros((y.shape[0], num_class))
label[np.arange(y.shape[0]), y] = 1
label = torch.tensor(label).to(device).float()
with torch.no_grad():
m, v = model.gaussian_parameters(model.z_pre.squeeze(0), dim=0)
idx = torch.distributions.categorical.Categorical(model.pi).sample((label.shape[0],))
m, v = m[idx], v[idx]
z = model.sample_gaussian(m, v)
z = torch.cat((z,label,rot), dim=1)
z = model.latent2hidden(z)
z = z.reshape((z.shape[0], 4, -1))
pred = model.decoder_net(z)
root_pred = model.root_traj(z).unsqueeze(2)
N,T,_ = pred.shape
alpha = rot6d_to_rotmat(torch.tensor(pred.reshape((N,T,2,144)))).view((N,T,2,24,3,3)).float()
betas = beta.view((1,1,1,10)).repeat((N,T,2,1))
alpha = alpha.to(betas.device)
root_pred = root_pred.to(betas.device)
# pred_3d1 = fkt(pred[:,:,:144].contiguous(), skeleton)
# pred_3d2 = fkt(pred[:,:,144:].contiguous(), skeleton)
# # pred_3d = fkt(pred, skeleton).cpu().data.numpy()
# pred_3d1 = pred_3d1.reshape((pred_3d1.shape[0], pred_3d1.shape[1], 24,-1)).cpu().data.numpy()
# pred_3d2 = pred_3d2.reshape((pred_3d2.shape[0], pred_3d2.shape[1], 24,-1)).cpu().data.numpy()
# root_pred = root_pred.cpu().data.numpy()
# pred_3d2 = pred_3d2 + root_pred
return alpha, betas, root_pred
def plot(alpha, betas, root,ind, single=False, save_gif=False, save_name='example'):
'''
This function takes the SMPL parameters for the two persons in one generated sample and plots them.
alpha: per-frame SMPL rotation matrices for both persons
betas: SMPL shape parameters for both persons
root: root trajectory offset applied to the second person
ind: index of the sample to plot
single: True if the action class involves a single person, else False
save_gif: save a gif file if True (file name taken from save_name)
'''
# print(betas.device, alpha.device, root.device)
output1 = smpl(betas=betas[ind,:,0,:],body_pose=alpha[ind,:,0,1:], global_orient=alpha[ind,:,0,0].unsqueeze(1), pose2rot=False)
output2 = smpl(betas=betas[ind,:,1,:],body_pose=alpha[ind,:,1,1:], global_orient=alpha[ind,:,1,0].unsqueeze(1), pose2rot=False)
v1 = output1.vertices.data.cpu().numpy()
skeleton_motion1 = output1.joints.data.cpu().numpy()
v2 = output2.vertices.data.cpu().numpy() + root[ind].data.cpu().numpy()
skeleton_motion2 = output2.joints.data.cpu().numpy() + root[ind].data.cpu().numpy()
v1[:,:,1] *= -1
skeleton_motion1[:,:,1] *= -1
v2[:,:,1] *= -1
skeleton_motion2[:,:,1] *= -1
v1[:,:,2] *= -1
skeleton_motion1[:,:,2] *= -1
v2[:,:,2] *= -1
skeleton_motion2[:,:,2] *= -1
fig = ipv.figure(height=800,width=800)
plot_mesh1 = ipv.plot_trisurf(x=v1[:,:,0],y=v1[:,:,1],z=v1[:,:,2],triangles=smpl_data['f'],color=[1,1,.5,1])
plot_mesh1.material.transparent = True
plot_mesh1.material.side = "FrontSide"
a = np.arange(-np.max(skeleton_motion2[:,:,:])-1,np.max(skeleton_motion1[:,:,:])+1)
if not single:
a = np.arange(np.min(skeleton_motion1[:,:,:])-1,np.max(skeleton_motion1[:,:,:])+1)
plot_mesh2 = ipv.plot_trisurf(x=v2[:,:,0],y=v2[:,:,1],z=v2[:,:,2],triangles=smpl_data['f'],color=[1,0,1,1])
plot_mesh2.material.transparent = True
plot_mesh2.material.side = "FrontSide"
x,z = np.meshgrid(a,a)
y = np.ones_like(x)*np.min(skeleton_motion1[:,:,1])
s1 = ipv.scatter(skeleton_motion1[:,:,0],skeleton_motion1[:,:,1],skeleton_motion1[:,:,2],size=1,color='indigo',marker='sphere')
if not single:
s2 = ipv.scatter(skeleton_motion2[:,:,0],skeleton_motion2[:,:,1],skeleton_motion2[:,:,2],size=1,color='indigo',marker='sphere')
if single:
anim_list = [plot_mesh1,s1]
else:
anim_list = [plot_mesh1,s1,plot_mesh2,s2]
for i,p in enumerate(parent_array): # Run loop for each bone
if p == -1:
continue
b1 = ipv.plot(np.array([skeleton_motion1[:,i,0],skeleton_motion1[:,p,0]]).T,np.array([skeleton_motion1[:,i,1],skeleton_motion1[:,p,1]]).T,np.array([skeleton_motion1[:,i,2],skeleton_motion1[:,p,2]]).T ,size=10, color='darkviolet')
if not single:
b2 = ipv.plot(np.array([skeleton_motion2[:,i,0],skeleton_motion2[:,p,0]]).T,np.array([skeleton_motion2[:,i,1],skeleton_motion2[:,p,1]]).T,np.array([skeleton_motion2[:,i,2],skeleton_motion2[:,p,2]]).T ,size=10, color='darkviolet')
anim_list.append(b1)
if not single:
anim_list.append(b2)
ipv.plot_surface(x,y,z, color='lightgray')
ipv.plot_wireframe(x,y,z,color='black')
ipv.animation_control(anim_list)
ipv.style.background_color('dark')
ipv.style.box_off()
ipv.style.axes_off()
ipv.xyzlim(min(np.min(v1),np.min(v2)),max(np.max(v1),np.max(v2)))
ipv.show()
if save_gif:
def slide(figure, framenr, fraction):
for a in anim_list:
if a.sequence_index == skeleton_motion1.shape[0]:
a.sequence_index = 0
a.sequence_index += 1
ipv.movie(save_name + '.gif', slide, fps=5, frames=skeleton_motion1.shape[0])
model = Model(latent_dim).to(device)
model.load_state_dict(torch.load('./checkpoints/' + 'model_199.pt', map_location=torch.device('cpu')))
print('model loaded..')
cls_lbl = 58
alpha, betas, root_pred = infer(model, cls_lbl, v=0)
idx = 6
plot(alpha, betas, root_pred, idx, single=False)
###Output
_____no_output_____ |
git0/Exercise_Branch_and_Rebase.ipynb | ###Markdown
Branch and Rebase In this notebook you will start with a repository containing names of cities in various US states. Following the distinction between the develop and issue branches, the names for each state will be added to the `cities` file in a different commit. However, the order of the commits will not match the order in which the states joined the United States. For example, New York joined the union before Texas, and Hawaii joined after Texas. You will use the `rebase` command in Git to reorder the commits to match the order in which the states became part of the United States.The following initializes the repo and creates the commits in an arbitrary order. Feel free to modify the cell to use your own user name and email.
###Code
%%bash
git init rebase_repo
cd rebase_repo
git config --global user.email "[email protected]"
git config --global user.name "Peter Gibbons"
git checkout -b develop
echo "This repo contains
lists of cities for New York, Hawaii, and Texas" > README
git add README
git commit -m 'initial commit'
git branch hawaii
git branch newyork
git branch texas
git checkout --force hawaii
echo "Honolulu
Hilo
Kailua" >> cities
git add cities
git commit -am 'added hawaii'
git checkout --force newyork
echo "New York
Albany
Buffalo" >> cities
git add cities
git commit -am 'added new york'
git checkout --force texas
echo "Austin
Dallas
Houston" >> cities
git add cities
git commit -am 'added texas'
%cd rebase_repo
###Output
_____no_output_____
###Markdown
Start by defining your usual alias for the `git log` command.
###Code
###Output
_____no_output_____
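For reference, one possible alias (a sketch, not the only valid answer; the exact `log` flags are a matter of preference, and it assumes the working directory is still `rebase_repo`, where the `%cd` above left us):
```bash
%%bash
# Define a compact graph-style log alias and run it
git config alias.lg "log --all --graph --oneline --decorate"
git lg
```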
###Markdown
After you run the detailed `log`, your output should resemble the following:
* b4e0... (hawaii) added hawaii
| * b52c... (newyork) added new york
|/
| * df45... (HEAD -> texas) added texas
|/
* d3a8... (develop) initial commit
###Code
###Output
_____no_output_____
###Markdown
Since New York was the first to join the union, ensure that your `HEAD` points to the `newyork` branch before doing the rebase.
###Code
###Output
_____no_output_____
###Markdown
Use your detailed log to confirm the correct state of the `HEAD` reference.
###Code
###Output
_____no_output_____
###Markdown
You are ready to start with the `rebase`. Ensure that the commit for `newyork` is rebased back to the `develop` branch.
###Code
###Output
_____no_output_____
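One possible way to perform this step (a sketch; `git rebase <upstream> <branch>` first checks out `<branch>` and then replays its commits on top of `<upstream>`):
```bash
%%bash
# Rebase the newyork branch onto develop
git rebase develop newyork
```
As the next note explains, expect Git to report that there is nothing to do here, since `newyork` already sits directly on top of `develop`.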
###Markdown
Don't be surprised by the output of the rebase command. If there is a direct path from `develop` to `newyork`, then there is nothing to rebase.
###Code
###Output
_____no_output_____
###Markdown
Next, rebase `texas` on top of the `newyork` commit.
###Code
###Output
_____no_output_____
###Markdown
This time the command results in a conflict. Review the conflicting file and resolve the issue.
###Code
%%writefile cities
New York
Albany
Buffalo
Austin
Dallas
Houston
###Output
_____no_output_____
###Markdown
Remember that once the `cities` file has the right content you need to re-stage it and `--continue` the rebase.
###Code
###Output
_____no_output_____
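One possible way to finish the conflicted step (a sketch; `GIT_EDITOR=true` is only there to keep Git from opening an interactive editor inside the notebook):
```bash
%%bash
# Stage the resolved file and resume the rebase
git add cities
GIT_EDITOR=true git rebase --continue
```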
###Markdown
Confirm that the rebase completed successfully using your `git log` alias.
###Code
###Output
_____no_output_____
###Markdown
Finally, complete the steps to rebase `hawaii`.
###Code
%%writefile cities
New York
Albany
Buffalo
Austin
Dallas
Houston
Honolulu
Hilo
Kailua
###Output
_____no_output_____
###Markdown
Once the rebase is done, check the detailed log.
###Code
###Output
_____no_output_____
###Markdown
Assuming the rebase completed as expected, the order of the commits in the log should resemble the following:
* ebac... (HEAD -> hawaii) added hawaii
* 0e46... (texas) added texas
* b52c... (newyork) added new york
* d3a8... (develop) initial commit

Finally, checkout the `develop` branch and "fast-forward" it to the `hawaii` branch so that future commits to develop happen based on the `hawaii` commit.
###Code
###Output
_____no_output_____
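One possible way to do the fast-forward (a sketch; `--ff-only` makes the merge fail loudly if a fast-forward is not possible):
```bash
%%bash
# Move develop forward to the hawaii commit
git checkout develop
git merge --ff-only hawaii
```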
###Markdown
At the conclusion of this exercise your log should resemble the following:
* 538b... (HEAD -> develop, hawaii) added hawaii
* f7b1... (texas) added texas
* baca... (newyork) added new york
* 28fe... initial commit
###Code
###Output
_____no_output_____ |
session_II-problems/Ans_redshift_estimator.ipynb | ###Markdown
Data is downloaded from https://dr15.sdss.org/optical/plate/search
###Code
df = pd.read_csv('./data/sdss/metadata.csv')
def name_maker(x):
return 'spec-{:04d}-{:05d}-{:04d}.fits'.format(x['#plate'],x['mjd'],x['fiberid'])
df['filename'] = df.apply(name_maker, axis=1)
spec_list = glob('./data/sdss/*.fits')
n_spec = len(spec_list)
hdul = fits.open(spec_list[0]) # open a FITS file
data = hdul[1].data # assume the first extension is a table
tdata = Table(data)
tdata['flux','loglam','model'][:10]
X_train = []
y_train = []
X_test = []
y_test = []
lamins = []
lam_max = 3500
for i in range(n_spec):
hdul = fits.open(spec_list[i]) # open a FITS file
data = hdul[1].data # assume the first extension is a table
tdata = Table(data)
mdl = np.array(tdata['model'])
if np.array(tdata['loglam'])[0]>3.6 or mdl.shape[0]<lam_max:
continue
xx = mdl[:lam_max]
# mdl = gfilter(mdl,10)
# p_list = find_peaks(mdl)[0]
# xx = np.zeros(50)
# xx[:len(p_list):2] = np.array(tdata['loglam'])[p_list][:25]
# xx[1:len(p_list):2] = np.array(tdata['model'])[p_list][:25]
# inds = np.argsort(mdl)[::-1]
# xx = np.array(tdata['loglam'])[inds][:1000]
z = df[df['filename']==spec_list[i].split('/')[-1]]['z'].values[0]
zclass = int(10*(z-1))
zhotv = np.zeros(10)
zhotv[zclass] = 1
if i<n_spec//2:
X_train.append(xx)
y_train.append(zhotv)
else:
X_test.append(xx)
y_test.append(zhotv)
# tdata
X_train = np.array(X_train)
y_train = np.array(y_train)
X_test = np.array(X_test)
y_test = np.array(y_test)
input_dim = X_train.shape[1]
X_train = np.expand_dims(X_train,-1)
X_test = np.expand_dims(X_test,-1)
print(X_train.shape,y_train.shape,X_test.shape,y_test.shape)
# num = 100
# for i in range(num):
# plt.plot(X_train[i,:],color=(1.*(num-i)/num,1.*i/num,0),alpha=0.1)
# model = Sequential()
# model.add(Dense(128, activation='relu', input_dim=input_dim))
# model.add(Dense(64, activation='relu'))
# model.add(Dense(32, activation='relu'))
# model.add(Dense(16, activation='relu'))
# model.add(Dropout(0.5))
# model.add(Dense(10, activation='sigmoid'))
model = Sequential()
model.add(Conv1D(10, 10, strides=1))
model.add(MaxPooling1D(pool_size=3))
model.add(Conv1D(10, 10, strides=1))
model.add(MaxPooling1D(pool_size=3))
model.add(Conv1D(10, 10, strides=1))
model.add(MaxPooling1D(pool_size=3))
model.add(Flatten())
model.add(Dense(32, activation='relu'))
model.add(Dense(16, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='sigmoid'))
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
model.fit(X_train,y_train,epochs=100,batch_size=100,verbose=0)
model.evaluate(X_test,y_test)
y_pred = model.predict(X_test)
y_true = y_test.argmax(axis=1)
y_pred = y_pred.argmax(axis=1)
plot_confusion_matrix(y_true, y_pred,
['{:4.2f}'.format(i+0.05) for i in np.linspace(0,1,10,endpoint=0)],
normalize=1,title='CM')
###Output
_____no_output_____ |
Lezioni/Lezione 2.B - Esercizi.ipynb | ###Markdown
Lesson 2 - Exercises We use and explore a Pandas dataset. Try to work through the following exercises: 1) [Plot age distributions for men and women](section1) 2) [Show the deaths/survivors for each class](section2) 3) [Create a pie chart of the money spent on tickets](section3) 4) [Fit a histogram](section4) 5) [Correlation between ticket price and age?](section5) 6) [Correlations in Serie A?](section6) Let's import the Titanic dataset
###Code
import pandas as pd
titanic = pd.read_csv("data/train_dataset_titanic.csv")
titanic.head()
###Output
_____no_output_____ |
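As a starting point for the first exercise, here is a minimal sketch. It assumes the file follows the standard Kaggle Titanic schema with 'Sex' and 'Age' columns; if the column names in this particular CSV differ, adjust them accordingly.
```python
import matplotlib.pyplot as plt

# Age distributions for men and women (column names 'Sex' and 'Age' are assumed)
men = titanic[titanic['Sex'] == 'male']['Age'].dropna()
women = titanic[titanic['Sex'] == 'female']['Age'].dropna()

plt.hist(men, bins=20, alpha=0.5, label='men')
plt.hist(women, bins=20, alpha=0.5, label='women')
plt.xlabel('Age')
plt.ylabel('Count')
plt.legend()
plt.show()
```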
Iris_Classification/Iris Classification.ipynb | ###Markdown
Iris Data Classification A notebook by [Ashish Rai](https://github.com/darthv115/)**It is recommended to view this notebook in [nbviewer](http://nbviewer.jupyter.org/github/darthv115/Machine-Learning-and-Data-Science-Projects/blob/master/Iris_Classification/Iris%20Classification.ipynb) for the best viewing experience.** Table of Contents- [Introduction](intro) - [Licence](licence) - [Required Libraries](reqd_libs) - [The Problem](problem) - [Step 1: Answering the question](step_1) - [Step 2: Checking the data](step_2) - [Step 3: Tidying the data](step_3) - [Step 4: Testing our data](step_4) - [Step 5: Classification](step_5) - [Step 6: CrossValidation](step_6) - [Parameter Tuning](tuning)- [Step 7: Reproducibility](step_7) Introduction This notebook is my Ipython notebook and this is made with reference to Dr. Randal S Olson's notebook, to get started with Ipython notebooks. Licence Please refer to the repository Licence file. Required Libraries You need to have python installed on your system. Refer to this link to get python on your system.You also need to have Ipython notebook installed. For that, refer here.Required python packages:- numpy- pandas- matplotlib- seaborn- scikit-learnTo install them using pip:pip install numpy pandas matplotlib seaborn scikit-learnTo get the latest versions, update pip:pip install --upgrade pip The problem I am using the popular Iris dataset to classify the flowers according to their species (Iris-setosa, Iris-versicolor and Iris-virginica).For those who might point out, this is not the actual data set, but rather a small part of it and it also has some intended anomalies inserted by **Dr. Randal S Olson**, which I will try to rectify in this notebook. Step 1: Answering the question Some particular questions need to be answered regarding the **goals** of the projects and how it will be achieved, before you do anything with the data.We need to define the problem a bit more and also define the metrics for our success before starting to work.Some of the questions are:Did you specify the type of data analytic question (e.g. exploration, association causality) before touching the data?I am trying to classify the species of the flower based on four features: sepal length, sepal width, petal length, petal width. Did you define the metric for success before beginning? Since classification is done, we can use accuracy (% of correctly predicted flowers) as a metric. Did you understand the context for the question and the scientific or business application? This can be used in a smartphone app where you take a picture of a flower and get the species of that flower back. Did you record the experimental design?The field researchers are hand-measuring 50 randomly-sampled flowers of each species using a standardized methodology. Did you consider whether the question could be answered with the available data? The data set we will be using consits of only 3 types of **Iris** flowers. So, it would be able to classify those 3 species. For a more general classifier, we would need more data.And as Randal Olson says,***Thinking about and documenting the problem we're working on is an important step to performing effective data analysis that often goes overlooked.*** Step 2: Checking the data Checking the data for any errors. It's vital that we spot these errors before we invest our time in analysing the data.- Is there anything wrong with the data?- Are there any quirks (pattern)?- Do I need to fix or remove any data?We will use pandas' dataframes here.
###Code
import pandas as pd
iris_data = pd.read_csv('data/iris-data.csv')
iris_data.head()
###Output
_____no_output_____
###Markdown
**Bonus Tip**: If your data is missing some values and it follows a certain pattern (for example here, all the missing data is replaced by the string 'NA'), you can tell pandas to treate such rows as missing data.This can be done by specifying the 'na_values' attribute as 'NA'. iris_data = pd.read_csv('data/iris_data.csv', na_values='NA')Here, it already knew how to handle missing values. So, I need not specify the parameter 'na_values'.
###Code
null_data = iris_data[iris_data.isnull().any(axis=1)]
null_data
# gives useful statistical info (summary) about the data
iris_data.describe()
###Output
_____no_output_____
###Markdown
So, what do we understand from the summary. One fine thing to notice is that it shows that the count for petal_width_cm is 145, i.e. 5 data points are missing, as we expected pandas to handle them and it did.We can't get much insight from tables like unless we know in advance that our data should lie in some particular range. That's why it is better to visualize the data to find certain anomalies or erroneous data, that we may have to fix. We will use matplotlib to visualize the data.
###Code
# lines starting with '%' are called magic commands
# tells the interpreter to show the plots inside the notebook
%matplotlib inline
# import required libraries
import matplotlib.pyplot as plt
import seaborn as sb
import numpy as np
# set a random seed for reproducibility
np.random.seed(22)
# shows the rows with missing values
iris_data[iris_data.isnull().any(axis=1)]
# drops the rows with missing values
no_null = iris_data.dropna()
print "Dims of no_null:",no_null.shape
# will return an empty dataframe since we got rid of the missing value rows
no_null[no_null.isnull().any(axis=1)]
###Output
Dims of no_null: (145, 5)
###Markdown
Scatterplot MatricesWe will create a **scatterplot** matrix for visualization. A scatterplot matrix plots the distribution of each column along the diagonals, then plots a scatterplot matrix for combinations of each variable.They make up for an efficient tool to look for errors in data.We can even color the data by its class to see classwise trends, by setting the 'hue' parameter to 'class'.
###Code
# Seaborn does not know how to deal with rows with missing values,
# that's why we did away with them already.
# sb.pairplot(no_null, hue='class')
sb.pairplot(iris_data.dropna(), hue='class')
###Output
_____no_output_____
###Markdown
We can clearly see some of the problems from the graphs above:- There are 5 classes where there should have only been 3.- One sepal_width_cm value for Iris-setosa falls well outside the expected range, which can be seen in the plots in which sepal_width_cm is used.- Several sepal_length_cm entries for Iris-versicolor fall near zero.We need to rectify these.- We had to drop those rows with missing data.
###Code
iris_data.loc[iris_data['class'] == 'versicolor', 'class'] = 'Iris-versicolor'
iris_data.loc[iris_data['class'] == 'Iris-setossa', 'class'] = 'Iris-setosa'
iris_data['class'].unique()
###Output
_____no_output_____
###Markdown
Now, onto fixing erroneous data (or outliers). This can be **tricky**, because we can't say for sure whether an error was introduced at the time of collection or recording, or whether it is a real anomaly. So, we should be extra careful when dealing with outliers. If we decide to exclude such data, we need to make sure we document which data we excluded and also provide solid reasoning for excluding it (we can't just go around saying "this data didn't fit my hypothesis"; that will not stand peer review).Here, let's say that we know for a fact that the sepal width of an Iris-setosa flower cannot be less than 2.5 cm. So clearly, this was an error in either measurement or recording. We are better off discarding this data than spending hours and resources debugging where the mistake happened.
###Code
# Drops any 'Iris-setosa' rows with 'sepal_width_cm' < 2.5 cm
iris_data = iris_data.loc[(iris_data['class'] != 'Iris-setosa') | (iris_data['sepal_width_cm'] >= 2.5)]
# The above statement works because .loc also takes a list of boolean values as a parameter
iris_data.loc[iris_data['class'] == 'Iris-setosa', 'sepal_width_cm'].hist()
###Output
_____no_output_____
###Markdown
Phew! Done with that.The next issue we need to fix is several near-zero values of sepal length for Iris-versicolor. Let's take a look at them.
###Code
iris_data.loc[(iris_data['class'] == 'Iris-versicolor') &
(iris_data['sepal_length_cm'] < 1.0)]
###Output
_____no_output_____
###Markdown
One strange observation is that all the values are off by two orders of magnitude, as if someone took the measurements in metres but forgot to convert them into cm. You would need to talk to your data collection team regarding this, and apparently in this particular case that was indeed the mistake made by the data collection team.So, let's fix that.
###Code
iris_data.loc[(iris_data['class'] == 'Iris-versicolor') &
(iris_data['sepal_length_cm'] < 1.0),
'sepal_length_cm'] *= 100.0
iris_data.loc[iris_data['class'] == 'Iris-versicolor', 'sepal_length_cm'].hist()
# shows rows with missing data in any of the columns
iris_data[iris_data['sepal_length_cm'].isnull() |
iris_data['sepal_width_cm'].isnull() |
iris_data['petal_length_cm'].isnull() |
iris_data['petal_width_cm'].isnull()]
###Output
_____no_output_____
###Markdown
It's not ideal that we had to drop those rows, especially considering they're all Iris-setosa entries. Since it seems like the missing data is systematic — all of the missing values are in the same column for the same Iris type — this error could potentially bias our analysis.One way to deal with missing data is **mean imputation**: If we know that the values for a measurement fall in a certain range, we can fill in empty values with the average of that measurement.Let's take a look at the data first.
###Code
iris_data.loc[iris_data['class'] == 'Iris-setosa', 'petal_width_cm'].hist()
mean_petal_width = iris_data.loc[iris_data['class'] == 'Iris-setosa', 'petal_width_cm'].mean()
iris_data.loc[(iris_data['class'] == 'Iris-setosa') & iris_data['petal_width_cm'].isnull(),
'petal_width_cm'] = mean_petal_width
# Check if it has updated
iris_data.loc[(iris_data['class'] == 'Iris-setosa') & (iris_data['petal_width_cm'] == mean_petal_width)]
# shows rows with missing data in any of the columns
iris_data[iris_data['sepal_length_cm'].isnull() |
iris_data['sepal_width_cm'].isnull() |
iris_data['petal_length_cm'].isnull() |
iris_data['petal_width_cm'].isnull()]
# should return null
###Output
_____no_output_____
###Markdown
If you don't feel like imputing the data with the mean value, you can drop the rows with missing values with a dropna() call: `iris_data.dropna(inplace=True)`. After all this, let's save the cleaned data to a new file so that we don't have to go through the same hassle again.
###Code
iris_data.to_csv('data/iris_data_clean.csv', index=False)
iris_data_clean = pd.read_csv('data/iris_data_clean.csv')
# Scatterplotmatrix for the cleaned data
sb.pairplot(iris_data_clean, hue='class')
###Output
_____no_output_____
###Markdown
Takeaways:- Make sure the data is encoded properly- Make sure the data falls in an expected range, use domain knowledge if possible to define this expected range- Deal with missing data judiciously, replace it if you can or drop it.- Never tidy the data manually. That way, you won't be able to reproduce them.- Use code as a record of how the data was tidied. (This notebook serves a good purpose for that)- Plot everything you can about the data at this stage of analysis so you can visually confirm everything is as it should be. Step 4: Testing our data Just like running unit tests in many programming styles, we can quickly test our data using assert statements. This way, we would know if anything breaks due to changes made to the data.It goes like this: We assert something. If it is True, the notebook continues running after it. However, if it is False, it will throw an error and we will know of any error before starting to actually analyse the data.Following are the unit tests we need the data to pass.
###Code
# We know that we should only have three classes
assert len(iris_data_clean['class'].unique()) == 3
# We know that sepal lengths for 'Iris-versicolor' should never be below 2.5 cm
assert iris_data_clean.loc[iris_data_clean['class'] == 'Iris-versicolor', 'sepal_length_cm'].min() >= 2.5
# We know that our data set should have no missing measurements
assert len(iris_data_clean.loc[(iris_data_clean['sepal_length_cm'].isnull()) |
(iris_data_clean['sepal_width_cm'].isnull()) |
(iris_data_clean['petal_length_cm'].isnull()) |
(iris_data_clean['petal_width_cm'].isnull())]) == 0
###Output
_____no_output_____
###Markdown
Exploratory Analysis Finally, we can start analysing our data!Exploratory analysis is the step where we start delving deeper into the data set, beyond the outliers and errors. We'll be looking to answer questions such as:- How is my data distributed?- Are there any correlations in my data?- Are there confounding factors that explain these correlations?In this stage, we plot our data in as many ways as possible and in as many charts and graphs as we like, but we don't bother about making them pretty. They are just for internal use.
###Code
sb.pairplot(iris_data_clean)
###Output
_____no_output_____
###Markdown
Most of our data is normally distributed (bell curve), which is good if we plan to use modelling methods that assume the data to be normally distributed.However, the petal measurements show a strange behaviour: they show a large group of values at the low end, which is not what a normal distribution looks like. One possible explanation is that the measurements come from different classes, hence the irregularity. So, let's color code them according to class.
###Code
sb.pairplot(iris_data_clean, hue = 'class')
###Output
_____no_output_____
###Markdown
Surely, it was because of the different classes that we were getting the strange distribution of petal lengths and widths. In a way, this helps to distinguish Iris-setosa from the other Iris types.However, the values for Iris-versicolor and Iris-virginica overlap for the most part, and this will be a problem when classifying them.*There are also correlations between petal length and petal width, as well as sepal length and sepal width. The field biologists assure us that this is to be expected: longer flower petals also tend to be wider, and the same applies for sepals.*We can also make **violin** plots for the different classes. Violin plots are similar to box plots (see Wikipedia) but provide more information by scaling the box width according to the density of the data.
###Code
plt.figure(figsize=(10, 10))
for column_index, column in enumerate(iris_data_clean.columns):
if column == 'class':
continue
plt.subplot(2,2, column_index + 1)
sb.violinplot(x = 'class', y = column, data = iris_data_clean)
###Output
_____no_output_____
###Markdown
Step 5: Classification As boring and tiring as the previous parts (tidying and analysing) may seem, they are necessary, because **bad data leads to bad models.**Now, let's fit our data with models.Before that, we need to split our data into **training** and **testing** sets.I assume you already know the importance of this (to avoid overfitting and the like). Now, let's get onto it.
###Code
# sklearn assumes data to be present in lists.
# So, we need to convert our data into lists. 'values' attribute does that.
all_inputs = iris_data_clean[['sepal_length_cm', 'sepal_width_cm', 'petal_length_cm', 'petal_width_cm']].values
all_classes = iris_data_clean['class'].values
# To check
all_inputs[:5]
# from sklearn.cross_validation import train_test_split
# The above module is deprecated. All functions have been transferred to the new module used below.
from sklearn.model_selection import train_test_split
# splits the data into training and testing sets based on
(training_inputs, testing_inputs,
training_classes, testing_classes) = train_test_split(all_inputs, all_classes, train_size = 0.75, random_state = 1)
###Output
_____no_output_____
###Markdown
Decision Tree ClassifierOne of the basic classifiers used in ML is a **decision tree classifier**. It asks Yes/No questions about the data - each time getting closer to classifying the data.
###Code
from sklearn.tree import DecisionTreeClassifier
# Create the classifier
decision_tree_classifier = DecisionTreeClassifier()
# Train the classifier on the training set
decision_tree_classifier.fit(training_inputs, training_classes)
# Test the classifier on the testing set
decision_tree_classifier.score(testing_inputs, testing_classes)
###Output
_____no_output_____
###Markdown
You might be happy about the 97% accuracy you just got.That might just be a fluke, depending on how the data was split into training and testing sets.Go ahead and change the 'random_state' attribute to 0 and your accuracy will drop down to 86 - 89%.So, let's try some more combinations and check whether our decision tree classifier is actually performing well.
###Code
model_accuracies = []
for i in xrange(1000):
(training_inputs, testing_inputs,
training_classes, testing_classes) = train_test_split(all_inputs, all_classes, train_size = 0.75)
decision_tree_classifier = DecisionTreeClassifier()
decision_tree_classifier.fit(training_inputs, training_classes)
accuracy = decision_tree_classifier.score(testing_inputs, testing_classes)
model_accuracies.append(accuracy)
sb.distplot(model_accuracies)
###Output
_____no_output_____
###Markdown
Our accuracy varies because of **overfitting**: our model becomes way too dependent on the training set, so it can't generalize to new data that it sees. Step 6: Cross-Validation We can avoid overfitting by cross-validation. Most data scientists use **k-fold cross-validation** for this purpose. Here, one of the k subsets of the data is used as a testing set and the model is trained on the remaining subsets. This process is repeated k times, so every subset gets to be the testing set exactly once and the model does not overfit to a single split. 10-fold CV is the most common choice.
###Code
from sklearn.cross_validation import StratifiedKFold
def plot_cv(cv, n_labels):
masks = []
for train, test in cv:
mask = np.zeros(n_labels, dtype = bool)
mask[test] = 1
masks.append(mask)
# print len(masks)
plt.figure(figsize = (15,15))
plt.imshow(masks, interpolation = 'none')
plt.ylabel('Fold')
plt.xlabel('Row #')
plot_cv(StratifiedKFold(all_classes, n_folds=10), len(all_classes))
###Output
/home/ashish/ml-projects/venv/local/lib/python2.7/site-packages/sklearn/cross_validation.py:44: DeprecationWarning: This module was deprecated in version 0.18 in favor of the model_selection module into which all the refactored classes and functions are moved. Also note that the interface of the new CV iterators are different from that of this module. This module will be removed in 0.20.
"This module will be removed in 0.20.", DeprecationWarning)
###Markdown
So, let's see what's happening here.First about StratifiedKFold. So, it divides the entire dataset into training and testing sets, all while keeping the variance and proportion of all the classes almost the same across all sets.So, it returns a list of **k** lists (or folds). This is taken by our custom fuction to represent the testing set in a more visual way. So, the black parts are the data points which are used for testing the performance of the model for each fold. The three groups are the three classes which we intend to classify, so it takes a bunch from each one of them so that the proportion is maintained.[5 from each of 5 groups = 15 * 9 + 14 (for last fold) = all 149 data points covered] **Bonus Tip**: You will get a deprecation warning as the cross_validation module will be deprecated from sklearn v 0.20, instead all the functions have been transferred to model_selection module. So, I tried importing but I just get my head around the similarities between the functions in two modules, so I stuck to the previous one because I can't implement the one from model_selection. So, a word of caution here: you might need to use the model_selection in the future if the other one becomes totally incompatible. sklearn has a function to directly calculate the cv scores.
###Code
from sklearn.model_selection import cross_val_score
decision_tree_classifier = DecisionTreeClassifier()
# gives a list of scores for each of the cv folds
cv_scores = cross_val_score(decision_tree_classifier, all_inputs, all_classes, cv=10)
sb.distplot(cv_scores)
plt.title("Average Score: {}".format(np.mean(cv_scores)))
###Output
_____no_output_____
###Markdown
Now, this is much more consistent and reliable classifier which will generalize more easily than the previous model. So, CrossValidation rocks! Parameter Tuning Now that our model is working with an acceptable rate of accuracy, is this the best we can do?Every machine learning model comes with a set of parameters which can be tuned to improve its performance. For example, in case of decision trees, the depth of the tree is one such parameter. Let's see its effect on the performance.What if we severely limit the depth of the decision tree?
###Code
decision_tree_classifier = DecisionTreeClassifier(max_depth=1)
cv_scores = cross_val_score(decision_tree_classifier, all_inputs, all_classes, cv = 10)
sb.distplot(cv_scores)
plt.title("Average score: {}".format(np.mean(cv_scores)))
###Output
_____no_output_____
###Markdown
So, the classification drops tremendously if we limit the depth to 1.But there needs to be a systematic way to determine the optimum parameters. One such (and the most common) method is **Grid Search**. What it essentially does is that it tries a bunch of parameters we pass to it and returns the best set of parameters.Let's do this with our decision tree classifier. We will consider two params for now, depth and features.
###Code
from sklearn.grid_search import GridSearchCV
decision_tree_classifier = DecisionTreeClassifier()
parameter_grid = {'max_depth': [1, 2, 3, 4, 5],
'max_features': [1, 2, 3, 4]}
cross_validation = StratifiedKFold(all_classes, n_folds=10)
grid_search = GridSearchCV(decision_tree_classifier, param_grid = parameter_grid,
cv = cross_validation)
grid_search.fit(all_inputs, all_classes)
print "Best Score: {}".format(grid_search.best_score_)
print "Best params: {}".format(grid_search.best_params_)
###Output
/home/ashish/ml-projects/venv/local/lib/python2.7/site-packages/sklearn/grid_search.py:43: DeprecationWarning: This module was deprecated in version 0.18 in favor of the model_selection module into which all the refactored classes and functions are moved. This module will be removed in 0.20.
DeprecationWarning)
###Markdown
Now, I got a lot of different results every time I ran the above script. As the maximum number of features or depth increases, the performance improves which is quite understandable. It's like a trade off between performance and computation time (since it would increase if the structure complexity increases). But shouldn't it return the same value regardless of that. I was under the impression that it does the choosing for us. For now, I will put up a question on Stackexchange [insert link here later] and try to get to understand this better (Maybe somewhere some function is using a random seed or maybe it's a bug).Now as Dr. Randal Olson would say, let's visualize the grid search.
###Code
grid_visualization = []
# grid_scores is a list of tuples of possible params tried by grid_search
for grid_pair in grid_search.grid_scores_:
grid_visualization.append(grid_pair.mean_validation_score)
grid_visualization = np.array(grid_visualization)
grid_visualization.shape = (5,4)
sb.heatmap(grid_visualization, cmap='Reds')
# Credits - Randal Olson
# because I didn't type that out
plt.xticks(np.arange(4) + 0.5, grid_search.param_grid['max_features'])
plt.yticks(np.arange(5) + 0.5, grid_search.param_grid['max_depth'][::-1])
plt.xlabel('max_features')
plt.ylabel('max_depth')
###Output
_____no_output_____
###Markdown
We can see the effect of varying the parameters on the accuracy. We need to have a max_depth of at least 2. max_features does not make a lot of difference, though; for our data, we have 4 features, which is pretty good.Now, let's do a more elaborate **grid search**.
###Code
decision_tree_classifier = DecisionTreeClassifier()
parameter_grid = {'criterion': ['gini', 'entropy'],
'splitter': ['best', 'random'],
'max_depth': [1, 2, 3, 4, 5],
'max_features': [1, 2, 3, 4]}
cross_validation = StratifiedKFold(all_classes, n_folds=10)
grid_search = GridSearchCV(decision_tree_classifier, param_grid = parameter_grid,
cv = cross_validation)
grid_search.fit(all_inputs, all_classes)
print "Best Score: {}".format(grid_search.best_score_)
print "Best params: {}".format(grid_search.best_params_)
# Use the best params obtained from grid search for prediction
decision_tree_classifier = grid_search.best_estimator_
# take a look at the params
decision_tree_classifier
###Output
_____no_output_____
###Markdown
Visualising Decision making using GraphVizGraphviz can be used to visualize the decision making process done by the DTC (Decision Tree Classifier).
###Code
from sklearn import tree
# can't find the use of the following line
# considering it to be not important, ignored it
# more info below
# from sklearn.externals.six import StringIO
with open('iris_dtc.dot', 'w') as out_file:
tree.export_graphviz(decision_tree_classifier, out_file = out_file)
###Output
_____no_output_____
###Markdown
The above piece of code generates graphs in the form of .dot files.You can convert the dot files to .png files with: `dot -Tpng iris_dtc.dot -o iris_dtc.png` (you might need to install graphviz first with apt-get).From what I found, StringIO gives file-like access to strings. The possible use case is for modules which only accept files: you can open a string as if it's a file, make changes to it, and you are done (e.g. the gzip module). The possible use case here might be that, since dot files are essentially strings which describe the relation between nodes, Randal Olson might have used a now-deprecated module, so probably that is the reason (a tiny StringIO illustration follows below). Now that we have our classifier ready, let's visualize its performance.
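As promised above, a tiny, purely illustrative StringIO sketch (it is not used anywhere else in this notebook, and the dot-like string is made up for the example):
```python
# Python 2 (used in this notebook) ships StringIO in its own module; Python 3 moved it to io
try:
    from StringIO import StringIO   # Python 2
except ImportError:
    from io import StringIO         # Python 3

# Wrap a plain string so it behaves like an open text file
buffer = StringIO("digraph Tree { 0 -> 1 ; 0 -> 2 }")
print(buffer.read())     # read it back as if it came from a file
buffer.seek(0)           # rewind, just like a real file handle
print(buffer.readline())
```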
###Code
dt_scores = cross_val_score(decision_tree_classifier, all_inputs, all_classes, cv = 10)
sb.boxplot(dt_scores)
sb.stripplot(dt_scores, jitter=True, color='white')
###Output
_____no_output_____
###Markdown
Let's compare this performance against some other ML classifier.Let's try an ensemble classifier. The problem with a single classifier (like a single decision tree) is that it is prone to over-fitting: it tries to fit the training set very well and hence fails to generalize.Ensemble classifiers, on the other hand, create a number of different classifiers (here, decision trees), train them on random subsets of training examples (drawn with replacement) and training features (drawn without replacement), and take a weighted average of all the classifiers to get a better (averaged) classifier, thus reducing the chances of overfitting.So, let's use a Random Forest Classifier.
###Code
from sklearn.ensemble import RandomForestClassifier
random_forest_classifier = RandomForestClassifier()
parameter_grid = {'n_estimators': [5,10,25,50],
'criterion': ['gini', 'entropy'],
'max_features': [1,2,3,4],
'warm_start': [True, False]}
cross_validation = StratifiedKFold(all_classes ,n_folds=10)
grid_search = GridSearchCV(random_forest_classifier,
param_grid = parameter_grid,
cv = cross_validation)
grid_search.fit(all_inputs, all_classes)
print "Best score: {}".format(grid_search.best_score_)
print "Best params: {}".format(grid_search.best_params_)
random_forest_classifier = grid_search.best_estimator_
random_forest_classifier
random_forest_classifier = grid_search.best_estimator_
rf_df = pd.DataFrame({'accuracy': cross_val_score(random_forest_classifier, all_inputs, all_classes, cv=10),
'classifier': ['Random Forest'] * 10})
dt_df = pd.DataFrame({'accuracy': cross_val_score(decision_tree_classifier, all_inputs, all_classes, cv=10),
'classifier': ['Decision Tree'] * 10})
both_df = rf_df.append(dt_df)
sb.boxplot(x='classifier', y='accuracy', data=both_df)
sb.stripplot(x='classifier', y='accuracy', data=both_df, jitter=True, color='white')
###Output
_____no_output_____
###Markdown
They almost perform the same. This is because we only have 4 features describing our data, and random forests show their magic when there are hundreds of features. So, there's not much improvement we can do here. Step 7: Reproducibility We must always ensure that our analysis (work) can be reproduced later. **As a rule, we shouldn't place much weight on a discovery that can't be reproduced.** And notebooks like these go a long way when making our work reproducible.This notebook provides documentation and any figures along with the actual code.Along with this, one must also record what software and hardware were used to perform the analysis.There is a notebook tool named watermark, developed by **Sebastian Raschka** just for this.Installation: pip install watermark
###Code
%load_ext watermark
%watermark -a 'Ashish Rai' -nmv --packages numpy,pandas,matplotlib,seaborn,sklearn
###Output
Ashish Rai Tue Apr 04 2017
CPython 2.7.6
IPython 5.3.0
numpy 1.12.1
pandas 0.19.2
matplotlib 2.0.0
seaborn 0.7.1
sklearn 0.18.1
compiler : GCC 4.8.4
system : Linux
release : 4.4.0-71-generic
machine : x86_64
processor : x86_64
CPU cores : 4
interpreter: 64bit
###Markdown
Finally, let's extract the entire code into a single pipeline.
###Code
%matplotlib inline
import pandas as pd
import seaborn as sb
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.cross_validation import cross_val_score
# for why I used two different modules, see notes above
# We can directly use the cleaned data
iris_data_clean = pd.read_csv('data/iris_data_clean.csv',)
## Testing the data
# We know that we should only have three classes
assert len(iris_data_clean['class'].unique()) == 3
# We know that sepal lengths for 'Iris-versicolor' should never be below 2.5 cm
assert iris_data_clean.loc[iris_data_clean['class'] == 'Iris-versicolor', 'sepal_length_cm'].min() >= 2.5
# We know that our data set should have no missing measurements
assert len(iris_data_clean.loc[(iris_data_clean['sepal_length_cm'].isnull()) |
(iris_data_clean['sepal_width_cm'].isnull()) |
(iris_data_clean['petal_length_cm'].isnull()) |
(iris_data_clean['petal_width_cm'].isnull())]) == 0
# load the inputs and classes (targets)
all_inputs = iris_data_clean[['sepal_length_cm', 'sepal_width_cm',
'petal_length_cm', 'petal_width_cm']].values
all_classes = iris_data_clean['class'].values
# we will use the random_forest_classifier returned by GridSearchCV
random_forest_classifier = RandomForestClassifier(bootstrap=True, class_weight=None, criterion='gini',
max_depth=None, max_features=4, max_leaf_nodes=None,
min_impurity_split=1e-07, min_samples_leaf=1,
min_samples_split=2, min_weight_fraction_leaf=0.0,
n_estimators=5, n_jobs=1, oob_score=False, random_state=None,
verbose=0, warm_start=True)
# Let's plot the cross validation scores
rfc_scores = cross_val_score(random_forest_classifier, all_inputs, all_classes, cv = 10)
sb.boxplot(rfc_scores)
sb.stripplot(rfc_scores, jitter = True, color = 'white')
# let's show some predictions on the dataset that we have
(training_inputs,
testing_inputs,
training_classes,
testing_classes) = train_test_split(all_inputs, all_classes, train_size=0.75)
random_forest_classifier.fit(training_inputs, training_classes)
for input_features, prediction, actual in zip(testing_inputs[:10],
random_forest_classifier.predict(testing_inputs[:10]),
testing_classes[:10]):
print('{}\t-->\t{}\t(Actual: {})'.format(input_features, prediction, actual))
###Output
[ 5.7 4.4 1.5 0.4] --> Iris-setosa (Actual: Iris-setosa)
[ 5.6 2.5 3.9 1.1] --> Iris-versicolor (Actual: Iris-versicolor)
[ 5.5 2.6 4.4 1.2] --> Iris-versicolor (Actual: Iris-versicolor)
[ 4.4 3.2 1.3 0.2] --> Iris-setosa (Actual: Iris-setosa)
[ 4.6 3.4 1.4 0.3] --> Iris-setosa (Actual: Iris-setosa)
[ 6. 2.8 5.1 1.6] --> Iris-virginica (Actual: Iris-versicolor)
[ 5.5 3.5 1.3 0.2] --> Iris-setosa (Actual: Iris-setosa)
[ 5.8 4. 1.2 0.2] --> Iris-setosa (Actual: Iris-setosa)
[ 7.2 3.2 6. 1.8] --> Iris-virginica (Actual: Iris-virginica)
[ 4.8 3.4 1.9 0.2] --> Iris-setosa (Actual: Iris-setosa)
|
0_0_Data_Exploration.ipynb | ###Markdown
Dataset investigation and initial visualisation
###Code
import matplotlib.pyplot as plt
import pydicom
import numpy as np
import glob
import os
###Output
_____no_output_____
###Markdown
CMMD Sample
###Code
#WRITE YOUR PATH TO CMMD MANIFEST DIRECTORY BELOW:
cmmd_manifest_directory = "/media/craig/Larry/python/manifest-1616439774456/"
a = next(os.walk('.'))[1]
sample_cmmd_file = (cmmd_manifest_directory+
"/CMMD/D1-0001/07-18-2010-NA-NA-79377/""1.000000-NA-70244/1-1.dcm")
ds = pydicom.dcmread(sample_cmmd_file)
plt.imshow(ds.pixel_array, cmap=plt.cm.gray)
a = os.chdir(cmmd_manifest_directory)
a = next(os.walk('.'))[1]
a = next(os.walk('.'))[1]
a = next(os.walk('.'))[1]
a
def show_cmmd_array():
directory = cmmd_manifest_directory+"/CMMD/D1-1059/07-18-2009-NA-NA-59926/1.000000-NA-22834/"
ds1 = (pydicom.dcmread(directory+"1-1.dcm")).pixel_array
ds2 = (pydicom.dcmread(directory+"1-2.dcm")).pixel_array
directory = cmmd_manifest_directory+"/CMMD/D1-1069/07-18-2011-NA-NA-56881/1.000000-NA-54169/"
ds3 = (pydicom.dcmread(directory+"1-2.dcm")).pixel_array
directory = cmmd_manifest_directory+"/CMMD/D1-1061/07-18-2011-NA-NA-52688/1.000000-NA-89685/"
ds4 = (pydicom.dcmread(directory+"1-1.dcm")).pixel_array
ds5 = (pydicom.dcmread(directory+"1-2.dcm")).pixel_array
f, plots = plt.subplots(ncols= 5,figsize=(20, 20))
plots = plots.flatten()
plots[0].axis('off')
plots[0].imshow(ds1, cmap=plt.cm.gray)
plots[1].axis('off')
plots[1].imshow(ds2, cmap=plt.cm.gray)
plots[2].axis('off')
plots[2].imshow(ds3, cmap=plt.cm.gray)
plots[3].axis('off')
plots[3].imshow(ds4, cmap=plt.cm.gray)
plots[4].axis('off')
plots[4].imshow(ds5, cmap=plt.cm.gray)
plt.savefig("CMMD_Sample.png", bbox_inches='tight')
###Output
_____no_output_____
###Markdown
CMMD Array
###Code
show_cmmd_array()
###Output
_____no_output_____ |
BTP.ipynb | ###Markdown
Applying PCAFor our purpose, we want to retain 90% of the variance. So, we are finding the minimum number of principal components which can give us 90% of cumulative variance.
###Code
X = StandardScaler().fit_transform(df_model)
pca = PCA().fit(X)
#Plotting the Cumulative Summation of the Explained Variance
variance_list = np.cumsum(pca.explained_variance_ratio_)
optimal_n_components = 2
REQUIRED_VARIANCE = 0.9
for (i, variance) in enumerate(variance_list) :
if(variance > REQUIRED_VARIANCE) :
optimal_n_components = i+1
break
pca = PCA(n_components=optimal_n_components)
X_pca = pca.fit_transform(X)
print("Optimal number of principal components :", optimal_n_components)
print("Cumulative variance reserved :", variance_list[optimal_n_components-1])
x_coord = list(range(1, variance_list.size+1))
plt.figure()
plt.plot(x_coord, variance_list, 'b')
plt.plot(x_coord, variance_list, 'ob')
plt.plot(optimal_n_components, variance_list[optimal_n_components-1], 'or')
plt.xlabel('Number of Components')
plt.ylabel('Variance (%)') #for each component
plt.title('Cumulative Explained Variance graph')
plt.show()
###Output
Optimal number of principal components : 12
Cumulative variance reserved : 0.9020902687148747
###Markdown
Selecting the number of clusters with silhouette analysis on KMeans clustering
###Code
range_n_clusters = [2, 3, 4, 5, 6]
cluster_colors = ['r', 'b', 'g', 'y', 'c', 'm']
optimal_n_clusters = 2
best_silhoutte_avg = -1e8
for (i, n_clusters) in enumerate(range_n_clusters) :
fig, ax = plt.subplots(figsize=(8,8))
clusterer = KMeans(n_clusters=n_clusters, random_state=42)
cluster_labels = clusterer.fit_predict(X_pca)
# find silhouette avg
silhouette_avg = silhouette_score(X, cluster_labels)
print("For n_clusters =", n_clusters,
"The average silhouette_score is :", silhouette_avg)
if silhouette_avg > best_silhoutte_avg :
best_silhoutte_avg = silhouette_avg
optimal_n_clusters = n_clusters
centers = clusterer.cluster_centers_
colors = cm.nipy_spectral(cluster_labels.astype(float) / n_clusters)
ax.scatter(X_pca[:, 0], X_pca[:, 1], marker='.',
c=colors)
ax.scatter(centers[:, 0], centers[:, 1], marker='o',
c="white", alpha=1, s=200, edgecolor='k')
for i, c in enumerate(centers):
ax.scatter(c[0], c[1], marker='$%d$' % i, alpha=1,
s=50, edgecolor='k')
ax.set_title("The visualization of the clustered data.")
ax.set_xlabel("Feature space for the 1st feature")
ax.set_ylabel("Feature space for the 2nd feature")
plt.suptitle(("Silhouette analysis for KMeans clustering on sample data "
"with n_clusters = %d" % n_clusters),
fontsize=14, fontweight='bold')
print("\nOptimal number of clusters '", optimal_n_clusters, " 'with avg silhouette_score", best_silhoutte_avg, "\n")
plt.show()
model = KMeans(n_clusters=optimal_n_clusters, random_state=42).fit(X_pca)
cluster_labels = model.labels_
fig, ax = plt.subplots(figsize=(8,8))
x_coord = X_pca[:,0]
y_coord = X_pca[:,1]
color_list = ['r', 'b', 'g', 'c', 'm', 'y', 'k', 'o']
for cluster_label in range(optimal_n_clusters) :
ax.scatter(x_coord[cluster_labels==cluster_label],
y_coord[cluster_labels==cluster_label],
s = 100,
marker = '.',
label='cluster ' + str(cluster_label),
c=color_list[cluster_label])
# uncomment to annotate the points
# for i in range(x_coord.size) :
# ax.annotate(i, xy=(x_coord[i],y_coord[i]), xytext=(0, 0),
# textcoords='offset points')
plt.legend()
plt.show()
from google.colab import drive
drive.mount('/content/drive')
###Output
_____no_output_____ |
_notebooks/2022-01-13-decorators.ipynb | ###Markdown
"Intuitively understanding Python decorators"> "Syntactic sugar and functional implementation of decorators"- toc: true- branch: master- badges: true- comments: true- categories: [python, functional programming]- hide: false- search_exclude: true When first utilising functional programming concepts in Python, a traditional approach is to write each function one by one, and pass the input sequentially through this pipeline. As programmers gain more experience, they tend to begin bundling all these functions in the same namespace, typically through the use of classes. However, if these classes use concepts such as inheritance, it is easy to get lost in a sea of arguments being passed around, and attempts to caress each class into the appropriate place in the pipeline. The programmer almost goes back to the original strategy of sequential transformations on input, except now they're now dealing with more complicated objects than simple functions.Python decorators are functions that aim to counter this object-oriented problem, allowing flexible code that can dynamically modify other functions on the fly. By definition, a decorator is a function that takes another function and extends the behavior of the latter function without explicitly modifying it. Functions First-class objectsIn Python, recall that functions are first-class objects. This means that functions can be passed around and used as arguments, just like any other object (`string`, `int`, `float`, `list`, and so on). Consider the following three functions:
###Code
def cricket_runs(runs_scored:int):
return f"You made {runs_scored} runs."
def roll_dice(number:int):
return f"You rolled a {number}."
def general_123(specific_function):
return specific_function("2")
###Output
_____no_output_____
###Markdown
Here, `cricket_runs()` and `roll_dice()` are regular functions that expect a number given as an `int`. The `general_123()` function, however, expects a function as its argument. We can, for instance, pass it the `cricket_runs()` or the `roll_dice()` function:
###Code
general_123(cricket_runs)
general_123(roll_dice)
###Output
_____no_output_____
###Markdown
Note that `general_123(roll_dice)` refers to two functions, but in different ways: `general_123()` and `roll_dice`. The `roll_dice` function is named without parentheses. This means that only a reference to the function is passed; the function is not executed. The `general_123()` function, on the other hand, is written with parentheses, so it will be called as usual. To get a reference to any function (i.e. to refer to it as an object, rather than as something that executes), we simply don't write the parentheses:
###Code
general_123
###Output
_____no_output_____
###Markdown
Now is a good time to recall that you can create _inner functions_ in Python (functions defined within other functions, that are only available locally), and that you can also return functions from other functions.> Note: the main concept to grasp here is whether to return or execute a function. If you are returning a function as an _object_ to store and run later, don't use parentheses. Otherwise, if you want to execute a function on the spot, use parentheses, as per usual. Simple decoratorsNow that you’ve seen that functions are just like any other object in Python, you’re ready to move on and see the magical beast that is the Python decorator. Let’s start with an example:
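Before that example, here is a quick aside illustrating the two ideas just mentioned, inner functions and returning a function from a function (the names below are invented purely for illustration):
```python
def parent(num):
    # Inner functions: defined inside parent() and only visible there
    def first_child():
        return "Hi, I am the first child"

    def second_child():
        return "Call me the second child"

    # Return a reference (no parentheses), so the caller decides when to run it
    if num == 1:
        return first_child
    return second_child


first = parent(1)
second = parent(2)
print(first())   # Hi, I am the first child
print(second())  # Call me the second child
```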
###Code
def standard_decorator(func):
def wrapper():
print("Something is happening before func is called.")
func()
print("Something is happening after func is called.")
return wrapper
def shoot_ball():
print("I shot the ball.")
shoot = standard_decorator(shoot_ball)
shoot()
###Output
Something is happening before func is called.
I shot the ball.
Something is happening after func is called.
###Markdown
The so-called decoration happens at the following line:
```python
shoot = standard_decorator(shoot_ball)
```
In effect, the name `shoot` now points to the `wrapper()` inner function, while `shoot_ball` still refers to the original function. Remember that you return `wrapper` as a function when you call `standard_decorator(shoot_ball)`:
###Code
shoot_ball
###Output
_____no_output_____
###Markdown
However, `wrapper()` has a reference to the original `shoot_ball()` as `func`, and calls that function between the two calls to `print()`.Put simply: __decorators wrap a function, modifying its behavior__.Before moving on, let’s have a look at a second example. Because `wrapper()` is a regular Python function, the way a decorator modifies a function can change dynamically. So as not to disturb your neighbors, the following example will only run the decorated code during the day:
###Code
from datetime import datetime
def not_during_the_night(func):
def wrapper():
if 7 <= datetime.now().hour < 22:
func()
else:
pass # Hush, the neighbors are asleep
return wrapper
def shoot_ball():
print("I shot the ball!")
shoot_ball = not_during_the_night(shoot_ball)
###Output
_____no_output_____
###Markdown
If you call `shoot_ball()` after bedtime, nothing will happen; during the day (as in the output below), it prints as usual:
###Code
shoot_ball()
###Output
I shot the ball!
###Markdown
Syntactic sugar The way you decorated `shoot_ball()` above is a little clunky. First of all, you end up naming functions three times: defining `shoot_ball`, passing it to `standard_decorator`, and binding the result to `shoot`. In addition, the decoration gets a bit hidden away below the definition of the function. Instead, Python allows you to use decorators in a simpler way with the `@` symbol, sometimes called the "pie" syntax. The following example does the exact same thing as the first decorator example:
###Code
def standard_decorator(func):
def wrapper():
print("Something is happening before the function is called.")
func()
print("Something is happening after the function is called.")
return wrapper
@standard_decorator
def shoot_ball():
print("I shot the ball!")
###Output
_____no_output_____
###Markdown
So, `@standard_decorator` is just an easier way of saying `shoot_ball = standard_decorator(shoot_ball)`. It’s how you apply a decorator to a function. Reusing decoratorsRecall that a decorator is just a regular Python function. All the usual tools for easy reusability are available. Let’s move the decorator to its own module that can be used in many other functions.Create a file called `decorators.py` with the following content:
###Code
def do_twice(func):
def wrapper_do_twice():
func()
func()
return wrapper_do_twice
###Output
_____no_output_____
###Markdown
> Note: You can name your inner function whatever you want, and a generic name like `wrapper()` is usually okay. You’ll see a lot of decorators in this article. To keep them apart, we’ll name the inner function with the same name as the decorator but with a `wrapper_ prefix`. You can now use this new decorator in other files by doing a regular import:```pythonfrom decorators import do_twice@do_twicedef say_whee(): print("Whee!")```When you run this example, you should see that the original say_whee() is executed twice:```python>>> say_whee()Whee!Whee!``` Decorating functions with argumentsSay that you have a function that accepts some arguments. Can you still decorate it? Let’s try:
###Code
def do_twice(func):
def wrapper_do_twice():
func()
func()
return wrapper_do_twice
@do_twice
def greet(name):
print(f"Hello {name}")
###Output
_____no_output_____
###Markdown
Unfortunately, running this code raises an error:
###Code
# collapse
greet("World")
###Output
_____no_output_____
###Markdown
The problem is that the inner function `wrapper_do_twice()` does not take any arguments, but `name="World"` was passed to it. You could fix this by letting `wrapper_do_twice()` accept one argument, but then it would not work for the `say_whee()` function you created earlier.The solution is to use `*args` and `**kwargs` in the inner wrapper function. Then it will accept an arbitrary number of positional and keyword arguments. Rewrite `do_twice()` as follows:
###Code
def do_twice(func):
def wrapper_do_twice(*args, **kwargs):
func(*args, **kwargs)
func(*args, **kwargs)
return wrapper_do_twice
###Output
_____no_output_____
###Markdown
The `wrapper_do_twice()` inner function now accepts any number of arguments and passes them on to the function it decorates. Now both your `say_whee()` and `greet()` examples work:
###Code
say_whee()
@do_twice
def greet(name):
print(f"Hello {name}")
greet("World")
###Output
Hello World
Hello World
###Markdown
Returning values from decorated functionsWhat happens to the return value of decorated functions? Well, that’s up to the decorator to decide. Let’s say you decorate a simple function as follows:
###Code
@do_twice
def return_greeting(name):
print("Creating greeting")
return f"Hi {name}"
hi_adam = return_greeting("Adam")
print(hi_adam)
###Output
None
###Markdown
Oops, your decorator ate the return value from the function. Because the `wrapper_do_twice()` inner function doesn't explicitly return a value, the call `return_greeting("Adam")` ended up returning `None`. To fix this, you need to make sure the wrapper function returns the return value of the decorated function. Change your decorator:
###Code
def do_twice(func):
def wrapper_do_twice(*args, **kwargs):
func(*args, **kwargs)
return func(*args, **kwargs)
return wrapper_do_twice
###Output
_____no_output_____
###Markdown
The return value from the last execution of the function is returned:
###Code
@do_twice
def return_greeting(name):
print("Creating greeting")
return f"Hi {name}"
return_greeting("Adam")
###Output
Creating greeting
Creating greeting
###Markdown
IntrospectionA great convenience when working with Python, especially in the interactive shell, is its powerful introspection ability. Introspection is the ability of an object to know about its own attributes at runtime. For instance, a function knows its own name and documentation:
###Code
print
print.__name__
help(print)
###Output
Help on built-in function print in module builtins:
print(...)
print(value, ..., sep=' ', end='\n', file=sys.stdout, flush=False)
Prints the values to a stream, or to sys.stdout by default.
Optional keyword arguments:
file: a file-like object (stream); defaults to the current sys.stdout.
sep: string inserted between values, default a space.
end: string appended after the last value, default a newline.
flush: whether to forcibly flush the stream.
###Markdown
The introspection works for functions you define yourself as well:
###Code
say_whee
say_whee.__name__
###Output
_____no_output_____
###Markdown
However, after being decorated, `say_whee()` has gotten very confused about its identity. It now reports being the `wrapper_do_twice()` inner function inside the `do_twice()` decorator. Although technically true, this is not very useful information.To fix this, decorators should use the `@functools.wraps` decorator, which will preserve information about the original function. Update decorator again:
###Code
import functools
def do_twice(func):
@functools.wraps(func)
def wrapper_do_twice(*args, **kwargs):
func(*args, **kwargs)
return func(*args, **kwargs)
return wrapper_do_twice
###Output
_____no_output_____
###Markdown
You do not need to change anything about the decorated `say_whee()` function:
###Code
@do_twice
def say_whee():
print("Whee!")
say_whee
say_whee.__name__
###Output
_____no_output_____
###Markdown
Much better! Now `say_whee()` is still itself after decoration. Some examplesLet’s look at a few more useful examples of decorators. You’ll notice that they’ll mainly follow the same pattern that you’ve learned so far:```pythonimport functoolsdef decorator(func): @functools.wraps(func) def wrapper_decorator(*args, **kwargs): Do something before value = func(*args, **kwargs) Do something after return value return wrapper_decorator```This formula is a good boilerplate template for building more complex decorators. Timing decoratorLet’s start by creating a `@timer` decorator. It will measure the time a function takes to execute and print the duration to the console. Here’s the code:
###Code
import functools
import time
def timer(func):
"""Print the runtime of the decorated function"""
@functools.wraps(func)
def wrapper_timer(*args, **kwargs):
start_time = time.perf_counter()
value = func(*args, **kwargs)
end_time = time.perf_counter()
run_time = end_time - start_time
print(f"Finished {func.__name__!r} in {run_time:.4f} secs")
return value
return wrapper_timer
@timer
def waste_some_time(num_times):
for _ in range(num_times):
sum([i**2 for i in range(10000)])
###Output
_____no_output_____
###Markdown
This decorator works by storing the time just before the function starts running and again just after it finishes; the runtime is simply the difference between the two. We use `time.perf_counter()`, which does a good job of measuring time intervals. Here are some examples of timings:
###Code
waste_some_time(1)
waste_some_time(999)
###Output
Finished 'waste_some_time' in 2.3977 secs
###Markdown
> Note: The `@timer` decorator is great if you just want to get an idea about the runtime of your functions. If you want to do more precise measurements of code, you should instead consider the `timeit` module in the standard library. It temporarily disables garbage collection and runs multiple trials to strip out noise from quick function calls. Fancy decorators Decorating classesThere are two different ways you can use decorators on classes. The first one is very close to what you have already done with functions: you can decorate the methods of a class. This was one of the motivations for introducing decorators back in the day.Some commonly used decorators that are even built-ins in Python are `@classmethod`, `@staticmethod`, and `@property`. The `@classmethod` and `@staticmethod` decorators are used to define methods inside a class namespace that are not connected to a particular instance of that class. The `@property` decorator is used to customise getters and setters for class attributes.The following definition of a `Circle` class uses the `@classmethod`, `@staticmethod`, and `@property` decorators:
###Code
class Circle:
def __init__(self, radius):
self._radius = radius
@property
def radius(self):
"""Get value of radius"""
return self._radius
@radius.setter
def radius(self, value):
"""Set radius, raise error if negative"""
if value >= 0:
self._radius = value
else:
raise ValueError("Radius must be positive")
@property
def area(self):
"""Calculate area inside circle"""
return self.pi() * self.radius**2
def cylinder_volume(self, height):
"""Calculate volume of cylinder with circle as base"""
return self.area * height
@classmethod
def unit_circle(cls):
"""Factory method creating a circle with radius 1"""
return cls(1)
@staticmethod
def pi():
"""Value of π, could use math.pi instead though"""
return 3.1415926535
###Output
_____no_output_____
###Markdown
In this class:* `cylinder_volume()` is a regular method.* `radius` is a mutable property: it can be set to a different value. However, by defining a setter method, we can do some error testing to make sure it’s not set to a nonsensical negative number. Properties are accessed as attributes without parentheses.* `area` is an immutable property: properties without `.setter()` methods can’t be changed. Even though it is defined as a method, it can be retrieved as an attribute without parentheses.* `unit_circle()` is a class method. It’s not bound to one particular instance of `Circle`. Class methods are often used as factory methods that can create specific instances of the class.* `pi()` is a static method. It’s not really dependent on the `Circle` class, except that it is part of its namespace. Static methods can be called on either an instance or the class.The `Circle` class can for example be used as follows:
###Code
c = Circle(5)
c.radius
c.area
c.radius = 2
c.area
# c.area = 100    # would raise AttributeError: area has no setter, so it can't be changed
c.cylinder_volume(height=4)
# c.radius = -1   # would raise ValueError("Radius must be positive")
c.pi()
Circle.pi()
###Output
_____no_output_____
###Markdown
Let’s define a class where we decorate some of its methods using the `@debug` and `@timer` decorators from earlier:
###Code
import functools
def debug(func):
"""Print the function signature and return value"""
@functools.wraps(func)
def wrapper_debug(*args, **kwargs):
args_repr = [repr(a) for a in args] # 1
kwargs_repr = [f"{k}={v!r}" for k, v in kwargs.items()] # 2
signature = ", ".join(args_repr + kwargs_repr) # 3
print(f"Calling {func.__name__}({signature})")
value = func(*args, **kwargs)
print(f"{func.__name__!r} returned {value!r}") # 4
return value
return wrapper_debug
class TimeWaster:
@debug
def __init__(self, max_num):
self.max_num = max_num
@timer
def waste_time(self, num_times):
for _ in range(num_times):
sum([i**2 for i in range(self.max_num)])
###Output
_____no_output_____
###Markdown
Using this class, you can see the effect of the decorators:
###Code
tw = TimeWaster(1000)
tw.waste_time(999)
###Output
Finished 'waste_time' in 0.2636 secs
###Markdown
Decorators with argumentsSometimes, it’s useful to pass arguments to your decorators. For instance, `@do_twice` could be extended to a `@repeat(num_times)` decorator. The number of times to execute the decorated function could then be given as an argument.Think about how you could achieve this. So far, the name written after the @ has referred to a function object that can be called with another function. To be consistent, you then need repeat(num_times=4) to return a function object that can act as a decorator. Luckily, you already know how to return functions!Typically, the decorator creates and returns an inner wrapper function, so writing the example out in full will give you an inner function within an inner function. While this might sound like the programming equivalent of the Inception movie, we’ll untangle it all in a moment:
###Code
def repeat(num_times):
def decorator_repeat(func):
@functools.wraps(func)
def wrapper_repeat(*args, **kwargs):
for _ in range(num_times):
value = func(*args, **kwargs)
return value
return wrapper_repeat
return decorator_repeat
###Output
_____no_output_____
###Markdown
It looks a little messy, but we have only put the same decorator pattern you have seen many times by now inside one additional `def` that handles the arguments to the decorator. Let’s start with the innermost function:```pythondef wrapper_repeat(*args, **kwargs): for _ in range(num_times): value = func(*args, **kwargs) return value```This `wrapper_repeat()` function takes arbitrary arguments and returns the value of the decorated function, `func()`. This wrapper function also contains the loop that calls the decorated function `num_times` times. This is no different from the earlier wrapper functions you have seen, except that it is using the `num_times` parameter that must be supplied from the outside.One step out, you’ll find the decorator function:```pythondef decorator_repeat(func): @functools.wraps(func) def wrapper_repeat(*args, **kwargs): ... return wrapper_repeat```Again, `decorator_repeat()` looks exactly like the decorator functions you have written earlier, except that it’s named differently. That’s because we reserve the base name—`repeat()`—for the outermost function, which is the one the user will call.As you have already seen, the outermost function returns a reference to the decorator function:```pythondef repeat(num_times): def decorator_repeat(func): ... return decorator_repeat```There are a few subtle things happening in the repeat() function:Defining `decorator_repeat()` as an inner function means that `repeat()` will refer to a function object—`decorator_repeat`. Earlier, we used repeat without parentheses to refer to the function object. The added parentheses are necessary when defining decorators that take arguments.The `num_times` argument is seemingly not used in `repeat()` itself. But by passing `num_times` a closure is created where the value of `num_times` is stored until it will be used later by `wrapper_repeat()`.With everything set up, let’s see if the results are as expected:
###Code
@repeat(num_times=4)
def greet(name):
print(f"Hello {name}")
greet("World")
###Output
Hello World
Hello World
Hello World
Hello World
|
docs/usage/competing_risks_stackplot.ipynb | ###Markdown
We can investigate the data for a given competing risks group of states with a probability stackplot. Let's see this in action using the AIDSSI dataset:
###Code
# Load and prep data
from pymsm.datasets import load_aidssi, prep_aidssi
data = load_aidssi()
competing_risk_dataset, covariate_cols, state_labels = prep_aidssi(data)
from pymsm.plotting import competingrisks_stackplot
competingrisks_stackplot(
data=competing_risk_dataset,
duration_col='time_transition_to_target',
event_col ='target_state',
order_top= [2],
order_bottom = [3],
state_labels = state_labels);
###Output
_____no_output_____ |
06_Computer_Vision/05_CNN_and_features_visualization/01_conv_layer_visualization.ipynb | ###Markdown
Convolutional Layer---In this notebook, we visualize four filtered outputs (a.k.a. feature maps) of a convolutional layer. Import the image
###Code
import cv2
import matplotlib.pyplot as plt
%matplotlib inline
# TODO: Feel free to try out your own images here by changing img_path
# to a file path to another image on your computer!
img_path = 'images/udacity_sdc.png'
# load color image
bgr_img = cv2.imread(img_path)
# convert to grayscale
gray_img = cv2.cvtColor(bgr_img, cv2.COLOR_BGR2GRAY)
# normalize, rescale entries to lie in [0,1]
gray_img = gray_img.astype("float32")/255
# plot image
plt.imshow(gray_img, cmap='gray')
plt.show()
###Output
_____no_output_____
###Markdown
Define and visualize the filters
###Code
import numpy as np
## TODO: Feel free to modify the numbers here, to try out another filter!
filter_vals = np.array([[-1, -1, 1, 1], [-1, -1, 1, 1], [-1, -1, 1, 1], [-1, -1, 1, 1]])
print('Filter shape: ', filter_vals.shape)
# Defining four different filters,
# all of which are linear combinations of the `filter_vals` defined above
# define four filters
filter_1 = filter_vals
filter_2 = -filter_1
filter_3 = filter_1.T
filter_4 = -filter_3
filters = np.array([filter_1, filter_2, filter_3, filter_4])
# For an example, print out the values of filter 1
print('Filter 1: \n', filter_1)
### do not modify the code below this line ###
# visualize all four filters
fig = plt.figure(figsize=(10, 5))
for i in range(4):
ax = fig.add_subplot(1, 4, i+1, xticks=[], yticks=[])
ax.imshow(filters[i], cmap='gray')
ax.set_title('Filter %s' % str(i+1))
width, height = filters[i].shape
for x in range(width):
for y in range(height):
ax.annotate(str(filters[i][x][y]), xy=(y,x),
horizontalalignment='center',
verticalalignment='center',
color='white' if filters[i][x][y]<0 else 'black')
###Output
_____no_output_____
###Markdown
Define a convolutional layer Initialize a single convolutional layer so that it contains all your created filters. Note that you are not training this network; you are initializing the weights in a convolutional layer so that you can visualize what happens after a forward pass through this network!
###Code
import torch
import torch.nn as nn
import torch.nn.functional as F
# define a neural network with a single convolutional layer with four filters
class Net(nn.Module):
def __init__(self, weight):
super(Net, self).__init__()
# initializes the weights of the convolutional layer to be the weights of the 4 defined filters
k_height, k_width = weight.shape[2:]
# assumes there are 4 grayscale filters
self.conv = nn.Conv2d(1, 4, kernel_size=(k_height, k_width), bias=False)
self.conv.weight = torch.nn.Parameter(weight)
def forward(self, x):
# calculates the output of a convolutional layer
# pre- and post-activation
conv_x = self.conv(x)
activated_x = F.relu(conv_x)
# returns both layers
return conv_x, activated_x
# instantiate the model and set the weights
weight = torch.from_numpy(filters).unsqueeze(1).type(torch.FloatTensor)
model = Net(weight)
# print out the layer in the network
print(model)
###Output
Net(
(conv): Conv2d(1, 4, kernel_size=(4, 4), stride=(1, 1), bias=False)
)
###Markdown
Visualize the output of each filterFirst, we'll define a helper function, `viz_layer` that takes in a specific layer and number of filters (optional argument), and displays the output of that layer once an image has been passed through.
###Code
# helper function for visualizing the output of a given layer
# default number of filters is 4
def viz_layer(layer, n_filters= 4):
fig = plt.figure(figsize=(20, 20))
for i in range(n_filters):
ax = fig.add_subplot(1, n_filters, i+1, xticks=[], yticks=[])
# grab layer outputs
ax.imshow(np.squeeze(layer[0,i].data.numpy()), cmap='gray')
ax.set_title('Output %s' % str(i+1))
###Output
_____no_output_____
###Markdown
Let's look at the output of a convolutional layer, before and after a ReLU activation function is applied.
###Code
# plot original image
plt.imshow(gray_img, cmap='gray')
# visualize all filters
fig = plt.figure(figsize=(12, 6))
fig.subplots_adjust(left=0, right=1.5, bottom=0.8, top=1, hspace=0.05, wspace=0.05)
for i in range(4):
ax = fig.add_subplot(1, 4, i+1, xticks=[], yticks=[])
ax.imshow(filters[i], cmap='gray')
ax.set_title('Filter %s' % str(i+1))
# convert the image into an input Tensor
gray_img_tensor = torch.from_numpy(gray_img).unsqueeze(0).unsqueeze(1)
# get the convolutional layer (pre and post activation)
conv_layer, activated_layer = model(gray_img_tensor)
# visualize the output of a conv layer
viz_layer(conv_layer)
# visualize the output of an activated conv layer
viz_layer(activated_layer)
###Output
_____no_output_____ |
notebooks/model_parameter_search.ipynb | ###Markdown
Model Parameter Search - Automated and Distributed Back when I was [creating a movie recommender](https://turi.com/learn/gallery/notebooks/recsys_explicit_rating.html) the most time consuming part was finding good parameters to use in training my recommender; the right training parameters can make all the difference between a lame model and a great model. Finding good training parameters is [a very common problem](http://en.wikipedia.org/wiki/Hyperparameter_optimization) in machine learning.Fortunately GraphLab Create makes it easy to tune training parameters. By just calling [model_parameter_search](https://turi.com/products/create/docs/graphlab.toolkits.model_parameter_search.html) we can create a job to automatically search for parameters. With just one more line of code we can make it a distributed search, training and evaluating models in parallel. Setup The first step is to import graphlab and read in our data:
###Code
import graphlab as gl
data_url = 'https://static.turi.com/datasets/movie_ratings/sample.small'
movie_data = gl.SFrame.read_csv(data_url, delimiter='\t')
###Output
[INFO] Start server at: ipc:///tmp/graphlab_server-18448 - Server binary: /usr/local/lib/python2.7/dist-packages/graphlab/unity_server - Server log: /tmp/graphlab_server_1425080074.log
[INFO] GraphLab Server Version: 1.5.0
###Markdown
Each row in our data represents a movie rating from a user. There are only three columns: user, movie and rating.
###Code
movie_data
###Output
_____no_output_____
###Markdown
So What Exactly is Model Parameter Search? To quickly create a recommender we can simply call the create method of factorization_recommender. We only need to pass it our data and tell it what columns represent: user id, item id and prediction target. That looks like:
###Code
model = gl.factorization_recommender.create(movie_data, user_id='user',
item_id='movie', target='rating')
###Output
_____no_output_____
###Markdown
If we did it this way, the default values would be used for all of our training parameters. All of the models in Graphlab Create come with good default values. However no single value will ever be optimal for all data. With just a little work we can find better parameter values and create a more effective model.In order to be able to tell the best parameter values, we have to be able to measure a model's performance. It's important that you don't use the same data to both train the model and evaluate its effectiveness. So we'll create a random split of our data, using 80% for training the models and the other 20% for evaluating the models.
###Code
train_set, validation_set = movie_data.random_split(0.8)
###Output
_____no_output_____
###Markdown
Once we have a model we want to evaluate, we can evaluate it against our validation set:
###Code
evaluation = model.evaluate(validation_set)
###Output
_____no_output_____
###Markdown
In a nutshell, model parameter search trains several different models, each with different values for training parameters, then evaluates each of the models. Doing a Model Parameter Search There are [a lot of different parameters](https://turi.com/products/create/docs/generated/graphlab.recommender.ranking_factorization_recommender.create.htmlgraphlab.recommender.ranking_factorization_recommender.create) we could tweak when creating a factorization_recommender. Probably the most important is the number of latent factors. With one call to [model_parameter_search](https://turi.com/products/create/docs/graphlab.toolkits.model_parameter_search.html) we can easily search over several different values for the number of latent factors. The first parameter to model_parameter_search is a tuple of the training set and the validation set. The second parameter is the function that creates the model, in our case that's "gl.factorization_recommender.create". In addition, we need to specify the parameters that will be used to create the models. There are two types of parameters: fixed parameters and free parameters. Fixed parameters are the parameters that are the same for all of the models that get created, for us that's: user_id, item_id, and target. Free parameters are the parameters you want to search over, so that's num_factors. Putting it all together we get:
###Code
job = gl.model_parameter_search.create(
(train_set, validation_set),
gl.factorization_recommender.create,
model_parameters = {'user_id': 'user', 'item_id': 'movie', 'target': 'rating', 'num_factors': [4, 5, 6, 7]}
)
###Output
[INFO] Validating job.
[INFO] Creating a LocalAsync environment called 'async'.
[INFO] Validation complete. Job: 'Model-Parameter-Search-Feb-27-2015-15-35-51' ready for execution
[INFO] Job: 'Model-Parameter-Search-Feb-27-2015-15-35-51' scheduled.
###Markdown
By default, the job will run asynchronously in a background process. We can check whether the job has completed by calling job.get_status()
###Code
job.get_status()
###Output
_____no_output_____
###Markdown
It will take a few minutes to train and evaluate four models ......
###Code
job.get_status()
###Output
_____no_output_____
###Markdown
Getting the Best Model Once the job is completed, we can get the results by calling job.get_results(). The results contain two things: all of the models that were created and summary information about each model. The summary information includes the [RMSE](http://en.wikipedia.org/wiki/Root-mean-square_deviation) on the validation set. With a little work we can determine the best [RMSE](http://en.wikipedia.org/wiki/Root-mean-square_deviation) score and get the corresponding model:
###Code
search_summary= job.get_results()
best_RMSE = search_summary['validation_rmse'].min()
best_model_id = search_summary[search_summary['validation_rmse'] == best_RMSE]['model_id'][0]
###Output
_____no_output_____
###Markdown
best_model_id will be the best of the four models we searched over. The more parameter combinations we try, the more likely we are to find an even better model. We might want to try a larger range for the number of latent factors. There are other parameters we can tweak too. For example, regularization is another important parameter to tune. As we increase the number of parameters and the range of values we want to tweak, the number of combinations gets large quickly. Doing the entire search just on your computer could take a long time. Making it Distributed With only a couple more lines of code we can make our search distributed, training and evaluating models in parallel. GraphLab Create makes it easy to use either Amazon Web Services or a Hadoop cluster. All we need to do is create a deployment environment and pass that to model_parameter_search. To use a Hadoop cluster, create an environment object like this:
###Code
hadoop_cluster = gl.deploy.hadoop_cluster.create(name = '<name of hadoop cluster>',
                                                 turi_dist_path = '<distributed path>',
                                                 hadoop_conf_dir = '<path to hadoop config dir>')
###Output
_____no_output_____
###Markdown
To use an EC2 environment with three hosts, create an environment like this:
###Code
ec2_config = gl.deploy.Ec2Config(aws_access_key_id = '<my access key>',
aws_secret_access_key = '<my secret key>')
my_env = gl.deploy.ec2_cluster.create('<name for my environment>',
s3_path = 's3://<my bucket name>',
num_hosts = 3,
ec2_config = ec2_config)
###Output
_____no_output_____
###Markdown
Searching over several values for num_factors and regularization, and using our distributed environment, the model_parameter_search call will look like:
###Code
job = gl.model_parameter_search.create(
(train_set, validation_set),
gl.factorization_recommender.create,
environment = my_env,
    model_parameters = {'user_id': 'user', 'item_id': 'movie', 'target': 'rating',
                        'num_factors': [4, 5, 6, 7],
                        'regularization': [1e-8, 1e-6, 1e-4]}  # example regularization values to search over
)
###Output
_____no_output_____ |
Bonus/temp_analysis_bonus_2_starter.ipynb | ###Markdown
Reflect Tables into SQLAlchemy ORM
###Code
# Python SQL toolkit and Object Relational Mapper
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func, inspect
from sqlalchemy.sql.expression import and_

# Libraries used later in this notebook for dates, arrays, dataframes, and plotting
import datetime as dt
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# create engine to hawaii.sqlite
engine = create_engine("sqlite:///../Resources/hawaii.sqlite")
# reflect an existing database into a new model
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect=True)
# View all of the classes that automap found
Base.classes.keys()
# Save references to each table
Measurement = Base.classes.measurement
Station = Base.classes.station
# Create inspector
inspector = inspect(engine)
# Inspect columns for Measurement
columns = inspector.get_columns('Measurement')
for column in columns:
print(column['name'],column['type'])
# Inspect columns for Station
columns = inspector.get_columns('Station')
for column in columns:
print(column['name'],column['type'])
# Create our session (link) from Python to the DB
session = Session(engine)
###Output
_____no_output_____
###Markdown
Bonus Challenge Assignment: Temperature Analysis II
###Code
# This function called `calc_temps` will accept start date and end date in the format '%Y-%m-%d'
# and return the minimum, maximum, and average temperatures for that range of dates
def calc_temps(start_date, end_date):
"""TMIN, TAVG, and TMAX for a list of dates.
Args:
start_date (string): A date string in the format %Y-%m-%d
end_date (string): A date string in the format %Y-%m-%d
Returns:
TMIN, TAVE, and TMAX
"""
return session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\
filter(Measurement.date >= start_date).filter(Measurement.date <= end_date).all()
# For example
print(calc_temps('2012-02-28', '2012-03-05'))
# Use the function `calc_temps` to calculate the tmin, tavg, and tmax
# for a year in the data set
result = calc_temps('2016-01-01', '2016-12-31')
print(result)
# Plot the results from your previous query as a bar chart.
# Use "Trip Avg Temp" as your Title
# Use the average temperature for bar height (y value)
# Use the peak-to-peak (tmax-tmin) value as the y error bar (yerr)
result_df = pd.DataFrame(result, columns = ['TMIN', "TAVE",'TMAX'])
avg_temp = result_df['TAVE']
yerr = result_df['TMAX'] - result_df['TMIN']
plt.figure(facecolor=(1,1,1))
plt.ylabel('Temp (F)')
avg_temp.plot(kind='bar', yerr = yerr, figsize =(5,8), grid = True, color='#DB9C9C',alpha = 0.9, title = 'Trip Avg Temp')
plt.xticks(np.arange(len(avg_temp)),["2016 average temperature"], rotation = 0)
plt.tight_layout()
plt.savefig("../Images/Trip_avg_temp.png")
plt.show()
###Output
_____no_output_____
###Markdown
Daily Rainfall Average
###Code
# Calculate the total amount of rainfall per weather station for your trip dates using the previous year's
# matching dates.
# Sort this in descending order by precipitation amount and list the station, name, latitude, longitude, and elevation
end_Date = '2016-12-31'
# Convert end_Date to be used for datatime
year = int(end_Date[:4])
month = int(end_Date[5:7])
day = int(end_Date[8:])
# Design a query to retrieve the last 12 months of precipitation data and plot the results.
# Starting from the most recent data point in the database.
recent_date = dt.date(year, month, day)
# Calculate the date one year from the last date in data set.
query_date_1year = recent_date - dt.timedelta(days=365)
# Perform a query to retrieve the station and precipitation scores
sel = [Station.station, Station.name, Station.latitude, Station.longitude, Station.elevation, func.sum(Measurement.prcp)]
prcp_data = session.query(*sel).filter(Station.station == Measurement.station).\
filter(and_(Measurement.date >= query_date_1year, Measurement.date <= recent_date)).\
group_by(Measurement.station).order_by(func.sum(Measurement.prcp).desc()).all()
# Save the query results as a Pandas DataFrame
prcp_df = pd.DataFrame(prcp_data, columns=['Station','Name','Latitude','Longitude','Elevation','PrcpSum'])
prcp_df
###Output
_____no_output_____
###Markdown
Daily Temperature Normals
###Code
# Use this function to calculate the daily normals
# (i.e. the averages for tmin, tmax, and tavg for all historic data matching a specific month and day)
def daily_normals(date):
"""Daily Normals.
Args:
date (str): A date string in the format '%m-%d'
Returns:
A list of tuples containing the daily normals, tmin, tavg, and tmax
"""
sel = [func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)]
return session.query(*sel).filter(func.strftime("%m-%d", Measurement.date) == date).all()
# For example
daily_normals("01-01")
# calculate the daily normals for your trip
# push each tuple of calculations into a list called `normals`
normals = []
travel_dates = []
# Set the start and end date of the trip
start_date = '2017-08-01'
end_date = '2017-08-07'
# Use the start and end date to create a range of dates
# Strip off the year and save a list of strings in the format %m-%d
start_date_obj = dt.datetime.strptime(start_date, '%Y-%m-%d')
end_date_obj = dt.datetime.strptime(end_date, '%Y-%m-%d')
# Use the `daily_normals` function to calculate the normals for each date string
# and append the results to a list called `normals`.\
while (start_date_obj <= end_date_obj):
travel_dates.append(dt.datetime.strftime(start_date_obj, '%m-%d'))
normals.append(list(np.ravel(daily_normals(dt.datetime.strftime(start_date_obj, '%m-%d')))))
start_date_obj = start_date_obj + dt.timedelta(days=1)
# Load the previous query results into a Pandas DataFrame and add the `trip_dates` range as the `date` index
week_travel_tem = pd.DataFrame(normals, columns=['TMIN','TAVE','TMAX'])
week_travel_tem['Date'] = travel_dates
week_travel_tem = week_travel_tem.set_index('Date')
week_travel_tem
# Plot the daily normals as an area plot with `stacked=False`
week_travel_tem.plot.area(stacked = False, ylabel='Temperature (F)',xlabel='Date', title=f"Daily Temperature from {start_date} to {end_date}", figsize=(8,5), rot = 45, colormap = 'OrRd')
plt.tight_layout()
plt.savefig(f'../Images/Daily_Temperature_from_{start_date}_to_{end_date}')
plt.show()
###Output
_____no_output_____
###Markdown
Close Session
###Code
session.close()
###Output
_____no_output_____ |
Metodos_Restritos.ipynb | ###Markdown
UNIVERSIDADE FEDERAL DO PIAUÍ UNDERGRADUATE PROGRAM IN ELECTRICAL ENGINEERING COURSE: OPTIMIZATION TECHNIQUES INSTRUCTOR: ALDIR SILVA SOUSA STUDENT: MARIANA DE SOUSA MOURA STUDENT ID: 20159049702--- **Final Assignment: Analysis of Optimization Methods for Unconstrained and Constrained Problems** **Unconstrained Problems** **Links to the code:**[Conjugate Gradient Method (Fletcher-Reeves)](https://colab.research.google.com/drive/15F2ezcGu69VYaGB81SPimkG9RINxLNy3?usp=sharing)[Gradient Descent Method](https://colab.research.google.com/drive/1p6T99OfRePDGMgYoyFRx6PHWVa09CFuK?usp=sharing)[Newton's Method](https://colab.research.google.com/drive/1YQRVRyNExoYgD1O_si-79oQ_ZkWRJQND?usp=sharing)[Quasi-Newton Method (Davidon-Fletcher-Powell)](https://colab.research.google.com/drive/1ougB-DeksZBYqFqO_hqm7Cum1sgrMRiz?usp=sharing) **1. Rastrigin Function:** Minimize $ An + \sum_{i=1}^{n} [x_i^2 - A\cos(2 \pi x_i)]$ subject to: $-5.12 \leq x_i \leq 5.12$ Solve for n = 2, 5, and 10. ---Optimal solution: $f(0,...,0)=0.$
###Code
import math
import sympy as sym
c = list(sym.symbols("x:2"))
A = 10
n = 2
def f1(c):
fo = A*n
for i in range(0,n):
fo = fo + (c[i]**2 - A*sym.cos(2*math.pi*c[i]))
return fo
fig = sym.plotting.plot3d(f1(c),(c[0],-5.12,5.12),(c[1],-5.12,5.12), points = 100,title = 'Função de Rastringin')
###Output
_____no_output_____
###Markdown
As can be seen, the Rastrigin function has numerous local minima, which makes the search for the optimal solution difficult. The method that obtained the best result even when starting from distant points was the Conjugate Gradient method, which reached the solution even when starting from x = [2, 2]. The analysis of the methods was based on the starting point x = [1, 1] for n equal to 2, 5 and 10. The Newton and Quasi-Newton (Davidon-Fletcher-Powell) methods fail to reach the global minimum, stalling at a local solution. The Conjugate Gradient and Gradient Descent methods do reach the global minimum from these initial conditions. Performance of the Conjugate Gradient method:* n = 2>* Response time: 0.19 s* Solution accuracy: 3.1e-4 * n = 5>* Response time: 0.27 s* Solution accuracy: 4.9e-4 * n = 10>* Response time: 0.67 s* Solution accuracy: 6.9e-4 Performance of the Gradient Descent method:* n = 2>* Response time: 0.32 s* Solution accuracy: 1.5e-4 * n = 5>* Response time: 0.21 s* Solution accuracy: 2.4e-4 * n = 10>* Response time: 0.34 s* Solution accuracy: 3.3e-4 Thus, Gradient Descent achieved the best overall performance for this function from the adopted initial conditions, generally showing a shorter convergence time to the optimal solution and higher accuracy. However, Gradient Descent cannot converge to the global minimum when the starting point is farther away, for example x = [2, 2], as was observed for the Conjugate Gradient method. **2. Ackley Function:** f(x,y) = $-20e^ {-0.2 \sqrt {0.5(x^2 +y^2)}} - e^{0.5[\cos(2 \pi x) + \cos(2 \pi y)]} + e + 20$ $-5 \leq x,y \leq 5$ ---Optimal solution: $f(0,0) = 0.$
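Before moving on to the Ackley plot below, here is a minimal, illustrative sketch of a fixed-step gradient-descent loop on the Rastrigin function. It is an assumption for illustration only; the timings above come from the Fletcher-Reeves and gradient-descent implementations in the Colab notebooks linked at the top, which use a proper line search.

```python
# Illustrative sketch only: fixed-step gradient descent on the n-dimensional Rastrigin function.
import numpy as np

def rastrigin(x, A=10):
    x = np.asarray(x, dtype=float)
    return A * len(x) + np.sum(x**2 - A * np.cos(2 * np.pi * x))

def rastrigin_grad(x, A=10):
    x = np.asarray(x, dtype=float)
    return 2 * x + 2 * np.pi * A * np.sin(2 * np.pi * x)

def gradient_descent(x0, step=1e-3, tol=1e-6, max_iter=50_000):
    x = np.asarray(x0, dtype=float)
    for _ in range(max_iter):
        g = rastrigin_grad(x)
        if np.linalg.norm(g) < tol:
            break
        x = x - step * g
    return x

x_final = gradient_descent([1.0, 1.0])
print(x_final, rastrigin(x_final))  # prints the stationary point reached and its objective value
```

With a fixed step this simple loop typically stops at the nearest local minimum, which is exactly the difficulty described in the analysis above.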
###Code
#import matplotlib.pyplot as plt
import math
import sympy as sym
c = list(sym.symbols("x:2"))
def fo(c):
    return -20*sym.exp(-0.2*sym.sqrt(0.5*(c[0]**2 + c[1]**2))) - sym.exp(0.5*(sym.cos(2*math.pi*c[0]) + sym.cos(2*math.pi*c[1]))) + math.e + 20
fig = sym.plotting.plot3d(fo(c),(c[0],-5,5),(c[1],-5,5), points = 100,title = 'Função Ackley')
#plt.show()
###Output
_____no_output_____
###Markdown
The Ackley function also has numerous local minima, which makes the search for the global minimum considerably harder. For this reason, the analysis of the methods was based on a starting point close to the solution, x = [0.2, 0.2]. The Gradient Descent and Quasi-Newton (Davidon-Fletcher-Powell) methods fail to reach the global minimum, stalling at a local minimum. The Conjugate Gradient method only converges to the global minimum, in this case, when the function is squared. Newton's method also converges under these same conditions. Performance of the Conjugate Gradient method:* Response time: 0.10 s* Solution accuracy: 5.1e-5 Performance of Newton's method:* Response time: 0.03 s* Solution accuracy: 6.8e-6 Therefore, Newton's method achieved the best performance for the Ackley function given the adopted initial conditions, with a much shorter convergence time and much higher accuracy than the Conjugate Gradient method. The Conjugate Gradient method can converge to the global minimum even when starting from a point farther from the solution, for example x = [1, 1]. The same is not possible for Newton's method, which requires the starting points to be essentially close to the solution. **3. Beale Function:** $f(x,y) = (1.5 - x +xy)^2 + (2.25 - x + xy^2)^2 + (2.625 - x + xy^3)^2 $ subject to: $-4.5 \leq x,y \leq 4.5$ ---Optimal solution: $f(3,0.5) = 0$
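For the Beale function just stated, the gradient and Hessian needed by the Newton and Quasi-Newton runs can be generated symbolically with sympy, which this notebook already uses for plotting. The snippet below is only an illustrative sketch of that step, not the linked implementations themselves.

```python
# Illustrative sketch only: symbolic gradient/Hessian of the Beale function for Newton-type methods.
import sympy as sym

x, y = sym.symbols('x y')
beale = (1.5 - x + x*y)**2 + (2.25 - x + x*y**2)**2 + (2.625 - x + x*y**3)**2

grad = [sym.diff(beale, v) for v in (x, y)]
hess = sym.hessian(beale, (x, y))

# Numeric callables that an optimization loop could evaluate at each iterate
grad_fn = sym.lambdify((x, y), grad, 'numpy')
hess_fn = sym.lambdify((x, y), hess, 'numpy')

print(grad_fn(3.0, 0.5))  # approximately [0, 0] at the known optimum f(3, 0.5) = 0
```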
###Code
import matplotlib.pyplot as plt
c = list(sym.symbols("x:2"))
def fo(c):
    return (1.5 - c[0] + c[0]*c[1])**2 + (2.25 - c[0] + c[0]*c[1]**2)**2 + (2.625 - c[0] + c[0]*c[1]**3)**2
fig = sym.plotting.plot3d(fo(c),(c[0],-4.5,4.5),(c[1],-4.5,4.5), points = 100,title = 'Função de Beale')
plt.show()
###Output
_____no_output_____
###Markdown
Unlike the previous cases, the Beale function does not have local minima, so the search over the feasible region is easier. A starting point from which all methods would reach the optimal solution was chosen, in order to compare the accuracy of the solution and the convergence time of each method. The chosen starting point was relatively close to the solution, x = [2, 0.1]. Performance of the Conjugate Gradient method:* Response time: 0.99 s* Solution accuracy: 1.3e-4 Performance of the Gradient Descent method:* Response time: 18.23 s* Solution accuracy: 9.5e-4 Performance of Newton's method:* Response time: 0.34 s* Solution accuracy: 2e-4 Performance of the Quasi-Newton method:* Response time: 0.50 s* Solution accuracy: 2.5e-4 Newton's method achieved the best performance in terms of convergence time, while the Conjugate Gradient method was best in terms of solution accuracy. The disadvantage of Newton's method relative to the others is that it does not converge from starting points far from the solution, even though the function has no local minima. The Gradient Descent method took far longer than the other methods, performing approximately 112 iterations before reaching the solution. **4. Easom Function:** $f(x,y) = -\cos(x)\cos(y)\exp(-((x-\pi)^2+(y-\pi)^2))$ $-100 \leq x,y\leq 100$ ---Solution: $f(\pi, \pi) = -1.$
###Code
import matplotlib.pyplot as plt
c = list(sym.symbols("x:2"))
def fo(c):
return -sym.cos(c[0])*sym.cos(c[1])*sym.exp(-((c[0] - math.pi)**2 + (c[1] - math.pi)**2))
fig = sym.plotting.plot3d(fo(c),(c[0],-5,5),(c[1],-5,5), points = 100,title = 'Função de Eason')
plt.show()
###Output
_____no_output_____
###Markdown
The Easom function, like the Beale function, has no local minima, so the search over the feasible region is easier. A starting point from which all methods would reach the optimal solution was chosen, in order to compare the accuracy of the solution and the convergence time of each method. The chosen starting point was relatively close to the solution, x = [3, 3]. Performance of the Conjugate Gradient method:* Response time: 0.06 s* Solution accuracy: 3.4e-4 Performance of the Gradient Descent method:* Response time: 0.08 s* Solution accuracy: 2.4e-4 Performance of Newton's method:* Response time: 0.09 s* Solution accuracy: 1.6e-4 Performance of the Quasi-Newton method:* Response time: 0.25 s* Solution accuracy: 1.3e-12 The Conjugate Gradient method achieved the best performance in terms of convergence time, while the Quasi-Newton method was best in terms of solution accuracy. Overall, the convergence times were quite small for the adopted initial conditions. **Constrained Problems** **Links to the code:**[Barrier with Conjugate Gradient](https://colab.research.google.com/drive/10X-diHmoiP-6LsHy4ocxDfIVIDddqzhL?usp=sharing)[Barrier with Quasi-Newton (Davidon-Fletcher-Powell)](https://colab.research.google.com/drive/12xfXCt9l5QKcfGgfASc5P75SNCU9VRTT?usp=sharing)[Penalty with Conjugate Gradient](https://colab.research.google.com/drive/1fZbVugqiAGz8JIphfuDPz0pthpNB96Uh?usp=sharing)[Penalty with Quasi-Newton (Davidon-Fletcher-Powell)](https://colab.research.google.com/drive/1-yR4WTWYZnS0DH8oUVqBtUnZ8LkrQ4uP?usp=sharing) **5. McCormick Function**Minimize: $f(x,y) = \sin(x+y) + (x-y)^2 -1.5x + 2.5y + 1 $ Subject to: $ -1.5 \leq x \leq 4 $ $-3 \leq y \leq 4$ ---Solution: $f(-0.54719, -1.54719) = -1.9133$
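For reference, since the Quasi-Newton results above and below all rely on it, the Davidon-Fletcher-Powell update of the inverse-Hessian approximation $H_k$ is usually written as follows (standard textbook form, quoted here as background rather than taken from the linked code):

$$ H_{k+1} = H_k + \frac{s_k s_k^{T}}{s_k^{T} y_k} - \frac{H_k y_k y_k^{T} H_k}{y_k^{T} H_k y_k}, \qquad s_k = x_{k+1} - x_k, \quad y_k = \nabla f(x_{k+1}) - \nabla f(x_k). $$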
###Code
import sympy as sym
import matplotlib.pyplot as plt
x1 = sym.Symbol('x1')
x2 = sym.Symbol('x2')
f = sym.sin(x1+x2) + (x1 - x2)**2 - 1.5*x1 + 2.5*x2 + 1
fig = sym.plotting.plot3d(f,(x1,-1.5,4),(x2,-3,4), points = 100,title = 'Função de McCormick')
plt.show()
###Output
_____no_output_____
###Markdown
From the plot we can see that the McCormick function has a single stationary point, which makes the search for the optimal minimum over the feasible region easier. The Barrier and Penalty algorithms were implemented to transform constrained problems into unconstrained ones, in combination with the Conjugate Gradient and Quasi-Newton (Davidon-Fletcher-Powell) optimization methods. *Barrier Method* The Barrier Method requires the initial point to lie inside the feasible region with g(x) < 0. The starting point x = [1, -1] was chosen. The chosen values of the parameters $\mu$ and $\beta$ were 3 and 0.1, respectively. Both optimization methods were able to reach the global solution. Performance of the Barrier method with Conjugate Gradient:* Response time: 47.17 s* Solution accuracy: 6.4e-4 Performance of the Barrier method with Quasi-Newton:* Response time: 4.59 s* Solution accuracy: 6.4e-4 *Penalty Method* The chosen starting point was x = [1, -1], from which both methods reach the optimal solution. The value of $\mu$ was chosen so as to obtain good accuracy in the answer, namely $\mu$ = 1; the chosen parameter $\beta$ was $\beta$ = 10. Performance of the Penalty method with Conjugate Gradient:* Response time: 13.13 s* Solution accuracy: 1.2e-4 Performance of the Penalty method with Quasi-Newton:* Response time: 2.34 s* Solution accuracy: 4.4e-4 Therefore, the method with the best performance for this function in terms of convergence time is Penalty with Quasi-Newton, while the best accuracy was obtained by Penalty with Conjugate Gradient. It was also noted that for points farther from the solution, for example x = [-2, 2], the Conjugate Gradient keeps behavior similar to that noted before: it converges in 8.49 s with accuracy 1.6e-4, i.e. it also performs well for initial points far from the solution. For such distant points, however, the Quasi-Newton method converges to a local minimum. **6. Shubert Function**Minimize: $SH(x_1,x_2) = (\sum_{j=1}^{5} j\cos[(j+1) x_1 + j]) \cdot (\sum_{j=1}^{5} j\cos[(j+1) x_2 + j]) $ Subject to: $-10 \leq x_1,x_2 \leq 10$ Optimal solution: $f(x_1,x_2) = -186.7309$
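To make the outer loop just described concrete, here is a minimal sketch of a quadratic-penalty iteration for the McCormick box constraints, with scipy's BFGS standing in for the inner unconstrained solver. It is an illustrative assumption only; the reported times and accuracies come from the Penalty/Barrier Colab notebooks linked above.

```python
# Illustrative sketch only: quadratic-penalty method for the McCormick problem.
import numpy as np
from scipy.optimize import minimize

def f_mccormick(v):
    x, y = v
    return np.sin(x + y) + (x - y)**2 - 1.5*x + 2.5*y + 1

def g_mccormick(v):                       # constraints written as g_i(v) <= 0
    x, y = v
    return np.array([-1.5 - x, x - 4.0, -3.0 - y, y - 4.0])

def penalized(v, mu):
    violation = np.maximum(g_mccormick(v), 0.0)
    return f_mccormick(v) + mu * np.sum(violation**2)

def penalty_method(x0, mu=1.0, beta=10.0, outer_iters=6):
    x = np.asarray(x0, dtype=float)
    for _ in range(outer_iters):
        x = minimize(lambda v: penalized(v, mu), x, method='BFGS').x
        mu *= beta
    return x

print(penalty_method([1.0, -1.0]))        # starting point used in the analysis above
```

Each outer pass multiplies the penalty weight $\mu$ by $\beta$, mirroring the roles these two parameters play in the analysis.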
###Code
import sympy as sym
import matplotlib.pyplot as plt
x1 = sym.Symbol('x1')
x2 = sym.Symbol('x2')
f1 = f2 = f = 0
for i in range(1,6):
    f1 = f1 + i*sym.cos((i+1)*x1 + i)
    f2 = f2 + i*sym.cos((i+1)*x2 + i)
# Take the product of the two completed sums (accumulating partial products inside the loop would not match the formula)
f = f1*f2
fig = sym.plotting.plot3d(f,(x1,-10,10),(x2,-10,10), points = 100,title = 'Função de Shubert')
plt.show()
###Output
_____no_output_____
###Markdown
The Shubert function shows a strong presence of stationary points, which makes the search for the global minimum over the feasible region difficult. *Barrier Method* The Barrier Method requires the initial point to lie inside the feasible region with g(x) < 0. The starting point x = [4.55, -6.8], close to the optimal solution, was chosen. The chosen values of the parameters $\mu$ and $\beta$ were 3 and 0.1, respectively. The Barrier method with Quasi-Newton obtained a solution f(4.83, -7.07) = -298.36, while the Barrier method with Conjugate Gradient, under the same conditions, obtained f(-3.02, -2.47) = -15.67. The Barrier method with Conjugate Gradient took around 11.13 s; the Barrier method with Quasi-Newton took 9.81 s. Both obtained solutions far from the desired minimum point, due to the enormous number of local minima of the function. *Penalty Method* The chosen starting point was the same, x = [4.55, -6.8]. The chosen values of the parameters $\mu$ and $\beta$ were 100 and 5, respectively. As before, the Penalty method with Quasi-Newton obtains the solution f(4.83, -7.07) = -298.36 in 3.13 s, while the Penalty method with Conjugate Gradient obtains f(9.51, -6.49) = -25.49. The Quasi-Newton method with Penalty functions achieves the best performance for this optimization problem, even in the face of the difficulty posed by the enormous number of local minima. **7. Rosenbrock Function**Minimize: $f(x,y) = (1-x)^2 + 100(y-x^2)^2$ Subject to: $(x-1)^3 - y + 1 \leq 0 $ $ x+y -2 \leq 0$ ---Solution: f(1.0, 1.0) = 0
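Since the next problem is the constrained Rosenbrock function stated above, here is a minimal sketch of the log-barrier outer loop for those two constraints, using scipy's Nelder-Mead as the inner solver so that infeasible trial points can simply be rejected. Again, this is only an illustrative assumption and not the linked Barrier implementations.

```python
# Illustrative sketch only: log-barrier method for the constrained Rosenbrock problem above.
import numpy as np
from scipy.optimize import minimize

def f_rosen(v):
    x, y = v
    return (1 - x)**2 + 100*(y - x**2)**2

def g_rosen(v):                            # feasible region: g_i(v) <= 0
    x, y = v
    return np.array([(x - 1)**3 - y + 1, x + y - 2])

def barrier_objective(v, mu):
    gv = g_rosen(v)
    if np.any(gv >= 0):                    # outside the strict interior: reject
        return np.inf
    return f_rosen(v) - mu * np.sum(np.log(-gv))

def barrier_method(x0, mu=10.0, beta=0.5, outer_iters=12):
    x = np.asarray(x0, dtype=float)
    for _ in range(outer_iters):
        x = minimize(lambda v: barrier_objective(v, mu), x, method='Nelder-Mead').x
        mu *= beta
    return x

print(barrier_method([0.0, 1.5]))          # an interior starting point: both constraints are negative there
```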
###Code
import sympy as sym
import matplotlib.pyplot as plt
x1 = sym.Symbol('x1')
x2 = sym.Symbol('x2')
f = (1-x1)**2 + 100*(x2 - x1**2)**2
fig = sym.plotting.plot3d(f,(x1,-10,10),(x2,-10,10), points = 100,title = 'Função de Rosenbrock')
plt.show()
###Output
_____no_output_____
###Markdown
Although the Rosenbrock function has no more than one stationary point, the algorithm using the Barrier method often ends up diverging due to ill-conditioning. Starting from the initial point x = [1, 2], close to the optimal solution, the Barrier method with Conjugate Gradient diverges and terminates. For points farther away, for example x = [-0.9, 1], it reaches a local minimum at f(0,0) = 1. The remaining methods were then also analyzed starting from x = [1, 2], and all of them reached the optimal solution. The chosen values of the parameters $\mu$ and $\beta$ were 10 and 0.5, respectively. *Barrier Method* The difficulty in choosing the initial point made the parametrization of the method harder. Performance of the Barrier method with Quasi-Newton:* Response time: 2.46 s* Solution accuracy: 6.8e-4 *Penalty Method* The Penalty method is far more stable than the Barrier method and therefore converges, even if it takes longer, including from distant initial points. The value of $\mu$ was chosen so as to obtain good accuracy in the answer, namely $\mu$ = 1; the chosen parameter $\beta$ was $\beta$ = 10. Performance of the Penalty method with Conjugate Gradient:* Response time: 4.29 s* Solution accuracy: 2.1e-4 Performance of the Penalty method with Quasi-Newton:* Response time: 3.47 s* Solution accuracy: 5.1e-3 Therefore, the method with the best performance in terms of accuracy is the Penalty method with Conjugate Gradient, while the Penalty method with Quasi-Newton has the shortest time. Since the difference in time is small, the Conjugate Gradient variant is the more advantageous. **8. Mishra's Bird**Minimize: $f(x,y) = \sin(y)e^{[1-\cos(x)]^2} + \cos(x)e^{[1-\sin(y)]^2} + (x-y)^2$ Subject to: $(x+5)^2 + (y+5)^2 < 25$ Solution: $f(-3.1302468,-1.5821422) = -106.7645367$
###Code
import sympy as sym
import matplotlib.pyplot as plt
x1 = sym.Symbol('x1')
x2 = sym.Symbol('x2')
f = sym.sin(x2)*sym.exp((1-sym.cos(x1))**2) + sym.cos(x1)*sym.exp((1-sym.sin(x2))**2) + (x1-x2)**2
fig = sym.plotting.plot3d(f,(x1,-10,10),(x2,-10,10), points = 100,title = 'Mishra\'s Bird')
plt.show()
###Output
_____no_output_____ |
marketing_campaign_analysis.ipynb | ###Markdown
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import statistics as st
from scipy.stats import chi2_contingency, ttest_ind, ttest_rel, normaltest, shapiro,probplot
data = pd.read_csv('marketing_campaign.csv', sep='\t')
data.head(10)
data.info()
data['Dt_Customer'] = pd.to_datetime(data['Dt_Customer'],format='%d-%m-%Y')
data['Income'].isna().sum(), data['Income'].isna().sum()/len(data['Income'])
median_no_input = data['Income'].median()
mean_no_input = data['Income'].mean()
median_input = data['Income'].fillna(median_no_input)
mean_input = data['Income'].fillna(mean_no_input)
mean_no_input, mean_input
ttest_ind(data['Income'].dropna(), median_input)
ttest_ind(data['Income'].dropna(), mean_input)
data['Income'] = data['Income'].fillna(data['Income'].mean())
cat_cols = ['Education','Marital_Status']
for i in cat_cols:
data[i] = data[i].astype('category')
data.info()
data.drop(columns=['Z_CostContact','Z_Revenue'], inplace=True)
data['TotalSpent'] = data['MntWines'] + data['MntFruits'] + data['MntFishProducts'] + data['MntMeatProducts'] + data['MntSweetProducts'] + data['MntGoldProds']
data['PromAccepted'] = data['AcceptedCmp1'] + data['AcceptedCmp2'] + data['AcceptedCmp3'] + data['AcceptedCmp4'] + data['AcceptedCmp5'] + data['Response']
data['Age'] = 2021 - data['Year_Birth']
###Output
_____no_output_____
###Markdown
Univariate Analysis Education
###Code
sns.barplot(x=data['Education'].value_counts().index, y=data['Education'].value_counts())
###Output
_____no_output_____
###Markdown
Marital Status
###Code
sns.barplot(x=data['Marital_Status'].value_counts().index, y=data['Marital_Status'].value_counts())
###Output
_____no_output_____
###Markdown
Income
###Code
fig, ax = plt.subplots(1,2, figsize=(10,4))
sns.histplot(data['Income'], ax=ax[0])
sns.histplot(np.log(data['Income']), ax=ax[1])
def find_outliers(dataframe, column:str):
q3 = dataframe[column].quantile(0.75)
q1 = dataframe[column].quantile(0.25)
iqr = q3 - q1
l_whi, u_whi = q1 - (iqr * 1.5), q3 + (iqr * 1.5)
df_outliers = dataframe[(dataframe[column] < l_whi) | (dataframe[column]>u_whi)]
return df_outliers.index
index_out = find_outliers(data, 'Income')
data.drop(index=index_out, inplace=True)
fig, ax = plt.subplots(1,2, figsize=(10,4))
sns.histplot(data['Income'], ax=ax[0])
sns.histplot(np.log(data['Income']), ax=ax[1])
###Output
_____no_output_____
###Markdown
The scipy `normaltest` and `shapiro` tests evaluate the null hypothesis that the observations come from a normal distribution.
###Code
_, p_nt = normaltest(data['Income'])
p_nt
_, p_sh = shapiro(data['Income'])
p_sh
probplot(data['Income'], plot=plt)
###Output
_____no_output_____
###Markdown
There is sufficient evidence to reject the null hypothesis for both tests, i.e. income does not come from a normal distribution. Kidhome
###Code
sns.barplot(data['Kidhome'].value_counts(), data['Kidhome'].value_counts().index, orient='h')
###Output
/usr/local/lib/python3.7/dist-packages/seaborn/_decorators.py:43: FutureWarning: Pass the following variables as keyword args: x, y. From version 0.12, the only valid positional argument will be `data`, and passing other arguments without an explicit keyword will result in an error or misinterpretation.
FutureWarning
###Markdown
Teenhome
###Code
sns.barplot(data['Teenhome'].value_counts(), data['Teenhome'].value_counts().index, orient='h')
###Output
/usr/local/lib/python3.7/dist-packages/seaborn/_decorators.py:43: FutureWarning: Pass the following variables as keyword args: x, y. From version 0.12, the only valid positional argument will be `data`, and passing other arguments without an explicit keyword will result in an error or misinterpretation.
FutureWarning
###Markdown
MntWines Bivariate Analysis Relationship between education and marital status
###Code
tabla_contingencia = pd.crosstab(data['Marital_Status'],data['Education'])
chi2, p, dof, ex = chi2_contingency(tabla_contingencia)
if p <= 0.05:
    print('Reject the null hypothesis: the variables are not independent.')
else:
    print('No evidence to reject the null hypothesis.')
###Output
No evidence to reject the null hypothesis.
###Markdown
Relationship between income and marital status
###Code
sns.relplot(data=data, y='Income', x='Marital_Status')
sns.histplot(data=data, x='Income', hue='Marital_Status')
lista_marit_income = []
name_marit = []
for i in data['Marital_Status'].unique():
lista_marit_income.append(data[data['Marital_Status']==i]['Income'].describe())
name_marit.append(i)
income_marit = pd.concat(lista_marit_income,axis=1)
income_marit.columns = name_marit
income_marit
fig, ax = plt.subplots(1,1)
sns.scatterplot(income_marit.loc['mean'].index,income_marit.loc['mean'].values)
ax.set_ylim(18000,82000)
ax.axhline(data['Income'].mean(), color='k',ls='--',alpha=0.3,label='General mean')
ax.axhline(st.median(data['Income']), color='r',ls='--',alpha=0.3,label='Median')
ax.legend()
###Output
/usr/local/lib/python3.7/dist-packages/seaborn/_decorators.py:43: FutureWarning: Pass the following variables as keyword args: x, y. From version 0.12, the only valid positional argument will be `data`, and passing other arguments without an explicit keyword will result in an error or misinterpretation.
FutureWarning
###Markdown
Relationship between income and education
###Code
sns.relplot(data=data, y='Income', x='Education')
sns.histplot(data=data, x='Income', hue='Education')
lista_educ_income = []
name_educ = []
for i in data['Education'].unique():
lista_educ_income.append(data[data['Education']==i]['Income'].describe())
name_educ.append(i)
income_educ = pd.concat(lista_educ_income,axis=1)
income_educ.columns = name_educ
income_educ
fig, ax = plt.subplots(1,1)
sns.scatterplot(income_educ.loc['mean'].index,income_educ.loc['mean'].values)
ax.set_ylim(18000,82000)
ax.axhline(data['Income'].mean(), color='k',ls='--',alpha=0.3,label='General mean')
ax.axhline(st.median(data['Income']), color='r',ls='--',alpha=0.3,label='Median')
ax.legend()
###Output
/usr/local/lib/python3.7/dist-packages/seaborn/_decorators.py:43: FutureWarning: Pass the following variables as keyword args: x, y. From version 0.12, the only valid positional argument will be `data`, and passing other arguments without an explicit keyword will result in an error or misinterpretation.
FutureWarning
###Markdown
Relationship between wine consumption, income, and education
###Code
sns.relplot(data=data, x='Education',y='MntWines')
sns.scatterplot(data['Income'], data['MntWines'])
sns.relplot(data=data, x='Income', y='MntWines',row='Education')
###Output
_____no_output_____
###Markdown
Relationship between wine consumption, income, and marital status
###Code
sns.relplot(data=data, x='Marital_Status', y='MntWines')
sns.relplot(data=data, y='Income', x='Marital_Status')
sns.relplot(data=data, x='Income', y='MntWines',hue='Marital_Status')
sns.relplot(data=data, x='Income', y='MntWines',row='Marital_Status')
###Output
_____no_output_____
###Markdown
Multivariate analysis
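A minimal multivariate sketch to seed this section, assuming the same `data` frame and the seaborn/matplotlib imports used throughout this notebook:
```python
# Sketch: pairwise Pearson correlations between the numeric columns
# (Income, Kidhome, Teenhome, MntWines, ...), rendered as a heatmap.
corr = data.select_dtypes(include='number').corr()
plt.figure(figsize=(8, 6))
sns.heatmap(corr, annot=True, fmt='.2f', cmap='coolwarm', center=0)
plt.title('Correlation matrix of numeric variables')
plt.show()
```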
###Code
###Output
_____no_output_____ |
Supporting Information/Fig 3a - LSL - KO - 1 Trial.ipynb | ###Markdown
Linear Sturm-Liouville Problem Overview:For details on problem formulation, visit the data folder and view the dataset for System 1.Noise: None (0% $\sigma$)Known Operator? No. Learning GoalsKnowns: $f_j(x)$ forcing functions and observed responses $u_j(x)$ Unknowns: Operator $L$, parametric coefficients $p(x)$ and $q(x)$----------------Input: Observations of $u_j(x)$ and the corresponding forcings $f_j(x)$Output: Operator $L$, including parametric coefficients $p(x)$ and $q(x)$
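For reference, a linear Sturm-Liouville operator acting on the responses is conventionally written (sign and normalization conventions vary, so treat this as a hedged reminder rather than the dataset's exact definition) as $L u_j = -\frac{d}{dx}\!\left(p(x)\,\frac{du_j}{dx}\right) + q(x)\,u_j = f_j(x)$, and the pipeline below regresses the coefficient functions $p(x)$ and $q(x)$ pointwise from the observed pairs $(u_j, f_j)$.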
###Code
%load_ext autoreload
%autoreload 2
# Import Python packages
import pickle
import sys
sys.path.append("..")
# Third-Party Imports
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
# Package Imports
from tools.variables import DependentVariable, IndependentVariable
from tools.term_builder import TermBuilder, build_datapools, NoiseMaker
from tools.differentiator import Differentiator, FiniteDiff
from tools.regressions import *
from tools.NoiseMaker import NoiseMaker
from tools.misc import report_learning_results
from tools.plotter3 import Plotter, compute_coefficients
from tools.Grouper import PointwiseGrouper
from tools.GroupRegressor import GroupRegressor
# Use 0 for seeding random state in data randomization (for consistency)
seed = 0
print("Random seed:", seed)
## Step 1. Load in the data
file_stem = "../data/S1-LSL-"
x_array = pickle.load(open(file_stem +"x.pickle", "rb"))
ode_sols = pickle.load(open(file_stem +"sols.pickle", "rb"))
forcings = pickle.load(open(file_stem + "fs.pickle", "rb"))
sl_coeffs = pickle.load(open(file_stem + "coeffs.pickle", "rb"))
## Step 2. Split data into testing and training
num_trials = 1 # Number of trials to use for observation
sol_train, sol_test, f_train, f_test = train_test_split(ode_sols, forcings, train_size=num_trials, random_state = seed)
## Step 3. Build datapool matrices
# Datapools are a matrix of numerically evaluated terms, which will be used to create each $\Theta^(k)$ to learn $\Xi$
differentiator = Differentiator(diff_order = 2, diff_method = 'FD', ) # finite differences
lhs_term = 'd^{2}u/dx^{2}' # Set LHS term for regression
train_dps = build_datapools(sol_train, differentiator, lhs_term, f_train) # build the matrices using training data
## Step 4. Create group regressor and provide optimization function
# Define the regression function as a lambda function which only expects lists of Thetas, LHSs as inputs
RegFunc = lambda Thetas, LHSs: TrainSGTRidge3(Thetas, LHSs, num_tols = 1, lam = 1e-5, epsilon = 1e-6, normalize = 2)
# Create the group regressor (Uses SGTR regression function above)
groupreg = GroupRegressor(RegFunc, PointwiseGrouper(lhs_term = lhs_term), train_dps, 'x')
%%time
## Step 5. Group regression
groupreg.group_regression(known_vars=['u', 'du/dx', 'f', 'd^{2}u/dx^{2}']) # Regress coefficients
groupreg.report_learning_results() # Report the learned coefficients
### PLOT RESULTS
# X vector from the first ODE solution
x_vector = ode_sols[0].t
# Construct a plotter object with groupreg (which stores results)
pltr = Plotter(groupreg = groupreg, x_vector = x_vector,
dependent_variable='u', true_coeffs = sl_coeffs,
is_sturm_liouville = True, show_legends=False)
# Generate the analysis plots: ODE solutions (first 3), p(x) and q(x), and u_xx model coefficients (entries in Xi)
pltr.generate_analysis_plots(save_stem='3a-KO',
ode_sols=ode_sols,
xi_ylims=[-1.7,3.6],
coeff_ylims=[-1,2.6])
# Show all the plots (pyplot command)
plt.show()
score_interval = [0.1,9.9]
low_idcs = np.where(pltr.true_x_vector > score_interval[0])
high_idcs = np.where(pltr.true_x_vector < score_interval[1])
idcs = np.intersect1d(low_idcs, high_idcs)
p_error = np.linalg.norm(pltr.inferred_phi[idcs] - pltr.p_x[idcs])/np.linalg.norm(pltr.p_x[idcs])
print('L2 p error: %.4f' % (p_error))
q_error = np.linalg.norm(pltr.inferred_q[idcs] - pltr.q_x[idcs])/np.linalg.norm(pltr.q_x[idcs])
print('L2 q error: %.4f' % (q_error))
###Output
L2 p error: 520.6260
L2 q error: 3.1102
|
Chap5-Analog.ipynb | ###Markdown
Imports
###Code
%load_ext autoreload
%autoreload 2
%matplotlib notebook
import matplotlib.pyplot as plt
import global_params as G
plt.rcParams['figure.figsize'] = G.SMALL_FIG
import numpy as np
from numpy import pi
np.set_printoptions(precision=4, suppress=True)
import wavegen as waves
from freq_domain import plotspec
from utilities import power, detect_env
from scipy import signal
###Output
_____no_output_____
###Markdown
Envelope
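The cells below hard-code the filter group delay as `gd = 49`. As the inline comments note, it can be computed instead of assumed; a hedged sketch with `scipy.signal.group_delay` (for a symmetric, linear-phase FIR of length N the delay is simply (N-1)/2 samples, i.e. 49 here):
```python
# Sketch: compute the group delay of the 99-tap low-pass FIR used below.
from scipy import signal

b = signal.firls(99, [0, 0.05, 0.1, 1], [1, 1, 0, 0])
w, gd_samples = signal.group_delay((b, 1))
print(int(round(gd_samples.mean())))  # ~49 samples for this linear-phase filter
```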
###Code
dur = 0.33
Ts = 1/10000
t = np.arange(0, dur, Ts)
# Fast signal
fc = 1000
c = np.cos(2*pi*fc*t)
# Modulated with a decaying slow wave
fm = 10
w = np.cos(2*pi*fm*t)*np.exp(-5*t) + 0.5
x = c*w
# Low pass filter
fbe = [0, 0.05, 0.1, 1]
damps = [1, 1, 0, 0]
b = signal.firls(99, fbe, damps)
gd = 49 # Can be computed using signal.group_delay
envx = (pi/2)*signal.lfilter(b, 1, np.abs(x))
plt.plot(t, x)
plt.plot(t[:-gd], envx[gd:]); # Env shifted by group delay to align with signal
# Fast signal
fc = 1000
c = np.cos(2*pi*fc*t)
# Modulated with a decaying slow wave
fm = 10
w = np.cos(2*pi*fm*t)*np.exp(-5*t) + 0.5
x = c*w
# Low pass filter
fbe = [0, 0.05, 0.1, 1]
damps = [1, 1, 0, 0]
b = signal.firls(99, fbe, damps)
gd = 49 # Can be computed using signal.group_delay
x_rect = np.maximum(x, 0) # Use a rectifier instead of abs()
envx = (pi)*signal.lfilter(b, 1, x_rect)
plt.plot(t, x)
plt.plot(t[:-gd], envx[gd:]); # Env shifted by group delay to align with signal
###Output
_____no_output_____
###Markdown
Amplitude Modulation with Large Carrier
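A brief reminder of what the cell below implements (hedged, since the book's exact notation may differ): with a large carrier the transmitted signal is $v(t) = \big(1 + w(t)\big)\cos(2\pi f_c t)$, so as long as $1 + w(t) > 0$ the envelope of $v(t)$ equals $1 + w(t)$, and the message is recovered by envelope detection followed by removing the constant offset, which is why the plots compare `envv - 1` against `w`.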
###Code
# Carrier
fc = 1000;
phase = 0; g = 0;
c = np.cos(2*pi*(fc+g)*t + phase)
# Message
fm = 20; w = 10*np.arange(len(t))/len(t) + np.cos(2*np.pi*fm*t)
v = c*w + c
# Modulation
envv, gd = detect_env(v)
plt.figure(figsize=G.FIGSIZE)
plt.subplot(121)
plt.plot(t, v, 'r')
plt.plot(t, 1+w, 'k')
plt.plot(t[:-gd], envv[gd:]);
plt.subplot(122)
plt.plot(t, w, 'k')
plt.plot(t[:-gd], envv[gd:]-1, 'r');
plt.title('Recovered msg vs orig');
###Output
_____no_output_____
###Markdown
AM with Suppressed Carrier
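A short note on the factor of 2 in the receiver below (a standard trigonometric identity, stated as a reminder): mixing the received $v(t) = w(t)\cos(2\pi f_c t)$ with a synchronized local carrier gives $x(t) = w(t)\cos^2(2\pi f_c t) = \tfrac{1}{2}w(t) + \tfrac{1}{2}w(t)\cos(4\pi f_c t)$; the low-pass filter removes the component at $2 f_c$, leaving $\tfrac{1}{2}w(t)$, so the code scales the filter output by 2 to recover $w(t)$.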
###Code
dur = 1.0
Ts = 1/10000
t = np.arange(0, dur, Ts)
## Transmitter
# Carrier
fc = 1000;
c = np.cos(2*pi*fc*t)
# Message
fm = 20
w = 5*np.arange(len(t))/len(t) + np.cos(2*pi*fm*t)
v = c*w
## Receiver
gamma = 0
phi = 0
c2 = np.cos(2*pi*(fc+gamma)*t + phi)
x = v*c2
#LPF
fbe = [0, 0.1, 0.2, 1]
damps = [1, 1, 0, 0]
taps = 99; gd = 49
b = signal.firls(taps, fbe, damps)
m = 2*signal.lfilter(b, 1, x)
plt.plot(t, w, 'k')
plt.plot(t[:-gd], m[gd:], 'r');
plotspec(m, Ts);
###Output
_____no_output_____ |
project-python-aplication-snmp/python-application-SNMP-2.ipynb | ###Markdown
Writing programs that talk to SNMP, morning session 2
###Code
pip install pysnmp
from pysnmp.entity.rfc3413.oneliner import cmdgen
cmdGen = cmdgen.CommandGenerator()
def snmp(ip, community, oid):
errorIndication, errorStatus, errorIndex, varBinds = cmdGen.getCmd(
cmdgen.CommunityData(community),
cmdgen.UdpTransportTarget((ip, 161)),
oid,
lookupNames=True,
lookupValues=True
)
name, value = varBinds[0]
print("name : ",name)
print("value : ", value)
print("print : ", value.prettyPrint())
print("prettyprint : ", name.prettyPrint())
snmp("10.10.2.20","YRUMRTG", "1.3.6.1.2.1.1.3.0") #linux
snmp("10.10.19.55","test", "1.3.6.1.2.1.1.3.0") # switch
# device entries for looping over multiple targets
no1 = {
'ip':"10.10.2.19",
'community':"YRUMRTG",
'oid':"1.3.6.1.2.1.1.3.0"
}
no2 = {
'ip':"10.10.2.19",
'community':"test",
'oid':"1.3.6.1.2.1.1.3.0"
}
no2.keys()
machine = [no1, no2]
machine[0]
no1['ip'], no1['community'], no1['oid']
print (no1['ip'], no1['community'], no1['oid'])
oid = "1.3.6.1.2.1.1.3.0"
for i in machine:
try:
snmp(i['ip'], i['community'],oid)
except:
print("ERROR")
def runtime(tmp):
time = tmp // 100
day = time // 86400
t2 = time - (day * 86400)
hour = t2 // 3600
t3 = t2 - (hour * 3600)
min = t3 // 60
ms = time % 60
subms = time % 100
    dt = str(day) + ' days ' + str(hour) + ':' + str(min) + ':' + str(ms) + '.' + str(subms)
return dt
runtime(856855595)
from pysnmp.entity.rfc3413.oneliner import cmdgen
cmdGen = cmdgen.CommandGenerator()
from datetime import datetime
import os
def convertTime(originalTime):
time = originalTime // 100
day = time // 86400
t2 = time - (day * 86400)
hour = t2 // 3600
t3 = t2 - (hour * 3600)
min = t3 // 60
ms = time % 60
subms = time % 100
showTime = str(day) + ' Days, ' + str(hour) + ':' + str(min) + ':' + str(ms) + '.' + str(subms)
return showTime
server1 = ["10.10.2.19",'161',"YRUMRTG"] # linux
switch1 = ["10.10.19.55",'161',"test"] # switch
linux1 = ["10.10.2.20",'161',"YRUMRTG"]
linux2 = ["10.10.2.21",'161',"YRUMRTG"]
serverList = [server1, switch1, linux1, linux2]
uptimeName = ['System Uptime']
uptime = ['1.3.6.1.2.1.1.3.0']
LoadName = ['1 minute Load',
'5 minute Load',
'15 minute Load']
Load = ['1.3.6.1.4.1.2021.10.1.3.1',
'1.3.6.1.4.1.2021.10.1.3.2',
'1.3.6.1.4.1.2021.10.1.3.3']
CPUName = ['Percentage of user CPU time',
'Raw user cpu time',
'Percentages of system CPU time',
'Raw system cpu time',
'Percentages of idle CPU time',
'Raw idle cpu time',
'Raw nice cpu time']
CPU = ['1.3.6.1.4.1.2021.11.9.0',
'1.3.6.1.4.1.2021.11.50.0',
'1.3.6.1.4.1.2021.11.10.0',
'1.3.6.1.4.1.2021.11.52.0',
'1.3.6.1.4.1.2021.11.11.0',
'1.3.6.1.4.1.2021.11.53.0',
'1.3.6.1.4.1.2021.11.51.0']
memoryName = ['Total Swap Size',
'Available Swap Space',
'Total RAM in machine',
'Total RAM used',
'Total RAM Free',
'Total RAM Buffered',
'Total Cached Memory']
memory = ['1.3.6.1.4.1.2021.4.3.0',
'1.3.6.1.4.1.2021.4.4.0',
'1.3.6.1.4.1.2021.4.5.0',
'1.3.6.1.4.1.2021.4.6.0',
'1.3.6.1.4.1.2021.4.11.0',
'1.3.6.1.4.1.2021.4.14.0',
'1.3.6.1.4.1.2021.4.15.0']
oidSequenceList = [uptimeName,uptime,
LoadName,Load,
CPUName,CPU,
memoryName,memory]
for server in serverList:
SNMP_HOST = server[0]
SNMP_PORT = server[1]
SNMP_COMMUNITY = server[2]
snmpData = [SNMP_HOST]
print("="*28)
response = os.system("ping -c 1 " + snmpData[0])
print(SNMP_HOST,SNMP_PORT,SNMP_COMMUNITY, response)
if response != 0:
for i in range(0, len(oidSequenceList) - 1, 2):
oidNameList = oidSequenceList[i]
oidList = oidSequenceList[i + 1]
for j in range(len(oidList)):
cmdGen = cmdgen.CommandGenerator()
errorIndication, errorStatus, errorIndex, varBinds = cmdGen.getCmd(
cmdgen.CommunityData(SNMP_COMMUNITY),
cmdgen.UdpTransportTarget((SNMP_HOST, SNMP_PORT)),
oidList[j])
if errorIndication:
print(errorIndication)
break
else:
if errorStatus:
print('%s at %s' % (
errorStatus.prettyPrint(), errorIndex and varBinds[int(errorIndex) - 1] or '?'))
else:
for name, val in varBinds:
if oidList[j] == '1.3.6.1.2.1.1.3.0':
convert_time = str(val)
showTime = convertTime(int(convert_time))
print(oidNameList[j], '=', showTime)
snmpData.append(showTime)
else:
print(oidNameList[j], '=', val)
snmpData.append(str(val))
print(snmpData)
else:
for i in range(1, 19):
snmpData.append("NA")
print(snmpData)
from time import ctime
print(ctime())
from pysnmp.entity.rfc3413.oneliner import cmdgen
cmdGen = cmdgen.CommandGenerator()
from datetime import datetime
import os
def convertTime(originalTime):
time = originalTime // 100
day = time // 86400
t2 = time - (day * 86400)
hour = t2 // 3600
t3 = t2 - (hour * 3600)
min = t3 // 60
ms = time % 60
subms = time % 100
showTime = str(day) + ' Days, ' + str(hour) + ':' + str(min) + ':' + str(ms) + '.' + str(subms)
return showTime
def main():
tmp = ""
Linux1 = ["10.10.2.19",'161',"YRUMRTG"] # linux
Switch1 = ["10.10.19.55",'161',"test"] # switch
Linux2 = ["10.10.2.20",'161',"YRUMRTG"] # Linux
Linux3 = ["10.10.2.21",'161',"YRUMRTG"] # Linux
Linux4 = ["10.10.2.200",'161',"YRUMRTG"] # Linux
serverList = [Linux1, Linux2, Linux3,Linux4, Switch1]
uptimeName = ['System Uptime']
uptime = ['1.3.6.1.2.1.1.3.0']
LoadName = ['1 minute Load',
'5 minute Load',
'15 minute Load']
Load = ['1.3.6.1.4.1.2021.10.1.3.1',
'1.3.6.1.4.1.2021.10.1.3.2',
'1.3.6.1.4.1.2021.10.1.3.3']
CPUName = ['Percentage of user CPU time',
'Raw user cpu time',
'Percentages of system CPU time',
'Raw system cpu time',
'Percentages of idle CPU time',
'Raw idle cpu time',
'Raw nice cpu time']
CPU = ['1.3.6.1.4.1.2021.11.9.0',
'1.3.6.1.4.1.2021.11.50.0',
'1.3.6.1.4.1.2021.11.10.0',
'1.3.6.1.4.1.2021.11.52.0',
'1.3.6.1.4.1.2021.11.11.0',
'1.3.6.1.4.1.2021.11.53.0',
'1.3.6.1.4.1.2021.11.51.0']
memoryName = ['Total Swap Size',
'Available Swap Space',
'Total RAM in machine',
'Total RAM used',
'Total RAM Free',
'Total RAM Buffered',
'Total Cached Memory']
memory = ['1.3.6.1.4.1.2021.4.3.0',
'1.3.6.1.4.1.2021.4.4.0',
'1.3.6.1.4.1.2021.4.5.0',
'1.3.6.1.4.1.2021.4.6.0',
'1.3.6.1.4.1.2021.4.11.0',
'1.3.6.1.4.1.2021.4.14.0',
'1.3.6.1.4.1.2021.4.15.0']
oidSequenceList = [uptimeName,uptime,
LoadName,Load,
CPUName,CPU,
memoryName,memory]
for server in serverList:
SNMP_HOST = server[0]
SNMP_PORT = server[1]
SNMP_COMMUNITY = server[2]
snmpData = [SNMP_HOST]
print("="*28)
response = os.system("ping -c 1 " + snmpData[0])
print(SNMP_HOST,SNMP_PORT,SNMP_COMMUNITY, response)
if response != 0:
for i in range(0, len(oidSequenceList) - 1, 2):
oidNameList = oidSequenceList[i]
oidList = oidSequenceList[i + 1]
for j in range(len(oidList)):
cmdGen = cmdgen.CommandGenerator()
errorIndication, errorStatus, errorIndex, varBinds = cmdGen.getCmd(
cmdgen.CommunityData(SNMP_COMMUNITY),
cmdgen.UdpTransportTarget((SNMP_HOST, SNMP_PORT)),
oidList[j])
if errorIndication:
print(errorIndication)
break
else:
if errorStatus:
print('%s at %s' % (
errorStatus.prettyPrint(), errorIndex and varBinds[int(errorIndex) - 1] or '?'))
else:
for name, val in varBinds:
if oidList[j] == '1.3.6.1.2.1.1.3.0':
convert_time = str(val)
showTime = convertTime(int(convert_time))
print(oidNameList[j], '=', showTime)
snmpData.append(showTime)
else:
print(oidNameList[j], '=', val)
snmpData.append(str(val))
tmp = str(snmpData)
tmp = tmp.replace("', '","|")[2:-2]
open("log.txt","a").write(tmp+"\n")
#print(snmpData)
else:
for i in range(1, 19):
snmpData.append("NA")
open("log.txt","a").write(tmp+"\n")
from time import sleep
for i in range(10):
main()
sleep(1)
###Output
============================
10.10.2.19 161 YRUMRTG 0
============================
10.10.2.20 161 YRUMRTG 0
============================
10.10.2.21 161 YRUMRTG 0
============================
10.10.2.200 161 YRUMRTG 512
No SNMP response received before timeout
No SNMP response received before timeout
No SNMP response received before timeout
|
Python introduction/homework/for_while_and_if_homework.ipynb | ###Markdown
Loops and conditions in Python Homework Task 1* Given an array of students in a class* Count the number of students* Print "yes" if there is a student named Oleg in the class, "no" otherwise* Count the number of students whose names start with the letter A
###Code
classroom = ["Andriy" , "Artem" , "Axel" , "Olena" , "Ihor"]
lenghth_classroom = 0
lenghth_classroom_a = 0
name = "Andriy"
print(name[0])
for i in classroom:
lenghth_classroom = lenghth_classroom + 1
print(lenghth_classroom)
#for x in [classroom]:
# if "Oleg" in x:
# print("yes")
isOlegIn = "Oleg" in classroom
print(isOlegIn)
for x in classroom:
    if x.startswith("A"):
        lenghth_classroom_a = lenghth_classroom_a + 1
print (lenghth_classroom_a)
###Output
A
5
False
3
###Markdown
Task 2* Given a string* Print the first word and the last one
###Code
message = "Бьякуган","Шарінган", "Рінеган"
print(message[0])
print(message[-1])
# message[-1] - the last element
###Output
Бьякуган
Рінеган
###Markdown
Task 3* There are 2 numbers * compare which of them is larger* maybe they are equal?* check 3 different cases
###Code
# first case
a = 5
c = 5.5
if a < c:
print(a < c)
if a > c:
print(a > c)
if a == c:
print(a == c)
# second case
b = 10
g = 10
if b < g:
print(b < g)
if b > g:
print(b > g)
if b == g:
print(b == g)
# third case
p = 3.14
pi = 3.1415
if p < pi:
print(p < pi)
if p > pi:
print(p > pi)
if p == pi:
print(p == pi)
###Output
True
|
sagemaker/sm-special-webinar/lab_3_pipeline/Advanced/5.1.All-Pipeline.ipynb | ###Markdown
[Module 5.1] Developing an Advanced Model Building Pipeline This notebook follows the table of contents below. Running everything end to end takes **about 30 minutes**.- 1. Orchestrating the model build with a SageMaker model building pipeline- 2. Pipeline developer guide- 3. Loading basic libraries- 4. Creating the steps of the model building pipeline - 4.1 Creating model building pipeline variables - 4.2 Defining caching - 4.3 Defining the preprocessing step - 4.4 Defining the training step for model training - 4.5 Model evaluation step - 4.6 Model registration step - 4.7 Creating the SageMaker model step - 4.8 HPO step - 4.9 Condition step- 5. Combining parameters, steps, and conditions to define and run the final pipeline- 6. Checking the execution in SageMaker Studio- 7. Running the pipeline with caching and parameters- 8. Lineage --- 1. Orchestrating the model build with a SageMaker model building pipeline Amazon SageMaker Model Building Pipelines gives data scientists and engineers who develop machine learning workflows the ability to orchestrate SageMaker jobs and reproducible machine learning pipelines. It also provides the ability to deploy custom-built models to a real-time inference environment or to a batch-transform inference environment, and to track the lineage of the generated artifacts. With these capabilities you can deploy model artifacts, deploy and monitor workflows in production, track artifact lineage through a simple interface, and adopt best practices for machine learning application development, giving you a more stable operating environment for machine learning applications. The SageMaker Pipelines service supports a SageMaker Pipeline DSL (Domain Specific Language) implemented as a JSON declaration. This DSL defines the pipeline parameters and a DAG (Directed Acyclic Graph) of SageMaker job steps. The SageMaker Python SDK makes generating this pipeline DSL much simpler. 2. Pipeline developer guide Introduction to SageMaker Pipelines SageMaker Pipelines supports the following features, some of which are covered in this lab_03_pipeline. * Processing job steps - SageMaker's managed capability for running data processing workloads, mainly used for feature engineering, data validation, model evaluation, and model interpretation * Training job steps - training jobs, i.e., teaching a model to make predictions from a training dataset * Conditional execution steps - conditional branching, used to branch the pipeline.* Register model steps - registering a trained model package resource in the model registry for later deployment * Create model steps - creating a model for a real-time inference endpoint or for batch inference * Transform job steps - batch inference jobs; a step that uses batch jobs to preprocess a dataset (for example removing noise or bias) and to run inference over large amounts of data* Pipelines - the workflow DAG, holding the steps and conditions that coordinate SageMaker jobs and resource creation* Parametrized Pipeline executions - changing how the pipeline runs according to specific parameters - See below for the detailed developer guide. - [Developer guide for SageMaker model building pipelines](https://docs.aws.amazon.com/sagemaker/latest/dg/pipelines.html) 3. Loading basic libraries Load the SageMaker-related libraries.
###Code
import boto3
import sagemaker
import pandas as pd
import os
sagemaker_session = sagemaker.session.Session()
role = sagemaker.get_execution_role()
sm_client = boto3.client("sagemaker")
###Output
_____no_output_____
###Markdown
3.1 Loading notebook variables Check the stored variables.
###Code
%store
###Output
Stored variables and their in-db values:
bucket -> 'sagemaker-us-east-1-051065130547'
claims_data_uri -> 's3://sagemaker-us-east-1-051065130547/sagemaker-w
customers_data_uri -> 's3://sagemaker-us-east-1-051065130547/sagemaker-w
image_uri -> '683313688378.dkr.ecr.us-east-1.amazonaws.com/sage
input_data_uri -> 's3://sagemaker-us-east-1-051065130547/sagemaker-w
input_preproc_data_uri -> 's3://sagemaker-us-east-1-051065130547/sagemaker-w
project_prefix -> 'sagemaker-webinar-pipeline-advanced'
test_preproc_data_uri -> 's3://sagemaker-us-east-1-051065130547/sagemaker-w
train_model_artifact -> 's3://sagemaker-us-east-1-051065130547/sagemaker-w
train_preproc_data_uri -> 's3://sagemaker-us-east-1-051065130547/sagemaker-w
###Markdown
Load the variables saved in the earlier notebooks.
###Code
%store -r
###Output
_____no_output_____
###Markdown
4. Creating the steps of the model building pipeline 4.1 Creating model building pipeline variables The parameters used in this notebook are as follows.* `processing_instance_type` - the `ml.*` instance type to use for the processing job * `processing_instance_count` - the number of instances to use for the processing job * `training_instance_type` - the `ml.*` instance type to use for the training job* `model_approval_status` - the approval status used when registering the trained model for CI/CD purposes (the default is "PendingManualApproval")* `input_data` - the S3 bucket URI of the input data. The variables used by each step of the pipeline are defined as parameter variables.
###Code
from sagemaker.workflow.parameters import (
ParameterInteger,
ParameterString,
ParameterFloat,
)
processing_instance_count = ParameterInteger(
name="ProcessingInstanceCount",
default_value=1
)
processing_instance_type = ParameterString(
name="ProcessingInstanceType",
default_value="ml.m5.xlarge"
)
training_instance_type = ParameterString(
name="TrainingInstanceType",
default_value="ml.m5.xlarge"
)
training_instance_count = ParameterInteger(
name="TrainingInstanceCount",
default_value=1
)
model_eval_threshold = ParameterFloat(
name="model2eval2threshold",
default_value=0.85
)
input_data = ParameterString(
name="InputData",
default_value=input_data_uri,
)
model_approval_status = ParameterString(
name="ModelApprovalStatus", default_value="PendingManualApproval"
)
###Output
_____no_output_____
###Markdown
4.2 Defining caching- Reference: caching pipeline steps: [Caching Pipeline Steps](https://docs.aws.amazon.com/ko_kr/sagemaker/latest/dg/pipelines-caching.html)
###Code
from sagemaker.workflow.steps import CacheConfig
cache_config = CacheConfig(enable_caching=True,
expire_after="7d")
###Output
_____no_output_____
###Markdown
4.3 Defining the preprocessing step
###Code
from sagemaker.sklearn.processing import SKLearnProcessor
split_rate = 0.2
framework_version = "0.23-1"
sklearn_processor = SKLearnProcessor(
framework_version=framework_version,
instance_type=processing_instance_type,
instance_count=processing_instance_count,
base_job_name="sklearn-fraud-process",
role=role,
)
print("input_data: \n", input_data)
from sagemaker.processing import ProcessingInput, ProcessingOutput
from sagemaker.workflow.steps import ProcessingStep
step_process = ProcessingStep(
name="Fraud-Advance-Preprocess",
processor=sklearn_processor,
inputs=[
ProcessingInput(source=input_data, destination='/opt/ml/processing/input'),
],
outputs=[ProcessingOutput(output_name="train",
source='/opt/ml/processing/output/train'),
ProcessingOutput(output_name="test",
source='/opt/ml/processing/output/test')],
job_arguments=["--split_rate", f"{split_rate}"],
code= 'src/preprocessing.py',
    cache_config = cache_config, # cache definition
)
###Output
_____no_output_____
###Markdown
4.4 Defining the training step for model training Hyperparameter settings
###Code
base_hyperparameters = {
"scale_pos_weight" : "29",
"max_depth": "6",
"alpha" : "0",
"eta": "0.3",
"min_child_weight": "1",
"objective": "binary:logistic",
"num_round": "100",
}
###Output
_____no_output_____
###Markdown
Creating the Estimator Creating an Estimator requires several arguments; only the main ones are covered here.- the user training code "xgboost_script.py"- the path "estimator_output_path" for the model artifact produced when training finishes. If it is not specified, the artifact is stored under the default path.
###Code
from sagemaker.xgboost.estimator import XGBoost
estimator_output_path = f's3://{bucket}/{project_prefix}/training_jobs'
print("estimator_output_path: \n", estimator_output_path)
xgb_train = XGBoost(
entry_point = "xgboost_script.py",
source_dir = "src",
output_path = estimator_output_path,
hyperparameters = base_hyperparameters,
role = role,
instance_count = training_instance_count,
instance_type = training_instance_type,
framework_version = "1.0-1")
###Output
estimator_output_path:
s3://sagemaker-us-east-1-051065130547/sagemaker-webinar-pipeline-advanced/training_jobs
###Markdown
Creating the model training step When creating the step, pass in the Estimator created above and, as the input data, the S3 path where the preprocessed data is stored. The training input is the output of the preceding preprocessing step.- `step_process.properties.ProcessingOutputConfig.Outputs["train"].S3Output.S3Uri`
###Code
from sagemaker.inputs import TrainingInput
from sagemaker.workflow.steps import TrainingStep
step_train = TrainingStep(
name= "Fraud-Advance-Train",
estimator=xgb_train,
inputs={
"train": TrainingInput(
s3_data=step_process.properties.ProcessingOutputConfig.Outputs[
"train"
].S3Output.S3Uri,
# s3_data= train_preproc_dir_artifact,
content_type="text/csv"
),
},
    cache_config = cache_config, # cache definition
)
###Output
/home/ec2-user/anaconda3/envs/python3/lib/python3.6/site-packages/sagemaker/workflow/steps.py:283: UserWarning: Profiling is enabled on the provided estimator. The default profiler rule includes a timestamp which will change each time the pipeline is upserted, causing cache misses. If profiling is not needed, set disable_profiler to True on the estimator.
warnings.warn(msg)
###Markdown
4.5 Model evaluation step Creating the SKLearn Processor Creating the SKLearn Processor requires several arguments.
###Code
from sagemaker.processing import ScriptProcessor
script_eval = SKLearnProcessor(
framework_version= "0.23-1",
role=role,
instance_type=processing_instance_type,
instance_count=1,
base_job_name="script-fraud-scratch-eval",
)
###Output
_____no_output_____
###Markdown
Defining the property file- A PropertyFile is used to describe the contents of the model evaluation metrics file produced after the step_eval step has run.```Format: <property file instance> = PropertyFile( name="<name>", output_name="<processing output name>", path="<file name>")Example:evaluation_report = PropertyFile( name="EvaluationReport", output_name="evaluation", path="evaluation.json")```- The PropertyFile above has output_name="evaluation" and the file name is "evaluation.json". The "evaluation.json" file stores the values below.``` report_dict = { "binary_classification_metrics": { "auc": { "value": <auc value>, "standard_deviation" : "NaN", }, }, }```- Ultimately, the auc value inside evaluation.json is used later (in the condition step).- When step_eval runs, `evaluation.json` is saved to S3. Reference- Reference material: [Property Files and JsonGet](https://docs.aws.amazon.com/ko_kr/sagemaker/latest/dg/build-and-manage-propertyfile.html)
###Code
from sagemaker.workflow.properties import PropertyFile
from sagemaker.workflow.steps import ProcessingStep
from sagemaker.workflow.properties import PropertyFile
evaluation_report = PropertyFile(
name="EvaluationReport",
output_name="evaluation",
path="evaluation.json"
)
###Output
_____no_output_____
###Markdown
Defining the model evaluation step
###Code
step_eval = ProcessingStep(
name= "Fraud-Advance-Evaluation",
processor=script_eval,
inputs=[
ProcessingInput(
source= step_train.properties.ModelArtifacts.S3ModelArtifacts,
destination="/opt/ml/processing/model"
),
ProcessingInput(
source=step_process.properties.ProcessingOutputConfig.Outputs[
"test"
].S3Output.S3Uri,
destination="/opt/ml/processing/test"
)
],
outputs=[
ProcessingOutput(output_name="evaluation", source="/opt/ml/processing/evaluation"),
],
code="src/evaluation.py",
    cache_config = cache_config, # cache definition
    property_files=[evaluation_report], # note: at the time of writing, adding this line caused an error
)
###Output
_____no_output_____
###Markdown
4.6 Model registration step Creating a model group- References - model group listing API: [ListModelPackageGroups](https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_ListModelPackageGroups.html) - registering model metrics: [Model Quality Metrics](https://docs.aws.amazon.com/ko_kr/sagemaker/latest/dg/model-monitor-model-quality-metrics.html)
###Code
model_package_group_name = f"{project_prefix}"
model_package_group_input_dict = {
"ModelPackageGroupName" : model_package_group_name,
"ModelPackageGroupDescription" : "Sample model package group"
}
response = sm_client.list_model_package_groups(NameContains=model_package_group_name)
if len(response['ModelPackageGroupSummaryList']) == 0:
print("No model group exists")
print("Create model group")
create_model_pacakge_group_response = sm_client.create_model_package_group(**model_package_group_input_dict)
print('ModelPackageGroup Arn : {}'.format(create_model_pacakge_group_response['ModelPackageGroupArn']))
else:
print(f"{model_package_group_name} exitss")
###Output
sagemaker-webinar-pipeline-advanced exitss
###Markdown
Defining the model evaluation metrics
###Code
from sagemaker.workflow.step_collections import RegisterModel
from sagemaker.model_metrics import MetricsSource, ModelMetrics
model_metrics = ModelMetrics(
model_statistics=MetricsSource(
s3_uri="{}/evaluation.json".format(
step_eval.arguments["ProcessingOutputConfig"]["Outputs"][0]["S3Output"]["S3Uri"]
),
content_type="application/json"
)
)
###Output
_____no_output_____
###Markdown
Defining the model registration step
###Code
step_register = RegisterModel(
name= "Fraud-Advance-Model_Register",
estimator=xgb_train,
image_uri= step_train.properties.AlgorithmSpecification.TrainingImage,
model_data= step_train.properties.ModelArtifacts.S3ModelArtifacts,
content_types=["text/csv"],
response_types=["text/csv"],
inference_instances=["ml.t2.medium", "ml.m5.xlarge"],
transform_instances=["ml.m5.xlarge"],
model_package_group_name=model_package_group_name,
approval_status=model_approval_status,
model_metrics=model_metrics,
)
###Output
_____no_output_____
###Markdown
4.7 Creating the SageMaker model step- The two parameters below take their inputs from the results of the previous step. - image_uri= step_train.properties.AlgorithmSpecification.TrainingImage, - model_data= step_train.properties.ModelArtifacts.S3ModelArtifacts, Creating the SageMaker model
###Code
from sagemaker.model import Model
model = Model(
image_uri= step_train.properties.AlgorithmSpecification.TrainingImage,
model_data= step_train.properties.ModelArtifacts.S3ModelArtifacts,
sagemaker_session=sagemaker_session,
role=role,
)
###Output
_____no_output_____
###Markdown
Defining the SageMaker model step
###Code
from sagemaker.inputs import CreateModelInput
from sagemaker.workflow.steps import CreateModelStep
inputs = CreateModelInput(
instance_type="ml.m5.large",
# accelerator_type="ml.eia1.medium",
)
step_create_model = CreateModelStep(
name= "Fraud-Advance-Create-SageMaker-Model",
model=model,
inputs=inputs,
)
###Output
_____no_output_____
###Markdown
4.8 HPO step Setting the ranges of the hyperparameters to tune Here we tune `eta, min_child_weight, alpha, max_depth`.
###Code
from sagemaker.tuner import (
IntegerParameter,
CategoricalParameter,
ContinuousParameter,
HyperparameterTuner,
)
hyperparameter_ranges = {
"eta": ContinuousParameter(0, 1),
"min_child_weight": ContinuousParameter(1, 10),
"alpha": ContinuousParameter(0, 2),
"max_depth": IntegerParameter(1, 10),
}
###Output
_____no_output_____
###Markdown
Configuring and creating the tuner- the `xgb_train` estimator defined above- `objective_metric_name = "validation:auc"`: the metric to optimize; this metric must be defined and logged in the training code.- `hyperparameter_ranges`: the ranges of the parameters to tune- `max_jobs`: the total number of training jobs- `max_parallel_jobs`: the number of training jobs to run in parallel (errors can occur depending on resource limits; reduce this value in that case).
###Code
objective_metric_name = "validation:auc"
tuner = HyperparameterTuner(
xgb_train, objective_metric_name, hyperparameter_ranges,
max_jobs=5,
max_parallel_jobs=5,
)
###Output
_____no_output_____
###Markdown
Defining the tuning step
###Code
from sagemaker.workflow.steps import TuningStep
step_tuning = TuningStep(
name = "Fraud-Advance-HPO",
tuner = tuner,
inputs={
"train": TrainingInput(
s3_data=step_process.properties.ProcessingOutputConfig.Outputs[
"train"
].S3Output.S3Uri,
# s3_data= train_preproc_dir_artifact,
content_type="text/csv"
),
},
    cache_config = cache_config, # cache definition
)
###Output
_____no_output_____
###Markdown
4.9 Condition step Defining the condition step- The ConditionLessThanOrEqualTo used in the condition step loads evaluation.json and inspects its contents```Format:cond_lte = ConditionLessThanOrEqualTo( left=JsonGet( step=step_eval, property_file=<property file instance>, json_path="test_metrics.roc.value", ), right=6.0)Example:cond_lte = ConditionLessThanOrEqualTo( left=JsonGet( step=step_eval, property_file=evaluation_report, json_path="binary_classification_metrics.auc.value", ), right=6.0)```- property_file=evaluation_report uses the PropertyFile evaluation_report defined in the model evaluation step above. It means that the value of "binary_classification_metrics.auc.value" inside the evaluation.json file defined by evaluation_report is used.
###Code
from sagemaker.workflow.conditions import ConditionLessThanOrEqualTo
from sagemaker.workflow.condition_step import (
ConditionStep,
JsonGet,
)
cond_lte = ConditionLessThanOrEqualTo(
left=JsonGet(
step=step_eval,
property_file=evaluation_report,
json_path="binary_classification_metrics.auc.value",
),
# right=8.0
right = model_eval_threshold
)
step_cond = ConditionStep(
name= "Fraud-Advance-Condition",
conditions=[cond_lte],
if_steps=[step_tuning],
else_steps=[step_register, step_create_model],
)
###Output
The class JsonGet has been renamed in sagemaker>=2.
See: https://sagemaker.readthedocs.io/en/stable/v2.html for details.
###Markdown
5. Combining parameters, steps, and conditions to define and run the final pipeline Now we combine the steps created so far into a single pipeline and run it. A pipeline requires the name, parameters, and steps attributes, and the pipeline name must be unique within an (account, region) combination. We also add an Experiment configuration here and register the run with the experiment. Note:- every parameter used in the definition must exist.- the steps passed to the pipeline are independent of execution order; SageMaker Pipelines resolves the dependencies so that each step can run and complete. 5.1 Defining the pipeline The pipeline is defined from the four steps defined above.- steps=[step_process, step_train, step_eval, step_cond],- the run below takes about 20 minutes.
###Code
from sagemaker.workflow.pipeline import Pipeline
project_prefix = 'sagemaker-pipeline-phase2-step-by-step'
pipeline_name = project_prefix
pipeline = Pipeline(
name=pipeline_name,
parameters=[
processing_instance_type,
processing_instance_count,
training_instance_type,
training_instance_count,
input_data,
model_eval_threshold,
model_approval_status,
],
# steps=[step_process, step_train, step_register, step_eval, step_cond],
steps=[step_process, step_train, step_eval, step_cond],
)
###Output
_____no_output_____
###Markdown
5.2 Checking the pipeline definition The pipeline definition created above is expressed in JSON format.
###Code
import json
definition = json.loads(pipeline.definition())
definition
###Output
No finished training job found associated with this estimator. Please make sure this estimator is only used for building workflow config
###Markdown
5.3 Submitting and running the pipeline definition Submit the pipeline definition to the pipeline service. Using the role passed along with it, AWS creates the pipeline and runs each step of the workflow.
###Code
pipeline.upsert(role_arn=role)
execution = pipeline.start()
execution.describe()
###Output
_____no_output_____
###Markdown
5.4 Waiting for the pipeline execution
###Code
execution.wait()
###Output
_____no_output_____
###Markdown
Wait until the execution is complete. List the executed steps. This shows the steps that have been started or completed by the pipeline step-execution service. 5.5 Viewing the record of the pipeline execution steps
###Code
execution.list_steps()
###Output
_____no_output_____
###Markdown
6. Checking the execution in SageMaker Studio  7. Running the pipeline with caching and parameters- As of July 2021, caching applies to the Training, Processing, and Transform steps.- For details, see here. --> [Caching pipeline steps](https://docs.aws.amazon.com/ko_kr/sagemaker/latest/dg/pipelines-caching.html) 7.1 Running the pipeline with caching The pipeline we created can be run again with different parameter values. Parameter information is passed as a dictionary of parameter names and values, which overrides the default values. Depending on the model's performance, if you now want to run the pipeline with a compute-optimized instance type and set the approval status to "Approved" automatically, you can run the code in the next cell. A model approval status of "Approved" means that when the package version is registered in the `RegisterModel` step, it automatically becomes deployable by a CI/CD pipeline. The subsequent deployment pipeline process can be automated with a SageMaker project.
###Code
is_cache = True
%%time
from IPython.display import display as dp
import time
if is_cache:
execution = pipeline.start(
parameters=dict(
model2eval2threshold=0.8,
)
)
# execution = pipeline.start()
time.sleep(10)
dp(execution.list_steps())
execution.wait()
if is_cache:
dp(execution.list_steps())
###Output
_____no_output_____
###Markdown
7.2 Viewing the results of the pipeline run with caching  8. Lineage We look at the lineage of the artifacts created by the pipeline.
###Code
import time
from sagemaker.lineage.visualizer import LineageTableVisualizer
viz = LineageTableVisualizer(sagemaker.session.Session())
for execution_step in reversed(execution.list_steps()):
print(execution_step)
display(viz.show(pipeline_execution_step=execution_step))
time.sleep(1)
###Output
{'StepName': 'Fraud-Advance-Preprocess', 'StartTime': datetime.datetime(2022, 3, 21, 13, 12, 51, 543000, tzinfo=tzlocal()), 'EndTime': datetime.datetime(2022, 3, 21, 13, 12, 52, 89000, tzinfo=tzlocal()), 'StepStatus': 'Succeeded', 'CacheHitResult': {'SourcePipelineExecutionArn': 'arn:aws:sagemaker:us-east-1:051065130547:pipeline/sagemaker-pipeline-phase2-step-by-step/execution/491akks96l6o'}, 'AttemptCount': 0, 'Metadata': {'ProcessingJob': {'Arn': 'arn:aws:sagemaker:us-east-1:051065130547:processing-job/pipelines-491akks96l6o-fraud-advance-prepro-qbjssubgds'}}}
|
gc3_query/var/scratchpad/beta_01/Instances_status_01.ipynb | ###Markdown
Oracle Cloud Instance POC
###Code
import os
import json
from pathlib import Path
from typing import Dict
from dataclasses import dataclass
import bravado
from bravado.client import SwaggerClient, CallableOperation
# from bravado.requests_client import RequestsClient
from gc3_query.lib.gc3_bravado.requests_client import OCRequestsClient
from bravado.requests_client import RequestsResponseAdapter
from bravado.swagger_model import load_file
from bravado_core.exception import MatchingResponseNotFound
from bravado.exception import HTTPBadRequest
from bravado.http_future import HttpFuture
from tinydb import TinyDB, Query
# from prettyprinter import pprint, pformat
from pprint import pprint, pformat
## https://medium.com/@betz.mark/validate-json-models-with-swagger-and-bravado-5fad6b21a825
# Validate json models with swagger and bravado
from bravado_core.spec import Spec
from gc3_query.lib import List, Optional, Any, Callable, Dict, Tuple, Union, Set, Generator, Path
ORACLE_VPN_CONNECTED=False
# from secrets import opc_username, opc_password
import keyring
opc_username = "[email protected]"
opc_password = keyring.get_password("gc3", "gc30003")
print(f"opc_username={opc_username}, opc_password={opc_password}")
opc_password = keyring.get_password(service_name="gc3@gc30003", username="[email protected]")
print(f"opc_username={opc_username}, opc_password={opc_password}")
idm_domain_name = 'gc30003'
idm_service_instance_id = '587626604'
iaas_rest_endpoint = r'https://compute.uscom-central-1.oraclecloud.com'
iaas_auth_endpoint = f'{iaas_rest_endpoint}/authenticate/'
print(f'iaas_rest_endpoint: {iaas_rest_endpoint}')
print(f'iaas_auth_endpoint: {iaas_auth_endpoint}\n')
### Username/pass setup
idm_domain_username = f'/Compute-{idm_domain_name}/{opc_username}'
idm_service_instance_username = f'/Compute-{idm_service_instance_id}/{opc_username}'
# username = traditional_iaas_username
username = idm_service_instance_username
# basic_auth_cred = _basic_auth_str(username, opc_password)
json_data = {"user": username, "password": opc_password}
print(f'idm_domain_username: {idm_domain_username}')
print(f'idm_service_instance_username: {idm_service_instance_username}')
print(f'username: {username}')
proxies = {
'http': 'http://www-proxy-ash7.us.oracle.com:80',
'https': 'https://www-proxy-ash7.us.oracle.com:80',
}
###Output
_____no_output_____
###Markdown
Create DB to store Instance data
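The TinyDB file created below is not exercised later in this scratchpad, so here is a hedged sketch of how instance documents could be stored and queried with it (the 'name' and 'state' fields are illustrative assumptions, not a guaranteed schema):
```python
# Sketch: persist one instance record in TinyDB and query it back.
from tinydb import TinyDB, Query

instances_db = TinyDB('instances.tdb.json')
Instance = Query()
instances_db.upsert({'name': 'example-instance', 'state': 'running'},
                    Instance.name == 'example-instance')
print(instances_db.search(Instance.state == 'running'))
```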
###Code
instances_db = TinyDB('instances.tdb.json')
###Output
_____no_output_____
###Markdown
Authenticate and get header token
###Code
headers = dict([('Content-Type', 'application/oracle-compute-v3+json'),
('Accept', 'application/oracle-compute-v3+directory+json'),
])
requests_client = OCRequestsClient()
requests_client.session.headers.update(headers)
if ORACLE_VPN_CONNECTED:
requests_client.session.proxies.update(proxies)
# print(f"requests_client.session.headers before update: {requests_client.session.headers}\n")
requests_client.session.headers.update(headers)
# print(f"requests_client.session.headers after update: {requests_client.session.headers}\n")
response = requests_client.session.post(url=iaas_auth_endpoint, json=json_data)
print(f'Response OK: {response.ok}, Status Code: {response.status_code}, URL: {response.url}')
if response.ok and 'Set-Cookie' in response.headers:
print(f"Auth request succeess.\n")
### The auth cookie is already placed in the session ... nothing else needs to be done.
# print(f"\nSession Cookies: {requests_client.session.cookies}")
# print(f"\nResponse Headers['Set-Cookie']: {response.headers['Set-Cookie']}")
else:
print(f'Something failed! Response OK: {response.ok}, Status Code: {response.status_code}')
# print(f"requests_client.session.headers before update: {requests_client.session.headers}\n")
cookie_header = {'Cookie': response.headers['Set-Cookie']}
# print(f"cookie_header: {cookie_header}\n")
requests_client.session.headers.update(cookie_header)
# print(f"requests_client.session.headers after update: {requests_client.session.headers}\n")
## Update the swagger spec to use https
spec_file_path = Path().joinpath('open_api_definitions/iaas_instances.json').resolve()
spec_dict = load_file(spec_file_path)
spec_dict['schemes'] = ['https']
headers = dict([('Content-Type', 'application/oracle-compute-v3+json'),
('Accept', 'application/oracle-compute-v3+json, application/oracle-compute-v3+directory+json, json, text/html')
])
# headers = dict([('Content-Type', 'application/oracle-compute-v3+json'),
# ('Accept', 'application/oracle-compute-v3+json, json, text/html')])
requests_client = OCRequestsClient()
requests_client.session.headers.update(headers)
if ORACLE_VPN_CONNECTED:
requests_client.session.proxies.update(proxies)
# requests_client.session.proxies.update(proxies)
requests_client.session.headers.update(cookie_header)
swagger_client = SwaggerClient.from_spec(spec_dict=spec_dict,
origin_url=iaas_rest_endpoint,
http_client=requests_client,
config={'also_return_response': True,
'validate_responses': True,
'validate_requests': True,
'validate_swagger_spec': True})
# config={'also_return_response': True,
# 'validate_responses': False,
# 'validate_requests': True,
# 'validate_swagger_spec': True})
# config={'also_return_response': True,
# 'validate_responses': False,
# 'validate_requests': False,
# 'validate_swagger_spec': False})
instances_resource = swagger_client.Instances
for operation_name in dir(instances_resource):
callable_operation = getattr(instances_resource, operation_name)
print(f"{callable_operation.operation.operation_id}\t\t http_method={callable_operation.operation.http_method}\tpath={callable_operation.operation.path_name}")
callable_operation
###Output
_____no_output_____
###Markdown
discoverRootInstance{ "swagger" : "2.0", "info" : { "version" : "17.4.2-20171207.013930", "description" : "A Compute Classic instance is a virtual machine running a specific operating system and with CPU and memory resources that you specify. See About Instances in Using Oracle Cloud Infrastructure Compute Classic.You can view and delete instances using the HTTP requests listed below.", "title" : "Instances" }, "schemes" : [ "http" ], "consumes" : [ "application/oracle-compute-v3+json", "application/oracle-compute-v3+directory+json" ], "produces" : [ "application/oracle-compute-v3+json", "application/oracle-compute-v3+directory+json" ], "paths" : { "/instance/" : { "get" : { "tags" : [ "Instances" ], "summary" : "Retrieve Names of Containers", "description" : "Retrieves the names of containers that contain objects that you can access. You can use this information to construct the multipart name of an object.Required Role:To complete this task, you must have the Compute_Monitor or Compute_Operations role. If this role isn't assigned to you or you're not sure, then ask your system administrator to ensure that the role is assigned to you in Oracle Cloud My Services. See Modifying User Roles in Managing and Monitoring Oracle Cloud.", "operationId" : "discoverRootInstance ", "responses" : { "200" : { "headers" : { "set-cookie" : { "type" : "string", "description" : "The cookie value is returned if the session is extended" } }, "description" : "OK. See Status Codes for information about other possible HTTP status codes.", "schema" : { "$ref" : "/definitions/Instance-discover-response" } } }, "consumes" : [ "application/oracle-compute-v3+json" ], "produces" : [ "application/oracle-compute-v3+directory+json" ], "parameters" : [ { "name" : "Cookie", "in" : "header", "type" : "string", "description" : "The Cookie: header must be included with every request to the service. It must be set to the value of the set-cookie header in the response received to the POST /authenticate/ call." } ] } },... "definitions" : {... "Instance-discover-response" : { "properties" : { "result" : { "items" : { "type" : "string" }, "type" : "array" } } },
###Code
@dataclass
class GetOpReturn:
operation_details: Dict[str, Any]
operation_response: RequestsResponseAdapter
def get_op(operation_id: str, **wkargs) -> GetOpReturn:
print(f"Operation {operation_id} starting.")
try:
operation_future = getattr(instances_resource, operation_id)(**wkargs)
url = operation_future.future.request.url
print(f"REST url for {operation_id}: {url}")
operation_result, operation_response = operation_future.result()
except HTTPBadRequest:
print("Request failed for {operation_id}! ")
print(f"URL: {operation_future.future.request.url}")
raise
operation_details = json.loads(operation_result)
return GetOpReturn(operation_details,operation_response )
# print("\n{} operation_details:\nHTTP method: {}\nAPI url: {}:\n {}\n".format(operation_id, operation_future.operation.http_method, url, pformat(operation_details)))
# print(f"Operation {operation_id} finished.\n")
operation_id = "discoverRootInstance"
print(f"Operation {operation_id} starting.")
try:
operation_future = getattr(instances_resource, operation_id)()
url = operation_future.future.request.url
print(f"REST url for {operation_id}: {url}")
operation_result, operation_response = operation_future.result()
except HTTPBadRequest:
print("Request failed for {operation_id}! ")
print(f"URL: {operation_future.future.request.url}")
raise
operation_details = json.loads(operation_result)
print("\n{} operation_details:\nHTTP method: {}\nAPI url: {}:\n {}\n".format(operation_id, operation_future.operation.http_method, url, pformat(operation_details)))
print(f"Operation {operation_id} finished.\n")
operation_id = "discoverInstance"
print(f"Operation {operation_id} starting.")
try:
# API url: https://compute.uscom-central-1.oraclecloud.com/instance/Compute-587626604
container = operation_details['result'][0].lstrip('/').rstrip('/')
# # API url: https://compute.uscom-central-1.oraclecloud.com/instance/Compute-587626604/[email protected]
# container = f"{container}/{opc_username}"
# # API url: https://compute.uscom-central-1.oraclecloud.com/instance/Compute-587626604/[email protected]/
container = f"{container}/{opc_username}/"
print(f"container: {container}")
operation_future = getattr(instances_resource, operation_id)(container=container)
url = operation_future.future.request.url
print(f"REST url for {operation_id}: {url}")
operation_result, operation_response = operation_future.result()
except HTTPBadRequest:
print("Request failed for {operation_id}! ")
print(f"URL: {operation_future.future.request.url}")
raise
operation_details = json.loads(operation_result)
print("\n{} operation_details:\nHTTP method: {}\nAPI url: {}:\n {}\n".format(operation_id, operation_future.operation.http_method, url, pformat(operation_details)))
print(f"Operation {operation_id} finished.\n")
operation_id = "listInstance"
print(f"Operation {operation_id} starting.")
try:
# container = operation_details['result'][0].lstrip('/').rstrip('/')
# container = 'Compute-587626604/[email protected]'
container = 'Compute-587626604/[email protected]/GC3NAAC-CDMT-LWS1'
print(f"container: {container}")
operation_future = getattr(instances_resource, operation_id)(container=container)
url = operation_future.future.request.url
print(f"REST url for {operation_id}: {url}")
operation_result, operation_response = operation_future.result()
except HTTPBadRequest:
print("Request failed for {operation_id}! ")
print(f"URL: {operation_future.future.request.url}")
raise
operation_details = json.loads(operation_result)
print("\n{} operation_details:\nHTTP method: {}\nAPI url: {}:\n {}\n".format(operation_id, operation_future.operation.http_method, url, pformat(operation_details)))
print(f"Operation {operation_id} finished.")
operation_details
###Output
_____no_output_____
###Markdown
Back to our example(psmcli) [root@eharris-lnxobi-01 ~] opc -f json compute instance list -h NAME: compute instance list - Retrieve Details of all Instances in a ContainerUSAGE: compute instance list container [options...]DESCRIPTION: Retrieve Details of all Instances in a ContainerREQUIRED ARGUMENTS: container - /Compute-identity_domain/user or /Compute-identity_domainOPTIONS: --availability-domain value The availability domain the instance is in --tags value Strings used to tag the instance. When you specify tags, only instances tagged with the specified value are displayed.(psmcli) [root@eharris-lnxobi-01 ~]Instances Swagger Definition{ "swagger" : "2.0", "info" : { "version" : "17.4.2-20171207.013930", "description" : "A Compute Classic instance is a virtual machine running a specific operating system and with CPU and memory resources that you specify. See About Instances in Using Oracle Cloud Infrastructure Compute Classic.You can view and delete instances using the HTTP requests listed below.", "title" : "Instances" }, "schemes" : [ "https" ], "consumes" : [ "application/oracle-compute-v3+json", "application/oracle-compute-v3+directory+json" ], "produces" : [ "application/oracle-compute-v3+json", "application/oracle-compute-v3+directory+json" ], "paths" : { . . . "/instance/{container}/" : { "get" : { "tags" : [ "Instances" ], "summary" : "Retrieve Details of all Instances in a Container", "description" : "Retrieves details of the instances that are in the specified container and match the specified query criteria. If you don't specify any query criteria, then details of all the instances in the container are displayed. To filter the search results, you can pass one or more of the following query parameters, by appending them to the URI in the following syntax:?parameter1=value1&parameter2=value2&parameterN=valueNRequired Role: To complete this task, you must have the Compute_Monitor or Compute_Operations role. If this role isn't assigned to you or you're not sure, then ask your system administrator to ensure that the role is assigned to you in Oracle Cloud My Services. See Modifying User Roles in Managing and Monitoring Oracle Cloud.", "operationId" : "listInstance", "responses" : { "200" : { "headers" : { "set-cookie" : { "type" : "string", "description" : "The cookie value is returned if the session is extended" } }, "description" : "OK. See Status Codes for information about other possible HTTP status codes.", "schema" : { "$ref" : "/definitions/Instance-list-response" } } }, "consumes" : [ "application/oracle-compute-v3+json" ], "produces" : [ "application/oracle-compute-v3+json" ], "parameters" : [ { "name" : "container", "in" : "path", "description" : "/Compute-identity_domain/user or /Compute-identity_domain", "required" : true, "type" : "string" }, { "name" : "availability_domain", "in" : "query", "description" : "The availability domain the instance is in", "required" : false, "type" : "string" }, { "name" : "tags", "in" : "query", "description" : "Strings used to tag the instance. When you specify tags, only instances tagged with the specified value are displayed.", "required" : false, "type" : "array", "items" : { "type" : "string" } }, { "name" : "Cookie", "in" : "header", "type" : "string", "description" : "The Cookie: header must be included with every request to the service. It must be set to the value of the set-cookie header in the response received to the POST /authenticate/ call." } ] } },
###Code
operation_details['result']
headers = dict([('Content-Type', 'application/oracle-compute-v3+json'),
('Accept', 'application/oracle-compute-v3+json, json, text/html')])
requests_client = OCRequestsClient()
requests_client.session.headers.update(headers)
if ORACLE_VPN_CONNECTED:
requests_client.session.proxies.update(proxies)
# requests_client.session.proxies.update(proxies)
requests_client.session.headers.update(cookie_header)
swagger_client = SwaggerClient.from_spec(spec_dict=spec_dict,
origin_url=iaas_rest_endpoint,
http_client=requests_client,
# config={'also_return_response': True,
# 'validate_responses': True,
# 'validate_requests': True,
# 'validate_swagger_spec': True})
# config={'also_return_response': True,
# 'validate_responses': False,
# 'validate_requests': True,
# 'validate_swagger_spec': True})
config={'also_return_response': True,
'validate_responses': False,
'validate_requests': False,
'validate_swagger_spec': False})
instances_resource = swagger_client.Instances
###Output
_____no_output_____
###Markdown
"Instance-response" : { "properties" : { "account" : { "type" : "string", "description" : "Shows the default account for your identity domain." }, "attributes" : { "additionalProperties" : { "type" : "object" }, "type" : "object", "description" : "A dictionary of attributes to be made available to the instance. A value with the key \"userdata\" will be made available in an EC2-compatible manner." }, "availability_domain" : { "type" : "string", "description" : "The availability domain the instance is in" }, "boot_order" : { "items" : { "type" : "integer" }, "type" : "array", "description" : "Boot order list." }, "desired_state" : { "type" : "string", "description" : "Desired state for the instance. The value can be shutdown or running to shutdown an instance or to restart a previously shutdown instance respectively." }, "disk_attach" : { "type" : "string", "description" : "A label assigned by the user to identify disks." }, "domain" : { "type" : "string", "description" : "The default domain to use for the hostname and for DNS lookups." }, "entry" : { "type" : "integer", "description" : "Optional imagelistentry number (default will be used if not specified)." }, "error_reason" : { "type" : "string", "description" : "The reason for the instance going to error state, if available." }, "fingerprint" : { "type" : "string", "description" : "SSH server fingerprint presented by the instance." }, "hostname" : { "type" : "string", "description" : "The hostname for this instance." }, "hypervisor" : { "additionalProperties" : { "type" : "object" }, "type" : "object", "description" : "A dictionary of hypervisor-specific attributes." }, "image_format" : { "type" : "string", "description" : "The format of the image." }, "imagelist" : { "type" : "string", "description" : "Name of imagelist to be launched." }, "ip" : { "type" : "string", "description" : "IP address of the instance." }, "label" : { "type" : "string", "description" : "A label assigned by the user, specifically for defining inter-instance relationships." }, "name" : { "type" : "string", "description" : "Multipart name of the instance." }, "networking" : { "additionalProperties" : { "type" : "object" }, "type" : "object", "description" : "Mapping of to network specifiers for virtual NICs to be attached to this instance." }, "placement_requirements" : { "items" : { "type" : "string" }, "type" : "array", "description" : "A list of strings specifying arbitrary tags on nodes to be matched on placement." }, "platform" : { "type" : "string", "description" : "The OS platform for the instance." }, "priority" : { "type" : "string", "description" : "The priority at which this instance will be run." }, "quota" : { "type" : "string", "description" : "Not used" }, "relationships" : { "items" : { "additionalProperties" : { "type" : "object" }, "type" : "object" }, "type" : "array", "description" : "A list of relationship specifications to be satisfied on this instance's placement" }, "resolvers" : { "items" : { "type" : "string" }, "type" : "array", "description" : "Resolvers to use instead of the default resolvers." }, "reverse_dns" : { "type" : "boolean", "description" : "Add PTR records for the hostname." }, "shape" : { "type" : "string", "description" : "A shape is a resource profile that specifies the number of CPU threads and the amount of memory (in MB) to be allocated to an instance." }, "sshkeys" : { "items" : { "type" : "string" }, "type" : "array", "description" : "SSH keys that will be exposed to the instance." 
}, "start_time" : { "type" : "string", "description" : "Start time of the instance." }, "state" : { "type" : "string", "description" : "State of the instance." }, "storage_attachments" : { "items" : { "type" : "string" }, "type" : "array", "description" : "List of dictionaries containing storage attachment Information." }, "tags" : { "items" : { "type" : "string" }, "type" : "array", "description" : "Comma-separated list of strings used to tag the instance." }, "uri" : { "type" : "string", "description" : "Uniform Resource Identifier" }, "vcable_id" : { "type" : "string", "description" : "vCable for this instance." }, "vnc" : { "type" : "string", "description" : "IP address and port of the VNC console for the instance." } } },
###Code
operation_id = "getInstance"
print(f"\nOperation {operation_id} starting.")
try:
name = operation_details['result'][0].lstrip('/').rstrip('/')
print(f"name: {name}")
operation_future: HttpFuture = getattr(instances_resource, operation_id)(name=name)
url = operation_future.future.request.url
print(f"REST url for {operation_id}: {url}")
print(f"Accept Header=[{operation_future.future.session.headers['Accept']}], Content-Type Header=[{operation_future.future.session.headers['Content-Type']}]")
operation_result, operation_response = operation_future.result()
except bravado.exception.HTTPBadRequest:
print("Request failed for {operation_id}! ")
print(f"URL: {operation_future.future.request.url}")
raise
except bravado.exception.HTTPNotFound:
print("Request failed for {operation_id}! ")
print(f"URL: {operation_future.future.request.url}")
raise
operation_details = json.loads(operation_result)
print("\n{} operation_details:\nHTTP method: {}\nAPI url: {}:\n {}\n".format(operation_id, operation_future.operation.http_method, url, pformat(operation_details)))
print(f"Operation {operation_id} finished.")
operation_details
operation_id = "getInstance"
print(f"\nOperation {operation_id} starting.")
try:
name = operation_details['result'][0].lstrip('/').rstrip('/')
print(f"name: {name}")
operation_future: HttpFuture = getattr(instances_resource, operation_id)(name=name)
url = operation_future.future.request.url
print(f"REST url for {operation_id}: {url}")
print(f"Accept Header=[{operation_future.future.session.headers['Accept']}], Content-Type Header=[{operation_future.future.session.headers['Content-Type']}]")
operation_result, operation_response = operation_future.result()
except bravado.exception.HTTPBadRequest:
print("Request failed for {operation_id}! ")
print(f"URL: {operation_future.future.request.url}")
raise
except bravado.exception.HTTPNotFound:
print("Request failed for {operation_id}! ")
print(f"URL: {operation_future.future.request.url}")
raise
operation_details = json.loads(operation_result)
# print("\n{} operation_details:\nHTTP method: {}\nAPI url: {}:\n {}\n".format(operation_id, operation_future.operation.http_method, url, pformat(operation_details)))
print(f"Operation {operation_id} finished.")
###Output
Operation getInstance starting.
###Markdown
"get" : { "tags" : [ "Instances" ], "summary" : "Retrieve Details of an Instance", "description" : "Retrieves details of the specified instance.Required Role: To complete this task, you must have the Compute_Monitor or Compute_Operations role. If this role isn't assigned to you or you're not sure, then ask your system administrator to ensure that the role is assigned to you in Oracle Cloud My Services. See Modifying User Roles in Managing and Monitoring Oracle Cloud.", "operationId" : "getInstance", "responses" : { "200" : { "headers" : { "set-cookie" : { "type" : "string", "description" : "The cookie value is returned if the session is extended" } }, "description" : "OK. See Status Codes for information about other possible HTTP status codes.", "schema" : { "$ref" : "/definitions/Instance-response" } } }, "consumes" : [ "application/oracle-compute-v3+json" ], "produces" : [ "application/oracle-compute-v3+json" ], "parameters" : [ { "name" : "name", "in" : "path", "description" : "Multipart name of the object.", "required" : true, "type" : "string" }, { "name" : "Cookie", "in" : "header", "type" : "string", "description" : "The Cookie: header must be included with every request to the service. It must be set to the value of the set-cookie header in the response received to the POST /authenticate/ call." } ] } } }, . . . "Instance-response" : { "properties" : { "account" : { "type" : "string", "description" : "Shows the default account for your identity domain." }, "attributes" : { "additionalProperties" : { "type" : "object" }, "type" : "object", "description" : "A dictionary of attributes to be made available to the instance. A value with the key \"userdata\" will be made available in an EC2-compatible manner." }, "availability_domain" : { "type" : "string", "description" : "The availability domain the instance is in" }, "boot_order" : { "items" : { "type" : "integer" }, "type" : "array", "description" : "Boot order list." }, "desired_state" : { "type" : "string", "description" : "Desired state for the instance. The value can be shutdown or running to shutdown an instance or to restart a previously shutdown instance respectively." }, "disk_attach" : { "type" : "string", "description" : "A label assigned by the user to identify disks." }, "domain" : { "type" : "string", "description" : "The default domain to use for the hostname and for DNS lookups." }, "entry" : { "type" : "integer", "description" : "Optional imagelistentry number (default will be used if not specified)." }, "error_reason" : { "type" : "string", "description" : "The reason for the instance going to error state, if available." }, "fingerprint" : { "type" : "string", "description" : "SSH server fingerprint presented by the instance." }, "hostname" : { "type" : "string", "description" : "The hostname for this instance." }, "hypervisor" : { "additionalProperties" : { "type" : "object" }, "type" : "object", "description" : "A dictionary of hypervisor-specific attributes." }, "image_format" : { "type" : "string", "description" : "The format of the image." }, "imagelist" : { "type" : "string", "description" : "Name of imagelist to be launched." }, "ip" : { "type" : "string", "description" : "IP address of the instance." }, "label" : { "type" : "string", "description" : "A label assigned by the user, specifically for defining inter-instance relationships." }, "name" : { "type" : "string", "description" : "Multipart name of the instance." 
}, "networking" : { "additionalProperties" : { "type" : "object" }, "type" : "object", "description" : "Mapping of to network specifiers for virtual NICs to be attached to this instance." }, "placement_requirements" : { "items" : { "type" : "string" }, "type" : "array", "description" : "A list of strings specifying arbitrary tags on nodes to be matched on placement." }, "platform" : { "type" : "string", "description" : "The OS platform for the instance." }, "priority" : { "type" : "string", "description" : "The priority at which this instance will be run." }, "quota" : { "type" : "string", "description" : "Not used" }, "relationships" : { "items" : { "additionalProperties" : { "type" : "object" }, "type" : "object" }, "type" : "array", "description" : "A list of relationship specifications to be satisfied on this instance's placement" }, "resolvers" : { "items" : { "type" : "string" }, "type" : "array", "description" : "Resolvers to use instead of the default resolvers." }, "reverse_dns" : { "type" : "boolean", "description" : "Add PTR records for the hostname." }, "shape" : { "type" : "string", "description" : "A shape is a resource profile that specifies the number of CPU threads and the amount of memory (in MB) to be allocated to an instance." }, "sshkeys" : { "items" : { "type" : "string" }, "type" : "array", "description" : "SSH keys that will be exposed to the instance." }, "start_time" : { "type" : "string", "description" : "Start time of the instance." }, "state" : { "type" : "string", "description" : "State of the instance." }, "storage_attachments" : { "items" : { "type" : "string" }, "type" : "array", "description" : "List of dictionaries containing storage attachment Information." }, "tags" : { "items" : { "type" : "string" }, "type" : "array", "description" : "Comma-separated list of strings used to tag the instance." }, "uri" : { "type" : "string", "description" : "Uniform Resource Identifier" }, "vcable_id" : { "type" : "string", "description" : "vCable for this instance." }, "vnc" : { "type" : "string", "description" : "IP address and port of the VNC console for the instance." } } },
###Code
type(operation_details)
print(operation_details.keys())
###Output
_____no_output_____
###Markdown
Insert results into the DB
###Code
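# Store the parsed getInstance response in the local document store created earlier
# in the notebook, then display the most recently inserted record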
instances_db.insert(operation_details)
instances_db.all()[-1]
###Output
_____no_output_____ |
oldassyrian/display.ipynb | ###Markdown
--- To get started: consult [start](start.ipynb) --- Display: we show the ins and outs of displaying cuneiform ATF transcriptions.
###Code
%load_ext autoreload
%autoreload 2
from tf.app import use
A = use("oldassyrian:clone", checkout="clone", hoist=globals())
# A = use('oldassyrian', hoist=globals())
###Output
_____no_output_____
###Markdown
We pick an example face with which we illustrate many ways to represent cuneiform text.
###Code
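# Pick one face by its section heading and collect all of the line nodes it contains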
exampleFace = ("P361249", "obverse")
f = T.nodeFromSection(exampleFace)
lines = L.d(f, otype="line")
###Output
_____no_output_____
###Markdown
Raw text: the most basic way is to show the source material for each line, which is in the feature `srcLn`. This feature has been filled by merely copying the numbered lines from the CDLI ATF sources.
###Code
for ln in lines:
print(F.srcLn.v(ln))
###Output
1. 2/2(disz) _ma-na ku3-babbar_
2. s,a-ru-pa2-am i-s,e2-er
3. ha-nu-nu _dumu_ en-um-a-szur
4. li2-qe2-ep _dumu_ puzur2-esz18-dar i-szu
5. _iti-kam_ ab2 sza-ra-ni
6. li-mu-um szu-ra-ma
7. 1/3(disz) _ma-na ku3-babbar_ a-na
8. 1(disz) sza-na-at i-sza-qal
9. 1/3(disz) _ma-na ku3-babbar_ a-na
10. a-na 2(disz) sza-na-at i-sza-qal
11. szu-ma la2 isz-qu2-ul
###Markdown
or, slightly easier:
###Code
print(*A.getSource(f), sep="\n")
###Output
@obverse
1. 2/2(disz) _ma-na ku3-babbar_
2. s,a-ru-pa2-am i-s,e2-er
3. ha-nu-nu _dumu_ en-um-a-szur
4. li2-qe2-ep _dumu_ puzur2-esz18-dar i-szu
5. _iti-kam_ ab2 sza-ra-ni
6. li-mu-um szu-ra-ma
7. 1/3(disz) _ma-na ku3-babbar_ a-na
8. 1(disz) sza-na-at i-sza-qal
9. 1/3(disz) _ma-na ku3-babbar_ a-na
10. a-na 2(disz) sza-na-at i-sza-qal
11. szu-ma la2 isz-qu2-ul
###Markdown
Text formats: the TF API supports *text formats*. Text formats make selections and apply templates and styles based on the analysed features of the text. For example: a text-format may ignore flags or clusters, or format numerals in special ways. Text formats are not baked into TF, but they are defined in the feature `otext` of the corpus. Moreover, for this corpus a TF app has been built that defines additional text-formats. Whereas the formats defined in `otext` are strictly plain text formats, the formats defined in the app are able to use typographic styles to shape the text, such as bold, italic, colors, etc. Here is the list of all formats.
###Code
T.formats
###Output
_____no_output_____
###Markdown
Plain text formats: the formats whose names start with `text-` are the plain text formats. `text-orig-full`: this format is really close to the ATF. It contains all original information. This is the default format. We do not have to specify it.
###Code
for ln in lines:
print(ln, T.text(ln))
A.plain(ln)
###Output
865417 2/2(disz) _ma-na ku3-babbar_
###Markdown
The `plain()` function focuses on the *contents*, and instead of the line number, it gives a full specification of the location, linked to the online source on CDLI. But we can omit the locations:
###Code
for ln in lines:
A.plain(ln, withPassage=False)
###Output
_____no_output_____
###Markdown
`text-orig-plain`: this is a somewhat reduced format. It omits all flags and bracketing constructs. For clarity, adjacent signs are separated with a `⁼` character.
###Code
for ln in lines:
A.plain(ln, fmt="text-orig-plain")
###Output
_____no_output_____
###Markdown
`text-orig-rich`: this format is a bit prettier: instead of the strict ASCII encoding used by the CDLI archive, it uses characters with diacritics. There is no flag/cluster information in this representation.
###Code
for ln in lines:
A.plain(ln, fmt="text-orig-rich")
###Output
_____no_output_____
###Markdown
`text-orig-unicode`: this format uses the Cuneiform Unicode characters. Numerals with repeats are represented by placing that many copies of the character in question. Readings that could not be found in the [mapping](https://github.com/Nino-cunei/tfFromAtf/blob/master/writing/GeneratedSignList.json) we use appear in latin characters. There is no flag/cluster information in this representation.
###Code
for ln in lines:
A.plain(ln, fmt="text-orig-unicode")
###Output
_____no_output_____
###Markdown
**Note that we haven't yet properly mapped `2/2(disz)` to unicode!** Styled text formats: the formats whose names start with `layout-` are the styled text formats. `layout-orig-rich`: this format looks like `text-orig-rich`, but now we re-introduce the flags and clusters by specific layout devices. See below for detailed examples.
###Code
for ln in lines:
A.plain(ln, fmt="layout-orig-rich")
###Output
_____no_output_____
###Markdown
`layout-orig-unicode`: this format looks like `text-orig-unicode`, but now we re-introduce the flags and clusters by specific layout devices. See below for detailed examples.
###Code
for ln in lines:
A.plain(ln, fmt="layout-orig-unicode")
###Output
_____no_output_____
###Markdown
Here is the text of the face in each of the plain text formats, i.e. no additional HTML formatting is applied. Pretty: the ultimate graphical display is by means of the `pretty()` function. This display is less useful for reading, but instead optimized for showing all information that you might wish for. It shows a base representation according to a text format of your choice (here we choose `layout-orig-rich`), and it shows the values of a standard set of features.
###Code
w = F.otype.s("word")[1]
F.atf.v(w)
A.pretty(w)
A.pretty(w, fmt="layout-orig-unicode", withNodes=True)
###Output
_____no_output_____
###Markdown
By default, pretty displays descend to the word level, but you can also descend to the sign level:
###Code
A.pretty(w, baseTypes="sign")
A.pretty(w, fmt="layout-orig-unicode", baseTypes="sign", withNodes=True)
###Output
_____no_output_____
###Markdown
Later on, in the [search](search.ipynb) tutorial, we see that `pretty()` can also display other features, even features that you or other people have created and added later. Here we call for the feature `atf`, which shows the original atf for the sign in question, excluding the bracketing characters. Consult the [feature documentation](https://github.com/Nino-cunei/atfFromTf/blob/master/docs/transcription.md) to see what information is stored in all the features. We show it with node numbers, but you could leave them out in an obvious way.
###Code
A.pretty(f, extraFeatures="atf", fmt="layout-orig-rich", withNodes=True)
###Output
_____no_output_____
###Markdown
We do not see much, because the default condense type is `line`, and a `document` is bigger than that. Objects bigger than the condense type will be abbreviated to a label that indicates their identity, not their contents. But we can override this by adding `full=True`. See also the documentation on [`pretty`](https://annotation.github.io/text-fabric/tf/advanced/display.html#tf.advanced.display.pretty).
###Code
A.pretty(f, extraFeatures="atf", fmt="layout-orig-rich", withNodes=True, full=True)
###Output
_____no_output_____
###Markdown
Layout formats: the details. We give detailed examples of how the material is styled in the `layout-` formats. We show the representation of all kinds of signs and also what the influence of clustering and flags is. Here are the design principles: * all flags ` ? ! *` cause the preceding sign to be in bold * damage `` and missing `[ ]` text is blurry and in grey * questioned `?` and uncertain `( )` text is in italics * remarkable `!` and supplied `` text is overlined, supplied text is in blue * excised `>` text has a strike-through and is in red * collated `*` text is underlined. **Numerals** are written with repeats/fractions and the repeated material is in `⌈ ⌉`. If represented in cuneiform unicode, repeated material is actually repeated that many times, and the repeat number and the brackets are not shown. **Ligatures** (the `x` operator as in `kux(DU)`) are written with the `␣` character between the operands, and the second operand (`DU`) is written between `⌈ ⌉`. **Corrections** (as in `ku!(LU)`) are written as `ku=⌈LU⌉`. Just a quick overview of the sign types:
###Code
F.type.freqList("sign")
###Output
_____no_output_____
###Markdown
Styled display of ATF text
###Code
lines = (
(("P361247", "obverse", "3"), ("cluster: language", [5, 6, 7])),
(("P360975", "obverse", "1"), ("cluster: determinative", [3])),
(("P360975", "reverse", "17'"), ("cluster: missing", [5, 6, 7])),
(("P390636", "obverse", "31"), ("cluster: uncertain", [2])),
(("P361588", "obverse", "4"), ("cluster: supplied", [5, 6, 7, 8, 9])),
(("P361599", "obverse", "3"), ("cluster: excised", [1, 2, 3])),
(("P361599", "obverse", "10"), ("flag: damage", [4, 5])),
(("P390624", "reverse", "7"), ("flag: question", [4])),
(("P361599", "obverse", "6"), ("flag: remarkable", [3, 4])),
(("P293386", "edge", "1"), ("flag: damage + question", [10])),
(("P290549", "obverse", "12"), ("flag: damage + remarkable", [4])),
(("P358477", "obverse", "5"), ("sign: comment", [1])),
(("P500569", "seal - surface a", "2:4"), ("sign: grapheme", [1])),
(("P393106", "obverse", "6"), ("sign: correction", [2])),
(("P393106", "obverse", "1"), ("sign: numeral", [1, 2, 5, 6])),
(("P360690", "reverse", "10"), ("sign: ligature", [10])),
(("P360690", "reverse", "10"), ("sign: word divider", [6])),
(("P393106", "reverse", "8"), ("sign: unknown and ellipsis", [1, 2, 3, 4, 5])),
)
for (line, (desc, positions)) in lines:
ln = T.nodeFromSection(line)
A.dm("---\n# {}\n\nLocation: {} {}:{}".format(desc, *line))
s = L.d(ln, otype="sign")[0]
highlights = {s + p - 1 for p in positions}
print(*A.getSource(ln), sep="\n")
A.plain(ln, fmt="layout-orig-rich", highlights=highlights)
A.plain(ln, fmt="layout-orig-unicode", highlights=highlights)
A.pretty(
ln,
extraFeatures="atf",
fmt="text-orig-rich",
baseTypes="sign",
highlights=highlights,
)
###Output
_____no_output_____ |
NIPS_2019_Notebooks/Latent dimension comparisons-New dataset.ipynb | ###Markdown
Latent dimension comparisons - New dataset. Goal: compare the `average per sample mse loss` and `average per sample kl loss` for different numbers of latent dimensions, to determine what number of latent dimensions to use for the VAE. Imports:
###Code
import pandas as pd
import numpy as np
import math
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
Check the validity of the dumped arrays by ensuring that the same set of test validation samples is present in the dumps for all of the latent dimension tests
###Code
latent_dims = [16, 32, 64, 128, 256]
dumps = ["20190912_205410", "20190912_205459", "20190912_205557", "20190912_205708", "20190912_205730"]
# First check that all the indices from the test validation set exist in all the dumps
ldump_idx_arr = None
# Iterate over the dumps and check the indices
for latent_dim, dump in zip(latent_dims, dumps):
print("----------------------------------------------------")
print("Reading metrics from VAE with {0} latent dimensions :".format(latent_dim))
print("----------------------------------------------------")
dump_npz_path = "/home/akajal/WatChMaL/VAE/dumps/{0}/test_validation_iteration_metrics.npz".format(dump)
dump_npz_arr = np.load(dump_npz_path)
dump_indices = np.sort(dump_npz_arr["indices"])
if ldump_idx_arr is not None:
if not np.array_equal(dump_indices, ldump_idx_arr):
print("Index array for latent dims {0} not equal to all the other.".format(latent_dim))
else:
print("Index array equal to the first index array")
else:
ldump_idx_arr = dump_indices
###Output
----------------------------------------------------
Reading metrics from VAE with 16 latent dimensions :
----------------------------------------------------
----------------------------------------------------
Reading metrics from VAE with 32 latent dimensions :
----------------------------------------------------
Index array equal to the first index array
----------------------------------------------------
Reading metrics from VAE with 64 latent dimensions :
----------------------------------------------------
Index array equal to the first index array
----------------------------------------------------
Reading metrics from VAE with 128 latent dimensions :
----------------------------------------------------
Index array equal to the first index array
----------------------------------------------------
Reading metrics from VAE with 256 latent dimensions :
----------------------------------------------------
Index array equal to the first index array
###Markdown
For each configuration of the latent dimensions, print the `average per sample mse loss` with its `standard deviation` and `standard error` and print the `average per sample kl loss` with its `standard deviation` and `standard error`
###Code
# Collect the metrics for plotting as well
recon_loss_values, kl_loss_values = [], []
recon_std_values, kl_std_values = [], []
recon_stderr_values, kl_stderr_values = [], []
# Iterate over the dumps and compute the per-sample loss statistics
for latent_dim, dump in zip(latent_dims, dumps):
print("----------------------------------------------------")
print("Printing metrics for VAE with {0} latent dimensions :".format(latent_dim))
print("----------------------------------------------------")
dump_npz_path = "/home/akajal/WatChMaL/VAE/dumps/{0}/test_validation_iteration_metrics.npz".format(dump)
npz_arr = np.load(dump_npz_path)
dump_recon_loss, dump_kl_loss = npz_arr["recon_loss"], npz_arr["kl_loss"]
mean_recon_loss, std_recon_loss = np.mean(dump_recon_loss), np.std(dump_recon_loss)
stderr_recon_loss = std_recon_loss/math.sqrt(dump_recon_loss.shape[0])
recon_loss_values.append(mean_recon_loss)
recon_std_values.append(std_recon_loss)
recon_stderr_values.append(stderr_recon_loss)
mean_kl_loss, std_kl_loss = np.mean(dump_kl_loss), np.std(dump_kl_loss)
stderr_kl_loss = std_kl_loss/math.sqrt(dump_kl_loss.shape[0])
kl_loss_values.append(mean_kl_loss)
kl_std_values.append(std_kl_loss)
kl_stderr_values.append(stderr_kl_loss)
print("Recon Loss metrics")
print("Mean Recon loss : {0}".format(mean_recon_loss))
print("Std Recon loss : {0}".format(std_recon_loss))
print("Stderr Recon loss : {0}\n".format(stderr_recon_loss))
print("KL Loss metrics")
print("Mean KL loss : {0}".format(mean_kl_loss))
print("Std KL loss : {0}".format(std_kl_loss))
print("Stderr KL loss : {0}".format(stderr_kl_loss))
###Output
----------------------------------------------------
Printing metrics for VAE with 16 latent dimensions :
----------------------------------------------------
Recon Loss metrics
Mean Recon loss : 8683.6806640625
Std Recon loss : 5577.626953125
Stderr Recon loss : 5.8834483514321265
KL Loss metrics
Mean KL loss : 54.161556243896484
Std KL loss : 17.71230125427246
Stderr KL loss : 0.0186834670891951
----------------------------------------------------
Printing metrics for VAE with 32 latent dimensions :
----------------------------------------------------
Recon Loss metrics
Mean Recon loss : 8402.435546875
Std Recon loss : 5365.22802734375
Stderr Recon loss : 5.659403588267478
KL Loss metrics
Mean KL loss : 67.3596420288086
Std KL loss : 44.95058822631836
Stderr KL loss : 0.04741522988515116
----------------------------------------------------
Printing metrics for VAE with 64 latent dimensions :
----------------------------------------------------
Recon Loss metrics
Mean Recon loss : 8507.375
Std Recon loss : 5459.2216796875
Stderr Recon loss : 5.758550914464524
KL Loss metrics
Mean KL loss : 67.39708709716797
Std KL loss : 24.927616119384766
Stderr KL loss : 0.02629439781385114
----------------------------------------------------
Printing metrics for VAE with 128 latent dimensions :
----------------------------------------------------
Recon Loss metrics
Mean Recon loss : 8443.9794921875
Std Recon loss : 5391.650390625
Stderr Recon loss : 5.687274690260219
KL Loss metrics
Mean KL loss : 70.20600891113281
Std KL loss : 24.082477569580078
Stderr KL loss : 0.025402920300319286
----------------------------------------------------
Printing metrics for VAE with 256 latent dimensions :
----------------------------------------------------
Recon Loss metrics
Mean Recon loss : 8513.2470703125
Std Recon loss : 5433.09375
Stderr Recon loss : 5.730990389865415
KL Loss metrics
Mean KL loss : 75.30889892578125
Std KL loss : 26.900619506835938
Stderr KL loss : 0.028375581016819904
###Markdown
For each of the latent dimensions, plot the `average per sample mse loss` with its `stderr` as the error bar and plot the `average per sample kl loss` with its `stderr` as the error bar
###Code
# Plot the metrics for the training subset
# Initialize the plot
fig, ax1 = plt.subplots(figsize=(16,9))
# Set the x-axes ticks for the plot
ax1.set_xticks(latent_dims)
# Use the same x-axis to plot the KL loss
ax2 = ax1.twinx()
xticks = np.arange(len(latent_dims))
xticks_labels = latent_dims
ax1.xaxis.set_ticks(xticks) #set the ticks to be a
ax1.xaxis.set_ticklabels(xticks_labels)
# Plot the MSE values collected above
ax1.errorbar(xticks, recon_loss_values, yerr=recon_stderr_values,
linestyle='dashed', marker='o', markersize=10,
elinewidth=3, capsize=4.0, color="blue",
label="Average per sample MSE Loss")
# Plot the KL values collected above
ax2.errorbar(xticks, kl_loss_values, yerr=kl_stderr_values,
linestyle='dashed', marker='o', markersize=10,
elinewidth=3, capsize=4.0, color="red",
label="Average per sample KL Loss")
# Setup plot characteristics
ax1.tick_params(axis="x", labelsize=30)
ax1.set_xlabel("Number of latent dimensions", fontsize=30)
#ax1.set_xscale("log", basex=2)
ax1.set_ylabel("MSE Loss", fontsize=30, color="blue")
ax1.tick_params(axis="y", labelsize=30, colors="blue")
ax2.set_ylabel("KL Loss", fontsize=30, color="red")
ax2.tick_params(axis="y", labelsize=30, colors="red")
plt.margins(0.2)
ax1.grid(True)
ax2.grid(True)
ax1.set_facecolor('white')
ax2.set_facecolor('white')
lgd = fig.legend(prop={"size":30}, loc='center', bbox_to_anchor=(0.5, 0.75), fancybox=True, framealpha=0.5)
plt.savefig("figures/beta_latent_dimensions_vs_mse_and_kl_loss.pdf", format="pdf", dpi=600, bbox_inches="tight")
###Output
_____no_output_____
###Markdown
From here, we choose the model with 128 dimensions as our reference model. On a per-sample basis, compute the delta difference b/w the MSE loss and KL loss for that sample under a model with `x` latent dimensions and under the reference model. Steps: 1. Construct 5 dataframes (one for each number of latent dimensions) 2. Sort the dataframes along the dataset index axis 3. Calculate the delta difference for each dataframe w.r.t. the dataframe corresponding to the reference model 4. Plot the delta difference values. Construct 5 dataframes (one for each number of latent dimensions)
###Code
dump_dfs = []
# Iterate over the dumps and load the per-sample metrics into dataframes
for latent_dim, dump in zip(latent_dims, dumps):
print("----------------------------------------------------")
print("Reading metrics from VAE with {0} latent dimensions :".format(latent_dim))
print("----------------------------------------------------")
dump_npz_path = "/home/akajal/WatChMaL/VAE/dumps/{0}/test_validation_iteration_metrics.npz".format(dump)
dump_npz_arr = np.load(dump_npz_path)
dump_dfs.append(pd.DataFrame(data={"index":dump_npz_arr["indices"], "recon_loss":dump_npz_arr["recon_loss"],
"kl_loss":dump_npz_arr["kl_loss"]}))
print("Done.")
###Output
----------------------------------------------------
Reading metrics from VAE with 16 latent dimensions :
----------------------------------------------------
Done.
----------------------------------------------------
Reading metrics from VAE with 32 latent dimensions :
----------------------------------------------------
Done.
----------------------------------------------------
Reading metrics from VAE with 64 latent dimensions :
----------------------------------------------------
Done.
----------------------------------------------------
Reading metrics from VAE with 128 latent dimensions :
----------------------------------------------------
Done.
----------------------------------------------------
Reading metrics from VAE with 256 latent dimensions :
----------------------------------------------------
Done.
###Markdown
Sort the dataframes along the index axis
###Code
for df in dump_dfs:
df.sort_values(by="index", inplace=True)
###Output
_____no_output_____
###Markdown
Set the index of the reference dataframe to use
###Code
ref_df_idx = 1
###Output
_____no_output_____
###Markdown
Calculate the delta differenced values for each number of latent dimensions
###Code
for df in dump_dfs:
df["delta recon_loss"] = df["recon_loss"].values - dump_dfs[ref_df_idx]["recon_loss"].values
df["delta kl_loss"] = df["kl_loss"].values - dump_dfs[ref_df_idx]["kl_loss"].values
###Output
_____no_output_____
###Markdown
Find the mean and stderr of the delta differenced values for each number of latent dimensions
###Code
delta_recon_mean, delta_kl_mean, delta_recon_stderr, delta_kl_stderr = [], [], [], []
for df in dump_dfs:
delta_recon_loss, delta_kl_loss = df["delta recon_loss"], df["delta kl_loss"]
delta_recon_mean.append(np.mean(delta_recon_loss.values))
delta_kl_mean.append(np.mean(delta_kl_loss.values))
delta_recon_stderr.append(np.std(delta_recon_loss.values)/math.sqrt(df["delta recon_loss"].values.shape[0]))
delta_kl_stderr.append(np.std(delta_kl_loss.values)/math.sqrt(df["delta kl_loss"].values.shape[0]))
print(delta_recon_mean)
print(delta_kl_mean)
print(delta_recon_stderr)
print(delta_kl_stderr)
###Output
[0.8729610103611225, 0.0, 0.742196789605224, 0.882746194093707, 0.7329792594546576]
[0.04050325723852688, 0.0, 0.040461509720143084, 0.03708978699032252, 0.040498770637756]
###Markdown
Plot the delta differenced values collected above
###Code
# Plot the metrics for the training subset
# Initialize the plot
fig, ax1 = plt.subplots(figsize=(16,9))
# Set the x-axes ticks for the plot
ax1.set_xticks(latent_dims)
# Use the same x-axis to plot the KL loss
ax2 = ax1.twinx()
# Plot the MSE values collected above
ax1.errorbar(latent_dims, delta_recon_mean, yerr=delta_recon_stderr,
linestyle='dashed', marker='o', markersize=10,
elinewidth=3, capsize=4.0, color="blue",
label=r"Average per sample $\Delta$ MSE Loss")
# Plot the KL values collected above
ax2.errorbar(latent_dims, delta_kl_mean, yerr=delta_kl_stderr,
linestyle='dashed', marker='o', markersize=10,
elinewidth=3, capsize=4.0, color="red",
label=r"Average per sample $\Delta$ KL Loss")
# Setup plot characteristics
ax1.tick_params(axis="x", labelsize=25)
ax1.set_xlabel("Number of latent dimensions", fontsize=25)
ax1.set_ylabel(r"$\Delta$ MSE Loss", fontsize=25, color="blue")
ax1.tick_params(axis="y", labelsize=25, colors="blue")
ax2.set_ylabel(r"$\Delta$ KL Loss", fontsize=25, color="red")
ax2.tick_params(axis="y", labelsize=25, colors="red")
plt.margins(0.2)
ax1.grid(True)
ax2.grid(True)
lgd = fig.legend(prop={"size":25}, loc='center', bbox_to_anchor=(0.5, 0.75))
plt.savefig("figures/latent_dimensions_vs_delta_differenced_loss.pdf", format="pdf", dpi=600, bbox_inches="tight")
###Output
_____no_output_____ |
4-assets/BOOKS/Jupyter-Notebooks/00-Collections-Module.ipynb | ###Markdown
______Content Copyright by Pierian Data. Collections Module: the collections module is a built-in module that implements specialized container data types providing alternatives to Python’s general purpose built-in containers. We've already gone over the basics: dict, list, set, and tuple. Now we'll learn about the alternatives that the collections module provides. Counter: *Counter* is a *dict* subclass which helps count hashable objects. Inside of it, elements are stored as dictionary keys and the counts of the objects are stored as the value. Let's see how it can be used:
###Code
from collections import Counter
###Output
_____no_output_____
###Markdown
**Counter() with lists**
###Code
lst = [1,2,2,2,2,3,3,3,1,2,1,12,3,2,32,1,21,1,223,1]
Counter(lst)
###Output
_____no_output_____
###Markdown
**Counter with strings**
###Code
Counter('aabsbsbsbhshhbbsbs')
###Output
_____no_output_____
###Markdown
**Counter with words in a sentence**
###Code
s = 'How many times does each word show up in this sentence word times each each word'
words = s.split()
Counter(words)
# Methods with Counter()
c = Counter(words)
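# Hedged aside (not part of the original lesson): a few other handy Counter operations
print(sum(c.values()))           # total of all counts
print(list(c))                   # unique elements
print(c.most_common()[:-3:-1])   # 2 least common elements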
c.most_common(2)
###Output
_____no_output_____
###Markdown
Common patterns when using the Counter() object: sum(c.values()) - total of all counts; c.clear() - reset all counts; list(c) - list unique elements; set(c) - convert to a set; dict(c) - convert to a regular dictionary; c.items() - convert to a list of (elem, cnt) pairs; Counter(dict(list_of_pairs)) - convert from a list of (elem, cnt) pairs; c.most_common()[:-n-1:-1] - n least common elements; c += Counter() - remove zero and negative counts. defaultdict: defaultdict is a dictionary-like object which provides all methods provided by a dictionary but takes a first argument (default_factory) as a default data type for the dictionary. Using defaultdict is faster than doing the same with the dict.setdefault method. **A defaultdict will never raise a KeyError. Any key that does not exist gets the value returned by the default factory.**
###Code
from collections import defaultdict
d = {}
d['one']
d = defaultdict(object)
d['one']
for item in d:
print(item)
###Output
one
###Markdown
Can also initialize with default values:
###Code
d = defaultdict(lambda: 0)
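# any missing key now gets the value 0 from the default factory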
d['one']
###Output
_____no_output_____
###Markdown
namedtuple: The standard tuple uses numerical indexes to access its members, for example:
###Code
t = (12,13,14)
t[0]
###Output
_____no_output_____
###Markdown
For simple use cases, this is usually enough. On the other hand, remembering which index should be used for each value can lead to errors, especially if the tuple has a lot of fields and is constructed far from where it is used. A namedtuple assigns names, as well as the numerical index, to each member. Each kind of namedtuple is represented by its own class, created by using the namedtuple() factory function. The arguments are the name of the new class and a string containing the names of the elements. You can basically think of namedtuples as a very quick way of creating a new object/class type with some attribute fields. For example:
###Code
from collections import namedtuple
Dog = namedtuple('Dog',['age','breed','name'])
sam = Dog(age=2,breed='Lab',name='Sammy')
frank = Dog(age=2,breed='Shepard',name="Frankie")
###Output
_____no_output_____
###Markdown
We construct the namedtuple by first passing the object type name (Dog) and then passing the field names, either as a list (as in the code above) or as a single space-separated string. We can then call on the various attributes:
###Code
sam
sam.age
sam.breed
sam[0]
###Output
_____no_output_____ |
NLP (Naive_Bayes) for Twitter Event Prediction.ipynb | ###Markdown
###Code
!pip install utils
import pandas as pd
import numpy as np
import tensorflow as tf
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.layers import Embedding, LSTM, Dense, Bidirectional
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam
import nltk
from nltk.corpus import twitter_samples
import matplotlib.pyplot as plt
import random
nltk.download('stopwords')
import re
import string
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from nltk.tokenize import TweetTokenizer
#from utils import lookup
import inspect
def lookup(freqs, word, label):
    # Return how many times the (word, label) pair appears in the frequency table, 0 if unseen
    n = 0
    pair = (word, label)
    if (pair in freqs):
        n = freqs[pair]
    return n
def process_tweet(tweet):
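    # Clean and tokenize a tweet: remove stock tickers, retweet marks, URLs and '#',
    # lowercase and tokenize, drop English stopwords and punctuation, then stem each token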
stemmer = PorterStemmer()
stopwords_english = stopwords.words('english')
tweet = re.sub(r'\$\w*', '', tweet)
tweet = re.sub(r'^RT[\s]+', '', tweet)
tweet = re.sub(r'https?:\/\/.*[\r\n]*', '', tweet)
tweet = re.sub(r'#', '', tweet)
tokenizer = TweetTokenizer(preserve_case=False, strip_handles=True,reduce_len=True)
tweet_tokens = tokenizer.tokenize(tweet)
tweets_clean = []
for word in tweet_tokens:
if (word not in stopwords_english and
word not in string.punctuation):
stem_word = stemmer.stem(word) # stemming word
tweets_clean.append(stem_word)
return tweets_clean
from google.colab import drive
drive.mount('/content/drive')
df = pd.read_csv('/content/drive/MyDrive/nlp-getting-started/train.csv')
df2 = pd.read_csv('/content/drive/MyDrive/nlp-getting-started/test.csv')
df.head()
train_x = df['text']
train_y = df['target']
test_x = df2['text']
def count_tweets(result, tweets, ys):
'''
Input:
result: a dictionary that will be used to map each pair to its frequency
tweets: a list of tweets
ys: a list corresponding to the sentiment of each tweet (either 0 or 1)
Output:
result: a dictionary mapping each pair to its frequency
'''
for y, tweet in zip(ys, tweets):
for word in process_tweet(tweet):
pair = (word,y)
if pair in result:
result[pair] += 1
else:
result[pair] = 1
return result
freqs = count_tweets({}, train_x, train_y)
def train_naive_bayes(freqs, train_x, train_y):
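    # Estimate the Naive Bayes parameters from the frequency table:
    #   logprior      = log(D_pos) - log(D_neg), the log ratio of class document counts
    #   loglikelihood = log P(word|pos) - log P(word|neg) per word, with add-1 (Laplace) smoothing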
loglikelihood = {}
logprior = 0
vocab = set([pair[0] for pair in freqs.keys()])
V = len(vocab)
N_pos = N_neg = 0
for pair in freqs.keys():
if pair[1] > 0:
N_pos += freqs[pair]
else:
N_neg += freqs[pair]
D = len(train_y)
D_pos = (len(list(filter(lambda x: x > 0, train_y))))
D_neg = (len(list(filter(lambda x: x <= 0, train_y))))
logprior = np.log(D_pos) - np.log(D_neg)
for word in vocab:
freq_pos = lookup(freqs,word,1)
freq_neg = lookup(freqs,word,0)
p_w_pos = (freq_pos + 1) / (N_pos + V)
p_w_neg = (freq_neg + 1) / (N_neg + V)
loglikelihood[word] = np.log(p_w_pos/p_w_neg)
return logprior, loglikelihood
def naive_bayes_predict(tweet, logprior, loglikelihood):
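    # Score a tweet by adding the log prior to the log-likelihood ratios of its known words;
    # a score above 0 favours the positive (disaster) class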
word_l = process_tweet(tweet)
p = 0
p += logprior
for word in word_l:
if word in loglikelihood:
p += loglikelihood[word]
return p
logprior, loglikelihood = train_naive_bayes(freqs, train_x, train_y)
print(logprior)
print(len(loglikelihood))
my_tweet = 'There is a wildfire'
p = naive_bayes_predict(my_tweet, logprior, loglikelihood)
print('The expected output is', p)
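# Classify every test tweet: a positive log-odds score predicts a disaster (1), otherwise 0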
result = []
for i in test_x:
p = naive_bayes_predict(i, logprior, loglikelihood)
if p>0:
result.append(1)
else:
result.append(0)
id = pd.DataFrame(df2['id'])
preds = pd.DataFrame(result, columns=['target'])
final = pd.concat([id, preds], axis=1)
final.to_csv('final_submission.csv',index=False)
###Output
_____no_output_____ |
python-tuts/1-intermediate/01 - Sequences/02 - Mutable Sequence Types.ipynb | ###Markdown
Mutable Sequences When dealing with mutable sequences, we have a few more things we can do - essentially adding, removing and replacing elements in the sequence. This **mutates** the sequence. The sequence's memory address has not changed, but the internal **state** of the sequence has. Replacing Elements We can replace a single element as follows:
###Code
l = [1, 2, 3, 4, 5]
print(id(l))
l[0] = 'a'
print(id(l), l)
###Output
140473999516160
140473999516160 ['a', 2, 3, 4, 5]
###Markdown
We can remove all elements from the sequence:
###Code
l = [1, 2, 3, 4, 5]
l.clear()
print(l)
###Output
[]
###Markdown
Note that this is **NOT** the same as doing this:
###Code
l = [1, 2, 3, 4, 5]
l = []
print(l)
###Output
[]
###Markdown
The net effect may look the same, `l` is an empty list, but observe the memory addresses:
###Code
l = [1, 2, 3, 4, 5]
print(id(l))
l.clear()
print(l, id(l))
###Output
140473999743680
[] 140473999743680
###Markdown
vs
###Code
l = [1, 2, 3, 4, 5]
print(id(l))
l = []
print(l, id(l))
###Output
140473999528640
[] 140473999516672
###Markdown
In the second case you can see that the object referenced by `l` has changed, but not in the first case. Why might this be important? Suppose you have the following setup:
###Code
suits = ['Spades', 'Hearts', 'Diamonds', 'Clubs']
alias = suits
suits = []
print(suits, alias)
###Output
[] ['Spades', 'Hearts', 'Diamonds', 'Clubs']
###Markdown
But using clear:
###Code
suits = ['Spades', 'Hearts', 'Diamonds', 'Clubs']
alias = suits
suits.clear()
print(suits, alias)
###Output
[] []
###Markdown
Big difference!! We can also replace elements using slicing and extended slicing. Here's an example, but we'll come back to this in a lot of detail:
###Code
l = [1, 2, 3, 4, 5]
print(id(l))
l[0:2] = ['a', 'b', 'c', 'd', 'e']
print(id(l), l)
###Output
140473999525440
140473999525440 ['a', 'b', 'c', 'd', 'e', 3, 4, 5]
###Markdown
Appending and Extending We can also append elements to the sequence (note that this is **not** the same as concatenation):
###Code
l = [1, 2, 3]
print(id(l))
l.append(4)
print(l, id(l))
###Output
140473999797056
[1, 2, 3, 4] 140473999797056
###Markdown
If we had "appended" the value `4` using concatenation:
###Code
l = [1, 2, 3]
print(id(l))
l = l + [4]
print(id(l), l)
###Output
140473999794624
140473999797056 [1, 2, 3, 4]
###Markdown
If we want to add more than one element at a time, we can extend a sequence with the contents of any iterable (not just sequences):
###Code
l = [1, 2, 3, 4, 5]
print(id(l))
l.extend({'a', 'b', 'c'})
print(id(l), l)
###Output
140473999525440
140473999525440 [1, 2, 3, 4, 5, 'b', 'a', 'c']
###Markdown
Of course, since we extended using a set, there was no guarantee of positional ordering. If we extend with another sequence, then positional ordering is retained:
###Code
l = [1, 2, 3]
l.extend(('a', 'b', 'c'))
print(l)
###Output
[1, 2, 3, 'a', 'b', 'c']
###Markdown
Removing Elements We can remove (and retrieve at the same time) an element from a mutable sequence:
###Code
l = [1, 2, 3, 4]
print(id(l))
popped = l.pop(1)
print(id(l), popped, l)
###Output
140473999772160
140473999772160 2 [1, 3, 4]
###Markdown
If we do not specify an index for `pop`, then the **last** element is popped:
###Code
l = [1, 2, 3, 4]
popped = l.pop()
print(popped)
print(id(l), popped, l)
###Output
4
140473999776192 4 [1, 2, 3]
###Markdown
Inserting Elements We can insert an element at a specific index. What this means is that the element we are inserting will be **at** that index position, and the element that was at that position and all the remaining elements to the right are shifted one position to the right:
###Code
l = [1, 2, 3, 4]
print(id(l))
l.insert(1, 'a')
print(id(l), l)
###Output
140473999528448
140473999528448 [1, 'a', 2, 3, 4]
###Markdown
Reversing a Sequence We can also do in-place reversal:
###Code
l = [1, 2, 3, 4]
print(id(l))
l.reverse()
print(id(l), l)
###Output
140473999776320
140473999776320 [4, 3, 2, 1]
###Markdown
We can also reverse a sequence using extended slicing (we'll come back to this later):
###Code
l = [1, 2, 3, 4]
l[::-1]
###Output
_____no_output_____
###Markdown
But this is **NOT** mutating the sequence - the slice is returning a **new** sequence - that happens to be reversed.
###Code
l = [1, 2, 3, 4]
print(id(l))
l = l[::-1]
print(id(l), l)
###Output
140474000220608
140474000196096 [4, 3, 2, 1]
###Markdown
Copying Sequences We can create a copy of a sequence:
###Code
l = [1, 2, 3, 4]
print(id(l))
l2 = l.copy()
print(id(l2), l2)
###Output
140473999743360
140473999776192 [1, 2, 3, 4]
###Markdown
Note that the `id` of `l` and `l2` is not the same. In this case, using slicing does work the same as using the `copy` method:
###Code
l = [1, 2, 3, 4]
print(id(l))
l2 = l[:]
print(id(l2), l2)
###Output
140473999757760
140473999506816 [1, 2, 3, 4]
|
notebooks/primary_analysis.ipynb | ###Markdown
Part 1 - Load and Validate the Data; Part 2 - Joining (Merging) Dataframes; Part 3 - Data Exploration; Part 4 - Visualizations. **Part I - Load and Validate the Data** * Load the data as a pandas data frame. * Validate that it has the appropriate number of observations (checking the raw file, and also reading the dataset description from data.world [MovieLens](https://grouplens.org/datasets/movielens/latest/)).
###Code
"""
Import Statments:
"""
#classics
import pandas as pd
import os
#plotting
import matplotlib.pyplot as plt
import seaborn as sns
from wordcloud import WordCloud, STOPWORDS
pwd
ls
###Output
[34mData[m[m/ README.md [34mnotebooks[m[m/
LICENSE application.py predict_test.ipynb
###Markdown
Reading Movies & Ratings data:
###Code
data_path = '/Users/jorge/CS-Data-Science-Build-Week-1/data'
movies_path = '/Users/jorge/CS-Data-Science-Build-Week-1/data/movies.csv'
ratings_path = '/Users/jorge/CS-Data-Science-Build-Week-1/data/ratings.csv'
movies = pd.read_csv(
os.path.join(data_path, movies_path),
usecols=['movieId', 'title'],
dtype={'movieId': 'int32', 'title': 'str'})
ratings = pd.read_csv(
os.path.join(data_path, ratings_path),
usecols=['userId', 'movieId', 'rating'],
dtype={'userId': 'int32', 'movieId': 'int32', 'rating': 'float32'})
###Output
_____no_output_____
###Markdown
Movies
###Code
print(movies.shape)
movies.head()
###Output
(9742, 2)
###Markdown
Ratings
###Code
print(ratings.shape)
ratings.head()
###Output
(100836, 3)
###Markdown
**Part II - Joining (Merging) Dataframes** * movies: shows information about movies, namely a unique movieID * ratings: shows the rating that a particular userID gave to a particular movieID * This was accomplished by "joining" (or "merging") the DataFrames using the Pandas merge function:
###Code
movies.columns
ratings.columns
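# merge on the shared movieId column (pandas defaults to an inner join on the common column)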
df = pd.merge(movies, ratings)
df.columns
print(df.shape)
df.head()
print(movies.shape)
print(ratings.shape)
print(df.shape)
###Output
(9742, 2)
(100836, 3)
(100836, 4)
###Markdown
**Part III - Data Exploration**
###Code
df.dtypes
# Here we can see the breakdown of the count, mean, std, min, and percentile by 25%,50%,75%
df.describe()
# A great way to get an overview of our data:
df.info()
df['title'].value_counts().head()
# nulls as missing values.
# We'll subset our data by choosing ALL the rows that have missing values.
print('The number of rows with Missing Values are: ')
df.isnull().any(axis=1).sum()
print(df.nunique())
# Here we are verifing the amount of content in this Netflix dataset
print('The number of titles in this dataset:',df['title'].nunique())
###Output
The number of titles in this dataset: 9719
###Markdown
EDA
###Code
print("Numerical describe of distribuition title")
print(df.groupby("title")["rating"].count())
print("Percentage of distribuition Type ")
print((df.groupby("title")["rating"].count() / len(df.title) * 100).round(decimals=2))
###Output
Numerical describe of distribuition title
title
'71 (2014) 1
'Hellboy': The Seeds of Creation (2004) 1
'Round Midnight (1986) 2
'Salem's Lot (2004) 1
'Til There Was You (1997) 2
'Tis the Season for Love (2015) 1
'burbs, The (1989) 17
'night Mother (1986) 1
(500) Days of Summer (2009) 42
*batteries not included (1987) 7
...All the Marbles (1981) 1
...And Justice for All (1979) 3
00 Schneider - Jagd auf Nihil Baxter (1994) 1
1-900 (06) (1994) 1
10 (1979) 4
10 Cent Pistol (2015) 2
10 Cloverfield Lane (2016) 14
10 Items or Less (2006) 3
10 Things I Hate About You (1999) 54
10 Years (2011) 1
10,000 BC (2008) 17
100 Girls (2000) 4
100 Streets (2016) 1
101 Dalmatians (1996) 47
101 Dalmatians (One Hundred and One Dalmatians) (1961) 44
101 Dalmatians II: Patch's London Adventure (2003) 1
101 Reykjavik (101 Reykjavík) (2000) 1
102 Dalmatians (2000) 9
10th & Wolf (2006) 1
10th Kingdom, The (2000) 2
..
Zero Dark Thirty (2012) 14
Zero Effect (1998) 15
Zero Theorem, The (2013) 3
Zero de conduite (Zero for Conduct) (Zéro de conduite: Jeunes diables au collège) (1933) 1
Zeus and Roxanne (1997) 1
Zipper (2015) 1
Zodiac (2007) 38
Zombeavers (2014) 2
Zombie (a.k.a. Zombie 2: The Dead Are Among Us) (Zombi 2) (1979) 2
Zombie Strippers! (2008) 1
Zombieland (2009) 53
Zone 39 (1997) 1
Zone, The (La Zona) (2007) 2
Zookeeper (2011) 4
Zoolander (2001) 54
Zoolander 2 (2016) 3
Zoom (2006) 1
Zoom (2015) 1
Zootopia (2016) 32
Zulu (1964) 4
Zulu (2013) 1
[REC] (2007) 8
[REC]² (2009) 3
[REC]³ 3 Génesis (2012) 2
anohana: The Flower We Saw That Day - The Movie (2013) 1
eXistenZ (1999) 22
xXx (2002) 24
xXx: State of the Union (2005) 5
¡Three Amigos! (1986) 26
À nous la liberté (Freedom for Us) (1931) 1
Name: rating, Length: 9719, dtype: int64
Percentage of distribuition Type
title
'71 (2014) 0.00
'Hellboy': The Seeds of Creation (2004) 0.00
'Round Midnight (1986) 0.00
'Salem's Lot (2004) 0.00
'Til There Was You (1997) 0.00
'Tis the Season for Love (2015) 0.00
'burbs, The (1989) 0.02
'night Mother (1986) 0.00
(500) Days of Summer (2009) 0.04
*batteries not included (1987) 0.01
...All the Marbles (1981) 0.00
...And Justice for All (1979) 0.00
00 Schneider - Jagd auf Nihil Baxter (1994) 0.00
1-900 (06) (1994) 0.00
10 (1979) 0.00
10 Cent Pistol (2015) 0.00
10 Cloverfield Lane (2016) 0.01
10 Items or Less (2006) 0.00
10 Things I Hate About You (1999) 0.05
10 Years (2011) 0.00
10,000 BC (2008) 0.02
100 Girls (2000) 0.00
100 Streets (2016) 0.00
101 Dalmatians (1996) 0.05
101 Dalmatians (One Hundred and One Dalmatians) (1961) 0.04
101 Dalmatians II: Patch's London Adventure (2003) 0.00
101 Reykjavik (101 Reykjavík) (2000) 0.00
102 Dalmatians (2000) 0.01
10th & Wolf (2006) 0.00
10th Kingdom, The (2000) 0.00
...
Zero Dark Thirty (2012) 0.01
Zero Effect (1998) 0.01
Zero Theorem, The (2013) 0.00
Zero de conduite (Zero for Conduct) (Zéro de conduite: Jeunes diables au collège) (1933) 0.00
Zeus and Roxanne (1997) 0.00
Zipper (2015) 0.00
Zodiac (2007) 0.04
Zombeavers (2014) 0.00
Zombie (a.k.a. Zombie 2: The Dead Are Among Us) (Zombi 2) (1979) 0.00
Zombie Strippers! (2008) 0.00
Zombieland (2009) 0.05
Zone 39 (1997) 0.00
Zone, The (La Zona) (2007) 0.00
Zookeeper (2011) 0.00
Zoolander (2001) 0.05
Zoolander 2 (2016) 0.00
Zoom (2006) 0.00
Zoom (2015) 0.00
Zootopia (2016) 0.03
Zulu (1964) 0.00
Zulu (2013) 0.00
[REC] (2007) 0.01
[REC]² (2009) 0.00
[REC]³ 3 Génesis (2012) 0.00
anohana: The Flower We Saw That Day - The Movie (2013) 0.00
eXistenZ (1999) 0.02
xXx (2002) 0.02
xXx: State of the Union (2005) 0.00
¡Three Amigos! (1986) 0.03
À nous la liberté (Freedom for Us) (1931) 0.00
Name: rating, Length: 9719, dtype: float64
###Markdown
**Part IV - Visualizations**
###Code
print("Top 10 Rating by user")
print(df["rating"].value_counts().head(10))
plt.figure(figsize=(8,6))
#Total rating distribuition
g = sns.distplot(df["rating"], bins=20)
g.set_title("Rating distribuition", size = 20)
g.set_xlabel('Rating', fontsize=15)
# Here we learned to make a word cloud!
# We used the 'title' to see and help if the ratings meet the demand within
# The MovieLens library. Max out 1000 words..
# This graph is in the blog.
# Here are the most popular shows:
plt.figure(figsize=(10,10))
wordcloud = WordCloud(max_font_size=500, max_words=1000, background_color="white",width=2000,height=1000).generate(" ".join(df['title']))
plt.imshow(wordcloud, interpolation="bilinear")
plt.axis("off")
plt.show()
# This did not make the cut for blog post.
# Heatmap of all of the dataframe via df.corr()
# This was a test to see if I can make a heatmap
# after watching a video or two this was the outcome:
%matplotlib inline
plt.figure(figsize=(8,8))
sns.heatmap(df.corr(),annot =True, linewidth=0.5,cmap='YlGnBu',vmin=0, vmax=0.5)
plt.show()
###Output
_____no_output_____ |
notebooks/enzyme/RNN_regression.ipynb | ###Markdown
Loading preprocessed data
###Code
import pandas as pd
PATH = "saved_models/attention/version_1/version.cpkt"
file_path = "regression.csv"
data = pd.read_csv(file_path, sep=',', header=(0), skipinitialspace=True)
data['Y'].max(), data['Y'].min()
norm_data = data.copy()
#norm_data['Y']=(norm_data['Y']-norm_data['Y'].min())/(norm_data['Y'].max()-norm_data['Y'].min())
norm_data['Y']=(norm_data['Y']-norm_data['Y'].mean())/norm_data['Y'].std()
norm_data['Y'].max(), norm_data['Y'].min()
norm_data.iloc[:,:-1] = norm_data.iloc[:,:-1].astype(int)
from sklearn.model_selection import train_test_split
train, test = train_test_split(norm_data, test_size=0.2)
train.shape
train_data = train.iloc[:,:-1].values
train_label = train.iloc[:,-1].values
val_data = test.iloc[:,:-1].values
val_label = test.iloc[:,-1].values
train_data.shape, train_label.shape, val_data.shape, val_label.shape
train_data[0], train_label[0]
import numpy as np
NUM_OF_ACIDS = 4
EMBEDDING_SIZE = 32
NUM_CLASSES = np.amax(val_label, axis=0)+1
NUM_EPOCH=5
###Output
_____no_output_____
###Markdown
Model One hot encoding
###Code
import tensorflow as tf
tf.__version__
###Output
_____no_output_____
###Markdown
Training
###Code
def train_input():
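    # Shuffle, batch (size 64) and repeat the training pairs for NUM_EPOCH epochs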
return (tf.data.Dataset.from_tensor_slices((train_data, train_label))
.shuffle(buffer_size=10000, reshuffle_each_iteration=True)
.batch(64)
.repeat(NUM_EPOCH))
def eval_input():
return (tf.data.Dataset.from_tensor_slices((val_data, val_label))
.batch(64).repeat(1))
def model(features, is_training):
    # Embed each integer-coded amino acid: [batch, time] -> [batch, time, EMBEDDING_SIZE]
    acid_embeddings = tf.get_variable("acid_embeddings", [NUM_OF_ACIDS, EMBEDDING_SIZE])
    embedded_acids = tf.nn.embedding_lookup(acid_embeddings, features)
    # Encode the embedded sequence with an LSTM (64 units is an arbitrary choice here)
    encoder_cell = tf.nn.rnn_cell.BasicLSTMCell(num_units=64)
    _, encoder_state = tf.nn.dynamic_rnn(encoder_cell, embedded_acids, dtype=tf.float32)
    # Regress a single normalized target value from the final hidden state
    x = tf.layers.dense(encoder_state.h, units=1)
    return tf.squeeze(x, axis=-1)
def model_fn(features, labels, mode, params):
"""The model_fn argument for creating an Estimator."""
if mode == tf.estimator.ModeKeys.PREDICT:
logits = model(features, is_training=False)
return tf.estimator.EstimatorSpec(
mode=tf.estimator.ModeKeys.PREDICT,
predictions=logits)
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.AdamOptimizer(learning_rate=1e-4)
logits = model(features, is_training=True)
loss = tf.losses.absolute_difference(labels=labels, predictions=logits)
return tf.estimator.EstimatorSpec(
mode=tf.estimator.ModeKeys.TRAIN,
loss=loss,
train_op=optimizer.minimize(loss, tf.train.get_or_create_global_step()))
if mode == tf.estimator.ModeKeys.EVAL:
logits = model(features, is_training=False)
loss = tf.losses.absolute_difference(labels=labels, predictions=logits)
return tf.estimator.EstimatorSpec(
mode=tf.estimator.ModeKeys.EVAL,
loss=loss)
enzyme_classifier = tf.estimator.Estimator(
model_fn=model_fn,
model_dir=PATH)
enzyme_classifier.train(input_fn=train_input)
eval_results = enzyme_classifier.evaluate(input_fn=eval_input)
print()
print('Evaluation results:\n\t%s' % eval_results)
def prod_input():
index = 10
print (val_data[0:index])
print (val_label[0:index])
return (tf.data.Dataset.from_tensor_slices((val_data[0:index], val_label[0:index])).batch(index))
predict = enzyme_classifier.predict(input_fn=prod_input)
print (predict)
for p in predict:
print(p)
###Output
<generator object Estimator.predict at 0x000002A493B3A6D0>
[[2 2 2 3 0 0 0 2 1 1 1 0 0 1 2 2 2 1 0 0]
[1 3 0 0 3 2 1 3 1 3 1 0 2 3 0 3 1 3 1 3]
[1 2 0 2 3 0 3 2 1 3 2 1 1 0 1 3 1 1 1 1]
[1 0 3 2 1 3 2 1 0 1 1 0 0 2 2 1 0 3 0 1]
[1 2 1 0 1 1 3 2 2 1 1 2 2 3 0 2 3 0 2 3]
[3 3 3 2 1 1 2 2 3 0 2 1 1 1 2 2 0 2 0 1]
[1 3 0 1 0 3 1 2 3 1 3 2 3 2 1 2 1 1 1 0]
[1 2 2 3 1 2 2 1 3 2 1 1 2 3 2 3 1 2 1 3]
[0 3 3 2 2 2 0 1 1 2 3 1 1 1 2 3 2 1 3 0]
[3 2 0 2 1 0 2 2 3 1 2 1 1 1 0 2 3 1 3 1]]
[ 1.8371417 -0.76188853 -1.00000062 -0.99341339 -1.06843164 1.06686331
-0.74164628 0.72892113 0.36989805 0.81223369]
INFO:tensorflow:Restoring parameters from saved_models/regression/version_1/version.cpkt\model.ckpt-4005
0.22486427
0.11091128
-0.090491116
0.014593732
0.048556305
-0.07160731
0.0857504
-0.035996374
-0.06441446
-0.27501512
|
Single-word-transformations.ipynb | ###Markdown
Copyright 2021 Andrew M. Olney and made available under [CC BY-SA](https://creativecommons.org/licenses/by-sa/4.0) for text and [Apache-2.0](http://www.apache.org/licenses/LICENSE-2.0) for code. Single word transformations: In [Text as data](Text-as-data.ipynb), we discussed how we can think about words as reflecting latent variables. From this perspective, the words themselves are not what we are interested in - rather they are indicators of some latent variable of interest. In this notebook, we expand on this idea by considering transformations on words, token for token, that take us closer, in some sense, to the latent variable we care about. What you will learn: You will learn about transformations of single words that can be used as features in predictive models. We will cover: - Parts of speech - Stemming and lemmatization - Dictionary and regular expression tagging. When to use single word transformations: Single word transformations can be thought of as a kind of preprocessing that converts words into features. The transformations we discuss take a nontrivial amount of data to learn, so they can also be viewed as a kind of data augmentation (or reduction) that is useful when you are working with a small amount of data: by redescribing your text data using a smaller set of tokens, you are making it easier for your model to learn the structure of the data. While some transformations we'll cover are fairly theory-neutral, we will also describe transformations that allow you to impose your own theory regarding the latent variables behind the data. Parts of speech: We can infer parts of speech through a process called part of speech tagging. Every word has one and only one part of speech (i.e. a single feature), but there are many part of speech categories. Below is a list of Penn Treebank tags, which are widely used, to give you a sense of the part of speech tagging problem. Note that some tagging systems have more tags and some have fewer.
|Tag|Gloss|
|:---|:---|
|CC|Coordinating conjunction|
|CD|Cardinal number|
|DT|Determiner|
|EX|Existential there|
|FW|Foreign word|
|IN|Preposition or subordinating conjunction|
|JJ|Adjective|
|JJR|Adjective, comparative|
|JJS|Adjective, superlative|
|LS|List item marker|
|MD|Modal|
|NN|Noun, singular or mass|
|NNS|Noun, plural|
|NNP|Proper noun, singular|
|NNPS|Proper noun, plural|
|PDT|Predeterminer|
|POS|Possessive ending|
|PRP|Personal pronoun|
|PRP\$|Possessive pronoun|
|RB|Adverb|
|RBR|Adverb, comparative|
|RBS|Adverb, superlative|
|RP|Particle|
|SYM|Symbol|
|TO|to|
|UH|Interjection|
|VB|Verb, base form|
|VBD|Verb, past tense|
|VBG|Verb, gerund or present participle|
|VBN|Verb, past participle|
|VBP|Verb, non-3rd person singular present|
|VBZ|Verb, 3rd person singular present|
|WDT|Wh-determiner|
|WP|Wh-pronoun|
|WP\$|Possessive wh-pronoun|
|WRB|Wh-adverb|
|.|period|
|,|comma|
|:|colon|
|(|left separator|
|)|right separator|
|$|dollar sign|
|\`\`|open double quotes|
|''|close double quotes|
Let's start by importing `nltk` and storing the words that we will use in our running example: - `import nltk as nltk` - Set `words` to make list from text `"I like to fly more than a fly in the ointment . My tooth is aching ."` with delimiter `" "` (a single space)
###Code
import nltk as nltk
words = 'I like to fly more than a fly in the ointment . My tooth is aching .'.split(' ')
#<xml xmlns="https://developers.google.com/blockly/xml"><variables><variable id="sHgTZgsI9vJQ3M#lu+,B">nltk</variable><variable id="cc)LJ@o:#+Q/bjV+nw7E">words</variable></variables><block type="importAs" id="|Lgb)._R/(OoNvhctV=J" x="139" y="155"><field name="libraryName">nltk</field><field name="libraryAlias" id="sHgTZgsI9vJQ3M#lu+,B">nltk</field><next><block type="variables_set" id="Y`HIOjJh?SQ$=OMt4Pr5"><field name="VAR" id="cc)LJ@o:#+Q/bjV+nw7E">words</field><value name="VALUE"><block type="lists_split" id="LW@]%KZ*Y}C:2NUEomQ@"><mutation mode="SPLIT"></mutation><field name="MODE">SPLIT</field><value name="INPUT"><block type="text" id="45|G8`H+}-r%H.,1lqpB"><field name="TEXT">I like to fly more than a fly in the ointment . My tooth is aching .</field></block></value><value name="DELIM"><shadow type="text" id="BkEBlQE=V]{t?pMU|`%c"><field name="TEXT"> </field></shadow></value></block></value></block></next></block></xml>
###Output
_____no_output_____
###Markdown
To tag the words, do: - Set `pos` to with `nltk` do `pos_tag` using `words`- Display `pos`
###Code
pos = nltk.pos_tag(words)
pos
#<xml xmlns="https://developers.google.com/blockly/xml"><variables><variable id="U`QjFH|a^qhm])]1I$;@">pos</variable><variable id="sHgTZgsI9vJQ3M#lu+,B">nltk</variable><variable id="cc)LJ@o:#+Q/bjV+nw7E">words</variable></variables><block type="variables_set" id="~_HKg/pHwNNe1hYFjUZk" x="58" y="176"><field name="VAR" id="U`QjFH|a^qhm])]1I$;@">pos</field><value name="VALUE"><block type="varDoMethod" id="BMN2~lB}esR80hrHf_#T"><field name="VAR" id="sHgTZgsI9vJQ3M#lu+,B">nltk</field><field name="MEMBER">pos_tag</field><data>nltk:pos_tag</data><value name="INPUT"><block type="variables_get" id=",t9APXKJ5Yhm/t2ZaL-J"><field name="VAR" id="cc)LJ@o:#+Q/bjV+nw7E">words</field></block></value></block></value></block><block type="variables_get" id="xh)]qdv`jesXCXAU%[iP" x="57" y="237"><field name="VAR" id="U`QjFH|a^qhm])]1I$;@">pos</field></block></xml>
###Output
_____no_output_____
###Markdown
Notice that we have two senses of `fly` in this text.One is `to fly` (a verb) and the other is `insect fly` (a noun).This is one example use of part of speech, which is to disambiguate.For disambiguation, we could transform our words into `word/pos`:- Create a list with one element containing - for each item `i` in list `pos` - yield a freestyle `i[0] + "/" + i[1]`
###Code
[i[0] + "/" + i[1] for i in pos]
#<xml xmlns="https://developers.google.com/blockly/xml"><variables><variable id="K-[j~N(`e*]acWp:SRf{">i</variable><variable id="U`QjFH|a^qhm])]1I$;@">pos</variable></variables><block type="lists_create_with" id="..T%=|_^VRJTL`kjaSn(" x="8" y="60"><mutation items="1"></mutation><value name="ADD0"><block type="comprehensionForEach" id="eI@m:2o_oaXDe_A6h(If"><field name="VAR" id="K-[j~N(`e*]acWp:SRf{">i</field><value name="LIST"><block type="variables_get" id="C^.`U:$]*?]bnNC7nVWN"><field name="VAR" id="U`QjFH|a^qhm])]1I$;@">pos</field></block></value><value name="YIELD"><block type="dummyOutputCodeBlock" id="_g*G1XZ6S*Y7nl^ek~Yr"><field name="CODE">i[0] + "/" + i[1]</field></block></value></block></value></block></xml>
###Output
_____no_output_____
###Markdown
We can then use these transformed tokens in the place of words in later analysis, e.g. we could vectorize them.

Stemming and lemmatization

Stemming and lemmatization both address the problem of morphology, but in different ways. Stemming applies fairly simple rules to strip affixes (see [here](https://tartarus.org/martin/PorterStemmer/) for a description of the popular Porter stemmer). Lemmatization also seeks to remove affixes, and otherwise reduce words to their base form (e.g. `is` to `be`), but without creating non-words in the process, which stemming often does. In other words, if you want a fast approach that can create nonwords, stemming is fine; otherwise you want lemmatization.

There are [many stemmers](https://www.nltk.org/api/nltk.stem.html?highlight=stemming) available in NLTK; here we will demonstrate the famous Porter stemmer.

- Create variable `stemmer` and set to `with nltk create PorterStemmer`

NLTK doesn't have a similar diversity of lemmatizers - there's just the `WordNetLemmatizer`. This is possibly because a good lemmatizer needs access to a lexical database (like WordNet); otherwise, how could it know things like `be` is the base form of `is`?

- Create variable `lemmatizer` and set to `with nltk create WordNetLemmatizer`
###Code
stemmer = nltk.PorterStemmer()
lemmatizer = nltk.WordNetLemmatizer()
#<xml xmlns="https://developers.google.com/blockly/xml"><variables><variable id="gc[OXAJ:}?^trO)TCN#S">stemmer</variable><variable id="sHgTZgsI9vJQ3M#lu+,B">nltk</variable><variable id="aZCw6y4p!sn)Cw$X1ojp">lemmatizer</variable></variables><block type="variables_set" id="[sj83m4H-6.6Jzxp[$:d" x="15" y="249"><field name="VAR" id="gc[OXAJ:}?^trO)TCN#S">stemmer</field><value name="VALUE"><block type="varCreateObject" id="/F8MZ{fsYHeL6}c4r%)p"><field name="VAR" id="sHgTZgsI9vJQ3M#lu+,B">nltk</field><field name="MEMBER">PorterStemmer</field><data>nltk:PorterStemmer</data></block></value><next><block type="variables_set" id="=j$`NjMR@F]pC/vWJjFB"><field name="VAR" id="aZCw6y4p!sn)Cw$X1ojp">lemmatizer</field><value name="VALUE"><block type="varCreateObject" id="5L`9Lg_!?klsQH=nGlb1"><field name="VAR" id="sHgTZgsI9vJQ3M#lu+,B">nltk</field><field name="MEMBER">WordNetLemmatizer</field><data>nltk:WordNetLemmatizer</data></block></value></block></next></block></xml>
###Output
_____no_output_____
###Markdown
The lemmatizer needs part of speech to work correctly, otherwise it assumes everything is a noun. NLTK doesn't handle this well, so execute the code below to do the mapping for us.
###Code
def map_tag(tag):
if tag.startswith('J'):
return 'a'
elif tag.startswith('V'):
return 'v'
elif tag.startswith('R'):
return 'r'
else:
return 'n'
###Output
_____no_output_____
###Markdown
Let's compare their outputs:- print a list with one element containing - for each item `i` in list `words` - yield with `stemmer` to `stem` using `i`- print a list with one element containing - for each item `i` in list `pos` - yield with `lemmatizer` to `lemmatize` using a list containing - freestyle `i[0]` (the word) - freestyle `map_tag(i[1])` (the pos, mapped using the function above)
###Code
print([(stemmer.stem(i)) for i in words])
print([(lemmatizer.lemmatize(i[0], map_tag(i[1]))) for i in pos])
#<xml xmlns="https://developers.google.com/blockly/xml"><variables><variable id="K-[j~N(`e*]acWp:SRf{">i</variable><variable id="cc)LJ@o:#+Q/bjV+nw7E">words</variable><variable id="gc[OXAJ:}?^trO)TCN#S">stemmer</variable><variable id="U`QjFH|a^qhm])]1I$;@">pos</variable><variable id="aZCw6y4p!sn)Cw$X1ojp">lemmatizer</variable></variables><block type="text_print" id="VT`s0$yo./*;G`3Utl;[" x="67" y="134"><value name="TEXT"><shadow type="text" id="D|ngef97g0q4Yp?G~3+("><field name="TEXT">abc</field></shadow><block type="lists_create_with" id="}[B$VVkV,eLCd,;ybax}"><mutation items="1"></mutation><value name="ADD0"><block type="comprehensionForEach" id="g/M{1rT*bV3r7?-0dkmR"><field name="VAR" id="K-[j~N(`e*]acWp:SRf{">i</field><value name="LIST"><block type="variables_get" id="E/y?aR*D.LCQ*IJASFdA"><field name="VAR" id="cc)LJ@o:#+Q/bjV+nw7E">words</field></block></value><value name="YIELD"><block type="varDoMethod" id="]!Ow/xb#c|t[-C4tT2;~"><field name="VAR" id="gc[OXAJ:}?^trO)TCN#S">stemmer</field><field name="MEMBER">stem</field><data>i:</data><value name="INPUT"><block type="variables_get" id="USK=d/dWP/OpD7hGS(bA"><field name="VAR" id="K-[j~N(`e*]acWp:SRf{">i</field></block></value></block></value></block></value></block></value><next><block type="text_print" id="ZF#vpBn`Yo=184o-|m-j"><value name="TEXT"><shadow type="text" id="{d5gr^GWdYLq.+2O~D1!"><field name="TEXT">abc</field></shadow><block type="lists_create_with" id="*Ru~d3Ng[kEjZ~rN^@vf"><mutation items="1"></mutation><value name="ADD0"><block type="comprehensionForEach" id="YUUS}RShyry+ioEtv6q4"><field name="VAR" id="K-[j~N(`e*]acWp:SRf{">i</field><value name="LIST"><block type="variables_get" id="fQ8c6s3lO09lbBM?s2zm"><field name="VAR" id="U`QjFH|a^qhm])]1I$;@">pos</field></block></value><value name="YIELD"><block type="varDoMethod" id="OL/x56hpr7m}u:$XhDR*"><field name="VAR" id="aZCw6y4p!sn)Cw$X1ojp">lemmatizer</field><field name="MEMBER">lemmatize</field><data>i:</data><value name="INPUT"><block type="lists_create_with" id=":TvIV]_46$E8Ap#;%]*R"><mutation items="2"></mutation><value name="ADD0"><block type="dummyOutputCodeBlock" id="=]1kVhsPh30IaTigFV%~"><field name="CODE">i[0]</field></block></value><value name="ADD1"><block type="dummyOutputCodeBlock" id="jXZ(Gvks.QEs2QV_H3=6"><field name="CODE">map_tag(i[1])</field></block></value></block></value></block></value></block></value></block></value></block></next></block></xml>
###Output
['I', 'like', 'to', 'fli', 'more', 'than', 'a', 'fli', 'in', 'the', 'ointment', '.', 'My', 'tooth', 'is', 'ach', '.']
['I', 'like', 'to', 'fly', 'more', 'than', 'a', 'fly', 'in', 'the', 'ointment', '.', 'My', 'tooth', 'be', 'ache', '.']
['n', 'v', 'n', 'v', 'a', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'v', 'v', 'n']
###Markdown
There are 3 differences in how words are handled:

- `fli` vs `fly`
- `is` vs `be`
- `ach` vs `ache`

These are consistent with our expectations: **stemmers can return nonwords and miss base forms for irregular forms.**

Dictionary and regular expression tagging

So far the word transformations we've looked at are relatively theory neutral (if you'll excuse the particular tagset used in part of speech tagging). However, sometimes we want to do tagging according to our own theory. NLTK has some utility classes that allow us to define our own taggers fairly simply, using either dictionaries or regular expressions.

We've glancingly looked at dictionaries before: a **dictionary** is a variable that holds other variables, like a **list**. The difference between a dictionary and a list is that you can get things by **position in a list** but you get things by **name in a dictionary**. The name is typically called a **key** and the thing you stored is called a **value**. We can define a dictionary in many different ways, but the most convenient for us is to start with a list of tuples representing (key, value) pairs and then run the list through `dict` to make a dictionary.

Let's make a simple dictionary-based tagger that can work on our running example.

| Word | Tag |
|:--------|:-------|
| like | Affect |
| aching | Affect |
| I | Person |
| my | Person |

It's important to note that there's no limit on the number of words or classes we could use in the dictionary. Create the dictionary and give it to the tagger to use:

- Set `dictionary` to dict of a list of a list containing - A tuple for each entry in the table, where the first element (Word) is the key and the second element (Tag) is the value (both as strings)
- Set `tagger` to with `nltk` create `UnigramTagger` using freestyle `model=dictionary`
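As a tiny illustration of the position-versus-key difference (this snippet is only for illustration and isn't used below):

```python
pair_list = [("like", "Affect"), ("I", "Person")]  # a list of (key, value) tuples
lookup = dict(pair_list)                           # the same pairs as a dictionary
print(pair_list[0])    # list: access by position -> ('like', 'Affect')
print(lookup["like"])  # dict: access by key      -> 'Affect'
```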
###Code
dictionary = dict([('like','Affect'), ('aching','Affect'), ('I','Person'), ('my','Person')])
tagger = nltk.UnigramTagger(model=dictionary)
#<xml xmlns="https://developers.google.com/blockly/xml"><variables><variable id="4OkA/upbH!NKP#DwLQ}E">dictionary</variable><variable id="{3t%G%Q5rLV)rz:2c,Yg">tagger</variable><variable id="sHgTZgsI9vJQ3M#lu+,B">nltk</variable></variables><block type="variables_set" id="tD^8gW0/~Y^(+;8=bg(_" x="3" y="279"><field name="VAR" id="4OkA/upbH!NKP#DwLQ}E">dictionary</field><value name="VALUE"><block type="dictBlock" id="^[zw5Ls)E6hDn7R(_ozN"><value name="x"><block type="lists_create_with" id="QT(9-/+45@YdXbG@Y+,T"><mutation items="1"></mutation><value name="ADD0"><block type="lists_create_with" id="#-x[a7w92yQ,!{FJ!@CB"><mutation items="4"></mutation><value name="ADD0"><block type="tupleBlock" id="},c(jbT(Ig-):JcYJp.8"><value name="FIRST"><block type="text" id="aN}f]yCy6Eqrj8{*[JS3"><field name="TEXT">like</field></block></value><value name="SECOND"><block type="text" id="-qFHPe8Xx;V9gr!zl,;1"><field name="TEXT">Affect</field></block></value></block></value><value name="ADD1"><block type="tupleBlock" id="52qn-~6K-x*LYt]}zc]m"><value name="FIRST"><block type="text" id="[24JJpm7GAb$R@xde|ta"><field name="TEXT">aching</field></block></value><value name="SECOND"><block type="text" id="VyD|N4:w$3l*#;].w[j/"><field name="TEXT">Affect</field></block></value></block></value><value name="ADD2"><block type="tupleBlock" id="K/+d_/@];GR|2};Bzkm."><value name="FIRST"><block type="text" id="V^aF-50?5E}ZJEy@[iGv"><field name="TEXT">I</field></block></value><value name="SECOND"><block type="text" id="E*Bzdp@4_.rVAjST%ORO"><field name="TEXT">Person</field></block></value></block></value><value name="ADD3"><block type="tupleBlock" id="mlIYIFMZ!8NRVirF6iz$"><value name="FIRST"><block type="text" id=":^z1a|kk5RxaN9SUDu:s"><field name="TEXT">my</field></block></value><value name="SECOND"><block type="text" id="5/5|fI1h7B`IBxbU!e`J"><field name="TEXT">Person</field></block></value></block></value></block></value></block></value></block></value></block><block type="variables_set" id="a{z?s[rNDGmZyw8Z*hj~" x="32" y="466"><field name="VAR" id="{3t%G%Q5rLV)rz:2c,Yg">tagger</field><value name="VALUE"><block type="varCreateObject" id="2zq,D/1d1OCD6b)%vD,4"><field name="VAR" id="sHgTZgsI9vJQ3M#lu+,B">nltk</field><field name="MEMBER">UnigramTagger</field><data>nltk:UnigramTagger</data><value name="INPUT"><block type="dummyOutputCodeBlock" id="|CGNfhm5=]nr8kB!X*ED"><field name="CODE">model=dictionary</field></block></value></block></value></block></xml>
###Output
_____no_output_____
###Markdown
Now tag our text:- with `tagger` do `tag` using `words`
###Code
tagger.tag(words)
#<xml xmlns="https://developers.google.com/blockly/xml"><variables><variable id="{3t%G%Q5rLV)rz:2c,Yg">tagger</variable><variable id="cc)LJ@o:#+Q/bjV+nw7E">words</variable></variables><block type="varDoMethod" id="6GlJD++{XCVAkG_`xtUK" x="8" y="188"><field name="VAR" id="{3t%G%Q5rLV)rz:2c,Yg">tagger</field><field name="MEMBER">tag</field><data>tagger:tag</data><value name="INPUT"><block type="variables_get" id="C?S#j}!jz#@zPn]p*(id"><field name="VAR" id="cc)LJ@o:#+Q/bjV+nw7E">words</field></block></value></block></xml>
###Output
_____no_output_____
###Markdown
If we were only interested in words in our dictionary, we could filter out all words matching `None`, which is the default tag. Suppose we like the idea of dictionary tagging, but we want to capture multiple word forms that resolve to the same base word.One way to do this would be to use stemming/lemmatization first, followed by a dictionary tagger defined on the stems/lemmas.Another way is to combine the effect of stemming/lemmatization with the dictionary using **regular expressions**.Regular expressions are a notation for searching text for matching text.You've seen them before as wildcards, e.g. `*` or `.*`, however they can be much more complicated than that, e.g. `^H.+` matches an `H` at the start of a line, followed by at least one other character.There's more to regular expressions than makes sense for us to try to cover right now. [Here is a cheat sheet that covers the essentials](http://web.mit.edu/hackl/www/lab/turkshop/slides/regex-cheatsheet.pdf).A well-known program called [LIWC](http://liwc.wpengine.com/) defines [categories of words based on regular expressions](https://repositories.lib.utexas.edu/bitstream/handle/2152/31333/LIWC2015_LanguageManual.pdf)), and these are commonly used in the social sciences to transform words into more meaningful categories.We're going to use regular expressions to partially replicate the LIWC category of `Affect`, i.e. we will tag words that represent affect/emotion.The process for making a tagger based on regular expressions is nearly identical to making one with a dictionary; the only difference is that `dict` is not used, and regular expressions are used instead of words.Make the tagger:- Set `patterns` to a list of tuples where the first element is a regular expression (see below) and the second element is `"Affect"`. - Regular expressions: `"abandon.*"`, `"abuse.*"`, `"ache.*"`, `"aching"`, `"active.*"`, `"abusive"`- Set tagger to with `nltk` create `RegexpTagger` using patterns
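To see the pattern syntax in action before building the tagger, here is a tiny illustration using Python's built-in `re` module (not part of the tagger itself):

```python
import re
print(re.match(r'^H.+', 'Hello'))    # matches: 'H' at the start plus at least one more character
print(re.match(r'^H.+', 'H'))        # None: there is nothing after the 'H'
print(re.match(r'ache.*', 'aches'))  # matches: 'ache' followed by anything, including nothing
```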
###Code
patterns = [('abandon.*','Affect'), ('abuse.*','Affect'), ('ache.*','Affect'), ('aching','Affect'), ('active.*','Affect'), ('abusive','Affect')]
tagger = nltk.RegexpTagger(patterns)
#<xml xmlns="https://developers.google.com/blockly/xml"><variables><variable id="{3t%G%Q5rLV)rz:2c,Yg">tagger</variable><variable id="M^Y}Gmy;f?HQ$[nPsp+j">patterns</variable><variable id="sHgTZgsI9vJQ3M#lu+,B">nltk</variable></variables><block type="variables_set" id="pdC#N(zVL|0sG6AC7l8e" x="33" y="242"><field name="VAR" id="M^Y}Gmy;f?HQ$[nPsp+j">patterns</field><value name="VALUE"><block type="lists_create_with" id="wg#F}P#qId-L/guUEp0^"><mutation items="6"></mutation><value name="ADD0"><block type="tupleBlock" id="OyL93(-/sW$j3L~Inmm^"><value name="FIRST"><block type="text" id="JFvZDY/LnafV2UUiY*Tm"><field name="TEXT">abandon.*</field></block></value><value name="SECOND"><block type="text" id="|6Gh[or6uHrv9%.xl*5Q"><field name="TEXT">Affect</field></block></value></block></value><value name="ADD1"><block type="tupleBlock" id="Q/vqLzK/*IitkMbb{xEC"><value name="FIRST"><block type="text" id="PuxeBwe(0s4qh_20{l/9"><field name="TEXT">abuse.*</field></block></value><value name="SECOND"><block type="text" id="KXnCDfJ}Mo#4}nBxRpK*"><field name="TEXT">Affect</field></block></value></block></value><value name="ADD2"><block type="tupleBlock" id="G9U%JaEabqM96?9hM#?q"><value name="FIRST"><block type="text" id="K0C3Wc7C?O{*c9SfM2}#"><field name="TEXT">ache.*</field></block></value><value name="SECOND"><block type="text" id="{@o7nd6IP]S3KF@J$^lQ"><field name="TEXT">Affect</field></block></value></block></value><value name="ADD3"><block type="tupleBlock" id="L^Oo$tYEnwon]gG_Gf%Z"><value name="FIRST"><block type="text" id="B[XQ}(p3d|sUg#ExW{7P"><field name="TEXT">aching</field></block></value><value name="SECOND"><block type="text" id="7!7Grc6qL.S3Q~.]3*q]"><field name="TEXT">Affect</field></block></value></block></value><value name="ADD4"><block type="tupleBlock" id="a|B]ra~za,/=:;T)rOSg"><value name="FIRST"><block type="text" id="t0c(b{85rw!br;Uphp.~"><field name="TEXT">active.*</field></block></value><value name="SECOND"><block type="text" id="QU?|cn~Ji~hK=BON6#51"><field name="TEXT">Affect</field></block></value></block></value><value name="ADD5"><block type="tupleBlock" id="/a,s]-:tc1A2b@ils5zO"><value name="FIRST"><block type="text" id="yp|+wHafRYv4NYRKA-y8"><field name="TEXT">abusive</field></block></value><value name="SECOND"><block type="text" id="TP,b,9CAIEh/77[M)1E:"><field name="TEXT">Affect</field></block></value></block></value></block></value></block><block type="variables_set" id="9cJ2Pr@IscZEuDldNW:|" x="21" y="523"><field name="VAR" id="{3t%G%Q5rLV)rz:2c,Yg">tagger</field><value name="VALUE"><block type="varCreateObject" id=".e+S!DKKN=zd@95a[MLp"><field name="VAR" id="sHgTZgsI9vJQ3M#lu+,B">nltk</field><field name="MEMBER">RegexpTagger</field><data>nltk:RegexpTagger</data><value name="INPUT"><block type="variables_get" id="(|^:5?A~QUcLCcB1r+}`"><field name="VAR" id="M^Y}Gmy;f?HQ$[nPsp+j">patterns</field></block></value></block></value></block></xml>
###Output
_____no_output_____
###Markdown
Now tag our text:- with `tagger` do `tag` using `words`
###Code
tagger.tag(words)
#<xml xmlns="https://developers.google.com/blockly/xml"><variables><variable id="{3t%G%Q5rLV)rz:2c,Yg">tagger</variable><variable id="cc)LJ@o:#+Q/bjV+nw7E">words</variable></variables><block type="varDoMethod" id="6GlJD++{XCVAkG_`xtUK" x="8" y="188"><field name="VAR" id="{3t%G%Q5rLV)rz:2c,Yg">tagger</field><field name="MEMBER">tag</field><data>tagger:tag</data><value name="INPUT"><block type="variables_get" id="C?S#j}!jz#@zPn]p*(id"><field name="VAR" id="cc)LJ@o:#+Q/bjV+nw7E">words</field></block></value></block></xml>
###Output
_____no_output_____ |
Prediction_Electric_Consumption/3 - Development, and Evaluation of ML models.ipynb | ###Markdown
Development and Evaluation of ML Models

Because we are dealing with a critical electricity consumption (KWH) problem, we need a model with high performance. Here, I am going to try 4 machine learning algorithms, with the aim of finding the one that works best. In addition, we will compare the models' performance using several metrics before making the final choice. Since the project is a regression problem, the algorithms we will use are the following:

* K-Nearest Neighbor Regressor (KNN), Random Forest Regressor (RF), XGBoost Regressor (XGB), Linear Regression (LR)

Development of the ML model life cycle

1. Load cleaned data (train and test)
2. Development and evaluation of ML models
   $\;\;$ 2.1 K-Nearest Neighbor (KNN)
   $\;\;$ 2.2 Random Forest Regressor (RF)
   $\;\;$ 2.3 XGBoost Regressor (XGB)
   $\;\;$ 2.4 Linear Regression (LR)
3. Comparison between ML models on the test set and train set (cross validation)
4. Hyperparameter tuning
5. Conclusion
###Code
# Importing the libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
import warnings
warnings.filterwarnings('ignore')
from sklearn.neighbors import KNeighborsRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from xgboost import XGBRegressor
from sklearn import metrics
from datetime import datetime
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
###Output
_____no_output_____
###Markdown
1. Load cleaned data (train and test)
###Code
# load train and test data
# Train data
path = "data/"
final_df_train = pd.read_csv(path+"df_train_out.csv")
X_train = final_df_train.drop(columns = ["kwh"], axis=1)
y_train = final_df_train.kwh
# Test data
path = "data/"
final_df_test = pd.read_csv(path+"df_test_out.csv")
X_test = final_df_test.drop(columns = ["kwh"], axis=1)
y_test = final_df_test.kwh
###Output
_____no_output_____
###Markdown
2. Development and evaluation of ML models
###Code
# Timer function
def timer(start_time=None):
if not start_time:
start_time = datetime.now()
return start_time
elif start_time:
thour, temp_sec = divmod((datetime.now() - start_time).total_seconds(), 3600)
tmin, tsec = divmod(temp_sec, 60)
time = str(round(thour, 2))+":"+str(round(tmin, 2))+":"+str(round(tsec, 2))
        print('Time taken: %i hours %i minutes and %s seconds.' % (thour, tmin, round(tsec, 4)),"\n")
return time
# Metrics calculation function
def print_metrics(y_test,y_pred):
print('Results of the evaluation of model with different Measures on test set:',"\n")
mae = np.round(metrics.mean_absolute_error(y_test, y_pred), 3)
print('MAE:', mae)
rmse = np.round(np.sqrt(metrics.mean_squared_error(y_test, y_pred)), 3)
print('RMSE:', rmse)
mesdian_abs = np.round(metrics.median_absolute_error(y_test, y_pred), 3)
print('Median ABS Error:', mesdian_abs)
r2_score_test = np.round(metrics.r2_score(y_test,y_pred), 3)
print('R2 (score):', r2_score_test)
return mae, rmse, mesdian_abs, r2_score_test
# Function for running models and Metrics calculation
def run_model(model, X_train, y_train, X_test, y_test):
start_time = timer(None) # the train timer starts from this point
model.fit(X_train, y_train)
print("Training:","\n")
time_train = timer(start_time) # the train timer ends here
# Cross validation on train data set
kf = KFold(n_splits=5, random_state=None)
r2_mean_cv = (cross_val_score(model , X_train, y_train, scoring="r2", cv = kf)).mean()
print("Avg of R2 (score) based on Cross Validation with 5 folder for train set:", np.round(r2_mean_cv,3),"\n")
print("="*80)
start_time = timer(None) # the test timer starts from this point
y_pred = model.predict(X_test)
print("Test:","\n")
time_test = timer(start_time) # the test timer ends here
mae, rmse, mesdian_abs, r2_score_test = print_metrics(y_test,y_pred)
return mae, rmse, mesdian_abs, r2_score_test, r2_mean_cv, y_pred, model
###Output
_____no_output_____
###Markdown
2.1 K-Nearest Neighbor
###Code
# K-Nearest Neighbor
KNN_mae, KNN_rmse, KNN_mesdian_abs, KNN_r2_score_test, KNN_r2_mean_cv, KNN_y_pred, KNN_model = run_model(KNeighborsRegressor(n_neighbors=2),
X_train, y_train, X_test, y_test)
print("\n","="*80)
sns.regplot(x=y_test,y=KNN_y_pred,fit_reg=True)
# Comparison between the predicted and the actual consumption (KWH)
print("Comparison between the predicted and the actual consumption (KWH):")
df_res = pd.DataFrame({'Actual': np.round(y_test, 0), 'Predicted': np.round(KNN_y_pred, 0)})
df_res.head(5)
###Output
Training:
Time taken: 0 hours 0 minutes et 0.0122 seconds.
Avg of R2 (score) based on Cross Validation with 5 folder for train set: 0.947
================================================================================
Test:
Time taken: 0 hours 0 minutes et 0.0237 seconds.
Results of the evaluation of model with different Measures on test set:
MAE: 663.049
RMSE: 1191.641
Median ABS Error: 380.5
R2 (score): 0.972
================================================================================
Comparison between the predicted and the actual consumption (KWH):
###Markdown
2.2 Random Forest Regressor
###Code
# RandomForestRegressor
RF_mae, RF_rmse, RF_mesdian_abs, RF_r2_score_test, RF_r2_mean_cv, RF_y_pred, RF_model = run_model(RandomForestRegressor(random_state = 42),
X_train, y_train, X_test, y_test)
print("\n","="*80)
sns.regplot(x=y_test,y=RF_y_pred,fit_reg=True)
# Comparison between the predicted and the actual consumption (KWH)
print("Comparison between the predicted and the actual consumption (KWH)")
df_res = pd.DataFrame({'Actual': np.round(y_test, 0), 'Predicted': np.round(RF_y_pred, 0)})
df_res.head(5)
###Output
Training:
Time taken: 0 hours 0 minutes et 5.4463 seconds.
Avg of R2 (score) based on Cross Validation with 5 folder for train set: 0.971
================================================================================
Test:
Time taken: 0 hours 0 minutes et 0.0414 seconds.
Results of the evaluation of model with different Measures on test set:
MAE: 452.659
RMSE: 1024.258
Median ABS Error: 200.39
R2 (score): 0.979
================================================================================
Comparison between the predicted and the actual consumption (KWH)
###Markdown
2.3 XGBoost Regressor
###Code
# XGBoost Regressor
XGB_mae, XGB_rmse, XGB_mesdian_abs, XGB_r2_score_test, XGB_r2_mean_cv, XGB_y_pred, XGB_model = run_model(XGBRegressor(),
X_train, y_train, X_test, y_test)
print("\n","="*80)
sns.regplot(x=y_test,y=XGB_y_pred,fit_reg=True)
# Comparison between the predicted and the actual consumption (KWH)
print("Comparison between the predicted and the actual consumption (KWH)")
df_res = pd.DataFrame({'Actual': np.round(y_test, 0), 'Predicted': np.round(XGB_y_pred, 0)})
df_res.head(5)
###Output
Training:
Time taken: 0 hours 0 minutes et 0.3708 seconds.
Avg of R2 (score) based on Cross Validation with 5 folder for train set: 0.967
================================================================================
Test:
Time taken: 0 hours 0 minutes et 0.0015 seconds.
Results of the evaluation of model with different Measures on test set:
MAE: 448.705
RMSE: 962.753
Median ABS Error: 263.807
R2 (score): 0.982
================================================================================
Comparison between the predicted and the actual consumption (KWH)
###Markdown
2.4 Linear Regression
###Code
# Linear Regression
LR_mae, LR_rmse, LR_mesdian_abs, LR_r2_score_test, LR_r2_mean_cv, LR_y_pred, LR_model = run_model(LinearRegression(),
X_train, y_train, X_test, y_test)
print("\n","="*80)
sns.regplot(x=y_test,y=LR_y_pred,fit_reg=True)
# Comparison between the predicted and the actual consumption (KWH)
print("Comparison between the predicted and the actual consumption (KWH)")
df_res = pd.DataFrame({'Actual': np.round(y_test, 0), 'Predicted': np.round(LR_y_pred, 0)})
df_res.head(5)
###Output
Training:
Time taken: 0 hours 0 minutes et 0.0024 seconds.
Avg of R2 (score) based on Cross Validation with 5 folder for train set: 0.999
================================================================================
Test:
Time taken: 0 hours 0 minutes et 0.0011 seconds.
Results of the evaluation of model with different Measures on test set:
MAE: 79.881
RMSE: 224.436
Median ABS Error: 46.107
R2 (score): 0.999
================================================================================
Comparison between the predicted and the actual consumption (KWH)
###Markdown
3. Comparison between ML models on the test set and train set (cross validation)
###Code
models = ['K-Nearest Neighbor', 'Random Forest', 'XGBoost', 'Linear Regression']
r2_score_train = [KNN_r2_mean_cv, RF_r2_mean_cv, XGB_r2_mean_cv, LR_r2_mean_cv]
r2_score_test = [KNN_r2_score_test, RF_r2_score_test, XGB_r2_score_test, LR_r2_score_test]
RMSE = [KNN_rmse, RF_rmse, XGB_rmse, LR_rmse]
MAE = [KNN_mae, RF_mae, XGB_mae, LR_mae]
Median_ABS_Error = [KNN_mesdian_abs, RF_mesdian_abs, XGB_mesdian_abs, LR_mesdian_abs]
d = {'r2_score_train_CV': r2_score_train, 'r2_score_test': r2_score_test, 'RMSE': RMSE, 'MAE': MAE,
     'Median_ABS_Error': Median_ABS_Error}
df_metrics = pd.DataFrame(d, index = models)
df_metrics
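# Step 4 of the plan above (hyperparameter tuning) - a minimal sketch rather than the
# original author's code; the parameter grid here is only an illustrative assumption.
from sklearn.model_selection import GridSearchCV

param_grid = {"n_estimators": [100, 300], "max_depth": [None, 10, 20]}
grid = GridSearchCV(RandomForestRegressor(random_state=42), param_grid,
                    scoring="r2", cv=5, n_jobs=-1)
grid.fit(X_train, y_train)
print(grid.best_params_, grid.best_score_)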
###Output
_____no_output_____ |
lessons/lesson06-object-oriented-programming/inheritance.ipynb | ###Markdown
Inheritance

In the previous section, we covered what goes into creating a class. Creating and using classes and their methods can be beneficial in our programming tasks, but what happens when we create objects that are similar? We should strive to organize classes in a way such that common methods can be shared and don't need to be re-created for every class. This is where inheritance comes into play: we can create a _base_ (or parent) class which is used to create an _inherited_ (or child) class. As mentioned in the earlier section, let's use the example of vehicles.

Parent & Child Classes
###Code
class Vehicle:
def __init__(self, wheels=4):
self._wheels = wheels
def __str__(self):
return f"I am a Vehicle with {self._wheels} wheels"
def drive(self):
print("Vroom Vroom" * self._wheels)
class Motorbike(Vehicle):
def __init__(self, wheels=2, bikestand_active=False):
self._wheels = wheels
self.bikestand_active = bikestand_active
def drive(self):
print("Skrrrrrt" * self._wheels)
big_truck = Vehicle(wheels=16)
print(big_truck)
big_truck.drive()
bike = Motorbike()
print(bike)
bike.drive()
###Output
I am a Vehicle with 16 wheels
Vroom VroomVroom VroomVroom VroomVroom VroomVroom VroomVroom VroomVroom VroomVroom VroomVroom VroomVroom VroomVroom VroomVroom VroomVroom VroomVroom VroomVroom VroomVroom Vroom
I am a Vehicle with 2 wheels
SkrrrrrtSkrrrrrt
###Markdown
In the example above, you can tell that there are different `__init__` methods to create the two classes, but since Motorbike _inherits_ from Vehicle, it doesn't NEED to define the `__str__` function - it can just use the parent's. As you may suspect, if the Vehicle class had more functions, the Motorbike would be able to inherit/use them without issue. We can also see that when the child defines a method with the same name as one of the parent's, calling it runs only the child's method. You also have the ability to run the parent's implementation by the use of `super`.

super()
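The same mechanism also works inside `__init__`. Here is a quick sketch of how Motorbike could delegate to the parent constructor; this is an illustration only, not part of the lesson's code below:

```python
class Motorbike(Vehicle):
    def __init__(self, wheels=2, bikestand_active=False):
        super().__init__(wheels)              # let Vehicle's __init__ set self._wheels
        self.bikestand_active = bikestand_active
```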
###Code
class Vehicle:
def __init__(self, wheels=4):
self._wheels = wheels
def __str__(self):
return f"I am a Vehicle with {self._wheels} wheels"
def drive(self):
print("Vroom Vroom" * self._wheels)
class Motorbike(Vehicle):
def __init__(self, wheels=2, bikestand_active=False):
self._wheels = wheels
self.bikestand_active = bikestand_active
def drive(self):
print("Skrrrrrt" * self._wheels)
super().drive()
bike = Motorbike()
print(bike)
bike.drive()
###Output
I am a Vehicle with 2 wheels
SkrrrrrtSkrrrrrt
Vroom VroomVroom Vroom
|
code/models/models.ipynb | ###Markdown
ind = pd.Series(data.columns[-1])
ind = ind.append(data.columns[1:-1].to_series())
ind = ind.append(pd.Series(data.columns[0]))
data = data.reindex(columns=ind)
data.to_csv("../datasets/{}/{}.csv".format(folderName, fileName), index=False)
###Code
y = np.array(data['Goal'] - 1)
x = np.array(data.drop('Goal', axis=1))
###Output
_____no_output_____
###Markdown
ohe = OneHotEncoder()
non_numerical = x[:, -1]
x = np.delete(x, -1, axis=1)
x = np.append(x, ohe.fit_transform(non_numerical.reshape(-1, 1)).toarray(), axis=1)

data['Goal'] = pd.Series([re.search('\d+', label).group() for label in y])
data.to_csv("../datasets/{}/{}.csv".format(folderName, fileName), index=False)

x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3)
###Code
class DCModel(BaseEstimator, ClassifierMixin):
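    # Divide-and-conquer ordinal classifier: the label range 0..k-1 is split recursively,
    # and each tree node fits a binary logistic SGDClassifier deciding whether a sample's
    # label lies above the midpoint of that node's range; predict() walks the tree.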
def __init__(self, k):
self.k = k
self.nodes = [None] * (4 * k + 1)
def __fit_node_on_range(self, l, r, X, y):
ind = np.where(np.logical_and(y >= l, y <= r))
m = (l + r) // 2
x_in_range = X[ind]
y_in_range = np.where(y[ind] > m, 1, 0)
return SGDClassifier(loss='log').fit(x_in_range, y_in_range)
def fit(self, X, y):
def build(v, l, r):
if l == r:
return
self.nodes[v] = self.__fit_node_on_range(l, r, X, y)
m = (l + r) // 2
build(2 * v, l, m)
build(2 * v + 1, m + 1, r)
build(1, 0, self.k - 1)
return self
def predict(self, X):
def run_dc(x, v, l, r):
if l == r:
return np.full(x.shape[0], l)
m = (l + r) // 2
if(x.shape[0] == 0):
return np.array([])
pred_binary = self.nodes[v].predict(x)
indices_left = pred_binary == 0
indices_right = pred_binary == 1
preds_left = run_dc(x[indices_left], 2 * v, l, m)
preds_right = run_dc(x[indices_right], 2 * v + 1, m + 1, r)
pred = np.empty(x.shape[0])
pred[indices_left] = preds_left
pred[indices_right] = preds_right
return pred
return run_dc(X, 1, 0, self.k - 1)
class FHModel(BaseEstimator, ClassifierMixin):
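    # Frank & Hall-style ordinal classifier: k-1 binary logistic models estimate P(y > i);
    # predict() picks the class whose adjacent difference P(y > i-1) - P(y > i) is largest,
    # i.e. the class with the largest estimated probability mass.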
def __init__(self, k):
self.k = k
self.models = [None] * (k - 1)
def fit(self, X, y):
for i in range(self.k - 1):
y_relative = np.where(y > i, 1, 0)
self.models[i] = SGDClassifier(loss='log').fit(X, y_relative)
return self
def predict(self, X):
pred = np.array([model.predict_proba(X)[:, 1] for model in self.models]).T
r = np.append(pred, np.zeros((X.shape[0], 1)), axis=1)
l = np.insert(pred, 0, np.ones(X.shape[0]), axis=1)
return np.argmax(l - r, axis=1)
def MSE(y, y_pred):
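    # Mean squared difference between true and predicted class indices, computed from the
    # confusion matrix with a quadratic cost of (i - j)**2 for confusing class i with class j.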
conf_mat = confusion_matrix(y, y_pred)
m = y.shape[0]
diffs = [[None] * k for _ in range(k)]
for i in range(k):
for j in range(k):
diffs[i][j] = (i - j) ** 2
return 1 / m * np.sum(np.multiply(conf_mat, np.array(diffs)))
score = make_scorer(MSE, greater_is_better=False)
MSE_ovr = 0
MSE_dc = 0
MSE_fh = 0
for i in range(10):
print('Starting test number {}'.format(i))
dc = DCModel(k)
fh = FHModel(k)
ovr = SGDClassifier(loss='log')
MSE_ovr += cross_val_score(ovr, x, y, scoring=score, cv=10).mean()
MSE_dc += cross_val_score(dc, x, y, scoring=score, cv=10).mean()
MSE_fh += cross_val_score(fh, x, y, scoring=score, cv=10).mean()
print(-MSE_ovr / 10, -MSE_fh / 10, -MSE_dc / 10)
###Output
Starting test number 0
Starting test number 1
Starting test number 2
Starting test number 3
Starting test number 4
Starting test number 5
Starting test number 6
Starting test number 7
Starting test number 8
Starting test number 9
4.5218090399798285 4.420866157397922 4.873901598720394
|
SciPy/Clean your Data With NumPy and Pandas_1.ipynb | ###Markdown
Clean your Data With NumPy and Pandas
###Code
#Import Library
import numpy as np
import pandas as pd
# Read Data from your datasets folder
data = pd.read_csv('../datasets/clean-data/Book.csv')
# Create DataFrame from the CSV file
data.head() # To see First Five entery with column names
###Output
_____no_output_____
###Markdown
Data exploration and Basic Hygiene
###Code
# Print Missing value
print(data.isnull().sum())
# Drop columns with any missing values
c=data.dropna(axis='columns')
c.head()
# Missing value is not Found as it is removed
print(c.isnull().sum())
# Drop the rows where at least one element is missing.
data.dropna()
###Output
_____no_output_____
###Markdown
Link for data operations using pandas: https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.dropna.html

We can also remove columns by passing column names directly to the `columns` parameter: `data.drop(columns=drop_col, inplace=True)` followed by `data.head()`.
###Code
# drop unnessary columns
# inplace and axis make change to object
# axis =1 drop column wise axis =0 for row wise
drop_col = ['Edition Statement',
'Corporate Author',
'Corporate Contributors',
'Former owner',
'Engraver',
'Contributors',
'Issuance type',
'Shelfmarks']
data.drop(drop_col, inplace = True, axis = 1)
data.head()
# Missing value is not Found as it is removed
print(data.isnull().sum())
###Output
Identifier 0
Place of Publication 0
Date of Publication 181
Publisher 4195
Title 0
Author 1778
Flickr URL 0
dtype: int64
###Markdown
Change Index of Dataframe
###Code
# Check unique values in Columns
data['Identifier'].is_unique
data['Place of Publication'].is_unique
data['Date of Publication'].is_unique
data['Publisher'].is_unique
data['Title'].is_unique
data['Author'].is_unique
data['Flickr URL'].is_unique
# Replace the existing index with column name with unique value using set_index :
# To make change directly to object
data.set_index('Identifier', inplace=True)
data.head()
###Output
_____no_output_____
###Markdown
Alternatively, replace the existing index with a column of unique values using `set_index` without `inplace`: `data = data.set_index('Identifier')` followed by `data.head()`.
###Code
# Access each record with loc[]
data.loc[480]
data.iloc[480]
data.iloc[5]
###Output
_____no_output_____
###Markdown
We can see from the above that `loc[]` accesses data by index label whereas `iloc[]` accesses data by position: the record at index label 480 is the same as the record at position 5.
###Code
# All data types are in object dtype
data.get_dtype_counts()
data.loc[1905:, 'Date of Publication'].head(10)
###Output
_____no_output_____
###Markdown
As stated above, some books have multiple dates of publication, but a book can have only one. We need to enforce a numeric value for the date of publication so we can do calculations on it:

- Remove square brackets where present: 1879 [1878]
- Convert date ranges to their “start date” wherever present: 1860-63; 1839, 38-54
- Remove uncertain dates like [1897?] and replace them with NumPy’s NaN
- Convert the string NaN to NumPy’s NaN value
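To see what the extraction pattern used in the next cell does with these cases, here is a quick illustrative check (sample strings taken from the list above):

```python
import pandas as pd
samples = pd.Series(['1879 [1878]', '1860-63', '[1897?]', '1839, 38-54'])
print(samples.str.extract(r'^(\d{4})', expand=False))
# -> 1879, 1860, NaN (no leading four-digit year), 1839
```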
###Code
ex = data['Date of Publication'].str.extract(r'^(\d{4})', expand=False)
ex.head()
# Convert to numerical values
data['Date of Publication'] = pd.to_numeric(ex)
data['Date of Publication'].dtypes
data['Date of Publication'].isnull().sum() / len(data)
###Output
_____no_output_____
###Markdown
Use str Methods with NumPy to clean Columns

To clean Place of Publication, we use pandas str methods together with NumPy.
###Code
data['Place of Publication'].head()
data.loc[data['Place of Publication']== 'London']
data.loc[data['Place of Publication']== 'Oxford']
###Output
_____no_output_____
###Markdown
We see that some rows with Place of Publication of 'London' and 'Oxford' contain unnecessary extra information. To clean this column in one go, use str.contains() to get a boolean mask.
###Code
london = data['Place of Publication'].str.contains('London')
london.head()
oxford = data['Place of Publication'].str.contains('Oxford')
oxford.head()
# Combine all with NumPy
data['Place of Publication'] = np.where(london, 'London',
np.where(oxford, 'Oxford',
data['Place of Publication'].str.replace('-', ' ')))
data.head()
###Output
_____no_output_____ |
notebooks/2.LWA_atmos.ipynb | ###Markdown
Calculate local finite-amplitude wave activity

**22 March 2022 by MiniUFO**

---

[TOC]

---

1. Introduction

Waves play an important role in rearranging angular momentum in the atmosphere. This is expressed by the generalized Eliassen-Palm (E-P) relation:
$$\begin{align}\frac{\partial A}{\partial t}+\nabla\cdot\mathbf F=D+O(\alpha^3) \tag{1}\end{align}$$
where $A$ is the density of wave activity (negative angular pseudomomentum), $\mathbf F$ the generalized E-P flux for a slowly modulated **small-amplitude** wave, and $D$ the nonconservative effects on wave density. For a small-amplitude wave, $A$ is given as:
$$\begin{align}A=\frac{1}{2}\frac{\overline{q'^2}}{\partial \overline{q}/\partial y} \tag{2}\end{align}$$

[Nakamura and Zhu (2010)](https://journals.ametsoc.org/view/journals/atsc/67/9/2010jas3432.1.xml) have proposed a new definition of wave activity to account for **finite-amplitude waves** (FAWA), and [Huang and Nakamura (2016)](https://journals.ametsoc.org/view/journals/atsc/73/1/jas-d-15-0194.1.xml) extended FAWA to a locally defined quantity, called local finite-amplitude wave activity (LWA). A simple zonal mean of LWA recovers FAWA exactly. The key to LWA is to first sort the PV conservatively in the meridional direction and obtain a sorted PV profile, instead of a traditional zonal-mean state, to define the eddy portion. After some email discussions with Prof. Nakamura, I recently realized that **LWA defined in the horizontal $x-y$ plane is conceptually equivalent to local available potential energy (APE) in the vertical $x-z$ plane** (see this [notebook for local APE]()). Here we will demonstrate how to calculate LWA with the help of [xcontour](https://github.com/miniufo/xcontour). It will show that the whole calculation procedure is very clean and clear.

---

2. Theoretical guide

The LWA is defined, according to [Huang and Nakamura (2016)](https://journals.ametsoc.org/view/journals/atsc/73/1/jas-d-15-0194.1.xml), as:
$$\begin{align}\tilde A\left(x, y, t\right)=-\int_0^{\eta\left(x,y,t\right)}\left[q\left(x,y+y',t\right)-Q\left(y,t\right)\right]dy' \tag{3}\end{align}$$
where $\eta(x,y,t)$ represents the displacement of a tracer contour from its sorted place.

---

3. Calculation procedure

3.1 Load data

Here, for demonstration purposes, we repeat the result of [this notebook from Clare Huang's github repo](https://github.com/csyhuang/hn2016_falwa/blob/master/examples/simple/Example_barotropic.ipynb). The data, barotropic (absolute) vorticity over the globe, are also downloaded from there. First, we read in the data and calculate the conservative sorted state:
###Code
import xarray as xr
import numpy as np
import sys
sys.path.append('../../')
from xcontour.xcontour import Contour2D, latitude_lengths_at, add_latlon_metrics
################################## load data #######################################
dset = xr.open_dataset('../Data/barotropic_vorticity.nc')
# add metrics for xgcm.Grid
dset, grid = add_latlon_metrics(dset)
# get vorticity as a tracer
tracer = dset.absolute_vorticity
########################### calculate sorted state #################################
# Initialize equally-spaced contours from minimum value to maximum
# value (within lat/lon dims). Here will implicitly loop over each isentropic level
%matplotlib inline
N = 121 # increase the contour number may get non-monotonic A(q) relation
increase = True # Y-index increases with latitude (sometimes not)
lt = True # northward of PV contours (larger than) is inside the contour
# change this should not change the result of Keff, but may alter
# the values at boundaries
dtype = np.float32 # use float32 to save memory
undef = -9.99e8 # for maskout topography if present
# initialize a Contour2D analysis class using grid and tracer
analysis = Contour2D(grid, tracer,
dims={'X':'longitude','Y':'latitude'},
dimEq={'Y':'latitude'},
increase=increase,
lt=lt)
# evenly-spaced contours
ctr = analysis.cal_contours(N)
# Mask for A(q) relation table.
mask = xr.where(tracer!=undef, 1, 0).astype(dtype)
# calculate related quantities for
table = analysis.cal_area_eqCoord_table(mask) # A(Yeq) table
area = analysis.cal_integral_within_contours(ctr).rename('intArea')
latEq = table.lookup_coordinates(area).rename('latEq')
##################### interpolat to equivalent latitudes ###########################
# combined the results
ds_contour = xr.merge([ctr, area, latEq])
# interpolate from contour space to original latitudes
preLats = dset.latitude.astype(dtype)
# results in latEq space
ds_latEq = analysis.interp_to_dataset(preLats, latEq, ds_contour)
###Output
case 1: increase & lt
###Markdown
When we get the sorted profile of vorticity, we can calculate the LWA.
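Since a simple zonal mean of LWA recovers FAWA, a useful sanity check is
$$A\left(y,t\right)=\overline{\tilde A\left(x,y,t\right)},$$
where the overbar denotes the zonal mean; this is exactly the quantity plotted as FAWA $A$ in the last panel below.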
###Code
#%% calculate local finite-amplitude wave activity
lwa, contours, masks = analysis.cal_local_wave_activity(tracer, # the original vorticiy
ds_latEq.absolute_vorticity, # the sorted vorticity
mask_idx=[37,125,170,213]) # select these mask to see the area of LWA
###Output
_____no_output_____
###Markdown
Now we can take a look at the results.
###Code
import proplot as pplt
fig, axes = pplt.subplots(nrows=2, ncols=2, figsize=(9, 6), sharex=0, sharey=3)
fontsize = 12
ax = axes[0, 0]
m1=ax.pcolormesh(tracer*1e5, levels=np.linspace(-17, 17, 69), cmap='jet')
ax.colorbar(m1, loc='r', ticks=4, label='')
ax.set_title('absolute vorticity $q\\times 10^5$', fontsize=fontsize)
ax.set_xlabel('longitude', fontsize=fontsize-1)
ax.set_ylabel('latitude', fontsize=fontsize-1)
ax.set_xticks([0, 60, 120, 180, 240, 300, 360])
ax.set_yticks([-90, -60, -30, 0, 30, 60, 90])
ax = axes[0, 1]
m1=ax.pcolormesh(lwa, levels=np.linspace(0, 28, 57), cmap='viridis')
ax.colorbar(m1, loc='r', ticks=4, label='')
ax.set_title('LWA $\\tilde{A}$', fontsize=fontsize)
ax.set_xlabel('longitude', fontsize=fontsize-1)
ax.set_ylabel('latitude', fontsize=fontsize-1)
ax.set_xticks([0, 60, 120, 180, 240, 300, 360])
ax.set_yticks([-90, -60, -30, 0, 30, 60, 90])
ax = axes[1, 0]
m1=ax.contourf(sum(masks).where(sum(masks)!=0), cmap='bwr')
ax.contour(tracer, levels=xr.concat(contours, 'latitude').values, color='k', lw=0.6)
ax.colorbar(m1, loc='r', ticks=1, label='')
ax.set_title('masks for 4 contours of LWA $\\tilde{A}$', fontsize=fontsize)
ax.set_xlabel('longitude', fontsize=fontsize-1)
ax.set_ylabel('latitude', fontsize=fontsize-1)
ax.set_xticks([0, 60, 120, 180, 240, 300, 360])
ax.set_yticks([-90, -60, -30, 0, 30, 60, 90])
ax = axes[1, 1]
m1=ax.plot(tracer.mean('longitude')*1e5, tracer.latitude, label='zonal mean $\\overline{q}$')
m2=ax.plot(ds_latEq.absolute_vorticity*1e5, tracer.latitude, label='sorted $Q$')
m3=ax.plot(lwa.mean('longitude'), tracer.latitude, label='FAWA $A$')
ax.set_title('meridional profiles of $\\overline{q}$, $Q$, and $A$', fontsize=fontsize)
ax.set_xlabel('vorticity', fontsize=fontsize-1)
ax.set_ylabel('latitude', fontsize=fontsize-1)
ax.set_xticks([-12, -8, -4, 0, 4, 8, 12])
ax.legend([m1,m2,m3], loc='lr', ncols=1, fontsize=fontsize-1)
axes.format(abc='(a)')
###Output
_____no_output_____ |
Mtoto New Childline Kenya Call Prediction Challenge/Solution 1/DataPreparation-checkpoint.ipynb | ###Markdown
Splitting the calldate
###Code
from datetime import datetime
df.calldate=pd.to_datetime(df.calldate)
df["newcalldate"]=df["calldate"].apply(lambda x: datetime.strftime(x,"%Y%m%d%H%M%S"))
df["year"]=df["calldate"].apply(lambda x: datetime.strftime(x,"%Y"))
df["month"]=df["calldate"].apply(lambda x: datetime.strftime(x,"%m"))
df["day"]=df["calldate"].apply(lambda x: datetime.strftime(x,"%d"))
df["hour"]=df["calldate"].apply(lambda x: datetime.strftime(x,"%H"))
df["minute"]=df["calldate"].apply(lambda x: datetime.strftime(x,"%M"))
df["sec"]=df["calldate"].apply(lambda x: datetime.strftime(x,"%S"))
df.drop(["calldate","newcalldate","year","minute","sec"],axis=1,inplace=True)
df.head(3)
###Output
_____no_output_____
###Markdown
Counting Number Of Calls Per Hour Per Day
###Code
df["num_calls"]=1
data= (df.groupby(['month',"day", 'hour'])['num_calls'].sum().reset_index())
data.head(3)
###Output
_____no_output_____
###Markdown
Inserting KenyaPublicHolidays2016 and NairobiSchoolDates2016 from the csv_files provided by Zindi
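For reference, the same public-holiday flag can also be built more compactly with a date lookup; a sketch using the same 2016 dates as the loop below:

```python
holidays_2016 = {("01", "01"), ("03", "25"), ("03", "27"),
                 ("05", "02"), ("06", "01"), ("06", "07"), ("09", "07")}
data["pubHolidays"] = ["holiday" if (m, d) in holidays_2016 else "nopublicholiday"
                       for m, d in zip(data["month"], data["day"])]
```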
###Code
pub_holiday=pd.Series([])
for i in range(len(data)):
if data["month"][i]=="01" and data["day"][i]=="01":
pub_holiday[i]="holiday"
elif data["month"][i]=="03" and data["day"][i]=="25":
pub_holiday[i]="holiday"
elif data["month"][i]=="03" and data["day"][i]=="27":
pub_holiday[i]="holiday"
elif data["month"][i]=="05" and data["day"][i]=="02":
pub_holiday[i]="holiday"
elif data["month"][i]=="06" and data["day"][i]=="01":
pub_holiday[i]="holiday"
elif data["month"][i]=="06" and data["day"][i]=="07":
pub_holiday[i]="holiday"
elif data["month"][i]=="09" and data["day"][i]=="07":
pub_holiday[i]="holiday"
else :
pub_holiday[i]="nopublicholiday"
data.insert(4,"pubHolidays",pub_holiday)
schdate=pd.Series([])
for i in range(len(data)):
if data["month"][i]=="02" or data["month"][i]=="03":
schdate[i]="schooltime"
elif data["month"][i]=="01" and (data["day"][i]=="01"):
schdate[i]="notschooltime"
elif data["month"][i]=="01" and (data["day"][i]=="02"):
schdate[i]="notschooltime"
elif data["month"][i]=="01" and (data["day"][i]=="03"):
schdate[i]="notschooltime"
elif data["month"][i]=="04" and data["day"][i]=="01" :
schdate[i]="schooltime"
elif data["month"][i]=="04" and (data["day"][i]=="02"):
schdate[i]="schooltime"
elif data["month"][i]=="04" and (data["day"][i]=="03"):
schdate[i]="schooltime"
elif data["month"][i]=="04" and (data["day"][i]=="04"):
schdate[i]="schooltime"
elif data["month"][i]=="04" and (data["day"][i]=="05"):
schdate[i]="schooltime"
elif data["month"][i]=="04" and (data["day"][i]=="06"):
schdate[i]="schooltime"
elif data["month"][i]=="04" and (data["day"][i]=="07"):
schdate[i]="schooltime"
elif data["month"][i]=="04" and (data["day"][i]=="08"):
schdate[i]="schooltime"
elif data["month"][i]=="04":
schdate[i]="notschooltime"
elif data["month"][i]=="06" or data["month"][i]=="07":
schdate[i]="schooltime"
elif data["month"][i]=="05" and data["day"][i]=="01":
schdate[i]="notschooltime"
elif data["month"][i]=="05" and data["day"][i]!="01":
schdate[i]="schooltime"
else:
schdate[i]="schooltime"
data.insert(5,"schooltimes",schdate)
data["year"]="2016"
data.head()
data['time_index'] = data[['year','month',"day","hour"]].apply(lambda x: '-'.join(x), axis=1)
data.head()
data.to_csv("ts.csv",index=False)#this is the training dataset
###Output
_____no_output_____
###Markdown
Generating the dates for which we'll forecast num_calls
###Code
date_rng = pd.date_range(start='13/07/2016', end='16/09/2016', freq='H')
df2 = pd.DataFrame(date_rng, columns=['date'])
df2.shape
df2.head(2)
final_test=df2.iloc[0:1344]
final_test.shape
final_test.head(3)
final_test.date=pd.to_datetime(final_test.date)
final_test["year"]=final_test["date"].apply(lambda x: datetime.strftime(x,"%Y"))
final_test["month"]=final_test["date"].apply(lambda x: datetime.strftime(x,"%m"))
final_test["day"]=final_test["date"].apply(lambda x: datetime.strftime(x,"%d"))
final_test["hour"]=final_test["date"].apply(lambda x: datetime.strftime(x,"%H"))
final_test.head()
###Output
_____no_output_____
###Markdown
Inserting KenyaPublicHolidays2016 and NairobiSchoolDates2016 from the csv_files provided by Zindi
###Code
pub_holiday=pd.Series([])
for i in range(len(final_test)):
if final_test["month"][i]=="09" and final_test["day"][i]=="07":
pub_holiday[i]="holiday"
else :
pub_holiday[i]="nopublicholiday"
final_test.insert(4,"pubHolidays",pub_holiday)
schdate=pd.Series([])
for i in range(len(final_test)):
if final_test["month"][i]=="07":
schdate[i]="schooltime"
if final_test["month"][i]=="08" and final_test["day"][i]=="01" :
schdate[i]="schooltime"
elif final_test["month"][i]=="08" and final_test["day"][i]=="02":
schdate[i]="schooltime"
elif final_test["month"][i]=="08" and final_test["day"][i]=="03":
schdate[i]="schooltime"
elif final_test["month"][i]=="08" and final_test["day"][i]=="04":
schdate[i]="schooltime"
elif final_test["month"][i]=="08" and final_test["day"][i]=="05":
schdate[i]="schooltime"
elif final_test["month"][i]=="08":
schdate[i]="notschooltime"
elif final_test["month"][i]=="09" and final_test["day"][i]=="01":
schdate[i]="notschooltime"
elif final_test["month"][i]=="09" and final_test["day"][i]=="02":
schdate[i]="notschooltime"
elif final_test["month"][i]=="09" and final_test["day"][i]=="03":
schdate[i]="notschooltime"
elif final_test["month"][i]=="09" and final_test["day"][i]=="04":
schdate[i]="notschooltime"
elif final_test["month"][i]=="09":
schdate[i]="schooltime"
else:
schdate[i]="schooltime"
final_test.insert(6,"schooltimes",schdate)
final_test['time_index'] = final_test[['year','month',"day","hour"]].apply(lambda x: ''.join(x), axis=1)
final_test.head()
final_test.to_csv("tsTest.csv",index=False)#this is the test dataset
testdataset=final_test["time_index"]#extracting the time_index from the test dataset
testdataset.to_csv("submission.csv",index=False)#holds the time_index column
###Output
/home/lawrence/anaconda3/lib/python3.6/site-packages/ipykernel_launcher.py:1: FutureWarning: The signature of `Series.to_csv` was aligned to that of `DataFrame.to_csv`, and argument 'header' will change its default value from False to True: please pass an explicit value to suppress this warning.
"""Entry point for launching an IPython kernel.
|
PythonJupyterNotebooks/Week12-Day2-Activity2-bossy_words.ipynb | ###Markdown
Who are the Bossy Words?

In this activity you will use TF-IDF to find the most relevant words in news articles that talk about money in the [Reuters Corpus](https://www.nltk.org/book/ch02.html#reuters-corpus) bundled in `NLTK`. Once you find the most relevant words, you should create a word cloud.
###Code
# initial imports
import nltk
from nltk.corpus import reuters
import numpy as np
import pandas as pd
from wordcloud import WordCloud
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
plt.style.use("seaborn-whitegrid")
mpl.rcParams["figure.figsize"] = [20.0, 10.0]
###Output
_____no_output_____
###Markdown
Loading the Reuters Corpus The first step is to load the Reuters Corpus.
###Code
# Download/update the Reuters dataset
nltk.download("reuters")
###Output
[nltk_data] Downloading package reuters to
[nltk_data] /Users/satishsurath/nltk_data...
[nltk_data] Package reuters is already up-to-date!
###Markdown
Getting the News About Money

You will analyze only news that talks about _money_. There are two categories in the Reuters Corpus that talk about money: `money-fx` and `money-supply`. In this section, you will filter the news by these categories. Take a look at the [Reuters Corpus documentation](https://www.nltk.org/book/ch02.html#reuters-corpus) and check how you can retrieve the categories of a document using the `reuters.categories()` method; write some lines of code to retrieve all the news articles that are under the `money-fx` or the `money-supply` categories. **Hint:** You can use a list comprehension or a for-loop to accomplish this task.
###Code
# Getting all documents ids under the money-fx and money-supply categories
categories = ["money-fx", "money-supply"]
all_docs_id = reuters.fileids(categories)
print(len(all_docs_id))
# Creating the working corpus containing the text from all the news articles about money
corpus = [reuters.raw(doc) for doc in all_docs_id]
# Printing a sample article
print(corpus[100])
###Output
CITYTRUST BANCORP INC <CITR> 1ST QTR NET
Shr 1.40 dlrs vs 1.16 dlrs
Net 5,776,000 vs 4,429,000
Avg shrs 4,132,828 vs 3,834,117
###Markdown
Calculating the TF-IDF Weights Calculate the TF-IDF weight for each word in the working corpus using the `TfidfVectorizer()` class. Remember to include the `stop_words='english'` parameter.
###Code
# Calculating TF-IDF for the working corpus.
# Getting the TF-IDF
vectorizer = TfidfVectorizer(stop_words="english")
X_corpus = vectorizer.fit_transform(corpus)
print(X_corpus)
# Getting matrix info
print(f"Matrix shape: {X_corpus.shape}")
print(f"Total number of documents: {X_corpus.shape[0]}")
print(f"Total number of unique words (tokens): {X_corpus.shape[1]}")
# Retrieve words list from corpus
words_corpus = vectorizer.get_feature_names()
print(words_corpus)
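# Next step of the activity (a sketch, not the original solution): rank words by their
# average TF-IDF weight and draw the word cloud with the WordCloud class imported above.
avg_weights = dict(zip(words_corpus, np.ravel(X_corpus.mean(axis=0))))
wc = WordCloud(background_color="white", max_words=100).generate_from_frequencies(avg_weights)
plt.imshow(wc, interpolation="bilinear")
plt.axis("off")
plt.show()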
###Output
[alphabetically sorted token listing, '00' through 'preventing'; full output omitted]
'prevention', 'preventive', 'previous', 'previously', 'price', 'priced', 'prices', 'pricing', 'primarily', 'primary', 'prime', 'primedical', 'principal', 'principally', 'principle', 'principles', 'print', 'printer', 'printing', 'prior', 'priority', 'pritzker', 'pritzkers', 'private', 'privately', 'privatisation', 'privatise', 'privatised', 'privee', 'privileged', 'pro', 'probability', 'probable', 'probably', 'probe', 'problem', 'problems', 'procedural', 'procedure', 'procedures', 'proceed', 'proceeding', 'proceedings', 'proceeds', 'process', 'processed', 'processing', 'processor', 'processors', 'produc', 'produce', 'produced', 'producer', 'producers', 'produces', 'producing', 'product', 'production', 'productions', 'productive', 'productivity', 'products', 'produkte', 'professional', 'professionals', 'professor', 'profit', 'profitability', 'profitable', 'profits', 'progas', 'program', 'programmable', 'programme', 'programs', 'progress', 'progressed', 'progresses', 'progressing', 'project', 'projected', 'projection', 'projections', 'projects', 'prolonged', 'prominence', 'prominent', 'promise', 'promised', 'promote', 'promoting', 'promotion', 'promotions', 'prompt', 'prompted', 'promptly', 'pronged', 'pronounced', 'proof', 'prop', 'propane', 'propel', 'proper', 'properly', 'properties', 'property', 'proposal', 'proposals', 'propose', 'proposed', 'proposes', 'proposing', 'proprietary', 'propsective', 'proration', 'prospect', 'prospective', 'prospects', 'prospectus', 'prot', 'protect', 'protected', 'protecting', 'protection', 'protectionism', 'protectionist', 'protective', 'protein', 'proteins', 'protest', 'protesting', 'prove', 'proved', 'proven', 'provide', 'provided', 'providence', 'provides', 'providing', 'province', 'provinces', 'provincial', 'proving', 'provision', 'provisional', 'provisions', 'provoking', 'prowess', 'proximity', 'proxmire', 'proxy', 'prudence', 'prudent', 'prudente', 'prudential', 'psarouthakis', 'psd', 'psychological', 'psychology', 'ptcc', 'pti', 'ptrk', 'ptt', 'pty', 'public', 'publication', 'publicity', 'publicize', 'publicly', 'publics', 'publish', 'published', 'publishing', 'pueblo', 'puerto', 'pull', 'pulled', 'pulls', 'pulp', 'pulses', 'pumped', 'punitive', 'purchase', 'purchased', 'purchasers', 'purchases', 'purchasing', 'purees', 'purely', 'purge', 'purity', 'purolator', 'purpose', 'purposes', 'purse', 'pursuant', 'pursue', 'pursued', 'pursuing', 'purveyors', 'push', 'pushed', 'pushes', 'pushing', 'puts', 'putting', 'pwj', 'pwr', 'pzl', 'q4a', 'q5c', 'qassem', 'qtly', 'qtr', 'qtrly', 'qtrs', 'quadrangle', 'quadrupled', 'qualified', 'qualify', 'qualities', 'quality', 'quantitative', 'quantities', 'quantity', 'quarter', 'quarterlies', 'quarterly', 'quarters', 'quartz', 'quasi', 'quebec', 'queen', 'queensland', 'query', 'quest', 'question', 'questionable', 'questioned', 'questions', 'queues', 'quick', 'quickly', 'quiet', 'quietly', 'quite', 'quito', 'quota', 'quotas', 'quotation', 'quote', 'quoted', 'quotes', 'quoting', 'rac', 'rachmat', 'racket', 'rad', 'radevormwald', 'radial', 'radio', 'radios', 'rai', 'raid', 'raider', 'railway', 'rain', 'rainfall', 'rains', 'raise', 'raised', 'raises', 'raising', 'rally', 'ralph', 'ramadan', 'ramon', 'ramp', 'ramps', 'ran', 'ranching', 'ranchman', 'rand', 'range', 'ranged', 'ranges', 'ranging', 'rank', 'rapeseed', 'rapeseeed', 'rapid', 'rapidly', 'rashid', 'rate', 'rates', 'ratified', 'rating', 'ratio', 'rationalisation', 'rationalise', 'rationalization', 'raw', 'raws', 'ray', 'raybestos', 'raytech', 'raytheon', 'rbd', 
'rbk', 'rca', 'rci', 'rd', 'rdld', 'reach', 'reached', 'reaching', 'reacived', 'react', 'reacted', 'reacting', 'reaction', 'reactivate', 'read', 'reader', 'ready', 'reafffirmation', 'reaffirm', 'reaffirmation', 'reaffirmed', 'reaffirming', 'reaffirms', 'reagan', 'real', 'realised', 'realistic', 'reality', 'realize', 'realized', 'really', 'realty', 'reappeared', 'reason', 'reasonable', 'reasonably', 'reasons', 'reassure', 'rebate', 'rebates', 'rebbok', 'rebound', 'rebounded', 'rebuild', 'recalled', 'recalls', 'recapitalization', 'receipt', 'receivable', 'receivables', 'receive', 'received', 'receives', 'receiving', 'recent', 'recently', 'recession', 'recieve', 'reckoned', 'reckons', 'reclassification', 'recognized', 'recommend', 'recommendation', 'recommendations', 'recommended', 'recommending', 'recommends', 'reconsider', 'reconvene', 'reconvened', 'reconvenes', 'record', 'recorded', 'recording', 'records', 'recouped', 'recourse', 'recover', 'recoverable', 'recovered', 'recoveries', 'recovery', 'recruitment', 'recurring', 'red', 'redbook', 'redeem', 'redeemable', 'redeemed', 'redesigned', 'redirect', 'redispositions', 'redistribution', 'redland', 'redress', 'redressing', 'reduce', 'reduced', 'reduces', 'reducing', 'reduction', 'reductions', 'redundancies', 'redundancy', 'redundant', 'reebok', 'refer', 'reference', 'referral', 'referred', 'referring', 'refers', 'refinance', 'refinancing', 'refined', 'refiner', 'refineries', 'refinery', 'refining', 'reflate', 'reflect', 'reflected', 'reflecting', 'reflection', 'reflects', 'reform', 'reforms', 'refrain', 'refrigerators', 'refuel', 'refund', 'refusal', 'refused', 'refusing', 'refute', 'regain', 'regard', 'regarded', 'regarding', 'regardless', 'regent', 'regie', 'region', 'regional', 'regionalized', 'regions', 'registered', 'registering', 'registration', 'registrations', 'regret', 'regrets', 'regrettably', 'regular', 'regularly', 'regulated', 'regulates', 'regulation', 'regulations', 'regulators', 'regulatory', 'rehabilitation', 'rehearing', 'reichhold', 'rein', 'reinforce', 'reinsurance', 'reinvest', 'reiterated', 'rejected', 'rejecting', 'rejects', 'rekindle', 'related', 'relation', 'relations', 'relationship', 'relative', 'relatively', 'relaxation', 'relaxed', 'release', 'released', 'relected', 'relevance', 'relevant', 'reliable', 'reliance', 'reliant', 'relied', 'relief', 'relieved', 'reluctant', 'rely', 'relying', 'remain', 'remainder', 'remained', 'remaining', 'remains', 'remark', 'remarkable', 'remarks', 'remedial', 'remedy', 'remittance', 'removal', 'remove', 'removed', 'renato', 'renault', 'rendered', 'rene', 'renegotiating', 'renegotiation', 'renew', 'renewal', 'renewed', 'renouncing', 'rent', 'rental', 'rentals', 'renunciation', 'reopened', 'reord', 'reorganization', 'reorganized', 'rep', 'repaid', 'repair', 'repay', 'repayment', 'repayments', 'repealed', 'repeals', 'repeated', 'repeatedly', 'repeating', 'replace', 'replaced', 'replacement', 'replaces', 'replacing', 'replied', 'reply', 'repoprt', 'report', 'reported', 'reportedly', 'reporter', 'reporters', 'reporting', 'reports', 'repos', 'represent', 'representation', 'representations', 'representative', 'representatives', 'represented', 'representing', 'represents', 'repubblica', 'republic', 'repurchase', 'repurchased', 'repurchases', 'request', 'requested', 'requesting', 'require', 'required', 'requirement', 'requirements', 'requires', 'requiring', 'requirments', 'rescheduling', 'resdel', 'research', 'reselling', 'reser', 'reserve', 'reserves', 'reshaping', 'residential', 
'residual', 'residues', 'resignation', 'resigned', 'resilience', 'resist', 'resistance', 'resisted', 'resisting', 'resolution', 'resolve', 'resolved', 'resonably', 'resorting', 'resource', 'resources', 'respected', 'respective', 'respectively', 'respiratory', 'respond', 'responded', 'responding', 'responds', 'response', 'responses', 'responsibility', 'responsible', 'responsive', 'rest', 'restart', 'restated', 'restaurant', 'restitution', 'restore', 'restored', 'restrain', 'restrained', 'restraining', 'restraint', 'restricted', 'restriction', 'restrictions', 'restrictive', 'restruction', 'restructure', 'restructured', 'restructuring', 'rests', 'result', 'resulted', 'resulting', 'results', 'resume', 'resumed', 'resumes', 'resumption', 'resurgence', 'retail', 'retailer', 'retailers', 'retailing', 'retain', 'retained', 'retaining', 'retains', 'retaliate', 'retaliation', 'retaliatory', 'retire', 'retirement', 'retractable', 'retraction', 'retroactively', 'return', 'returned', 'returning', 'reunion', 'reuter', 'reuters', 'rev', 'revaluation', 'revalue', 'reveal', 'revealed', 'reveals', 'revenue', 'revenues', 'revenueslast', 'reverberated', 'reversal', 'reverse', 'reverses', 'reversionary', 'review', 'reviewed', 'reviewing', 'reviews', 'revise', 'revised', 'revises', 'revising', 'revisions', 'revival', 'revive', 'revived', 'revlon', 'revoked', 'revolution', 'revolutionary', 'revs', 'rexham', 'reynolds', 'rgm', 'rhd', 'rhetoric', 'rhineland', 'rhoads', 'rhode', 'rhodes', 'rhodium', 'rht', 'rice', 'richard', 'richards', 'richardson', 'richfield', 'rico', 'ridden', 'ride', 'ridiculous', 'rift', 'rigaud', 'right', 'rightful', 'rightholders', 'rights', 'rigid', 'rinehart', 'ringgit', 'rio', 'rios', 'riots', 'rise', 'risen', 'rises', 'rising', 'risk', 'risks', 'risky', 'rite', 'rival', 'river', 'riyal', 'riyals', 'rj', 'rjr', 'rnd', 'road', 'roastings', 'rober', 'robert', 'roberts', 'robertson', 'robin', 'robusta', 'rock', 'rockport', 'rod', 'rodino', 'rodney', 'roebuck', 'roger', 'roland', 'role', 'roll', 'rolled', 'rolling', 'rollover', 'rolls', 'roma', 'romania', 'rome', 'ronald', 'ronnie', 'roof', 'roofing', 'room', 'root', 'rosa', 'rosario', 'rose', 'ross', 'rot', 'rothman', 'rothmans', 'rotterdam', 'rotting', 'roubles', 'rough', 'roughly', 'round', 'rounding', 'rounds', 'rout', 'route', 'routes', 'row', 'rowe', 'rows', 'roy', 'royal', 'royce', 'royex', 'rpt', 'rsdl', 'rsla', 'rtn', 'rtrs', 'rtz', 'rtzl', 'rubber', 'rubbermaid', 'rubio', 'rudman', 'ruin', 'rule', 'ruled', 'rules', 'ruling', 'rumor', 'rumored', 'rumors', 'rumour', 'rumours', 'run', 'running', 'runs', 'runup', 'rupees', 'rupert', 'rupiah', 'ruthenium', 'rwanda', 'rxh', 'rydin', 'sa', 'sabah', 'sachs', 'sacks', 'safco', 'safe', 'safeguarding', 'safely', 'safer', 'safety', 'said', 'sail', 'sailed', 'saint', 'sait', 'saito', 'salary', 'sale', 'saleh', 'salem', 'sales', 'salick', 'sallie', 'salmonella', 'salomon', 'salt', 'salvage', 'sam', 'sama', 'samples', 'samuel', 'san', 'sanctioned', 'sanctions', 'sank', 'sanm', 'santa', 'santiago', 'santos', 'sao', 'sapped', 'sarji', 'satellite', 'satisfactory', 'satisfied', 'satoshi', 'saturated', 'saturday', 'sauce', 'saudi', 'saunders', 'saver', 'saving', 'savings', 'savona', 'saw', 'sawn', 'say', 'saying', 'says', 'sbar', 'sbo', 'scale', 'scaled', 'scaleup', 'scallop', 'scandal', 'scandals', 'scandinavia', 'scapegoat', 'scarcely', 'scare', 'scattered', 'scenarios', 'sceptical', 'scf', 'schaik', 'schedule', 'scheduled', 'schedules', 'scheme', 'schlesinger', 'schloss', 'schmahl', 'schn', 
'schneider', 'schoenheimer', 'schreyer', 'schwietert', 'sci', 'science', 'scientific', 'scientists', 'scoffed', 'scope', 'scot', 'scotia', 'scott', 'scrambled', 'scratch', 'sdhares', 'sdhr', 'sdn', 'sdr', 'sea', 'seaforth', 'seag', 'seagram', 'sealy', 'search', 'searle', 'sears', 'season', 'seasonal', 'seasonally', 'seasons', 'seat', 'seated', 'seattle', 'seax', 'seaxe', 'sec', 'second', 'secondary', 'secret', 'secretary', 'sections', 'sector', 'sectors', 'secure', 'secured', 'securing', 'securities', 'securitiesd', 'security', 'seed', 'seeded', 'seeds', 'seeing', 'seek', 'seeking', 'seeks', 'seen', 'sees', 'segments', 'sehl', 'seize', 'sejerstad', 'selected', 'selecting', 'self', 'sell', 'seller', 'selling', 'selloff', 'sells', 'semi', 'semiannual', 'semiconductor', 'semiconductors', 'semolina', 'senate', 'senator', 'sends', 'senior', 'sennen', 'sense', 'sent', 'sentence', 'sentiment', 'sentiments', 'seoul', 'sep', 'separate', 'separately', 'separates', 'separating', 'separatist', 'sept', 'september', 'sequestered', 'seram', 'series', 'seriously', 'seriousness', 'serre', 'serve', 'served', 'serves', 'service', 'services', 'servicing', 'serving', 'session', 'set', 'setbacks', 'sets', 'setting', 'settle', 'settled', 'settlement', 'seven', 'seventh', 'severe', 'severely', 'sftpf', 'sgen', 'sgep', 'shading', 'shaken', 'shall', 'shamrock', 'shandong', 'shanghai', 'shanxi', 'shape', 'shapes', 'share', 'shareby', 'shared', 'shareholder', 'shareholders', 'shareholding', 'shareholdings', 'shareholdrs', 'shareholkders', 'shares', 'sharp', 'sharply', 'shas', 'shasta', 'shawmut', 'shci', 'shearson', 'shed', 'sheen', 'sheep', 'sheer', 'sheet', 'sheets', 'sheikh', 'sheldon', 'shelf', 'shell', 'shelled', 'sheltering', 'shield', 'shift', 'shifted', 'shifting', 'shifts', 'shilling', 'shillings', 'shimbun', 'shinbun', 'shinji', 'shintaro', 'ship', 'shipment', 'shipments', 'shipped', 'shippers', 'shipping', 'ships', 'shipyards', 'shirts', 'shop', 'shopping', 'shops', 'short', 'shortage', 'shortfall', 'shortfalls', 'shortly', 'shot', 'showa', 'showboat', 'showed', 'showers', 'showing', 'shown', 'shows', 'shp', 'shr', 'shrank', 'shrinking', 'shrs', 'shrug', 'shutdown', 'shutdowns', 'si', 'sichuan', 'sickly', 'sidelined', 'sides', 'sideways', 'siemens', 'sierras', 'sierrita', 'siew', 'sifi', 'sight', 'sign', 'signal', 'signaling', 'signals', 'signed', 'signficant', 'significance', 'significant', 'significantly', 'signifying', 'signs', 'silicon', 'silva', 'silver', 'silvertown', 'silvia', 'simao', 'sime', 'similar', 'similarly', 'simmons', 'simon', 'simplify', 'simply', 'simultaneous', 'simultaneously', 'sinclair', 'singapore', 'single', 'sinking', 'sino', 'sir', 'site', 'situation', 'sixth', 'size', 'sized', 'sji', 'skb', 'skin', 'slackening', 'slackens', 'slap', 'slash', 'slat', 'slate', 'slated', 'slater', 'slaughter', 'slaughtering', 'slcn', 'sleep', 'sleepeeze', 'slide', 'slight', 'slightly', 'slimmed', 'slipped', 'slm', 'sloan', 'slope', 'sloshing', 'slow', 'slowdown', 'slowed', 'slower', 'slowing', 'slowly', 'sluggish', 'slump', 'slumped', 'sm', 'small', 'smaller', 'smart', 'smc', 'smelter', 'smelters', 'smelting', 'smith', 'smithkline', 'smithson', 'smls', 'smoking', 'smoothing', 'smsc', 'smuggled', 'smuggling', 'sne', 'soaf', 'soared', 'soares', 'soaring', 'soars', 'sobeys', 'social', 'socialist', 'socialists', 'societe', 'sod', 'soft', 'softening', 'softer', 'software', 'sogenal', 'sogo', 'soil', 'sold', 'sole', 'soliciation', 'solicit', 'solid', 'solidarity', 'solitron', 'soluble', 'solution', 
'solutions', 'solve', 'solved', 'solving', 'somebody', 'somewhat', 'son', 'sons', 'sony', 'soon', 'sorghum', 'soriano', 'sort', 'sorts', 'sosnoff', 'sought', 'sound', 'sounded', 'sour', 'source', 'sources', 'south', 'southeast', 'southern', 'southington', 'southmark', 'southwest', 'southwestern', 'soviet', 'soviets', 'sowing', 'sown', 'sows', 'soy', 'soya', 'soybean', 'soybeans', 'soyoil', 'spa', 'space', 'spain', 'span', 'spanish', 'sparked', 'spartech', 'spc', 'speaking', 'spearhead', 'special', 'specialise', 'speciality', 'specialty', 'specific', 'specifically', 'specified', 'specify', 'spectacular', 'specter', 'spectrum', 'speculate', 'speculated', 'speculating', 'speculation', 'speculative', 'speculators', 'speech', 'speed', 'spell', 'spelled', 'spencer', 'spend', 'spending', 'spent', 'sperry', 'spie', 'spike', 'spilled', 'spiller', 'spin', 'spirit', 'spirits', 'spit', 'spite', 'split', 'splits', 'splitting', 'spokesman', 'spokesmen', 'spokeswoman', 'sponsored', 'sportswear', 'spot', 'spotlight', 'spotty', 'spr', 'spread', 'spree', 'sprh', 'spring', 'sprinkel', 'sprint', 'sptn', 'spurred', 'sqd', 'square', 'squared', 'squeeze', 'squire', 'srd', 'sri', 'ssbb', 'ssoa', 'ssss', 'st', 'stabilisation', 'stabilise', 'stabilised', 'stabilising', 'stability', 'stabilization', 'stabilize', 'stabilized', 'stabilizing', 'stable', 'stadelmann', 'staff', 'staffer', 'staffing', 'stage', 'staged', 'stages', 'stagflation', 'stagnate', 'stagnated', 'stagnating', 'stake', 'stakes', 'stance', 'stand', 'standard', 'standards', 'standing', 'stands', 'standstill', 'stanley', 'staple', 'star', 'stark', 'start', 'started', 'starting', 'starts', 'startup', 'stat', 'stata', 'state', 'stated', 'stategy', 'statement', 'statements', 'states', 'station', 'stations', 'statistical', 'statistics', 'statoil', 'stats', 'status', 'stauffer', 'stay', 'staying', 'stb', 'stch', 'stcks', 'steadied', 'steadier', 'steadily', 'steady', 'steagall', 'steaks', 'steal', 'stearn', 'stearns', 'steel', 'steelmaking', 'steels', 'steep', 'steeply', 'steer', 'steering', 'steers', 'steinhaeuser', 'steinweg', 'stem', 'stemmed', 'stems', 'stenholm', 'step', 'stephen', 'stepped', 'steps', 'sterivet', 'sterling', 'steve', 'stevens', 'stevenson', 'stewart', 'stg', 'stgm', 'sti', 'stibnite', 'stick', 'stiff', 'stimulate', 'stimulating', 'stimulation', 'stimulus', 'stipulates', 'stks', 'stn', 'stock', 'stockbrokers', 'stocked', 'stockholder', 'stockholders', 'stockpile', 'stockpiled', 'stockpiles', 'stocks', 'stoltenberg', 'stood', 'stop', 'stopped', 'storage', 'store', 'stores', 'storing', 'story', 'stos', 'stowe', 'strackbein', 'straining', 'strains', 'strait', 'strata', 'strategic', 'strategies', 'strategy', 'strathfield', 'stream', 'streamlined', 'streamlining', 'street', 'strength', 'strengthen', 'strengthened', 'strengthening', 'stress', 'stressed', 'strict', 'stricter', 'strictly', 'strike', 'strikes', 'stringent', 'stringfellow', 'strings', 'strong', 'stronger', 'strongly', 'struck', 'structural', 'structure', 'struggled', 'struggling', 'sttement', 'stuart', 'stubborn', 'stuck', 'student', 'study', 'studying', 'stuh', 'stvtf', 'style', 'styrene', 'sub', 'subcommittee', 'subdued', 'subject', 'subjective', 'submit', 'submitted', 'subordinated', 'subroto', 'subscribers', 'subscriptions', 'subsequent', 'subsidaries', 'subsidiaries', 'subsidiary', 'subsidies', 'subsidizing', 'subsidy', 'substantial', 'substantially', 'substantiate', 'substantive', 'substituted', 'substituting', 'subtracting', 'subverting', 'succed', 'succeed', 'succeeds', 
'success', 'successful', 'successfully', 'successive', 'sudden', 'suddenly', 'suffer', 'suffered', 'suffering', 'sufficed', 'sufficient', 'sugar', 'suger', 'suggest', 'suggested', 'suggesting', 'suggestion', 'suggests', 'suiker', 'suisse', 'suit', 'suitable', 'suits', 'sul', 'sullivan', 'sulpetro', 'sulphide', 'sulphur', 'sulz', 'sulzer', 'sum', 'sumatran', 'sumi', 'sumita', 'sumitomo', 'summarized', 'summer', 'summit', 'sums', 'sun', 'sunday', 'sunflower', 'sunflowers', 'sunflowerseed', 'suntrust', 'super', 'superfan', 'superior', 'superseded', 'superstores', 'supervisory', 'supplement', 'supplemental', 'supplementary', 'supplied', 'supplier', 'suppliers', 'supplies', 'supply', 'supplying', 'support', 'supported', 'supporting', 'supports', 'supposed', 'supreme', 'surcharges', 'sure', 'surfside', 'surge', 'surged', 'surmount', 'surpass', 'surpasssing', 'surpised', 'surplus', 'surpluses', 'surprise', 'surprised', 'surprises', 'surprising', 'surrey', 'surveillance', 'survey', 'survival', 'survive', 'susan', 'susbidiaries', 'suspect', 'suspected', 'suspend', 'suspended', 'suspending', 'suspension', 'suspicion', 'sustain', 'sustainable', 'sustained', 'svu', 'swap', 'swaziland', 'sweden', 'swedish', 'sweeping', 'sweet', 'sweetened', 'swell', 'swelled', 'swhi', 'swift', 'swiftly', 'swing', 'swiss', 'switch', 'switched', 'switzerland', 'sydney', 'syndicate', 'synthetic', 'sysa', 'systemic', 'systems', 'ta', 'table', 'tables', 'tabulated', 'tackle', 'tactics', 'tae', 'taft', 'tag', 'tagamet', 'tailored', 'tails', 'taipei', 'taiwan', 'taiwanese', 'takashi', 'taken', 'takeover', 'takeovers', 'takes', 'taking', 'talk', 'talking', 'talks', 'tally', 'tanaka', 'tandem', 'tandil', 'tangible', 'tanker', 'tankers', 'tanks', 'tape', 'tapie', 'target', 'targeted', 'targeting', 'targets', 'targetted', 'targetting', 'tariff', 'tariffs', 'tarriffs', 'task', 'taste', 'tat', 'tate', 'taw', 'tax', 'taxabale', 'taxation', 'taxes', 'tbcx', 'tbn', 'tcjc', 'tdd', 'tea', 'teacher', 'teaches', 'team', 'teams', 'tech', 'technical', 'technicalities', 'technically', 'technologies', 'technology', 'teck', 'tedious', 'tehran', 'telcom', 'telecom', 'telecommucations', 'telecommunications', 'telecomputing', 'telecrafter', 'telefonica', 'telemarketing', 'telephone', 'telephones', 'teleprinters', 'television', 'telkom', 'telkon', 'tell', 'tellers', 'telling', 'temp', 'temperatures', 'temples', 'temporarily', 'temporary', 'tempt', 'temuco', 'tend', 'tender', 'tendered', 'tendering', 'tenderloins', 'tenders', 'tending', 'tenn', 'tennessee', 'tennis', 'tension', 'tentatively', 'tenth', 'terence', 'term', 'termed', 'terminal', 'terminals', 'terminated', 'terminates', 'terminating', 'termination', 'terming', 'terms', 'terre', 'terrific', 'territorrial', 'territory', 'tertiary', 'test', 'tested', 'testimony', 'testing', 'tests', 'tetra', 'tex', 'texaco', 'texas', 'text', 'textile', 'textiles', 'tfb', 'thai', 'thailand', 'thames', 'thatcher', 'theme', 'theodor', 'theodore', 'theoretically', 'therapeutic', 'thermal', 'thermo', 'thermoset', 'thing', 'things', 'think', 'thinking', 'thinks', 'thirds', 'thirty', 'thomas', 'thompson', 'thomson', 'thought', 'thous', 'thousand', 'thousands', 'threat', 'threaten', 'threatened', 'threatening', 'threatens', 'threshold', 'throw', 'throwing', 'thrown', 'thrust', 'thursday', 'tian', 'ticket', 'tie', 'tied', 'tier', 'tiered', 'ties', 'tight', 'tighten', 'tightened', 'tightening', 'tighter', 'tightness', 'tile', 'timber', 'time', 'timecharter', 'timed', 'timely', 'times', 'timescale', 'timetable', 
'timidity', 'timing', 'timothy', 'tin', 'tinker', 'tinto', 'tip', 'tire', 'tissue', 'tit', 'title', 'tl', 'tlx', 'tnz', 'tobacco', 'tod', 'today', 'todd', 'toiletry', 'token', 'tokyo', 'told', 'toledo', 'tolerate', 'tolerated', 'tolerating', 'tom', 'tomato', 'tomorrow', 'ton', 'tone', 'tonight', 'tonnage', 'tonnages', 'tonne', 'tonner', 'tonnes', 'tons', 'took', 'topics', 'topped', 'toppled', 'toronto', 'torrid', 'total', 'totaled', 'totaling', 'totalled', 'totalling', 'totally', 'totals', 'touch', 'touche', 'touched', 'tough', 'tougher', 'tourism', 'towels', 'town', 'townson', 'tpa', 'track', 'tracking', 'tracts', 'trade', 'traded', 'trader', 'traders', 'trades', 'tradevest', 'trading', 'traditional', 'traditionally', 'traffic', 'trail', 'training', 'trans', 'transaction', 'transactions', 'transcanada', 'transfer', 'transferred', 'transfers', 'transit', 'transition', 'transitional', 'transitory', 'translated', 'translation', 'transmission', 'transparency', 'transport', 'transportation', 'transporting', 'tranzonic', 'trapped', 'travaux', 'travel', 'traveled', 'treasury', 'treatment', 'tree', 'trees', 'trend', 'trends', 'trengganu', 'tres', 'trial', 'triangle', 'tried', 'tries', 'trieste', 'trigger', 'triggered', 'trillion', 'trillions', 'trim', 'trip', 'tripled', 'triton', 'troops', 'trouble', 'troubled', 'trough', 'trow', 'troy', 'trp', 'truck', 'trucking', 'true', 'trump', 'truscott', 'trusses', 'trust', 'trustcorp', 'trusts', 'try', 'trying', 'tt', 'ttco', 'ttl', 'tube', 'tuberculosis', 'tubes', 'tucker', 'tuesday', 'tug', 'tully', 'tumble', 'tumbled', 'tumbling', 'tunisia', 'turbulence', 'turf', 'turgut', 'turkey', 'turkeys', 'turkish', 'turks', 'turmoil', 'turn', 'turnaround', 'turned', 'turning', 'turnover', 'tuskaloosa', 'tv', 'tvx', 'tvxg', 'twice', 'tx', 'tying', 'tyler', 'type', 'typical', 'tyreb', 'tyres', 'uae', 'ual', 'uap', 'uchida', 'ucpb', 'uct', 'ufc', 'uganda', 'ugandan', 'uht', 'uk', 'ulcer', 'ulcerants', 'ulcers', 'ulric', 'ultimate', 'ultimately', 'umbiz', 'umuarama', 'unable', 'unadjusted', 'unaltered', 'unanimously', 'unaudited', 'unauthorized', 'unavailable', 'unbleached', 'uncertain', 'uncertainty', 'unchanged', 'unclear', 'uncoded', 'uncollectible', 'uncompetitive', 'unconsolidated', 'unconvinced', 'uncorrected', 'und', 'undeground', 'undercut', 'undercutting', 'underestimated', 'undergoing', 'underground', 'underlying', 'undermine', 'underpinned', 'underscored', 'underscoring', 'undersecretary', 'understand', 'understanding', 'understood', 'undertakings', 'undertook', 'undervalued', 'underway', 'underwriter', 'underwriters', 'underwriting', 'undeveloped', 'undisclosed', 'undoes', 'unemployed', 'unemployment', 'uneven', 'unevenly', 'unexpectedly', 'unf', 'unfabricated', 'unfairly', 'unfavorable', 'unfavourable', 'unfiltered', 'unforeseen', 'unfortunate', 'unfortunately', 'unfriendly', 'unhappy', 'unidentified', 'unie', 'unifirst', 'unilaterally', 'unilever', 'uninsured', 'union', 'unions', 'unit', 'united', 'unitholders', 'units', 'universal', 'universidade', 'university', 'unknown', 'unleaded', 'unless', 'unlike', 'unlikely', 'unlimited', 'unload', 'unloaded', 'unloading', 'unmilled', 'unnamed', 'unnecessarily', 'unnecessary', 'unnoticed', 'unobtainable', 'unofficial', 'unposted', 'unprecedented', 'unqtd', 'unquoted', 'unrealistic', 'unreasonable', 'unrecoverable', 'unrelated', 'unrest', 'unscheduled', 'unseasonably', 'unseasonal', 'unsettled', 'unsold', 'unsolicted', 'unspecified', 'unspectacular', 'unstable', 'unsuccesful', 'unsuccessful', 'unsuccessfully', 
'unsustainable', 'unused', 'unusual', 'unusually', 'unwanted', 'unwilling', 'upbeat', 'upcoming', 'updated', 'upgraded', 'upgrading', 'upham', 'upland', 'upper', 'uproar', 'ups', 'upswing', 'uptrend', 'upturn', 'upward', 'upwards', 'urge', 'urged', 'urgency', 'urgent', 'urges', 'urging', 'uruguay', 'usa', 'usage', 'usair', 'usbc', 'usda', 'usdaprj', 'use', 'used', 'useful', 'users', 'uses', 'usfsp', 'usg', 'usines', 'using', 'ussr', 'ustc', 'ustr', 'usual', 'usually', 'usurp', 'usx', 'ut', 'utah', 'utilisation', 'utilities', 'utility', 'utilization', 'utp', 'utx', 'v2500', 'va', 'vacancies', 'vacuum', 'vague', 'valex', 'valid', 'validly', 'valley', 'valp', 'valu', 'valuable', 'valuation', 'valuations', 'value', 'valued', 'values', 'valuing', 'valves', 'vamand', 'van', 'vancouver', 'variables', 'variation', 'varied', 'varieites', 'varies', 'variety', 'various', 'vast', 'vastly', 'vbi', 'vcs', 've', 'vegetable', 'vegetables', 'veghel', 'vehemence', 'vehicle', 'vehicles', 'venalum', 'venezuela', 'venice', 'venture', 'ventures', 'verdict', 'vermin', 'version', 'versus', 'vessel', 'vessels', 'veterinary', 'vexing', 'vi', 'viability', 'viable', 'vice', 'vicious', 'victoria', 'victors', 'victory', 'video', 'vieille', 'view', 'views', 'vigilant', 'vigrously', 'vik', 'villa', 'village', 'villespedue', 'vincent', 'violate', 'violated', 'violating', 'violations', 'virginia', 'virtual', 'virtually', 'vision', 'visit', 'visiting', 'visits', 'vismara', 'visualize', 'vital', 'vitality', 'viviez', 'vlsi', 'vmnb', 'vno', 'vo', 'vodka', 'voice', 'voids', 'volatile', 'volatility', 'volcker', 'volkswagen', 'volume', 'volumes', 'volvo', 'voplex', 'vornado', 'vot', 'vote', 'voted', 'votes', 'voting', 'votuporanga', 'vowg', 'voyage', 'voyages', 'vs', 'vul', 'vulcan', 'vulnerable', 'vw', 'wab', 'waccamaw', 'waf', 'wag', 'wage', 'wages', 'wagner', 'wagons', 'wait', 'waiting', 'waive', 'waiver', 'wake', 'wal', 'wales', 'walgreen', 'walk', 'wall', 'wallin', 'wallis', 'walls', 'waltham', 'wang', 'want', 'wanted', 'wants', 'war', 'warburg', 'ward', 'warehouse', 'warehouses', 'warm', 'warmer', 'warn', 'warned', 'warner', 'warning', 'warnings', 'warns', 'warplanes', 'warrant', 'warranted', 'warrants', 'warren', 'wary', 'waseda', 'wash', 'washington', 'wasn', 'waste', 'watch', 'watched', 'water', 'watered', 'waters', 'wave', 'waves', 'wax', 'way', 'wayne', 'ways', 'wcix', 'wdca', 'wdg', 'weak', 'weaken', 'weakened', 'weakening', 'weaker', 'weakness', 'wealthier', 'weapons', 'wear', 'wearhouse', 'weather', 'wedgestone', 'wednesbury', 'wednesday', 'week', 'weekend', 'weekly', 'weeks', 'weigh', 'weighed', 'weighhouse', 'weighing', 'weight', 'weighted', 'weil', 'weis', 'welbac', 'welch', 'welcome', 'welcomed', 'welcomes', 'welcoming', 'wells', 'welsh', 'wendel', 'went', 'west', 'westamerica', 'westco', 'western', 'westhem', 'westin', 'westinghouse', 'westminster', 'westwego', 'wet', 'wfc', 'wfpr', 'wfsl', 'whatsoever', 'wheat', 'wheats', 'wheeler', 'whim', 'whirlpool', 'whiskey', 'whisky', 'white', 'whites', 'whitney', 'whitten', 'wholesale', 'wholly', 'whr', 'wide', 'widely', 'widen', 'widened', 'widens', 'wider', 'widescale', 'widespread', 'wilf', 'willa', 'willard', 'william', 'williams', 'willing', 'willingness', 'willy', 'wilson', 'wimpey', 'win', 'winchester', 'wind', 'windfall', 'window', 'windsor', 'wine', 'wines', 'wing', 'wingers', 'winn', 'winning', 'wins', 'winston', 'winter', 'winterhalter', 'winterkill', 'wiped', 'wireless', 'wis', 'wisdom', 'wish', 'withdraw', 'withdrawal', 'withdrawals', 'withdrawing', 
'withdrawn', 'withdrew', 'withholding', 'witness', 'witter', 'wk', 'wlbk', 'wmb', 'wmc', 'wmk', 'wmng', 'wmpy', 'wnn', 'wntlc', 'wojnilower', 'women', 'won', 'wood', 'woods', 'woolworth', 'word', 'worded', 'wording', 'words', 'work', 'worked', 'workers', 'workforce', 'working', 'works', 'workstations', 'world', 'worldwide', 'worried', 'worries', 'worry', 'worrying', 'worse', 'worsened', 'worst', 'worth', 'worthwhile', 'wouldn', 'wound', 'wounded', 'wpm', 'wrii', 'write', 'writedown', 'writedowns', 'writeoff', 'writeoffs', 'writing', 'written', 'wrong', 'wrongly', 'wrote', 'wrought', 'wsam', 'wtaf', 'wtc', 'wx', 'wyo', 'xebc', 'xebec', 'xico', 'xicor', 'xingang', 'xon', 'xt', 'yale', 'yankee', 'yaounde', 'yasuhiro', 'year', 'years', 'yemen', 'yen', 'yes', 'yesterday', 'yeutter', 'yield', 'yielding', 'yields', 'yld', 'ynk', 'yohai', 'york', 'yoweri', 'yuan', 'yugoslav', 'yugoslavia', 'yugoslavs', 'yunfu', 'zaire', 'zambia', 'zambian', 'zayre', 'zealand', 'zeebregts', 'zeeuw', 'zen', 'zenex', 'zero', 'zimbabwe', 'zimbabwean', 'zinc', 'zond', 'zondervan', 'zone', 'zones', 'zurich', 'zy']
###Markdown
Create a DataFrame representation of the TF-IDF weights of each term in the working corpus. Use the `sum(axis=0)` method to calculate a measure similar to term frequency based on the TF-IDF weights; this value will be used to rank the terms when creating the word cloud.
###Code
# Creating a DataFrame Representation of the TF-IDF results
#money_news_df = pd.DataFrame(
# list(zip(words_corpus, np.ravel(X_corpus.mean(axis=0)))), columns=["Word", "TF-IDF"]
#)
# Order the DataFrame by word frequency in descending order
#money_news_df = money_news_df.sort_values(by=["TF-IDF"], ascending=False)
# Creating a DataFrame Representation of the TF-IDF results
money_news_df_with_freq = pd.DataFrame(
list(zip(vectorizer.get_feature_names(), np.ravel(X_corpus.sum(axis=0)))),
columns=["Word", "Frequency"],
)
# Order the DataFrame by word frequency in descending order
money_news_df_with_freq = money_news_df_with_freq.sort_values(by=["Frequency"], ascending=False)
# Print the top 10 words
money_news_df_with_freq.head(10)
###Output
_____no_output_____
###Markdown
Retrieving the Top Words In order to create the word cloud you should get the top words; in this case we will use a rule of thumb, empirically tested by some NLP practitioners, which states that words with a frequency between 10 and 30 tend to be the most relevant in a corpus. Following this rule, create a new DataFrame containing only the words in that frequency range.
###Code
# Top words will be those with a frequency between 10 and 30 (rule of thumb)
top_words = money_news_df_with_freq[
(money_news_df_with_freq["Frequency"] >= 10) & (money_news_df_with_freq["Frequency"] <= 30)
]
top_words.head(10)
###Output
_____no_output_____
###Markdown
Creating Word Cloud Now you have all the pieces needed to create a word cloud based on TF-IDF weights, so use the `WordCloud` library to create it.
###Code
# Create a string list of terms to generate the word cloud
terms_list = str(top_words["Word"].tolist())
# Create the word cloud
wordcloud = WordCloud(colormap="RdYlBu").generate(terms_list)
plt.imshow(wordcloud)
plt.axis("off")
fontdict = {"fontsize": 20, "fontweight": "bold"}
plt.title("Money News Word Cloud", fontdict=fontdict)
plt.show()
###Output
_____no_output_____
###Markdown
Challenge: Looking for Documents that Contain Top Words Finally you might find it interesting to search for the articles that contain the most relevant words. Create a function called `retrieve_docs(terms)` that receives a list of terms as a parameter and extracts from the working corpus all the news articles that contain the search terms. In this function you should use the `reuters.words()` method to retrieve the tokenized version of each article, as can be seen in the [Reuters Corpus documentation](https://www.nltk.org/book/ch02.html#reuters-corpus). **Hint:** To find any occurrence of the search terms you might find it useful to check out [this post on StackOverflow](https://stackoverflow.com/a/25102099/4325668). You should also lowercase all the words to ease your term search.
###Code
def retrieve_docs(terms):
result_docs = []
for doc_id in money_news_ids:
found_terms = [
word
for word in reuters.words(doc_id)
if any(term in word.lower() for term in terms)
]
if len(found_terms) > 0:
result_docs.append(doc_id)
return result_docs
###Output
_____no_output_____
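###Markdown
Note that the comprehension above does substring matching (`term in word.lower()`), so searching for "yen" also counts any token that merely contains it. A minimal variant (a sketch, reusing `money_news_ids` and `reuters.words()` from above) that matches whole tokens only:
###Code
# Sketch: exact-token variant of retrieve_docs (whole-word matches only)
def retrieve_docs_exact(terms):
    terms = [t.lower() for t in terms]
    return [
        doc_id
        for doc_id in money_news_ids
        if any(word.lower() in terms for word in reuters.words(doc_id))
    ]
###Output
_____no_output_____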
###Markdown
Question 1: How many articles talk about Yen?
###Code
len(retrieve_docs(["yen"]))
###Output
_____no_output_____
###Markdown
Question 2: How many articles talk about Japan or Banks?
###Code
len(retrieve_docs(["japan", "banks"]))
###Output
_____no_output_____
###Markdown
Question 3: How many articles talk about England or Dealers?
###Code
len(retrieve_docs(["england", "dealers"]))
###Output
_____no_output_____ |
00_Curso_Folder/2_Applications/Class_22/ei_iesti01_keyword_spotting_project_nn_classifier.ipynb | ###Markdown
Download the data - after extracting features through a processing block - so we can train a machine learning model.
###Code
import numpy as np
import requests
API_KEY = 'ei_98b6c42cd610ed6b1f4354ff1d2b8ccb2a0bc1694b165bfda5fa2ff0ea93bea5'
def download_data(url):
response = requests.get(url, headers={'x-api-key': API_KEY})
if response.status_code == 200:
return response.content
else:
print(response.content)
raise ConnectionError('Could not download data file')
X = download_data('https://studio.edgeimpulse.com/v1/api/38744/training/5/x')
Y = download_data('https://studio.edgeimpulse.com/v1/api/38744/training/5/y')
###Output
_____no_output_____
###Markdown
Store the data in a temporary file, and load it back through Numpy.
###Code
with open('x_train.npy', 'wb') as file:
file.write(X)
with open('y_train.npy', 'wb') as file:
file.write(Y)
X = np.load('x_train.npy')
Y = np.load('y_train.npy')[:,0]
X.shape, Y.shape
image = X[0]
img_length = image.shape[0]
img_length
columns = 13
rows = int(img_length / columns)
image = np.reshape(image, (rows, columns))
image.shape
import matplotlib.pyplot as plt
import seaborn as sns
plt.figure(figsize=(12,6))
sns.heatmap(image.T);
###Output
_____no_output_____
###Markdown
Define our labels and split the data into training and test sets:
###Code
import sys, os, random
import tensorflow as tf
from sklearn.model_selection import train_test_split
import logging
tf.get_logger().setLevel(logging.ERROR)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# Set random seeds for repeatable results
RANDOM_SEED = 3
random.seed(RANDOM_SEED)
np.random.seed(RANDOM_SEED)
tf.random.set_seed(RANDOM_SEED)
classes_values = [ "iesti", "silence", "unifei" ]
classes = len(classes_values)
Y = tf.keras.utils.to_categorical(Y - 1, classes)
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=1)
input_length = X_train[0].shape[0]
train_dataset = tf.data.Dataset.from_tensor_slices((X_train, Y_train))
validation_dataset = tf.data.Dataset.from_tensor_slices((X_test, Y_test))
def get_reshape_function(reshape_to):
def reshape(image, label):
return tf.reshape(image, reshape_to), label
return reshape
callbacks = []
###Output
_____no_output_____
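###Markdown
A quick look at class balance after the split (a small sketch): summing the one-hot labels gives the number of samples per class in each split.
###Code
# Samples per class in the training and test splits (columns follow the classes_values order)
print(dict(zip(classes_values, Y_train.sum(axis=0).astype(int))))
print(dict(zip(classes_values, Y_test.sum(axis=0).astype(int))))
###Output
_____no_output_____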
###Markdown
Train the model:
###Code
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, InputLayer, Dropout, Conv1D, Conv2D, Flatten, Reshape, MaxPooling1D, MaxPooling2D, BatchNormalization
from tensorflow.keras.optimizers import Adam
# model architecture
model = Sequential()
model.add(Reshape((int(input_length / 13), 13), input_shape=(input_length, )))
model.add(Conv1D(8, kernel_size=3, activation='relu', padding='same'))
model.add(MaxPooling1D(pool_size=2, strides=2, padding='same'))
model.add(Dropout(0.25))
model.add(Conv1D(16, kernel_size=3, activation='relu', padding='same'))
model.add(MaxPooling1D(pool_size=2, strides=2, padding='same'))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(classes, activation='softmax', name='y_pred'))
model.summary()
# this controls the learning rate
opt = Adam(lr=0.005, beta_1=0.9, beta_2=0.999)
# this controls the batch size, or you can manipulate the tf.data.Dataset objects yourself
BATCH_SIZE = 32
train_dataset = train_dataset.batch(BATCH_SIZE, drop_remainder=False)
validation_dataset = validation_dataset.batch(BATCH_SIZE, drop_remainder=False)
# train the neural network
model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
history = model.fit(train_dataset, epochs=100, validation_data=validation_dataset, verbose=2, callbacks=callbacks)
def get_test_accuracy(model, x_test, y_test):
"""Test model classification accuracy"""
test_loss, test_acc = model.evaluate(x=x_test, y=y_test, verbose=0)
print('accuracy: {acc:0.3f}'.format(acc=test_acc))
def plot_result(history, res= 'loss'):
plt.plot(history.history[res])
plt.plot(history.history['val_'+res])
plt.title(res+' vs. epochs')
plt.ylabel(res)
plt.xlabel('Epoch')
if res == 'loss':
plt.legend(['Training', res], loc='upper right')
else:
plt.legend(['Training', res], loc='lower right')
plt.show()
plot_result(history, res= 'loss')
plot_result(history, res= 'accuracy')
get_test_accuracy(model, X_test, Y_test)
# Save the model to disk
model.save('cnn_v1_saved_model')
###Output
_____no_output_____
###Markdown
Convert and save the TF model to a TFLite model
###Code
# Convert TF model to a tflite model
from tensorflow.keras.models import load_model
model_cnn_v1 = load_model('cnn_v1_saved_model')
converter = tf.lite.TFLiteConverter.from_keras_model(model_cnn_v1)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
tflite_model = converter.convert()
tflite_model_size = open("cnn_v1.tflite","wb").write(tflite_model)
print("Quantized model (DEFAULT) is {:,} bytes".format(tflite_model_size))
###Output
Quantized model (DEFAULT) is 11,536 bytes
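###Markdown
A quick sanity check (a minimal sketch, assuming the `cnn_v1.tflite` file written above): load it with `tf.lite.Interpreter` and run one test sample, so the converted model's class probabilities can be compared with the Keras model's output.
###Code
# Sketch: run a single inference with the TensorFlow Lite interpreter
interpreter = tf.lite.Interpreter(model_path="cnn_v1.tflite")
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()[0]
output_details = interpreter.get_output_details()[0]
# one feature vector from the test split, shape (1, input_length), float32 as the converted model expects
sample = X_test[:1].astype(np.float32)
interpreter.set_tensor(input_details['index'], sample)
interpreter.invoke()
tflite_probs = interpreter.get_tensor(output_details['index'])[0]
print(dict(zip(classes_values, tflite_probs)))
###Output
_____no_output_____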
|
python-tilt-example.ipynb | ###Markdown
Installation
Install the python client library using pip. See the [project page](https://pypi.org/project/tilt/).
###Code
!pip3 install tilt
###Output
Requirement already satisfied: tilt in /usr/local/lib/python3.8/site-packages (0.0.1)
###Markdown
Basic usage
1) Import the transparency information language binding/library.
2) Create your first object, e.g. a Data Protection Officer with their contact details.
3) Continue creating your objects, i.e. a Controller and its Representative.
4) ... (add all other fields, not shown in here) ...
###Code
from tilt import tilt
dpo = tilt.DataProtectionOfficer(name='Max Ninjaturtle', address='21 Jump Street', country='DE', email='[email protected]', phone='0142 43333')
print(dpo.to_dict())
# {'address': '21 Jump Street', 'country': 'DE', 'email': '[email protected]', 'name': 'Max Ninjaturtle', 'phone': '0142 43333'}
r = tilt.ControllerRepresentative(name='Maxi Müller', email='[email protected]', phone=None)
c = tilt.Controller(name='MyCompany', address='Straße des 17. Juni', country='DE', division='Main', representative=r)
print(c.to_dict())
# {'address': 'Straße des 17. Juni', 'country': 'DE', 'division': 'Main', 'name': 'MyCompany', 'representative': {'email': '[email protected]', 'name': 'Maxi Müller', 'phone': None}}
###Output
{'address': '21 Jump Street', 'country': 'DE', 'email': '[email protected]', 'name': 'Max Ninjaturtle', 'phone': '0142 43333'}
{'address': 'Straße des 17. Juni', 'country': 'DE', 'division': 'Main', 'name': 'MyCompany', 'representative': {'email': '[email protected]', 'name': 'Maxi Müller', 'phone': None}}
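###Markdown
Because every object exposes `to_dict()`, any fragment can be serialized straight to JSON with the standard library; a small usage sketch (assuming the `c` controller created above):
###Code
import json

# Serialize the controller fragment to a JSON string
print(json.dumps(c.to_dict(), indent=2, ensure_ascii=False))
###Output
_____no_output_____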
###Markdown
Import existing documents
In order to import existing tilt documents (we call them instances), you can use your favorite HTTP client or load them from your local disk. Then you can use the native Python objects and make any manipulations you like.
###Code
import json
import requests
file = requests.get('https://raw.githubusercontent.com/Transparency-Information-Language/schema/master/tilt.json')
instance = tilt.tilt_from_dict(json.loads(file.content))
print(instance.controller.to_dict())
# {'address': 'Wolfsburger Ring 2, 38440 Berlin', 'country': 'DE', 'division': 'Product line e-mobility', 'name': 'Green Company AG', 'representative': {'email': '[email protected]', 'name': 'Jane Super', 'phone': '0049 151 1234 5678'}}
for element in list(instance.data_disclosed):
for recipient in element.recipients:
print(recipient.category)
# Marketing content provider
# Responsible Statistical Institutes
instance.controller.name = 'Yellow Company Ltd.'
print(instance.controller.to_dict())
# {'address': 'Wolfsburger Ring 2, 38440 Berlin', 'country': 'DE', 'division': 'Product line e-mobility', 'name': 'Yellow Company Ltd.', 'representative': {'email': '[email protected]', 'name': 'Jane Super', 'phone': '0049 151 1234 5678'}}
###Output
{'address': 'Wolfsburger Ring 2, 38440 Berlin', 'country': 'DE', 'division': 'Product line e-mobility', 'name': 'Green Company AG', 'representative': {'email': '[email protected]', 'name': 'Jane Super', 'phone': '0049 151 1234 5678'}}
Marketing content provider
Responsible Statistical Institutes
{'address': 'Wolfsburger Ring 2, 38440 Berlin', 'country': 'DE', 'division': 'Product line e-mobility', 'name': 'Yellow Company Ltd.', 'representative': {'email': '[email protected]', 'name': 'Jane Super', 'phone': '0049 151 1234 5678'}}
###Markdown
Create new documents from scratch
In the example below we are using standard libraries (e.g. sha256 or datetime) in order to create formatted strings. All objects have `from_dict()` and `to_dict()` functions which help you to build or export them.
###Code
from hashlib import sha256
from datetime import datetime
result = {}
result["_hash"] = sha256('<insert hashable content here>'.encode('utf-8')).hexdigest()
result["_id"] = '<your-id-01>'
result["created"] = '2020-10-02T22:08:12.510696'
result["language"] = 'en'
result["modified"] = datetime.now().isoformat()
result["name"] = 'Green Compancy SE'
result["status"] = 'active'
result["url"] = 'https://greencompany.implementation.cloud'
result["version"] = 42
meta = tilt.Meta.from_dict(result)
print(meta)
# <tilt.tilt.Meta object at 0x7fef287928d0>
print(meta.to_dict())
# {'_hash': 'bd8f3c314b73d85175c8ccf15b4b8d26348beca96c9df39ba98fa5dda3f60fcc', '_id': '<your-id-01>', 'created': '2020-10-02T22:08:12.510696', 'language': 'en', 'modified': '2020-07-27T15:14:35.689606', 'name': 'Green Compancy SE', 'status': 'active', 'url': 'https://greencompany.implementation.cloud', 'version': 42}
###Output
<tilt.tilt.Meta object at 0x10efb4fa0>
{'_hash': 'bd8f3c314b73d85175c8ccf15b4b8d26348beca96c9df39ba98fa5dda3f60fcc', '_id': '<your-id-01>', 'created': '2020-10-02T22:08:12.510696', 'language': 'en', 'modified': '2020-07-30T16:34:11.486918', 'name': 'Green Compancy SE', 'status': 'active', 'url': 'https://greencompany.implementation.cloud', 'version': 42}
###Markdown
Validate documents
See the following example code on how to validate documents using [fastjsonschema](https://horejsek.github.io/python-fastjsonschema/).
###Code
import fastjsonschema
import json
import requests
# Load schema to validate against
file = requests.get('https://raw.githubusercontent.com/Transparency-Information-Language/schema/master/tilt-schema.json')
schema = json.loads(file.content)
# Load instance/document to validate;
# you may use your own tilt object with .to_dict() here
file = requests.get('https://raw.githubusercontent.com/Transparency-Information-Language/schema/master/tilt.json')
instance = json.loads(file.content)
# Compile schema
validate_func = fastjsonschema.compile(schema)
# Validate instance against schema
validate_func(instance)
## {'meta': {'_id': 'f1424f86-ca0f-4f0c-9438-43cc00509931', 'name': 'Green Company', 'created': '2020-04-03T15:53:05.929588', 'modified': '2020-04-03T15:53:05.929588',...
## => document is valid
# Load another example
file = requests.get('https://raw.githubusercontent.com/Transparency-Information-Language/schema/master/tilt-NOT-valid.json')
instance = json.loads(file.content)
# Validate another example
validate_func(instance)
## JsonSchemaException: data.controller must contain ['name', 'address', 'country', 'representative'] properties
## => document is invalid
###Output
_____no_output_____ |
Deep_Learning/ResNet/ResNet_Implementations.ipynb | ###Markdown
ResNet Implementation I have basically followed this [blog](https://towardsdatascience.com/residual-network-implementing-resnet-a7da63c7b278). The author provides a step-by-step walkthrough, which made understanding the whole concept really easy. Importing some basic libraries
###Code
import numpy as np
from functools import partial
import matplotlib.pyplot as plt
import torch.nn as nn
import torch
%matplotlib inline
###Output
_____no_output_____
###Markdown
Basic Block Basically adds 'same' padding to PyTorch's `Conv2d`. Unlike some other frameworks, PyTorch does not give us the option of just passing a parameter for this, so the padding is computed from the kernel size.
###Code
class Conv2dAuto(nn.Conv2d):
def __init__(self, *args, **kwargs) :
super().__init__(*args, **kwargs)
self.padding = (self.kernel_size[0]//2, self.kernel_size[1]//2)
conv3x3 = partial(Conv2dAuto, kernel_size=3, bias=False)
conv = conv3x3(in_channels=32, out_channels=64)
print(conv)
del conv
###Output
Conv2dAuto(32, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
###Markdown
ModuleDict We create a dictionary with different activation functions; this will be handy later. The `ModuleDict` comes in very useful if we want to swap the activation function. Also, the concept of `ModuleDict` is pretty cool.
###Code
def activation_func(activation) :
return nn.ModuleDict([
['relu', nn.ReLU(inplace = True)],
['leaky_relu', nn.LeakyReLU(negative_slope = 0.01, inplace = True)],
['selu', nn.SELU(inplace=True)],
['none', nn.Identity()]
])[activation]
###Output
_____no_output_____
###Markdown
Residual Block This class just defines the basic structure of the residual block; we will add functionality to it step by step. `nn.Identity` acts as a placeholder for the modules that subclasses will fill in.
###Code
class ResidualBlock(nn.Module) :
def __init__(self, in_channels, out_channels, activation = 'relu'):
super().__init__()
self.in_channels, self.out_channels, self.activation = in_channels, out_channels, activation
self.blocks = nn.Identity()
self.activate = activation_func(activation)
self.shortcut = nn.Identity()
def forward(self, x):
residual = x
if self.should_apply_shortcut : residual = self.shortcut(x)
x = self.blocks(x)
x += residual
x = self.activate(x)
return x
@property
def should_apply_shortcut(self):
return self.in_channels != self.out_channels
dummy = torch.ones((1, 1, 1, 1))
block = ResidualBlock(1, 64)
block(dummy)
###Output
_____no_output_____
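###Markdown
Since `blocks` and `shortcut` are both still `nn.Identity`, the forward pass above simply computes `relu(x + x)`; a quick check of that placeholder behaviour (a minimal sketch, not from the original post):
###Code
# With identity placeholders, the input is added to itself and passed through ReLU
out = block(dummy)
print(out)  # expected: tensor([[[[2.]]]])
###Output
_____no_output_____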
###Markdown
ResNetResidualBlock This block is an extension of the basic ResidualBlock structure. In this class we define the `self.shortcut` of the residual block. in_channels: number of input channels to the residual block. out_channels: number of output channels of the residual block. Take a look at the downsampling argument.
###Code
class ResNetResidualBlock(ResidualBlock) :
def __init__(self, in_channels, out_channels, expansion=1, downsampling=1,conv=conv3x3, *args, **kwargs):
super().__init__(in_channels, out_channels, *args, **kwargs)
self.expansion = expansion
self.downsampling = downsampling
self.conv = conv
self.shortcut = nn.Sequential(
nn.Conv2d(self.in_channels, self.expanded_channels, kernel_size=1, stride=self.downsampling, bias=False),
nn.BatchNorm2d(self.expanded_channels)) if self.should_apply_shortcut else None
@property
def expanded_channels(self):
return self.out_channels * self.expansion
@property
def should_apply_shortcut(self) :
return self.in_channels != self.expanded_channels
ResNetResidualBlock(32, 64)
###Output
_____no_output_____
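###Markdown
A quick illustrative check (a sketch, not from the original post): with `downsampling=2` the 1x1 shortcut convolution halves the spatial resolution while matching the expanded channel count, which is what allows the residual to be added to a downsampled main path later on.
###Code
# The stride-2 1x1 shortcut maps (1, 32, 8, 8) -> (1, 64, 4, 4)
blk = ResNetResidualBlock(32, 64, downsampling=2)
print(blk.shortcut(torch.ones((1, 32, 8, 8))).shape)  # torch.Size([1, 64, 4, 4])
###Output
_____no_output_____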
###Markdown
ResNet Basic Block Now we extend ResNetResidualBlock to define `self.blocks`, thereby completing a very basic residual block.
###Code
def conv_bn(in_channels, out_channels, conv, *args, **kwargs):
return nn.Sequential(conv(in_channels, out_channels,*args, **kwargs),
nn.BatchNorm2d(out_channels))
class ResNetBasicBlock(ResNetResidualBlock):
expansion = 1
def __init__(self, in_channels, out_channels, *args, **kwargs):
super().__init__(in_channels, out_channels, *args, **kwargs)
self.blocks = nn.Sequential(
conv_bn(self.in_channels, self.out_channels, conv=self.conv, bias=False, stride=self.downsampling),
activation_func(self.activation),
conv_bn(self.out_channels, self.expanded_channels, conv=self.conv, bias=False),
)
dummy = torch.ones((1, 32, 224, 224))
block = ResNetBasicBlock(32, 64)
block(dummy).shape
print(block)
###Output
ResNetBasicBlock(
(blocks): Sequential(
(0): Sequential(
(0): Conv2dAuto(32, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
(1): ReLU(inplace=True)
(2): Sequential(
(0): Conv2dAuto(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(activate): ReLU(inplace=True)
(shortcut): Sequential(
(0): Conv2d(32, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
(1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
###Markdown
ResNetBottleNeckBlock ResNetBottleNeckBlock extends ResNetResidualBlock to match the authors' description of the bottleneck: a 1x1 convolution to reduce the channels, a 3x3 convolution, and a 1x1 convolution to expand them again with an expansion factor of 4.
###Code
class ResNetBottleNeckBlock(ResNetResidualBlock):
expansion = 4
def __init__(self, in_channels, out_channels, *args, **kwargs):
super().__init__(in_channels, out_channels, expansion=4, *args, **kwargs)
self.blocks = nn.Sequential(
conv_bn(self.in_channels, self.out_channels, self.conv, kernel_size=1),
activation_func(self.activation),
conv_bn(self.out_channels, self.out_channels, self.conv, kernel_size=3, stride=self.downsampling),
activation_func(self.activation),
conv_bn(self.out_channels, self.expanded_channels, self.conv, kernel_size=1),
)
dummy = torch.ones((1, 32, 10, 10))
block = ResNetBottleNeckBlock(32, 64)
block(dummy).shape
print(block)
###Output
ResNetBottleNeckBlock(
(blocks): Sequential(
(0): Sequential(
(0): Conv2dAuto(32, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
(1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
(1): ReLU(inplace=True)
(2): Sequential(
(0): Conv2dAuto(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
(3): ReLU(inplace=True)
(4): Sequential(
(0): Conv2dAuto(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
(1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(activate): ReLU(inplace=True)
(shortcut): Sequential(
(0): Conv2d(32, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
(1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
###Markdown
ResNetLayer Finally we are moving towards building a ResNet layer. In the ResNetLayer we just stack some ResNetBasicBlocks to get a layer. Doubt: why is the downsampling defined this way? (See the sketch after the next cell.)
###Code
class ResNetLayer(nn.Module):
def __init__(self, in_channels, out_channels, block=ResNetBasicBlock, n=1, *args, **kwargs):
super().__init__()
        downsampling = 2 if in_channels != out_channels else 1  # stride 2 in the first block whenever the channel count changes, so each layer halves the spatial size exactly once (as in the original ResNet)
self.blocks = nn.Sequential(
block(in_channels, out_channels, *args, **kwargs, downsampling=downsampling),
*[block(out_channels*block.expansion, out_channels, downsampling=1, *args, **kwargs) for _ in range(n-1)]
)
def forward(self, x) :
x = self.blocks(x)
return x
dummy = torch.ones((1, 64, 48, 48))
layer = ResNetLayer(64, 128, block=ResNetBasicBlock, n=3)
print(layer)
layer(dummy).shape
###Output
ResNetLayer(
(blocks): Sequential(
(0): ResNetBasicBlock(
(blocks): Sequential(
(0): Sequential(
(0): Conv2dAuto(64, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
(1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
(1): ReLU(inplace=True)
(2): Sequential(
(0): Conv2dAuto(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(activate): ReLU(inplace=True)
(shortcut): Sequential(
(0): Conv2d(64, 128, kernel_size=(1, 1), stride=(2, 2), bias=False)
(1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(1): ResNetBasicBlock(
(blocks): Sequential(
(0): Sequential(
(0): Conv2dAuto(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
(1): ReLU(inplace=True)
(2): Sequential(
(0): Conv2dAuto(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(activate): ReLU(inplace=True)
(shortcut): None
)
(2): ResNetBasicBlock(
(blocks): Sequential(
(0): Sequential(
(0): Conv2dAuto(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
(1): ReLU(inplace=True)
(2): Sequential(
(0): Conv2dAuto(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(activate): ReLU(inplace=True)
(shortcut): None
)
)
)
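###Markdown
To answer the doubt above (a sketch using the classes defined in this notebook): the first block of a layer gets stride 2 whenever the channel count changes, so each layer halves the spatial resolution exactly once while the remaining blocks keep it, mirroring the original ResNet design. The printout above shows this: only block (0) has stride (2, 2).
###Code
import torch

# 48x48 input should come out as 24x24 once the channels grow from 64 to 128.
out = ResNetLayer(64, 128, block=ResNetBasicBlock, n=3)(torch.ones((1, 64, 48, 48)))
assert out.shape == (1, 128, 24, 24)
###Output
_____no_output_____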
###Markdown
Encoder Now the encoder is composed of multiple layers.
###Code
class ResNetEncoder(nn.Module):
def __init__(self, in_channels=3, blocks_sizes=[64, 128, 256, 512], deepths=[2,2,2,2],
activation='relu', block=ResNetBasicBlock, *args, **kwargs):
super().__init__()
self.blocks_sizes = blocks_sizes
self.gate = nn.Sequential(
nn.Conv2d(in_channels, self.blocks_sizes[0], kernel_size=7, stride=2, padding=3, bias=False),
nn.BatchNorm2d(self.blocks_sizes[0]),
activation_func(activation),
nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
)
self.in_out_block_sizes = list(zip(blocks_sizes, blocks_sizes[1:]))
self.blocks = nn.ModuleList([
ResNetLayer(blocks_sizes[0], blocks_sizes[0], n=deepths[0], activation=activation,
block=block,*args, **kwargs),
*[ResNetLayer(in_channels * block.expansion,
out_channels, n=n, activation=activation,
block=block, *args, **kwargs)
for (in_channels, out_channels), n in zip(self.in_out_block_sizes, deepths[1:])]
])
def forward(self, x):
x = self.gate(x)
for block in self.blocks :
x = block(x)
return x
###Output
_____no_output_____
###Markdown
Decoder The last component of the ResNet, finally!! Doubt: what is adaptive average pooling? (nn.AdaptiveAvgPool2d((1, 1)) averages each feature map down to a single value regardless of the input's spatial size; see the small check after the next cell.)
###Code
class ResnetDecoder(nn.Module):
def __init__(self, in_features, n_classes):
super().__init__()
self.avg = nn.AdaptiveAvgPool2d((1, 1))
self.decoder = nn.Linear(in_features, n_classes)
def forward(self, x):
x = self.avg(x)
x = x.view(x.size(0), -1)
x = self.decoder(x)
return x
###Output
_____no_output_____
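###Markdown
A small check of the doubt above (a minimal sketch): adaptive average pooling produces a fixed (1, 1) spatial output per channel no matter what spatial size goes in, which is what lets the decoder accept any input resolution.
###Code
import torch
import torch.nn as nn

pool = nn.AdaptiveAvgPool2d((1, 1))
# Different spatial sizes, same pooled shape per channel.
assert pool(torch.ones(1, 512, 7, 7)).shape == (1, 512, 1, 1)
assert pool(torch.ones(1, 512, 15, 9)).shape == (1, 512, 1, 1)
###Output
_____no_output_____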
###Markdown
ResNet Finally defining the ResNet!!
###Code
class ResNet(nn.Module):
def __init__(self, in_channels, n_classes, *args, **kwargs):
super().__init__()
self.encoder = ResNetEncoder(in_channels, *args, **kwargs)
self.decoder = ResnetDecoder(self.encoder.blocks[-1].blocks[-1].expanded_channels, n_classes)
def forward(self, x):
x = self.encoder(x)
x = self.decoder(x)
return x
def resnet18(in_channels, n_classes, block=ResNetBasicBlock, *args, **kwargs):
return ResNet(in_channels, n_classes, block=block, deepths=[2, 2, 2, 2], *args, **kwargs)
def resnet34(in_channels, n_classes, block=ResNetBasicBlock, *args, **kwargs):
return ResNet(in_channels, n_classes, block=block, deepths=[3, 4, 6, 3], *args, **kwargs)
def resnet50(in_channels, n_classes, block=ResNetBottleNeckBlock, *args, **kwargs):
return ResNet(in_channels, n_classes, block=block, deepths=[3, 4, 6, 3], *args, **kwargs)
def resnet101(in_channels, n_classes, block=ResNetBottleNeckBlock, *args, **kwargs):
return ResNet(in_channels, n_classes, block=block, deepths=[3, 4, 23, 3], *args, **kwargs)
def resnet152(in_channels, n_classes, block=ResNetBottleNeckBlock, *args, **kwargs):
return ResNet(in_channels, n_classes, block=block, deepths=[3, 8, 36, 3], *args, **kwargs)
res1 = ResNet(3, 2)
dummy = torch.ones((1, 3, 224, 224))
print(res1(dummy))
###Output
tensor([[-0.1159, -0.2839]], grad_fn=<AddmmBackward>)
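###Markdown
A small sketch (using the factory functions defined above): comparing model sizes by counting trainable parameters.
###Code
import torch

def count_parameters(model):
    # Total number of trainable parameters.
    return sum(p.numel() for p in model.parameters() if p.requires_grad)

for factory in (resnet18, resnet34, resnet50):
    print(factory.__name__, count_parameters(factory(3, 1000)))
###Output
_____no_output_____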
R/6_Using_POST_answers.ipynb | ###Markdown
Exercises 6 – answers1\. Fetch all the transcripts of *ESPN* using the lookup function. Fetch the cDNA sequences of all transcripts using a single POST request, and print them in FASTA format.
###Code
library(httr)
library(jsonlite)
fetch_endpoint <- function(server, request, content_type){
r <- GET(paste(server, request, sep = ""), accept(content_type))
stop_for_status(r)
if (content_type == 'application/json'){
return (fromJSON(content(r, "text", encoding = "UTF-8")))
} else {
return (content(r, "text", encoding = "UTF-8"))
}
}
fetch_endpoint_POST <- function(server, request, data, content_type='application/json'){
r <- POST(paste(server, request, sep = ""), content_type("application/json"), accept("application/json"), body = data)
stop_for_status(r)
if (content_type == 'application/json'){
return (fromJSON(content(r, "text", encoding = "UTF-8")))
} else {
return (content(r, "text", encoding = "UTF-8"))
}
}
# define the gene name
gene_name <- "ESPN"
transcripts <- vector()
# define the general URL parameters
server <- "http://rest.ensembl.org/"
con <- "application/json"
# define REST query to get the gene ID from the gene name
ext_get_gene <- paste("lookup/symbol/homo_sapiens/", gene_name, "?expand=1;", sep ="")
get_gene <- fetch_endpoint(server, ext_get_gene, con)
# make a vector of the transcripts then convert to a json object
transcripts <- get_gene$Transcript$id
data <- toJSON(list(ids=transcripts))
ext_sequence <- '/sequence/id/'
sequences <- fetch_endpoint_POST(server, ext_sequence, data, con)
sequences[, c("id", "seq")]
###Output
_____no_output_____
###Markdown
2\. You have the following list of variants:```rs1415919662, rs957333053, rs762944488, rs1372123943, rs553810871, rs1451237599, rs751376931```Get the variant class, evidence attributes, source and the most_severe_consequence for all variants using the variant POST endpoint.
###Code
library(httr)
library(jsonlite)
fetch_endpoint <- function(server, request, content_type){
r <- GET(paste(server, request, sep = ""), accept(content_type))
stop_for_status(r)
if (content_type == 'application/json'){
return (fromJSON(content(r, "text", encoding = "UTF-8")))
} else {
return (content(r, "text", encoding = "UTF-8"))
}
}
fetch_endpoint_POST <- function(server, request, data, content_type='application/json'){
r <- POST(paste(server, request, sep = ""), content_type("application/json"), accept("application/json"), body = data)
stop_for_status(r)
if (content_type == 'application/json'){
return (fromJSON(content(r, "text", encoding = "UTF-8")))
} else {
return (content(r, "text", encoding = "UTF-8"))
}
}
# define the general URL parameters
server <- "http://rest.ensembl.org/"
con <- "application/json"
# make a vector listing all the variants then convert to a json list
variants <- c("rs1415919662", "rs957333053", "rs762944488", "rs1372123943", "rs553810871", "rs1451237599", "rs751376931")
data <- toJSON(list(ids=variants))
# run a post query with the list of variants
var_ext <- "variation/homo_sapiens"
post_variants <- fetch_endpoint_POST(server, var_ext, data, con)
for (variant in post_variants){
print( paste(variant$name, variant$var_class, variant$evidence, variant$source, variant$most_severe_consequence, sep = ", "))
}
###Output
_____no_output_____ |
Air_Quality_Index/.ipynb_checkpoints/data_clean-checkpoint.ipynb | ###Markdown
Interpretation
###Code
data.dropna(inplace=True)
data.drop(columns='Unnamed: 0',inplace=True)
data=data.apply(pd.to_numeric)
sns.heatmap(data.corr())
data.corr()
###Output
_____no_output_____
###Markdown
High correlation: drop the TM, Tm and VM columns
###Code
data.drop(columns=['TM','Tm','VM'],inplace=True)
###Output
_____no_output_____ |
week7/data_aug.ipynb | ###Markdown
Aug 1
###Code
sentences = []
for i in range(len(data_train)):
for _ in range(3):
s1 = data_train['sentence_A'][i]
s2 = data_train['sentence_B'][i]
score = data_train['relatedness_score'][i]
s1 = aug.augment(s1)
s2 = aug.augment(s2)
sentences.append([s1, s2, score])
pd.DataFrame(sentences, columns=['sent_1', 'sent_2', 'sim']).to_csv('./data/semeval_train_aug.csv', index=False)
sentences = []
for i in range(len(data)):
for _ in range(3):
s1 = data['sentence_A'][i]
s2 = data['sentence_B'][i]
score = data['relatedness_score'][i]
s1 = aug.augment(s1)
s2 = aug.augment(s2)
sentences.append([s1, s2, score])
pd.DataFrame(sentences, columns=['sent_1', 'sent_2', 'sim']).to_csv('./data/semeval_test_aug.csv', index=False)
###Output
_____no_output_____
###Markdown
Aug 2
###Code
embed_aug = EmbeddingAugmenter()
easy_aug = EasyDataAugmenter()
sentences = []
for i in range(len(data_train)):
s1 = data_train['sentence_A'][i]
s2 = data_train['sentence_B'][i]
score = data_train['relatedness_score'][i]
easy_sentences1 = easy_aug.augment(s1)
easy_sentences2 = easy_aug.augment(s2)
# embed_sentences1 = list()
# embed_sentences2 = list()
# for _ in range(3):
# embed_sentences1.append(embed_aug.augment(s1)[0])
# embed_sentences2.append(embed_aug.augment(s2)[0])
for j in range(len(easy_sentences1)):
sentences.append([easy_sentences1[j], easy_sentences2[j], score])
# for k in range(len(embed_sentences1)):
# sentences.append([embed_sentences1[k], embed_sentences2[k], score])
pd.DataFrame(sentences, columns=['sent_1', 'sent_2', 'sim']).to_csv('./data/semeval_train_aug.csv', index=False)
embed_aug = EmbeddingAugmenter()
easy_aug = EasyDataAugmenter()
sentences = []
for i in range(len(data)):
s1 = data['sentence_A'][i]
s2 = data['sentence_B'][i]
score = data['relatedness_score'][i]
easy_sentences1 = easy_aug.augment(s1)
easy_sentences2 = easy_aug.augment(s2)
# embed_sentences1 = list()
# embed_sentences2 = list()
# for _ in range(3):
# embed_sentences1.append(embed_aug.augment(s1)[0])
# embed_sentences2.append(embed_aug.augment(s2)[0])
for j in range(len(easy_sentences1)):
sentences.append([easy_sentences1[j], easy_sentences2[j], score])
# for k in range(len(embed_sentences1)):
# sentences.append([embed_sentences1[k], embed_sentences2[k], score])
pd.DataFrame(sentences, columns=['sent_1', 'sent_2', 'sim']).to_csv('./data/semeval_test_aug.csv', index=False)
###Output
_____no_output_____
###Markdown
Aug 3
###Code
from googletrans import Translator
translator = Translator()
sentences = list()
for i in tqdm.tqdm(range(len(data_train))):
sentence1 = data_train['sentence_A'][i]
sentence2 = data_train['sentence_B'][i]
score = data_train['relatedness_score'][i]
# To KO
ko_result1 = translator.translate(sentence1, src='en', dest='ko').text
ko_result2 = translator.translate(sentence2, src='en', dest='ko').text
# And back
en_result1 = translator.translate(ko_result1, src='ko', dest='en').text
en_result2 = translator.translate(ko_result2, src='ko', dest='en').text
sentences.append([en_result1, en_result2, score])
pd.DataFrame(sentences, columns=['sent_1', 'sent_2', 'sim']).to_csv('./data/semeval_train_ko.csv', index=False)
sentences = list()
for i in tqdm.tqdm(range(len(data))):
sentence1 = data['sentence_A'][i]
sentence2 = data['sentence_B'][i]
score = data['relatedness_score'][i]
# To KO
ko_result1 = translator.translate(sentence1, src='en', dest='ko').text
ko_result2 = translator.translate(sentence2, src='en', dest='ko').text
# And back
en_result1 = translator.translate(ko_result1, src='ko', dest='en').text
en_result2 = translator.translate(ko_result2, src='ko', dest='en').text
sentences.append([en_result1, en_result2, score])
# pd.DataFrame(sentences, columns=['sent_1', 'sent_2', 'sim']).to_csv('./data/semeval_test_ko.csv', index=False)
df.to_csv('./data/semeval_train_ko.csv', index=False)
# Occasionally, the API doesn't actually translate...
sent1_idx = list()
sent2_idx = list()
for i in range(len(df)):
if 'e' not in df['sent_1'][i].lower():
if 'a' not in df['sent_1'][i].lower():
if 'i' not in df['sent_1'][i].lower():
if 'o' not in df['sent_1'][i].lower():
sent1_idx.append(i)
if 'e' not in df['sent_2'][i].lower():
if 'a' not in df['sent_2'][i].lower():
if 'i' not in df['sent_2'][i].lower():
if 'o' not in df['sent_2'][i].lower():
sent2_idx.append(i)
sent1_fix = list()
for idx in sent1_idx:
sent1_fix.append(translator.translate(df['sent_1'][idx], src='ko', dest='en').text)
# time.sleep(2)
sent2_fix = list()
for idx in sent2_idx:
sent2_fix.append(translator.translate(df['sent_2'][idx], src='ko', dest='en').text)
# time.sleep(2)
###Output
_____no_output_____ |
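###Markdown
Aside (a sketch, assuming df holds the back-translated sentence pairs as in the cell above): a simpler way to spot rows the API left untranslated is to test whether the text is still non-ASCII (i.e. still Korean), instead of checking for missing vowels.
###Code
# Indices of rows whose text is not plain ASCII, i.e. probably still untranslated.
sent1_idx_alt = [i for i, s in enumerate(df['sent_1']) if not s.isascii()]
sent2_idx_alt = [i for i, s in enumerate(df['sent_2']) if not s.isascii()]
###Output
_____no_output_____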
notebooks/TimeSeriesInsights.ipynb | ###Markdown
Getting insights into the CORONAVIRUS using time series - using the COVID-19 time series data from Johns Hopkins University
###Code
from datetime import date, timedelta
import matplotlib.pyplot as plt
import pandas
import urllib.request
class hopkins_timeseries:
def __init__(self, states, regions, timeseries):
self.__states = states
self.__regions = regions
self.__timeseries = timeseries
def get(self):
return self.__timeseries
def plot(self, names, last_days=60):
height = 2 * len(names)
if 0 < height:
end_index = date.today()
start_index = end_index-timedelta(days=last_days)
self.__timeseries[start_index:end_index][names].plot(subplots=True, figsize=(15, height))
def plot_all(self, data, last_days=60):
height = 4 * len(data.columns)
if 0 < height:
end_index = date.today()
start_index = end_index-timedelta(days=last_days)
data.loc[start_index:end_index].plot(subplots=True, figsize=(15, height))
def plot_regions(self, names, last_days=60):
aggregated_regions = self.__aggregate_regions_by_sum(names)
self.plot_all(aggregated_regions, last_days)
def plot_states(self, names, last_days=60):
states = self.__get_states(names)
self.plot(states, last_days)
def __aggregate_regions_by_sum(self, names):
aggregated_timeseries = pandas.DataFrame()
for name in names:
states_from_region = self.__get_states_from_region(name)
if 0 < len(states_from_region):
aggregated_timeseries[name] = self.__timeseries[states_from_region].sum(axis=1)
elif name in self.__timeseries.columns:
aggregated_timeseries[name] = self.__timeseries[name]
return aggregated_timeseries
def __get_states_from_region(self, name):
states = []
region_indices = [index for index in range(0, len(self.__regions)) if name == self.__regions[index]]
for region_index in region_indices:
state = self.__states[region_index]
if pandas.notna(state):
states.append(state)
return states
def __get_states(self, names):
states = []
for name in names:
states_from_region = self.__get_states_from_region(name)
if 0 < len(states_from_region):
states += states_from_region
else:
state_indices = [index for index in range(0, len(self.__states)) if name == self.__states[index]]
for state_index in state_indices:
state = self.__states[state_index]
if pandas.notna(state):
states.append(state)
return states
def query_hopkins_timeseries():
url = "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv"
with urllib.request.urlopen(url) as stream:
timeseries_hopkins = pandas.read_csv(stream)
date_columns = timeseries_hopkins.columns[4:]
timeseries_indexed = timeseries_hopkins[date_columns].T
timeseries_indexed.index = pandas.to_datetime(timeseries_indexed.index)
states = timeseries_hopkins["Province/State"]
regions = timeseries_hopkins["Country/Region"]
timeseries_indexed.columns = [states[index] if 0 < pandas.notna(states[index]) else regions[index] for index in range(0, len(states))]
return hopkins_timeseries(states, regions, timeseries_indexed)
corona_timeseries = query_hopkins_timeseries()
###Output
_____no_output_____
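###Markdown
Besides plot_regions, the helper exposes a get() accessor and plot_states; a quick way to inspect the indexed data before plotting (the state name below is an assumption about what is present in the Johns Hopkins file):
###Code
# Peek at the last few rows of the date-indexed table.
corona_timeseries.get().tail()
# Individual provinces/states can be plotted too, e.g.:
# corona_timeseries.plot_states(["Hubei"], last_days=30)
###Output
_____no_output_____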
###Markdown
Plot the time series
###Code
corona_timeseries.plot_regions(["Austria", "China", "France", "Germany", "Italy", "Spain"], last_days=12)
###Output
_____no_output_____ |
INTERNSHIP_grip (1).ipynb | ###Markdown
**GRIPS @ THE SPARKS FOUNDATION** ***Computer Vision & Internet of Things*** **TASK 2** ***Color detection in an image using OpenCV*** **GIRISHWAR .S** STEP 1: importing the required libraries
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import sklearn
from sklearn.cluster import KMeans
from collections import Counter
from skimage.color import rgb2lab, deltaE_cie76
import cv2
plt.style.use('ggplot')
plt.rcParams['font.family'] = 'sans-serif'
plt.rcParams['font.serif'] = 'Ubuntu'
plt.rcParams['font.monospace'] = 'Ubuntu Mono'
plt.rcParams['font.size'] = 14
plt.rcParams['axes.labelsize'] = 12
plt.rcParams['axes.labelweight'] = 'bold'
plt.rcParams['axes.titlesize'] = 12
plt.rcParams['xtick.labelsize'] = 12
plt.rcParams['ytick.labelsize'] = 12
plt.rcParams['legend.fontsize'] = 12
plt.rcParams['figure.titlesize'] = 12
plt.rcParams['image.cmap'] = 'jet'
plt.rcParams['image.interpolation'] = 'none'
plt.rcParams['figure.figsize'] = (10, 10)
plt.rcParams['axes.grid']=False
plt.rcParams['lines.linewidth'] = 2
plt.rcParams['lines.markersize'] = 8
colors = ['xkcd:pale orange', 'xkcd:sea blue', 'xkcd:pale red', 'xkcd:sage green', 'xkcd:terra cotta', 'xkcd:dull purple', 'xkcd:teal', 'xkcd:goldenrod', 'xkcd:cadet blue',
'xkcd:scarlet']
###Output
_____no_output_____
###Markdown
STEP 2: reading image in RGB color space
###Code
def get_image(image_path):
image = cv2.imread(image_path)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
return image
def RGB2HEX(color):
return "#{:02x}{:02x}{:02x}".format(int(color[0]), int(color[1]), int(color[2]))
###Output
_____no_output_____
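###Markdown
A quick check of the helper above (a minimal sketch): RGB2HEX packs an RGB triple into the usual lowercase hex colour string.
###Code
# Pure red and a mixed colour, checked against their expected hex codes.
assert RGB2HEX([255, 0, 0]) == '#ff0000'
assert RGB2HEX([0, 128, 255]) == '#0080ff'
###Output
_____no_output_____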
###Markdown
STEP 3: importing the image, reshaping its pixels and clustering them with KMeans
###Code
image = get_image('/content/picture.jpg')
number_of_colors = 10
modified_image = image.reshape(image.shape[0]*image.shape[1], 3)
clf = KMeans(n_clusters = number_of_colors)
labels = clf.fit_predict(modified_image)
counts = Counter(labels)
center_colors = clf.cluster_centers_
# We get ordered colors by iterating through the keys
ordered_colors = [center_colors[i] for i in counts.keys()]
hex_colors = [RGB2HEX(ordered_colors[i]) for i in counts.keys()]
rgb_colors = [ordered_colors[i] for i in counts.keys()]
###Output
_____no_output_____
###Markdown
STEP 5: detecting color when n=10
###Code
plt.title('Colors Detection ($n=10$)', fontsize=20)
plt.pie(counts.values(), labels = hex_colors, colors = hex_colors)
def inthreshold(array):
count = 0
for i in range(len(array)):
if array[i]>=-12 and array[i]<=12:
count=count+1
return count
def show_color(col_index):
color = col_index
sub_image = (image-rgb_colors[color])
ZEROS_VALUES = []
COUNT = []
for i in range(len(sub_image)):
for j in range(len(sub_image[i])):
e = sub_image[i,j]
#print(e.shape)
count = inthreshold(e)
COUNT.append(count)
if count==2:
ZEROS_VALUES.append([i,j])
color_arr=(np.zeros((16,16,3))+rgb_colors[color]).astype(int)
normalized = sub_image - sub_image.min()
normalized = ((sub_image/sub_image.max())*255).astype(int)
    ZEROS_IMAGE = image.copy()
for i in range(len(ZEROS_VALUES)):
ZEROS_IMAGE[ZEROS_VALUES[i][0],ZEROS_VALUES[i][1],:] = [250,250,250]
plt.subplot(1,3,1)
plt.imshow(ZEROS_IMAGE.astype(int))
plt.subplot(1,3,2)
plt.imshow(image)
plt.subplot(1,3,3)
#pwargs = {'interpolation':'nearest'}
plt.imshow(color_arr)
def show_color(col_index):
color = col_index
sub_image = (image-rgb_colors[color])
ZEROS_VALUES = []
COUNT = []
for i in range(len(sub_image)):
for j in range(len(sub_image[i])):
e = sub_image[i,j]
#print(e.shape)
count = inthreshold(e)
COUNT.append(count)
if count==2:
ZEROS_VALUES.append([i,j])
color_arr=(np.zeros((16,16,3))+rgb_colors[color]).astype(int)
normalized = sub_image - sub_image.min()
normalized = ((sub_image/sub_image.max())*255).astype(int)
ZEROS_IMAGE = image.copy()
for i in range(len(ZEROS_VALUES)):
ZEROS_IMAGE[ZEROS_VALUES[i][0],ZEROS_VALUES[i][1],:] = [250,250,250]
plt.subplot(1,3,1)
plt.imshow(ZEROS_IMAGE.astype(int))
plt.subplot(1,3,2)
plt.imshow(image)
plt.subplot(1,3,3)
#pwargs = {'interpolation':'nearest'}
plt.imshow(color_arr)
show_color(4)
###Output
_____no_output_____
###Markdown
STEP 6: image processing
###Code
for i in range(len(rgb_colors)):
rgb_colors[i] = rgb_colors[i].astype(int)
def square_maker():
inp_img = image
h = int(inp_img.shape[0])
step_h = int(h/10)
w = int(inp_img.shape[1])
step_w = int(w/10)
X = np.arange(0,h+step_h,step_h)
Y =np.arange(0,w+step_w,step_w)
squares = [inp_img[0:step_h,0:step_w]]
for i in range(0,len(X)-1):
for j in range(0,len(Y)-1):
squares.append(inp_img[X[i]:X[i+1],Y[j]:Y[j+1]])
return np.array(squares)[1::]
def color_computing(array):
DIFF = []
squared_image = square_maker()
for square in squared_image:
DIFF_COLOR = []
for color in range(len(rgb_colors)):
diff = np.abs(square - rgb_colors[color])
DIFF_COLOR.append(diff.mean())
DIFF.append(DIFF_COLOR)
return np.array(DIFF)
###Output
_____no_output_____
###Markdown
STEP 7 : plotting the best color for each square
###Code
def best_color_plot(selected_slice):
plt.subplot(1,2,1)
plt.title('Retrieved Color')
plt.imshow((np.zeros((16,16,3))+ rgb_colors[color_computing(image)[selected_slice].argmin()]).astype(int))
plt.subplot(1,2,2)
plt.title('Selected Square: '+ str(selected_slice))
plt.imshow(square_maker()[selected_slice])
best_color_plot(5)
best_color_plot(25)
best_color_plot(75)
def build_summary():
results = color_computing(image)
cols = ['Square Number'] + hex_colors
sorted_results = pd.DataFrame(columns= cols)
k=0
for r in results:
d = {'Square Number':int(k)}
for c in range(len(hex_colors)):
d[hex_colors[c]] = r[c]*100/r.sum()
sorted_results = sorted_results.append(d,ignore_index=True)
k=k+1
sorted_results['Square Number'] = sorted_results['Square Number'].astype(int)
return sorted_results
summary_df = build_summary()
summary_df.head()
###Output
_____no_output_____ |
RNN_Exploration/rnn_lstm_hw_12.ipynb | ###Markdown
1. Word2Vec (something like that) embeddings* Read the GloVE file into word - vector pairs * Create a 2D-embedding with PCA for the 10_000 nearest neighbors (based on L2 distance) for the word 'dog'.* Visualize the 2 dimensional embeddings on a plot and add text annotations to it * 'dog' should be red * only add the nearest 50 neighbors * add an alpha (.3) to the 10_000 points (too much to visualize well with text) 2. IMDB reviews with word embeddings Load the 'imdb_review' dataset from 'tf.keras.datasets.imdb' and convert each sentence into a sequence of its GloVe representations. This will generate a (n_samples, sample_length, 50) dimensional dataset. * mean your input along the `sample_length` axis -> this generates a dataset usable by the MLP -> (n_samples, 50) * you are basically generating a mean representation of the sentence * handle your OOV (out of vocabulary) tokens with e.g. np.zeros(50) -> this does not influence the mean much. Loading the data: * `(x_train, y_train), (x_test, y_test) = tf.keras.datasets.imdb.load_data( path="imdb.npz", num_words=None, skip_top=0, maxlen=150, seed=113, start_char=1, oov_char=2, index_from=3)` * do the preprocessing this way, this makes the dataset ~9'000 samples large and the maximum length is only 150 words * the dataset is represented as index values, so you need to convert twice: index -> word -> GloVe * the index-to-word conversion is achievable by Keras, read the documentation. Model definition: * `Dense(256, relu)`, * `Dense(64, relu)`, * `Dense(1, sigmoid)` Use default parameters in the compile: 'adam', 'binary_crossentropy', 'accuracy' metric. Train for 20-25 epochs.***Hint: approximately 55-60% accuracy is achievable on the test set.*** 3. Sequence modeling with LSTM * use the IMDB dataset again converted into GloVe sequences but without the mean operation. This way you are going to generate (n_samples, sequence_length, 50) sample points with different sequence lengths * pad every sequence to `150` in length with np.zeros(50) -> (n_samples, 150, 50) * LSTM is a recurrent model with intricate inner operations; if you use it in a bidirectional fashion, your sequence will be processed from both ends. Model definition: * `BidirectionalLSTM(64, return_sequences=True),` * `BidirectionalLSTM(64),` * `Dense(64, relu)`, * `Dense(1, sigmoid)` Use default parameters in the compile: 'adam', 'binary_crossentropy', 'accuracy' metric. Train for 20-25 epochs.***Hint: approximately 65-70% accuracy is achievable on the test set.***
###Code
!pip install tensorflow-text
###Output
Collecting tensorflow-text
Downloading tensorflow_text-2.7.3-cp37-cp37m-manylinux2010_x86_64.whl (4.9 MB)
[K |████████████████████████████████| 4.9 MB 5.6 MB/s
[?25hRequirement already satisfied: tensorflow<2.8,>=2.7.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow-text) (2.7.0)
Requirement already satisfied: tensorflow-hub>=0.8.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow-text) (0.12.0)
Requirement already satisfied: tensorflow-io-gcs-filesystem>=0.21.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow<2.8,>=2.7.0->tensorflow-text) (0.22.0)
Requirement already satisfied: libclang>=9.0.1 in /usr/local/lib/python3.7/dist-packages (from tensorflow<2.8,>=2.7.0->tensorflow-text) (12.0.0)
Requirement already satisfied: opt-einsum>=2.3.2 in /usr/local/lib/python3.7/dist-packages (from tensorflow<2.8,>=2.7.0->tensorflow-text) (3.3.0)
Requirement already satisfied: absl-py>=0.4.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow<2.8,>=2.7.0->tensorflow-text) (0.12.0)
Requirement already satisfied: keras<2.8,>=2.7.0rc0 in /usr/local/lib/python3.7/dist-packages (from tensorflow<2.8,>=2.7.0->tensorflow-text) (2.7.0)
Requirement already satisfied: flatbuffers<3.0,>=1.12 in /usr/local/lib/python3.7/dist-packages (from tensorflow<2.8,>=2.7.0->tensorflow-text) (2.0)
Requirement already satisfied: wheel<1.0,>=0.32.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow<2.8,>=2.7.0->tensorflow-text) (0.37.0)
Requirement already satisfied: tensorboard~=2.6 in /usr/local/lib/python3.7/dist-packages (from tensorflow<2.8,>=2.7.0->tensorflow-text) (2.7.0)
Requirement already satisfied: astunparse>=1.6.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow<2.8,>=2.7.0->tensorflow-text) (1.6.3)
Requirement already satisfied: protobuf>=3.9.2 in /usr/local/lib/python3.7/dist-packages (from tensorflow<2.8,>=2.7.0->tensorflow-text) (3.17.3)
Requirement already satisfied: numpy>=1.14.5 in /usr/local/lib/python3.7/dist-packages (from tensorflow<2.8,>=2.7.0->tensorflow-text) (1.19.5)
Requirement already satisfied: typing-extensions>=3.6.6 in /usr/local/lib/python3.7/dist-packages (from tensorflow<2.8,>=2.7.0->tensorflow-text) (3.10.0.2)
Requirement already satisfied: keras-preprocessing>=1.1.1 in /usr/local/lib/python3.7/dist-packages (from tensorflow<2.8,>=2.7.0->tensorflow-text) (1.1.2)
Requirement already satisfied: termcolor>=1.1.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow<2.8,>=2.7.0->tensorflow-text) (1.1.0)
Requirement already satisfied: tensorflow-estimator<2.8,~=2.7.0rc0 in /usr/local/lib/python3.7/dist-packages (from tensorflow<2.8,>=2.7.0->tensorflow-text) (2.7.0)
Requirement already satisfied: google-pasta>=0.1.1 in /usr/local/lib/python3.7/dist-packages (from tensorflow<2.8,>=2.7.0->tensorflow-text) (0.2.0)
Requirement already satisfied: wrapt>=1.11.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow<2.8,>=2.7.0->tensorflow-text) (1.13.3)
Requirement already satisfied: six>=1.12.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow<2.8,>=2.7.0->tensorflow-text) (1.15.0)
Requirement already satisfied: grpcio<2.0,>=1.24.3 in /usr/local/lib/python3.7/dist-packages (from tensorflow<2.8,>=2.7.0->tensorflow-text) (1.42.0)
Requirement already satisfied: gast<0.5.0,>=0.2.1 in /usr/local/lib/python3.7/dist-packages (from tensorflow<2.8,>=2.7.0->tensorflow-text) (0.4.0)
Requirement already satisfied: h5py>=2.9.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow<2.8,>=2.7.0->tensorflow-text) (3.1.0)
Requirement already satisfied: cached-property in /usr/local/lib/python3.7/dist-packages (from h5py>=2.9.0->tensorflow<2.8,>=2.7.0->tensorflow-text) (1.5.2)
Requirement already satisfied: requests<3,>=2.21.0 in /usr/local/lib/python3.7/dist-packages (from tensorboard~=2.6->tensorflow<2.8,>=2.7.0->tensorflow-text) (2.23.0)
Requirement already satisfied: werkzeug>=0.11.15 in /usr/local/lib/python3.7/dist-packages (from tensorboard~=2.6->tensorflow<2.8,>=2.7.0->tensorflow-text) (1.0.1)
Requirement already satisfied: google-auth<3,>=1.6.3 in /usr/local/lib/python3.7/dist-packages (from tensorboard~=2.6->tensorflow<2.8,>=2.7.0->tensorflow-text) (1.35.0)
Requirement already satisfied: markdown>=2.6.8 in /usr/local/lib/python3.7/dist-packages (from tensorboard~=2.6->tensorflow<2.8,>=2.7.0->tensorflow-text) (3.3.6)
Requirement already satisfied: tensorboard-plugin-wit>=1.6.0 in /usr/local/lib/python3.7/dist-packages (from tensorboard~=2.6->tensorflow<2.8,>=2.7.0->tensorflow-text) (1.8.0)
Requirement already satisfied: google-auth-oauthlib<0.5,>=0.4.1 in /usr/local/lib/python3.7/dist-packages (from tensorboard~=2.6->tensorflow<2.8,>=2.7.0->tensorflow-text) (0.4.6)
Requirement already satisfied: tensorboard-data-server<0.7.0,>=0.6.0 in /usr/local/lib/python3.7/dist-packages (from tensorboard~=2.6->tensorflow<2.8,>=2.7.0->tensorflow-text) (0.6.1)
Requirement already satisfied: setuptools>=41.0.0 in /usr/local/lib/python3.7/dist-packages (from tensorboard~=2.6->tensorflow<2.8,>=2.7.0->tensorflow-text) (57.4.0)
Requirement already satisfied: rsa<5,>=3.1.4 in /usr/local/lib/python3.7/dist-packages (from google-auth<3,>=1.6.3->tensorboard~=2.6->tensorflow<2.8,>=2.7.0->tensorflow-text) (4.8)
Requirement already satisfied: pyasn1-modules>=0.2.1 in /usr/local/lib/python3.7/dist-packages (from google-auth<3,>=1.6.3->tensorboard~=2.6->tensorflow<2.8,>=2.7.0->tensorflow-text) (0.2.8)
Requirement already satisfied: cachetools<5.0,>=2.0.0 in /usr/local/lib/python3.7/dist-packages (from google-auth<3,>=1.6.3->tensorboard~=2.6->tensorflow<2.8,>=2.7.0->tensorflow-text) (4.2.4)
Requirement already satisfied: requests-oauthlib>=0.7.0 in /usr/local/lib/python3.7/dist-packages (from google-auth-oauthlib<0.5,>=0.4.1->tensorboard~=2.6->tensorflow<2.8,>=2.7.0->tensorflow-text) (1.3.0)
Requirement already satisfied: importlib-metadata>=4.4 in /usr/local/lib/python3.7/dist-packages (from markdown>=2.6.8->tensorboard~=2.6->tensorflow<2.8,>=2.7.0->tensorflow-text) (4.8.2)
Requirement already satisfied: zipp>=0.5 in /usr/local/lib/python3.7/dist-packages (from importlib-metadata>=4.4->markdown>=2.6.8->tensorboard~=2.6->tensorflow<2.8,>=2.7.0->tensorflow-text) (3.6.0)
Requirement already satisfied: pyasn1<0.5.0,>=0.4.6 in /usr/local/lib/python3.7/dist-packages (from pyasn1-modules>=0.2.1->google-auth<3,>=1.6.3->tensorboard~=2.6->tensorflow<2.8,>=2.7.0->tensorflow-text) (0.4.8)
Requirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from requests<3,>=2.21.0->tensorboard~=2.6->tensorflow<2.8,>=2.7.0->tensorflow-text) (3.0.4)
Requirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.7/dist-packages (from requests<3,>=2.21.0->tensorboard~=2.6->tensorflow<2.8,>=2.7.0->tensorflow-text) (2.10)
Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.7/dist-packages (from requests<3,>=2.21.0->tensorboard~=2.6->tensorflow<2.8,>=2.7.0->tensorflow-text) (2021.10.8)
Requirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.7/dist-packages (from requests<3,>=2.21.0->tensorboard~=2.6->tensorflow<2.8,>=2.7.0->tensorflow-text) (1.24.3)
Requirement already satisfied: oauthlib>=3.0.0 in /usr/local/lib/python3.7/dist-packages (from requests-oauthlib>=0.7.0->google-auth-oauthlib<0.5,>=0.4.1->tensorboard~=2.6->tensorflow<2.8,>=2.7.0->tensorflow-text) (3.1.1)
Installing collected packages: tensorflow-text
Successfully installed tensorflow-text-2.7.3
###Markdown
Importing a lot of libraries, half of which I am probably not going to use.
###Code
import tensorflow as tf
import numpy as np
import nltk
nltk.download('stopwords')
from nltk.corpus import stopwords
import string
from keras.datasets import imdb
from keras.preprocessing import sequence
from tensorflow.keras import layers
from tensorflow.keras import losses
from tensorflow.keras import utils
from tensorflow.keras.layers import TextVectorization
import math as mt
from math import sqrt
import matplotlib.pyplot as plt
import sklearn as sk
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.neighbors import KNeighborsClassifier, NeighborhoodComponentsAnalysis
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
import tensorflow_datasets as tfds
###Output
[nltk_data] Downloading package stopwords to /root/nltk_data...
[nltk_data] Unzipping corpora/stopwords.zip.
###Markdown
Fetching the file from my Google Drive. Mounting Google Drive at '/content/drive' in the VM.
###Code
from google.colab import drive
drive.mount('/content/drive')
###Output
Mounted at /content/drive
###Markdown
Checking if the file exists on my drive
###Code
!ls /content/drive/MyDrive/Colab\ Notebooks
###Output
b.npy 'hw9_raw (1).ipynb' w.npy y_train.npy
glove.6B.50d.txt hw9_raw.ipynb X_test.npy
hw11_no_code.ipynb Untitled0.ipynb X_train.npy
hw12_no_code.ipynb Untitled1.ipynb y_test.npy
###Markdown
Loaoding The Glove68 file from the drive.
###Code
with open("/content/drive/MyDrive/Colab Notebooks/glove.6B.50d.txt","r") as f:
gloove68_vector_file_text = np.array(f.read())
###Output
_____no_output_____
###Markdown
Splitting the vocabulary file into one line per word
###Code
gloove68_vector_file_text_newLined = str(gloove68_vector_file_text).split('\n')
gloove68_vector_file_text_newLined[0]
###Output
_____no_output_____
###Markdown
Parsing the GloVe file into a word -> vector dictionary (vector encoding)
###Code
gloove68_vectors = { str(vocab_vec).split(' ')[0] : np.array(str(vocab_vec).split(' ')[1:], dtype='float64') for vocab_vec in gloove68_vector_file_text_newLined}
gloove68_vectors['the']
###Output
_____no_output_____
###Markdown
Using PCA to get the N nearest neighbors of the word 'dog' We get the N nearest words to 'dog', check whether the data needs scaling, and perform PCA to reduce the vectors to 2 dimensions. * Observed that some entries have zero-length vectors (maybe because of a spacing or parsing error). Therefore, we filter those entries out.
###Code
# Separating words from their vectors
words = []
vectors = []
gloove68_vectors_filtered = {}
for i, word in enumerate(gloove68_vectors):
if(len(gloove68_vectors[word]) > 0):
gloove68_vectors_filtered[word] = gloove68_vectors[word]
words.append(word)
vectors.append(list(gloove68_vectors[word]))
words = np.array(words, dtype='str')
vectors = np.array(vectors)
print(words[:5], vectors[:5])
lengths = [ len(gloove68_vectors_filtered[word]) for word in words]
print(set(lengths))
###Output
{50}
###Markdown
* Observation: Data are not normalized. 1) Extract the 10_000 nearest words to 'dog'
###Code
distance_from = 'dog'
def getEucDist(word, target):
try:
dist = gloove68_vectors[word] - gloove68_vectors[target]
l1 = sum(np.multiply(dist,dist))
l2 = sqrt(l1)
return (word, l2)
except:
pass
words_distances = list(map(lambda inp: getEucDist(inp, distance_from), words))
words_distances[:5]
sorted_distances = sorted(words_distances, key = lambda tup: tup[1] )
sorted_distances[0:5]
# Extracting the first 10_000 neighbours
n_neighbors = 10000
dog_neighbours = sorted_distances[0:n_neighbors+1] # I included the dog as well
print(len(dog_neighbours), dog_neighbours[:5])
dog_cluster_words = [ word for (word, dist) in dog_neighbours]
print(len(dog_cluster_words), dog_cluster_words[:5])
dog_cluster_vectors = [ gloove68_vectors[word] for word in dog_cluster_words]
print(len(dog_cluster_vectors), dog_cluster_vectors[:2])
###Output
10001 [array([ 0.11008 , -0.38781 , -0.57615 , -0.27714 , 0.70521 ,
0.53994 , -1.0786 , -0.40146 , 1.1504 , -0.5678 ,
0.0038977, 0.52878 , 0.64561 , 0.47262 , 0.48549 ,
-0.18407 , 0.1801 , 0.91397 , -1.1979 , -0.5778 ,
-0.37985 , 0.33606 , 0.772 , 0.75555 , 0.45506 ,
-1.7671 , -1.0503 , 0.42566 , 0.41893 , -0.68327 ,
1.5673 , 0.27685 , -0.61708 , 0.64638 , -0.076996 ,
0.37118 , 0.1308 , -0.45137 , 0.25398 , -0.74392 ,
-0.086199 , 0.24068 , -0.64819 , 0.83549 , 1.2502 ,
-0.51379 , 0.04224 , -0.88118 , 0.7158 , 0.38519 ]), array([ 0.45281 , -0.50108 , -0.53714 , -0.015697, 0.22191 , 0.54602 ,
-0.67301 , -0.6891 , 0.63493 , -0.19726 , 0.33685 , 0.7735 ,
0.90094 , 0.38488 , 0.38367 , 0.2657 , -0.08057 , 0.61089 ,
-1.2894 , -0.22313 , -0.61578 , 0.21697 , 0.35614 , 0.44499 ,
0.60885 , -1.1633 , -1.1579 , 0.36118 , 0.10466 , -0.78325 ,
1.4352 , 0.18629 , -0.26112 , 0.83275 , -0.23123 , 0.32481 ,
0.14485 , -0.44552 , 0.33497 , -0.95946 , -0.097479, 0.48138 ,
-0.43352 , 0.69455 , 0.91043 , -0.28173 , 0.41637 , -1.2609 ,
0.71278 , 0.23782 ])]
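###Markdown
Aside (a sketch, not part of the original flow): the same L2 distances can be computed with one vectorized numpy call instead of a Python loop over every word, which is much faster on hundreds of thousands of vectors.
###Code
import numpy as np

# Vectorized L2 distance from every row of `vectors` to the 'dog' vector.
dists = np.linalg.norm(vectors - gloove68_vectors['dog'], axis=1)
nearest_idx = np.argsort(dists)[:10]
print([words[i] for i in nearest_idx])
###Output
_____no_output_____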
###Markdown
2) Apply PCA to reduce the dimensionality to 2.* Before applying PCA we need to scale the data, since as we observed the data are not scaled.* We apply PCA to reduce the dimensionality of the 10_000 nearest points to 'dog'
###Code
scaler_shell = StandardScaler()
scaler = scaler_shell.fit(dog_cluster_vectors)
scaled_dog_cluster_vectors = scaler.transform(dog_cluster_vectors)
print(scaled_dog_cluster_vectors[:1])
print("Check if hte input was same as output after scalling", scaled_dog_cluster_vectors == dog_cluster_vectors)
pca_shell = PCA(n_components=2, random_state=0)
pca_model = pca_shell.fit(scaled_dog_cluster_vectors)
# 2dEmbed
dog_cluster_vectors_2d = pca_model.transform(scaled_dog_cluster_vectors)
annot_neighbours = 50
# Defining the plot
fig, ax = plt.subplots()
# Drawing the rest of the cluster
ax.scatter(
dog_cluster_vectors_2d[annot_neighbours + 1:, 0],
dog_cluster_vectors_2d[annot_neighbours + 1:, 1],
c='gray', alpha=0.1)
# Drawing hte 50 nearest words.
ax.scatter(
dog_cluster_vectors_2d[1:annot_neighbours + 1, 0],
dog_cluster_vectors_2d[1:annot_neighbours + 1, 1],
c='b')
ax.scatter(
dog_cluster_vectors_2d[0, 0],
dog_cluster_vectors_2d[0, 1],
c='r')
for i, vec in enumerate(dog_cluster_vectors_2d[1:annot_neighbours + 1]):
    ax.annotate(dog_cluster_words[i + 1], (vec[0], vec[1]))  # offset by 1: index 0 of dog_cluster_words is 'dog' itself, but the slice above starts at 1
plt.title("Nearest Neighbours to 'dog'")
plt.show()
###Output
_____no_output_____
###Markdown
2) IMDB reviews with word embeddings 2.1 Loading the data
###Code
#(train_data, train_labels), (test_data, test_labels) = imdb.load_data(num_words = VOCAB_SIZE)
MAXLEN = 150
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.imdb.load_data(
path="imdb.npz",
num_words=None,
skip_top=0,
maxlen=150,
seed=113,
start_char=1,
oov_char=2,
index_from=3)
print(x_train.shape)
print("First element length is: ", len(x_train[0]))
###Output
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/imdb.npz
17465344/17464789 [==============================] - 0s 0us/step
17473536/17464789 [==============================] - 0s 0us/step
(9290,)
First element length is: 141
###Markdown
2.2 Exploring the data and setting up the GLoVE for it. Notes * we need function: sentences -> Sequence of GLoVE? * GLoVE size = 50 * mean your input along the sample_length axis -> this generates a dataset useable to the MLP -> (n_samples, 50) * Generating a mean representation * handle out of vocabulary np.zeros(50) * ~9'000 samples large and the maximum length is only 150 words the dataset is represented as index values. map twice (index -> word -> GloVe). * the index-to-word conversion is achievable by Keras, read the documentation
###Code
# Loading the word->index data.
imdb_word_index = tf.keras.datasets.imdb.get_word_index(path='imdb_word_index.json')
# Reverse the word index to obtain a dict mapping indices to words
inverted_word_index = dict((i, word) for (word, i) in imdb_word_index.items())
print(len(imdb_word_index))
print(len(inverted_word_index))
# Decode the first sequence in the dataset
decoded_sequence = " ".join(inverted_word_index[i] for i in x_train[0])
print(decoded_sequence)
decoded_sequence = " ".join(inverted_word_index[i] for i in x_train[1])
print(decoded_sequence)
###Output
the as there in at by br of sure many br of proving no only women was than doesn't as you never of hat night that with ignored they bad out superman plays of how star so stories film comes defense date of wide they don't do that had with of hollywood br of my seeing fan this of pop out body shots in having because cause it's stick passing first were enjoys for from look seven sense from me superimposition die in character as cuban issues but is you that isn't one song just is him less are strongly not are you that different just even by this of you there is eight when it part are film's love film's 80's was big also light don't wrangling as it in character looked cinematography so stories is far br man acting
the sure themes br only acting i i was favourite as on she they hat but already most was scares minor if flash was well also good 8 older was with enjoy used enjoy phone too i'm of you an job br only women than robot to was with these unexpected sure little sure guy sure on was one your life was children in particularly only yes she sort is jerry but so stories them final known to have does such most that supposed imagination very moving antonioni only yes this was seconds for imagination on this of reptiles to plays that nights to for supposed still been last fan always your bit that strong said clean knowing br theory to car masterpiece out in also show for film's was tale have flash but look part i'm film as to penelope is script hard br only acting
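###Markdown
Aside (an observation, with a sketch): load_data was called with index_from=3, so the stored indices are shifted by 3 relative to imdb_word_index (1 is the start token and 2 the OOV token). That is why the decoded sentences above read oddly. A decoding that accounts for the offset would look like this:
###Code
# Offset-aware decoding: indices below the offset are special tokens, the rest map to index - offset.
def decode_with_offset(encoded, offset=3):
    return " ".join(inverted_word_index.get(i - offset, "<?>") for i in encoded if i >= offset)

print(decode_with_offset(x_train[0]))
###Output
_____no_output_____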
###Markdown
* Wrapping the decoding of the sequences (x_train) data into a function that outputs sentences from the given sequence of indices. Note that this function relies on "inverted_word_index" to be defined.
###Code
def decodeSeqs(sequences):
dec_sentences = []
for enc_sentence in sequences:
dec_sent = " ".join(inverted_word_index[i] for i in enc_sentence)
dec_sentences.append(dec_sent)
return dec_sentences
print("First 2 sentences in the loaded data are: ")
print(decodeSeqs(x_train[:2]))
###Output
First 2 sentences in the loaded data are:
["the as there in at by br of sure many br of proving no only women was than doesn't as you never of hat night that with ignored they bad out superman plays of how star so stories film comes defense date of wide they don't do that had with of hollywood br of my seeing fan this of pop out body shots in having because cause it's stick passing first were enjoys for from look seven sense from me superimposition die in character as cuban issues but is you that isn't one song just is him less are strongly not are you that different just even by this of you there is eight when it part are film's love film's 80's was big also light don't wrangling as it in character looked cinematography so stories is far br man acting", "the sure themes br only acting i i was favourite as on she they hat but already most was scares minor if flash was well also good 8 older was with enjoy used enjoy phone too i'm of you an job br only women than robot to was with these unexpected sure little sure guy sure on was one your life was children in particularly only yes she sort is jerry but so stories them final known to have does such most that supposed imagination very moving antonioni only yes this was seconds for imagination on this of reptiles to plays that nights to for supposed still been last fan always your bit that strong said clean knowing br theory to car masterpiece out in also show for film's was tale have flash but look part i'm film as to penelope is script hard br only acting"]
###Markdown
* We also need to define an encoding function for making our lives easier when cleaning the data.
###Code
'''
Sequences are array of strings. Each string will be turned into a vector of indices.
'''
def encodeSeqs(sequences):
enc_sentences = []
for dec_sentence in sequences:
words_in_sentence = dec_sentence.split(' ')
dec_sent = [imdb_word_index[word] for word in words_in_sentence]
enc_sentences.append(dec_sent)
return enc_sentences
temp = decodeSeqs(x_train[:2])
print("First encoding the decoded 2 sentences that we saw in the previous cell: ")
print(encodeSeqs(temp))
print("---------------")
print("The first 2 sequences in the main data")
print(x_train[:2])
print("---------------")
print("Comparing first elements of the encoded back sequence and the main data:")
print(x_train[0] == encodeSeqs(temp)[0])
print(x_train[1] == encodeSeqs(temp)[1])
###Output
First encoding the decoded 2 sentences that we saw in the previous cell:
[[1, 14, 47, 8, 30, 31, 7, 4, 249, 108, 7, 4, 5974, 54, 61, 369, 13, 71, 149, 14, 22, 112, 4, 2401, 311, 12, 16, 3711, 33, 75, 43, 1829, 296, 4, 86, 320, 35, 534, 19, 263, 4821, 1301, 4, 1873, 33, 89, 78, 12, 66, 16, 4, 360, 7, 4, 58, 316, 334, 11, 4, 1716, 43, 645, 662, 8, 257, 85, 1200, 42, 1228, 2578, 83, 68, 3912, 15, 36, 165, 1539, 278, 36, 69, 44076, 780, 8, 106, 14, 6905, 1338, 18, 6, 22, 12, 215, 28, 610, 40, 6, 87, 326, 23, 2300, 21, 23, 22, 12, 272, 40, 57, 31, 11, 4, 22, 47, 6, 2307, 51, 9, 170, 23, 595, 116, 595, 1352, 13, 191, 79, 638, 89, 51428, 14, 9, 8, 106, 607, 624, 35, 534, 6, 227, 7, 129, 113], [1, 249, 1323, 7, 61, 113, 10, 10, 13, 1637, 14, 20, 56, 33, 2401, 18, 457, 88, 13, 2626, 1400, 45, 3171, 13, 70, 79, 49, 706, 919, 13, 16, 355, 340, 355, 1696, 96, 143, 4, 22, 32, 289, 7, 61, 369, 71, 2359, 5, 13, 16, 131, 2073, 249, 114, 249, 229, 249, 20, 13, 28, 126, 110, 13, 473, 8, 569, 61, 419, 56, 429, 6, 1513, 18, 35, 534, 95, 474, 570, 5, 25, 124, 138, 88, 12, 421, 1543, 52, 725, 6397, 61, 419, 11, 13, 1571, 15, 1543, 20, 11, 4, 22016, 5, 296, 12, 3524, 5, 15, 421, 128, 74, 233, 334, 207, 126, 224, 12, 562, 298, 2167, 1272, 7, 2601, 5, 516, 988, 43, 8, 79, 120, 15, 595, 13, 784, 25, 3171, 18, 165, 170, 143, 19, 14, 5, 7224, 6, 226, 251, 7, 61, 113]]
---------------
The first 2 sequences in the main data
[list([1, 14, 47, 8, 30, 31, 7, 4, 249, 108, 7, 4, 5974, 54, 61, 369, 13, 71, 149, 14, 22, 112, 4, 2401, 311, 12, 16, 3711, 33, 75, 43, 1829, 296, 4, 86, 320, 35, 534, 19, 263, 4821, 1301, 4, 1873, 33, 89, 78, 12, 66, 16, 4, 360, 7, 4, 58, 316, 334, 11, 4, 1716, 43, 645, 662, 8, 257, 85, 1200, 42, 1228, 2578, 83, 68, 3912, 15, 36, 165, 1539, 278, 36, 69, 44076, 780, 8, 106, 14, 6905, 1338, 18, 6, 22, 12, 215, 28, 610, 40, 6, 87, 326, 23, 2300, 21, 23, 22, 12, 272, 40, 57, 31, 11, 4, 22, 47, 6, 2307, 51, 9, 170, 23, 595, 116, 595, 1352, 13, 191, 79, 638, 89, 51428, 14, 9, 8, 106, 607, 624, 35, 534, 6, 227, 7, 129, 113])
list([1, 249, 1323, 7, 61, 113, 10, 10, 13, 1637, 14, 20, 56, 33, 2401, 18, 457, 88, 13, 2626, 1400, 45, 3171, 13, 70, 79, 49, 706, 919, 13, 16, 355, 340, 355, 1696, 96, 143, 4, 22, 32, 289, 7, 61, 369, 71, 2359, 5, 13, 16, 131, 2073, 249, 114, 249, 229, 249, 20, 13, 28, 126, 110, 13, 473, 8, 569, 61, 419, 56, 429, 6, 1513, 18, 35, 534, 95, 474, 570, 5, 25, 124, 138, 88, 12, 421, 1543, 52, 725, 6397, 61, 419, 11, 13, 1571, 15, 1543, 20, 11, 4, 22016, 5, 296, 12, 3524, 5, 15, 421, 128, 74, 233, 334, 207, 126, 224, 12, 562, 298, 2167, 1272, 7, 2601, 5, 516, 988, 43, 8, 79, 120, 15, 595, 13, 784, 25, 3171, 18, 165, 170, 143, 19, 14, 5, 7224, 6, 226, 251, 7, 61, 113])]
---------------
Comparing first elements of the encoded back sequence and the main data:
True
True
###Markdown
* Now we have encoding and decoding for the sequences. Observations: * number of x_train sentences: 9290 * size of the full vocabulary used: 88584 1) We get rid of the OOV words
###Code
# Getting rid of the out-of-vocabulary words or very rare words.
'''
Clean every sequence: decode to text, strip punctuation, keep only alphabetic tokens,
drop stop words, single-character tokens and words with no index mapping,
then re-encode the cleaned sentences.
'''
def filterSeqs(sequences):
    # from an array of index vectors to an array of strings
decoded_sequences = decodeSeqs(sequences)
i = 0
for sent in decoded_sequences:
# getting the words in the sentence
tokens = sent.split()
# remove punctuation from each token
table = str.maketrans('', '', string.punctuation)
tokens = [w.translate(table) for w in tokens]
# remove remaining tokens that are not alphabetic
tokens = [word for word in tokens if word.isalpha()]
# filter out stop words
stop_words = set(stopwords.words('english'))
tokens = [w for w in tokens if not w in stop_words]
# filter out short tokens
tokens = [word for word in tokens if len(word) > 1]
# filter out tokens that lost their mappings.
tokens = [word for word in tokens if word in imdb_word_index]
# Building up the sentence after cleaning it.
built_sent = " ".join(tokens)
# overwritting the sentence in the sequences.
decoded_sequences[i] = built_sent
i += 1
return encodeSeqs(decoded_sequences)
temp = filterSeqs(x_train[:3])
print(decodeSeqs(temp))
x_train_filtered = filterSeqs(x_train)
###Output
_____no_output_____
###Markdown
2) Create new training dataset with the GLoVE representations.
###Code
def seqGlove(sequences):
decoded_sequences = decodeSeqs(sequences)
ret = []
for sentence in decoded_sequences:
# tokens of the sentnence
words = sentence.split()
# the glove representation of the sentence. Now sentence is: (n, 50)
sent_vectorized = np.array([np.array(gloove68_vectors[word]) for word in words if word in gloove68_vectors])
# Padding the sentence for remedy and make it constance size (150)
padding = np.array([ np.zeros(50) for i in range(len(sent_vectorized), MAXLEN)])
final_sent_vect = np.append(sent_vectorized, padding, axis=0)
# appending the current vectorized sequence to the "ret"
if(np.sum(np.sum(final_sent_vect)) != 0):
ret.append(final_sent_vect)
return np.array(ret)
temp1 = seqGlove(x_train_filtered[:2])
print(temp1.shape)
print(temp1[0][0])
print(temp1[0][149])
vectorized_training_data = seqGlove(x_train_filtered)
print(vectorized_training_data.shape)
###Output
(9290, 150, 50)
###Markdown
Getting the mean of the sentences.
###Code
meaned_training_data = np.array([np.mean(glove_seq[:], axis=0) for glove_seq in vectorized_training_data] )
print(meaned_training_data.shape)
###Output
(9290, 50)
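###Markdown
Note (an aside, assuming the padding convention above): averaging over all 150 positions includes the zero padding, which shrinks the representation of short reviews. A masked mean that averages only the real tokens is a small change:
###Code
import numpy as np

def masked_mean(glove_seq):
    # Keep only rows that are not all-zero padding before averaging.
    mask = np.any(glove_seq != 0, axis=1)
    return glove_seq[mask].mean(axis=0) if mask.any() else np.zeros(glove_seq.shape[1])

masked_training_data = np.array([masked_mean(s) for s in vectorized_training_data])
print(masked_training_data.shape)
###Output
_____no_output_____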
###Markdown
Finally, we have our data ready to be fed into the machine learning model!! Revision and conclusion of this section: we downloaded the IMDB index data We downloaded the word->index mapping and created an index->word conversion for the IMDB data We created encoding and decoding functions so we can retrieve either the word or the index representation of the data We cleaned the data of punctuation and out-of-vocabulary errors We transformed the data into its GloVe representation. We made the data dimensions constant by padding the sentences. Thus, the data dimensions are now (n_sentences, 150_glove_words, 50_dimensions_per_word) We took the mean of each sentence's vectors. Thus, the dimensions of the training data are (n_samples, 50), where the 50 values are element-wise means. Creating a simple neural network and training it for 20-25 iterations (epochs). The model comprises: Dense(256, relu), Dense(64, relu), Dense(1, sigmoid). Note: Use default parameters in the compile: 'adam', 'binary_crossentropy', 'accuracy' metric. Train for 20-25 epochs.
###Code
print(len(vectorized_training_data))
nn_model = tf.keras.Sequential([
tf.keras.layers.Dense(256, activation='relu', input_shape=(None, 50)),
tf.keras.layers.Dense(64, activation='relu'),
tf.keras.layers.Dense(1, activation="sigmoid")
])
nn_model.summary()
# Our vectorized labels
y_train_reshaped = np.asarray(y_train).astype('float32').reshape((-1,1))
y_test_reshaped = np.asarray(y_test).astype('float32').reshape((-1,1))
nn_model.compile(loss="binary_crossentropy",optimizer="adam",metrics=['acc'])
history = nn_model.fit(meaned_training_data, y_train_reshaped , epochs=25, validation_split=0.2)
# Preprocessing the test data as...
x_test_filtered = filterSeqs(x_test)
vectorized_test_data = seqGlove(x_test_filtered)
meaned_test_data = np.array([np.mean(glove_seq[:], axis=0) for glove_seq in vectorized_test_data] )
results = nn_model.evaluate(meaned_test_data, y_test_reshaped)
print(results)
prediction = nn_model.predict(meaned_test_data[:1])
print(prediction, y_test_reshaped[0] )
###Output
WARNING:tensorflow:Model was constructed with shape (None, None, 50) for input KerasTensor(type_spec=TensorSpec(shape=(None, None, 50), dtype=tf.float32, name='dense_input'), name='dense_input', description="created by layer 'dense_input'"), but it was called on an input with incompatible shape (None, 50).
[[0.4656965]] [0.]
###Markdown
Conclusion of the 2nd task: the accuracy after training the simple neural network model on the preprocessed data was ~58%. A good follow-up would be to increase the number of epochs, test the model with different layers, and plot the changes. However, this is a homework and the instructor didn't ask for it, so it is lower priority. If I have time I will implement it in the future. 3) Sequence modeling with LSTM.
###Code
lstm_model = tf.keras.Sequential([
tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(64, return_sequences=True), input_shape=(150, 50)),
tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(64)),
tf.keras.layers.Dense(64, activation='relu'),
tf.keras.layers.Dense(1, activation='sigmoid')
])
lstm_model.summary()
lstm_model.compile(loss="binary_crossentropy",optimizer="adam",metrics=['acc'])
history = lstm_model.fit(vectorized_training_data, y_train_reshaped , epochs=25, validation_split=0.2)
###Output
Epoch 1/25
233/233 [==============================] - 45s 156ms/step - loss: 0.6888 - acc: 0.5332 - val_loss: 0.6816 - val_acc: 0.5748
Epoch 2/25
233/233 [==============================] - 34s 147ms/step - loss: 0.6720 - acc: 0.5900 - val_loss: 0.6582 - val_acc: 0.6211
Epoch 3/25
233/233 [==============================] - 34s 147ms/step - loss: 0.6531 - acc: 0.6169 - val_loss: 0.6654 - val_acc: 0.5834
Epoch 4/25
233/233 [==============================] - 34s 148ms/step - loss: 0.6315 - acc: 0.6492 - val_loss: 0.6399 - val_acc: 0.6378
Epoch 5/25
233/233 [==============================] - 34s 147ms/step - loss: 0.6056 - acc: 0.6728 - val_loss: 0.6267 - val_acc: 0.6545
Epoch 6/25
233/233 [==============================] - 34s 147ms/step - loss: 0.5509 - acc: 0.7220 - val_loss: 0.6258 - val_acc: 0.6577
Epoch 7/25
233/233 [==============================] - 34s 147ms/step - loss: 0.5349 - acc: 0.7363 - val_loss: 0.5864 - val_acc: 0.6938
Epoch 8/25
233/233 [==============================] - 34s 147ms/step - loss: 0.4774 - acc: 0.7719 - val_loss: 0.5941 - val_acc: 0.7045
Epoch 9/25
233/233 [==============================] - 34s 147ms/step - loss: 0.4213 - acc: 0.8096 - val_loss: 0.5893 - val_acc: 0.7126
Epoch 10/25
233/233 [==============================] - 34s 147ms/step - loss: 0.3910 - acc: 0.8271 - val_loss: 0.5971 - val_acc: 0.7072
Epoch 11/25
233/233 [==============================] - 34s 147ms/step - loss: 0.3301 - acc: 0.8583 - val_loss: 0.6530 - val_acc: 0.7228
Epoch 12/25
233/233 [==============================] - 34s 147ms/step - loss: 0.2842 - acc: 0.8819 - val_loss: 0.7146 - val_acc: 0.6889
Epoch 13/25
233/233 [==============================] - 34s 148ms/step - loss: 0.2323 - acc: 0.9078 - val_loss: 0.7852 - val_acc: 0.7013
Epoch 14/25
233/233 [==============================] - 34s 146ms/step - loss: 0.1915 - acc: 0.9228 - val_loss: 0.9669 - val_acc: 0.6873
Epoch 15/25
233/233 [==============================] - 34s 146ms/step - loss: 0.1422 - acc: 0.9442 - val_loss: 1.0706 - val_acc: 0.6808
Epoch 16/25
233/233 [==============================] - 34s 146ms/step - loss: 0.0898 - acc: 0.9657 - val_loss: 1.2833 - val_acc: 0.6851
Epoch 17/25
233/233 [==============================] - 34s 147ms/step - loss: 0.0918 - acc: 0.9639 - val_loss: 1.2239 - val_acc: 0.6959
Epoch 18/25
233/233 [==============================] - 34s 147ms/step - loss: 0.0737 - acc: 0.9717 - val_loss: 1.4588 - val_acc: 0.6975
Epoch 19/25
233/233 [==============================] - 34s 147ms/step - loss: 0.0479 - acc: 0.9840 - val_loss: 1.5281 - val_acc: 0.6927
Epoch 20/25
233/233 [==============================] - 34s 148ms/step - loss: 0.0426 - acc: 0.9859 - val_loss: 1.6218 - val_acc: 0.6884
Epoch 21/25
233/233 [==============================] - 34s 147ms/step - loss: 0.0505 - acc: 0.9818 - val_loss: 1.6629 - val_acc: 0.6997
Epoch 22/25
233/233 [==============================] - 34s 147ms/step - loss: 0.0407 - acc: 0.9865 - val_loss: 1.6146 - val_acc: 0.6862
Epoch 23/25
233/233 [==============================] - 34s 146ms/step - loss: 0.0251 - acc: 0.9904 - val_loss: 2.0796 - val_acc: 0.6539
Epoch 24/25
233/233 [==============================] - 34s 147ms/step - loss: 0.0329 - acc: 0.9884 - val_loss: 1.7722 - val_acc: 0.6787
Epoch 25/25
233/233 [==============================] - 34s 147ms/step - loss: 0.0258 - acc: 0.9904 - val_loss: 2.0092 - val_acc: 0.7013
###Markdown
* Observation: After the 10th epoch, the model seems to start overfitting the training data, and by the 20th it has fully overfit it, reaching ~99% training accuracy (a quick visual check of this is sketched after the evaluation below).
###Code
results = lstm_model.evaluate(vectorized_test_data, y_test_reshaped)
print(results)
###Output
304/304 [==============================] - 18s 60ms/step - loss: 2.2310 - acc: 0.6814
[2.230994939804077, 0.6813685297966003]
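###Markdown
The `history` object above stores per-epoch metrics, so the divergence between training and validation accuracy can be plotted directly. The cell below is an illustrative sketch that was not part of the original run; it assumes the `history` and `lstm_model` objects defined above.
###Code
# Illustrative sketch: visualise where train/val accuracy diverge
import matplotlib.pyplot as plt

plt.plot(history.history['acc'], label='train acc')
plt.plot(history.history['val_acc'], label='val acc')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.legend()
plt.show()

# An EarlyStopping callback would halt training near the divergence point.
# It is left commented out so the recorded 25-epoch run above stays unchanged:
# early_stop = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=3,
#                                               restore_best_weights=True)
# lstm_model.fit(vectorized_training_data, y_train_reshaped, epochs=25,
#                validation_split=0.2, callbacks=[early_stop])
###Output
_____no_output_____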
###Markdown
Conclusion: The bidirectional LSTM outperformed the simple neural network we implemented in Task 2 by roughly 10 percentage points of accuracy.
###Code
###Output
_____no_output_____ |
notebooks/thermodynamics/heatTransfer.ipynb | ###Markdown
###Code
#@title Heat transfer
#@markdown In this section a general introduction to heat transfer is introduced.
#@markdown <br><br>This document is part of the module ["Introduction to Gas Processing using NeqSim in Colab"](https://colab.research.google.com/github/EvenSol/NeqSim-Colab/blob/master/notebooks/examples_of_NeqSim_in_Colab.ipynb#scrollTo=_eRtkQnHpL70).
%%capture
!pip install neqsim
import neqsim
from neqsim.thermo.thermoTools import *
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
import math
%matplotlib inline
###Output
_____no_output_____
###Markdown
Literature...
###Code
#@title Heat transfer
from IPython.display import YouTubeVideo
YouTubeVideo('kNZi12OV9Xc', width=600, height=400)
###Output
_____no_output_____ |
DS-Unit-1-Sprint-4-Linear-Algebra-master/module1-linear-algebra-review/DS7_Linear_Algebra.ipynb | ###Markdown
Part 1 - Scalars and VectorsFor the questions below it is not sufficient to simply provide answers to the questions; you must solve the problems and show your work using Python (the NumPy library will help a lot!). Translate the vectors and matrices into their appropriate Python representations and use NumPy or functions that you write yourself to demonstrate the result or property. 1.1 Create a two-dimensional vector and plot it on a graph
###Code
import math
import matplotlib.pyplot as plt
import numpy as np
# 2 dimensional vector
red = [.2, .3]
plt.arrow(0, 0, .2, .3, head_width=.09, head_length=0.01, color= "r")
plt.title("2D Vector")
plt.show()
###Output
_____no_output_____
###Markdown
1.2 Create a three-dimensional vector and plot it on a graph
###Code
from mpl_toolkits.mplot3d import Axes3D
red = [.1, .2, .3]
blue = [.3, .2, .1]
green = [.2, .1, .2]
threeD = np.array([[0, 0, 0, .1, .2, .3],
[0, 0, 0, .3, .2, .1],
[0, 0, 0, .2, .1, .1]])
X, Y, Z, U, V, W = zip(*threeD)
fig = plt.figure()
ax = fig.add_subplot(111, projection = "3d")
ax.quiver(X, Y, Z, U, V, W, length=1)
ax.set_xlim([0, 1])
ax.set_ylim([0, 1])
ax.set_zlim([0, 1])
plt.show()
###Output
_____no_output_____
###Markdown
1.3 Scale the vectors you created in 1.1 by $5$, $\pi$, and $-e$ and plot all four vectors (original + 3 scaled vectors) on a graph. What do you notice about these vectors?
###Code
from math import e, pi
print(-e)
print(pi)
red = [.2, .3]
#Scalar Multiplication
blue = np.multiply(5, red)
yellow = np.multiply(pi, red)
green = np.multiply(-e, red)
#Plot Scaled Vectors
plt.arrow(0,0, red[0], red[1],head_width=.09, head_length=0.09, color ='red')
plt.arrow(0,0, green[0], green[1],head_width=.05, head_length=0.05, color ='green')
plt.arrow(0,0, blue[0], blue[1],head_width=.05, head_length=0.05, color ='blue')
plt.arrow(0,0, yellow[0], yellow[1],head_width=.05, head_length=0.05, color ='yellow')
plt.xlim(-2,2)
plt.ylim(-2,2)
plt.title("Scaled Vectors")
plt.show()
###Output
_____no_output_____
###Markdown
1.4 Graph vectors $\vec{a}$ and $\vec{b}$ and plot them on a graph\begin{align}\vec{a} = \begin{bmatrix} 5 \\ 7 \end{bmatrix}\qquad\vec{b} = \begin{bmatrix} 3 \\4 \end{bmatrix}\end{align}
###Code
a = [5, 7]
b = [3, 4]
plt.arrow(0, 0, 5, 7, head_width = .5, head_length = 0.3, color = "r")
plt.arrow(0, 0, 3, 4, head_width = .5, head_length = 0.3, color = "b")
plt.xlim(0,10)
plt.ylim(0,10)
plt.show()
###Output
_____no_output_____
###Markdown
1.5 find $\vec{a} - \vec{b}$ and plot the result on the same graph as $\vec{a}$ and $\vec{b}$. Is there a relationship between vectors $\vec{a} \thinspace, \vec{b} \thinspace \text{and} \thinspace \vec{a-b}$
###Code
c = np.subtract(a,b)
plt.arrow(0, 0, 5, 7, head_width = .5, head_length = 0.3, color = "r")
plt.arrow(0, 0, 3, 4, head_width = .5, head_length = 0.3, color = "b")
plt.arrow(0, 0, 2, 3, head_width = .5, head_length = 0.3, color = "g")
plt.xlim(0,10)
plt.ylim(0,10)
plt.show()
#The length of the vector is shorter
###Output
_____no_output_____
###Markdown
1.6 Find $c \cdot d$\begin{align}\vec{c} = \begin{bmatrix}7 & 22 & 4 & 16\end{bmatrix}\qquad\vec{d} = \begin{bmatrix}12 & 6 & 2 & 9\end{bmatrix}\end{align}
###Code
c = np.array([7,22,4,16])
d = np.array([12,6,2,9])
np.dot(c, d)
###Output
_____no_output_____
###Markdown
1.7 Find $e \times f$\begin{align}\vec{e} = \begin{bmatrix} 5 \\ 7 \\ 2 \end{bmatrix}\qquad\vec{f} = \begin{bmatrix} 3 \\4 \\ 6 \end{bmatrix}\end{align}
###Code
e = np.array([5,7,2])
f = np.array([3,4,6])
e = e.T
f = f.T
np.cross(e,f)
###Output
_____no_output_____
###Markdown
1.8 Find $||g||$ and then find $||h||$. Which is longer?\begin{align}\vec{g} = \begin{bmatrix} 1 \\ 1 \\ 1 \\ 8 \end{bmatrix}\qquad\vec{h} = \begin{bmatrix} 3 \\3 \\ 3 \\ 3 \end{bmatrix}\end{align}
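Worked by hand for reference (the code below should reproduce these values):\begin{align}||\vec{g}|| = \sqrt{1^2 + 1^2 + 1^2 + 8^2} = \sqrt{67} \approx 8.19\qquad||\vec{h}|| = \sqrt{3^2 + 3^2 + 3^2 + 3^2} = \sqrt{36} = 6\end{align}so $\vec{g}$ is longer.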
###Code
from numpy import linalg as LA
g = np.array([1, 1, 1, 8])
h = np.array([3, 3, 3, 3])
print(LA.norm(g))  # sqrt(67) ~= 8.19
print(LA.norm(h))  # sqrt(36) = 6, so g is longer
###Output
_____no_output_____
###Markdown
Part 2 - Matrices 2.1 What are the dimensions of the following matrices? Which of the following can be multiplied together? See if you can find all of the different legal combinations.\begin{align}A = \begin{bmatrix}1 & 2 \\3 & 4 \\5 & 6\end{bmatrix}\qquadB = \begin{bmatrix}2 & 4 & 6 \\\end{bmatrix}\qquadC = \begin{bmatrix}9 & 6 & 3 \\4 & 7 & 11\end{bmatrix}\qquadD = \begin{bmatrix}1 & 0 & 0 \\0 & 1 & 0 \\0 & 0 & 1\end{bmatrix}\qquadE = \begin{bmatrix}1 & 3 \\5 & 7\end{bmatrix}\end{align}
###Code
A = np.mat("1 2; 3 4; 4 5")
print("A ", A.shape)
B = np.array([2,4,6])
print("B ",B.shape)
C = np.mat("9 6 3; 4 7 11")
print("C ", C.shape)
D = np.mat("1 0 0; 0 1 0; 0 0 1")
print("D " ,D.shape)
E = np.mat('1 3; 5 7')
print("E", E.shape)
###Output
A (3, 2)
B (3,)
C (2, 3)
D (3, 3)
E (2, 2)
###Markdown
2.2 Find the following products: CD, AE, and BA. What are the dimensions of the resulting matrices? How does that relate to the dimensions of their factor matrices?
###Code
CD = np.matmul(C,D)
print("CD ", CD.shape)
AE = np.matmul(A,E)
print("AE ", AE.shape)
BA = np.matmul(B,A)
print("BA ", BA.shape)
###Output
CD (2, 3)
AE (3, 2)
BA (1, 2)
###Markdown
2.3 Find $F^{T}$. How are the numbers along the main diagonal (top left to bottom right) of the original matrix and its transpose related? What are the dimensions of $F$? What are the dimensions of $F^{T}$?\begin{align}F = \begin{bmatrix}20 & 19 & 18 & 17 \\16 & 15 & 14 & 13 \\12 & 11 & 10 & 9 \\8 & 7 & 6 & 5 \\4 & 3 & 2 & 1\end{bmatrix}\end{align}
###Code
F = np.mat("20 19 18 17; 16 15 14 13; 12 11 10 9; 8 7 6 5; 4 3 2 1")
print("F ", F.shape)
Ft = F.T
print("Ft ", Ft.shape)
print("The diagonal of the og matrix is the same as the transpose")
###Output
F (5, 4)
Ft (4, 5)
The main diagonal of the original matrix is the same as that of its transpose
###Markdown
Part 3 - Square Matrices 3.1 Find $IG$ (be sure to show your work) 😃\begin{align}G= \begin{bmatrix}12 & 11 \\7 & 10 \end{bmatrix}\end{align}
###Code
G = np.mat("12 11; 7 10")
I = np.identity(2)
IG = np.matmul(I, G)
print(IG)
###Output
[[12. 11.]
[ 7. 10.]]
###Markdown
3.2 Find $|H|$ and then find $|J|$.\begin{align}H= \begin{bmatrix}12 & 11 \\7 & 10 \end{bmatrix}\qquadJ= \begin{bmatrix}0 & 1 & 2 \\7 & 10 & 4 \\3 & 2 & 0\end{bmatrix}\end{align}
###Code
H = np.mat("12 11; 7 10")
J = np.mat("0 1 2; 7 10 4; 3 2 0")
print(np.linalg.det(H))
print(np.linalg.det(J))
###Output
43.000000000000014
-19.999999999999996
###Markdown
3.3 Find $H^{-1}$ and then find $J^{-1}$
###Code
print(np.linalg.inv(H))
print(np.linalg.inv(J))
###Output
[[ 0.23255814 -0.25581395]
[-0.1627907 0.27906977]]
[[ 0.4 -0.2 0.8 ]
[-0.6 0.3 -0.7 ]
[ 0.8 -0.15 0.35]]
###Markdown
3.4 Find $HH^{-1}$ and then find $J^{-1}J$. Is $HH^{-1} == J^{-1}J$? Why or Why not?
###Code
# np.multiply is element-wise; the matrix products asked for need np.matmul
H2 = np.matmul(H, np.linalg.inv(H))
print(H2)
J2 = np.matmul(np.linalg.inv(J), J)
print(J2)
# Both products are the identity matrix (up to floating-point error),
# since A @ inv(A) == inv(A) @ A == I for any invertible square matrix A
###Output
[[ 2.79069767 -2.81395349]
[-1.13953488 2.79069767]]
[[ 0. -0.2 1.6]
[-4.2 3. -2.8]
[ 2.4 -0.3 0. ]]
|
ya3_report/notebooks/daily_report.ipynb | ###Markdown
Daily Report - Yahoo! Auctions (Yafuoku!) Access Analytics
###Code
from ya3_report import daily
from datetime import date, timedelta
COUNT_LABELS = ["access", "watch", "bid"]
dt = date.today() - timedelta(days=1)
df = daily.get_data(dt)
df_hour = daily.index_hour(df)
###Output
_____no_output_____
###Markdown
Summary - access: access count - watch: watch count - bid: bid count
###Code
df_hour[COUNT_LABELS].describe()
###Output
_____no_output_____
###Markdown
Total counts
###Code
df_hour[COUNT_LABELS].sum()
###Output
_____no_output_____
###Markdown
Trend over the day
###Code
df_hour
df_hour.plot(subplots=True, xlim=(0, 23), title=f"Counts on {dt}");
###Output
_____no_output_____
###Markdown
Items that attracted attention
###Code
df_aID = daily.index_aID(df)
###Output
_____no_output_____
###Markdown
Top 5 by access count
###Code
df_aID.sort_values("access", ascending=False).head()
###Output
_____no_output_____
###Markdown
Top 5 by watch count
###Code
df_aID.sort_values("watch", ascending=False).head()
###Output
_____no_output_____
###Markdown
Top 5 by bid count
###Code
df_aID.sort_values("bid", ascending=False).head()
###Output
_____no_output_____
###Markdown
Correlation matrix
###Code
df_aID[COUNT_LABELS].corr()
###Output
_____no_output_____ |
ML/DAT8-master/notebooks/19_advanced_sklearn.ipynb | ###Markdown
Advanced scikit-learn Agenda- StandardScaler- Pipeline (bonus content) StandardScaler What is the problem we're trying to solve?
###Code
# fake data
import pandas as pd
train = pd.DataFrame({'id':[0,1,2], 'length':[0.9,0.3,0.6], 'mass':[0.1,0.2,0.8], 'rings':[40,50,60]})
test = pd.DataFrame({'length':[0.59], 'mass':[0.79], 'rings':[54]})
# training data
train
# testing data
test
# define X and y
feature_cols = ['length', 'mass', 'rings']
X = train[feature_cols]
y = train.id
# KNN with K=1
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors=1)
knn.fit(X, y)
# what "should" it predict?
knn.predict(test)
# allow plots to appear in the notebook
%matplotlib inline
import matplotlib.pyplot as plt
plt.rcParams['font.size'] = 14
plt.rcParams['figure.figsize'] = (5, 5)
# create a "colors" array for plotting
import numpy as np
colors = np.array(['red', 'green', 'blue'])
# scatter plot of training data, colored by id (0=red, 1=green, 2=blue)
plt.scatter(train.mass, train.rings, c=colors[train.id], s=50)
# testing data
plt.scatter(test.mass, test.rings, c='white', s=50)
# add labels
plt.xlabel('mass')
plt.ylabel('rings')
plt.title('How we interpret the data')
# adjust the x-limits
plt.scatter(train.mass, train.rings, c=colors[train.id], s=50)
plt.scatter(test.mass, test.rings, c='white', s=50)
plt.xlabel('mass')
plt.ylabel('rings')
plt.title('How KNN interprets the data')
plt.xlim(0, 30)
###Output
_____no_output_____
###Markdown
How does StandardScaler solve the problem?[StandardScaler](http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html) is used for the "standardization" of features, also known as "center and scale" or "z-score normalization".
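Concretely, each feature column is transformed as\begin{align}z = \frac{x - \mu}{\sigma}\end{align}where $\mu$ and $\sigma$ are that column's mean and standard deviation (the "manually standardize" check further down reproduces exactly this).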
###Code
# standardize the features
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(X)
X_scaled = scaler.transform(X)
# original values
X.values
# standardized values
X_scaled
# figure out how it standardized
print scaler.mean_
print scaler.std_
# manually standardize
(X.values - scaler.mean_) / scaler.std_
###Output
_____no_output_____
###Markdown
Applying StandardScaler to a real dataset- Wine dataset from the UCI Machine Learning Repository: [data](http://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data), [data dictionary](http://archive.ics.uci.edu/ml/datasets/Wine)- **Goal:** Predict the origin of wine using chemical analysis
###Code
# read three columns from the dataset into a DataFrame
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data'
col_names = ['label', 'color', 'proline']
wine = pd.read_csv(url, header=None, names=col_names, usecols=[0, 10, 13])
wine.head()
wine.describe()
# define X and y
feature_cols = ['color', 'proline']
X = wine[feature_cols]
y = wine.label
# split into training and testing sets
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1)
# standardize X_train
scaler.fit(X_train)
X_train_scaled = scaler.transform(X_train)
# check that it standardized properly
print X_train_scaled[:, 0].mean()
print X_train_scaled[:, 0].std()
print X_train_scaled[:, 1].mean()
print X_train_scaled[:, 1].std()
# standardize X_test
X_test_scaled = scaler.transform(X_test)
# is this right?
print X_test_scaled[:, 0].mean()
print X_test_scaled[:, 0].std()
print X_test_scaled[:, 1].mean()
print X_test_scaled[:, 1].std()
# KNN accuracy on original data
knn = KNeighborsClassifier(n_neighbors=3)
knn.fit(X_train, y_train)
y_pred_class = knn.predict(X_test)
from sklearn import metrics
print metrics.accuracy_score(y_test, y_pred_class)
# KNN accuracy on scaled data
knn.fit(X_train_scaled, y_train)
y_pred_class = knn.predict(X_test_scaled)
print metrics.accuracy_score(y_test, y_pred_class)
###Output
0.866666666667
###Markdown
Pipeline (bonus content) What is the problem we're trying to solve?
###Code
# define X and y
feature_cols = ['color', 'proline']
X = wine[feature_cols]
y = wine.label
# proper cross-validation on the original (unscaled) data
knn = KNeighborsClassifier(n_neighbors=3)
from sklearn.cross_validation import cross_val_score
cross_val_score(knn, X, y, cv=5, scoring='accuracy').mean()
# why is this improper cross-validation on the scaled data?
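# (answer: the scaler below is fit on the entire dataset, so each CV training fold
#  has already seen the mean/std of its test fold; this leakage can inflate the score)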
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)
cross_val_score(knn, X_scaled, y, cv=5, scoring='accuracy').mean()
###Output
_____no_output_____
###Markdown
How does Pipeline solve the problem?[Pipeline](http://scikit-learn.org/stable/modules/pipeline.html) is used for chaining steps together:
###Code
# fix the cross-validation process using Pipeline
from sklearn.pipeline import make_pipeline
pipe = make_pipeline(StandardScaler(), KNeighborsClassifier(n_neighbors=3))
cross_val_score(pipe, X, y, cv=5, scoring='accuracy').mean()
###Output
_____no_output_____
###Markdown
Pipeline can also be used with [GridSearchCV](http://scikit-learn.org/stable/modules/generated/sklearn.grid_search.GridSearchCV.html) for parameter searching:
###Code
# search for an optimal n_neighbors value using GridSearchCV
neighbors_range = range(1, 21)
param_grid = dict(kneighborsclassifier__n_neighbors=neighbors_range)
from sklearn.grid_search import GridSearchCV
grid = GridSearchCV(pipe, param_grid, cv=5, scoring='accuracy')
grid.fit(X, y)
print grid.best_score_
print grid.best_params_
###Output
0.910112359551
{'kneighborsclassifier__n_neighbors': 1}
|
labs/lab18/Lab18.ipynb | ###Markdown
Share of all delays relative to all departures
###Code
df.groupby('dep_delayed_15min')['UniqueCarrier'].count().plot(kind='pie', autopct='%1.2f%%')
###Output
_____no_output_____
###Markdown
Number of delays as a function of the distance the aircraft has to fly
###Code
sns.countplot(data.groupby('Distance')['dep_delayed_15min'].count())
###Output
_____no_output_____
###Markdown
Group departures into 100-unit distance bins, i <= Distance <= i+100 (an equivalent `pd.cut` one-liner is sketched after this cell)
###Code
data['Distance'].max()
Dist = [_ for _ in range(0, 5100, 100)]
Numbers = []
for i in range(len(Dist)-1):
a = data[(data['Distance'] >= Dist[i])&(data['Distance'] <= Dist[i+1])]['dep_delayed_15min'].count()
Numbers.append(a)
c = pd.DataFrame({'Count': Numbers}, index =Dist[1::])
plt.figure(figsize=(10,8))
G = c['Count'].plot(kind='bar', rot=75, color='blue')
G.set_xlabel("Dist <=")
G.set_ylabel("Count")
#plt.ylim([19000, 21000])
###Output
_____no_output_____
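###Markdown
The same binning can be written more idiomatically with `pd.cut`; the cell below is an illustrative sketch (not executed here) that assumes the same `data` frame as above.
###Code
# Hypothetical alternative to the manual loop above: 100-unit distance bins via pd.cut
dist_bins = pd.cut(data['Distance'], bins=range(0, 5100, 100))
data.groupby(dist_bins)['dep_delayed_15min'].count()
###Output
_____no_output_____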
###Markdown
Number of delays by month
###Code
Delay_per_month = data.groupby('Month')['dep_delayed_15min'].count().plot(kind='barh')
plt.xlim([6000, 7500])
Delay_per_month.set_xlabel("Delay number")
Delay_per_month.legend()
data.groupby('Month')['dep_delayed_15min'].count().plot(kind='pie', autopct='%1.2f%%')
###Output
_____no_output_____
###Markdown
Top 5 months in which delays occur
###Code
d1 = data.groupby('Month')['dep_delayed_15min'].count()
d1 = pd.DataFrame(d1).sort_values(by='dep_delayed_15min',ascending=False).head(5)
Smth_next = d1.plot(kind='bar', rot=5)
Smth_next.set_ylabel("Delay number")
plt.ylim([6000, 8000])
###Output
_____no_output_____
###Markdown
Top worst carriers
###Code
Smth_after = data.groupby('UniqueCarrier')['dep_delayed_15min'].count().sort_values(ascending=True).head(10)
Smth_after = Smth_after.plot(kind="bar", rot=10, fontsize=10)
Smth_after.set_ylabel("Delay number")
###Output
_____no_output_____
###Markdown
Top 10 airports with delayed departures
###Code
p1 = data.groupby('Origin')['dep_delayed_15min'].count().sort_values(ascending=False).head(10)
Smth_after_that = p1.plot(x="Origin", y="Delay", kind="bar", rot=10, fontsize=10)
Smth_after_that.set_ylabel("Delay number")
# in descending order
###Output
_____no_output_____
###Markdown
Top 5 destinations with delayed departures
###Code
p11 = data.groupby('Dest')['dep_delayed_15min'].count().sort_values(ascending=False).head(5)
p11.plot(kind="bar", rot=5, fontsize=10, color = 'red')
###Output
_____no_output_____ |
lateral_movement/lateral_movement.ipynb | ###Markdown
Looking for Lateral Movement----*Lateral movement* is a cyberattack pattern that describes how an adversary leverages a single foothold to compromise other systems within a network.Identifying and stopping lateral movement is an important step in controlling the damage from a breach, and also plays a role in forensic analysis of a cyberattack, helping to identify its source and reconstruct what happened.In this notebook, we show how xGT can be used to find evidence of these types of patterns hiding in large data.This notebook is an example of using the vast collection of malicious cyber attack patterns described in the [MITRE ATT&CK Catalog](https://attack.mitre.org/) as a guide to search for evidence of lateral movement within an enterprise network.For data, we'll be using the [LANL Unified Host and Network Dataset](https://datasets.trovares.com/cyber/LANL/index.html), a set of netflow and host event data collected on an internal Los Alamos National Lab network. ---- RDP HijackingThere are 17 *lateral movement* techniques presented in the MITRE ATT&CK Catalog.We will consider the *RDP Hijacking* technique presented as [tactic 1076](https://attack.mitre.org/techniques/T1076/).RDP hijacking is actually a family of attacks, each with different characteristics on how to attain the privileges required to perform the RDP hijacking.The attack broadly looks like this:1. Lateral movement starts from a foothold where an adversary already has gained access. We'll call this host `A`.1. The attacker uses some *privilege escalation* technique to attain SYSTEM privilege.1. The attacker then leverages their SYSTEM privilege to *hijack* an RDP session to [move through a network](https://doublepulsar.com/rdp-hijacking-how-to-hijack-rds-and-remoteapp-sessions-transparently-to-move-through-an-da2a1e73a5f6).The result is to become logged in to another system where the RDP session had been. We'll call this host `B`.This hijacking action can be repeated to form longer chains of lateral movement; and these chains can be represented as graph patterns: ---- Privilege EscalationThe MITRE ATT&CK Catalog contains 28 different techniques for performing privilege escalation.For our example, we will look for evidence of RDP Hijacking where privilege escalation was carried out using a technique called *Accessibility Features* described as [T1015](https://attack.mitre.org/techniques/T1015/).The astute reader will note that we are looking for only one of 476 (or more) techniques for lateral movement.Each of the others might result in different graph patterns and different queries, but can all be addressed using the same approach described here. ---- Mapping to a cyber datasetIn order to formulate a query, we need to understand the content and structure of our graph.We will work under the assumption that we have both *netflow* and *windows server log* event information.Mapping each of the adversary steps (the number before each edge label in the diagram) to our dataset:1. "Accessibility Features (*privilege escalation*)": An adversary modifies the way programs are launched to get a back door into a system. The following programs can be used for this purpose: 1. `sethc.exe` 1. `utilman.exe`1. "RDP Session Hijack": Once an adversary finds a session to hijack they can do this command: `c:\windows\system32\tscon.exe [session number to be stolen]`. We look in our graph for windows log events showing the running of the `tscon.exe` program.1. 
"RDP/RDS Netflow": Logging in to system `B` will leave one or more netflow packets from system `A` to `B` that use the RDP port. Mapping to the LANL datasetOnce we understand the pattern we want to find, we need to determine what specifically to look for in the dataset.We first need to understand that the LANL dataset has been modified from its raw form.For example, the anonymization process replaced many of the program names with arbitrary strings such as `Prog123456.exe`. Also, the program arguments (such as a `/network` option) are not recorded.Given this lack of information, we will emulate a search for the RDP Hijacking lateral movement behavior by picking some actual values present in the LANL data as a proxy to desired programs such as `sethc.exe`. Here are the mappings: - In steps 1 and 4, we will use the string `Proc336322.exe` as a proxy for the `sethc.exe` program and the string `Proc695356.exe` as a proxy for the `utilman.exe` program. - In steps 2 and 5, we will use the string `Proc249569.exe` as a proxy for the `tscon.exe` program. ---- Using xGT to perform this searchThe rest of this notebook demonstrates how to take this LANL data and the search pattern description to do these steps: 1. Ingest the cyber data into xGT 2. Search for all occurrences of this pattern.
###Code
import xgt
conn = xgt.Connection()
conn
###Output
_____no_output_____
###Markdown
Establish Graph Component SchemasWe first try to retrieve the graph component schemas from xGT server.If that should fail, we create an empty component (vertex or edge frame) for the missing component.
###Code
try:
devices = conn.get_vertex_frame('Devices')
except xgt.XgtNameError:
devices = conn.create_vertex_frame(
name='Devices',
schema=[['device', xgt.TEXT]],
key='device')
devices
try:
netflow = conn.get_edge_frame('Netflow')
except xgt.XgtNameError:
netflow = conn.create_edge_frame(
name='Netflow',
schema=[['epochtime', xgt.INT],
['duration', xgt.INT],
['srcDevice', xgt.TEXT],
['dstDevice', xgt.TEXT],
['protocol', xgt.INT],
['srcPort', xgt.INT],
['dstPort', xgt.INT],
['srcPackets', xgt.INT],
['dstPackets', xgt.INT],
['srcBytes', xgt.INT],
['dstBytes', xgt.INT]],
source=devices,
target=devices,
source_key='srcDevice',
target_key='dstDevice')
netflow
###Output
_____no_output_____
###Markdown
**Edges:** The LANL dataset contains two types of data: netflow and host events. Of the host events recorded, some describe events within a device (e.g., reboots), and some describe events between devices (e.g., login attempts). We'll only be loading the netflow data and in-device events. We call these events "one-sided", since we describe them as graph edges from one vertex to itself.
###Code
try:
events1v = conn.get_edge_frame('Events1v')
except xgt.XgtNameError:
events1v = conn.create_edge_frame(
name='Events1v',
schema=[['epochtime', xgt.INT],
['eventID', xgt.INT],
['logHost', xgt.TEXT],
['userName', xgt.TEXT],
['domainName', xgt.TEXT],
['logonID', xgt.INT],
['processName', xgt.TEXT],
['processID', xgt.INT],
['parentProcessName', xgt.TEXT],
['parentProcessID', xgt.INT]],
source=devices,
target=devices,
source_key='logHost',
target_key='logHost')
events1v
try:
events2v = conn.get_edge_frame('Events2v')
except xgt.XgtNameError:
events2v = conn.create_edge_frame(
name='Events2v',
schema = [['epochtime',xgt.INT],
['eventID',xgt.INT],
['logHost',xgt.TEXT],
['logonType',xgt.INT],
['logonTypeDescription',xgt.TEXT],
['userName',xgt.TEXT],
['domainName',xgt.TEXT],
['logonID',xgt.INT],
['subjectUserName',xgt.TEXT],
['subjectDomainName',xgt.TEXT],
['subjectLogonID',xgt.TEXT],
['status',xgt.TEXT],
['src',xgt.TEXT],
['serviceName',xgt.TEXT],
['destination',xgt.TEXT],
['authenticationPackage',xgt.TEXT],
['failureReason',xgt.TEXT],
['processName',xgt.TEXT],
['processID',xgt.INT],
['parentProcessName',xgt.TEXT],
['parentProcessID',xgt.INT]],
source = 'Devices',
target = 'Devices',
source_key = 'src',
target_key = 'logHost')
events2v
# Utility to print the sizes of data currently in xGT
def print_data_summary():
print('Devices (vertices): {:,}'.format(devices.num_vertices))
print('Netflow (edges): {:,}'.format(netflow.num_edges))
print('Host event 1-vertex (edges): {:,}'.format(events1v.num_edges))
print('Host event 2-vertex (edges): {:,}'.format(events2v.num_edges))
print('Total (edges): {:,}'.format(
netflow.num_edges + events1v.num_edges + events2v.num_edges))
print_data_summary()
###Output
Devices (vertices): 933,314
Netflow (edges): 17,882,795,024
Host event 1-vertex (edges): 1,468,936,024
Host event 2-vertex (edges): 4,022,436,222
Total (edges): 23,374,167,270
###Markdown
Load the dataIf you are already connected to an xGT server with data loaded, this section may be skipped.You may skip ahead to the "**Utility python functions for interacting with xGT**" section.**Load the 1-sided host event data:**
###Code
%%time
if events1v.num_edges == 0:
urls = ["https://datasets.trovares.com/LANL/xgt/wls_day-85_1v.csv"]
# urls = ["xgtd://wls_day-{:02d}_1v.csv".format(_) for _ in range(2,91)]
events1v.load(urls)
print_data_summary()
###Output
_____no_output_____
###Markdown
**Load the 2-sided host event data:**
###Code
%%time
if events2v.num_edges == 0:
urls = ["https://datasets.trovares.com/LANL/xgt/wls_day-85_2v.csv"]
# urls = ["xgtd://wls_day-{:02d}_2v.csv".format(_) for _ in range(2,91)]
events2v.load(urls)
print_data_summary()
###Output
_____no_output_____
###Markdown
**Load the netflow data:**
###Code
%%time
if netflow.num_edges == 0:
urls = ["https://datasets.trovares.com/LANL/xgt/nf_day-85.csv"]
#urls = ["xgtd://nf_day-{:02d}.csv".format(_) for _ in range(2,91)]
netflow.load(urls)
print_data_summary()
###Output
_____no_output_____
###Markdown
Utility python functions for interacting with xGT----Now define some useful functions and get on with the querying ...
###Code
# Utility function to launch queries and show job number:
# The job number may be useful if a long-running job needs
# to be canceled.
def run_query(query, table_name = "answers", drop_answer_table=True, show_query=False):
if drop_answer_table:
conn.drop_frame(table_name)
if query[-1] != '\n':
query += '\n'
query += 'INTO {}'.format(table_name)
if show_query:
print("Query:\n" + query)
job = conn.schedule_job(query)
print("Launched job {}".format(job.id))
conn.wait_for_job(job)
table = conn.get_table_frame(table_name)
return table
###Output
_____no_output_____
###Markdown
Pulling out only RDP netflow edgesBecause of the way LANL has chosen to represent the netflow data, there may be some netflow edges in the *forward* direction where the `dstPort` field indicates RDP (`dstPort = 3389`), and other edges in the *reverse* direction where the `srcPort` field contains 3389.The following section of code pulls out all forward RDP edges and drops them into a new edge frame.It then pulls out all reverse RDP edges, reverses the appropriate fields (i.e., swapping `dst` and `src` versions of the attribute values), and adds these reversed RDP edges to the new edge frame.Note that the edges in this new edge frame connect up with the same set of vertices as the netflow edges.We first generate a new edge frame we call `RDPflow` that has the exact same schema as the netflow edge frame.
###Code
# Generate a new edge frame for holding only the RDP edges
conn.drop_frame('RDPflow')
rdpflow = conn.create_edge_frame(
name='RDPflow',
schema=netflow.schema,
source=devices,
target=devices,
source_key='srcDevice',
target_key='dstDevice')
rdpflow
###Output
_____no_output_____
###Markdown
Extract forward RDP edgesA "forward" edge is one where the `dstPort = 3389`.This edge is copied verbatim to the `RDPflow` edge frame.
###Code
%%time
q = """
MATCH ()-[edge:Netflow]->()
WHERE edge.dstPort=3389
MERGE (v0: Devices { device : edge.srcDevice })
MERGE (v1: Devices { device : edge.dstDevice })
CREATE (v0)-[e:RDPflow {epochtime : edge.epochtime,
duration : edge.duration, protocol : edge.protocol,
srcPort : edge.srcPort, dstPort : edge.dstPort,
srcPackets : edge.srcPackets, dstPackets : edge.dstPackets,
srcBytes : edge.srcBytes, dstBytes : edge.dstBytes}]->(v1)
RETURN count(*)
"""
data = run_query(q)
print('Number of answers: {:,}'.format(data.get_data()[0][0]))
###Output
Launched job 20
Number of answers: 2,176
CPU times: user 23 ms, sys: 11.1 ms, total: 34.1 ms
Wall time: 21.5 s
###Markdown
Extract reverse RDP edgesA "reverse" edge is one where the `srcPort = 3389`.These edges are copied to the `RDPflow` edge frame but **reversed** in transit.The reversal process involves swapping the: `srcDevice` and `dstDevice`;`srcPort` and `dstPort`; `srcPackets` and `dstPackets`; and `srcBytes` and `dstBytes`.
###Code
%%time
q = """
MATCH ()-[edge:Netflow]->()
WHERE edge.srcPort=3389
MERGE (v0: Devices { device : edge.srcDevice })
MERGE (v1: Devices { device : edge.dstDevice })
CREATE (v1)-[e:RDPflow {epochtime : edge.epochtime,
duration : edge.duration, protocol : edge.protocol,
srcPort : edge.dstPort, dstPort : edge.srcPort,
srcPackets : edge.dstPackets, dstPackets : edge.srcPackets,
srcBytes : edge.dstBytes, dstBytes : edge.srcBytes}]->(v0)
RETURN count(*)
"""
data = run_query(q)
print('Number of answers: {:,}'.format(data.get_data()[0][0]))
###Output
Launched job 23
Number of answers: 755,260
CPU times: user 26.2 ms, sys: 9.75 ms, total: 36 ms
Wall time: 25.1 s
###Markdown
Resulting RDPflowThe result of combining these two "edge-create" queries is the `RDPflow` edge frame containing only "forward" RDP edges.This alternate edge frame holding only RDP edges can be used instead of the generic`Netflow` edge frame where an RDP edge is required in a query.
###Code
data=None
if rdpflow.num_edges == 0:
print("RDPflow is empty")
elif rdpflow.num_edges <= 1000:
data = rdpflow.get_data_pandas()
else:
data = 'RDPflow (edges): {:,}'.format(rdpflow.num_edges)
data
# Utility to print the data sizes currently in xGT
def print_netflow_data_summary():
print_data_summary()
print('RDPflow (edges): {:,}'.format(rdpflow.num_edges))
print_netflow_data_summary()
###Output
Devices (vertices): 933,314
Netflow (edges): 17,882,795,024
Host event 1-vertex (edges): 1,468,936,024
Host event 2-vertex (edges): 4,022,436,222
Total (edges): 23,374,167,270
RDPflow (edges): 757,436
###Markdown
Building a better query: adding temporal constraints Being more specific about what you're looking for is a good way to both improve performance and cut down on false positives in your results.In our example, there is a causal dependence between the attacker's steps, which means that they must be temporally ordered.For convenience, we again show the RDP Hijack graph pattern here:So if *t1* represents the time at which event 1 takes place, we know that:*t1* ≤ *t2* ≤ *t3* ≤ *t4* ≤ *t5* ≤ *t6*In addition, since this pattern models intentional lateral movement, we suspect that some of these events will be close together in time.We can narrow the results by setting maximum time thresholds between specific groups of events: - The time between an RDP hijack (`tscon.exe`) and the subsequent RDP netflow is called the *hijack threshold* - The time from the initial *privilege escalation* event to the RDP netflow is called the *one_step threshold* - The time allowed between steps (e.g., the time between RDP1 and RDP2) is called the *between_step threshold*Given some fixed constants for these thresholds, we can impose the following additional constraints: - *t3* - *t2* ≤ *hijack threshold* - *t3* - *t1* ≤ *one_step threshold* - *t6* - *t5* ≤ *hijack threshold* - *t6* - *t4* ≤ *one_step threshold* - *t6* - *t3* ≤ *between_step threshold*We will add all of these constraints to our query to help focus on just the results we want. Lateral Movement queryThis query leverages the new `RDPflow` edge frame (and data) to find the proper RDP edges for steps 3 and 6.
###Code
%%time
time_threshold_between_step = 3600 # one hour
time_threshold_hijack = 180 # three minutes
time_threshold_one_step = 480 # eight minutes
q = """
MATCH (A)-[rdp1:RDPflow]->(B)-[rdp2:RDPflow]->(C),
(A)-[hijack1:Events1v]->(A)-[privEsc1:Events1v]->(A),
(B)-[hijack2:Events1v]->(B)-[privEsc2:Events1v]->(B)
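  // A -> B -> C is the two-hop lateral-movement chain; rdp1/rdp2 are the RDP
  // netflow edges (steps 3 and 6), while privEsc*/hijack* are the in-host
  // windows log events for the accessibility-feature escalation and the
  // tscon.exe (Proc249569.exe) session hijack on each hop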
WHERE A <> B AND B <> C AND A <> C
AND privEsc1.eventID = 4688
AND (privEsc1.processName = "Proc336322.exe" OR privEsc1.processName = "Proc695356.exe")
AND hijack1.eventID = 4688 AND hijack1.processName = "Proc249569.exe"
AND privEsc2.eventID = 4688
AND (privEsc2.processName = "Proc336322.exe" OR privEsc2.processName = "Proc695356.exe")
AND hijack2.eventID = 4688 AND hijack2.processName = "Proc249569.exe"
// Check time constraints on the overall pattern
AND rdp1.epochtime <= rdp2.epochtime
AND rdp2.epochtime - rdp1.epochtime < {0}
// Check time constraints on step from A to B
AND privEsc1.epochtime <= hijack1.epochtime
AND hijack1.epochtime <= rdp1.epochtime
AND rdp1.epochtime - hijack1.epochtime < {1}
AND rdp1.epochtime - privEsc1.epochtime < {2}
// Check time constraints on step from B to C
AND privEsc2.epochtime <= hijack2.epochtime
AND hijack2.epochtime <= rdp2.epochtime
AND rdp2.epochtime - hijack2.epochtime < {1}
AND rdp2.epochtime - privEsc2.epochtime < {2}
RETURN rdp1.srcDevice, rdp1.dstDevice, rdp1.epochtime, rdp2.dstDevice, rdp2.epochtime
""".format(time_threshold_between_step, time_threshold_hijack, time_threshold_one_step)
answer_table = run_query(q)
print('Number of answers: {:,}'.format(answer_table.num_rows))
# retrieve the answer rows to the client in a pandas frame
data = answer_table.get_data_pandas()
data[0:10]
import graphistry
import pandas as pd
graphistry.register(key='XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX', api=2)
graphistry.__version__
graphistry.bind(source='rdp1_srcDevice', destination='rdp1_dstDevice').edges(data).plot()
###Output
_____no_output_____ |
notebooks/threshold datasets.ipynb | ###Markdown
Prepare dataset
###Code
ls ../datasets/ecg_it/
foldername = "/home/chiroptera/QCThesis/datasets/" + "ecg_it/"
dataname = foldername + "ecg_it.data"
np.savetxt(foldername + "data.csv", data, delimiter=',')
np.savetxt(foldername + "ground_truth.csv", gt, delimiter=',')
dataset = pd.read_csv(dataname, header=None, sep=",")
print dataset.shape
dataset.head()
raw = dataset.values[1:,1:]
dataset = pd.DataFrame(raw)
dataset.head()
dataset.to_csv(dataname)
print dataset[64].unique()
print dataset[64].unique().size
dataset[dataset == 'y'] = 1
dataset[9].unique()
dataset[dataset=='republican']=1
dataset = dataset.astype(np.int32)
new_cols = dataset.columns.astype(np.object)
for i in range(1,17):
dataset = dataset[dataset[i] != '?']
dataset.shape
dataset.shape
dataset=load_iris()
data=dataset.data.astype(np.float32)
gt=dataset.target
data = dataset.get_values()[:,:-1]
data = data.astype(np.float32)
data.shape
gt=dataset.get_values()[:,-1]
gt = gt.astype(np.int32)
l=0
for i in np.unique(gt):
gt[gt==i] = l
l+=1
gt
gt=np.zeros(600, dtype=np.int32)
for i in range(6):
gt[i * 100 : i * 100 + 100] = i
###Output
_____no_output_____
###Markdown
iris
###Code
from sklearn.datasets import load_iris
iris=load_iris()
data=iris.data.astype(np.float32)
gt=iris.target
###Output
_____no_output_____
###Markdown
ionosphere
###Code
foldername = "/home/diogoaos/QCThesis/datasets/" + "ionosphere/"
dataname = foldername + "ionosphere.data"
dataset = pd.read_csv(dataname, header=None, sep=",")
print dataset.shape
dataset.head()
data = dataset.values[:,:-1].astype(np.float32)
gt = dataset.values[:,-1]
gt[gt=='g']=1
gt[gt=='b']=0
gt = gt.astype(np.int32)
###Output
(351, 35)
###Markdown
optdigits
###Code
foldername = "/home/diogoaos/QCThesis/datasets/" + "optdigits/"
dataname = foldername + "optdigits.tra"
dataset = pd.read_csv(dataname, header=None, sep=",")
data = dataset.get_values()[:,:-1]
data = data.astype(np.float32)
gt=dataset.get_values()[:,-1]
gt = gt.astype(np.int32)
###Output
_____no_output_____
###Markdown
mfeat-fou
###Code
foldername = "/home/diogoaos/QCThesis/datasets/" + "mfeat/"
dataname = foldername + "mfeat-fou.asc"
dataset = pd.read_csv(dataname, header=None, sep=" ")
data = dataset.get_values().astype(np.float32)
gt = np.empty(dataset.shape[0], dtype=np.int32)
for i in range(10):
gt[i*200 : i*200+200]=i
###Output
_____no_output_____
###Markdown
isolet
###Code
foldername = "/home/chiroptera/QCThesis/datasets/" + "isolet/"
dataname = foldername + "isolet1-5.data"
dataset = pd.read_csv(dataname, header=None, sep=",")
data = dataset.get_values().astype(np.float32)[:,:-1]
gt=dataset.get_values()[:,-1].astype(np.int32)
###Output
_____no_output_____
###Markdown
ECG IT
###Code
foldername = "/home/chiroptera/QCThesis/datasets/" + "ecg_it/"
dataname = foldername + "ecg_it.data"
dataset = pd.read_csv(dataname, header=None, sep=",")
data = dataset.values[1:,1:-1].astype(np.float32)
gt = dataset.values[1:,-1].astype(np.int32)
#remove unlabeled
labeled_idx = gt!=2
data = data[labeled_idx]
gt = gt[labeled_idx]
gt0 = gt==0
gt1 = gt==1
data = np.concatenate((data[gt0],data[gt1][:600]))
gt = np.concatenate((gt[gt0],gt[gt1][:600]))
###Output
_____no_output_____
###Markdown
Process
###Code
from sklearn.cluster import KMeans
reload(myKM)
generator = myKM.K_Means()
generator._MAX_THREADS_BLOCK = 256
generator._PPT = 8
#generator = KMeans(init='random')
n_samples = data.shape[0]
true_n_clusters=np.unique(gt).size
sqrt_nsamples = np.sqrt(n_samples)
n_clusters = [sqrt_nsamples/2, sqrt_nsamples]
n_clusters = map(np.ceil, n_clusters)
n_clusters = map(int, n_clusters)
n_partitions = 100
rounds = 20
max_clusts=100
assoc_mode = "full"
prot_mode = "none"
print "number of samples: ", n_samples
print "interval of clusters: ", n_clusters
print "true number of clusters: ", true_n_clusters
print "number of partitions: ", n_partitions
print "number of rounds: ", rounds
print "maximum clusters assumption: ", max_clusts
%time ensemble = mpart.generateEnsemble(data, generator, n_clusters=n_clusters, npartitions=n_partitions, iters=3)
print generator._gridDim
print generator._gridDim * generator._MAX_THREADS_BLOCK * generator._PPT
###Output
3
6144
###Markdown
sklearn 15.xmykm 2.66 1 pptmykm
###Code
eacEst = eac.EAC(n_samples=n_samples, mat_sparse=False)
%time eacEst.fit(ensemble, assoc_mode=assoc_mode, prot_mode=prot_mode)
Z = eacEst._apply_linkage()
Z[:,:2]
true_n_clusters=0
%time labels = eacEst._lifetime_clustering(n_clusters=true_n_clusters)
accEst = accuracy.HungarianIndex(nsamples=n_samples)
%time accEst.score(gt, labels)
print accEst.accuracy
for l in np.unique(gt):
print l, (gt==l).sum()
for l in np.unique(labels):
print l, (labels==l).sum()
true_n_clusters = 0
thresholds = np.arange(0.05,1.01,0.05)
res = np.empty(((thresholds.size + 1) * rounds, 5))
# threshold, max_assocs, n_assocs, acc
n_cluster_ary = np.empty((thresholds.size + 1) * rounds, dtype=np.int32)
#progress bar
print ". " * rounds
i = 0
for r in range(rounds):
print ".",
ensemble = mpart.generateEnsemble(data, generator, n_clusters=n_clusters, npartitions=n_partitions, iters=3)
eacEst = eac.EAC(n_samples=n_samples, mat_sparse=False)
eacEst.fit(ensemble, assoc_mode=assoc_mode, prot_mode=prot_mode)
max_assocs, max_idx = eacEst.getMaxAssocs()
n_assocs = eacEst.getNNZAssocs()
labels = eacEst._lifetime_clustering(n_clusters=true_n_clusters)
accEst = accuracy.HungarianIndex(nsamples=n_samples)
# HungarianIndex takes a huge time for a high cluster imbalance
if np.unique(labels).size > max_clusts:
accEst.accuracy = -1
else:
accEst.score(gt, labels)
res[i, 0] = 0
res[i, 1] = max_assocs
res[i, 2] = n_assocs
res[i, 3] = accEst.accuracy
res[i, 4] = r
n_cluster_ary[i] = np.unique(labels).size
i += 1
for j in range(thresholds.size):
eacEst.apply_threshold(thresholds[j])
max_assocs, max_idx = eacEst.getMaxAssocs()
n_assocs = eacEst.getNNZAssocs()
labels = eacEst._lifetime_clustering(n_clusters=true_n_clusters)
accEst = accuracy.HungarianIndex(nsamples=n_samples)
if np.unique(labels).size > max_clusts:
accEst.accuracy = -1
else:
accEst.score(gt, labels)
max_assocs, max_idx = eacEst.getMaxAssocs()
nnz_pc = eacEst.getNNZAssocs()
res[i, 0] = thresholds[j]
res[i, 1] = max_assocs
res[i, 2] = nnz_pc
res[i, 3] = accEst.accuracy
res[i, 4] = r
n_cluster_ary[i] = np.unique(labels).size
i += 1
resPD = pd.DataFrame(data=res, columns=["threshold", "max assoc", "n assocs", "accuracy", "round"])
print "MAX ACCURACY=", resPD['accuracy'].max()
if true_n_clusters == 0:
resPD['n_clusts']=n_cluster_ary
resPD
print dataname
save_folder = "/home/diogoaos/QCThesis/experiments/threshold/"
save_name = "ionosphere_lifetime"
resPD.to_csv(path_or_buf = save_folder + save_name + ".csv", index=False)
###Output
_____no_output_____ |
cross-ret.ipynb | ###Markdown
Cross Modal RetrievalIn this notebook, we are going to define and train a model which learns to map sentences and images into a common embedding space in order to be able to retrieve one from the other. We will be using images and captions from the Flickr-8K dataset to train our network.We learn a joint image-sentence embedding space where sentences are encoded using a gated recurrent neural network. Image features from a deep CNN are projected into the embedding space of the GRU hidden states. A pairwise ranking loss is minimized to learn to rank images and their descriptions. Steps to run this notebook* Download the data and pre-trained files from [here](https://drive.google.com/open?id=1p2P-DgHjgpa_E5_hnK7UvEQ5D4WDJxF1)* Copy the contents of the downloaded files into the corresponding folders used below (e.g. `data/`, `vocab/`, and `saves/` as configured in the code)
###Code
import os
import pickle
import numpy as np
import torch
import torch.nn as nn
import torch.nn.init
import torchvision
import torchvision.models as models
from torch.autograd import Variable
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
import torch.backends.cudnn as cudnn
from tqdm import *
from PIL import Image
from flickr.data import get_loader_single, get_transform, collate_fn
from flickr.evaluation import encode_data, i2t, t2i  # i2t/t2i are used by validate() below; assumed to live in the same evaluation module
from matplotlib import pyplot as plt
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
torch.manual_seed(0)
np.random.seed(0)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
ROOT = 'data/'
IMAGE_FOLDER = '/ssd_scratch/cvit/deep/Flickr-8K/Flicker8k_Dataset'
VOCAB_PATH = 'vocab'
PRETRAINED = 'saves'
with open('{}/flickr_precomp_vocab.pkl'.format(VOCAB_PATH), 'rb') as f:
vocab = pickle.load(f)
batch_size = 32
###Output
_____no_output_____
###Markdown
Load and visualize dataThe `get_loaders` function below returns training and validation DataLoaders that can load data efficiently and in specified batches. The function has the following parameters:* `root`: path containing the images and captions* `vocab`: unique words contained in the descriptions* `batch_size`: number of images and descriptions in one batch of data
###Code
def get_loaders(root, vocab, batch_size):
transform = get_transform('train')
train_loader = get_loader_single(root, 'train',
vocab, transform,
batch_size=batch_size, shuffle=True,
collate_fn=collate_fn)
transform = get_transform('dev')
val_loader = get_loader_single(root, 'dev',
vocab, transform,
batch_size=batch_size, shuffle=False,
collate_fn=collate_fn)
transform = get_transform('test')
test_loader = get_loader_single(root, 'test',
vocab, transform,
batch_size=batch_size, shuffle=False,
collate_fn=collate_fn)
return train_loader, val_loader
train_loader, val_loader = get_loaders(IMAGE_FOLDER, vocab, batch_size)
# helper imshow function
def imshow(img):
npimg = img.numpy()
plt.imshow(np.transpose(npimg, (1, 2, 0)))
#np.transpose(npimg, (1, 2, 0))
# get some images from X
dataiter = iter(train_loader)
# the "_" is a placeholder for no labels
images, _, _, _ = dataiter.next()
# show images
fig = plt.figure(figsize=(12, 8))
imshow(torchvision.utils.make_grid(images))
###Output
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
###Markdown
ModelThe model is composed of an image encoder network and a text encoder network. Image EncoderThe image encoder is composed of a pretrained VGG19 network and a linear layer. We take the output from the second-to-last layer. The network sees a 224x224x3 image and passes it through 5 convolutional blocks, each of which downsamples the image by a factor of 2. The output of the last linear layer of the VGG network acts as input to another linear layer, which embeds the image features into the cross-modal subspace.
###Code
def l2norm(X):
"""L2-normalize columns of X
"""
norm = torch.pow(X, 2).sum(dim=1, keepdim=True).sqrt()
X = torch.div(X, norm)
return X
class EncoderImageFull(nn.Module):
def __init__(self, embed_size, use_abs=False, no_imgnorm=False):
"""Load pretrained VGG19 and replace top fc layer."""
super(EncoderImageFull, self).__init__()
self.embed_size = embed_size
self.no_imgnorm = no_imgnorm
self.use_abs = use_abs
# Load a pre-trained model
self.cnn = models.__dict__['vgg19'](pretrained=True)
self.cnn.features = nn.DataParallel(self.cnn.features)
self.cnn.cuda()
# Replace the last fully connected layer of CNN with a new one
self.fc = nn.Linear(self.cnn.classifier._modules['6'].in_features,
embed_size)
self.cnn.classifier = nn.Sequential(
*list(self.cnn.classifier.children())[:-1])
self.init_weights()
def init_weights(self):
"""Xavier initialization for the fully connected layer
"""
r = np.sqrt(6.) / np.sqrt(self.fc.in_features +
self.fc.out_features)
self.fc.weight.data.uniform_(-r, r)
self.fc.bias.data.fill_(0)
def forward(self, images):
"""Extract image feature vectors."""
features = self.cnn(images)
# normalization in the image embedding space
features = l2norm(features)
# linear projection to the joint embedding space
features = self.fc(features)
# normalization in the joint embedding space
if not self.no_imgnorm:
features = l2norm(features)
if self.use_abs:
features = torch.abs(features)
# take the absolute value of the embedding (used in order embeddings)
return features
###Output
_____no_output_____
###Markdown
Text Encoder networkThe text encoder network comprises an [embedding](https://pytorch.org/docs/stable/nn.htmltorch.nn.Embedding) layer and a [GRU](https://pytorch.org/docs/stable/nn.html?highlight=grutorch.nn.GRU). The embedding layer takes the size of the dictionary as its input dimension and outputs word embeddings of some specified dimension. We then pass one word embedding to the GRU at each time step and take the output of the hidden state at the last time step. The output of the GRU should have the same dimension as the subspace onto which the image is projected.
###Code
class EncoderText(nn.Module):
def __init__(self, vocab_size, word_dim, embed_size, use_abs=False):
super(EncoderText, self).__init__()
# word embedding
self.embed = nn.Embedding(vocab_size, word_dim)
self.embed_size = embed_size
self.use_abs = use_abs
# caption embedding
self.rnn = nn.GRU(word_dim, embed_size, batch_first=True)
self.init_weights()
def init_weights(self):
self.embed.weight.data.uniform_(-0.1, 0.1)
def forward(self, x, lengths):
"""Handles variable size captions
"""
# Embed word ids to vectors
x = self.embed(x)
packed = pack_padded_sequence(x, lengths, batch_first=True)
# Forward propagate RNN
out, _ = self.rnn(packed)
# Reshape *final* output to (batch_size, hidden_size)
padded = pad_packed_sequence(out, batch_first=True)
I = torch.LongTensor(lengths).view(-1, 1, 1)
I = Variable(I.expand(x.size(0), 1, self.embed_size)-1).cuda()
out = torch.gather(padded[0], 1, I).squeeze(1)
# normalization in the joint embedding space
out = l2norm(out)
# take absolute value, used by order embeddings
if self.use_abs:
out = torch.abs(out)
return out
###Output
_____no_output_____
###Markdown
Contrastive LossThe objective is to learn representations with a small distance d between them for positive pairs, and a distance greater than some margin value m for negative pairs.For positive pairs, the loss is 0 only when the net produces representations of the two elements of the pair with no distance between them, and the loss (and therefore the corresponding parameter update) grows with that distance.For negative pairs, the loss is 0 when the distance between the representations of the two pair elements is greater than the margin m. When that distance is not bigger than m, the loss is positive and the parameters are updated to produce more distant representations for those two elements. The loss value is at most m, reached when the distance between the two representations is 0. The role of the margin is that, once the representations produced for a negative pair are distant enough, no effort is wasted on enlarging that distance further, so training can focus on more difficult pairs.
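The implementation below expresses this idea with dot-product similarities rather than distances: for a batch of matched image-caption pairs $(i_k, c_k)$ the loss it minimizes is\begin{align}\mathcal{L} = \sum_{k}\sum_{l \neq k} \max\big(0,\; m - i_k \cdot c_k + i_k \cdot c_l\big) + \sum_{k}\sum_{l \neq k} \max\big(0,\; m - i_k \cdot c_k + i_l \cdot c_k\big)\end{align}where $m$ is the margin and the sums run over the mismatched (contrastive) captions and images in the batch.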
###Code
class PairwiseRankingLoss(torch.nn.Module):
def __init__(self, margin=1.0):
super(PairwiseRankingLoss, self).__init__()
self.margin = margin
def forward(self, im, s):
margin = self.margin
# compute image-sentence score matrix
scores = torch.mm(im, s.transpose(1, 0))
diagonal = scores.diag()
# compare every diagonal score to scores in its column (i.e, all contrastive images for each sentence)
cost_s = torch.max(Variable(torch.zeros(scores.size()[0], scores.size()[1]).cuda()), (margin-diagonal).expand_as(scores)+scores)
# compare every diagonal score to scores in its row (i.e, all contrastive sentences for each image)
cost_im = torch.max(Variable(torch.zeros(scores.size()[0], scores.size()[1]).cuda()), (margin-diagonal).expand_as(scores).transpose(1, 0)+scores)
for i in range(scores.size()[0]):
cost_s[i, i] = 0
cost_im[i, i] = 0
return cost_s.sum() + cost_im.sum()
batch_size = 32
vocab_size = len(vocab)
print('Dictionary size: ' + str(vocab_size))
embed_size = 1024
img_dim = 4096
word_dim = 300
num_epochs = 100
img_enc = EncoderImageFull(embed_size).to(device)
txt_enc = EncoderText(vocab_size, word_dim, embed_size).to(device)
params = list(txt_enc.parameters())
params += list(img_enc.fc.parameters())
criterion = PairwiseRankingLoss(margin=0.2)
optimizer = torch.optim.Adam(params, lr=0.0002)
def adjust_learning_rate(optimizer, epoch):
"""Sets the learning rate to the initial LR
decayed by 10 every 30 epochs"""
lr = 0.0002 * (0.1 ** (epoch // 15))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def validate(val_loader, txt_enc, img_enc):
img_embs, cap_embs = encode_data(txt_enc, img_enc, val_loader)
r1, r5, r10, medr = i2t(img_embs, cap_embs)
r1i, r5i, r10i, medri = t2i(img_embs, cap_embs)
score = r1 + r5 + r10 + r1i + r5i + r10i
return r1
start_epoch=0
for epoch in range(start_epoch, num_epochs):
adjust_learning_rate(optimizer, epoch)
for i, batch in enumerate(tqdm(train_loader)):
images, captions, lengths, ids = batch
images = images.to(device)
captions = captions.to(device)
img_emb = img_enc(images)
cap_emb = txt_enc(captions, lengths)
loss = criterion(img_emb, cap_emb)
optimizer.zero_grad()
loss.backward()
optimizer.step()
rsum = validate(val_loader, txt_enc, img_enc)
print('Epochs: [%d]/[%d] AvgScore: %.2f Loss: %.2f'%(epoch, num_epochs, rsum, loss.item()))
###Output
Dictionary size: 3446
###Markdown
Evaluation
###Code
def load_image(file_name):
image = Image.open(file_name)
return image
def load_checkpoint(savefile):
if os.path.exists(savefile):
checkpoint = torch.load(savefile)
return checkpoint
else:
print('No checkpoints available')
def get_captions():
with open('{}/f8k_train_caps.txt'.format(ROOT), 'r') as f:
lines = f.readlines()
captions = [line.strip() for line in lines]
return captions
def text_retrieval(image_embedding, cap_embs, captions) :
scores = np.dot(image_embedding, cap_embs.T).flatten()
sorted_args = np.argsort(scores)[::-1]
sentences = [captions[a] for a in sorted_args[:10]]
return sentences
savefile = 'saves/model_best_full.t7'
img_enc = EncoderImageFull(1024, use_abs=True).to(device)
txt_enc = EncoderText(vocab_size, 300, 1024, use_abs=False).to(device)
img_enc.eval()
txt_enc.eval()
checkpoint = load_checkpoint(savefile)
img_enc.load_state_dict(checkpoint['model'][0])
txt_enc.load_state_dict(checkpoint['model'][1])
if not os.path.exists('saves/%s_joint_embs.pkl' %'flickr'):
img_embs, cap_embs = encode_data(txt_enc, img_enc, train_loader)
    with open('saves/%s_joint_embs.pkl' % 'flickr', 'wb') as f:
pickle.dump([img_embs, cap_embs], f)
else:
with open('saves/%s_joint_embs.pkl' %'flickr', 'rb') as f:
embs = pickle.load(f)
img_embs, cap_embs = embs[0], embs[1]
query_image = load_image('{}/106490881_5a2dd9b7bd.jpg'.format(IMAGE_FOLDER))
transform = get_transform('dev')
query_image = transform(query_image)
query_image_embedding = img_enc(query_image.unsqueeze(0)).data.cpu().numpy()
captions = get_captions()
ret = text_retrieval(query_image_embedding, cap_embs, captions)
plt.figure()
plt.imshow(np.transpose(query_image, (1, 2, 0)))
plt.show()
for each_ret in ret:
print(each_ret+'\n')
###Output
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
|
10_feature_importances_and_time_series/day10_time_series.ipynb | ###Markdown
Lesson No. 10: Working with Time Series. Lesson plan: 1. Working with temporal data; decomposing time series into trend and seasonality. 2. An autoregressive approach to time series. 3. Ways to assess solution quality in the regression task. 4. Forecasting time series with Facebook Prophet.
###Code
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from scipy.stats import pearsonr
matplotlib.rcParams.update({'figure.figsize': (12, 8), 'font.size': 14})
# Dark plots
# plt.style.use('dark_background')
###Output
_____no_output_____
###Markdown
Working with time series

* __Sequences__. The data are sets of values on which an order relation is defined. The values can be discrete (e.g., DNA) or can take values from a continuous interval (the time series of a data center's energy consumption). Permuting the values leads to a loss of information, and the order relation must not be violated (no testing on the past while training on the future).

Time series are sequences by their very nature. As a rule, each individual sample is a real number (or a set of numbers if several indicators are tracked at once).

In most cases the sampling frequency of a time series is fixed (the time interval between adjacent samples does not change). When the sampling frequency is not constant, additional data preprocessing steps are required (or the model has to be adapted to the new nature of the data).
###Code
import pandas as pd
df = pd.read_csv('https://raw.githubusercontent.com/facebook/prophet/master/examples/example_wp_log_peyton_manning.csv')
df.head()
len(df)
###Output
_____no_output_____
###Markdown
Consider a time series showing the number of visits to a particular web page, sampled once per day. Data are available from 2008 to 2016, i.e. 2905 observations.
###Code
time_series = pd.Series(data=df['y'].values, index=pd.to_datetime(df['ds'].values))
time_series = time_series.asfreq(freq='D', method='pad')
time_series.plot()
time_series[:7*112].plot()
###Output
_____no_output_____
###Markdown
Weekly periodicity and seasonality can be observed (the spring–summer and autumn–winter patterns are visible to the naked eye). It is also often useful to decompose the time series into a trend and a seasonal component. The trend shows how the mean value changes over time, while seasonality captures the periodic changes.

*Note: keep in mind that data often contain several periodicities (e.g., daily, weekly and seasonal for energy consumption data). Here only the periodicity with the smallest period (the weekly one) is considered.*
###Code
from statsmodels.tsa.seasonal import STL, seasonal_decompose
stl = STL(time_series[7*50:7*70])
res = stl.fit()
fig = res.plot()
###Output
_____no_output_____
###Markdown
The plot above shows the original time series and three components: the trend, the seasonality and the residual (a component that shows the error of reconstructing the value of the time series from the two previous components).

Many time series forecasting methods rely on statistical techniques, for example ARIMA (AutoRegressive Integrated Moving Average). This model produces fairly good results, but its precise tuning requires serious preparation and manual analysis.

When forecasting a time series we need to predict its value at a certain moment in the future. For numeric values this is a regression task.

A __regression task__ is a supervised learning task in which the target variable is continuous (i.e., it takes values from a continuum). For example, predicting the expected salary based on an applicant's resume, or predicting a household's electricity consumption based on historical data.
###Code
import numpy as np
forecast_horizon = 28
ts_train_slice = slice(7*215,7*250)
ts_full_slice = slice(ts_train_slice.start, ts_train_slice.stop + forecast_horizon)
from statsmodels.tsa.forecasting.stl import STLForecast
from statsmodels.tsa.arima.model import ARIMA
stlf = STLForecast(time_series[ts_train_slice], ARIMA, model_kwargs=dict(order=(1,1,0)))
stlf_res = stlf.fit()
forecast = stlf_res.forecast(forecast_horizon)
plt.plot(time_series[ts_full_slice], label='real values')
plt.plot(forecast, label='forecast', color='orange', linestyle='--')
plt.legend()
plt.grid()
plt.show()
###Output
_____no_output_____
###Markdown
To assess forecast quality we should use appropriate quality metrics; let us turn to them next.

Overview of the main quality metrics in a regression task

As in classification (and in any other task), the obtained results should be considered in the context of the business problem being solved. In many cases it is necessary not only to build an accurate model, but also to do so within a reasonable time, achieve high interpretability of the results, or be able to fine-tune the model on new data.

Below we review the main ways to evaluate quality in regression tasks.

__Mean Squared Error, MSE__ is the average squared deviation of the predictions from the true values of the target variable. It reacts strongly to outliers in the data. When the data are not too noisy, it is one of the main loss functions used in regression.

$$\text{MSE}(f(\mathbf{X}), \mathbf{y}) = \frac{1}{N}\sum_{i=1}^{N} (f(\mathbf{x}_i) - y_i)^2$$

__Mean Absolute Error, MAE__ is the average absolute deviation of the predictions from the true values of the target variable. It is much more robust to outliers and is often used alongside MSE. Although it is not a smooth function (its derivative is not continuous everywhere on the domain), it can still be minimized with gradient methods (as a rule, the derivative at zero is simply defined to be zero).

$$\text{MAE}(f(\mathbf{X}), \mathbf{y}) = \frac{1}{N}\sum_{i=1}^{N} |f(\mathbf{x}_i) - y_i|$$

__Mean Absolute Percentage Error, MAPE__ measures the deviation of the forecast from the target value in percentage terms: the mean of the ratio of the absolute deviation of the prediction from the true value to the target value itself. It is considerably more interpretable (the relative deviation from the target is immediately visible), but it is sensitive to the magnitude of the target values. It is often used to assess the quality of a model that was trained with a different loss function (e.g., MSE).

$$\text{MAPE}(f(\mathbf{X}), \mathbf{y}) = \frac{1}{N}\sum_{i=1}^{N} \left|\frac{f(\mathbf{x}_i) - y_i}{y_i}\right|$$

__Mean Squared Log Error, MSLE__ is the same as MSE, but for the logarithm of the target values. It is used when there are observations with large target values that are not outliers, e.g. the distribution of salaries in a region. Keep in mind that the model then predicts the logarithm of the target variable.

$$\text{MSLE}(f(\mathbf{X}), \mathbf{y}) = \frac{1}{N}\sum_{i=1}^{N} (f(\mathbf{x}_i) - \log y_i)^2$$

__$R^2$-score, the coefficient of determination__ is the share of the variance of the target variable explained by the model. It can also be viewed as a comparison between the error of the regression model and a naive prediction of the target by its mean value. Unlike all the previous metrics, its best possible value is $1$ (all of the variance is explained). It is not bounded from below (a model can be worse than the simple mean).

$$R^2(f(\mathbf{X}), \mathbf{y}) = 1 - \frac{\sum_{i=1}^{N}(y_i - f(\mathbf{x}_i))^2}{\sum_{i=1}^{N}(y_i - \bar{y})^2},$$

where $\bar{y}$ is the mean value of the target variable.
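As a quick illustration (these two metrics are not used in the scoring code below), MAPE and MSLE can be computed directly with NumPy. Note that this MSLE sketch follows the common log1p convention (as in scikit-learn) rather than the log-target formulation above, and `y_true`/`y_pred` are made-up example arrays:

```python
import numpy as np

def mape(y_true, y_pred):
    # mean absolute percentage error; assumes y_true contains no zeros
    return np.mean(np.abs((y_pred - y_true) / y_true))

def msle(y_true, y_pred):
    # mean squared log error on log1p-transformed values
    return np.mean((np.log1p(y_pred) - np.log1p(y_true)) ** 2)

y_true = np.array([3.0, 5.0, 2.5, 7.0])
y_pred = np.array([2.5, 5.0, 4.0, 8.0])
print('MAPE: {:.3f}, MSLE: {:.3f}'.format(mape(y_true, y_pred), msle(y_true, y_pred)))
```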
###Code
def print_scores(real_values, predictions, title=''):
print(' '.join([title, 'forecast quality']))
print('Forecast MSE: {:.3f}'.format(mean_squared_error(real_values, predictions)))
print('Forecast MAE: {:.3f}'.format(mean_absolute_error(real_values, predictions)))
print('Forecast r2 score: {:.3f}'.format(r2_score(real_values, predictions)))
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
real_values = time_series[forecast.index].values
print_scores(real_values, forecast.values, 'ARIMA with almost no tunning')
###Output
ARIMA with almost no tunning forecast quality
Forecast MSE: 0.366
Forecast MAE: 0.468
Forecast r2 score: 0.123
###Markdown
We can see that the forecast quality is not very high, but the coefficient of determination $R^2$ is above zero. The ARIMA model requires careful tuning of its parameters for each particular time series. Let us turn to methods that are easier to use for building a baseline solution.

The autoregressive approach to time series forecasting

The problem of forecasting a time series (especially a periodic one) can also be treated as a classical regression problem: previous samples of the series serve as features, and future values serve as the target variable. This approach is called __autoregressive__. Let us transform the time series into a feature matrix.
###Code
import numpy as np
period = 7
time_series_part = time_series[ts_train_slice].values
data_matrix = np.zeros((int(len(time_series_part)-period), period))
for i in range(len(time_series_part)-period):
data_matrix[i] = time_series_part[i:i+period]
plt.pcolormesh(data_matrix)
from sklearn.linear_model import Lasso
from sklearn.ensemble import RandomForestRegressor
data_matrix.shape
rf = RandomForestRegressor()
rf.fit(data_matrix[-35:, :-1], data_matrix[-35:, -1])
curr_repr = list(data_matrix[-1])
for step in range(forecast_horizon):
next_value = rf.predict(np.asarray(curr_repr)[None, step+1:])
curr_repr.append(next_value[0])
print_scores(real_values, curr_repr[period:], 'Simple Autoregression with Random Forest')
lr = Lasso()
lr.fit(data_matrix[-25:, :-1], data_matrix[-25:, -1])
curr_repr = list(data_matrix[-1])
for step in range(forecast_horizon):
next_value = lr.predict(np.asarray(curr_repr)[None, step+1:])
curr_repr.append(next_value[0])
print_scores(real_values, curr_repr[period:], 'Simple Autoregression with Lasso regression')
###Output
Simple Autoregression with Lasso regression forecast quality
Forecast MSE: 0.468
Forecast MAE: 0.590
Forecast r2 score: -0.123
###Markdown
The forecast quality is inferior to ARIMA. Keep in mind, however, that when building a forecast with the autoregressive approach one often uses feature-engineering techniques as well as exogenous variables (additional data not directly related to the time series itself), for example weather information when forecasting household energy consumption.

Facebook Prophet for time series forecasting

Facebook Prophet is a relatively young library that is very well suited for time series forecasting. It is worth highlighting that this library greatly simplifies accounting for various additional data (e.g., calendar holidays and certain special events) and handles missing values in automatic mode reasonably well. Overall, Prophet is an excellent choice for working with time series in many business problems, and it can provide a solid baseline (and, with careful tuning, a good solution for many tasks).
###Code
from fbprophet import Prophet
model = Prophet()
# new_slice = slice(None, ts_train_slice.stop)
new_slice = slice(ts_train_slice.stop - 7 * 56 * 2, ts_train_slice.stop)
time_series
pr_df = pd.DataFrame()
pr_df['ds'] = time_series[new_slice].index
pr_df['y'] = time_series[new_slice].values
model.fit(pr_df)
forecast_horizon
future = model.make_future_dataframe(periods=forecast_horizon)
future.tail()
pr_df['y'].plot()
forecast = model.predict(future)
forecast[['ds', 'yhat', 'yhat_lower', 'yhat_upper']].tail()
def plot_forecast(real_data, predictions, future_start=None, confidence=None):
fig, ax = plt.subplots()
ax.plot(real_data.index, real_data.values, label='real data')
ax.plot(predictions.index, predictions.values, linestyle='--', color='orange', label='predictions')
if future_start is not None:
plt.axvline(future_start)
if confidence is not None:
ax.fill_between(predictions.index, confidence[0], confidence[1], color='orange', alpha=0.2)
plt.legend()
_start = pd.to_datetime(future['ds'].iloc[-forecast_horizon-7*5])
plot_forecast(
time_series.iloc[new_slice.stop-7*5:new_slice.stop + forecast_horizon],
pd.Series(index=forecast['ds'].values, data=forecast['yhat'].values).loc[_start:],
future_start=future['ds'].iloc[-forecast_horizon],
confidence=(
pd.Series(index=forecast['ds'].values, data=forecast['yhat_lower'].values).loc[_start:],
pd.Series(index=forecast['ds'].values, data=forecast['yhat_upper'].values).loc[_start:],
)
)
print_scores(real_values, forecast['yhat'].iloc[-forecast_horizon:], 'Prophet default forecast')
###Output
Prophet default forecast forecast quality
Forecast MSE: 0.372
Forecast MAE: 0.417
Forecast r2 score: 0.107
###Markdown
Box-Cox transformation
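For reference, the transform applied below via `scipy.stats.boxcox` is

$$y^{(\lambda)} = \begin{cases} \dfrac{y^{\lambda} - 1}{\lambda}, & \lambda \neq 0, \\ \log y, & \lambda = 0, \end{cases}$$

and the `inv_boxcox` helper defined in the next cell restores the original scale as $y = (\lambda y^{(\lambda)} + 1)^{1/\lambda}$ for $\lambda \neq 0$ (or $\exp(y^{(\lambda)})$ for $\lambda = 0$).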
###Code
from scipy import stats
def inv_boxcox(values, lambda_value):
if lambda_value == 0:
return np.exp(values)
else:
return np.exp(np.log(lambda_value * values + 1) / lambda_value)
normalized_pr_df = pr_df.copy()
normalized_pr_df['y'], lambda_value = stats.boxcox(pr_df['y'])
lambda_value
new_model = Prophet()
new_model.fit(normalized_pr_df)
normalized_forecast = new_model.predict(future)
restored_forecast = normalized_forecast[['ds', 'yhat_lower', 'yhat_upper', 'yhat']].copy()
for column_name in ['yhat_lower', 'yhat_upper', 'yhat']:
restored_forecast[column_name] = inv_boxcox(restored_forecast[column_name].values, lambda_value)
plot_forecast(
time_series.iloc[new_slice.stop-7*5:new_slice.stop + forecast_horizon],
pd.Series(index=restored_forecast['ds'].values, data=restored_forecast['yhat'].values).loc[_start:],
future_start=future['ds'].iloc[-forecast_horizon],
confidence=(
pd.Series(index=restored_forecast['ds'].values, data=restored_forecast['yhat_lower'].values).loc[_start:],
pd.Series(index=restored_forecast['ds'].values, data=restored_forecast['yhat_upper'].values).loc[_start:],
)
)
print_scores(real_values, restored_forecast['yhat'].iloc[-forecast_horizon:], 'Prophet forecast with Box-Cox transformation')
###Output
Prophet forecast with Box-Cox transformation forecast quality
Forecast MSE: 0.342
Forecast MAE: 0.491
Forecast r2 score: 0.181
###Markdown
Accounting for additional data

One of Prophet's strengths is the relative ease of using additional information when building a forecast, for example information about special days:
###Code
playoffs = pd.DataFrame({
'holiday': 'playoff',
'ds': pd.to_datetime(['2008-01-13', '2009-01-03', '2010-01-16',
'2010-01-24', '2010-02-07', '2011-01-08',
'2013-01-12', '2014-01-12', '2014-01-19',
'2014-02-02', '2015-01-11', '2016-01-17',
'2016-01-24', '2016-02-07']),
'lower_window': 0,
'upper_window': 1,
})
superbowls = pd.DataFrame({
'holiday': 'superbowl',
'ds': pd.to_datetime(['2010-02-07', '2014-02-02', '2016-02-07']),
'lower_window': 0,
'upper_window': 1,
})
holidays = pd.concat((playoffs, superbowls))
m = Prophet(holidays=holidays)
m.add_country_holidays(country_name='US')
normalized_forecast_with_extra_data = m.fit(normalized_pr_df).predict(future)
restored_forecast_with_extra_data = normalized_forecast_with_extra_data.copy()
for column_name in ['yhat_lower', 'yhat_upper', 'yhat']:
restored_forecast_with_extra_data[column_name] = inv_boxcox(restored_forecast_with_extra_data[column_name].values, lambda_value)
restored_forecast_with_extra_data[(restored_forecast_with_extra_data['playoff'] + restored_forecast_with_extra_data['superbowl']).abs() > 0][
['ds', 'playoff', 'superbowl']]
plot_forecast(
time_series.iloc[new_slice.stop-7*5:new_slice.stop + forecast_horizon],
pd.Series(index=restored_forecast_with_extra_data['ds'].values, data=restored_forecast_with_extra_data['yhat'].values).loc[_start:],
future_start=future['ds'].iloc[-forecast_horizon],
confidence=(
pd.Series(index=restored_forecast_with_extra_data['ds'].values, data=restored_forecast_with_extra_data['yhat_lower'].values).loc[_start:],
pd.Series(index=restored_forecast_with_extra_data['ds'].values, data=restored_forecast_with_extra_data['yhat_upper'].values).loc[_start:],
)
)
print_scores(real_values, restored_forecast_with_extra_data['yhat'].iloc[-forecast_horizon:], 'Prophet forecast with Box-Cox transformation and additional data')
###Output
Prophet forecast with Box-Cox transformation and additional data forecast quality
Forecast MSE: 0.335
Forecast MAE: 0.489
Forecast r2 score: 0.197
###Markdown
The library also provides the official public holidays of the Russian Federation.
###Code
m_ru = Prophet()
m_ru.add_country_holidays(country_name='RU')
m_ru.fit(pr_df)
m_ru.train_holiday_names
###Output
_____no_output_____ |
2018_London_REST_API_Health_and_Performance_Monitoring/REST API - Health and Performance Monitoring.ipynb | ###Markdown
REST API - Health and Performance Monitoring Deep Dive
###Code
import requests as r
import urllib3
import json
import time
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
base_url = "https://9.155.108.201/scalemgmt/v2"
auth_data = ("admin","admin001")
headers = {'content-type' : 'application/json', 'accepts' : 'application/json'}
def printJson(res):
for line in res.text.split("\n"):
print line
#output = json.dumps(res.json(), sort_keys=True, indent=4)
#for line in output.split("\n"):
# print line
def get(url, params=None,printResult=True, printRequest=True):
if printRequest:
print "GET " + base_url + url
res = r.get(base_url + url, auth=auth_data, verify=False, headers=headers, params=params)
if printResult:
printJson(res)
return res
def delete(url, printResult=True, printRequest=True):
if printRequest:
print "DELETE " + base_url + url
res = r.delete(base_url + url, auth=auth_data, verify=False, headers=headers)
if printResult:
printJson(res)
return res
def post(url, data, printResult=True, printRequest=True):
if printRequest:
print "POST " + base_url + url
res = r.post(base_url + url, auth=auth_data, verify=False, json=data, headers=headers)
if printResult:
printJson(res)
return res
def put(url, data, printResult=True, printRequest=True):
if printRequest:
print "PUT " + base_url + url
res = r.put(base_url + url, auth=auth_data, verify=False, json=data, headers=headers)
if printResult:
printJson(res)
return res
def getJobId(res):
return str(res.json()['jobs'][0]['jobId'])
def waitForJob(jobId, maxRetries=10):
completed = False
retries = 0
status = ""
while not completed and retries < maxRetries:
time.sleep(1)
res = get("/jobs/" + str(jobId), printResult=False)
status = str(res.json()['jobs'][0]['completed'])
completed = status != "N/A"
if not completed:
print "Job not yet completed, status: " + status
retries += 1
return (completed, res)
def waitForJobResult(res, maxRetries=10):
jobId = getJobId(res)
return waitForJob(jobId, maxRetries)
###Output
_____no_output_____
###Markdown
Health Monitoring

All the data available in "mmhealth" is available through the REST API as well
###Code
!ssh os-21 mmhealth node show
!ssh os-21 mmhealth node show CES -N cesNodes
###Output
_____no_output_____
###Markdown
With the REST API the data is retrieved directly from a database -> No command is executed on the cluster

Retrieving all the component states from one node
###Code
get("/nodes/os-11/health/states")
###Output
GET https://9.155.108.201/scalemgmt/v2/nodes/os-11/health/states
{
"states" : [ {
"activeSince" : "2019-03-15 10:49:08,166",
"component" : "FILEAUDITLOG",
"entityName" : "os-11.novalocal",
"entityType" : "NODE",
"oid" : 7870,
"reportingNode" : "os-11.novalocal",
"state" : "CHECKING"
}, {
"activeSince" : "2019-03-15 10:49:08,482",
"component" : "THRESHOLD",
"entityName" : "os-11.novalocal",
"entityType" : "NODE",
"oid" : 7875,
"reportingNode" : "os-11.novalocal",
"state" : "HEALTHY"
}, {
"activeSince" : "2019-03-15 10:49:10,229",
"component" : "NETWORK",
"entityName" : "os-11.novalocal",
"entityType" : "NODE",
"oid" : 7878,
"reportingNode" : "os-11.novalocal",
"state" : "HEALTHY"
}, {
"activeSince" : "2019-03-15 10:49:10,295",
"component" : "NETWORK",
"entityName" : "eth0",
"entityType" : "NIC",
"oid" : 7879,
"parentName" : "os-11.novalocal",
"reportingNode" : "os-11.novalocal",
"state" : "HEALTHY"
}, {
"activeSince" : "2019-03-15 10:49:10,326",
"component" : "FILESYSTEM",
"entityName" : "gpfs0",
"entityType" : "FILESYSTEM",
"oid" : 7880,
"reportingNode" : "os-11.novalocal",
"state" : "HEALTHY"
}, {
"activeSince" : "2019-03-15 10:49:10,344",
"component" : "FILESYSTEM",
"entityName" : "objfs",
"entityType" : "FILESYSTEM",
"oid" : 7881,
"reportingNode" : "os-11.novalocal",
"state" : "HEALTHY"
}, {
"activeSince" : "2019-03-15 10:49:10,369",
"component" : "PERFMON",
"entityName" : "os-11.novalocal",
"entityType" : "NODE",
"oid" : 7882,
"reportingNode" : "os-11.novalocal",
"state" : "HEALTHY"
}, {
"activeSince" : "2019-03-15 10:49:10,576",
"component" : "CESIP",
"entityName" : "os-11.novalocal",
"entityType" : "NODE",
"oid" : 7888,
"reportingNode" : "os-11.novalocal",
"state" : "HEALTHY"
}, {
"activeSince" : "2019-03-15 10:49:11,071",
"component" : "DISK",
"entityName" : "disk1",
"entityType" : "NSD",
"oid" : 7889,
"reportingNode" : "os-11.novalocal",
"state" : "HEALTHY"
}, {
"activeSince" : "2019-03-15 10:49:11,095",
"component" : "DISK",
"entityName" : "disk2",
"entityType" : "NSD",
"oid" : 7890,
"reportingNode" : "os-11.novalocal",
"state" : "HEALTHY"
}, {
"activeSince" : "2019-03-15 10:49:11,119",
"component" : "DISK",
"entityName" : "disk3",
"entityType" : "NSD",
"oid" : 7891,
"reportingNode" : "os-11.novalocal",
"state" : "HEALTHY"
}, {
"activeSince" : "2019-03-15 10:49:11,141",
"component" : "DISK",
"entityName" : "disk4",
"entityType" : "NSD",
"oid" : 7892,
"reportingNode" : "os-11.novalocal",
"state" : "HEALTHY"
}, {
"activeSince" : "2019-03-15 10:49:25,058",
"component" : "THRESHOLD",
"entityName" : "op_time_custom",
"entityType" : "THRESHOLD_RULE",
"oid" : 7898,
"reportingNode" : "os-11.novalocal",
"state" : "HEALTHY"
}, {
"activeSince" : "2019-03-15 10:49:25,110",
"component" : "THRESHOLD",
"entityName" : "MemFree_Rule",
"entityType" : "THRESHOLD_RULE",
"oid" : 7899,
"reportingNode" : "os-11.novalocal",
"state" : "HEALTHY"
}, {
"activeSince" : "2019-03-17 20:41:14,519",
"component" : "GUI",
"entityName" : "os-11.novalocal",
"entityType" : "NODE",
"oid" : 8014,
"reportingNode" : "os-11.novalocal",
"state" : "DEGRADED"
}, {
"activeSince" : "2019-03-17 20:41:14,539",
"component" : "NODE",
"entityName" : "os-11.novalocal",
"entityType" : "NODE",
"oid" : 8015,
"reportingNode" : "os-11.novalocal",
"state" : "DEGRADED"
}, {
"activeSince" : "2019-03-20 20:59:33,245",
"component" : "GPFS",
"entityName" : "os-11.novalocal",
"entityType" : "NODE",
"oid" : 8491,
"reportingNode" : "os-11.novalocal",
"state" : "HEALTHY"
}, {
"activeSince" : "2019-03-20 20:59:33,301",
"component" : "FILESYSTEM",
"entityName" : "os-11.novalocal",
"entityType" : "NODE",
"oid" : 8493,
"reportingNode" : "os-11.novalocal",
"state" : "HEALTHY"
}, {
"activeSince" : "2019-03-20 20:59:33,350",
"component" : "DISK",
"entityName" : "os-11.novalocal",
"entityType" : "NODE",
"oid" : 8495,
"reportingNode" : "os-11.novalocal",
"state" : "HEALTHY"
}, {
"activeSince" : "2019-03-20 20:59:33,392",
"component" : "MSGQUEUE",
"entityName" : "os-11.novalocal",
"entityType" : "NODE",
"oid" : 8496,
"reportingNode" : "os-11.novalocal",
"state" : "HEALTHY"
} ],
"status" : {
"code" : 200,
"message" : "The request finished successfully."
}
}
###Markdown
Retrieve only the component, state and entity name from one node
###Code
get("/nodes/os-21/health/states?fields=component,state,entityName")
###Output
GET https://9.155.108.247/scalemgmt/v2/nodes/os-21/health/states?fields=component,state,entityName
{
"states" : [ {
"component" : "DISK",
"entityName" : "disk2",
"state" : "HEALTHY"
}, {
"component" : "NETWORK",
"entityName" : "os-21.novalocal",
"state" : "HEALTHY"
}, {
"component" : "THRESHOLD",
"entityName" : "os-21.novalocal",
"state" : "HEALTHY"
}, {
"component" : "DISK",
"entityName" : "disk5",
"state" : "HEALTHY"
}, {
"component" : "DISK",
"entityName" : "disk1",
"state" : "HEALTHY"
}, {
"component" : "PERFMON",
"entityName" : "os-21.novalocal",
"state" : "HEALTHY"
}, {
"component" : "FILEAUDITLOG",
"entityName" : "gpfs0",
"state" : "HEALTHY"
}, {
"component" : "DISK",
"entityName" : "disk3",
"state" : "HEALTHY"
}, {
"component" : "DISK",
"entityName" : "disk4",
"state" : "HEALTHY"
}, {
"component" : "THRESHOLD",
"entityName" : "MemFree_Rule",
"state" : "HEALTHY"
}, {
"component" : "FILEAUDITLOG",
"entityName" : "os-21.novalocal",
"state" : "HEALTHY"
}, {
"component" : "FILESYSTEM",
"entityName" : "objfs",
"state" : "HEALTHY"
}, {
"component" : "FILESYSTEM",
"entityName" : "gpfs0",
"state" : "HEALTHY"
}, {
"component" : "FILESYSTEM",
"entityName" : "backlevel_fs",
"state" : "HEALTHY"
}, {
"component" : "NETWORK",
"entityName" : "eth0",
"state" : "HEALTHY"
}, {
"component" : "GUI",
"entityName" : "os-21.novalocal",
"state" : "HEALTHY"
}, {
"component" : "DISK",
"entityName" : "os-21.novalocal",
"state" : "HEALTHY"
}, {
"component" : "FILESYSTEM",
"entityName" : "os-21.novalocal",
"state" : "HEALTHY"
}, {
"component" : "NODE",
"entityName" : "os-21.novalocal",
"state" : "HEALTHY"
}, {
"component" : "MSGQUEUE",
"entityName" : "os-21.novalocal",
"state" : "HEALTHY"
}, {
"component" : "GPFS",
"entityName" : "os-21.novalocal",
"state" : "HEALTHY"
} ],
"status" : {
"code" : 200,
"message" : "The request finished successfully."
}
}
###Markdown
Retrieve all DEGRADED components from all nodes
###Code
get("/nodes/:all:/health/states?fields=component,state,entityName&filter=state=DEGRADED")
###Output
GET https://9.155.108.247/scalemgmt/v2/nodes/:all:/health/states?fields=component,state,entityName&filter=state=DEGRADED
{
"states" : [ {
"component" : "CESNETWORK",
"entityName" : "os-23.novalocal",
"state" : "DEGRADED"
}, {
"component" : "SMB",
"entityName" : "os-22.novalocal",
"state" : "DEGRADED"
}, {
"component" : "CES",
"entityName" : "os-22.novalocal",
"state" : "DEGRADED"
}, {
"component" : "NFS",
"entityName" : "os-22.novalocal",
"state" : "DEGRADED"
}, {
"component" : "NODE",
"entityName" : "os-22.novalocal",
"state" : "DEGRADED"
}, {
"component" : "AUTH",
"entityName" : "os-22.novalocal",
"state" : "DEGRADED"
} ],
"status" : {
"code" : 200,
"message" : "The request finished successfully."
}
}
###Markdown
Retrieve all DEGRADED components from all CES nodes
###Code
get("/nodes/cesNodes/health/states?fields=component,state,entityName&filter=state=DEGRADED")
###Output
GET https://9.155.108.247/scalemgmt/v2/nodes/cesNodes/health/states?fields=component,state,entityName&filter=state=DEGRADED
{
"states" : [ {
"component" : "CESNETWORK",
"entityName" : "os-23.novalocal",
"state" : "DEGRADED"
}, {
"component" : "SMB",
"entityName" : "os-22.novalocal",
"state" : "DEGRADED"
}, {
"component" : "CES",
"entityName" : "os-22.novalocal",
"state" : "DEGRADED"
}, {
"component" : "NFS",
"entityName" : "os-22.novalocal",
"state" : "DEGRADED"
}, {
"component" : "NODE",
"entityName" : "os-22.novalocal",
"state" : "DEGRADED"
}, {
"component" : "AUTH",
"entityName" : "os-22.novalocal",
"state" : "DEGRADED"
} ],
"status" : {
"code" : 200,
"message" : "The request finished successfully."
}
}
###Markdown
Retrieve all ERROR events from all the nodes
###Code
get("/nodes/:all:/health/events?filter=severity=ERROR")
###Output
GET https://9.155.108.247/scalemgmt/v2/nodes/:all:/health/events?filter=severity=ERROR
{
"events" : [ {
"activeSince" : "2018-04-09 11:39:01,000",
"component" : "AUTH",
"description" : "The external Active Directory Service (ADS) is unresponsive",
"entityName" : "os-22.novalocal",
"entityType" : "NODE",
"message" : "external ADS server is unresponsive",
"name" : "ads_down",
"oid" : 89,
"reportingNode" : "os-22.novalocal",
"severity" : "ERROR",
"state" : "DEGRADED",
"type" : "STATE_CHANGE",
"userAction" : "Verify network connection and check that ADS server(s) are operational"
}, {
"activeSince" : "2018-04-09 11:38:01,000",
"component" : "GUI",
"description" : "The GUI service is down",
"entityName" : "os-22.novalocal",
"entityType" : "NODE",
"message" : "GUI service should be started and is stopped.",
"name" : "gui_down",
"oid" : 105,
"reportingNode" : "os-22.novalocal",
"severity" : "ERROR",
"state" : "FAILED",
"type" : "STATE_CHANGE",
"userAction" : "Restart the GUI service or change the node class for this node."
}, {
"activeSince" : "2018-04-09 11:39:07,000",
"component" : "AUTH",
"description" : "The external Active Directory Service (ADS) is unresponsive",
"entityName" : "os-23.novalocal",
"entityType" : "NODE",
"message" : "external ADS server is unresponsive",
"name" : "ads_down",
"oid" : 469,
"reportingNode" : "os-23.novalocal",
"severity" : "ERROR",
"state" : "DEGRADED",
"type" : "STATE_CHANGE",
"userAction" : "Verify network connection and check that ADS server(s) are operational"
}, {
"activeSince" : "2018-04-13 17:09:19,000",
"component" : "FILEAUDITLOG",
"description" : "Error encountered in audit consumer.",
"entityName" : "gpfs0",
"entityType" : "FILESYSTEM",
"message" : "Failed to indicate to systemctl on successful consumer startup sequence for filesystem gpfs0.",
"name" : "auditc_initlockauditfile",
"oid" : 782,
"reportingNode" : "os-21.novalocal",
"severity" : "ERROR",
"state" : "FAILED",
"type" : "STATE_CHANGE",
"userAction" : "Disable and re-enable auditing using the 'mmaudit command'."
} ],
"status" : {
"code" : 200,
"message" : "The request finished successfully."
}
}
###Markdown
Retrieve all WARNING events from all CES nodes
###Code
get("/nodes/cesNodes/health/events?filter=severity=WARNING")
###Output
GET https://9.155.108.247/scalemgmt/v2/nodes/cesNodes/health/events?filter=severity=WARNING
{
"events" : [ {
"activeSince" : "2018-04-03 15:42:56,360",
"component" : "CESIP",
"description" : "Check of the CES IP assignment state returned an unknown result. This might be a temporary issue, like a timeout during the check procedure",
"entityName" : "os-22.novalocal",
"entityType" : "NODE",
"message" : "The Spectrum Scale CES IP assignment monitor could not be executed. This could be a timeout issue",
"name" : "ces_ips__warn",
"oid" : 55,
"reportingNode" : "os-22.novalocal",
"severity" : "WARNING",
"state" : "UNKNOWN",
"type" : "NOTICE",
"userAction" : "Find potential issues for this kind of failure in the /var/adm/ras/mmsysmonitor.log file"
}, {
"activeSince" : "2018-04-11 10:03:49,074",
"component" : "CESIP",
"description" : "Check of the CES IP assignment state returned an unknown result. This might be a temporary issue, like a timeout during the check procedure",
"entityName" : "os-22.novalocal",
"entityType" : "NODE",
"message" : "The Spectrum Scale CES IP assignment monitor could not be executed. This could be a timeout issue",
"name" : "ces_ips__warn",
"oid" : 78,
"reportingNode" : "os-22.novalocal",
"severity" : "WARNING",
"state" : "UNKNOWN",
"type" : "NOTICE",
"userAction" : "Find potential issues for this kind of failure in the /var/adm/ras/mmsysmonitor.log file"
}, {
"activeSince" : "2018-04-09 11:37:04,000",
"component" : "FILESYSTEM",
"description" : "An internally mounted or a declared but not mounted filesystem was detected",
"entityName" : "gpfs0",
"entityType" : "FILESYSTEM",
"message" : "The filesystem gpfs0 is probably needed, but not mounted",
"name" : "unmounted_fs_check",
"oid" : 479,
"reportingNode" : "os-23.novalocal",
"severity" : "WARNING",
"state" : "DEGRADED",
"type" : "STATE_CHANGE",
"userAction" : "Run mmlsmount all_local to verify that all expected filesystems are mounted"
}, {
"activeSince" : "2018-04-09 11:36:53,000",
"component" : "GPFS",
"description" : "At least one quorum node is not reachable. See message for details",
"entityName" : "os-23.novalocal",
"entityType" : "NODE",
"message" : "At least one quorum node is not reachable Item=PC_QUORUM_NODES,ErrMsg='Ping CCR quorum nodes failed',Failed='10.0.100.66'",
"name" : "ccr_quorum_nodes_warn",
"oid" : 480,
"reportingNode" : "os-23.novalocal",
"severity" : "WARNING",
"state" : "DEGRADED",
"type" : "STATE_CHANGE",
"userAction" : "Check the network and/or firewall (default port 1191 must not be blocked) configuration of the not reachable quorum node"
}, {
"activeSince" : "2018-04-09 11:36:36,000",
"component" : "CESNETWORK",
"description" : "No CES IPs were assigned to any network adapter of this node",
"entityName" : "os-23.novalocal",
"entityType" : "NODE",
"message" : "No CES IPs were assigned to this node",
"name" : "ces_network_ips_down",
"oid" : 481,
"reportingNode" : "os-23.novalocal",
"severity" : "WARNING",
"state" : "DEGRADED",
"type" : "STATE_CHANGE",
"userAction" : "If CES is FAILED, analyse the reason thereof. If there are not enough IPs in the CES pool for this node, extend the pool"
}, {
"activeSince" : "2018-04-13 08:18:04,138",
"component" : "CESIP",
"description" : "Check of the CES IP assignment state returned an unknown result. This might be a temporary issue, like a timeout during the check procedure",
"entityName" : "os-22.novalocal",
"entityType" : "NODE",
"message" : "The Spectrum Scale CES IP assignment monitor could not be executed. This could be a timeout issue",
"name" : "ces_ips__warn",
"oid" : 727,
"reportingNode" : "os-22.novalocal",
"severity" : "WARNING",
"state" : "UNKNOWN",
"type" : "NOTICE",
"userAction" : "Find potential issues for this kind of failure in the /var/adm/ras/mmsysmonitor.log file"
} ],
"status" : {
"code" : 200,
"message" : "The request finished successfully."
}
}
###Markdown
Performance Monitoring

Displaying the active sensor configuration
###Code
get("/perfmon/sensors?fields=period,description,restrict")
###Output
GET https://9.155.108.247/scalemgmt/v2/perfmon/sensors?fields=period,description,restrict
{
"sensorConfig" : [ {
"description" : "Collects the load values which is the length of the runqueue.",
"period" : 1,
"restrict" : [ ],
"sensorName" : "Load"
}, {
"description" : "Collects statistics on the account layer of the object protocol.",
"period" : 0,
"restrict" : [ ],
"sensorName" : "SwiftAccount"
}, {
"description" : "",
"period" : 0,
"restrict" : [ ],
"sensorName" : "GPFSvFLUSH"
}, {
"description" : "Collects CTDB stats. This should be enabled to enable FTDC for clusters running the SMB protocol.",
"period" : 10,
"restrict" : [ ],
"sensorName" : "CTDBStats"
}, {
"description" : "",
"period" : 0,
"restrict" : [ ],
"sensorName" : "GPFSIOC"
}, {
"description" : "Collects I/O statistics for pdisks on GNR based systems.",
"period" : 10,
"restrict" : [ "nsdNodes" ],
"sensorName" : "GPFSPDDisk"
}, {
"description" : "Collects statistics on the proxy layer of the object protocol.",
"period" : 0,
"restrict" : [ ],
"sensorName" : "SwiftProxy"
}, {
"description" : "Collects SMB statistics per node.",
"period" : 10,
"restrict" : [ "cesNodes" ],
"sensorName" : "SMBGlobalStats"
}, {
"description" : "Collects per filesystem client statistics. Accounts for transfers from and to disk and will not log cache accesses.",
"period" : 10,
"restrict" : [ ],
"sensorName" : "GPFSFilesystem"
}, {
"description" : "Collects local disk I/O statistics.",
"period" : 0,
"restrict" : [ ],
"sensorName" : "Diskstat"
}, {
"description" : "Collects CPU data from the kernel",
"period" : 1,
"restrict" : [ ],
"sensorName" : "CPU"
}, {
"description" : "",
"period" : 0,
"restrict" : [ ],
"sensorName" : "TCTFsetIcstoreStats"
}, {
"description" : "Collects AFM statistics per file set. Should only be run on gateway nodes.",
"period" : 0,
"restrict" : [ ],
"sensorName" : "GPFSAFMFSET"
}, {
"description" : "Collects per filesystem client statistics from a client API perspective. Caching and replication are not affecting the statistics.",
"period" : 10,
"restrict" : [ ],
"sensorName" : "GPFSFilesystemAPI"
}, {
"description" : "Collects network statistics for IP based networks.",
"period" : 1,
"restrict" : [ ],
"sensorName" : "Network"
}, {
"description" : "",
"period" : 10,
"restrict" : [ ],
"sensorName" : "GPFSNode"
}, {
"description" : "",
"period" : 0,
"restrict" : [ ],
"sensorName" : "GPFSVIO"
}, {
"description" : "",
"period" : 0,
"restrict" : [ ],
"sensorName" : "GPFSPoolIO"
}, {
"description" : "Collects information on Spectrum Scale threads which wait for a certain time",
"period" : 10,
"restrict" : [ ],
"sensorName" : "GPFSWaiters"
}, {
"description" : "Collects per pool information on data and metadata. Requires file systems to be mounted. This sensor should run only once in the cluster.",
"period" : 90,
"restrict" : [ "os-21.novalocal" ],
"sensorName" : "GPFSPool"
}, {
"description" : "",
"period" : 0,
"restrict" : [ ],
"sensorName" : "TCTFsIcstoreStats"
}, {
"description" : "",
"period" : 0,
"restrict" : [ ],
"sensorName" : "GPFSLWEKafkaProducer"
}, {
"description" : "Collects NFS statistics for CES based NFS implementations. Required resources will scale with the number of exports.",
"period" : 10,
"restrict" : [ "cesNodes" ],
"sensorName" : "NFSIO"
}, {
"description" : "Collects capacity information per NSD. The collection of this data is expensive, therefore the recommended period is only once per day. This sensor should run only once in the cluster.",
"period" : 86400,
"restrict" : [ "os-21.novalocal" ],
"sensorName" : "GPFSDiskCap"
}, {
"description" : "",
"period" : 0,
"restrict" : [ ],
"sensorName" : "TCTFsGpfsConnectorStats"
}, {
"description" : "",
"period" : 10,
"restrict" : [ ],
"sensorName" : "GPFSRPCS"
}, {
"description" : "",
"period" : 10,
"restrict" : [ ],
"sensorName" : "GPFSVFS"
}, {
"description" : "Collects per client node and NSD IO statistics. Enable with care and consider the large key range.",
"period" : 0,
"restrict" : [ ],
"sensorName" : "GPFSDisk"
}, {
"description" : "",
"period" : 10,
"restrict" : [ ],
"sensorName" : "GPFSNodeAPI"
}, {
"description" : "Collects AFM statistics per file system Should only be run on gateway nodes.",
"period" : 0,
"restrict" : [ ],
"sensorName" : "GPFSAFMFS"
}, {
"description" : "",
"period" : 0,
"restrict" : [ ],
"sensorName" : "TCTDebugLweDestroyStats"
}, {
"description" : "Collects SMB statistics per transaction type",
"period" : 10,
"restrict" : [ "cesNodes" ],
"sensorName" : "SMBStats"
}, {
"description" : "",
"period" : 0,
"restrict" : [ ],
"sensorName" : "GPFSLROC"
}, {
"description" : "Collects memory utilization data from the Kernel",
"period" : 1,
"restrict" : [ ],
"sensorName" : "Memory"
}, {
"description" : "",
"period" : 0,
"restrict" : [ ],
"sensorName" : "TCTDebugDbStats"
}, {
"description" : "Collects local disk capacity information.",
"period" : 600,
"restrict" : [ ],
"sensorName" : "DiskFree"
}, {
"description" : "Collects netstat information.",
"period" : 10,
"restrict" : [ ],
"sensorName" : "Netstat"
}, {
"description" : "Collects statistics on the container layer of the object protocol.",
"period" : 0,
"restrict" : [ ],
"sensorName" : "SwiftContainer"
}, {
"description" : "",
"period" : 0,
"restrict" : [ ],
"sensorName" : "GPFSCHMS"
}, {
"description" : "",
"period" : 0,
"restrict" : [ ],
"sensorName" : "TCTFsetGpfsConnectorStats"
}, {
"description" : "",
"period" : 0,
"restrict" : [ ],
"sensorName" : "GPFSVIO64"
}, {
"description" : "Collects AFM statistics per node. Should only be run on gateway nodes.",
"period" : 0,
"restrict" : [ ],
"sensorName" : "GPFSAFM"
}, {
"description" : "Collects CTDB stats per CTDB Database. This should be enabled to enable FTDC for clusters running the SMB protocol.",
"period" : 10,
"restrict" : [ ],
"sensorName" : "CTDBDBStats"
}, {
"description" : "Collects information on inodes per inode space. This sensor should run only once in the cluster.",
"period" : 300,
"restrict" : [ "os-21.novalocal" ],
"sensorName" : "GPFSFileset"
}, {
"description" : "Collects capacity and inode data per fileset. Requires Quota to be enabled. This sensors should run only once in the cluster.",
"period" : 3600,
"restrict" : [ "os-21.novalocal" ],
"sensorName" : "GPFSFilesetQuota"
}, {
"description" : "Collects statistics on the Object layer of the object protocol.",
"period" : 0,
"restrict" : [ ],
"sensorName" : "SwiftObject"
}, {
"description" : "Collects IO statistics in the NSD server layer. Direct disk access is not recorded.",
"period" : 10,
"restrict" : [ "nsdNodes" ],
"sensorName" : "GPFSNSDDisk"
}, {
"description" : "Collects network statistics for Infiniband infrastructure. Requires MOFED driver infrastructure.",
"period" : 0,
"restrict" : [ ],
"sensorName" : "Infiniband"
} ],
"status" : {
"code" : 200,
"message" : "The request finished successfully."
}
}
###Markdown
The Zimon query language

On the shell
###Code
!ssh os-21 'echo "get metrics cpu_user bucket_size 1 last 10" | /opt/IBM/zimon/zc 127.0.0.1'
!ssh os-21 'echo "get metrics cpu_user bucket_size 1 tstart '2018-04-13 21:00:00' tend '2018-04-13 21:00:30'" | /opt/IBM/zimon/zc 127.0.0.1'
!ssh os-21 'echo "get metrics cpu_user bucket_size 60 tstart '2018-04-13 20:00:00' tend '2018-04-13 21:00:00'" | /opt/IBM/zimon/zc 127.0.0.1'
###Output
##########################################################
Info : GPFS modified Rhel72 Client : root --> Passw0rd
##########################################################
1: os-21.novalocal|CPU|cpu_user
2: os-22.novalocal|CPU|cpu_user
3: os-23.novalocal|CPU|cpu_user
4: os-24.novalocal|CPU|cpu_user
Row Timestamp cpu_user cpu_user cpu_user cpu_user
1 2018-04-13 20:01:00 4.744667 21.342200 3.373167 0.025000
2 2018-04-13 20:02:00 4.498500 18.907818 3.534333 0.033333
3 2018-04-13 20:03:00 3.298833 18.974423 3.471000 0.016667
4 2018-04-13 20:04:00 3.966833 20.296071 3.823500 0.025000
5 2018-04-13 20:05:00 4.686333 20.254182 3.340833 0.016667
6 2018-04-13 20:06:00 4.704333 28.285000 3.160000 0.025000
7 2018-04-13 20:07:00 4.232500 30.437273 3.583000 0.125500
8 2018-04-13 20:08:00 2.627167 22.791964 2.980167 0.025000
9 2018-04-13 20:09:00 2.927333 19.905556 3.032333 0.025000
10 2018-04-13 20:10:00 3.374000 18.226852 3.044333 0.108000
11 2018-04-13 20:11:00 5.729833 23.002000 3.553333 0.025000
12 2018-04-13 20:12:00 4.922833 20.591818 3.153667 0.016667
13 2018-04-13 20:13:00 3.766833 19.839286 3.561333 0.025000
14 2018-04-13 20:14:00 5.624667 18.206604 4.111833 0.225000
15 2018-04-13 20:15:00 3.456167 19.151455 2.980500 0.025000
16 2018-04-13 20:16:00 4.101167 19.296667 3.371500 0.033333
17 2018-04-13 20:17:00 3.266667 17.441818 3.326333 0.041667
18 2018-04-13 20:18:00 6.718667 17.296250 3.342500 0.025000
19 2018-04-13 20:19:00 3.140333 20.022000 3.226667 0.016667
20 2018-04-13 20:20:00 3.829667 18.810714 3.241167 0.100000
21 2018-04-13 20:21:00 4.795167 18.612500 3.301833 0.016667
22 2018-04-13 20:22:00 2.504500 23.474182 3.465667 0.025000
23 2018-04-13 20:23:00 4.305167 17.518333 3.615333 0.025000
24 2018-04-13 20:24:00 4.004000 19.549808 3.890167 0.025000
25 2018-04-13 20:25:00 2.766500 19.793750 3.568667 0.025000
26 2018-04-13 20:26:00 4.837333 20.524906 3.289333 0.016667
27 2018-04-13 20:27:00 3.073000 20.727455 3.551333 0.016667
28 2018-04-13 20:28:00 4.885500 19.362500 3.461000 0.033333
29 2018-04-13 20:29:00 3.250167 23.139615 3.346667 0.016667
30 2018-04-13 20:30:00 5.934833 22.614909 3.339000 0.091667
31 2018-04-13 20:31:00 5.570833 16.692759 3.946000 0.016667
32 2018-04-13 20:32:00 3.568333 16.760351 4.004500 0.025000
33 2018-04-13 20:33:00 4.039833 17.052105 3.857500 0.025000
34 2018-04-13 20:34:00 3.420167 13.565349 4.030667 0.025000
35 2018-04-13 20:35:00 3.037667 15.369492 3.803333 0.025000
36 2018-04-13 20:36:00 4.217667 15.559828 3.644833 0.025000
37 2018-04-13 20:37:00 3.065500 15.795439 4.043833 0.033333
38 2018-04-13 20:38:00 2.996500 15.574286 4.007667 0.016667
39 2018-04-13 20:39:00 2.327000 2.737018 2.549333 0.033333
40 2018-04-13 20:40:00 2.559667 0.320682 2.098000 0.091667
41 2018-04-13 20:41:00 3.440333 1.648500 1.515167 0.033333
42 2018-04-13 20:42:00 3.351333 3.193000 2.481333 0.025000
43 2018-04-13 20:43:00 1.419667 0.753167 2.113167 0.025000
44 2018-04-13 20:44:00 3.175667 11.025636 3.403500 0.025000
45 2018-04-13 20:45:00 2.580000 16.231458 3.106667 0.025000
46 2018-04-13 20:46:00 4.065167 2.616167 2.246333 0.025000
47 2018-04-13 20:47:00 1.738000 2.910167 1.841667 0.025000
48 2018-04-13 20:48:00 1.670667 2.601667 2.342667 0.025000
49 2018-04-13 20:49:00 1.660833 2.639167 2.319333 0.025000
50 2018-04-13 20:50:00 2.816500 3.893000 2.342000 0.091667
51 2018-04-13 20:51:00 2.432667 2.505500 2.289833 0.025000
52 2018-04-13 20:52:00 2.905167 4.726000 2.909333 0.025000
53 2018-04-13 20:53:00 2.167500 2.574667 2.299333 0.016667
54 2018-04-13 20:54:00 1.709167 2.392500 2.804333 0.016667
55 2018-04-13 20:55:00 1.711333 2.556500 2.222000 0.025000
56 2018-04-13 20:56:00 2.305333 2.116000 2.282000 0.025000
57 2018-04-13 20:57:00 1.646667 2.533833 2.156333 0.033333
58 2018-04-13 20:58:00 1.476333 2.341833 2.390833 0.025000
59 2018-04-13 20:59:00 1.694833 3.290667 2.103833 0.025000
60 2018-04-13 21:00:00 2.601333 2.661500 2.069667 0.091667
.
###Markdown
General format of a query

Basic syntax:
```
get metrics <metric_name>
get metrics cpu_user bucket_size 1 tstart '2018-04-13 21:00:00' tend '2018-04-13 21:00:30'
get metrics cpu_user bucket_size 60 last 10
```
 get - can be omitted for the REST API
 metric_name - name of a metric, e.g. cpu_user, mem_memfree (see "topo -m" for available metrics)
 tstart, tend - a unix timestamp (seconds since 01/01/1970) or in human readable format
 last - number of buckets that are returned
 bucket_size - aggregation interval (aggregation depends on metric type -> avg, sum, ...)

Advanced syntax elements:
 Aggregates: rate(netdev_bytes_r) - other valid methods: sum, avg, max, min, rate (bytes/second)
 Filter: from node=... - filters the data per node
 Grouping: sum(netdev_bytes_r) group by netdev_name - sums the data per network adapter (eth0, eth1, ..) -> sum alone would sum the data of all adapters on all nodes (one column)

Retrieving performance data using the REST API
 The Zimon query language is used, which is slightly different from what mmperfmon uses -> see KC
 Query string must be URL encoded (space = %20): metrics cpu_user bucket_size 1 last 5 -> metrics%20cpu_user%20bucket_size%201%20last%205
 All Zimon metrics are available, see echo "topo -m" | /opt/IBM/zimon/zc 127.0.0.1
 Be careful with queries that would return a lot (megabytes) of data!

Example: Retrieve the free memory of all nodes
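As a small illustration of the URL-encoding rule above (standard library only; the `requests` call in the next cell performs this encoding automatically when the query is passed via `params`):

```python
import urllib

raw_query = "metrics cpu_user bucket_size 1 last 5"
print urllib.quote(raw_query)  # -> metrics%20cpu_user%20bucket_size%201%20last%205
```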
###Code
import pandas as pd
from pandas import DataFrame
import datetime
# Retrieve the free memory in kbytes of all nodes of the last five hours
# choose a bucket_size of 60 seconds
query={'query':'metrics mem_memfree bucket_size 60 last ' + str(60*5)} # last five hours
res = get("/perfmon/data", params=query, printResult=False)
# transform the retrieved data so that a pandas DataFrame object can be created
perfData = res.json()["performanceData"]
columnHeaders = ["key", "tstamp", "date", "value"]
transformedData = []
for i,row in enumerate(perfData["rows"]):
for j,value in enumerate(row["values"]):
key = perfData["legend"][j]["keys"][0]
tstamp = row["tstamp"]
date = datetime.datetime.fromtimestamp(int(tstamp))
if value is not None:
transformedData.append([key,tstamp,date,value/1000.0]) # divide by 1000 to get megabytes
# create the DataFrame object and print the retrieved data
df = DataFrame(transformedData,columns=columnHeaders)
df
###Output
GET https://9.155.108.247/scalemgmt/v2/perfmon/data
###Markdown
Basic analysis of the data
###Code
import numpy as np
pd.pivot_table(df,index=["key",],values=["value"],
aggfunc=[len,np.min,np.max,np.mean])
###Output
_____no_output_____
###Markdown
Plot the data per node
###Code
def pltPerfData(df):
legend_entries = []
ax = None
for key, group in df.groupby("key"):
if ax:
df[df.key == key].plot(x="date",y=["value"], ax=ax)
else:
ax = df[df.key == key].plot(x="date",y=["value"])
legend_entries.append(key)
ax.legend(legend_entries, bbox_to_anchor=(1.2, 1), loc=2, borderaxespad=0.)
pltPerfData(df)
###Output
_____no_output_____ |
.ipynb_checkpoints/covid-ae-pretrain-gnn-attn-cnn(1)-checkpoint.ipynb | ###Markdown
Training scheme:
(1) train a denoising auto encoder model using all data, including train and test data
(2) from the weights of the denoising auto encoder model, finetune to predict targets such as reactivity

Rough network architecture:
inputs -> conv1ds -> aggregation of neighborhoods -> multi head attention -> aggregation of neighborhoods -> multi head attention -> conv1d -> predict

This architecture was inspired by https://www.kaggle.com/cpmpml/graph-transfomer
###Code
pretrain_dir = None # model dir for resuming training. if None, train from scratch
one_fold = False # if True, train model at only first fold. use if you try a new idea quickly.
run_test = False # if True, use small data. you can check whether this code run or not
denoise = True # if True, use train data whose signal_to_noise > 1
ae_epochs = 20 # epoch of training of denoising auto encoder
ae_epochs_each = 5 # epoch of training of denoising auto encoder each time.
# I use train data (seqlen = 107) and private test data (seqlen = 130) for auto encoder training.
# I dont know how to easily fit keras model to use both of different shape data simultaneously,
# so I call fit function several times.
ae_batch_size = 32
epochs_list = [30, 10, 3, 3, 5, 5]
batch_size_list = [8, 16, 32, 64, 128, 256]
## copy pretrain model to working dir
import shutil
import glob
if pretrain_dir is not None:
for d in glob.glob(pretrain_dir + "*"):
shutil.copy(d, ".")
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import gc
import os
import matplotlib.pyplot as plt
%matplotlib inline
###Output
_____no_output_____
###Markdown
load
###Code
import json
import glob
from tqdm.notebook import tqdm
train = pd.read_json("/kaggle/input/stanford-covid-vaccine/train.json",lines=True)
if denoise:
train = train[train.signal_to_noise > 1].reset_index(drop = True)
test = pd.read_json("/kaggle/input/stanford-covid-vaccine/test.json",lines=True)
test_pub = test[test["seq_length"] == 107]
test_pri = test[test["seq_length"] == 130]
sub = pd.read_csv("/kaggle/input/stanford-covid-vaccine/sample_submission.csv")
if run_test: ## to test
train = train[:30]
test_pub = test_pub[:30]
test_pri = test_pri[:30]
As = []
for id in tqdm(train["id"]):
a = np.load(f"/kaggle/input/stanford-covid-vaccine/bpps/{id}.npy")
As.append(a)
As = np.array(As)
As_pub = []
for id in tqdm(test_pub["id"]):
a = np.load(f"/kaggle/input/stanford-covid-vaccine/bpps/{id}.npy")
As_pub.append(a)
As_pub = np.array(As_pub)
As_pri = []
for id in tqdm(test_pri["id"]):
a = np.load(f"/kaggle/input/stanford-covid-vaccine/bpps/{id}.npy")
As_pri.append(a)
As_pri = np.array(As_pri)
print(train.shape)
train.head()
print(test.shape)
test.head()
print(sub.shape)
sub.head()
###Output
(457953, 6)
###Markdown
target
###Code
targets = list(sub.columns[1:])
print(targets)
y_train = []
seq_len = train["seq_length"].iloc[0]
seq_len_target = train["seq_scored"].iloc[0]
ignore = -10000
ignore_length = seq_len - seq_len_target
for target in targets:
y = np.vstack(train[target])
dummy = np.zeros([y.shape[0], ignore_length]) + ignore
y = np.hstack([y, dummy])
y_train.append(y)
y = np.stack(y_train, axis = 2)
y.shape
###Output
['reactivity', 'deg_Mg_pH10', 'deg_pH10', 'deg_Mg_50C', 'deg_50C']
###Markdown
structure adj
###Code
def get_structure_adj(train):
## get adjacent matrix from structure sequence
## here I calculate adjacent matrix of each base pair,
## but eventually ignore difference of base pair and integrate into one matrix
Ss = []
for i in tqdm(range(len(train))):
seq_length = train["seq_length"].iloc[i]
structure = train["structure"].iloc[i]
sequence = train["sequence"].iloc[i]
cue = []
a_structures = {
("A", "U") : np.zeros([seq_length, seq_length]),
("C", "G") : np.zeros([seq_length, seq_length]),
("U", "G") : np.zeros([seq_length, seq_length]),
("U", "A") : np.zeros([seq_length, seq_length]),
("G", "C") : np.zeros([seq_length, seq_length]),
("G", "U") : np.zeros([seq_length, seq_length]),
}
a_structure = np.zeros([seq_length, seq_length])
for i in range(seq_length):
if structure[i] == "(":
cue.append(i)
elif structure[i] == ")":
start = cue.pop()
# a_structure[start, i] = 1
# a_structure[i, start] = 1
a_structures[(sequence[start], sequence[i])][start, i] = 1
a_structures[(sequence[i], sequence[start])][i, start] = 1
a_strc = np.stack([a for a in a_structures.values()], axis = 2)
a_strc = np.sum(a_strc, axis = 2, keepdims = True)
Ss.append(a_strc)
Ss = np.array(Ss)
print(Ss.shape)
return Ss
Ss = get_structure_adj(train)
Ss_pub = get_structure_adj(test_pub)
Ss_pri = get_structure_adj(test_pri)
###Output
_____no_output_____
###Markdown
distance adj
###Code
def get_distance_matrix(As):
## adjacent matrix based on distance on the sequence
## D[i, j] = 1 / (abs(i - j) + 1) ** pow, pow = 1, 2, 4
idx = np.arange(As.shape[1])
Ds = []
for i in range(len(idx)):
d = np.abs(idx[i] - idx)
Ds.append(d)
Ds = np.array(Ds) + 1
Ds = 1/Ds
Ds = Ds[None, :,:]
Ds = np.repeat(Ds, len(As), axis = 0)
Dss = []
for i in [1, 2, 4]:
Dss.append(Ds ** i)
Ds = np.stack(Dss, axis = 3)
print(Ds.shape)
return Ds
Ds = get_distance_matrix(As)
Ds_pub = get_distance_matrix(As_pub)
Ds_pri = get_distance_matrix(As_pri)
## concat adjecent
As = np.concatenate([As[:,:,:,None], Ss, Ds], axis = 3).astype(np.float32)
As_pub = np.concatenate([As_pub[:,:,:,None], Ss_pub, Ds_pub], axis = 3).astype(np.float32)
As_pri = np.concatenate([As_pri[:,:,:,None], Ss_pri, Ds_pri], axis = 3).astype(np.float32)
del Ss, Ds, Ss_pub, Ds_pub, Ss_pri, Ds_pri
As.shape, As_pub.shape, As_pri.shape
###Output
_____no_output_____
###Markdown
node
###Code
## sequence
def return_ohe(n, i):
tmp = [0] * n
tmp[i] = 1
return tmp
def get_input(train):
## get node features, which is one hot encoded
mapping = {}
vocab = ["A", "G", "C", "U"]
for i, s in enumerate(vocab):
mapping[s] = return_ohe(len(vocab), i)
X_node = np.stack(train["sequence"].apply(lambda x : list(map(lambda y : mapping[y], list(x)))))
mapping = {}
vocab = ["S", "M", "I", "B", "H", "E", "X"]
for i, s in enumerate(vocab):
mapping[s] = return_ohe(len(vocab), i)
X_loop = np.stack(train["predicted_loop_type"].apply(lambda x : list(map(lambda y : mapping[y], list(x)))))
mapping = {}
vocab = [".", "(", ")"]
for i, s in enumerate(vocab):
mapping[s] = return_ohe(len(vocab), i)
X_structure = np.stack(train["structure"].apply(lambda x : list(map(lambda y : mapping[y], list(x)))))
X_node = np.concatenate([X_node, X_loop], axis = 2)
## interaction
a = np.sum(X_node * (2 ** np.arange(X_node.shape[2])[None, None, :]), axis = 2)
vocab = sorted(set(a.flatten()))
print(vocab)
ohes = []
for v in vocab:
ohes.append(a == v)
ohes = np.stack(ohes, axis = 2)
X_node = np.concatenate([X_node, ohes], axis = 2).astype(np.float32)
print(X_node.shape)
return X_node
X_node = get_input(train)
X_node_pub = get_input(test_pub)
X_node_pri = get_input(test_pri)
###Output
[17, 18, 20, 24, 33, 34, 36, 40, 65, 66, 68, 72, 129, 130, 132, 136, 257, 258, 260, 264, 513, 514, 516, 520, 1025, 1026, 1028, 1032]
(2096, 107, 39)
[17, 18, 20, 24, 33, 34, 36, 40, 65, 66, 68, 72, 129, 130, 132, 136, 257, 258, 260, 264, 513, 514, 516, 520, 1025, 1026, 1028, 1032]
(629, 107, 39)
[17, 18, 20, 24, 33, 34, 36, 40, 65, 66, 68, 72, 129, 130, 132, 136, 257, 258, 260, 264, 513, 514, 516, 520, 1025, 1026, 1028, 1032]
(3005, 130, 39)
###Markdown
model
###Code
import tensorflow as tf
from tensorflow.keras import layers as L
import tensorflow_addons as tfa
from tensorflow.keras import backend as K
def mcrmse(t, p, seq_len_target = seq_len_target):
## calculate mcrmse score by using numpy
t = t[:, :seq_len_target]
p = p[:, :seq_len_target]
score = np.mean(np.sqrt(np.mean((p - t) ** 2, axis = 1)))
return score
def mcrmse_loss(t, y, seq_len_target = seq_len_target):
## calculate mcrmse score by using tf
t = t[:, :seq_len_target]
y = y[:, :seq_len_target]
loss = tf.reduce_mean(tf.sqrt(tf.reduce_mean((t - y) ** 2, axis = 1)))
return loss
def attention(x_inner, x_outer, n_factor, dropout):
x_Q = L.Conv1D(n_factor, 1, activation='linear',
kernel_initializer='glorot_uniform',
bias_initializer='glorot_uniform',
)(x_inner)
x_K = L.Conv1D(n_factor, 1, activation='linear',
kernel_initializer='glorot_uniform',
bias_initializer='glorot_uniform',
)(x_outer)
x_V = L.Conv1D(n_factor, 1, activation='linear',
kernel_initializer='glorot_uniform',
bias_initializer='glorot_uniform',
)(x_outer)
x_KT = L.Permute((2, 1))(x_K)
res = L.Lambda(lambda c: K.batch_dot(c[0], c[1]) / np.sqrt(n_factor))([x_Q, x_KT])
# res = tf.expand_dims(res, axis = 3)
# res = L.Conv2D(16, 3, 1, padding = "same", activation = "relu")(res)
# res = L.Conv2D(1, 3, 1, padding = "same", activation = "relu")(res)
# res = tf.squeeze(res, axis = 3)
att = L.Lambda(lambda c: K.softmax(c, axis=-1))(res)
att = L.Lambda(lambda c: K.batch_dot(c[0], c[1]))([att, x_V])
return att
def multi_head_attention(x, y, n_factor, n_head, dropout):
if n_head == 1:
att = attention(x, y, n_factor, dropout)
else:
n_factor_head = n_factor // n_head
heads = [attention(x, y, n_factor_head, dropout) for i in range(n_head)]
att = L.Concatenate()(heads)
att = L.Dense(n_factor,
kernel_initializer='glorot_uniform',
bias_initializer='glorot_uniform',
)(att)
x = L.Add()([x, att])
x = L.LayerNormalization()(x)
if dropout > 0:
x = L.Dropout(dropout)(x)
return x
def res(x, unit, kernel = 3, rate = 0.1):
h = L.Conv1D(unit, kernel, 1, padding = "same", activation = None)(x)
h = L.LayerNormalization()(h)
h = L.LeakyReLU()(h)
h = L.Dropout(rate)(h)
return L.Add()([x, h])
def forward(x, unit, kernel = 3, rate = 0.1):
# h = L.Dense(unit, None)(x)
h = L.Conv1D(unit, kernel, 1, padding = "same", activation = None)(x)
h = L.LayerNormalization()(h)
h = L.Dropout(rate)(h)
# h = tf.keras.activations.swish(h)
h = L.LeakyReLU()(h)
h = res(h, unit, kernel, rate)
return h
def adj_attn(x, adj, unit, n = 2, rate = 0.1):
x_a = x
x_as = []
for i in range(n):
x_a = forward(x_a, unit)
x_a = tf.matmul(adj, x_a) ## aggregate neighborhoods
x_as.append(x_a)
if n == 1:
x_a = x_as[0]
else:
x_a = L.Concatenate()(x_as)
x_a = forward(x_a, unit)
return x_a
def get_base(config):
## base model architecture
## node, adj -> middle feature
node = tf.keras.Input(shape = (None, X_node.shape[2]), name = "node")
adj = tf.keras.Input(shape = (None, None, As.shape[3]), name = "adj")
adj_learned = L.Dense(1, "relu")(adj)
adj_all = L.Concatenate(axis = 3)([adj, adj_learned])
xs = []
xs.append(node)
x1 = forward(node, 128, kernel = 3, rate = 0.0)
x2 = forward(x1, 64, kernel = 6, rate = 0.0)
x3 = forward(x2, 32, kernel = 15, rate = 0.0)
x4 = forward(x3, 16, kernel = 30, rate = 0.0)
x = L.Concatenate()([x1, x2, x3, x4])
for unit in [64, 32]:
x_as = []
for i in range(adj_all.shape[3]):
x_a = adj_attn(x, adj_all[:, :, :, i], unit, rate = 0.0)
x_as.append(x_a)
x_c = forward(x, unit, kernel = 30)
x = L.Concatenate()(x_as + [x_c])
x = forward(x, unit)
x = multi_head_attention(x, x, unit, 4, 0.0)
xs.append(x)
x = L.Concatenate()(xs)
model = tf.keras.Model(inputs = [node, adj], outputs = [x])
return model
def get_ae_model(base, config):
## denoising auto encoder part
## node, adj -> middle feature -> node
node = tf.keras.Input(shape = (None, X_node.shape[2]), name = "node")
adj = tf.keras.Input(shape = (None, None, As.shape[3]), name = "adj")
x = base([L.SpatialDropout1D(0.3)(node), adj])
x = forward(x, 64, rate = 0.3)
p = L.Dense(X_node.shape[2], "sigmoid")(x)
loss = - tf.reduce_mean(20 * node * tf.math.log(p + 1e-4) + (1 - node) * tf.math.log(1 - p + 1e-4))
model = tf.keras.Model(inputs = [node, adj], outputs = [loss])
opt = get_optimizer()
model.compile(optimizer = opt, loss = lambda t, y : y)
return model
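## note: the loss above is a weighted binary cross-entropy reconstruction loss on the (spatially
## dropped-out) one-hot node features; "on" (=1) entries are up-weighted x20 since they are much
## rarer than zeros in the one-hot encoding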
def get_model(base, config):
## regression part
## node, adj -> middle feature -> prediction of targets
node = tf.keras.Input(shape = (None, X_node.shape[2]), name = "node")
adj = tf.keras.Input(shape = (None, None, As.shape[3]), name = "adj")
x = base([node, adj])
x = forward(x, 128, rate = 0.4)
x = L.Dense(5, None)(x)
model = tf.keras.Model(inputs = [node, adj], outputs = [x])
opt = get_optimizer()
model.compile(optimizer = opt, loss = mcrmse_loss)
return model
def get_optimizer():
# sgd = tf.keras.optimizers.SGD(0.05, momentum = 0.9, nesterov=True)
adam = tf.optimizers.Adam()
# radam = tfa.optimizers.RectifiedAdam()
# lookahead = tfa.optimizers.Lookahead(adam, sync_period=6)
# swa = tfa.optimizers.SWA(adam)
return adam
###Output
/opt/conda/lib/python3.7/site-packages/tensorflow_addons/utils/ensure_tf_install.py:68: UserWarning: Tensorflow Addons supports using Python ops for all Tensorflow versions above or equal to 2.2.0 and strictly below 2.3.0 (nightly versions are not supported).
The versions of TensorFlow you are currently using is 2.3.0 and is not supported.
Some things might work, some things might not.
If you were to encounter a bug, do not file an issue.
If you want to make sure you're using a tested and supported configuration, either change the TensorFlow version or the TensorFlow Addons's version.
You can find the compatibility matrix in TensorFlow Addon's readme:
https://github.com/tensorflow/addons
UserWarning,
###Markdown
pretrain
###Code
## here train denoising auto encoder model using all data
config = {} ## not use now
if ae_epochs > 0:
base = get_base(config)
ae_model = get_ae_model(base, config)
## TODO : simultaneous train
for i in range(ae_epochs//ae_epochs_each):
print(f"------ {i} ------")
print("--- train ---")
ae_model.fit([X_node, As], [X_node[:,0]],
epochs = ae_epochs_each,
batch_size = ae_batch_size)
print("--- public ---")
ae_model.fit([X_node_pub, As_pub], [X_node_pub[:,0]],
epochs = ae_epochs_each,
batch_size = ae_batch_size)
print("--- private ---")
ae_model.fit([X_node_pri, As_pri], [X_node_pri[:,0]],
epochs = ae_epochs_each,
batch_size = ae_batch_size)
gc.collect()
print("****** save ae model ******")
base.save_weights("./base_ae")
###Output
------ 0 ------
--- train ---
Epoch 1/5
66/66 [==============================] - 6s 89ms/step - loss: 0.9165
Epoch 2/5
66/66 [==============================] - 5s 77ms/step - loss: 0.3297
Epoch 3/5
66/66 [==============================] - 5s 80ms/step - loss: 0.1660
Epoch 4/5
66/66 [==============================] - 5s 81ms/step - loss: 0.1061
Epoch 5/5
66/66 [==============================] - 5s 81ms/step - loss: 0.0794
--- public ---
Epoch 1/5
20/20 [==============================] - 2s 117ms/step - loss: 0.0687
Epoch 2/5
20/20 [==============================] - 2s 76ms/step - loss: 0.0626
Epoch 3/5
20/20 [==============================] - 2s 75ms/step - loss: 0.0588
Epoch 4/5
20/20 [==============================] - 1s 74ms/step - loss: 0.0603
Epoch 5/5
20/20 [==============================] - 2s 78ms/step - loss: 0.0589
--- private ---
Epoch 1/5
94/94 [==============================] - 10s 102ms/step - loss: 0.0602
Epoch 2/5
94/94 [==============================] - 9s 91ms/step - loss: 0.0457
Epoch 3/5
94/94 [==============================] - 9s 95ms/step - loss: 0.0404
Epoch 4/5
94/94 [==============================] - 9s 93ms/step - loss: 0.0336
Epoch 5/5
94/94 [==============================] - 9s 91ms/step - loss: 0.0280
------ 1 ------
--- train ---
Epoch 1/5
66/66 [==============================] - 5s 79ms/step - loss: 0.0261
Epoch 2/5
66/66 [==============================] - 5s 79ms/step - loss: 0.0358
Epoch 3/5
66/66 [==============================] - 5s 79ms/step - loss: 0.0246
Epoch 4/5
66/66 [==============================] - 5s 77ms/step - loss: 0.0225
Epoch 5/5
66/66 [==============================] - 5s 81ms/step - loss: 0.0220
--- public ---
Epoch 1/5
20/20 [==============================] - 1s 75ms/step - loss: 0.0203
Epoch 2/5
20/20 [==============================] - 1s 75ms/step - loss: 0.0177
Epoch 3/5
20/20 [==============================] - 2s 75ms/step - loss: 0.0198
Epoch 4/5
20/20 [==============================] - 1s 75ms/step - loss: 0.0178
Epoch 5/5
20/20 [==============================] - 2s 80ms/step - loss: 0.0162
--- private ---
Epoch 1/5
94/94 [==============================] - 9s 95ms/step - loss: 0.0221
Epoch 2/5
94/94 [==============================] - 9s 94ms/step - loss: 0.0178
Epoch 3/5
94/94 [==============================] - 9s 91ms/step - loss: 0.0165
Epoch 4/5
94/94 [==============================] - 9s 91ms/step - loss: 0.0178
Epoch 5/5
94/94 [==============================] - 9s 91ms/step - loss: 0.0166
------ 2 ------
--- train ---
Epoch 1/5
66/66 [==============================] - 5s 80ms/step - loss: 0.0144
Epoch 2/5
66/66 [==============================] - 5s 79ms/step - loss: 0.0119
Epoch 3/5
66/66 [==============================] - 5s 78ms/step - loss: 0.0138
Epoch 4/5
66/66 [==============================] - 6s 83ms/step - loss: 0.0149
Epoch 5/5
66/66 [==============================] - 5s 83ms/step - loss: 0.0120
--- public ---
Epoch 1/5
20/20 [==============================] - 2s 77ms/step - loss: 0.0156
Epoch 2/5
20/20 [==============================] - 2s 76ms/step - loss: 0.0132
Epoch 3/5
20/20 [==============================] - 2s 80ms/step - loss: 0.0129
Epoch 4/5
20/20 [==============================] - 2s 77ms/step - loss: 0.0112
Epoch 5/5
20/20 [==============================] - 2s 84ms/step - loss: 0.0114
--- private ---
Epoch 1/5
94/94 [==============================] - 8s 90ms/step - loss: 0.0150
Epoch 2/5
94/94 [==============================] - 9s 91ms/step - loss: 0.0130
Epoch 3/5
94/94 [==============================] - 9s 91ms/step - loss: 0.0118
Epoch 4/5
94/94 [==============================] - 9s 92ms/step - loss: 0.0138
Epoch 5/5
94/94 [==============================] - 8s 90ms/step - loss: 0.0107
------ 3 ------
--- train ---
Epoch 1/5
66/66 [==============================] - 5s 80ms/step - loss: 0.0179
Epoch 2/5
66/66 [==============================] - 6s 87ms/step - loss: 0.0136
Epoch 3/5
66/66 [==============================] - 5s 77ms/step - loss: 0.0110
Epoch 4/5
66/66 [==============================] - 6s 84ms/step - loss: 0.0088
Epoch 5/5
66/66 [==============================] - 5s 77ms/step - loss: 0.0091
--- public ---
Epoch 1/5
20/20 [==============================] - 2s 80ms/step - loss: 0.0076
Epoch 2/5
20/20 [==============================] - 1s 75ms/step - loss: 0.0073
Epoch 3/5
20/20 [==============================] - 1s 74ms/step - loss: 0.0071
Epoch 4/5
20/20 [==============================] - 1s 75ms/step - loss: 0.0062
Epoch 5/5
20/20 [==============================] - 1s 74ms/step - loss: 0.0075
--- private ---
Epoch 1/5
94/94 [==============================] - 9s 92ms/step - loss: 0.0111
Epoch 2/5
94/94 [==============================] - 9s 94ms/step - loss: 0.0102
Epoch 3/5
94/94 [==============================] - 9s 94ms/step - loss: 0.0103
Epoch 4/5
94/94 [==============================] - 9s 92ms/step - loss: 0.0100
Epoch 5/5
94/94 [==============================] - 8s 90ms/step - loss: 0.0101
****** save ae model ******
###Markdown
train
###Code
## here train regression model from pretrain auto encoder model
from sklearn.model_selection import KFold
kfold = KFold(5, shuffle = True, random_state = 42)
scores = []
preds = np.zeros([len(X_node), X_node.shape[1], 5])
for i, (tr_idx, va_idx) in enumerate(kfold.split(X_node, As)):
print(f"------ fold {i} start -----")
print(f"------ fold {i} start -----")
print(f"------ fold {i} start -----")
X_node_tr = X_node[tr_idx]
X_node_va = X_node[va_idx]
As_tr = As[tr_idx]
As_va = As[va_idx]
y_tr = y[tr_idx]
y_va = y[va_idx]
base = get_base(config)
if ae_epochs > 0:
print("****** load ae model ******")
base.load_weights("./base_ae")
model = get_model(base, config)
if pretrain_dir is not None:
d = f"./model{i}"
print(f"--- load from {d} ---")
model.load_weights(d)
for epochs, batch_size in zip(epochs_list, batch_size_list):
print(f"epochs : {epochs}, batch_size : {batch_size}")
model.fit([X_node_tr, As_tr], [y_tr],
validation_data=([X_node_va, As_va], [y_va]),
epochs = epochs,
batch_size = batch_size, validation_freq = 3)
model.save_weights(f"./model{i}")
p = model.predict([X_node_va, As_va])
scores.append(mcrmse(y_va, p))
print(f"fold {i}: mcrmse {scores[-1]}")
preds[va_idx] = p
if one_fold:
break
pd.to_pickle(preds, "oof.pkl")
print(scores)
###Output
[0.20787316087387428, 0.20292475175106967, 0.21125404692954683, 0.2146298189452942, 0.206133587062614]
###Markdown
predict
###Code
p_pub = 0
p_pri = 0
for i in range(5):
model.load_weights(f"./model{i}")
p_pub += model.predict([X_node_pub, As_pub]) / 5
p_pri += model.predict([X_node_pri, As_pri]) / 5
if one_fold:
p_pub *= 5
p_pri *= 5
break
for i, target in enumerate(targets):
test_pub[target] = [list(p_pub[k, :, i]) for k in range(p_pub.shape[0])]
test_pri[target] = [list(p_pri[k, :, i]) for k in range(p_pri.shape[0])]
###Output
/opt/conda/lib/python3.7/site-packages/ipykernel_launcher.py:13: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead
See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy
del sys.path[0]
/opt/conda/lib/python3.7/site-packages/ipykernel_launcher.py:14: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead
See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy
###Markdown
sub
###Code
preds_ls = []
for df, preds in [(test_pub, p_pub), (test_pri, p_pri)]:
for i, uid in enumerate(df.id):
single_pred = preds[i]
single_df = pd.DataFrame(single_pred, columns=targets)
single_df['id_seqpos'] = [f'{uid}_{x}' for x in range(single_df.shape[0])]
preds_ls.append(single_df)
preds_df = pd.concat(preds_ls)
preds_df.to_csv("submission.csv", index = False)
preds_df.head()
print(scores)
print(np.mean(scores))
###Output
[0.20787316087387428, 0.20292475175106967, 0.21125404692954683, 0.2146298189452942, 0.206133587062614]
0.20856307311247982
|
p1_navigation/.ipynb_checkpoints/Deep_Q_Network_Solution-checkpoint.ipynb | ###Markdown
Deep Q-Network (DQN)
---
In this notebook, you will implement a DQN agent with OpenAI Gym's LunarLander-v2 environment.
1. Import the Necessary Packages
###Code
import gym
import random
import torch
import numpy as np
from collections import deque
import matplotlib.pyplot as plt
%matplotlib inline
###Output
_____no_output_____
###Markdown
2. Instantiate the Environment and Agent
Initialize the environment in the code cell below.
###Code
env = gym.make('LunarLander-v2')
env.seed(0)
print('State shape: ', env.observation_space.shape)
print('Number of actions: ', env.action_space.n)
###Output
[33mWARN: gym.spaces.Box autodetected dtype as <class 'numpy.float32'>. Please provide explicit dtype.[0m
State shape: (8,)
Number of actions: 4
###Markdown
Please refer to the instructions in `Deep_Q_Network.ipynb` if you would like to write your own DQN agent. Otherwise, run the code cell below to load the solution files.
###Code
from dqn_agent import Agent
agent = Agent(state_size=8, action_size=4, seed=0)
# watch an untrained agent
state = env.reset()
for j in range(200):
action = agent.act(state)
env.render()
state, reward, done, _ = env.step(action)
if done:
break
env.close()
###Output
_____no_output_____
###Markdown
3. Train the Agent with DQN
Run the code cell below to train the agent from scratch. You are welcome to amend the supplied values of the parameters in the function, to try to see if you can get better performance!
Alternatively, you can skip to the next step below (**4. Watch a Smart Agent!**) to load the saved model weights from a pre-trained agent.
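If you do experiment, the call in the cell below could be swapped for something like the following (the values here are purely illustrative):

    scores = dqn(n_episodes=1000, max_t=1000, eps_start=1.0, eps_end=0.02, eps_decay=0.99)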
###Code
def dqn(n_episodes=2000, max_t=1000, eps_start=1.0, eps_end=0.01, eps_decay=0.995):
"""Deep Q-Learning.
Params
======
n_episodes (int): maximum number of training episodes
max_t (int): maximum number of timesteps per episode
eps_start (float): starting value of epsilon, for epsilon-greedy action selection
eps_end (float): minimum value of epsilon
eps_decay (float): multiplicative factor (per episode) for decreasing epsilon
"""
scores = [] # list containing scores from each episode
scores_window = deque(maxlen=100) # last 100 scores
eps = eps_start # initialize epsilon
for i_episode in range(1, n_episodes+1):
state = env.reset()
score = 0
for t in range(max_t):
action = agent.act(state, eps)
next_state, reward, done, _ = env.step(action)
agent.step(state, action, reward, next_state, done)
state = next_state
score += reward
if done:
break
scores_window.append(score) # save most recent score
scores.append(score) # save most recent score
eps = max(eps_end, eps_decay*eps) # decrease epsilon
print('\rEpisode {}\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)), end="")
if i_episode % 100 == 0:
print('\rEpisode {}\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)))
if np.mean(scores_window)>=200.0:
print('\nEnvironment solved in {:d} episodes!\tAverage Score: {:.2f}'.format(i_episode-100, np.mean(scores_window)))
torch.save(agent.qnetwork_local.state_dict(), 'checkpoint.pth')
break
return scores
scores = dqn()
# plot the scores
fig = plt.figure()
ax = fig.add_subplot(111)
plt.plot(np.arange(len(scores)), scores)
plt.ylabel('Score')
plt.xlabel('Episode #')
plt.show()
###Output
Episode 100 Average Score: -225.00
Episode 200 Average Score: -176.93
Episode 300 Average Score: -87.32
Episode 400 Average Score: -56.19
Episode 500 Average Score: -66.67
Episode 600 Average Score: -42.52
Episode 700 Average Score: 36.50
Episode 800 Average Score: 48.18
Episode 900 Average Score: 155.64
Episode 1000 Average Score: 191.26
Episode 1100 Average Score: 185.16
Episode 1200 Average Score: 180.23
Episode 1300 Average Score: 184.97
Episode 1400 Average Score: 188.67
Episode 1500 Average Score: 181.42
Episode 1600 Average Score: 199.10
Episode 1604 Average Score: 200.17
Environment solved in 1504 episodes! Average Score: 200.17
###Markdown
4. Watch a Smart Agent!
In the next code cell, you will load the trained weights from file to watch a smart agent!
###Code
# load the weights from file
agent.qnetwork_local.load_state_dict(torch.load('checkpoint.pth'))
for i in range(3):
state = env.reset()
for j in range(200):
action = agent.act(state)
env.render()
state, reward, done, _ = env.step(action)
if done:
break
env.close()
###Output
_____no_output_____ |
ch4/4.Gaussian.ipynb | ###Markdown
Machine learning Chapter 4, Gaussian Jun Sup Shin, Digital Imaging, GSAIM, CAU
###Code
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
%matplotlib inline
###Output
_____no_output_____
###Markdown
Gaussian function$$G(x;\mu,\sigma)=\frac{1}{(2\pi\sigma^{2})^{1/2}} \exp{\left(-\frac{(x-\mu)^2}{\sigma^2}\right)}$$
###Code
def gaussian(x, mu, sigma, normalize:bool = True):
norm_term = 1
if normalize is True:
norm_term = 1 / np.sqrt(2 * np.pi * np.power(sigma, 2))
return norm_term * np.exp(-np.power(x-mu, 2) / np.power(sigma, 2))
x = np.linspace(-3, 3, 100)
plt.plot(x, gaussian(x, 0, 1), color='blueviolet')
plt.plot(x, gaussian(x, 0, 1, False), color='magenta')
plt.title('$G(x;\mu, \sigma)$')
plt.grid(True)
plt.show()
###Output
_____no_output_____
###Markdown
2D Gaussian function
$$G_{2D}(\mathbf{x};\mathbf{\mu}, \Sigma) = \frac{1}{2\pi {\left| \Sigma \right|}^{1/2}} \exp{\left(-\frac{1}{2}(\mathbf{x}-\mathbf{\mu})^T\Sigma^{-1}(\mathbf{x}-\mathbf{\mu})\right)}$$
where $\mathbf{\mu} = [\mu_1, \mu_2]^T$ is the mean and $\Sigma$ is the covariance matrix.
Covariance of 2D Gaussian
$$\Sigma =\begin{bmatrix}{\sigma_x}^2 & {\sigma_{xy}} \\ {\sigma_{xy}} & {\sigma_y}^2\end{bmatrix}$$
where $\sigma_x$ and $\sigma_y$ are the standard deviations along the x and y axes respectively, and $\sigma_{xy}$ is their covariance.
###Code
def gaussian_2D(x, y, mu, Sigma, normalize:bool = True):
norm_term = 1
if normalize is True:
        norm_term = 1 / (2 * np.pi * np.power(np.linalg.det(Sigma), 0.5))  # 1 / (2*pi*sqrt(|Sigma|))
_x = np.reshape(x, (1, -1))
_y = np.reshape(y, (1, -1))
xy = np.concatenate((_x, _y), axis=0)
exp_term = np.matmul(np.transpose(xy - mu), np.linalg.inv(Sigma))
exp_term = -0.5 * np.matmul(exp_term, xy - mu)
exp_term = np.reshape(np.diag(exp_term), (y.shape[0], x.shape[1]))
return norm_term * np.exp(exp_term)
from mpl_toolkits.mplot3d import Axes3D
x, y = np.linspace(-10, 10, 100), np.linspace(-10, 10, 100)
_x, _y = np.meshgrid(x, y)
mu = np.array([[0.5], [0]])
Sigma = np.array([[7, -3],[-3, 7]])
plt.figure(figsize = (12, 6))
plt.subplots_adjust(wspace=0.5)
ax = plt.subplot(1, 2, 1, projection='3d')
ax.plot_surface(_x, _y, gaussian_2D(_x, _y, mu, Sigma), alpha=0.5, color='blueviolet')
ax.view_init(30, 120)
plt.xlabel("$x$")
plt.ylabel("$y$")
plt.subplot(1, 2, 2)
cont = plt.contour(_x, _y, gaussian_2D(_x, _y, mu, Sigma), 7, colors='blue')
cont.clabel(fmt='%.2f', fontsize=10)
plt.xlabel('$x$', fontsize=15)
plt.ylabel('$y$', fontsize=15)
plt.show()
###Output
_____no_output_____ |
DS 223.ipynb | ###Markdown
_Lambda School Data Science_
Make explanatory visualizations
Today we will reproduce this [example by FiveThirtyEight](https://fivethirtyeight.com/features/al-gores-new-movie-exposes-the-big-flaw-in-online-movie-ratings/).
###Code
from IPython.display import display, Image
url = 'https://fivethirtyeight.com/wp-content/uploads/2017/09/mehtahickey-inconvenient-0830-1.png'
example = Image(url=url, width=400)
display(example)
###Output
_____no_output_____
###Markdown
Using this data: https://github.com/fivethirtyeight/data/tree/master/inconvenient-sequel
Objectives
- add emphasis and annotations to transform visualizations from exploratory to explanatory
- remove clutter from visualizations
Links
- [Strong Titles Are The Biggest Bang for Your Buck](http://stephanieevergreen.com/strong-titles/)
- [Remove to improve (the data-ink ratio)](https://www.darkhorseanalytics.com/blog/data-looks-better-naked)
- [How to Generate FiveThirtyEight Graphs in Python](https://www.dataquest.io/blog/making-538-plots/)
Make prototypes
This helps us understand the problem.
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
plt.style.use('fivethirtyeight')
fake = pd.Series([38, 3, 2, 1, 2, 4, 6, 5, 5, 33],
index=range(1,11))
ax = fake.plot.bar(color='C1', width=0.9);
ax.tick_params(labelrotation=45)
#ax.set(title="An Inconvenient Sequel: 'Truth to Power' is divisive");
#Raw text objects in place of titles
ax.text(x=-0.6,y=50,s="An Inconvenient Sequel: 'Truth to Power' is divisive", fontweight='bold');
ax.text(x=-0.6, y=47, s= "- IMDB ratings for the film as of Aug. 29", fontsize=11);
ax.set(xlabel='Rating', ylabel='Percent of total votes',
yticks = range(0,50,10));
fake2 = pd.Series(
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
2, 2, 2,
3, 3, 3,
4, 4,
5, 5, 5,
6, 6, 6, 6,
7, 7, 7, 7, 7,
8, 8, 8, 8,
9, 9, 9, 9,
10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10])
fake2.value_counts().sort_index().plot.bar(color='C1', width=0.9);
###Output
_____no_output_____
###Markdown
Annotate with text
###Code
display(example)
###Output
_____no_output_____
###Markdown
Reproduce with real data
###Code
df = pd.read_csv('https://raw.githubusercontent.com/fivethirtyeight/data/master/inconvenient-sequel/ratings.csv')
plt.style.use('fivethirtyeight')
fake = pd.Series([38, 3, 2, 1, 2, 4, 6, 5, 5, 33],
index=range(1,11))
ax = fake.plot.bar(color='C1', width=0.9)
ax.tick_params(labelrotation=0)
ax.text(x=-2, y=50, s="'An Inconvenient Sequel: Truth to Power' is divisive",
fontsize=16, fontweight='bold')
ax.text(x=-2, y=46, s= 'IMDb ratings for the film as of Aug. 29')
ax.set(xlabel = 'Rating',
ylabel = 'Percent of total votes',
yticks = range(0, 50, 10));
df.shape
width, height = df.shape
width * height
pd.options.display.max_columns = 500
df['mean minus avg'] = df['mean'] - df['average']
plt.plot(df['respondents'], df['mean minus avg'])
plt.show();
df.timestamp = pd.to_datetime(df.timestamp)
df.timestamp.describe()
df = df.set_index('timestamp')
df['2017-08-29']
df.category.value_counts()
df[df.category=='IMDb users']
lastday = df['2017-08-29']
lastday[lastday.category=='IMDb users']
lastday[lastday.category=='IMDb users'].respondents.plot();
final = df.tail(1)
columns = [str(i) + '_pct' for i in range(1, 11)]
final[columns]
data = final[columns].T
data.index = range(1,11)
plt.style.use('fivethirtyeight')
ax = data.plot.bar(color='C1', width=0.9, legend=False)
ax.tick_params(labelrotation=30)
ax.text(x=-2, y=50, s="'An Inconvenient Sequel: Truth to Power' is divisive",
fontsize=16, fontweight='bold')
ax.text(x=-2, y=46, s= 'IMDb ratings for the film as of Aug. 29')
ax.set(xlabel = 'Rating',
ylabel = 'Percent of total votes',
yticks = range(0, 50, 10));
###Output
_____no_output_____
###Markdown
ASSIGNMENTReplicate the lesson code. I recommend that you [do not copy-paste](https://docs.google.com/document/d/1ubOw9B3Hfip27hF2ZFnW3a3z9xAgrUDRReOEo-FHCVs/edit). STRETCH OPTIONS Reproduce another example from [FiveThityEight's shared data repository](https://data.fivethirtyeight.com/).If you aren't sure what to choose, try:- the chart titled ["Men dominated Al Gore's IMDb movie rating"](https://fivethirtyeight.com/features/al-gores-new-movie-exposes-the-big-flaw-in-online-movie-ratings/)- or the tutorial, [How to Generate FiveThirtyEight Graphs in Python](https://www.dataquest.io/blog/making-538-plots/)Other options include:- [thanksgiving-2015](https://fivethirtyeight.com/features/heres-what-your-part-of-america-eats-on-thanksgiving/) (try the [`altair`](https://altair-viz.github.io/gallery/index.htmlmaps) library)- [candy-power-ranking](https://fivethirtyeight.com/features/the-ultimate-halloween-candy-power-ranking/) (try the [`statsmodels`](https://www.statsmodels.org/stable/index.html) library)- or another example of your choice! Make more charts!Choose a chart you want to make, from [FT's Visual Vocabulary poster](http://ft.com/vocabulary).Find the chart in an example gallery of a Python data visualization library:- [Seaborn](http://seaborn.pydata.org/examples/index.html)- [Altair](https://altair-viz.github.io/gallery/index.html)- [Matplotlib](https://matplotlib.org/gallery.html)- [Pandas](https://pandas.pydata.org/pandas-docs/stable/visualization.html)Reproduce the chart. [Optionally, try the "Ben Franklin Method."](https://docs.google.com/document/d/1ubOw9B3Hfip27hF2ZFnW3a3z9xAgrUDRReOEo-FHCVs/edit) If you want, experiment and make changes.Take notes. Consider sharing your work with your cohort!
###Code
oildf = pd.read_excel('monthly_oil_production.xls')
print (oildf.head())
oildf.shape
oildf.columns = ['Date', 'Total U.S.', 'East Coast', 'Florida', 'New York', 'Pennsylvania', 'Virginia,', 'West Virginia', 'Midwest', 'Illinois', 'Indiana', 'Kansas', 'Kentucky', 'Michigan', 'Missouri', 'Nebraska', 'North Dakota', 'Ohio', 'Oklahoma', 'South Dakota', 'Tennessee', 'Gulf Coast', 'Alabama', 'Arkansas', 'Louisiana', 'Mississippi', 'New Mexico', 'Texas', 'Gulf Offshore', 'Rocky Mountains', 'Colorado', 'Idaho', 'Montana', 'Utah', 'Wyoming', 'West Coast', 'Alaska', 'South Alaska', 'North Alaska', 'Arizona', 'California', 'Nevada', 'West Coast Offshore']
oildf.head()
oildf['Date'] = pd.to_datetime(oildf['Date'])
import datetime
thisMillenia = oildf[oildf['Date'] >= datetime.date(2000, 1, 1)]
thisMillenia.head()
#print (thisMillenia.head())
#thisMillenia['Date'] = thisMillenia['Date'].dt.year
plt.style.use('fivethirtyeight')
#ax = thisMillenia.plot.line(color='C1')
x = thisMillenia['Date']
y = (thisMillenia['Texas'])
y2 = (thisMillenia['North Dakota'])
set_xlim= (2000, 2019)
ylim=((y+y2)*2)
plt.plot(x, y2)
plt.plot(x,y)
plt.xticks(rotation=45, fontsize=10)
plt.legend(['North Dakota', 'Texas'])
plt.xlabel('Year')
plt.ylabel('Thousands of Barrels')
plt.title('Oil Production by Year')
###Output
_____no_output_____ |
eload/eapoligon.ipynb | ###Markdown
Polygons
###Code
from shapely.geometry import Point
from shapely.geometry.polygon import Polygon
import shapely
import matplotlib.pyplot as plt
import random
import numpy as np
w = 0.5
h = 5
f = 10
polygon = Polygon([(-f, -h), (-f+w, -h), (-f+w, h-w), (f+w, h-w), (f+w, -h), (f+2*w, -h), (f+2*w, h), (-f, h)])
x,y = polygon.exterior.xy
plt.plot(x, y, "k*--")
plt.show()
from shapely.geometry import Point
from shapely.geometry.polygon import Polygon
import matplotlib.pyplot as plt
import random
w = 1
h = 10
f = 4
polygon = Polygon([(-f, -h), (-f+w, -h), (-f+w, h-w), (f+w, h-w), (f+w, -h), (f+2*w, -h), (f+2*w, h), (-f, h)])
x,y = polygon.exterior.xy
plt.plot(x, y, "k--")
plt.show()
plt.plot(x, y, "k--")
for _ in range(50):
point = Point(random.uniform(-4,4), random.uniform(-10,10))
if polygon.contains(point):
plt.plot(point.x, point.y, "r*")
else:
plt.plot(point.x, point.y, "bo")
print(polygon.contains(point))
plt.show()
rot = np.pi / 20
t = np.array([[np.cos(rot), np.sin(rot)], [-np.sin(rot), np.cos(rot)]])
a = np.dot(np.transpose([x,y]), t)
polygon_r = Polygon(a)
x_r,y_r = polygon_r.exterior.xy
plt.plot(x_r, y_r, "k--")
for _ in range(50):
point = Point(random.uniform(-10,10), random.uniform(-10,10))
if polygon_r.contains(point):
plt.plot(point.x, point.y, "r*")
else:
plt.plot(point.x, point.y, "bo")
plt.show()
# x, y, width, height, tolerance (margin width), rotation(deg!)
def create_polygon(x, y, w, h, t, rot):
polygon = Polygon([(-w/2-t/2, -h/2), (-w/2+t/2, -h/2), (-w/2+t/2, h/2-t), (w/2-t/2, h/2-t), (w/2-t/2, -h/2), (w/2+t/2, -h/2), (w/2+t/2, h/2), (-w/2-t/2, h/2)])
polygon = shapely.affinity.rotate(polygon, rot)
polygon = shapely.affinity.translate(polygon, x, y)
return polygon
polygon = create_polygon(8, 2, 5, 18, 1.5, -80)
x,y = polygon.exterior.xy
plt.plot(x, y, "k--")
xy = np.loadtxt("data/scan07.csv", delimiter=",")
plt.axis('equal')
plt.plot(xy[:,0], xy[:,1], "b.")
for p in xy:
point = Point(p[0], p[1])
if polygon.contains(point):
plt.plot(point.x, point.y, "r*")
plt.grid()
plt.show()
from shapely.geometry import Point
from shapely.geometry.polygon import Polygon
import shapely
import matplotlib.pyplot as plt
import random
import numpy as np
import time
class ParkingPoly:
polygon = None
# x, y, width, height, tolerance (margin width), rotation(deg!)
def __init__(self, x, y, w, h, t, rot):
self.polygon = Polygon([(-w/2-t/2, -h/2), (-w/2+t/2, -h/2), (-w/2+t/2, h/2-t), (w/2-t/2, h/2-t), (w/2-t/2, -h/2), (w/2+t/2, -h/2), (w/2+t/2, h/2), (-w/2-t/2, h/2)])
self.polygon = shapely.affinity.rotate(self.polygon, rot)
self.polygon = shapely.affinity.translate(self.polygon, x, y)
def getCentroid(self):
return self.polygon.centroid.coords[0]
def getLines(self):
# 2 lines, 4 points
l = self.polygon.exterior.coords[1:5]
l = np.asarray(l)
return l
def plotLines(self):
lxy = self.getLines()
plt.plot(lxy[:, 0], lxy[:, 1], "yo-", linewidth = 4)
def plot(self, style = "--"):
x,y = self.polygon.exterior.xy
plt.plot(x, y, style)
def pointsInside(self, xy):
inside = 0
for p in xy:
point = Point(p[0], p[1])
if self.polygon.contains(point):
inside += 1
return inside
lscan = np.loadtxt("data/scan07.csv", delimiter=",")
best = 0
start = time.time()
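# random search over candidate parking-spot poses: sample a centre (x, y) and a rotation, build a
# 5 x 18 m polygon with a 1.5 m margin, and keep the pose that encloses the most lidar scan points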
for i in range(30):
x = random.uniform(5,9) # x between 5m and 9m
y = random.uniform(1,3)
r = random.uniform(-90, 90) # rotation between 90 ang -90 deg
rand_poly = ParkingPoly(x, y, 5, 18, 1.5, r)
inside = rand_poly.pointsInside(lscan)
if inside > best:
print("New best fitted:", inside)
best = inside
best_poly = rand_poly
rand_poly.plot()
if best > 700:
break
end = time.time()
print("Elapsed time: ", end - start)
best_poly.plotLines()
best_poly.plot("k*--")
plt.axis("equal")
plt.plot(lscan[:,0], lscan[:,1], "b*")
plt.grid()
plt.show()
###Output
New best fitted: 132
New best fitted: 147
New best fitted: 170
New best fitted: 311
New best fitted: 387
Elapsed time: 0.9879753589630127
|
00-notebooks/10-script-test-data-assembler-eo-datasets3.ipynb | ###Markdown
https://raw.githubusercontent.com/digitalearthafrica/scripts/master/misc/collection2cog.py https://github.com/digitalearthafrica/scripts/blob/master/misc/collection2cog.py
###Code
from datetime import datetime
from pathlib import Path
from eodatasets3.assemble import DatasetAssembler
from eodatasets3.prepare.landsat_l1_prepare import get_mtl_content
LANDSAT_OLI_TIRS_BAND_ALIASES = {
"1": "coastal_aerosol",
"2": "blue",
"3": "green",
"4": "red",
"5": "nir",
"6": "swir_1",
"7": "swir_2",
"st_b10": "st_b10",
"thermal_radiance": "thermal_radiance",
"upwell_radiance": "upwell_radiance",
"downwell_radiance": "downwell_radiance",
"atmospheric_transmittance": "atmospheric_transmittance",
"emissivity": "emissivity",
"emissivity_stdev": "emissivity_stdev",
"cloud_distance": "cloud_distance",
"quality_l2_aerosol": "quality_l2_aerosol",
"quality_l2_surface_temperature": "quality_l2_surface_temperature",
"quality_l1_pixel": "quality_l1_pixel",
"quality_l1_radiometric_saturation": "quality_l1_radiometric_saturation",
"metadata_odl": "metadata_odl",
"metadata_xml": "metadata_xml",
}
# Ensure output path exists
output_location = Path("/notebooks/opt/eoi/00-notebooks/data/")
output_location.mkdir(parents=True, exist_ok=True)
# adir = "/g/data/u46/users/dsg547/test_data/collection2/LC08_L2SP_185052_20180104_20190821_02_T1/"
# adir = 's3://ga-africa-provisional/nigeria-2018-08-21/collection2/level2/standard/oli-tirs/2018/185/052/LC08_L2SP_185052_20180104_20190821_02_T1/'
adir = './data/in'
acquisition_path = Path(adir)
#acquisition_path.exists()
paths = list(acquisition_path.rglob("*_MTL.txt"))
mtl, _ = get_mtl_content(acquisition_path, root_element="landsat_metadata_file")
#print (mtl)
with DatasetAssembler(output_location, naming_conventions="dea") as p:
p.properties['eo:instrument'] = mtl['image_attributes']['sensor_id'] # 'OLI_TIRS'
p.properties['eo:platform'] = mtl['image_attributes']['spacecraft_id'].lower() # 'landsat_8'
p.properties['odc:dataset_version'] = '0.0.1'
p.properties['odc:processing_datetime'] = mtl['level2_processing_record']['date_product_generated']
p.properties['odc:producer'] = "usgs.gov" #mtl['product_contents']['origin']
p.properties['odc:product_family'] = mtl['product_contents']['processing_level'].lower() # 'l2sp'
apath = int(mtl['image_attributes']['wrs_path'])
arow = int(mtl['image_attributes']['wrs_row'])
p.properties['odc:region_code'] = f"{apath:03d}{arow:03d}"
dt_string = mtl['image_attributes']['date_acquired'] + ' ' + mtl['image_attributes']['scene_center_time'][:-2]
p.datetime = datetime.strptime(dt_string, '%Y-%m-%d %H:%M:%S.%f')
p.properties['landsat:landsat_scene_id'] = mtl['level1_processing_record']['landsat_scene_id']
# p.write_measurement('b1', '/g/data/u46/users/dsg547/test_data/collection2/LC08_L2SP_185052_20180104_20190821_02_T1/LC08_L2SP_185052_20180104_20190821_02_T1_SR_B1.TIF')
LANDSAT_OLI_TIRS_BAND_ALIASES
dataset_id, metadata_path = p.done()
print (dataset_id)
metadata_path
!cat /notebooks/opt/eoi/00-notebooks/data/usgs_ls8c_l2sp/185/052/2018/01/04/usgs_ls8c_l2sp_0-0-1_185052_2018-01-04.odc-metadata.yaml
%%bash
#ls
bucket_name=deafrica-usgs-c2-data
path=usgs_ls8c_level2_2/157/070/2015/11/08/usgs_ls8c_level2_2-0-20190911_157070_2015-11-08.odc-metadata.yaml
aws s3 cp s3://${bucket_name}/${path} -
###Output
---
# Dataset
$schema: https://schemas.opendatacube.org/dataset
id: 3d5d8b9c-c284-56b5-9b5f-a7567962d392
label: usgs_ls8c_level2_2-0-20190911_157070_2015-11-08
product:
name: usgs_ls8c_level2_2
href: https://collections.dea.ga.gov.au/product/usgs_ls8c_level2_2
crs: epsg:32639
geometry:
type: Polygon
coordinates: [[[241785.0, -1481685.0], [241785.0, -1715415.0], [482415.0, -1715415.0],
[482415.0, -1481685.0], [241785.0, -1481685.0]]]
grids:
default:
shape: [7791, 8021]
transform: [30.0, 0.0, 241785.0, 0.0, -30.0, -1481685.0, 0.0, 0.0, 1.0]
properties:
datetime: 2015-11-08 06:38:57.875828Z
eo:cloud_cover: 68.37
eo:gsd: 30.0 # Ground sample distance (m)
eo:instrument: OLI_TIRS
eo:platform: landsat-8
eo:sun_azimuth: 98.06121879
eo:sun_elevation: 64.46336127
landsat:collection_number: 2
landsat:geometric_rmse_model_x: 4.688
landsat:geometric_rmse_model_y: 6.009
landsat:geometric_rmse_verify: 7.144
landsat:ground_control_points_model: 513
landsat:ground_control_points_verify: 147
landsat:ground_control_points_version: 51
landsat:landsat_product_id: LC08_L1TP_157070_20151108_20190911_02_T1
landsat:landsat_scene_id: LC81570702015312LGN01
landsat:processing_software_version: LPGS_Unknown
landsat:station_id: LGN
landsat:wrs_path: 157
landsat:wrs_row: 70
odc:dataset_version: 2.0.20190911
odc:file_format: GeoTIFF
odc:processing_datetime: 2019-09-11 20:39:46Z
odc:producer: usgs.gov
odc:product_family: level2
odc:region_code: '157070'
measurements:
coastal_aerosol:
path: usgs_ls8c_level2_2-0-20190911_157070_2015-11-08_sr-b1.tif
blue:
path: usgs_ls8c_level2_2-0-20190911_157070_2015-11-08_sr-b2.tif
green:
path: usgs_ls8c_level2_2-0-20190911_157070_2015-11-08_sr-b3.tif
red:
path: usgs_ls8c_level2_2-0-20190911_157070_2015-11-08_sr-b4.tif
nir:
path: usgs_ls8c_level2_2-0-20190911_157070_2015-11-08_sr-b5.tif
swir_1:
path: usgs_ls8c_level2_2-0-20190911_157070_2015-11-08_sr-b6.tif
swir_2:
path: usgs_ls8c_level2_2-0-20190911_157070_2015-11-08_sr-b7.tif
surface_temperature:
path: usgs_ls8c_level2_2-0-20190911_157070_2015-11-08_st-b10.tif
thermal_radiance:
path: usgs_ls8c_level2_2-0-20190911_157070_2015-11-08_thermal-radiance.tif
upwell_radiance:
path: usgs_ls8c_level2_2-0-20190911_157070_2015-11-08_upwell-radiance.tif
downwell_radiance:
path: usgs_ls8c_level2_2-0-20190911_157070_2015-11-08_downwell-radiance.tif
atmospheric_transmittance:
path: usgs_ls8c_level2_2-0-20190911_157070_2015-11-08_atmospheric-transmittance.tif
emissivity:
path: usgs_ls8c_level2_2-0-20190911_157070_2015-11-08_emissivity.tif
emissivity_stdev:
path: usgs_ls8c_level2_2-0-20190911_157070_2015-11-08_emissivity-stdev.tif
cloud_distance:
path: usgs_ls8c_level2_2-0-20190911_157070_2015-11-08_cloud-distance.tif
quality_l2_aerosol:
path: usgs_ls8c_level2_2-0-20190911_157070_2015-11-08_quality-l2-aerosol.tif
quality_l2_surface_temperature:
path: usgs_ls8c_level2_2-0-20190911_157070_2015-11-08_quality-l2-surface-temperature.tif
quality_l1_pixel:
path: usgs_ls8c_level2_2-0-20190911_157070_2015-11-08_quality-l1-pixel.tif
quality_l1_radiometric_saturation:
path: usgs_ls8c_level2_2-0-20190911_157070_2015-11-08_quality-l1-radiometric-saturation.tif
accessories:
metadata:landsat_mtl:
path: LC08_L2SP_157070_20151108_20190911_02_T1_MTL.txt
checksum:sha1:
path: usgs_ls8c_level2_2-0-20190911_157070_2015-11-08.sha1
metadata:processor:
path: usgs_ls8c_level2_2-0-20190911_157070_2015-11-08.proc-info.yaml
lineage: {}
...
|
wk6May18/IntroToParaView.ipynb | ###Markdown
First and foremost, you should have already installed ParaView on your machine. If not, you can [download the package](https://www.paraview.org/download/) from [paraview.org](https://www.paraview.org). An Introduction to ParaView**version 0.1*****Adam A Miller (Northwestern CIERA/Adler Planetarium)01 May 2018 Our data do not live in 2 dimensions... [As the third grade version of me would say, *"Ummmm, Mr. Miller? Duh!"*] And yet, a fundamental limitation we that consisently place upon ourselves is to insist that we represent the data in 2D when communicating with the public and our colleagues. In a sense, we are doing science with 1 arm tied behind our backs... Our data also do not live in 3 dimensions.But I can think of a distinct advantage to moving to 3D representations: our spatial representation of the Universe can be fully described in 3 dimensions. Thus, if we want to describe the positions of stars in a cluster (*Gaia*?), or show variations in density within a Giant Molecular Cloud, or examine the surface of a minor planet or asteroid, these tasks are all acomplished much better in 3D. (ignoring special relativity for now...) Why interactive?It would not be unreasonable to take the following stance: the heavy lifting in exploring a data set should be done by the researchers. In this sense, "final" plots or renditions can be presented to an audience via a slide or paper that summarize all of the salient features. What if, however, you wish to enable science, or discovery, for amatuers (e.g., the [Zooniverse](https://www.zooniverse.org/), or even in the classroom (see James's talk from yesterday)? It is unfair to ask this audience to calculate integrals, or to develop even a small fraction of the domain expertise that you and your colleagues (PhDs and PhD candidates) have. Interactivity provides a natural way for this audience to explore the data. Furthermore, it may even *help you* in your quest for discovery and understaning. So, today –– [ParaView](https://www.paraview.org/) ParaView is an open source platform that was specifically designed for data analysis and visualization. Briefly, ParaView is interesting (for us) for several reasons: 1. General purpose and can easily be used on laptop or HPC 2. Naturally utilizes multiple processors for handling large data sets (in particular 3D) 3. Includes a scripting interface (via Python) Ultimately, we want to turn data (images, measurements, spectra, etc) into some rendering to represent the data in a way that provides insight or better understanding [for all viz, not just ParaView].ParaView takes VTK, the Visualization Toolkit, data as an input (later you will create some VTK files). VTK uses a basic data-flow paradigm, in which data flows through the system while being transformed at each step [via modules known as algorithms]. Algorithms have input ports to take data, and output ports to produce output: * Sources do not have input ports, but have outputs [i.e. to get data into the system, e.g., reading a file] * Sinks convert input into graphics such that they can be rendered * Filters are intermediate algorithms, that convert input into output Connecting sources, sinks, and filters can create arbitrarily complicated visual renderings. (credit: ParaView Guide) Following this brief introduction, we will start the visual exploration with ParaView. Problem 1) Creating an Interactive SphereWe will begin by creating a simple 3D object (a sphere). 
While there is nothing spectacular about a sphere, I'll note that it is not particularly easy to represent a(n interactive) sphere in `matplotlib`. **Problem 1a**Open paraview. Create a sphere. [*Sources $\rightarrow$ Sphere*] At this stage you will notice that nothing is yet visible in the layout panel. However, a few things have happened.There is now a pipeline module in the pipeline browser (upper left panel). Properties of the sphere can be adjusted in the properties panel (lower left). Finally, the *Apply* button in the properties panel is now active. While this is not the case for this particular data set, because data ingestion and manipulation can be highly time consuming, ParaView allows you to perform those operations prior to rendering the data. The *Apply* button allows you to accept those changes before proceeding. **Problem 1b**Render the sphere [click *Apply*].Use your mouse to inspect, rotate, and examine the sphere. **Problem 1c**Adjust the center of the sphere to the position [0,0,1], and adjust the radius to 1. **Problem 1d**Make the sphere appear more "spherical" by adjusting theta resolution to 100. There are a variety of filters that can be applied to the data (in the *Filters* menu). The available filters are dependent on the data type provided to ParaView.**Problem 1e**Shrink the size of the mesh cells on the surface of the sphere. [*Filters* $\rightarrow$ *Alphabetical* $\rightarrow$ *Shrink*]Can you see the shrunken mesh cells? Problem 2) Python Scripting a SphereParaView provides a python scripting interface. The package, `pvpython`, makes it possible to script up everything that one might want to do with paraview, using `python` (!). The ability to script these tasks is hugely important for reproducibility (and more advanced versions of automated analysis). Unfortunately, (as far as I can tell) `pvpython` is only compatible with `python` version 2.x, and it will not run within our DSFP conda environment. Forunately, ParaView ships with an internal `python` interpreter, so we are going to use that for now. **Problem 2a**Open the `python` interpreter. [*View $\rightarrow$ Python Shell*]This will open a `python` (likely v2.7.10) instance just below the layout panel. Unfortunately, at this stage there are many panels, and not a lot of room to inspect the visualization. Ideally a lot of this work would be done on larger screens, but we will for now work with what we have.[If space is really at a premium on your screen, you can remove the pipeline browser and properties window as everything will be scripted for the remainder of this problem.] Before we proceed - remove the previously created sphere from the layout panel. [Click on Shrink, then click the *delete* button in the properties tab. Click on Sphere1, then click the *delete* button in the properties tab.] **Problem 2b**Create the sphere data source using the `python` interpreter. Sphere() As before, we have created an active source in the pipeline. However, the sphere has not been rendered.**Problem 2c**Show the active source. Show() prepare the display Render() render the visualization **Problem 2d**Adjust the properties of the sphere to match those in **Problem 1**. SetProperties(radius=1.0) SetProperties(Center=[0,0,1]) In addition to `SetProperties` for the data, you can also use `SetDisplayProperties` to adjust the geometric represenation of the data. **Problem 2e**Set the opacity of the sphere to 0.2. SetDisplayProperties(0.2) As before, we can also create filters via `pvpython`. 
Again, we will shrink the size of the mesh cells on the sphere. **Problem 2f**Shrink the mesh cells on the surface of the sphere (using only python commands). Shrink()*Hint* - don't forget to render the object. Does this look like the rendering that we created previously?When scripting ParaView, the input data set is not automatically hidden after creating a new output. Instead, these actions must be done explicitly. There are a few different ways to handle this (below is an example that leverages an object oriented approach – meaning you need to start over to follow this). **Problem 2g**Using python remove the sphere instance from the rendition of the sphere. sphereInstance = Sphere() sphereInstance.Radius = 1.0 sphereInstance.Center[2] = 1.0 print sphereInstance.Center sphereDisplay = Show(sphereInstance) view = Render () sphereDisplay.Opacity = 0.7 Render(view) shrinkInstance = Shrink(Input=sphereInstance , ShrinkFactor=1.0) print shrinkInstance.ShrinkFactor Hide(sphereInstance) shrinkDisplay = Show(shrinkInstance) Render() Problem 3) Scripting An Interactive SphereFinally, it is possible to capture the commands executed via the GUI as a python script. In this way it is easy to reproduce an interactive session. **Problem 3a**Trace your interactive commands [*Tools $\rightarrow$ Start Trace*] **Problem 3b**Recrete a sphere following the steps from **Problem 1**.How does your trace compare to the script that you developed in **Problem 2**? **Break Out Problem**Of the 3 modes covered in Problems 1, 2, and 3 - which is to be preferred?[*spend a few minutes discussing with your neighbor*] Problem 4) Getting Data Into ParaView Spheres are all well and good, but what we truly care about is visualizing our astronomical data.Unfortunately, this is not as simple as `plot(x,y)` or even `plot(x,y,z)` as you might expect for something designed to play nicely with `python`. Instead, we need to package the data as [Visualization Toolkit](https://www.vtk.org), or vtk, files.Even more unfortunately, vtk are binary files that are known to be somewhat challenging to work with.Nevertheless, many of the most common/powerful 3D rendering programs (ParaView, Mayavi, etc), utilize vtk data. If I am being completely honest – and I always try to be completely honest – I don't know a lot about vtk, or why it's an especially useful format for these programs. Nevertheless, I will try to provide you with some insight (i.e. repeat what other people have said) on the vtk format. Briefly, the vtk representation of data requires the specification of a geometry or topology, and then the data set properties are specified within the chosen geometry. vtk can handle points or cells data in five different formats: structured points, structured grid, rectilinear grid, unstructured grid, and polygonal data. Structured points is the most simple format, wherein only the mesh dimensions (nx, ny, nz), mesh origin (x0, y0, z0), and cell dimensions (dx, dy, dz) are specified. (credit: https://www.vtk.org/Wiki/VTK/Writing_VTK_files_using_python) Rectilinear grid is also regularly spaced, but the spacing is not uniform. Thus, nodes must be specified along the given axes Ox, Oy, and Oz. (credit: https://www.vtk.org/Wiki/VTK/Writing_VTK_files_using_python) Structured grid is not regular or uniform. Thus, the nodes for every point within the mesh must be specified. 
 (credit: https://www.vtk.org/Wiki/VTK/Writing_VTK_files_using_python) Unstructured grid is a structured grid, but it can handle cell data (not just point data). Polygonal data is the most complicated (and thus provides a great deal of representational freedom), but we are going to ignore this for now. As I mentioned previously, formatting data as vtk files is a bit of a pain. Fortunately, there is a python package `pyevtk` that makes the process relatively painless. You should have already installed `pyevtk`, but if not you can run: pip install pyevtk
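As a quick illustration of the simplest case above (structured points, i.e. a regularly spaced, image-like mesh), a minimal sketch with `pyevtk` might look like the following — the array and file names are just placeholders:

    from pyevtk.hl import imageToVTK
    import numpy as np
    density = np.random.rand(30, 30, 30)   # one value per cell of a 30 x 30 x 30 mesh
    imageToVTK("./structured_points", origin=(0.0, 0.0, 0.0), spacing=(0.1, 0.1, 0.1),
               cellData={"density": density})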
###Code
from pyevtk.hl import gridToVTK, pointsToVTK
###Output
_____no_output_____
###Markdown
We will start with a relatively straightforward example of creating a collection of points in 3 dimensions.
**Problem 4a**
Create 3 arrays, `x, y, z`, of 500 points each that consist of random draws from independent gaussian distributions with mean 0 and standard deviation 20.
Evaluate the temperature and density on the grid provided below.
$$T = 2.4/x + 0.04y^2 + 5/z$$
$$\rho = 1.11x^2 + 3.1y + 0.6z^2$$
###Code
np.random.seed(23)
x = # complete
y = # complete
z = # complete
temp = # complete
pressure = # complete
###Output
_____no_output_____
###Markdown
**Problem 4b**Execute the cell below to create a .vtu file that can be read by ParaView.Open the file in ParaView. Represent the pressure data as *Point Gaussian*, and change the color to match that of the data.From the ParaView render, how would you describe the pressure of the system? What about the temperature?
###Code
pointsToVTK("./points", x, y, z, data = {"temp" : temp, "pressure" : pressure})
###Output
_____no_output_____
###Markdown
I can already think of a few ways in which this representation would be useful (especially now that there is Gaia data available), but the real power of ParaView comes via the volume renderings (which require "cell" data as opposed to "point" data). We will start with random data on a structured grid. Recall that this means we need to specify the mesh dimensions and cell widths before evaluating data on the grid. [A more interesting example follows]
###Code
nx, ny, nz = 6, 6, 2 # number of cells
lx, ly, lz = 1.0, 1.0, 1.0 # length on each side
dx, dy, dz = lx/nx, ly/ny, lz/nz # size of the cells
ncells = nx * ny * nz # total number of cells
npoints = (nx + 1) * (ny + 1) * (nz + 1) # points defined by boundaries, hence n + 1
###Output
_____no_output_____
###Markdown
From here we can establish the coordinates of the cell and the point data.
###Code
x = np.arange(0, lx + 0.1*dx, dx, dtype='float64')
y = np.arange(0, ly + 0.1*dy, dy, dtype='float64')
z = np.arange(0, lz + 0.1*dz, dz, dtype='float64')
###Output
_____no_output_____
###Markdown
**Problem 4c**
Assign random values for the pressure and temperature and export the data to a file called structure.vtr. Open structure.vtr in ParaView. Represent the data as a surface with edges, and color the pressure data.
*Note* - pay attention to the shape of the pressure and temperature data.
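One possible sketch (the key point is the shapes: cell data needs one value per cell, point data one value per mesh node):

    pressure = np.random.rand(ncells).reshape((nx, ny, nz))
    temp = np.random.rand(npoints).reshape((nx + 1, ny + 1, nz + 1))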
###Code
pressure = # complete
temp = # complete
gridToVTK("./structured", x, y, z, cellData = {"pressure" : pressure}, pointData = {"temp" : temp})
###Output
_____no_output_____
###Markdown
Now we will create a slightly more complicated volume rendition of the pressure and temperature.
**Problem 4d**
Create a grid with 25 cells on a side and equal size cells of length = 0.4. Create cell data for the pressure and point data for the temperature according to the functions given below.
$$\rho = \left(\cos(x) + \cos(y) + \cos(z)\right)e^{-(x + y + z)^2/5}$$
$$T = x + y + z$$
*Hint* - avoid writing any loops for this problem.
###Code
nx, ny, nz = 25, 25, 25 # number of cells
lx, ly, lz = 10, 10, 10 # length on each size
dx, dy, dz = lx/nx, ly/ny, lz/nz # size of the cells
x = # complete
y = # complete
z = # complete
# complete
# complete
# complete
# complete
# complete
pressure = # complete
###Output
_____no_output_____
###Markdown
**Problem 4e**
Create a vtk file with the pressure and temperature data. Open the resulting file in ParaView. Examine the volume rendering of the data. Does the visualization make sense given the input data?
*Hint* - the x, y, and z coordinates should all be specified as 3d arrays.
###Code
gridToVTK("./structured", xt, yt, zt, cellData = {"pressure" : pressure}, pointData = {"temp" : temp})
###Output
_____no_output_____
###Markdown
For simplicity, we have focused on the most ordered methods of producing vtk files. As we have demonstrated, `pyevtk` provides a simple interface to convert `NumPy` arrays into vtk binary files.
Problem 5) Example Data Analysis - K means
In addition to 3D rendering, ParaView provides utilities for performing (light) statistical analysis on data sets. As a demonstration of this, we will revisit one of our commonly used data sets (the famous Iris machine learning data).
Load the iris data into a `pandas` `DataFrame` via seaborn.
###Code
import seaborn as sns
iris = sns.load_dataset("iris")
###Output
_____no_output_____
###Markdown
**Problem 5a**
As a point of comparison, quickly visualize this data in 2D.
*Hint* - [`seaborn`](https://seaborn.pydata.org/index.html) makes this easy and possible with a single line of code.
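One possible single line (a sketch; any pairwise view of the four features, colored by species, will do):

    sns.pairplot(iris, hue="species")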
###Code
# complete
# complete
# complete
###Output
_____no_output_____
###Markdown
As you can see: the iris data features 3 classes in a four dimensional data set. We will now prepare this data for visualization in ParaView.
**Problem 5b**
Select 3 of the variables to serve as the (x, y, z) spatial coordinates for the data set. Note - the specific selection does not particularly matter, though 2 features are clearly better than the others.
###Code
x = # complete
y = # complete
z = # complete
###Output
_____no_output_____
###Markdown
**Problem 5c**
Export the iris data as a vtk file for ParaView. Include both the species *and* the four features as data for the vtk file. Finally, be sure to randomize the order of the data prior to writing the vtk file.
*Hint 1* - convert the species to a numerical representation.
*Hint 2* - you may find [`np.random.choice`](https://docs.scipy.org/doc/numpy/reference/generated/numpy.random.choice.html) helpful.
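A sketch of the two hinted steps (the variable names are illustrative):

    species = iris["species"].astype("category").cat.codes.values.astype(np.float64)
    shuffle = np.random.choice(len(iris), size=len(iris), replace=False)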
###Code
species = # complete
petal_width = # complete
petal_length = # complete
sepal_width = # complete
sepal_length = # complete
# complete
pointsToVTK("./iris", # complete
# complete
# complete
# complete
# complete
# complete
###Output
_____no_output_____ |
8_common_data_structures/Queue/Queue.ipynb | ###Markdown
Author: OMKAR PATHAK
What is a Queue?
* A queue is a data structure used for storing data (similar to Linked Lists and stacks). In a queue, the order in which data arrives is important.
* A queue is an ordered list in which insertions are done at one end (rear) and deletions are done at the other end (front). The first element to be inserted is the first one to be deleted. Hence, it is called a First In First Out (FIFO) or Last In Last Out (LILO) list.
* For example, our labs have 30 computers networked with a single printer. When students want to print, their print tasks "get in line" with all the other printing tasks that are waiting. The first task in is the next to be completed.
Applications of Queue:
* When a resource is shared among multiple consumers. Examples include CPU scheduling, Disk Scheduling.
* When data is transferred asynchronously (data not necessarily received at same rate as sent) between two processes. Examples include IO Buffers, pipes, file IO, etc.
* Keyboard: As we type, sometimes keystrokes get ahead of the characters that appear on the screen. This is due to the computer doing other work at that moment. The keystrokes are being placed in a queue-like buffer so that they can eventually be displayed on the screen in the proper order.
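For comparison, Python's built-in `collections.deque` provides the same FIFO behaviour with O(1) appends and pops at both ends (a quick sketch):

    from collections import deque
    q = deque()
    q.append('a'); q.append('b')   # enqueue at the rear
    q.popleft()                    # dequeue from the front -> 'a'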
###Code
class Queue(object):
def __init__(self, limit = 10):
self.queue = []
self.front = None
self.rear = None
self.limit = limit
self.size = 0
def __str__(self):
return ' '.join([str(i) for i in self.queue])
# to check if queue is empty
def isEmpty(self):
return self.size <= 0
# to add an element from the rear end of the queue
def enqueue(self, data):
if self.size >= self.limit:
return -1 # queue overflow
else:
self.queue.append(data)
# assign the rear as size of the queue and front as 0
if self.front is None:
self.front = self.rear = 0
else:
self.rear = self.size
self.size += 1
# to pop an element from the front end of the queue
def dequeue(self):
if self.isEmpty():
return -1 # queue underflow
else:
            self.queue.pop(0) # remove from the front of the queue (FIFO)
self.size -= 1
if self.size == 0:
self.front = self.rear = 0
else:
self.rear = self.size - 1
def getSize(self):
return self.size
myQueue = Queue()
for i in range(10):
myQueue.enqueue(i)
print(myQueue)
print('Queue Size:',myQueue.getSize())
myQueue.dequeue()
print(myQueue)
print('Queue Size:',myQueue.getSize())
###Output
0 1 2 3 4 5 6 7 8 9
Queue Size: 10
1 2 3 4 5 6 7 8 9
Queue Size: 9
|
end-to-end-heart-disease-classificaion.ipynb | ###Markdown
Predicting heart disease using machine learning
This notebook looks into using various Python-based machine learning and data science libraries in an attempt to build a machine learning model capable of predicting whether or not someone has heart disease based on their medical attributes.
We're going to take the following approach:
1. Problem definition
2. Data
3. Evaluation
4. Features
5. Modelling
6. Experimentation
1. Problem definition
In a statement,
> Given clinical parameters about a patient, can we predict whether or not they have heart disease?
2. Data
The original data came from the Cleveland database of the UCI Machine Learning Repository.
https://www.kaggle.com/ronitf/heart-disease-uci
3. Evaluation
> If we can reach 95% accuracy at predicting whether or not a patient has heart disease during the proof of concept, we'll pursue the project.
4. Features
This is where you'll get different information about each of the features in your data.
**Create data dictionary**
1. age - age in years
2. sex - (1 = male; 0 = female)
3. cp - chest pain type
   * 0: Typical angina: chest pain related to decreased blood supply to the heart
   * 1: Atypical angina: chest pain not related to the heart
   * 2: Non-anginal pain: typically esophageal spasms (non heart related)
   * 3: Asymptomatic: chest pain not showing signs of disease
4. trestbps - resting blood pressure (in mm Hg on admission to the hospital); anything above 130-140 is typically cause for concern
5. chol - serum cholesterol in mg/dl
   * serum = LDL + HDL + .2 * triglycerides
   * above 200 is cause for concern
6. fbs - (fasting blood sugar > 120 mg/dl) (1 = true; 0 = false)
   * '>126' mg/dL signals diabetes
7. restecg - resting electrocardiographic results
   * 0: Nothing to note
   * 1: ST-T Wave abnormality
     * can range from mild symptoms to severe problems
     * signals non-normal heart beat
   * 2: Possible or definite left ventricular hypertrophy
     * Enlarged heart's main pumping chamber
8. thalach - maximum heart rate achieved
9. exang - exercise induced angina (1 = yes; 0 = no)
10. oldpeak - ST depression induced by exercise relative to rest; looks at the stress of the heart during exercise; an unhealthy heart will stress more
11. slope - the slope of the peak exercise ST segment
    * 0: Upsloping: better heart rate with exercise (uncommon)
    * 1: Flatsloping: minimal change (typical healthy heart)
    * 2: Downsloping: signs of an unhealthy heart
12. ca - number of major vessels (0-3) colored by fluoroscopy
    * colored vessel means the doctor can see the blood passing through
    * the more blood movement the better (no clots)
13. thal - thallium stress result
    * 1,3: normal
    * 6: fixed defect: used to be a defect but ok now
    * 7: reversible defect: no proper blood movement when exercising
14. target - have disease or not (1=yes, 0=no) (= the predicted attribute)
Preparing the tools
We're going to use pandas, Matplotlib and NumPy for data analysis and manipulation.
###Code
# Import all the tools we need
# Regular EDA (exploratory data analysis) and plotting libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# we want our plots to appear inside the notebook
%matplotlib inline
# Models from Scikit-Learn
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
# Model Evaluations
from sklearn.model_selection import train_test_split
from sklearn.model_selection import RandomizedSearchCV, GridSearchCV
from sklearn.model_selection import cross_val_score
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.metrics import precision_score, recall_score, f1_score
from sklearn.metrics import plot_roc_curve
df = pd.read_csv("heart-disease.csv")
df
df.shape
###Output
_____no_output_____
###Markdown
Data Exploration (exploratory data analysis or EDA)The goal here is to find out more about the data and become a subject matter expert on the dataset you're working with.1. What question(s) are you trying to solve?2. What kind of data do we have and how do we treat different types?3. What's missing from the data and how do we deal with it?4. Where are the outliers and why should you care about them?5. How can you add, change or remove features to get more out of your data?
###Code
df.head()
df.tail()
# Let's find out how many of each class there are
df["target"].value_counts()
df["target"].value_counts().plot(kind="bar", color=["salmon", "lightblue"])
df.isna().sum()
df.describe()
###Output
_____no_output_____
###Markdown
Heart Disease Frequency according to Sex
###Code
df.sex.value_counts()
# Compare target column with sex column
pd.crosstab(df.target, df.sex)
# Create a plot of crosstab
pd.crosstab(df.target, df.sex).plot(kind="bar",
figsize=(10, 6),
color=["salmon", "lightblue"])
plt.title("Heart Disease Frequency for Sex")
plt.xlabel("0 = No Disease, 1 = Disease")
plt.ylabel("Amount")
plt.legend(["Female", "Male"])
plt.xticks(rotation=0);
###Output
_____no_output_____
###Markdown
Age vs. Max Heart Rate for Heart Disease
###Code
# Create another figure
plt.figure(figsize=(10, 6))
# Scatter with positive examples
plt.scatter(df.age[df.target==1],
df.thalach[df.target==1],
c='salmon')
# Scatter with negative examples
plt.scatter(df.age[df.target==0],
df.thalach[df.target==0],
c='lightblue')
# Add some helpful info
plt.title("Heart Disease in function of Age and Max Heart Rate")
plt.xlabel("Age")
plt.ylabel("Max Heart Rate")
plt.legend(["Disease", "No Disease"]);
# Check the distribution of the age columns with a histogram
df.age.plot.hist();
###Output
_____no_output_____
###Markdown
Heart Disease Frequency per Chest Pain Type3. cp - chest pain type * 0: Typical angina: chest pain related to decreased blood supply to the heart * 1: Atypical angina: chest pain not related to the heart * 2: Non-anginal pain: typically esophageal spasms (non heart related) * 3: Asymptomatic: chest pain not showing signs of disease
###Code
pd.crosstab(df.cp, df.target)
# Make the crosstab more visual
pd.crosstab(df.cp, df.target).plot(kind='bar',
figsize=(10, 6),
color=['salmon', 'lightblue'])
# Add some communication
plt.title("Heart Disease Frequency Per Chest Pain Type")
plt.xlabel('Chest Pain Type')
plt.ylabel('Amount')
plt.legend(['No Disease', 'Disease'])
plt.xticks(rotation=0)
df.corr()
# Let's make it pretty
corr_mat = df.corr()
fig, ax = plt.subplots(figsize=(15, 10))
ax = sns.heatmap(corr_mat,
annot=True,
linewidth=0.5,
fmt='.2f',
cmap='YlGnBu');
bottom, top = ax.get_ylim()
ax.set_ylim(bottom + 0.5, top - 0.5)
###Output
_____no_output_____
###Markdown
5. Modelling
###Code
# Split data into x and y
x = df.drop("target", axis=1)
y = df["target"]
# Split data into train and test sets
np.random.seed(42)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2)
###Output
_____no_output_____
###Markdown
Now we've got our data split into training and test sets, it's time to build a machine learning model.We'll train it (find the patterns) on the training set.And we'll test it (use the patterns) on the test set.We're going to try 3 different machine learning models:1. Logistic Regression2. K-Nearest Neighbours Classifier3. Random Forest Classifier
###Code
# Put model in a dictionary
models = {"Logistic Regression": LogisticRegression(),
"KNN": KNeighborsClassifier(),
"Random Forest": RandomForestClassifier()}
# Create a funtion to fit and score models
def fit_and_score(models, x_train, x_test, y_train, y_test):
"""
Fits and evaluate given machine learning models.
models: a dict of different SciKit-Learn machine learning models
x_train: training data (no labels)
x_test: testing data (no labels)
y_train: training labels
y_test: test labels
"""
# Set random seed
np.random.seed(42)
# Make a dictionary to keep model scores
model_score = {}
# Loop through models
for name, model in models.items():
# Fit the model to the data
model.fit(x_train, y_train)
# Evaluate the model and append its score to model_score
model_score[name] = model.score(x_test, y_test)
return model_score
model_score = fit_and_score(models=models,
x_train=x_train,
x_test=x_test,
y_train=y_train,
y_test=y_test)
model_score
###Output
D:\Anaconda 3\lib\site-packages\sklearn\linear_model\_logistic.py:764: ConvergenceWarning: lbfgs failed to converge (status=1):
STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.
Increase the number of iterations (max_iter) or scale the data as shown in:
https://scikit-learn.org/stable/modules/preprocessing.html
Please also refer to the documentation for alternative solver options:
https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression
extra_warning_msg=_LOGISTIC_SOLVER_CONVERGENCE_MSG)
###Markdown
Model Comparison
###Code
model_compare = pd.DataFrame(model_score, index=["accuracy"])
model_compare.T.plot.bar()
###Output
_____no_output_____
###Markdown
Now we've got a baseline model... and we know a model's first predictions aren't always what we should base our next steps on. What should we do?Let's look at the following:* Hyperparameter tuning* Feature importance* Confusion matrix* Cross-validation* Precision* Recall* F1 score* Classification report* ROC curve* Area under curve (AUC) Hyperparameter tuning (by hand)
###Code
# Let's tune KNN
train_scores = []
test_scores = []
# Create a list of different values of n_neighbors
neighbors = range(1, 21)
# Setup KNN instance
knn = KNeighborsClassifier()
# Loop through different n_neighbors
for i in neighbors:
knn.set_params(n_neighbors=i)
# Fit the algorithm
knn.fit(x_train, y_train)
# Update the training scores list
train_scores.append(knn.score(x_train, y_train))
# Update the test scores list
test_scores.append(knn.score(x_test, y_test))
train_scores
test_scores
plt.plot(neighbors, train_scores, label="Train Score")
plt.plot(neighbors, test_scores, label="Test Score")
plt.xlabel("Number of neighbors")
plt.ylabel("Model score")
plt.legend()
plt.xticks(np.arange(1, 21, 1))
print(f"Maximum KNN score on the test data: {max(test_scores)*100:.2f}%")
###Output
Maximum KNN score on the test data: 75.41%
###Markdown
Hyperparameter tuning with RandomizedSearchCVWe're going to tune:* LogisticRegression()* RandomForestClassifier()...using RandomizedSearchCV
###Code
# Create a hyperparameter grid for LogisticRegression
log_reg_grid = {"C": np.logspace(-4, 4, 20),
"solver": ["liblinear"]}
# Create a hyperparameter grid for RandomForestClassifier
rf_grid = {"n_estimators": np.arange(10, 1000, 50),
"max_depth": [None, 3, 5, 10],
"min_samples_split": np.arange(2, 20, 2),
"min_samples_leaf": np.arange(1, 20, 2)}
###Output
_____no_output_____
###Markdown
Now we've got hyperparameter grids set up for each of our models, let's tune them using RandomizedSearchCV...
###Code
# Tune LogisticRegression
np.random.seed(42)
# Setup random hyperparameter search for LogisticRegression
rs_log_reg = RandomizedSearchCV(LogisticRegression(),
param_distributions=log_reg_grid,
cv=5,
n_iter=20,
verbose=True)
# Fit random hyperparameter search model for LogisticRegression
rs_log_reg.fit(x_train, y_train)
rs_log_reg.best_params_
rs_log_reg.score(x_test, y_test)
###Output
_____no_output_____
###Markdown
Now we've tuned LogisticRegression(), let's do the same for RandomForestClassifier()...
###Code
# Setup random seed
np.random.seed(42)
# Setup random hyperparameter search for RandomForestClassifier
rs_rf = RandomizedSearchCV(RandomForestClassifier(),
param_distributions=rf_grid,
cv=5,
n_iter=20,
verbose=True)
# Fit random hyperparameter search model for RandomForestClassifier
rs_rf.fit(x_train, y_train)
rs_rf.best_params_
rs_rf.score(x_test, y_test)
model_score
###Output
_____no_output_____
###Markdown
Hyperparameter Tuning with GridSearchCVSince our LogisticRegression model provides the best scores so far, we'll try and improve them again using GridSearchCV...
###Code
# Different hyperparameters for our LogisticRegression model
log_reg_grid = {"C": np.logspace(-4, 4, 30),
"solver": ["liblinear"]}
# Setup random hyperparameter search for LogisticRegression
gs_log_reg = GridSearchCV(LogisticRegression(),
param_grid=log_reg_grid,
cv=5,
verbose=True)
# Fit grid hyperparameter search model for LogisticRegression
gs_log_reg.fit(x_train, y_train);
gs_log_reg.best_params_
gs_log_reg.score(x_test, y_test)
model_score
###Output
_____no_output_____
###Markdown
Evaluating our tuned machine learning classifier, beyond accuracy* ROC curve and AUC score* Confusion matrix* Classification report* Precision * Recall* F1-score... and it would be great if cross-validation was used where possible.To make comparisons and evaluate our trained model, first we need to make predictions.
###Code
# Make predictions with tuned model
y_preds = gs_log_reg.predict(x_test)
y_preds
y_test
# Plot ROC curve and calculate AUC metric
plot_roc_curve(gs_log_reg, x_test, y_test)
# Confusion matrix
confusion_matrix(y_test, y_preds)
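# Cross-check against the classification report below (a manual sketch, assuming
# sklearn's binary confusion matrix layout [[TN, FP], [FN, TP]] for labels 0 and 1)
tn, fp, fn, tp = confusion_matrix(y_test, y_preds).ravel()
manual_precision = tp / (tp + fp)
manual_recall = tp / (tp + fn)
manual_f1 = 2 * manual_precision * manual_recall / (manual_precision + manual_recall)
manual_precision, manual_recall, manual_f1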
sns.set(font_scale=1.5)
def plot_conf_mat(y_test, y_preds):
"""
Plot a nice looking confusion matrix using Seaborn's heatmap()
"""
fig, ax = plt.subplots(figsize=(3, 3))
ax = sns.heatmap(confusion_matrix(y_test, y_preds),
annot=True,
cbar=False
)
plt.xlabel("True label")
plt.ylabel("Predicted label")
bottom, top = ax.get_ylim()
ax.set_ylim(bottom + 0.5, top - 0.5)
plot_conf_mat(y_test, y_preds)
print(classification_report(y_test, y_preds))
###Output
precision recall f1-score support
0 0.89 0.86 0.88 29
1 0.88 0.91 0.89 32
accuracy 0.89 61
macro avg 0.89 0.88 0.88 61
weighted avg 0.89 0.89 0.89 61
###Markdown
Calculate evaluation metrics using cross-validationWe're going to calculate accuracy, precision, recall, and f1-score of our model using cross-validation and to do so we'll be using `cross_val_score()`.
###Code
# Check best hyperparameters
gs_log_reg.best_params_
# Check a new classifier with best parameters
clf = LogisticRegression(C=0.20433597178569418,
solver='liblinear')
# Cross-validated accuracy
cv_acc = cross_val_score(clf,
x,
y,
cv=5,
scoring="accuracy")
cv_acc
cv_acc = np.mean(cv_acc)
cv_acc
# Cross-validated precision
cv_prec = cross_val_score(clf,
x,
y,
cv=5,
scoring="precision")
cv_prec = np.mean(cv_prec)
cv_prec
# Cross-validated recall
cv_recall = cross_val_score(clf,
x,
y,
cv=5,
scoring="recall")
cv_recall = np.mean(cv_recall)
cv_recall
# Cross-validated f1-score
cv_f1 = cross_val_score(clf,
x,
y,
cv=5,
scoring="f1")
cv_f1 = np.mean(cv_f1)
cv_f1
# visualise cross-validated metrics
cv_metric = pd.DataFrame({"Accuracy": cv_acc,
"Precision": cv_prec,
"Recall": cv_recall,
"F1": cv_f1},
index=[0])
cv_metric.T.plot.bar(title="Cross-Validated Classification Report",
legend=False)
###Output
_____no_output_____
###Markdown
Feature ImportanceFeature importance is another way of asking, "which features contributed most to the outcomes of the model and how did they contribute?"Finding feature importance is different for each machine learning model. One way to find feature importance is to search for "(MODEL NAME) feature importance".Let's find the feature importance for our LogisticRegression model...
###Code
# Fit an instance of LogisticRegression
clf = LogisticRegression(C=0.20433597178569418,
solver="liblinear")
clf.fit(x_train, y_train);
# Check coef_
clf.coef_
# Match coef's of feature to columns
feature_dict = dict(zip(df.columns, list(clf.coef_[0])))
feature_dict
# Visualize feature importance
feature_df = pd.DataFrame(feature_dict, index=[0])
feature_df.T.plot.bar(title="Feature Importance", legend=False);
pd.crosstab(df["sex"], df["target"])
pd.crosstab(df["slope"], df["target"])
###Output
_____no_output_____ |
Activity_1_Python_Fundamentals_Tornito_J_J.ipynb | ###Markdown
Python FundamentalsIn this module, we are going to establish our skills in Python programming. In this notebook we are going to cover:* Variables and Data Types* Operations* Input and Output Operations* Logic Control* Iterables* Functions Variables and Data Types
###Code
x = 1
x
x = 1
a, b = 3, -2
b
type(x)
y = 2.25
type(y)
x = float(x)
type(x)
x
s, t, u = "1", '3', 'three'
type(s)
type(t)
type(u)
###Output
_____no_output_____
###Markdown
Operations Arithmetic
###Code
w, x, y, z = 4.0, -3.0, 1, -32
### Addition
S = w + x
S
### Subtraction
D = y - z
D
### Multiplication
P = w*z
P
### Division
Q = y/x
Q
### Floor Division
Qf = w//x
Qf
### Exponentiation
E = w**x
E
### Modulo
mod = z%x
mod
###Output
_____no_output_____
###Markdown
Assignment
###Code
A, B, C, D, E = 0, 100, 2, 1, 2
A += w
B -= x
C *= w
D /= x
E **= y
###Output
_____no_output_____
###Markdown
Comparators
###Code
size_1, size_2, size_3 = 1, 2.0, "1"
true_size = 1.0
## Equality
size_1 == true_size
## Non-Equality
size_2 != true_size
## Inequality
s1 = size_1 > size_2
s2 = size_1 < size_2/2
s3 = true_size >= size_1
s4 = size_2 <= true_size
###Output
_____no_output_____
###Markdown
Logical
###Code
size_1 == true_size
size_1 is true_size
size_1 is not true_size
P, Q = True, False
conj = P and Q
conj
disj = P or Q
disj
nand = not(P and Q)
nand
xor = (not P and Q) or (P and not Q)
xor
###Output
_____no_output_____
###Markdown
Input and Output
###Code
print("Hello World!")
cnt = 14000
string = "Hello World!"
print(string, ", Current COVID count is:", cnt)
cnt += 11000
print(f"{string}, current count is: {cnt}")
sem_grade = 86.25
name = ""
print("Hello {}, your semestral grade is: {}".format(name, sem_grade))
pg, mg, fg, = 98, 85, 65
print("The weights of your semestral grades are:\
\n\t {} for Prelims\
\n\t {} for Midterms, and\
\n\t {} for Finals" .format(pg, mg, fg))
pg, mg, fg, = 0.3, 0.3, 0.4
print("The weights of your semestral grades are:\
\n\t {:.2%} for Prelims\
\n\t {:.2%} for Midterms, and\
\n\t {:.2%} for Finals" .format(pg, mg, fg))
e = input("Enter a number: ")
name = input("Enter your name: ")
pg = input("Enter prelim grade: ")
mg = input("Enter midterm grade: ")
fg = input("Enter final grade: ")
sem_grade = None
print("Hello {}, your semestral grade is: {}" .format(name, sem_grade))
###Output
Enter your name: Bars
Enter prelim grade: 74
Enter midterm grade: 25
Enter final grade: 53
Hello Bars, your semestral grade is: None
###Markdown
Looping Statements While
###Code
## While loops
i, j = 0, 10
while(i<=j):
print(f"{i}\t|\t{j}")
i += 1
###Output
0 | 10
1 | 10
2 | 10
3 | 10
4 | 10
5 | 10
6 | 10
7 | 10
8 | 10
9 | 10
10 | 10
###Markdown
For
###Code
# for(int i = 0; i < 10; i++){
# printf(i)
#}
i = 0
for i in range(10):
print(i)
playlist = ["Bahay Kubo", "Bars", "Buko"]
print('Now Playing:\n')
for song in playlist:
print(song)
###Output
Now Playing:
Bahay Kubo
Bars
Buko
###Markdown
Flow Control Condition Statements
###Code
num_1, num_2 = 14, 12
if(num_1 == num_2):
print("Wews")
elif(num_1 > num_2):
print("Uwu")
else:
print("Ewe")
###Output
Uwu
###Markdown
Functions
###Code
# void DeleteUser (int userid){
# delete(userid);
#}
def delete_user (userid):
print("successfully deleted user: {}".format(userid))
userid = 2021_102934812
delete_user(2021_102934812)
def add(addend1, addend2):
sum = addend1 + addend2
return sum
add(5, 4)
###Output
_____no_output_____ |
gpt2_huggingface.ipynb | ###Markdown
Check GPU allocated on Google colab
###Code
gpu_info = !nvidia-smi
gpu_info = '\n'.join(gpu_info)
if gpu_info.find('failed') >= 0:
print('Select the Runtime > "Change runtime type" menu to enable a GPU accelerator, ')
print('and then re-execute this cell.')
else:
print(gpu_info)
###Output
Thu Jan 28 17:53:08 2021
+-----------------------------------------------------------------------------+
| NVIDIA-SMI 460.32.03 Driver Version: 418.67 CUDA Version: 10.1 |
|-------------------------------+----------------------+----------------------+
| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |
| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |
| | | MIG M. |
|===============================+======================+======================|
| 0 Tesla P100-PCIE... Off | 00000000:00:04.0 Off | 0 |
| N/A 39C P0 27W / 250W | 0MiB / 16280MiB | 0% Default |
| | | ERR! |
+-------------------------------+----------------------+----------------------+
+-----------------------------------------------------------------------------+
| Processes: |
| GPU GI CI PID Type Process name GPU Memory |
| ID ID Usage |
|=============================================================================|
| No running processes found |
+-----------------------------------------------------------------------------+
###Markdown
Step 1. Installations
###Code
!git clone https://github.com/huggingface/transformers
!pip install git+https://github.com/huggingface/transformers
!pip install git+https://github.com/huggingface/datasets
!pip list | grep -E 'transformers|tokenizers'
###Output
Cloning into 'transformers'...
remote: Enumerating objects: 34, done.[K
remote: Counting objects: 100% (34/34), done.[K
remote: Compressing objects: 100% (29/29), done.[K
remote: Total 60874 (delta 12), reused 12 (delta 2), pack-reused 60840[K
Receiving objects: 100% (60874/60874), 45.64 MiB | 28.00 MiB/s, done.
Resolving deltas: 100% (42991/42991), done.
Collecting git+https://github.com/huggingface/transformers
Cloning https://github.com/huggingface/transformers to /tmp/pip-req-build-f6jexgoy
Running command git clone -q https://github.com/huggingface/transformers /tmp/pip-req-build-f6jexgoy
Installing build dependencies ... [?25l[?25hdone
Getting requirements to build wheel ... [?25l[?25hdone
Preparing wheel metadata ... [?25l[?25hdone
Requirement already satisfied (use --upgrade to upgrade): transformers==4.3.0.dev0 from git+https://github.com/huggingface/transformers in /usr/local/lib/python3.6/dist-packages
Requirement already satisfied: sacremoses in /usr/local/lib/python3.6/dist-packages (from transformers==4.3.0.dev0) (0.0.43)
Requirement already satisfied: importlib-metadata; python_version < "3.8" in /usr/local/lib/python3.6/dist-packages (from transformers==4.3.0.dev0) (3.4.0)
Requirement already satisfied: dataclasses; python_version < "3.7" in /usr/local/lib/python3.6/dist-packages (from transformers==4.3.0.dev0) (0.8)
Requirement already satisfied: regex!=2019.12.17 in /usr/local/lib/python3.6/dist-packages (from transformers==4.3.0.dev0) (2019.12.20)
Requirement already satisfied: requests in /usr/local/lib/python3.6/dist-packages (from transformers==4.3.0.dev0) (2.23.0)
Requirement already satisfied: packaging in /usr/local/lib/python3.6/dist-packages (from transformers==4.3.0.dev0) (20.8)
Requirement already satisfied: tqdm>=4.27 in /usr/local/lib/python3.6/dist-packages (from transformers==4.3.0.dev0) (4.41.1)
Requirement already satisfied: tokenizers==0.9.4 in /usr/local/lib/python3.6/dist-packages (from transformers==4.3.0.dev0) (0.9.4)
Requirement already satisfied: filelock in /usr/local/lib/python3.6/dist-packages (from transformers==4.3.0.dev0) (3.0.12)
Requirement already satisfied: numpy in /usr/local/lib/python3.6/dist-packages (from transformers==4.3.0.dev0) (1.19.5)
Requirement already satisfied: click in /usr/local/lib/python3.6/dist-packages (from sacremoses->transformers==4.3.0.dev0) (7.1.2)
Requirement already satisfied: joblib in /usr/local/lib/python3.6/dist-packages (from sacremoses->transformers==4.3.0.dev0) (1.0.0)
Requirement already satisfied: six in /usr/local/lib/python3.6/dist-packages (from sacremoses->transformers==4.3.0.dev0) (1.15.0)
Requirement already satisfied: zipp>=0.5 in /usr/local/lib/python3.6/dist-packages (from importlib-metadata; python_version < "3.8"->transformers==4.3.0.dev0) (3.4.0)
Requirement already satisfied: typing-extensions>=3.6.4; python_version < "3.8" in /usr/local/lib/python3.6/dist-packages (from importlib-metadata; python_version < "3.8"->transformers==4.3.0.dev0) (3.7.4.3)
Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.6/dist-packages (from requests->transformers==4.3.0.dev0) (2020.12.5)
Requirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.6/dist-packages (from requests->transformers==4.3.0.dev0) (3.0.4)
Requirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.6/dist-packages (from requests->transformers==4.3.0.dev0) (1.24.3)
Requirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.6/dist-packages (from requests->transformers==4.3.0.dev0) (2.10)
Requirement already satisfied: pyparsing>=2.0.2 in /usr/local/lib/python3.6/dist-packages (from packaging->transformers==4.3.0.dev0) (2.4.7)
Building wheels for collected packages: transformers
Building wheel for transformers (PEP 517) ... [?25l[?25hdone
Created wheel for transformers: filename=transformers-4.3.0.dev0-cp36-none-any.whl size=1783713 sha256=dc9446b968eb32ffaf36acb048e1f101ed8ac32126541ebbeb04c8a03f1289b2
Stored in directory: /tmp/pip-ephem-wheel-cache-cgq3rpqu/wheels/70/d3/52/b3fa4f8b8ef04167ac62e5bb2accb62ae764db2a378247490e
Successfully built transformers
Collecting git+https://github.com/huggingface/datasets
Cloning https://github.com/huggingface/datasets to /tmp/pip-req-build-62vwjq2k
Running command git clone -q https://github.com/huggingface/datasets /tmp/pip-req-build-62vwjq2k
Requirement already satisfied: numpy>=1.17 in /usr/local/lib/python3.6/dist-packages (from datasets==1.2.1) (1.19.5)
Collecting pyarrow>=0.17.1
[?25l Downloading https://files.pythonhosted.org/packages/33/67/2f4fcce1b41bcc7e88a6bfdb42046597ae72e5bc95c2789b7c5ac893c433/pyarrow-3.0.0-cp36-cp36m-manylinux2014_x86_64.whl (20.7MB)
[K |████████████████████████████████| 20.7MB 1.4MB/s
[?25hRequirement already satisfied: dill in /usr/local/lib/python3.6/dist-packages (from datasets==1.2.1) (0.3.3)
Requirement already satisfied: pandas in /usr/local/lib/python3.6/dist-packages (from datasets==1.2.1) (1.1.5)
Requirement already satisfied: requests>=2.19.0 in /usr/local/lib/python3.6/dist-packages (from datasets==1.2.1) (2.23.0)
Requirement already satisfied: tqdm<4.50.0,>=4.27 in /usr/local/lib/python3.6/dist-packages (from datasets==1.2.1) (4.41.1)
Collecting xxhash
[?25l Downloading https://files.pythonhosted.org/packages/f7/73/826b19f3594756cb1c6c23d2fbd8ca6a77a9cd3b650c9dec5acc85004c38/xxhash-2.0.0-cp36-cp36m-manylinux2010_x86_64.whl (242kB)
[K |████████████████████████████████| 245kB 52.2MB/s
[?25hRequirement already satisfied: multiprocess in /usr/local/lib/python3.6/dist-packages (from datasets==1.2.1) (0.70.11.1)
Collecting fsspec
[?25l Downloading https://files.pythonhosted.org/packages/ec/80/72ac0982cc833945fada4b76c52f0f65435ba4d53bc9317d1c70b5f7e7d5/fsspec-0.8.5-py3-none-any.whl (98kB)
[K |████████████████████████████████| 102kB 10.6MB/s
[?25hRequirement already satisfied: dataclasses in /usr/local/lib/python3.6/dist-packages (from datasets==1.2.1) (0.8)
Requirement already satisfied: importlib_metadata in /usr/local/lib/python3.6/dist-packages (from datasets==1.2.1) (3.4.0)
Requirement already satisfied: python-dateutil>=2.7.3 in /usr/local/lib/python3.6/dist-packages (from pandas->datasets==1.2.1) (2.8.1)
Requirement already satisfied: pytz>=2017.2 in /usr/local/lib/python3.6/dist-packages (from pandas->datasets==1.2.1) (2018.9)
Requirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.6/dist-packages (from requests>=2.19.0->datasets==1.2.1) (1.24.3)
Requirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.6/dist-packages (from requests>=2.19.0->datasets==1.2.1) (3.0.4)
Requirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.6/dist-packages (from requests>=2.19.0->datasets==1.2.1) (2.10)
Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.6/dist-packages (from requests>=2.19.0->datasets==1.2.1) (2020.12.5)
Requirement already satisfied: zipp>=0.5 in /usr/local/lib/python3.6/dist-packages (from importlib_metadata->datasets==1.2.1) (3.4.0)
Requirement already satisfied: typing-extensions>=3.6.4; python_version < "3.8" in /usr/local/lib/python3.6/dist-packages (from importlib_metadata->datasets==1.2.1) (3.7.4.3)
Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.6/dist-packages (from python-dateutil>=2.7.3->pandas->datasets==1.2.1) (1.15.0)
Building wheels for collected packages: datasets
Building wheel for datasets (setup.py) ... [?25l[?25hdone
Created wheel for datasets: filename=datasets-1.2.1-cp36-none-any.whl size=172662 sha256=18f6ce80c900e03fed6ffe562eac760fdce4eeec9651de05e5d789da06c92208
Stored in directory: /tmp/pip-ephem-wheel-cache-plxg915r/wheels/ef/76/f4/b4ebbfebddcfdccc0378b9d9c9332b141161feb1b31f8a17c7
Successfully built datasets
Installing collected packages: pyarrow, xxhash, fsspec, datasets
Found existing installation: pyarrow 0.14.1
Uninstalling pyarrow-0.14.1:
Successfully uninstalled pyarrow-0.14.1
Successfully installed datasets-1.2.1 fsspec-0.8.5 pyarrow-3.0.0 xxhash-2.0.0
tokenizers 0.9.4
transformers 4.3.0.dev0
###Markdown
[Note: If you see errors during the above installation step, restarting the runtime might help.] Step 2. Imports
###Code
# from changed directory
# import run_language_modeling
# from transformers.examples import run_generation
# standard ML imports
import torch
import collections
import random
import numpy as np
from transformers import AutoConfig
from transformers import AutoTokenizer
from transformers import AutoModelWithLMHead
# Text processing
import json
from pathlib import Path
from glob import glob
import os
from concurrent.futures import ProcessPoolExecutor
from itertools import chain
import nltk
import re
nltk.download('punkt')
###Output
[nltk_data] Downloading package punkt to /root/nltk_data...
[nltk_data] Unzipping tokenizers/punkt.zip.
###Markdown
Step 3. Get data
###Code
# I am running on Colab with data stored in Google drive.
from google.colab import drive
drive.mount('/content/drive')
###Output
Mounted at /content/drive
###Markdown
Text Preprocessing Using sliding window of 8 sentences
###Code
sentence_tokenizer = nltk.data.load('tokenizers/punkt/english.pickle') # Punkt sentence tokenizer for the English Wodehouse text
sent_tokenize = sentence_tokenizer.tokenize
def flatten(iterable):
return chain.from_iterable(iterable)
def preprocess_book(book_txt):
start_idx = book_txt.index("START OF THIS PROJECT GUTENBERG") + 100
end_idx = book_txt.index("END OF THIS PROJECT") - 20
txt = book_txt[start_idx: end_idx]
return re.sub("\s+", " ", txt)
def process_book(book_path):
try:
txt = preprocess_book(Path(book_path).read_text("utf-8"))
sentences = [s for s in sent_tokenize(txt) if len(s) >= 16]
windowed_sentences = []
for snt in range(len(sentences)):
windowed_sentences.append(" ".join(sentences[snt: snt + 8]))
return windowed_sentences
except:
print(f"Could not parse \n{book_path}\n")
return []
# Uncomment on first run ONLY. Once you have the training file, comment it out again.
# train_data_directory = '/content/drive/My Drive/Colab Notebooks/wodehouse_generator/data/all_novels/'
# sliding_train_data = '/content/drive/My Drive/Colab Notebooks/wodehouse_generator/data/train_sliding.txt'
# books = []
# for filename in os.listdir(train_data_directory):
# file_path = os.path.join(train_data_directory, filename)
# books.append(file_path)
# buffer, BUFFER_SIZE = [], 100000
# with open(sliding_train_data, "w") as file:
# for i, sentence in enumerate(flatten(process_book(f) for f in books)):
# if len(buffer) >= BUFFER_SIZE:
# file.write("\n".join(buffer))
# buffer.clear()
# print(i, end="\r")
# buffer.append(sentence)
# if len(buffer) > 0:
# file.write("\n".join(buffer))
# buffer.clear()
!head /content/drive/My\ Drive/Colab\ Notebooks/wodehouse_generator/data/train_sliding.txt
# number of lines, words, characters respectively
!wc /content/drive/My\ Drive/Colab\ Notebooks/wodehouse_generator/data/train_sliding.txt
## TAKES LOOOONG TIME. DO NOT RUN ONCE you have a trained model handy.
# !python run_language_modeling.py \
# --output_dir='/content/drive/My Drive/finetuned_models/wodehouse' \
# --model_type=gpt2 \
# --model_name_or_path=gpt2-medium \
# --save_total_limit=5 \
# --num_train_epochs=1.0 \
# --do_train \
# --evaluate_during_training \
# --logging_steps=500 \
# --save_steps=1500 \
# --train_data_file=/content/drive/My\ Drive/Colab\ Notebooks/wodehouse_generator/data/train_sliding.txt \
# --do_eval \
# --eval_data_file=/content/drive/My\ Drive/Colab\ Notebooks/wodehouse_generator/data/validate.txt \
# --per_gpu_train_batch_size=2 \
# --per_gpu_eval_batch_size=2 \
# --block_size=128 \
# --gradient_accumulation_steps=5 \
# --overwrite_output_dir # too lazy to delete previous failed run
!ls
## TAKES LOOOONG TIME. DO NOT RUN ONCE you have a trained model handy.
import os
os.chdir('/content/transformers/examples/language-modeling')
!python run_clm.py \
--output_dir='/content/drive/My Drive/finetuned_models_2/wodehouse' \
--model_type=gpt2 \
--model_name_or_path=gpt2-medium \
--save_total_limit=5 \
--num_train_epochs=1.0 \
--do_train \
--logging_steps=500 \
--save_steps=1500 \
--train_file=/content/drive/My\ Drive/Colab\ Notebooks/wodehouse_generator/data/train_sliding.txt \
--do_eval=y \
--validation_file=/content/drive/My\ Drive/Colab\ Notebooks/wodehouse_generator/data/validate.txt \
--per_gpu_train_batch_size=2 \
--per_gpu_eval_batch_size=2 \
--block_size=128 \
--gradient_accumulation_steps=5 \
--overwrite_output_dir # too lazy to delete previous failed run
###Output
2021-01-28 18:13:44.368248: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.10.1
01/28/2021 18:13:45 - WARNING - __main__ - Process rank: -1, device: cuda:0, n_gpu: 1distributed training: False, 16-bits training: False
01/28/2021 18:13:45 - INFO - __main__ - Training/evaluation parameters TrainingArguments(output_dir=/content/drive/My Drive/finetuned_models_2/wodehouse, overwrite_output_dir=True, do_train=True, do_eval=True, do_predict=False, evaluation_strategy=EvaluationStrategy.NO, prediction_loss_only=False, per_device_train_batch_size=8, per_device_eval_batch_size=8, gradient_accumulation_steps=5, eval_accumulation_steps=None, learning_rate=5e-05, weight_decay=0.0, adam_beta1=0.9, adam_beta2=0.999, adam_epsilon=1e-08, max_grad_norm=1.0, num_train_epochs=1.0, max_steps=-1, lr_scheduler_type=SchedulerType.LINEAR, warmup_steps=0, logging_dir=runs/Jan28_18-13-45_c65b84e3a59f, logging_first_step=False, logging_steps=500, save_steps=1500, save_total_limit=5, no_cuda=False, seed=42, fp16=False, fp16_opt_level=O1, fp16_backend=auto, local_rank=-1, tpu_num_cores=None, tpu_metrics_debug=False, debug=False, dataloader_drop_last=False, eval_steps=500, dataloader_num_workers=0, past_index=-1, run_name=/content/drive/My Drive/finetuned_models_2/wodehouse, disable_tqdm=False, remove_unused_columns=True, label_names=None, load_best_model_at_end=False, metric_for_best_model=None, greater_is_better=None, ignore_data_skip=False, sharded_ddp=False, deepspeed=None, label_smoothing_factor=0.0, adafactor=False, group_by_length=False, report_to=['tensorboard'], ddp_find_unused_parameters=None, pin_memory=True, _n_gpu=1)
Using custom data configuration default-3155a570d63d4eac
Downloading and preparing dataset text/default (download: Unknown size, generated: Unknown size, post-processed: Unknown size, total: Unknown size) to /root/.cache/huggingface/datasets/text/default-3155a570d63d4eac/0.0.0/44d63bd03e7e554f16131765a251f2d8333a5fe8a73f6ea3de012dbc49443691...
Dataset text downloaded and prepared to /root/.cache/huggingface/datasets/text/default-3155a570d63d4eac/0.0.0/44d63bd03e7e554f16131765a251f2d8333a5fe8a73f6ea3de012dbc49443691. Subsequent calls will reuse this data.
[INFO|file_utils.py:1286] 2021-01-28 18:13:48,017 >> https://huggingface.co/gpt2-medium/resolve/main/config.json not found in cache or force_download set to True, downloading to /root/.cache/huggingface/transformers/tmpivr4cfjs
Downloading: 100% 718/718 [00:00<00:00, 662kB/s]
[INFO|file_utils.py:1290] 2021-01-28 18:13:48,223 >> storing https://huggingface.co/gpt2-medium/resolve/main/config.json in cache at /root/.cache/huggingface/transformers/3a7a4b7235202f93d14a4a5e8200709184c5b25a29d9cfa6b0ede5166adf0768.cf0ec4a33a38dc96108560e01338af4bd3360dd859385d451c35b41987ae73ff
[INFO|file_utils.py:1293] 2021-01-28 18:13:48,224 >> creating metadata file for /root/.cache/huggingface/transformers/3a7a4b7235202f93d14a4a5e8200709184c5b25a29d9cfa6b0ede5166adf0768.cf0ec4a33a38dc96108560e01338af4bd3360dd859385d451c35b41987ae73ff
[INFO|configuration_utils.py:445] 2021-01-28 18:13:48,224 >> loading configuration file https://huggingface.co/gpt2-medium/resolve/main/config.json from cache at /root/.cache/huggingface/transformers/3a7a4b7235202f93d14a4a5e8200709184c5b25a29d9cfa6b0ede5166adf0768.cf0ec4a33a38dc96108560e01338af4bd3360dd859385d451c35b41987ae73ff
[INFO|configuration_utils.py:481] 2021-01-28 18:13:48,225 >> Model config GPT2Config {
"activation_function": "gelu_new",
"architectures": [
"GPT2LMHeadModel"
],
"attn_pdrop": 0.1,
"bos_token_id": 50256,
"embd_pdrop": 0.1,
"eos_token_id": 50256,
"gradient_checkpointing": false,
"initializer_range": 0.02,
"layer_norm_epsilon": 1e-05,
"model_type": "gpt2",
"n_ctx": 1024,
"n_embd": 1024,
"n_head": 16,
"n_inner": null,
"n_layer": 24,
"n_positions": 1024,
"n_special": 0,
"predict_special_tokens": true,
"resid_pdrop": 0.1,
"summary_activation": null,
"summary_first_dropout": 0.1,
"summary_proj_to_labels": true,
"summary_type": "cls_index",
"summary_use_proj": true,
"task_specific_params": {
"text-generation": {
"do_sample": true,
"max_length": 50
}
},
"transformers_version": "4.3.0.dev0",
"use_cache": true,
"vocab_size": 50257
}
[INFO|configuration_utils.py:445] 2021-01-28 18:13:48,428 >> loading configuration file https://huggingface.co/gpt2-medium/resolve/main/config.json from cache at /root/.cache/huggingface/transformers/3a7a4b7235202f93d14a4a5e8200709184c5b25a29d9cfa6b0ede5166adf0768.cf0ec4a33a38dc96108560e01338af4bd3360dd859385d451c35b41987ae73ff
[INFO|configuration_utils.py:481] 2021-01-28 18:13:48,429 >> Model config GPT2Config {
"activation_function": "gelu_new",
"architectures": [
"GPT2LMHeadModel"
],
"attn_pdrop": 0.1,
"bos_token_id": 50256,
"embd_pdrop": 0.1,
"eos_token_id": 50256,
"gradient_checkpointing": false,
"initializer_range": 0.02,
"layer_norm_epsilon": 1e-05,
"model_type": "gpt2",
"n_ctx": 1024,
"n_embd": 1024,
"n_head": 16,
"n_inner": null,
"n_layer": 24,
"n_positions": 1024,
"n_special": 0,
"predict_special_tokens": true,
"resid_pdrop": 0.1,
"summary_activation": null,
"summary_first_dropout": 0.1,
"summary_proj_to_labels": true,
"summary_type": "cls_index",
"summary_use_proj": true,
"task_specific_params": {
"text-generation": {
"do_sample": true,
"max_length": 50
}
},
"transformers_version": "4.3.0.dev0",
"use_cache": true,
"vocab_size": 50257
}
[INFO|file_utils.py:1286] 2021-01-28 18:13:48,643 >> https://huggingface.co/gpt2-medium/resolve/main/vocab.json not found in cache or force_download set to True, downloading to /root/.cache/huggingface/transformers/tmpuita0fq5
Downloading: 100% 1.04M/1.04M [00:00<00:00, 2.77MB/s]
[INFO|file_utils.py:1290] 2021-01-28 18:13:49,233 >> storing https://huggingface.co/gpt2-medium/resolve/main/vocab.json in cache at /root/.cache/huggingface/transformers/fee58641d7a73348d842afaa337d5a7763dad32beff8d9008bb3c3c847749d6b.c7ed1f96aac49e745788faa77ba0a26a392643a50bb388b9c04ff469e555241f
[INFO|file_utils.py:1293] 2021-01-28 18:13:49,233 >> creating metadata file for /root/.cache/huggingface/transformers/fee58641d7a73348d842afaa337d5a7763dad32beff8d9008bb3c3c847749d6b.c7ed1f96aac49e745788faa77ba0a26a392643a50bb388b9c04ff469e555241f
[INFO|file_utils.py:1286] 2021-01-28 18:13:49,450 >> https://huggingface.co/gpt2-medium/resolve/main/merges.txt not found in cache or force_download set to True, downloading to /root/.cache/huggingface/transformers/tmpmxa_lo8t
Downloading: 100% 456k/456k [00:00<00:00, 1.40MB/s]
[INFO|file_utils.py:1290] 2021-01-28 18:13:49,996 >> storing https://huggingface.co/gpt2-medium/resolve/main/merges.txt in cache at /root/.cache/huggingface/transformers/23c853a0fcfc12c7d72ad4e922068b6982665b673f6de30b4c5cbe5bd70a2236.5d12962c5ee615a4c803841266e9c3be9a691a924f72d395d3a6c6c81157788b
[INFO|file_utils.py:1293] 2021-01-28 18:13:49,996 >> creating metadata file for /root/.cache/huggingface/transformers/23c853a0fcfc12c7d72ad4e922068b6982665b673f6de30b4c5cbe5bd70a2236.5d12962c5ee615a4c803841266e9c3be9a691a924f72d395d3a6c6c81157788b
[INFO|file_utils.py:1286] 2021-01-28 18:13:50,220 >> https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json not found in cache or force_download set to True, downloading to /root/.cache/huggingface/transformers/tmpbaoyl4ew
Downloading: 100% 1.36M/1.36M [00:00<00:00, 3.49MB/s]
[INFO|file_utils.py:1290] 2021-01-28 18:13:50,827 >> storing https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json in cache at /root/.cache/huggingface/transformers/8e4f9a65085b1b4ae69ffac9a953a44249c9ea1e72e4a7816ee87b70081df038.cf2d0ecb83b6df91b3dbb53f1d1e4c311578bfd3aa0e04934215a49bf9898df0
[INFO|file_utils.py:1293] 2021-01-28 18:13:50,827 >> creating metadata file for /root/.cache/huggingface/transformers/8e4f9a65085b1b4ae69ffac9a953a44249c9ea1e72e4a7816ee87b70081df038.cf2d0ecb83b6df91b3dbb53f1d1e4c311578bfd3aa0e04934215a49bf9898df0
[INFO|tokenization_utils_base.py:1783] 2021-01-28 18:13:50,827 >> loading file https://huggingface.co/gpt2-medium/resolve/main/vocab.json from cache at /root/.cache/huggingface/transformers/fee58641d7a73348d842afaa337d5a7763dad32beff8d9008bb3c3c847749d6b.c7ed1f96aac49e745788faa77ba0a26a392643a50bb388b9c04ff469e555241f
[INFO|tokenization_utils_base.py:1783] 2021-01-28 18:13:50,827 >> loading file https://huggingface.co/gpt2-medium/resolve/main/merges.txt from cache at /root/.cache/huggingface/transformers/23c853a0fcfc12c7d72ad4e922068b6982665b673f6de30b4c5cbe5bd70a2236.5d12962c5ee615a4c803841266e9c3be9a691a924f72d395d3a6c6c81157788b
[INFO|tokenization_utils_base.py:1783] 2021-01-28 18:13:50,828 >> loading file https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json from cache at /root/.cache/huggingface/transformers/8e4f9a65085b1b4ae69ffac9a953a44249c9ea1e72e4a7816ee87b70081df038.cf2d0ecb83b6df91b3dbb53f1d1e4c311578bfd3aa0e04934215a49bf9898df0
[INFO|file_utils.py:1286] 2021-01-28 18:13:51,111 >> https://huggingface.co/gpt2-medium/resolve/main/pytorch_model.bin not found in cache or force_download set to True, downloading to /root/.cache/huggingface/transformers/tmp5xh8kchk
Downloading: 100% 1.52G/1.52G [00:25<00:00, 60.2MB/s]
[INFO|file_utils.py:1290] 2021-01-28 18:14:16,588 >> storing https://huggingface.co/gpt2-medium/resolve/main/pytorch_model.bin in cache at /root/.cache/huggingface/transformers/6249eef5c8c1fcfccf9f36fc2e59301b109ac4036d8ebbee9c2b7f7e47f440bd.2538e2565f9e439a3668b981faf959c8b490b36dd631f3c4cd992519b2dd36f1
[INFO|file_utils.py:1293] 2021-01-28 18:14:16,588 >> creating metadata file for /root/.cache/huggingface/transformers/6249eef5c8c1fcfccf9f36fc2e59301b109ac4036d8ebbee9c2b7f7e47f440bd.2538e2565f9e439a3668b981faf959c8b490b36dd631f3c4cd992519b2dd36f1
[INFO|modeling_utils.py:1027] 2021-01-28 18:14:16,588 >> loading weights file https://huggingface.co/gpt2-medium/resolve/main/pytorch_model.bin from cache at /root/.cache/huggingface/transformers/6249eef5c8c1fcfccf9f36fc2e59301b109ac4036d8ebbee9c2b7f7e47f440bd.2538e2565f9e439a3668b981faf959c8b490b36dd631f3c4cd992519b2dd36f1
[INFO|modeling_utils.py:1143] 2021-01-28 18:14:32,690 >> All model checkpoint weights were used when initializing GPT2LMHeadModel.
[INFO|modeling_utils.py:1152] 2021-01-28 18:14:32,690 >> All the weights of GPT2LMHeadModel were initialized from the model checkpoint at gpt2-medium.
If your task is similar to the task the model of the checkpoint was trained on, you can already use GPT2LMHeadModel for predictions without further training.
100% 171/171 [00:29<00:00, 5.73ba/s]
100% 16/16 [00:00<00:00, 37.55ba/s]
100% 171/171 [06:54<00:00, 2.43s/ba]
100% 16/16 [00:00<00:00, 19.33ba/s]
[INFO|trainer.py:429] 2021-01-28 18:22:08,070 >> The following columns in the training set don't have a corresponding argument in `GPT2LMHeadModel.forward` and have been ignored: .
[INFO|trainer.py:429] 2021-01-28 18:22:08,071 >> The following columns in the evaluation set don't have a corresponding argument in `GPT2LMHeadModel.forward` and have been ignored: .
[WARNING|training_args.py:485] 2021-01-28 18:22:08,072 >> Using deprecated `--per_gpu_train_batch_size` argument which will be removed in a future version. Using `--per_device_train_batch_size` is preferred.
[WARNING|training_args.py:485] 2021-01-28 18:22:08,078 >> Using deprecated `--per_gpu_train_batch_size` argument which will be removed in a future version. Using `--per_device_train_batch_size` is preferred.
[INFO|trainer.py:832] 2021-01-28 18:22:08,078 >> ***** Running training *****
[INFO|trainer.py:833] 2021-01-28 18:22:08,078 >> Num examples = 186911
[INFO|trainer.py:834] 2021-01-28 18:22:08,078 >> Num Epochs = 1
[INFO|trainer.py:835] 2021-01-28 18:22:08,078 >> Instantaneous batch size per device = 8
[INFO|trainer.py:836] 2021-01-28 18:22:08,078 >> Total train batch size (w. parallel, distributed & accumulation) = 10
[INFO|trainer.py:837] 2021-01-28 18:22:08,078 >> Gradient Accumulation steps = 5
[INFO|trainer.py:838] 2021-01-28 18:22:08,079 >> Total optimization steps = 18691
[WARNING|training_args.py:485] 2021-01-28 18:22:08,085 >> Using deprecated `--per_gpu_train_batch_size` argument which will be removed in a future version. Using `--per_device_train_batch_size` is preferred.
[WARNING|training_args.py:499] 2021-01-28 18:22:08,085 >> Using deprecated `--per_gpu_eval_batch_size` argument which will be removed in a future version. Using `--per_device_eval_batch_size` is preferred.
{'loss': 3.1718, 'learning_rate': 4.8662457867422825e-05, 'epoch': 0.03}
{'loss': 3.0199, 'learning_rate': 4.732491573484565e-05, 'epoch': 0.05}
{'loss': 2.9462, 'learning_rate': 4.598737360226847e-05, 'epoch': 0.08}
8% 1500/18691 [19:05<3:37:38, 1.32it/s][INFO|trainer.py:1392] 2021-01-28 18:41:13,163 >> Saving model checkpoint to /content/drive/My Drive/finetuned_models_2/wodehouse/checkpoint-1500
[INFO|configuration_utils.py:300] 2021-01-28 18:41:13,169 >> Configuration saved in /content/drive/My Drive/finetuned_models_2/wodehouse/checkpoint-1500/config.json
[INFO|modeling_utils.py:817] 2021-01-28 18:41:20,740 >> Model weights saved in /content/drive/My Drive/finetuned_models_2/wodehouse/checkpoint-1500/pytorch_model.bin
{'loss': 2.8972, 'learning_rate': 4.46498314696913e-05, 'epoch': 0.11}
{'loss': 2.8563, 'learning_rate': 4.331228933711412e-05, 'epoch': 0.13}
{'loss': 2.8199, 'learning_rate': 4.197474720453695e-05, 'epoch': 0.16}
16% 3000/18691 [38:45<3:19:14, 1.31it/s][INFO|trainer.py:1392] 2021-01-28 19:00:53,586 >> Saving model checkpoint to /content/drive/My Drive/finetuned_models_2/wodehouse/checkpoint-3000
[INFO|configuration_utils.py:300] 2021-01-28 19:00:53,595 >> Configuration saved in /content/drive/My Drive/finetuned_models_2/wodehouse/checkpoint-3000/config.json
[INFO|modeling_utils.py:817] 2021-01-28 19:01:01,107 >> Model weights saved in /content/drive/My Drive/finetuned_models_2/wodehouse/checkpoint-3000/pytorch_model.bin
{'loss': 2.7817, 'learning_rate': 4.0637205071959763e-05, 'epoch': 0.19}
{'loss': 2.7288, 'learning_rate': 3.929966293938259e-05, 'epoch': 0.21}
{'loss': 2.7055, 'learning_rate': 3.7962120806805416e-05, 'epoch': 0.24}
24% 4500/18691 [58:30<2:59:36, 1.32it/s][INFO|trainer.py:1392] 2021-01-28 19:20:38,333 >> Saving model checkpoint to /content/drive/My Drive/finetuned_models_2/wodehouse/checkpoint-4500
[INFO|configuration_utils.py:300] 2021-01-28 19:20:38,340 >> Configuration saved in /content/drive/My Drive/finetuned_models_2/wodehouse/checkpoint-4500/config.json
[INFO|modeling_utils.py:817] 2021-01-28 19:20:46,115 >> Model weights saved in /content/drive/My Drive/finetuned_models_2/wodehouse/checkpoint-4500/pytorch_model.bin
{'loss': 2.6735, 'learning_rate': 3.662457867422824e-05, 'epoch': 0.27}
{'loss': 2.6506, 'learning_rate': 3.528703654165106e-05, 'epoch': 0.29}
{'loss': 2.6134, 'learning_rate': 3.394949440907389e-05, 'epoch': 0.32}
32% 6000/18691 [1:18:24<2:41:45, 1.31it/s][INFO|trainer.py:1392] 2021-01-28 19:40:32,902 >> Saving model checkpoint to /content/drive/My Drive/finetuned_models_2/wodehouse/checkpoint-6000
[INFO|configuration_utils.py:300] 2021-01-28 19:40:32,910 >> Configuration saved in /content/drive/My Drive/finetuned_models_2/wodehouse/checkpoint-6000/config.json
[INFO|modeling_utils.py:817] 2021-01-28 19:40:41,374 >> Model weights saved in /content/drive/My Drive/finetuned_models_2/wodehouse/checkpoint-6000/pytorch_model.bin
{'loss': 2.6019, 'learning_rate': 3.261195227649671e-05, 'epoch': 0.35}
{'loss': 2.5751, 'learning_rate': 3.127441014391954e-05, 'epoch': 0.37}
{'loss': 2.5538, 'learning_rate': 2.9936868011342358e-05, 'epoch': 0.4}
40% 7500/18691 [1:38:20<2:21:21, 1.32it/s][INFO|trainer.py:1392] 2021-01-28 20:00:28,281 >> Saving model checkpoint to /content/drive/My Drive/finetuned_models_2/wodehouse/checkpoint-7500
[INFO|configuration_utils.py:300] 2021-01-28 20:00:28,290 >> Configuration saved in /content/drive/My Drive/finetuned_models_2/wodehouse/checkpoint-7500/config.json
[INFO|modeling_utils.py:817] 2021-01-28 20:00:34,910 >> Model weights saved in /content/drive/My Drive/finetuned_models_2/wodehouse/checkpoint-7500/pytorch_model.bin
{'loss': 2.5392, 'learning_rate': 2.859932587876518e-05, 'epoch': 0.43}
{'loss': 2.5166, 'learning_rate': 2.7261783746188007e-05, 'epoch': 0.45}
{'loss': 2.4878, 'learning_rate': 2.5924241613610827e-05, 'epoch': 0.48}
48% 9000/18691 [1:58:11<2:05:02, 1.29it/s][INFO|trainer.py:1392] 2021-01-28 20:20:19,791 >> Saving model checkpoint to /content/drive/My Drive/finetuned_models_2/wodehouse/checkpoint-9000
[INFO|configuration_utils.py:300] 2021-01-28 20:20:19,800 >> Configuration saved in /content/drive/My Drive/finetuned_models_2/wodehouse/checkpoint-9000/config.json
[INFO|modeling_utils.py:817] 2021-01-28 20:20:26,792 >> Model weights saved in /content/drive/My Drive/finetuned_models_2/wodehouse/checkpoint-9000/pytorch_model.bin
[INFO|trainer.py:1451] 2021-01-28 20:21:01,050 >> Deleting older checkpoint [/content/drive/My Drive/finetuned_models_2/wodehouse/checkpoint-1500] due to args.save_total_limit
{'loss': 2.4764, 'learning_rate': 2.4586699481033653e-05, 'epoch': 0.51}
{'loss': 2.4632, 'learning_rate': 2.3249157348456476e-05, 'epoch': 0.54}
{'loss': 2.4637, 'learning_rate': 2.1911615215879303e-05, 'epoch': 0.56}
56% 10500/18691 [2:18:05<1:45:16, 1.30it/s][INFO|trainer.py:1392] 2021-01-28 20:40:13,781 >> Saving model checkpoint to /content/drive/My Drive/finetuned_models_2/wodehouse/checkpoint-10500
[INFO|configuration_utils.py:300] 2021-01-28 20:40:13,788 >> Configuration saved in /content/drive/My Drive/finetuned_models_2/wodehouse/checkpoint-10500/config.json
[INFO|modeling_utils.py:817] 2021-01-28 20:40:20,699 >> Model weights saved in /content/drive/My Drive/finetuned_models_2/wodehouse/checkpoint-10500/pytorch_model.bin
[INFO|trainer.py:1451] 2021-01-28 20:40:53,929 >> Deleting older checkpoint [/content/drive/My Drive/finetuned_models_2/wodehouse/checkpoint-3000] due to args.save_total_limit
{'loss': 2.4265, 'learning_rate': 2.0574073083302126e-05, 'epoch': 0.59}
{'loss': 2.4094, 'learning_rate': 1.923653095072495e-05, 'epoch': 0.62}
{'loss': 2.4108, 'learning_rate': 1.789898881814777e-05, 'epoch': 0.64}
64% 12000/18691 [2:38:08<1:24:56, 1.31it/s][INFO|trainer.py:1392] 2021-01-28 21:00:16,188 >> Saving model checkpoint to /content/drive/My Drive/finetuned_models_2/wodehouse/checkpoint-12000
[INFO|configuration_utils.py:300] 2021-01-28 21:00:16,197 >> Configuration saved in /content/drive/My Drive/finetuned_models_2/wodehouse/checkpoint-12000/config.json
[INFO|modeling_utils.py:817] 2021-01-28 21:00:23,623 >> Model weights saved in /content/drive/My Drive/finetuned_models_2/wodehouse/checkpoint-12000/pytorch_model.bin
[INFO|trainer.py:1451] 2021-01-28 21:00:53,669 >> Deleting older checkpoint [/content/drive/My Drive/finetuned_models_2/wodehouse/checkpoint-4500] due to args.save_total_limit
{'loss': 2.3966, 'learning_rate': 1.6561446685570598e-05, 'epoch': 0.67}
{'loss': 2.3813, 'learning_rate': 1.5223904552993421e-05, 'epoch': 0.7}
{'loss': 2.3707, 'learning_rate': 1.3886362420416244e-05, 'epoch': 0.72}
72% 13500/18691 [2:58:05<1:05:55, 1.31it/s][INFO|trainer.py:1392] 2021-01-28 21:20:13,989 >> Saving model checkpoint to /content/drive/My Drive/finetuned_models_2/wodehouse/checkpoint-13500
[INFO|configuration_utils.py:300] 2021-01-28 21:20:13,997 >> Configuration saved in /content/drive/My Drive/finetuned_models_2/wodehouse/checkpoint-13500/config.json
[INFO|modeling_utils.py:817] 2021-01-28 21:20:20,787 >> Model weights saved in /content/drive/My Drive/finetuned_models_2/wodehouse/checkpoint-13500/pytorch_model.bin
[INFO|trainer.py:1451] 2021-01-28 21:20:51,248 >> Deleting older checkpoint [/content/drive/My Drive/finetuned_models_2/wodehouse/checkpoint-6000] due to args.save_total_limit
{'loss': 2.3636, 'learning_rate': 1.2548820287839067e-05, 'epoch': 0.75}
{'loss': 2.3558, 'learning_rate': 1.121127815526189e-05, 'epoch': 0.78}
{'loss': 2.3479, 'learning_rate': 9.873736022684715e-06, 'epoch': 0.8}
80% 15000/18691 [3:18:01<47:12, 1.30it/s][INFO|trainer.py:1392] 2021-01-28 21:40:09,289 >> Saving model checkpoint to /content/drive/My Drive/finetuned_models_2/wodehouse/checkpoint-15000
[INFO|configuration_utils.py:300] 2021-01-28 21:40:09,296 >> Configuration saved in /content/drive/My Drive/finetuned_models_2/wodehouse/checkpoint-15000/config.json
[INFO|modeling_utils.py:817] 2021-01-28 21:40:16,482 >> Model weights saved in /content/drive/My Drive/finetuned_models_2/wodehouse/checkpoint-15000/pytorch_model.bin
[INFO|trainer.py:1451] 2021-01-28 21:40:45,569 >> Deleting older checkpoint [/content/drive/My Drive/finetuned_models_2/wodehouse/checkpoint-7500] due to args.save_total_limit
{'loss': 2.333, 'learning_rate': 8.536193890107538e-06, 'epoch': 0.83}
{'loss': 2.3191, 'learning_rate': 7.1986517575303625e-06, 'epoch': 0.86}
{'loss': 2.3238, 'learning_rate': 5.861109624953186e-06, 'epoch': 0.88}
88% 16500/18691 [3:37:59<27:47, 1.31it/s][INFO|trainer.py:1392] 2021-01-28 22:00:07,943 >> Saving model checkpoint to /content/drive/My Drive/finetuned_models_2/wodehouse/checkpoint-16500
[INFO|configuration_utils.py:300] 2021-01-28 22:00:07,950 >> Configuration saved in /content/drive/My Drive/finetuned_models_2/wodehouse/checkpoint-16500/config.json
[INFO|modeling_utils.py:817] 2021-01-28 22:00:14,938 >> Model weights saved in /content/drive/My Drive/finetuned_models_2/wodehouse/checkpoint-16500/pytorch_model.bin
[INFO|trainer.py:1451] 2021-01-28 22:00:44,264 >> Deleting older checkpoint [/content/drive/My Drive/finetuned_models_2/wodehouse/checkpoint-9000] due to args.save_total_limit
{'loss': 2.3225, 'learning_rate': 4.52356749237601e-06, 'epoch': 0.91}
{'loss': 2.3212, 'learning_rate': 3.186025359798834e-06, 'epoch': 0.94}
{'loss': 2.3101, 'learning_rate': 1.8484832272216577e-06, 'epoch': 0.96}
96% 18000/18691 [3:57:58<08:44, 1.32it/s][INFO|trainer.py:1392] 2021-01-28 22:20:06,166 >> Saving model checkpoint to /content/drive/My Drive/finetuned_models_2/wodehouse/checkpoint-18000
[INFO|configuration_utils.py:300] 2021-01-28 22:20:06,174 >> Configuration saved in /content/drive/My Drive/finetuned_models_2/wodehouse/checkpoint-18000/config.json
[INFO|modeling_utils.py:817] 2021-01-28 22:20:13,301 >> Model weights saved in /content/drive/My Drive/finetuned_models_2/wodehouse/checkpoint-18000/pytorch_model.bin
[INFO|trainer.py:1451] 2021-01-28 22:20:46,622 >> Deleting older checkpoint [/content/drive/My Drive/finetuned_models_2/wodehouse/checkpoint-10500] due to args.save_total_limit
{'loss': 2.3093, 'learning_rate': 5.109410946444814e-07, 'epoch': 0.99}
100% 18691/18691 [4:07:32<00:00, 1.31it/s][INFO|trainer.py:998] 2021-01-28 22:29:40,193 >>
Training completed. Do not forget to share your model on huggingface.co/models =)
{'train_runtime': 14852.115, 'train_samples_per_second': 1.258, 'epoch': 1.0}
100% 18691/18691 [4:07:32<00:00, 1.26it/s]
[INFO|trainer.py:1392] 2021-01-28 22:29:40,249 >> Saving model checkpoint to /content/drive/My Drive/finetuned_models_2/wodehouse
[INFO|configuration_utils.py:300] 2021-01-28 22:29:40,261 >> Configuration saved in /content/drive/My Drive/finetuned_models_2/wodehouse/config.json
[INFO|modeling_utils.py:817] 2021-01-28 22:29:47,311 >> Model weights saved in /content/drive/My Drive/finetuned_models_2/wodehouse/pytorch_model.bin
01/28/2021 22:29:47 - INFO - __main__ - ***** Train results *****
01/28/2021 22:29:47 - INFO - __main__ - epoch = 1.0
01/28/2021 22:29:47 - INFO - __main__ - train_runtime = 14852.115
01/28/2021 22:29:47 - INFO - __main__ - train_samples_per_second = 1.258
01/28/2021 22:29:47 - INFO - __main__ - *** Evaluate ***
[WARNING|training_args.py:499] 2021-01-28 22:29:47,446 >> Using deprecated `--per_gpu_eval_batch_size` argument which will be removed in a future version. Using `--per_device_eval_batch_size` is preferred.
[INFO|trainer.py:1584] 2021-01-28 22:29:47,446 >> ***** Running Evaluation *****
[INFO|trainer.py:1585] 2021-01-28 22:29:47,447 >> Num examples = 1260
[INFO|trainer.py:1586] 2021-01-28 22:29:47,448 >> Batch size = 2
100% 630/630 [00:29<00:00, 21.71it/s]
01/28/2021 22:30:16 - INFO - __main__ - ***** Eval results *****
01/28/2021 22:30:16 - INFO - __main__ - perplexity = 36.58978545212275
###Markdown
Compute perplexity of a dataset.This section shows how to compute perplexity of a dataset according to either the pre-trained or your fine-tuned language model. While this is possible to do by calling `run_language_modeling.py` on the command-line as above, we'll instead call the Python functions directly. Look at what checkpoints are availableRun `ls` to look at what checkpoints have been saved. You'll want to set `CHECKPOINT_PATH` below to one of these in order to evaluate the model weights saved in that checkpoint.
###Code
!ls '/content/drive/My Drive/finetuned_models_2/wodehouse'
###Output
checkpoint-12000 config.json tokenizer_config.json
checkpoint-13500 eval_results_clm.txt trainer_state.json
checkpoint-15000 merges.txt training_args.bin
checkpoint-16500 pytorch_model.bin train_results.txt
checkpoint-18000 special_tokens_map.json vocab.json
###Markdown
Helper functions
###Code
def load_model(args):
"""Creates a model and loads in weights for it."""
config = AutoConfig.from_pretrained(args.model_name_or_path, cache_dir=None)
model = AutoModelWithLMHead.from_pretrained(
args.model_name_or_path,
from_tf=bool(".ckpt" in args.model_name_or_path),
config=config,
cache_dir=None
)
model.to(args.device)
return model
def set_seed(seed):
"""Set the random seed."""
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def do_perplexity_eval(args, model, data_file_path):
"""Computes the perplexity of the text in data_file_path according to the provided model."""
set_seed(args.seed)
args.eval_data_file=data_file_path
tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, cache_dir=None)
#args.block_size = min(args.block_size, tokenizer.max_len)
result = run_language_modeling.evaluate(args, model, tokenizer, prefix="")
return result
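# For reference, the perplexity reported by the evaluation above is derived from the
# average cross-entropy loss on the eval set. A minimal sketch of that relation
# (not the exact code inside run_language_modeling.evaluate):
import math
def loss_to_perplexity(mean_eval_loss):
    # e.g. a mean eval loss of ~3.6 corresponds to a perplexity of ~36.6
    return math.exp(mean_eval_loss)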
###Output
_____no_output_____
###Markdown
How is the trained model doing?
###Code
class DictToObject(object):
def __init__(self, dictionary):
def _traverse(key, element):
if isinstance(element, dict):
return key, DictToObject(element)
else:
return key, element
objd = dict(_traverse(k, v) for k, v in dictionary.items())
self.__dict__.update(objd)
# Set this to the checkpoint you want to evalute, or to "gpt2-medium" to
# evaluate the pre-trained model without finetuning.
CHECKPOINT_PATH = '/content/drive/My Drive/finetuned_models_2/wodehouse/checkpoint-18000'
OUTPUT_PATH = '/content/drive/My Drive/finetuned_models_2/wodehouse/output_checkpoint_18000'
# Set this to the list of text files you want to evaluate the perplexity of.
DATA_PATHS = ["/content/drive/My Drive/Colab Notebooks/wodehouse_generator/data/validate.txt",
"/content/drive/My Drive/Colab Notebooks/wodehouse_generator/data/test.txt"]
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
n_gpu = torch.cuda.device_count()
print("Running on device: ", device)
args = collections.defaultdict(
model_name_or_path=CHECKPOINT_PATH,
output_dir=OUTPUT_PATH,
block_size = 128,
local_rank=-1,
eval_batch_size=2,
per_gpu_eval_batch_size=2,
n_gpu=n_gpu,
mlm=False,
device=device,
line_by_line=False,
overwrite_cache=None,
model_type='gpt2',
seed=42,
)
args = DictToObject(args)
model = load_model(args)
for data_path in DATA_PATHS:
eval_results = do_perplexity_eval(args, model, data_path)
perplexity = eval_results['perplexity']
print('{} is the perplexity of {} according to {}'.format(
perplexity, data_path, CHECKPOINT_PATH))
###Output
Running on device: cuda
###Markdown
Generate samples. The following code generates text samples that are continuations of a provided prompt.
###Code
def generate_samples(args, model, prompt_text):
"""Generating sampling for the provided prompt using the provided model."""
set_seed(args.seed)
tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, cache_dir=None)
requires_preprocessing = args.model_type in run_generation.PREPROCESSING_FUNCTIONS.keys()
encoded_prompt = tokenizer.encode(prompt_text, add_special_tokens=False, return_tensors="pt")
encoded_prompt = encoded_prompt.to(args.device)
output_sequences = model.generate(
input_ids=encoded_prompt,
max_length=args.length + len(encoded_prompt[0]),
temperature=args.temperature,
top_k=args.k,
top_p=args.p,
repetition_penalty=args.repetition_penalty,
do_sample=True,
num_return_sequences=args.num_return_sequences,
)
# Remove the batch dimension when returning multiple sequences
if len(output_sequences.shape) > 2:
output_sequences.squeeze_()
generated_sequences = []
for generated_sequence_idx, generated_sequence in enumerate(output_sequences):
generated_sequence = generated_sequence.tolist()
# Decode text
text = tokenizer.decode(generated_sequence, clean_up_tokenization_spaces=True)
# Remove all text after the stop token
text = text[: text.find(args.stop_token) if args.stop_token else None]
# Remove the excess text that was used for pre-processing
text = text[len(tokenizer.decode(encoded_prompt[0], clean_up_tokenization_spaces=True)) :]
# Add the prompt at the beginning of the sequence.
total_sequence = prompt_text + text
generated_sequences.append(total_sequence)
return generated_sequences
def generate_wodehouse_samples(prompt):
# You should try out other prompts as well as no prompt at all.
PROMPT = prompt
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
n_gpu = torch.cuda.device_count()
print("Running on device: ", device)
args = collections.defaultdict(
model_name_or_path=CHECKPOINT_PATH,
output_dir=OUTPUT_PATH,
n_gpu=n_gpu,
mlm=False,
device=device,
model_type='gpt2',
seed=42,
stop_token=None, # Set this if your dataset has a special word that indicates the end of a text.
temperature=1.0, # temperature sampling. Set this to temperature=1.0 to not use temperature.
k=50, # k for top-k sampling. Set this to k=0 to not use top-k.
p=1.0, # p for nucleus sampling. Set this to p=1.0 to not use nucleus sampling.
repetition_penalty=None,
length=900, # Number of tokens to generate.
num_return_sequences=3, # Number of independently computed samples to generate.
)
args = DictToObject(dict(args))
model = load_model(args)
sequences = generate_samples(args, model, PROMPT)
return sequences
def print_sequences(sequences):
for idx, sequence in enumerate(sequences):
print('\n====== GENERATION {} ======'.format(idx))
print(sequence)
sequences = generate_wodehouse_samples("Seated with his wife at breakfast on the veranda which overlooked the rolling lawns and leafy woods of his charming Sussex home, Geoffrey Windlebird, the great financier, was enjoying the morning sun to the full. ")
print_sequences(sequences)
sequences = generate_wodehouse_samples("It was in Oxford Street at the hour when women come up from the suburbs to shop; and he was standing among the dogs and commissionaires outside Selfridge’s.")
print_sequences(sequences)
###Output
_____no_output_____ |
Counting Dots.ipynb | ###Markdown
Copyright (c) 2000 Jabavu W. Adams
###Code
import numpy as np
import tensorflow as tf
print(tf.__version__)
print(tf.keras.__version__)
import matplotlib.pyplot as plt
plt.rcParams['figure.figsize'] = (16, 8)
###Output
_____no_output_____
###Markdown
Load Images
###Code
import PIL
print(PIL.__version__)
from PIL import Image
from PIL import ImageOps
import os
DATA_ROOT = os.path.expanduser('~/devel/polkaspots/polkaspots_dataset')
DATA_PARENT = os.path.abspath(os.path.join(DATA_ROOT, '..'))
IMG_ROOT = os.path.join(DATA_ROOT, 'images')
POS_ROOT = os.path.join(DATA_ROOT, 'positions')
import glob
import re
import zipfile
radii = []
dot_counts = []
zip_files = glob.glob(os.path.join(IMG_ROOT, 'radius*.zip'))
for zf in zip_files:
unzipped_file_name = os.path.splitext(zf)[0]
if not os.path.exists(unzipped_file_name):
with zipfile.ZipFile(zf, 'r') as zip_ref:
zip_ref.extractall(DATA_PARENT)
m = re.search('radius([0-9]+)', unzipped_file_name)
assert(m)
radius = int(m[1])
radii.append(radius)
max_radius = max(radii)
min_radius = min(radii)
print(f'radius: {min_radius} -> {max_radius}')
for radius in range(min_radius, max_radius+1):
if not len(dot_counts):
unzipped_dirs = glob.glob(os.path.join(IMG_ROOT, f'radius{radius:02d}/*'))
assert(len(unzipped_dirs))
for uzd in unzipped_dirs:
# Each subfolder name is the zero-padded dot count, e.g. '.../radius02/07'
m = re.search('([0-9]+)$', uzd)
assert(m)
dot_counts.append(int(m[1]))
min_dot_count = min(dot_counts)
max_dot_count = max(dot_counts)
print(min_dot_count, max_dot_count)
def image_count(nb_dots, radius):
"""Return the number of images containing the given number of dots of the given radius."""
containing_path = os.path.join(IMG_ROOT, f'radius{radius:02d}', f'{nb_dots:02d}')
file_wildcard = f'dots_radius_{radius}_count_{nb_dots:02d}_*.png'
filenames = glob.glob(os.path.join(containing_path, file_wildcard))
return len(filenames)
def get_image(nb_dots, radius, i):
"""Get the ith image with the given number of dots of the given radius. Returns a PIL.Image"""
containing_folder = os.path.join(IMG_ROOT, f'radius{radius:02d}', f'{nb_dots:02d}')
filename = f'dots_radius_{radius}_count_{nb_dots:02d}_{i:05d}.png'
return Image.open(os.path.join(containing_folder, filename))
im = get_image(7, 20, 42)
print(im.format)
print(im.mode)
print(im.size)
im
# Convert from RGB to grayscale
im = im.convert(mode='L')
print(im.mode)
print(im.size)
# Invert image so blobs are high and background is low
im = ImageOps.invert(im)
im.show()
# Convert to numpy array and convert values from 0 -> 255 to 0.0 -> 1.0
x = np.asarray(im) / 255.0
print(x)
def array_for_image(nb_dots, radius, i):
"""Get the ith image with the given number of dots of the given radius. Returns a NumPy array of float.
The array is one channel (grayscale), background pixels are 0.0, and dot pixels are 1.0."""
im = get_image(nb_dots, radius, i)
im = im.convert(mode='L')
im = ImageOps.invert(im)
return np.asarray(im) / 255.0
array_for_image(8, 32, 100)
###Output
_____no_output_____
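###Markdown
As a quick visual check that the preprocessing behaves as intended (background near 0.0, dots near 1.0), one of the arrays can be rendered; the dot count, radius and index below are arbitrary picks.
###Code
# Visual sanity check of array_for_image (indices are arbitrary picks)
x = array_for_image(7, 20, 42)
plt.imshow(x, cmap='gray')
plt.title(f'{x.shape[0]}x{x.shape[1]} array, min={x.min()}, max={x.max()}')
plt.show()
###Output
_____no_output_____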
###Markdown
Create training, validation, and test dataset splits
###Code
import hickle as hkl
# from sklearn.model_selection import train_test_split
# image_arrays = []
# dot_counts = []
# for radius in range(2, 40+1):
# print(radius)
# for nb_dots in range(10+1):
# #print(nb_dots)
# for i in range(image_count(nb_dots, radius)):
# image_arrays.append(array_for_image(nb_dots, radius, i))
# dot_counts.append(nb_dots)
# nb_all_images = len(image_arrays)
# inputs = np.zeros((nb_all_images, image_arrays[0].shape[0], image_arrays[0].shape[1], 1), np.uint8)
# labels = np.zeros((nb_all_images, 1), np.uint8)
# for i in range(nb_all_images):
# inputs[i] = image_arrays[i].reshape((image_arrays[0].shape[0], image_arrays[0].shape[1], 1))
# labels[i] = dot_counts[i]
# del image_arrays
# del dot_counts
# (trainX, testX, trainY, testY) = train_test_split(inputs, labels, test_size=0.2, random_state=301)
# hkl.dump(trainX, 'X_train.hkl')
# del trainX
# hkl.dump(trainY, 'Y_train.hkl')
# del trainY
# hkl.dump(testX, 'X_test.hkl')
# del testX
# hkl.dump(testY, 'Y_test.hkl')
# del testY
X_train = hkl.load('X_train.hkl')
Y_train = hkl.load('Y_train.hkl')
###Output
_____no_output_____
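###Markdown
A brief check of what was just loaded; the shapes are whatever the (commented-out) split-and-save code wrote, expected to be (N, height, width, 1) inputs and (N, 1) labels.
###Code
# Confirm the loaded arrays look as expected before building the model
print(X_train.shape, X_train.dtype)
print(Y_train.shape, Y_train.dtype)
###Output
_____no_output_____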
###Markdown
Build Model and Train
###Code
from tensorflow import keras
from tensorflow.keras import layers
n_rows = X_train.shape[1]
n_cols = X_train.shape[2]
width_px = n_cols
height_px = n_rows
model = keras.Sequential()
model.add(layers.Conv2D(32, (7, 7), activation="relu", input_shape=(n_rows, n_cols, 1)))
model.add(layers.MaxPooling2D(pool_size=(2,2)))
model.add(layers.Conv2D(32, (3, 3), activation="relu"))
model.add(layers.MaxPooling2D(pool_size=(2,2)))
model.add(layers.Conv2D(32, (3, 3), activation="relu"))
model.add(layers.MaxPooling2D(pool_size=(2,2)))
model.add(layers.Conv2D(32, (3, 3), activation="relu"))
model.add(layers.MaxPooling2D(pool_size=(2,2)))
model.add(layers.Conv2D(32, (3, 3), activation="relu"))
model.add(layers.MaxPooling2D(pool_size=(2,2)))
model.add(layers.Conv2D(32, (3, 3), activation="relu"))
model.add(layers.Flatten())
model.add(layers.Dense(512, activation="relu"))
model.summary()
model.compile(optimizer="adam", loss="MeanSquaredError")
###Output
_____no_output_____
###Markdown
Load existing model (if any) and continue training.
###Code
from tensorflow.keras.models import load_model
model_files = glob.glob('./models/polkaspots_epoch_*.hdf5')
if len(model_files):
model_epochs = [int(fn_[-8:-5]) for fn_ in model_files]
highest_completed_epoch = max(model_epochs)
model = load_model(f'./models/polkaspots_epoch_{highest_completed_epoch:03d}.hdf5')
else:
highest_completed_epoch = 0
from tensorflow.keras.callbacks import ModelCheckpoint
callbacks = []
callbacks.append(ModelCheckpoint(filepath='./models/polkaspots_epoch_{epoch:03d}.hdf5', monitor='val_loss'))
history = model.fit(X_train, Y_train, batch_size=20, epochs=10, initial_epoch=highest_completed_epoch, callbacks=callbacks)
###Output
Epoch 2/10
3432/3432 [==============================] - 1648s 480ms/step - loss: 0.5477
Epoch 3/10
2648/3432 [======================>.......] - ETA: 6:16 - loss: 0.4549 |
Other investigations/Plot lesion removal area based comparison.ipynb | ###Markdown
Normalized values
###Code
path = "/analysis/ritter/projects/MS/interm_results"
normal_scans_norm = pd.read_csv(os.path.join(path, "evidence_in_area-normalized-TP-normal_images.csv"))
lesions_removed_norm = pd.read_csv(os.path.join(path, "evidence_in_area-normalized-TP-lesions_removed.csv"))
plt.figure(figsize=(10, 6))
plt.plot(normal_scans_norm['area'], normal_scans_norm['relevance'], 'go', label="Normal")
plt.plot(lesions_removed_norm['area'], lesions_removed_norm['relevance'], 'yo', label="Lesions removed")
plt.xticks(rotation='vertical')
plt.legend(loc="center left")
plt.title("test")
plt.show()
df_norm = pd.merge(normal_scans_norm, lesions_removed_norm, left_on='area',right_on='area',how='outer', suffixes=('_normal_scans', '_lesions_removed'))
df_norm = df_norm[::-1]
fig, axes = plt.subplots(ncols=2, sharey=True, figsize=(8, 10),
gridspec_kw = {'wspace':0, 'hspace':10.})
sns.barplot(y=df_norm['area'], x=df_norm['relevance_normal_scans'], color='goldenrod', ax=axes[0])
#axes[0].invert_xaxis()
#axes[0].invert_yaxis()
axes[0].set_xlabel(' ')
axes[0].set_ylabel(' ')
axes[0].set_xlim(np.max(df_norm['relevance_normal_scans'])*1.1, 0)
axes[0].set_title("Normal")
sns.barplot(y=df_norm['area'], x=df_norm['relevance_lesions_removed'], color='purple', ax=axes[1])
axes[1].set_xlabel(' ')
axes[1].set_ylabel(' ')
axes[1].set_xlim(0, np.max(df_norm['relevance_normal_scans'])*1.1)
axes[1].set_title("Lesions removed")
plt.tight_layout()
plt.show()
###Output
/home/fabiane/anaconda2/envs/postal/lib/python3.6/site-packages/matplotlib/figure.py:2362: UserWarning: This figure includes Axes that are not compatible with tight_layout, so results might be incorrect.
warnings.warn("This figure includes Axes that are not compatible "
###Markdown
Drop based on least sum
###Code
# get order based on relevance sum
order = (df_norm["relevance_normal_scans"] + df_norm["relevance_lesions_removed"]).sort_values().index
# take top 15
short_df_norm = df_norm.drop(order[:7])
fig, axes = plt.subplots(ncols=2, sharey=True, figsize=(8, 10),
gridspec_kw = {'wspace':0, 'hspace':10.})
sns.barplot(y=short_df_norm['area'], x=short_df_norm['relevance_normal_scans'], color='orangered', ax=axes[0])
axes[0].set_xlabel(' ')
axes[0].set_ylabel(' ')
axes[0].set_xlim(np.max(short_df_norm['relevance_normal_scans'])*1.1, 0)
axes[0].set_title("Normal")
sns.barplot(y=short_df_norm['area'], x=short_df_norm['relevance_lesions_removed'], color='royalblue', ax=axes[1])
axes[1].set_xlabel(' ')
axes[1].set_ylabel(' ')
axes[1].set_xlim(0, np.max(short_df_norm['relevance_normal_scans'])*1.1)
axes[1].set_title("Lesions removed")
plt.tight_layout()
plt.show()
# take top 10
short_df_norm = df_norm.drop(order[:12])
fig, axes = plt.subplots(ncols=2, sharey=True, figsize=(8, 10),
gridspec_kw = {'wspace':0, 'hspace':10.})
sns.barplot(y=short_df_norm['area'], x=short_df_norm['relevance_normal_scans'], color='goldenrod', ax=axes[0])
axes[0].set_xlabel(' ')
axes[0].set_ylabel(' ')
axes[0].set_xlim(np.max(short_df_norm['relevance_normal_scans'])*1.1, 0)
axes[0].set_title("Normal")
sns.barplot(y=short_df_norm['area'], x=short_df_norm['relevance_lesions_removed'], color='purple', ax=axes[1])
axes[1].set_xlabel(' ')
axes[1].set_ylabel(' ')
axes[1].set_xlim(0, np.max(short_df_norm['relevance_normal_scans'])*1.1)
axes[1].set_title("Lesions removed")
plt.tight_layout()
plt.show()
###Output
/home/fabiane/anaconda2/envs/postal/lib/python3.6/site-packages/matplotlib/figure.py:2362: UserWarning: This figure includes Axes that are not compatible with tight_layout, so results might be incorrect.
warnings.warn("This figure includes Axes that are not compatible "
###Markdown
Absolute Values
###Code
from matplotlib.ticker import ScalarFormatter
path = "/analysis/ritter/projects/MS/interm_results"
normal_scans_abs = pd.read_csv(os.path.join(path, "evidence_in_area-absolute-TP-normal_images.csv"))
lesions_removed_abs = pd.read_csv(os.path.join(path, "evidence_in_area-absolute-TP-lesions_removed.csv"))
df_abs = pd.merge(normal_scans_abs, lesions_removed_abs, left_on='area',right_on='area',how='outer', suffixes=('_normal_scans', '_lesions_removed'))
#df_abs = df_abs[::-1]
# get order based on relevance sum
order_abs = (df_abs["relevance_normal_scans"] + df_abs["relevance_lesions_removed"]).sort_values().index
order_abs
df_abs
relevance_normal_max = df_abs["relevance_normal_scans"].max()
relevance_removed_max = df_abs["relevance_lesions_removed"].max()
relevance_normal_sum = df_abs["relevance_normal_scans"][df_abs["relevance_normal_scans"]>0].sum()
relevance_removed_sum = df_abs["relevance_lesions_removed"][df_abs["relevance_lesions_removed"]>0].sum()
#relevance_normal_sum = df_abs["relevance_normal_scans"].sum()
#relevance_removed_sum = df_abs["relevance_lesions_removed"].sum()
# normalize by experiment (left and right)
normalize_side = False
if normalize_side:
df_abs["relevance_normal_scans"] /= relevance_normal_sum
df_abs["relevance_lesions_removed"] /= relevance_removed_sum
# take top 10
short_df_abs = df_abs.loc[list(order_abs)].drop(order_abs[:12])
short_df_abs = short_df_abs[::-1]
(0, np.max(short_df_abs['relevance_lesions_removed'])*1.1)
fig, axes = plt.subplots(ncols=2, sharey=True, figsize=(12, 5.5),
gridspec_kw = {'wspace':0, 'hspace':10.})
sns.barplot(y=short_df_abs['area'], x=short_df_abs['relevance_normal_scans'], color='g', ax=axes[0])
axes[0].set_xlabel(' ')
axes[0].set_ylabel(' ')
axes[0].set_xlim(np.max(short_df_abs['relevance_lesions_removed'])*1.1, 0)
axes[0].set_title("With lesions")
axes[0].xaxis.set_major_formatter(ScalarFormatter())
axes[0].ticklabel_format(axis='x', style='sci', scilimits=(0,0))
# hide scientific notation exponent in left half
axes[0].xaxis.get_offset_text().set_visible(False)
sns.barplot(y=short_df_abs['area'], x=short_df_abs['relevance_lesions_removed'], color='orange', ax=axes[1])
axes[1].set_xlabel(' ')
axes[1].set_ylabel(' ')
axes[1].set_xlim(0, np.max(short_df_abs['relevance_lesions_removed'])*1.1)
axes[1].set_title("Without lesions (NABM)")
axes[1].xaxis.set_major_formatter(ScalarFormatter())
axes[1].ticklabel_format(axis='x', style='sci', scilimits=(0,0))
# hide duplicate 0 xtick
axes[1].xaxis.get_major_ticks()[0].label1.set_visible(False)
#plt.tight_layout()
plt.subplots_adjust(left=0.3, right=0.7)
file_path = os.path.join(image_file_dir, "lesion_removal_comparison_absolute_b.png")
#plt.savefig(file_path, format='png', transparent=True)#, bbox_inches='tight')
plt.show()
###Output
_____no_output_____ |
1_transform/stock.ipynb | ###Markdown
API from yfinance Stock Market* Stocks from 1970 through 2018* The S&P 500 (^GSPC), or just the S&P, is a stock market index that measures the stock performance of 500 large companies listed on stock exchanges in the United States.* Remove incomplete rows* Deal with error-prone columns* Drop un-needed columns* Change casing* Save to csv
###Code
from pandas_datareader import data as pdr
import pandas as pd
import yfinance as yf
###Output
_____no_output_____
###Markdown
Pulling S&P 500 stock market index
###Code
yf.pdr_override() # <== that's all it takes :-)
# download dataframe
df = pdr.get_data_yahoo("^GSPC", start="1970-01-01", end="2019-01-01")
###Output
[*********************100%***********************] 1 of 1 completed
###Markdown
length of dataset
###Code
df.count()
###Output
_____no_output_____
###Markdown
Dropping any rows that are missing
###Code
df = df.dropna()
df.count()
###Output
_____no_output_____
###Markdown
Looking at columns
###Code
df.columns
###Output
_____no_output_____
###Markdown
Resetting the Index
###Code
df = df.reset_index()
df.head()
###Output
_____no_output_____
###Markdown
Checking data types
###Code
df.dtypes
###Output
_____no_output_____
###Markdown
Using .dt to extract year only
###Code
df['Date'] = df['Date'].dt.year
df.head()
###Output
_____no_output_____
###Markdown
Group by year and compute the mean
###Code
group_df = df.groupby(df['Date'])
group_df = group_df.mean()
group_df.head()
###Output
_____no_output_____
###Markdown
Checking the row count
###Code
group_df.count()
###Output
_____no_output_____
###Markdown
Drop unwanted column
###Code
group_df = group_df.drop(columns=['Date'])
group_df.head()
###Output
_____no_output_____
###Markdown
Reset the index
###Code
group_df.reset_index(level=['Date'], inplace=True)
group_df.head()
###Output
_____no_output_____
###Markdown
lowercase/rename columns
###Code
group_df = group_df.rename(columns={'Date': 'year','Open':'open','High':'high','Low':'low','Close':'close','Adj Close':'adj_close','Volume':'volume'})
group_df.head()
###Output
_____no_output_____
###Markdown
Create new df that only has year
###Code
year_df = group_df['year']
year_df.head()
year_df = pd.DataFrame(year_df)
year_df.head()
###Output
_____no_output_____
###Markdown
Exporting to a csv file
###Code
year_df.to_csv('../data_transformed/year.csv')
group_df.to_csv('../data_transformed/stock.csv')
###Output
_____no_output_____ |
use-r-and-google-cloud-to-analyze-wikipedia-page-views/Use R and Google Cloud to Analyze Wikipedia Views Data (no GCS R Package).ipynb | ###Markdown
R, BIGQUERY, CLOUD STORAGE SETUP: Install and import necessary R libraries for this notebook, set options
###Code
suppressMessages({
# Install packages that might be not be installed by default
# install.packages('plotly')
# install.packages('furrr')
# install.packages('tictoc')
library(tidyverse)
library(glue)
library(future)
library(furrr)
library(tictoc)
library(bigrquery)
library(DT)
library(plotly)
})
options(tibble.width = Inf)
###Output
_____no_output_____
###Markdown
Enter Google Cloud/BigQuery Project ID in cell below, authenticate BigQuery
###Code
# ENTER YOUR PROJECT ID HERE
PROJECT_ID <- 'gcp-data-science-demo'
bq_auth(use_oob = TRUE)
###Output
_____no_output_____
###Markdown
PICK 2 ATHLETES OF INTEREST AND LOOK AT DAILY WIKIPEDIA VIEWS: Enter names of 2 athletes of interest in the cell below
###Code
# ENTER 2 ATHLETES' NAMES HERE
ATHLETE_1_NAME <- 'LeBron James'
ATHLETE_2_NAME <- 'Patrick Mahomes'
athlete_wiki_page_titles <- c(ATHLETE_1_NAME, ATHLETE_2_NAME) %>%
tolower() %>%
str_replace(" ", "_") %>%
paste0("'", ., "'") %>%
paste0(collapse = ", ") %>%
paste0("(", ., ")")
print(athlete_wiki_page_titles)
###Output
_____no_output_____
###Markdown
Create SQL query to get specific Wikipedia page views using text formatting
###Code
sql_query_with_names_param <- "
SELECT
DATE(datehour, 'America/Los_Angeles') AS date,
LOWER(title) AS page_title,
SUM(views) AS views
FROM
`bigquery-public-data.wikipedia.pageviews_2020`
WHERE
DATE(datehour, 'America/Los_Angeles') >= '2020-01-01' AND
DATE(datehour, 'America/Los_Angeles') <= '2020-12-31' AND
LOWER(title) IN %s AND
wiki IN ('en', 'en.m')
GROUP BY
date, page_title
ORDER BY
date DESC, views DESC
"
sql_query_with_names <- sprintf(sql_query_with_names_param,
athlete_wiki_page_titles)
cat(sql_query_with_names)
###Output
_____no_output_____
###Markdown
Look at page views data with both static and interactive tables
###Code
athlete_views <- bq_project_query(
x = PROJECT_ID,
query = sql_query_with_names
) %>%
bq_table_download()
head(athlete_views, n = 10)
DT::datatable(athlete_views)
###Output
_____no_output_____
###Markdown
Create interactive plot of 2 athletes daily views
###Code
athlete_views_by_date_plot <- ggplot(
data = athlete_views,
aes(
x = date,
y = views,
color = page_title
),
) +
scale_x_date(date_breaks = 'month', date_labels = '%b %d') +
scale_color_manual(values =
c('#552583', '#E31837')
) +
geom_point() +
geom_path() +
ggtitle(
label = paste0('2020 Wikipedia Page Views by Date for ',
paste0(c(ATHLETE_1_NAME, ATHLETE_2_NAME), collapse = ', '))
) +
theme(
axis.text.x = element_text(angle = 45)
)
interactive_athlete_views_by_date_plot <- ggplotly(athlete_views_by_date_plot)
interactive_athlete_views_by_date_plot
###Output
_____no_output_____
###Markdown
Output single plot to Cloud Storage using system commands
###Code
# ENTER CLOUD STORAGE BUCKET AND DESIRED INTERACTIVE PLOT OUTPUT FILE NAME
CLOUD_STORAGE_BUCKET <- 'r-demos'
PLOT_OUTPUT_FILENAME <- "athlete_wiki_views_by_date.html"
htmlwidgets::saveWidget(
widget = interactive_athlete_views_by_date_plot,
file = PLOT_OUTPUT_FILENAME,
selfcontained = T
)
cloud_storage_bucket_url <- paste0("gs://", CLOUD_STORAGE_BUCKET, "/")
cloud_storage_upload_command <- paste("gsutil cp", PLOT_OUTPUT_FILENAME,
cloud_storage_bucket_url)
system(cloud_storage_upload_command)
cloud_storage_check_command <- paste("gsutil ls -l",
cloud_storage_bucket_url)
system(cloud_storage_check_command, intern = TRUE)
###Output
_____no_output_____
###Markdown
BONUS: EXTEND DATA GATHERING TO MANY MORE ATHLETES. Set up for parallel processing in the following steps
###Code
options(future.availableCores.methods = "mc.cores")
options(mc.cores = 64)
plan(multisession)
###Output
_____no_output_____
###Markdown
Read in CSV of top athletes according to YouGov
###Code
top_athletes <- read_csv("top_athletes_by_yougov.csv") %>%
mutate(
page_title = athlete_name %>%
tolower() %>%
str_replace(" ", "_") %>%
paste0(.,
ifelse(is.na(wikipedia_page_add), "",
paste0('_', wikipedia_page_add))
)
) %>%
print(n = 25)
###Output
_____no_output_____
###Markdown
Create template of SQL query to be used to read in single athlete, single year page views
###Code
wiki_views_query_template <- "
SELECT
DATE(datehour, 'America/Los_Angeles') AS date,
LOWER(title) AS page_title,
SUM(views) AS views
FROM
`bigquery-public-data.wikipedia.pageviews_{year}`
WHERE
(DATE(datehour, 'America/Los_Angeles') BETWEEN
'{year}-01-01' AND '{year}-12-31') AND
LOWER(title) = '{page_title}' AND
wiki IN ('en', 'en.m')
GROUP BY
date, page_title
"
###Output
_____no_output_____
###Markdown
Enter years of interest, read in page view data for many athletes in that span (this may take several minutes, depending on years)
###Code
# ENTER START AND END YEARS FOR PAGE VIEW DATA GATHERING
START_YEAR <- 2017
END_YEAR <- 2020
tic("Reading in Wikipedia Page View Data for Many Athletes Across Multiple Years")
many_athlete_multi_year_page_views <- top_athletes %>%
crossing(
year = START_YEAR:END_YEAR
) %>%
mutate(
wiki_views_query_text = glue(wiki_views_query_template)
) %>%
mutate(
wiki_views = future_pmap(
list(query = wiki_views_query_text),
~bq_table_download(bq_project_query(x = PROJECT_ID, query = .))
)
) %>%
print()
toc()
###Output
_____no_output_____
###Markdown
Get unnested version of athlete daily page views data, add daily ranking (among this set)
###Code
athlete_page_views_unnest_with_ranks <- many_athlete_multi_year_page_views %>%
select(year, athlete_name, wiki_views) %>%
unnest(wiki_views) %>%
select(date, athlete_name, views) %>%
group_by(date) %>%
mutate(
date_view_rank = rank(desc(views))
) %>%
ungroup() %>%
arrange(desc(date), date_view_rank)
DT::datatable(athlete_page_views_unnest_with_ranks)
###Output
_____no_output_____
###Markdown
Count of days each athlete ranked No. 1 (among this set) in Wikipedia page views over the span; look at those with the most No. 1 days
###Code
athletes_with_most_days_no1_in_views <- athlete_page_views_unnest_with_ranks %>%
group_by(athlete_name) %>%
summarize(
num_days_no1 = sum(ifelse(date_view_rank == 1, 1, 0))
) %>%
ungroup() %>%
arrange(desc(num_days_no1)) %>%
print(n = 25)
###Output
_____no_output_____ |
Segunda Lista/jupyter notebooks/segunda_lista.ipynb | ###Markdown
> Object-Oriented Programming II> Exercises - Exercise List 2> Student: Mariana dos Santos Dick> Date: Nov/2021 Exercise 1
###Code
'''Exercise 1: Write a program that reads a vector of 5 integers and prints them.'''
vetor = [int(x) for x in input().split()]
for item in vetor:
print(item, end=' ')
###Output
_____no_output_____
###Markdown
Exercise 2
###Code
'''Exercise 2: Write a program that reads a vector of 10 real numbers and prints them in reverse order.'''
vetor = [float(x) for x in input().split()]
vetor.reverse()
for item in vetor:
print(f'{item:.2f}', end=' ')
###Output
_____no_output_____
###Markdown
Exercise 3
###Code
'''Exercise 3: Write a program that reads 4 grades, then prints the grades and the average.'''
notas = []
media = 0
for _ in range(4):
nota = int(input('Digite uma nota: '))
notas.append(str(nota))
media += nota
media = media/4
print('Notas: ', ', '.join(notas))
print(f'Média: {media:.2f}')
###Output
_____no_output_____
###Markdown
Exercise 4
###Code
'''Exercise 4: Write a program that reads a vector of 10 characters and reports how many consonants were read. Print the consonants.'''
#Repeated consonants are counted as well; a set could have been used to count only the unique consonants.
vogais = ['a', 'e', 'i', 'o', 'u']
consoantes_lidas = []
vetor = [str(x).lower() for x in input().split()]
for item in vetor:
if item not in vogais:
consoantes_lidas.append(item)
if len(consoantes_lidas) == 0:
print('Não encontramos nenhuma consoante.')
else:
print(f'As {len(consoantes_lidas)} consoantes lidas foram:')
print(', '.join(consoantes_lidas))
###Output
_____no_output_____
###Markdown
Exercise 5
###Code
'''Exercise 5: Write a program that reads 20 integers and stores them in a vector. Store the even numbers in an EVEN vector and the odd numbers in an ODD vector. Print the three vectors.'''
vetor = [int(x) for x in input().split()]
pares = list(filter(lambda x: x % 2 == 0, vetor))
impares = list(filter(lambda x: x % 2 == 1, vetor))
print(f'Vetor de entrada {vetor}')
print(f'Pares: {pares}')
print(f'Impares: {impares}')
###Output
_____no_output_____
###Markdown
Exercise 6
###Code
'''Exercise 6: Write a program that asks for the four grades of 10 students, computes and stores each student's average in a vector, and prints the number of students with an average greater than or equal to 7.0.'''
count = 0
notas = []
for _ in range(10):
media = 0
for _ in range(4):
nota = float(input('Digite uma nota:'))
media += nota
media = media/4
notas.append(media)
if (media >= 7.0):
count += 1
print(f'Vetor com as notas dos alunos: {notas}')
print(f'Num de alunos que possuem média >= 7.0: {count}')
###Output
_____no_output_____
###Markdown
Exercise 7
###Code
'''Exercise 7: Write a program that reads a vector of 5 integers and prints the sum, the product, and the numbers.'''
import functools
vetor = [int(x) for x in input().split()]
soma = functools.reduce(lambda a, b: a+b, vetor)
mult = functools.reduce(lambda a, b: a*b, vetor)
print(f'Números: {vetor}')
print(f'Soma: {soma}')
print(f'Multiplicação: {mult}')
###Output
_____no_output_____
###Markdown
Exercise 8
###Code
'''Exercise 8: Write a program that asks for the age and height of 5 people, storing each piece of information in its respective vector. Print the ages and heights in the reverse of the order in which they were read.'''
idades = []
alturas = []
for _ in range(5):
altura = float(input('Qual sua altura? '))
alturas.append(altura)
idade = int(input('Qual sua idade? '))
idades.append(idade)
idades.reverse()
alturas.reverse()
print(f'Idades: {idades}')
print(f'Alturas: {alturas}')
###Output
_____no_output_____
###Markdown
Exercise 9
###Code
'''Exercise 9: Write a program that reads a vector A with 10 integers, then computes and prints the sum of the squares of the vector's elements.'''
vetor = [int(x) for x in input().split()]
soma_dos_quadrados = 0
for item in vetor:
soma_dos_quadrados += ((item)**2)
print(f'A soma dos quadrados dos itens do vetor é: {soma_dos_quadrados}')
###Output
_____no_output_____
###Markdown
Exercise 10
###Code
'''Exercise 10: Write a program that reads two vectors with 10 elements each. Generate a third vector of 20 elements whose values are the interleaved elements of the two other vectors.'''
vetor1 = [int(x) for x in input().split()]
vetor2 = [int(x) for x in input().split()]
vetor3 = [*sum(zip(vetor1,vetor2),())]
print(f'Primeiro vetor: {vetor1}')
print(f'Segundo vetor: {vetor2}')
print(f'Vetor intercalado: {vetor3}')
###Output
_____no_output_____ |
notebooks/code-graveyard/6.1.1-modeling-bertweet-sentiment-analysis-Copy1.ipynb | ###Markdown
Modeling: (Monolingual) BERTweet For Sentiment AnalysisFine-tuning a BERTweet model for ISP Tweet Sentiment Prediction**`Brief Discussion:`** Despite the model in my initial [sentiment analysis implementation](https://github.com/KoredeAkande/nigerian_isp_sentiment_analysis/tree/main/notebooks/test-version) struggling with a number of tweets containing Pidgin English, I found that tweets in Pidgin English constituted a minority (about 15%) in a randomly extracted sample from our dataset (SEE HERE). Moreover, the tweets featuring Pidgin English used the language very lightly (i.e. a huge proportion of the tweet was still in grammatically correct English). Thus, I hypothesize that a monolingual English model should not have issues predicting sentiment even on Pidgin English. In this notebook, I finetune a BERTweet model that has already been fine-tuned for sentiment analysis on roughly 40k tweets. Specifically, a BERTweet model is selected for finetuning over other models (e.g. a BERT model trained on Wikipedia) as it gets us closer to our problem domain of tweets, especially English tweets. Thus the model should have learned the informalities of Twitter, and the learning curve should be less steep than in the case of, say, a vanilla BERT model.**`Process:`**- Load the pretrained BERTweet model from the transformers package- Train on an annotated training set of tweets on Nigerian ISPs- Select the best checkpoint using an annotated validation set of tweets on Nigerian ISPs- Predict unseen tweets in a test set**`Results:`** **`Next steps:`** 1. Library Importation
###Code
#Visualization packages
import pandas as pd
import numpy as np
import seaborn as sns
#sns.set()
import matplotlib.pyplot as plt
#Modeling and evaluation packages
import torch
from transformers import BertForSequenceClassification, BertTokenizer, Trainer, TrainingArguments
from transformers import AutoTokenizer, AutoModelForSequenceClassification
from sklearn.metrics import accuracy_score, precision_recall_fscore_support,classification_report
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay, roc_auc_score
from sklearn.model_selection import train_test_split
###Output
_____no_output_____
###Markdown
2. Loading the data
###Code
df = pd.read_csv('../data/processed/sample_encoded_and_cleaned.csv')
df
###Output
_____no_output_____
###Markdown
3. Split data into training & test set **Observing the true distribution of the data**
###Code
df.label.value_counts(normalize=True)
###Output
_____no_output_____
###Markdown
Seeing as the dataset is highly imbalanced, I perform a **stratified train-test split**:
###Code
#Split the data 80:20 and ensure same proportion of classes as original data
#Note: The below gets the indices of the tweets for the training and test set
X_train, X_test, y_train, y_test = train_test_split(df.index.values,
df.label.values,
test_size=0.2,
random_state=42,
stratify=df.label.values)
#Note which tweets were split into the training and test sets so we can see the division
df.loc[X_train, 'split_group'] = 'training_set'
df.loc[X_test, 'split_group'] = 'test_set'
df.groupby(['sentiment', 'label', 'split_group'])[['Text']].count().sort_values('label')
#Plot the proportion of each sentiment class in the training and test sets
#-to confirm the distribution is the same in both sets
with plt.style.context(['notebook','no-latex']):
sns.barplot(x='split_group',
y='percent',
hue='sentiment',
palette = ['tab:red','moccasin','tab:green'],
data=df.groupby(['split_group'])['sentiment'].value_counts(normalize=True).\
rename('percent').\
reset_index())
plt.show()
###Output
_____no_output_____
###Markdown
4. Split training set into training set and validation set
###Code
#Split the training set 75:25 using stratified sampling
#Note: The below gets the indices of the tweets for the training and validation set
X_train, X_val, y_train, y_val = train_test_split(X_train,
y_train,
test_size=0.25,
random_state=1,
stratify=y_train)
#Note which tweets were split into the training and validation sets so we can see the division
df.loc[X_train, 'split_group'] = 'training_set'
df.loc[X_val, 'split_group'] = 'validation_set'
df.groupby(['sentiment', 'label', 'split_group'])[['Text']].count().sort_values('label')
#Plot the proportion of each sentiment class in the training and test sets
#-to confirm the distribution is the same in both sets
with plt.style.context(['notebook','no-latex']):
sns.barplot(x='split_group',
y='percent',
hue='sentiment',
palette = ['tab:red','moccasin','tab:green'],
data=df.groupby(['split_group'])['sentiment'].value_counts(normalize=True).\
rename('percent').\
reset_index())
plt.show()
df.groupby('split_group')['Text'].count()
###Output
_____no_output_____
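###Markdown
As a numerical complement to the bar plots, the class proportions within each split can also be checked directly (an optional sanity check; it is not required for the modeling below).
###Code
# Class proportions within each split should be (nearly) identical
df.groupby('split_group')['sentiment'].value_counts(normalize=True)
###Output
_____no_output_____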
###Markdown
5. Modeling & Evaluation – BerTweet
###Code
#Load BerTweet tokenizer
tokenizer = AutoTokenizer.from_pretrained("finiteautomata/bertweet-base-sentiment-analysis", normalization=True)
class Generate_PyTorch_Dataset(torch.utils.data.Dataset):
"""
Code adapted from NLPiation. (2021).
Is it possible to do Sentiment Analysis on unlabeled data using BERT? (Feat. Vader) [Experiment].
https://nlpiation.medium.com/is-it-possible-to-do-sentiment-analysis-on-unlabeled-data-using-bert-feat-vader-experiment-357bba53768c
"""
def __init__(self, texts, labels, tokenizer):
self.texts = texts
self.labels = labels
self.tokenizer = tokenizer
self.max_len = tokenizer.model_max_length
def __len__(self):
return len(self.texts)
def __getitem__(self, idx):
text = str(self.texts[idx])
labels = self.labels[idx]
encoded_text = self.tokenizer.encode_plus(
text,
add_special_tokens = True,
truncation = True,
return_attention_mask = True,
return_token_type_ids = False,
max_length = self.max_len,
return_tensors = 'pt',
padding = "max_length"
)
return {
'input_ids': encoded_text['input_ids'][0],
'attention_mask': encoded_text['attention_mask'][0],
'labels': torch.tensor(labels, dtype=torch.long)
}
def compute_metrics(eval_pred):
"""
Function to compute accuracy metrics
Input:
- eval_pred (tuple): Tuple containing the model predictions and targets to be matched
in the form: (predictions, targets)
Output:
- (dict): Dictionary containing different accuracy-related metrics
"""
#Get the predicted labels and the true labels
logits, labels = eval_pred
predictions = np.argmax(logits, axis=-1)
#Compute precision, recall, f1 and accuracy score
#Micro averaging is used here (aggregating over all samples); for single-label multiclass data this makes precision, recall and F1 all equal to accuracy
precision, recall, f1, _ = precision_recall_fscore_support(labels, predictions, average='micro')
accuracy = accuracy_score(labels, predictions)
return {
'f1': f1,
'precision': precision,
'recall': recall,
'accuracy': accuracy}
###Output
loading configuration file https://huggingface.co/finiteautomata/bertweet-base-sentiment-analysis/resolve/main/config.json from cache at /Users/koredeakande/.cache/huggingface/transformers/cb09766f7ba60b5f7a1bb640617b24f1499c4a6f3ab160c4a0ac171e3a377c68.008dca06003188334001a96363da79ced4944abc68d94a2f1e0db786dc5aa08b
Model config RobertaConfig {
"_name_or_path": "vinai/bertweet-base",
"architectures": [
"RobertaForSequenceClassification"
],
"attention_probs_dropout_prob": 0.1,
"bos_token_id": 0,
"eos_token_id": 2,
"gradient_checkpointing": false,
"hidden_act": "gelu",
"hidden_dropout_prob": 0.1,
"hidden_size": 768,
"id2label": {
"0": "NEG",
"1": "NEU",
"2": "POS"
},
"initializer_range": 0.02,
"intermediate_size": 3072,
"label2id": {
"NEG": 0,
"NEU": 1,
"POS": 2
},
"layer_norm_eps": 1e-05,
"max_position_embeddings": 130,
"model_type": "roberta",
"num_attention_heads": 12,
"num_hidden_layers": 12,
"pad_token_id": 1,
"position_embedding_type": "absolute",
"problem_type": "single_label_classification",
"tokenizer_class": "BertweetTokenizer",
"transformers_version": "4.8.0",
"type_vocab_size": 1,
"use_cache": true,
"vocab_size": 64001
}
loading file https://huggingface.co/finiteautomata/bertweet-base-sentiment-analysis/resolve/main/vocab.txt from cache at /Users/koredeakande/.cache/huggingface/transformers/973dbacfdf4c488622f01d1a226089e9e3dba130a0c3c11c2e36d49466fa40a8.f8a4dfe5c3c45a26f9df849d732decb191dc0c05ab270799695430332d143982
loading file https://huggingface.co/finiteautomata/bertweet-base-sentiment-analysis/resolve/main/bpe.codes from cache at /Users/koredeakande/.cache/huggingface/transformers/0e474c44ff353f3b378fb140e7e6d4431df4ec6142e8b38d584c0dbc5afc3521.75877d86011e5d5d46614d3a21757b705e9d20ed45a019805d25159b4837b0a4
loading file https://huggingface.co/finiteautomata/bertweet-base-sentiment-analysis/resolve/main/added_tokens.json from cache at /Users/koredeakande/.cache/huggingface/transformers/fe46927817477a58ec2aa92ef52f8ee6fc9e824d054f4aa6a3c129724dc9c9b7.c1e7052e39d2135302ec27455f6db22e1520e6539942ff60a849c7f83f8ec6dc
loading file https://huggingface.co/finiteautomata/bertweet-base-sentiment-analysis/resolve/main/special_tokens_map.json from cache at /Users/koredeakande/.cache/huggingface/transformers/9413ac0bed76140860deffa0c5a29ee4da7d49a3810da1b4b51b27f790bc9255.0dc5b1041f62041ebbd23b1297f2f573769d5c97d8b7c28180ec86b8f6185aa8
loading file https://huggingface.co/finiteautomata/bertweet-base-sentiment-analysis/resolve/main/tokenizer_config.json from cache at /Users/koredeakande/.cache/huggingface/transformers/61374b71c02fdfd2929a3cdce24c242049e036624e15e18461a3a70cfc35e939.c260b44e952f7f2a825aac395f2ebbed4ac9553800d1e320af246e81a548f37c
loading file https://huggingface.co/finiteautomata/bertweet-base-sentiment-analysis/resolve/main/tokenizer.json from cache at None
Adding <mask> to the vocabulary
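###Markdown
Note that `compute_metrics` above uses micro averaging. A macro-averaged variant, sketched below but not used in the runs that follow, weights every class equally and makes performance on the rare positive class more visible.
###Code
def compute_macro_metrics(eval_pred):
    """Macro-averaged variant of compute_metrics (illustrative; not used in the runs below)."""
    logits, labels = eval_pred
    predictions = np.argmax(logits, axis=-1)

    # Macro averaging treats all classes equally, regardless of their frequency
    precision, recall, f1, _ = precision_recall_fscore_support(labels, predictions, average='macro')

    return {'macro_f1': f1,
            'macro_precision': precision,
            'macro_recall': recall,
            'accuracy': accuracy_score(labels, predictions)}
###Output
_____no_output_____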
###Markdown
a. Encode the training and validation sets
###Code
#Prepare the Training and validation sets
train_set_dataset = Generate_PyTorch_Dataset(
texts = df.query("split_group == 'training_set' ").Text.tolist(),
labels = df.query("split_group == 'training_set' ").label.tolist(),
tokenizer = tokenizer
)
val_set_dataset = Generate_PyTorch_Dataset(
texts = df.query("split_group == 'validation_set' ").Text.tolist(),
labels = df.query("split_group == 'validation_set' ").label.tolist(),
tokenizer = tokenizer
)
###Output
_____no_output_____
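###Markdown
Before training, it helps to peek at one encoded example to confirm the tokenizer and the dataset wrapper behave as expected (an optional check).
###Code
# Inspect a single encoded training example
sample = train_set_dataset[0]
print(sample['input_ids'].shape, sample['attention_mask'].shape, sample['labels'])

# Decode back to text to verify the encoding round-trips sensibly
print(tokenizer.decode(sample['input_ids'], skip_special_tokens=True))
###Output
_____no_output_____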
###Markdown
b. Load Original Pretrained BERTweet model
###Code
#Load the original pretrained BERTweet model
bt_model = AutoModelForSequenceClassification.from_pretrained("finiteautomata/bertweet-base-sentiment-analysis")
###Output
_____no_output_____
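###Markdown
Since fine-tuning reuses this checkpoint's classification head, its label ordering must line up with the dataset's label encoding; a quick check is worthwhile (the config logged above maps 0, 1, 2 to NEG, NEU, POS).
###Code
# The model's id2label mapping should agree with the dataset's label encoding
print(bt_model.config.id2label)
df[['sentiment', 'label']].drop_duplicates().sort_values('label')
###Output
_____no_output_____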
###Markdown
• Evaluating the model: How well does it classify tweets? **Training set (Note: The model was never exposed to these tweets!)**
###Code
bt_training_args = TrainingArguments(
output_dir = "../model_predictions",
do_predict = True
)
bt_trainer = Trainer(
model = bt_model,
args = bt_training_args,
compute_metrics = compute_metrics,
)
bt_train_preds = bt_trainer.predict(train_set_dataset, metric_key_prefix='train')
pd.DataFrame([bt_train_preds[-1]])
###Output
_____no_output_____
###Markdown
**Validation set (Note: The model was never exposed to these tweets!)**
###Code
bt_val_preds = bt_trainer.predict(val_set_dataset, metric_key_prefix='validation')
pd.DataFrame([bt_val_preds[-1]])
###Output
_____no_output_____
###Markdown
Comparing the pretrained BerTweet to the M-BERT model (ADD LINK), we see that it already starts out predicting the training and validation sets better than the M-BERT was able to achieve even after training (above metrics around 65%). This indicates that our decision to finetune a pretrained model closer to the problem domain (Twitter tweets) was beneficial despite the model being monolingual (again, this is also helped by the fact that the tweets on Nigerian ISPs were pretty much in English).The model does great right out of the box! Can we do even better by fine-tuning? c. Fine-tuning pretrained BERTweet using Huggingface Trainer 5(i) Baseline BerTweet`Default Huggingface training configuration`
###Code
#Load the model
bbt_model = AutoModelForSequenceClassification.from_pretrained("finiteautomata/bertweet-base-sentiment-analysis")
#Define the training parameters
bbt_training_args = TrainingArguments(
output_dir = "../models/bertweet/baseline-bertweet",
num_train_epochs = 10,
save_strategy = "epoch",
logging_strategy = "epoch",
evaluation_strategy = "epoch"
)
#Define Trainer object
bbt_trainer = Trainer(
model = bbt_model,
args = bbt_training_args,
train_dataset = train_set_dataset,
eval_dataset = val_set_dataset,
compute_metrics = compute_metrics
)
bbt_trainer.train()
###Output
***** Running training *****
Num examples = 225
Num Epochs = 10
Instantaneous batch size per device = 8
Total train batch size (w. parallel, distributed & accumulation) = 8
Gradient Accumulation steps = 1
Total optimization steps = 290
###Markdown
• Plot train and validation metrics
###Code
#Get the baseline bertweet's training logs
bbt_history = pd.DataFrame(bbt_trainer.state.log_history)
bbt_history.to_csv("../models/logs/baseline_bertweet_logs.csv",index=False)
bbt_history = bbt_history.fillna(method='bfill', limit=1).drop_duplicates(subset=['epoch','eval_loss'])
plt.figure(figsize=(15,9))
plt.tight_layout()
metrics = ['eval_loss','eval_f1','eval_precision','eval_recall','eval_accuracy','eval_runtime']
for idx, metric in enumerate(metrics):
with plt.style.context(['grid']):
ax = plt.subplot(2,3,idx+1).plot(bbt_history['epoch'],
bbt_history[metric],
label='validation')
#Also plot training loss when plotting validation loss
if metric == 'eval_loss':
ax = plt.subplot(2,3,idx+1).plot(bbt_history['epoch'],
bbt_history['loss'],
color = ax[0].get_color(),
linestyle= "--",
label='training')
plt.xlabel('Epoch')
plt.ylabel(metric.replace('eval_','').capitalize())
plt.legend()
###Output
_____no_output_____
###Markdown
• Validation Results Discussion: Comparing the baseline BerTweet to the M-BERT model (ADD LINK), we see that it already starts out predicting our samples better (metrics around 69%) than the M-BERT was able to achieve even after training (above metrics around 65%). This confirms that our decision to finetune a pretrained model closer to the problem domain (Twitter tweets) was beneficial despite the model being monolingual (again, this is also helped by the fact that the tweets on Nigerian ISPs were pretty much in English).The best model, corresponding to that from epoch 7 (**checkpoint-203**) had the following validation set results:- **Validation Loss:** 0.810084- **F1:** 0.829649- **Precision:** 0.819307- **Recall:** 0.843956- **Accuracy:** 0.855263The validation loss not really dropping while the training loss does might indicate that the model is overfitting the data. We contrast both the training and validation performance below and also plot a confusion matrix and classification report to get more context into the model's performance: • [TRAIN] Evaluating training set performance – baseline BERTweet
###Code
#Load the best baseline bertweet model
bbt_model = AutoModelForSequenceClassification.from_pretrained("../models/bertweet/baseline-bertweet/checkpoint-203")
bbt_training_args = TrainingArguments(
output_dir = "../model_predictions",
do_predict = True
)
bbt_trainer = Trainer(
model = bbt_model,
args = bbt_training_args,
compute_metrics = compute_metrics,
)
bbt_train = bbt_trainer.predict(train_set_dataset, metric_key_prefix= 'train')
pd.DataFrame([bbt_train[-1]])
###Output
_____no_output_____
###Markdown
• [VALIDATION] Evaluating validation set performance – baseline BERTweet
###Code
bbt_validation = bbt_trainer.predict(val_set_dataset)
pd.DataFrame([bbt_validation[-1]])
y_true_val = df.query("split_group == 'validation_set' ").label.tolist()
bbt_val_preds = np.argmax(bbt_validation[0], axis=1).flatten()
print(classification_report(y_true_val, bbt_val_preds))
#Generate the confusion matrix
bbt_cm = confusion_matrix(y_true_val, bbt_val_preds)
bbt_disp = ConfusionMatrixDisplay(confusion_matrix=bbt_cm, display_labels = ['Negative', 'Neutral', 'Positive'])
#Plot the confusion matrix
with plt.style.context(['notebook','no-latex']):
bbt_disp.plot(cmap='Blues',ax=None)
plt.grid(False)
plt.show()
###Output
_____no_output_____
###Markdown
• View misclassified tweets
###Code
valid_df = df.query("split_group == 'validation_set' ")
valid_df['model_preds'] = bbt_val_preds
misclassified = valid_df[valid_df.label != valid_df.model_preds][['Text','sentiment','model_preds']]
misclassified.model_preds = misclassified.model_preds.map({0:'Negative', 1: 'Neutral', 2: 'Positive'})
with pd.option_context('display.max_colwidth', None):
display(misclassified)
###Output
/Users/koredeakande/opt/anaconda3/envs/capstone/lib/python3.7/site-packages/ipykernel_launcher.py:2: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead
See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy
###Markdown
- Some of the misclassified sentences could indeed (objectively) be interpreted as the model predicts. But since they do not speak specifically to the ISP's performance or service but more so the actions or attitudes of the user, I classify them otherwise e.g. I'm not going to subscribe because I'm waiting for double data (as in sentence 78). Not subscribing indeed has a negative connotation but the user follows up by stating it is because they are waiting to take advantage of a promo and not necessarily because the ISP's performance is bad.- Seems to struggle to detect the negative sentiments towards the named entity e.g. Spectranet (sentence 108).- Might be struggling with the pidgin English in the tweets (see sentences 222 and 261)- Possible that since a high proportion of the tweets containing the ISPs names are negative, it automatically relates tweets with no other strongly suggestive words except the ISPs names to 'Negative' e.g. sentences 360 and 372 • Conclusion Pros- Does a great job predicting all the classes Cons- Has minor challenges differentiating some positive and negative tweets - Lower predictive performance on positive samples. We note however that there were very few positive samples in the validation set and it was able to recall 83% and predict 71% correctly. DID FINETUNING ACTUALLY IMPROVE THE MODEL? How might we improve predictive performance, especially on positive samples?- We note that the default batch size of 8 was used. Hence it is very possible that the model trained on numerous batches which didn't have a positive sample. We could try increasing the batch size to increase the chance of the model training on and learning positive samples- We could also weight positive samples higher so that the model pays more attention to them- Similar to the first in trying to increase the chances of training on positive samples, we could try oversampling (i.e. resampling) positive tweets. --- (ii) Modified BerTweet (v1)**`Loss function reweighting` | `Increased batch size`**
###Code
class CustomTrainer(Trainer):
"""
Class weighted trainer to account for imbalance
"""
def __init__(self, no_of_classes, samples_per_cls, *args, **kwargs):
super().__init__(*args, **kwargs)
self.class_weights = self.get_ens_weights(no_of_classes, samples_per_cls)
def get_ens_weights(self, no_of_classes,samples_per_cls,beta=0.99):
"""
Compute class weights using effective number of samples strategy
"""
effective_num = 1.0 - np.power(beta, samples_per_cls)
weights = (1.0 - beta) / np.array(effective_num)
weights = weights / np.sum(weights) * no_of_classes
weights = torch.tensor(weights.astype(np.float32))
return weights
def compute_loss(self, model, inputs, return_outputs=False):
labels = inputs.get("labels")
#Forward pass
outputs = model(**inputs)
logits = outputs.get('logits')
#Compute custom loss
loss_fct = torch.nn.CrossEntropyLoss(weight= self.class_weights)
loss = loss_fct(logits.view(-1, self.model.config.num_labels), labels.view(-1))
return (loss, outputs) if return_outputs else loss
#Define the training parameters
lrbt_training_args = TrainingArguments(
output_dir = "../models/bertweet/loss-reweighted",
num_train_epochs = 20,
per_device_train_batch_size = 32, #Increase training batch size to ensure that each batch has a decent chance of containing a few positive samples.
save_steps = 20,
seed = 123,
logging_strategy = "epoch",
evaluation_strategy = "epoch"
)
#Define Trainer object
lrbt_trainer = CustomTrainer(
no_of_classes = len(df.label.unique()),
samples_per_cls = df.label.value_counts(sort=False).to_list(),
model = bt_model,
args = lrbt_training_args,
train_dataset = train_set_dataset,
eval_dataset = val_set_dataset,
compute_metrics = compute_metrics
)
#Start fine-tuning!
lrbt_trainer.train()
#Continue training
lrbt_trainer.train("../models/bertweet/loss-reweighted/checkpoint-80")
###Output
Loading model from ../models/bertweet/loss-reweighted/checkpoint-80).
***** Running training *****
Num examples = 225
Num Epochs = 20
Instantaneous batch size per device = 32
Total train batch size (w. parallel, distributed & accumulation) = 32
Gradient Accumulation steps = 1
Total optimization steps = 160
Continuing training from checkpoint, will skip to saved global_step
Continuing training from epoch 10
Continuing training from global step 80
Will skip the first 10 epochs then the first 0 batches in the first epoch. If this takes a lot of time, you can add the `--ignore_data_skip` flag to your launch command, but you will resume the training on data already seen by your model.
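###Markdown
To make the effective-number reweighting in `CustomTrainer` concrete, the sketch below applies the same formula to hypothetical class counts (the real counts come from `df.label.value_counts()`); rarer classes end up with larger loss weights.
###Code
# Illustrative only: hypothetical class counts, same formula as get_ens_weights
counts = np.array([180, 90, 30])
beta = 0.99
effective_num = 1.0 - np.power(beta, counts)
weights = (1.0 - beta) / effective_num
weights = weights / weights.sum() * len(counts)
print(weights)  # the rarest class receives the largest weight
###Output
_____no_output_____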
###Markdown
• Plot train and validation metrics
###Code
#Get the baseline bertweet's training logs
lrbt_history = pd.DataFrame(lrbt_trainer.state.log_history)
lrbt_history.to_csv("../models/logs/loss_reweighted_bertweet_logs.csv",index=False)
lrbt_history = lrbt_history.fillna(method='bfill', limit=1).drop_duplicates(subset=['epoch','eval_loss'])
plt.figure(figsize=(15,9))
#plt.tight_layout()
metrics = ['eval_loss','eval_f1','eval_precision','eval_recall','eval_accuracy','eval_runtime']
for idx, metric in enumerate(metrics):
with plt.style.context(['grid']):
plt.tight_layout()
ax = plt.subplot(3,2,idx+1).plot(lrbt_history['epoch'],
lrbt_history[metric],
label='validation')
#Also plot training loss when plotting validation loss
if metric == 'eval_loss':
ax = plt.subplot(3,2,idx+1).plot(lrbt_history['epoch'],
lrbt_history['loss'],
color = ax[0].get_color(),
linestyle= "--",
label='training')
plt.xlabel('Epoch', fontsize=12)
plt.ylabel(metric.replace('eval_','').capitalize(),fontsize=12)
plt.legend()
###Output
_____no_output_____
###Markdown
The metric I prioritize for model evaluation is F1, given it tries to balance precision and recall; I also consider precision and recall individually. Based on this, the best model/checkpoint from our training procedure is checkpoint-15, outperforming all other checkpoints. Given this was the first epoch, the model had been exposed to very little data and was essentially still the pretrained BerTweet model. This suggests that training actually worsened its classification performance. • Determine the best checkpoint
###Code
from glob import glob
import re
#List to store validation set results for the different checkpoints
val_results = []
#Iterate through all the subfolders in the bertweet directory
for folder in glob('../models/bertweet/loss-reweighted/*/'):
#If it is a model save checkpoint
if 'checkpoint' in folder:
#Load the checkpoint
val_model = AutoModelForSequenceClassification.from_pretrained(folder)
lrbt_val_training_args = TrainingArguments(
output_dir = "../model_predictions",
do_predict = True)
lrbt_val_trainer = Trainer(
model = val_model,
args = lrbt_val_training_args,
compute_metrics = compute_metrics)
model_preds_and_results = lrbt_val_trainer.predict(val_set_dataset)
val_results.append(model_preds_and_results[-1])
lrbt_results_df = pd.DataFrame(val_results)
lrbt_results_df.insert(0,'model_checkpoint',[re.search('checkpoint-\d+',checkpoint)[0] for checkpoint in (glob('../models/bertweet/loss-reweighted/*/')) if 'checkpoint' in checkpoint])
lrbt_results_df
###Output
_____no_output_____
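###Markdown
Rather than eyeballing the table, the checkpoints can also be ranked programmatically; the sketch below assumes the default `test_` prefix that `Trainer.predict` attaches to the metric keys returned by `compute_metrics`.
###Code
# Rank checkpoints by F1 (descending), breaking ties with the loss (ascending)
lrbt_results_df.sort_values(['test_f1', 'test_loss'], ascending=[False, True]).head()
###Output
_____no_output_____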
###Markdown
Checkpoint-100 appears to have performed the best. The differentiating factor, aside from the classification metrics (on which it tied with a significant number of other checkpoints), was the test loss. • Load the best checkpoint
###Code
#Load the model
model = AutoModelForSequenceClassification.from_pretrained("../models/bertweet/checkpoint-100")
#Prepare the held-out test set (mirrors the training/validation set preparation above)
test_set_dataset = Generate_PyTorch_Dataset(
texts = df.query("split_group == 'test_set' ").Text.tolist(),
labels = df.query("split_group == 'test_set' ").label.tolist(),
tokenizer = tokenizer
)
training_args = TrainingArguments(output_dir = "../model_predictions", do_predict = True)
trainer = Trainer(
model = model,
args = training_args,
compute_metrics = compute_metrics,
)
test_preds = trainer.predict(test_set_dataset)
###Output
loading configuration file ../models/bertweet/checkpoint-100/config.json
Model config RobertaConfig {
"_name_or_path": "finiteautomata/bertweet-base-sentiment-analysis",
"architectures": [
"RobertaForSequenceClassification"
],
"attention_probs_dropout_prob": 0.1,
"bos_token_id": 0,
"eos_token_id": 2,
"gradient_checkpointing": false,
"hidden_act": "gelu",
"hidden_dropout_prob": 0.1,
"hidden_size": 768,
"id2label": {
"0": "NEG",
"1": "NEU",
"2": "POS"
},
"initializer_range": 0.02,
"intermediate_size": 3072,
"label2id": {
"NEG": 0,
"NEU": 1,
"POS": 2
},
"layer_norm_eps": 1e-05,
"max_position_embeddings": 130,
"model_type": "roberta",
"num_attention_heads": 12,
"num_hidden_layers": 12,
"pad_token_id": 1,
"position_embedding_type": "absolute",
"problem_type": "single_label_classification",
"tokenizer_class": "BertweetTokenizer",
"transformers_version": "4.8.0",
"type_vocab_size": 1,
"use_cache": true,
"vocab_size": 64001
}
loading weights file ../models/bertweet/checkpoint-100/pytorch_model.bin
All model checkpoint weights were used when initializing RobertaForSequenceClassification.
All the weights of RobertaForSequenceClassification were initialized from the model checkpoint at ../models/bertweet/checkpoint-100.
If your task is similar to the task the model of the checkpoint was trained on, you can already use RobertaForSequenceClassification for predictions without further training.
***** Running Prediction *****
Num examples = 76
Batch size = 8
###Markdown
• Evaluating the model: How well does it classify tweets?
###Code
pd.DataFrame([test_preds[-1]])
y_true = df.query("split_group == 'test_set' ").label.tolist()
preds = np.argmax(test_preds[0], axis=1).flatten()
print(classification_report(y_true, preds))
cm = confusion_matrix(y_true, preds)
disp = ConfusionMatrixDisplay(confusion_matrix=cm, display_labels = ['Negative', 'Neutral', 'Positive'])
disp.plot(cmap='Blues',ax=None)
plt.grid(False)
plt.show()
###Output
_____no_output_____
###Markdown
Comparing the classification report and the confusion matrix above to those of the other BerTweet models, we see that this model improved on all fronts. Not only did it have better validation set metrics (around 85% for F1, precision, recall, etc.), it also performed better on the test set.Specifically, we see improved performance in predicting and distinguishing all classes. We again, however, note that the positive samples were very few in the test set.These results suggest that loss function reweighting and increasing the batch size were beneficial for the training process.Next, I experiment with oversampling the positive class to see if it yields even better results --- (iii) Modified BerTweet (v2)`Oversampling/resampling positive class` • Oversample the positive samples to the size of the negative class
###Code
#Make a copy of the training data for oversampling and drop all positive samples
#We DO NOT oversample the validation set as we want it to mirror the true class distribution
#and be similar to the test data
training_df = df.query(" split_group == 'training_set' ")
oversampled_df = training_df.copy().query(" sentiment != 'Positive' ")
#Oversample the positive samples to the size of the negative (majority) class
pos_samples = training_df.query("sentiment == 'Positive'").sample(training_df.\
sentiment.value_counts()['Negative'],
replace=True)
#Merge the positive samples, shuffle, and reset index
oversampled_df = oversampled_df.append(pos_samples).sample(frac=1).reset_index(drop=True)
###Output
_____no_output_____
###Markdown
• Quick preview of the resampled data
###Code
oversampled_df
#Check the class proportions
oversampled_df.sentiment.value_counts(normalize=True)
#Plot the proportion of each sentiment class in the training set
with plt.style.context(['notebook','no-latex']):
sns.barplot(x='split_group',
y='percent',
hue='sentiment',
palette = ['tab:red','tab:green','moccasin'],
data=oversampled_df.groupby(['split_group'])['sentiment'].value_counts(normalize=True).\
rename('percent').\
reset_index())
plt.show()
###Output
_____no_output_____
###Markdown
a. Encode the training and validation sets
###Code
#Prepare the Training and validation sets
oversampled_train_dataset = Generate_PyTorch_Dataset(
texts = oversampled_df.query("split_group == 'training_set' ").Text.tolist(),
labels = oversampled_df.query("split_group == 'training_set' ").label.tolist(),
tokenizer = tokenizer
)
val_set_dataset = Generate_PyTorch_Dataset(
texts = df.query("split_group == 'validation_set' ").Text.tolist(),
labels = df.query("split_group == 'validation_set' ").label.tolist(),
tokenizer = tokenizer
)
#Load the model
oversampled_model = AutoModelForSequenceClassification.from_pretrained("finiteautomata/bertweet-base-sentiment-analysis")
#Define the training parameters
oversampling_training_args = TrainingArguments(
output_dir = "../models/bertweet/after-oversampling",
num_train_epochs = 10,
per_device_eval_batch_size = 16,
save_strategy = "epoch",
evaluation_strategy = "epoch",
seed = 124
)
#Define Trainer object
oversampling_trainer = Trainer(
model = oversampled_model,
args = oversampling_training_args,
train_dataset = oversampled_train_dataset,
eval_dataset = val_set_dataset,
compute_metrics = compute_metrics
)
oversampling_trainer.train()
#Get the training logs
oversampled_history = pd.DataFrame(oversampling_trainer.state.log_history)
oversampled_history.to_csv("../models/logs/oversampled_logs.csv",index=False)
plt.figure(figsize=(15,9))
plt.tight_layout()
metrics = ['eval_loss','eval_f1','eval_precision','eval_recall','eval_accuracy','eval_runtime']
for idx, metric in enumerate(metrics):
with plt.style.context(['grid']):
plt.tight_layout()
plt.subplot(2,3,idx+1).plot(oversampled_history['epoch'],
oversampled_history[metric],
label='validation')
plt.xlabel('Epoch')
plt.ylabel(metric.replace('eval_','').capitalize())
plt.legend()
###Output
_____no_output_____
###Markdown
Recall actually dropped during training. However, precision improved: when the model did predict a given sentiment class, it was more often correct • Load the best checkpoint
###Code
#Load the model
best_oversampled = AutoModelForSequenceClassification.from_pretrained("../models/bertweet/after-oversampling/checkpoint-294")
training_args = TrainingArguments(
output_dir = "../model_predictions",
do_predict = True
)
trainer = Trainer(
model = best_oversampled,
args = training_args,
compute_metrics = compute_metrics
)
# Make the test set ready
test_set_dataset = Generate_PyTorch_Dataset(
texts = df.query("split_group == 'test_set' ").Text.tolist(),
labels = df.query("split_group == 'test_set' ").label.tolist(),
tokenizer = tokenizer
)
oversampled_test_preds = trainer.predict(test_set_dataset)
###Output
loading configuration file ../models/bertweet/after-oversampling/checkpoint-294/config.json
Model config RobertaConfig {
"_name_or_path": "finiteautomata/bertweet-base-sentiment-analysis",
"architectures": [
"RobertaForSequenceClassification"
],
"attention_probs_dropout_prob": 0.1,
"bos_token_id": 0,
"eos_token_id": 2,
"gradient_checkpointing": false,
"hidden_act": "gelu",
"hidden_dropout_prob": 0.1,
"hidden_size": 768,
"id2label": {
"0": "NEG",
"1": "NEU",
"2": "POS"
},
"initializer_range": 0.02,
"intermediate_size": 3072,
"label2id": {
"NEG": 0,
"NEU": 1,
"POS": 2
},
"layer_norm_eps": 1e-05,
"max_position_embeddings": 130,
"model_type": "roberta",
"num_attention_heads": 12,
"num_hidden_layers": 12,
"pad_token_id": 1,
"position_embedding_type": "absolute",
"problem_type": "single_label_classification",
"tokenizer_class": "BertweetTokenizer",
"transformers_version": "4.8.0",
"type_vocab_size": 1,
"use_cache": true,
"vocab_size": 64001
}
loading weights file ../models/bertweet/after-oversampling/checkpoint-294/pytorch_model.bin
All model checkpoint weights were used when initializing RobertaForSequenceClassification.
All the weights of RobertaForSequenceClassification were initialized from the model checkpoint at ../models/bertweet/after-oversampling/checkpoint-294.
If your task is similar to the task the model of the checkpoint was trained on, you can already use RobertaForSequenceClassification for predictions without further training.
PyTorch: setting up devices
The default value for the training argument `--report_to` will change in v5 (from all installed integrations to none). In v5, you will need to use `--report_to all` to get the same behavior as now. You should start updating your code and make this info disappear :-).
***** Running Prediction *****
Num examples = 76
Batch size = 8
###Markdown
• Evaluating the model: How well does it classify tweets?
###Code
pd.DataFrame([oversampled_test_preds[-1]])
###Output
_____no_output_____
###Markdown
The model does a lot worse on the test set!
###Code
y_true = df.query("split_group == 'test_set' ").label.tolist()
oversampled_preds = np.argmax(oversampled_test_preds[0], axis=1).flatten()
print(classification_report(y_true, oversampled_preds))
###Output
precision recall f1-score support
0 0.80 0.80 0.80 44
1 0.72 0.81 0.76 26
2 0.67 0.33 0.44 6
accuracy 0.76 76
macro avg 0.73 0.65 0.67 76
weighted avg 0.76 0.76 0.76 76
###Markdown
From the report above, this is driven by poorer predictive performance on the positive samples
###Code
oversampled_cm = confusion_matrix(y_true, oversampled_preds)
oversampled_disp = ConfusionMatrixDisplay(confusion_matrix=oversampled_cm,
display_labels = ['Negative', 'Neutral', 'Positive'])
with plt.style.context(['notebook','no-latex']):
oversampled_disp.plot(cmap='Blues',ax=None)
plt.grid(False)
plt.show()
#Load the model
oversampled_model = AutoModelForSequenceClassification.from_pretrained("finiteautomata/bertweet-base-sentiment-analysis")
#Define the training parameters
training_args = TrainingArguments(
output_dir = "../models/bertweet/after-oversampling",
num_train_epochs = 20,
per_device_train_batch_size = 32, #Increase training batch size to ensure that each batch has a decent chance of containing a few positive samples.
per_device_eval_batch_size = 16,
save_steps = 20,
evaluation_strategy = "epoch"
)
#Define Trainer object
trainer = Trainer(
model = oversampled_model,
args = training_args,
train_dataset = train_set_dataset,
eval_dataset = val_set_dataset,
compute_metrics = compute_metrics
)
trainer.train()
###Output
loading configuration file https://huggingface.co/finiteautomata/bertweet-base-sentiment-analysis/resolve/main/config.json from cache at /Users/koredeakande/.cache/huggingface/transformers/cb09766f7ba60b5f7a1bb640617b24f1499c4a6f3ab160c4a0ac171e3a377c68.008dca06003188334001a96363da79ced4944abc68d94a2f1e0db786dc5aa08b
Model config RobertaConfig {
"_name_or_path": "vinai/bertweet-base",
"architectures": [
"RobertaForSequenceClassification"
],
"attention_probs_dropout_prob": 0.1,
"bos_token_id": 0,
"eos_token_id": 2,
"gradient_checkpointing": false,
"hidden_act": "gelu",
"hidden_dropout_prob": 0.1,
"hidden_size": 768,
"id2label": {
"0": "NEG",
"1": "NEU",
"2": "POS"
},
"initializer_range": 0.02,
"intermediate_size": 3072,
"label2id": {
"NEG": 0,
"NEU": 1,
"POS": 2
},
"layer_norm_eps": 1e-05,
"max_position_embeddings": 130,
"model_type": "roberta",
"num_attention_heads": 12,
"num_hidden_layers": 12,
"pad_token_id": 1,
"position_embedding_type": "absolute",
"problem_type": "single_label_classification",
"tokenizer_class": "BertweetTokenizer",
"transformers_version": "4.8.0",
"type_vocab_size": 1,
"use_cache": true,
"vocab_size": 64001
}
loading weights file https://huggingface.co/finiteautomata/bertweet-base-sentiment-analysis/resolve/main/pytorch_model.bin from cache at /Users/koredeakande/.cache/huggingface/transformers/2e4719cf8d097772eb75070b88cbc56f1d3b1392fffc5f75032a389ef21d1847.16366ca1277caccb15200478349503b3336a1420ac26d44fc16763354f5a2cae
All model checkpoint weights were used when initializing RobertaForSequenceClassification.
All the weights of RobertaForSequenceClassification were initialized from the model checkpoint at finiteautomata/bertweet-base-sentiment-analysis.
If your task is similar to the task the model of the checkpoint was trained on, you can already use RobertaForSequenceClassification for predictions without further training.
PyTorch: setting up devices
The default value for the training argument `--report_to` will change in v5 (from all installed integrations to none). In v5, you will need to use `--report_to all` to get the same behavior as now. You should start updating your code and make this info disappear :-).
***** Running training *****
Num examples = 288
Num Epochs = 20
Instantaneous batch size per device = 32
Total train batch size (w. parallel, distributed & accumulation) = 32
Gradient Accumulation steps = 1
Total optimization steps = 180
###Markdown
7. Evaluating the model: How well does it classify tweets?
###Code
# Load the checkpoint
model = AutoModelForSequenceClassification.from_pretrained("../models/bertweet/checkpoint-120")
# Make the test set ready
test_set_dataset = Generate_PyTorch_Dataset(
texts = df.query("split_group == 'test_set' ").Text.tolist(),
labels = df.query("split_group == 'test_set' ").label.tolist(),
tokenizer = tokenizer
)
training_args = TrainingArguments(
output_dir = "../model_predictions",
do_predict = True
)
trainer = Trainer(
model = model,
args = training_args,
compute_metrics = compute_metrics,
)
test_preds = trainer.predict(test_set_dataset)
pd.DataFrame([test_preds[-1]])
y_true = df.query("split_group == 'test_set' ").label.tolist()
preds = np.argmax(test_preds[0], axis=1).flatten()
print(classification_report(y_true, preds))
###Output
precision recall f1-score support
0 0.88 0.81 0.84 43
1 0.77 0.85 0.81 27
2 0.67 0.67 0.67 6
accuracy 0.82 76
macro avg 0.77 0.78 0.77 76
weighted avg 0.82 0.82 0.82 76
###Markdown
How about the 2nd best model? It could be that the model did not get to train on
###Code
# Define the training parameters
training_args = TrainingArguments(
output_dir = "../models",
per_device_train_batch_size = 16, #Increase default batch size to ensure that each batch has a decent chance of containing a few positive samples.
per_device_eval_batch_size = 64,
warmup_steps = 500,
save_strategy = "epoch",
evaluation_strategy = "epoch"
)
#Define Trainer object
trainer = Trainer(
model = model,
args = training_args,
train_dataset = train_set_dataset,
eval_dataset = val_set_dataset,
compute_metrics = compute_metrics
)
#Start pre-training!
trainer.train()
###Output
_____no_output_____
###Markdown
7. Modeling: M-BERT
###Code
import os
os.environ['KMP_DUPLICATE_LIB_OK']='True'
# Define the training parameters
training_args = TrainingArguments(
output_dir = "../models",
num_train_epochs = 10,
per_device_train_batch_size = 16,
per_device_eval_batch_size = 64,
warmup_steps = 500,
weight_decay = 0.01,
save_strategy = "epoch",
evaluation_strategy = "epoch"
)
#Define Trainer object
trainer = Trainer(
model = model,
args = training_args,
train_dataset = train_set_dataset,
eval_dataset = val_set_dataset,
compute_metrics = compute_metrics
)
trainer.train('../models/checkpoint-75')
###Output
Loading model from ../models/checkpoint-75).
***** Running training *****
Num examples = 226
Num Epochs = 10
Instantaneous batch size per device = 16
Total train batch size (w. parallel, distributed & accumulation) = 16
Gradient Accumulation steps = 1
Total optimization steps = 150
Continuing training from checkpoint, will skip to saved global_step
Continuing training from epoch 5
Continuing training from global step 75
Will skip the first 5 epochs then the first 0 batches in the first epoch. If this takes a lot of time, you can add the `--ignore_data_skip` flag to your launch command, but you will resume the training on data already seen by your model.
###Markdown
From the above results, the checkpoint from epoch 9 (checkpoint-135) does the best across all metrics. I also had a similar checkpoint in a previous run (see Appendix). We thus compare both checkpoints on metrics to prioritize one:
###Code
checkpoint_135 = {'Validation Loss': 0.666470, 'F1': 0.756062, 'Precision': 0.728259,
'Recall': 0.789474, 'Accuracy': 0.789474}
checkpoint_174 = {'Validation Loss': 0.905348, 'F1': 0.756738, 'Precision': 0.727052,
'Recall': 0.789474, 'Accuracy': 0.789474}
pd.DataFrame([checkpoint_135,checkpoint_174], index= ['checkpoint_135','checkpoint_174'])
###Output
_____no_output_____
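###Markdown
For reference, F1 is the harmonic mean of precision and recall, F1 = 2PR/(P+R). Plugging the checkpoint-135 aggregates above into that formula gives roughly 0.758; the reported 0.756 differs slightly, presumably because the reported metric averages per-class scores rather than being computed from these aggregate values.
###Code
#Illustrative sanity check of the F1 definition using the checkpoint-135 numbers above
p, r = 0.728259, 0.789474
print(round(2 * p * r / (p + r), 4))
###Output
_____no_output_____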
###Markdown
We see a tradeoff between F1 and precision (not surprising, given that F1 tries to balance precision and recall). In this scenario, I don't have a reason to believe precision is more important than recall (i.e. it is important to consider both false positives and false negatives for the classes). Hence I decide to go with checkpoint_174 for future predictions. I also keep checkpoint_135, just in case, but I delete all other checkpoints (including those listed in the Appendix). **Note:** Here, I don't really pay attention to accuracy (although in this scenario they are the same) because we have a really uneven class distribution, and the accuracy would be misleading. 6. Test set Prediction with the Fine-tuned BERT
###Code
# Load the checkpoint
model = BertForSequenceClassification.from_pretrained("../models/checkpoint-174")
# Make the test set ready
test_set_dataset = Generate_PyTorch_Dataset(
texts = df.query("split_group == 'test_set' ").Text.tolist(),
labels = df.query("split_group == 'test_set' ").label.tolist(),
tokenizer = tokenizer
)
training_args = TrainingArguments(
output_dir = "../model_predictions",
do_predict = True
)
trainer = Trainer(
model = model,
args = training_args,
compute_metrics =compute_metrics,
)
test_preds = trainer.predict(test_set_dataset)
pd.DataFrame([test_preds[-1]])
###Output
_____no_output_____
###Markdown
7. Evaluating the model: How well does it classify tweets?
###Code
y_true = df.query("split_group == 'test_set' ").label.tolist()
preds = np.argmax(test_preds[0], axis=1).flatten()
print(classification_report(y_true, preds))
cm = confusion_matrix(y_true, preds)
disp = ConfusionMatrixDisplay(confusion_matrix=cm, display_labels = ['Negative', 'Neutral', 'Positive'])
disp.plot(cmap='Blues',ax=None)
plt.grid(False)
plt.show()
###Output
_____no_output_____ |
python/notebooks/Easy21.ipynb | ###Markdown
Whoa, this is pretty awesome. But why, but why, but why. Because we have more features to put weights on? See notes in easy21.linfa.
###Code
train_linfa_ex_feature = functools.partial(dev.train_linfa, feature=ex_feature)
n = 10000
dev.plot_msq_errors(train_linfa_ex_feature, monte_carlo_V, n)
dev.plot_msq_errors(train_linfa_ex_feature, monte_carlo_V, n)
dev.plot_msq_errors(train_linfa_ex_feature, monte_carlo_V, n)
n = 10000
dev.plot_msq_errors(dev.train, monte_carlo_V, n)
dev.plot_msq_errors(dev.train, monte_carlo_V, n)
dev.plot_msq_errors(dev.train, monte_carlo_V, n)
###Output
_____no_output_____
###Markdown
This is the new weirdness. Errors are pretty random. In the tabular case it looks like lambda does have a small influence. The linear function approximation case looks completely random. I wonder what it would look like if I'd run it many times for each lambda and averaged over the results. This is what Sutton and Barto do.
###Code
dev.plot_avg_msq_errors(dev.train, monte_carlo_V, 1000)
###Output
_____no_output_____
###Markdown
This is what I call a nice graph! Let's make some for function approximation with my features and their features, too! Hm, but why are the numbers so high? Because I forgot to divide the sum of the errors by the number of the errors. So you need to scale the graph up there by 1/210.
###Code
train_linfa_ex_feature = functools.partial(dev.train_linfa, feature=ex_feature)
dev.plot_avg_msq_errors(train_linfa_ex_feature, monte_carlo_V, 1000)
###Output
_____no_output_____
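###Markdown
A minimal sketch of the normalisation fix mentioned above, assuming the value functions are available as arrays over the 210 (dealer, player) state values: divide the summed squared error by the number of entries so the curve is a true mean.
###Code
import numpy as np
def mean_squared_error(V_approx, V_true):
    #mean, not sum: this is the 1/210 scaling referred to above
    diff = np.asarray(V_approx) - np.asarray(V_true)
    return np.mean(diff ** 2)
###Output
_____no_output_____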
###Markdown
Hmm, could it be that I messed up something? It's nice to see that the errors are all smaller than for the tabular algorithm (really???), but it's strange that lambda appears to have no influence. I have to try and find some errors. Though first I want to have a look at the value function.
###Code
%matplotlib notebook
dev.plot_V(train_linfa_ex_feature(100000, 0.3))
###Output
_____no_output_____ |
logistic regression/Logistic Regression.ipynb | ###Markdown
Logistic Regression The name has "regression" in it, but it is actually a classification algorithm. It fits a sigmoid function and we find the probability of the y variable; the scale is from 0 to 1. So if the probability is less than 0.5 then y is 0, and if the probability is more than 0.5 then the output is 1. The task: predict if the user will buy the SUV or not.
###Code
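#Illustrative sketch of the idea described above (not part of the original workflow):
#logistic regression squashes a real-valued score through the sigmoid, giving a
#probability in (0, 1), and predicts class 1 when that probability is at least 0.5
import numpy as np
def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))
example_probability = sigmoid(0.8)
example_prediction = int(example_probability >= 0.5)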
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from google.colab import files
# with open('C:\Users\diptangshu.banik\Desktop\AIML training\logistic regression\Social_Network_Ads.csv', 'w') as f:
#   f.write('some content')
# files.download('example.txt')
# note: files.upload() takes no path argument; it opens a file-picker dialog in Colab
files.upload()
dataset = pd.read_csv('Social_Network_Ads.csv')
dataset
# We are going to use the age and salary of a person to predict if a person is going to buy an SUV or not.
X = dataset.iloc[:, [2,3]].values
y = dataset.iloc[:, 4].values
X
y
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
X_train
from sklearn.linear_model import LogisticRegression
classifier = LogisticRegression(random_state = 0)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
y_pred
#Confusion Matrix
from sklearn.metrics import confusion_matrix
#this is a function: function names start with lowercase letters, while classes will have the first letter in capitals
cm = confusion_matrix(y_test, y_pred)
cm
###Output
_____no_output_____
###Markdown
65 and 24 correct predictions and 8 and 3 incorrect predictions
###Code
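#Illustrative only: overall accuracy implied by the confusion-matrix counts above,
#(65 + 24) correct out of the 100 test samples
print(cm.trace() / cm.sum())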
# Visualising the Test set results
from matplotlib.colors import ListedColormap
X_set, y_set = X_test, y_test
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
alpha = 0.75, cmap = ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('Logistic Regression (Test set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show()
###Output
_____no_output_____ |
deep_models/paper_14_clstm/models.ipynb | ###Markdown
Setup
###Code
import sys
import os
import re
import collections
import itertools
import bcolz
import pickle
sys.path.append('../../lib')
sys.path.append('../')
import numpy as np
import pandas as pd
import gc
import random
import smart_open
import h5py
import csv
import json
import functools
import time
import string
import datetime as dt
from tqdm import tqdm_notebook as tqdm
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import global_utils
random_state_number = 967898
gc.collect()
import tensorflow as tf
from tensorflow.python.client import device_lib
def get_available_gpus():
local_device_protos = device_lib.list_local_devices()
return [x.name for x in local_device_protos if x.device_type == 'GPU']
config = tf.ConfigProto()
config.gpu_options.allow_growth=True
sess = tf.Session(config=config)
get_available_gpus()
%pylab
%matplotlib inline
%load_ext line_profiler
%load_ext memory_profiler
%load_ext autoreload
pd.options.mode.chained_assignment = None
pd.options.display.max_columns = 999
color = sns.color_palette()
###Output
_____no_output_____
###Markdown
Data
###Code
store = pd.HDFStore('../../data_prep/processed/stage1/data_frames.h5')
train_df = store['train_df']
test_df = store['test_df']
display(train_df.head())
display(test_df.head())
corpus_vocab_list, corpus_vocab_wordidx = None, None
with open('../../data_prep/processed/stage1/vocab_words_wordidx.pkl', 'rb') as f:
(corpus_vocab_list, corpus_wordidx) = pickle.load(f)
print(len(corpus_vocab_list), len(corpus_wordidx))
###Output
352220 352220
###Markdown
Data Prep To control the vocabulary, pass in an updated corpus_wordidx
###Code
from sklearn.model_selection import train_test_split
x_train_df, x_val_df = train_test_split(train_df,
test_size=0.10, random_state=random_state_number,
stratify=train_df.Class)
print(x_train_df.shape)
print(x_val_df.shape)
from tensorflow.contrib.keras.python.keras.utils import np_utils
from keras.preprocessing.sequence import pad_sequences
from keras.utils.np_utils import to_categorical
vocab_size=len(corpus_vocab_list)
###Output
_____no_output_____
###Markdown
T:sent_words generate data
###Code
custom_unit_dict = {
"gene_unit" : "words",
"variation_unit" : "words",
# text transformed to sentences attribute
"doc_unit" : "words",
"doc_form" : "sentences",
"divide_document": "multiple_unit"
}
%autoreload
import global_utils
gen_data = global_utils.GenerateDataset(x_train_df, corpus_wordidx)
x_train_21_T, x_train_21_G, x_train_21_V, x_train_21_C = gen_data.generate_data(custom_unit_dict,
has_class=True,
add_start_end_tag=True)
del gen_data
print("Train data")
print(np.array(x_train_21_T).shape, x_train_21_T[0])
print(np.array(x_train_21_G).shape, x_train_21_G[0])
print(np.array(x_train_21_V).shape, x_train_21_V[0])
print(np.array(x_train_21_C).shape, x_train_21_C[0])
gen_data = global_utils.GenerateDataset(x_val_df, corpus_wordidx)
x_val_21_T, x_val_21_G, x_val_21_V, x_val_21_C = gen_data.generate_data(custom_unit_dict,
has_class=True,
add_start_end_tag=True)
del gen_data
print("Val data")
print("text",np.array(x_val_21_T).shape)
print("gene",np.array(x_val_21_G).shape, x_val_21_G[0])
print("variation",np.array(x_val_21_V).shape, x_val_21_V[0])
print("classes",np.array(x_val_21_C).shape, x_val_21_C[0])
###Output
Val data
text (128341,)
gene (128341, 3) [352216, 217983, 352217]
variation (128341,) [352216, 41934, 352217]
classes (128341,) 4
###Markdown
format data
###Code
word_unknown_tag_idx = corpus_wordidx["<UNK>"]
char_unknown_tag_idx = global_utils.char_unknown_tag_idx
MAX_SENT_LEN = 60
x_train_21_T = pad_sequences(x_train_21_T, maxlen=MAX_SENT_LEN, value=word_unknown_tag_idx,
padding="post",truncating="post")
x_val_21_T = pad_sequences(x_val_21_T, maxlen=MAX_SENT_LEN, value=word_unknown_tag_idx,
padding="post",truncating="post")
print(x_train_21_T.shape, x_val_21_T.shape)
###Output
(1086419, 60) (128341, 60)
###Markdown
keras np_utils.to_categorical expects zero-indexed categorical variables https://github.com/fchollet/keras/issues/570
###Code
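#Tiny illustration of the zero-indexing note above: label 0 -> [1, 0, 0], label 2 -> [0, 0, 1]
print(np_utils.to_categorical([0, 2], 3))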
x_train_21_C = np.array(x_train_21_C) - 1
x_val_21_C = np.array(x_val_21_C) - 1
x_train_21_C = np_utils.to_categorical(np.array(x_train_21_C), 9)
x_val_21_C = np_utils.to_categorical(np.array(x_val_21_C), 9)
print(x_train_21_C.shape, x_val_21_C.shape)
###Output
(1086419, 9) (128341, 9)
###Markdown
T:text_words generate data
###Code
custom_unit_dict = {
"gene_unit" : "words",
"variation_unit" : "words",
# text transformed to sentences attribute
"doc_unit" : "words",
"doc_form" : "text",
"divide_document": "single_unit"
}
%autoreload
import global_utils
gen_data = global_utils.GenerateDataset(x_train_df, corpus_wordidx)
x_train_22_T, x_train_22_G, x_train_22_V, x_train_22_C = gen_data.generate_data(custom_unit_dict,
has_class=True,
add_start_end_tag=True)
del gen_data
print("Train data")
print("text",np.array(x_train_22_T).shape)
print("gene",np.array(x_train_22_G).shape, x_train_22_G[0])
print("variation",np.array(x_train_22_V).shape, x_train_22_V[0])
print("classes",np.array(x_train_22_C).shape, x_train_22_C[0])
gen_data = global_utils.GenerateDataset(x_val_df, corpus_wordidx)
x_val_22_T, x_val_22_G, x_val_22_V, x_val_22_C = gen_data.generate_data(custom_unit_dict,
has_class=True,
add_start_end_tag=True)
del gen_data
print("Val data")
print("text",np.array(x_val_22_T).shape)
print("gene",np.array(x_val_22_G).shape, x_val_22_G[0])
print("variation",np.array(x_val_22_V).shape, x_val_22_V[0])
print("classes",np.array(x_val_22_C).shape, x_val_22_C[0])
###Output
Val data
text (333,)
gene (333, 3) [352216, 217983, 352217]
variation (333,) [352216, 41934, 352217]
classes (333,) 4
###Markdown
format data
###Code
word_unknown_tag_idx = corpus_wordidx["<UNK>"]
char_unknown_tag_idx = global_utils.char_unknown_tag_idx
MAX_TEXT_LEN = 5000
x_train_22_T = pad_sequences(x_train_22_T, maxlen=MAX_TEXT_LEN, value=word_unknown_tag_idx,
padding="post",truncating="post")
x_val_22_T = pad_sequences(x_val_22_T, maxlen=MAX_TEXT_LEN, value=word_unknown_tag_idx,
padding="post",truncating="post")
print(x_train_22_T.shape, x_val_22_T.shape)
MAX_GENE_LEN = 1
MAX_VAR_LEN = 4
x_train_22_G = pad_sequences(x_train_22_G, maxlen=MAX_GENE_LEN, value=word_unknown_tag_idx)
x_train_22_V = pad_sequences(x_train_22_V, maxlen=MAX_VAR_LEN, value=word_unknown_tag_idx)
x_val_22_G = pad_sequences(x_val_22_G, maxlen=MAX_GENE_LEN, value=word_unknown_tag_idx)
x_val_22_V = pad_sequences(x_val_22_V, maxlen=MAX_VAR_LEN, value=word_unknown_tag_idx)
print(x_train_22_G.shape, x_train_22_V.shape)
print(x_val_22_G.shape, x_val_22_V.shape)
###Output
(2988, 1) (2988, 4)
(333, 1) (333, 4)
###Markdown
keras np_utils.to_categorical expects zero-indexed categorical variables https://github.com/fchollet/keras/issues/570
###Code
x_train_22_C = np.array(x_train_22_C) - 1
x_val_22_C = np.array(x_val_22_C) - 1
x_train_22_C = np_utils.to_categorical(np.array(x_train_22_C), 9)
x_val_22_C = np_utils.to_categorical(np.array(x_val_22_C), 9)
print(x_train_22_C.shape, x_val_22_C.shape)
###Output
(2988, 9) (333, 9)
###Markdown
test Data setup
###Code
gen_data = global_utils.GenerateDataset(test_df, corpus_wordidx)
x_test_22_T, x_test_22_G, x_test_22_V, _ = gen_data.generate_data(custom_unit_dict,
has_class=False,
add_start_end_tag=True)
del gen_data
print("Test data")
print("text",np.array(x_test_22_T).shape)
print("gene",np.array(x_test_22_G).shape, x_test_22_G[0])
print("variation",np.array(x_test_22_V).shape, x_test_22_V[0])
x_test_22_T = pad_sequences(x_test_22_T, maxlen=MAX_TEXT_LEN, value=word_unknown_tag_idx,
padding="post",truncating="post")
print(x_test_22_T.shape)
MAX_GENE_LEN = 1
MAX_VAR_LEN = 4
x_test_22_G = pad_sequences(x_test_22_G, maxlen=MAX_GENE_LEN, value=word_unknown_tag_idx)
x_test_22_V = pad_sequences(x_test_22_V, maxlen=MAX_VAR_LEN, value=word_unknown_tag_idx)
print(x_test_22_G.shape, x_test_22_V.shape)
###Output
(986, 1) (986, 4)
###Markdown
T:text_chars generate data
###Code
custom_unit_dict = {
"gene_unit" : "raw_chars",
"variation_unit" : "raw_chars",
# text transformed to sentences attribute
"doc_unit" : "raw_chars",
"doc_form" : "text",
"divide_document" : "multiple_unit"
}
%autoreload
import global_utils
gen_data = global_utils.GenerateDataset(x_train_df, corpus_wordidx)
x_train_33_T, x_train_33_G, x_train_33_V, x_train_33_C = gen_data.generate_data(custom_unit_dict,
has_class=True,
add_start_end_tag=True)
del gen_data
print("Train data")
print("text",np.array(x_train_33_T).shape, x_train_33_T[0])
print("gene",np.array(x_train_33_G).shape, x_train_33_G[0])
print("variation",np.array(x_train_33_V).shape, x_train_33_V[0])
print("classes",np.array(x_train_33_C).shape, x_train_33_C[0])
%autoreload
import global_utils
gen_data = global_utils.GenerateDataset(x_val_df, corpus_wordidx)
x_val_33_T, x_val_33_G, x_val_33_V, x_val_33_C = gen_data.generate_data(custom_unit_dict,
has_class=True,
add_start_end_tag=True)
del gen_data
print("Val data")
print("text",np.array(x_val_33_T).shape, x_val_33_T[98])
print("gene",np.array(x_val_33_G).shape, x_val_33_G[0])
print("variation",np.array(x_val_33_V).shape, x_val_33_V[0])
print("classes",np.array(x_val_33_C).shape, x_val_33_C[0])
###Output
Val data
text (128341,) [74, 71, 0, 19, 72, 71, 19, 7, 8, 18, 72, 71, 19, 8, 12, 4, 72, 71, 15, 14, 8, 13, 19, 72, 71, 72, 71, 19, 7, 4, 72, 71, 4, 23, 15, 17, 4, 18, 18, 8, 14, 13, 72, 71, 14, 5, 72, 71, 22, 8, 11, 3, 36, 19, 24, 15, 4, 72, 71, 15, 27, 32, 8, 13, 10, 30, 0, 72, 71, 8, 13, 72, 71, 20, 28, 14, 18, 72, 71, 2, 4, 11, 11, 18, 72, 71, 8, 13, 3, 20, 2, 4, 3, 72, 71, 15, 14, 19, 4, 13, 19, 72, 71, 2, 4, 11, 11, 72, 71, 2, 24, 2, 11, 4, 72, 71, 0, 17, 17, 4, 18, 19, 72, 71, 0, 19, 72, 71, 1, 14, 19, 7, 72, 71, 19, 4, 12, 15, 4, 17, 0, 19, 20, 17, 4, 18, 72, 71, 72, 71, 15, 27, 32, 8, 13, 10, 30, 0, 72, 71, 8, 13, 3, 20, 2, 4, 3, 72, 71, 18, 36, 15, 7, 0, 18, 4, 72, 71, 8, 13, 7, 8, 1, 8, 19, 8, 14, 13, 72, 71, 14, 5, 72, 71, 30, 28, 33, 29, 72, 71, 72, 71, 0, 13, 3, 72, 71, 30, 35, 33, 29, 72, 71, 72, 71, 0, 19, 72, 71, 29, 33, 27, 2, 72, 71, 0, 13, 3, 72, 71, 30, 26, 27, 2, 72, 71, 72, 71, 17, 4, 18, 15, 4, 2, 19, 8, 21, 4, 11, 24, 72, 71, 72, 75]
gene (128341,) [74, 71, 2, 3, 10, 13, 28, 0, 72, 75]
variation (128341,) [74, 71, 0, 32, 26, 21, 72, 75]
classes (128341,) 4
###Markdown
format data
###Code
word_unknown_tag_idx = corpus_wordidx["<UNK>"]
char_unknown_tag_idx = global_utils.char_unknown_tag_idx
MAX_CHAR_IN_SENT_LEN = 150
x_train_33_T = pad_sequences(x_train_33_T, maxlen=MAX_CHAR_IN_SENT_LEN, value=char_unknown_tag_idx,
padding="post",truncating="post")
x_val_33_T = pad_sequences(x_val_33_T, maxlen=MAX_CHAR_IN_SENT_LEN, value=char_unknown_tag_idx,
padding="post",truncating="post")
print(x_train_33_T.shape, x_val_33_T.shape)
x_train_33_G = pad_sequences(x_train_33_G, maxlen=MAX_CHAR_IN_SENT_LEN, value=char_unknown_tag_idx)
x_train_33_V = pad_sequences(x_train_33_V, maxlen=MAX_CHAR_IN_SENT_LEN, value=char_unknown_tag_idx)
x_val_33_G = pad_sequences(x_val_33_G, maxlen=MAX_CHAR_IN_SENT_LEN, value=char_unknown_tag_idx)
x_val_33_V = pad_sequences(x_val_33_V, maxlen=MAX_CHAR_IN_SENT_LEN, value=char_unknown_tag_idx)
print(x_train_33_G.shape, x_train_33_V.shape)
print(x_val_33_G.shape, x_val_33_V.shape)
###Output
(1086419, 150) (1086419, 150)
(128341, 150) (128341, 150)
###Markdown
keras np_utils.to_categorical expects zero-indexed categorical variables https://github.com/fchollet/keras/issues/570
###Code
x_train_33_C = np.array(x_train_33_C) - 1
x_val_33_C = np.array(x_val_33_C) - 1
x_train_33_C = np_utils.to_categorical(np.array(x_train_33_C), 9)
x_val_33_C = np_utils.to_categorical(np.array(x_val_33_C), 9)
print(x_train_33_C.shape, x_val_33_C.shape)
###Output
(1086419, 9) (128341, 9)
###Markdown
T:text_sent_words generate data
###Code
custom_unit_dict = {
"gene_unit" : "words",
"variation_unit" : "words",
# text transformed to sentences attribute
"doc_unit" : "word_list",
"doc_form" : "text",
"divide_document" : "single_unit"
}
%autoreload
import global_utils
gen_data = global_utils.GenerateDataset(x_train_df, corpus_wordidx)
x_train_34_T, x_train_34_G, x_train_34_V, x_train_34_C = gen_data.generate_data(custom_unit_dict,
has_class=True,
add_start_end_tag=True)
del gen_data
print("Train data")
print("text",np.array(x_train_34_T).shape, x_train_34_T[0][:1])
print("gene",np.array(x_train_34_G).shape, x_train_34_G[0])
print("variation",np.array(x_train_34_V).shape, x_train_34_V[0])
print("classes",np.array(x_train_34_C).shape, x_train_34_C[0])
%autoreload
import global_utils
gen_data = global_utils.GenerateDataset(x_val_df, corpus_wordidx)
x_val_34_T, x_val_34_G, x_val_34_V, x_val_34_C = gen_data.generate_data(custom_unit_dict,
has_class=True,
add_start_end_tag=True)
del gen_data
print("Val data")
print("text",np.array(x_val_34_T).shape, x_val_34_T[98][:1])
print("gene",np.array(x_val_34_G).shape, x_val_34_G[0])
print("variation",np.array(x_val_34_V).shape, x_val_34_V[0])
print("classes",np.array(x_val_34_C).shape, x_val_34_C[0])
###Output
Val data
text (333,) [[352216, 252037, 156537, 91785, 67201, 109857, 123191, 209585, 213751, 5638, 0, 126280, 49123, 331220, 0, 352217]]
gene (333, 3) [352216, 217983, 352217]
variation (333,) [352216, 41934, 352217]
classes (333,) 4
###Markdown
format data
###Code
word_unknown_tag_idx = corpus_wordidx["<UNK>"]
char_unknown_tag_idx = global_utils.char_unknown_tag_idx
MAX_DOC_LEN = 500 # no of sentences in a document
MAX_SENT_LEN = 80 # no of words in a sentence
###Output
_____no_output_____
###Markdown

###Code
for doc_i, doc in enumerate(x_train_34_T):
x_train_34_T[doc_i] = x_train_34_T[doc_i][:MAX_DOC_LEN]
# padding sentences
if len(x_train_34_T[doc_i]) < MAX_DOC_LEN:
for not_used_i in range(0,MAX_DOC_LEN - len(x_train_34_T[doc_i])):
x_train_34_T[doc_i].append([word_unknown_tag_idx]*MAX_SENT_LEN)
# padding words
x_train_34_T[doc_i] = pad_sequences(x_train_34_T[doc_i], maxlen=MAX_SENT_LEN, value=word_unknown_tag_idx)
for doc_i, doc in enumerate(x_val_34_T):
x_val_34_T[doc_i] = x_val_34_T[doc_i][:MAX_DOC_LEN]
# padding sentences
if len(x_val_34_T[doc_i]) < MAX_DOC_LEN:
for not_used_i in range(0,MAX_DOC_LEN - len(x_val_34_T[doc_i])):
x_val_34_T[doc_i].append([word_unknown_tag_idx]*MAX_SENT_LEN)
# padding words
x_val_34_T[doc_i] = pad_sequences(x_val_34_T[doc_i], maxlen=MAX_SENT_LEN, value=word_unknown_tag_idx)
x_train_34_T = np.array(x_train_34_T)
x_val_34_T = np.array(x_val_34_T)
print(x_val_34_T.shape, x_train_34_T.shape)
x_train_34_G = pad_sequences(x_train_34_G, maxlen=MAX_SENT_LEN, value=word_unknown_tag_idx)
x_train_34_V = pad_sequences(x_train_34_V, maxlen=MAX_SENT_LEN, value=word_unknown_tag_idx)
x_val_34_G = pad_sequences(x_val_34_G, maxlen=MAX_SENT_LEN, value=word_unknown_tag_idx)
x_val_34_V = pad_sequences(x_val_34_V, maxlen=MAX_SENT_LEN, value=word_unknown_tag_idx)
print(x_train_34_G.shape, x_train_34_V.shape)
print(x_val_34_G.shape, x_val_34_V.shape)
###Output
(2988, 80) (2988, 80)
(333, 80) (333, 80)
###Markdown
keras np_utils.to_categorical expects zero-indexed categorical variables https://github.com/fchollet/keras/issues/570
###Code
x_train_34_C = np.array(x_train_34_C) - 1
x_val_34_C = np.array(x_val_34_C) - 1
x_train_34_C = np_utils.to_categorical(np.array(x_train_34_C), 9)
x_val_34_C = np_utils.to_categorical(np.array(x_val_34_C), 9)
print(x_train_34_C.shape, x_val_34_C.shape)
###Output
(2988, 9) (333, 9)
###Markdown
Need to form 3 dimensional target data for rationale model training
###Code
temp = (x_train_34_C.shape[0],1,x_train_34_C.shape[1])
x_train_34_C_sent = np.repeat(x_train_34_C.reshape(temp[0],temp[1],temp[2]), MAX_DOC_LEN, axis=1)
#sentence test targets
temp = (x_val_34_C.shape[0],1,x_val_34_C.shape[1])
x_val_34_C_sent = np.repeat(x_val_34_C.reshape(temp[0],temp[1],temp[2]), MAX_DOC_LEN, axis=1)
print(x_train_34_C_sent.shape, x_val_34_C_sent.shape)
###Output
(2988, 500, 9) (333, 500, 9)
###Markdown
T:text_words_context generate data
###Code
custom_unit_dict_forward_context = {
"gene_unit" : "words",
"variation_unit" : "words",
# text transformed to sentences attribute
"doc_unit" : "words",
"doc_form" : "text",
"divide_document" : "single_unit"
}
custom_unit_dict_backward_context = {
"gene_unit" : "words",
"variation_unit" : "words",
# text transformed to sentences attribute
"doc_unit" : "words",
"doc_cntx_dir" : "backward",
"doc_form" : "text",
"divide_document" : "single_unit"
}
###Output
_____no_output_____
###Markdown
Train data
###Code
%autoreload
import global_utils
gen_data = global_utils.GenerateDataset(x_train_df, corpus_wordidx)
x_train_35_T, _, _, _ = gen_data.generate_data(custom_unit_dict_forward_context, has_class=True, add_start_end_tag=True)
x_train_35_T_fwd = x_train_35_T.copy()
del gen_data
print("Train data forward")
print("text",np.array(x_train_35_T_fwd).shape, x_train_35_T_fwd[0][:10])
%autoreload
import global_utils
gen_data = global_utils.GenerateDataset(x_train_df, corpus_wordidx)
x_train_35_T_bwd, x_train_35_G, x_train_35_V, x_train_35_C = gen_data.generate_data(custom_unit_dict_backward_context,
has_class=True,
add_start_end_tag=True)
del gen_data
print("Train data backward")
print("text",np.array(x_train_35_T_bwd).shape, x_train_35_T_bwd[0][-10:])
###Output
Train data backward
text (2988,) [209585, 123191, 338562, 109857, 164788, 86431, 70974, 202038, 252037, 352216]
###Markdown
Val data
###Code
%autoreload
import global_utils
gen_data = global_utils.GenerateDataset(x_val_df, corpus_wordidx)
x_val_35_T, _, _, _ = gen_data.generate_data(custom_unit_dict_forward_context, has_class=True, add_start_end_tag=True)
x_val_35_T_fwd = x_val_35_T.copy()
del gen_data
print("Val data forward")
print("text",np.array(x_val_35_T_fwd).shape, x_val_35_T_fwd[0][:10])
%autoreload
import global_utils
gen_data = global_utils.GenerateDataset(x_val_df, corpus_wordidx)
x_val_35_T_bwd, x_val_35_G, x_val_35_V, x_val_35_C = gen_data.generate_data(custom_unit_dict_backward_context,
has_class=True,
add_start_end_tag=True)
del gen_data
print("Val data backward")
print("text",np.array(x_val_35_T_bwd).shape, x_val_35_T_bwd[0][-10:])
###Output
Val data backward
text (333,) [0, 217983, 0, 97039, 5234, 252037, 88498, 310762, 24685, 352216]
###Markdown
format data
###Code
word_unknown_tag_idx = corpus_wordidx["<UNK>"]
char_unknown_tag_idx = global_utils.char_unknown_tag_idx
MAX_TEXT_LEN = 5000 # no of words in a document
###Output
_____no_output_____
###Markdown
To achieve the context-word line-up shown below, we have to shift the contexts: 1. the forward (left) context by 1, forward (remove the last element and append to the front of the list); 2. the backward (right) context by 1, backward (remove the first element and append to the end of the list). 
###Code
for doc_i in range(len(x_train_35_T)):
# forward context processing
# remove first word
x_train_35_T_fwd[doc_i] = x_train_35_T_fwd[doc_i][1:]
# append word_unknown_tag_idx to front
x_train_35_T_fwd[doc_i] = [word_unknown_tag_idx] + x_train_35_T_fwd[doc_i]
# backward context processing
# remove last word
x_train_35_T_bwd[doc_i] = x_train_35_T_bwd[doc_i][:-1]
# append word_unknown_tag_idx to end
x_train_35_T_bwd[doc_i] = x_train_35_T_bwd[doc_i] + [word_unknown_tag_idx]
x_train_35_T = pad_sequences(x_train_35_T, maxlen=MAX_TEXT_LEN, value=word_unknown_tag_idx,
padding="post",truncating="post")
x_train_35_T_fwd = pad_sequences(x_train_35_T_fwd, maxlen=MAX_TEXT_LEN, value=word_unknown_tag_idx,
padding="post",truncating="post")
x_train_35_T_bwd = pad_sequences(x_train_35_T_bwd, maxlen=MAX_TEXT_LEN, value=word_unknown_tag_idx,
padding="post",truncating="post")
print(x_train_35_T.shape, x_train_35_T_fwd.shape, x_train_35_T_bwd.shape)
for doc_i in range(len(x_val_35_T)):
# forward context processing
# remove first word
x_val_35_T_fwd[doc_i] = x_val_35_T_fwd[doc_i][1:]
# append word_unknown_tag_idx to front
x_val_35_T_fwd[doc_i] = [word_unknown_tag_idx] + x_val_35_T_fwd[doc_i]
# backward context processing
# remove last word
x_val_35_T_bwd[doc_i] = x_val_35_T_bwd[doc_i][:-1]
# append word_unknown_tag_idx to end
x_val_35_T_bwd[doc_i] = x_val_35_T_bwd[doc_i] + [word_unknown_tag_idx]
x_val_35_T = pad_sequences(x_val_35_T, maxlen=MAX_TEXT_LEN, value=word_unknown_tag_idx,
padding="post",truncating="post")
x_val_35_T_fwd = pad_sequences(x_val_35_T_fwd, maxlen=MAX_TEXT_LEN, value=word_unknown_tag_idx,
padding="post",truncating="post")
x_val_35_T_bwd = pad_sequences(x_val_35_T_bwd, maxlen=MAX_TEXT_LEN, value=word_unknown_tag_idx,
padding="post",truncating="post")
print(x_val_35_T.shape, x_val_35_T_fwd.shape, x_val_35_T_bwd.shape)
MAX_GENE_LEN = 1
MAX_VAR_LEN = 4
x_train_35_G = pad_sequences(x_train_35_G, maxlen=MAX_GENE_LEN, value=word_unknown_tag_idx)
x_train_35_V = pad_sequences(x_train_35_V, maxlen=MAX_VAR_LEN, value=word_unknown_tag_idx)
x_val_35_G = pad_sequences(x_val_35_G, maxlen=MAX_GENE_LEN, value=word_unknown_tag_idx)
x_val_35_V = pad_sequences(x_val_35_V, maxlen=MAX_VAR_LEN, value=word_unknown_tag_idx)
print(x_train_35_G.shape, x_train_35_V.shape)
print(x_val_35_G.shape, x_val_35_V.shape)
###Output
(2988, 1) (2988, 4)
(333, 1) (333, 4)
###Markdown
keras np_utils.to_categorical expects zero-indexed categorical variables https://github.com/fchollet/keras/issues/570
###Code
x_train_35_C = np.array(x_train_35_C) - 1
x_val_35_C = np.array(x_val_35_C) - 1
x_train_35_C = np_utils.to_categorical(np.array(x_train_35_C), 9)
x_val_35_C = np_utils.to_categorical(np.array(x_val_35_C), 9)
print(x_train_35_C.shape, x_val_35_C.shape)
###Output
(2988, 9) (333, 9)
###Markdown
Embedding layer for words
###Code
WORD_EMB_SIZE = 200
%autoreload
import global_utils
ft_file_path = "/home/bicepjai/Projects/Deep-Survey-Text-Classification/data_prep/processed/stage1/pretrained_word_vectors/ft_sg_200d_50e.vec"
trained_embeddings = global_utils.get_embeddings_from_ft(ft_file_path, WORD_EMB_SIZE, corpus_vocab_list)
trained_embeddings.shape
###Output
_____no_output_____
###Markdown
for characters
###Code
CHAR_EMB_SIZE = 64
char_embeddings = np.random.randn(global_utils.CHAR_ALPHABETS_LEN, CHAR_EMB_SIZE)
char_embeddings.shape
###Output
_____no_output_____
###Markdown
Models prep
###Code
%autoreload
import tensorflow.contrib.keras as keras
import tensorflow as tf
from keras import backend as K
from keras.engine import Layer, InputSpec, InputLayer
from keras.models import Model, Sequential
from keras.layers import Dropout, Embedding, concatenate
from keras.layers import Conv1D, MaxPool1D, Conv2D, MaxPool2D, ZeroPadding1D, GlobalMaxPool1D
from keras.layers import Dense, Input, Flatten, BatchNormalization
from keras.layers import Concatenate, Dot, Merge, Multiply, RepeatVector
from keras.layers import Bidirectional, TimeDistributed
from keras.layers import SimpleRNN, LSTM, GRU, Lambda, Permute
from keras.layers.core import Reshape, Activation
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint,EarlyStopping,TensorBoard
from keras.constraints import maxnorm
from keras.regularizers import l2
###Output
_____no_output_____
###Markdown
model_1: paper CNN model
###Code
text_input_layer = Input(shape=(MAX_TEXT_LEN,), dtype='int32')
doc_embedding = Embedding(vocab_size, WORD_EMB_SIZE, weights=[trained_embeddings],
input_length=MAX_TEXT_LEN, trainable=True)(text_input_layer)
convs = []
filter_sizes = [10, 20, 30, 40, 50]
for filter_size in filter_sizes:
l_conv = Conv1D(filters=64, kernel_size=filter_size, padding='valid', activation='relu')(doc_embedding)
convs.append(l_conv)
cnn_feature_maps = Concatenate(axis=1)(convs)
###Output
_____no_output_____
###Markdown
LSTM
###Code
sentence_encoder = LSTM(64,return_sequences=False)(cnn_feature_maps)
fc_layer =Dense(128, activation="relu")(sentence_encoder)
output_layer = Dense(9,activation="softmax")(fc_layer)
model_1 = Model(inputs=[text_input_layer], outputs=[output_layer])
model_1.compile(loss='categorical_crossentropy', optimizer=Adam(), metrics=['categorical_accuracy'])
model_1.summary()
###Output
____________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
====================================================================================================
input_1 (InputLayer) (None, 5000) 0
____________________________________________________________________________________________________
embedding_1 (Embedding) (None, 5000, 200) 70444000 input_1[0][0]
____________________________________________________________________________________________________
conv1d_1 (Conv1D) (None, 4991, 64) 128064 embedding_1[0][0]
____________________________________________________________________________________________________
conv1d_2 (Conv1D) (None, 4981, 64) 256064 embedding_1[0][0]
____________________________________________________________________________________________________
conv1d_3 (Conv1D) (None, 4971, 64) 384064 embedding_1[0][0]
____________________________________________________________________________________________________
conv1d_4 (Conv1D) (None, 4961, 64) 512064 embedding_1[0][0]
____________________________________________________________________________________________________
conv1d_5 (Conv1D) (None, 4951, 64) 640064 embedding_1[0][0]
____________________________________________________________________________________________________
concatenate_1 (Concatenate) (None, 24855, 64) 0 conv1d_1[0][0]
conv1d_2[0][0]
conv1d_3[0][0]
conv1d_4[0][0]
conv1d_5[0][0]
____________________________________________________________________________________________________
lstm_1 (LSTM) (None, 64) 33024 concatenate_1[0][0]
____________________________________________________________________________________________________
dense_1 (Dense) (None, 128) 8320 lstm_1[0][0]
____________________________________________________________________________________________________
dense_2 (Dense) (None, 9) 1161 dense_1[0][0]
====================================================================================================
Total params: 72,406,825
Trainable params: 72,406,825
Non-trainable params: 0
____________________________________________________________________________________________________
###Markdown
training
###Code
%rm -rf ./tb_graphs/*
tb_callback = keras.callbacks.TensorBoard(log_dir='./tb_graphs', histogram_freq=0, write_graph=True, write_images=True)
checkpointer = ModelCheckpoint(filepath="model_1_weights.hdf5",
verbose=1,
monitor="val_categorical_accuracy",
save_best_only=True,
mode="max")
with tf.Session() as sess:
# model = keras.models.load_model('current_model.h5')
sess.run(tf.global_variables_initializer())
try:
model_1.load_weights("model_1_weights.hdf5")
except IOError as ioe:
print("no checkpoints available !")
model_1.fit(x_train_22_T, x_train_22_C,
validation_data=(x_val_22_T, x_val_22_C),
epochs=5, batch_size=32, shuffle=True,
callbacks=[tb_callback,checkpointer])
#model.save('current_sent_model.h5')
###Output
no checkpoints available !
Train on 2988 samples, validate on 333 samples
Epoch 1/5
2976/2988 [============================>.] - ETA: 10s - loss: 1.7246 - categorical_accuracy: 0.3552Epoch 00000: val_categorical_accuracy improved from -inf to 0.47447, saving model to model_1_weights.hdf5
2988/2988 [==============================] - 2734s - loss: 1.7244 - categorical_accuracy: 0.3554 - val_loss: 1.4532 - val_categorical_accuracy: 0.4745
Epoch 2/5
2976/2988 [============================>.] - ETA: 10s - loss: 1.1825 - categorical_accuracy: 0.5645Epoch 00001: val_categorical_accuracy improved from 0.47447 to 0.51351, saving model to model_1_weights.hdf5
2988/2988 [==============================] - 2731s - loss: 1.1848 - categorical_accuracy: 0.5639 - val_loss: 1.3478 - val_categorical_accuracy: 0.5135
Epoch 3/5
2976/2988 [============================>.] - ETA: 10s - loss: 0.9221 - categorical_accuracy: 0.6586Epoch 00002: val_categorical_accuracy improved from 0.51351 to 0.55856, saving model to model_1_weights.hdf5
2988/2988 [==============================] - 2631s - loss: 0.9208 - categorical_accuracy: 0.6590 - val_loss: 1.3677 - val_categorical_accuracy: 0.5586
Epoch 4/5
2976/2988 [============================>.] - ETA: 10s - loss: 0.8082 - categorical_accuracy: 0.6868Epoch 00003: val_categorical_accuracy did not improve
2988/2988 [==============================] - 2630s - loss: 0.8074 - categorical_accuracy: 0.6867 - val_loss: 1.3838 - val_categorical_accuracy: 0.5255
Epoch 5/5
2976/2988 [============================>.] - ETA: 10s - loss: 0.7543 - categorical_accuracy: 0.6892Epoch 00004: val_categorical_accuracy did not improve
2988/2988 [==============================] - 2583s - loss: 0.7529 - categorical_accuracy: 0.6898 - val_loss: 1.4368 - val_categorical_accuracy: 0.5255
|
SepidehDadashi_Lab1.ipynb | ###Markdown
Fire Spread Rate * **Sepideh Dadashi_Lab1** [[email protected]](mailto:[email protected]) [github.com/SepidehD](https://github.com/SepidehD)
###Code
from IPython.display import IFrame, Image, display, YouTubeVideo
import pandas as pd
%config InlineBackend.figure_format = 'svg'
%matplotlib inline
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
In this notebook, I am going to use a sample of fire spread data for the year 2011 and make a graph based on the area and fire spread rate of each fire event.
###Code
display(Image(r"c:/t3/Bushfire_assoc.jpg", width=1000, embed=True))
fire_data = pd.read_csv(r"C:/t3/y2011_fire.csv")
fire_data.head()
print ("There are " + str(len(fire_data))+" fire events in my study area" )
tidy = pd.melt(fire_data, id_vars=['FireClass'])
tidy
#fire_data['FSR']
#fire_data['AREA_hec']
plt.plot(fire_data['FSR'])
plt.xlabel('Fire Event')
plt.ylabel('Fire Spread Rate (FSR)')
plt.show()
plt.plot(fire_data['AREA_hec'])
plt.xlabel('Fire Event')
plt.ylabel('AREA (Ha)')
plt.show()
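#An extra illustrative plot of the relationship described in the introduction:
#fire spread rate against burned area for each event (same columns as used above)
plt.scatter(fire_data['AREA_hec'], fire_data['FSR'])
plt.xlabel('AREA (Ha)')
plt.ylabel('Fire Spread Rate (FSR)')
plt.show()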
###Output
_____no_output_____ |
notebooks/robust-regression.ipynb | ###Markdown
Robust Linear Regression Example We will approximate the posterior for the simple 2D robust linear regression model $$\beta_i \sim \mathcal{N}(0, 10)$$ $$y_n | x_n, \beta, \sigma \sim \mathcal{T}_{40}(\beta^\top x_n, 1)$$ and use Stan to compute (the gradient of) the model log density. For more details and discussion of this example, see: [Practical posterior error bounds from variational objectives](https://arxiv.org/abs/1910.04102). Jonathan H. Huggins, Mikołaj Kasprzak, Trevor Campbell, Tamara Broderick. In *Proc. of the 23rd International Conference on Artificial Intelligence and Statistics* (AISTATS), Palermo, Italy. PMLR: Volume 108, 2020.
###Code
regression_model_code = """data {
int<lower=0> N; // number of observations
matrix[N, 2] x; // predictor matrix
vector[N] y; // outcome vector
real<lower=1> df; // degrees of freedom
}
parameters {
vector[2] beta; // coefficients for predictors
}
model {
beta ~ normal(0, 10);
y ~ student_t(df, x * beta, 1); // likelihood
}"""
regression_model = pystan.StanModel(model_code=regression_model_code,
model_name='regression_model')
###Output
INFO:pystan:COMPILING THE C++ CODE FOR MODEL regression_model_da1de6f365ef6d14558a2a755c2fe849 NOW.
###Markdown
We generate 25 observations from the model with $\beta = (-2, 1)$.
###Code
np.random.seed(5039)
beta_gen = np.array([-2, 1])
N = 25
x = np.random.randn(N, 2).dot(np.array([[1,.75],[.75, 1]]))
y_raw = x.dot(beta_gen) + np.random.standard_t(40, N)
y = y_raw - np.mean(y_raw)
###Output
_____no_output_____
###Markdown
First, we generate ground truth samples using Stan's dynamic HMC implementation
###Code
data = dict(N=N, x=x, y=y, df=40)
fit = regression_model.sampling(data=data, iter=50000, thin=50, chains=10)
true_mean = np.mean(fit['beta'], axis=0)
true_cov = np.cov(fit['beta'].T)
print('true mean =', true_mean)
print('true cov =', true_cov)
fit
jp = sns.jointplot(x=fit['beta[1]'], y=fit['beta[2]'], kind='kde')
jp.set_axis_labels(r'$\beta_1$', r'$\beta_2$')
plt.show()
###Output
_____no_output_____
###Markdown
Standard mean-field variational inference Next, we compute a mean-field variational approximation using standard variational inference (that is, minimizing the KL-divergence)
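Concretely (standard definitions, stated here only for reference), standard VI maximizes the ELBO $\mathcal{L}(\lambda) = \mathbb{E}_{q_\lambda}\left[\log p(y, \beta) - \log q_\lambda(\beta)\right]$; since $\log p(y) = \mathcal{L}(\lambda) + \mathrm{KL}(q_\lambda \,\|\, p(\beta \mid y))$, maximizing the ELBO is equivalent to minimizing the KL-divergence.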
###Code
mf_t_var_family = mean_field_t_variational_family(2, 40)
stan_log_density = make_stan_log_density(fit)
klvi_objective_and_grad = black_box_klvi(mf_t_var_family, stan_log_density, 100)
init_mean = np.zeros(2)
init_log_std = np.ones(2)
init_var_param = np.concatenate([init_mean, init_log_std])
n_iters = 5000
plot_approx_and_exact_contours(stan_log_density, mf_t_var_family, init_var_param, **lims)
klvi_var_param, klvi_param_history, klvi_history, _ = \
adagrad_optimize(n_iters, klvi_objective_and_grad, init_var_param, learning_rate=.01)
###Output
_____no_output_____
###Markdown
We can plot the distance from the optimal parameter to visually verify convergence
###Code
plot_dist_to_opt_param(klvi_param_history, klvi_var_param)
###Output
_____no_output_____
###Markdown
Due to the strong posterior correlation, the variational approximation dramatically underestimates uncertainty
###Code
plot_approx_and_exact_contours(stan_log_density, mf_t_var_family, klvi_var_param, **lims,
savepath='../figures/robust-regression-mf-klvi.pdf')
check_approx_accuracy(mf_t_var_family, klvi_var_param, true_mean, true_cov, verbose=True);
###Output
mean = [-2.52404451 1.52136443]
stdevs = [0.14346427 0.14578815]
mean error = 0.00295
stdev error = 0.734
||cov error||_2^{1/2} = 0.922
||true cov||_2^{1/2} = 0.933
###Markdown
We can check approximation quality using `viabel`. All bounds suggest the approximation is not good.
###Code
_, log_weights = get_samples_and_log_weights(stan_log_density, mf_t_var_family, klvi_var_param, mc_samples)
elbo = np.mean(log_weights)
var_dist_cov = mf_t_var_family.mean_and_cov(klvi_var_param)[1]
moment_bound_fn = lambda p: mf_t_var_family.pth_moment(p, klvi_var_param)
klvi_results = all_bounds(log_weights, q_var=var_dist_cov, moment_bound_fn=moment_bound_fn)
print_bounds(klvi_results)
klvi_psis_results, _, _ = improve_with_psis(stan_log_density, mf_t_var_family, klvi_var_param,
mc_samples, true_mean, true_cov, verbose=True)
###Output
/Users/jhuggins/Desktop/viabel/notebooks/psis.py:295: RuntimeWarning: overflow encountered in exp
np.exp(temp, out=temp)
###Markdown
Mean-field variational inference with CHIVI We can repeat the same procedure, but this time using CHIVI and a Student t mean-field variational family.
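For reference, CHIVI targets the $\chi^2$-divergence rather than the KL: it minimizes the upper bound $\mathrm{CUBO}_2(\lambda) = \tfrac{1}{2}\log \mathbb{E}_{q_\lambda}\left[\left(p(y, \beta)/q_\lambda(\beta)\right)^2\right] \ge \log p(y)$ (a standard definition; the order 2 presumably corresponds to the first argument of `black_box_chivi` below).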
###Code
mf_t_var_family = mean_field_t_variational_family(2, 40)
chivi_objective_and_grad = black_box_chivi(2, mf_t_var_family, stan_log_density, 500)
init_var_param = klvi_var_param.copy()
init_var_param[2:] += 3
chivi_var_param, chivi_param_history, chivi_history, _ = \
adagrad_optimize(n_iters, chivi_objective_and_grad, init_var_param, learning_rate=.01)
plot_dist_to_opt_param(chivi_param_history, chivi_var_param)
###Output
_____no_output_____
###Markdown
The variance is no longer so badly underestimated. But the variational approximation is still not very good, now greatly over-estimating the region with significant posterior mass.
###Code
plot_approx_and_exact_contours(stan_log_density, mf_t_var_family, chivi_var_param, **lims,
cmap2='Blues', savepath='../figures/robust-regression-mf-chivi.pdf')
check_approx_accuracy(mf_t_var_family, chivi_var_param, true_mean, true_cov, verbose=True);
###Output
mean = [-2.5182418 1.52051221]
stdevs = [0.71666519 0.73338569]
mean error = 0.00659
stdev error = 0.0864
||cov error||_2^{1/2} = 0.718
||true cov||_2^{1/2} = 0.933
###Markdown
The fact that the approximation is still not good is reflected in the large bounds:
###Code
_, log_weights = get_samples_and_log_weights(stan_log_density, mf_t_var_family, chivi_var_param, mc_samples)
var_dist_cov = mf_t_var_family.mean_and_cov(chivi_var_param)[1]
moment_bound_fn = lambda p: mf_t_var_family.pth_moment(p, chivi_var_param)
chivi_results = all_bounds(log_weights, q_var=var_dist_cov, log_norm_bound=elbo, moment_bound_fn=moment_bound_fn)
print_bounds(chivi_results)
chivi_psis_results, _, _ = improve_with_psis(stan_log_density, mf_t_var_family, chivi_var_param,
mc_samples, true_mean, true_cov, verbose=True)
###Output
khat = 0.341
mean = [-2.52373214 1.51998706]
stdevs = [0.65295089 0.6633713 ]
mean error = 0.00172
stdev error = 0.00826
||cov error||_2^{1/2} = 0.124
||true cov||_2^{1/2} = 0.933
###Markdown
A non-mean-field approximation Finally, we obtain a very good approximation by using a Student t variational family with a full-rank scaling matrix.
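In other words, the variational family is $q_\lambda(\beta) = t_{\nu}(\beta \mid \mu, \Sigma)$ with a dense $2 \times 2$ scale matrix $\Sigma$, so posterior correlation between $\beta_1$ and $\beta_2$ can be captured (presumably $\nu = 100$ here, the second argument of `t_variational_family(2, 100)` below).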
###Code
t_var_family = t_variational_family(2, 100)
full_klvi_objective_and_grad = black_box_klvi(t_var_family, stan_log_density, 100)
init_var_param = np.zeros(t_var_family.var_param_dim)
plot_approx_and_exact_contours(stan_log_density, t_var_family, init_var_param, **lims)
full_klvi_var_param, full_klvi_param_history, full_klvi_history, _ = \
adagrad_optimize(n_iters, full_klvi_objective_and_grad, init_var_param,
learning_rate=.1, learning_rate_end=.001)
plot_dist_to_opt_param(full_klvi_param_history, full_klvi_var_param)
plot_approx_and_exact_contours(stan_log_density, t_var_family, full_klvi_var_param, **lims,
savepath='../figures/robust-regression-full-rank-klvi.pdf')
check_approx_accuracy(t_var_family, full_klvi_var_param, true_mean, true_cov, verbose=True);
###Output
mean = [-2.52491437 1.52131867]
stdevs = [0.65700772 0.66745318]
mean error = 0.00289
stdev error = 0.00271
||cov error||_2^{1/2} = 0.0689
||true cov||_2^{1/2} = 0.933
###Markdown
Bounds computed using `viabel` -- and the 2-divergence bound in particular -- confirm that the approximation is accurate.
###Code
_, log_weights = get_samples_and_log_weights(stan_log_density, t_var_family, full_klvi_var_param, mc_samples)
var_dist_cov = t_var_family.mean_and_cov(full_klvi_var_param)[1]
moment_bound_fn = lambda p: t_var_family.pth_moment(p, full_klvi_var_param)
full_klvi_results = all_bounds(log_weights, q_var=var_dist_cov, moment_bound_fn=moment_bound_fn)
print_bounds(full_klvi_results)
full_klvi_psis_results, _, _ = improve_with_psis(stan_log_density, t_var_family, full_klvi_var_param,
mc_samples, true_mean, true_cov, verbose=True)
###Output
khat = -0.93
mean = [-2.52488309 1.52143322]
stdevs = [0.6542771 0.66500373]
mean error = 0.003
stdev error = 0.00616
||cov error||_2^{1/2} = 0.107
||true cov||_2^{1/2} = 0.933
|
SVM_HashingVectorizer_Under_Sampling_TomekLinks.ipynb | ###Markdown
Support vector machines using hashing vectorizer and Tomek links under-sampling technique. In this notebook, the Tomek-Links algorithm for under-sampling is used in an attempt to overcome the problem of imbalanced classes in section '[6.5] Under Sampling' of the code. Tomek links are pairs of oppositely labelled instances that are very close together; the TomekLinks function removes the majority-class instance of each pair. Once transformed, the class distribution of the new dataset in the variables *X_tl* and *y_tl* is expected to be more balanced through the removal of many examples in the majority class.
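A minimal, hedged sketch of the resampling step on synthetic data (the `make_classification` call and its settings are illustrative assumptions, not part of the review pipeline below):
###Code
from collections import Counter
from sklearn.datasets import make_classification
from imblearn.under_sampling import TomekLinks

# build a small, deliberately imbalanced two-class toy dataset
X_demo, y_demo = make_classification(n_samples=1000, n_classes=2,
                                     weights=[0.9, 0.1], random_state=0)
print("class counts before:", Counter(y_demo))

# remove the majority-class member of each Tomek link
tl_demo = TomekLinks()
X_demo_tl, y_demo_tl = tl_demo.fit_resample(X_demo, y_demo)
print("class counts after: ", Counter(y_demo_tl))
###Output
_____no_output_____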
###Code
import pandas as pd
from nltk.corpus import stopwords
from sklearn.preprocessing import LabelEncoder
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn import model_selection, naive_bayes, svm
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
from collections import Counter
#[1] Importing dataset
dataset = pd.read_json(r"C:\Users\Panos\Desktop\Dissert\Code\Video_Games_5.json", lines=True, encoding='latin-1')
dataset = dataset[['reviewText','overall']]
#[2] Reduce number of classes
ratings = []
for index,entry in enumerate(dataset['overall']):
if entry == 1.0 or entry == 2.0:
ratings.append(-1)
elif entry == 3.0:
ratings.append(0)
elif entry == 4.0 or entry == 5.0:
ratings.append(1)
#[3] Cleaning the text
import re
import nltk
from nltk.corpus import stopwords
corpus = []
for i in range(0, len(dataset)):
review = re.sub('[^a-zA-Z]', ' ', dataset['reviewText'][i])
review = review.lower()
review = review.split()
review = [word for word in review if not word in set(stopwords.words('english'))]
review = ' '.join(review)
corpus.append(review)
#[4] Prepare Train and Test Data sets
Train_X, Test_X, Train_Y, Test_Y = model_selection.train_test_split(corpus,ratings,test_size=0.3)
print(Counter(Train_Y).values()) # counts the elements' frequency
#[5] Encoding
Encoder = LabelEncoder()
Train_Y = Encoder.fit_transform(Train_Y)
Test_Y = Encoder.fit_transform(Test_Y)
#[6] Word Vectorization
Hashing_vect = HashingVectorizer(alternate_sign=False, binary=True)
Hashing_vect.fit(corpus)
Train_X_Hashing = Hashing_vect.transform(Train_X)
Test_X_Hashing = Hashing_vect.transform(Test_X)
#[6.5] Under Sampling
# Perform under-sampling by removing Tomek’s links
from imblearn.under_sampling import TomekLinks
tl = TomekLinks()
#Train_Y = Train_Y.transpose()
X_tl, y_tl = tl.fit_resample(Train_X_Hashing, Train_Y)
#[8] Use the Support Vector Machine Algorithms to Predict the outcome
# Classifier - Algorithm - SVM
# fit the training dataset on the classifier
SVM = svm.SVC(C=1.0, kernel='linear', degree=3, gamma='auto')
SVM.fit(X_tl,y_tl)
# predict the labels on validation dataset
predictions_SVM = SVM.predict(Test_X_Hashing)
# Use accuracy_score function to get the accuracy
print("-----------------Support Vector Machine CM------------------\n")
print("Accuracy Score -> ",accuracy_score(predictions_SVM, Test_Y)*100)
cm = confusion_matrix(Test_Y, predictions_SVM)
# Making the confusion matrix
print("\n",cm,"\n")
# Printing a classification report of different metrics
# class display names for target_names (assumed; kept in the order used by the original report output)
my_tags = ['Positive','Neutral','Negative']
print(classification_report(Test_Y, predictions_SVM,target_names=my_tags))
# Export reports to files for later visualizations
report_SVM = classification_report(Test_Y, predictions_SVM,target_names=my_tags, output_dict=True)
report_SVM_df = pd.DataFrame(report_SVM).transpose()
report_SVM_df.to_csv(r'SVM_report_HashingVect.csv', index = True, float_format="%.3f")
###Output
-----------------Support Vector Machine CM------------------
Accuracy Score -> 82.44455949607386
[[ 4997 697 2846]
[ 1333 1291 5900]
[ 869 562 51039]]
precision recall f1-score support
Positive 0.69 0.59 0.63 8540
Neutral 0.51 0.15 0.23 8524
Negative 0.85 0.97 0.91 52470
accuracy 0.82 69534
macro avg 0.68 0.57 0.59 69534
weighted avg 0.79 0.82 0.79 69534
|
case_studies/wine.ipynb | ###Markdown
Screenshot of the visualized result in UI
###Code
print(Plot().current_info().dr.M[:, 0]) # print x-axis info for the current result
print(Plot().current_info().w_tg) # print w_tg (target weights) for the current result
print(Plot().current_info().w_bg) # print w_bg (background weights) for the current result
print(Plot().current_info().w_bw) # print w_bw (between-class weights) for the current result
print(Plot().current_info().alpha) # print alpha for the current result
# print(Plot().saved_info()['PCA on Label 1'].dr.M[:, 0]) # print x-axis info for the saved result
# Common in Labels 1&2 but different from Label 0
# (move Label 1 close to Label 2)
# This is a produced parameter example (but rotation is not exactly the same)
w_tg = {0: 0.8362787627856337, 1: 0.41704556470577103, 2: 0.8566001894111713}
w_bg = {0: 0, 1: 0.9618254239294945, 2: 0.6878804303425374}
w_bw = {0: 1.0, 1: 0.4456044544201703, 2: 0}
alpha = 3.462280952336661
ulca = ulca.fit(X, y=y, w_tg=w_tg, w_bg=w_bg, w_bw=w_bw, alpha=alpha)
Plot().plot_emb(ulca, X=X, y=y, w_tg=w_tg, w_bg=w_bg, w_bw=w_bw, feat_names=feat_names)
###Output
_____no_output_____
###Markdown
Screenshot of the visualized result in UI
###Code
# high variance only in Label 2
# Move sliders to be
# w_tg = {0: 0, 1: 0, 2: 1}
# w_bg = {0: 1, 1: 1, 2: 0}
# w_bw = {0: 0, 1: 0, 2: 0}
# alpha = 10
# This produces the same result with the above interactions (but rotation is not exactly the same)
w_tg = {0: 0, 1: 0, 2: 1}
w_bg = {0: 1, 1: 1, 2: 0}
w_bw = {0: 0, 1: 0, 2: 0}
alpha = 10
ulca = ulca.fit(X, y=y, w_tg=w_tg, w_bg=w_bg, w_bw=w_bw, alpha=alpha)
Plot().plot_emb(ulca, X=X, y=y, w_tg=w_tg, w_bg=w_bg, w_bw=w_bw, feat_names=feat_names)
###Output
_____no_output_____ |
notebooks/Data_understanding_v2.ipynb | ###Markdown
Data Understanding * RKI, web scraping: https://www.rki.de/DE/Content/InfAZ/N/Neuartiges_Coronavirus/Fallzahlen.html * Johns Hopkins (GitHub): https://github.com/CSSEGISandData/COVID-19.git * REST API services to retrieve data: https://npgeo-corona-npgeo-de.hub.arcgis.com/ Johns Hopkins data
###Code
import subprocess
import os
import pandas as pd

git_pull = subprocess.Popen( "git pull" ,
cwd = os.path.dirname( '../data/raw/COVID-19/' ),
shell = True,
stdout = subprocess.PIPE,
stderr = subprocess.PIPE )
(out, error) = git_pull.communicate()
print("Error : " + str(error))
print("out : " + str(out))
data_path = ('../data/raw/COVID-19/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv')
pd_raw = pd.read_csv(data_path)
pd_raw
###Output
_____no_output_____
###Markdown
Web scraping through python
###Code
import requests
from bs4 import BeautifulSoup
page = requests.get('https://www.rki.de/DE/Content/InfAZ/N/Neuartiges_Coronavirus/Fallzahlen.html')
soup = BeautifulSoup(page.content,'html.parser')
###Output
_____no_output_____
###Markdown
To retrieve the entire page from above link
###Code
soup.get_text()
html_table = soup.find('table')
all_rows = html_table.find_all('tr')
final_data_list = []
for pos, rows in enumerate(all_rows):
#print(pos)
#print(rows)
col_list = [each_col.get_text(strip=True) for each_col in rows.find_all('td')]
final_data_list.append(col_list)
#print(col_list)
#for each_col in rows.find_all('td'):
#print(each_col.get_text(strip=True))
pd_daily_status = pd.DataFrame(final_data_list).dropna().rename(columns={0:'state',
1: 'Cases',
2: 'Changes',
3: 'cases_per_100k',
4: 'fatal',
5: 'comment'})
pd_daily_status.head()
###Output
_____no_output_____
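###Markdown
As a hedged aside (not part of the original notebook), `pandas.read_html` can often parse the same RKI table directly; this assumes the page still exposes an HTML table and that `lxml`/`bs4` are installed, and the column names may differ from the manual parse above.
###Code
# sketch: let pandas find and parse the tables on the RKI page
rki_tables = pd.read_html('https://www.rki.de/DE/Content/InfAZ/N/Neuartiges_Coronavirus/Fallzahlen.html')
print(len(rki_tables))
rki_tables[0].head()
###Output
_____no_output_____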
###Markdown
Rest API Calls
###Code
import json

data = requests.get('https://services7.arcgis.com/mOBPykOjAyBO2ZKk/arcgis/rest/services/Coronaf%C3%A4lle_in_den_Bundesl%C3%A4ndern/FeatureServer/0/query?where=1%3D1&outFields=*&outSR=4326&f=json')
json_object = json.loads(data.content)
type(json_object)
json_object.keys()
full_list = []
for pos, each_dict in enumerate (json_object['features'][:]):
full_list.append(each_dict['attributes'])
pd.DataFrame(full_list)
###Output
_____no_output_____ |
Tissue_DNA-FISH/20201205-single_fov_P-brain_after_clearing_IgH_Crick.ipynb | ###Markdown
0. required packages for h5py
###Code
import h5py
from ImageAnalysis3.classes import _allowed_kwds
import ast
###Output
_____no_output_____
###Markdown
1. Create field-of-view class
###Code
# assumed imports for the modules used in the cells below (an earlier setup cell likely provided these)
import numpy as np
import matplotlib.pyplot as plt
from importlib import reload
import ImageAnalysis3 as ia
from ImageAnalysis3 import classes, visual_tools, io_tools, correction_tools, spot_tools

reload(ia)
reload(classes)
reload(classes.batch_functions)
reload(classes.field_of_view)
reload(io_tools.load)
reload(visual_tools)
reload(ia.correction_tools)
reload(ia.correction_tools.alignment)
reload(ia.spot_tools.matching)
reload(ia.segmentation_tools.chromosome)
reload(ia.spot_tools.fitting)
fov_param = {'data_folder':r'\\10.245.74.158\Chromatin_NAS_5\20201127-NOAcr_CTP-08_E14_brain_no_clearing\After_clearing',
'save_folder':r'W:\Pu_Temp\20201127_IgH_P-brain\after_clearing',
#'save_folder':r'D:\Pu_Temp\202009_IgH_proB_DMSO_2color',
'experiment_type': 'DNA',
'num_threads': 6,
'correction_folder':r'\\10.245.74.158\Chromatin_NAS_0\Corrections\20201204-Corrections_3color_50',
'shared_parameters':{
'single_im_size':[50,2048,2048],
'corr_channels':['750','647','561'],
'num_empty_frames': 0,
'corr_hot_pixel':True,
'corr_Z_shift':False,
'min_num_seeds':500,
'max_num_seeds': 2500,
'spot_seeding_th':150,
'normalize_intensity_local':False,
'normalize_intensity_background':False,
},
}
sel_fov_id = 7
fov = classes.field_of_view.Field_of_View(fov_param, _fov_id=sel_fov_id,
_color_info_kwargs={
'_color_filename':'Color_Usage',
},
_prioritize_saved_attrs=False,
)
###Output
Get Folder Names: (ia.get_img_info.get_folders)
- Number of folders: 14
- Number of field of views: 60
- Importing csv file: \\10.245.74.158\Chromatin_NAS_5\20201127-NOAcr_CTP-08_E14_brain_no_clearing\After_clearing\Analysis\Color_Usage.csv
- header: ['Hyb', '750', '647', '561', '488', '405']
-- Hyb H0R0 exists in this data
-- DAPI exists in hyb: H0R0
- 14 folders are found according to color-usage annotation.
++ load bleed correction profile from original file.
-- loading bleedthrough correction profile from file:bleedthrough_correction_750_647_561_2048_2048.npy
++ load chromatic correction profile from original file.
-- loading chromatic correction profile from file:
750 chromatic_correction_750_647_50_2048_2048.npy
647 None
561 chromatic_correction_561_647_50_2048_2048.npy
++ load chromatic_constants correction profile from original file.
-- loading chromatic_constants correction profile from file:
750 chromatic_correction_750_647_50_2048_2048_const.pkl
647 None
561 chromatic_correction_561_647_50_2048_2048_const.pkl
++ load illumination correction profile from original file.
-- loading illumination correction profile from file:
750 illumination_correction_750_2048x2048.npy
647 illumination_correction_647_2048x2048.npy
561 illumination_correction_561_2048x2048.npy
488 illumination_correction_488_2048x2048.npy
405 illumination_correction_405_2048x2048.npy
- Creating save file for fov:Conv_zscan_07.dax: W:\Pu_Temp\20201127_IgH_P-brain\after_clearing\Conv_zscan_07.hdf5.
* create savefile: W:\Pu_Temp\20201127_IgH_P-brain\after_clearing\Conv_zscan_07.hdf5
-- saving fov_info to file: W:\Pu_Temp\20201127_IgH_P-brain\after_clearing\Conv_zscan_07.hdf5
++ base attributes saved:['analysis_folder', 'annotated_folders', 'bead_channel_index', 'channels', 'color_dic', 'color_filename', 'color_format', 'correction_folder', 'dapi_channel_index', 'data_folder', 'drift', 'drift_filename', 'drift_folder', 'experiment_folder', 'folders', 'fov_id', 'fov_name', 'map_folder', 'num_threads', 'ref_filename', 'ref_id', 'rotation', 'save_filename', 'save_folder', 'segmentation_dim', 'segmentation_folder', 'shared_parameters', 'use_dapi'] in 0.037s.
-- saving segmentation to file: W:\Pu_Temp\20201127_IgH_P-brain\after_clearing\Conv_zscan_07.hdf5
-- saving correction to file: W:\Pu_Temp\20201127_IgH_P-brain\after_clearing\Conv_zscan_07.hdf5
-- saving 750_illumination profile to save_file: W:\Pu_Temp\20201127_IgH_P-brain\after_clearing\Conv_zscan_07.hdf5.
-- saving 647_illumination profile to save_file: W:\Pu_Temp\20201127_IgH_P-brain\after_clearing\Conv_zscan_07.hdf5.
-- saving 561_illumination profile to save_file: W:\Pu_Temp\20201127_IgH_P-brain\after_clearing\Conv_zscan_07.hdf5.
-- saving 488_illumination profile to save_file: W:\Pu_Temp\20201127_IgH_P-brain\after_clearing\Conv_zscan_07.hdf5.
-- saving 405_illumination profile to save_file: W:\Pu_Temp\20201127_IgH_P-brain\after_clearing\Conv_zscan_07.hdf5.
-- saving 750_chromatic profile to save_file: W:\Pu_Temp\20201127_IgH_P-brain\after_clearing\Conv_zscan_07.hdf5.
-- saving 647_chromatic profile to save_file: W:\Pu_Temp\20201127_IgH_P-brain\after_clearing\Conv_zscan_07.hdf5.
-- saving 561_chromatic profile to save_file: W:\Pu_Temp\20201127_IgH_P-brain\after_clearing\Conv_zscan_07.hdf5.
-- saving 750_chromatic_constants profile with ['fitting_orders', 'constants', 'rsquares', 'ref_center'] to save_file: W:\Pu_Temp\20201127_IgH_P-brain\after_clearing\Conv_zscan_07.hdf5.
-- 647_chromatic_constants profile already exist in save_file: W:\Pu_Temp\20201127_IgH_P-brain\after_clearing\Conv_zscan_07.hdf5, skip.
-- saving 561_chromatic_constants profile with ['fitting_orders', 'constants', 'rsquares', 'ref_center'] to save_file: W:\Pu_Temp\20201127_IgH_P-brain\after_clearing\Conv_zscan_07.hdf5.
-- saving 750_647_561_bleed profile to save_file: W:\Pu_Temp\20201127_IgH_P-brain\after_clearing\Conv_zscan_07.hdf5.
-- saving unique to file: W:\Pu_Temp\20201127_IgH_P-brain\after_clearing\Conv_zscan_07.hdf5
--- unique attributes updated:['ids', 'channels', 'ims', 'spots', 'drifts', 'flags'] in 0.011s.
-- saving fov_info to file: W:\Pu_Temp\20201127_IgH_P-brain\after_clearing\Conv_zscan_07.hdf5
++ base attributes saved:['analysis_folder', 'annotated_folders', 'bead_channel_index', 'channels', 'color_dic', 'color_filename', 'color_format', 'correction_folder', 'dapi_channel_index', 'data_folder', 'drift', 'drift_filename', 'drift_folder', 'experiment_folder', 'folders', 'fov_id', 'fov_name', 'map_folder', 'num_threads', 'ref_filename', 'ref_id', 'rotation', 'save_filename', 'save_folder', 'segmentation_dim', 'segmentation_folder', 'shared_parameters', 'use_dapi'] in 0.030s.
###Markdown
2. Process image into candidate spots
###Code
reload(io_tools.load)
reload(spot_tools.fitting)
reload(correction_tools.chromatic)
reload(classes.batch_functions)
# process image into spots
id_list, spot_list = fov._process_image_to_spots('unique',
#_sel_ids=np.arange(41,47),
_load_common_reference=True,
_load_with_multiple=False,
_save_images=True,
_warp_images=False,
_overwrite_drift=True,
_overwrite_image=False,
_overwrite_spot=False,
_verbose=True)
###Output
-- No folder selected, allow processing all 14 folders
+ load reference image from file:\\10.245.74.158\Chromatin_NAS_5\20201127-NOAcr_CTP-08_E14_brain_no_clearing\After_clearing\H0R0\Conv_zscan_07.dax
- correct the whole fov for image: \\10.245.74.158\Chromatin_NAS_5\20201127-NOAcr_CTP-08_E14_brain_no_clearing\After_clearing\H0R0\Conv_zscan_07.dax
-- loading illumination correction profile from file:
488 illumination_correction_488_2048x2048.npy
-- loading image from file:\\10.245.74.158\Chromatin_NAS_5\20201127-NOAcr_CTP-08_E14_brain_no_clearing\After_clearing\H0R0\Conv_zscan_07.dax in 23.478s
-- removing hot pixels for channels:['488'] in 12.861s
-- illumination correction for channels: 488, in 2.533s
-- -- generate translation function with drift:[0. 0. 0.] in 0.000s
-- finish correction in 39.504s
-- saving fov_info to file: W:\Pu_Temp\20201127_IgH_P-brain\after_clearing\Conv_zscan_07.hdf5
++ base attributes saved:['ref_im'] in 22.708s.
-- checking unique, region:[41 42] in 0.010s.
-- checking unique, region:[66 67] in 0.008s.
-- checking unique, region:[69 70] in 0.008s.
-- checking unique, region:[72 73] in 0.008s.
-- checking unique, region:[75 76] in 0.008s.
-- checking unique, region:[78 79] in 0.008s.
-- checking unique, region:[81 82] in 0.008s.
-- checking unique, region:[ 99 100] in 0.008s.
-- checking unique, region:[102 103] in 0.009s.
-- checking unique, region:[105 106] in 0.008s.
-- checking unique, region:[108 109] in 0.008s.
-- checking unique, region:[111 112] in 0.009s.
+ Start multi-processing of pre-processing for 12 images with 6 threads
++ processed unique ids: [ 41 42 66 67 69 70 72 73 75 76 78 79 81 82 99 100 102 103
105 106 108 109 111 112] in 1547.11s.
###Markdown
3. Find chromosomes 3.1 load chromosome image
###Code
chrom_im = fov._load_chromosome_image(_type='forward', _overwrite=False)
%matplotlib notebook
# visualize chromsome image:
visual_tools.imshow_mark_3d_v2([fov.chrom_im])
###Output
_____no_output_____
###Markdown
3.2 find candidate chromosomes
###Code
chrom_coords = fov._find_candidate_chromosomes_by_segmentation(_filt_size=4,
_binary_per_th=99.,
_morphology_size=2,
_overwrite=True)
###Output
-- adjust seed image with filter size=4
-- binarize image with threshold: 99.0%
-- erosion and dialation with size=2.
-- find close objects.
-- random walk segmentation, beta=10.
###Markdown
3.3 select among candidate chromosomes
###Code
chrom_coords = fov._select_chromosome_by_candidate_spots(_good_chr_loss_th=0.5,
_cand_spot_intensity_th=0.5,
_save=True,
_overwrite=True)
###Output
- start select from 482 chromosomes with loss threshold=0.5
-- remove chr id 0, percentage of lost rounds:0.792.
-- remove chr id 12, percentage of lost rounds:0.792.
-- remove chr id 6, percentage of lost rounds:0.667.
-- remove chr id 379, percentage of lost rounds:0.625.
-- remove chr id 309, percentage of lost rounds:0.583.
-- remove chr id 335, percentage of lost rounds:0.542.
-- 476 chromosomes are kept.
-- saving fov_info to file: D:\Pu_Temp\20201127_IgH_P-brain\after_clearing\Conv_zscan_03.hdf5
++ base attributes saved:['chrom_coords'] in 0.228s.
###Markdown
visualize chromosomes selections
###Code
%matplotlib notebook
%matplotlib notebook
## visualize
coord_dict = {'coords':[np.flipud(_coord) for _coord in fov.chrom_coords],
'class_ids':list(np.zeros(len(fov.chrom_coords),dtype=np.int)),
}
visual_tools.imshow_mark_3d_v2([fov.chrom_im],
given_dic=coord_dict,
save_file=None,
)
fov.color_dic
fov.unique_ids[:14]
before_fov_param = {
'data_folder':r'\\10.245.74.158\Chromatin_NAS_5\20201127-NOAcr_CTP-08_E14_brain_no_clearing\Before_clearing',
'save_folder':r'D:\Pu_Temp\20201127_IgH_P-brain\before_clearing',
#'save_folder':r'D:\Pu_Temp\202009_IgH_proB_DMSO_2color',
'experiment_type': 'DNA',
'num_threads': 12,
'correction_folder':r'\\10.245.74.158\Chromatin_NAS_0\Corrections\20201204-Corrections_3color_50',
'shared_parameters':{
'single_im_size':[50,2048,2048],
'corr_channels':['750','647','561'],
'num_empty_frames': 0,
'corr_hot_pixel':True,
'corr_Z_shift':False,
'min_num_seeds':500,
'max_num_seeds': 2500,
'spot_seeding_th':150,
'normalize_intensity_local':True,
'normalize_intensity_background':False,
},
}
before_fov = classes.field_of_view.Field_of_View(before_fov_param, _fov_id=3,
_color_info_kwargs={
'_color_filename':'Color_Usage',
},
_prioritize_saved_attrs=False,
)
###Output
Get Folder Names: (ia.get_img_info.get_folders)
- Number of folders: 8
- Number of field of views: 60
- Importing csv file: \\10.245.74.158\Chromatin_NAS_5\20201127-NOAcr_CTP-08_E14_brain_no_clearing\Before_clearing\Analysis\Color_Usage.csv
- header: ['Hyb', '750', '647', '561', '488', '405']
-- Hyb H0R0 exists in this data
-- DAPI exists in hyb: H0R0
- 8 folders are found according to color-usage annotation.
+ loading fov_info from file: D:\Pu_Temp\20201127_IgH_P-brain\before_clearing\Conv_zscan_03.hdf5
++ base attributes loaded:['cand_chrom_coords', 'chrom_coords', 'chrom_im', 'ref_im'] in 3.439s.
+ loading correction from file: D:\Pu_Temp\20201127_IgH_P-brain\before_clearing\Conv_zscan_03.hdf5
++ load bleed correction profile directly from savefile.
++ load chromatic correction profile directly from savefile.
++ load chromatic_constants correction profile directly from savefile.
++ load illumination correction profile directly from savefile.
+ loading segmentation from file: D:\Pu_Temp\20201127_IgH_P-brain\before_clearing\Conv_zscan_03.hdf5
++ base attributes loaded:[] in 0.001s.
-- saving fov_info to file: D:\Pu_Temp\20201127_IgH_P-brain\before_clearing\Conv_zscan_03.hdf5
++ base attributes saved:['analysis_folder', 'annotated_folders', 'bead_channel_index', 'cand_chrom_coords', 'channels', 'chrom_coords', 'chrom_im', 'color_dic', 'color_filename', 'color_format', 'correction_folder', 'dapi_channel_index', 'data_folder', 'drift', 'drift_filename', 'drift_folder', 'experiment_folder', 'folders', 'fov_id', 'fov_name', 'map_folder', 'num_threads', 'ref_filename', 'ref_id', 'ref_im', 'rotation', 'save_filename', 'save_folder', 'segmentation_dim', 'segmentation_folder', 'shared_parameters', 'use_dapi'] in 11.224s.
###Markdown
Load DAPI-image as reference
###Code
fov._load_dapi_image()
before_fov._load_dapi_image()
###Output
-- choose dapi images from folder: \\10.245.74.158\Chromatin_NAS_5\20201127-NOAcr_CTP-08_E14_brain_no_clearing\After_clearing\H0R0.
-- choose dapi images from folder: \\10.245.74.158\Chromatin_NAS_5\20201127-NOAcr_CTP-08_E14_brain_no_clearing\After_clearing\H0R0.
- correct the whole fov for image: \\10.245.74.158\Chromatin_NAS_5\20201127-NOAcr_CTP-08_E14_brain_no_clearing\After_clearing\H0R0\Conv_zscan_03.dax
-- loading illumination correction profile from file:
405 illumination_correction_405_2048x2048.npy
-- loading image from file:\\10.245.74.158\Chromatin_NAS_5\20201127-NOAcr_CTP-08_E14_brain_no_clearing\After_clearing\H0R0\Conv_zscan_03.dax in 24.165s
-- removing hot pixels for channels:['405'] in 7.276s
-- illumination correction for channels: 405, in 1.538s
-- warp image with drift:[0. 0. 0.] in 0.000s
-- finish correction in 33.582s
-- saving fov_info to file: D:\Pu_Temp\20201127_IgH_P-brain\after_clearing\Conv_zscan_03.hdf5
++ base attributes saved:['dapi_im'] in 4.672s.
-- choose dapi images from folder: \\10.245.74.158\Chromatin_NAS_5\20201127-NOAcr_CTP-08_E14_brain_no_clearing\Before_clearing\H0R0.
-- choose dapi images from folder: \\10.245.74.158\Chromatin_NAS_5\20201127-NOAcr_CTP-08_E14_brain_no_clearing\Before_clearing\H0R0.
- correct the whole fov for image: \\10.245.74.158\Chromatin_NAS_5\20201127-NOAcr_CTP-08_E14_brain_no_clearing\Before_clearing\H0R0\Conv_zscan_03.dax
-- loading illumination correction profile from file:
405 illumination_correction_405_2048x2048.npy
-- loading image from file:\\10.245.74.158\Chromatin_NAS_5\20201127-NOAcr_CTP-08_E14_brain_no_clearing\Before_clearing\H0R0\Conv_zscan_03.dax in 13.592s
-- removing hot pixels for channels:['405'] in 7.317s
-- illumination correction for channels: 405, in 1.559s
-- warp image with drift:[0. 0. 0.] in 0.000s
-- finish correction in 22.965s
-- saving fov_info to file: D:\Pu_Temp\20201127_IgH_P-brain\before_clearing\Conv_zscan_03.hdf5
++ base attributes saved:['dapi_im'] in 4.609s.
###Markdown
calculate rotation matrix
###Code
from ImageAnalysis3 import alignment_tools
reload(alignment_tools)
experiment_folder = r'U:\20201127-NOAcr_CTP-08_E14_brain_no_clearing\Experiments'
before_position_file = os.path.join(experiment_folder, '10x_positions.txt')
after_position_file = os.path.join(experiment_folder, '10x_positions_after.txt')
R, T = alignment_tools.align_manual_points(before_position_file, after_position_file, save=False)
%matplotlib notebook
visual_tools.imshow_mark_3d_v2([before_fov.dapi_im, fov.dapi_im, nim])
# noted coordinates: 1096 604 1108 588
nim.dtype
reload(correction_tools.alignment)
nim, rot, dft = correction_tools.alignment.calculate_translation(before_fov.dapi_im,
fov.dapi_im, R)
# scratch calls left unqualified (the module-qualified versions are used above and below):
# cross_correlation_align_single_image(before_fov.dapi_im)
# calculate_translation(before_fov.dapi_im, fov.dapi_im, R )
R[:1]
reload(correction_tools.alignment)
nim, M = correction_tools.alignment.generate_translation_from_DAPI(before_fov.dapi_im, fov.dapi_im,
R)
correction_tools.alignment.cross_correlation_align_single_image(before_fov.dapi_im, nim, #fov.dapi_im,
single_im_size=fov.shared_parameters['single_im_size'])
reload(spot_tools)
reload(spot_tools.translating)
fov.chrom_coords = spot_tools.translating.translate_spots(before_fov.chrom_coords,
rot, dft)
%matplotlib notebook
%matplotlib notebook
## visualize
coord_dict = {'coords':[np.flipud(_coord) for _coord in fov.chrom_coords],
'class_ids':list(np.zeros(len(fov.chrom_coords),dtype=np.int)),
}
visual_tools.imshow_mark_3d_v2([fov.chrom_im],
given_dic=coord_dict,
save_file=None,
)
###Output
_____no_output_____
###Markdown
find spots
###Code
fov.color_dic
###Output
_____no_output_____
###Markdown
select spots based on chromosomes
###Code
fov._load_from_file('unique')
plt.figure()
for _reg_id in range(4):
plt.hist(fov.unique_spots_list[_reg_id][:,0], bins=np.arange(0,4000,40),
label=f"{_reg_id}", alpha=0.5)
plt.legend()
plt.show()
plt.figure()
for _reg_id in range(4):
plt.hist(fov.unique_spots_list[_reg_id][:,4], bins=np.arange(0,4000,40),
label=f"{_reg_id}", alpha=0.5)
plt.legend()
plt.show()
intensity_th = 0.25
from ImageAnalysis3.spot_tools.picking import assign_spots_to_chromosomes
kept_spots_list = []
for _spots in fov.unique_spots_list:
kept_spots_list.append(_spots[_spots[:,0] > intensity_th])
# finalize candidate spots
cand_chr_spots_list = [[] for _ct in fov.chrom_coords]
for _spots in kept_spots_list:
_cands_list = assign_spots_to_chromosomes(_spots, fov.chrom_coords)
for _i, _cands in enumerate(_cands_list):
cand_chr_spots_list[_i].append(_cands)
print(f"kept chromosomes: {len(fov.chrom_coords)}")
reload(spot_tools.picking)
from ImageAnalysis3.spot_tools.picking import convert_spots_to_hzxys
dna_cand_hzxys_list = [convert_spots_to_hzxys(_spots, fov.shared_parameters['distance_zxy'])
for _spots in cand_chr_spots_list]
dna_reg_ids = fov.unique_ids
# select_hzxys close to the chromosome center
dist_th = 3000 # upper limit is 5000nm
sel_dna_cand_hzxys_list = []
for _cand_hzxys, _chrom_coord in zip(dna_cand_hzxys_list, fov.chrom_coords):
_sel_cands_list = []
for _cands in _cand_hzxys:
if len(_cands) == 0:
_sel_cands_list.append([])
else:
_dists = np.linalg.norm(_cands[:,1:4] - _chrom_coord*np.array([200,108,108]), axis=1)
_sel_cands_list.append(_cands[_dists < dist_th])
# append
sel_dna_cand_hzxys_list.append(_sel_cands_list)
reload(ia.spot_tools.picking)
# load functions
from ImageAnalysis3.spot_tools.picking import Pick_spots_by_intensity, EM_pick_scores_in_population, generate_reference_from_population,evaluate_differences
%matplotlib inline
niter= 10
nkeep = len(sel_dna_cand_hzxys_list)
num_threads = 12
# initialize
init_dna_hzxys = Pick_spots_by_intensity(sel_dna_cand_hzxys_list[:nkeep])
# set save list
sel_dna_hzxys_list, sel_dna_scores_list, all_dna_scores_list = [init_dna_hzxys], [], []
for _iter in range(niter):
print(f"- iter:{_iter}")
# generate reference
ref_ct_dists, ref_local_dists, ref_ints = generate_reference_from_population(
sel_dna_hzxys_list[-1], dna_reg_ids,
sel_dna_hzxys_list[-1][:nkeep], dna_reg_ids,
num_threads=num_threads,
collapse_regions=True,
)
plt.figure(figsize=(4,2))
plt.hist(np.ravel(ref_ints), bins=np.arange(0,20,0.5))
plt.figure(figsize=(4,2))
plt.hist(np.ravel(ref_ct_dists), bins=np.arange(0,5000,100))
plt.figure(figsize=(4,2))
plt.hist(np.ravel(ref_local_dists), bins=np.arange(0,5000,100))
plt.show()
# scoring
sel_hzxys, sel_scores, all_scores = EM_pick_scores_in_population(
sel_dna_cand_hzxys_list[:nkeep], dna_reg_ids, sel_dna_hzxys_list[-1],
ref_ct_dists, ref_local_dists, ref_ints,
sel_dna_hzxys_list[-1], dna_reg_ids, num_threads=num_threads,
)
update_rate = evaluate_differences(sel_hzxys, sel_dna_hzxys_list[-1])
print(f"-- region kept: {update_rate:.4f}")
sel_dna_hzxys_list.append(sel_hzxys)
sel_dna_scores_list.append(sel_scores)
all_dna_scores_list.append(all_scores)
if update_rate > 0.99:
break
from scipy.spatial.distance import pdist, squareform
sel_iter = -1
final_dna_hzxys_list = []
distmap_list = []
score_th = np.exp(-6)
bad_spot_percentage = 0.6
for _hzxys, _scores in zip(sel_dna_hzxys_list[sel_iter], sel_dna_scores_list[sel_iter]):
_kept_hzxys = np.array(_hzxys).copy()
_kept_hzxys[_scores < score_th] = np.nan
if np.mean(np.isnan(_kept_hzxys).sum(1)>0)<bad_spot_percentage:
final_dna_hzxys_list.append(_kept_hzxys)
distmap_list.append(squareform(pdist(_kept_hzxys[:,1:4])))
distmap_list = np.array(distmap_list)
median_distmap = np.nanmedian(distmap_list, axis=0)
loss_rates = np.mean(np.sum(np.isnan(final_dna_hzxys_list), axis=2)>0, axis=0)
fig, ax = plt.subplots(figsize=(4,2),dpi=200)
ax.plot(loss_rates, '.-')
#ax.set_xticks(np.arange(0,150,20))
plt.show()
fig, ax = plt.subplots(figsize=(4,3),dpi=200)
ax = ia.figure_tools.distmap.plot_distance_map(#median_distmap,
median_distmap[:14,:14],
color_limits=[0,800],
ax=ax,
ticks=np.arange(0,150,20),
figure_dpi=200)
ax.set_title(f"after clearing, n={len(distmap_list)}", fontsize=7.5)
plt.gcf().subplots_adjust(bottom=0.1)
plt.show()
###Output
_____no_output_____ |
02_fundamentos_pandas/notebook/07_filtrando_dados.ipynb | ###Markdown
Analysis Report III Residential Properties
###Code
import pandas as pd
dados = pd.read_csv('../dados/aluguel.csv', sep=';')
dados.head(10)
###Output
_____no_output_____
###Markdown
Filtering Properties by the Tipo Column * we want only residential properties * for that, we can use a few kinds of selection on the Tipo column Using unique()
###Code
dados.Tipo.unique()
###Output
_____no_output_____
###Markdown
Using value_counts()
###Code
dados.Tipo.value_counts()
###Output
_____no_output_____
###Markdown
Using drop_duplicates()
###Code
dados.Tipo.drop_duplicates()
###Output
_____no_output_____
###Markdown
Putting drop_duplicates() into a list
###Code
list(dados.Tipo.drop_duplicates())
###Output
_____no_output_____
###Markdown
Creating a New List * we take the current list and discard the types we do not want
###Code
residencial = ['Quitinete',
'Casa',
'Apartamento',
'Casa de Condomínio',
'Casa de Vila']
residencial
###Output
_____no_output_____
###Markdown
The isin() Method * compares the passed variable against the dataframe * creates a boolean Series indicating whether each value in the dataframe column appears in the variable
###Code
dados.head(10)
dados.Tipo.isin(residencial).head(10)
###Output
_____no_output_____
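###Markdown
A minimal, hedged sketch with made-up values (not from the aluguel data) showing how isin() maps each element to True/False:
###Code
# each element of the Series is checked against the list passed to isin()
pd.Series(['Casa', 'Loja', 'Quitinete']).isin(['Casa', 'Quitinete'])
###Output
_____no_output_____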
###Markdown
* we can observe from the two commands above that: * values present in both the variable and the dataframe get True * values not present get False Creating a New Dataframe * putting the result above into a variable
###Code
selecao = dados.Tipo.isin(residencial)
selecao
###Output
_____no_output_____
###Markdown
* passing this information to the dataframe, it keeps only the records whose value is True * the rows with False are discarded
###Code
dados_residencial = dados[selecao]
dados_residencial
###Output
_____no_output_____
###Markdown
Checking Whether the Selection Was Done Correctly
###Code
list(dados_residencial.Tipo.drop_duplicates())
###Output
_____no_output_____
###Markdown
Comparing the Sizes of the Dataframes
###Code
print(dados_residencial.shape[0])
print(dados.shape[0])
###Output
22580
32960
###Markdown
Rebuilding the Index * we assign to the index a range with the dataframe's own length; a hedged alternative sketch follows below, and the notebook's own approach comes right after it
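A hedged alternative sketch (not from the original notebook): `reset_index(drop=True)` produces the same renumbered index without building a range by hand.
###Code
# sketch only; shown with .head() so it does not modify dados_residencial here
dados_residencial.reset_index(drop=True).head()
###Output
_____no_output_____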
###Code
dados_residencial.index = range(dados_residencial.shape[0])
dados_residencial
###Output
_____no_output_____ |
practicals/day_2/practical_4/training_neural_networks.ipynb | ###Markdown
Practical 4 - Training Neural Networks. In this practical, we will focus on how to effectively train a neural network. We will be training a model to classify small images into 10 different classes/labels. Imports
###Code
import os
import pickle
import random
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from keras import datasets
from keras import layers
from keras import optimizers
from keras import regularizers
from keras import losses,metrics
from keras import models
from keras import callbacks
from datetime import datetime
from functools import partial
from sklearn.preprocessing import OneHotEncoder
from sklearn.utils import resample
import baseline_model
%matplotlib inline
%load_ext autoreload
%autoreload 2
#%env CUDA_VISIBLE_DEVICES="" # turn off gpu if present
tf.set_random_seed(0) # seed rng
###Output
Using TensorFlow backend.
###Markdown
Sourcing the DataWe will be working with the CIFAR 10 dataset: a 10 class/label image classification dataset. Images from CIFAR 10 dataset The task is to classify the 32 by 32 color images into 10 categories.Loading the data is simple as the dataset is built into Keras:
###Code
(train_imgs, train_labels), (valid_imgs, valid_labels) = datasets.cifar10.load_data()
###Output
_____no_output_____
###Markdown
Exploring the Data. Let's visualise a random example in the dataset:
###Code
# select a random example in the training set to visualise
rand_idx = random.randint(0, len(train_imgs))
rand_img = train_imgs[rand_idx]
rand_label = train_labels[rand_idx][0]
# visualise the image and the corresponding label
plt.imshow(rand_img)
plt.title(f"label: {rand_label}")
###Output
_____no_output_____
###Markdown
As you can see, the label has already been converted to an integer for us. That reduces the data preparation work we have to do, but an integer is not very interpretable. We would like to have a string label like 'frog' instead of an integer label '6'. To do this we construct a dictionary to map from integer labels to string labels:
###Code
# load the metadata for the dataset from disk
meta_path = os.path.expanduser(os.path.join(
"~", ".keras", "datasets", "cifar-10-batches-py", "batches.meta"))
with open(meta_path, "rb") as f:
meta = pickle.load(f)
# build label mapping dictionary
label_names = meta["label_names"]
label_map = {}
for label_int, label in enumerate(label_names):
label_map[label_int] = label
###Output
_____no_output_____
###Markdown
Now we can visualise the image with an interpretable label:
###Code
# visualise the image and the corresponding label
plt.imshow(rand_img)
plt.title(f"label: {label_map[rand_label]}")
###Output
_____no_output_____
###Markdown
Typically you would explore the data further to see how you can improve your dataset before going on. In the interest of time we will skip that and say: - cifar-10 has no class imbalance issues. - cifar-10 has no missing values. So the hard work has already been completed for you. Preparing the data. Now we proceed with preparing the data for ML. But first some background on the representation of the images: images are represented in the dataset as numpy arrays with the shape `[height, width, channel]`. The channel dimension stores the red, green and blue values. Each 'pixel' in the numpy array ranges from 0 - 255, with 255 as the largest value. Feature Normalisation. First we perform feature normalisation on the input images: - only mean normalisation (subtract the mean from each image) is required as the pixel values come from the same scale (0 - 255)
###Code
# Perform feature scaling - mean normalisation
mean_img = np.mean(train_imgs, axis=0)
pp_train_imgs = (train_imgs - mean_img)
pp_valid_imgs = (valid_imgs - mean_img)
###Output
_____no_output_____
###Markdown
Encoding the Labels. As usual, we one-hot encode the labels as one-hot vectors to convey to the model that they are categorical.
###Code
encoder = OneHotEncoder(categories="auto")
encoder.fit(train_labels)
pp_train_labels = encoder.transform(train_labels).toarray()
pp_valid_labels = encoder.transform(valid_labels).toarray()
###Output
_____no_output_____
###Markdown
Typically, you do not sample your data; instead you train your model on all the data you have. In this practical we will artificially limit the training set to 1/5 of the dataset to observe the effects of having too little data.
###Code
# Compute new training set size
sample_train_size = len(train_imgs) // 5
# sample the trainng example without replacement
sample_pp_train_imgs, sample_pp_train_labels = resample(
pp_train_imgs, pp_train_labels,
n_samples=sample_train_size,
replace=False)
###Output
_____no_output_____
###Markdown
Building the Model. Now that data preprocessing is complete, we can proceed to build a model to classify the images. Since this practical focuses on the training process, we will provide a model generation function for you. Generate the model using `build_model()`: There are some parameters you can configure: - `input_shape` - the shape of the inputs (in this case images) given to the neural network. - `n_outputs` - no. of outputs in the neural network. Set this to the no. of classes/labels. - `scale_width` - increasing this multiplies the no. of hidden units used per layer. - `scale_depth` - no. of hidden layers in the network. - `activation` - the activation function to use in the network. Good choices are ReLU, ELU, SELU. - `l2_lambda` - amount of L2 regularisation to add.
###Code
# pass the input and no of classes (n_outputs) to build the model
input_shape = train_imgs.shape[1:]
model = baseline_model.build_model(
input_shape, n_outputs=10, scale_width=1, scale_depth=3,
activation=layers.ReLU,
l2_lambda=0)
model.summary()
###Output
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
flatten_1 (Flatten) (None, 3072) 0
_________________________________________________________________
dense_1 (Dense) (None, 64) 196672
_________________________________________________________________
re_lu_1 (ReLU) (None, 64) 0
_________________________________________________________________
dense_2 (Dense) (None, 64) 4160
_________________________________________________________________
re_lu_2 (ReLU) (None, 64) 0
_________________________________________________________________
dense_3 (Dense) (None, 64) 4160
_________________________________________________________________
re_lu_3 (ReLU) (None, 64) 0
_________________________________________________________________
dense_4 (Dense) (None, 10) 650
=================================================================
Total params: 205,642
Trainable params: 205,642
Non-trainable params: 0
_________________________________________________________________
###Markdown
Training the ModelNow we proceed with training the model. In Keras, there are two steps to training the model: 1. `.compile()` to compile the model - Here you will specify the the loss function to minimize, optimizer that will minimise the loss function (with the corresponding learning rate) and the metrics to record during training/evaluation 2. `.fit()` to actually train the mode - train the model by specifying the training data for training, validation data for metrics during training, the batch size and no. of epochs/the model passes over the data to train. - optionally you can specify some usefully callbacks to add functionality during training Compling the modelWe first compile the model by calling `.compile()` with:- the loss function to minimise: - classification task - use `binary_crossentropy` for 2-class/label classification - use `categorical_crossentropy` for multi-class/label classification - regression task - use `mean_squared_error` for regression- the optimizer that will minimise the loss function. - recommanded choice: `Adam` or `Nadam` - metrics to evaluate the model during training (ie accuarcy)
###Code
model.compile(
loss="categorical_crossentropy",
optimizer=optimizers.Adam(),
metrics=["accuracy"])
###Output
_____no_output_____
###Markdown
Fitting the model. Now we can fit the model to the training data by calling `.fit()`: - provide the training set for the model to train on - provide the validation set for the model to evaluate on - tell the model how many examples to fit on at once with `batch_size` > If you are training on a hardware accelerator like a GPU, you might want to increase > `batch_size` to take advantage of your GPU
###Code
# fit the model to the data
model.fit(sample_pp_train_imgs, sample_pp_train_labels,
validation_data=(pp_valid_imgs, pp_valid_labels),
batch_size=64,
epochs=10)
###Output
WARNING:tensorflow:From /home/zzy/.conda/envs/mlbootcamp/lib/python3.6/site-packages/tensorflow/python/ops/math_ops.py:3066: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use tf.cast instead.
Train on 10000 samples, validate on 10000 samples
Epoch 1/10
10000/10000 [==============================] - 1s 107us/step - loss: 13.5470 - acc: 0.1556 - val_loss: 13.4733 - val_acc: 0.1627
Epoch 2/10
10000/10000 [==============================] - 1s 76us/step - loss: 13.4343 - acc: 0.1649 - val_loss: 13.2572 - val_acc: 0.1767
Epoch 3/10
10000/10000 [==============================] - 1s 63us/step - loss: 13.6097 - acc: 0.1548 - val_loss: 13.6282 - val_acc: 0.1539
Epoch 4/10
10000/10000 [==============================] - 1s 65us/step - loss: 13.4754 - acc: 0.1635 - val_loss: 13.5978 - val_acc: 0.1562
Epoch 5/10
10000/10000 [==============================] - 1s 72us/step - loss: 13.5488 - acc: 0.1591 - val_loss: 13.5506 - val_acc: 0.1590
Epoch 6/10
10000/10000 [==============================] - 1s 73us/step - loss: 13.5506 - acc: 0.1588 - val_loss: 13.6250 - val_acc: 0.1543
Epoch 7/10
10000/10000 [==============================] - 1s 62us/step - loss: 13.6623 - acc: 0.1519 - val_loss: 13.4931 - val_acc: 0.1627
Epoch 8/10
10000/10000 [==============================] - 1s 65us/step - loss: 13.5912 - acc: 0.1566 - val_loss: 13.5959 - val_acc: 0.1564
Epoch 9/10
10000/10000 [==============================] - 1s 66us/step - loss: 13.5775 - acc: 0.1574 - val_loss: 13.6461 - val_acc: 0.1531
Epoch 10/10
10000/10000 [==============================] - 1s 65us/step - loss: 13.7740 - acc: 0.1453 - val_loss: 13.8791 - val_acc: 0.1388
###Markdown
Looking at the metrics loss and accuracy, they seem to be barely improving at all. Let's evaluate the model to investigate what the problem is. Evaluating the Model. We evaluate the model to determine why the model does not train. There is a tool included with `tensorflow` called `tensorboard` that makes it easier to evaluate neural networks. For `tensorboard` to work we have to amend our `.fit()` call:
###Code
# pass the input and no of classes (n_outputs) to build the model
input_shape = train_imgs.shape[1:]
model = baseline_model.build_model(
input_shape, n_outputs=10, scale_width=1, scale_depth=3,
activation=layers.ReLU,
l2_lambda=0)
# compile the model
model.compile(
loss="categorical_crossentropy",
optimizer=optimizers.Adam(),
metrics=["accuracy"])
### New stuff:
# we need to name our training run, so we create a name with the
# current time
run_name = f"run_{datetime.now():%Y_%m_%d__%H_%M_%S}"
# create directory for storing tensorboard logs
logs_dir = os.path.join("logs", run_name)
os.makedirs(logs_dir, exist_ok=True)
# fit the model to the data
model.fit(sample_pp_train_imgs, sample_pp_train_labels,
validation_data=(pp_valid_imgs, pp_valid_labels),
batch_size=64,
epochs=10,
# set the tensorboard callback to enable tensorboard for the model
callbacks=[callbacks.TensorBoard(log_dir=logs_dir)])
###Output
Train on 10000 samples, validate on 10000 samples
Epoch 1/10
10000/10000 [==============================] - 1s 77us/step - loss: 13.3534 - acc: 0.1666 - val_loss: 13.1322 - val_acc: 0.1828
Epoch 2/10
10000/10000 [==============================] - 1s 71us/step - loss: 13.5912 - acc: 0.1556 - val_loss: 13.4534 - val_acc: 0.1642
Epoch 3/10
10000/10000 [==============================] - 1s 68us/step - loss: 13.4192 - acc: 0.1666 - val_loss: 13.3650 - val_acc: 0.1700
Epoch 4/10
10000/10000 [==============================] - 1s 64us/step - loss: 13.4634 - acc: 0.1643 - val_loss: 13.4700 - val_acc: 0.1635
Epoch 5/10
10000/10000 [==============================] - 1s 68us/step - loss: 13.3075 - acc: 0.1738 - val_loss: 13.2751 - val_acc: 0.1760
Epoch 6/10
10000/10000 [==============================] - 1s 75us/step - loss: 13.3752 - acc: 0.1698 - val_loss: 13.4357 - val_acc: 0.1663
Epoch 7/10
10000/10000 [==============================] - 1s 77us/step - loss: 13.5495 - acc: 0.1591 - val_loss: 13.4656 - val_acc: 0.1645
Epoch 8/10
10000/10000 [==============================] - 1s 77us/step - loss: 13.6599 - acc: 0.1522 - val_loss: 14.1533 - val_acc: 0.1216
Epoch 9/10
10000/10000 [==============================] - 1s 81us/step - loss: 14.1507 - acc: 0.1220 - val_loss: 14.1366 - val_acc: 0.1229
Epoch 10/10
10000/10000 [==============================] - 1s 81us/step - loss: 14.1977 - acc: 0.1191 - val_loss: 14.1726 - val_acc: 0.1207
###Markdown
After training is complete, we have to run the Tensorboard server by: - opening a new terminal in Jupyter Lab (File>New>Terminal) - running the tensorboard server and pointing it to the logs with `tensorboard --logdir=logs` - pointing a new tab at http://localhost:6006 Tensorboard UI Looking at the training loss `loss` we can see that the model is having problems fitting the training data (it's not decreasing much at all). > When this happens the model is having problems **converging** to a lower loss. When this happens, it's time to tune your learning rate. Iterate. Learning Rate Tuning. The learning rate is arguably the most important hyperparameter you have to tune when training neural networks: - set the learning rate too high and the metrics do not improve or even explode (increase rapidly) during training - set the learning rate too low and the neural network takes forever (figure of speech) to train. We seem to have a high learning rate problem, so we lower the learning rate from the default value of `1e-3` to `1e-4`. > `1e-4` is equivalent to $1 \times 10^{-4}$
###Code
# pass the input and no of classes (n_outputs) to build the model
input_shape = train_imgs.shape[1:]
model = baseline_model.build_model(
input_shape, n_outputs=10, scale_width=1, scale_depth=3,
activation=layers.ReLU,
l2_lambda=0, dropout_prob=0.0)
# compile the model
model.compile(
loss="categorical_crossentropy",
optimizer=optimizers.Adam(lr=1e-4), # Lowered the learning rate
metrics=["accuracy"])
# we need to name our training run, so we create a name with the
# current time
run_name = f"run_{datetime.now():%Y_%m_%d__%H_%M_%S}"
# create directory for storing tensorboard logs
logs_dir = os.path.join("logs", run_name)
os.makedirs(logs_dir, exist_ok=True)
# fit the model to the data
model.fit(sample_pp_train_imgs, sample_pp_train_labels,
validation_data=(pp_valid_imgs, pp_valid_labels),
batch_size=64,
epochs=10,
# set the tensorboard callback to enable tensorboard for the model
callbacks=[callbacks.TensorBoard(log_dir=logs_dir)])
###Output
Train on 10000 samples, validate on 10000 samples
Epoch 1/10
10000/10000 [==============================] - 1s 78us/step - loss: 12.2835 - acc: 0.1929 - val_loss: 11.2900 - val_acc: 0.2372
Epoch 2/10
10000/10000 [==============================] - 1s 69us/step - loss: 10.6626 - acc: 0.2772 - val_loss: 10.3010 - val_acc: 0.2954
Epoch 3/10
10000/10000 [==============================] - 1s 69us/step - loss: 9.8902 - acc: 0.3199 - val_loss: 10.1081 - val_acc: 0.3066
Epoch 4/10
10000/10000 [==============================] - 1s 81us/step - loss: 9.4279 - acc: 0.3483 - val_loss: 9.8166 - val_acc: 0.3166
Epoch 5/10
10000/10000 [==============================] - 1s 83us/step - loss: 8.9481 - acc: 0.3741 - val_loss: 9.5571 - val_acc: 0.3278
Epoch 6/10
10000/10000 [==============================] - 1s 83us/step - loss: 8.6145 - acc: 0.3949 - val_loss: 9.6439 - val_acc: 0.3157
Epoch 7/10
10000/10000 [==============================] - 1s 87us/step - loss: 8.3633 - acc: 0.4116 - val_loss: 9.3811 - val_acc: 0.3366
Epoch 8/10
10000/10000 [==============================] - 1s 81us/step - loss: 8.1298 - acc: 0.4242 - val_loss: 9.2423 - val_acc: 0.3412
Epoch 9/10
10000/10000 [==============================] - 1s 68us/step - loss: 7.8655 - acc: 0.4365 - val_loss: 9.1471 - val_acc: 0.3456
Epoch 10/10
10000/10000 [==============================] - 1s 71us/step - loss: 7.7753 - acc: 0.4419 - val_loss: 9.1115 - val_acc: 0.3449
###Markdown
Take a look at your tensorboard again. The training loss curve for the new training run should appear in tensorboard. > Try different values of the learning rate to see how the learning rate affects convergence. > Clue: change the power $N$ in `Ae-N` first before tuning the $A$ part of the learning rate. Underfitting. A look at the training accuracy shows a dismal picture. The model appears to be underfitting. Training a larger model. One way to combat underfitting in a neural network is to simply add more hidden layers or more hidden neurons per hidden layer. > With a model building function you can make this very easy by exposing > it as a function parameter. We both deepen (add more layers) and widen (add more neurons per layer) by increasing the `scale_depth` and `scale_width` parameters respectively; the code follows the short learning-rate sweep sketched below.
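Before scaling the model up, here is a hedged sketch of the learning-rate sweep suggested above; the candidate values, the 3-epoch budget, and the history keys 'acc'/'val_acc' are assumptions matching this Keras version's logs:
###Code
# coarse sweep: try a few powers of ten with short runs and compare validation accuracy
for lr in [1e-2, 1e-3, 1e-4, 1e-5]:
    sweep_model = baseline_model.build_model(
        input_shape, n_outputs=10, scale_width=1, scale_depth=3,
        activation=layers.ReLU, l2_lambda=0)
    sweep_model.compile(loss="categorical_crossentropy",
                        optimizer=optimizers.Adam(lr=lr),
                        metrics=["accuracy"])
    history = sweep_model.fit(sample_pp_train_imgs, sample_pp_train_labels,
                              validation_data=(pp_valid_imgs, pp_valid_labels),
                              batch_size=64, epochs=3, verbose=0)
    print(f"lr={lr:.0e}  train acc={history.history['acc'][-1]:.3f}  "
          f"val acc={history.history['val_acc'][-1]:.3f}")
###Output
_____no_output_____
###Markdown
Now we build and inspect the larger model: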
###Code
model = baseline_model.build_model(
input_shape, n_outputs=10, scale_width=2, scale_depth=8,
activation=layers.ReLU,
l2_lambda=0)
model.summary()
###Output
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
flatten_1 (Flatten) (None, 3072) 0
_________________________________________________________________
dense_1 (Dense) (None, 128) 393344
_________________________________________________________________
re_lu_1 (ReLU) (None, 128) 0
_________________________________________________________________
dense_2 (Dense) (None, 128) 16512
_________________________________________________________________
re_lu_2 (ReLU) (None, 128) 0
_________________________________________________________________
dense_3 (Dense) (None, 128) 16512
_________________________________________________________________
re_lu_3 (ReLU) (None, 128) 0
_________________________________________________________________
dense_4 (Dense) (None, 128) 16512
_________________________________________________________________
re_lu_4 (ReLU) (None, 128) 0
_________________________________________________________________
dense_5 (Dense) (None, 128) 16512
_________________________________________________________________
re_lu_5 (ReLU) (None, 128) 0
_________________________________________________________________
dense_6 (Dense) (None, 128) 16512
_________________________________________________________________
re_lu_6 (ReLU) (None, 128) 0
_________________________________________________________________
dense_7 (Dense) (None, 128) 16512
_________________________________________________________________
re_lu_7 (ReLU) (None, 128) 0
_________________________________________________________________
dense_8 (Dense) (None, 128) 16512
_________________________________________________________________
re_lu_8 (ReLU) (None, 128) 0
_________________________________________________________________
dense_9 (Dense) (None, 10) 1290
=================================================================
Total params: 510,218
Trainable params: 510,218
Non-trainable params: 0
_________________________________________________________________
###Markdown
Let's train the larger network to see if it improves our metrics:
###Code
# compile the model
model.compile(
loss="categorical_crossentropy",
optimizer=optimizers.Adam(lr=1e-4),
metrics=["accuracy"])
# we need to name our training run, so we create a name with the
# current time
run_name = f"run_{datetime.now():%Y_%m_%d__%H_%M_%S}"
# create directory for storing tensorboard logs
logs_dir = os.path.join("logs", run_name)
os.makedirs(logs_dir, exist_ok=True)
# fit the model to the data
model.fit(sample_pp_train_imgs, sample_pp_train_labels,
validation_data=(pp_valid_imgs, pp_valid_labels),
batch_size=64,
          epochs=10,
# set the tensorboard callback to enable tensorboard for the model
callbacks=[callbacks.TensorBoard(log_dir=logs_dir)])
###Output
Train on 10000 samples, validate on 10000 samples
Epoch 1/10
10000/10000 [==============================] - 2s 151us/step - loss: 3.1940 - acc: 0.1913 - val_loss: 2.1664 - val_acc: 0.2508
Epoch 2/10
10000/10000 [==============================] - 1s 131us/step - loss: 1.9997 - acc: 0.3042 - val_loss: 2.0380 - val_acc: 0.2843
Epoch 3/10
10000/10000 [==============================] - 1s 130us/step - loss: 1.8194 - acc: 0.3534 - val_loss: 1.9578 - val_acc: 0.3065
Epoch 4/10
10000/10000 [==============================] - 1s 133us/step - loss: 1.6940 - acc: 0.4037 - val_loss: 1.9269 - val_acc: 0.3277
Epoch 5/10
10000/10000 [==============================] - 1s 131us/step - loss: 1.5741 - acc: 0.4474 - val_loss: 1.9176 - val_acc: 0.3302
Epoch 6/10
10000/10000 [==============================] - 1s 137us/step - loss: 1.4861 - acc: 0.4821 - val_loss: 1.9167 - val_acc: 0.3462
Epoch 7/10
10000/10000 [==============================] - 1s 129us/step - loss: 1.4014 - acc: 0.5149 - val_loss: 1.9321 - val_acc: 0.3487
Epoch 8/10
10000/10000 [==============================] - 1s 138us/step - loss: 1.2957 - acc: 0.5524 - val_loss: 1.9470 - val_acc: 0.3508
Epoch 9/10
10000/10000 [==============================] - 1s 130us/step - loss: 1.2127 - acc: 0.5833 - val_loss: 1.9794 - val_acc: 0.3543
Epoch 10/10
10000/10000 [==============================] - 2s 150us/step - loss: 1.1214 - acc: 0.6222 - val_loss: 2.0110 - val_acc: 0.3528
###Markdown
By training a larger network, we were able to attain a higher training accuracy, partially addressing underfitting. Tuning the No. of Epochs. Looking at the training `loss` learning curve, we can see that training stopped before the `loss` stopped improving (the training loss has not yet bottomed out). This is a sign that we have not trained for a sufficient no. of epochs. Before simply training longer by increasing the no. of epochs, the hedged sketch below shows one way to avoid hand-picking the epoch count.
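A hedged aside (not part of the original practical): Keras ships an EarlyStopping callback that stops training once the monitored metric stops improving, so the epoch count does not have to be guessed; the patience value below is an illustrative assumption.
###Code
# sketch: stop when val_loss has not improved for 5 consecutive epochs
early_stop = callbacks.EarlyStopping(monitor="val_loss", patience=5)
# it would be passed to .fit() alongside the TensorBoard callback, e.g.
# callbacks=[callbacks.TensorBoard(log_dir=logs_dir), early_stop]
###Output
_____no_output_____
###Markdown
With that noted, we train the same model for more epochs: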
###Code
# build the model
input_shape = train_imgs.shape[1:]
model = baseline_model.build_model(
input_shape, n_outputs=10, scale_width=2, scale_depth=6,
activation=layers.ReLU,
l2_lambda=0)
# compile the model
model.compile(
loss="categorical_crossentropy",
optimizer=optimizers.Adam(lr=1e-4),
metrics=["accuracy"])
# we need to name our training run, so we create a name with the
# current time
run_name = f"run_{datetime.now():%Y_%m_%d__%H_%M_%S}"
# create directory for storing tensorboard logs
logs_dir = os.path.join("logs", run_name)
os.makedirs(logs_dir, exist_ok=True)
# fit the model to the data
model.fit(sample_pp_train_imgs, sample_pp_train_labels,
validation_data=(pp_valid_imgs, pp_valid_labels),
batch_size=64,
epochs=60, # increased epochs
# set the tensorboard callback to enable tensorboard for the model
callbacks=[callbacks.TensorBoard(log_dir=logs_dir)])
###Output
Train on 10000 samples, validate on 10000 samples
Epoch 1/60
10000/10000 [==============================] - 1s 148us/step - loss: 4.7798 - acc: 0.1926 - val_loss: 3.0961 - val_acc: 0.2260
Epoch 2/60
10000/10000 [==============================] - 1s 123us/step - loss: 2.5068 - acc: 0.2802 - val_loss: 2.5563 - val_acc: 0.2549
Epoch 3/60
10000/10000 [==============================] - 1s 131us/step - loss: 2.0292 - acc: 0.3439 - val_loss: 2.3802 - val_acc: 0.2713
Epoch 4/60
10000/10000 [==============================] - 1s 123us/step - loss: 1.7777 - acc: 0.4000 - val_loss: 2.3244 - val_acc: 0.2789
Epoch 5/60
10000/10000 [==============================] - 1s 120us/step - loss: 1.6229 - acc: 0.4449 - val_loss: 2.2764 - val_acc: 0.2890
Epoch 6/60
10000/10000 [==============================] - 1s 132us/step - loss: 1.4725 - acc: 0.4934 - val_loss: 2.2225 - val_acc: 0.3065
Epoch 7/60
10000/10000 [==============================] - 2s 160us/step - loss: 1.3645 - acc: 0.5324 - val_loss: 2.2371 - val_acc: 0.3071
Epoch 8/60
10000/10000 [==============================] - 1s 141us/step - loss: 1.2663 - acc: 0.5673 - val_loss: 2.2726 - val_acc: 0.3078
Epoch 9/60
10000/10000 [==============================] - 1s 126us/step - loss: 1.1593 - acc: 0.6093 - val_loss: 2.2821 - val_acc: 0.3161
Epoch 10/60
10000/10000 [==============================] - 1s 124us/step - loss: 1.0817 - acc: 0.6339 - val_loss: 2.3158 - val_acc: 0.3127
Epoch 11/60
10000/10000 [==============================] - 1s 126us/step - loss: 1.0093 - acc: 0.6624 - val_loss: 2.3445 - val_acc: 0.3148
Epoch 12/60
10000/10000 [==============================] - 1s 118us/step - loss: 0.9123 - acc: 0.6994 - val_loss: 2.3909 - val_acc: 0.3312
Epoch 13/60
10000/10000 [==============================] - 1s 122us/step - loss: 0.8459 - acc: 0.7187 - val_loss: 2.4444 - val_acc: 0.3266
Epoch 14/60
10000/10000 [==============================] - 1s 121us/step - loss: 0.8157 - acc: 0.7281 - val_loss: 2.4970 - val_acc: 0.3293
Epoch 15/60
10000/10000 [==============================] - 1s 124us/step - loss: 0.7233 - acc: 0.7630 - val_loss: 2.5406 - val_acc: 0.3297
Epoch 16/60
10000/10000 [==============================] - 1s 124us/step - loss: 0.6549 - acc: 0.7947 - val_loss: 2.6114 - val_acc: 0.3313
Epoch 17/60
10000/10000 [==============================] - 1s 135us/step - loss: 0.5931 - acc: 0.8153 - val_loss: 2.6893 - val_acc: 0.3370
Epoch 18/60
10000/10000 [==============================] - 1s 132us/step - loss: 0.5503 - acc: 0.8327 - val_loss: 2.7385 - val_acc: 0.3359
Epoch 19/60
10000/10000 [==============================] - 1s 130us/step - loss: 0.5098 - acc: 0.8464 - val_loss: 2.7794 - val_acc: 0.3279
Epoch 20/60
10000/10000 [==============================] - 1s 123us/step - loss: 0.4606 - acc: 0.8638 - val_loss: 2.8563 - val_acc: 0.3347
Epoch 21/60
10000/10000 [==============================] - 1s 127us/step - loss: 0.4081 - acc: 0.8816 - val_loss: 2.9430 - val_acc: 0.3397
Epoch 22/60
10000/10000 [==============================] - 1s 127us/step - loss: 0.3717 - acc: 0.8946 - val_loss: 2.9937 - val_acc: 0.3408
Epoch 23/60
10000/10000 [==============================] - 1s 129us/step - loss: 0.3385 - acc: 0.9019 - val_loss: 3.1036 - val_acc: 0.3413
Epoch 24/60
10000/10000 [==============================] - 1s 121us/step - loss: 0.3126 - acc: 0.9125 - val_loss: 3.1835 - val_acc: 0.3452
Epoch 25/60
10000/10000 [==============================] - 1s 127us/step - loss: 0.2855 - acc: 0.9206 - val_loss: 3.2798 - val_acc: 0.3389
Epoch 26/60
10000/10000 [==============================] - 1s 127us/step - loss: 0.2641 - acc: 0.9246 - val_loss: 3.3040 - val_acc: 0.3410
Epoch 27/60
10000/10000 [==============================] - 1s 138us/step - loss: 0.2377 - acc: 0.9373 - val_loss: 3.4126 - val_acc: 0.3416
Epoch 28/60
10000/10000 [==============================] - 1s 133us/step - loss: 0.2283 - acc: 0.9390 - val_loss: 3.4892 - val_acc: 0.3373
Epoch 29/60
10000/10000 [==============================] - 1s 137us/step - loss: 0.1913 - acc: 0.9498 - val_loss: 3.5718 - val_acc: 0.3380
Epoch 30/60
10000/10000 [==============================] - 2s 150us/step - loss: 0.1853 - acc: 0.9521 - val_loss: 3.6824 - val_acc: 0.3370
Epoch 31/60
10000/10000 [==============================] - 1s 134us/step - loss: 0.1635 - acc: 0.9569 - val_loss: 3.7383 - val_acc: 0.3343
Epoch 32/60
10000/10000 [==============================] - 1s 134us/step - loss: 0.2193 - acc: 0.9364 - val_loss: 3.8320 - val_acc: 0.3278
Epoch 33/60
10000/10000 [==============================] - 1s 128us/step - loss: 0.2878 - acc: 0.9096 - val_loss: 3.8683 - val_acc: 0.3317
Epoch 34/60
10000/10000 [==============================] - 1s 133us/step - loss: 0.2438 - acc: 0.9242 - val_loss: 3.8914 - val_acc: 0.3437
Epoch 35/60
10000/10000 [==============================] - 1s 123us/step - loss: 0.1387 - acc: 0.9651 - val_loss: 3.9104 - val_acc: 0.3454
Epoch 36/60
10000/10000 [==============================] - 1s 131us/step - loss: 0.1053 - acc: 0.9747 - val_loss: 3.9678 - val_acc: 0.3396
Epoch 37/60
10000/10000 [==============================] - 1s 135us/step - loss: 0.0791 - acc: 0.9843 - val_loss: 4.0902 - val_acc: 0.3412
Epoch 38/60
10000/10000 [==============================] - 1s 131us/step - loss: 0.0535 - acc: 0.9919 - val_loss: 4.1095 - val_acc: 0.3423
Epoch 39/60
10000/10000 [==============================] - 1s 130us/step - loss: 0.0648 - acc: 0.9866 - val_loss: 4.2224 - val_acc: 0.3432
Epoch 40/60
10000/10000 [==============================] - 1s 129us/step - loss: 0.1321 - acc: 0.9632 - val_loss: 4.3056 - val_acc: 0.3403
Epoch 41/60
10000/10000 [==============================] - 1s 134us/step - loss: 0.4229 - acc: 0.8661 - val_loss: 4.2328 - val_acc: 0.3266
Epoch 42/60
10000/10000 [==============================] - 1s 128us/step - loss: 0.3580 - acc: 0.8824 - val_loss: 4.1060 - val_acc: 0.3421
Epoch 43/60
10000/10000 [==============================] - 1s 132us/step - loss: 0.1650 - acc: 0.9517 - val_loss: 4.2322 - val_acc: 0.3366
Epoch 44/60
10000/10000 [==============================] - 1s 123us/step - loss: 0.0658 - acc: 0.9851 - val_loss: 4.2403 - val_acc: 0.3461
Epoch 45/60
10000/10000 [==============================] - 1s 127us/step - loss: 0.0303 - acc: 0.9967 - val_loss: 4.3544 - val_acc: 0.3452
Epoch 46/60
10000/10000 [==============================] - 1s 130us/step - loss: 0.0190 - acc: 0.9987 - val_loss: 4.4080 - val_acc: 0.3432
Epoch 47/60
10000/10000 [==============================] - 1s 126us/step - loss: 0.0146 - acc: 0.9993 - val_loss: 4.4578 - val_acc: 0.3480
Epoch 48/60
10000/10000 [==============================] - 1s 130us/step - loss: 0.0112 - acc: 0.9995 - val_loss: 4.5037 - val_acc: 0.3444
Epoch 49/60
10000/10000 [==============================] - 1s 125us/step - loss: 0.0093 - acc: 0.9995 - val_loss: 4.5474 - val_acc: 0.3516
Epoch 50/60
10000/10000 [==============================] - 1s 133us/step - loss: 0.0075 - acc: 0.9999 - val_loss: 4.5956 - val_acc: 0.3498
Epoch 51/60
10000/10000 [==============================] - 1s 124us/step - loss: 0.0064 - acc: 0.9999 - val_loss: 4.6243 - val_acc: 0.3518
Epoch 52/60
10000/10000 [==============================] - 1s 128us/step - loss: 0.0055 - acc: 1.0000 - val_loss: 4.6679 - val_acc: 0.3541
Epoch 53/60
10000/10000 [==============================] - 1s 138us/step - loss: 0.0049 - acc: 1.0000 - val_loss: 4.7013 - val_acc: 0.3505
Epoch 54/60
10000/10000 [==============================] - 1s 132us/step - loss: 0.0045 - acc: 1.0000 - val_loss: 4.7329 - val_acc: 0.3518
Epoch 55/60
10000/10000 [==============================] - 1s 128us/step - loss: 0.0041 - acc: 1.0000 - val_loss: 4.7664 - val_acc: 0.3509
Epoch 56/60
10000/10000 [==============================] - 1s 123us/step - loss: 0.0037 - acc: 1.0000 - val_loss: 4.8081 - val_acc: 0.3539
Epoch 57/60
10000/10000 [==============================] - 1s 130us/step - loss: 0.0034 - acc: 1.0000 - val_loss: 4.8425 - val_acc: 0.3530
Epoch 58/60
10000/10000 [==============================] - 1s 126us/step - loss: 0.0031 - acc: 1.0000 - val_loss: 4.8748 - val_acc: 0.3533
Epoch 59/60
10000/10000 [==============================] - 1s 126us/step - loss: 0.0028 - acc: 1.0000 - val_loss: 4.9006 - val_acc: 0.3540
Epoch 60/60
10000/10000 [==============================] - 1s 127us/step - loss: 0.0027 - acc: 1.0000 - val_loss: 4.9407 - val_acc: 0.3513
###Markdown
By training longer we are able to increase our training accuracy tremendously. Consider our underfitting problem solved. OverfittingLooking at the dismal validation accuracy compared to the training accuracy, we are overfitting tremendously. RegularisationOne way to reduce overfitting is to introduce regularisation. Types of regularisation you can add:- dropout regularization - randomly _kills_ neurons during training.- l2 regularisation - forces the weights of the model to adopt small valuesIn this practical we will use just l2 regularisation to address overfitting by setting `l2_lambda`:
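As a rough sketch of the dropout option (this is not the practical's `baseline_model`; the layer sizes and input shape are made up for illustration), `Dropout` layers would sit between the dense layers like this:

```python
from keras.models import Sequential
from keras.layers import Dense, Dropout

# hypothetical dense classifier with dropout between the hidden layers
dropout_sketch = Sequential([
    Dense(256, activation="relu", input_shape=(32 * 32 * 3,)),
    Dropout(0.5),  # randomly zero 50% of the activations during training
    Dense(256, activation="relu"),
    Dropout(0.5),
    Dense(10, activation="softmax"),
])
```

In this practical, though, we stick to l2 regularisation via the `l2_lambda` argument used in the cell below.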
###Code
# build the model
input_shape = train_imgs.shape[1:]
model = baseline_model.build_model(
input_shape, n_outputs=10, scale_width=2, scale_depth=6,
activation=layers.ReLU,
    l2_lambda=6e-2) # added l2 regularisation
# compile the model
model.compile(
loss="categorical_crossentropy",
optimizer=optimizers.Adam(lr=1e-4),
metrics=["accuracy"])
# we need to name our training run, so we create a name with the
# current time
run_name = f"run_{datetime.now():%Y_%m_%d__%H_%M_%S}"
# create directory for storing tensorboard logs
logs_dir = os.path.join("logs", run_name)
os.makedirs(logs_dir, exist_ok=True)
# fit the model to the data
model.fit(sample_pp_train_imgs, sample_pp_train_labels,
validation_data=(pp_valid_imgs, pp_valid_labels),
batch_size=64,
epochs=60,
verbose=1,
# set the tensorboard callback to enable tensorboard for the model
callbacks=[callbacks.TensorBoard(log_dir=logs_dir)])
###Output
Train on 10000 samples, validate on 10000 samples
Epoch 1/60
10000/10000 [==============================] - 2s 176us/step - loss: 57.4283 - acc: 0.1938 - val_loss: 53.0112 - val_acc: 0.2212
Epoch 2/60
10000/10000 [==============================] - 2s 152us/step - loss: 50.7527 - acc: 0.2769 - val_loss: 49.0488 - val_acc: 0.2503
Epoch 3/60
10000/10000 [==============================] - 1s 150us/step - loss: 46.9141 - acc: 0.3440 - val_loss: 45.5151 - val_acc: 0.2814
Epoch 4/60
10000/10000 [==============================] - 2s 154us/step - loss: 43.3628 - acc: 0.4120 - val_loss: 42.1198 - val_acc: 0.3052
Epoch 5/60
10000/10000 [==============================] - 1s 145us/step - loss: 39.9937 - acc: 0.4609 - val_loss: 38.8570 - val_acc: 0.3232
Epoch 6/60
10000/10000 [==============================] - 2s 160us/step - loss: 36.7650 - acc: 0.5098 - val_loss: 35.7529 - val_acc: 0.3272
Epoch 7/60
10000/10000 [==============================] - 1s 146us/step - loss: 33.6985 - acc: 0.5344 - val_loss: 32.7804 - val_acc: 0.3478
Epoch 8/60
10000/10000 [==============================] - 2s 153us/step - loss: 30.8104 - acc: 0.5563 - val_loss: 29.9821 - val_acc: 0.3601
Epoch 9/60
10000/10000 [==============================] - 1s 146us/step - loss: 28.0857 - acc: 0.5792 - val_loss: 27.3438 - val_acc: 0.3695
Epoch 10/60
10000/10000 [==============================] - 2s 158us/step - loss: 25.5500 - acc: 0.5844 - val_loss: 24.8871 - val_acc: 0.3762
Epoch 11/60
10000/10000 [==============================] - 2s 152us/step - loss: 23.1784 - acc: 0.5991 - val_loss: 22.5885 - val_acc: 0.3932
Epoch 12/60
10000/10000 [==============================] - 2s 153us/step - loss: 20.9858 - acc: 0.6017 - val_loss: 20.4837 - val_acc: 0.4011
Epoch 13/60
10000/10000 [==============================] - 2s 151us/step - loss: 18.9696 - acc: 0.6069 - val_loss: 18.5287 - val_acc: 0.4090
Epoch 14/60
10000/10000 [==============================] - 2s 159us/step - loss: 17.1152 - acc: 0.6025 - val_loss: 16.7606 - val_acc: 0.4104
Epoch 15/60
10000/10000 [==============================] - 2s 152us/step - loss: 15.4176 - acc: 0.6132 - val_loss: 15.1273 - val_acc: 0.4221
Epoch 16/60
10000/10000 [==============================] - 2s 157us/step - loss: 13.8871 - acc: 0.6125 - val_loss: 13.6691 - val_acc: 0.4306
Epoch 17/60
10000/10000 [==============================] - 2s 152us/step - loss: 12.5001 - acc: 0.6150 - val_loss: 12.3369 - val_acc: 0.4278
Epoch 18/60
10000/10000 [==============================] - 2s 152us/step - loss: 11.2456 - acc: 0.6197 - val_loss: 11.1576 - val_acc: 0.4376
Epoch 19/60
10000/10000 [==============================] - 2s 152us/step - loss: 10.1207 - acc: 0.6306 - val_loss: 10.1335 - val_acc: 0.4332
Epoch 20/60
10000/10000 [==============================] - 2s 156us/step - loss: 9.1383 - acc: 0.6278 - val_loss: 9.2118 - val_acc: 0.4331
Epoch 21/60
10000/10000 [==============================] - 2s 153us/step - loss: 8.2531 - acc: 0.6327 - val_loss: 8.4056 - val_acc: 0.4299
Epoch 22/60
10000/10000 [==============================] - 2s 153us/step - loss: 7.4869 - acc: 0.6327 - val_loss: 7.6633 - val_acc: 0.4444
Epoch 23/60
10000/10000 [==============================] - 2s 168us/step - loss: 6.7991 - acc: 0.6376 - val_loss: 7.0457 - val_acc: 0.4408
Epoch 24/60
10000/10000 [==============================] - 2s 160us/step - loss: 6.2062 - acc: 0.6436 - val_loss: 6.4872 - val_acc: 0.4417
Epoch 25/60
10000/10000 [==============================] - 2s 156us/step - loss: 5.6848 - acc: 0.6460 - val_loss: 5.9854 - val_acc: 0.4503
Epoch 26/60
10000/10000 [==============================] - 2s 155us/step - loss: 5.2221 - acc: 0.6509 - val_loss: 5.5943 - val_acc: 0.4389
Epoch 27/60
10000/10000 [==============================] - 2s 152us/step - loss: 4.8147 - acc: 0.6636 - val_loss: 5.2335 - val_acc: 0.4492
Epoch 28/60
10000/10000 [==============================] - 2s 153us/step - loss: 4.4905 - acc: 0.6596 - val_loss: 4.9060 - val_acc: 0.4455
Epoch 29/60
10000/10000 [==============================] - 2s 159us/step - loss: 4.1887 - acc: 0.6612 - val_loss: 4.6319 - val_acc: 0.4459
Epoch 30/60
10000/10000 [==============================] - 1s 147us/step - loss: 3.9059 - acc: 0.6672 - val_loss: 4.4408 - val_acc: 0.4373
Epoch 31/60
10000/10000 [==============================] - 2s 161us/step - loss: 3.6782 - acc: 0.6694 - val_loss: 4.2201 - val_acc: 0.4309
Epoch 32/60
10000/10000 [==============================] - 2s 151us/step - loss: 3.4666 - acc: 0.6784 - val_loss: 4.0311 - val_acc: 0.4399
Epoch 33/60
10000/10000 [==============================] - 2s 158us/step - loss: 3.2990 - acc: 0.6772 - val_loss: 3.8934 - val_acc: 0.4346
Epoch 34/60
10000/10000 [==============================] - 2s 160us/step - loss: 3.1512 - acc: 0.6727 - val_loss: 3.7205 - val_acc: 0.4456
Epoch 35/60
10000/10000 [==============================] - 2s 152us/step - loss: 3.0050 - acc: 0.6767 - val_loss: 3.5698 - val_acc: 0.4456
Epoch 36/60
10000/10000 [==============================] - 2s 174us/step - loss: 2.8698 - acc: 0.6842 - val_loss: 3.5034 - val_acc: 0.4445
Epoch 37/60
10000/10000 [==============================] - 2s 154us/step - loss: 2.7667 - acc: 0.6837 - val_loss: 3.4355 - val_acc: 0.4279
Epoch 38/60
10000/10000 [==============================] - 2s 155us/step - loss: 2.6776 - acc: 0.6831 - val_loss: 3.3114 - val_acc: 0.4416
Epoch 39/60
10000/10000 [==============================] - 2s 161us/step - loss: 2.5843 - acc: 0.6854 - val_loss: 3.2489 - val_acc: 0.4436
Epoch 40/60
10000/10000 [==============================] - 1s 150us/step - loss: 2.5180 - acc: 0.6865 - val_loss: 3.2086 - val_acc: 0.4408
Epoch 41/60
10000/10000 [==============================] - 1s 149us/step - loss: 2.4288 - acc: 0.7001 - val_loss: 3.1503 - val_acc: 0.4418
Epoch 42/60
10000/10000 [==============================] - 2s 176us/step - loss: 2.3870 - acc: 0.6927 - val_loss: 3.0766 - val_acc: 0.4430
Epoch 43/60
10000/10000 [==============================] - 2s 154us/step - loss: 2.3333 - acc: 0.6930 - val_loss: 3.0384 - val_acc: 0.4377
Epoch 44/60
10000/10000 [==============================] - 2s 155us/step - loss: 2.2725 - acc: 0.7027 - val_loss: 3.0440 - val_acc: 0.4254
Epoch 45/60
10000/10000 [==============================] - 2s 158us/step - loss: 2.2347 - acc: 0.7004 - val_loss: 2.9582 - val_acc: 0.4447
Epoch 46/60
10000/10000 [==============================] - 2s 162us/step - loss: 2.1777 - acc: 0.7091 - val_loss: 3.0042 - val_acc: 0.4358
Epoch 47/60
10000/10000 [==============================] - 2s 159us/step - loss: 2.1450 - acc: 0.7076 - val_loss: 2.8773 - val_acc: 0.4502
Epoch 48/60
10000/10000 [==============================] - 2s 159us/step - loss: 2.1076 - acc: 0.7072 - val_loss: 2.9190 - val_acc: 0.4282
Epoch 49/60
10000/10000 [==============================] - 2s 158us/step - loss: 2.0788 - acc: 0.7090 - val_loss: 2.9470 - val_acc: 0.4293
Epoch 50/60
10000/10000 [==============================] - 2s 159us/step - loss: 2.0458 - acc: 0.7157 - val_loss: 2.8877 - val_acc: 0.4431
Epoch 51/60
10000/10000 [==============================] - 1s 148us/step - loss: 2.0252 - acc: 0.7129 - val_loss: 2.8301 - val_acc: 0.4465
Epoch 52/60
10000/10000 [==============================] - 2s 160us/step - loss: 1.9815 - acc: 0.7284 - val_loss: 2.8577 - val_acc: 0.4400
Epoch 53/60
10000/10000 [==============================] - 2s 156us/step - loss: 1.9779 - acc: 0.7231 - val_loss: 2.8455 - val_acc: 0.4382
Epoch 54/60
10000/10000 [==============================] - 2s 155us/step - loss: 1.9542 - acc: 0.7263 - val_loss: 2.8407 - val_acc: 0.4280
Epoch 55/60
10000/10000 [==============================] - 2s 152us/step - loss: 1.9370 - acc: 0.7215 - val_loss: 2.8813 - val_acc: 0.4197
Epoch 56/60
10000/10000 [==============================] - 2s 155us/step - loss: 1.9122 - acc: 0.7280 - val_loss: 2.8115 - val_acc: 0.4317
Epoch 57/60
10000/10000 [==============================] - 1s 149us/step - loss: 1.8787 - acc: 0.7392 - val_loss: 2.8133 - val_acc: 0.4463
Epoch 58/60
10000/10000 [==============================] - 2s 158us/step - loss: 1.8535 - acc: 0.7430 - val_loss: 2.8387 - val_acc: 0.4381
Epoch 59/60
10000/10000 [==============================] - 2s 154us/step - loss: 1.8558 - acc: 0.7399 - val_loss: 2.8269 - val_acc: 0.4381
Epoch 60/60
10000/10000 [==============================] - 2s 160us/step - loss: 1.8441 - acc: 0.7409 - val_loss: 2.8205 - val_acc: 0.4296
###Markdown
> Note: Adding l2 regularisation will increase your training and validation > losses compared to without l2. So please do not be alarmed. Bias Variance TradeoffRegularisation has solved our overfitting problem but has also caused our model's training accuracy to drop significantly. This is because regularisation is no magic bullet. It is simply a trade-off of overfitting (variance) against underfitting (bias).> Arguably, this is the best (dense only) neural network we can build with > this small 1/5 sample of the dataset. More DataAnother way to address overfitting is simply to train with more data.> Neural networks excel at fitting large datasets.Remember how we sampled the dataset earlier to reduce it to 1/5 of its actual size? To reduce overfitting we can try retraining with the original size of the training set.> We won't be doing it in the practical because it takes quite some time to train.> Here's the code:
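One way to see the bias–variance trade-off concretely is to capture the `History` object that `model.fit` returns and compare the final training and validation accuracies (a small sketch only; the fit call below does not capture it):

```python
# hypothetical: capture the history returned by model.fit(...)
history = model.fit(pp_train_imgs, pp_train_labels,
                    validation_data=(pp_valid_imgs, pp_valid_labels),
                    batch_size=64, epochs=60, verbose=0)

# the gap between training and validation accuracy is a rough proxy for variance
gap = history.history["acc"][-1] - history.history["val_acc"][-1]
print(f"train/val accuracy gap: {gap:.3f}")
```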
###Code
# build the model
input_shape = train_imgs.shape[1:]
model = baseline_model.build_model(
input_shape, n_outputs=10, scale_width=2, scale_depth=6,
activation=layers.ReLU,
    l2_lambda=1e-2) # amended l2 regularisation strength
# compile the model
model.compile(
loss="categorical_crossentropy",
optimizer=optimizers.Adam(lr=1e-4),
metrics=["accuracy"])
# we need to name our training run, so we create a name with the
# current time
run_name = f"run_{datetime.now():%Y_%m_%d__%H_%M_%S}"
# create directory for storing tensorboard logs
logs_dir = os.path.join("logs", run_name)
os.makedirs(logs_dir, exist_ok=True)
# fit the model to the data
model.fit(pp_train_imgs, pp_train_labels, # train with entire actual training set
validation_data=(pp_valid_imgs, pp_valid_labels),
batch_size=64,
epochs=60,
verbose=1,
# set the tensorboard callback to enable tensorboard for the model
callbacks=[callbacks.TensorBoard(log_dir=logs_dir)])
###Output
Train on 50000 samples, validate on 10000 samples
Epoch 1/60
50000/50000 [==============================] - 4s 71us/step - loss: 11.4329 - acc: 0.2517 - val_loss: 9.8500 - val_acc: 0.3130
Epoch 2/60
50000/50000 [==============================] - 3s 69us/step - loss: 9.1769 - acc: 0.3590 - val_loss: 8.5998 - val_acc: 0.3839
Epoch 3/60
50000/50000 [==============================] - 3s 68us/step - loss: 7.9921 - acc: 0.4176 - val_loss: 7.4945 - val_acc: 0.4195
Epoch 4/60
50000/50000 [==============================] - 3s 68us/step - loss: 6.9115 - acc: 0.4544 - val_loss: 6.4717 - val_acc: 0.4384
Epoch 5/60
50000/50000 [==============================] - 3s 68us/step - loss: 5.9296 - acc: 0.4801 - val_loss: 5.5628 - val_acc: 0.4596
Epoch 6/60
50000/50000 [==============================] - 3s 68us/step - loss: 5.0738 - acc: 0.5037 - val_loss: 4.7941 - val_acc: 0.4708
Epoch 7/60
50000/50000 [==============================] - 3s 68us/step - loss: 4.3654 - acc: 0.5188 - val_loss: 4.1732 - val_acc: 0.4841
Epoch 8/60
50000/50000 [==============================] - 3s 68us/step - loss: 3.7926 - acc: 0.5346 - val_loss: 3.6820 - val_acc: 0.4873
Epoch 9/60
50000/50000 [==============================] - 4s 70us/step - loss: 3.3479 - acc: 0.5463 - val_loss: 3.3121 - val_acc: 0.4962
Epoch 10/60
50000/50000 [==============================] - 3s 70us/step - loss: 3.0002 - acc: 0.5555 - val_loss: 3.0227 - val_acc: 0.5013
Epoch 11/60
50000/50000 [==============================] - 4s 71us/step - loss: 2.7381 - acc: 0.5640 - val_loss: 2.8030 - val_acc: 0.5054
Epoch 12/60
50000/50000 [==============================] - 4s 72us/step - loss: 2.5280 - acc: 0.5752 - val_loss: 2.6484 - val_acc: 0.5061
Epoch 13/60
50000/50000 [==============================] - 3s 70us/step - loss: 2.3657 - acc: 0.5822 - val_loss: 2.4918 - val_acc: 0.5140
Epoch 14/60
50000/50000 [==============================] - 3s 69us/step - loss: 2.2315 - acc: 0.5898 - val_loss: 2.4062 - val_acc: 0.5123
Epoch 15/60
50000/50000 [==============================] - 3s 69us/step - loss: 2.1217 - acc: 0.5942 - val_loss: 2.2984 - val_acc: 0.5225
Epoch 16/60
50000/50000 [==============================] - 3s 69us/step - loss: 2.0297 - acc: 0.6021 - val_loss: 2.2392 - val_acc: 0.5225
Epoch 17/60
50000/50000 [==============================] - 4s 71us/step - loss: 1.9539 - acc: 0.6090 - val_loss: 2.1738 - val_acc: 0.5181
Epoch 18/60
50000/50000 [==============================] - 4s 71us/step - loss: 1.8853 - acc: 0.6117 - val_loss: 2.1176 - val_acc: 0.5241
Epoch 19/60
50000/50000 [==============================] - 3s 67us/step - loss: 1.8285 - acc: 0.6211 - val_loss: 2.0679 - val_acc: 0.5289
Epoch 20/60
50000/50000 [==============================] - 3s 69us/step - loss: 1.7809 - acc: 0.6200 - val_loss: 2.0626 - val_acc: 0.5216
Epoch 21/60
50000/50000 [==============================] - 3s 68us/step - loss: 1.7329 - acc: 0.6286 - val_loss: 2.0279 - val_acc: 0.5253
Epoch 22/60
50000/50000 [==============================] - 4s 70us/step - loss: 1.6929 - acc: 0.6322 - val_loss: 1.9809 - val_acc: 0.5274
Epoch 23/60
50000/50000 [==============================] - 4s 72us/step - loss: 1.6559 - acc: 0.6371 - val_loss: 1.9826 - val_acc: 0.5289
Epoch 24/60
50000/50000 [==============================] - 4s 73us/step - loss: 1.6255 - acc: 0.6389 - val_loss: 1.9672 - val_acc: 0.5255
Epoch 25/60
50000/50000 [==============================] - 4s 71us/step - loss: 1.5904 - acc: 0.6464 - val_loss: 1.9383 - val_acc: 0.5329
Epoch 26/60
50000/50000 [==============================] - 4s 71us/step - loss: 1.5687 - acc: 0.6506 - val_loss: 1.9378 - val_acc: 0.5255
Epoch 27/60
50000/50000 [==============================] - 3s 67us/step - loss: 1.5446 - acc: 0.6514 - val_loss: 1.9223 - val_acc: 0.5341
Epoch 28/60
50000/50000 [==============================] - 3s 67us/step - loss: 1.5201 - acc: 0.6559 - val_loss: 1.9260 - val_acc: 0.5250
Epoch 29/60
50000/50000 [==============================] - 3s 68us/step - loss: 1.4991 - acc: 0.6611 - val_loss: 1.9087 - val_acc: 0.5330
Epoch 30/60
50000/50000 [==============================] - 3s 68us/step - loss: 1.4770 - acc: 0.6655 - val_loss: 1.8916 - val_acc: 0.5284
Epoch 31/60
50000/50000 [==============================] - 3s 67us/step - loss: 1.4561 - acc: 0.6693 - val_loss: 1.9051 - val_acc: 0.5303
Epoch 32/60
50000/50000 [==============================] - 3s 68us/step - loss: 1.4443 - acc: 0.6710 - val_loss: 1.8956 - val_acc: 0.5308
Epoch 33/60
50000/50000 [==============================] - 3s 70us/step - loss: 1.4318 - acc: 0.6707 - val_loss: 1.8938 - val_acc: 0.5268
Epoch 34/60
50000/50000 [==============================] - 4s 70us/step - loss: 1.4144 - acc: 0.6765 - val_loss: 1.8766 - val_acc: 0.5337
Epoch 35/60
50000/50000 [==============================] - 4s 71us/step - loss: 1.4049 - acc: 0.6762 - val_loss: 1.8537 - val_acc: 0.5397
Epoch 36/60
50000/50000 [==============================] - 4s 71us/step - loss: 1.3854 - acc: 0.6813 - val_loss: 1.8905 - val_acc: 0.5261
Epoch 37/60
50000/50000 [==============================] - 4s 70us/step - loss: 1.3744 - acc: 0.6835 - val_loss: 1.9152 - val_acc: 0.5244
Epoch 38/60
50000/50000 [==============================] - 4s 70us/step - loss: 1.3673 - acc: 0.6853 - val_loss: 1.8801 - val_acc: 0.5282
Epoch 39/60
50000/50000 [==============================] - 3s 69us/step - loss: 1.3576 - acc: 0.6865 - val_loss: 1.8604 - val_acc: 0.5351
Epoch 40/60
50000/50000 [==============================] - 4s 70us/step - loss: 1.3455 - acc: 0.6917 - val_loss: 1.8809 - val_acc: 0.5298
Epoch 41/60
50000/50000 [==============================] - 4s 70us/step - loss: 1.3305 - acc: 0.6970 - val_loss: 1.8705 - val_acc: 0.5381
Epoch 42/60
50000/50000 [==============================] - 3s 70us/step - loss: 1.3285 - acc: 0.6960 - val_loss: 1.8784 - val_acc: 0.5340
Epoch 43/60
50000/50000 [==============================] - 4s 74us/step - loss: 1.3138 - acc: 0.7015 - val_loss: 1.8947 - val_acc: 0.5309
Epoch 44/60
50000/50000 [==============================] - 4s 70us/step - loss: 1.3104 - acc: 0.6993 - val_loss: 1.8912 - val_acc: 0.5310
Epoch 45/60
50000/50000 [==============================] - 4s 71us/step - loss: 1.3015 - acc: 0.7047 - val_loss: 1.8970 - val_acc: 0.5292
Epoch 46/60
50000/50000 [==============================] - 4s 70us/step - loss: 1.2914 - acc: 0.7058 - val_loss: 1.8778 - val_acc: 0.5380
Epoch 47/60
50000/50000 [==============================] - 3s 69us/step - loss: 1.2876 - acc: 0.7077 - val_loss: 1.8951 - val_acc: 0.5299
Epoch 48/60
50000/50000 [==============================] - 4s 76us/step - loss: 1.2748 - acc: 0.7134 - val_loss: 1.9027 - val_acc: 0.5361
Epoch 49/60
50000/50000 [==============================] - 4s 77us/step - loss: 1.2717 - acc: 0.7131 - val_loss: 1.9186 - val_acc: 0.5230
Epoch 50/60
50000/50000 [==============================] - 4s 73us/step - loss: 1.2637 - acc: 0.7159 - val_loss: 1.9079 - val_acc: 0.5288
Epoch 51/60
50000/50000 [==============================] - 4s 72us/step - loss: 1.2585 - acc: 0.7178 - val_loss: 1.9306 - val_acc: 0.5300
Epoch 52/60
50000/50000 [==============================] - 4s 72us/step - loss: 1.2500 - acc: 0.7180 - val_loss: 1.9292 - val_acc: 0.5291
Epoch 53/60
50000/50000 [==============================] - 4s 73us/step - loss: 1.2422 - acc: 0.7217 - val_loss: 1.9160 - val_acc: 0.5369
Epoch 54/60
50000/50000 [==============================] - 4s 71us/step - loss: 1.2410 - acc: 0.7216 - val_loss: 1.9501 - val_acc: 0.5247
Epoch 55/60
50000/50000 [==============================] - 4s 71us/step - loss: 1.2320 - acc: 0.7252 - val_loss: 1.9533 - val_acc: 0.5231
Epoch 56/60
50000/50000 [==============================] - 4s 71us/step - loss: 1.2319 - acc: 0.7235 - val_loss: 1.9028 - val_acc: 0.5300
Epoch 57/60
50000/50000 [==============================] - 3s 69us/step - loss: 1.2256 - acc: 0.7268 - val_loss: 1.9286 - val_acc: 0.5298
Epoch 58/60
50000/50000 [==============================] - 3s 68us/step - loss: 1.2145 - acc: 0.7333 - val_loss: 1.9566 - val_acc: 0.5314
Epoch 59/60
50000/50000 [==============================] - 4s 71us/step - loss: 1.2079 - acc: 0.7329 - val_loss: 1.9628 - val_acc: 0.5239
Epoch 60/60
50000/50000 [==============================] - 4s 74us/step - loss: 1.2104 - acc: 0.7340 - val_loss: 1.9576 - val_acc: 0.5199
|
TotalGeneralisedVariation.ipynb | ###Markdown
Load Data and resize
###Code
data = dataexample.CAMERA.get(size=(32, 32))
###Output
_____no_output_____
###Markdown
Setup and run tgv denoising with cvxpy
###Code
# solution
u_cvx = Variable(data.shape)
w1_cvx = Variable(data.shape)
w2_cvx = Variable(data.shape)
# regularisation parameters
alpha0 = 0.1
alpha1 = 0.3
# fidelity term
fidelity = 0.5 * sum_squares(u_cvx - data.array)
regulariser = tgv(u_cvx, w1_cvx, w2_cvx, alpha1, alpha0)
# objective
obj = Minimize( regulariser + fidelity)
prob = Problem(obj, constraints = [])
# Choose solver ( SCS, MOSEK(license needed) )
tv_cvxpy = prob.solve(verbose = True, solver = SCS)
###Output
===============================================================================
CVXPY
v1.1.17
===============================================================================
(CVXPY) Dec 06 07:53:11 PM: Your problem has 3072 variables, 0 constraints, and 0 parameters.
(CVXPY) Dec 06 07:53:11 PM: It is compliant with the following grammars: DCP, DQCP
(CVXPY) Dec 06 07:53:11 PM: (If you need to solve this problem multiple times, but with different data, consider using parameters.)
(CVXPY) Dec 06 07:53:11 PM: CVXPY will first compile your problem; then, it will invoke a numerical solver to obtain a solution.
-------------------------------------------------------------------------------
Compilation
-------------------------------------------------------------------------------
(CVXPY) Dec 06 07:53:11 PM: Compiling problem (target solver=SCS).
(CVXPY) Dec 06 07:53:11 PM: Reduction chain: Dcp2Cone -> CvxAttr2Constr -> ConeMatrixStuffing -> SCS
(CVXPY) Dec 06 07:53:11 PM: Applying reduction Dcp2Cone
(CVXPY) Dec 06 07:53:11 PM: Applying reduction CvxAttr2Constr
(CVXPY) Dec 06 07:53:11 PM: Applying reduction ConeMatrixStuffing
(CVXPY) Dec 06 07:53:11 PM: Applying reduction SCS
(CVXPY) Dec 06 07:53:11 PM: Finished problem compilation (took 2.995e-02 seconds).
-------------------------------------------------------------------------------
Numerical solver
-------------------------------------------------------------------------------
(CVXPY) Dec 06 07:53:11 PM: Invoking solver SCS to obtain a solution.
------------------------------------------------------------------
SCS v3.0.0 - Splitting Conic Solver
(c) Brendan O'Donoghue, Stanford University, 2012
------------------------------------------------------------------
problem: variables n: 5121, constraints m: 9218
cones: q: soc vars: 9218, qsize: 2049
settings: eps_abs: 1.0e-05, eps_rel: 1.0e-05, eps_infeas: 1.0e-07
alpha: 1.50, scale: 1.00e-01, adaptive_scale: 1
max_iters: 100000, normalize: 1, warm_start: 0
acceleration_lookback: 10, acceleration_interval: 10
lin-sys: sparse-direct
nnz(A): 20994, nnz(P): 0
------------------------------------------------------------------
iter | pri res | dua res | gap | obj | scale | time (s)
------------------------------------------------------------------
0| 1.36e+01 5.11e-01 2.58e+03 -1.29e+03 1.00e-01 7.93e-03
250| 2.58e-03 1.79e-04 7.20e-03 8.46e+00 1.00e-01 4.30e-01
500| 1.06e-03 2.19e-05 1.66e-03 8.47e+00 1.00e-01 1.16e+00
750| 8.23e-04 2.36e-05 9.50e-04 8.47e+00 1.00e-01 1.91e+00
1000| 5.55e-04 4.07e-06 6.86e-04 8.47e+00 1.00e-01 2.65e+00
1250| 4.14e-04 6.05e-06 4.16e-04 8.47e+00 1.00e-01 3.19e+00
1500| 4.23e-04 5.41e-07 3.15e-04 8.47e+00 1.00e-01 3.69e+00
1750| 2.66e-04 6.43e-06 1.95e-04 8.47e+00 1.00e-01 4.37e+00
2000| 1.80e-04 2.44e-07 1.37e-04 8.47e+00 1.00e-01 5.18e+00
2250| 1.42e-04 1.95e-07 1.15e-04 8.47e+00 1.00e-01 5.83e+00
2500| 1.32e-04 1.65e-07 1.02e-04 8.47e+00 1.00e-01 6.60e+00
2750| 2.92e-01 2.51e-02 3.18e-06 8.47e+00 1.00e-01 7.31e+00
2875| 1.01e-04 4.41e-06 6.91e-05 8.47e+00 1.00e-01 7.66e+00
------------------------------------------------------------------
status: solved
timings: total: 7.69e+00s = setup: 3.15e-02s + solve: 7.66e+00s
lin-sys: 5.91e+00s, cones: 5.44e-01s, accel: 4.64e-01s
------------------------------------------------------------------
objective = 8.473567
------------------------------------------------------------------
-------------------------------------------------------------------------------
Summary
-------------------------------------------------------------------------------
(CVXPY) Dec 06 07:53:18 PM: Problem status: optimal
(CVXPY) Dec 06 07:53:18 PM: Optimal value: 8.476e+00
(CVXPY) Dec 06 07:53:18 PM: Compilation took 2.995e-02 seconds
(CVXPY) Dec 06 07:53:18 PM: Solver (including time spent in interface) took 7.711e+00 seconds
###Markdown
Setup TGV denoising using CIL and the PDHG algorithm
###Code
ig = data.geometry
K11 = GradientOperator(ig)
K22 = SymmetrisedGradientOperator(K11.range)
K12 = IdentityOperator(K11.range)
K21 = ZeroOperator(ig, K22.range)
K = BlockOperator(K11, -K12, K21, K22, shape=(2,2) )
f1 = alpha1 * MixedL21Norm()
f2 = alpha0 * MixedL21Norm()
F = BlockFunction(f1, f2)
G = BlockFunction(0.5 * L2NormSquared(b=data), ZeroFunction())
sigma = 1./np.sqrt(12)
tau = 1./np.sqrt(12)
# Setup and run the PDHG algorithm
pdhg_tgv = PDHG(f=F,g=G,operator=K,
max_iteration = 1000, sigma=sigma, tau=tau,
update_objective_interval = 500)
pdhg_tgv.run(verbose = 2)
###Output
Initialised GradientOperator with C backend running with 20 threads
PDHG setting up
PDHG configured
Iter Max Iter Time/Iter Primal Dual Primal-Dual
[s] Objective Objective Gap
0 1000 0.000 2.15422e+02 -0.00000e+00 2.15422e+02
###Markdown
Compare solutions
###Code
np.testing.assert_almost_equal(pdhg_tgv.solution[0].array, u_cvx.value, decimal=3)
# print objectives
print("CVX objective = {}".format(obj.value))
print("CIL objective = {}".format(pdhg_tgv.objective[-1]))
# show middle line profiles
N, M = data.shape
plt.figure()
plt.plot(pdhg_tgv.solution[0].array[int(N/2)], label="CIL")
plt.plot(u_cvx.value[int(N/2)], label="CVXpy")
plt.legend()
plt.show()
show2D([pdhg_tgv.solution[0].array, u_cvx.value, np.abs(pdhg_tgv.solution[0].array - u_cvx.value)], num_cols = 3, origin="upper")
###Output
CVX objective = 8.476068484702772
CIL objective = 8.476922034844755
|
virtual-scada/Notebooks/LinearForwardSample.ipynb | ###Markdown
Data ProcessingFirst we add some data to our project. In this sample we blank out values in 30% of the rows, removing each column in a selected row with probability 50%.
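The actual `removeRows` helper lives in the `virtualscada` package; the sketch below is only a rough idea of what such a function might do (the name, signature and `seed` argument are invented for illustration):

```python
import numpy as np

def remove_rows_sketch(df, row_fraction, col_probability, seed=0):
    """Blank out values: pick `row_fraction` of the rows, then set each column
    of a picked row to NaN with probability `col_probability`."""
    rng = np.random.default_rng(seed)
    out = df.copy()
    picked = rng.choice(out.index, size=int(len(out) * row_fraction), replace=False)
    for row in picked:
        mask = rng.random(out.shape[1]) < col_probability
        out.loc[row, out.columns[mask]] = np.nan
    return out
```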
###Code
p = pd.read_csv('C:/Users/Serhan/PycharmProjects/virtual-scada/Data/output_p.csv', header = None)
q = pd.read_csv('C:/Users/Serhan/PycharmProjects/virtual-scada/Data/output_q.csv', header = None)
v = pd.read_csv('C:/Users/Serhan/PycharmProjects/virtual-scada/Data/output_v.csv', header = None)
a = pd.read_csv('C:/Users/Serhan/PycharmProjects/virtual-scada/Data/output_a.csv', header = None)
pRemoved = removeRows(p, .3, colPercentage = .5)
qRemoved = removeRows(q, .3, colPercentage = .5)
###Output
..\virtualscada\vs.py:46: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame
See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
dataRemoved[i][j] = None
###Markdown
Virtual SCADAThen we fill in our missing values. Our code takes in the fixed real power, reactive power, voltage, and phase angle and fills in the missing values using linear regression trained on the supplied data.
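The real `fillValuesLRForward` implementation is part of the `virtualscada` package; as a hedged illustration only, filling a single column by linear regression on fully observed features could look roughly like this (the function and variable names are invented):

```python
from sklearn.linear_model import LinearRegression

def fill_column_sketch(target, features):
    """Fill NaNs in `target` (a Series) by regressing it on `features` (a DataFrame)."""
    filled = target.copy()
    known = target.notna()
    model = LinearRegression().fit(features[known], target[known])
    filled[~known] = model.predict(features[~known])
    return filled
```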
###Code
pFilled, qFilled = fillValuesLRForward(pRemoved, qRemoved, v, a)
###Output
_____no_output_____
###Markdown
ResultsNow we look at the results of our filling. The following code manually computes the RMSE of our fill by comparing it with the values we removed at the first step.
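Equivalently (assuming the `predictions` and `actuals` lists built in the cell below), the same RMSE could be computed with scikit-learn:

```python
import numpy as np
from sklearn.metrics import mean_squared_error

rmse = np.sqrt(mean_squared_error(actuals, predictions))
print(rmse)
```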
###Code
numRows, numCols = p.shape
predictions = []
actuals = []
for i in range(numCols):
for j in range(numRows):
if np.isnan(pRemoved[i][j]):
predictions.append(pFilled[i][j])
actuals.append(p[i][j])
print(np.sqrt(np.nanmean((np.array(predictions)-np.array(actuals))**2)))
###Output
0.0048253951140950916
###Markdown
Here is a plot of the results. The first plot shows the actual power, the second the power after being filled in, and the third the power with the missing values.
###Code
sampleBus = np.random.randint(0,29)
plt.figure(figsize=(10, 11))
plt.subplot(221)
plt.plot(p[sampleBus][0:50])
plt.title('Actual Voltage')
plt.xlabel('Time Stamp [unit]')
plt.ylabel('Voltage Magnitude')
plt.subplot(222)
plt.plot(pFilled[sampleBus][0:50])
plt.title('Filled in Voltage')
plt.xlabel('Time Stamp [unit]')
plt.ylabel('Voltage Magnitude')
plt.subplot(223)
plt.plot(pRemoved[sampleBus][0:50])
plt.title('Missing Voltage')
plt.xlabel('Time Stamp [unit]')
plt.ylabel('Voltage Magnitude')
###Output
_____no_output_____ |
1 Topic Modeling - Desmoid Tumors.ipynb | ###Markdown
PubMed Extracts - Desmoid Tumors Extract topic(s) or keywords to narrow down the search space in connection to Desmoid Cancer. I primarily used PubMed Extracts as the starting point.
###Code
# import libraries
import pandas as pd
from tqdm import tqdm
from collections import deque, OrderedDict
import time
import gensim
from gensim.corpora import Dictionary
import spacy
import scispacy
import nltk
from nltk import ngrams, FreqDist
from corextopic import corextopic as ct
from negspacy.negation import Negex
import numpy as np
from scipy.spatial.distance import cosine
%%time
# I will use SciSpacy model to clean the text
# and extract entity for topic modeling
#nlp_eng = spacy.load('en_core_web_lg')
#nlp_sci = spacy.load('en_core_sci_lg')
nlp_craft = spacy.load('en_ner_craft_md')
#nlp_jnlpba = spacy.load('en_ner_jnlpba_md')
nlp_bionlp = spacy.load('en_ner_bionlp13cg_md')
#nlp_bc5cdr = spacy.load('en_ner_bc5cdr_md')
# The following file contains all the extracts from PubMed
# that has mentioned "Desmoid Cancer"
FILE = '/Volumes/Promise Disk/DataScience/Pubmed/desmoid_text/desmoid_pubmed_batch_0.csv'
%%time
df = pd.read_csv(FILE)
df.info()
# Since abstract usually contains a bunch of words/phrases that are
# non-informative to the analysis. The following list contains
# those words/phrases to remove before further analysis
word_to_remove = ['- ','ABSTRACT:','BACKGROUND ','CASE: ',
'CASE PRESENTATION:','CASE REPORT ',
'CASE SUMMARY:','CLINICAL QUESTION/LEVEL OF EVIDENCE: ',
'CONCLUSIONS:','CONCLUSIONS.-: ','CONCLUSIONS: - ','Conclusion: ',
'Conclusions: ','CONCLUSION:','DATA SOURCES.-: ','DATA SOURCES: - ',
'DIAGNOSES: ','DIAGNOSES AND OUTCOMES: ','DISCUSSION:',
'INTERPRETATION:','INTERVENTIONS: ','FUNDING: ','LESSONS: ',
'MATERIALS AND METHODS: ','METHODS:','METHODS: ','Methods:',
'METHOD:','OBJECTIVES:','OBJECTIVE:','OBJECTIVE AND METHOD:',
'OBJECTIVE.-: ','OBJECTIVE: - ','OUTCOMES: ','PATIENT CONCERNS: ',
'PRESENTATION OF CASE: ','RESULTS:','RESULT:',
'MATERIALS AND METHODS:', '(',')','MEDLINE', 'FINDINGS']
%%time
# clean the original abstract by removing the non-informative words/phrases
# I also remove the abstract that is too short to be useful
t = deque()
for i in range(len(df)):
text = df.loc[i,'text']
for word in word_to_remove:
text = text.replace(word,'')
if len(text.split(' '))>40:
t.append(text)
print (len(t))
# Helper functions
def extract_specific_tokens(nlp, paragraph): # using English common POS as starting point
POS_to_remove = ['ADP','ADV','AUX',
'CONJ','SCONJ','SPACE',
'DET','INTJ','NUM','PRON',
'CCONJ','PUNCT','SYM',
'X','VERB','PART'] # extract nouns (and some adjectives) to enhance the information content
doc = nlp(paragraph)
pr = []
for token in doc:
if token.pos_ not in POS_to_remove:
if '%' not in token.text:
pr.append(token.text)
else:
pass
return pr
def extract_keyword(text):
start_pos = text.find('"')
if text[start_pos+1:-2]=='':
pass
else:
return text[start_pos+1:-1]
###Output
_____no_output_____
###Markdown
Topic Modeling - Latent Dirichlet Allocation
###Code
%%time
# Set of language libraries from different domains
# to clean up the text and extract entities
nlp_eng = spacy.load('en_core_web_lg')
nlps = [nlp_craft, nlp_bionlp]
nlp_names = ['nlp_craft', 'nlp_bionlp']
label_to_remove = ['DISEASE','CANCER','MULTI_TISSUE_STRUCTURE','PATHOLOGICAL_FORMATION','ORGAN','TISSUE','ORGANISM_SUBDIVISION','CL','CELL_TYPE','CELL','SO','GO','CELLULAR_COMPONENT','ORGANISM_SUBSTANCE','TAXON','ORGANISM']
# Process to extract entities for topic analysis
doc_list = []
for paragraph in tqdm(t):
text = ' '.join(extract_specific_tokens(nlp_eng, paragraph)) # remove common words
doc_list.append(text)
new_doc = []
for paragraph in tqdm(doc_list):
for nlp in nlps: # use different biomedical domain corpus to enrich the document informative content
doc = nlp(paragraph)
pr = [ent.text for ent in doc.ents if ent.label_ not in label_to_remove] # extract biomedical domain relevant entity
new_doc.append(pr)
len(new_doc) # print out the total number of documents in the corpus
word = Dictionary(new_doc)
corpus = [word.doc2bow(doc) for doc in new_doc]
%%time
lda_model = gensim.models.ldamodel.LdaModel(corpus=corpus,
id2word=word,
num_topics=10,
random_state=42,
update_every=1,
passes=100,
alpha='auto',
per_word_topics=True)
# Below are 10 topics with keywords identified from PubMed abstracts
for topic in lda_model.print_topics(num_words=20):
print (topic)
print ()
%%time
# Extract keywords for further analysis from topics
keyword_lst = []
for topic in tqdm(lda_model.print_topics(num_words=20)):
index, formula = topic
components = formula.split(" + ")
for component in components:
keyword = extract_keyword(component)
keyword_lst.append(keyword)
len(list(set(keyword_lst)))
list(set(keyword_lst))
# Extract gene-liked items
genes = [key for key in list(set(keyword_lst)) if (len(key)>2 and len(key)<8)]
len(genes), genes
# remove those non-gene like keys
genes.remove('FAP')
genes.remove('desmin')
genes.remove('flaps')
genes.remove('protein')
genes.remove('citrate')
genes.remove('FAP DT')
genes.remove('cyclic')
genes.remove('hyaline')
genes.remove('4.1B')
genes.remove('Radical')
genes.remove('radical')
genes.remove('pigment')
genes.remove('M CSF')
genes.remove('drug')
genes.remove('acid')
genes.remove('midkine')
genes.remove('VBL MTX')
len(genes), genes
###Output
_____no_output_____
###Markdown
Supplement: Negative Matrix Factorization
###Code
from sklearn.feature_extraction.text import TfidfVectorizer
def dummy(doc):
return doc
cv = TfidfVectorizer(tokenizer=dummy,preprocessor=dummy)
tf = cv.fit_transform(new_doc)
from sklearn.decomposition import NMF
nmf = NMF(n_components=10,random_state=1,alpha=.1,l1_ratio=.5).fit(tf)
import matplotlib.pyplot as plt
def plot_top_words(model, feature_names, n_top_words, title):
fig, axes = plt.subplots(2, 5, figsize=(30, 15), sharex=True)
axes = axes.flatten()
for topic_idx, topic in enumerate(model.components_):
top_features_ind = topic.argsort()[:-n_top_words - 1:-1]
top_features = [feature_names[i] for i in top_features_ind]
weights = topic[top_features_ind]
ax = axes[topic_idx]
ax.barh(top_features, weights, height=0.7)
ax.set_title(f'Topic {topic_idx +1}',
fontdict={'fontsize': 30})
ax.invert_yaxis()
ax.tick_params(axis='both', which='major', labelsize=12)
for i in 'top right left'.split():
ax.spines[i].set_visible(False)
fig.suptitle(title, fontsize=40)
plt.subplots_adjust(top=0.90, bottom=0.05, wspace=0.90, hspace=0.3)
plt.show()
tfidf_feature_names = cv.get_feature_names()
plot_top_words(nmf, tfidf_feature_names, 20,'Topics in NMF model (Frobenius norm)')
###Output
_____no_output_____
###Markdown
Additional input from NMR:- Indole-3-acetic acid (IAA): Indole-3-acetic acid (IAA) has recently shown anticancer activity in combination with horseradish peroxidase. The current study demonstrated that IAA irradiated with ultraviolet B (IAA(UVB)) is able to generate free radicals and induce cell death in a time-dependent fashion in PC-3 prostate cancer cells, while PC-3 cells treated with IAA alone exhibited no toxic responses. It was also found through Western blot analysis that the cytotoxic effect of IAA(UVB) resulted from apoptosis.- C-reactive protein (CRP): C-reactive protein (CRP) is a predominant protein of the acute phase response; its blood levels have long been used as a minimally invasive index of any ongoing inflammatory response, including that occurring in cancer.- CD34- LEF1: LEF1 knockdown experiments in cell lines reveal that, depending on the cellular context, LEF1 can induce pro-apoptotic signals. LEF1 can also suppress proliferation, migration and invasiveness of Rhabdomyosarcoma (RMS) cells both in vitro and in vivo. Furthermore, LEF1 can induce myodifferentiation of the tumor cells. This may involve regulation of other LEF1/TCF factors, i.e. TCF1, whereas β-catenin activity plays a subordinate role. Together these data suggest that LEF1 rather has tumor-suppressive functions and attenuates aggressiveness in a subset of RMS- S45P- T41A, T41I- NGF: Nerve Growth Factor (NGF) research has shown that this factor acts not only outside its classical domain of the peripheral and central nervous system, but also on non-neuronal and cancer cells. This latter observation has led to divergent hypotheses about the role of NGF, its specific distribution pattern within the tissues and its implication in the induction as well as progression of carcinogenesis. Studies indicate that the presence of NGF alone is unable to generate cell carcinogenesis, both in normal neuronal and non-neuronal cells/tissues. However, the possibility cannot be excluded that the co-expression of NGF and pro-carcinogenic molecules might lead to a different outcome. Whether NGF plays a direct or an indirect role in cell proliferation during carcinogenesis remains to be demonstrated.- MYH: MUTYH-associated polyposis (also known as MYH-associated polyposis) is an autosomal recessive polyposis syndrome. The disorder is caused by mutations in both alleles (genetic copies) of the DNA repair gene, MUTYH. The MUTYH gene encodes a base excision repair protein, which corrects oxidative damage to DNA.- Alu: an RNA sequence that induces epithelial-to-mesenchymal transition (EMT) by acting as a molecular sponge of the oncogene miR-566. In normal cells, unmethylated Alu elements tend to locate in the vicinity of functionally rich regions and display epigenetic features consistent with a direct impact on genome regulation. In cancer cells, Alu repeats are more resistant to hypomethylation than other retroelements. Genome segmentation based on high/low rates of Alu hypomethylation allows the identification of genomic compartments with differential genetic, epigenetic, and transcriptomic features. Alu hypomethylated regions show low transcriptional activity and late DNA replication, and their extent is associated with higher chromosomal instability.- Casein kinase 1 alpha (CK1α), encoded by CSNK1A1 in humans, is a member of the CK1 family of proteins that has broad serine/threonine protein kinase activity and is one of the main components of the Wnt/β-catenin signaling pathway.
###Code
NMR_keys = ['CSNK1A1','Alu','MUTYH','NGF','T41A','T41I','S45P','LEF1','CD34','CRP','IAA']
all_genes = genes+NMR_keys
len(NMR_keys), len(all_genes)
df=pd.DataFrame(all_genes,columns=['geneIds'])
df.to_csv('desmoid_keyphrases.csv',index=False)
###Output
_____no_output_____ |
ROBO_SAE.ipynb | ###Markdown
Title of Database: Wall-Following navigation task with mobile robot SCITOS-G5 The data were collected as the SCITOS G5 navigated through the room following the wall in a clockwise direction, for 4 rounds. To navigate, the robot uses 24 ultrasound sensors arranged circularly around its "waist". The numbering of the ultrasound sensors starts at the front of the robot and increases in the clockwise direction.
###Code
# modules
from __future__ import print_function  # future imports must precede all other imports
from keras.layers import Input, Dense, Dropout
from keras.models import Model
from keras.datasets import mnist
from keras.models import Sequential, load_model
from keras.optimizers import RMSprop
from keras.callbacks import TensorBoard
from keras.utils import plot_model
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
from sklearn import preprocessing
from keras import layers
from keras import initializers
from matplotlib import axes
from matplotlib import rc
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
import keras
import matplotlib.pyplot as plt
import numpy as np
import math
import pydot
import graphviz
import pandas as pd
import IPython
import itertools
%matplotlib inline
font = {'family' : 'monospace',
'weight' : 'bold',
'size' : 20}
rc('font', **font)
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
###Output
_____no_output_____
###Markdown
Import and basic data inspection
###Code
# import
data_raw = pd.read_csv('data/sensor_readings_24.csv', sep=",", header=None)
data = data_raw.copy()
###Output
_____no_output_____
###Markdown
The dataframe consists of only positive values and the classes are encoded as strings in the variable with index `24`
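A quick sanity check of both claims (a small sketch using the `data` frame loaded above):

```python
# are all sensor readings strictly positive, and is the class column made of strings?
print((data.iloc[:, :-1] > 0).all().all())
print(data[24].unique())
```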
###Code
data.head()
###Output
_____no_output_____
###Markdown
What's the distribution of the classes?
###Code
df_tab = data_raw
df_tab[24] = df_tab[24].astype('category')
tab = pd.crosstab(index=df_tab[24], columns="frequency")
tab.index.name = 'Class/Direction'
tab/tab.sum()
###Output
_____no_output_____
###Markdown
The `Move_Forward` and the `Sharp-Right-Turn` classes together account for nearly 80% of all observed samples. So it might happen that the accuracy stays high at around 75% even though most of the features are eliminated. Preprocessing **0. Mapping integer values to the classes.**
###Code
mapping = {key: value for (key, value) in zip(data[24].unique(), range(len(data[24].unique())))}
print(mapping)
data.replace({24:mapping}, inplace=True)
data[24].unique()
###Output
_____no_output_____
###Markdown
**1. Take a random sample of 90% of the rows from the dataframe. To ensure reproducibility the `random_state` variable is set. The other 10% are set aside for validation after training. The last column is the class column and is stored in the y variables respectively.**
###Code
data_train = data.sample(frac=0.9, random_state=42)
data_val = data.drop(data_train.index)
df_x_train = data_train.iloc[:,:-1]
df_y_train = data_train.iloc[:,-1]
df_x_val = data_val.iloc[:,:-1]
df_y_val = data_val.iloc[:,-1]
###Output
_____no_output_____
###Markdown
**2. Normalization between 0 and 1**
###Code
x_train = df_x_train.values
x_train = (x_train - x_train.min()) / (x_train.max() - x_train.min())
y_train = df_y_train.values
y_train_cat = y_train
x_val = df_x_val.values
x_val = (x_val - x_val.min()) / (x_val.max() - x_val.min())
y_val = df_y_val.values
y_eval = y_val
###Output
_____no_output_____
###Markdown
**3. Make useful categorical variables out of the single column data by one-hot encoding it.**
###Code
y_train = keras.utils.to_categorical(y_train, 4)
y_val = keras.utils.to_categorical(y_val, 4)
###Output
_____no_output_____
###Markdown
**4. Set Global Parameters**
###Code
epochsize = 150
batchsize = 24
shuffle = False
dropout = 0.1
num_classes = 4
input_dim = x_train.shape[1]
hidden1_dim = 30
hidden2_dim = 30
class_names = mapping.keys()
###Output
_____no_output_____
###Markdown
Train Neural Net *Due to a tight schedule we will not perform any cross validation, so our accuracy estimates may generalise a little less well. We shall live with that. Another experimental setup would be to loop over several differently sampled dataframes in the preprocessing steps, repeat all the steps below, and finally average the results.* The dimensions of the hidden layers are set arbitrarily, but some runs have shown that 30 is a good number. The `input_dim` variable is set to 24 because initially there are 24 features. The aim is to build the best possible neural net. Optimizer RMSprop is a mini-batch gradient descent algorithm which divides the gradient by a running average of its recent magnitude. More information: http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf The weights are initialized from a normal distribution with mean 0 and standard deviation of 0.05.
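As a toy illustration of that update rule (plain NumPy, not how Keras implements RMSprop internally), the optimizer keeps a running average of the squared gradient and divides the current gradient by its square root:

```python
import numpy as np

def rmsprop_step(param, grad, cache, lr=0.001, decay=0.9, eps=1e-8):
    """One RMSprop update on a single parameter array (illustrative only)."""
    cache = decay * cache + (1 - decay) * grad ** 2  # running average of squared gradients
    param = param - lr * grad / (np.sqrt(cache) + eps)
    return param, cache
```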
###Code
input_data = Input(shape=(input_dim,), dtype='float32', name='main_input')
hidden_layer1 = Dense(hidden1_dim, activation='relu', input_shape=(input_dim,), kernel_initializer='normal')(input_data)
dropout1 = Dropout(dropout)(hidden_layer1)
hidden_layer2 = Dense(hidden2_dim, activation='relu', input_shape=(input_dim,), kernel_initializer='normal')(dropout1)
dropout2 = Dropout(dropout)(hidden_layer2)
output_layer = Dense(num_classes, activation='softmax', kernel_initializer='normal')(dropout2)
model = Model(inputs=input_data, outputs=output_layer)
model.compile(loss='categorical_crossentropy',
optimizer=RMSprop(),
metrics=['accuracy'])
plot_model(model, to_file='images/robo1_nn.png', show_shapes=True, show_layer_names=True)
IPython.display.Image("images/robo1_nn.png")
model.fit(x_train, y_train,
batch_size=batchsize,
epochs=epochsize,
verbose=0,
shuffle=shuffle)
nn_score = model.evaluate(x_val, y_val)[1]
print(nn_score)
# Compute confusion matrix
cnf_matrix = confusion_matrix(y_eval, model.predict(x_val).argmax(axis=-1))
np.set_printoptions(precision=2)
# Plot normalized confusion matrix
plt.figure(figsize=(20,10))
plot_confusion_matrix(cnf_matrix, classes=class_names, normalize=True,
title='Normalized confusion matrix')
###Output
Normalized confusion matrix
[[ 0.9 0.03 0.08 0. ]
[ 0. 0.95 0.05 0. ]
[ 0.01 0.01 0.98 0. ]
[ 0. 0.03 0.13 0.85]]
###Markdown
Comparison The following data is from a paper published in March 2017. You can find that here: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5375835/
###Code
IPython.display.Image("images/2018-01-25 18_44_01-PubMed Central, Table 2_ Sensors (Basel). 2017 Mar; 17(3)_ 549. Published online.png")
###Output
_____no_output_____
###Markdown
One can easily see that our results are better. So we go further with that result and check how good our SAW might become. Stacked Autoencoder For this dataset we decided to go with a 24-16-8-16-24 architecture. First layer
###Code
input_img = Input(shape=(input_dim,))
encoded1 = Dense(16, activation='relu')(input_img)
decoded1 = Dense(input_dim, activation='relu')(encoded1)
class1 = Dense(num_classes, activation='softmax')(decoded1)
autoencoder1 = Model(input_img, class1)
autoencoder1.compile(optimizer=RMSprop(), loss='binary_crossentropy', metrics=['accuracy'])
encoder1 = Model(input_img, encoded1)
encoder1.compile(optimizer=RMSprop(), loss='binary_crossentropy')
autoencoder1.fit(x_train
, y_train
, epochs=50
, batch_size=24
, shuffle=True
, verbose=False
)
score1 = autoencoder1.evaluate(x_val, y_val, verbose=0)
print('Test accuracy:', score1[1])
###Output
Test accuracy: 0.95695970696
###Markdown
Second layer
###Code
first_layer_code = encoder1.predict(x_train)
encoded_2_input = Input(shape=(16,))
encoded2 = Dense(8, activation='relu')(encoded_2_input)
decoded2 = Dense(16, activation='relu')(encoded2)
class2 = Dense(num_classes, activation='softmax')(decoded2)
autoencoder2 = Model(encoded_2_input, class2)
autoencoder2.compile(optimizer=RMSprop(), loss='binary_crossentropy', metrics=['accuracy'])
encoder2 = Model(encoded_2_input, encoded2)
encoder2.compile(optimizer=RMSprop(), loss='binary_crossentropy')
autoencoder2.fit(first_layer_code
, y_train
, epochs=50
, batch_size=24
, shuffle=True
, verbose=False
)
first_layer_code_val = encoder1.predict(x_val)
score2 = autoencoder2.evaluate(first_layer_code_val, y_val, verbose=0)
print('Test loss:', score2[0])
print('Test accuracy:', score2[1])
###Output
Test loss: 0.138333105124
Test accuracy: 0.958333333333
###Markdown
Data Reconstruction with SAE
###Code
sae_encoded1 = Dense(16, activation='relu')(input_img)
sae_encoded2 = Dense(8, activation='relu')(sae_encoded1)
sae_decoded1 = Dense(16, activation='relu')(sae_encoded2)
sae_decoded2 = Dense(24, activation='sigmoid')(sae_decoded1)
sae = Model(input_img, sae_decoded2)
sae.layers[1].set_weights(autoencoder1.layers[1].get_weights())
sae.layers[2].set_weights(autoencoder2.layers[1].get_weights())
sae.compile(loss='binary_crossentropy', optimizer=RMSprop())
sae.fit(x_train
, x_train
, epochs=50
, batch_size=24
, shuffle=True
, verbose=False
)
score4 = sae.evaluate(x_val, x_val, verbose=0)
print('Test loss:', score4)
###Output
Test loss: 0.46517931236
###Markdown
Classification
###Code
input_img = Input(shape=(input_dim,))
sae_classifier_encoded1 = Dense(16, activation='relu')(input_img)
sae_classifier_encoded2 = Dense(8, activation='relu')(sae_classifier_encoded1)
class_layer = Dense(num_classes, activation='softmax')(sae_classifier_encoded2)
sae_classifier = Model(inputs=input_img, outputs=class_layer)
sae_classifier.layers[1].set_weights(autoencoder1.layers[1].get_weights())
sae_classifier.layers[2].set_weights(autoencoder2.layers[1].get_weights())
sae_classifier.compile(loss='binary_crossentropy', optimizer=RMSprop(), metrics=['accuracy'])
sae_classifier.fit(x_train, y_train
, epochs=50
, verbose=True
, batch_size=24
, shuffle=True)
score5 = sae_classifier.evaluate(x_val, y_val)
print('Test accuracy:', score5[1])
###Output
546/546 [==============================] - 0s 438us/step
Test accuracy: 0.962912087912
###Markdown
Plot a two dimensional representation of the data
###Code
third_layer_code = encoder2.predict(encoder1.predict(x_train))
encoded_4_input = Input(shape=(8,))
encoded4 = Dense(2, activation='sigmoid')(encoded_4_input)
decoded4 = Dense(8, activation='sigmoid')(encoded4)
class4 = Dense(num_classes, activation='softmax')(decoded4)
autoencoder4 = Model(encoded_4_input, class4)
autoencoder4.compile(optimizer=RMSprop(), loss='binary_crossentropy', metrics=['accuracy'])
encoder4 = Model(encoded_4_input, encoded4)
encoder4.compile(optimizer=RMSprop(), loss='binary_crossentropy')
autoencoder4.fit(third_layer_code
, y_train
, epochs=100
, batch_size=24
, shuffle=True
, verbose=True
)
third_layer_code_val = encoder2.predict(encoder1.predict(x_val))
score4 = autoencoder4.evaluate(third_layer_code_val, y_val, verbose=0)
print('Test loss:', score4[0])
print('Test accuracy:', score4[1])
fourth_layer_code = encoder4.predict(encoder2.predict(encoder1.predict(x_train)))
value1 = [x[0] for x in fourth_layer_code]
value2 = [x[1] for x in fourth_layer_code]
y_classes = y_train_cat
data = {'value1': value1, 'value2': value2, 'class' : y_classes}
data = pd.DataFrame.from_dict(data)
data.head()
groups = data.groupby('class')
# Plot
fig, ax = plt.subplots(figsize=(20,10))
# plt.figure(figsize=(20,10))
ax.margins(0.05) # Optional, just adds 5% padding to the autoscaling
for name, group in groups:
    ax.plot(group.value1, group.value2, marker='o', linestyle='', ms=3, label=name, alpha=0.7)
ax.legend()
plt.show()
###Output
_____no_output_____ |
Ml/MLproject/Untitled0(1).ipynb | ###Markdown
Visualizing normalized data
###Code
plt.figure(figsize=(16,9))
plt.xlabel("Days")
plt.ylabel("Close Price")
plt.plot(df['Close'], label='Close Price history')
plt.show()
df = pd.read_csv('/content/drive/MyDrive/MLproject/DRREDDY.csv')
df
df.describe()
df.isna().any()
# histograms of each numeric column
df.hist(figsize=(16, 9))
plt.show()
import seaborn as sns
fig, ax = plt.subplots(figsize=(10,10))
sns.heatmap(df.corr(), cmap="YlGnBu", annot=True, ax=ax)
plt.show()
df["Date"] = pd.to_datetime(df.Date,format='%Y-%m-%d')
#plot
plt.figure(figsize=(16,8))
plt.plot(df['Date'],df['Close'], label='Close Price history')
plt.plot(df['Date'],df['Open'], label='open Price history')
plt.plot(df['Date'],df['High'], label='High Price history')
plt.legend()
plt.show()
df1 = df
df1.index=df1['Date']
df1['year'] = df1.index.year
fig = plt.figure(figsize =(20,8))
# Bar plot of closing price by year
plt.bar(df1['year'],df1['Close'])
plt.show()
df.isna().any()
new_data = pd.DataFrame(index=range(0,len(df)),columns=['Date', 'Close'])
data = df.sort_index(ascending=True, axis=0)
for i in range(0,len(df)):
    new_data['Date'][i] = df['Date'][i]
    new_data['Close'][i] = df['Close'][i]
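# the same frame can be built without the Python loop (an equivalent, vectorized sketch):
#   new_data = df[['Date', 'Close']].reset_index(drop=True)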
import fastai
from fastai.tabular import add_datepart
add_datepart(new_data, 'Date')
new_data.drop('Elapsed', axis=1, inplace=True) #elapsed will be the time stamp
new_data
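# if fastai is unavailable, similar calendar features can be derived with plain pandas
# (a rough sketch, not the exact add_datepart output; column names are illustrative):
#   dates = pd.to_datetime(df['Date'])
#   new_data['Year'], new_data['Month'] = dates.dt.year, dates.dt.month
#   new_data['Day'], new_data['Dayofweek'] = dates.dt.day, dates.dt.dayofweek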
new_data['mon_fri'] = 0
for i in range(0,len(new_data)):
    if (new_data['Dayofweek'][i] == 0 or new_data['Dayofweek'][i] == 4):
        new_data['mon_fri'][i] = 1
    else:
        new_data['mon_fri'][i] = 0
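# equivalent vectorized form of the Monday/Friday flag built above:
#   new_data['mon_fri'] = new_data['Dayofweek'].isin([0, 4]).astype(int)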
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler(feature_range=(0, 1))
# scale only the Close series, keeping df1 (the Date-indexed frame) intact for plotting later;
# the regression below is fit on the unscaled features and prices
scaled_close = scaler.fit_transform(np.array(new_data['Close']).reshape(-1, 1))
# train / validation split on the engineered feature DataFrame
train = new_data[:3714]
valid = new_data[3714:]
x_train = train.drop('Close', axis=1)
y_train = train['Close']
x_valid = valid.drop('Close', axis=1)
y_valid = valid['Close']
#implement linear regression
from sklearn.linear_model import LinearRegression
model = LinearRegression()
model.fit(x_train,y_train)
preds = model.predict(x_valid)
print('Variance score: %.2f' % model.score(x_valid, y_valid))
rms=np.sqrt(np.mean(np.power((np.array(y_valid)-np.array(preds)),2)))
print("rms : %.2f" %rms)
valid['Predictions'] = 0
valid['Predictions'] = preds
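# note: assigning through a slice like `valid['Predictions'] = preds` is what triggers the
# SettingWithCopyWarning recorded in the output below; a cleaner pattern would be:
#   valid = valid.copy()
#   valid.loc[:, 'Predictions'] = preds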
valid.index = df1[3714:].index
train.index = df1[:3714].index
plt.figure(figsize=(16,8))
plt.plot(train['Close'], label='Real value')
plt.plot(valid[['Close', 'Predictions']], label='Predicted value')
plt.show()
###Output
/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:1: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead
See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy
"""Entry point for launching an IPython kernel.
/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:2: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead
See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy
|
Python/Traditional algrothims/Trees/RandomForestRegression/RandomForestRegression.ipynb | ###Markdown
Building a random forest regression model 0. Import libraries
###Code
import pandas as pd
from sklearn import preprocessing
from sklearn.ensemble import RandomForestRegressor
from sklearn.datasets import load_boston
###Output
_____no_output_____
###Markdown
1. Load the data
###Code
boston_house = load_boston()
boston_feature_name = boston_house.feature_names
boston_features = boston_house.data
boston_target = boston_house.target
boston_feature_name
print(boston_house.DESCR)
boston_features[:5,:]
boston_target
###Output
_____no_output_____
###Markdown
Build the model
###Code
help(RandomForestRegressor)
rgs = RandomForestRegressor(n_estimators=15)
rgs = rgs.fit(boston_features, boston_target)
rgs
rgs.predict(boston_features)
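# hedged evaluation sketch (variable names below are illustrative, not from the original notebook):
# hold out a test set instead of scoring on the training data, then report MSE and R^2
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error, r2_score
X_tr, X_te, y_tr, y_te = train_test_split(boston_features, boston_target,
                                           test_size=0.2, random_state=0)
rgs_heldout = RandomForestRegressor(n_estimators=15, random_state=0).fit(X_tr, y_tr)
y_pred_heldout = rgs_heldout.predict(X_te)
print('held-out MSE:', mean_squared_error(y_te, y_pred_heldout))
print('held-out R^2:', r2_score(y_te, y_pred_heldout))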
from sklearn import tree
rgs2 = tree.DecisionTreeRegressor()
rgs2.fit(boston_features, boston_target)
rgs2.predict(boston_features)
###Output
_____no_output_____ |