path | concatenated_notebook |
---|---|
Notebooks/Word_Prediction_using_Quadgrams_Memory_Efficient_Encoded_keys.ipynb | ###Markdown
Word prediction based on Quadgram. This program reads the corpus line by line, so it is slower than the program which reads the corpus in one go, but it only loads one line into memory at a time. It also uses encoded dictionary keys, making it even more memory efficient. Import modules
###Code
#import the modules necessary
from nltk.util import ngrams
from collections import defaultdict
from collections import OrderedDict
import nltk
import string
import time
start_time = time.time()
###Output
_____no_output_____
###Markdown
Do preprocessing: Encode keys for dictionary storage
###Code
#returns: string
#args: list, list, dict
#encodes a tuple of words into a compact dictionary key
#each unique word is assigned an integer index, tracked via the index list
#vocab_dict maps each word to its assigned index
def encodeKey(s,index,vocab_dict):
key = ''
#print (s)
for t in s:
#print (t)
if t not in vocab_dict:
vocab_dict[t] = index[0]
index[0] = index[0] + 1
key = key + str(vocab_dict[t]) + '#'
#print(key)
return key
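#quick illustrative check (added; not part of the original notebook): with a fresh
#vocab_dict and index list, the hypothetical trigram ('emma', 'by', 'jane') encodes
#to '0#1#2#', because each previously unseen word receives the next free integer index
example_vocab = {}
example_index = [0]
assert encodeKey(('emma', 'by', 'jane'), example_index, example_vocab) == '0#1#2#'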
###Output
_____no_output_____
###Markdown
Remove punctuation and lowercase the tokens
###Code
#returns: string
#arg: string
#remove punctuations and make the string lowercase
def removePunctuations(sen):
#split the string into word tokens
temp_l = sen.split()
i = 0
#changes the word to lowercase and removes punctuations from it
for word in temp_l :
for l in word :
if l in string.punctuation:
word = word.replace(l," ")
temp_l[i] = word.lower()
i=i+1
#the tokens are re-joined (and later re-split) because a token like "here---so"
#becomes "here   so" after punctuation removal and should yield two words: "here" and "so"
content = " ".join(temp_l)
return content
###Output
_____no_output_____
###Markdown
Tokenize the corpus data
###Code
#returns : void
#arg: string,dict,dict,dict,list
#loads the corpus for the dataset and makes the frequency count of quadgram and trigram strings
def loadCorpus(filename,tri_dict,quad_dict,vocab_dict,index):
w1 = '' #for storing the 3rd last word to be used for next token set
w2 = '' #for storing the 2nd last word to be used for next token set
w3 = '' #for storing the last word to be used for next token set
i = 0
sen = ''
token = []
with open(filename,'r') as file:
#read the data line by line
for line in file:
token = line.split()
i = 0
for word in token :
for l in word :
if l in string.punctuation:
word = word.replace(l," ")
token[i] = word.lower()
i=i+1
content = " ".join(token)
token = content.split()
if not token:
continue
#first add the previous words
if w2!= '':
token.insert(0,w2)
if w3!= '':
token.insert(1,w3)
#tokens for trigrams
temp1 = list(ngrams(token,3))
if w1!= '':
token.insert(0,w1)
#tokens for quadgrams
temp2 = list(ngrams(token,4))
#count the frequency of the trigram sentences
for t in temp1:
sen = encodeKey(t,index,vocab_dict)
tri_dict[sen] += 1
#count the frequency of the quadgram sentences
for t in temp2:
sen = encodeKey(t,index,vocab_dict)
quad_dict[sen] += 1
#then take out the last 3 words
n = len(token)
w1 = token[n -3]
w2 = token[n -2]
w3 = token[n -1]
###Output
_____no_output_____
###Markdown
Find the probability
###Code
#returns : float
#arg : string sentence,string word,dict,dict
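#estimates the conditional probability by maximum likelihood (comment added for clarity):
#P(w | w1 w2 w3) = count(w1 w2 w3 w) / count(w1 w2 w3)
#both counts come from the encoded quadgram and trigram dictionaries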
def findprobability(s,w,tri_dict,quad_dict):
c1 = 0 # for count of sentence 's' with word 'w'
c2 = 0 # for count of sentence 's'
s1 = s + w
if s1 in quad_dict:
c1 = quad_dict[s1]
if s in tri_dict:
c2 = tri_dict[s]
if c2 == 0:
return 0
return c1/c2
###Output
_____no_output_____
###Markdown
Decode key
###Code
#returns: string
#args: string, dict
#decodes an encoded key back into the original space-separated words
def decodeKey(s,vocab_dict):
key = ''
l = []
item = list(vocab_dict.items())
temp_l = s.split('#')
del temp_l[len(temp_l)-1]
index = 0
for c in temp_l:
if c != ' ':
index = int(c)
l.append(item[index][0])
key = ' '.join(l)
return key
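#round-trip check (added; not part of the original notebook): decoding a key produced
#by encodeKey recovers the original words. decodeKey relies on vocab_dict preserving
#insertion order, which is why the main code uses an OrderedDict
example_vocab = OrderedDict([('emma', 0), ('by', 1), ('jane', 2)])
assert decodeKey('0#1#2#', example_vocab) == 'emma by jane'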
###Output
_____no_output_____
###Markdown
Driver function for doing the prediction
###Code
#returns : void
#arg: string,dict,dict,dict,list
def doPrediction(sen,tri_dict,quad_dict,vocab_dict,index):
#remove punctuations and make it lowercase
temp_l = sen.split()
i = 0
for word in temp_l :
for l in word :
if l in string.punctuation:
word = word.replace(l," ")
temp_l[i] = word.lower()
i=i+1
content = " ".join(temp_l)
temp_l = content.split()
#encode the sentence before checking
sen = encodeKey(temp_l,index,vocab_dict)
max_prob = 0
#default fallback prediction when no probable word is available
#the quadgram counts are used to guess the most probable next word
right_word = 'apple'
for word in vocab_dict:
#print(word)
#encode the word before checking
dict_l = []
dict_l.append(word)
word = encodeKey(dict_l,index,vocab_dict)
prob = findprobability(sen,word,tri_dict,quad_dict)
if prob > max_prob:
max_prob = prob
right_word = word
#decode the right word
right_word = decodeKey(right_word,vocab_dict)
print('Word Prediction is :',right_word)
def main():
tri_dict = defaultdict(int)
quad_dict = defaultdict(int)
vocab_dict = OrderedDict() #for mapping of words with their index ==> key:word value:index of key in dict
index = [0] #list for assigning index value to keys
loadCorpus('mycorpus.txt',tri_dict,quad_dict,vocab_dict,index)
cond = False
#take input
while(cond == False):
sen = input('Enter the string\n')
sen = removePunctuations(sen)
temp = sen.split()
if len(temp) < 3:
print("Please enter atleast 3 words !")
else:
cond = True
temp = temp[-3:]
sen = " ".join(temp)
doPrediction(sen,tri_dict,quad_dict,vocab_dict,index)
if __name__ == '__main__':
main()
###Output
Enter the string
emma by jane
Word Prediction is : austen
|
jupyterhub/notebooks/zz_under_construction/zz_old/Spark/Intro/Lab 2 - Spark SQL/Lab 2 - Spark SQL - Instructor Notebook.ipynb | ###Markdown
###Code
<img src='https://raw.githubusercontent.com/bradenrc/sparksql_pot/master/sparkSQL3.png' width="80%" height="80%"></img>
<img src='https://raw.githubusercontent.com/bradenrc/sparksql_pot/master/sparkSQL1.png' width="80%" height="80%"></img>
###Output
_____no_output_____
###Markdown
Getting started: Create a SQL Context from the Spark Context, sc, which is predefined in every notebook
###Code
from pyspark.sql import SQLContext
sqlContext = SQLContext(sc)
###Output
_____no_output_____
###Markdown
SQL Context queries Dataframes, not RDDs. A data file on world banks will be downloaded from GitHub after removing any previous data that may exist
###Code
!rm world_bank* -f
!wget https://raw.githubusercontent.com/bradenrc/sparksql_pot/master/world_bank.json.gz
###Output
--2016-03-30 16:20:21-- https://raw.githubusercontent.com/bradenrc/sparksql_pot/master/world_bank.json.gz
Resolving raw.githubusercontent.com (raw.githubusercontent.com)... 23.235.47.133
Connecting to raw.githubusercontent.com (raw.githubusercontent.com)|23.235.47.133|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 446287 (436K) [application/octet-stream]
Saving to: 'world_bank.json.gz'
100%[======================================>] 446,287 --.-K/s in 0.04s
2016-03-30 16:20:22 (11.7 MB/s) - 'world_bank.json.gz' saved [446287/446287]
###Markdown
A Dataframe will be created using the sqlContext to read the file. Many other types are supported including text and Parquet
###Code
example1_df = sqlContext.read.json("world_bank.json.gz")
###Output
_____no_output_____
###Markdown
Spark SQL has the ability to infer the schema of JSON data and understand the structure of the data
###Code
print example1_df.printSchema()
###Output
root
|-- _id: struct (nullable = true)
| |-- $oid: string (nullable = true)
|-- approvalfy: string (nullable = true)
|-- board_approval_month: string (nullable = true)
|-- boardapprovaldate: string (nullable = true)
|-- borrower: string (nullable = true)
|-- closingdate: string (nullable = true)
|-- country_namecode: string (nullable = true)
|-- countrycode: string (nullable = true)
|-- countryname: string (nullable = true)
|-- countryshortname: string (nullable = true)
|-- docty: string (nullable = true)
|-- envassesmentcategorycode: string (nullable = true)
|-- grantamt: long (nullable = true)
|-- ibrdcommamt: long (nullable = true)
|-- id: string (nullable = true)
|-- idacommamt: long (nullable = true)
|-- impagency: string (nullable = true)
|-- lendinginstr: string (nullable = true)
|-- lendinginstrtype: string (nullable = true)
|-- lendprojectcost: long (nullable = true)
|-- majorsector_percent: array (nullable = true)
| |-- element: struct (containsNull = true)
| | |-- Name: string (nullable = true)
| | |-- Percent: long (nullable = true)
|-- mjsector_namecode: array (nullable = true)
| |-- element: struct (containsNull = true)
| | |-- code: string (nullable = true)
| | |-- name: string (nullable = true)
|-- mjtheme: array (nullable = true)
| |-- element: string (containsNull = true)
|-- mjtheme_namecode: array (nullable = true)
| |-- element: struct (containsNull = true)
| | |-- code: string (nullable = true)
| | |-- name: string (nullable = true)
|-- mjthemecode: string (nullable = true)
|-- prodline: string (nullable = true)
|-- prodlinetext: string (nullable = true)
|-- productlinetype: string (nullable = true)
|-- project_abstract: struct (nullable = true)
| |-- cdata: string (nullable = true)
|-- project_name: string (nullable = true)
|-- projectdocs: array (nullable = true)
| |-- element: struct (containsNull = true)
| | |-- DocDate: string (nullable = true)
| | |-- DocType: string (nullable = true)
| | |-- DocTypeDesc: string (nullable = true)
| | |-- DocURL: string (nullable = true)
| | |-- EntityID: string (nullable = true)
|-- projectfinancialtype: string (nullable = true)
|-- projectstatusdisplay: string (nullable = true)
|-- regionname: string (nullable = true)
|-- sector: array (nullable = true)
| |-- element: struct (containsNull = true)
| | |-- Name: string (nullable = true)
|-- sector1: struct (nullable = true)
| |-- Name: string (nullable = true)
| |-- Percent: long (nullable = true)
|-- sector2: struct (nullable = true)
| |-- Name: string (nullable = true)
| |-- Percent: long (nullable = true)
|-- sector3: struct (nullable = true)
| |-- Name: string (nullable = true)
| |-- Percent: long (nullable = true)
|-- sector4: struct (nullable = true)
| |-- Name: string (nullable = true)
| |-- Percent: long (nullable = true)
|-- sector_namecode: array (nullable = true)
| |-- element: struct (containsNull = true)
| | |-- code: string (nullable = true)
| | |-- name: string (nullable = true)
|-- sectorcode: string (nullable = true)
|-- source: string (nullable = true)
|-- status: string (nullable = true)
|-- supplementprojectflg: string (nullable = true)
|-- theme1: struct (nullable = true)
| |-- Name: string (nullable = true)
| |-- Percent: long (nullable = true)
|-- theme_namecode: array (nullable = true)
| |-- element: struct (containsNull = true)
| | |-- code: string (nullable = true)
| | |-- name: string (nullable = true)
|-- themecode: string (nullable = true)
|-- totalamt: long (nullable = true)
|-- totalcommamt: long (nullable = true)
|-- url: string (nullable = true)
None
###Markdown
Let's take a look at the first two rows of data
###Code
for row in example1_df.take(2):
print row
print "*" * 20
###Output
Row(_id=Row($oid=u'52b213b38594d8a2be17c780'), approvalfy=u'1999', board_approval_month=u'November', boardapprovaldate=u'2013-11-12T00:00:00Z', borrower=u'FEDERAL DEMOCRATIC REPUBLIC OF ETHIOPIA', closingdate=u'2018-07-07T00:00:00Z', country_namecode=u'Federal Democratic Republic of Ethiopia!$!ET', countrycode=u'ET', countryname=u'Federal Democratic Republic of Ethiopia', countryshortname=u'Ethiopia', docty=u'Project Information Document,Indigenous Peoples Plan,Project Information Document', envassesmentcategorycode=u'C', grantamt=0, ibrdcommamt=0, id=u'P129828', idacommamt=130000000, impagency=u'MINISTRY OF EDUCATION', lendinginstr=u'Investment Project Financing', lendinginstrtype=u'IN', lendprojectcost=550000000, majorsector_percent=[Row(Name=u'Education', Percent=46), Row(Name=u'Education', Percent=26), Row(Name=u'Public Administration, Law, and Justice', Percent=16), Row(Name=u'Education', Percent=12)], mjsector_namecode=[Row(code=u'EX', name=u'Education'), Row(code=u'EX', name=u'Education'), Row(code=u'BX', name=u'Public Administration, Law, and Justice'), Row(code=u'EX', name=u'Education')], mjtheme=[u'Human development'], mjtheme_namecode=[Row(code=u'8', name=u'Human development'), Row(code=u'11', name=u'')], mjthemecode=u'8,11', prodline=u'PE', prodlinetext=u'IBRD/IDA', productlinetype=u'L', project_abstract=Row(cdata=u'The development objective of the Second Phase of General Education Quality Improvement Project for Ethiopia is to improve learning conditions in primary and secondary schools and strengthen institutions at different levels of educational administration. The project has six components. The first component is curriculum, textbooks, assessment, examinations, and inspection. This component will support improvement of learning conditions in grades KG-12 by providing increased access to teaching and learning materials and through improvements to the curriculum by assessing the strengths and weaknesses of the current curriculum. This component has following four sub-components: (i) curriculum reform and implementation; (ii) teaching and learning materials; (iii) assessment and examinations; and (iv) inspection. The second component is teacher development program (TDP). This component will support improvements in learning conditions in both primary and secondary schools by advancing the quality of teaching in general education through: (a) enhancing the training of pre-service teachers in teacher education institutions; and (b) improving the quality of in-service teacher training. This component has following three sub-components: (i) pre-service teacher training; (ii) in-service teacher training; and (iii) licensing and relicensing of teachers and school leaders. The third component is school improvement plan. This component will support the strengthening of school planning in order to improve learning outcomes, and to partly fund the school improvement plans through school grants. It has following two sub-components: (i) school improvement plan; and (ii) school grants. The fourth component is management and capacity building, including education management information systems (EMIS). This component will support management and capacity building aspect of the project. This component has following three sub-components: (i) capacity building for education planning and management; (ii) capacity building for school planning and management; and (iii) EMIS. 
The fifth component is improving the quality of learning and teaching in secondary schools and universities through the use of information and communications technology (ICT). It has following five sub-components: (i) national policy and institution for ICT in general education; (ii) national ICT infrastructure improvement plan for general education; (iii) develop an integrated monitoring, evaluation, and learning system specifically for the ICT component; (iv) teacher professional development in the use of ICT; and (v) provision of limited number of e-Braille display readers with the possibility to scale up to all secondary education schools based on the successful implementation and usage of the readers. The sixth component is program coordination, monitoring and evaluation, and communication. It will support institutional strengthening by developing capacities in all aspects of program coordination, monitoring and evaluation; a new sub-component on communications will support information sharing for better management and accountability. It has following three sub-components: (i) program coordination; (ii) monitoring and evaluation (M and E); and (iii) communication.'), project_name=u'Ethiopia General Education Quality Improvement Project II', projectdocs=[Row(DocDate=u'28-AUG-2013', DocType=u'PID', DocTypeDesc=u'Project Information Document (PID), Vol.', DocURL=u'http://www-wds.worldbank.org/servlet/WDSServlet?pcont=details&eid=090224b081e545fb_1_0', EntityID=u'090224b081e545fb_1_0'), Row(DocDate=u'01-JUL-2013', DocType=u'IP', DocTypeDesc=u'Indigenous Peoples Plan (IP), Vol.1 of 1', DocURL=u'http://www-wds.worldbank.org/servlet/WDSServlet?pcont=details&eid=000442464_20130920111729', EntityID=u'000442464_20130920111729'), Row(DocDate=u'22-NOV-2012', DocType=u'PID', DocTypeDesc=u'Project Information Document (PID), Vol.', DocURL=u'http://www-wds.worldbank.org/servlet/WDSServlet?pcont=details&eid=090224b0817b19e2_1_0', EntityID=u'090224b0817b19e2_1_0')], projectfinancialtype=u'IDA', projectstatusdisplay=u'Active', regionname=u'Africa', sector=[Row(Name=u'Primary education'), Row(Name=u'Secondary education'), Row(Name=u'Public administration- Other social services'), Row(Name=u'Tertiary education')], sector1=Row(Name=u'Primary education', Percent=46), sector2=Row(Name=u'Secondary education', Percent=26), sector3=Row(Name=u'Public administration- Other social services', Percent=16), sector4=Row(Name=u'Tertiary education', Percent=12), sector_namecode=[Row(code=u'EP', name=u'Primary education'), Row(code=u'ES', name=u'Secondary education'), Row(code=u'BS', name=u'Public administration- Other social services'), Row(code=u'ET', name=u'Tertiary education')], sectorcode=u'ET,BS,ES,EP', source=u'IBRD', status=u'Active', supplementprojectflg=u'N', theme1=Row(Name=u'Education for all', Percent=100), theme_namecode=[Row(code=u'65', name=u'Education for all')], themecode=u'65', totalamt=130000000, totalcommamt=130000000, url=u'http://www.worldbank.org/projects/P129828/ethiopia-general-education-quality-improvement-project-ii?lang=en')
********************
Row(_id=Row($oid=u'52b213b38594d8a2be17c781'), approvalfy=u'2015', board_approval_month=u'November', boardapprovaldate=u'2013-11-04T00:00:00Z', borrower=u'GOVERNMENT OF TUNISIA', closingdate=None, country_namecode=u'Republic of Tunisia!$!TN', countrycode=u'TN', countryname=u'Republic of Tunisia', countryshortname=u'Tunisia', docty=u'Project Information Document,Integrated Safeguards Data Sheet,Integrated Safeguards Data Sheet,Project Information Document,Integrated Safeguards Data Sheet,Project Information Document', envassesmentcategorycode=u'C', grantamt=4700000, ibrdcommamt=0, id=u'P144674', idacommamt=0, impagency=u'MINISTRY OF FINANCE', lendinginstr=u'Specific Investment Loan', lendinginstrtype=u'IN', lendprojectcost=5700000, majorsector_percent=[Row(Name=u'Public Administration, Law, and Justice', Percent=70), Row(Name=u'Public Administration, Law, and Justice', Percent=30)], mjsector_namecode=[Row(code=u'BX', name=u'Public Administration, Law, and Justice'), Row(code=u'BX', name=u'Public Administration, Law, and Justice')], mjtheme=[u'Economic management', u'Social protection and risk management'], mjtheme_namecode=[Row(code=u'1', name=u'Economic management'), Row(code=u'6', name=u'Social protection and risk management')], mjthemecode=u'1,6', prodline=u'RE', prodlinetext=u'Recipient Executed Activities', productlinetype=u'L', project_abstract=None, project_name=u'TN: DTF Social Protection Reforms Support', projectdocs=[Row(DocDate=u'29-MAR-2013', DocType=u'PID', DocTypeDesc=u'Project Information Document (PID), Vol.1 of 1', DocURL=u'http://www-wds.worldbank.org/servlet/WDSServlet?pcont=details&eid=000333037_20131024115616', EntityID=u'000333037_20131024115616'), Row(DocDate=u'29-MAR-2013', DocType=u'ISDS', DocTypeDesc=u'Integrated Safeguards Data Sheet (ISDS), Vol.1 of 1', DocURL=u'http://www-wds.worldbank.org/servlet/WDSServlet?pcont=details&eid=000356161_20131024151611', EntityID=u'000356161_20131024151611'), Row(DocDate=u'29-MAR-2013', DocType=u'ISDS', DocTypeDesc=u'Integrated Safeguards Data Sheet (ISDS), Vol.1 of 1', DocURL=u'http://www-wds.worldbank.org/servlet/WDSServlet?pcont=details&eid=000442464_20131031112136', EntityID=u'000442464_20131031112136'), Row(DocDate=u'29-MAR-2013', DocType=u'PID', DocTypeDesc=u'Project Information Document (PID), Vol.1 of 1', DocURL=u'http://www-wds.worldbank.org/servlet/WDSServlet?pcont=details&eid=000333037_20131031105716', EntityID=u'000333037_20131031105716'), Row(DocDate=u'16-JAN-2013', DocType=u'ISDS', DocTypeDesc=u'Integrated Safeguards Data Sheet (ISDS), Vol.1 of 1', DocURL=u'http://www-wds.worldbank.org/servlet/WDSServlet?pcont=details&eid=000356161_20130305113209', EntityID=u'000356161_20130305113209'), Row(DocDate=u'16-JAN-2013', DocType=u'PID', DocTypeDesc=u'Project Information Document (PID), Vol.1 of 1', DocURL=u'http://www-wds.worldbank.org/servlet/WDSServlet?pcont=details&eid=000356161_20130305113716', EntityID=u'000356161_20130305113716')], projectfinancialtype=u'OTHER', projectstatusdisplay=u'Active', regionname=u'Middle East and North Africa', sector=[Row(Name=u'Public administration- Other social services'), Row(Name=u'General public administration sector')], sector1=Row(Name=u'Public administration- Other social services', Percent=70), sector2=Row(Name=u'General public administration sector', Percent=30), sector3=None, sector4=None, sector_namecode=[Row(code=u'BS', name=u'Public administration- Other social services'), Row(code=u'BZ', name=u'General public administration sector')], sectorcode=u'BZ,BS', source=u'IBRD', 
status=u'Active', supplementprojectflg=u'N', theme1=Row(Name=u'Other economic management', Percent=30), theme_namecode=[Row(code=u'24', name=u'Other economic management'), Row(code=u'54', name=u'Social safety nets')], themecode=u'54,24', totalamt=0, totalcommamt=4700000, url=u'http://www.worldbank.org/projects/P144674?lang=en')
********************
###Markdown
Now let's register a table which is a pointer to the Dataframe and allows data access via Spark SQL
###Code
#Simply use the Dataframe Object to create the table:
example1_df.registerTempTable("world_bank")
#now that the table is registered we can execute sql commands
#NOTE that the returned object is another Dataframe:
temp_df = sqlContext.sql("select * from world_bank limit 2")
print type(temp_df)
print "*" * 20
print temp_df
#one nice feature of the notebooks and python is that we can show it in a table via Pandas
sqlContext.sql("select id, borrower from world_bank limit 2").toPandas()
#Here is a simple group by example:
query = """
select
regionname ,
count(*) as project_count
from world_bank
group by regionname
order by count(*) desc
"""
sqlContext.sql(query).toPandas()
#subselect works as well:
query = """
select * from
(select
regionname ,
count(*) as project_count
from world_bank
group by regionname
order by count(*) desc) table_alias
limit 2
"""
sqlContext.sql(query).toPandas()
###Output
_____no_output_____
###Markdown
Simple Example of Adding a Schema (headers) to an RDD and using it as a Dataframe. In the example below a simple RDD is created with random data in two columns and an ID column.
###Code
import random
#first let's create a simple RDD
#create a Python list of lists for our example
data_e2 = []
for x in range(1,6):
random_int = int(random.random() * 10)
data_e2.append([x, random_int, random_int**2]) #use ** for squaring; ^ is bitwise XOR in Python
#create the RDD with the random list of lists
rdd_example2 = sc.parallelize(data_e2)
print rdd_example2.collect()
from pyspark.sql.types import *
#now we can assign some header information
# The schema is encoded in a string.
schemaString = "ID VAL1 VAL2"
fields = [StructField(field_name, StringType(), True) for field_name in schemaString.split()]
schema = StructType(fields)
# Apply the schema to the RDD.
schemaExample = sqlContext.createDataFrame(rdd_example2, schema)
# Register the DataFrame as a table.
schemaExample.registerTempTable("example2")
# Pull the data
print schemaExample.collect()
#In Dataframes we can reference the column names, for example:
for row in schemaExample.take(2):
print row.ID, row.VAL1, row.VAL2
#Again a simple sql example:
sqlContext.sql("select * from example2").toPandas()
###Output
_____no_output_____
###Markdown
Another Example of creating a Dataframe from an RDD
###Code
#Remember this RDD:
print type(rdd_example2)
print rdd_example2.collect()
#we can use Row to specify the name of the columns with a Map, then use that to create the Dataframe
from pyspark.sql import Row
rdd_example3 = rdd_example2.map(lambda x: Row(id=x[0], val1=x[1], val2=x[2]))
print rdd_example3.collect()
#now we can convert rdd_example3 to a Dataframe
df_example3 = rdd_example3.toDF()
df_example3.registerTempTable("df_example3")
print type(df_example3)
#now a simple SQL statement
sqlContext.sql("select * from df_example3").toPandas()
###Output
_____no_output_____
###Markdown
Joins are supported, here is a simple example with our two new tables. We can join example2 and example3 on ID
###Code
query = """
select
*
from
example2 e2
inner join df_example3 e3 on
e2.id = e3.id
"""
print sqlContext.sql(query).toPandas()
#Alternatively you can join within Python as well
df_example4 = df_example3.join(schemaExample, schemaExample["id"] == df_example3["ID"] )
for row in df_example4.take(5):
print row
###Output
_____no_output_____
###Markdown
One of the more powerful features is the ability to create functions and use them in SQL. Here is a simple example
###Code
#first we create a Python function:
def simple_function(v):
return int(v * 10)
#test the function
print simple_function(3)
#now we can register the function for use in SQL
sqlContext.registerFunction("simple_function", simple_function)
#now we can apply the function in a SQL statement
query = """
select
ID,
VAL1,
VAL2,
simple_function(VAL1) as s_VAL1,
simple_function(VAL2) as s_VAL2
from
example2
"""
sqlContext.sql(query).toPandas()
#note that the VAL1 and VAL2 look like strings, we can cast them as well
query = """
select
ID,
VAL1,
VAL2,
simple_function(cast(VAL1 as int)) as s_VAL1,
simple_function(cast(VAL2 as int)) as s_VAL2
from
example2
"""
sqlContext.sql(query).toPandas()
###Output
_____no_output_____
###Markdown
Pandas Example. Pandas is a common abstraction for working with data in Python. We can turn Pandas Dataframes into Spark Dataframes; the advantage of this could be scale, or allowing us to run SQL statements against the data.
###Code
#import pandas library
import pandas as pd
print pd
###Output
_____no_output_____
###Markdown
First, let's grab some UFO data to play with
###Code
!rm SIGHTINGS.csv -f
!wget https://www.quandl.com/api/v3/datasets/NUFORC/SIGHTINGS.csv
#using the CSV file from earlier, we can create a Pandas Dataframe:
pandas_df = pd.read_csv("SIGHTINGS.csv")
pandas_df.head()
#now convert to Spark Dataframe
spark_df = sqlContext.createDataFrame(pandas_df)
#explore the first two rows:
for row in spark_df.take(2):
print row
#register the Spark Dataframe as a table
spark_df.registerTempTable("ufo_sightings")
#now a SQL statement
print sqlContext.sql("select * from ufo_sightings limit 10").collect()
###Output
_____no_output_____
###Markdown
Visualizing the Data. Here are some simple ways to create charts using Pandas output. In order to display in the notebook we need to tell matplotlib to render inline; at this point, import the supporting libraries as well
###Code
%matplotlib inline
import matplotlib.pyplot as plt, numpy as np
###Output
_____no_output_____
###Markdown
Pandas can call a function "plot" to create the charts. Since most charts are created from aggregates, the recordset should be small enough to store in Pandas. We can take our UFO data from before and create a Pandas Dataframe from the Spark Dataframe
###Code
ufos_df = spark_df.toPandas()
###Output
_____no_output_____
###Markdown
To plot, we call the "plot" method and specify the type, the x and y axis columns, and optionally the size of the chart. Many more details can be found here: http://pandas.pydata.org/pandas-docs/stable/visualization.html
###Code
ufos_df.plot(kind='bar', x='Reports', y='Count', figsize=(12, 5))
###Output
_____no_output_____
###Markdown
This doesn't look good, there are too many observations, let's check how many:
###Code
print sqlContext.sql("select count(*) from ufo_sightings limit 10").collect()
###Output
_____no_output_____
###Markdown
Ideally we could just group by year. There are many ways we could solve that:
1) parse the Reports column in SQL and output the year, then group on it
2) create a simple Python function to parse the year and call it via SQL
3) as shown below: use map against the Dataframe and append a new column with "year" (a commented sketch of option 2 follows the map call below)
The example below takes the existing data for each row and appends a new column "year" by taking the first four characters from the Reports column. Reports looks like this, for example: 2016-01-31
###Code
ufos_df = spark_df.map(lambda x: Row(**dict(x.asDict(), year=int(x.Reports[0:4]))))
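# Hedged sketch of option 2 from the list above (added; not part of the original lab).
# It reuses the UDF pattern demonstrated earlier with simple_function; "report_year" is a
# hypothetical name, and the 'Reports' column is assumed to hold dates like 2016-01-31.
# sqlContext.registerFunction("report_year", lambda r: int(r[0:4]))
# sqlContext.sql("select report_year(Reports) as year from ufo_sightings limit 5").collect()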
###Output
_____no_output_____
###Markdown
Quick check to verify we get the expected results
###Code
print ufos_df.take(5)
###Output
_____no_output_____
###Markdown
Register the new Dataframe as a table "ufo_withyear"
###Code
ufos_df.registerTempTable("ufo_withyear")
###Output
_____no_output_____
###Markdown
Now we can group by year, order by year and filter to the last 66 years
###Code
query = """
select
sum(count) as count,
year
from ufo_withyear
where year > 1950
group by year
order by year
"""
pandas_ufos_withyears = sqlContext.sql(query).toPandas()
pandas_ufos_withyears.plot(kind='bar', x='year', y='count', figsize=(12, 5))
###Output
_____no_output_____ |
req2.3_filter_ectopylasm.ipynb | ###Markdown
The same as in req2.1 and 2.2, but now cleanly from ectopylasm.
###Code
import numpy as np
import ectopylasm as ep
import ipyvolume as ipv
xyz = np.array((np.random.random(1000), np.random.random(1000), np.random.random(1000)))
###Output
_____no_output_____
###Markdown
Define shapes
###Code
thickness = 0.2
# plane
point = (0.5, 0.5, 0.5)
normal = (0, 1, 0) # the normal should be normalized to unit length; (0, 1, 0) already is
# cone
height = 0.5
radius = 0.5
base_pos = ep.Point(0.5, 0.5, 0.5)
cone = ep.Cone(height, radius, base_pos=base_pos)
###Output
_____no_output_____
###Markdown
Filter points
###Code
plane_points = ep.filter_points_plane(xyz, point, normal, thickness)
cone_points = ep.filter_points_cone(xyz, cone, thickness)
###Output
_____no_output_____
###Markdown
Note that plane point filtering is still very slow (on 18 July 2019, 8:41, it takes 1-2 minutes); this will be optimized later. Plot results
###Code
ipv.clear()
ipv.scatter(*xyz, marker='circle_2d')
ep.plot_thick_plane(point, normal, thickness, color='green')
ipv.scatter(*np.array(plane_points).T, marker='circle_2d', color='green')
ep.plot_thick_cone(cone, thickness)
ipv.scatter(*np.array(cone_points).T, marker='circle_2d', color='blue')
ipv.show()
###Output
_____no_output_____ |
examples/national_grid/main.ipynb | ###Markdown
Example 6 - exploring National Grid datafeeds. LW's friend Ayrton has created a Python wrapper for a National Grid API (https://github.com/AyrtonB/NGDataPortal). In this notebook the capabilities of the wrapper will be explored.
###Code
# !pip install NGDataPortal pandas requests matplotlib
from NGDataPortal import Wrapper
from pathlib import Path
Path("output").mkdir(parents=True, exist_ok=True)
stream = 'generation-mix-national'
wrapper = Wrapper(stream)
df = wrapper.query_API()
df.head()
import json
# Inspecting package, I can see a list of streams:
{"contracted-energy-volumes-and-data": "6c33447d-4e15-448d-9ed0-4516a35657a4", "firm-frequency-response-auction-results": "340ae31e-b010-46fc-af87-e89778d438ef", "fast-reserve-tender-reports": "7f9357b2-0591-45d9-8e0d-0bd7d613a5ff", "balancing-services-charging-report-bcr": "06806fef-a9b8-40d7-bbb5-105d662eac14", "current-balancing-services-use-of-system-bsuos-data": "2c05a930-13c2-400f-bd3b-a7e6fb9f61cf", "weekly-wind-availability": "bb375594-dd0b-462b-9063-51e93c607e41", "mbss": "eb3afc32-fe39-4f33-8808-95b4463e20f8", "firm-frequency-response-market-information": "fa1c517f-44e5-470f-813c-5f690dc463fe", "balancing-services-use-of-system-bsuos-daily-cost": "b19a3594-3647-4d06-a119-7d97d538d496", "outturn-voltage-costs": "1b47a532-9f22-49c1-ae2a-d84dcc6d7408", "fast-reserve-market-information-reports": "37e68cbc-ac83-4e52-b10c-b4c49553365f", "bsuos-monthly-cost": "0d638634-1285-41ac-b965-d0e06964a302", "bsuos-monthly-forecast": "a7c7711a-fac4-4bb9-bf23-abea5a2ea616", "short-term-operating-reserve-stor": "ef2bbb5f-ee5c-40c3-bd4b-5a36d1d5f5dc", "system-frequency-data": "f0933bdd-1b0e-4dd3-aa7f-5498df1ba5b9", "short-term-operating-reserve-tender-reports": "88ef0c84-83c5-4c84-9846-6fd44d8a6037", "daily-wind-availability": "7aa508eb-36f5-4298-820f-2fa6745ae2e7", "historic-demand-data": "11252656-007c-45a4-87db-9d5cc6e8535a", "weekly-opmr": "693ca90e-9d48-4a29-92ad-0bf007bba5c2", "daily-opmr": "0eede912-8820-4c66-a58a-f7436d36b95f", "2-day-ahead-demand-forecast": "cda26f27-4bb6-4632-9fb5-2d029ca605e1", "day-ahead-constraint-flows-and-limits": "d7d4ea81-c14d-41a0-8ed2-f281ae9df8d7", "disaggregated-bsad": "48fbc6ea-381e-40d6-9633-d1be09a89a0b", "aggregated-bsad": "cfb65cd4-e41c-4587-9c78-31004827bee6", "balancing-services-adjustment-data-forward-contracts": "7ce8164f-0f0c-4940-b821-ca232e2eefaf", "thermal-constraint-costs": "d195f1d8-7d9e-46f1-96a6-4251e75e9bd0", "daily-demand-update": "177f6fa4-ae49-4182-81ea-0c6b35f26ca6", "balancing-services-use-of-system-bsuos-daily-forecast": "c1be6c7c-c36d-46cb-8038-098075599bb0", "obligatory-reactive-power-service-orps-utilisation": "d91e4fd2-1f27-4d0b-8473-b4b19af7f3dc", "7-day-ahead-national-forecast": "70d3d674-15a6-4e41-83b4-410440c0b0b9", "firm-frequency-response-post-tender-reports": "e692dc29-e94c-4be7-8067-4fc6af8bab22", "upcoming-trades": "48f96ddb-1038-4760-8a39-608713ba163f", "day-ahead-wind-forecast": "b2f03146-f05d-4824-a663-3a4f36090c71", "1-day-ahead-demand-forecast": "aec5601a-7f3e-4c4c-bf56-d8e4184d3c5b", "embedded-wind-and-solar-forecasts": "db6c038f-98af-4570-ab60-24d71ebd0ae5", "generation-mix-national": "0a168493-5d67-4a26-8344-2fe0a5d4d20b"}
stream = 'daily-wind-availability'
wrapper = Wrapper(stream)
df = wrapper.query_API()
df.head()
###Output
{'resource_id': '0a168493-5d67-4a26-8344-2fe0a5d4d20b'}
###Markdown
Plot availability of one wind farm
###Code
ax = df.plot('Date', 'MW', figsize=(12, 6), title='Daily Wind Availablility for BMU ABRBO-1')
ax.set_ylabel("MW of Wind Available")
###Output
_____no_output_____
###Markdown
Plot total wind availability. Need to define a larger query - try using start and end dates
###Code
stream = 'daily-wind-availability'
wrapper = Wrapper(stream)
from datetime import datetime
from datetime import timedelta
start_date = datetime.today()
end_date = start_date + timedelta(days=10)
dt_col = 'Date'
df = wrapper.query_API(start_date=start_date.strftime('%Y-%m-%d'), end_date=end_date.strftime('%Y-%m-%d'), dt_col=dt_col)
len(df)
df
import pandas as pd
wind_availability = df.groupby(['Date', '_id']).sum()['MW'].astype('int32')
plot = wind_availability.plot(figsize=(12, 6), title='Daily Wind Availablility')
plot.get_figure().savefig('output/daily_wind_availability.png')
df.to_csv('daily-wind-availability.csv')
###Output
_____no_output_____
###Markdown
2 day ahead demand forecast
###Code
stream = '2-day-ahead-demand-forecast'
wrapper = Wrapper(stream)
df = wrapper.query_API()
df
###Output
{'resource_id': '7aa508eb-36f5-4298-820f-2fa6745ae2e7'}
###Markdown
1 day ahead demand forecast
###Code
stream = '1-day-ahead-demand-forecast'
wrapper = Wrapper(stream)
df = wrapper.query_API()
df
if df['FORECASTDEMAND'].max() > 50000:
print(f"Big electricity use day tomorrow: {df['FORECASTDEMAND'].max()}MW")
else:
print(f"Peak electricity demand forecast tomorrow: {df['FORECASTDEMAND'].max()}MW")
df[['CP_ST_TIME', 'FORECASTDEMAND']].plot('CP_ST_TIME', 'FORECASTDEMAND', ylim = (0, 50000))
###Output
_____no_output_____
###Markdown
freeze requirements
###Code
# !pip freeze > requirements.txt
###Output
_____no_output_____ |
docs/examples/eog_analyze/eog_analyze.ipynb | ###Markdown
Analyze Electrooculography (EOG) This example can be referenced by [citing the package](https://neuropsychology.github.io/NeuroKit/cite_us.html). This example shows how to use NeuroKit to analyze EOG data.
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import neurokit2 as nk
# This "decorative" cell should be hidden from the docs once this is implemented:
# https://github.com/microsoft/vscode-jupyter/issues/1182
plt.rcParams['figure.figsize'] = [15, 5] # Bigger images
plt.rcParams['font.size']= 14
###Output
_____no_output_____
###Markdown
Explore the EOG signal Let's load the example dataset corresponding to a vertical EOG signal.
###Code
eog_signal = nk.data('eog_100hz.csv')
nk.signal_plot(eog_signal)
###Output
_____no_output_____
###Markdown
Let's zoom in to some areas where clear blinks are present.
###Code
nk.signal_plot(eog_signal[100:1700])
###Output
_____no_output_____
###Markdown
Clean the signal We can now filter the signal to remove some noise and trends.
###Code
eog_cleaned = nk.eog_clean(eog_signal, sampling_rate=100, method='neurokit')
###Output
_____no_output_____
###Markdown
Let's visualize the same chunk and compare the clean version with the original signal.
###Code
nk.signal_plot([eog_signal[100:1700], eog_cleaned[100:1700]],
labels=["Raw Signal", "Cleaned Signal"])
###Output
_____no_output_____
###Markdown
Detect and visualize eye blinks We will now run a peak detection algorithm to detect peak locations.
###Code
blinks = nk.eog_findpeaks(eog_cleaned, sampling_rate=100, method="mne")
blinks
events = nk.epochs_create(eog_cleaned, blinks, sampling_rate=100, epochs_start=-0.3, epochs_end=0.7)
data = nk.epochs_to_array(events) # Convert to 2D array
data = nk.standardize(data) # Rescale so that all the blinks are on the same scale
# Plot with their median (used here as a robust average)
plt.plot(data, linewidth=0.4)
plt.plot(np.median(data, axis=1), linewidth=3, linestyle='--', color="black")
###Output
_____no_output_____ |
BostonHousing.ipynb | ###Markdown
Boston Housing Prediction
- Author: Rishu Shrivastava, Babu Sivaprakasam
- Link: https://www.kaggle.com/c/boston-housing
- Last Update: 02 Apr 2018
Importing libraries
###Code
import pandas as pd
import numpy as np
###Output
_____no_output_____
###Markdown
Loading the boston dataset - Train and Test
###Code
data_path = "C:/Users/Rishu/Desktop/dATA/boston/"
boston_data=pd.read_csv(data_path+'train.csv')
boston_data.info()
boston_data.head()
boston_data_test=pd.read_csv(data_path+'test.csv')
boston_data_test.head()
###Output
_____no_output_____
###Markdown
Understanding the distribution and relationship of the data - Describing the data to understand the mean and std for all of the features
###Code
boston_data.describe()
###Output
_____no_output_____
###Markdown
- Plotting the target price value: "medv"
###Code
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(color_codes=True)
sns.distplot(boston_data['medv'], rug=True, color="b")
plt.axvline(boston_data['medv'].mean(), color="b", linestyle='solid', linewidth=2)
plt.axvline(boston_data['medv'].median(), color="b", linestyle='dashed', linewidth=2)
plt.show()
print ("Mean Price value :",boston_data['medv'].mean())
print ("Standard Deviation:",boston_data['medv'].std())
###Output
C:\Users\Rishu\Anaconda3\lib\site-packages\statsmodels\nonparametric\kdetools.py:20: VisibleDeprecationWarning: using a non-integer number instead of an integer will result in an error in the future
y = X[:m/2+1] + np.r_[0,X[m/2+1:],0]*1j
###Markdown
From the above plot we can understand that the prices of the houses fall around an average of 22.76 (in $1000). So any ML algorithm with bad accuracy will end up predicting the mean value. - Understanding the features and their relation with the price of the Boston houses "medv". From the data set, let us consider the following features (only the prime contenders out of 15 features):
1. **crim** *(per capita crime rate by town)*: An area with a higher crime rate is expected to have a lower price than well established areas.
2. **indus** *(proportion of non-retail business acres per town)*: Non-retail business can be an important factor in house prices as it provides convenience to the house owners and people. But acres of non-retail business land doesn't give much insight into the prices of the house, hence we can give this feature low priority, subject to correlation with medv data.
3. **nox** *(nitrogen oxides concentration)*: Nitrogen oxide can be a major factor in housing prices as the preference for buying a house with lower pollution would be higher.
4. **rm** *(average number of rooms per dwelling)*: The higher the number of rooms, the higher the price.
5. **dis** *(weighted mean of distances to five Boston employment centres)*: The closer the distance to employment centres, the more interest in the area is expected, as it reduces the commute. Not sure about the American way, but closer to offices, higher the house prices.
6. **ptratio** *(pupil-teacher ratio by town)*: Though I am not 100% sure about the relation between pupil and teacher, I am assuming that the higher the pupil-to-teacher ratio, the more people are expected to send kids there, in turn making the prices higher. We can give this feature low priority as it may not be closely related to house pricing, subject to correlation with medv data.
7. **black** *(the proportion of blacks by town)*: Subject to correlation with the target data
8. **lstat** *(lower status of the population)*: People who are earning lower wages are not expected to live in high priced houses. Hence the lower the lstat, the higher the housing prices.
- Finding correlation with target and the selected features
###Code
ax = plt.subplots(figsize = (14,7))
sns.heatmap(boston_data[['crim','indus','nox','rm','dis','rad','tax','ptratio','black','lstat','medv']].corr(),
linecolor = 'white', square=True, annot=True)
plt.show()
sns.jointplot(x='lstat', y='medv', data=boston_data, color="r", kind="reg")
plt.show()
###Output
C:\Users\Rishu\Anaconda3\lib\site-packages\statsmodels\nonparametric\kdetools.py:20: VisibleDeprecationWarning: using a non-integer number instead of an integer will result in an error in the future
y = X[:m/2+1] + np.r_[0,X[m/2+1:],0]*1j
###Markdown
- Most correlated features. Based on the above correlation chart, we would like to consider the features which are most closely related to the target value. The features in consideration are:
1. **indus**
2. **rm**: highest positive correlation with medv (coefficient 0.69)
3. **ptratio**
4. **lstat**: most negatively correlated feature, with a coefficient of -0.74
Now let us visualize the distribution of the 4 selected features in a pairplot
###Code
# Pair plot of the features
sns.pairplot(boston_data[['indus','rm','ptratio','lstat','medv']])
plt.show()
###Output
_____no_output_____
###Markdown
Now let us plot a distribution chart of the selected features. This would help us understand the distribution of the data a little better.
###Code
fig = plt.figure(figsize=(14,7))
plt.subplot(2,2,1)
sns.distplot(boston_data['indus'], rug=True, color="b")
plt.axvline(boston_data['indus'].mean(), color="b", linestyle='solid', linewidth=2)
plt.axvline(boston_data['indus'].median(), color="b", linestyle='dashed', linewidth=2)
plt.subplot(2,2,2)
sns.distplot(boston_data['rm'], rug=True, color="r")
plt.axvline(boston_data['rm'].mean(), color="r", linestyle='solid', linewidth=2)
plt.axvline(boston_data['rm'].median(), color="r", linestyle='dashed', linewidth=2)
plt.subplot(2,2,3)
sns.distplot(boston_data['ptratio'], rug=True, color="g")
plt.axvline(boston_data['ptratio'].mean(), color="g", linestyle='solid', linewidth=2)
plt.axvline(boston_data['ptratio'].median(), color="g", linestyle='dashed', linewidth=2)
plt.subplot(2,2,4)
sns.distplot(boston_data['lstat'], rug=True, color="y")
plt.axvline(boston_data['lstat'].mean(), color="y", linestyle='solid', linewidth=2)
plt.axvline(boston_data['lstat'].median(), color="y", linestyle='dashed', linewidth=2)
plt.show()
###Output
C:\Users\Rishu\Anaconda3\lib\site-packages\statsmodels\nonparametric\kdetools.py:20: VisibleDeprecationWarning: using a non-integer number instead of an integer will result in an error in the future
y = X[:m/2+1] + np.r_[0,X[m/2+1:],0]*1j
###Markdown
From the above dist plots we can conclude that 3 out of 4 features have a skewed data distribution. The feature **RM** is the only one distributed in a somewhat normal fashion. **PTRATIO** is negatively skewed; **LSTAT** and **INDUS** are positively skewed. Now we need to normalize these features to bring the data into a normal distribution.
###Code
fig = plt.figure(figsize=(14,7))
plt.subplot(2,2,1)
sns.distplot(np.log(boston_data['indus']), rug=True, color="b")
plt.axvline(np.log(boston_data['indus']).mean(), color="b", linestyle='solid', linewidth=2)
plt.axvline(np.log(boston_data['indus']).median(), color="b", linestyle='dashed', linewidth=2)
plt.subplot(2,2,2)
sns.distplot(boston_data['rm'], rug=True, color="r")
plt.axvline(boston_data['rm'].mean(), color="r", linestyle='solid', linewidth=2)
plt.axvline(boston_data['rm'].median(), color="r", linestyle='dashed', linewidth=2)
plt.subplot(2,2,3)
sns.distplot(np.log(boston_data['ptratio']), rug=True, color="g")
plt.axvline(np.log(boston_data['ptratio']).mean(), color="g", linestyle='solid', linewidth=2)
plt.axvline(np.log(boston_data['ptratio']).median(), color="g", linestyle='dashed', linewidth=2)
plt.subplot(2,2,4)
sns.distplot(np.log(boston_data['lstat']), rug=True, color="y")
plt.axvline(np.log(boston_data['lstat']).mean(), color="y", linestyle='solid', linewidth=2)
plt.axvline(np.log(boston_data['lstat']).median(), color="y", linestyle='dashed', linewidth=2)
plt.show()
###Output
C:\Users\Rishu\Anaconda3\lib\site-packages\statsmodels\nonparametric\kdetools.py:20: VisibleDeprecationWarning: using a non-integer number instead of an integer will result in an error in the future
y = X[:m/2+1] + np.r_[0,X[m/2+1:],0]*1j
###Markdown
After applying a log transform to the above features, it seems that only **LSTAT** responds well and becomes approximately normally distributed. For **PTRATIO** and **INDUS** the transform does not have any significant impact on the normalization. Now let us examine the correlation between the features and the MEDV feature:
###Code
fig = plt.figure(figsize=(14,7))
plt.subplot(2,2,1)
x = np.log(boston_data[['indus']])
sns.regplot(x=x, y="medv", data=boston_data, color="b")
plt.subplot(2,2,2)
x2 = boston_data[['rm']]
sns.regplot(x=x2, y="medv", data=boston_data, color="r")
plt.subplot(2,2,3)
x3 = np.log(boston_data[['ptratio']])
sns.regplot(x=x3, y="medv", data=boston_data, color="g")
plt.subplot(2,2,4)
x4 = np.log(boston_data[['lstat']])
sns.regplot(x=x4, y="medv", data=boston_data, color="y")
plt.show()
###Output
_____no_output_____
###Markdown
Building the data with the selected features
###Code
boston_data['lstat_log']=np.log(boston_data['lstat'])
boston_data_test['lstat_log_test']=np.log(boston_data_test['lstat'])
#boston_data['ptratio_log']=np.log(boston_data['ptratio'])
#boston_data_test['ptratio_log_test']=np.log(boston_data_test['ptratio'])
#boston_data['indus_log']=np.log(boston_data['indus'])
#boston_data_test['indus_log_test']=np.log(boston_data_test['indus'])
X = boston_data[['rm','lstat_log']]
X_bd_test=boston_data_test[['rm','lstat_log_test']]
y = boston_data[['medv']]
###Output
_____no_output_____
###Markdown
- Splitting the train data for train and cross-validation datasets
###Code
from sklearn.model_selection import train_test_split
X_train, X_cv, y_train, y_cv = train_test_split(X, y, random_state=0)
print(len(X_train), len(y_train), len(X_cv), len(y_cv))
###Output
249 249 84 84
###Markdown
Model Fitting - Using DecisionTreeRegressor for finding the maximum score
###Code
from sklearn.tree import DecisionTreeRegressor
max_score = 0
max_depth = 0
def decision_tree(j):
dtr = DecisionTreeRegressor(random_state=0,max_depth=j)
return dtr.fit(X_train, y_train)
for i in range(1,11):
_dtr = decision_tree(i)
clf_score = _dtr.score(X_cv,y_cv)
print("Decision Tree Regressor at max_depth:",i," scored: ",clf_score)
if clf_score>max_score:
max_score = clf_score
max_depth = i
###Output
Decision Tree Regressor at max_depth: 1 scored: 0.459730776541
Decision Tree Regressor at max_depth: 2 scored: 0.725856285508
Decision Tree Regressor at max_depth: 3 scored: 0.776509251005
Decision Tree Regressor at max_depth: 4 scored: 0.778173690897
Decision Tree Regressor at max_depth: 5 scored: 0.759604780937
Decision Tree Regressor at max_depth: 6 scored: 0.770327766409
Decision Tree Regressor at max_depth: 7 scored: 0.776652032069
Decision Tree Regressor at max_depth: 8 scored: 0.735726562605
Decision Tree Regressor at max_depth: 9 scored: 0.750414744115
Decision Tree Regressor at max_depth: 10 scored: 0.738548174904
###Markdown
- Selecting the max depth
###Code
print("The maximum score is achieved at a depth of : ",max_depth," with score of :",max_score)
###Output
The maximum score is achieved at a depth of : 4 with score of : 0.778173690897
###Markdown
- Training the model with max_depth
###Code
dtr_clf = decision_tree(max_depth)
###Output
_____no_output_____
###Markdown
- Finding the importance of feature in the regression algorithm
###Code
sns.barplot(X_train.columns, dtr_clf.feature_importances_)
plt.show()
###Output
_____no_output_____
###Markdown
We can conclude that **rm** and **lstat** are two of the most important factors in the prices of houses in the Boston area. - Visualizing the decision made on the dataset
###Code
from IPython.display import Image
import pydotplus
from sklearn.externals.six import StringIO
from sklearn import tree
dot_data = StringIO()
tree.export_graphviz(dtr_clf, out_file=dot_data,
feature_names=X_train.columns,
class_names="medv",
filled=True, rounded=True,
special_characters=True)
graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
Image(graph.create_png())
###Output
_____no_output_____
###Markdown
Create Final Submission (Kaggle only) - Plotting the prediction against the TEST data
###Code
bd_predict = dtr_clf.predict(X_bd_test)
plt.scatter(boston_data_test['ID'],bd_predict)
plt.show()
print ("Mean Price value before modelling:",boston_data['medv'].mean())
print ("Mean Price value after modelling :",bd_predict.mean())
###Output
Mean Price value before modelling: 22.768768768768783
Mean Price value after modelling : 22.8302041087
###Markdown
- Generate the test dataframe as csv output
###Code
submission = pd.DataFrame({
"ID": boston_data_test['ID'],
"medv": bd_predict
})
submission.to_csv(data_path+'output.csv', index=False)
###Output
_____no_output_____ |
char-prediction/himym/HIMYM_Predictor.ipynb | ###Markdown
Imports and Initial Config
###Code
import os
import torch
import pickle
import numpy as np
from google.colab import drive
from torch import nn
from torch import optim
from datetime import datetime
dev = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
###Output
_____no_output_____
###Markdown
Data Loading and Preprocessing Load Data from Drive
###Code
drive.mount('/content/drive')
data_path = os.path.join("drive", "My Drive", "data", "himym.txt")
with open(data_path, "r") as input_file:
text = input_file.read()
# Print the first letters of the text
text[:150]
###Output
_____no_output_____
###Markdown
Turn Data into Label Encodings
###Code
# Convenience Dictionaries
characters = set(text)
id2char = dict(enumerate(characters))
char2id = {c:i for i,c in enumerate(characters)}
assert char2id[id2char[5]] == 5
num_characters = len(characters)
text_labels = [char2id[c] for c in text]
print("The text consists of {} distinct characters.".format(num_characters))
###Output
The text consists of 93 distinct characters.
###Markdown
Batch Generator
###Code
def to_one_hot(text_labels, num_characters):
eye = torch.eye(num_characters)
X = torch.zeros((text_labels.shape[0], text_labels.shape[1], num_characters))
for i, sentence_labels in enumerate(text_labels):
X[i] = eye[sentence_labels]
return X
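# e.g. to_one_hot(torch.LongTensor([[0, 2]]), 4) gives a tensor of shape [1, 2, 4]
# whose rows are the one-hot vectors for labels 0 and 2 (illustrative example, added)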
# Outputs tensor of X with shape [batch_size, seq_len, num_chars] and y with shape [batch_size, seq_len]
def get_next_training_batch(labels=text_labels, num_chars=num_characters, seq_len=128, batch_size=32):
"""
Returns a training batch generator which itself returns batches with
tuples of the following format
X of shape [batch_size, seq_len, num_chars] (one-hot-encoded) and
y of shape [batch_size, seq_len] (label-encoded)
Arguments:
labels: label encodings of the text to create batches from
num_chars: the total number of characters
seq_len: the length of the character sequence of each batch
batch_size: the number of character sequences per batch
"""
for batch_offset in range(0, len(labels), batch_size * (seq_len + 1)):
if len(labels) < batch_offset + batch_size * (seq_len + 1):
return
batch = labels[batch_offset:batch_offset + batch_size * (seq_len + 1)]
X_text_labels = torch.Tensor([batch[i:i+seq_len] for i in range(0, len(batch), seq_len + 1)]).long()
X_one_hot = to_one_hot(X_text_labels, num_characters)
y_text_labels = torch.Tensor([batch[i+1:i+seq_len+1] for i in range(0, len(batch), seq_len + 1)]).long()
yield X_one_hot.to(dev), y_text_labels.to(dev)
# Test the implementation to see if it generates valid outpus
X_sample, y_sample = next(get_next_training_batch(seq_len=8, batch_size=5))
assert X_sample.shape == torch.Size([5, 8, num_characters])
assert y_sample.shape == torch.Size([5, 8])
assert X_sample[0, 1].argmax().item() == y_sample[0][0]
assert X_sample[1, 2].argmax().item() == y_sample[1][1]
assert X_sample[4, 7].argmax().item() == y_sample[4][6]
def tensor_to_text(tensor):
"""
Converts a tensor representation back to a string representation.
Arguments:
tensor: a torch.Tensor object with the following shape:
3D: [batch_size, seq_len, num_chars]
2D: [batch_size, seq_len]
1D: [seq_len]
"""
if len(tensor.shape) == 3:
return tensor_to_text(tensor.argmax(dim=2))
if len(tensor.shape) == 2:
return [tensor_to_text(line) for line in tensor]
if len(tensor.shape) == 1:
return "".join([tensor_to_text(char_encoding) for char_encoding in tensor])
if len(tensor.shape) == 0:
return id2char[tensor.item()]
print("3D:", tensor_to_text(X_sample))
print("2D:", tensor_to_text(y_sample))
print("1D:", tensor_to_text(torch.Tensor([char2id["J"], char2id["o"], char2id["n"], char2id["a"], char2id["s"]])))
print("0D:", tensor_to_text(torch.tensor(char2id["J"])))
torch.Tensor([15])
###Output
_____no_output_____
###Markdown
Custom GRU Model
###Code
class GRUCell(nn.Module):
def __init__(self, input_size, hidden_size):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
# Weights and Biases
# See https://en.wikipedia.org/wiki/Gated_recurrent_unit#Fully_gated_unit
## z
self.W_xz = nn.Parameter(torch.zeros(self.input_size, self.hidden_size))
self.U_hz = nn.Parameter(torch.zeros(self.hidden_size, self.hidden_size))
self.b_z = nn.Parameter(torch.zeros(self.hidden_size))
## r
self.W_xr = nn.Parameter(torch.zeros_like(self.W_xz))
self.U_hr = nn.Parameter(torch.zeros_like(self.U_hz))
self.b_r = nn.Parameter(torch.zeros_like(self.b_z))
## h
self.W_xh = nn.Parameter(torch.zeros_like(self.W_xz))
self.U_hh = nn.Parameter(torch.zeros_like(self.U_hz))
self.b_h = nn.Parameter(torch.zeros_like(self.b_z))
self.init_weights()
def init_weights(self):
for weight in self.parameters():
if len(weight.shape) > 1:
# Init matrices with random noise
nn.init.xavier_normal_(weight)
else:
# Init biases with zeros
nn.init.zeros_(weight)
def init_hidden(self, batch_size):
h = nn.Parameter(torch.zeros((batch_size, self.hidden_size))).to(dev)
nn.init.zeros_(h)
return h
def forward(self, x, h=None):
"""
Argument shapes:
x is of shape [batch_size, input_size]
h is of shape [batch_size, hidden_size]
Output shape:
h is of shape [batch_size, hidden_size]
"""
assert len(x.shape) == 2
if h is None:
h = self.init_hidden(x.shape[0])
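# Fully gated GRU update, matching the parameter names defined above
# (descriptive comment added for clarity):
#   z = sigmoid(x @ W_xz + h @ U_hz + b_z)                              -- update gate
#   r = sigmoid(x @ W_xr + h @ U_hr + b_r)                              -- reset gate
#   h = z * h_prev + (1 - z) * tanh(x @ W_xh + (r * h_prev) @ U_hh + b_h)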
z = torch.sigmoid(x.mm(self.W_xz) + h.mm(self.U_hz) + self.b_z)
r = torch.sigmoid(x.mm(self.W_xr) + h.mm(self.U_hr) + self.b_r)
h = z * h + (1 - z) * torch.tanh(x.mm(self.W_xh) + (r * h).mm(self.U_hh) + self.b_h)
return h
class CharRNN(nn.Module):
def __init__(self, num_characters=num_characters, hidden_size=512, batch_first=True, drop=0.5):
super().__init__()
self.num_characters = num_characters
self.hidden_size = hidden_size
self.batch_first = batch_first
self.cell = GRUCell(num_characters, hidden_size)
#self.cell = nn.GRU(num_characters, hidden_size)
self.dropout = nn.Dropout(drop)
self.dense = nn.Linear(self.hidden_size, self.num_characters)
def forward(self, X, h_0=None):
"""
Argument shapes:
X is of shape [batch_size, seq_len, num_chars] if self.batch_first
X is of shape [seq_len, batch_size, num_chars] if not self.batch_first
---
h is of shape [batch_size, hidden_size]
Output shapes:
y_hat is of shape [batch_size * seq_len, num_chars]
h_t is of shape [batch_size, hidden_size]
"""
assert len(X.shape) == 3
# Put seq_len in the front
if self.batch_first:
X = X.permute(1, 0, 2)
# X is now of shape [seq_len, batch_size, num_chars]
h_t = h_0
output = torch.zeros((X.shape[0], X.shape[1], self.hidden_size)).to(dev)
for t, x_t in enumerate(X):
# Iterate over sequence
h_t = self.cell(x_t, h_t)
output[t] = h_t # [batch_size, hidden_size]
# TODO: Permute output back?!
output = output.permute(1, 0, 2)
output = output.contiguous().view(-1, self.hidden_size) # [batch_size * seq_len, hidden_size]
output = self.dropout(output)
y_hat = self.dense(output) # [batch_size * seq_len, num_chars]
return y_hat, h_t
###Output
_____no_output_____
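###Markdown
A quick shape sanity check for the custom cell and the full model (a small sketch added for illustration; it only relies on `dev` and `num_characters`, which are defined earlier in this notebook):
###Code
_cell = GRUCell(num_characters, 32).to(dev)
_x = torch.zeros(8, num_characters).to(dev)        # [batch_size, input_size]
_h = _cell(_x)                                      # hidden state is created internally when h is None
assert _h.shape == (8, 32)
_rnn = CharRNN(hidden_size=32).to(dev)
_X = torch.zeros(8, 16, num_characters).to(dev)     # [batch_size, seq_len, num_chars]
_y_hat, _h_t = _rnn(_X)
assert _y_hat.shape == (8 * 16, num_characters)
assert _h_t.shape == (8, 32)
###Output
_____no_output_____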
###Markdown
Training and Prediction
###Code
def predict_next_char(rnn, char, h):
x = to_one_hot(torch.LongTensor([[char2id[char]]]), num_characters).to(dev)
y_hat, h = rnn(x, h)
next_char = tensor_to_text(torch.softmax(y_hat, dim=-1).argmax())[0]
return next_char, h
def predict_text(rnn, h=None, seq_len=150, starting_with="\n"):
result = [c for c in starting_with]
for char in starting_with:
_, h = predict_next_char(rnn, char, h)
current_char = result[-1]
for i in range(seq_len):
current_char, h = predict_next_char(rnn, current_char, h)
result.append(current_char)
return "".join(result)
def train(rnn, n_epochs=50, learning_rate=2e-3, print_every=100, batch_size=64, seq_len=128, predict_len=150):
outname = os.path.join("drive", "My Drive", "results", str(datetime.now()) + ".txt")
with open(outname, "w") as f:
f.write("Training {}, num layers: {}, hidden size: {}, batch size: {}, sequence length: {}".format(str(datetime.now()), 1, rnn.hidden_size, batch_size, seq_len))
rnn.train()
step = 0
losses = []
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(rnn.parameters(), lr=learning_rate)
for epoch in range(n_epochs):
h = None
for X, y in get_next_training_batch(seq_len=seq_len, batch_size=batch_size):
step += 1
rnn.zero_grad()
y_hat, h = rnn(X, h)
h = h.data # in order to not go through entire history
loss = criterion(y_hat, y.view(batch_size * seq_len))
losses.append(loss.item())
loss.backward()
# Apply gradient clipping
nn.utils.clip_grad_norm_(rnn.parameters(), 5)
optimizer.step()
if not step % print_every:
rnn.eval()
running_loss = sum(losses) / len(losses)
losses = []
out_string = "\n-----------\n" \
+ "Epoch: {}".format(epoch + 1) + "/{}".format(n_epochs) \
+ " | Iteration: {}".format(step) \
+ " | Loss {:.5f}".format(running_loss) \
+ "\n-----------\n"
pred_string = predict_text(rnn, seq_len=predict_len)
print(out_string)
print(pred_string)
with open(outname, "a") as f:
f.write("\n" + str(datetime.now()))
f.write(out_string)
f.write(pred_string)
rnn.train()
rnn.eval()
charRNN = CharRNN(hidden_size=1024)
charRNN.to(dev)
train(charRNN, n_epochs=75, predict_len=256)
model_path = os.path.join("drive", "My Drive", "results", str(datetime.now()))  # call now(); without the parentheses this would be the method object, not a timestamp
os.makedirs(model_path, exist_ok=True)
torch.save(charRNN.state_dict(), os.path.join(model_path, "model-state.pth"))
# pickle.dump needs an open file handle, not a path string
with open(os.path.join(model_path, "char2id"), "wb") as f:
    pickle.dump(char2id, f)
with open(os.path.join(model_path, "id2char"), "wb") as f:
    pickle.dump(id2char, f)
charRNN = CharRNN(hidden_size=1024).cuda()
charRNN.load_state_dict(torch.load(os.path.join(model_path, "model-state2020-05-27 20:29:25.892763.pth")))
print(predict_text(charRNN, seq_len=1500, starting_with="Ted: Kids"))
###Output
_____no_output_____ |
week7/7_Assignment.ipynb | ###Markdown
Week 7 Lab: Text Analytics. This week's assignment will focus on text analysis of BBC News articles. **Dataset:** bbc.csv (provided in folder assign_wk7). It consists of 2225 documents from the BBC news website corresponding to stories in five topical areas from 2004-2005. Class labels: 5 (business, entertainment, politics, sport, tech)
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings("ignore")
%matplotlib inline
sns.set()
news = pd.read_csv('assign_wk7/bbc.csv', header=0, usecols=[1,2], names=['raw_text', 'target_names'])
news.head(10)
###Output
_____no_output_____
###Markdown
Text Analytics Lab. **Objective:** To demonstrate all of the text analysis techniques covered in this week's lecture material. Preparation of the text data for analysis: * elimination of stopwords, punctuation, and digits; conversion to lowercase
###Code
news.target_names.value_counts()
print(news.raw_text[3])
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
le.fit_transform(news['target_names'])
news['target'] = le.transform(news['target_names'])
news['target_names'] = le.inverse_transform(news['target'])
news.to_csv('assign_wk7/bbc_renamed.csv', header=True, index=False)
df = pd.read_csv('assign_wk7/bbc_renamed.csv')
df
df['word_cnt'] = df.raw_text.apply(lambda x: len(str(x).split(" ")))
df['char_cnt'] = df.raw_text.str.len()
from nltk.corpus import stopwords
stop = stopwords.words('english')
df['stopwords'] = df.raw_text.apply(lambda x: len([x for x in x.split() if x in stop]))
df['clean_text'] = df.raw_text.apply(lambda x: " ".join(x.lower() for x in x.split()))
df['clean_text'] = df.clean_text.str.replace('\S+@\S+','') #looking for the case of XXXX@XXXX
df['clean_text'] = df.clean_text.str.replace('http\S+','') #looking for http or https web addresses
df['clean_text'] = df.clean_text.str.replace('\S+.com','') #looking for email addresses that end in '.com'
df['clean_text'] = df.clean_text.str.replace('\S+.edu','') #looking for email addresses that end in '.edu'
df['clean_text'] = df.clean_text.str.replace('[^\w\s]', '')
df['clean_text'] = df.clean_text.str.replace('\d+', '')
from nltk.corpus import stopwords, words
stop = stopwords.words('english')
df['clean_text'] = df.clean_text.apply(lambda x: " ".join(w for w in x.split() if w not in stop))
df.head()
###Output
_____no_output_____
###Markdown
Identify the 10 most frequently used words in the text * How about the ten least frequently used words? * How does lemmatization change the most/least frequent words? - Explain and demonstrate this topic
###Code
import nltk
freq = pd.Series(' '.join(df.clean_text).split()).value_counts().to_dict()
top_10 = list(freq.items())[:10]
bottom_10 = list(freq.items())[-10:]
print("The 10 most frequently used words in the text are: \n\n" + str(top_10))
print("\n The 10 least frequently used words in the text are: \n\n" + str(bottom_10))
###Output
The 10 most frequently used words in the text are:
[('said', 7244), ('mr', 3004), ('would', 2554), ('also', 2141), ('people', 1954), ('new', 1942), ('us', 1901), ('one', 1733), ('year', 1628), ('could', 1505)]
The 10 least frequently used words in the text are:
[('aniston', 1), ('joeys', 1), ('dispossessed', 1), ('sixyearrun', 1), ('nephew', 1), ('phoebe', 1), ('butlersloss', 1), ('rotten', 1), ('thirteen', 1), ('mu', 1)]
###Markdown
1 letter words are no friend of mine.
###Code
df['clean_text'] = df.clean_text.apply(lambda x: " ".join(x for x in x.split() if len(x) > 1))
freq = pd.Series(' '.join(df.clean_text).split()).value_counts().to_dict()
bottom_10 = list(freq.items())[-10:]
print("The 10 least frequently used words in the text are: \n\n" + str(bottom_10))
import nltk
from nltk.stem import WordNetLemmatizer
nltk.download('wordnet')
#establish the lemmatizer
wordnet_lemmatizer = WordNetLemmatizer()
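# Quick illustration (not part of the assignment output): the WordNet lemmatizer
# maps inflected forms back to a base word, e.g.
#   wordnet_lemmatizer.lemmatize('years')     -> 'year'
#   wordnet_lemmatizer.lemmatize('companies') -> 'company'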
df['clean_text'] = df.clean_text.apply(lambda x: " ".join(wordnet_lemmatizer.lemmatize(w) for w in x.split()))
df['clean_text'] = df.clean_text.apply(lambda x: " ".join(x for x in x.split() if len(x) > 1))
freq = pd.Series(' '.join(df.clean_text).split()).value_counts().to_dict()
top_10 = list(freq.items())[:10]
bottom_10 = list(freq.items())[-10:]
print("The 10 most frequently used words in the text are: \n\n" + str(top_10))
print("\n The 10 least frequently used words in the text are: \n\n" + str(bottom_10))
###Output
The 10 most frequently used words in the text are:
[('said', 7244), ('mr', 3045), ('year', 2851), ('would', 2554), ('also', 2141), ('people', 2029), ('new', 1942), ('one', 1803), ('could', 1505), ('game', 1461)]
The 10 least frequently used words in the text are:
[('fuck', 1), ('twat', 1), ('malarkey', 1), ('whittle', 1), ('littman', 1), ('circs', 1), ('swears', 1), ('erica', 1), ('congruent', 1), ('mu', 1)]
###Markdown
Oh wow hahaha.... to be fair 'malarkey' is a great word. At least they are the least frequently used words ... right? Lemmatization seemed to remove the words that were a combination of two words, because it couldn't recognize a base word for strings like "areconsulting" or "butlersloss". The most frequent words did not change much except for their frequency counts. Lemmatization detected the word 'years' and converted it to the base word 'year' before tallying the frequency, which increased the count for 'year'. Generate a word cloud for the text
###Code
from wordcloud import WordCloud
wc = WordCloud(width=1000, height=600, max_words=200).generate_from_frequencies(freq)
import matplotlib.pyplot as plt
plt.figure(figsize=(10, 10))
plt.imshow(wc, interpolation='bilinear')
plt.axis('off')
plt.show()
###Output
_____no_output_____
###Markdown
Demonstrate the generation of n-grams and part of speech tagging
###Code
tokens = ' '.join(df.clean_text).split()
ngrams_2 = nltk.bigrams(tokens)
freq_2grams = pd.Series(ngrams_2).value_counts().to_dict()
list(freq_2grams.items())[:20]
ngrams_3 = nltk.trigrams(tokens)
freq_3grams = pd.Series(ngrams_3).value_counts().to_dict()
list(freq_3grams.items())[:20]
from nltk.tag import pos_tag
pos_tags = pos_tag(tokens)
pos_tags[:10]
from collections import Counter
pos_tags = Counter([j for i,j in pos_tag(tokens)])
pos_tags
pos_tags_df = pd.DataFrame.from_dict(pos_tags, orient='index', columns=['qty'])
pos_tags_df.to_csv('assign_wk7/pos_tags.csv')
postag = pd.read_csv('assign_wk7/pos_tags.csv', header=0, names=['tag', 'qty'])
most_common_pos = postag.sort_values(by='qty', ascending=False)
mcp = most_common_pos.head(-10)
mcp
least_common_pos = postag.sort_values(by='qty', ascending=True)
lcp = least_common_pos.head(10)
lcp
f, ax = plt.subplots(figsize=(20,10))
sns.barplot(x=mcp['tag'], y=mcp['qty'])
###Output
_____no_output_____
###Markdown
NN (singular nouns) are the greatest.
###Code
f, ax = plt.subplots(figsize=(20,10))
sns.barplot(x=lcp['tag'], y=lcp['qty'])
###Output
_____no_output_____
###Markdown
The least common parts of speech are tied with POS (possessive ending like "'s") and TO (prepositions or infinitive markers like 'to'). Create a Topic model of the text * Find the optimal number of topics * test the accuracy of your model * Display your results 2 different ways. 1) Print the topics and explain any insights at this point. 2) Graph the topics and explain any insights at this point.
###Code
lem_ls = list(df.clean_text.apply(lambda x: list(x.split())))
print(lem_ls[:2])
###Output
[['uk', 'economy', 'facing', 'major', 'risk', 'uk', 'manufacturing', 'sector', 'continue', 'face', 'serious', 'challenge', 'next', 'two', 'year', 'british', 'chamber', 'merce', 'bcc', 'said', 'group', 'quarterly', 'survey', 'panies', 'found', 'export', 'picked', 'last', 'three', 'month', 'best', 'level', 'eight', 'year', 'rise', 'came', 'despite', 'exchange', 'rate', 'cited', 'major', 'concern', 'however', 'bcc', 'found', 'whole', 'uk', 'economy', 'still', 'faced', 'major', 'risk', 'warned', 'growth', 'set', 'slow', 'recently', 'forecast', 'economic', 'growth', 'slow', 'little', 'manufacturer', 'domestic', 'sale', 'growth', 'fell', 'back', 'slightly', 'quarter', 'survey', 'firm', 'found', 'employment', 'manufacturing', 'also', 'fell', 'job', 'expectation', 'lowest', 'level', 'year', 'despite', 'positive', 'news', 'export', 'sector', 'worrying', 'sign', 'manufacturing', 'bcc', 'said', 'result', 'reinforce', 'concern', 'sector', 'persistent', 'inability', 'sustain', 'recovery', 'outlook', 'service', 'sector', 'uncertain', 'despite', 'increase', 'export', 'order', 'quarter', 'bcc', 'noted', 'bcc', 'found', 'confidence', 'increased', 'quarter', 'across', 'manufacturing', 'service', 'sector', 'although', 'overall', 'failed', 'reach', 'level', 'start', 'reduced', 'threat', 'interest', 'rate', 'increase', 'contributed', 'improved', 'confidence', 'said', 'bank', 'england', 'raised', 'interest', 'rate', 'five', 'time', 'november', 'august', 'last', 'year', 'rate', 'kept', 'hold', 'since', 'amid', 'sign', 'falling', 'consumer', 'confidence', 'slowdown', 'output', 'pressure', 'cost', 'margin', 'relentless', 'increase', 'regulation', 'threat', 'higher', 'tax', 'remain', 'serious', 'problem', 'bcc', 'director', 'general', 'david', 'frost', 'said', 'consumer', 'spending', 'set', 'decelerate', 'significantly', 'next', 'month', 'unlikely', 'investment', 'export', 'rise', 'sufficiently', 'strongly', 'pick', 'slack'], ['aid', 'climate', 'top', 'davos', 'agenda', 'climate', 'change', 'fight', 'aid', 'leading', 'list', 'concern', 'first', 'day', 'world', 'economic', 'forum', 'swiss', 'resort', 'davos', 'business', 'political', 'leader', 'around', 'globe', 'listen', 'uk', 'prime', 'minister', 'tony', 'blair', 'opening', 'speech', 'wednesday', 'mr', 'blair', 'focus', 'africa', 'development', 'plan', 'global', 'warming', 'earlier', 'day', 'came', 'update', 'effort', 'million', 'people', 'antiaids', 'drug', 'end', 'world', 'health', 'organisation', 'said', 'people', 'poor', 'country', 'lifeextending', 'drug', 'six', 'month', 'earlier', 'amounting', 'million', 'needed', 'bn', 'funding', 'gap', 'still', 'stood', 'way', 'hitting', 'target', 'said', 'theme', 'stressed', 'mr', 'blair', 'whose', 'attendance', 'announced', 'last', 'minute', 'want', 'dominate', 'uk', 'chairmanship', 'group', 'industrialised', 'state', 'issue', 'discussed', 'fiveday', 'conference', 'range', 'china', 'economic', 'power', 'iraq', 'future', 'sunday', 'election', 'aside', 'mr', 'blair', 'world', 'leader', 'expected', 'attend', 'including', 'french', 'president', 'jacques', 'chirac', 'due', 'speak', 'video', 'link', 'bad', 'weather', 'delayed', 'helicopter', 'south', 'african', 'president', 'thabo', 'mbeki', 'whose', 'arrival', 'delayed', 'ivory', 'coast', 'peace', 'talk', 'ukraine', 'new', 'president', 'viktor', 'yushchenko', 'also', 'newly', 'elected', 'palestinian', 'leader', 'mahmoud', 'abbas', 'showbiz', 'figure', 'also', 'put', 'appearance', 'frontman', 'bono', 'wellknown', 'campaigner', 'trade', 'development', 'issue', 'angelina', 
'jolie', 'goodwill', 'campaigner', 'un', 'refugee', 'unlike', 'previous', 'year', 'protest', 'wef', 'expected', 'muted', 'antiglobalisation', 'campaigner', 'called', 'demonstration', 'planned', 'weekend', 'time', 'people', 'expected', 'converge', 'brazilian', 'resort', 'porto', 'alegre', 'world', 'social', 'forum', 'socalled', 'antidavos', 'campaigner', 'globalisation', 'fair', 'trade', 'many', 'cause', 'contrast', 'davos', 'forum', 'dominated', 'business', 'issue', 'outsourcing', 'corporate', 'leadership', 'boss', 'fifth', 'world', 'panies', 'led', 'attend', 'survey', 'published', 'eve', 'conference', 'pricewaterhousecoopers', 'said', 'four', 'ten', 'business', 'leader', 'confident', 'panies', 'would', 'see', 'sale', 'rise', 'asian', 'american', 'executive', 'however', 'much', 'confident', 'european', 'counterpart', 'political', 'discussion', 'focusing', 'iran', 'iraq', 'china', 'likely', 'dominate', 'medium', 'attention']]
###Markdown
BTW: After installing gensim and the dependencies of the library, restart jupyter-notebook or it will not work. After issues importing the libraries I found the following solution:MTKnife from StackOverflow said "My problem apparently was trying to import gensim right after installing it, while Jupyter Notebook was running. Restarting Jupyter and Sypder fixed the problems with both environments." Reference: https://stackoverflow.com/questions/61182206/module-smart-open-has-no-attribute-local-file
###Code
import smart_open
import gensim
import gensim.corpora as corpora
id2word = corpora.Dictionary(lem_ls)
corpus = [id2word.doc2bow(post) for post in lem_ls]
###Output
_____no_output_____
###Markdown
And....now we wait
###Code
lda_model = gensim.models.LdaMulticore(corpus=corpus,
id2word=id2word,
num_topics=10,
random_state=42,
chunksize=100,
passes=10,
per_word_topics=True)
print(lda_model.print_topics())
###Output
[(0, '0.015*"said" + 0.015*"year" + 0.010*"bn" + 0.009*"market" + 0.008*"sale" + 0.006*"firm" + 0.006*"price" + 0.006*"bank" + 0.006*"uk" + 0.006*"growth"'), (1, '0.019*"said" + 0.018*"phone" + 0.012*"mobile" + 0.010*"system" + 0.009*"network" + 0.008*"firm" + 0.008*"people" + 0.007*"service" + 0.007*"could" + 0.007*"software"'), (2, '0.025*"music" + 0.011*"cell" + 0.011*"song" + 0.010*"album" + 0.009*"band" + 0.009*"drug" + 0.009*"pp" + 0.008*"court" + 0.008*"artist" + 0.007*"yukos"'), (3, '0.011*"film" + 0.011*"year" + 0.009*"best" + 0.009*"said" + 0.007*"award" + 0.007*"one" + 0.006*"also" + 0.005*"show" + 0.005*"star" + 0.005*"first"'), (4, '0.020*"spyware" + 0.013*"copy" + 0.012*"dvd" + 0.009*"fa" + 0.008*"pirated" + 0.007*"ripguard" + 0.006*"orchestra" + 0.006*"macrovision" + 0.006*"chart" + 0.005*"pany"'), (5, '0.012*"oscar" + 0.010*"aviator" + 0.009*"best" + 0.009*"actor" + 0.008*"dollar" + 0.008*"actress" + 0.007*"nomination" + 0.006*"film" + 0.006*"ray" + 0.006*"baby"'), (6, '0.024*"said" + 0.018*"mr" + 0.011*"would" + 0.008*"government" + 0.006*"people" + 0.006*"party" + 0.006*"say" + 0.006*"labour" + 0.005*"minister" + 0.005*"election"'), (7, '0.013*"said" + 0.012*"people" + 0.010*"technology" + 0.008*"game" + 0.007*"user" + 0.006*"mr" + 0.006*"also" + 0.006*"digital" + 0.006*"music" + 0.005*"new"'), (8, '0.013*"game" + 0.009*"said" + 0.007*"player" + 0.007*"england" + 0.006*"win" + 0.006*"first" + 0.006*"time" + 0.005*"back" + 0.005*"last" + 0.005*"world"'), (9, '0.004*"evans" + 0.004*"ukraine" + 0.003*"walmart" + 0.003*"hamm" + 0.003*"fannie" + 0.002*"ossie" + 0.002*"mae" + 0.002*"borussia" + 0.002*"yushchenko" + 0.002*"dortmund"')]
###Markdown
The top 10 words:- topic 0: said, year, bn, market, sale, firm, price, bank, growth, share- topic 1: said, phone, mobile, system, network, firm, could, people, software, service- topic 2: music, song, album, band, cell, drug, court, artist, pp, yukos- topic 3: film, year, said, best, one, award, also, show, star, first- topic 4: spyware, dvd, copy, fa, pirated, riqguard, chart, macrovision, orchestra, osullivan- topic 5: oscar, aviator, best, actor, dollar, actress, nomination, film, ray, baby- topic 6: said, mr, would, government, people, party, labour, say, minister, election- topic 7: said, people, technology, game, user, mr, digital, also, music, mobile- topic 8: game, said, england, player, win, first, time, back, last, team- topic 9: evans, ukraine, walmart, fannie, hamm, ossie, mae, borussia, yushchenko, dortmund
###Code
from gensim.models import CoherenceModel
coherence_model_lda = CoherenceModel(model=lda_model,
texts=lem_ls,
dictionary=id2word,
coherence='c_v')
coherence_lda = coherence_model_lda.get_coherence()
print('\nCoherence Score: ', coherence_lda)
###Output
Coherence Score: 0.507078407675348
###Markdown
A coherence score of 0.50 could be better, so I will now try to optimize the base model.
###Code
scores = []
for i in range(2,15):
    print(f'Calculating for {i} topics')
lda_model = gensim.models.LdaMulticore(corpus=corpus,
id2word=id2word,
num_topics=i,
random_state=42,
chunksize=100,
passes=10,
per_word_topics=True)
# compute the coherence score
coherence_model_lda = CoherenceModel(model=lda_model,
texts=lem_ls,
dictionary=id2word,
coherence='c_v')
    # retrieve the coherence scores
coherence_lda = coherence_model_lda.get_coherence()
scores.append((i,coherence_lda))
scores
###Output
_____no_output_____
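###Markdown
To make the choice of topic count easier to see, here is a quick plot of the coherence scores collected above (a small sketch that just reuses the `scores` list):
###Code
topic_counts, coherence_values = zip(*scores)
plt.figure(figsize=(8,5))
plt.plot(topic_counts, coherence_values, marker='o')
plt.xlabel('Number of topics')
plt.ylabel('Coherence score (c_v)')
plt.show()
###Output
_____no_output_____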
###Markdown
The best model uses 10 topics.
###Code
bf_lda_model = gensim.models.LdaMulticore(corpus=corpus,
id2word=id2word,
num_topics=10,
random_state=42,
chunksize=100,
passes=10,
per_word_topics=True)
import pyLDAvis.gensim_models
import pickle
import pyLDAvis
# Visualize the topics
pyLDAvis.enable_notebook()
LDAvis_prepared = pyLDAvis.gensim_models.prepare(bf_lda_model, corpus, id2word)
pyLDAvis.save_html(LDAvis_prepared,'assign_wk7/news_topic_model_viz.html')
###Output
C:\Users\SCULLY\anaconda3\lib\site-packages\sklearn\decomposition\_lda.py:28: DeprecationWarning: `np.float` is a deprecated alias for the builtin `float`. To silence this warning, use `float` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.float64` here.
Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations
EPS = np.finfo(np.float).eps
|
18CSE124_dmdwlab5_assignment5.ipynb | ###Markdown
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
get_ipython().run_line_magic('matplotlib','inline')
plt.style.use('seaborn-whitegrid')
path="https://raw.githubusercontent.com/chirudukuru/DMDW/main/student-mat.csv"
df=pd.read_csv(path)
df
df1=df[['traveltime','studytime']]
df1.head()
x=df1['traveltime']
y=df1['studytime']
sns.lineplot(x,y, dashes=True)
plt.show()
from scipy.stats import norm
correlation=df1.corr()
print(correlation)
sns.heatmap(correlation,cmap='BrBG')
plt.show()
covar=df1.cov()
print(covar)
sns.heatmap(covar)
plt.show()
# Normalization
df
df.shape
age=np.array(df['age'])
age
age=np.array(df['age'])
print("max age",max(age))
age=age.reshape(395,1)
age=np.array(df['age'])
print("Min age",min(age))
age=age.reshape(395,1)
from scipy import stats
zscore=np.array(stats.zscore(age))
zscore=zscore[0:394]
zscore=zscore.reshape(2,197)
zscore
#Decimal Normalization
dn=[]
dn.append(age/pow(10,2) )
dn=np.array(dn)
dn
#min-max Normalization
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
age=np.array(df['age'])
age=age.reshape(-1, 1)
MinMax = scaler.fit(age)
MinMax
scaler.transform(age)
###Output
_____no_output_____ |
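###Markdown
As a quick check (illustration only), applying the min-max formula (x - min) / (max - min) by hand should reproduce the scaler's output:
###Code
manual_minmax = (age - age.min()) / (age.max() - age.min())
manual_minmax[:5]
###Output
_____no_output_____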
YouTube-tutorials/Examples/Downloading and Saving S&P 100 Stock Pricing Data using IEX Finance.ipynb | ###Markdown
Objective: The purpose of this Jupyter notebook is to show you how to download open, high, low, close, and volume data for companies in the S&P 100. We will use pandas-datareader to download this stock market data, and will save it to disk for use at a later time.
###Code
# Import datetime to set a begin and end
# date on the stock information to download.
from datetime import datetime
# Import pandas to handle to stock data
# we download.
import pandas as pd
# Import pandas datareader to download stock data
# using IEX Finance.
import pandas_datareader.data as web
# Versions used:
# pandas==0.22.0
# pandas-datareader==0.6.0
###Output
_____no_output_____
###Markdown
Getting the S&P 100 Companies: I downloaded a listing of the S&P 100 companies and saved that information in an Excel workbook. This file can be found in the "Datasets" directory I have created. You could also download similar information from here: https://www.barchart.com/stocks/indices/sp/sp100. There you can also find the components of the S&P 500, Russell 2000, and other indices.
###Code
filename = r"Datasets\s&p100_companies.xlsx"
sp_one_hundred = pd.read_excel(filename)
###Output
_____no_output_____
###Markdown
Reviewing our data: As you can see below, there are two columns: Symbol, and Name for the company name. We'll grab the symbol data and use these ticker symbols to help us bulk download the OHLCV dataset in the next step.
###Code
sp_one_hundred.head()
###Output
_____no_output_____
###Markdown
Grab the ticker symbols from the DataFrame above. This will create a NumPy array which contains all of the ticker symbols we are interested in downloading.
###Code
sp_one_hundred_tickers = sp_one_hundred['Symbol'].values
###Output
_____no_output_____
###Markdown
Bulk Downloading the Data: Now that we have all of the ticker symbols, let's download some data! We will first create a variable called stock_data_dictionary. This is going to be a simple Python dictionary, where each key will be a ticker symbol and each value will be a DataFrame containing that stock's OHLCV data. We need to specify the start and end date for the data we wish to download. In this example I'm going to download the past month of data to keep it light. You can go back as far as 5 years to download data using this method through IEX Finance if you wish. Finally, we will use a for loop to go through each ticker symbol and download that data with the pandas-datareader package. We save the ticker symbol as the key and the DataFrame as the value, as seen in the last line of the cell block below.
###Code
stock_data_dictionary = {}
start = datetime(2018, 9, 12)
end = datetime(2018, 10, 12)
for stock in sp_one_hundred_tickers:
stock_data_dictionary[stock] = web.DataReader(stock, 'iex', start, end)
###Output
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
1y
###Markdown
Converting the Dictionary to a Pandas Panel: The next thing we will do is convert the stock_data_dictionary with all of our information into a pandas Panel. I like to use the Panel because I think it is easy to use, and we will be able to save the file directly to disk once it is converted. For future reference, the Panel was deprecated in the 0.20.x release of pandas and shows a FutureWarning in pandas 0.23.x. However, for our purposes we can continue to use it right now.
###Code
stock_data_panel = pd.Panel(stock_data_dictionary)
###Output
_____no_output_____
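###Markdown
Because the Panel is deprecated, a forward-compatible alternative (an optional sketch) is to concatenate the per-ticker DataFrames into a single DataFrame with a (ticker, date) MultiIndex; the rest of this notebook keeps using the Panel.
###Code
stock_data_multi = pd.concat(stock_data_dictionary)
stock_data_multi.index.names = ['ticker', 'date']
stock_data_multi.head()
###Output
_____no_output_____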
###Markdown
Preview the Panel: Let's take a look at our Panel and see how the data is formatted. We see that there are three axes: one for Items, a Major axis, and a Minor axis. The Items axis contains the stock ticker information. The Major axis contains the dates (these will be the rows in our data). The Minor axis contains the OHLCV values (these will be the columns in our data). We can also preview one stock as shown below. For this example we'll use the tail method to look at the most recent pricing data for AAPL stock.
###Code
stock_data_panel
stock_data_panel['AAPL'].tail()
###Output
_____no_output_____
###Markdown
Save the file to disk: Now you can save the file to disk using the to_pickle method. I do this because it helps keep the file size down, but you could save this as an Excel file or csv as well if you wanted to.
###Code
stock_data_panel.to_pickle('s&p_100_pricing_data_9-12-2018_to_10-12-2018')
###Output
_____no_output_____ |
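###Markdown
To reuse the data later, the pickled Panel can be loaded back with pd.read_pickle (a quick sketch):
###Code
reloaded_panel = pd.read_pickle('s&p_100_pricing_data_9-12-2018_to_10-12-2018')
reloaded_panel['AAPL'].tail()
###Output
_____no_output_____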
Copy_of_C3_W2_Lab_2_sarcasm_classifier.ipynb | ###Markdown
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
**Note:** This notebook can run using TensorFlow 2.5.0
###Code
#!pip install tensorflow==2.5.0
import json
import tensorflow as tf
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
vocab_size = 10000
embedding_dim = 16
max_length = 100
trunc_type='post'
padding_type='post'
oov_tok = "<OOV>"
training_size = 20000
# sarcasm.json
!gdown --id 1xRU3xY5-tkiPGvlz5xBJ18_pHWSRzI4v
with open("./sarcasm.json", 'r') as f:
datastore = json.load(f)
sentences = []
labels = []
for item in datastore:
sentences.append(item['headline'])
labels.append(item['is_sarcastic'])
training_sentences = sentences[0:training_size]
testing_sentences = sentences[training_size:]
training_labels = labels[0:training_size]
testing_labels = labels[training_size:]
tokenizer = Tokenizer(num_words=vocab_size, oov_token=oov_tok)
tokenizer.fit_on_texts(training_sentences)
word_index = tokenizer.word_index
training_sequences = tokenizer.texts_to_sequences(training_sentences)
training_padded = pad_sequences(training_sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type)
testing_sequences = tokenizer.texts_to_sequences(testing_sentences)
testing_padded = pad_sequences(testing_sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type)
# Need this block to get it to work with TensorFlow 2.x
import numpy as np
training_padded = np.array(training_padded)
training_labels = np.array(training_labels)
testing_padded = np.array(testing_padded)
testing_labels = np.array(testing_labels)
model = tf.keras.Sequential([
tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length=max_length),
tf.keras.layers.GlobalAveragePooling1D(),
tf.keras.layers.Dense(24, activation='relu'),
tf.keras.layers.Dense(1, activation='sigmoid')
])
model.compile(loss='binary_crossentropy',optimizer='adam',metrics=['accuracy'])
model.summary()
num_epochs = 30
history = model.fit(training_padded, training_labels, epochs=num_epochs, validation_data=(testing_padded, testing_labels), verbose=2)
import matplotlib.pyplot as plt
def plot_graphs(history, string):
plt.plot(history.history[string])
plt.plot(history.history['val_'+string])
plt.xlabel("Epochs")
plt.ylabel(string)
plt.legend([string, 'val_'+string])
plt.show()
plot_graphs(history, "accuracy")
plot_graphs(history, "loss")
reverse_word_index = dict([(value, key) for (key, value) in word_index.items()])
def decode_sentence(text):
return ' '.join([reverse_word_index.get(i, '?') for i in text])
print(decode_sentence(training_padded[0]))
print(training_sentences[2])
print(labels[2])
e = model.layers[0]
weights = e.get_weights()[0]
print(weights.shape) # shape: (vocab_size, embedding_dim)
import io
out_v = io.open('vecs.tsv', 'w', encoding='utf-8')
out_m = io.open('meta.tsv', 'w', encoding='utf-8')
for word_num in range(1, vocab_size):
word = reverse_word_index[word_num]
embeddings = weights[word_num]
out_m.write(word + "\n")
out_v.write('\t'.join([str(x) for x in embeddings]) + "\n")
out_v.close()
out_m.close()
try:
from google.colab import files
except ImportError:
pass
else:
files.download('vecs.tsv')
files.download('meta.tsv')
sentence = ["granny starting to fear spiders in the garden might be real", "game of thrones season finale showing this sunday night"]
sequences = tokenizer.texts_to_sequences(sentence)
padded = pad_sequences(sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type)
print(model.predict(padded))
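# The sigmoid output is a sarcasm probability; thresholding at 0.5 (an arbitrary
# but common choice, added here for illustration) turns it into a 0/1 label.
print((model.predict(padded) > 0.5).astype(int))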
###Output
_____no_output_____ |
dpotatoes.ipynb | ###Markdown
mmdpotatoes: grade potato quality by shape and skin spots. Description: The input image is a gray-scale image of several washed potatoes. The shape of the potatoes is analysed using the skeleton, and the skin spots are detected. These two features can be used to evaluate their visual quality.
###Code
import numpy as np
from PIL import Image
import ia870 as ia
import matplotlib.pyplot as plt
print(ia.__version__)
###Output
ia870 Python Morphology Toolbox version 0.8 25th Aug 2014 - in progress - migrating to Python 3
###Markdown
Reading: The input image is read.
###Code
a_pil = Image.open('data/potatoes.tif').convert('L')
a = np.array (a_pil)
(fig, axes) = plt.subplots(nrows=1, ncols=1,figsize=(10, 5))
axes.set_title('a')
axes.imshow(a, cmap='gray')
axes.axis('off')
###Output
_____no_output_____
###Markdown
Thresholding: Convert to binary objects by thresholding.
###Code
b = ia.iathreshad(a,90)
(fig, axes) = plt.subplots(nrows=1, ncols=1,figsize=(10, 5))
axes.set_title('b')
axes.imshow(b, cmap='gray')
axes.axis('off')
###Output
_____no_output_____
###Markdown
Skeleton of the potato shapes: The binary image is thinned and the result overlaid on the original image.
###Code
c = ia.iathin(b)
(fig, axes) = plt.subplots(nrows=1, ncols=1,figsize=(10, 5))
axes.set_title('a, c')
axes.imshow(ia.iagshow(a, c).transpose(1, 2, 0))
axes.axis('off')
###Output
_____no_output_____
###Markdown
Closing tophat: To detect the skin spots, a closing tophat can enhance the dark areas of the image.
###Code
d = ia.iacloseth(a,ia.iasedisk(5))
(fig, axes) = plt.subplots(nrows=1, ncols=1,figsize=(10, 5))
axes.set_title('d')
axes.imshow(d, cmap='gray')
axes.axis('off')
###Output
_____no_output_____
###Markdown
Thresholding and masking: The tophat is thresholded and the result is masked with the binary image of the potatoes, as we are interested only in the spots inside them.
###Code
e = ia.iathreshad(d,20)
f = ia.iaintersec(e,b)
(fig, axes) = plt.subplots(nrows=1, ncols=1,figsize=(10, 5))
axes.set_title('f')
axes.imshow(f, cmap = 'gray')
axes.axis('off')
###Output
_____no_output_____
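###Markdown
As a rough quantitative summary (a sketch using only the masks computed above), the fraction of the potato area covered by spots can be estimated directly from the two binary images:
###Code
spot_fraction = f.sum() / b.sum()
print('fraction of potato area covered by spots: %.4f' % spot_fraction)
###Output
_____no_output_____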
###Markdown
Final display: Show both results, the skeleton and the skin spots, overlaid on the original image.
###Code
f = ia.iaintersec(e, b)
(fig, axes) = plt.subplots(nrows=1, ncols=2,figsize=(10, 5))
axes[0].set_title('f')
axes[0].imshow(a, cmap = 'gray')
axes[0].axis('off')
axes[1].set_title('a, f, c')
axes[1].imshow(ia.iagshow(a, f, c).transpose(1, 2, 0))
axes[1].axis('off')
###Output
_____no_output_____ |
titanic/.ipynb_checkpoints/titanic_final-v1-checkpoint.ipynb | ###Markdown
Titanic
###Code
# import the required modules
import pandas as pd
import os
import matplotlib.pyplot as plt
%matplotlib inline
try:
from sklearn.compose import ColumnTransformer
except ImportError:
from future_encoders import ColumnTransformer # Scikit-Learn < 0.20
try:
from sklearn.impute import SimpleImputer # Scikit-Learn 0.20+
except ImportError:
from sklearn.preprocessing import Imputer as SimpleImputer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler,OneHotEncoder
from sklearn.base import BaseEstimator,TransformerMixin
###Output
_____no_output_____
###Markdown
0. Check how the data is stored - the data lives in the `data` folder
###Code
!cd /share/GitHub/Kaggle/titanic
!pwd
!ls
!cd data
print('-'*50)
!ls data
###Output
/share/GitHub/Kaggle/titanic
data titanic_final.ipynb Titanicๅญฆไน ็ฌฌไธๆฌกๅฐ็ป_20190414.ipynb
outstanding-case titanic_final-v1.ipynb Untitled.ipynb
--------------------------------------------------
gender_result_2019414.csv gender_submission.csv train.csv
gender_result_2019415.csv test.csv
###Markdown
1. Read the data
###Code
filepath = '/share/GitHub/Kaggle/titanic/data' # absolute path
filename = 'train.csv'
def load_titanic_data(filepath=filepath,filename=filename):
fullpath = os.path.join(filepath,filename)
return pd.read_csv(fullpath)
df_raw = load_titanic_data() # the loaded training data
df_raw.sample(5)
###Output
_____no_output_____
###Markdown
Meaning of each column: - PassengerId: unique id of each sample (not informative, dropped) - Survived: Survival, 0 = No, 1 = Yes, the target variable - Pclass: Ticket class 1 = 1st, 2 = 2nd, 3 = 3rd - Sex: sex - Age: age in years - SibSp: # of siblings / spouses aboard the Titanic - Parch: # of parents / children aboard the Titanic - Ticket: Ticket number (not informative, dropped) - Fare: Passenger fare - Cabin: Cabin number (not informative, dropped) - Embarked: Port of Embarkation, C = Cherbourg, Q = Queenstown, S = Southampton
###Code
df_train = df_raw.copy()
df_train_raw = df_train.drop(['PassengerId','Name','Ticket','Cabin'],axis = 1) # drop returns a new copy and does not modify df_train
df_train_raw.head(5)
df_train.head(5) # df_train still has all of its columns, since drop returned a copy
###Output
_____no_output_____
###Markdown
2. Data cleaning & preprocessing
###Code
df_raw.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 891 entries, 0 to 890
Data columns (total 12 columns):
PassengerId 891 non-null int64
Survived 891 non-null int64
Pclass 891 non-null int64
Name 891 non-null object
Sex 891 non-null object
Age 714 non-null float64
SibSp 891 non-null int64
Parch 891 non-null int64
Ticket 891 non-null object
Fare 891 non-null float64
Cabin 204 non-null object
Embarked 889 non-null object
dtypes: float64(2), int64(5), object(5)
memory usage: 83.6+ KB
###Markdown
- We can see that Age, Cabin and Embarked have missing values, but Cabin has already been dropped, so only the missing Age and Embarked values need to be filled- Take a look at the distribution of Age
###Code
# fig = plt.figure(figsize=(10,10))
df_train_raw.Age.hist(bins=80, figsize=(8,8))
###Output
_____no_output_____
###Markdown
- ๅฏไปฅ็ๅฐๆฏ0ๅฒๅคงไธไบๆ10ๅคไธช- age: Age is fractional if less than 1. If the age is estimated, is it in the form of xx.5- ไธบไบๅ้ค0ๅฒๅฉดๅฟ็ๅฝฑๅ๏ผfillnaๅไธญไฝๆฐ
###Code
df_train_raw.Embarked.value_counts()
###Output
_____no_output_____
###Markdown
- Fill Embarked with the most frequent value
###Code
# to make the later cleaning and transformation steps easier, split df_train_raw into numeric (Num) and categorical (Cat) parts
df_train_num = df_train_raw[['Age','SibSp','Parch','Fare']]
df_train_cat = df_train_raw[['Pclass','Sex','Embarked']]
# custom selector transformer (kept for reference, commented out below)
# class DataFrameSelector(BaseEstimator,TransformerMixin):
# def __init__(self, attribute_names):
# self.attribute = attribute_names
# def fit(self, X, y = None):
# return self
# def transform(self,X):
# return X[self.attribute].values
# lists of the feature column names
num_attribs = list(df_train_num)
cat_attribs = list(df_train_cat)
num_pipeline = Pipeline([
# ('selector',DataFrameSelector(num_attribs)),
('imputer',SimpleImputer(strategy = 'median')),
('std_scaler',StandardScaler()),
])
cat_pipeline = Pipeline([
# ('selector',DataFrameSelector(cat_attribs)),
('imputer',SimpleImputer(strategy = 'most_frequent')),
('label_binarizer', OneHotEncoder()),
])
full_pipeline = ColumnTransformer([
('num_pipeline',num_pipeline,num_attribs),
('cat_pipeline',cat_pipeline,cat_attribs),
])
np_train_prepared = full_pipeline.fit_transform(df_train_raw.drop('Survived', axis=1)) # fit on the feature columns only, so the same fitted pipeline can transform the test set later
np_train_prepared
np_train_labels = df_train_raw['Survived'].values
np_train_labels
###Output
_____no_output_____
###Markdown
Summary- Handle the missing values, then split the features into numeric and categorical groups and process each separately: numeric features are filled with the median, categorical features with the most frequent value- Numeric: standardization- Categorical: one-hot encoding- Some of the concrete implementation details still need to be worked out
3. Train models, in three parts- single models- ensemble methods- model performance comparison Single models: random forest- I won't try too many different models to choose from; just fit a random forest first- Use grid search and randomized search to look for the best hyperparameters- How can a similar pipeline-style process be used to tune the parameters???
###Code
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV,RandomizedSearchCV
param_grid1 = [
# try 12 (3ร4) combinations of hyperparameters
{'n_estimators':[3,10,30],'max_features':[2,4,6,8]},
# then try 6 (2ร3) combinations with bootstrap set as False
{'bootstrap':[False],'n_estimators':[3,10],'max_features':[2,3,4]},
]
forest_cla = RandomForestClassifier(min_samples_split=100,
min_samples_leaf=20,max_depth=10,max_features='sqrt' ,random_state=42)
grid_search = GridSearchCV(forest_cla, param_grid=param_grid1,cv=10,
scoring = 'roc_auc',return_train_score = True)
grid_search.fit(np_train_prepared,np_train_labels)
grid_search.best_params_
grid_search.best_score_
print(grid_search.best_params_,grid_search.best_score_)
from scipy.stats import randint
param_grid2 = {
'n_estimators': randint(low=1, high=100),
'max_features': randint(low=1, high=10),
}
rdm_search = RandomizedSearchCV(forest_cla, param_distributions=param_grid2,cv=10,
scoring = 'roc_auc',return_train_score = True ,n_iter=1000)
rdm_search.fit(np_train_prepared,np_train_labels)
print(rdm_search.best_params_,rdm_search.best_score_)
###Output
{'max_features': 3, 'n_estimators': 63} 0.855305879565
###Markdown
SVM
###Code
from sklearn.svm import SVC
import numpy as np
# grid search
# define the search parameter grid:
param_grid_svc = [
{'kernel':['poly'],'gamma':np.logspace(-3, 3, 5),'C':np.logspace(-2, 3, 5)},
]
rbf_kernel_svm_clf = SVC()
grid_search_svm = GridSearchCV(rbf_kernel_svm_clf, param_grid=param_grid_svc,cv=10,
scoring = 'roc_auc',return_train_score = True)
grid_search_svm.fit(np_train_prepared,np_train_labels)
print(grid_search_svm.best_params_)
###Output
_____no_output_____
###Markdown
Neural network Ensemble models Model performance comparison
###Code
# CROSS_VAL_PREDICT
from sklearn.model_selection import cross_val_predict
from sklearn.metrics import confusion_matrix
np_train_labels_pred = cross_val_predict(grid_search_svm.best_estimator_,np_train_prepared,np_train_labels, cv=10)
confusion_matrix(np_train_labels,np_train_labels_pred)
###Output
_____no_output_____
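###Markdown
A few scalar metrics computed from the same cross-validated predictions make the confusion matrix easier to compare across models (a small sketch using the standard scikit-learn scorers):
###Code
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
print('accuracy :', accuracy_score(np_train_labels, np_train_labels_pred))
print('precision:', precision_score(np_train_labels, np_train_labels_pred))
print('recall   :', recall_score(np_train_labels, np_train_labels_pred))
print('f1       :', f1_score(np_train_labels, np_train_labels_pred))
###Output
_____no_output_____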
###Markdown
Summary- This still needs more study!!!- Grid search tries every combination that is listed- Randomized search samples candidate combinations iteratively --- 4. Test data
###Code
df_test_raw = load_titanic_data('./data','test.csv')
df_test_raw.info()
###Output
_____no_output_____
###Markdown
We can see that Age and Fare need their missing values filled
###Code
# df_train = df_raw.copy()
# df_train_raw = df_train.drop(['PassengerId','Name','Ticket','Cabin'],axis=1) # drop returns a new copy and does not modify df_train
df_test = df_test_raw.copy()
df_test_drop = df_test.drop(['PassengerId','Name','Ticket','Cabin'],axis=1)
df_test_drop.head()
# df_test_num = df_test_drop[['Age','SibSp','Parch','Fare']]
# df_test_cat = df_test_drop[['Pclass','Sex','Embarked']]
np_test_prepared = full_pipeline.transform(df_test_drop) # transform only: reuse the imputation, scaling and encoding fitted on the training data
np_test_prepared
###Output
_____no_output_____
###Markdown
Predict
###Code
final_model = grid_search_svm.best_estimator_
final_prediction = final_model.predict(np_test_prepared)
final_prediction
###Output
_____no_output_____
###Markdown
5. Convert the predictions into the submission format
###Code
num_list = range(892,1310)
result_list = final_prediction.tolist()
df_submission = pd.DataFrame({'PassengerId':num_list,'Survived':result_list})
df_submission.head()
df_submission.to_csv ("gender_result_2019415.csv" , encoding = "utf-8",index=None)
###Output
_____no_output_____ |
Python/ML_DL/DL/Neural-Networks-Demystified-master/.ipynb_checkpoints/Part 4 Backpropagation-checkpoint.ipynb | ###Markdown
Neural Networks Demystified Part 4: Backpropagation @stephencwelch
###Code
from IPython.display import YouTubeVideo
YouTubeVideo('GlcnxUlrtek')
###Output
_____no_output_____
###Markdown
Variables |Code Symbol | Math Symbol | Definition | Dimensions| :-: | :-: | :-: | :-: ||X|$$X$$|Input Data, each row in an example| (numExamples, inputLayerSize)||y |$$y$$|target data|(numExamples, outputLayerSize)||W1 | $$W^{(1)}$$ | Layer 1 weights | (inputLayerSize, hiddenLayerSize) ||W2 | $$W^{(2)}$$ | Layer 2 weights | (hiddenLayerSize, outputLayerSize) ||z2 | $$z^{(2)}$$ | Layer 2 activation | (numExamples, hiddenLayerSize) ||a2 | $$a^{(2)}$$ | Layer 2 activity | (numExamples, hiddenLayerSize) ||z3 | $$z^{(3)}$$ | Layer 3 activation | (numExamples, outputLayerSize) ||J | $$J$$ | Cost | (1, outputLayerSize) ||dJdz3 | $$\frac{\partial J}{\partial z^{(3)} } = \delta^{(3)}$$ | Partial derivative of cost with respect to $z^{(3)}$ | (numExamples,outputLayerSize)||dJdW2|$$\frac{\partial J}{\partial W^{(2)}}$$|Partial derivative of cost with respect to $W^{(2)}$|(hiddenLayerSize, outputLayerSize)||dz3dz2|$$\frac{\partial z^{(3)}}{\partial z^{(2)}}$$|Partial derivative of $z^{(3)}$ with respect to $z^{(2)}$|(numExamples, hiddenLayerSize)||dJdW1|$$\frac{\partial J}{\partial W^{(1)}}$$|Partial derivative of cost with respect to $W^{(1)}$|(inputLayerSize, hiddenLayerSize)||delta2|$$\delta^{(2)}$$|Backpropagating Error 2|(numExamples,hiddenLayerSize)||delta3|$$\delta^{(3)}$$|Backpropagating Error 1|(numExamples,outputLayerSize)| Last time, we decided to use gradient descent to train our Neural Network, so it could make better predictions of your score on a test based on how many hours you slept, and how many hours you studied the night before. To perform gradient descent, we need an equation and some code for our gradient, dJ/dW. Our weights, W, are spread across two matrices, W1 and W2. Weโll separate our dJ/dW computation in the same way, by computing dJdW1 and dJdW2 independently. We should have just as many gradient values as weight values, so when weโre done, our matrices dJdW1 and dJdW2 will be the same size as W1 and W2. $$\frac{\partial J}{\partial W^{(2)}} = \frac{\partial \sum \frac{1}{2}(y-\hat{y})^2}{\partial W^{(2)}}$$ Letโs work on dJdW2 first. The sum in our cost function adds the error from each example to create our overall cost. Weโll take advantage of the sum rule in differentiation, which says that the derivative of the sums equals the sum of the derivatives. We can move our sigma outside and just worry about the derivative of the inside expression first. $$\frac{\partial J}{\partial W^{(2)}} = \sum \frac{\partial \frac{1}{2}(y-\hat{y})^2}{\partial W^{(2)}}$$ To keep things simple, weโll temporarily forget about our summation. Once weโve computed dJdW for a single example, weโll add all our individual derivative terms together. We can now evaluate our derivative. The power rule tells us to bring down our exponent, 2, and multiply. To finish our derivative, weโll need to apply the chain rule. The chain rule tells us how to take the derivative of a function inside of a function, and generally says we take the derivative of the outside function and then multiply it by the derivative of the inside function. One way to express the chain rule is as the product of derivatives, this will come in very handy as we progress through backpropagation. In fact, a better name for backpropagation might be: donโt stop doing the chain rule. ever. Weโve taken the derivative of the outside of our cost function - now we need to multiply it by the derivative of the inside. Y is just our test scores, which wonโt change, so the derivative of y, a constant, with respect to W two is 0! 
yHat, on the other hand, does change with respect to W two, so weโll apply the chain rule and multiply our results by minus dYhat/dW2. $$\frac{\partial J}{\partial W^{(2)}} = -(y-\hat{y}) \frac{\partial \hat{y}}{\partial W^{(2)}}$$ We now need to think about the derivative of yHat with respect to W2. Equation 4 tells us that yHat is our activation function of z3, so we it will be helpful to apply the chain rule again to break dyHat/dW2 into dyHat/dz3 times dz3/dW2. $$\frac{\partial J}{\partial W^{(2)}} = -(y-\hat{y})\frac{\partial \hat{y}}{\partial z^{(3)}} \frac{\partial z^{(3)}}{\partial W^{(2)}}$$ To find the rate of change of yHat with respect to z3, we need to differentiate our sigmoid activation function with respect to z. $$f(z) = \frac{1}{1+e^{-z}}$$ $$f^\prime(z) = \frac{e^{-z}}{(1+e^{-z})^2}$$ Now is a good time to add a new python method for the derivative of our sigmoid function, sigmoid Prime. Our derivative should be the largest where our sigmoid function is the steepest, at the value z equals zero.
###Code
%pylab inline
#Import code from last time
from partTwo import *
def sigmoid(z):
#Apply sigmoid activation function to scalar, vector, or matrix
return 1/(1+np.exp(-z))
def sigmoidPrime(z):
#Derivative of sigmoid function
return np.exp(-z)/((1+np.exp(-z))**2)
testValues = np.arange(-5,5,0.01)
plot(testValues, sigmoid(testValues), linewidth=2)
plot(testValues, sigmoidPrime(testValues), linewidth=2)
grid(1)
legend(['sigmoid', 'sigmoidPrime'])
###Output
_____no_output_____
###Markdown
We can now replace dyHat/dz3 with f prime of z 3. $$\frac{\partial J}{\partial W^{(2)}}= -(y-\hat{y}) f^\prime(z^{(3)}) \frac{\partial z^{(3)}}{\partial W^{(2)}}$$ Our final piece of the puzzle is dz3/dW2. This term represents the change of z, our third layer activity, with respect to the weights in the second layer. Z three is the matrix product of our activities, a two, and our weights, w two. The activities from layer two are multiplied by their corresponding weights and added together to yield z3. If we focus on a single synapse for a moment, we see a simple linear relationship between W and z, where a is the slope. So for each synapse, dz/dW(2) is just the activation, a, on that synapse! $$z^{(3)} = a^{(2)}W^{(2)} \tag{3}\\$$ Another way to think about what the calculus is doing here is that it is "backpropagating" the error to each weight: by multiplying by the activity on each synapse, the weights that contribute more to the error will have larger activations, yield larger dJ/dW2 values, and will be changed more when we perform gradient descent. We need to be careful with our dimensionality here, and if we're clever, we can take care of that summation we got rid of earlier. The first part of our equation, y minus yHat, is of the same dimension as our output data, 3 by 1. F prime of z three is of the same size, 3 by 1, and our first operation is scalar multiplication. Our resulting 3 by 1 matrix is referred to as the backpropagating error, delta 3. We determined that dz3/dW2 is equal to the activity of each synapse. Each value in delta 3 needs to be multiplied by each activity. We can achieve this by transposing a2 and matrix multiplying by delta3. $$\frac{\partial J}{\partial W^{(2)}} = (a^{(2)})^T\delta^{(3)}\tag{6}$$ $$\delta^{(3)} = -(y-\hat{y}) f^\prime(z^{(3)}) $$ What's cool here is that the matrix multiplication also takes care of our earlier omission: it adds up the dJ/dW terms across all our examples. Another way to think about what's happening here is that each example our algorithm sees has a certain cost and a certain gradient. The gradient with respect to each example pulls our gradient descent algorithm in a certain direction. It's like every example gets a vote on which way is downhill, and when we perform batch gradient descent we just add together everyone's vote, call it downhill, and move in that direction. We'll code up our gradients in python in a new method, costFunctionPrime. Numpy's multiply method performs element-wise multiplication, and the dot method performs matrix multiplication.
###Code
# Part of NN Class (won't work alone, needs to be included in class as
# shown in below and in partFour.py):
def costFunctionPrime(self, X, y):
#Compute derivative with respect to W and W2 for a given X and y:
self.yHat = self.forward(X)
delta3 = np.multiply(-(y-self.yHat), self.sigmoidPrime(self.z3))
dJdW2 = np.dot(self.a2.T, delta3)
###Output
_____no_output_____
###Markdown
We have one final term to compute: dJ/dW1. The derivation begins the same way, computing the derivative through our final layer: first dJ/dyHat, then dyHat/dz3, and we called these two taken together our backpropagating error, delta3. We now take the derivative "across" our synapses, which is a little different from our job last time, computing the derivative with respect to the weights on our synapses. $$\frac{\partial J}{\partial W^{(1)}} = -(y-\hat{y})\frac{\partial \hat{y}}{\partial W^{(1)}}$$$$\frac{\partial J}{\partial W^{(1)}} = -(y-\hat{y})\frac{\partial \hat{y}}{\partial z^{(3)}}\frac{\partial z^{(3)}}{\partial W^{(1)}}$$$$\frac{\partial J}{\partial W^{(1)}} = -(y-\hat{y}) f^\prime(z^{(3)}) \frac{\partial z^{(3)}}{\partial W^{(1)}}$$$$\frac{\partial z^{(3)}}{\partial W^{(1)}} = \frac{\partial z^{(3)}}{\partial a^{(2)}}\frac{\partial a^{(2)}}{\partial W^{(1)}}$$ There's still a nice linear relationship along each synapse, but now we're interested in the rate of change of z(3) with respect to a(2). Now the slope is just equal to the weight value for that synapse. We can achieve this mathematically by multiplying by W(2) transpose. $$\frac{\partial J}{\partial W^{(1)}} = \delta^{(3)} (W^{(2)})^{T}\frac{\partial a^{(2)}}{\partial W^{(1)}}$$ $$\frac{\partial J}{\partial W^{(1)}} = \delta^{(3)} (W^{(2)})^{T}\frac{\partial a^{(2)}}{\partial z^{(2)}}\frac{\partial z^{(2)}}{\partial W^{(1)}}$$ Our next term to work on is da(2)/dz(2): this step is just like the derivative across our layer 3 neurons, so we can just multiply by f prime(z2). $$\frac{\partial J}{\partial W^{(1)}} = \delta^{(3)} (W^{(2)})^{T}f^\prime(z^{(2)})\frac{\partial z^{(2)}}{\partial W^{(1)}}$$ Our final computation here is dz2/dW1. This is very similar to our dz3/dW2 computation; there is a simple linear relationship on the synapses between z2 and w1, and in this case the slope is the input value, X. We can use the same technique as last time by multiplying by X transpose, effectively applying the derivative and adding our dJ/dW1's together across all our examples. $$\frac{\partial J}{\partial W^{(1)}} = X^{T}\delta^{(3)} (W^{(2)})^{T}f^\prime(z^{(2)})$$ Or: $$\frac{\partial J}{\partial W^{(1)}} = X^{T}\delta^{(2)} \tag{7}$$ Where: $$\delta^{(2)} = \delta^{(3)} (W^{(2)})^{T}f^\prime(z^{(2)})$$ All that's left is to code this equation up in python. What's cool here is that if we want to make a deeper neural network, we could just stack a bunch of these operations together.
###Code
# Whole Class with additions:
class Neural_Network(object):
def __init__(self):
#Define Hyperparameters
self.inputLayerSize = 2
self.outputLayerSize = 1
self.hiddenLayerSize = 3
#Weights (parameters)
self.W1 = np.random.randn(self.inputLayerSize,self.hiddenLayerSize)
self.W2 = np.random.randn(self.hiddenLayerSize,self.outputLayerSize)
def forward(self, X):
#Propogate inputs though network
self.z2 = np.dot(X, self.W1)
self.a2 = self.sigmoid(self.z2)
self.z3 = np.dot(self.a2, self.W2)
yHat = self.sigmoid(self.z3)
return yHat
def sigmoid(self, z):
#Apply sigmoid activation function to scalar, vector, or matrix
return 1/(1+np.exp(-z))
def sigmoidPrime(self,z):
#Gradient of sigmoid
return np.exp(-z)/((1+np.exp(-z))**2)
def costFunction(self, X, y):
#Compute cost for given X,y, use weights already stored in class.
self.yHat = self.forward(X)
J = 0.5*sum((y-self.yHat)**2)
return J
def costFunctionPrime(self, X, y):
#Compute derivative with respect to W and W2 for a given X and y:
self.yHat = self.forward(X)
delta3 = np.multiply(-(y-self.yHat), self.sigmoidPrime(self.z3))
dJdW2 = np.dot(self.a2.T, delta3)
delta2 = np.dot(delta3, self.W2.T)*self.sigmoidPrime(self.z2)
dJdW1 = np.dot(X.T, delta2)
return dJdW1, dJdW2
###Output
_____no_output_____
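###Markdown
Before trusting these gradients, it is worth checking them numerically (a quick sketch added here; the series covers gradient checking more carefully in a later part). Each weight in W1 is perturbed by a small epsilon and the finite-difference slope of the cost is compared with dJdW1; the same check can be repeated for W2.
###Code
NN_check = Neural_Network()
dJdW1_check, dJdW2_check = NN_check.costFunctionPrime(X, y)
epsilon = 1e-4
numgrad = np.zeros(NN_check.W1.shape)
for i in range(NN_check.W1.shape[0]):
    for j in range(NN_check.W1.shape[1]):
        NN_check.W1[i, j] += epsilon
        lossPlus = NN_check.costFunction(X, y)
        NN_check.W1[i, j] -= 2*epsilon
        lossMinus = NN_check.costFunction(X, y)
        NN_check.W1[i, j] += epsilon   # restore the original weight
        numgrad[i, j] = (lossPlus - lossMinus)/(2*epsilon)
#Should be very small if the analytic gradient is correct:
np.max(np.abs(numgrad - dJdW1_check))
###Output
_____no_output_____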
###Markdown
So how should we change our W's to decrease our cost? We can now compute dJ/dW, which tells us which way is uphill in our 9-dimensional optimization space.
###Code
NN = Neural_Network()
cost1 = NN.costFunction(X,y)
dJdW1, dJdW2 = NN.costFunctionPrime(X,y)
dJdW1
dJdW2
###Output
_____no_output_____
###Markdown
If we move this way by adding a scalar times our derivative to our weights, our cost will increase, and if we do the opposite, subtract our gradient from our weights, we will move downhill and reduce our cost. This simple step downhill is the core of gradient descent and a key part of how even very sophisticated learning algorithms are trained.
###Code
scalar = 3
NN.W1 = NN.W1 + scalar*dJdW1
NN.W2 = NN.W2 + scalar*dJdW2
cost2 = NN.costFunction(X,y)
print cost1, cost2
dJdW1, dJdW2 = NN.costFunctionPrime(X,y)
NN.W1 = NN.W1 - scalar*dJdW1
NN.W2 = NN.W2 - scalar*dJdW2
cost3 = NN.costFunction(X, y)
print cost2, cost3
###Output
0.623202502031 0.429162161135
|
ML-Base-MOOC/chapt-5 PCA/02- PCA.ipynb | ###Markdown
PCA
###Code
import numpy as np
import matplotlib.pyplot as plt
X = np.empty((100, 2))
X[:, 0] = np.random.uniform(0., 100., size=100)
X[:, 1] = 0.75 * X[:, 0] + 3. + np.random.normal(0, 10, size=100)
plt.scatter(X[:, 0], X[:, 1])
###Output
_____no_output_____
###Markdown
1. Demean **(zero-center: subtract the mean from every sample)**
###Code
def demean(X):
    # take the mean along axis 0, i.e. the mean of every column
return X - np.mean(X, axis=0)
X_demean = demean(X)
plt.scatter(X_demean[:, 0], X_demean[:, 1])
np.mean(X_demean, axis=0)
###Output
_____no_output_____
###Markdown
2. Gradient ascent
###Code
# the objective function: the variance of the data projected onto w
def f(w, X):
return np.sum((X.dot(w)**2)) / len(X)
# the gradient, computed analytically
def dF_math(w, X):
return X.T.dot(X.dot(w)) * 2. / len(X)
# the gradient, computed numerically (debug version)
def dF_debug(w, X, epsilon=0.0001):
# debug
res = np.empty(len(w))
for i in range(len(w)):
w_1 = w.copy()
w_1[i] += epsilon
w_2 = w.copy()
w_2[i] -= epsilon
res[i] = (f(w_1, X) - f(w_2, X)) / (2 * epsilon)
return res
# normalize w to a unit vector
def direction(w):
return w / np.linalg.norm(w)
# gradient ascent
def gradient_ascent(dF, X, initial_w, eta, n_iters = 1e4, epsilon=1e-8):
cur_iter = 0
w = direction(initial_w)
while cur_iter < n_iters:
gradient = dF(w, X)
last_w = w
w = w + eta * gradient
        w = direction(w) # note 1: re-normalize to a unit direction at every step
if abs(f(w, X) - f(last_w, X)) < epsilon:
break
cur_iter += 1
return w
initial_w = np.random.random(X.shape[1]) # note 2: cannot start from the zero vector
initial_w
eta = 0.01
###Output
_____no_output_____
###Markdown
Note 3: do not standardize the data with StandardScaler here
###Code
gradient_ascent(dF_debug, X_demean, initial_w, eta)
gradient_ascent(dF_math, X_demean, initial_w, eta)
###Output
_____no_output_____
###Markdown
Plot the result
###Code
w = gradient_ascent(dF_math, X_demean, initial_w, eta)
plt.scatter(X_demean[:, 0], X_demean[:, 1])
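# first plot the point [0, 0], then the end point of the unit vector w (scaled by 50 so it is visible)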
plt.plot([0, w[0]*50], [0, w[1]*50], color='r')
###Output
_____no_output_____
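###Markdown
The component just found can be removed from the data to look for the next one (a short sketch of the standard deflation step; X2 is the data with its projection onto w subtracted):
###Code
X2 = X_demean - X_demean.dot(w).reshape(-1, 1) * w
w2 = gradient_ascent(dF_math, X2, initial_w, eta)
w.dot(w2)   # should be close to 0: the two directions are orthogonal
###Output
_____no_output_____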
teoria/2.1_conseguir_y_analizar_los_datos.ipynb | ###Markdown
Machine Learning project workflow 1. Obtaining and analyzing the data Obtaining data:1. Someone hands us the data (YAY!)2. Public datasets: - [scikit learn](https://scikit-learn.org/stable/datasets/index.html) - [Kaggle](https://www.kaggle.com/) - [UCI Machine Learning Repository](https://archive.ics.uci.edu/ml/datasets.html)3. Internet of Things4. Web crawling: [Scrapy](https://scrapy.org/) Analyzing data: Descriptive analysis- Data types- Measures of central tendency- Measures of skewness and kurtosis- Distribution of the data- Non-central position measures- Measures of dispersion Descriptive analysis- ***Data types***- Measures of central tendency- Measures of skewness and kurtosis- Distribution of the data- Non-central position measures- Measures of dispersion Quantitative variablesExpressed with numbers; they support arithmetic operations.- Discrete variables: take isolated values - `Age: 2, 5, 7, 12, 15, 26, 45, 47, 50, 54, 65, 73`- Continuous variables: take values within an interval between two numbers - `Height: 1.25, 1.27, 1.29, 1.54, 1.67, 1.71, 1.75` Qualitative variablesCharacteristics that cannot be measured with numbers- Nominal qualitative variable: a characteristic that cannot be measured with numbers and has no order - `Sex: male, female`- Ordinal (quasi-quantitative) qualitative variable: characteristics or qualities with an inherent order - `Level: low, medium, high` Descriptive analysis- Data types- ***Measures of central tendency***- Measures of skewness and kurtosis- Distribution of the data- Non-central position measures- Measures of dispersion Measures of central tendencyA set of N observations may tell us nothing on its own. However, if we know the observations sit around one or more central values, we already have a reference that summarizes the information- Mean- Median- Mode __Arithmetic mean__ - The mean does not have to be an actual value of the variable - It is very sensitive to extreme values - It behaves naturally with respect to arithmetic operations __Weighted mean__ - The arithmetic mean used when each value of the variable (xi) is given a weight different from its frequency or repetition (a short numpy example is shown after the next cell).
###Code
import numpy as np
import matplotlib.pyplot as plt
incomes = np.random.normal(27000, 15000, 10000)
plt.hist(incomes, 50)
plt.show()
np.mean(incomes)
print(f'Mean without outliers: {np.mean(incomes)}')
## Now introduce an extreme outlier:
incomes = np.append(incomes, [1000000000])
print(f'Mean with outlier: {np.mean(incomes)}')
###Output
_____no_output_____
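###Markdown
The weighted mean described above can be computed directly with numpy (this cell is an added illustration, not part of the original notebook; the values and weights are made up):
###Code
# Sketch: weighted mean with hypothetical values and weights
values = np.array([4.0, 7.0, 9.0])
weights = np.array([1, 2, 5])          # how much each value should count
np.average(values, weights=weights)    # (4*1 + 7*2 + 9*5) / (1 + 2 + 5)
###Output
_____no_output_____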
###Markdown
__Median__ - With the observations sorted and their relative frequencies accumulated, it is the value of the variable that leaves 50% of the observations below it. - It is not as affected by extreme values as the mean. - If the number of observations is even, so that there are two central terms, the median is the mean of the two
###Code
import numpy as np
import matplotlib.pyplot as plt
incomes = np.random.normal(27000, 15000, 10000)
plt.hist(incomes, 50)
plt.show()
print(f'Median without outliers: {np.median(incomes)}')
## Now introduce an extreme outlier:
incomes = np.append(incomes, [1000000000])
print(f'Median with outlier: {np.median(incomes)}')
###Output
_____no_output_____
###Markdown
__Mode__ - The value of the variable with the highest absolute frequency. - It is not affected by extreme values. - It is usually used as a complement to the mean and the median, since on its own it does not give decisive information about the distribution - If the variable is measured in intervals there can be more than one mode; in that case the distribution is called multimodal
###Code
# Generate random ages for 500 people:
ages = np.random.randint(18, high=90, size=500)
ages[:10]
from scipy import stats
stats.mode(ages)
###Output
_____no_output_____
###Markdown
Descriptive analysis- Data types- Measures of central tendency- ***Measures of skewness and kurtosis***- Distribution of the data- Non-central position measures- Measures of dispersion Measures of skewnessSkewness measures are indicators that establish the degree of symmetry (or asymmetry) of the probability distribution of a random variable without having to plot it. - **Positive skew:** - Examples: scores on a difficult exam, salaries...- **Negative skew:** - Examples: scores on an easy exam [Ref](https://es.wikipedia.org/wiki/Asimetr%C3%ADa_estad%C3%ADstica) Kurtosis (peakedness)Kurtosis measures the degree of concentration of the values in the central region of the distribution, relative to the normal distribution [Ref](https://www.monografias.com/trabajos87/medidas-forma-asimetria-curtosis/medidas-forma-asimetria-curtosis.shtml)
###Code
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import skew, kurtosis
vals = np.random.normal(0, 1, 10000)
plt.hist(vals, 50)
plt.show()
# For a unimodal distribution, a skewness value > 0 means there is more weight in the right tail of the distribution
print(f'Skew: {skew(vals)}')
print(f'Kurtosis: {kurtosis(vals)}')
from scipy.stats import gamma
# https://docs.scipy.org/doc/scipy-0.13.0/reference/generated/scipy.stats.skew.html
data_gamma = gamma.rvs(a=10, size=10000)
plt.hist(data_gamma, 50)
plt.show()
print(f'Skew: {skew(data_gamma)}')
print(f'Kurtosis: {kurtosis(data_gamma)}')
###Output
_____no_output_____
###Markdown
Descriptive analysis- Data types- Measures of central tendency- Measures of skewness and kurtosis- ***Distribution of the data***- Non-central position measures- Measures of dispersion Probability distribution- A probability distribution describes the behaviour of the variable: how the values of the studied characteristic vary across individuals.- It provides information about the values a variable can take in the observed individuals and how frequently they occur. __Normal (Gaussian) distribution__- Associated with natural phenomena: - morphological traits of a species (size, weight, distance...) - physiological traits (effect of a drug...) - sociological traits (exam grades...)
###Code
from scipy.stats import norm
import matplotlib.pyplot as plt
x = np.arange(-3, 3, 0.001)
plt.plot(x, norm.pdf(x))
import numpy as np
import matplotlib.pyplot as plt
mu = 5.0
sigma = 2.0
values = np.random.normal(mu, sigma, 10000)
plt.hist(values, 50)
plt.show()
###Output
_____no_output_____
###Markdown
Descriptive analysis- Data types- Measures of central tendency- Measures of skewness and kurtosis- Distribution of the data- ***Non-central position measures***- Measures of dispersion Non-central position measuresThey help locate the value of the variable that accumulates a specific percentage of the data. These measures divide the population into equal parts and are used to place an individual within a given sample or population (the same idea as the median). - __Quartiles (Q)__ - Locate the values that accumulate 25%, 50% and 75% of the data, respectively. - A location measure that divides the population into four equal parts (Q1, Q2 and Q3).- __Deciles (D)__ - Represent 10%, 20%, ... , 90% of the accumulated data, respectively. - A location measure that divides the population into ten equal parts - dk = the k-th decile is the value of the variable that leaves k*10% of the distribution to its left.- __Percentiles (P)__ - Represent 1%, 2%, ... , 99% of the accumulated data, respectively - A location measure that divides the population into one hundred equal parts. - Pk = the k-th percentile is the value that leaves k*1% of the distribution to its left.
###Code
import numpy as np
import matplotlib.pyplot as plt
vals = np.random.normal(0, 0.5, 10000)
plt.hist(vals, 50)
plt.show()
np.percentile(vals, 50)
np.percentile(vals, 90)
np.percentile(vals, 20)
###Output
_____no_output_____
###Markdown
Descriptive analysis- Data types- Measures of central tendency- Measures of skewness and kurtosis- Distribution of the data- Non-central position measures- ***Measures of dispersion*** Measures of dispersion- Variance- Standard deviation Variance- It is the arithmetic mean of the squared differences between the observations and the mean- As with the mean, if the variable is given in intervals the class mark is taken as the value xi.- It is not useful for comparing variables on different scales, which is why the standard deviation is used more often. Standard deviation- It is the positive square root of the variance- It is the best and most widely used measure of dispersion- If the frequency distribution is approximately normal: - 68% of the values lie within 1 standard deviation of the mean - 95% of the values lie within 2 standard deviations - 99% of the values lie within 3 standard deviations
###Code
import numpy as np
import matplotlib.pyplot as plt
incomes = np.random.normal(100.0, 50.0, 10000)
plt.hist(incomes, 50)
plt.show()
incomes.std()
incomes.var()
###Output
_____no_output_____ |
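###Markdown
As an added check (not in the original notebook), the 68/95/99 rule quoted above can be verified empirically on the simulated incomes:
###Code
# Fraction of values within 1, 2 and 3 standard deviations of the mean
mu, sigma = incomes.mean(), incomes.std()
for k in (1, 2, 3):
    frac = np.mean(np.abs(incomes - mu) <= k * sigma)
    print(f'within {k} sigma: {frac:.3f}')
###Output
_____no_output_____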
units/SLU12_Validation_metrics_for_regression/Example Notebook - SLU12 (Validation metrics for regression).ipynb | ###Markdown
SLU12 - Validation metrics for regression: Example NotebookIn this notebook [some regression validation metrics offered by scikit-learn](http://scikit-learn.org/stable/modules/model_evaluation.htmlcommon-cases-predefined-values) are presented.
###Code
import numpy as np
import pandas as pd
from sklearn.datasets import load_boston
from sklearn.linear_model import LinearRegression
# some scikit-learn regression validation metrics
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
np.random.seed(60)
###Output
_____no_output_____
###Markdown
Load DataLoad the Boston house-prices dataset, fit a Linear Regression, and make predictions on the same dataset that was used to fit the model.
###Code
data = load_boston()
x = pd.DataFrame(data['data'], columns=data['feature_names'])
y = pd.Series(data['target'])
lr = LinearRegression()
lr.fit(x, y)
y_hat = lr.predict(x)
###Output
_____no_output_____
###Markdown
Mean Absolute Error$$MAE = \frac{1}{N} \sum_{n=1}^N \left| y_n - \hat{y}_n \right|$$
###Code
mean_absolute_error(y, y_hat)
###Output
_____no_output_____
###Markdown
Mean Squared Error$$MSE = \frac{1}{N} \sum_{n=1}^N (y_n - \hat{y}_n)^2$$
###Code
mean_squared_error(y, y_hat)
###Output
_____no_output_____
###Markdown
Root Mean Squared Error$$RMSE = \sqrt{MSE}$$
###Code
np.sqrt(mean_squared_error(y, y_hat))
###Output
_____no_output_____
###Markdown
Rยฒ score$$\bar{y} = \frac{1}{N} \sum_{n=1}^N y_n$$$$Rยฒ = 1 - \frac{MSE(y, \hat{y})}{MSE(y, \bar{y})} = 1 - \frac{\frac{1}{N} \sum_{n=1}^N (y_n - \hat{y}_n)^2}{\frac{1}{N} \sum_{n=1}^N (y_n - \bar{y})^2}= 1 - \frac{\sum_{n=1}^N (y_n - \hat{y}_n)^2}{\sum_{n=1}^N (y_n - \bar{y})^2}$$
###Code
r2_score(y, y_hat)
###Output
_____no_output_____ |
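###Markdown
Note that all of the metrics above are computed on the same data the model was fit on, which gives an optimistic picture. A small added sketch (not part of the original notebook) of the more realistic hold-out workflow:
###Code
# Evaluate on held-out data instead of the training data
from sklearn.model_selection import train_test_split

x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=0)
lr_holdout = LinearRegression().fit(x_train, y_train)
y_test_hat = lr_holdout.predict(x_test)
print(mean_absolute_error(y_test, y_test_hat))
print(np.sqrt(mean_squared_error(y_test, y_test_hat)))
print(r2_score(y_test, y_test_hat))
###Output
_____no_output_____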
algoExpert/find_loop/solution.ipynb | ###Markdown
Find Loop[link](https://www.algoexpert.io/questions/Find%20Loop) My Solution
###Code
# This is an input class. Do not edit.
class LinkedList:
def __init__(self, value):
self.value = value
self.next = None
def findLoop(head):
    # Floyd's cycle detection: a slow and a fast pointer meet inside the loop;
    # restarting one pointer from the head then locates the node where the loop begins
p1 = head.next
p2 = head.next.next
while p1 is not p2:
p1 = p1.next
p2 = p2.next.next
p2 = head
while p1 is not p2:
p1 = p1.next
p2 = p2.next
return p2
###Output
_____no_output_____
###Markdown
Expert Solution
###Code
class LinkedList:
def __init__(self, value):
self.value = value
self.next = None
# O(m) time | O(1) space
def findLoop(head):
first = head.next
second = head.next.next
while first != second:
first = first.next
second = second.next.next
first = head
while first != second:
first = first.next
second = second.next
return first
###Output
_____no_output_____ |
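###Markdown
A small usage sketch (added, not from the original solution): build a list with a cycle and check that findLoop returns the node where the loop starts.
###Code
# 0 -> 1 -> 2 -> 3 -> 4 -> back to 2
nodes = [LinkedList(i) for i in range(5)]
for a, b in zip(nodes, nodes[1:]):
    a.next = b
nodes[-1].next = nodes[2]               # close the loop at node 2
print(findLoop(nodes[0]) is nodes[2])   # expected: True
###Output
_____no_output_____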
dev/archived/Finding implicit markets.ipynb | ###Markdown
The inspiration for these virtual markets was the input of 'soybean' to 'market for soybean, feed', which has the reference product 'soybean, feed'. We can't just test exact matching; we need to be a bit [more flexible](https://github.com/seatgeek/thefuzz) on these virtual markets.
###Code
# Imports assumed from the project environment (they are not shown in this notebook):
# thefuzz for fuzzy matching, tqdm for progress bars, and bw2data (as bd) for Brightway
from collections import defaultdict
from thefuzz import fuzz
from tqdm import tqdm
import bw2data as bd

def similar(a, b):
return fuzz.partial_ratio(a, b) > 90 or fuzz.ratio(a, b) > 40
def find_uncertain_virtual_markets(database):
db = bd.Database(database)
found = {}
for act in tqdm(db):
rp = act.get("reference product")
if not rp:
continue
inpts = defaultdict(list)
for exc in act.technosphere():
if exc.input == exc.output:
continue
elif exc['uncertainty type'] < 2:
continue
inpts[exc.input['reference product']].append(exc)
for key, lst in inpts.items():
if len(lst) > 1 and similar(rp, key) and 0.98 <= sum([exc['amount'] for exc in lst]) <= 1.02:
found[act] = lst
return found
found = find_uncertain_virtual_markets("ecoinvent 3.8 cutoff")
len(found)
ng = list(found)[5]
ng, found[ng]
found
###Output
_____no_output_____
###Markdown
We can use the [dirichlet](https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.dirichlet.html) to model parameters with a fixed sum, but this distribution is sensitive to the concentration values.
###Code
from scipy.stats import dirichlet
import numpy as np
import seaborn as sb
x = np.array([exc['amount'] for exc in found[ng]])
alpha = x.copy()
dirichlet.mean(alpha)
rvs = dirichlet.rvs(alpha, size=1000)
sb.displot(rvs[:, 0])
rvs = dirichlet.rvs(alpha * 10, size=1000)
sb.displot(rvs[:, 0])
rvs = dirichlet.rvs(alpha * 500, size=1000)
sb.displot(rvs[:, 0])
sb.displot(rvs[:, 1])
###Output
_____no_output_____
###Markdown
We can use these new values in Monte Carlo assessment (in place of the independent sampling which results in broken mass balances). The exact approach here will probably be different; for example, one could use trade statistics to create regional markets with much higher precision.The underlying concepts in the following are documented in [bw_processing](https://github.com/brightway-lca/bw_processing) and [matrix_utils](https://github.com/brightway-lca/matrix_utils). In this notebook we will use in-memory datapackages for our fixes.
###Code
import bw_processing as bwp
indices_array = np.array([(exc.input.id, exc.output.id) for exc in found[ng]], dtype=bwp.INDICES_DTYPE)
# Redefine alpha to make sure order is consistent
# Transpose to get rows or exchange indices, columns of possible values
data_array = dirichlet.rvs(np.array([exc['amount'] for exc in found[ng]]) * 500, size=1000).T
# technosphere inputs must be flipped
flip_array = np.ones(len(found[ng]), dtype=bool)
dp = bwp.create_datapackage()
dp.add_persistent_array(
matrix="technosphere_matrix",
data_array=data_array,
name="ng-fix-dz-es",
indices_array=indices_array,
flip_array=flip_array,
)
###Output
_____no_output_____
###Markdown
Compare Monte Carlo results with and without the fix
###Code
ipcc = ('IPCC 2013', 'climate change', 'GWP 100a')
_, data_objs, _ = bd.prepare_lca_inputs({ng: 1}, method=ipcc)
###Output
_____no_output_____
###Markdown
Default is to use three datapackages: biosphere database, ecoinvent database, and LCIA method
###Code
data_objs
import bw2calc as bc
lca = bc.LCA({ng.id: 1}, data_objs=data_objs, use_distributions=True)
lca.lci()
lca.lcia()
unmodified = np.array([lca.score for _ in zip(lca, range(250))])
fixed = bc.LCA({ng.id: 1}, data_objs=data_objs + [dp], use_arrays=True, use_distributions=True)
fixed.lci()
fixed.lcia()
modified = np.array([fixed.score for _ in zip(fixed, range(250))])
###Output
_____no_output_____
###Markdown
Uncertainty for this example is not huge, so the difference is not obvious
###Code
np.mean(modified), np.std(modified), np.mean(unmodified), np.std(unmodified)
for exc in found[ng]:
lca.redo_lcia({exc.input.id: 1})
print(lca.score)
for exc in found[ng]:
print(exc['scale'])
sum([
lca.technosphere_matrix[lca.dicts.product[row], lca.dicts.activity[col]]
for row, col in indices_array
])
sum([
fixed.technosphere_matrix[fixed.dicts.product[row], fixed.dicts.activity[col]]
for row, col in indices_array
])
sb.displot(unmodified, kde=True)
sb.displot(modified, kde=True)
###Output
_____no_output_____ |
001_skim_lit_project_v1_0.ipynb | ###Markdown
This is a natural language processing (NLP) project. The paper we are implementing here is available at https://arxiv.org/abs/1710.06071. Reading through that paper, we see that the model architecture they use to achieve their best results is described at https://arxiv.org/abs/1612.05251 1. **Get Data**
###Code
!git clone https://github.com/Franck-Dernoncourt/pubmed-rct
!ls pubmed-rct
!ls pubmed-rct/PubMed_20k_RCT_numbers_replaced_with_at_sign/
!ls pubmed-rct/PubMed_20k_RCT_numbers_replaced_with_at_sign/
data_dir = "/content/pubmed-rct/PubMed_20k_RCT_numbers_replaced_with_at_sign/"
# Check all of the filenames in the target directory
import os
filenames = [data_dir + filename for filename in os.listdir(data_dir)]
filenames
###Output
_____no_output_____
###Markdown
**Preprocess data**
###Code
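# Added sketch (not from the original notebook): read one of the .txt splits into lines.
# Assumes the standard PubMed RCT layout downloaded above, where each split directory
# contains train.txt, dev.txt and test.txt with one "LABEL\tsentence" pair per line.
def get_lines(filename):
    """Return a list of the lines in filename."""
    with open(filename) as f:
        return f.readlines()

train_lines = get_lines(data_dir + "train.txt")
train_lines[:15]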
###Output
_____no_output_____ |
notebooks/A_thaliana/Effect_of_annotation_evidence.ipynb | ###Markdown
The effect of annotation evidenceThis notebook contains the analysis of the effect of annotation evidence on pan-genome results. Specifically, we compare pan-genomes that are constructed from the same data but with different annotation evidence: 1) No evidence (liftover + ab-initio only)2) Standard evidence3) High quality (HQ) evidence
###Code
import os
import pandas as pd
import plotly.graph_objects as go
import plotly.express as px
import plotly.io as pio
from plotly.subplots import make_subplots
from intervaltree import Interval, IntervalTree
from itertools import chain
pio.templates.default = "plotly_white"
pg_order = ['no_ev', 'normal_ev', 'HQ_ev']
samples = ['An-1', 'C24', 'Cvi-0', 'Eri', 'Kyo', 'Ler', 'Sha', 'TAIR10']
n_samples = len(samples)
###Output
_____no_output_____
###Markdown
PathsPaths to directories containing the pan-genome analyses
###Code
dn_dir = "/groups/itay_mayrose_nosnap/liorglic/Projects/PGCM/output/A_thaliana_pan_genome/de_novo"
mtp_dir = "/groups/itay_mayrose_nosnap/liorglic/Projects/PGCM/output/A_thaliana_pan_genome/map_to_pan"
# de novo pan-genomes
dn_pan_genomes = {
'no_ev': os.path.join(dn_dir, "x50_no_ev/RESULT"),
'normal_ev': os.path.join(dn_dir, "x50/RESULT"),
'HQ_ev': os.path.join(dn_dir, "x50_HQ_ev/RESULT")
}
# map-to-pan pan-genomes
mtp_pan_genomes = {
'no_ev': os.path.join(mtp_dir, "x50_no_ev/RESULT"),
'normal_ev': os.path.join(mtp_dir, "x50/RESULT"),
'HQ_ev': os.path.join(mtp_dir, "x50_HQ_ev/RESULT")
}
figs_path = "/groups/itay_mayrose_nosnap/liorglic/Projects/PGCM/figs/FINAL"
###Output
_____no_output_____
###Markdown
Load and preprocess data PAV matrices
###Code
# de novo
dn_pav = {
pg :
pd.read_csv(os.path.join(dn_pan_genomes[pg],"all_samples/pan_genome/pan_PAV.tsv"), sep='\t', index_col='gene')
for pg in dn_pan_genomes
}
# map-to-pan
mtp_pav = {
pg :
pd.read_csv(os.path.join(mtp_pan_genomes[pg],"all_samples/pan_genome/pan_PAV.tsv"), sep='\t', index_col='gene')
for pg in mtp_pan_genomes
}
for pg in dn_pav:
dn_pav[pg].columns = dn_pav[pg].columns.map(lambda x: x.split('_')[0])
dn_pav[pg] = dn_pav[pg][samples]
for pg in mtp_pav:
mtp_pav[pg].columns = mtp_pav[pg].columns.map(lambda x: x.split('_')[0])
dn_pav[pg] = dn_pav[pg][samples]
mtp_pav[pg].index = mtp_pav[pg].index.str.replace(':','_')
###Output
_____no_output_____
###Markdown
Calculate occupancy and occupancy class (core, shell, singleton)
###Code
def occup_class(occup, core_cut):
if occup >= core_cut:
return 'Core'
elif occup == 1:
return 'Singleton'
else:
return 'Shell'
for pg in dn_pav:
# calculate occupancy
dn_pav[pg]['occupancy'] = dn_pav[pg].apply(sum, axis=1)
# discard genes with occupancy 0
dn_pav[pg] = dn_pav[pg].query('occupancy > 0')
# occupancy class
dn_pav[pg]['occup_class'] = dn_pav[pg].apply(lambda row: occup_class(row['occupancy'], n_samples), axis=1)
for pg in mtp_pav:
# calculate occupancy
mtp_pav[pg]['occupancy'] = mtp_pav[pg].apply(sum, axis=1)
# discard genes with occupancy 0
mtp_pav[pg] = mtp_pav[pg].query('occupancy > 0')
# occupancy class
mtp_pav[pg]['occup_class'] = mtp_pav[pg].apply(lambda row: occup_class(row['occupancy'], n_samples), axis=1)
###Output
_____no_output_____
###Markdown
Genes per sample
###Code
dn_gene_counts = {}
for pg in pg_order:
sample_counts = []
for sample in samples:
sample_pav = dn_pav[pg][sample]
sample_present = sample_pav.loc[sample_pav == 1]
ref_nonref = pd.Series(sample_present.index.str.startswith('PanGene')).map({False: 'Reference', True: 'Nonreference'}).value_counts().sort_index()
ref_nonref.name = sample
sample_counts.append(ref_nonref)
dn_gene_counts[pg] = pd.concat(sample_counts, axis=1).transpose()
dn_gene_counts_df = pd.concat([dn_gene_counts[pg] for pg in pg_order], axis=1)
dn_gene_counts_df.columns = pd.MultiIndex.from_product([['No-evidence','Standard evidence','HQ evidence'], ['Nonreference','Reference']])
dn_gene_counts_df
mtp_gene_counts = {}
for pg in pg_order:
sample_counts = []
for sample in samples:
sample_pav = mtp_pav[pg][sample]
sample_present = sample_pav.loc[sample_pav == 1]
ref_nonref = pd.Series(sample_present.index.str.startswith('PanGene')).map({False: 'Reference', True: 'Nonreference'}).value_counts().sort_index()
ref_nonref.name = sample
sample_counts.append(ref_nonref)
mtp_gene_counts[pg] = pd.concat(sample_counts, axis=1).transpose()
mtp_gene_counts_df = pd.concat([mtp_gene_counts[pg] for pg in pg_order], axis=1)
mtp_gene_counts_df.columns = pd.MultiIndex.from_product([['No-evidence','Standard evidence','HQ evidence'], ['Nonreference','Reference']])
mtp_gene_counts_df
dn_nonref_counts = dn_gene_counts_df[[('No-evidence','Nonreference'),('Standard evidence','Nonreference'),('HQ evidence','Nonreference')]]
dn_nonref_counts.columns = ['No-evidence','Standard evidence','HQ evidence']
dn_nonref_counts = dn_nonref_counts.dropna()
dn_nonref_counts_melt = dn_nonref_counts.reset_index().melt(id_vars='index',
value_vars=['No-evidence','Standard evidence','HQ evidence'])
dn_nonref_counts_melt.columns = ['sample','PG','genes']
dn_nonref_counts_melt['pipeline'] = 'De novo'
mtp_nonref_counts = mtp_gene_counts_df[[('No-evidence','Nonreference'),('Standard evidence','Nonreference'),('HQ evidence','Nonreference')]]
mtp_nonref_counts.columns = ['No-evidence','Standard evidence','HQ evidence']
mtp_nonref_counts = mtp_nonref_counts.dropna()
mtp_nonref_counts_melt = mtp_nonref_counts.reset_index().melt(id_vars='index',
value_vars=['No-evidence','Standard evidence','HQ evidence'])
mtp_nonref_counts_melt.columns = ['sample','PG','genes']
mtp_nonref_counts_melt['pipeline'] = 'Map-to-pan'
nonref_counts_melt = pd.concat([dn_nonref_counts_melt, mtp_nonref_counts_melt])
xbase = pd.Series(nonref_counts_melt["PG"].unique()).reset_index().rename(columns={"index":"x",0:"PG"})
nonref_counts_melt = nonref_counts_melt.merge(xbase, on="PG").set_index("pipeline")
#samples_color_map = dict(zip(gene_counts_melt['sample'].unique(), pio.templates['plotly'].layout.colorway[:8]))
sample_colors = ['blue','red','green','purple','orange','brown','lightblue','darkgreen']
sample_colors = dict(zip(samples, sample_colors))
nonref_counts_melt['color'] = nonref_counts_melt.apply(lambda row: sample_colors[row['sample']], axis=1)
pipeline_symbol_map = {'De novo': 'square',
'Map-to-pan': 'cross'}
nonref_counts_melt['symbol'] = nonref_counts_melt.apply(lambda row: pipeline_symbol_map[row.name], axis=1)
fig = go.Figure(
[
go.Scatter(
name=p,
x=nonref_counts_melt.loc[p, "x"] + i/5,
y=nonref_counts_melt.loc[p, "genes"],
text=nonref_counts_melt.loc[p, "PG"],
mode="markers",
marker={"color": nonref_counts_melt.loc[p, "color"], "symbol": nonref_counts_melt.loc[p, "symbol"], "size":7},
hovertemplate="(%{text},%{y})"
)
for i, p in enumerate(nonref_counts_melt.index.get_level_values("pipeline").unique())
]
)
fig.update_layout(xaxis={"tickmode":"array", "tickvals":xbase["x"], "ticktext":xbase["PG"]},
yaxis={'title': 'Number of genes'},
)
fig.update_xaxes(mirror=True, showline=True, linecolor='black', showgrid=False, zeroline=False)
fig.update_yaxes(mirror=True, showline=True, linecolor='black', showgrid=False, zeroline=False)
fig.update_layout(autosize=False, width=500)
fig.show()
###Output
_____no_output_____
###Markdown
Pan-genome size and compositionBasic stats of the total sizes and occupancy classes of the various PGs
###Code
dn_pg_composition = pd.concat([dn_pav[pg]['occup_class'].value_counts().rename(pg).sort_index()
for pg in pg_order], axis=1).transpose()
dn_pg_composition['Total'] = dn_pg_composition.apply(sum, axis=1)
dn_pg_composition.index = ['No-evidence','Standard evidence', 'HQ evidence']
mtp_pg_composition = pd.concat([mtp_pav[pg]['occup_class'].value_counts().rename(pg).sort_index()
for pg in pg_order], axis=1).transpose()
mtp_pg_composition['Total'] = mtp_pg_composition.apply(sum, axis=1)
mtp_pg_composition.index = ['No-evidence','Standard evidence', 'HQ evidence']
pg_composition = dn_pg_composition.join(mtp_pg_composition, rsuffix='_MTP')
pg_composition.columns = pd.MultiIndex.from_product([['De novo','Map-to-pan'],['Core','Shell','Singletons','Total']])
pg_composition
colors = ['grey','purple','darkgreen','lightblue','orange']
fig = make_subplots(rows=2, cols=1,
shared_xaxes=True,
vertical_spacing=0.1,
subplot_titles=('De novo', 'Map-to-pan'),
y_title="Number of pan-genes")
fig.add_trace(go.Bar(x=dn_pg_composition.index, y=dn_pg_composition['Core'], name='Core', legendrank=3), row=1, col=1)
fig.add_trace(go.Bar(x=dn_pg_composition.index, y=dn_pg_composition['Shell'], name='Shell', legendrank=2), row=1, col=1)
fig.add_trace(go.Bar(x=dn_pg_composition.index, y=dn_pg_composition['Singleton'], name='Singleton', legendrank=1), row=1, col=1)
fig.add_trace(go.Bar(x=mtp_pg_composition.index, y=mtp_pg_composition['Core'], name='Core', showlegend=False), row=2, col=1)
fig.add_trace(go.Bar(x=mtp_pg_composition.index, y=mtp_pg_composition['Shell'], name='Shell', showlegend=False), row=2, col=1)
fig.add_trace(go.Bar(x=mtp_pg_composition.index, y=mtp_pg_composition['Singleton'], name='Singleton', showlegend=False), row=2, col=1)
fig.update_layout(barmode='stack', colorway=colors[2:])
fig.update_xaxes(mirror=True, showline=True, linecolor='black')
fig.update_yaxes(mirror=True, showline=True, linecolor='black', showgrid=False)
fig.show()
###Output
_____no_output_____ |
code/report-specific/single_agent_random_interval_QLearning/single_agent_random_interval.ipynb | ###Markdown
Single Q Learning Agent against Random Interval Agents---Start with importing necessary packages, modules:
###Code
import sys
sys.path.append('.')
import gym
from IPython.display import clear_output
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.axes as ax
import warnings
import random
import os
import pickle
# pandas setting warnings can be ignored, as the behaviour is often intended
warnings.simplefilter("ignore")
from QLearningAgents import QLearningBuyer, QLearningSeller
from RandOfferAgents import RandOfferBuyer, RandOfferSeller
from RandInterAgents import RandInterBuyer, RandInterSeller
from utils import *
from environments import MarketEnvironment
from info_settings import BlackBoxSetting
from matchers import RandomMatcher
###Output
_____no_output_____
###Markdown
Experiment Metrics---Define the experiment and set up the market environment
###Code
%%time
#Number of Episodes
n_episodes = 1000
#Interval to print statistics
n_stats = 10 #n_episodes/100
#Negative reward for q-learning when agent is not yet done in Episode
negative_reward = -1
#How many games to run, to stabilize results
n_games = 1
for i in range(n_games):
# Get Agents with defined:
# Reservation prices
# Starting/Default prices
# Number of different classes of sellers and buyers
buyers, sellers, inequality, q_learn_agents = get_agents_equal(sellr_reserve = 90, buyer_reserve = 110,
sellr_default = 110, buyer_default = 90,
n_rnd_off_buyers = 0, n_rnd_off_sellrs = 0,
n_rnd_int_buyers = 3, n_rnd_int_sellrs = 3,
n_q_learn_buyers = 1, n_q_learn_sellrs = 0,
n_states = 20)
set_hyperparameters(buyers + sellers, alpha=0.1, gamma=0.95, epsilon=0.1)
dir_path = f'{n_episodes}_{n_stats}_B{len(buyers)}_S{len(sellers)}_Q{len(q_learn_agents)}'
if not os.path.exists(dir_path):
os.mkdir(dir_path)
market_env = MarketEnvironment(sellers=sellers, buyers=buyers, max_steps=30,
matcher=RandomMatcher(reward_on_reference=True), setting=BlackBoxSetting)
steps_list = learn(market_env, buyers, sellers, q_learn_agents, n_episodes, n_stats, negative_reward, inequality)
with open(f'{dir_path}/{i}buyers.pkl', 'wb') as f:
pickle.dump(buyers, f)
with open(f'{dir_path}/{i}sellers.pkl', 'wb') as f:
pickle.dump(sellers, f)
with open(f'{dir_path}/{i}steps.pkl', 'wb') as f:
pickle.dump(steps_list, f)
###Output
Episode 1000: Steps=1.8
B[i0]: Rewards=5.362134193598429
B[i1]: Rewards=7.635704472805332
B[i2]: Rewards=2.2623459040160654
B[q0]: Rewards=6.0339750179906035
S[i0]: Rewards=12.36804353042989
S[i1]: Rewards=14.60356520847613
S[i2]: Rewards=11.734231672683551
CPU times: user 1min 5s, sys: 460 ms, total: 1min 6s
Wall time: 1min 4s
###Markdown
Plot Averaged Results, Q Tables---* Buyer Rewards, Seller Rewards* Steps per Episode* Q Tables
###Code
for i in range(n_games):
with open(f'{dir_path}/{i}buyers.pkl', 'rb') as f:
buyers = pickle.load(f)
with open(f'{dir_path}/{i}sellers.pkl', 'rb') as f:
sellers = pickle.load(f)
with open(f'{dir_path}/{i}steps.pkl', 'rb') as f:
steps_list = pickle.load(f)
plot_stats(buyers, sellers, n_stats, dir_path, steps_list)
plt.show()
#Plot just the q Agent, which is the last
plot_stats([buyers[-1]], sellers, n_stats, dir_path)
plt.show()
plot_q_tables(q_learn_agents, dir_path)
plt.show()
save_stats([buyers[-1]], n_stats, steps_list, dir_path)
###Output
_____no_output_____ |
Notebooks/4. Making Predictions for New Locations.ipynb | ###Markdown
Making predictions for new locationsFor convenience, this notebook contains everything you need to make predictions using the saved model. You'll need a Google Earth Engine account to download the inputs, but besides that all you need is some locations of interest. The first section sets things up; the second is the fun bit where you'll download the data and make predictions Setup
###Code
import ee
# ee.Authenticate() # Run once to link your EE account
ee.Initialize() # Run each session
from tqdm import tqdm # Progress bar
from datetime import timedelta
# A utility function to pull data for a set of locations
def sample(im, prop, lats, lons, scale=5000, reducer=ee.Reducer.first(), tileScale=4):
points = []
for lat, lon in zip(lats, lons):
xy = ee.Geometry.Point([lon, lat])
points.append(xy.buffer(scale))
vals = im.reduceRegions(collection=ee.FeatureCollection(points), scale=scale, reducer=reducer, tileScale=tileScale).getInfo()
if prop == '':
return [v['properties'] for v in vals['features']]
return [v['properties'][prop] for v in vals['features']]
def add_static_vars(df, scale=5000):
lights = ee.ImageCollection("NOAA/DMSP-OLS/CALIBRATED_LIGHTS_V4").filter(ee.Filter.date('2010-01-01', '2018-03-08')).first()
pop = ee.ImageCollection("CIESIN/GPWv411/GPW_UNWPP-Adjusted_Population_Density").filter(ee.Filter.date('2010-01-01', '2018-03-08')).first()
ims = [lights, pop]
for im in tqdm(ims):
for i, reducer in enumerate([ee.Reducer.mean(), ee.Reducer.min(), ee.Reducer.max()]):
sampled_values = sample(im, '', df['Lat'].values, df['Long'].values, reducer=reducer, scale=scale)
for k in sampled_values[0].keys():
arr = ['mean', 'min', 'max']
df[k+'_'+str(scale)+'_' + arr[i]] = [sv[k] if k in sv.keys() else None for sv in sampled_values]
if k == arr[i]:
df = df.rename(columns={k+'_'+str(scale)+'_' + arr[i]:'pop_density2010'+'_'+str(scale)+'_' + arr[i]})
return df
# Image Collections
gfs = ee.ImageCollection("NOAA/GFS0P25") # Weather data
S5p_collections = {} # Sentinel 5p data, which comes in multiple collections
for COL in ['L3_NO2', 'L3_O3', 'L3_CO', 'L3_HCHO', 'L3_AER_AI', 'L3_SO2', 'L3_CH4', 'L3_CLOUD']: #
S5p_collections[COL] = ee.ImageCollection('COPERNICUS/S5P/OFFL/'+COL).map(lambda image: image.addBands(image.metadata('system:time_start')))
# Properties for each image we want to keep
s5p_props = {
'L3_NO2':['NO2_column_number_density', 'tropospheric_NO2_column_number_density', 'stratospheric_NO2_column_number_density', 'NO2_slant_column_number_density', 'tropopause_pressure', 'absorbing_aerosol_index'],
'L3_O3':['O3_column_number_density', 'O3_effective_temperature'],
'L3_CO':['CO_column_number_density', 'H2O_column_number_density', 'cloud_height'],
'L3_HCHO':['tropospheric_HCHO_column_number_density', 'tropospheric_HCHO_column_number_density_amf', 'HCHO_slant_column_number_density'],
'L3_CLOUD':['cloud_fraction', 'cloud_top_pressure', 'cloud_top_height', 'cloud_base_pressure', 'cloud_base_height', 'cloud_optical_depth', 'surface_albedo'],
'L3_AER_AI':['absorbing_aerosol_index', 'sensor_altitude', 'sensor_azimuth_angle', 'sensor_zenith_angle', 'solar_azimuth_angle', 'solar_zenith_angle']
}
def add_timeseries(df, dates, reducer=ee.Reducer.first()):
# Prepare dataframe with date x city
date_col = []
city_col = []
for d in dates:
for c in df.City.unique():
date_col.append(d)
city_col.append(c)
data = pd.DataFrame({
'Date':date_col,
'City':city_col
})
data = pd.merge(data, df[['City', 'Lat', 'Long']], how='left', on='City')
for d in tqdm(dates):
# Weather is easy - a single image from the right date
weather_image = gfs.filter(ee.Filter.date(str(d.date()), str((d+timedelta(days=1)).date()))).first() # Filter to get the relevant image
# For the sentinel data, we get images from each collection and merge them
s5p_images = []
for COL in ['L3_NO2', 'L3_O3', 'L3_CO', 'L3_HCHO', 'L3_CLOUD', 'L3_AER_AI']:
collection = S5p_collections[COL].filter(ee.Filter.date(str((d-timedelta(days=5)).date()), str(d.date())))
image = collection.qualityMosaic('system:time_start') # The most recent image
image = image.select(s5p_props[COL])
s5p_images.append(image)
s5p_image = ee.ImageCollection(s5p_images).toBands() # Merge into one image
# Sample the weather data
samples = sample(weather_image, '', df['Lat'].values, df['Long'].values, reducer=reducer)
for prop in samples[0].keys():
data.loc[data.Date==d, prop] = [p[prop] for p in samples]
# Sample the sentinel data
samples = sample(s5p_image, '', df['Lat'].values, df['Long'].values)
for prop in samples[0].keys():
data.loc[data.Date==d, prop] = [p[prop] for p in samples]
return data
###Output
_____no_output_____
###Markdown
Now the fun bit
###Code
# Load your locations to match this format:
import pandas as pd
cities = pd.DataFrame({
'City':['Harare', 'Lusaka'],
'Lat':[-17.8,-15.38],
'Long':[31.08, 28.32]
})
cities.head()
# Specify dates
dates = pd.date_range('2020-01-01', '2020-01-04', freq='1D')
# Add static vars
cities_w_static = add_static_vars(cities.copy())
# Add timeseries
ts = add_timeseries(cities.copy(), dates, reducer=ee.Reducer.mean())
ts.head()
data = pd.merge(ts, cities_w_static, on=['City', 'Lat', 'Long'])
data.head()
# Feature Engineering
to_lag = ['2_CO_column_number_density',
'0_NO2_slant_column_number_density', '0_NO2_column_number_density',
'3_tropospheric_HCHO_column_number_density', '0_tropopause_pressure',
'3_HCHO_slant_column_number_density',
'relative_humidity_2m_above_ground', '1_O3_column_number_density',
'4_cloud_fraction', '0_stratospheric_NO2_column_number_density',
'4_surface_albedo', '3_tropospheric_HCHO_column_number_density_amf',
'u_component_of_wind_10m_above_ground', '4_cloud_optical_depth']
# Adding the same features
for shift in [1,2,3,5]:
for col in to_lag:
data[col+'shift'+str(shift)] = data.groupby(['City'])[col].transform(lambda x:x.shift(shift))
data[col+'nshift'+str(shift)] = data.groupby(['City'])[col].transform(lambda x:x.shift(-shift))
for attr in ['day', 'month','year','week', 'dayofweek', 'weekofyear', 'days_in_month', 'is_month_start', 'is_month_end', 'dayofyear']:
data[attr] = getattr(pd.DatetimeIndex(data['Date']), attr)
data['is_weekend'] = (data['dayofweek'] >= 5)*1
data['fortnight'] = data['day']%15
data['which_fortnight'] = data['day']//15
# Load the saved model
from catboost import CatBoostRegressor
model = CatBoostRegressor()
model.load_model('../Data/saved_models/catboost_01')
# Make predictions
preds = model.predict(data.drop(['Lat', 'Long', 'City', 'Date'], axis=1))
data['Predicted_PM25'] = preds
# View he result
data.head()
# In this example we can compare the two locations over the dates we specified. Both have nice clean air, apparently
data.groupby('City').mean()['Predicted_PM25']
###Output
_____no_output_____
###Markdown
Using this to make our final set of predictionsThese three datasets were generated earlier. This section is kept for the record only.
###Code
def make_preds(ts, cities_w_static, savename):
data = pd.merge(ts, cities_w_static, on=['City', 'Lat', 'Long'])
# Feature Engineering
to_lag = ['2_CO_column_number_density',
'0_NO2_slant_column_number_density', '0_NO2_column_number_density',
'3_tropospheric_HCHO_column_number_density', '0_tropopause_pressure',
'3_HCHO_slant_column_number_density',
'relative_humidity_2m_above_ground', '1_O3_column_number_density',
'4_cloud_fraction', '0_stratospheric_NO2_column_number_density',
'4_surface_albedo', '3_tropospheric_HCHO_column_number_density_amf',
'u_component_of_wind_10m_above_ground', '4_cloud_optical_depth']
# Adding the same features
for shift in [1,2,3,5]:
for col in to_lag:
data[col+'shift'+str(shift)] = data.groupby(['City'])[col].transform(lambda x:x.shift(shift))
data[col+'nshift'+str(shift)] = data.groupby(['City'])[col].transform(lambda x:x.shift(-shift))
for attr in ['day', 'month','year','week', 'dayofweek', 'weekofyear', 'days_in_month', 'is_month_start', 'is_month_end', 'dayofyear']:
data[attr] = getattr(pd.DatetimeIndex(data['Date']), attr)
data['is_weekend'] = (data['dayofweek'] >= 5)*1
data['fortnight'] = data['day']%15
data['which_fortnight'] = data['day']//15
model = CatBoostRegressor()
model.load_model('../Data/saved_models/catboost_01')
# Make predictions
preds = model.predict(data.drop(['Lat', 'Long', 'City', 'Date'], axis=1))
data['Predicted_PM25'] = preds
data[['City', 'Date', 'Lat', 'Long', 'Predicted_PM25', 'pop_density2010_5000_max']].to_csv(savename, index=False)
ts = pd.read_csv('../Data/intermediate/za_cities_timeseries.csv')
static = pd.read_csv('../Data/intermediate/za_cities_w_static.csv')
make_preds(ts, static, '../Data/results/za_cities_predictions.csv')
ts = pd.read_csv('../Data/intermediate/af_cities_timeseries.csv')
static = pd.read_csv('../Data/intermediate/af_cities_w_static.csv').drop(['admin_name', 'capital', 'city_ascii', 'country', 'iso2', 'iso3', 'population', 'id'], axis=1)
make_preds(ts, static, '../Data/results/af_cities_predictions.csv')
ts = pd.read_csv('../Data/intermediate/pcs_timeseries.csv')
static = pd.read_csv('../Data/intermediate/pcs_w_static.csv')
make_preds(ts, static, '../Data/results/pcs_predictions.csv')
###Output
_____no_output_____ |
Python/Extract weights from Keras's LSTM and calculate hidden and cell states-Copy1.ipynb | ###Markdown
Initialisation time series
###Code
import matplotlib.pyplot as plt
import tensorflow as tf
#from keras.backend.tensorflow_backend import set_session
import keras
import seaborn as sns
import pandas as pd
import sys, time
import numpy as np
import warnings
from pandas import DataFrame
from pandas import Series
from pandas import concat
from pandas import read_csv
from pandas import datetime
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import MinMaxScaler
#from keras.models import Sequential
#from keras.layers import Dense
#from keras.layers import LSTM
#from math import sqrt
warnings.filterwarnings("ignore")
print("python {}".format(sys.version))
print("keras version {}".format(keras.__version__))
print("tensorflow version {}".format(tf.__version__))
# date-time parsing function for loading the dataset
def parser(x):
return datetime.strptime('190'+x, '%Y-%m')
# frame a sequence as a supervised learning problem
def timeseries_to_supervised(data, lag=1):
df = DataFrame(data)
columns = [df.shift(i) for i in range(1, lag+1)]
columns.append(df)
df = concat(columns, axis=1)
df.fillna(0, inplace=True)
return df
# create a differenced series
def difference(dataset, interval=1):
diff = list()
for i in range(interval, len(dataset)):
value = dataset[i] - dataset[i - interval]
diff.append(value)
return Series(diff)
# invert differenced value
def inverse_difference(history, yhat, interval=1):
return yhat + history[-interval]
# scale train and test data to [0, 1]
def scale(train, test):
# fit scaler
scaler = MinMaxScaler(feature_range=(0, 0.9))
scaler = scaler.fit(train)
print(scaler.data_min_)
print(scaler.data_max_)
# transform train
train = train.reshape(train.shape[0], train.shape[1])
train_scaled = scaler.transform(train)
# transform test
test = test.reshape(test.shape[0], test.shape[1])
test_scaled = scaler.transform(test)
return scaler, train_scaled, test_scaled
# inverse scaling for a forecasted value
def invert_scale(scaler, X, value):
new_row = [x for x in X] + [value]
array = np.array(new_row)
array = array.reshape(1, len(array))
inverted = scaler.inverse_transform(array)
return inverted[0, -1]
# fit an LSTM network to training data
def fit_lstm(train, batch_size, nb_epoch, neurons):
X, y = train[:, 0:-1], train[:, -1]
X = X.reshape(X.shape[0], 1, X.shape[1])
print(X.shape)
model = tf.keras.models.Sequential([
#tf.keras.layers.Input(shape=(1,1), name='input'),
tf.keras.layers.LSTM(neurons, batch_input_shape=(batch_size, X.shape[1], X.shape[2]), stateful=False),
#tf.keras.layers.Flatten(),
tf.keras.layers.Dense(1,name='output')
])
model.compile(loss='mean_squared_error', optimizer='adam')
# model.compile(optimizer='adam',
# loss='sparse_categorical_crossentropy',
# metrics=['accuracy'])
model.summary()
#for i in range(nb_epoch):
# model.fit(X, y, epochs=1, batch_size=batch_size, verbose=0, shuffle=False)
# model.reset_states()
model.fit(X, y, epochs=nb_epoch, batch_size=batch_size, verbose=0, shuffle=False)
return model
# make a one-step forecast
def forecast_lstm(model, batch_size, X):
X = X.reshape(1, 1, len(X))
yhat = model.predict(X, batch_size=batch_size)
return yhat[0,0]
###Output
_____no_output_____
###Markdown
EvaluationFunction for the mean absolute percentage error: the average relative gap between the predicted and the measured value (returned as a fraction, not multiplied by 100)
###Code
def mean_absolute_percentage_error(y_true, y_pred):
return ((np.fabs(y_true - y_pred)/y_true).mean())
###Output
_____no_output_____
###Markdown
Importing the dataColumns imported: the 2nd, 3rd and 8th columns2nd column: time3rd column: antenna identifier7th column: amount of data (note: the read_csv call below actually keeps columns 0 and 4)
###Code
dataframe = read_csv('Z1_RYG_20190423_cost.csv', usecols=[0,4], engine='python')
dataset = dataframe.values
###Output
_____no_output_____
###Markdown
Data selectionIsolate a single antenna (ID: 39)Convert the time to hours
###Code
newdataset1 = []
newdataset2 = []
for i in range(len(dataset)):
a = i
b = dataset[i, 1]
newdataset1.append(a)
newdataset2.append(b)
raw_values = np.asarray(newdataset2)
l=len(raw_values)
plt.plot(raw_values[:int(l/3)])
plt.show()
plt.plot(raw_values[int(l/3):-int(l/3)])
plt.show()
plt.plot(raw_values[-int(l/3):])
plt.show()
###Output
_____no_output_____
###Markdown
Splitting the datasetDefine a training set and a test setScale the data to a fixed range (the scale() function below uses MinMaxScaler with feature_range=(0, 0.9))
###Code
diff_values = difference(raw_values, 1)
# transform data to be supervised learning
supervised = timeseries_to_supervised(diff_values, 1)
supervised_values = supervised.values
# split data into train and test-sets
train, test = supervised_values[0:-int(l/3)], supervised_values[-int(l/3):]
# transform the scale of the data
scaler, train_scaled, test_scaled = scale(train, test)
plt.plot(train_scaled[:int(l/3)])
plt.show()
plt.plot(test_scaled[:int(l/3)])
plt.show()
###Output
[-11.3272 -11.3272]
[11.03015 11.03015]
###Markdown
Training
###Code
lstm_model = fit_lstm(train_scaled, 1, 1, 1)
# forecast the entire training dataset to build up state for forecasting
train_reshaped = train_scaled[:, 0].reshape(len(train_scaled), 1, 1)
lstm_model.predict(train_reshaped, batch_size=1)
###Output
(337959, 1, 1)
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
lstm (LSTM) (1, 1) 12
_________________________________________________________________
output (Dense) (1, 1) 2
=================================================================
Total params: 14
Trainable params: 14
Non-trainable params: 0
_________________________________________________________________
###Markdown
PredictionPredict the data one step at a time and build a predicted dataset.
###Code
# walk-forward validation on the test data
predictions = list()
for i in range(len(test_scaled)):
# make one-step forecast
X, y = test_scaled[i, 0:-1], test_scaled[i, -1]
yhat = forecast_lstm(lstm_model, 1, X)
# invert scaling
yhat = invert_scale(scaler, X, yhat)
# invert differencing
yhat = inverse_difference(raw_values, yhat, len(test_scaled)+1-i)
# store forecast
expected = raw_values[len(train) + i + 1]
predictions.append(yhat)
for i in range(len(test_scaled)):
print('hour=%d, Predicted=%f, Expected=%f' % ((i+1)/6, predictions[i], raw_values[len(train) + i + 1]))
###Output
_____no_output_____
###Markdown
Generate proper data with 1 data shift
###Code
# walk-forward validation on the test data
y_train = list()
X_train = list()
y_train.append(test_scaled[0, 0:-1][0])
X_train.append(0)
for i in range(1,len(test_scaled)):
# make one-step forecast
X_train.append(test_scaled[i-1, 0:-1][0])
y_train.append(test_scaled[i, 0:-1][0])
for i in range(len(test_scaled)):
print('X_train=%f, y_train=%f' % (X_train[i], y_train[i]))
###Output
_____no_output_____
###Markdown
LSTM modelOur goal is to create an LSTM model to predict y_train from the time series X_train
###Code
from keras import models
from keras import layers
def define_model(len_ts,
hidden_neurons = 1,
nfeature=1,
batch_size=None,
stateful=False):
in_out_neurons = 1
inp = layers.Input(batch_shape= (batch_size, len_ts, nfeature),
name="input")
rnn = layers.LSTM(hidden_neurons,
return_sequences=True,
stateful=stateful,
name="RNN")(inp)
dens = layers.TimeDistributed(layers.Dense(in_out_neurons,name="dense"))(rnn)
model = models.Model(inputs=[inp],outputs=[dens])
model.compile(loss="mean_squared_error",
sample_weight_mode="temporal",
optimizer="adam")
return(model,(inp,rnn,dens))
X_train_array = np.array(X_train)
y_train_array = np.array(y_train)
hunits = 1
model1, _ = define_model(
hidden_neurons = hunits,
len_ts = X_train_array.shape[0])
model1.summary()
w = np.zeros(y_train_array.shape[:2])
D=1
w[D:] = 1
w_train = w
from keras.callbacks import ModelCheckpoint
start = time.time()
hist1 = model1.fit(X_train_array, y_train_array,
batch_size=2**9,
epochs=200,
verbose=False,
sample_weight=w_train,
validation_split=0.05,
callbacks=[
ModelCheckpoint(filepath="weights{epoch:03d}.hdf5")])
end = time.time()
print("Time took {:3.1f} min".format((end-start)/60))
labels = ["loss","val_loss"]
for lab in labels:
plt.plot(hist1.history[lab],label=lab + " model1")
plt.yscale("log")
plt.legend()
plt.show()
for layer in model1.layers:
if "LSTM" in str(layer):
weightLSTM = layer.get_weights()
warr,uarr, barr = weightLSTM
print(warr)
print("\n")
print(uarr)
print("\n")
print(barr)
print(model1.layers[2].get_weights())
warr.shape,uarr.shape,barr.shape
def sigmoid(x):
return(1.0/(1.0+np.exp(-x)))
def LSTMlayer(weight,x_t,h_tm1,c_tm1):
'''
c_tm1 = np.array([0,0]).reshape(1,2)
h_tm1 = np.array([0,0]).reshape(1,2)
x_t = np.array([1]).reshape(1,1)
warr.shape = (nfeature,hunits*4)
uarr.shape = (hunits,hunits*4)
barr.shape = (hunits*4,)
'''
warr,uarr, barr = weight
s_t = (x_t.dot(warr) + h_tm1.dot(uarr) + barr)
hunit = uarr.shape[0]
i = sigmoid(s_t[:,:hunit])
f = sigmoid(s_t[:,1*hunit:2*hunit])
_c = np.tanh(s_t[:,2*hunit:3*hunit])
o = sigmoid(s_t[:,3*hunit:])
c_t = i*_c + f*c_tm1
h_t = o*np.tanh(c_t)
return(h_t,c_t)
c_tm1 = np.array([0]*hunits).reshape(1,hunits)
h_tm1 = np.array([0]*hunits).reshape(1,hunits)
xs = train_scaled[:,0]
for i in range(len(xs)):
x_t = xs[i].reshape(1,1)
h_tm1,c_tm1 = LSTMlayer(weightLSTM,x_t,h_tm1,c_tm1)
print("h3={}".format(h_tm1))
print("c3={}".format(c_tm1))
batch_size = 1
len_ts = len(xs)
nfeature = 1
inp = layers.Input(batch_shape= (batch_size, len_ts, nfeature),
name="input")
rnn,s,c = layers.LSTM(hunits,
return_sequences=True,
stateful=False,
return_state=True,
name="RNN")(inp)
states = models.Model(inputs=[inp],outputs=[s,c, rnn])
for layer in states.layers:
for layer1 in model1.layers:
if layer.name == layer1.name:
layer.set_weights(layer1.get_weights())
h_t_keras, c_t_keras, rnn = states.predict(xs.reshape(1,len_ts,1))
print("h3={}".format(h_t_keras))
print("c3={}".format(c_t_keras))
model1.layers[1].get_weights()[0]
file_object = open('parameters.h', 'w')
file_object.write("//\n// Generated by spiderweak using Python.\n//\n\n#ifndef CPP_PARAMETERS_H\n#define CPP_PARAMETERS_H\n\n")
file_object.write("#define HUNIT " + str(hunits) + "\n\n")
file_object.write("#endif //CPP_PARAMETERS_H\n\nconst int hunit = HUNIT;\n\nconst float lstm_cell_input_weights[4 * HUNIT] = {")
states.layers[1].get_weights()[0].tofile("weights.txt", sep=", ", format="%s")
weight_file = open("weights.txt", 'r')
for line in weight_file:
file_object.write(line);
weight_file.close()
file_object.write("};\n\nconst float lstm_cell_hidden_weights[4 * HUNIT * HUNIT] = {")
states.layers[1].get_weights()[1].tofile("weights.txt", sep=", ", format="%s")
weight_file = open("weights.txt", 'r')
for line in weight_file:
file_object.write(line);
weight_file.close()
file_object.write("};\n\nconst float lstm_cell_bias[4 * HUNIT] = {")
states.layers[1].get_weights()[2].tofile("weights.txt", sep=", ", format="%s")
weight_file = open("weights.txt", 'r')
for line in weight_file:
file_object.write(line);
weight_file.close()
file_object.write("};\n\nfloat lstm_cell_hidden_layer[HUNIT] = {")
h_t_keras.tofile("weights.txt", sep=", ", format="%s")
weight_file = open("weights.txt", 'r')
for line in weight_file:
file_object.write(line);
weight_file.close()
file_object.write("};\nfloat lstm_cell_cell_states[HUNIT] = {")
c_t_keras.tofile("weights.txt", sep=", ", format="%s")
weight_file = open("weights.txt", 'r')
for line in weight_file:
file_object.write(line);
weight_file.close()
file_object.write("};\n\nconst float dense_weights[HUNIT] = {")
model1.layers[2].get_weights()[0].tofile("weights.txt", sep=", ", format="%s")
weight_file = open("weights.txt", 'r')
for line in weight_file:
file_object.write(line);
weight_file.close()
file_object.write("};\nconst float dense_bias = ")
model1.layers[2].get_weights()[1].tofile("weights.txt", sep=", ", format="%s")
weight_file = open("weights.txt", 'r')
for line in weight_file:
file_object.write(line);
weight_file.close()
file_object.write(";\n")
file_object.close()
fig = plt.figure(figsize=(9,4))
ax = fig.add_subplot(1,2,1)
ax.plot(h_tm1.flatten(),h_t_keras.flatten(),"p")
ax.set_xlabel("h by hand")
ax.set_ylabel("h by Keras")
ax = fig.add_subplot(1,2,2)
ax.plot(c_tm1.flatten(),c_t_keras.flatten(),"p")
ax.set_xlabel("c by hand")
ax.set_ylabel("c by Keras")
plt.show()
###Output
_____no_output_____ |
dp_apply2weights.ipynb | ###Markdown
###Code
#model=load_model("/content/drive/MyDrive/Local Models for Diabetes/local_model1_99acc.pkl")
import pandas
from sklearn import model_selection
from sklearn.linear_model import LogisticRegression
import pickle
import tensorflow as tf
'''
filename = 'finalized_model.sav'
pickle.dump(model, open(filename, 'wb'))
'''
# my_model directory
!ls /content/drive/MyDrive/saved_model
# Contains an assets folder, saved_model.pb, and variables folder.
# Create a new model instance
new_model = tf.keras.models.load_model('/content/drive/MyDrive/saved_model/my_model1')
# Check its architecture
new_model.summary()
# Restore the weights
new_model.load_weights("/content/drive/MyDrive/saved_model/my_model1.hdf5")
w1, b1 = new_model.layers[0].get_weights()
layers=new_model.layers
layers
w1
b1
weights=[]
for layer in new_model.layers:
weights.append(layer.get_weights())
print(layer)
print(weights)
import numpy as np
epsilon=0.3
beta=1/epsilon
for i in range(len(layers)):
weights[i][0]+=np.random.laplace(0,beta,1)
weights[i][1]+=np.random.laplace(0,beta,1)
weights
'''
def priv():
new_labels=list()
for img in preds:
label_count=np.bincount(img,minlength=num_label)
epsilon=0.1
beta=1/epsilon
for i in range(len(label_count)):
label_count[i]+=np.random.laplace(0,beta,1)
new_labels.append(np.argmax(label_count))
'''
def sensitivity(db,query,num_entries):
dbs=get_parallel_dbs(db, num_entries)
a=query(db)
max_diff=0
for db in dbs:
b=query(db)
diff=torch.abs(a-b)
if(max_diff<diff):
max_diff=diff
return max_diff
# Create a new model instance
new_model2 = tf.keras.models.load_model('/content/drive/MyDrive/saved_model/my_model1')
# Check its architecture
new_model2.summary()
weights[2]
i=0
for layer in new_model2.layers:
layer.set_weights(weights[i])
print(weights[i])
i+=1
new_model2.save_weights("/content/drive/MyDrive/dpappliedweights1.h5")
###Output
_____no_output_____ |
H_Segmentation_and_MBA.ipynb | ###Markdown
Answer/Execute the following statements: RFM and K-Means1. Load the `new_df` from yesterday.2. Make a new df `history_df` where you place the RFM per CustomerID.3. For easier interpretation, convert the RFM values to log scale. (Hint: Use `.apply(math.log)`.3. Plot the 3 of them vs Amount.4. Create a 3D plot of RFM by running this (make sure to name your variables accordingly):
###Code
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure(figsize=(15, 10))
ax = fig.add_subplot(111, projection='3d')
r = history_df.recency
f = history_df.frequency
m = history_df.monetary
ax.scatter(r, f, m, s=5)
ax.set_xlabel('Recency')
ax.set_ylabel('Frequency')
ax.set_zlabel('Monetary')
plt.show()
###Output
_____no_output_____
###Markdown
6. Write down your observations.7. Prepare the data for clustering by running this (make sure to name your variables accordingly):
###Code
from sklearn import preprocessing
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples, silhouette_score
feature_vector = ['recency','frequency', 'monetary']
X_subset = customer_history_df[feature_vector]
scaler = preprocessing.StandardScaler().fit(X_subset)
X_scaled = scaler.transform(X_subset)
###Output
_____no_output_____
###Markdown
8. Try out 1 <= k <= 20. 9. Validate whar the best k with elbow method, silhouette score, and Davies-Bouldin index.10. Write down your observations.11. If it is hard to decide for the best number of k, undo the log scaling and try clustering again.12. Determine what makes sense in the clustering, and decide what the best k is.13. To help you further, create a boxplot of clusters for each k for every RFM measure. The less the variance (or thickness of boxplot) the better.14. Also, explore on adding other freatures per customer such as Country, how long the customer has been a customer, etc. Supplementing with Market Basket Analysis1. Run this code to generate an encoded item listing:
###Code
items = list(new_df.Description.unique())
grouped = new_df.groupby('InvoiceNo')
transaction_level = grouped.aggregate(lambda x: tuple(x)).reset_index()[['InvoiceNo','Description']]
transaction_dict = {item:0 for item in items}
output_dict = dict()
temp = dict()
for rec in transaction_level.to_dict('records'):
invoice_num = rec['InvoiceNo']
items_list = rec['Description']
transaction_dict = {item:0 for item in items}
transaction_dict.update({item:1 for item in items if item in items_list})
temp.update({invoice_num:transaction_dict})
new = [v for k,v in temp.items()]
transaction_df = pd.DataFrame(new)
###Output
_____no_output_____ |
machine-learning-1/Clustering-empty.ipynb | ###Markdown
Clustering

This lesson is inspired by the one by David Kirkby (UC Irvine), BSD 3-Clause licensed. To read it in your own time later, keep this link: https://github.com/dkirkby/MachineLearningStatistics

What is clustering? "**Unsupervised classification**": for example, the separation between the two sets of observations hints that there is a structure: **at least two species of iris**.

Clustering a toy dataset

Remember the main ML pipeline:
1. Define the task: Clustering
1. Get the data
1. Get an overview
1. Prepare the data
1. Select a model
1. Train and evaluate
1. Evaluate on test set
1. Launch, monitor, maintain
###Code
import pandas as pd
import matplotlib.pyplot as plt
data = pd.read_hdf("data/cluster_d_data.hf5")
# Get an overview
#We want every data point in a row and the features in columns
#Here we find 2 features and 500 data points
data.shape
plt.scatter(data['x1'], data['x2'])
#We will use the K-Means algorithm to cluster these
#SKLearn always has the same layout for any model
from sklearn.cluster import KMeans
#Instance of the KMeans model
#Tell it the number of clusters,
model = KMeans(n_clusters = 3)
#Method to fit the data
model.fit(data)
#The labels_ attribute assigns each point a cluster index (0, 1 or 2 here)
#Sometimes the cluster numbers will be swapped due to the randomness
#We aren't telling the computer which group should get which label
#It starts with a random guess and then optimises it
#This shows the issues with random seeds and reproducibility
#Fix this by freezing the seed, e.g. KMeans(n_clusters=3, random_state=0)
print(model.labels_)
#Plot the data
def display(data, model):
labels = model.labels_
plt.scatter(
data["x1"], data['x2'],
c = labels, #color
cmap = plt.cm.viridis,
s=6,)
display(data, model)
###Output
_____no_output_____
###Markdown
Learning about the algorithm

Based on looking at all the data, we see that there are issues when the clusters are wider than the separation between the clusters, or when the separation is not a straight line (clusters have to be convex, not concave). K-Means is too simplistic for such cases. You need to know how the algorithm works so you can pick its hyperparameters, and also because the hypothesis under which the algorithm was developed may be different from your use case.

Exercise 2
- Try to use `sklearn.cluster.DBSCAN`
- Try using the 3D data "data/cluster_3d_data.hf5"
- Implement K-Means from scratch (a sketch follows right after this list)
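A minimal from-scratch K-Means sketch (Lloyd's algorithm) for the last exercise item, assuming the 2-D toy data loaded above as `data[['x1', 'x2']].to_numpy()`; it is illustrative only, not a replacement for `sklearn.cluster.KMeans`:

```python
import numpy as np

def kmeans_scratch(X, k, n_iter=100, seed=0):
    rng = np.random.default_rng(seed)
    # 1. Start from k randomly chosen points as the initial centroids.
    centroids = X[rng.choice(len(X), size=k, replace=False)]
    for _ in range(n_iter):
        # 2. Assign every point to its nearest centroid.
        dists = np.linalg.norm(X[:, None, :] - centroids[None, :, :], axis=2)
        labels = dists.argmin(axis=1)
        # 3. Move each centroid to the mean of its assigned points.
        # (A robust version would also re-seed clusters that end up empty.)
        new_centroids = np.array([X[labels == j].mean(axis=0) for j in range(k)])
        if np.allclose(new_centroids, centroids):
            break
        centroids = new_centroids
    return labels, centroids

labels, centroids = kmeans_scratch(data[['x1', 'x2']].to_numpy(), k=3)
```

The next cell tries `DBSCAN`, which handles the non-convex cases that K-Means struggles with.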
###Code
from sklearn.cluster import DBSCAN
#Instance of the DBSCAN model
#Unlike KMeans it does not need the number of clusters up front; tune eps and min_samples instead
model = DBSCAN()
#Method to fit the data
model.fit(data)
display(data, model)
###Output
_____no_output_____ |
Recommendation Systems/Recommendation System - Movies.ipynb | ###Markdown
Movie Recommendation
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
%matplotlib inline
import os
dataset = pd.read_csv("ml-100k/u.data",sep='\t',names="user_id,item_id,rating,timestamp".split(","))
dataset.head()
len(dataset.user_id.unique()), len(dataset.item_id.unique())
dataset.user_id = dataset.user_id.astype('category').cat.codes.values
dataset.item_id = dataset.item_id.astype('category').cat.codes.values
dataset.head()
from sklearn.model_selection import train_test_split
train, test = train_test_split(dataset, test_size=0.2)
train.head()
test.head()
###Output
_____no_output_____
###Markdown
Method 1 - Using Matrix Factorization
###Code
import keras
from IPython.display import SVG
from keras.optimizers import Adam
from keras.layers import Input, Embedding, Flatten, Dot, Dense, Concatenate
from keras.utils.vis_utils import model_to_dot
from keras.models import Model
n_users, n_movies = len(dataset.user_id.unique()), len(dataset.item_id.unique())
n_latent_factors = 3
movie_input = keras.layers.Input(shape=[1],name='Item')
movie_embedding = keras.layers.Embedding(n_movies + 1, n_latent_factors, name='Movie-Embedding')(movie_input)
movie_vec = keras.layers.Flatten(name='FlattenMovies')(movie_embedding)
user_input = keras.layers.Input(shape=[1],name='User')
user_vec = keras.layers.Flatten(name='FlattenUsers')(keras.layers.Embedding(n_users + 1, n_latent_factors,name='User-Embedding')(user_input))
#prod = keras.layers.Concatenate([movie_vec, user_vec], name='DotProduct')
prod = Dot(name="Dot-Product", axes=1)([movie_vec, user_vec])
model = keras.Model([user_input, movie_input], prod)
model.compile('adam', 'mean_squared_error')
SVG(model_to_dot(model, show_shapes=True, show_layer_names=True, rankdir='HB').create(prog='dot', format='svg'))
model.summary()
history = model.fit([train.user_id, train.item_id], train.rating, epochs=100, verbose=1)
pd.Series(history.history['loss']).plot(logy=True)
plt.xlabel("Epoch")
plt.ylabel("Train Error")
y_hat = np.round(model.predict([test.user_id, test.item_id]),0)
y_true = test.rating
from sklearn.metrics import mean_absolute_error
mean_absolute_error(y_true, y_hat)
movie_embedding_learnt = model.get_layer(name='Movie-Embedding').get_weights()[0]
pd.DataFrame(movie_embedding_learnt).describe()
user_embedding_learnt = model.get_layer(name='User-Embedding').get_weights()[0]
pd.DataFrame(user_embedding_learnt).describe()
###Output
_____no_output_____
###Markdown
Method 2 - Using Matrix Factorization (with Non Negative Constraint)
###Code
from keras.constraints import non_neg
movie_input = keras.layers.Input(shape=[1],name='Item')
movie_embedding = keras.layers.Embedding(n_movies + 1, n_latent_factors, name='NonNegMovie-Embedding', embeddings_constraint=non_neg())(movie_input)
movie_vec = keras.layers.Flatten(name='FlattenMovies')(movie_embedding)
user_input = keras.layers.Input(shape=[1],name='User')
user_vec = keras.layers.Flatten(name='FlattenUsers')(keras.layers.Embedding(n_users + 1, n_latent_factors,name='NonNegUser-Embedding',embeddings_constraint=non_neg())(user_input))
prod = Dot(name="Dot-Product", axes=1)([movie_vec, user_vec])
model = keras.Model([user_input, movie_input], prod)
model.compile('adam', 'mean_squared_error')
history_nonneg = model.fit([train.user_id, train.item_id], train.rating, epochs=10, verbose=1)
movie_embedding_learnt = model.get_layer(name='NonNegMovie-Embedding').get_weights()[0]
pd.DataFrame(movie_embedding_learnt).describe()
###Output
_____no_output_____
###Markdown
Method 3 - Using Keras Deep Learning Network
###Code
n_latent_factors_user = 5
n_latent_factors_movie = 8
movie_input = keras.layers.Input(shape=[1],name='Item')
movie_embedding = keras.layers.Embedding(n_movies + 1, n_latent_factors_movie, name='Movie-Embedding')(movie_input)
movie_vec = keras.layers.Flatten(name='FlattenMovies')(movie_embedding)
movie_vec = keras.layers.Dropout(0.2)(movie_vec)
user_input = keras.layers.Input(shape=[1],name='User')
user_vec = keras.layers.Flatten(name='FlattenUsers')(keras.layers.Embedding(n_users + 1, n_latent_factors_user,name='User-Embedding')(user_input))
user_vec = keras.layers.Dropout(0.2)(user_vec)
#concat = keras.layers.Concatenate([movie_vec, user_vec], mode='concat',name='Concat')
concat = Concatenate(name="Concat", axis=1)([movie_vec, user_vec])
dense = keras.layers.Dense(200,name='FullyConnected')(concat)
dropout_1 = keras.layers.Dropout(0.2,name='Dropout')(dense)
dense_2 = keras.layers.Dense(100,name='FullyConnected-1')(concat)
dropout_2 = keras.layers.Dropout(0.2,name='Dropout')(dense_2)
dense_3 = keras.layers.Dense(50,name='FullyConnected-2')(dense_2)
dropout_3 = keras.layers.Dropout(0.2,name='Dropout')(dense_3)
dense_4 = keras.layers.Dense(20,name='FullyConnected-3', activation='relu')(dense_3)
result = keras.layers.Dense(1, activation='relu',name='Activation')(dense_4)
adam = Adam(lr=0.005)
model = keras.Model([user_input, movie_input], result)
model.compile(optimizer=adam,loss= 'mean_absolute_error')
SVG(model_to_dot(model, show_shapes=True, show_layer_names=True, rankdir='HB').create(prog='dot', format='svg'))
model.summary()
history = model.fit([train.user_id, train.item_id], train.rating, epochs=10, verbose=1)
y_hat_2 = np.round(model.predict([test.user_id, test.item_id]),0)
print(mean_absolute_error(y_true, y_hat_2))
print(mean_absolute_error(y_true, model.predict([test.user_id, test.item_id])))
###Output
0.70775
0.727382292675972
###Markdown
Method 4 - Using 'Surprise' Package
###Code
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from surprise import Dataset
from surprise import accuracy
from surprise.model_selection import train_test_split
from surprise import SVD
# Load the movielens-100k dataset UserID::MovieID::Rating::Timestamp
data = Dataset.load_builtin('ml-100k')
trainset, testset = train_test_split(data, test_size=.15)
# trainset = data.build_full_trainset()
algo = SVD()
algo.fit(trainset)
# we can now query for specific predictions
uid = str(196) # raw user id
iid = str(302) # raw item id
# get a prediction for specific users and items.
pred = algo.predict(uid, iid, r_ui=4, verbose=True)
# run the trained model against the testset
test_pred = algo.test(testset)
# get RMSE
print("User-based Model : Test Set")
accuracy.rmse(test_pred, verbose=True)
# if you wanted to evaluate on the trainset
print("User-based Model : Training Set")
train_pred = algo.test(trainset.build_testset())
accuracy.rmse(train_pred)
###Output
user: 196 item: 302 r_ui = 4.00 est = 4.24 {'was_impossible': False}
User-based Model : Test Set
RMSE: 0.9313
User-based Model : Training Set
RMSE: 0.6833
###Markdown
Benchmark of the various algorithms in Surprise
###Code
'''This module runs a 5-Fold CV for all the algorithms (default parameters) on
the movielens datasets, and reports average RMSE, MAE, and total computation time. '''
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import time
import datetime
import random
import numpy as np
import six
from tabulate import tabulate
from surprise.model_selection import cross_validate
from surprise.model_selection import KFold
from surprise import NormalPredictor
from surprise import BaselineOnly
from surprise import KNNBasic
from surprise import KNNWithMeans
from surprise import KNNBaseline
from surprise import SVD
from surprise import SVDpp
from surprise import NMF
from surprise import SlopeOne
from surprise import CoClustering
# Load the movielens-100k dataset UserID::MovieID::Rating::Timestamp
data = Dataset.load_builtin('ml-100k')
# The algorithms to cross-validate
classes = (SVD, SVDpp , NMF, SlopeOne, KNNBasic, KNNWithMeans, KNNBaseline, CoClustering, BaselineOnly, NormalPredictor)
np.random.seed(0)
random.seed(0)
kf = KFold(random_state=0) # folds will be the same for all algorithms.
table = []
for klass in classes:
start = time.time()
out = cross_validate(klass(), data, ['rmse', 'mae'], kf)
cv_time = str(datetime.timedelta(seconds=int(time.time() - start)))
mean_rmse = '{:.3f}'.format(np.mean(out['test_rmse']))
mean_mae = '{:.3f}'.format(np.mean(out['test_mae']))
new_line = [klass.__name__, mean_rmse, mean_mae, cv_time]
    print(tabulate([new_line], tablefmt="pipe"))  # print current algo perf
table.append(new_line)
header = ['','RMSE','MAE','Time']
print(tabulate(table, header, tablefmt="pipe"))
###Output
Computing the msd similarity matrix...
Done computing similarity matrix.
Computing the msd similarity matrix...
Done computing similarity matrix.
Computing the msd similarity matrix...
Done computing similarity matrix.
Computing the msd similarity matrix...
Done computing similarity matrix.
Computing the msd similarity matrix...
Done computing similarity matrix.
Computing the msd similarity matrix...
Done computing similarity matrix.
Computing the msd similarity matrix...
Done computing similarity matrix.
Computing the msd similarity matrix...
Done computing similarity matrix.
Computing the msd similarity matrix...
Done computing similarity matrix.
Computing the msd similarity matrix...
Done computing similarity matrix.
Estimating biases using als...
Computing the msd similarity matrix...
Done computing similarity matrix.
Estimating biases using als...
Computing the msd similarity matrix...
Done computing similarity matrix.
Estimating biases using als...
Computing the msd similarity matrix...
Done computing similarity matrix.
Estimating biases using als...
Computing the msd similarity matrix...
Done computing similarity matrix.
Estimating biases using als...
Computing the msd similarity matrix...
Done computing similarity matrix.
Estimating biases using als...
Estimating biases using als...
Estimating biases using als...
Estimating biases using als...
Estimating biases using als...
| | RMSE | MAE | Time |
|:----------------|-------:|------:|:--------|
| SVD | 0.936 | 0.738 | 0:00:24 |
| SVDpp | 0.922 | 0.723 | 0:13:24 |
| NMF | 0.964 | 0.758 | 0:00:26 |
| SlopeOne | 0.946 | 0.743 | 0:00:14 |
| KNNBasic | 0.98 | 0.774 | 0:00:16 |
| KNNWithMeans | 0.951 | 0.749 | 0:00:17 |
| KNNBaseline | 0.931 | 0.733 | 0:00:20 |
| CoClustering | 0.965 | 0.755 | 0:00:09 |
| BaselineOnly | 0.944 | 0.748 | 0:00:02 |
| NormalPredictor | 1.523 | 1.222 | 0:00:01 |
###Markdown
Gridsearch and Cross validation using 'Surprise'
###Code
from surprise.model_selection import cross_validate
from surprise.model_selection import GridSearchCV
param_grid = {'n_factors': [110, 120, 140, 160], 'n_epochs': [90, 100, 110], 'lr_all': [0.001, 0.003, 0.005, 0.008],
'reg_all': [0.08, 0.1, 0.15]}
gs = GridSearchCV(SVD, param_grid, measures=['rmse', 'mae'], cv=3)
gs.fit(data)
algo = gs.best_estimator['rmse']
print(gs.best_score['rmse'])
print(gs.best_params['rmse'])
# Run 5-fold cross-validation and print results.
cross_validate(algo, data, measures=['RMSE', 'MAE'], cv=5, verbose=True)
###Output
0.9181839273749549
{'n_factors': 140, 'n_epochs': 100, 'lr_all': 0.005, 'reg_all': 0.1}
Evaluating RMSE, MAE of algorithm SVD on 5 split(s).
Fold 1 Fold 2 Fold 3 Fold 4 Fold 5 Mean Std
RMSE (testset) 0.9045 0.9101 0.9063 0.9169 0.9108 0.9097 0.0043
MAE (testset) 0.7142 0.7196 0.7153 0.7254 0.7199 0.7189 0.0040
Fit time 35.07 37.87 41.20 33.75 34.30 36.44 2.77
Test time 0.15 0.15 0.13 0.17 0.14 0.15 0.01
###Markdown
Top N Movies Recommendation
###Code
"""
This module illustrates how to retrieve the top-10 items with highest rating
prediction. We first train an SVD algorithm on the MovieLens dataset, and then
predict all the ratings for the pairs (user, item) that are not in the training
set. We then retrieve the top-10 prediction for each user.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from collections import defaultdict
from surprise import SVD
from surprise import Dataset
import os, io
def get_top_n(predictions, n=10):
# First map the predictions to each user.
top_n = defaultdict(list)
for uid, iid, true_r, est, _ in predictions:
top_n[uid].append((iid, est))
# Then sort the predictions for each user and retrieve the k highest ones.
for uid, user_ratings in top_n.items():
user_ratings.sort(key=lambda x: x[1], reverse=True)
top_n[uid] = user_ratings[:n]
return top_n
def read_item_names():
"""Read the u.item file from MovieLens 100-k dataset and returns a
mapping to convert raw ids into movie names.
"""
    file_name = os.path.join("ml-100k", "u.item")  # portable path (os is imported above)
rid_to_name = {}
with io.open(file_name, 'r', encoding='ISO-8859-1') as f:
for line in f:
line = line.split('|')
rid_to_name[line[0]] = line[1]
return rid_to_name
# First train an SVD algorithm on the movielens dataset.
data = Dataset.load_builtin('ml-100k')
trainset = data.build_full_trainset()
algo = SVD()
algo.fit(trainset)
''' create a pandas frame from surprise trainset
iterator = trainset.all_ratings()
new_df = pd.DataFrame(columns=['uid', 'iid', 'rating'])
i = 0
for (uid, iid, rating) in iterator:
new_df.loc[i] = [uid, iid, rating]
i = i+1
'''
# Then predict ratings for all pairs (u, i) that are NOT in the training set.
testset = trainset.build_anti_testset()
predictions = algo.test(testset)
top_n = get_top_n(predictions, n=3)
rid_to_name = read_item_names()
for uid, user_ratings in top_n.items():
print(uid, [rid_to_name[str(int(iid)+1)] for (iid, _) in user_ratings])
###Output
196 ['Maltese Falcon, The (1941)', 'Spawn (1997)', 'Everyone Says I Love You (1996)']
186 ['Dances with Wolves (1990)', 'Bringing Up Baby (1938)', '3 Ninjas: High Noon At Mega Mountain (1998)']
22 ['Mighty Aphrodite (1995)', 'Everyone Says I Love You (1996)', "What's Eating Gilbert Grape (1993)"]
244 ['Two Bits (1995)', 'Supercop (1992)', 'Three Colors: Blue (1993)']
166 ['Legends of the Fall (1994)', 'Cinema Paradiso (1988)', 'Snow White and the Seven Dwarfs (1937)']
298 ["What's Eating Gilbert Grape (1993)", 'Heat (1995)', 'Jack (1996)']
115 ['Trainspotting (1996)', 'Mr. Smith Goes to Washington (1939)', 'Cinema Paradiso (1988)']
253 ['Die Hard (1988)', 'Brazil (1985)', 'Heat (1995)']
305 ['Night of the Living Dead (1968)', 'D3: The Mighty Ducks (1996)', 'Lost Horizon (1937)']
6 ['It Happened One Night (1934)', 'Apocalypse Now (1979)', 'Amadeus (1984)']
62 ['Aliens (1986)', 'Full Metal Jacket (1987)', 'Cinema Paradiso (1988)']
286 ["Ulee's Gold (1997)", 'Apartment, The (1960)', "What's Eating Gilbert Grape (1993)"]
200 ['Heat (1995)', "What's Eating Gilbert Grape (1993)", 'In the Name of the Father (1993)']
210 ["What's Eating Gilbert Grape (1993)", 'Jack (1996)', 'Apartment, The (1960)']
224 ['Dances with Wolves (1990)', 'Brazil (1985)', 'Heat (1995)']
303 ['Nikita (La Femme Nikita) (1990)', 'Shining, The (1980)', 'Wings of Desire (1987)']
122 ['Jack (1996)', 'Lost World: Jurassic Park, The (1997)', 'Mighty Aphrodite (1995)']
194 ['Cinema Paradiso (1988)', '3 Ninjas: High Noon At Mega Mountain (1998)', "Ulee's Gold (1997)"]
291 ['Maltese Falcon, The (1941)', 'Supercop (1992)', 'Full Metal Jacket (1987)']
234 ['Henry V (1989)', 'Jack (1996)', 'Hard Eight (1996)']
119 ['Cat on a Hot Tin Roof (1958)', 'Manon of the Spring (Manon des sources) (1986)', 'Amadeus (1984)']
167 ["Mr. Holland's Opus (1995)", 'Raiders of the Lost Ark (1981)', 'Local Hero (1983)']
299 ['Ridicule (1996)', "What's Eating Gilbert Grape (1993)", 'Pump Up the Volume (1990)']
308 ['White Squall (1996)', 'Trainspotting (1996)', 'Haunted World of Edward D. Wood Jr., The (1995)']
95 ['Everyone Says I Love You (1996)', 'Mighty Aphrodite (1995)', 'Vertigo (1958)']
38 ['It Happened One Night (1934)', 'Die Hard (1988)', 'Apartment, The (1960)']
102 ['Jack (1996)', "What's Eating Gilbert Grape (1993)", 'Annie Hall (1977)']
63 ['Cinema Paradiso (1988)', 'Spawn (1997)', 'Phenomenon (1996)']
160 ["What's Eating Gilbert Grape (1993)", 'Return of the Jedi (1983)', 'Everyone Says I Love You (1996)']
50 ['Legends of the Fall (1994)', 'Heat (1995)', 'Pump Up the Volume (1990)']
301 ['3 Ninjas: High Noon At Mega Mountain (1998)', 'Heat (1995)', 'Jack (1996)']
225 ['Everyone Says I Love You (1996)', 'Mighty Aphrodite (1995)', 'Jack (1996)']
290 ['Annie Hall (1977)', 'In the Name of the Father (1993)', 'Grifters, The (1990)']
97 ['Apartment, The (1960)', '3 Ninjas: High Noon At Mega Mountain (1998)', 'Mighty Aphrodite (1995)']
157 ['Jack (1996)', 'Cinema Paradiso (1988)', 'Wings of Desire (1987)']
181 ['3 Ninjas: High Noon At Mega Mountain (1998)', 'Everyone Says I Love You (1996)', 'Taxi Driver (1976)']
278 ['Supercop (1992)', 'Priest (1994)', 'Mighty Aphrodite (1995)']
276 ['Maltese Falcon, The (1941)', 'Shining, The (1980)', 'Terminator, The (1984)']
7 ['Restoration (1995)', 'Desperate Measures (1998)', '3 Ninjas: High Noon At Mega Mountain (1998)']
10 ['Everyone Says I Love You (1996)', 'Harold and Maude (1971)', 'Local Hero (1983)']
284 ['Legends of the Fall (1994)', 'Princess Bride, The (1987)', 'Apartment, The (1960)']
201 ['Dial M for Murder (1954)', 'Pump Up the Volume (1990)', 'Cinema Paradiso (1988)']
287 ['Local Hero (1983)', 'Jack (1996)', 'Everyone Says I Love You (1996)']
246 ['Jack (1996)', 'Local Hero (1983)', 'Everyone Says I Love You (1996)']
242 ["Ulee's Gold (1997)", 'Snow White and the Seven Dwarfs (1937)', 'It Happened One Night (1934)']
249 ['Trainspotting (1996)', 'My Fair Lady (1964)', 'Wings of Desire (1987)']
99 ['Dances with Wolves (1990)', 'It Happened One Night (1934)', 'Spawn (1997)']
178 ['Bringing Up Baby (1938)', 'Godfather: Part II, The (1974)', 'Jack (1996)']
251 ['Shining, The (1980)', 'Maltese Falcon, The (1941)', 'Rosencrantz and Guildenstern Are Dead (1990)']
81 ['Spawn (1997)', "What's Eating Gilbert Grape (1993)", 'Month by the Lake, A (1995)']
260 ['Trainspotting (1996)', 'Apartment, The (1960)', 'Clockwork Orange, A (1971)']
25 ["What's Eating Gilbert Grape (1993)", '3 Ninjas: High Noon At Mega Mountain (1998)', 'Everyone Says I Love You (1996)']
59 ['Ridicule (1996)', 'Turbo: A Power Rangers Movie (1997)', 'Right Stuff, The (1983)']
72 ['3 Ninjas: High Noon At Mega Mountain (1998)', 'Maltese Falcon, The (1941)', 'Notorious (1946)']
87 ['Raiders of the Lost Ark (1981)', 'Four Weddings and a Funeral (1994)', 'Cinema Paradiso (1988)']
42 ['Heat (1995)', 'Lawnmower Man, The (1992)', 'Deer Hunter, The (1978)']
292 ['Mighty Aphrodite (1995)', '2001: A Space Odyssey (1968)', 'Full Metal Jacket (1987)']
20 ['Mighty Aphrodite (1995)', 'Notorious (1946)', 'Jack (1996)']
13 ['Cinema Paradiso (1988)', 'Butch Cassidy and the Sundance Kid (1969)', 'Jack (1996)']
138 ['Cinema Paradiso (1988)', 'Jack (1996)', 'Brazil (1985)']
60 ['Jack (1996)', 'Cinema Paradiso (1988)', 'Ridicule (1996)']
57 ['Maltese Falcon, The (1941)', 'Princess Bride, The (1987)', 'Brazil (1985)']
223 ["What's Eating Gilbert Grape (1993)", 'Brazil (1985)', 'Hot Shots! Part Deux (1993)']
189 ['Harold and Maude (1971)', "What's Eating Gilbert Grape (1993)", 'Local Hero (1983)']
243 ['Cinema Paradiso (1988)', 'Trainspotting (1996)', 'Spawn (1997)']
92 ['Supercop (1992)', 'Maltese Falcon, The (1941)', 'Wings of Desire (1987)']
241 ['Everyone Says I Love You (1996)', 'Cinema Paradiso (1988)', 'Maltese Falcon, The (1941)']
254 ['Shine (1996)', 'Lawrence of Arabia (1962)', 'Everyone Says I Love You (1996)']
293 ['Jack (1996)', 'Cinema Paradiso (1988)', 'Haunted World of Edward D. Wood Jr., The (1995)']
127 ['Snow White and the Seven Dwarfs (1937)', 'It Happened One Night (1934)', 'Third Man, The (1949)']
222 ['Heat (1995)', 'Full Metal Jacket (1987)', 'Cinema Paradiso (1988)']
267 ['Spawn (1997)', 'Third Man, The (1949)', 'Shining, The (1980)']
11 ['Jack (1996)', 'Local Hero (1983)', "What's Eating Gilbert Grape (1993)"]
8 ["What's Eating Gilbert Grape (1993)", 'Everyone Says I Love You (1996)', 'Haunted World of Edward D. Wood Jr., The (1995)']
162 ["What's Eating Gilbert Grape (1993)", 'Princess Bride, The (1987)', 'Shining, The (1980)']
279 ['Ridicule (1996)', 'Night of the Living Dead (1968)', 'Dazed and Confused (1993)']
145 ['Apocalypse Now (1979)', 'Apartment, The (1960)', 'Casablanca (1942)']
28 ['Jack (1996)', 'Haunted World of Edward D. Wood Jr., The (1995)', 'Maltese Falcon, The (1941)']
135 ['3 Ninjas: High Noon At Mega Mountain (1998)', 'Everyone Says I Love You (1996)', 'Maltese Falcon, The (1941)']
32 ['Maltese Falcon, The (1941)', "What's Eating Gilbert Grape (1993)", 'Mighty Aphrodite (1995)']
90 ['M (1931)', 'Haunted World of Edward D. Wood Jr., The (1995)', 'Alice in Wonderland (1951)']
216 ['Return of the Jedi (1983)', 'Supercop (1992)', 'Apocalypse Now (1979)']
250 ['Pump Up the Volume (1990)', 'Big Night (1996)', 'It Happened One Night (1934)']
271 ['Maltese Falcon, The (1941)', 'Great Escape, The (1963)', 'Hard Eight (1996)']
265 ['It Happened One Night (1934)', 'Blues Brothers, The (1980)', 'Clockwork Orange, A (1971)']
198 ['Cinema Paradiso (1988)', 'Jack (1996)', "Ulee's Gold (1997)"]
168 ["What's Eating Gilbert Grape (1993)", 'Taxi Driver (1976)', 'Princess Bride, The (1987)']
110 ['Snow White and the Seven Dwarfs (1937)', 'Everyone Says I Love You (1996)', 'Haunted World of Edward D. Wood Jr., The (1995)']
58 ['Pump Up the Volume (1990)', 'English Patient, The (1996)', "Robert A. Heinlein's The Puppet Masters (1994)"]
237 ['Everyone Says I Love You (1996)', 'Wings of Desire (1987)', 'Legends of the Fall (1994)']
94 ['Jack (1996)', 'Cinema Paradiso (1988)', 'Restoration (1995)']
128 ['Clockwork Orange, A (1971)', 'Akira (1988)', 'Notorious (1946)']
44 ['Jack (1996)', 'Three Colors: Blue (1993)', 'Lost World: Jurassic Park, The (1997)']
264 ['Harold and Maude (1971)', 'Apartment, The (1960)', 'Everyone Says I Love You (1996)']
41 ['Jack (1996)', 'Cinema Paradiso (1988)', 'Mighty Aphrodite (1995)']
82 ['Princess Bride, The (1987)', 'Full Metal Jacket (1987)', 'Raiders of the Lost Ark (1981)']
262 ['Maltese Falcon, The (1941)', 'North by Northwest (1959)', 'Nikita (La Femme Nikita) (1990)']
174 ['Maltese Falcon, The (1941)', 'Everyone Says I Love You (1996)', "What's Eating Gilbert Grape (1993)"]
43 ['Maltese Falcon, The (1941)', 'It Happened One Night (1934)', 'Apartment, The (1960)']
84 ['Annie Hall (1977)', 'It Happened One Night (1934)', "Ulee's Gold (1997)"]
269 ['Cinema Paradiso (1988)', 'Jack (1996)', 'Better Off Dead... (1985)']
259 ['It Happened One Night (1934)', 'Jack (1996)', 'Apartment, The (1960)']
85 ['It Happened One Night (1934)', 'Jack (1996)', 'Clockwork Orange, A (1971)']
213 ['In the Name of the Father (1993)', '3 Ninjas: High Noon At Mega Mountain (1998)', 'Ridicule (1996)']
121 ['Cinema Paradiso (1988)', 'Jack (1996)', 'So I Married an Axe Murderer (1993)']
49 ['Trainspotting (1996)', 'Cinema Paradiso (1988)', 'Stand by Me (1986)']
155 ['Mighty Aphrodite (1995)', 'Everyone Says I Love You (1996)', 'Maltese Falcon, The (1941)']
68 ['Haunted World of Edward D. Wood Jr., The (1995)', 'Jack (1996)', 'Maltese Falcon, The (1941)']
172 ['Supercop (1992)', 'Legends of the Fall (1994)', 'Everyone Says I Love You (1996)']
19 ['Everyone Says I Love You (1996)', 'Cinema Paradiso (1988)', 'White Squall (1996)']
268 ['Boot, Das (1981)', 'It Happened One Night (1934)', 'Notorious (1946)']
5 ['2001: A Space Odyssey (1968)', 'Mighty Aphrodite (1995)', 'Shining, The (1980)']
80 ['Annie Hall (1977)', 'Mighty Aphrodite (1995)', 'Jack (1996)']
66 ["What's Eating Gilbert Grape (1993)", 'Princess Bride, The (1987)', 'Taxi Driver (1976)']
18 ['Hellraiser: Bloodline (1996)', 'Heat (1995)', 'Right Stuff, The (1983)']
26 ["What's Eating Gilbert Grape (1993)", 'Apartment, The (1960)', 'Maltese Falcon, The (1941)']
130 ['Cat on a Hot Tin Roof (1958)', 'Everyone Says I Love You (1996)', 'Tales From the Crypt Presents: Demon Knight (1995)']
256 ['Die Hard (1988)', 'Jack (1996)', 'Clockwork Orange, A (1971)']
1 ['Jack (1996)', 'English Patient, The (1996)', 'Quiet Man, The (1952)']
56 ['Mighty Aphrodite (1995)', 'Everyone Says I Love You (1996)', 'Annie Hall (1977)']
15 ['Mighty Aphrodite (1995)', '3 Ninjas: High Noon At Mega Mountain (1998)', 'Shining, The (1980)']
207 ['Harold and Maude (1971)', 'Apartment, The (1960)', 'Leaving Las Vegas (1995)']
232 ['Jack (1996)', 'Annie Hall (1977)', 'Cinema Paradiso (1988)']
52 ['Princess Bride, The (1987)', 'Jack (1996)', 'Cinema Paradiso (1988)']
161 ['Jack (1996)', 'Cinema Paradiso (1988)', 'Rumble in the Bronx (1995)']
148 ['Heat (1995)', 'It Happened One Night (1934)', 'Maltese Falcon, The (1941)']
125 ['Cable Guy, The (1996)', 'Dances with Wolves (1990)', '3 Ninjas: High Noon At Mega Mountain (1998)']
83 ['Everyone Says I Love You (1996)', 'Sling Blade (1996)', 'Heat (1995)']
272 ["What's Eating Gilbert Grape (1993)", 'Jack (1996)', 'Apartment, The (1960)']
151 ['I.Q. (1994)', 'Heat (1995)', 'Month by the Lake, A (1995)']
54 ['Maltese Falcon, The (1941)', 'Rosencrantz and Guildenstern Are Dead (1990)', "What's Eating Gilbert Grape (1993)"]
16 ['Local Hero (1983)', 'Grifters, The (1990)', '3 Ninjas: High Noon At Mega Mountain (1998)']
91 ['Heat (1995)', 'Annie Hall (1977)', 'As Good As It Gets (1997)']
294 ['Jack (1996)', "What's Eating Gilbert Grape (1993)", 'Raiders of the Lost Ark (1981)']
229 ['Jack (1996)', 'Angels and Insects (1995)', 'Trainspotting (1996)']
36 ['Everyone Says I Love You (1996)', 'Raging Bull (1980)', 'Maltese Falcon, The (1941)']
70 ['It Happened One Night (1934)', "What's Eating Gilbert Grape (1993)", 'Everyone Says I Love You (1996)']
14 ['Cinema Paradiso (1988)', 'Pump Up the Volume (1990)', 'Haunted World of Edward D. Wood Jr., The (1995)']
295 ['Bridge on the River Kwai, The (1957)', 'Month by the Lake, A (1995)', 'Notorious (1946)']
233 ['In the Name of the Father (1993)', 'Month by the Lake, A (1995)', 'Apartment, The (1960)']
214 ['Nikita (La Femme Nikita) (1990)', 'White Squall (1996)', 'Pump Up the Volume (1990)']
192 ['Jack (1996)', 'Clockwork Orange, A (1971)', 'In the Name of the Father (1993)']
100 ['Everyone Says I Love You (1996)', 'Jack (1996)', "What's Eating Gilbert Grape (1993)"]
307 ['It Happened One Night (1934)', 'My Fair Lady (1964)', 'North by Northwest (1959)']
297 ['Maltese Falcon, The (1941)', '2001: A Space Odyssey (1968)', 'Harold and Maude (1971)']
193 ['Maltese Falcon, The (1941)', 'English Patient, The (1996)', 'Haunted World of Edward D. Wood Jr., The (1995)']
113 ['Apartment, The (1960)', 'Jack (1996)', "What's Eating Gilbert Grape (1993)"]
275 ['Mighty Aphrodite (1995)', 'Lost World: Jurassic Park, The (1997)', 'Maltese Falcon, The (1941)']
219 ['Everyone Says I Love You (1996)', 'So I Married an Axe Murderer (1993)', 'Shining, The (1980)']
218 ['Jack (1996)', 'Maltese Falcon, The (1941)', "What's Eating Gilbert Grape (1993)"]
123 ['Trainspotting (1996)', 'Priest (1994)', 'Stand by Me (1986)']
158 ['Local Hero (1983)', 'Trainspotting (1996)', "What's Eating Gilbert Grape (1993)"]
302 ['Trainspotting (1996)', 'Jack (1996)', "What's Eating Gilbert Grape (1993)"]
23 ['Cinema Paradiso (1988)', 'English Patient, The (1996)', 'Restoration (1995)']
296 ['Bringing Up Baby (1938)', 'Everyone Says I Love You (1996)', 'Annie Hall (1977)']
33 ['Everyone Says I Love You (1996)', 'Harold and Maude (1971)', 'Blues Brothers, The (1980)']
154 ['Supercop (1992)', 'Return of the Jedi (1983)', "What's Eating Gilbert Grape (1993)"]
77 ['Cinema Paradiso (1988)', 'Jack (1996)', 'Haunted World of Edward D. Wood Jr., The (1995)']
270 ['Trainspotting (1996)', 'Die Hard (1988)', 'Third Man, The (1949)']
187 ['Jack (1996)', 'Cinema Paradiso (1988)', 'Mighty Aphrodite (1995)']
170 ['Jack (1996)', 'Everyone Says I Love You (1996)', 'Legends of the Fall (1994)']
101 ['Taxi Driver (1976)', 'Everyone Says I Love You (1996)', 'Jack (1996)']
184 ['Haunted World of Edward D. Wood Jr., The (1995)', 'Cinema Paradiso (1988)', 'Annie Hall (1977)']
112 ['Mighty Aphrodite (1995)', 'Clockwork Orange, A (1971)', 'Haunted World of Edward D. Wood Jr., The (1995)']
133 ['Jack (1996)', 'White Squall (1996)', 'Everyone Says I Love You (1996)']
215 ['Evil Dead II (1987)', 'Shining, The (1980)', 'Month by the Lake, A (1995)']
69 ['Supercop (1992)', 'Full Metal Jacket (1987)', 'Cinema Paradiso (1988)']
104 ['Clockwork Orange, A (1971)', 'Maltese Falcon, The (1941)', 'Princess Bride, The (1987)']
240 ['Spawn (1997)', 'Everyone Says I Love You (1996)', "What's Eating Gilbert Grape (1993)"]
144 ['Local Hero (1983)', 'Duck Soup (1933)', 'Golden Earrings (1947)']
191 ['Everyone Says I Love You (1996)', 'Maltese Falcon, The (1941)', 'Supercop (1992)']
61 ['Everyone Says I Love You (1996)', 'Haunted World of Edward D. Wood Jr., The (1995)', 'It Happened One Night (1934)']
142 ['Priest (1994)', 'Supercop (1992)', 'Full Metal Jacket (1987)']
177 ['It Happened One Night (1934)', 'Right Stuff, The (1983)', 'Jack (1996)']
203 ['Everyone Says I Love You (1996)', 'Heat (1995)', 'Maltese Falcon, The (1941)']
21 ['Spawn (1997)', 'Wings of Desire (1987)', 'Stand by Me (1986)']
197 ['Snow White and the Seven Dwarfs (1937)', 'Private Benjamin (1980)', 'Shining, The (1980)']
134 ['It Happened One Night (1934)', 'Haunted World of Edward D. Wood Jr., The (1995)', 'Jack (1996)']
180 ['Jack (1996)', 'Cinema Paradiso (1988)', 'Haunted World of Edward D. Wood Jr., The (1995)']
236 ['Great Escape, The (1963)', 'Kolya (1996)', 'Spawn (1997)']
263 ['Raging Bull (1980)', 'Jack (1996)', 'Trainspotting (1996)']
109 ['Gandhi (1982)', 'Rosencrantz and Guildenstern Are Dead (1990)', 'Cinema Paradiso (1988)']
64 ['Cinema Paradiso (1988)', 'Jack (1996)', 'Maltese Falcon, The (1941)']
114 ['Jack (1996)', 'Full Metal Jacket (1987)', 'Henry V (1989)']
239 ['Platoon (1986)', 'Supercop (1992)', 'English Patient, The (1996)']
117 ['So I Married an Axe Murderer (1993)', "What's Eating Gilbert Grape (1993)", 'Cinema Paradiso (1988)']
65 ['Die Hard (1988)', 'Annie Hall (1977)', 'Gone with the Wind (1939)']
137 ["Ulee's Gold (1997)", 'It Happened One Night (1934)', 'Gandhi (1982)']
257 ['Jack (1996)', 'Cinema Paradiso (1988)', 'Maltese Falcon, The (1941)']
111 ["What's Eating Gilbert Grape (1993)", 'Maltese Falcon, The (1941)', 'Mighty Aphrodite (1995)']
285 ['Everyone Says I Love You (1996)', 'It Happened One Night (1934)', 'Haunted World of Edward D. Wood Jr., The (1995)']
96 ['Apartment, The (1960)', 'Harold and Maude (1971)', 'Month by the Lake, A (1995)']
116 ['Wrong Trousers, The (1993)', 'Jack (1996)', 'Cinema Paradiso (1988)']
73 ['Day the Earth Stood Still, The (1951)', 'Amadeus (1984)', 'Cinema Paradiso (1988)']
221 ['Cinema Paradiso (1988)', 'Jack (1996)', 'Apocalypse Now (1979)']
235 ['Supercop (1992)', '2001: A Space Odyssey (1968)', 'Pump Up the Volume (1990)']
164 ['It Happened One Night (1934)', 'Apartment, The (1960)', 'Clockwork Orange, A (1971)']
281 ['White Squall (1996)', 'Everyone Says I Love You (1996)', "What's Eating Gilbert Grape (1993)"]
182 ['Maltese Falcon, The (1941)', 'Apartment, The (1960)', 'Cinema Paradiso (1988)']
129 ["What's Eating Gilbert Grape (1993)", 'Notorious (1946)', 'Maltese Falcon, The (1941)']
45 ['Maltese Falcon, The (1941)', 'Wings of Desire (1987)', 'Annie Hall (1977)']
131 ['Brazil (1985)', 'I.Q. (1994)', 'Jack (1996)']
230 ['Everyone Says I Love You (1996)', 'Maltese Falcon, The (1941)', '3 Ninjas: High Noon At Mega Mountain (1998)']
126 ['Supercop (1992)', 'Heathers (1989)', 'Spawn (1997)']
231 ['Annie Hall (1977)', 'Maltese Falcon, The (1941)', 'Best Men (1997)']
280 ['Stand by Me (1986)', 'It Happened One Night (1934)', 'Harold and Maude (1971)']
288 ['Maltese Falcon, The (1941)', 'Cinema Paradiso (1988)', 'It Happened One Night (1934)']
152 ['Trainspotting (1996)', 'Lawnmower Man, The (1992)', 'Sting, The (1973)']
217 ['Notorious (1946)', 'Nikita (La Femme Nikita) (1990)', 'Pump Up the Volume (1990)']
79 ['Jack (1996)', "What's Eating Gilbert Grape (1993)", 'Trainspotting (1996)']
75 ['Raiders of the Lost Ark (1981)', 'Legends of the Fall (1994)', 'Mighty Aphrodite (1995)']
245 ['Month by the Lake, A (1995)', 'It Happened One Night (1934)', 'Jack (1996)']
282 ['Heavy Metal (1981)', 'Maltese Falcon, The (1941)', 'So I Married an Axe Murderer (1993)']
78 ["What's Eating Gilbert Grape (1993)", 'Jack (1996)', 'Cinema Paradiso (1988)']
118 ["Ulee's Gold (1997)", 'Kull the Conqueror (1997)', 'Apartment, The (1960)']
283 ['Brazil (1985)', 'Jack (1996)', "What's Eating Gilbert Grape (1993)"]
171 ['Maltese Falcon, The (1941)', 'Princess Bride, The (1987)', 'Cinema Paradiso (1988)']
107 ['Maltese Falcon, The (1941)', "What's Eating Gilbert Grape (1993)", 'Harold and Maude (1971)']
226 ["What's Eating Gilbert Grape (1993)", 'It Happened One Night (1934)', 'Everyone Says I Love You (1996)']
|
211201_Market_Basket_Analysis_Apriori/211201_Market_Basket_Item_Apriori_Algorithm_Cory_Randolph.ipynb | ###Markdown
Market Basket Item Apriori Algorithm

CMPE 256
Cory Randolph
12/01/2021

Prompt

Learning Objective: Apply the Apriori algorithm to generate association rules and predict the next basket item.

Dataset: The Excel dataset contains Order ID, User ID, and Product Item name. Consider Order ID as Transaction ID and group items by order id. Generate association rules with MIN_SUP: 0.0045.

Train Dataset: TRAIN-ARULES.csv
Test Dataset: testarules.csv

Imports

Install needed packages
###Code
!pip install apyori
# Clear output for this cell
from IPython.display import clear_output
clear_output()
###Output
_____no_output_____
###Markdown
Import other needed packages
###Code
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from apyori import apriori
###Output
_____no_output_____
###Markdown
Data

Load Data

Load the data into Colab from a local CSV. Load the TRAIN-ARULES.csv.
###Code
# Uncomment to load TRAIN-ARULES.csv data from local folder
# from google.colab import files
# files.upload()
###Output
_____no_output_____
###Markdown
Note: The files can also be dragged and dropped into the folder tab in Colab's left-hand sidebar menu.

Load the data into DataFrames
###Code
# If file loaded into Colab directly:
# df = pd.read_csv('TRAIN-ARULES.csv')
url_github_data = 'https://raw.githubusercontent.com/coryroyce/code_assignments/main/211201_Market_Basket_Analysis_Apriori/data/TRAIN-ARULES.csv'
df = pd.read_csv(url_github_data)
df.head()
###Output
_____no_output_____
###Markdown
Describe Data

Quick data overview. Note that we convert the dataframe to strings since the Order ID and User ID are categorical and not numerical, so mean and other stats don't apply to our data.
###Code
df.astype(str).describe(include = 'all')
###Output
_____no_output_____
###Markdown
Clean Data Group the data by order ID so that we can start to get the data in a format that works well for the apriori library/package. Note: Uncomment the print statements to see the logical progression of the data transformation.
###Code
def transform_data(df):
# Make a copy of the input data frame
df_temp = df.copy()
# print(df_temp.head())
# Group the data by the order id (make a list of product item sets for each order)
df_grouped = df_temp.groupby(by = ['order_id'])['product_name'].apply(list).reset_index(name='product_item_set')
# print(df_grouped.head())
# Unpack the list of product items into their own columns
df_grouped = df_grouped['product_item_set'].apply(pd.Series)
# print(df_grouped.head())
    # Replace the NaN values with 0's
    df_grouped.fillna(0, inplace=True)
    # Convert the grouped dataframe into a list of lists to work with the apriori package
    data = df_grouped.astype(str).values.tolist()
    # Remove 0's from each "row"
data = [[ele for ele in sub if ele != '0'] for sub in data]
return data
data = transform_data(df)
# Display the first few rows of data for reference
print(data[0:2])
print(f'Number of item transactions: {len(data)}')
###Output
[['Organic Pink Lemonade Bunny Fruit Snacks', 'Dark Chocolate Minis', 'Sparkling Water, Natural Mango Essenced', 'Peach-Pear Sparkling Water', 'Organic Heritage Flakes Cereal', 'Popped Salted Caramel Granola Bars', 'Healthy Grains Granola Bar, Vanilla Blueberry', 'Flax Plus Organic Pumpkin Flax Granola', 'Sweet & Salty Nut Almond Granola Bars', 'Cool Mint Chocolate Energy Bar', 'Chocolate Chip Energy Bars', 'Trail Mix Fruit & Nut Chewy Granola Bars'], ['Creme De Menthe Thins', 'Milk Chocolate English Toffee Miniatures Candy Bars', "Baker's Pure Cane Ultrafine Sugar", 'Plain Bagels', 'Cinnamon Bread']]
Number of item transactions: 1418
###Markdown
Apriori Algorithm

Apply the apriori library to the data in order to generate the association rules. From the assignment prompt we need to pass in the additional parameter min_support = 0.0045.
###Code
%%time
# Note: a min_length argument could also be passed so that rules contain more than a single item
association_rules = apriori(transactions=data, min_support=0.0045)  # , min_length=3
association_results = list(association_rules)
df_results = pd.DataFrame(association_results)
###Output
CPU times: user 25.7 s, sys: 73.9 ms, total: 25.8 s
Wall time: 25.8 s
###Markdown
See how many total rules were created
###Code
print(len(association_results))
###Output
1492
###Markdown
Review the first result
###Code
print(association_results[0])
###Output
RelationRecord(items=frozenset({'0% Greek Strained Yogurt'}), support=0.009873060648801129, ordered_statistics=[OrderedStatistic(items_base=frozenset(), items_add=frozenset({'0% Greek Strained Yogurt'}), confidence=0.009873060648801129, lift=1.0)])
###Markdown
View first few results
###Code
df_results.head()
###Output
_____no_output_____
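The raw `RelationRecord` objects are awkward to read. A small helper, sketched here from the structure visible in the printed record above (`items`, `support`, and `ordered_statistics` with `items_base`, `items_add`, `confidence`, `lift`), flattens them into one row per rule:

```python
rows = []
for record in association_results:
    for stat in record.ordered_statistics:
        rows.append({
            'antecedent': ', '.join(stat.items_base),
            'consequent': ', '.join(stat.items_add),
            'support': record.support,
            'confidence': stat.confidence,
            'lift': stat.lift,
        })
df_rules = pd.DataFrame(rows).sort_values('lift', ascending=False)
df_rules.head()
```

Sorting by lift surfaces the strongest associations first.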
###Markdown
What are the sizes of the item sets?
###Code
unique_item_set_lengths = df_results['items'].apply(len).unique()
unique_item_set_lengths
###Output
_____no_output_____
###Markdown
Additional Example

Apply the Apriori algorithm to the simple dataset that was calculated by hand.
###Code
data_simple = [
['Noodles', 'Pickles', 'Milk'],
['Noodles', 'Cheese'],
['Cheese', 'Shoes'],
['Noodles', 'Pickles', 'Cheese'],
['Noodles', 'Pickles', 'Clothes', 'Cheese', 'Milk'],
['Pickles', 'Clothes', 'Milk'],
['Pickles', 'Clothes', 'Milk'],
]
###Output
_____no_output_____
###Markdown
Apply the algorithm to the simple data set.
###Code
%%time
# Note: Added a min_length argument so that rules are not just a single item
association_rules_simple = apriori(transactions = data_simple, min_support=0.30, min_confidence = 0.80, min_length=3)
association_results_simple = list(association_rules_simple)
df_results_simple = pd.DataFrame(association_results_simple)
###Output
CPU times: user 2.24 ms, sys: 1.01 ms, total: 3.25 ms
Wall time: 3.14 ms
###Markdown
View all rules that meet the criteria
###Code
df_results_simple
###Output
_____no_output_____
###Markdown
View the details of the triple item set.
###Code
df_results_simple.iloc[3,2]
###Output
_____no_output_____ |
examples/plague.ipynb | ###Markdown
The Freshman Plague *Modeling and Simulation in Python*Copyright 2021 Allen DowneyLicense: [Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International](https://creativecommons.org/licenses/by-nc-sa/4.0/)
###Code
# install Pint if necessary
try:
import pint
except ImportError:
!pip install pint
# download modsim.py if necessary
from os.path import basename, exists
def download(url):
filename = basename(url)
if not exists(filename):
from urllib.request import urlretrieve
local, _ = urlretrieve(url, filename)
print('Downloaded ' + local)
download('https://raw.githubusercontent.com/AllenDowney/' +
'ModSimPy/master/modsim.py')
# import functions from modsim
from modsim import *
download('https://github.com/AllenDowney/ModSimPy/raw/master/' +
'chap11.py')
# import code from previous notebooks
from chap11 import make_system
from chap11 import update_func
from chap11 import run_simulation
###Output
_____no_output_____
###Markdown
[Click here to run this case study on Colab](https://colab.research.google.com/github/AllenDowney/ModSimPy/blob/master/examples/plague.ipynb) This case study picks up where Chapter 12 leaves off.
###Code
def add_immunization(system, fraction):
system.init.S -= fraction
system.init.R += fraction
tc = 3 # time between contacts in days
tr = 4 # recovery time in days
beta = 1 / tc # contact rate in per day
gamma = 1 / tr # recovery rate in per day
system = make_system(beta, gamma)
def calc_total_infected(results, system):
s_0 = results.S[0]
s_end = results.S[system.t_end]
return s_0 - s_end
###Output
_____no_output_____
###Markdown
Hand washing

Suppose you are the Dean of Student Life, and you have a budget of just \$1200 to combat the Freshman Plague. You have two options for spending this money:

1. You can pay for vaccinations, at a rate of \$100 per dose.
2. You can spend money on a campaign to remind students to wash hands frequently.

We have already seen how we can model the effect of vaccination. Now let's think about the hand-washing campaign. We'll have to answer two questions:

1. How should we incorporate the effect of hand washing in the model?
2. How should we quantify the effect of the money we spend on a hand-washing campaign?

For the sake of simplicity, let's assume that we have data from a similar campaign at another school showing that a well-funded campaign can change student behavior enough to reduce the infection rate by 20%.

In terms of the model, hand washing has the effect of reducing `beta`. That's not the only way we could incorporate the effect, but it seems reasonable and it's easy to implement.

Now we have to model the relationship between the money we spend and the effectiveness of the campaign. Again, let's suppose we have data from another school that suggests:

- If we spend \$500 on posters, materials, and staff time, we can change student behavior in a way that decreases the effective value of `beta` by 10%.
- If we spend \$1000, the total decrease in `beta` is almost 20%.
- Above \$1000, additional spending has little additional benefit.

Logistic function

To model the effect of a hand-washing campaign, I'll use a [generalized logistic function](https://en.wikipedia.org/wiki/Generalised_logistic_function) (GLF), which is a convenient function for modeling curves that have a generally sigmoid shape. The parameters of the GLF correspond to various features of the curve in a way that makes it easy to find a function that has the shape you want, based on data or background information about the scenario.
###Code
from numpy import exp
def logistic(x, A=0, B=1, C=1, M=0, K=1, Q=1, nu=1):
"""Computes the generalize logistic function.
A: controls the lower bound
B: controls the steepness of the transition
C: not all that useful, AFAIK
M: controls the location of the transition
K: controls the upper bound
Q: shift the transition left or right
nu: affects the symmetry of the transition
returns: float or array
"""
exponent = -B * (x - M)
denom = C + Q * exp(exponent)
return A + (K-A) / denom ** (1/nu)
###Output
_____no_output_____
###Markdown
The following array represents the range of possible spending.
###Code
spending = linspace(0, 1200, 21)
###Output
_____no_output_____
###Markdown
`compute_factor` computes the reduction in `beta` for a given level of campaign spending. `M` is chosen so the transition happens around \$500. `K` is the maximum reduction in `beta`, 20%. `B` is chosen by trial and error to yield a curve that seems feasible.
###Code
def compute_factor(spending):
"""Reduction factor as a function of spending.
spending: dollars from 0 to 1200
returns: fractional reduction in beta
"""
return logistic(spending, M=500, K=0.2, B=0.01)
###Output
_____no_output_____
###Markdown
Here's what it looks like.
###Code
percent_reduction = compute_factor(spending) * 100
make_series(spending, percent_reduction).plot()
decorate(xlabel='Hand-washing campaign spending (USD)',
ylabel='Percent reduction in infection rate',
title='Effect of hand washing on infection rate')
###Output
_____no_output_____
###Markdown
The result is the following function, which takes spending as a parameter and returns `factor`, the factor by which `beta` is reduced:
###Code
def compute_factor(spending):
return logistic(spending, M=500, K=0.2, B=0.01)
###Output
_____no_output_____
###Markdown
I use `compute_factor` to write `add_hand_washing`, which takes a `System` object and a budget, and modifies `system.beta` to model the effect of hand washing:
###Code
def add_hand_washing(system, spending):
factor = compute_factor(spending)
system.beta *= (1 - factor)
###Output
_____no_output_____
###Markdown
Now we can sweep a range of values for `spending` and use the simulation to compute the effect:
###Code
def sweep_hand_washing(spending_array):
sweep = SweepSeries()
for spending in spending_array:
system = make_system(beta, gamma)
add_hand_washing(system, spending)
results = run_simulation(system, update_func)
sweep[spending] = calc_total_infected(results, system)
return sweep
###Output
_____no_output_____
###Markdown
Here's how we run it:
###Code
from numpy import linspace
spending_array = linspace(0, 1200, 20)
infected_sweep2 = sweep_hand_washing(spending_array)
###Output
_____no_output_____
###Markdown
The following figure shows the result.
###Code
infected_sweep2.plot()
decorate(xlabel='Hand-washing campaign spending (USD)',
ylabel='Total fraction infected',
title='Effect of hand washing on total infections')
###Output
_____no_output_____
###Markdown
Below \$200, the campaign has little effect. At \$800 it has a substantial effect, reducing total infections from more than 45% to about 20%. Above \$800, the additional benefit is small.

Optimization

Let's put it all together. With a fixed budget of \$1200, we have to decide how many doses of vaccine to buy and how much to spend on the hand-washing campaign. Here are the parameters:
###Code
num_students = 90
budget = 1200
price_per_dose = 100
max_doses = int(budget / price_per_dose)
max_doses
###Output
_____no_output_____
###Markdown
The fraction `budget/price_per_dose` might not be an integer. `int` is a built-in function that converts numbers to integers, rounding down. We'll sweep the range of possible doses:
###Code
dose_array = linrange(max_doses)
###Output
_____no_output_____
###Markdown
In this example we call `linrange` with only one argument; it returns a NumPy array with the integers from 0 to `max_doses`, including both. Then we run the simulation for each element of `dose_array`:
###Code
def sweep_doses(dose_array):
sweep = SweepSeries()
for doses in dose_array:
fraction = doses / num_students
spending = budget - doses * price_per_dose
system = make_system(beta, gamma)
add_immunization(system, fraction)
add_hand_washing(system, spending)
results = run_simulation(system, update_func)
sweep[doses] = calc_total_infected(results, system)
return sweep
###Output
_____no_output_____
###Markdown
For each number of doses, we compute the fraction of students we can immunize, `fraction`, and the remaining budget we can spend on the campaign, `spending`. Then we run the simulation with those quantities and store the number of infections. The following figure shows the result.
###Code
infected_sweep3 = sweep_doses(dose_array)
infected_sweep3.plot()
decorate(xlabel='Doses of vaccine',
ylabel='Total fraction infected',
title='Total infections vs. doses')
###Output
_____no_output_____ |
notebooks/data-umbrella-2020-10-27/0-overview.ipynb | ###Markdown
<img src="images/dask_horizontal.svg" width="45%" alt="Dask logo\">

Scaling your data work with Dask

Materials & setup
- Tutorial materials available at https://github.com/coiled/data-science-at-scale
- Two ways to go through the tutorial:
  1. Run locally on your laptop
  2. Run using Binder (no setup required)

About the speakers
- **[James Bourbeau](https://www.jamesbourbeau.com/)**: Dask maintainer and Software Engineer at [Coiled](https://coiled.io/).
- **[Hugo Bowne-Anderson](http://hugobowne.github.io/)**: Head of Data Science Evangelism and Marketing at [Coiled](https://coiled.io/).

Overview

Dask is a flexible, open source library for parallel computing in Python
- Documentation: https://docs.dask.org
- GitHub: https://github.com/dask/dask

From a high level, Dask:
- Enables parallel and larger-than-memory computations
- Scales the existing Python ecosystem
  - Uses familiar APIs you're used to from projects like NumPy, Pandas, and scikit-learn
  - Allows you to scale existing workflows with minimal code changes
- Works on your laptop, but also scales out to large clusters
- Offers great built-in diagnostic tools

<img src="images/dask-components.svg" width="85%" alt="Dask components\">

Dask Schedulers, Workers, and Beyond

Work (Python code) is performed on a cluster, which consists of
* a scheduler (which manages and sends the work / tasks to the workers)
* workers, which compute the tasks.

The client is "the user-facing entry point for cluster users." What this means is that the client lives wherever you are writing your Python code, and the client talks to the scheduler, passing it the tasks.

<img src="images/dask-cluster.svg" width="85%" alt="Dask components\">

Dask in action!
###Code
# Sets up Dask's distributed scheduler
from dask.distributed import Client
client = Client()
client
# Download data
%run prep.py -d flights
# Perform Pandas-like operations
import os
import dask.dataframe as dd
df = dd.read_csv(os.path.join("data", "nycflights", "*.csv"),
parse_dates={"Date": [0, 1, 2]},
dtype={"TailNum": str,
"CRSElapsedTime": float,
"Cancelled": bool})
df.groupby("Origin").DepDelay.mean().compute()
###Output
_____no_output_____
###Markdown
Tutorial goals

The goal for this tutorial is to cover the basics of Dask. Attendees should walk away with an understanding of what Dask offers, how it works, and ideas of how Dask can help them effectively scale their own data-intensive workloads.

The tutorial consists of several Jupyter notebooks which contain explanatory material on how Dask works. Specifically, the notebooks presented cover the following topics:
- [Dask Delayed](1-delayed.ipynb)
- [Dask DataFrame](2-dataframe.ipynb)
- [Machine Learning](3-machine-learning.ipynb)

Each notebook also contains hands-on exercises to illustrate the concepts being presented. Let's look at our first example to get a sense for how they work.

Exercise: Print `"Hello world!"`

Use Python to print the string "Hello world!" to the screen.
###Code
# Your solution here
# Run this cell to see a solution
%load solutions/overview.py
###Output
_____no_output_____
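As a small preview of the Dask Delayed notebook listed above, here is a minimal, illustrative sketch of wrapping plain Python functions so Dask runs them lazily through the scheduler and workers described earlier:

```python
from dask import delayed

@delayed
def inc(x):
    return x + 1

# Nothing runs yet: this only builds a task graph.
total = delayed(sum)([inc(i) for i in range(10)])

# compute() hands the graph to the scheduler, which farms the tasks out to workers.
total.compute()
```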
###Markdown
Note that several of the examples here have been adapted from the Dask tutorial at https://tutorial.dask.org.

Optional: Work directly from the cloud with Coiled

Here I'll spin up a cluster on Coiled to show you just how easy it can be. Note that to do so, I've also signed into the [Coiled Beta](cloud.coiled.io/), pip installed `coiled`, and authenticated. You can do the same! You can also spin up [this hosted Coiled notebook](https://cloud.coiled.io/jobs/coiled/quickstart), which means you don't have to do anything locally.

The plan:
* use Coiled to load in **all** of the NYC taxi dataset from 10+ CSVs (8+ GBs) on an AWS cluster,
* massage the data,
* engineer a feature, and
* compute the average tip as a function of the number of passengers.
###Code
import coiled
from dask.distributed import LocalCluster, Client
# Create a Software Environment
coiled.create_software_environment(
name="my-software-env",
conda="binder/environment.yml",
)
# Control the resources of your cluster by creating a new cluster configuration
coiled.create_cluster_configuration(
name="my-cluster-config",
worker_memory="16 GiB",
worker_cpu=4,
scheduler_memory="8 GiB",
scheduler_cpu=2,
software="my-software-env",
)
# Spin up cluster, instantiate a Client
cluster = coiled.Cluster(n_workers=10, configuration="my-cluster-config")
client = Client(cluster)
client
import dask.dataframe as dd
# Read data into a Dask DataFrame
df = dd.read_csv(
"s3://nyc-tlc/trip data/yellow_tripdata_2019-*.csv",
parse_dates=["tpep_pickup_datetime", "tpep_dropoff_datetime"],
dtype={
'RatecodeID': 'float64',
'VendorID': 'float64',
'passenger_count': 'float64',
'payment_type': 'float64'
},
storage_options={"anon":True}
)
df
%%time
# Prepare to compute the average tip
# as a function of the number of passengers
mean_amount = df.groupby("passenger_count").tip_amount.mean()
%%time
# Compute the average tip
# as a function of the number of passengers
mean_amount.compute()
client.shutdown()
###Output
_____no_output_____ |
sdk/jobs/single-step/scikit-learn/mnist/sklearn-mnist.ipynb | ###Markdown
Train a scikit-learn SVM on the MNIST dataset.

**Requirements** - In order to benefit from this tutorial, you will need:
- A basic understanding of Machine Learning
- An Azure account with an active subscription. [Create an account for free](https://azure.microsoft.com/free/?WT.mc_id=A261C142F)
- An Azure ML workspace with a compute cluster - [Configure workspace](../../../configuration.ipynb)
- A Python environment
- Installed Azure Machine Learning Python SDK v2 - [install instructions](../../../../README.md) - check the getting started section

**Learning Objectives** - By the end of this tutorial, you should be able to:
- Connect to your AML workspace from the Python SDK
- Create and run a `Command` which executes a Python command
- Use a local file as an `input` to the Command

**Motivations** - This notebook explains how to set up and run a Command. The Command is a fundamental construct of Azure Machine Learning. It can be used to run a task on a specified compute (either local or in the cloud). The Command accepts `environment` and `compute` to set up the required infrastructure. You can define a `command` to run on this infrastructure with `inputs`.

1. Connect to Azure Machine Learning Workspace

The [workspace](https://docs.microsoft.com/en-us/azure/machine-learning/concept-workspace) is the top-level resource for Azure Machine Learning, providing a centralized place to work with all the artifacts you create when you use Azure Machine Learning. In this section we will connect to the workspace in which the job will be run.

1.1. Import the required libraries
###Code
# import required libraries
from azure.ai.ml import MLClient
from azure.ai.ml import command
from azure.identity import DefaultAzureCredential
###Output
_____no_output_____
###Markdown
1.2. Configure workspace details and get a handle to the workspaceTo connect to a workspace, we need identifier parameters - a subscription, resource group and workspace name. We will use these details in the `MLClient` from `azure.ai.ml` to get a handle to the required Azure Machine Learning workspace. We use the default [default azure authentication](https://docs.microsoft.com/en-us/python/api/azure-identity/azure.identity.defaultazurecredential?view=azure-python) for this tutorial. Check the [configuration notebook](../../../configuration.ipynb) for more details on how to configure credentials and connect to a workspace.
###Code
# Enter details of your AML workspace
subscription_id = "<SUBSCRIPTION_ID>"
resource_group = "<RESOURCE_GROUP>"
workspace = "<AML_WORKSPACE_NAME>"
# get a handle to the workspace
ml_client = MLClient(
DefaultAzureCredential(), subscription_id, resource_group, workspace
)
###Output
_____no_output_____
###Markdown
2. Configure and run the CommandIn this section we will configure and run a standalone job using the `command` class. The `command` class can be used to run standalone jobs and can also be used as a function inside pipelines. 2.1 Configure the CommandThe `command` allows user to configure the following key aspects.- `code` - This is the path where the code to run the command is located- `command` - This is the command that needs to be run- `inputs` - This is the dictionary of inputs using name value pairs to the command. The key is a name for the input within the context of the job and the value is the input value. Inputs can be referenced in the `command` using the `${{inputs.}}` expression. To use files or folders as inputs, we can use the `Input` class. The `Input` class supports three parameters: - `type` - The type of input. This can be a `uri_file` or `uri_folder`. The default is `uri_folder`. - `path` - The path to the file or folder. These can be local or remote files or folders. For remote files - http/https, wasb are supported. - Azure ML `data`/`dataset` or `datastore` are of type `uri_folder`. To use `data`/`dataset` as input, you can use registered dataset in the workspace using the format ':'. For e.g Input(type='uri_folder', path='my_dataset:1') - `mode` - Mode of how the data should be delivered to the compute target. Allowed values are `ro_mount`, `rw_mount` and `download`. Default is `ro_mount`- `environment` - This is the environment needed for the command to run. Curated or custom environments from the workspace can be used. Or a custom environment can be created and used as well. Check out the [environment](../../../../assets/environment/environment.ipynb) notebook for more examples.- `compute` - The compute on which the command will run. In this example we are using a compute called `cpu-cluster` present in the workspace. You can replace it any other compute in the workspace. You can run it on the local machine by using `local` for the compute. This will run the command on the local machine and all the run details and output of the job will be uploaded to the Azure ML workspace.- `distribution` - Distribution configuration for distributed training scenarios. Azure Machine Learning supports PyTorch, TensorFlow, and MPI-based distributed training. The allowed values are `PyTorch`, `TensorFlow` or `Mpi`.- `display_name` - The display name of the Job- `description` - The description of the experiment
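For illustration only (a hedged sketch, not part of the job defined below — the path shown is a placeholder), a file input could be declared with the `Input` class and then referenced from the command string:

```python
from azure.ai.ml import Input

# hypothetical file input; replace the path with a real file, datastore or registered dataset reference
my_data = Input(type="uri_file", path="https://example.com/some_file.csv", mode="ro_mount")
# it would then be passed as inputs={"data": my_data} and used as ${{inputs.data}} in the command
```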
###Code
# create the command
job = command(
code="./src", # local path where the code is stored
command="pip install -r requirements.txt && python main.py --C ${{inputs.C}} --penalty ${{inputs.penalty}}",
inputs={"C": 0.8, "penalty": "l2"},
environment="AzureML-sklearn-0.24-ubuntu18.04-py37-cpu@latest",
compute="cpu-cluster",
display_name="sklearn-mnist-example"
# experiment_name: sklearn-mnist-example
    # description: Train a scikit-learn LogisticRegression model on the MNIST dataset.
)
###Output
_____no_output_____
###Markdown
2.2 Run the CommandUsing the `MLClient` created earlier, we will now run this Command as a job in the workspace.
###Code
# submit the command
returned_job = ml_client.create_or_update(job)
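# optionally monitor the submitted job (sketch; assumes these SDK v2 helpers are available)
# print(returned_job.studio_url)              # link to the run in Azure ML studio
# ml_client.jobs.stream(returned_job.name)    # stream logs until the job completes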
###Output
_____no_output_____ |
References/Data.Analysis/Discovery.Statistics/7-1-multiple.regression.ipynb | ###Markdown
Multiple Regression using the basic model
###Code
import pandas as pd
import statsmodels.formula.api as smf
album2 = pd.read_csv("data/Album Sales 2.dat",sep="\t")
album2.head()
###Output
_____no_output_____
###Markdown
The First Model
###Code
model2 = smf.ols("sales ~ adverts", data=album2)
fit2 = model2.fit()
fit2.summary()
###Output
_____no_output_____
###Markdown
The Second Model
###Code
model3 = smf.ols("sales ~ adverts + airplay + attract", data=album2)
fit3 = model3.fit()
fit3.summary()
###Output
_____no_output_____
###Markdown
 The Standardized Model - calculate **standardized beta estimates**
###Code
from scipy.stats.mstats import zscore
model4 = smf.ols("zscore(sales) ~ zscore(adverts) + zscore(airplay) + zscore(attract)", data=album2)
fit4 = model4.fit()
fit4.summary()
###Output
_____no_output_____
###Markdown
Comparing two models using F-ratio(ANOVA)
###Code
from statsmodels.stats.anova import anova_lm
anova_lm(fit2,fit3)
###Output
d:\python37\lib\site-packages\scipy\stats\_distn_infrastructure.py:903: RuntimeWarning: invalid value encountered in greater
return (a < x) & (x < b)
d:\python37\lib\site-packages\scipy\stats\_distn_infrastructure.py:903: RuntimeWarning: invalid value encountered in less
return (a < x) & (x < b)
d:\python37\lib\site-packages\scipy\stats\_distn_infrastructure.py:1912: RuntimeWarning: invalid value encountered in less_equal
cond2 = cond0 & (x <= _a)
###Markdown
Casewise diagnostics (Residual and influence statistics)
###Code
influence = fit3.get_influence()
influence.summary_frame()
album2["residuals"]=influence.resid
album2["standardized.residuals"]=zscore(influence.resid)
album2["studentized.residuals"]=influence.resid_studentized
album2["cooks.distance"]=influence.cooks_distance[0]
album2["dfbeta"]=influence.dfbeta.tolist()
album2["dffits"],dummy=influence.dffits
album2["leverage"]=influence.hat_diag_factor
album2["covariance.ratios"]=influence.cov_ratio
album2.head()
###Output
_____no_output_____
###Markdown
Finding outliers
###Code
album2["large.residual"] = (album2["standardized.residuals"] > 2) | (album2["standardized.residuals"] < -2)
sum(album2["large.residual"])
album2[album2["large.residual"]]
###Output
_____no_output_____
###Markdown
 Assessing the assumption of independence (using the Durbin-Watson test)
###Code
from statsmodels.stats.stattools import durbin_watson
durbin_watson(fit3.resid)
###Output
_____no_output_____
###Markdown
 A D-W statistic of 0 means perfect positive autocorrelation, 2 means no autocorrelation, and 4 means perfect negative autocorrelation. Assessing the assumption of no multicollinearity
###Code
from statsmodels.stats.outliers_influence import variance_inflation_factor
VIFs=pd.Series([variance_inflation_factor(fit3.model.exog, i)
for i in [1,2,3]])
VIFs.index=[fit3.model.exog_names[i] for i in [1,2,3]]
VIFs
VIFs.max(), VIFs.mean()
###Output
_____no_output_____
###Markdown
 If the largest VIF is greater than 10, there is cause for concern; if the average VIF is substantially greater than 1, the regression may be biased.
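As a quick check of these rules of thumb with the `VIFs` series computed above (a small sketch using only objects already defined in this notebook):

```python
# predictors exceeding the usual VIF cut-off of 10
print(VIFs[VIFs > 10])
print("any VIF > 10:", (VIFs > 10).any(), "| mean VIF:", round(VIFs.mean(), 3))
```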
###Code
fit3.model.exog_names
###Output
_____no_output_____
###Markdown
plots of residuals
###Code
import matplotlib.pyplot as plt
plt.plot(fit3.fittedvalues,fit3.resid,'o')
plt.hist(album2["studentized.residuals"])
###Output
_____no_output_____ |
learn/array/numpy_array/array-v2.ipynb | ###Markdown
 Calculations with numpy arrays
###Code
import numpy as np

arr = np.arange(11)
arr
###Output
_____no_output_____
###Markdown
 Square root
###Code
np.sqrt(arr)
###Output
_____no_output_____
###Markdown
 Exponential function
###Code
np.exp(arr)
###Output
_____no_output_____
###Markdown
 Generating random arrays
###Code
A = np.random.randn(10)
B = np.random.randn(10)
A
B
###Output
_____no_output_____
###Markdown
 Sum of arrays
###Code
np.add(A, B)
###Output
_____no_output_____
###Markdown
 Element-wise maximum
###Code
np.maximum(A,B)
###Output
_____no_output_____ |
dev/dev8.ipynb | ###Markdown
3DC
###Code
%load_ext autoreload
%autoreload 2
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import math
from functools import partial
def conv3x3x3(in_planes, out_planes, stride=1):
# 3x3x3 convolution with padding
return nn.Conv3d(
in_planes,
out_planes,
kernel_size=3,
stride=stride,
padding=1,
bias=False)
def downsample_basic_block(x, planes, stride):
out = F.avg_pool3d(x, kernel_size=1, stride=stride)
zero_pads = torch.Tensor(
out.size(0), planes - out.size(1), out.size(2), out.size(3),
out.size(4)).zero_()
if isinstance(out.data, torch.cuda.FloatTensor):
zero_pads = zero_pads.cuda()
out = Variable(torch.cat([out.data, zero_pads], dim=1))
return out
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm3d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3x3(planes, planes)
self.bn2 = nn.BatchNorm3d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv3d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm3d(planes)
self.conv2 = nn.Conv3d(
planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn2 = nn.BatchNorm3d(planes)
self.conv3 = nn.Conv3d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm3d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self,
block,
layers,
sample_size,
sample_duration,
shortcut_type='B',
num_classes=400):
self.inplanes = 64
super(ResNet, self).__init__()
self.conv1 = nn.Conv3d(
3,
64,
kernel_size=7,
stride=(1, 2, 2),
padding=(3, 3, 3),
bias=False)
self.bn1 = nn.BatchNorm3d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool3d(kernel_size=(3, 3, 3), stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0], shortcut_type)
self.layer2 = self._make_layer(
block, 128, layers[1], shortcut_type, stride=2)
self.layer3 = self._make_layer(
block, 256, layers[2], shortcut_type, stride=2)
self.layer4 = self._make_layer(
block, 512, layers[3], shortcut_type, stride=2)
last_duration = int(math.ceil(sample_duration / 16))
last_size = int(math.ceil(sample_size / 32))
self.avgpool = nn.AvgPool3d(
(last_duration, last_size, last_size), stride=1)
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv3d):
m.weight = nn.init.kaiming_normal(m.weight, mode='fan_out')
elif isinstance(m, nn.BatchNorm3d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, shortcut_type, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
if shortcut_type == 'A':
downsample = partial(
downsample_basic_block,
planes=planes * block.expansion,
stride=stride)
else:
downsample = nn.Sequential(
nn.Conv3d(
self.inplanes,
planes * block.expansion,
kernel_size=1,
stride=stride,
bias=False), nn.BatchNorm3d(planes * block.expansion))
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def resnet34(**kwargs):
"""Constructs a ResNet-34 model.
"""
model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
return model
num_classes = 2
resnet_shortcut = 'B'
sample_size = 224
sample_duration = 16
model = resnet34(
num_classes=2,
shortcut_type=resnet_shortcut,
sample_size=sample_size,
sample_duration=sample_duration)
model
q = torch.rand((2, 3, 16, 224, 224))
model(q)
from kissing_detector import KissingDetector3DConv
model = KissingDetector3DConv(num_classes=2,
feature_extract=True,
use_vggish=True)
model(
torch.rand((1, 1, 96, 64)),
torch.rand((1, 3, 16, 224, 224))
)
xs = [
torch.rand((3, 224, 224))
for _ in range(10)
]
len(xs)
xs[0].shape
e.shape
out = []
for i in range(len(xs)):
# build an e
e = torch.zeros((16, 3, 224, 224))
e[-1] = xs[0]
for j in range(i):
e[- 1 - j] = xs[j]
# permute e
ee = e.permute((1, 0, 2, 3))
out.append(ee)
out[0].shape
out[1][0, -1, :, :]
# 1- broken 2d?
from experiments import ExperimentRunner
import params
ex = ExperimentRunner(params.experiment_test_3d, n_jobs=1)
ex.run()
from train import train_kd
train_kd()
###Output
_____no_output_____ |
docs/contribute/benchmarks/DL2/benchmarks_DL2_direction-reconstruction.ipynb | ###Markdown
 Direction reconstruction (DL2) **WARNING**This is still a work-in-progress, it will evolve with the pipeline comparisons and converge with ctaplot+cta-benchmarks. **Author(s):** - Dr. Michele Peresano (CEA-Saclay/IRFU/DAp/LEPCHE), 2020based on previous work by J. Lefacheur.**Description:**This notebook contains benchmarks for the _protopipe_ pipeline regarding the angular distribution of the showers selected for DL3 data.**NOTES:**- a more general set of benchmarks is being defined in cta-benchmarks/ctaplot,- follow [this](https://www.overleaf.com/16933164ghbhvjtchknf) document by adding new benchmarks or proposing new ones.**Requirements:**To run this notebook you will need a set of DL2 data produced on the grid with protopipe.The MC production to be used and the appropriate set of files to use for this notebook can be found [here](https://forge.in2p3.fr/projects/step-by-step-reference-mars-analysis/wikiThe-MC-sample ).The data format required to run the notebook is the current one used by _protopipe_ .Later on it will be the same as in _ctapipe_ + _pyirf_.**Development and testing:** As with any other part of _protopipe_ and being part of the official repository, this notebook can be further developed by any interested contributor. The execution of this notebook is not currently automatic, it must be done locally by the user - preferably _before_ pushing a pull-request. **IMPORTANT:** Please, if you wish to contribute to this notebook, before pushing anything to your branch (better even before opening the PR) clear all the output and remove any local directory paths that you used for testing (leave empty strings).**TODO:*** add missing benchmarks from CTA-MARS comparison* crosscheck with EventDisplay Table of contents - [Energy-dependent offset distribution](Energy-dependent-offset-distribution) - [Angular resolution](Angular-resolution) - [PSF asymmetry](PSF-asymmetry) - [True energy distributions](True-energy-distributions) Imports
###Code
%matplotlib inline
import matplotlib.pyplot as plt
cmap = dict()
import matplotlib.colors as colors
from matplotlib.colors import LogNorm, PowerNorm
count = 0
for key in colors.cnames:
if 'dark' in key:
#if key in key:
cmap[count] = key
count = count + 1
#cmap = {'black': 0, 'red': 1, 'blue': 2, 'green': 3}
cmap = {0: 'black', 1: 'red', 2: 'blue', 3: 'green'}
import os
from pathlib import Path
import numpy as np
import pandas as pd
import astropy.coordinates as c
import astropy.wcs as wcs
import astropy.units as u
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
Functions
###Code
def compute_psf(data, ebins, radius):
nbin = len(ebins) - 1
psf = np.zeros(nbin)
psf_err = np.zeros(nbin)
for idx in range(nbin):
emin = ebins[idx]
emax = ebins[idx+1]
sel = data.loc[(data['true_energy'] >= emin) & (data['true_energy'] < emax), ['xi']]
if len(sel) != 0:
psf[idx] = np.percentile(sel['xi'], radius)
psf_err[idx] = psf[idx] / np.sqrt(len(sel))
else:
psf[idx] = 0.
psf_err[idx] = 0.
return psf, psf_err
def plot_psf(ax, x, y, err, **kwargs):
color = kwargs.get('color', 'red')
label = kwargs.get('label', '')
xlabel = kwargs.get('xlabel', '')
xlim = kwargs.get('xlim', None)
ax.errorbar(x, y, yerr=err, fmt='o', label=label, color=color) #, yerr=err, fmt='o') #, color=color, label=label)
ax.set_ylabel('PSF (68% containment)')
ax.set_xlabel('True energy [TeV]')
if xlim is not None:
ax.set_xlim(xlim)
return ax
###Output
_____no_output_____
###Markdown
Load data
###Code
# First we check if a _plots_ folder exists already.
# If not, we create it.
Path("./plots").mkdir(parents=True, exist_ok=True)
# EDIT ONLY THIS CELL WITH YOUR LOCAL SETUP INFORMATION
parentDir = "" # full path location to 'shared_folder'
analysisName = ""
indir = os.path.join(parentDir, "shared_folder/analyses", analysisName, "data/DL2")
infile = 'DL2_tail_gamma_merged.h5'
data_evt = pd.read_hdf(os.path.join(indir, infile), "/reco_events")
good_events = data_evt[(data_evt["is_valid"]==True) & (data_evt["NTels_reco"] >= 2) & (data_evt["gammaness"] >= 0.75)]
###Output
_____no_output_____
###Markdown
Benchmarks Here we use events with the following cuts:- valid reconstructed events- at least 2 reconstructed images, regardless of the camera- gammaness > 0.75 (mostly a conservative choice) Energy-dependent offset distribution
###Code
min_true_energy = [0.02, 0.2, 2, 20]
max_true_energy = [0.2, 2, 20, 200]
plt.figure(figsize=(10,5))
plt.xlabel("Offset [deg]")
plt.ylabel("Number of events")
for low_E, high_E in zip(min_true_energy, max_true_energy):
selected_events = good_events[(good_events["true_energy"]>low_E) & (good_events["true_energy"]<high_E)]
plt.hist(selected_events["offset"],
bins=100,
range = [0,10],
label=f"{low_E} < E_true [TeV] < {high_E}",
histtype="step",
linewidth=2)
plt.yscale("log")
plt.legend(loc="best")
plt.grid(which="both")
plt.savefig(f"./plots/DL3_offsets_{analysisName}.png")
plt.show()
###Output
_____no_output_____
###Markdown
Angular resolution[back to top](Table-of-contents) Here we compare how the multiplicity influences the performance of reconstructed events.
###Code
r_containment = 68
energy_bins = 21
min_energy_TeV = 0.0125
max_energy_TeV = 200.0
energy_edges = np.logspace(np.log10(0.01), np.log10(51), energy_bins + 1, True)
energy = np.sqrt(energy_edges[1:] * energy_edges[:-1])
multiplicity_cuts = ['NTels_reco == 2 & is_valid==True',
'NTels_reco == 3 & is_valid==True',
'NTels_reco == 4 & is_valid==True',
'NTels_reco >= 2 & is_valid==True']
events_selected_multiplicity = [good_events[(good_events["NTels_reco"]==2)],
good_events[(good_events["NTels_reco"]==3)],
good_events[(good_events["NTels_reco"]==4)],
good_events[(good_events["NTels_reco"]>=2)]]
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(20, 10))
axes = axes.flatten()
cmap = {0: 'black', 1: 'red', 2: 'blue', 3: 'green'}
limit = [0.01, 51]
for cut_idx, cut in enumerate(multiplicity_cuts):
#data_mult = data_evt.query(cut)
data_mult = events_selected_multiplicity[cut_idx]
psf, err_psf = compute_psf(data_mult, energy_edges, 68)
opt={'color': cmap[cut_idx], 'label': multiplicity_cuts[cut_idx]}
plot_psf(axes[0], energy, psf, err_psf, **opt)
y, tmp = np.histogram(data_mult['true_energy'], bins=energy_edges)
weights = np.ones_like(y)
#weights = weights / float(np.sum(y))
yerr = np.sqrt(y) * weights
centers = 0.5 * (energy_edges[1:] + energy_edges[:-1])
width = energy_edges[1:] - energy_edges[:-1]
axes[1].bar(centers, y * weights, width=width, yerr=yerr, **{'edgecolor': cmap[cut_idx], 'label': multiplicity_cuts[cut_idx], 'lw': 2, 'fill': False})
axes[1].set_ylabel('Number of events')
for ax in axes:
ax.set_xlim(limit)
ax.set_xscale('log')
ax.legend(loc='best')
ax.grid(which='both')
ax.set_xlabel('True energy [TeV]')
plt.tight_layout()
fig.savefig(f"./plots/DL3_PSF_{analysisName}.png")
###Output
_____no_output_____
###Markdown
PSF asymmetry[back to top](Table-of-contents)
###Code
reco_alt = good_events.reco_alt
reco_az = good_events.reco_az
# right now all reco_az for a 180° simulation turn out to be all around -180°
#if ~np.count_nonzero(np.sign(reco_az) + 1):
reco_az = np.abs(reco_az)
# this is needed for projecting the angle onto the sky
reco_az_corr = reco_az * np.cos(np.deg2rad(good_events.reco_alt))
true_alt = good_events.iloc[0].true_alt
true_az = good_events.iloc[0].true_az
daz = reco_az - true_az
daz_corr = daz * np.cos(np.deg2rad(reco_alt))
dalt = reco_alt - true_alt
plt.figure(figsize=(5, 5))
plt.xlabel("Mis-recontruction [deg]")
plt.ylabel("Number of events")
plt.hist(daz_corr, bins=100, alpha=0.5, label = "azimuth")
plt.hist(dalt, bins=100, alpha=0.5, label = "altitude")
plt.legend()
plt.yscale("log")
plt.grid()
print("Mean and STDs of sky-projected mis-reconstruction axes")
print('daz = {:.4f} +/- {:.4f} deg'.format(daz_corr.mean(), daz_corr.std()))
print('dalt = {:.4f} +/- {:.4f} deg'.format(dalt.mean(), dalt.std()))
plt.show()
###Output
Mean and STDs of sky-projected mis-reconstruction axes
daz = -0.0070 +/- 0.1190 deg
dalt = 0.0003 +/- 0.1429 deg
###Markdown
 2D representation with **orange** events being those with **offset < 1 deg and E_true > 20 TeV**
###Code
angcut = (good_events['offset'] < 1) & (good_events['true_energy'] > 20)
plt.figure(figsize=(5,5))
ax = plt.gca()
FOV_size = 2.5 # deg
ax.scatter(daz_corr, dalt, alpha=0.1, s=1, label='no angular cut')
ax.scatter(daz_corr[angcut], dalt[angcut], alpha=0.05, s=1, label='offset < 1 deg & E_true > 20 TeV')
ax.set_aspect('equal')
ax.set_xlabel('cent. Az [deg]')
ax.set_ylabel('cent. Alt [deg]')
ax.set_xlim(-FOV_size,FOV_size)
ax.set_ylim(-FOV_size,FOV_size)
plt.tight_layout()
plt.grid(which="both")
fig.savefig(f"./plots/PSFasymmetry_2D_altaz_{analysisName}.png")
###Output
_____no_output_____
###Markdown
True energy distributions[back to top](Table-of-contents)
###Code
plt.figure(figsize=(10,10))
plt.subplots_adjust(hspace=0.25)
true_energy_bin_edges = np.logspace(np.log10(0.02), np.log10(200), 5)
nbins = 200
for i in range(len(true_energy_bin_edges)-1):
plt.subplot(2, 2, i+1)
mask = (good_events["true_energy"]>true_energy_bin_edges[i]) & (good_events["true_energy"]<true_energy_bin_edges[i+1])
plt.hist2d(daz_corr[mask], dalt[mask], bins=[nbins,nbins], norm=LogNorm())
plt.gca().set_aspect('equal')
#plt.colorbar()
plt.xlim(-FOV_size, FOV_size)
plt.ylim(-FOV_size, FOV_size)
plt.title(f"{true_energy_bin_edges[i]:.2f} < E_true [TeV] < {true_energy_bin_edges[i+1]:.2f}")
plt.xlabel('cent. Az [deg]')
plt.ylabel('cent. Alt [deg]')
###Output
_____no_output_____ |
data_leakge.ipynb | ###Markdown
 Considering leakageIt's very rare to find models that are accurate 98% of the time. It happens, but it's uncommon enough that we should inspect the data more closely for target leakage.Here is a summary of the data, which we can also find under the data tab:- card: 1 if credit card application accepted, 0 if not- reports: Number of major derogatory reports- age: Age in years plus twelfths of a year- income: Yearly income (divided by 10,000)- share: Ratio of monthly credit card expenditure to yearly income- expenditure: Average monthly credit card expenditure- owner: 1 if owns home, 0 if rents- selfempl: 1 if self-employed, 0 if not- dependents: 1 + number of dependents- months: Months living at current address- majorcards: Number of major credit cards held- active: Number of active credit accountsA few variables look suspicious. For example, does `expenditure` mean expenditure on this card or on cards used before applying?At this point, basic data comparisons can be very helpful:
###Code
# Comparison of expenditure with target
expenditure_cardholders = X.expenditure[y]
expenditure_noncardholders = X.expenditure[~y]
print("Fraction of clients who did not receive a card and had no expenditure: %.2f" \
%((expenditure_noncardholders == 0).mean()))
print("Fraction of clients who received a card and had no expenditure: %.2f" \
%((expenditure_cardholders == 0).mean()))
###Output
Fraction of clients who did not receive a card and had no expenditure: 1.00
Fraction of clients who received a card and had no expenditure: 0.02
###Markdown
As shown above, everyone who did not receive a card had no expenditures, while only 2% of those who received a card had no expenditures. It's not surprising that our model appeared to have a high accuracy. But this also seems to be a case of **target leakage**, where expenditures probably means ***expenditures on the card they applied for***.Since `share` is partially determined by `expenditure`, it should be excluded too. The variables `active` and `majorcards` are a little less clear, but from the description, they sound concerning. In most situations, it's better to be safe than sorry if you can't track down the people who created the data to find out more.We would run a model without target leakage as follows:
###Code
# Drop leaky predictors from the dataset
potential_leaks = ['expenditure', 'share', 'active', 'majorcards']
X2 = X.drop(potential_leaks, axis=1)
# model evaluation with leaky predictors removed
cv_scores = cross_val_score(my_pipeline, X2, y,
cv=5,
scoring='accuracy')
print("Cross-val accuracy: %f" % cv_scores.mean())
data.majorcards.nunique()
# Comparison of majorcards with target
majorcard_cardholders = X.majorcards[y]
majorcard_noncardholders = X.majorcards[~y]
print("Fraction of clients who received a card and had majorcards: %.2f" \
%((majorcard_cardholders == 1).mean()))
print("Fraction of clients who received a card and had no majorcard: %.2f" \
%((majorcard_cardholders == 0).mean()))
print("Fraction of clients who did not receive a card and had majorcards: %.2f" \
%((majorcard_noncardholders == 1).mean()))
print("Fraction of clients who did not receive a card and had no majorcard: %.2f" \
%((majorcard_noncardholders == 0).mean()))
###Output
Fraction of clients who received a card and had majorcards: 0.84
Fraction of clients who received a card and had no majorcard: 0.16
Fraction of clients who did not receive a card and had majorcards: 0.74
Fraction of clients who did not receive a card and had no majorcard: 0.26
|
Convolutional_Neural_Networks/Convolution+model+-+Step+by+Step+-+v2 (1).ipynb | ###Markdown
Convolutional Neural Networks: Step by StepWelcome to Course 4's first assignment! In this assignment, you will implement convolutional (CONV) and pooling (POOL) layers in numpy, including both forward propagation and (optionally) backward propagation. **Notation**:- Superscript $[l]$ denotes an object of the $l^{th}$ layer. - Example: $a^{[4]}$ is the $4^{th}$ layer activation. $W^{[5]}$ and $b^{[5]}$ are the $5^{th}$ layer parameters.- Superscript $(i)$ denotes an object from the $i^{th}$ example. - Example: $x^{(i)}$ is the $i^{th}$ training example input. - Lowerscript $i$ denotes the $i^{th}$ entry of a vector. - Example: $a^{[l]}_i$ denotes the $i^{th}$ entry of the activations in layer $l$, assuming this is a fully connected (FC) layer. - $n_H$, $n_W$ and $n_C$ denote respectively the height, width and number of channels of a given layer. If you want to reference a specific layer $l$, you can also write $n_H^{[l]}$, $n_W^{[l]}$, $n_C^{[l]}$. - $n_{H_{prev}}$, $n_{W_{prev}}$ and $n_{C_{prev}}$ denote respectively the height, width and number of channels of the previous layer. If referencing a specific layer $l$, this could also be denoted $n_H^{[l-1]}$, $n_W^{[l-1]}$, $n_C^{[l-1]}$. We assume that you are already familiar with `numpy` and/or have completed the previous courses of the specialization. Let's get started! 1 - PackagesLet's first import all the packages that you will need during this assignment. - [numpy](www.numpy.org) is the fundamental package for scientific computing with Python.- [matplotlib](http://matplotlib.org) is a library to plot graphs in Python.- np.random.seed(1) is used to keep all the random function calls consistent. It will help us grade your work.
###Code
import numpy as np
import h5py
import matplotlib.pyplot as plt
%matplotlib inline
plt.rcParams['figure.figsize'] = (5.0, 4.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
%load_ext autoreload
%autoreload 2
np.random.seed(1)
###Output
_____no_output_____
###Markdown
2 - Outline of the AssignmentYou will be implementing the building blocks of a convolutional neural network! Each function you will implement will have detailed instructions that will walk you through the steps needed:- Convolution functions, including: - Zero Padding - Convolve window - Convolution forward - Convolution backward (optional)- Pooling functions, including: - Pooling forward - Create mask - Distribute value - Pooling backward (optional) This notebook will ask you to implement these functions from scratch in `numpy`. In the next notebook, you will use the TensorFlow equivalents of these functions to build the following model:**Note** that for every forward function, there is its corresponding backward equivalent. Hence, at every step of your forward module you will store some parameters in a cache. These parameters are used to compute gradients during backpropagation. 3 - Convolutional Neural NetworksAlthough programming frameworks make convolutions easy to use, they remain one of the hardest concepts to understand in Deep Learning. A convolution layer transforms an input volume into an output volume of different size, as shown below. In this part, you will build every step of the convolution layer. You will first implement two helper functions: one for zero padding and the other for computing the convolution function itself. 3.1 - Zero-PaddingZero-padding adds zeros around the border of an image: **Figure 1** : **Zero-Padding** Image (3 channels, RGB) with a padding of 2. The main benefits of padding are the following:- It allows you to use a CONV layer without necessarily shrinking the height and width of the volumes. This is important for building deeper networks, since otherwise the height/width would shrink as you go to deeper layers. An important special case is the "same" convolution, in which the height/width is exactly preserved after one layer. - It helps us keep more of the information at the border of an image. Without padding, very few values at the next layer would be affected by pixels as the edges of an image.**Exercise**: Implement the following function, which pads all the images of a batch of examples X with zeros. [Use np.pad](https://docs.scipy.org/doc/numpy/reference/generated/numpy.pad.html). Note if you want to pad the array "a" of shape $(5,5,5,5,5)$ with `pad = 1` for the 2nd dimension, `pad = 3` for the 4th dimension and `pad = 0` for the rest, you would do:```pythona = np.pad(a, ((0,0), (1,1), (0,0), (3,3), (0,0)), 'constant', constant_values = (..,..))```
###Code
# GRADED FUNCTION: zero_pad
def zero_pad(X, pad):
"""
Pad with zeros all images of the dataset X. The padding is applied to the height and width of an image,
as illustrated in Figure 1.
Argument:
X -- python numpy array of shape (m, n_H, n_W, n_C) representing a batch of m images
pad -- integer, amount of padding around each image on vertical and horizontal dimensions
Returns:
X_pad -- padded image of shape (m, n_H + 2*pad, n_W + 2*pad, n_C)
"""
    ### START CODE HERE ### (≈ 1 line)
X_pad = np.pad(X,((0,0),(pad,pad),(pad,pad),(0,0)),'constant',constant_values=((0,0),(0,0),(0,0),(0,0)));
### END CODE HERE ###
return X_pad
np.random.seed(1)
x = np.random.randn(4, 3, 3, 2)
x_pad = zero_pad(x, 2)
print ("x.shape =", x.shape)
print ("x_pad.shape =", x_pad.shape)
print ("x[1,1] =", x[1,1])
print ("x_pad[1,1] =", x_pad[1,1])
fig, axarr = plt.subplots(1, 2)
axarr[0].set_title('x')
axarr[0].imshow(x[0,:,:,0])
axarr[1].set_title('x_pad')
axarr[1].imshow(x_pad[0,:,:,0])
###Output
x.shape = (4, 3, 3, 2)
x_pad.shape = (4, 7, 7, 2)
x[1,1] = [[ 0.90085595 -0.68372786]
[-0.12289023 -0.93576943]
[-0.26788808 0.53035547]]
x_pad[1,1] = [[ 0. 0.]
[ 0. 0.]
[ 0. 0.]
[ 0. 0.]
[ 0. 0.]
[ 0. 0.]
[ 0. 0.]]
###Markdown
**Expected Output**: **x.shape**: (4, 3, 3, 2) **x_pad.shape**: (4, 7, 7, 2) **x[1,1]**: [[ 0.90085595 -0.68372786] [-0.12289023 -0.93576943] [-0.26788808 0.53035547]] **x_pad[1,1]**: [[ 0. 0.] [ 0. 0.] [ 0. 0.] [ 0. 0.] [ 0. 0.] [ 0. 0.] [ 0. 0.]] 3.2 - Single step of convolution In this part, implement a single step of convolution, in which you apply the filter to a single position of the input. This will be used to build a convolutional unit, which: - Takes an input volume - Applies a filter at every position of the input- Outputs another volume (usually of different size) **Figure 2** : **Convolution operation** with a filter of 2x2 and a stride of 1 (stride = amount you move the window each time you slide) In a computer vision application, each value in the matrix on the left corresponds to a single pixel value, and we convolve a 3x3 filter with the image by multiplying its values element-wise with the original matrix, then summing them up and adding a bias. In this first step of the exercise, you will implement a single step of convolution, corresponding to applying a filter to just one of the positions to get a single real-valued output. Later in this notebook, you'll apply this function to multiple positions of the input to implement the full convolutional operation. **Exercise**: Implement conv_single_step(). [Hint](https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.sum.html).
###Code
# GRADED FUNCTION: conv_single_step
def conv_single_step(a_slice_prev, W, b):
"""
Apply one filter defined by parameters W on a single slice (a_slice_prev) of the output activation
of the previous layer.
Arguments:
a_slice_prev -- slice of input data of shape (f, f, n_C_prev)
W -- Weight parameters contained in a window - matrix of shape (f, f, n_C_prev)
b -- Bias parameters contained in a window - matrix of shape (1, 1, 1)
Returns:
Z -- a scalar value, result of convolving the sliding window (W, b) on a slice x of the input data
"""
    ### START CODE HERE ### (≈ 2 lines of code)
# Element-wise product between a_slice and W. Do not add the bias yet.
s = np.multiply(a_slice_prev,W);
# Sum over all entries of the volume s.
Z = np.sum(s);
# Add bias b to Z. Cast b to a float() so that Z results in a scalar value.
Z = Z+b;
### END CODE HERE ###
return Z
np.random.seed(1)
a_slice_prev = np.random.randn(4, 4, 3)
W = np.random.randn(4, 4, 3)
b = np.random.randn(1, 1, 1)
Z = conv_single_step(a_slice_prev, W, b)
print("Z =", Z)
###Output
Z = [[[-6.99908945]]]
###Markdown
 **Expected Output**: **Z** -6.99908945068 3.3 - Convolutional Neural Networks - Forward passIn the forward pass, you will take many filters and convolve them on the input. Each 'convolution' gives you a 2D matrix output. You will then stack these outputs to get a 3D volume: **Exercise**: Implement the function below to convolve the filters W on an input activation A_prev. This function takes as input A_prev, the activations output by the previous layer (for a batch of m inputs), F filters/weights denoted by W, and a bias vector denoted by b, where each filter has its own (single) bias. Finally you also have access to the hyperparameters dictionary which contains the stride and the padding. **Hint**: 1. To select a 2x2 slice at the upper left corner of a matrix "a_prev" (shape (5,5,3)), you would do:```pythona_slice_prev = a_prev[0:2,0:2,:]```This will be useful when you will define `a_slice_prev` below, using the `start/end` indexes you will define.2. To define a_slice you will need to first define its corners `vert_start`, `vert_end`, `horiz_start` and `horiz_end`. This figure may be helpful for you to find how each of the corner can be defined using h, w, f and s in the code below. **Figure 3** : **Definition of a slice using vertical and horizontal start/end (with a 2x2 filter)** This figure shows only a single channel. **Reminder**:The formulas relating the output shape of the convolution to the input shape is:$$ n_H = \lfloor \frac{n_{H_{prev}} - f + 2 \times pad}{stride} \rfloor +1 $$$$ n_W = \lfloor \frac{n_{W_{prev}} - f + 2 \times pad}{stride} \rfloor +1 $$$$ n_C = \text{number of filters used in the convolution}$$For this exercise, we won't worry about vectorization, and will just implement everything with for-loops.
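As a quick sanity check of these formulas with the test values used below ($n_{H_{prev}} = n_{W_{prev}} = 4$, $f = 2$, $pad = 2$, $stride = 2$ — a small illustrative computation, not part of the graded code):

```python
n_H_prev, f, pad, stride = 4, 2, 2, 2
n_H = int((n_H_prev - f + 2 * pad) / stride) + 1
print(n_H)  # 4, which matches the (10, 4, 4, 8) shape of Z in the test cell below
```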
###Code
# GRADED FUNCTION: conv_forward
def conv_forward(A_prev, W, b, hparameters):
"""
Implements the forward propagation for a convolution function
Arguments:
A_prev -- output activations of the previous layer, numpy array of shape (m, n_H_prev, n_W_prev, n_C_prev)
W -- Weights, numpy array of shape (f, f, n_C_prev, n_C)
b -- Biases, numpy array of shape (1, 1, 1, n_C)
hparameters -- python dictionary containing "stride" and "pad"
Returns:
Z -- conv output, numpy array of shape (m, n_H, n_W, n_C)
cache -- cache of values needed for the conv_backward() function
"""
### START CODE HERE ###
    # Retrieve dimensions from A_prev's shape (≈1 line)
(m, n_H_prev, n_W_prev, n_C_prev) = A_prev.shape
    # Retrieve dimensions from W's shape (≈1 line)
(f, f, n_C_prev, n_C) = W.shape
    # Retrieve information from "hparameters" (≈2 lines)
stride = hparameters["stride"];
pad = hparameters["pad"];
    # Compute the dimensions of the CONV output volume using the formula given above. Hint: use int() to floor. (≈2 lines)
n_H = int(((n_H_prev+(2*pad)-f)/stride)+1);
n_W = int(((n_W_prev+(2*pad)-f)/stride)+1);
    # Initialize the output volume Z with zeros. (≈1 line)
Z = np.zeros((m,n_H,n_W,n_C));
# Create A_prev_pad by padding A_prev
A_prev_pad = zero_pad(A_prev, pad)
for i in range(m): # loop over the batch of training examples
a_prev_pad = A_prev_pad[i,:,:,:]; # Select ith training example's padded activation
for h in range(0,n_H): # loop over vertical axis of the output volume
for w in range(0,n_W): # loop over horizontal axis of the output volume
for c in range(0,n_C): # loop over channels (= #filters) of the output volume
                    # Find the corners of the current "slice" (≈4 lines)
vert_start = h*(stride);
vert_end = vert_start+f;
horiz_start = w*(stride);
horiz_end = horiz_start +f;
                    # Use the corners to define the (3D) slice of a_prev_pad (See Hint above the cell). (≈1 line)
a_slice_prev = a_prev_pad[vert_start:vert_end,horiz_start:horiz_end,:];
                    # Convolve the (3D) slice with the correct filter W and bias b, to get back one output neuron. (≈1 line)
Z[i, h, w, c] =conv_single_step(a_slice_prev, W[:,:,:,c], b[:,:,:,c])
### END CODE HERE ###
# Making sure your output shape is correct
assert(Z.shape == (m, n_H, n_W, n_C))
# Save information in "cache" for the backprop
cache = (A_prev, W, b, hparameters)
return Z, cache
np.random.seed(1)
A_prev = np.random.randn(10,4,4,3)
W = np.random.randn(2,2,3,8)
b = np.random.randn(1,1,1,8)
hparameters = {"pad" : 2,
"stride": 2}
Z, cache_conv = conv_forward(A_prev, W, b, hparameters)
print("Z's mean =", np.mean(Z))
print("Z[3,2,1] =", Z[3,2,1])
print("cache_conv[0][1][2][3] =", cache_conv[0][1][2][3])
###Output
Z's mean = 0.0489952035289
Z[3,2,1] = [-0.61490741 -6.7439236 -2.55153897 1.75698377 3.56208902 0.53036437
5.18531798 8.75898442]
cache_conv[0][1][2][3] = [-0.20075807 0.18656139 0.41005165]
###Markdown
**Expected Output**: **Z's mean** 0.0489952035289 **Z[3,2,1]** [-0.61490741 -6.7439236 -2.55153897 1.75698377 3.56208902 0.53036437 5.18531798 8.75898442] **cache_conv[0][1][2][3]** [-0.20075807 0.18656139 0.41005165] Finally, CONV layer should also contain an activation, in which case we would add the following line of code:```python Convolve the window to get back one output neuronZ[i, h, w, c] = ... Apply activationA[i, h, w, c] = activation(Z[i, h, w, c])```You don't need to do it here. 4 - Pooling layer The pooling (POOL) layer reduces the height and width of the input. It helps reduce computation, as well as helps make feature detectors more invariant to its position in the input. The two types of pooling layers are: - Max-pooling layer: slides an ($f, f$) window over the input and stores the max value of the window in the output.- Average-pooling layer: slides an ($f, f$) window over the input and stores the average value of the window in the output.These pooling layers have no parameters for backpropagation to train. However, they have hyperparameters such as the window size $f$. This specifies the height and width of the fxf window you would compute a max or average over. 4.1 - Forward PoolingNow, you are going to implement MAX-POOL and AVG-POOL, in the same function. **Exercise**: Implement the forward pass of the pooling layer. Follow the hints in the comments below.**Reminder**:As there's no padding, the formulas binding the output shape of the pooling to the input shape is:$$ n_H = \lfloor \frac{n_{H_{prev}} - f}{stride} \rfloor +1 $$$$ n_W = \lfloor \frac{n_{W_{prev}} - f}{stride} \rfloor +1 $$$$ n_C = n_{C_{prev}}$$
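For the test case below ($n_{H_{prev}} = n_{W_{prev}} = 4$, $f = 3$, $stride = 2$), these formulas give a 1x1 output per channel (a small illustrative computation):

```python
n_H_prev, f, stride = 4, 3, 2
n_H = int(1 + (n_H_prev - f) / stride)
print(n_H)  # 1, so A has shape (2, 1, 1, 3) in the test cell below
```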
###Code
# GRADED FUNCTION: pool_forward
def pool_forward(A_prev, hparameters, mode = "max"):
"""
Implements the forward pass of the pooling layer
Arguments:
A_prev -- Input data, numpy array of shape (m, n_H_prev, n_W_prev, n_C_prev)
hparameters -- python dictionary containing "f" and "stride"
mode -- the pooling mode you would like to use, defined as a string ("max" or "average")
Returns:
A -- output of the pool layer, a numpy array of shape (m, n_H, n_W, n_C)
cache -- cache used in the backward pass of the pooling layer, contains the input and hparameters
"""
# Retrieve dimensions from the input shape
(m, n_H_prev, n_W_prev, n_C_prev) = A_prev.shape
# Retrieve hyperparameters from "hparameters"
f = hparameters["f"]
stride = hparameters["stride"]
# Define the dimensions of the output
n_H = int(1 + (n_H_prev - f) / stride)
n_W = int(1 + (n_W_prev - f) / stride)
n_C = n_C_prev
# Initialize output matrix A
A = np.zeros((m, n_H, n_W, n_C))
### START CODE HERE ###
for i in range(0,m): # loop over the training examples
for h in range(0,n_H): # loop on the vertical axis of the output volume
for w in range(0,n_W): # loop on the horizontal axis of the output volume
for c in range (0,n_C): # loop over the channels of the output volume
                    # Find the corners of the current "slice" (≈4 lines)
vert_start = h*(stride);
vert_end = h*(stride) + f;
horiz_start = w*(stride);
horiz_end = w*(stride) + f;
                    # Use the corners to define the current slice on the ith training example of A_prev, channel c. (≈1 line)
a_prev_slice = A_prev[i,vert_start:vert_end,horiz_start:horiz_end,c];
                    # Compute the pooling operation on the slice. Use an if statement to differentiate the modes. Use np.max/np.mean.
if mode == "max":
A[i, h, w, c] = np.max( a_prev_slice);
elif mode == "average":
A[i, h, w, c] = np.mean( a_prev_slice);
### END CODE HERE ###
# Store the input and hparameters in "cache" for pool_backward()
cache = (A_prev, hparameters)
# Making sure your output shape is correct
assert(A.shape == (m, n_H, n_W, n_C))
return A, cache
np.random.seed(1)
A_prev = np.random.randn(2, 4, 4, 3)
hparameters = {"stride" : 2, "f": 3}
A, cache = pool_forward(A_prev, hparameters)
print("mode = max")
print("A =", A)
print()
A, cache = pool_forward(A_prev, hparameters, mode = "average")
print("mode = average")
print("A =", A)
###Output
mode = max
A = [[[[ 1.74481176 0.86540763 1.13376944]]]
[[[ 1.13162939 1.51981682 2.18557541]]]]
mode = average
A = [[[[ 0.02105773 -0.20328806 -0.40389855]]]
[[[-0.22154621 0.51716526 0.48155844]]]]
###Markdown
 **Expected Output:** A = [[[[ 1.74481176 0.86540763 1.13376944]]] [[[ 1.13162939 1.51981682 2.18557541]]]] A = [[[[ 0.02105773 -0.20328806 -0.40389855]]] [[[-0.22154621 0.51716526 0.48155844]]]] Congratulations! You have now implemented the forward passes of all the layers of a convolutional network. The remainder of this notebook is optional, and will not be graded. 5 - Backpropagation in convolutional neural networks (OPTIONAL / UNGRADED)In modern deep learning frameworks, you only have to implement the forward pass, and the framework takes care of the backward pass, so most deep learning engineers don't need to bother with the details of the backward pass. The backward pass for convolutional networks is complicated. If you wish however, you can work through this optional portion of the notebook to get a sense of what backprop in a convolutional network looks like. When in an earlier course you implemented a simple (fully connected) neural network, you used backpropagation to compute the derivatives with respect to the cost to update the parameters. Similarly, in convolutional neural networks you can calculate the derivatives with respect to the cost in order to update the parameters. The backprop equations are not trivial and we did not derive them in lecture, but we briefly presented them below. 5.1 - Convolutional layer backward pass Let's start by implementing the backward pass for a CONV layer. 5.1.1 - Computing dA:This is the formula for computing $dA$ with respect to the cost for a certain filter $W_c$ and a given training example:$$ dA += \sum _{h=0} ^{n_H} \sum_{w=0} ^{n_W} W_c \times dZ_{hw} \tag{1}$$Where $W_c$ is a filter and $dZ_{hw}$ is a scalar corresponding to the gradient of the cost with respect to the output of the conv layer Z at the hth row and wth column (corresponding to the dot product taken at the ith stride left and jth stride down). Note that at each time, we multiply the same filter $W_c$ by a different dZ when updating dA. We do so mainly because when computing the forward propagation, each filter is dotted and summed by a different a_slice. Therefore when computing the backprop for dA, we are just adding the gradients of all the a_slices. In code, inside the appropriate for-loops, this formula translates into:```pythonda_prev_pad[vert_start:vert_end, horiz_start:horiz_end, :] += W[:,:,:,c] * dZ[i, h, w, c]``` 5.1.2 - Computing dW:This is the formula for computing $dW_c$ ($dW_c$ is the derivative of one filter) with respect to the loss:$$ dW_c += \sum _{h=0} ^{n_H} \sum_{w=0} ^ {n_W} a_{slice} \times dZ_{hw} \tag{2}$$Where $a_{slice}$ corresponds to the slice which was used to generate the activation $Z_{ij}$. Hence, this ends up giving us the gradient for $W$ with respect to that slice. Since it is the same $W$, we will just add up all such gradients to get $dW$. In code, inside the appropriate for-loops, this formula translates into:```pythondW[:,:,:,c] += a_slice * dZ[i, h, w, c]``` 5.1.3 - Computing db:This is the formula for computing $db$ with respect to the cost for a certain filter $W_c$:$$ db = \sum_h \sum_w dZ_{hw} \tag{3}$$As you have previously seen in basic neural networks, db is computed by summing $dZ$. In this case, you are just summing over all the gradients of the conv output (Z) with respect to the cost. In code, inside the appropriate for-loops, this formula translates into:```pythondb[:,:,:,c] += dZ[i, h, w, c]```**Exercise**: Implement the `conv_backward` function below.
You should sum over all the training examples, filters, heights, and widths. You should then compute the derivatives using formulas 1, 2 and 3 above.
###Code
def conv_backward(dZ, cache):
"""
Implement the backward propagation for a convolution function
Arguments:
dZ -- gradient of the cost with respect to the output of the conv layer (Z), numpy array of shape (m, n_H, n_W, n_C)
cache -- cache of values needed for the conv_backward(), output of conv_forward()
Returns:
dA_prev -- gradient of the cost with respect to the input of the conv layer (A_prev),
numpy array of shape (m, n_H_prev, n_W_prev, n_C_prev)
dW -- gradient of the cost with respect to the weights of the conv layer (W)
numpy array of shape (f, f, n_C_prev, n_C)
db -- gradient of the cost with respect to the biases of the conv layer (b)
numpy array of shape (1, 1, 1, n_C)
"""
### START CODE HERE ###
# Retrieve information from "cache"
(A_prev, W, b, hparameters) = None
# Retrieve dimensions from A_prev's shape
(m, n_H_prev, n_W_prev, n_C_prev) = None
# Retrieve dimensions from W's shape
(f, f, n_C_prev, n_C) = None
# Retrieve information from "hparameters"
stride = None
pad = None
# Retrieve dimensions from dZ's shape
(m, n_H, n_W, n_C) = None
# Initialize dA_prev, dW, db with the correct shapes
dA_prev = None
dW = None
db = None
# Pad A_prev and dA_prev
A_prev_pad = None
dA_prev_pad = None
for i in range(None): # loop over the training examples
# select ith training example from A_prev_pad and dA_prev_pad
a_prev_pad = None
da_prev_pad = None
for h in range(None): # loop over vertical axis of the output volume
for w in range(None): # loop over horizontal axis of the output volume
for c in range(None): # loop over the channels of the output volume
# Find the corners of the current "slice"
vert_start = None
vert_end = None
horiz_start = None
horiz_end = None
# Use the corners to define the slice from a_prev_pad
a_slice = None
# Update gradients for the window and the filter's parameters using the code formulas given above
da_prev_pad[vert_start:vert_end, horiz_start:horiz_end, :] += None
dW[:,:,:,c] += None
db[:,:,:,c] += None
        # Set the ith training example's dA_prev to the unpadded da_prev_pad (Hint: use X[pad:-pad, pad:-pad, :])
dA_prev[i, :, :, :] = None
### END CODE HERE ###
# Making sure your output shape is correct
assert(dA_prev.shape == (m, n_H_prev, n_W_prev, n_C_prev))
return dA_prev, dW, db
np.random.seed(1)
dA, dW, db = conv_backward(Z, cache_conv)
print("dA_mean =", np.mean(dA))
print("dW_mean =", np.mean(dW))
print("db_mean =", np.mean(db))
###Output
_____no_output_____
###Markdown
** Expected Output: ** **dA_mean** 1.45243777754 **dW_mean** 1.72699145831 **db_mean** 7.83923256462 5.2 Pooling layer - backward passNext, let's implement the backward pass for the pooling layer, starting with the MAX-POOL layer. Even though a pooling layer has no parameters for backprop to update, you still need to backpropagation the gradient through the pooling layer in order to compute gradients for layers that came before the pooling layer. 5.2.1 Max pooling - backward pass Before jumping into the backpropagation of the pooling layer, you are going to build a helper function called `create_mask_from_window()` which does the following: $$ X = \begin{bmatrix}1 && 3 \\4 && 2\end{bmatrix} \quad \rightarrow \quad M =\begin{bmatrix}0 && 0 \\1 && 0\end{bmatrix}\tag{4}$$As you can see, this function creates a "mask" matrix which keeps track of where the maximum of the matrix is. True (1) indicates the position of the maximum in X, the other entries are False (0). You'll see later that the backward pass for average pooling will be similar to this but using a different mask. **Exercise**: Implement `create_mask_from_window()`. This function will be helpful for pooling backward. Hints:- [np.max()]() may be helpful. It computes the maximum of an array.- If you have a matrix X and a scalar x: `A = (X == x)` will return a matrix A of the same size as X such that:```A[i,j] = True if X[i,j] = xA[i,j] = False if X[i,j] != x```- Here, you don't need to consider cases where there are several maxima in a matrix.
###Code
def create_mask_from_window(x):
"""
Creates a mask from an input matrix x, to identify the max entry of x.
Arguments:
x -- Array of shape (f, f)
Returns:
mask -- Array of the same shape as window, contains a True at the position corresponding to the max entry of x.
"""
    ### START CODE HERE ### (≈1 line)
    mask = (x == np.max(x))
### END CODE HERE ###
return mask
np.random.seed(1)
x = np.random.randn(2,3)
mask = create_mask_from_window(x)
print('x = ', x)
print("mask = ", mask)
###Output
_____no_output_____
###Markdown
**Expected Output:** **x =**[[ 1.62434536 -0.61175641 -0.52817175] [-1.07296862 0.86540763 -2.3015387 ]] **mask =**[[ True False False] [False False False]] Why do we keep track of the position of the max? It's because this is the input value that ultimately influenced the output, and therefore the cost. Backprop is computing gradients with respect to the cost, so anything that influences the ultimate cost should have a non-zero gradient. So, backprop will "propagate" the gradient back to this particular input value that had influenced the cost. 5.2.2 - Average pooling - backward pass In max pooling, for each input window, all the "influence" on the output came from a single input value--the max. In average pooling, every element of the input window has equal influence on the output. So to implement backprop, you will now implement a helper function that reflects this.For example if we did average pooling in the forward pass using a 2x2 filter, then the mask you'll use for the backward pass will look like: $$ dZ = 1 \quad \rightarrow \quad dZ =\begin{bmatrix}1/4 && 1/4 \\1/4 && 1/4\end{bmatrix}\tag{5}$$This implies that each position in the $dZ$ matrix contributes equally to output because in the forward pass, we took an average. **Exercise**: Implement the function below to equally distribute a value dz through a matrix of dimension shape. [Hint](https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.ones.html)
###Code
def distribute_value(dz, shape):
"""
Distributes the input value in the matrix of dimension shape
Arguments:
dz -- input scalar
shape -- the shape (n_H, n_W) of the output matrix for which we want to distribute the value of dz
Returns:
a -- Array of size (n_H, n_W) for which we distributed the value of dz
"""
### START CODE HERE ###
    # Retrieve dimensions from shape (≈1 line)
    (n_H, n_W) = shape
    # Compute the value to distribute on the matrix (≈1 line)
    average = dz / (n_H * n_W)
    # Create a matrix where every entry is the "average" value (≈1 line)
    a = np.ones(shape) * average
### END CODE HERE ###
return a
a = distribute_value(2, (2,2))
print('distributed value =', a)
###Output
_____no_output_____
###Markdown
**Expected Output**: distributed_value =[[ 0.5 0.5] [ 0.5 0.5]] 5.2.3 Putting it together: Pooling backward You now have everything you need to compute backward propagation on a pooling layer.**Exercise**: Implement the `pool_backward` function in both modes (`"max"` and `"average"`). You will once again use 4 for-loops (iterating over training examples, height, width, and channels). You should use an `if/elif` statement to see if the mode is equal to `'max'` or `'average'`. If it is equal to 'average' you should use the `distribute_value()` function you implemented above to create a matrix of the same shape as `a_slice`. Otherwise, the mode is equal to '`max`', and you will create a mask with `create_mask_from_window()` and multiply it by the corresponding value of dZ.
###Code
def pool_backward(dA, cache, mode = "max"):
"""
Implements the backward pass of the pooling layer
Arguments:
dA -- gradient of cost with respect to the output of the pooling layer, same shape as A
cache -- cache output from the forward pass of the pooling layer, contains the layer's input and hparameters
mode -- the pooling mode you would like to use, defined as a string ("max" or "average")
Returns:
dA_prev -- gradient of cost with respect to the input of the pooling layer, same shape as A_prev
"""
### START CODE HERE ###
    # Retrieve information from cache (≈1 line)
(A_prev, hparameters) = None
    # Retrieve hyperparameters from "hparameters" (≈2 lines)
stride = None
f = None
    # Retrieve dimensions from A_prev's shape and dA's shape (≈2 lines)
m, n_H_prev, n_W_prev, n_C_prev = None
m, n_H, n_W, n_C = None
    # Initialize dA_prev with zeros (≈1 line)
dA_prev = None
for i in range(None): # loop over the training examples
        # select training example from A_prev (≈1 line)
a_prev = None
for h in range(None): # loop on the vertical axis
for w in range(None): # loop on the horizontal axis
for c in range(None): # loop over the channels (depth)
                    # Find the corners of the current "slice" (≈4 lines)
vert_start = None
vert_end = None
horiz_start = None
horiz_end = None
# Compute the backward propagation in both modes.
if mode == "max":
                        # Use the corners and "c" to define the current slice from a_prev (≈1 line)
a_prev_slice = None
                        # Create the mask from a_prev_slice (≈1 line)
mask = None
                        # Set dA_prev to be dA_prev + (the mask multiplied by the correct entry of dA) (≈1 line)
dA_prev[i, vert_start: vert_end, horiz_start: horiz_end, c] += None
elif mode == "average":
                        # Get the value a from dA (≈1 line)
da = None
                        # Define the shape of the filter as fxf (≈1 line)
shape = None
                        # Distribute it to get the correct slice of dA_prev. i.e. Add the distributed value of da. (≈1 line)
dA_prev[i, vert_start: vert_end, horiz_start: horiz_end, c] += None
### END CODE ###
# Making sure your output shape is correct
assert(dA_prev.shape == A_prev.shape)
return dA_prev
np.random.seed(1)
A_prev = np.random.randn(5, 5, 3, 2)
hparameters = {"stride" : 1, "f": 2}
A, cache = pool_forward(A_prev, hparameters)
dA = np.random.randn(5, 4, 2, 2)
dA_prev = pool_backward(dA, cache, mode = "max")
print("mode = max")
print('mean of dA = ', np.mean(dA))
print('dA_prev[1,1] = ', dA_prev[1,1])
print()
dA_prev = pool_backward(dA, cache, mode = "average")
print("mode = average")
print('mean of dA = ', np.mean(dA))
print('dA_prev[1,1] = ', dA_prev[1,1])
###Output
_____no_output_____ |
Generating atari images using atrari environments.ipynb | ###Markdown
**DataSource: Atari environment** ***Changes we need to make:*** - Atari image resolution: 210 * 160 => 64 * 64 - Atari image format: channel-last => PyTorch's channel-first layout - data type: the frames arrive as uint8 => calculations need float32 ***How to make these changes:*** there are two ways to do it. First - make the changes directly in the gym env with an InputWrapper: - create an InputWrapper inheriting from gym.ObservationWrapper - redefine the observation space as a Box with the new shape, so that anything inspecting env.observation_space sees the transformed observations. Second - take the raw observation from gym and post-process it ourselves: - get obs using env.step - define a function to modify it.
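For reference, the per-frame conversion boils down to something like this sketch (illustrative only; the 64x64 target matches `Image_size` defined below):

```python
import cv2
import numpy as np

def to_gan_input(obs, size=64):
    # 210x160x3 uint8 frame -> size x size, channel-first, float32
    obs = cv2.resize(obs, (size, size))
    obs = np.moveaxis(obs, 2, 0)   # HWC -> CHW for PyTorch
    return obs.astype(np.float32)
```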
###Code
# imports used throughout this notebook (the SummaryWriter import is an assumption:
# torch.utils.tensorboard works on recent PyTorch; older setups used tensorboardX)
import random, gym, cv2
import numpy as np
import torch, torch.nn as nn, torch.optim as optim
import torchvision.utils as vutils
from torch.utils.tensorboard import SummaryWriter
''' constants we need '''
Image_size = 64 # output image size for our GAN
batch_size = 16 # batch size to generate from env
# saving env images to disk
saved_index = 0
max_save = 100
save = False
###Output
_____no_output_____
###Markdown
method 1: inputwrapper
###Code
class InputWrapper(gym.ObservationWrapper):
def __init__(self, *args):
super(InputWrapper, self).__init__(*args)
assert isinstance(self.observation_space, gym.spaces.Box)
old_space = self.observation_space
self.observation_space = gym.spaces.Box(self.observation(old_space.low), self.observation(old_space.high),
dtype=np.float32)
def observation(self, observation):
global save
new_obs = cv2.resize(observation, (Image_size, Image_size))
if save and np.mean(new_obs) > 0.01:
self.save_images(new_obs)
new_obs = np.moveaxis(a = new_obs, source= 2,destination= 0)
new_obs = new_obs.astype(np.float32)
return new_obs
def save_images(self, obs):
global saved_index , max_save
if saved_index < max_save :
cv2.imwrite( './atari saved images/wrapper_method/img' + str(saved_index) + '.png', np.uint8(obs))
saved_index += 1
def iterate_batches(envs):
global saved_index
initial_images_of_env = [e.reset() for e in envs]
batch = []
# select a random environment from envs
env_gen = iter(lambda: random.choice(envs), None)
while True:
e = next(env_gen)
obs, reward, is_done, _ = e.step(e.action_space.sample())
if np.mean(obs) > 0.01:
batch.append(obs)
if len(batch) == batch_size:
batch_np = np.asarray(batch, np.float32) * 2 / 255.0 - 1
yield torch.tensor(batch_np)
batch.clear()
if is_done:
e.reset()
# env_names = ['Breakout-v0', 'AirRaid-v0', 'Pong-v0']
# envs = [InputWrapper(gym.make(name)) for name in env_names]
# for e in envs:
# print(e.observation_space.shape)
# x_max = 1
# x = 0
# for batch_v in iterate_batches(envs):
# if x < x_max:
# x+= 1
# print(batch_v.size())
# continue
# else:
# break
###Output
_____no_output_____
###Markdown
method 2: define these operations outside of the environment
###Code
# ''' constants we need '''
# Image_size = 64 # output image size for our GAN
# batch_size = 16 # batch size to generate from env
# # saving env images to disk
# save = True
# saved_index = 0
# max_save = 100
# def save_image(obs):
# global saved_index
# if saved_index < max_save:
# cv2.imwrite(
# './atari saved images/non_wrapper_method/img' + str(saved_index) + '.png',
# np.uint8(obs))
# saved_index += 1
# def preprocess(obs):
# obs = cv2.resize(obs, (Image_size, Image_size))
# if save and saved_index < max_save:
# save_image(obs)
# obs = np.moveaxis(a=obs, source=2, destination=0)
# obs = obs.astype(np.float32)
# return obs
# def iterate_batches(envs):
# global saved_index, save, batch_size
# [e.reset() for e in envs]
# batch = []
# env_gen = iter(lambda: random.choice(envs), None)
# while True:
# e = next(env_gen)
# obs, reward, is_done, _ = e.step(e.action_space.sample())
# # check for non-zero mean of image, due to bug in one of the games to prevent flickering of images
# if np.mean(obs) > 0.01:
# obs = preprocess(obs)
# batch.append(obs)
# if len(batch) == batch_size:
# batch_np = np.asarray(batch, np.float32) * 2 / 255.0 - 1 # domain to -1 to 1
# yield torch.tensor(batch_np)
# batch.clear()
# if is_done:
# e.reset()
# env_names = ['Breakout-v0', 'AirRaid-v0', 'Pong-v0']
# envs = [gym.make(name) for name in env_names]
# x_max = 2
# x = 0
# for batch_v in iterate_batches(envs):
# if x < x_max:
# x+= 1
# print(batch_v.size())
# continue
# else:
# break
###Output
_____no_output_____
###Markdown
model
###Code
''' Discriminator constants '''
DISC_FILTERS = 64
input_channels = 3
class Discriminator(nn.Module):
def __init__(self, input_channels):
super(Discriminator, self).__init__()
self.conv_pipe = nn.Sequential(
# 64 -> 32
nn.Conv2d(in_channels= input_channels, out_channels= DISC_FILTERS,kernel_size= 4, stride= 2, padding= 1 ),
nn.ReLU(),
#32 -> 16
nn.Conv2d(in_channels= DISC_FILTERS, out_channels= DISC_FILTERS*2, kernel_size= 4, stride = 2, padding= 1),
nn.BatchNorm2d(DISC_FILTERS*2),
nn.ReLU(),
#16->8
nn.Conv2d(in_channels= DISC_FILTERS*2, out_channels= DISC_FILTERS*4, kernel_size=4, stride= 2, padding=1 ),
nn.BatchNorm2d(DISC_FILTERS*4),
nn.ReLU(),
#8->4
nn.Conv2d(in_channels= DISC_FILTERS*4, out_channels= DISC_FILTERS*8, kernel_size= 4, stride= 2, padding = 1),
nn.BatchNorm2d(DISC_FILTERS*8),
nn.ReLU(),
#4->1
nn.Conv2d(in_channels= DISC_FILTERS*8, out_channels= 1, kernel_size= 4, stride= 1, padding= 0),
nn.Sigmoid()
)
def forward(self, x):
out = self.conv_pipe(x)
#reshape
out = out.view(-1, 1).squeeze(dim = 1)
return out
# '''test your discriminator '''
# disc = Discriminator(input_channels)
# test_output = disc(batch_v)
# print(test_output)
''' generator constants'''
out_channels = 3
generator_filters = 64
latent_vector_size = 100
class Generator(nn.Module):
def __init__(self, out_channels):
super(Generator, self).__init__()
self.deconvpipe = nn.Sequential(
# 4*4
nn.ConvTranspose2d(in_channels= latent_vector_size, out_channels = generator_filters*8,kernel_size= 4, stride= 1, padding= 0),
nn.BatchNorm2d(generator_filters*8),
nn.ReLU(),
# 8*8
nn.ConvTranspose2d(in_channels= generator_filters*8, out_channels = generator_filters*4,kernel_size= 4, stride= 2, padding= 1),
nn.BatchNorm2d(generator_filters*4),
nn.ReLU(),
# 16*16
nn.ConvTranspose2d(in_channels= generator_filters*4, out_channels = generator_filters*2, kernel_size= 4, stride= 2, padding=1),
nn.BatchNorm2d(generator_filters*2),
nn.ReLU(),
# 32*32
nn.ConvTranspose2d(in_channels= generator_filters*2, out_channels = generator_filters, kernel_size=4, stride= 2, padding= 1),
nn.BatchNorm2d(generator_filters),
nn.ReLU(),
# 64*64
nn.ConvTranspose2d(in_channels= generator_filters, out_channels = out_channels, kernel_size = 4, stride= 2, padding= 1),
nn.Tanh()
)
def forward(self, x):
out = self.deconvpipe(x)
return out
# gen = Generator(out_channels)
# test_in = torch.FloatTensor(1, latent_vector_size, 1, 1).normal_(0,1)
# test_out = gen(test_in)
# print(gen)
# print(test_out.shape)
''' main script '''
device = "cuda" if torch.cuda.is_available() else "cpu"
print("used device: ", device)
gen = Generator(out_channels).to(device)
disc = Discriminator(input_channels).to(device)
print(gen)
print(disc)
env_names = ['Breakout-v0', 'AirRaid-v0', 'Pong-v0']
envs = [InputWrapper(gym.make(name)) for name in env_names]
print("input shape: ", envs[0].observation_space.shape)
objective = nn.BCELoss()
gopt = optim.Adam(params= gen.parameters(), lr= 0.0001, betas= (0.5, 0.999))
dopt = optim.Adam(params= disc.parameters(), lr = 0.0001, betas= (0.5, 0.999))
log = gym.logger
log.set_level(gym.logger.INFO)
''' train script '''
writer = SummaryWriter()
train_iter = 0
max_iter = 20000
report_every = 100
save_image_every_iter = 1000
true_labels = torch.ones(batch_size, dtype = torch.float32, device = device)
fake_labels = torch.zeros(batch_size, dtype = torch.float32, device = device)
disc_losses = []
gen_losses = []
for batch_v in iterate_batches(envs):
######################## train discriminator ############################################
## zero grad
dopt.zero_grad()
## prepare the inputs
gen_input = torch.FloatTensor(batch_size, latent_vector_size, 1,1).normal_(0,1).to(device)
batch_v = batch_v.to(device)
## forward the models
gen_output = gen(gen_input)
disc_output_on_real = disc(batch_v)
disc_output_on_fake = disc(gen_output.detach()) # we need only to train the disc so detach gen
## calculate loss
disc_loss = objective(disc_output_on_real, true_labels) + objective(disc_output_on_fake, fake_labels)
disc_losses.append(disc_loss.item())
## get gradients
disc_loss.backward()
## optizer step
dopt.step()
######################## train generator #################################################
## zero grad
gopt.zero_grad()
## forward the model
disc_output_g = disc(gen_output)
## calcualte loss
gen_loss = objective(disc_output_g, true_labels) # the output should be considered as real, if not,it's a loss
gen_losses.append(gen_loss.item())
## calculate gradients
gen_loss.backward()
## optimizer step
gopt.step()
################## summary writer ##########################################################
train_iter += 1
if train_iter %report_every == 0:
log.info("Iter %d: gen_loss=%.3e, dis_loss=%.3e", train_iter, np.mean(gen_losses), np.mean(disc_losses))
writer.add_scalar("gen_loss", np.mean(gen_losses), train_iter)
writer.add_scalar("disc_loss", np.mean(disc_losses), train_iter)
gen_losses.clear()
disc_losses.clear()
if train_iter % save_image_every_iter == 0:
writer.add_image("fake",vutils.make_grid(gen_output.data[:64], normalize= True), train_iter )
writer.add_image("real", vutils.make_grid(batch_v.data[:64], normalize= True), train_iter)
if train_iter> max_iter:
break
writer.close()
def generate_images(n):
gen.eval()
gen_random = torch.FloatTensor(n,latent_vector_size, 1, 1).normal_(0,1).to(device)
images = gen(gen_random)
images = (images + 1)*255.0/2
images = images.to('cpu').detach().numpy()
images = np.moveaxis(images, 1, 3)
print("shape of data: ", images.shape, " type ", type(images))
return np.uint8(images)
images = generate_images(100)
for i in range(images.shape[0]):
cv2.imwrite('./atari saved images/GAN_generated_images/img'+str(i)+".png", images[i])
torch.save(gen.state_dict(), './saved_models/generator')
torch.save(disc.state_dict(), './saved_models/discriminator')
###Output
_____no_output_____ |
MATH/35_Linear_regression.ipynb | ###Markdown
Linear regression model - $\hat y = f(x) \approx y$ --- Adding a constant (bias) term - append a constant column to the independent-variable data - $X_a^TW_a = W_a^TX_a$
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

X0 = np.arange(10).reshape(5,2)
X0
import statsmodels.api as sm
X = sm.add_constant(X0)
X
###Output
_____no_output_____
###Markdown
--- Ordinary least squares (OLS) - $\hat y = Xw$ --- $RSS = e^Te = (y-Xw)^T(y-Xw) = y^Ty - 2y^TXw + w^TX^TXw$. Setting $\frac{dRSS}{dw} = 0$ gives the normal equation $X^TXw^* = X^Ty$. --- Orthogonality equation: $X^Ty - X^TXw = 0$, i.e. $X^T(y-Xw) = 0$, i.e. $X^Te = 0$ --- $c_d^Te = 0$ or $c_d \perp e$ --- Properties of orthogonality: 1. the residuals have zero mean - $\sum_{i=0}^Ne_i = 0$ 2. the prediction at the mean $\bar x$ of the x data equals the mean $\bar y$ of the y data - $\bar y = w^T\bar x$ Numpy
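As a quick illustration of the normal equation (a standalone sketch with synthetic arrays, independent of the cells below), `np.linalg.lstsq` agrees with the explicit $(X^TX)^{-1}X^Ty$ solution:

```python
import numpy as np

rng = np.random.default_rng(0)
X = np.hstack([np.ones((20, 1)), rng.normal(size=(20, 1))])  # constant column + one feature
y = X @ np.array([[3.0], [2.0]]) + 0.1 * rng.normal(size=(20, 1))

w_normal = np.linalg.inv(X.T @ X) @ X.T @ y      # normal equation
w_lstsq, *_ = np.linalg.lstsq(X, y, rcond=None)  # least-squares solver
print(np.allclose(w_normal, w_lstsq))            # True
```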
###Code
from sklearn.datasets import make_regression
bias = 100
X0 , y , w = make_regression(
n_samples = 200, n_features =1, bias = bias , noise = 10, coef = True, random_state =1
)
X = sm.add_constant(X0)
y = y.reshape(len(y),1)
w
w = np.linalg.inv(X.T @ X) @ X.T @ y
w
x_new = np.linspace(np.min(X0), np.max(X0), 10)
X_new = sm.add_constant(x_new)
y_new = np.dot(X_new, w)
plt.scatter(X0, y, label="original data")
plt.plot(x_new, y_new, 'rs-', label="regression prediction")
plt.xlabel("x")
plt.ylabel("y")
plt.title("Linear regression example")
plt.legend()
###Output
_____no_output_____
###Markdown
--- scikit-learn
###Code
from sklearn.linear_model import LinearRegression
model = LinearRegression().fit(X0,y)
print(model.intercept_, model.coef_)
###Output
[99.79150869] [[86.96171201]]
###Markdown
- coef_ : the estimated weight vector - intercept_ : the estimated intercept (constant term) - predict : predict the output for new input data
###Code
model.predict([[-2],[-1],[0],[1],[2]])
###Output
_____no_output_____
###Markdown
---- OLS - build a DataFrame that contains both the independent and the dependent variable; the constant term is combined into X with `sm.add_constant`
###Code
df = pd.DataFrame({'x':X0[:,0],'y':y[:,0]})
df.tail()
dfy = df[['y']]
dfx = sm.add_constant(df[['x']]) # combine the constant term into the design matrix
model = sm.OLS(dfy,dfx)
result = model.fit()
model = sm.OLS.from_formula('y~x', data =df)
result = model.fit()
print(result.summary())
result.predict({'x':[-2,-1,0,1,2]})
###Output
_____no_output_____
###Markdown
- params : the weight vector - resid : the residual vector
###Code
result.params
result.resid.plot(style = 'o')
plt.show()
result.resid.sum()
result.predict({'x': X0.mean()})
y.mean()
from sklearn.datasets import load_boston
boston = load_boston()
dfX0 = pd.DataFrame(boston.data, columns=boston.feature_names)
dfX = sm.add_constant(dfX0)
dfy = pd.DataFrame(boston.target, columns=["MEDV"])
model_boston2 = sm.OLS(dfy, dfX)
result_boston2 = model_boston2.fit()
print(result_boston2.summary())
###Output
OLS Regression Results
==============================================================================
Dep. Variable: MEDV R-squared: 0.741
Model: OLS Adj. R-squared: 0.734
Method: Least Squares F-statistic: 108.1
Date: Tue, 25 Feb 2020 Prob (F-statistic): 6.72e-135
Time: 15:53:36 Log-Likelihood: -1498.8
No. Observations: 506 AIC: 3026.
Df Residuals: 492 BIC: 3085.
Df Model: 13
Covariance Type: nonrobust
==============================================================================
coef std err t P>|t| [0.025 0.975]
------------------------------------------------------------------------------
const 36.4595 5.103 7.144 0.000 26.432 46.487
CRIM -0.1080 0.033 -3.287 0.001 -0.173 -0.043
ZN 0.0464 0.014 3.382 0.001 0.019 0.073
INDUS 0.0206 0.061 0.334 0.738 -0.100 0.141
CHAS 2.6867 0.862 3.118 0.002 0.994 4.380
NOX -17.7666 3.820 -4.651 0.000 -25.272 -10.262
RM 3.8099 0.418 9.116 0.000 2.989 4.631
AGE 0.0007 0.013 0.052 0.958 -0.025 0.027
DIS -1.4756 0.199 -7.398 0.000 -1.867 -1.084
RAD 0.3060 0.066 4.613 0.000 0.176 0.436
TAX -0.0123 0.004 -3.280 0.001 -0.020 -0.005
PTRATIO -0.9527 0.131 -7.283 0.000 -1.210 -0.696
B 0.0093 0.003 3.467 0.001 0.004 0.015
LSTAT -0.5248 0.051 -10.347 0.000 -0.624 -0.425
==============================================================================
Omnibus: 178.041 Durbin-Watson: 1.078
Prob(Omnibus): 0.000 Jarque-Bera (JB): 783.126
Skew: 1.521 Prob(JB): 8.84e-171
Kurtosis: 8.281 Cond. No. 1.51e+04
==============================================================================
Warnings:
[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.
[2] The condition number is large, 1.51e+04. This might indicate that there are
strong multicollinearity or other numerical problems.
|
content/courseware/analytical-efolding.ipynb | ###Markdown
Advanced topic: Analytical solution of the global Energy Balance ModelThis notebook is part of [The Climate Laboratory](https://brian-rose.github.io/ClimateLaboratoryBook) by [Brian E. J. Rose](http://www.atmos.albany.edu/facstaff/brose/index.html), University at Albany. ____________ 1. The zero-dimensional Energy Balance Model: recap____________ Previously we considered a zero-dimensional **Energy Balance Model** with the governing equation$$ C \frac{dT_s}{dt} = (1-\alpha) Q - \tau \sigma T_s^4 $$where- $T_s$ is the global average surface temperature- $C$ is the **heat capacity** of Earth system, in units of J m$^{-2}$ K$^{-1}$.- $\tau$ is the transmissivity of the atmosphere (a measure of the strength of the greenhouse effect).We have seen that numerical solutions of this time-dependent model are easy to implement. However the model is solvable analytically once we make a (very good) approximation by linearing the OLR in terms of departures from equilibrium.The analytical solutions will give us considerable insight into what's actually going on in the model. ____________ 2. Linearizing about the equilibrium solution____________ Equilibrium solutionsWe've already seen that the equilibrium solution of the model is$$ T_{eq} = \left( \frac{(1-\alpha) Q}{\tau \sigma} \right)^\frac{1}{4} $$and tuned the model parameter based on this relationship. We are going to **linearize the equation** for small perturbations away from this equilibrium.Let $T_s = T_{eq} + T_s^\prime$ and restrict our solution to $T_s^\prime << T_{eq}$.Note this this is not a big restriction! For example, a 10 degree warming or cooling is just $\pm$3.4% of the absolute equilibrium temperature. Linearizing the governing equationNow use a first-order Taylor series expansion to write$$ \text{OLR} = \tau \sigma T_s^4 $$$$OLR = \tau \sigma T_s^4 = \tau \sigma \left( T_{eq} + T_s^\prime \right)^4 \approx \tau \sigma \left( T_{eq}^4 + 4 T_{eq}^3 T_s^\prime \right) $$ and the budget for the perturbation temperature thus becomes$$C \frac{d T_s^\prime}{d t} = -\lambda_0 T_s^\prime$$where we define$$\lambda_0 = 4 \tau \sigma T_{eq}^3 $$ Putting in our observational values, we get
###Code
lambda_0 = 4 * sigma * tau * Teq_observed**3
# This is an example of formatted text output in Python
print( 'lambda_0 = {:.2f} W m-2 K-1'.format(lambda_0) )
###Output
_____no_output_____
###Markdown
This is actually our first estimate of what is often called the **Planck feedback**. It is the tendency for a warm surface to cool by increased longwave radiation to space. It may also be refered to as the "no-feedback" climate response parameter. As we will see, $\lambda_0$ quantifies the sensitivity of the climate system in the absence of any actual feedback processes. ____________ 3. Solving the linear ODE____________ Now define$$ t^* = \frac{C}{\lambda_0} $$This is a positive constant with dimensions of time (seconds). With these definitions the temperature evolves according to$$ \frac{d T_s^\prime}{d t} = - \frac{T_s^\prime}{t^*}$$This is one of the simplest ODEs. Hopefully it looks familiar to most of you. It is the equation for an **exponential decay** process. We can easily solve for the temperature evolution by integrating from an initial condition $T_s^\prime(0)$:$$ \int_{T_s^\prime(0)}^{T_s^\prime(t)} \frac{d T_s^\prime}{T_s^\prime} = -\int_0^t \frac{dt}{t^*}$$$$\ln \bigg( \frac{T_s^\prime(t)}{T_s^\prime(0)} \bigg) = -\frac{t}{t^*}$$$$T_s^\prime(t) = T_s^\prime(0) \exp \bigg(-\frac{t}{t^*} \bigg)$$I hope that the mathematics is straightforward for everyone in this class. If not, go through it carefully and make sure you understand each step. ____________ 4. e-folding time for relaxation of global mean temperature____________ Our model says that surface temperature will relax toward its equilibrium value over a characteristic time scale $t^*$. This is an **e-folding time** โ the time it takes for the perturbation to decay by a factor $1/e = 0.37$*What should this timescale be for the climate system?*To estimate $t^*$ we need a value for the effective heat capacity $C$.Our "quick and dirty" estimate above used 100 meters of water to set this heat capacity. What is the right choice for water depth $H$? That turns out to be an interesting and subtle question. It depends very much on the timescale of the problem- days?- years?- decades?- millenia? We will revisit this question later in the course. For now, letโs just continue assuming $H = 100$ m (a bit deeper than the typical depth of the surface mixed layer in the oceans).Now calculate the e-folding time for the surface temperature:
###Code
tstar = C / lambda_0 # Calculated value of relaxation time constant
seconds_per_year = 60.*60.*24.*365.
print( 'The e-folding time is {:1.2e} seconds or about {:1.0f} years.'.format(tstar, tstar / seconds_per_year))
###Output
_____no_output_____ |
Demo/Partikelfysik/Ex2-histogram-over-vald-datamangd.ipynb | ###Markdown
Drawing a histogram of a chosen amount of data In this exercise we look at how the number of data points affects the histogram. For the investigation we use the invariant mass treated in earlier exercises. The data we use comes from measurements with CERN's CMS detector. The CMS detector With the LHC accelerator at CERN, particle beams are accelerated and made to collide with each other. With the CMS detector the particles created in the collisions can be measured and visualized. The picture below shows what the CMS detector looks like when it is open. (Image: Domenico Salvagnin, https://commons.wikimedia.org/wiki/File:[email protected]) 1) Start We begin with code that brings in the variables and function packages we need. The document's code cells have to be run in the right order to work. You can run a cell by clicking on it and pressing **Ctrl + Enter**.
###Code
# We first import the function packages. Pandas reads data files, numpy lets us do calculations, and
# matplotlib.pyplot lets us draw graphs. We give the packages short names (pd, np and plt) so we can
# use them more easily later.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# We create a new DataFrame from the CMS measurement data in the file "Zmumu_Run2011A_massoilla.csv".
# A DataFrame is a table variable, roughly like an Excel file.
# We call the variable "dataset".
dataset = pd.read_csv('https://raw.githubusercontent.com/cms-opendata-education/cms-jupyter-materials-finnish/master/Data/Zmumu_Run2011A_massoilla.csv')
# What does the file contain? Check by printing the first 5 rows of the DataFrame.
# Do you remember how? Write the code below.
# We create a Series variable (in practice a table with just one column) and call it "invariant_massa".
# We define it as the column 'M' of the "dataset" variable.
invariant_massa = dataset['M']
# How many values are stored in the variable 'invariant_massa'?
# Can you find out? Write the code below.
###Output
_____no_output_____
###Markdown
This time we want to choose for ourselves how many values of the invariant mass are used to draw the histogram. For this we need to create an empty list where we can store the desired number of values.
###Code
# We create an empty list 'valda' where we can store the chosen number of data points.
valda = []
###Output
_____no_output_____
###Markdown
2) Choosing the amount of data The code below asks the user how many measurements to include and compiles them into a histogram. In this example we create the histogram in a different way than before. Run the code by clicking on the code cell and pressing **Ctrl + Enter**. You can re-run the cell several times and choose different amounts of data. The task Inspect the code. What do you think happens if - you enter something other than an integer when the cell asks for input? - you enter a value larger than the number of available data points? - you remove **\n** from the print commands? Test whether you guessed right. Investigate how the chosen amount of data affects the histogram. - Which values of the invariant mass appear most common? - What can you conclude from that information? - How is the interpretation of the histogram affected when we change the number of *bins*?
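As a side note, one way to guard against non-integer input (a small sketch, not part of the original exercise) is to wrap the conversion in a try/except:

```python
raw = input('Enter the desired number of data points: ')
try:
    antal = int(raw)
except ValueError:
    print('Please enter a whole number, e.g. 200.')
```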
###Code
# We ask the user how many values to use and store this in the variable 'antal'.
# The code requires the input value to be an integer.
antal = int(input('Enter the desired number of data points: '))
# An if statement checks the variable 'antal', and a for loop takes the right number of elements from the table.
if antal > len(invariant_massa):
print('''\n The number you entered is larger than the number of available data points, so a histogram could not be created.
The number of available data points is %i.''' % len(invariant_massa))
else:
for f in range(antal):
M = invariant_massa[f]
valda.append(M)
print('\n You chose %i measured values of the invariant mass.' %(antal))
# We use numpy's histogram function to create a histogram of the chosen number of invariant masses.
# We name the histogram "histogram1".
histogram1 = np.histogram(valda, bins=120, range=(60,120))
# What happens if we change the values of the parameters bins and range?
# We finish the histogram.
# We choose the bar width and the bin centres.
hist1, bins1 = histogram1
width1 = 1.0*(bins1[1] - bins1[0])
center1 = (bins1[:-1] + bins1[1:])/2
# We draw the histogram with matplotlib.pyplot (plt).
plt.bar(center1, hist1, align='center', width=width1)
# We label the coordinate axes and give the plot a title.
plt.xlabel('Invariant mass [GeV/c²]')
plt.ylabel('Number of observations per bin', fontsize=10)
plt.title('Histogram of the invariant mass of two muons\n', fontsize=15)
# We fix the y axis to the interval 0-800.
axes = plt.gca()
axes.set_ylim([0,800])
# Try changing the display interval of the y axis. What happens if we do not define any interval at all?
# You can skip lines that do not seem necessary by putting a # in front of them.
# How can you change the x axis interval?
# We reset the list so that the cell can be run again.
valda = []
###Output
Enter the desired number of data points: 200
You chose 200 measured values of the invariant mass.
|
Machine Learning/0) Time-series/tensorflow-nn-drop-out-batch-norm-lb-0-515.ipynb | ###Markdown
This is inspired by [Ceshine Lee](https://www.kaggle.com/ceshine/lgbm-starter?scriptVersionId=1852107) and [LingZhi's](https://www.kaggle.com/vrtjso/lgbm-one-step-ahead?scriptVersionId=1965435) LGBM kernels. This kernel tackles the problem with a 2-layer dense neural network that looks something like this: Technically, TensorFlow is used to build the network. Before feeding data into the second layer, batch normalization is applied for faster learning (quicker convergence of gradient descent) and a Dropout layer is used for regularisation to prevent overfitting. Instead of a constant learning rate, I have used AdamOptimizer, whose effective step sizes decay over time, so the whole training takes much less time in my experiments. I'm sorry that the naming conventions are a little confusing, but feel free to ask questions!
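For orientation, the same architecture could be sketched at a higher level with `tf.keras` (illustrative only; the layer sizes mirror the constants defined later, and this is not the code actually used below):

```python
import tensorflow as tf

def build_model(num_features, hidden_units=20):
    model = tf.keras.Sequential([
        tf.keras.layers.Dense(hidden_units, activation='relu',
                              input_shape=(num_features,)),
        tf.keras.layers.BatchNormalization(),   # applied after the activation, as argued below
        tf.keras.layers.Dropout(0.4),           # corresponds to keep_prob 0.6 in the TF1 code
        tf.keras.layers.Dense(1, activation='relu'),
    ])
    model.compile(optimizer=tf.keras.optimizers.Adam(0.01), loss='mse')
    return model
```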
###Code
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.metrics import mean_squared_error
import gc
import tensorflow as tf
###Output
_____no_output_____
###Markdown
Some standard data reading, pre-processing, etc
###Code
df_train_X = pd.read_csv('../input/fork-of-lgbm-one-step-ahead-xgb/x_train.csv')
df_train_Y = pd.read_csv('../input/fork-of-lgbm-one-step-ahead-xgb/y_train.csv')
df_test_X = pd.read_csv('../input/fork-of-lgbm-one-step-ahead-xgb/x_test.csv')
df_test_Y = pd.read_csv('../input/fork-of-lgbm-one-step-ahead-xgb/y_test.csv')
df_Submission_X = pd.read_csv('../input/fork-of-lgbm-one-step-ahead-xgb/submissionX.csv')
itemsDF = pd.read_csv('../input/fork-of-lgbm-one-step-ahead-xgb/items_reindex.csv')
def NWRMSLE(y, pred, w):
return mean_squared_error(y, pred, sample_weight=w)**0.5
df_train_X.drop(['Unnamed: 0'], inplace=True,axis=1)
df_test_X.drop(['Unnamed: 0'], inplace=True,axis=1)
df_train_Y.drop(['Unnamed: 0'], inplace=True,axis=1)
df_test_Y.drop(['Unnamed: 0'], inplace=True,axis=1)
df_Submission_X.drop(['Unnamed: 0'], inplace=True,axis=1)
###Output
_____no_output_____
###Markdown
This is the start of building the computation graph of TensorFlow NN model.Let's declare some constant values for our TF NN model.
###Code
numFeatures = df_train_X.shape[1]
numLabels = 1
hiddenUnit = 20
learningRate = 0.01
numEpochs = 1000
###Output
_____no_output_____
###Markdown
Declare the placeholders for the input(x) and output(y_) layer.
###Code
x = tf.placeholder(tf.float64, [None, numFeatures],name="X_placeholder")
y_ = tf.placeholder(tf.float64, [None, numLabels],name="Y_placeholder")
###Output
_____no_output_____
###Markdown
Declare the first and second hidden layer by initializing the weights to a range of random normally distributed values.
###Code
weights = tf.Variable(tf.random_normal([numFeatures,hiddenUnit],stddev=0.1,name="weights", dtype=tf.float64))
weights2 = tf.Variable(tf.random_normal([hiddenUnit,1],name="weights2", dtype=tf.float64))
###Output
_____no_output_____
###Markdown
Declare the biases that will be added to the weighted inputs later. Similarly, we'll initialize the biases with normally distributed random values.
###Code
bias = tf.Variable(tf.random_normal([1,hiddenUnit],stddev=0.1,name="bias", dtype=tf.float64))
bias2 = tf.Variable(tf.random_normal([1,1],stddev=0.1,name="bias2", dtype=tf.float64))
###Output
_____no_output_____
###Markdown
We'll define a placeholder for inputting the "perishable" feature which is used to compute the weighted loss
###Code
weightsNWR = tf.placeholder(tf.float32, [None, 1],name="weightsNWR")
###Output
_____no_output_____
###Markdown
Take this chance to populate the weight variables which will be used to pass to the placeholder during the training phase.
###Code
itemWeightsTrain = pd.concat([itemsDF["perishable"]] * 6) * 0.25 + 1
itemWeightsTrain = np.reshape(itemWeightsTrain,(itemWeightsTrain.shape[0], 1))
itemWeightsTest = itemsDF["perishable"]* 0.25 + 1
itemWeightsTest = np.reshape(itemWeightsTest,(itemWeightsTest.shape[0], 1))
###Output
_____no_output_____
###Markdown
The first hidden layer multiplies the input by the weights and adds the bias we declared above.
###Code
y = tf.matmul(x,weights) + bias
###Output
_____no_output_____
###Markdown
We'll pass the results of the first layer to a relu activation function to convert the linear values into a non-linear one.
###Code
y = tf.nn.relu(y)
###Output
_____no_output_____
###Markdown
Next, we'll set up batch normalization to normalize the values that come out of the relu function. Normalization can improve learning speed because the path to the minimum is shortened: Although much of the literature applies batch norm **before** the activation function, I believe it can be more beneficial to apply batch normalization **after** the activation, so that the range of linear values is not restricted to a down-sized range.
###Code
epsilon = 1e-3
batch_mean2, batch_var2 = tf.nn.moments(y,[0])
scale2 = tf.Variable(tf.ones([hiddenUnit],dtype=tf.float64),dtype=tf.float64)
beta2 = tf.Variable(tf.zeros([hiddenUnit],dtype=tf.float64),dtype=tf.float64)
y = tf.nn.batch_normalization(y,batch_mean2,batch_var2,beta2,scale2,epsilon)
###Output
_____no_output_____
###Markdown
We set up a dropout layer to intentionally deactivate a random subset of units. This improves generalization and reduces overfitting (better validation-set score) because it forces the layer to learn the same "concept" with different neurons. Note that during the prediction phase, dropout is disabled.
###Code
dropout_placeholder = tf.placeholder(tf.float64,name="dropout_placeholder")
y=tf.nn.dropout(y,dropout_placeholder)
###Output
_____no_output_____
###Markdown
Next we'll build the second hidden layer. As usual, it multiplies the previous layer's output by the weights and adds the bias declared above.
###Code
#create 1 more hidden layer
y = tf.matmul(y,weights2)+bias2
###Output
_____no_output_____
###Markdown
Pass the results to another relu activation function
###Code
y = tf.nn.relu(y)
###Output
_____no_output_____
###Markdown
The loss function that are trying to optimize, or the goal of training, is to minimize the weighted mean squared error. Perishable items are given a weight of 1.25 where all other items are given a weight of 1.00, as described in the competition details.
###Code
loss = tf.losses.mean_squared_error(predictions=y,labels=y_,weights=weightsNWR)
cost = tf.reduce_mean(loss)
###Output
_____no_output_____
###Markdown
As stated above, I have found AdamOptimizer, whose per-parameter step sizes effectively decay over time, to train faster than the plain GradientDescentOptimizer option. Besides that, AdamOptimizer also dampens oscillations in directions that do not point toward the minimum, so the back-and-forth between these walls is reduced while momentum builds up in the direction of the minimum.
###Code
optimizer = tf.train.AdamOptimizer(learning_rate=learningRate).minimize(cost)
###Output
_____no_output_____
###Markdown
Finally, we'll create a TF session for training our model.
###Code
sess = tf.Session()
###Output
_____no_output_____
###Markdown
Initialize the variables that we have been setting up.
###Code
sess.run(tf.global_variables_initializer())
###Output
_____no_output_____
###Markdown
Finally, it's time to train our NN model! We are actually training 16 NN models (one for each column of y values). There are 16 columns in Y train and Y test, representing predictions for 16 days. We train for 1000 epochs. Every 100 epochs we print the weighted MSE on the test set to check for overfitting. After the 1000 epochs are done, we use the trained model for prediction by feeding in the X submission data. Note that the dropout keep probability is set to 0.6 (deactivate 40% of units) during training and back to 1.0 (no deactivation) during prediction. Check these values when you build your own dropout layers to make sure you are not throwing away activations during prediction. Also, training takes longer than Kaggle's allowable timeout limit, so run this on your own local machine.
###Code
val_pred_nn = []
test_pred_nn = []
cate_vars_nn = []
submit_pred_nn=[]
trainingLoss=[]
validationLoss=[]
#step through all the dates(16)
for i in range(16):
print("Step %d" % (i+1))
trainY_NN = np.reshape(df_train_Y.iloc[:,i],(df_train_Y.shape[0], 1))
testY_NN = np.reshape(df_test_Y.iloc[:,i],(df_test_Y.shape[0], 1))
for epoch in range(numEpochs):
_,loss = sess.run([optimizer,cost], feed_dict={x: df_train_X, y_: trainY_NN,weightsNWR:itemWeightsTrain,dropout_placeholder:0.6})
if epoch%100 == 0:
print('Epoch', epoch, 'completed out of',numEpochs,'loss:',loss)
#trainingLoss.append(loss)
#check against test dataset
test_pred = sess.run(cost, feed_dict={x:df_test_X,y_: testY_NN,weightsNWR:itemWeightsTest,dropout_placeholder:1.0})
print('Acc for test dataset ',test_pred)
#validationLoss.append(test_pred)
tf_pred = sess.run(y,feed_dict={x:df_test_X,weightsNWR:itemWeightsTest,dropout_placeholder:1.0})
tf_predY = np.reshape(tf_pred,(tf_pred.shape[0],))
test_pred_nn.append(tf_predY)
print('score for step',(i+1))
print("Validation mse:", mean_squared_error(df_test_Y.iloc[:,i], tf_predY))
print('NWRMSLE:',NWRMSLE(df_test_Y.iloc[:,i], tf_predY,itemsDF["perishable"]*0.25+1))
#predict for submission set
nn_submit_predY = sess.run(y,feed_dict={x:df_Submission_X,dropout_placeholder:1.0})
nn_submit_predY = np.reshape(nn_submit_predY,(nn_submit_predY.shape[0],))
submit_pred_nn.append(nn_submit_predY)
gc.collect()
sess.run(tf.global_variables_initializer())
nnTrainY= np.array(test_pred_nn).transpose()
pd.DataFrame(nnTrainY).to_csv('nnTrainY.csv')
nnSubmitY= np.array(submit_pred_nn).transpose()
pd.DataFrame(nnSubmitY).to_csv('nnSubmitY.csv')
###Output
_____no_output_____
###Markdown
You can use the NWRMSLE below to compare the test-set score with other benchmarks, or to find the optimal weights for your ensemble.
###Code
print('NWRMSLE:',NWRMSLE(df_test_Y,nnTrainY,itemsDF["perishable"]* 0.25 + 1))
###Output
_____no_output_____
###Markdown
With the prediction values from the NN model, prepare for submission. The following cells are pretty self-explanatory.
###Code
#to reproduce the testing IDs
df_train = pd.read_csv(
'../input/favorita-grocery-sales-forecasting/train.csv', usecols=[1, 2, 3, 4, 5],
dtype={'onpromotion': bool},
converters={'unit_sales': lambda u: np.log1p(
float(u)) if float(u) > 0 else 0},
parse_dates=["date"],
skiprows=range(1, 66458909) # 2016-01-01
)
df_2017 = df_train.loc[df_train.date>=pd.datetime(2017,1,1)]
del df_train
df_2017 = df_2017.set_index(
["store_nbr", "item_nbr", "date"])[["unit_sales"]].unstack(
level=-1).fillna(0)
df_2017.columns = df_2017.columns.get_level_values(1)
#submitDF = pd.read_csv('../input/testforsubmit/testForSubmit.csv',index_col=False)
df_test = pd.read_csv(
"../input/favorita-grocery-sales-forecasting/test.csv", usecols=[0, 1, 2, 3, 4],
dtype={'onpromotion': bool},
parse_dates=["date"] # , date_parser=parser
).set_index(
['store_nbr', 'item_nbr', 'date']
)
print("Making submission...")
combinedSubmitPredY = nnSubmitY
df_preds = pd.DataFrame(
combinedSubmitPredY, index=df_2017.index,
columns=pd.date_range("2017-08-16", periods=16)
).stack().to_frame("unit_sales")
df_preds.index.set_names(["store_nbr", "item_nbr", "date"], inplace=True)
submission = df_test[["id"]].join(df_preds, how="left").fillna(0)
submission["unit_sales"] = np.clip(np.expm1(submission["unit_sales"]), 0, 1000)
submission[['id','unit_sales']].to_csv('submit_nn.csv',index=None)
###Output
_____no_output_____ |
Elise/modelInput/createBC_SOGTidesBiochemOBC_1100x10x40-high.ipynb | ###Markdown
This notebook generates forcing files for the 2D domain biogeochemistry. Assign constant but reasonable values at the boundaries so that it will be obvious if the BC's are functioning.
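A quick way to confirm that the constant boundary values took effect is to re-open one of the files written below and check that each field holds a single value (a sketch; the path matches the file created later in this notebook):

```python
import netCDF4 as nc
import numpy as np

with nc.Dataset('/ocean/eolson/MEOPAR/NEMO-3.6-inputs/boundary_conditions/'
                'bio_OBC_1100x10x40_52x40x1x100_high.nc') as chk:
    print(np.unique(chk.variables['NO3'][:]))  # expect a single constant value
```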
###Code
import netCDF4 as nc
import numpy as np
import matplotlib.pyplot as plt
import os
%matplotlib inline
resultsDir='/data/eolson/MEOPAR/SS2DSOGruns/'
N2chl=1.600
###Output
_____no_output_____
###Markdown
Load 3D T+S
###Code
f = nc.Dataset('/ocean/eolson/MEOPAR/NEMO-3.6-inputs/initial_conditions/nuts_SOG1100x10x40.nc')
allkeys = list(f.variables.keys())  # (not used below; kept as a list of variable names)
s=''
for key in f.variables.keys():
s=s+key+', '
print(s)
print(f.variables['POC'][0,:,7,500])
print(np.max(f.variables['NO3']))
NO3_val=np.max(f.variables['NO3'][:])*10**7
Si_val=np.max(f.variables['Si'][:])*10**7
NH4_val=np.max(f.variables['NH4'][:])*10**7
PHY_val=np.max(f.variables['PHY'][:])*10**7
PHY2_val=np.max(f.variables['PHY2'][:])*10**7
MYRI_val=np.max(f.variables['MYRI'][:])*10
MICZ_val=np.max(f.variables['MICZ'][:])*10
POC_val=np.max(f.variables['POC'][:])*10
DOC_val=np.max(f.variables['DOC'][:])*10
bSi_val=np.max(f.variables['bSi'][:])*10
f2=nc.Dataset('/ocean/eolson/MEOPAR/NEMO-3.6-inputs/boundary_conditions/TS_OBC.nc')
depth = f2.variables['deptht'][:]
times = f2.variables['time_counter'][:]
s=''
for key in f2.variables.keys():
s=s+key+', '
print(s)
print(depth)
print(times)
print(f2.variables['votemper'].shape)
###Output
[ 0.5000003 1.5000031 2.50001144 3.50003052 4.50007057
5.50015068 6.50031042 7.50062323 8.50123596 9.50243282
10.50476551 11.50931168 12.51816654 13.53541183 14.56898212
15.63428783 16.76117325 18.00713539 19.48178482 21.38997841
24.10025597 28.22991562 34.68575668 44.51772308 58.48433304
76.58558655 98.06295776 121.86651611 147.08946228 173.11448669
199.57304382 226.26029968 253.06663513 279.93453979 306.834198
333.75018311 360.67453003 387.60321045 414.53408813 441.46609497]
[ 1. 2. 3. 4. 5. 6. 7. 8. 9. 10. 11. 12. 13. 14. 15.
16. 17. 18. 19. 20. 21. 22. 23. 24. 25. 26. 27. 28. 29. 30.
31. 32. 33. 34. 35. 36. 37. 38. 39. 40. 41. 42. 43. 44. 45.
46. 47. 48. 49. 50. 51. 52.]
(52, 40, 1, 100)
###Markdown
Save to netcdf
###Code
nemo = nc.Dataset('/ocean/eolson/MEOPAR/NEMO-3.6-inputs/boundary_conditions/bio_OBC_1100x10x40_52x40x1x100_high.nc', 'w', zlib=True)
Ny=8
#start and end points
length_rim =10
lengthi=Ny*length_rim #80
#time and depth
depth_levels =40
# dimensions
nemo.createDimension('xb', lengthi)
nemo.createDimension('yb', 1)
nemo.createDimension('time_counter', None)
nemo.createDimension('deptht', depth_levels)
# variables
# deptht
deptht = nemo.createVariable('deptht', 'float32', ('deptht',))
deptht.long_name = 'Vertical T Levels'
deptht.units = 'm'
deptht.positive = 'down'
deptht.valid_range = np.array((4., 428.))
deptht[:]=depth
# time_counter
time_counter = nemo.createVariable('time_counter', 'float32', ('time_counter'))
time_counter.long_name = 'Time axis'
time_counter.axis = 'T'
time_counter.units = 'weeks since beginning of year'
time_counter[:]=times
# NO3
voNO3 = nemo.createVariable('NO3', 'float32',
('time_counter','deptht','yb','xb'))
#voNO3.units = f.variables['NO3'].units
voNO3.long_name = f.variables['NO3'].long_name
voNO3.grid = 'SalishSea2D'
voNO3[:]=NO3_val
#Si
voSi = nemo.createVariable('Si', 'float32',
('time_counter','deptht','yb','xb'))
#voSi.units = f.variables['Si'].units
voSi.long_name = f.variables['Si'].long_name
voSi.grid = 'SalishSea2D'
voSi[:]=Si_val
#NH4
voNH4 = nemo.createVariable('NH4', 'float32',
('time_counter','deptht','yb','xb'))
#voNH4.units = f.variables['NH4'].units
voNH4.long_name = f.variables['NH4'].long_name
voNH4.grid = 'SalishSea2D'
voNH4[:]=NH4_val
#PHY
voPHY = nemo.createVariable('PHY', 'float32',
('time_counter','deptht','yb','xb'))
#voPHY.units = f.variables['PHY'].units
voPHY.long_name = f.variables['PHY'].long_name
voPHY.grid = 'SalishSea2D'
voPHY[:]=PHY_val
#PHY2
voPHY2 = nemo.createVariable('PHY2', 'float32',
('time_counter','deptht','yb','xb'))
#voPHY2.units = f.variables['PHY2'].units
voPHY2.long_name = f.variables['PHY2'].long_name
voPHY2.grid = 'SalishSea2D'
voPHY2[:]=PHY2_val
#MYRI
voMYRI = nemo.createVariable('MYRI', 'float32',
('time_counter','deptht','yb','xb'))
#voMYRI.units = f.variables['MYRI'].units
voMYRI.long_name = f.variables['MYRI'].long_name
voMYRI.grid = 'SalishSea2D'
voMYRI[:]=MYRI_val
#MICZ
voMICZ = nemo.createVariable('MICZ', 'float32',
('time_counter','deptht','yb','xb'))
#voMICZ.units = f.variables['MICZ'].units
voMICZ.long_name = f.variables['MICZ'].long_name
voMICZ.grid = 'SalishSea2D'
voMICZ[:]=MICZ_val
#POC
voPOC = nemo.createVariable('POC', 'float32',
('time_counter','deptht','yb','xb'))
#voPOC.units = f.variables['POC'].units
voPOC.long_name = f.variables['POC'].long_name
voPOC.grid = 'SalishSea2D'
voPOC[:]=POC_val
#DOC
voDOC = nemo.createVariable('DOC', 'float32',
('time_counter','deptht','yb','xb'))
#voDOC.units = f.variables['DOC'].units
voDOC.long_name = f.variables['DOC'].long_name
voDOC.grid = 'SalishSea2D'
voDOC[:]=DOC_val
#bSi
vobSi = nemo.createVariable('bSi', 'float32',
('time_counter','deptht','yb','xb'))
#vobSi.units = f.variables['bSi'].units
vobSi.long_name = f.variables['bSi'].long_name
vobSi.grid = 'SalishSea2D'
vobSi[:]=bSi_val
#O2
voO2 = nemo.createVariable('O2', 'float32',
('time_counter','deptht','yb','xb'))
#voO2.units = ''
voO2.long_name = 'oxygen'
voO2.grid = 'SalishSea2D'
voO2[:]=500.0
# nbidta, ndjdta, ndrdta
nbidta = nemo.createVariable('nbidta', 'int32' , ('yb','xb'))
nbidta.long_name = 'i grid position'
nbidta.units = 1
nbjdta = nemo.createVariable('nbjdta', 'int32' , ('yb','xb'))
nbjdta.long_name = 'j grid position'
nbjdta.units = 1
nbrdta = nemo.createVariable('nbrdta', 'int32' , ('yb','xb'))
nbrdta.long_name = 'position from boundary'
nbrdta.units = 1
for ir in range(length_rim):
nbidta[0,ir*Ny:(ir+1)*Ny] = ir
nbjdta[0,ir*Ny:(ir+1)*Ny] = range(Ny)
nbrdta[0,ir*Ny:(ir+1)*Ny] = ir
nemo.close()
times
times=np.array([1.,26., 53.])
times
nemo2 = nc.Dataset('/ocean/eolson/MEOPAR/NEMO-3.6-inputs/boundary_conditions/bio_OBC_1100x10x40_52x40x1x100_high_South.nc', 'w', zlib=True)
Ny=18
#start and end points
length_rim =5
lengthi=Ny*length_rim #80
#time and depth
depth_levels =40
# dimensions
nemo2.createDimension('xb', lengthi)
nemo2.createDimension('yb', 1)
nemo2.createDimension('time_counter', None)
nemo2.createDimension('deptht', depth_levels)
# variables
# deptht
deptht = nemo2.createVariable('deptht', 'float32', ('deptht',))
deptht.long_name = 'Vertical T Levels'
deptht.units = 'm'
deptht.positive = 'down'
deptht.valid_range = np.array((4., 428.))
deptht[:]=depth
# time_counter
time_counter = nemo2.createVariable('time_counter', 'float32', ('time_counter'))
time_counter.long_name = 'Time axis'
time_counter.axis = 'T'
time_counter.units = 'weeks since beginning of year'
time_counter[:]=times
# NO3
voNO3 = nemo2.createVariable('NO3', 'float32',
('time_counter','deptht','yb','xb'))
#voNO3.units = f.variables['NO3'].units
voNO3.long_name = f.variables['NO3'].long_name
voNO3.grid = 'SalishSea2D'
voNO3[:]=NO3_val
#Si
voSi = nemo2.createVariable('Si', 'float32',
('time_counter','deptht','yb','xb'))
#voSi.units = f.variables['Si'].units
voSi.long_name = f.variables['Si'].long_name
voSi.grid = 'SalishSea2D'
voSi[:]=Si_val
#NH4
voNH4 = nemo2.createVariable('NH4', 'float32',
('time_counter','deptht','yb','xb'))
#voNH4.units = f.variables['NH4'].units
voNH4.long_name = f.variables['NH4'].long_name
voNH4.grid = 'SalishSea2D'
voNH4[:]=NH4_val
#PHY
voPHY = nemo2.createVariable('PHY', 'float32',
('time_counter','deptht','yb','xb'))
#voPHY.units = f.variables['PHY'].units
voPHY.long_name = f.variables['PHY'].long_name
voPHY.grid = 'SalishSea2D'
voPHY[:]=PHY_val
#PHY2
voPHY2 = nemo2.createVariable('PHY2', 'float32',
('time_counter','deptht','yb','xb'))
#voPHY2.units = f.variables['PHY2'].units
voPHY2.long_name = f.variables['PHY2'].long_name
voPHY2.grid = 'SalishSea2D'
voPHY2[:]=PHY2_val
#MYRI
voMYRI = nemo2.createVariable('MYRI', 'float32',
('time_counter','deptht','yb','xb'))
#voMYRI.units = f.variables['MYRI'].units
voMYRI.long_name = f.variables['MYRI'].long_name
voMYRI.grid = 'SalishSea2D'
voMYRI[:]=MYRI_val
#MICZ
voMICZ = nemo2.createVariable('MICZ', 'float32',
('time_counter','deptht','yb','xb'))
#voMICZ.units = f.variables['MICZ'].units
voMICZ.long_name = f.variables['MICZ'].long_name
voMICZ.grid = 'SalishSea2D'
voMICZ[:]=MICZ_val
#POC
voPOC = nemo2.createVariable('POC', 'float32',
('time_counter','deptht','yb','xb'))
#voPOC.units = f.variables['POC'].units
voPOC.long_name = f.variables['POC'].long_name
voPOC.grid = 'SalishSea2D'
voPOC[:]=POC_val
#DOC
voDOC = nemo2.createVariable('DOC', 'float32',
('time_counter','deptht','yb','xb'))
#voDOC.units = f.variables['DOC'].units
voDOC.long_name = f.variables['DOC'].long_name
voDOC.grid = 'SalishSea2D'
voDOC[:]=DOC_val
#bSi
vobSi = nemo2.createVariable('bSi', 'float32',
('time_counter','deptht','yb','xb'))
#vobSi.units = f.variables['bSi'].units
vobSi.long_name = f.variables['bSi'].long_name
vobSi.grid = 'SalishSea2D'
vobSi[:]=bSi_val
#O2
voO2 = nemo2.createVariable('O2', 'float32',
('time_counter','deptht','yb','xb'))
#voO2.units = ''
voO2.long_name = 'oxygen'
voO2.grid = 'SalishSea2D'
voO2[:]=500.0
# nbidta, ndjdta, ndrdta
nbidta = nemo2.createVariable('nbidta', 'int32' , ('yb','xb'))
nbidta.long_name = 'i grid position'
nbidta.units = 1
nbjdta = nemo2.createVariable('nbjdta', 'int32' , ('yb','xb'))
nbjdta.long_name = 'j grid position'
nbjdta.units = 1
nbrdta = nemo2.createVariable('nbrdta', 'int32' , ('yb','xb'))
nbrdta.long_name = 'position from boundary'
nbrdta.units = 1
for ir in range(length_rim):
nbidta[0,ir*Ny:(ir+1)*Ny] = range(Ny)
nbjdta[0,ir*Ny:(ir+1)*Ny] = ir
nbrdta[0,ir*Ny:(ir+1)*Ny] = ir
nemo2.close()
# create tides OBCs...
tides=nc.Dataset('/data/eolson/MEOPAR/SS36runs/run_2d_2OBC/bdy_cond/Salish2D_3.6_K1_grid_T.nc')
! cp /ocean/eolson/MEOPAR/NEMO-3.6-inputs/boundary_conditions/Salish2D
print(tides.variables['nbidta'][:,:])
print(tides.variables['nbjdta'][:,:])
print(tides.variables['nbrdta'][:,:])
print(tides.variables['xb'][:])
print(tides.variables['yb'][:])
print(tides.variables['z1'][:,:])
print(tides.variables['z2'][:,:])
test=nc.Dataset('/data/eolson/MEOPAR/SS36runs/run_2d_2OBC/bdy_cond/bio_OBC_South.nc')
print(test.variables)
###Output
OrderedDict([('deptht', <class 'netCDF4._netCDF4.Variable'>
float32 deptht(deptht)
long_name: Vertical T Levels
units: m
positive: down
valid_range: [ 4. 428.]
unlimited dimensions:
current shape = (40,)
filling on, default _FillValue of 9.969209968386869e+36 used
), ('time_counter', <class 'netCDF4._netCDF4.Variable'>
float32 time_counter(time_counter)
long_name: Time axis
axis: T
units: weeks since beginning of year
unlimited dimensions: time_counter
current shape = (2,)
filling on, default _FillValue of 9.969209968386869e+36 used
), ('NO3', <class 'netCDF4._netCDF4.Variable'>
float32 NO3(time_counter, deptht, yb, xb)
long_name: Nitrate
grid: SalishSea2D
unlimited dimensions: time_counter
current shape = (2, 40, 1, 90)
filling on, default _FillValue of 9.969209968386869e+36 used
), ('Si', <class 'netCDF4._netCDF4.Variable'>
float32 Si(time_counter, deptht, yb, xb)
long_name: Silicate
grid: SalishSea2D
unlimited dimensions: time_counter
current shape = (2, 40, 1, 90)
filling on, default _FillValue of 9.969209968386869e+36 used
), ('NH4', <class 'netCDF4._netCDF4.Variable'>
float32 NH4(time_counter, deptht, yb, xb)
long_name: Ammonium
grid: SalishSea2D
unlimited dimensions: time_counter
current shape = (2, 40, 1, 90)
filling on, default _FillValue of 9.969209968386869e+36 used
), ('PHY', <class 'netCDF4._netCDF4.Variable'>
float32 PHY(time_counter, deptht, yb, xb)
long_name: PHY
grid: SalishSea2D
unlimited dimensions: time_counter
current shape = (2, 40, 1, 90)
filling on, default _FillValue of 9.969209968386869e+36 used
), ('PHY2', <class 'netCDF4._netCDF4.Variable'>
float32 PHY2(time_counter, deptht, yb, xb)
long_name: PHY2
grid: SalishSea2D
unlimited dimensions: time_counter
current shape = (2, 40, 1, 90)
filling on, default _FillValue of 9.969209968386869e+36 used
), ('MYRI', <class 'netCDF4._netCDF4.Variable'>
float32 MYRI(time_counter, deptht, yb, xb)
long_name: MYRI
grid: SalishSea2D
unlimited dimensions: time_counter
current shape = (2, 40, 1, 90)
filling on, default _FillValue of 9.969209968386869e+36 used
), ('MICZ', <class 'netCDF4._netCDF4.Variable'>
float32 MICZ(time_counter, deptht, yb, xb)
long_name: MICZ
grid: SalishSea2D
unlimited dimensions: time_counter
current shape = (2, 40, 1, 90)
filling on, default _FillValue of 9.969209968386869e+36 used
), ('POC', <class 'netCDF4._netCDF4.Variable'>
float32 POC(time_counter, deptht, yb, xb)
long_name: POC
grid: SalishSea2D
unlimited dimensions: time_counter
current shape = (2, 40, 1, 90)
filling on, default _FillValue of 9.969209968386869e+36 used
), ('DOC', <class 'netCDF4._netCDF4.Variable'>
float32 DOC(time_counter, deptht, yb, xb)
long_name: DOC
grid: SalishSea2D
unlimited dimensions: time_counter
current shape = (2, 40, 1, 90)
filling on, default _FillValue of 9.969209968386869e+36 used
), ('bSi', <class 'netCDF4._netCDF4.Variable'>
float32 bSi(time_counter, deptht, yb, xb)
long_name: bSi
grid: SalishSea2D
unlimited dimensions: time_counter
current shape = (2, 40, 1, 90)
filling on, default _FillValue of 9.969209968386869e+36 used
), ('O2', <class 'netCDF4._netCDF4.Variable'>
float32 O2(time_counter, deptht, yb, xb)
long_name: oxygen
grid: SalishSea2D
unlimited dimensions: time_counter
current shape = (2, 40, 1, 90)
filling on, default _FillValue of 9.969209968386869e+36 used
), ('nbidta', <class 'netCDF4._netCDF4.Variable'>
int32 nbidta(yb, xb)
long_name: i grid position
units: 1
unlimited dimensions:
current shape = (1, 90)
filling on, default _FillValue of -2147483647 used
), ('nbjdta', <class 'netCDF4._netCDF4.Variable'>
int32 nbjdta(yb, xb)
long_name: j grid position
units: 1
unlimited dimensions:
current shape = (1, 90)
filling on, default _FillValue of -2147483647 used
), ('nbrdta', <class 'netCDF4._netCDF4.Variable'>
int32 nbrdta(yb, xb)
long_name: position from boundary
units: 1
unlimited dimensions:
current shape = (1, 90)
filling on, default _FillValue of -2147483647 used
)])
|
04_alpha_factor_research/04_single_factor_zipline.ipynb | ###Markdown
Zipline Backtest with Single Factor > Please use with `conda` environment `ml4t-zipline`. Setup
###Code
import warnings
warnings.filterwarnings('ignore')
%matplotlib inline
import seaborn as sns
import matplotlib.pyplot as plt

sns.set_style('whitegrid')
###Output
_____no_output_____
###Markdown
We are first going to illustrate the zipline alpha factor research workflow in an offline environment. In particular, we will develop and test a simple mean-reversion factor that measures how much recent performance has deviated from the historical average. Short-term reversal is a common strategy that takes advantage of the weakly predictive pattern that stock price increases are likely to mean-revert back down over horizons from less than a minute to one month. To this end, the factor computes the z-score of the last monthly return relative to the rolling monthly returns over the last year. At this point, we will not place any orders; we simply illustrate the implementation of a CustomFactor and record the results during the simulation. After some basic settings, `MeanReversion` subclasses `CustomFactor` and defines a `compute()` method. It creates default inputs of monthly returns over a likewise default year-long window, so that the monthly_returns variable will have 252 rows and one column for each security in the Quandl dataset on a given day. The `compute_factors()` method creates a `MeanReversion` factor instance and creates long, short, and ranking pipeline columns. The former two contain Boolean values that could be used to place orders, and the latter reflects the overall ranking used to evaluate factor performance. Furthermore, it uses the built-in `AverageDollarVolume` factor to limit the computation to more liquid stocks. The result would allow us to place long and short orders. We will see in the next chapter how to build a portfolio by choosing a rebalancing period and adjusting portfolio holdings as new signals arrive. - The `initialize()` method registers the compute_factors() pipeline, and the before_trading_start() method ensures the pipeline runs on a daily basis. - The `record()` function adds the pipeline's ranking column as well as the current asset prices to the performance DataFrame returned by the `run_algorithm()` function. We will use the factor and pricing data stored in the performance DataFrame to evaluate the factor performance for various holding periods in the next section, but first, we'll take a look at how to create more complex signals by combining several alpha factors from a diverse set of data sources on the Quantopian platform.
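Once the cell below has produced `single_factor.pickle`, the recorded factor ranks and prices can be pulled back out for analysis along these lines (a sketch; the column names follow the `record()` calls inside the algorithm):

```python
import pandas as pd

perf = pd.read_pickle('single_factor.pickle')
# each row of perf.factor_data / perf.prices holds a per-asset Series for one date
factor_ranks = pd.concat([s.to_frame(d) for d, s in perf.factor_data.dropna().items()], axis=1).T
prices = pd.concat([s.to_frame(d) for d, s in perf.prices.dropna().items()], axis=1).T
print(factor_ranks.shape, prices.shape)
```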
###Code
%load_ext zipline
%%zipline --start 2015-1-1 --end 2018-1-1 --output single_factor.pickle
from zipline.api import (
attach_pipeline,
date_rules,
time_rules,
order_target_percent,
pipeline_output,
record,
schedule_function,
get_open_orders,
calendars
)
from zipline.finance import commission, slippage
from zipline.pipeline import Pipeline, CustomFactor
from zipline.pipeline.factors import Returns, AverageDollarVolume
import numpy as np
import pandas as pd
MONTH = 21
YEAR = 12 * MONTH
N_LONGS = N_SHORTS = 25
VOL_SCREEN = 1000
class MeanReversion(CustomFactor):
"""Compute ratio of latest monthly return to 12m average,
normalized by std dev of monthly returns"""
inputs = [Returns(window_length=MONTH)]
window_length = YEAR
def compute(self, today, assets, out, monthly_returns):
df = pd.DataFrame(monthly_returns)
out[:] = df.iloc[-1].sub(df.mean()).div(df.std())
def compute_factors():
"""Create factor pipeline incl. mean reversion,
filtered by 30d Dollar Volume; capture factor ranks"""
mean_reversion = MeanReversion()
dollar_volume = AverageDollarVolume(window_length=30)
return Pipeline(columns={'longs': mean_reversion.bottom(N_LONGS),
'shorts': mean_reversion.top(N_SHORTS),
'ranking': mean_reversion.rank(ascending=False)},
screen=dollar_volume.top(VOL_SCREEN))
def exec_trades(data, assets, target_percent):
"""Place orders for assets using target portfolio percentage"""
for asset in assets:
if data.can_trade(asset) and not get_open_orders(asset):
order_target_percent(asset, target_percent)
def rebalance(context, data):
"""Compute long, short and obsolete holdings; place trade orders"""
factor_data = context.factor_data
record(factor_data=factor_data.ranking)
assets = factor_data.index
record(prices=data.current(assets, 'price'))
longs = assets[factor_data.longs]
shorts = assets[factor_data.shorts]
divest = set(context.portfolio.positions.keys()) - set(longs.union(shorts))
exec_trades(data, assets=divest, target_percent=0)
exec_trades(data, assets=longs, target_percent=1 / N_LONGS)
exec_trades(data, assets=shorts, target_percent=-1 / N_SHORTS)
def initialize(context):
"""Setup: register pipeline, schedule rebalancing,
and set trading params"""
attach_pipeline(compute_factors(), 'factor_pipeline')
schedule_function(rebalance,
date_rules.week_start(),
time_rules.market_open(),
calendar=calendars.US_EQUITIES)
context.set_commission(commission.PerShare(cost=.01, min_trade_cost=0))
context.set_slippage(slippage.VolumeShareSlippage())
def before_trading_start(context, data):
"""Run factor pipeline"""
context.factor_data = pipeline_output('factor_pipeline')
###Output
_____no_output_____ |
Big-Data-Clusters/CU4/Public/content/install/sop055-uninstall-azdata.ipynb | ###Markdown
SOP055 - Uninstall azdata CLI (using pip) ========================================= Steps ----- Common functions Define helper functions used in this notebook.
###Code
# Define `run` function for transient fault handling, hyperlinked suggestions, and scrolling updates on Windows
import sys
import os
import re
import json
import platform
import shlex
import shutil
import datetime
from subprocess import Popen, PIPE
from IPython.display import Markdown
retry_hints = {} # Output in stderr known to be transient, therefore automatically retry
error_hints = {} # Output in stderr where a known SOP/TSG exists which will be HINTed for further help
install_hint = {} # The SOP to help install the executable if it cannot be found
first_run = True
rules = None
debug_logging = False
def run(cmd, return_output=False, no_output=False, retry_count=0):
"""Run shell command, stream stdout, print stderr and optionally return output
NOTES:
1. Commands that need this kind of ' quoting on Windows e.g.:
kubectl get nodes -o jsonpath={.items[?(@.metadata.annotations.pv-candidate=='data-pool')].metadata.name}
Need to actually pass in as '"':
kubectl get nodes -o jsonpath={.items[?(@.metadata.annotations.pv-candidate=='"'data-pool'"')].metadata.name}
The ' quote approach, although correct when pasting into Windows cmd, will hang at the line:
`iter(p.stdout.readline, b'')`
The shlex.split call does the right thing for each platform, just use the '"' pattern for a '
"""
MAX_RETRIES = 5
output = ""
retry = False
global first_run
global rules
if first_run:
first_run = False
rules = load_rules()
# When running `azdata sql query` on Windows, replace any \n in """ strings, with " ", otherwise we see:
#
# ('HY090', '[HY090] [Microsoft][ODBC Driver Manager] Invalid string or buffer length (0) (SQLExecDirectW)')
#
if platform.system() == "Windows" and cmd.startswith("azdata sql query"):
cmd = cmd.replace("\n", " ")
# shlex.split is required on bash and for Windows paths with spaces
#
cmd_actual = shlex.split(cmd)
# Store this (i.e. kubectl, python etc.) to support binary context aware error_hints and retries
#
user_provided_exe_name = cmd_actual[0].lower()
# When running python, use the python in the ADS sandbox ({sys.executable})
#
if cmd.startswith("python "):
cmd_actual[0] = cmd_actual[0].replace("python", sys.executable)
# On Mac, when ADS is not launched from terminal, LC_ALL may not be set, which causes pip installs to fail
# with:
#
# UnicodeDecodeError: 'ascii' codec can't decode byte 0xc5 in position 4969: ordinal not in range(128)
#
# Setting it to a default value of "en_US.UTF-8" enables pip install to complete
#
if platform.system() == "Darwin" and "LC_ALL" not in os.environ:
os.environ["LC_ALL"] = "en_US.UTF-8"
# When running `kubectl`, if AZDATA_OPENSHIFT is set, use `oc`
#
if cmd.startswith("kubectl ") and "AZDATA_OPENSHIFT" in os.environ:
cmd_actual[0] = cmd_actual[0].replace("kubectl", "oc")
# To aid supportability, determine which binary file will actually be executed on the machine
#
which_binary = None
# Special case for CURL on Windows. The version of CURL in Windows System32 does not work to
# get JWT tokens, it returns "(56) Failure when receiving data from the peer". If another instance
# of CURL exists on the machine use that one. (Unfortunately the curl.exe in System32 is almost
# always the first curl.exe in the path, and it can't be uninstalled from System32, so here we
# look for the 2nd installation of CURL in the path)
if platform.system() == "Windows" and cmd.startswith("curl "):
path = os.getenv('PATH')
for p in path.split(os.path.pathsep):
p = os.path.join(p, "curl.exe")
if os.path.exists(p) and os.access(p, os.X_OK):
if p.lower().find("system32") == -1:
cmd_actual[0] = p
which_binary = p
break
# Find the path based location (shutil.which) of the executable that will be run (and display it to aid supportability), this
# seems to be required for .msi installs of azdata.cmd/az.cmd. (otherwise Popen returns FileNotFound)
#
# NOTE: Bash needs cmd to be the list of the space separated values hence shlex.split.
#
if which_binary == None:
which_binary = shutil.which(cmd_actual[0])
if which_binary == None:
if user_provided_exe_name in install_hint and install_hint[user_provided_exe_name] is not None:
display(Markdown(f'HINT: Use [{install_hint[user_provided_exe_name][0]}]({install_hint[user_provided_exe_name][1]}) to resolve this issue.'))
raise FileNotFoundError(f"Executable '{cmd_actual[0]}' not found in path (where/which)")
else:
cmd_actual[0] = which_binary
start_time = datetime.datetime.now().replace(microsecond=0)
print(f"START: {cmd} @ {start_time} ({datetime.datetime.utcnow().replace(microsecond=0)} UTC)")
print(f" using: {which_binary} ({platform.system()} {platform.release()} on {platform.machine()})")
print(f" cwd: {os.getcwd()}")
# Command-line tools such as CURL and AZDATA HDFS commands output
# scrolling progress bars, which causes Jupyter to hang forever, to
# workaround this, use no_output=True
#
# Work around an infinite hang when a notebook generates a non-zero return code, break out, and do not wait
#
wait = True
try:
if no_output:
p = Popen(cmd_actual)
else:
p = Popen(cmd_actual, stdout=PIPE, stderr=PIPE, bufsize=1)
with p.stdout:
for line in iter(p.stdout.readline, b''):
line = line.decode()
if return_output:
output = output + line
else:
if cmd.startswith("azdata notebook run"): # Hyperlink the .ipynb file
regex = re.compile(' "(.*)"\: "(.*)"')
match = regex.match(line)
if match:
if match.group(1).find("HTML") != -1:
display(Markdown(f' - "{match.group(1)}": "{match.group(2)}"'))
else:
display(Markdown(f' - "{match.group(1)}": "[{match.group(2)}]({match.group(2)})"'))
wait = False
break # otherwise infinite hang, have not worked out why yet.
else:
print(line, end='')
if rules is not None:
apply_expert_rules(line)
if wait:
p.wait()
except FileNotFoundError as e:
if install_hint is not None:
display(Markdown(f'HINT: Use {install_hint} to resolve this issue.'))
raise FileNotFoundError(f"Executable '{cmd_actual[0]}' not found in path (where/which)") from e
exit_code_workaround = 0 # WORKAROUND: azdata hangs on exception from notebook on p.wait()
if not no_output:
for line in iter(p.stderr.readline, b''):
try:
line_decoded = line.decode()
except UnicodeDecodeError:
# NOTE: Sometimes we get characters back that cannot be decoded(), e.g.
#
# \xa0
#
# For example see this in the response from `az group create`:
#
# ERROR: Get Token request returned http error: 400 and server
# response: {"error":"invalid_grant",# "error_description":"AADSTS700082:
# The refresh token has expired due to inactivity.\xa0The token was
# issued on 2018-10-25T23:35:11.9832872Z
#
# which generates the exception:
#
# UnicodeDecodeError: 'utf-8' codec can't decode byte 0xa0 in position 179: invalid start byte
#
print("WARNING: Unable to decode stderr line, printing raw bytes:")
print(line)
line_decoded = ""
pass
else:
# azdata emits a single empty line to stderr when doing an hdfs cp, don't
# print this empty "ERR:" as it confuses.
#
if line_decoded == "":
continue
print(f"STDERR: {line_decoded}", end='')
if line_decoded.startswith("An exception has occurred") or line_decoded.startswith("ERROR: An error occurred while executing the following cell"):
exit_code_workaround = 1
# inject HINTs to next TSG/SOP based on output in stderr
#
if user_provided_exe_name in error_hints:
for error_hint in error_hints[user_provided_exe_name]:
if line_decoded.find(error_hint[0]) != -1:
display(Markdown(f'HINT: Use [{error_hint[1]}]({error_hint[2]}) to resolve this issue.'))
# apply expert rules (to run follow-on notebooks), based on output
#
if rules is not None:
apply_expert_rules(line_decoded)
# Verify if a transient error, if so automatically retry (recursive)
#
if user_provided_exe_name in retry_hints:
for retry_hint in retry_hints[user_provided_exe_name]:
if line_decoded.find(retry_hint) != -1:
if retry_count < MAX_RETRIES:
print(f"RETRY: {retry_count} (due to: {retry_hint})")
retry_count = retry_count + 1
output = run(cmd, return_output=return_output, retry_count=retry_count)
if return_output:
return output
else:
return
elapsed = datetime.datetime.now().replace(microsecond=0) - start_time
# WORKAROUND: We avoid infinite hang above in the `azdata notebook run` failure case, by inferring success (from stdout output), so
# don't wait here, if success known above
#
if wait:
if p.returncode != 0:
raise SystemExit(f'Shell command:\n\n\t{cmd} ({elapsed}s elapsed)\n\nreturned non-zero exit code: {str(p.returncode)}.\n')
else:
if exit_code_workaround !=0 :
raise SystemExit(f'Shell command:\n\n\t{cmd} ({elapsed}s elapsed)\n\nreturned non-zero exit code: {str(exit_code_workaround)}.\n')
print(f'\nSUCCESS: {elapsed}s elapsed.\n')
if return_output:
return output
def load_json(filename):
"""Load a json file from disk and return the contents"""
with open(filename, encoding="utf8") as json_file:
return json.load(json_file)
def load_rules():
"""Load any 'expert rules' from the metadata of this notebook (.ipynb) that should be applied to the stderr of the running executable"""
# Load this notebook as json to get access to the expert rules in the notebook metadata.
#
try:
j = load_json("sop055-uninstall-azdata.ipynb")
except:
pass # If the user has renamed the book, we can't load ourself. NOTE: Is there a way in Jupyter, to know your own filename?
else:
if "metadata" in j and \
"azdata" in j["metadata"] and \
"expert" in j["metadata"]["azdata"] and \
"expanded_rules" in j["metadata"]["azdata"]["expert"]:
rules = j["metadata"]["azdata"]["expert"]["expanded_rules"]
rules.sort() # Sort rules, so they run in priority order (the [0] element). Lowest value first.
# print (f"EXPERT: There are {len(rules)} rules to evaluate.")
return rules
def apply_expert_rules(line):
"""Determine if the stderr line passed in, matches the regular expressions for any of the 'expert rules', if so
inject a 'HINT' to the follow-on SOP/TSG to run"""
global rules
for rule in rules:
notebook = rule[1]
cell_type = rule[2]
output_type = rule[3] # i.e. stream or error
output_type_name = rule[4] # i.e. ename or name
output_type_value = rule[5] # i.e. SystemExit or stdout
details_name = rule[6] # i.e. evalue or text
expression = rule[7].replace("\\*", "*") # Something escaped *, and put a \ in front of it!
if debug_logging:
print(f"EXPERT: If rule '{expression}' satisfied', run '{notebook}'.")
if re.match(expression, line, re.DOTALL):
if debug_logging:
print("EXPERT: MATCH: name = value: '{0}' = '{1}' matched expression '{2}', therefore HINT '{4}'".format(output_type_name, output_type_value, expression, notebook))
match_found = True
display(Markdown(f'HINT: Use [{notebook}]({notebook}) to resolve this issue.'))
print('Common functions defined successfully.')
# Hints for binary (transient fault) retry, (known) error and install guide
#
retry_hints = {'python': []}
error_hints = {'python': [['Library not loaded: /usr/local/opt/unixodbc', 'SOP012 - Install unixodbc for Mac', '../install/sop012-brew-install-odbc-for-sql-server.ipynb'], ['WARNING: You are using pip version', 'SOP040 - Upgrade pip in ADS Python sandbox', '../install/sop040-upgrade-pip.ipynb']]}
install_hint = {'python': []}
###Output
_____no_output_____
###Markdown
Uninstall azdata CLI
###Code
import sys
run(f'python -m pip uninstall -r https://aka.ms/azdata -y')
###Output
_____no_output_____
###Markdown
Pip list: Verify there are no azdata modules in the list
###Code
run(f'python -m pip list')
###Output
_____no_output_____
###Markdown
Related (SOP055, SOP064)
###Code
print('Notebook execution complete.')
###Output
_____no_output_____ |
notebook/Coronavirus_Detection_using_Chest_X_Ray.ipynb | ###Markdown
Connecting to Drive
###Code
from google.colab import drive
drive.mount('/content/drive')
###Output
Mounted at /content/drive
###Markdown
Importing Required Libraries
###Code
from imutils import paths
import matplotlib.pyplot as plt
import argparse
import os
import numpy as np
import pandas as pd
import os
import shutil
import glob
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from keras.applications.resnet50 import preprocess_input, ResNet50
from tensorflow.keras.layers import AveragePooling2D
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.utils import to_categorical
from keras.preprocessing import image
from keras.callbacks import EarlyStopping
from keras.callbacks import ModelCheckpoint
###Output
_____no_output_____
###Markdown
Setting Configuration Parameters
###Code
training_path = "/content/drive/MyDrive/Data/Final_Set/train_test_split/train"
validation_path = "/content/drive/MyDrive/Data/Final_Set/train_test_split/validation"
testing_path = "/content/drive/MyDrive/Data/Final_Set/train_test_split/test"
###Output
_____no_output_____
###Markdown
Data Augmentation
###Code
training_data_generator = ImageDataGenerator(preprocessing_function= preprocess_input,
zoom_range= 0.2,
horizontal_flip= True,
shear_range= 0.2,
)
training = training_data_generator.flow_from_directory(directory=training_path,
target_size=(224,224))
validation_data_generator = ImageDataGenerator(preprocessing_function= preprocess_input )
validation = validation_data_generator.flow_from_directory(directory=validation_path,
target_size=(224,224))
testing_data_generator = ImageDataGenerator(preprocessing_function= preprocess_input )
testing = testing_data_generator.flow_from_directory(directory=testing_path ,
target_size=(224,224),
shuffle= False)
###Output
Found 800 images belonging to 2 classes.
###Markdown
Model Creation
###Code
class_type = {0:'Covid', 1:'Normal'}
INIT_LR = 1e-1
bModel = ResNet50(weights="imagenet", include_top=False, input_tensor=Input(shape=(224, 224, 3)))
hModel = bModel.output
hModel = Flatten(name="flatten")(hModel)
hModel = Dense(2, activation="sigmoid", kernel_initializer='glorot_uniform')(hModel)
model = Model(inputs=bModel.input, outputs=hModel)
for layer in bModel.layers:
layer.trainable = False
#model.summary()
es = EarlyStopping(monitor= "val_accuracy" , min_delta= 0.01, patience= 5, verbose=1)
mc = ModelCheckpoint(filepath="/content/drive/MyDrive/Data/Trial_Model/Covid_model_5.h5", monitor="val_accuracy", verbose=1, save_best_only= True)
opt = Adam(lr=INIT_LR)
model.compile(optimizer=opt , loss = 'categorical_crossentropy', metrics=['accuracy'])
print("Compiling Starts")
hist = model.fit_generator(training, steps_per_epoch= 10, epochs= 30, validation_data= validation , validation_steps= 16, callbacks=[es,mc])
# plot the loss
plt.plot(hist.history['loss'], label='train loss')
plt.plot(hist.history['val_loss'], label='val loss')
plt.legend()
plt.show()
plt.savefig('LossVal_loss')
# plot the accuracy
plt.plot(hist.history['accuracy'], label='train acc')
plt.plot(hist.history['val_accuracy'], label='val acc')
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
Testing the Model Using Test set
###Code
import tensorflow as tf
model = tf.keras.models.load_model('/content/drive/MyDrive/Data/Trial_Model/Covid_model_5.h5')
y_test=testing.classes
results=model.predict(testing)
results = np.argmax(results, axis=1)
###Output
_____no_output_____
###Markdown
Calculating Accuracy
###Code
acc = model.evaluate(testing)[1]
print(f"The accuracy of your model is = {acc*100} %")
###Output
25/25 [==============================] - 132s 5s/step - loss: 95.8744 - accuracy: 0.9125
The accuracy of your model is = 91.25000238418579 %
###Markdown
Generating Classification Report
###Code
from sklearn.metrics import classification_report
print(classification_report(y_test,results,target_names=['Covid','Normal']))
###Output
precision recall f1-score support
Covid 0.92 0.94 0.93 500
Normal 0.90 0.86 0.88 300
accuracy 0.91 800
macro avg 0.91 0.90 0.91 800
weighted avg 0.91 0.91 0.91 800
###Markdown
Analysing the Confusion Matrix
###Code
from sklearn.metrics import confusion_matrix
import seaborn as sns
cm = confusion_matrix(y_test, results)
ax= plt.subplot()
sns.heatmap(cm, annot=True, cmap='Blues', fmt=".0f")
ax.set_xlabel('Predicted labels');
ax.set_ylabel('True labels');
ax.set_title('Confusion Matrix');
ax.xaxis.set_ticklabels(['Covid','Normal']);
ax.yaxis.set_ticklabels(['Covid','Normal']);
###Output
_____no_output_____
###Markdown
Analysing the ROC-AUC curve
###Code
from sklearn.metrics import roc_curve, roc_auc_score
results_0 = [0 for _ in range(len(y_test))]
fpr_0, tpr_0, thresholds_0 = roc_curve(y_test,results_0)
fpr, tpr, thresholds = roc_curve(y_test, results)
auc_0 = roc_auc_score(y_test, results_0)
print('AUC for Dumb Model: %.3f' % auc_0)
auc = roc_auc_score(y_test, results)
print('AUC for Trained Model: %.3f' % auc)
plt.plot(fpr_0, tpr_0, linestyle='--', label='Dumb Model')
plt.plot(fpr, tpr, marker='.', label='Trained Model')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
Using image from an external source
###Code
def get_img_array(img_path):
path = img_path
img = image.load_img(path, target_size=(224,224,3))
img = image.img_to_array(img)
img = np.expand_dims(img , axis= 0 )
return img
path = "/content/drive/MyDrive/Data/Final_Set/train_test_split/test/Covid/COVID-510.png"
img = get_img_array(path)
result = class_type[np.argmax(model.predict(img))]
print(f"The given X-Ray image is of type : {result}")
print()
print(f"The chances of image being Covid is : {round(model.predict(img)[0][0]*100,2)} %")
print()
print(f"The chances of image being Normal is : {round(model.predict(img)[0][1]*100,2) }%")
plt.imshow(img[0]/255, cmap = "gray")
plt.title("input image")
plt.show()
###Output
The given X-Ray image is of type : Covid
The chances of image being Covid is : 100.0 %
The chances of image being Normal is : 0.0%
|
4_sample_data_models/sample_dummy_pipe_grid-all-grouped.ipynb | ###Markdown
**Variables I will try to predict with my models:**- USENOW3: Do you currently use chewing tobacco, snuff, or snus every day, some days, or not at all? - classification - 0 = Don't know, Not sure or Refused, 1 = every day, 2 = some days, 3 = not at all- QLACTLM2: Are you limited in any way in any activities because of physical, mental, or emotional problems? - classification - 0 = Don't know, Not sure or Refused, 1 = yes, 2 = no- _RFHLTH: Adults with good or better health vs. fair or poor health - classification - based on GENHLTH - 0 = Don't know, Not sure or Refused, 1 = Good or Better Health, 2 = Fair or Poor Health- _SMOKER3: Four-level smoker status: Everyday smoker, Someday smoker, Former smoker, Non-smoker - classification - based on SMOKE100 & SMOKEDAY - 0 = Don't know, Not sure or Refused, 1 = Current smoker (now smokes every day), 2 = Current smoker (now smokes some days), 3 = Former smoker, 4 = Never smoked **Will OneHotEncode/dummify ordinal/nominal features****Will only use a sample of the data set so the models can run faster****Will use SMOTE to compensate for imbalanced classes****Will aggregate all ACEs into two groups: Abuse and Household Challenges**
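Since the targets are stored as numeric codes, a small helper (illustrative only; the names below are hypothetical and not used elsewhere in this notebook) can translate predicted codes back into the labels described above.
###Code
# illustrative label maps built from the variable descriptions above (not used by the modeling code below)
smoker_labels = {0: "Don't know/Not sure/Refused", 1: 'Current smoker (every day)',
                 2: 'Current smoker (some days)', 3: 'Former smoker', 4: 'Never smoked'}
health_labels = {0: "Don't know/Not sure/Refused", 1: 'Good or Better Health', 2: 'Fair or Poor Health'}

def decode_predictions(codes, mapping):
    """Map an iterable of predicted class codes to readable labels."""
    return [mapping.get(int(c), 'Unknown') for c in codes]

decode_predictions([1, 4, 3], smoker_labels)
###Output
_____no_output_____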
###Code
np.random.seed(151)
# taking a small sample so that my models will run a little faster
brfss_total_sample = brfss_total.sample(frac=0.05, axis=0)
brfss_total_sample.shape
# creating X variable with all features
X_all = brfss_total_sample.drop(columns=['USENOW3', 'QLACTLM2', '_RFHLTH', '_SMOKER3'])
# creating the 4 y's
y_tobacco = brfss_total_sample['USENOW3']
y_activity = brfss_total_sample['QLACTLM2']
y_health = brfss_total_sample['_RFHLTH']
y_smoker = brfss_total_sample['_SMOKER3']
#original baseline for tobacco
y_tobacco.value_counts(normalize=True)
#original baseline for activity
y_activity.value_counts(normalize=True)
#original baseline for health
y_health.value_counts(normalize=True)
#original baseline for smoker
y_smoker.value_counts(normalize=True)
# splitting X up so I can do some engineering on the nominal data and ACE columns
X_num = X_all[['PHYSHLTH', 'MENTHLTH', 'CHILDREN']]
X_cat = X_all[['_STATE', 'DISPCODE', 'HISPANC2', 'MARITAL', 'EMPLOY', 'RENTHOM1', 'SEX', 'MSCODE',
'_IMPAGE', '_PRACE', '_EDUCAG', '_INCOMG','_TOTINDA']]
ace = X_all[['ACEDEPRS', 'ACEDRINK', 'ACEDRUGS', 'ACEPRISN', 'ACEDIVRC', 'ACEPUNCH', 'ACEHURT', 'ACESWEAR',
'ACETOUCH', 'ACETTHEM', 'ACEHVSEX']]
# updating ACE columns to be a count depending on the question
# first 5 questions are yes or no, so yes will be counted as 1 and no will be counted as 0
# last 6 are questions of frequency, never = 0, once = 1, more than once will equal 2 (since not given an exact number)
ace['ACEDEPRS'] = ace['ACEDEPRS'].map({1:1, 2:0, 0:0})
ace['ACEDRINK'] = ace['ACEDRINK'].map({1:1, 2:0, 0:0})
ace['ACEDRUGS'] = ace['ACEDRUGS'].map({1:1, 2:0, 0:0})
ace['ACEPRISN'] = ace['ACEPRISN'].map({1:1, 2:0, 0:0})
ace['ACEDIVRC'] = ace['ACEDIVRC'].map({1:1, 2:0, 0:0})
ace['ACEPUNCH'] = ace['ACEPUNCH'].map({1:0, 2:1, 3:2, 0:0})
ace['ACEHURT'] = ace['ACEHURT'].map({1:0, 2:1, 3:2, 0:0})
ace['ACESWEAR'] = ace['ACESWEAR'].map({1:0, 2:1, 3:2, 0:0})
ace['ACETOUCH'] = ace['ACETOUCH'].map({1:0, 2:1, 3:2, 0:0})
ace['ACETTHEM'] = ace['ACETTHEM'].map({1:0, 2:1, 3:2, 0:0})
ace['ACEHVSEX'] = ace['ACEHVSEX'].map({1:0, 2:1, 3:2, 0:0})
ace['count'] = ace.sum(axis = 1)
X_num['ACE_Count'] = ace['count']
X_cat = X_cat.astype(str)
# dummifying nominal variables for X_all
X_dummies = pd.get_dummies(X_cat, drop_first=True)
X_dummies.head()
X_num.head()
# merging numerical and nominal data into one data frame
X_all = X_num.merge(X_dummies, left_index=True, right_index=True)
X_all.shape
# to compensate for unbalanced classes in my y's will use SMOTE
sm = SMOTE(random_state=151)
X_all1, y_tobacco = sm.fit_resample(X_all, y_tobacco)
sm2 = SMOTE(random_state=151)
X_all2, y_activity = sm2.fit_resample(X_all, y_activity)
sm3 = SMOTE(random_state=151)
X_all3, y_health = sm3.fit_resample(X_all, y_health)
sm4 = SMOTE(random_state=151)
X_all4, y_smoker = sm4.fit_resample(X_all, y_smoker)
# new baseline for tobacco
y_tobacco.value_counts(normalize=True)
# looks like SMOTE has increased the size of my y's more than 4x, so will probably take some time for models to run
y_tobacco.shape
# new baseline for activity
y_activity.value_counts(normalize=True)
# new baseline for health
y_health.value_counts(normalize=True)
# new baseline for smoker
y_smoker.value_counts(normalize=True)
X_all1.shape
# creating training and testing sets for all y's (stratified on y, but since the classes are equal probably didn't have to)
X_train_all, X_test_all, y_train_tobacco, y_test_tobacco = train_test_split(X_all1, y_tobacco, random_state = 151, stratify=y_tobacco)
X_train_all2, X_test_all2, y_train_activity, y_test_activity = train_test_split(X_all2, y_activity, random_state = 151, stratify=y_activity)
X_train_all3, X_test_all3, y_train_health, y_test_health = train_test_split(X_all3, y_health, random_state = 151, stratify=y_health)
X_train_all4, X_test_all4, y_train_smoker, y_test_smoker = train_test_split(X_all4, y_smoker, random_state = 151, stratify=y_smoker)
###Output
_____no_output_____
###Markdown
Pipeline and Gridsearch with all features as predictors (Logistic Regression)
###Code
pipe_all_log = make_pipeline(SelectKBest(f_classif), StandardScaler(), LogisticRegression(max_iter=10_000))
params_all_log = {'selectkbest__k': range(1, 137, 15),
'logisticregression__C': [0.01, 0.5, 1]}
gs_all_log = GridSearchCV(pipe_all_log, params_all_log, cv=3)
gs_all_log.fit(X_train_all, y_train_tobacco)
pipe2_all_log = make_pipeline(SelectKBest(f_classif), StandardScaler(), LogisticRegression(max_iter=10_000))
gs2_all_log = GridSearchCV(pipe2_all_log, params_all_log, cv=3)
gs2_all_log.fit(X_train_all2, y_train_activity)
pipe3_all_log = make_pipeline(SelectKBest(f_classif), StandardScaler(), LogisticRegression(max_iter=10_000))
gs3_all_log = GridSearchCV(pipe3_all_log, params_all_log, cv=3)
gs3_all_log.fit(X_train_all3, y_train_health)
pipe4_all_log = make_pipeline(SelectKBest(f_classif), StandardScaler(), LogisticRegression(max_iter=10_000))
gs4_all_log = GridSearchCV(pipe4_all_log, params_all_log, cv=3)
gs4_all_log.fit(X_train_all4, y_train_smoker)
tobacco_all_log_preds = gs_all_log.predict(X_test_all)
activity_all_log_preds = gs2_all_log.predict(X_test_all2)
health_all_log_preds = gs3_all_log.predict(X_test_all3)
smoker_all_log_preds = gs4_all_log.predict(X_test_all4)
tobacco_all_log_prec = precision_score(y_test_tobacco, tobacco_all_log_preds, average='micro')
activity_all_log_prec = precision_score(y_test_activity, activity_all_log_preds, average='micro')
health_all_log_prec = precision_score(y_test_health, health_all_log_preds, average='micro')
smoker_all_log_prec = precision_score(y_test_smoker, smoker_all_log_preds, average='micro')
print(f' training accuracy for tobacco: {gs_all_log.score(X_train_all, y_train_tobacco)}')
print(f' training accuracy for activity: {gs2_all_log.score(X_train_all2, y_train_activity)}')
print(f' training accuracy for health: {gs3_all_log.score(X_train_all3, y_train_health)}')
print(f' training accuracy for smoker: {gs4_all_log.score(X_train_all4, y_train_smoker)}')
print(f' testing accuracy for tobacco: {gs_all_log.score(X_test_all, y_test_tobacco)}')
print(f' testing accuracy for activity: {gs2_all_log.score(X_test_all2, y_test_activity)}')
print(f' testing accuracy for health: {gs3_all_log.score(X_test_all3, y_test_health)}')
print(f' testing accuracy for smoker: {gs4_all_log.score(X_test_all4, y_test_smoker)}')
print(f'Precision for tobacco: {tobacco_all_log_prec}')
print(f'Precision for activity: {activity_all_log_prec}')
print(f'Precision for health: {health_all_log_prec}')
print(f'Precision for smoker: {smoker_all_log_prec}')
print(gs_all_log.best_params_)
print(gs2_all_log.best_params_)
print(gs3_all_log.best_params_)
print(gs4_all_log.best_params_)
###Output
{'logisticregression__C': 0.5, 'selectkbest__k': 136}
{'logisticregression__C': 0.5, 'selectkbest__k': 136}
{'logisticregression__C': 1, 'selectkbest__k': 136}
{'logisticregression__C': 1, 'selectkbest__k': 136}
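###Markdown
To see which columns survived the univariate screen in the best logistic-regression pipeline, one could inspect the fitted `SelectKBest` step. The sketch below is not part of the original notebook and assumes SMOTE left the column order of `X_all` unchanged.
###Code
# illustrative sketch: list the features retained by SelectKBest inside the best tobacco pipeline
mask = gs_all_log.best_estimator_.named_steps['selectkbest'].get_support()
selected_columns = X_all.columns[mask]
print(len(selected_columns), 'features selected for the tobacco model')
print(list(selected_columns)[:10])
###Output
_____no_output_____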
###Markdown
**observations** Pipeline and Gridsearch with all features as predictors (Random Forest Classifier)
###Code
pipe_all_rfc = make_pipeline(SelectKBest(f_classif), StandardScaler(), RandomForestClassifier())
params_all_rfc = {'selectkbest__k': range(1, 137, 15),
'randomforestclassifier__n_estimators': [100, 300, 500],
'randomforestclassifier__max_depth': [None, 3, 5], }
#'randomforestclassifier__min_samples_split': [1, 3, 5],
#'randomforestclassifier__min_samples_leaf': [1, 3, 5]}
gs_all_rfc = GridSearchCV(pipe_all_rfc, params_all_rfc, cv=3)
gs_all_rfc.fit(X_train_all, y_train_tobacco)
pipe2_all_rfc = make_pipeline(SelectKBest(f_classif), StandardScaler(), RandomForestClassifier())
gs2_all_rfc = GridSearchCV(pipe2_all_rfc, params_all_rfc, cv=3)
gs2_all_rfc.fit(X_train_all2, y_train_activity)
pipe3_all_rfc = make_pipeline(SelectKBest(f_classif), StandardScaler(), RandomForestClassifier())
gs3_all_rfc = GridSearchCV(pipe3_all_rfc, params_all_rfc, cv=3)
gs3_all_rfc.fit(X_train_all3, y_train_health)
pipe4_all_rfc = make_pipeline(SelectKBest(f_classif), StandardScaler(), RandomForestClassifier())
gs4_all_rfc = GridSearchCV(pipe4_all_rfc, params_all_rfc, cv=3)
gs4_all_rfc.fit(X_train_all4, y_train_smoker)
tobacco_all_rfc_preds = gs_all_rfc.predict(X_test_all)
activity_all_rfc_preds = gs2_all_rfc.predict(X_test_all2)
health_all_rfc_preds = gs3_all_rfc.predict(X_test_all3)
smoker_all_rfc_preds = gs4_all_rfc.predict(X_test_all4)
tobacco_all_rfc_prec = precision_score(y_test_tobacco, tobacco_all_rfc_preds, average='micro')
activity_all_rfc_prec = precision_score(y_test_activity, activity_all_rfc_preds, average='micro')
health_all_rfc_prec = precision_score(y_test_health, health_all_rfc_preds, average='micro')
smoker_all_rfc_prec = precision_score(y_test_smoker, smoker_all_rfc_preds, average='micro')
print(f' training accuracy for tobacco: {gs_all_rfc.score(X_train_all, y_train_tobacco)}')
print(f' training accuracy for activity: {gs2_all_rfc.score(X_train_all2, y_train_activity)}')
print(f' training accuracy for health: {gs3_all_rfc.score(X_train_all3, y_train_health)}')
print(f' training accuracy for smoker: {gs4_all_rfc.score(X_train_all4, y_train_smoker)}')
print(f' testing accuracy for tobacco: {gs_all_rfc.score(X_test_all, y_test_tobacco)}')
print(f' testing accuracy for activity: {gs2_all_rfc.score(X_test_all2, y_test_activity)}')
print(f' testing accuracy for health: {gs3_all_rfc.score(X_test_all3, y_test_health)}')
print(f' testing accuracy for smoker: {gs4_all_rfc.score(X_test_all4, y_test_smoker)}')
print(f'Precision for tobacco: {tobacco_all_rfc_prec}')
print(f'Precision for activity: {activity_all_rfc_prec}')
print(f'Precision for health: {health_all_rfc_prec}')
print(f'Precision for smoker: {smoker_all_rfc_prec}')
print(gs_all_rfc.best_params_)
print(gs2_all_rfc.best_params_)
print(gs3_all_rfc.best_params_)
print(gs4_all_rfc.best_params_)
###Output
{'randomforestclassifier__max_depth': None, 'randomforestclassifier__n_estimators': 500, 'selectkbest__k': 136}
{'randomforestclassifier__max_depth': None, 'randomforestclassifier__n_estimators': 300, 'selectkbest__k': 136}
{'randomforestclassifier__max_depth': None, 'randomforestclassifier__n_estimators': 100, 'selectkbest__k': 106}
{'randomforestclassifier__max_depth': None, 'randomforestclassifier__n_estimators': 500, 'selectkbest__k': 106}
###Markdown
Pipeline and Gridsearch with all features as predictors (Extra Trees Classifier)
###Code
pipe_all_etc = make_pipeline(SelectKBest(f_classif), StandardScaler(), ExtraTreesClassifier())
params_all_etc = {'selectkbest__k': range(1, 137, 15),
'extratreesclassifier__n_estimators': [100, 300, 500],
'extratreesclassifier__max_depth': [None, 3, 5], }
#'extratreesclassifier__min_samples_split': [1, 3, 5],
#'extratreesclassifier__min_samples_leaf': [1, 3, 5]}
gs_all_etc = GridSearchCV(pipe_all_etc, params_all_etc, cv=3)
gs_all_etc.fit(X_train_all, y_train_tobacco)
pipe2_all_etc = make_pipeline(SelectKBest(f_classif), StandardScaler(), ExtraTreesClassifier())
gs2_all_etc = GridSearchCV(pipe2_all_etc, params_all_etc, cv=3)
gs2_all_etc.fit(X_train_all2, y_train_activity)
pipe3_all_etc = make_pipeline(SelectKBest(f_classif), StandardScaler(), ExtraTreesClassifier())
gs3_all_etc = GridSearchCV(pipe3_all_etc, params_all_etc, cv=3)
gs3_all_etc.fit(X_train_all3, y_train_health)
pipe4_all_etc = make_pipeline(SelectKBest(f_classif), StandardScaler(), ExtraTreesClassifier())
gs4_all_etc = GridSearchCV(pipe4_all_etc, params_all_etc, cv=3)
gs4_all_etc.fit(X_train_all4, y_train_smoker)
tobacco_all_etc_preds = gs_all_etc.predict(X_test_all)
activity_all_etc_preds = gs2_all_etc.predict(X_test_all2)
health_all_etc_preds = gs3_all_etc.predict(X_test_all3)
smoker_all_etc_preds = gs4_all_etc.predict(X_test_all4)
tobacco_all_etc_prec = precision_score(y_test_tobacco, tobacco_all_etc_preds, average='micro')
activity_all_etc_prec = precision_score(y_test_activity, activity_all_etc_preds, average='micro')
health_all_etc_prec = precision_score(y_test_health, health_all_etc_preds, average='micro')
smoker_all_etc_prec = precision_score(y_test_smoker, smoker_all_etc_preds, average='micro')
print(f' training accuracy for tobacco: {gs_all_etc.score(X_train_all, y_train_tobacco)}')
print(f' training accuracy for activity: {gs2_all_etc.score(X_train_all2, y_train_activity)}')
print(f' training accuracy for health: {gs3_all_etc.score(X_train_all3, y_train_health)}')
print(f' training accuracy for smoker: {gs4_all_etc.score(X_train_all4, y_train_smoker)}')
print(f' testing accuracy for tobacco: {gs_all_etc.score(X_test_all, y_test_tobacco)}')
print(f' testing accuracy for activity: {gs2_all_etc.score(X_test_all2, y_test_activity)}')
print(f' testing accuracy for health: {gs3_all_etc.score(X_test_all3, y_test_health)}')
print(f' testing accuracy for smoker: {gs4_all_etc.score(X_test_all4, y_test_smoker)}')
print(f'Precision for tobacco: {tobacco_all_etc_prec}')
print(f'Precision for activity: {activity_all_etc_prec}')
print(f'Precision for health: {health_all_etc_prec}')
print(f'Precision for smoker: {smoker_all_etc_prec}')
print(gs_all_etc.best_params_)
print(gs2_all_etc.best_params_)
print(gs3_all_etc.best_params_)
print(gs4_all_etc.best_params_)
###Output
{'extratreesclassifier__max_depth': None, 'extratreesclassifier__n_estimators': 500, 'selectkbest__k': 136}
{'extratreesclassifier__max_depth': None, 'extratreesclassifier__n_estimators': 500, 'selectkbest__k': 136}
{'extratreesclassifier__max_depth': None, 'extratreesclassifier__n_estimators': 500, 'selectkbest__k': 76}
{'extratreesclassifier__max_depth': None, 'extratreesclassifier__n_estimators': 300, 'selectkbest__k': 136}
###Markdown
Pipeline and Gridsearch with all features as predictors (Ada Boost Classifier)
###Code
pipe_all_abc = make_pipeline(SelectKBest(f_classif), StandardScaler(), AdaBoostClassifier())
params_all_abc = {'selectkbest__k': range(1, 137, 15),
'adaboostclassifier__learning_rate': [0.5, 1.0],
'adaboostclassifier__n_estimators': [10, 15, 20, 25], }
gs_all_abc = GridSearchCV(pipe_all_abc, params_all_abc, cv=3)
gs_all_abc.fit(X_train_all, y_train_tobacco)
pipe2_all_abc = make_pipeline(SelectKBest(f_classif), StandardScaler(), AdaBoostClassifier())
gs2_all_abc = GridSearchCV(pipe2_all_abc, params_all_abc, cv=3)
gs2_all_abc.fit(X_train_all2, y_train_activity)
pipe3_all_abc = make_pipeline(SelectKBest(f_classif), StandardScaler(), AdaBoostClassifier())
gs3_all_abc = GridSearchCV(pipe3_all_abc, params_all_abc, cv=3)
gs3_all_abc.fit(X_train_all3, y_train_health)
pipe4_all_abc = make_pipeline(SelectKBest(f_classif), StandardScaler(), AdaBoostClassifier())
gs4_all_abc = GridSearchCV(pipe4_all_abc, params_all_abc, cv=3)
gs4_all_abc.fit(X_train_all4, y_train_smoker)
tobacco_all_abc_preds = gs_all_abc.predict(X_test_all)
activity_all_abc_preds = gs2_all_abc.predict(X_test_all2)
health_all_abc_preds = gs3_all_abc.predict(X_test_all3)
smoker_all_abc_preds = gs4_all_abc.predict(X_test_all4)
tobacco_all_abc_prec = precision_score(y_test_tobacco, tobacco_all_abc_preds, average='micro')
activity_all_abc_prec = precision_score(y_test_activity, activity_all_abc_preds, average='micro')
health_all_abc_prec = precision_score(y_test_health, health_all_abc_preds, average='micro')
smoker_all_abc_prec = precision_score(y_test_smoker, smoker_all_abc_preds, average='micro')
print(f' training accuracy for tobacco: {gs_all_abc.score(X_train_all, y_train_tobacco)}')
print(f' training accuracy for activity: {gs2_all_abc.score(X_train_all2, y_train_activity)}')
print(f' training accuracy for health: {gs3_all_abc.score(X_train_all3, y_train_health)}')
print(f' training accuracy for smoker: {gs4_all_abc.score(X_train_all4, y_train_smoker)}')
print(f' testing accuracy for tobacco: {gs_all_abc.score(X_test_all, y_test_tobacco)}')
print(f' testing accuracy for activity: {gs2_all_abc.score(X_test_all2, y_test_activity)}')
print(f' testing accuracy for health: {gs3_all_abc.score(X_test_all3, y_test_health)}')
print(f' testing accuracy for smoker: {gs4_all_abc.score(X_test_all4, y_test_smoker)}')
print(f'Precision for tobacco: {tobacco_all_abc_prec}')
print(f'Precision for activity: {activity_all_abc_prec}')
print(f'Precision for health: {health_all_abc_prec}')
print(f'Precision for smoker: {smoker_all_abc_prec}')
print(gs_all_abc.best_params_)
print(gs2_all_abc.best_params_)
print(gs3_all_abc.best_params_)
print(gs4_all_abc.best_params_)
###Output
{'adaboostclassifier__learning_rate': 1.0, 'adaboostclassifier__n_estimators': 25, 'selectkbest__k': 31}
{'adaboostclassifier__learning_rate': 1.0, 'adaboostclassifier__n_estimators': 25, 'selectkbest__k': 31}
{'adaboostclassifier__learning_rate': 1.0, 'adaboostclassifier__n_estimators': 25, 'selectkbest__k': 31}
{'adaboostclassifier__learning_rate': 1.0, 'adaboostclassifier__n_estimators': 25, 'selectkbest__k': 121}
###Markdown
Pipeline and Gridsearch with all features as predictors (XG Boost Classifier)
###Code
pipe_all_xgb = make_pipeline(SelectKBest(f_classif), StandardScaler(), xgb.XGBClassifier())
params_all_xgb = {'selectkbest__k': range(1, 137, 15),
'xgbclassifier__learning_rate': [0.5, 1.0],
'xgbclassifier__n_estimators': [10, 15, 20, 25],
'xgbclassifier__max_depth': [3, 5]}
gs_all_xgb = GridSearchCV(pipe_all_xgb, params_all_xgb, cv=3)
gs_all_xgb.fit(X_train_all, y_train_tobacco)
pipe2_all_xgb = make_pipeline(SelectKBest(f_classif), StandardScaler(), xgb.XGBClassifier())
gs2_all_xgb = GridSearchCV(pipe2_all_xgb, params_all_xgb, cv=3)
gs2_all_xgb.fit(X_train_all2, y_train_activity)
pipe3_all_xgb = make_pipeline(SelectKBest(f_classif), StandardScaler(), xgb.XGBClassifier())
gs3_all_xgb = GridSearchCV(pipe3_all_xgb, params_all_xgb, cv=3)
gs3_all_xgb.fit(X_train_all3, y_train_health)
pipe4_all_xgb = make_pipeline(SelectKBest(f_classif), StandardScaler(), xgb.XGBClassifier())
gs4_all_xgb = GridSearchCV(pipe4_all_xgb, params_all_xgb, cv=3)
gs4_all_xgb.fit(X_train_all4, y_train_smoker)
tobacco_all_xgb_preds = gs_all_xgb.predict(X_test_all)
activity_all_xgb_preds = gs2_all_xgb.predict(X_test_all2)
health_all_xgb_preds = gs3_all_xgb.predict(X_test_all3)
smoker_all_xgb_preds = gs4_all_xgb.predict(X_test_all4)
tobacco_all_xgb_prec = precision_score(y_test_tobacco, tobacco_all_xgb_preds, average='micro')
activity_all_xgb_prec = precision_score(y_test_activity, activity_all_xgb_preds, average='micro')
health_all_xgb_prec = precision_score(y_test_health, health_all_xgb_preds, average='micro')
smoker_all_xgb_prec = precision_score(y_test_smoker, smoker_all_xgb_preds, average='micro')
print(f' training accuracy for tobacco: {gs_all_xgb.score(X_train_all, y_train_tobacco)}')
print(f' training accuracy for activity: {gs2_all_xgb.score(X_train_all2, y_train_activity)}')
print(f' training accuracy for health: {gs3_all_xgb.score(X_train_all3, y_train_health)}')
print(f' training accuracy for smoker: {gs4_all_xgb.score(X_train_all4, y_train_smoker)}')
print(f' testing accuracy for tobacco: {gs_all_xgb.score(X_test_all, y_test_tobacco)}')
print(f' testing accuracy for activity: {gs2_all_xgb.score(X_test_all2, y_test_activity)}')
print(f' testing accuracy for health: {gs3_all_xgb.score(X_test_all3, y_test_health)}')
print(f' testing accuracy for smoker: {gs4_all_xgb.score(X_test_all4, y_test_smoker)}')
print(f'Precision for tobacco: {tobacco_all_xgb_prec}')
print(f'Precision for activity: {activity_all_xgb_prec}')
print(f'Precision for health: {health_all_xgb_prec}')
print(f'Precision for smoker: {smoker_all_xgb_prec}')
print(gs_all_xgb.best_params_)
print(gs2_all_xgb.best_params_)
print(gs3_all_xgb.best_params_)
print(gs4_all_xgb.best_params_)
###Output
{'selectkbest__k': 91, 'xgbclassifier__learning_rate': 1.0, 'xgbclassifier__max_depth': 5, 'xgbclassifier__n_estimators': 25}
{'selectkbest__k': 136, 'xgbclassifier__learning_rate': 1.0, 'xgbclassifier__max_depth': 5, 'xgbclassifier__n_estimators': 25}
{'selectkbest__k': 76, 'xgbclassifier__learning_rate': 1.0, 'xgbclassifier__max_depth': 5, 'xgbclassifier__n_estimators': 25}
{'selectkbest__k': 61, 'xgbclassifier__learning_rate': 1.0, 'xgbclassifier__max_depth': 5, 'xgbclassifier__n_estimators': 25}
|
IPy-01-uso-interactivo.ipynb | ###Markdown
Interactive use===**Juan David Velásquez Henao** [email protected] Universidad Nacional de Colombia, Sede Medellín Facultad de Minas Medellín, Colombia---Click [here](https://github.com/jdvelasq/IPy-for-data-science/blob/master/IPy-01-uso-interactivo.ipynb) to access the latest online version. Click [here](http://nbviewer.jupyter.org/github/jdvelasq/IPy-for-data-science/blob/master/IPy-01-uso-interactivo.ipynb) to view the latest online version on `nbviewer`. --- Contents > * [Interactive use](Uso-interactivo) * [Numerical calculations](Cálculos-numéricos) * [User-defined functions](Funciones-de-usuario) * [Mathematical functions](Funciones-matemáticas) * [Character strings (strings)](Cadenas-de-caracteres) * [Lists](Listas) Numerical calculations [Contents](Contenido) IPython can be used interactively as a calculator. This allows data analysis to be carried out interactively, much as with other tools such as the R language or Matlab. Basic arithmetic calculations are illustrated below.
###Code
2 + 2 + 1
50 - 5 * 6 + 8
(50 - 5 * 6) / 4
8 / 5 # real (floating-point) result
8 // 5 # integer part of the division
8 % 5 # remainder of the division
5 ** 2 # power
###Output
_____no_output_____
###Markdown
Variables can also be declared and used in interactive execution.
###Code
x = 20
y = 5 * 9
x * y
a = 12.5 / 100
b = 100.50
a * b
###Output
_____no_output_____
###Markdown
A single underscore character `_` refers to the result of the previous calculation. Two underscores `__` refer to the second-to-last result; three underscores `___` refer to the third-to-last result. Likewise, `In[1]` can be used to retrieve the contents of the first cell, `In[2]` for the second, and so on. Similarly, the results can be retrieved as `Out[1]`, `Out[2]`, etc.
###Code
1.1
1.1 + _
1.1 + _
round(_, 2)
Out[8]
In[13]
###Output
_____no_output_____
###Markdown
User-defined functions [Contents](Contenido) Functions are defined with the reserved word `def`. The following function returns the square of its argument.
###Code
def square(x): # the `:` character is required
return x**2 # the function body is defined by indentation (whitespace)
# using `return` is required in order to return values
square(2)
square(1+2)
square(square(2))
square(1) + square(2)
def sum_of_squares(x, y):
return square(x) + square(y) # functions can be called inside other functions
sum_of_squares(1, 2)
###Output
_____no_output_____
###Markdown
Mathematical functions [Contents](Contenido) > The complete list of mathematical functions is available [here](https://docs.python.org/3/library/math.html).
###Code
import math # import the mathematical functions library
math.cos(3.141516) # call the cos function from the math library
###Output
_____no_output_____
###Markdown
**Exercise.--** Compute the value of the following expression:$$\frac{5-(1 -(3 - \exp(\frac{1}{8})))}{3(4-2)(2-\frac{3}{8})} - 2!(-1)^3 + \sin (0.98\pi) $$ Character strings [Contents](Contenido) IPython can also work with character strings. They can be delimited using single or double quotes.
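Before turning to strings, here is a sketch of one possible way to evaluate the exercise expression with the `math` module (it assumes the parenthesization shown above and reads 2! as `math.factorial(2)`).
###Code
import math

numerator = 5 - (1 - (3 - math.exp(1/8)))
denominator = 3 * (4 - 2) * (2 - 3/8)
numerator / denominator - math.factorial(2) * (-1)**3 + math.sin(0.98 * math.pi)
###Output
_____no_output_____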
###Code
'hola mundo' # single quotes
"hola mundo" # double quotes
'--"--' # alternating quote styles. IPython understands that the single quotes delimit the string.
"--'--"
'--\'--' # In these two cases the `\` is required to indicate that the middle quote is not the delimiter.
"--\"--"
###Output
_____no_output_____
###Markdown
The escape character `\n` indicates a carriage return or new line.
###Code
s = 'Primera linea.\nsegunda linea.'
s
print(s) # the print function must be used to print with formatting.
print(r'Primera linea.\nsegunda linea.') # note the r before the opening quote
"""
Los strings de varias lineas pueden
escribirse delimitรกndolos tres comillas
dobles y son usados corrientemente como
comentarios
"""
print("""
Los strings de varias lineas pueden
escribirse delimitรกndolos tres comillas
dobles y son usados corrientemente como
comentarios
""")
3 * 'abc ' + '012' # strings use `*` for repetition and `+` for concatenation.
'abc ' * 3 + '012'
'Py' 'thon' # note that here the spaces between the inner quotes are ignored
# strings can be written across several lines by enclosing them in parentheses.
text = ('Linea 1 '
'Linea 2 '
'Linea 3')
print(text)
# remove the '-' characters from the text string
'h-o-l-a- -m-u-n-d-o'.replace('-', '')
# replace the '-' characters with '='
'h-o-l-a- -m-u-n-d-o'.replace('-', '=')
# convert to uppercase
'hola mundo'.upper()
# convert to lowercase
'HOLA MUNDO'.lower()
'hola mundo'.capitalize()
'Hola Mundo Cruel'.swapcase()
'hola mundo cruel'.title()
'hola mundo'.center(20, '-')
'hola mundo'.ljust(20, '-')
'hola mundo'.rjust(20, '-')
'abcdeabcdeabcde'.count('ab')
'abcdeabcdeabcde'.find('cd') # position of the first occurrence of the string 'cd'
'abc123'.isalnum() # alphanumeric?
'()#@'.isalnum() # alphanumeric?
'abc'.isalpha() # alphabetic?
'1234'.isdigit()
'1.234'.isdigit()
'1.234'.isnumeric()
'1,2,3,4,5'.partition(',')
'1,2,3,4,5'.rsplit(',')
'hola\nmundo\ncruel'.splitlines()
# string concatenation
x = 'foo'
y = 'bar'
xy = x + y # Ok
x += 'ooo' # Bad
x = ''.join([x, 'ooo']) # Alternative
###Output
_____no_output_____
###Markdown
In Python, strings are vectors of characters; the first character occupies position 0, the second position 1, and so on. Negative indices (starting at `-1`) indicate positions counted from the end. For example: +---+---+---+---+---+---+ | P | y | t | h | o | n | +---+---+---+---+---+---+ 0 1 2 3 4 5 -6 -5 -4 -3 -2 -1
###Code
word = 'Python'
word[0] # character at position 0
word[5] # character at position 5
word[-1] # last character
word[-2] # second-to-last character
word[-6] # first character
word[0:2] # the `:` operator is used to indicate ranges
word[:2] # ':2' means from the beginning up to position 2 (not included)
word[2:5]
word[2:] # from position 2 to the end
word[:2] + word[2:]
word[:4] + word[4:]
word[-2:] # from position -2 to the end, that is, the last two characters.
word[:] # from the first to the last character.
s = 'abcde' # the len function computes the length of a character string.
len(s)
###Output
_____no_output_____
###Markdown
**Exercise.--** Convert the letters at positions 3, 6, ... of the string `'abcdefghijklm'` to uppercase. Lists [Contents](Contenido) Lists are one of the main structures for storing information in Python. This first part presents the most basic elements of working with lists. A more detailed review is presented later in this document.
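Before moving on to lists, here is a sketch of one possible solution to the string exercise above (it assumes the 0-based positions used throughout this section).
###Code
s = 'abcdefghijklm'
chars = list(s)
chars[3::3] = [c.upper() for c in chars[3::3]] # upper-case the characters at positions 3, 6, 9 and 12
''.join(chars)
###Output
_____no_output_____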
###Code
squares = [1, 4, 9, 16, 25] # lists are created by enclosing their elements between [ and ]
squares
squares[0] # elements are indexed from zero, just like strings
squares[-1] # negative indices also work.
squares[-3:] # from position -3 to the end
squares[:] # from the first to the last element.
squares + [36, 49, 64, 81, 100] # list concatenation using the + operator
cubes = [1, 8, 27, 65, 125] # list of cubes with one wrong element
4 ** 3 # the cube of 4 is 64, not 65!
cubes[3] = 64 # replace the wrong value
cubes
cubes.append(216) # append the cube of 6 to the end of the list.
cubes.append(7 ** 3) # and again append the cube of 7 to the end
cubes
letters = ['a', 'b', 'c', 'd', 'e', 'f', 'g']
letters
letters[2:5] = ['C', 'D', 'E'] # a range of positions can be replaced
letters
letters[2:5] = [] # now they are removed.
letters
letters = ['a', 'b', 'c', 'd', 'e', 'f', 'g']
letters
letters[0:7:2] # every 2
letters[0:7:2] = ['A', 'C', 'E', 'G']
letters
letters[:] = [] # clear the contents of the list
letters
letters = ['a', 'b', 'c', 'd'] # the len function returns the length of the list
len(letters)
a = ['a', 'b', 'c'] # list elements can be of any type.
n = [1, 2, 3]
x = [a, n] # x is a list of lists
x
x[0] # the first element of x
x[0][1] # the element at position 1 of the first list
x[1] # the second element of x
x[1][2] # the element at position 2 of the second list
###Output
_____no_output_____ |
python/03-data-analysis/mlp-tf.keras/mlp-classification-tf.keras-wine.ipynb | ###Markdown
Wine Quality Classifier Status: In process LOAD THE FEATURE DATA
###Code
import pandas as pd
import numpy as np
X = pd.read_csv('../../02-data-preprocessing/output/preprocessed_data/X.csv', sep=',')
print ('Feature data, shape:\nX: {}'.format(X.shape))
X.head()
###Output
Feature data, shape:
X: (178, 13)
###Markdown
DATA OVERVIEW
###Code
y = pd.read_csv('../../02-data-preprocessing/output/preprocessed_data/y.csv', sep=',')
print ('Target data, shape:\ny: {}'.format(y.shape))
y.head()
###Output
Target data, shape:
y: (178, 3)
###Markdown
SPLIT THE DATA
###Code
from sklearn.model_selection import train_test_split
# set the seed for reproducibility
np.random.seed(127)
# split the dataset into 2 training and 2 testing sets
X_train, X_test, y_train, y_test = train_test_split(X.values, y.values, test_size=0.2, random_state=13)
print('Data shapes:\n')
print('X_train : {}\ny_train : {}\n\nX_test : {}\ny_test : {}'.format(np.shape(X_train),
np.shape(y_train),
np.shape(X_test),
np.shape(y_test)))
###Output
Data shapes:
X_train : (142, 13)
y_train : (142, 3)
X_test : (36, 13)
y_test : (36, 3)
###Markdown
DEFINE NETWORK PARAMETERS
###Code
# define number of attributes
n_features = X_train.shape[1]
n_classes = y_train.shape[1]
# count number of samples in each set of data
n_train = X_train.shape[0]
n_test = X_test.shape[0]
# define amount of neurons
n_layer_in = n_features # 13 neurons in the input layer (one per feature)
n_layer_h1 = 50 # first hidden layer
n_layer_h2 = 50 # second hidden layer
n_layer_out = n_classes # 3 neurons in the output layer (one per class)
sigma_init = 0.01 # For randomized initialization
###Output
_____no_output_____
###Markdown
MODEL ARCHITECTURE
###Code
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
# fix random seed for reproducibility
np.random.seed(42)
# define model architecture
model = Sequential()
model.add(Dense(n_layer_h1, activation='relu', input_shape=(n_features,)))
model.add(Dense(n_layer_h2, activation='relu'))
model.add(Dense(n_classes, activation='softmax'))
# add model's configuration
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
# show model architecture
model.summary()
###Output
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
dense (Dense) (None, 50) 700
_________________________________________________________________
dense_1 (Dense) (None, 50) 2550
_________________________________________________________________
dense_2 (Dense) (None, 3) 153
=================================================================
Total params: 3,403
Trainable params: 3,403
Non-trainable params: 0
_________________________________________________________________
###Markdown
EXECUTE THE MODEL
###Code
from tensorflow.keras.callbacks import History
# add history function to
history = History()
model.fit(X_train, y_train, epochs=100, callbacks=[history])
###Output
Train on 142 samples
Epoch 1/100
142/142 [==============================] - 1s 5ms/sample - loss: 1.1010 - accuracy: 0.4507
Epoch 2/100
142/142 [==============================] - 0s 63us/sample - loss: 0.8084 - accuracy: 0.7746
Epoch 3/100
142/142 [==============================] - 0s 56us/sample - loss: 0.6458 - accuracy: 0.9225
Epoch 4/100
142/142 [==============================] - 0s 56us/sample - loss: 0.5232 - accuracy: 0.9577
Epoch 5/100
142/142 [==============================] - 0s 42us/sample - loss: 0.4265 - accuracy: 0.9648
Epoch 6/100
142/142 [==============================] - 0s 42us/sample - loss: 0.3457 - accuracy: 0.9718
Epoch 7/100
142/142 [==============================] - 0s 42us/sample - loss: 0.2812 - accuracy: 0.9718
Epoch 8/100
142/142 [==============================] - 0s 35us/sample - loss: 0.2306 - accuracy: 0.9718
Epoch 9/100
142/142 [==============================] - 0s 42us/sample - loss: 0.1908 - accuracy: 0.9859
Epoch 10/100
142/142 [==============================] - 0s 42us/sample - loss: 0.1581 - accuracy: 0.9859
Epoch 11/100
142/142 [==============================] - 0s 49us/sample - loss: 0.1311 - accuracy: 0.9859
Epoch 12/100
142/142 [==============================] - 0s 49us/sample - loss: 0.1100 - accuracy: 0.9930
Epoch 13/100
142/142 [==============================] - 0s 42us/sample - loss: 0.0937 - accuracy: 0.9930
Epoch 14/100
142/142 [==============================] - 0s 42us/sample - loss: 0.0788 - accuracy: 0.9930
Epoch 15/100
142/142 [==============================] - 0s 42us/sample - loss: 0.0664 - accuracy: 0.9930
Epoch 16/100
142/142 [==============================] - 0s 35us/sample - loss: 0.0562 - accuracy: 0.9930
Epoch 17/100
142/142 [==============================] - 0s 42us/sample - loss: 0.0471 - accuracy: 0.9930
Epoch 18/100
142/142 [==============================] - 0s 35us/sample - loss: 0.0410 - accuracy: 0.9930
Epoch 19/100
142/142 [==============================] - 0s 42us/sample - loss: 0.0351 - accuracy: 0.9930
Epoch 20/100
142/142 [==============================] - 0s 35us/sample - loss: 0.0306 - accuracy: 0.9930
Epoch 21/100
142/142 [==============================] - 0s 42us/sample - loss: 0.0264 - accuracy: 0.9930
Epoch 22/100
142/142 [==============================] - 0s 42us/sample - loss: 0.0230 - accuracy: 0.9930
Epoch 23/100
142/142 [==============================] - 0s 42us/sample - loss: 0.0190 - accuracy: 1.0000
Epoch 24/100
142/142 [==============================] - 0s 56us/sample - loss: 0.0166 - accuracy: 1.0000
Epoch 25/100
142/142 [==============================] - 0s 57us/sample - loss: 0.0142 - accuracy: 1.0000
Epoch 26/100
142/142 [==============================] - 0s 49us/sample - loss: 0.0125 - accuracy: 1.0000
Epoch 27/100
142/142 [==============================] - 0s 42us/sample - loss: 0.0106 - accuracy: 1.0000
Epoch 28/100
142/142 [==============================] - 0s 42us/sample - loss: 0.0091 - accuracy: 1.0000
Epoch 29/100
142/142 [==============================] - 0s 42us/sample - loss: 0.0080 - accuracy: 1.0000
Epoch 30/100
142/142 [==============================] - 0s 42us/sample - loss: 0.0067 - accuracy: 1.0000
Epoch 31/100
142/142 [==============================] - 0s 35us/sample - loss: 0.0057 - accuracy: 1.0000
Epoch 32/100
142/142 [==============================] - 0s 35us/sample - loss: 0.0050 - accuracy: 1.0000
Epoch 33/100
142/142 [==============================] - 0s 42us/sample - loss: 0.0041 - accuracy: 1.0000
Epoch 34/100
142/142 [==============================] - 0s 42us/sample - loss: 0.0036 - accuracy: 1.0000
Epoch 35/100
142/142 [==============================] - 0s 42us/sample - loss: 0.0030 - accuracy: 1.0000
Epoch 36/100
142/142 [==============================] - 0s 42us/sample - loss: 0.0024 - accuracy: 1.0000
Epoch 37/100
142/142 [==============================] - 0s 42us/sample - loss: 0.0023 - accuracy: 1.0000
Epoch 38/100
142/142 [==============================] - 0s 49us/sample - loss: 0.0018 - accuracy: 1.0000
Epoch 39/100
142/142 [==============================] - 0s 42us/sample - loss: 0.0016 - accuracy: 1.0000
Epoch 40/100
142/142 [==============================] - 0s 42us/sample - loss: 0.0014 - accuracy: 1.0000
Epoch 41/100
142/142 [==============================] - 0s 42us/sample - loss: 0.0011 - accuracy: 1.0000
Epoch 42/100
142/142 [==============================] - 0s 42us/sample - loss: 9.7680e-04 - accuracy: 1.0000
Epoch 43/100
142/142 [==============================] - 0s 42us/sample - loss: 8.6909e-04 - accuracy: 1.0000
Epoch 44/100
142/142 [==============================] - 0s 49us/sample - loss: 7.6153e-04 - accuracy: 1.0000
Epoch 45/100
142/142 [==============================] - 0s 42us/sample - loss: 6.2681e-04 - accuracy: 1.0000
Epoch 46/100
142/142 [==============================] - 0s 42us/sample - loss: 5.3950e-04 - accuracy: 1.0000
Epoch 47/100
142/142 [==============================] - 0s 42us/sample - loss: 4.3600e-04 - accuracy: 1.0000
Epoch 48/100
142/142 [==============================] - 0s 42us/sample - loss: 3.6201e-04 - accuracy: 1.0000
Epoch 49/100
142/142 [==============================] - 0s 42us/sample - loss: 3.3280e-04 - accuracy: 1.0000
Epoch 50/100
142/142 [==============================] - 0s 42us/sample - loss: 2.7269e-04 - accuracy: 1.0000
Epoch 51/100
142/142 [==============================] - 0s 35us/sample - loss: 2.4400e-04 - accuracy: 1.0000
Epoch 52/100
142/142 [==============================] - 0s 49us/sample - loss: 1.9874e-04 - accuracy: 1.0000
Epoch 53/100
142/142 [==============================] - 0s 42us/sample - loss: 1.6327e-04 - accuracy: 1.0000
Epoch 54/100
142/142 [==============================] - 0s 42us/sample - loss: 1.2731e-04 - accuracy: 1.0000
Epoch 55/100
142/142 [==============================] - 0s 49us/sample - loss: 1.0994e-04 - accuracy: 1.0000
Epoch 56/100
142/142 [==============================] - 0s 35us/sample - loss: 9.3228e-05 - accuracy: 1.0000
Epoch 57/100
142/142 [==============================] - 0s 42us/sample - loss: 7.2628e-05 - accuracy: 1.0000
Epoch 58/100
142/142 [==============================] - 0s 42us/sample - loss: 6.3962e-05 - accuracy: 1.0000
Epoch 59/100
142/142 [==============================] - 0s 50us/sample - loss: 4.8417e-05 - accuracy: 1.0000
Epoch 60/100
142/142 [==============================] - 0s 42us/sample - loss: 4.0165e-05 - accuracy: 1.0000
Epoch 61/100
142/142 [==============================] - 0s 42us/sample - loss: 3.8325e-05 - accuracy: 1.0000
Epoch 62/100
142/142 [==============================] - 0s 35us/sample - loss: 2.8262e-05 - accuracy: 1.0000
Epoch 63/100
142/142 [==============================] - 0s 42us/sample - loss: 2.4424e-05 - accuracy: 1.0000
Epoch 64/100
142/142 [==============================] - 0s 42us/sample - loss: 1.8734e-05 - accuracy: 1.0000
Epoch 65/100
142/142 [==============================] - 0s 42us/sample - loss: 1.5385e-05 - accuracy: 1.0000
Epoch 66/100
142/142 [==============================] - 0s 42us/sample - loss: 1.2292e-05 - accuracy: 1.0000
Epoch 67/100
142/142 [==============================] - 0s 35us/sample - loss: 1.1022e-05 - accuracy: 1.0000
Epoch 68/100
142/142 [==============================] - 0s 49us/sample - loss: 8.4994e-06 - accuracy: 1.0000
Epoch 69/100
142/142 [==============================] - 0s 35us/sample - loss: 6.9187e-06 - accuracy: 1.0000
Epoch 70/100
142/142 [==============================] - 0s 42us/sample - loss: 5.5069e-06 - accuracy: 1.0000
Epoch 71/100
142/142 [==============================] - 0s 42us/sample - loss: 5.0258e-06 - accuracy: 1.0000
Epoch 72/100
142/142 [==============================] - 0s 42us/sample - loss: 3.9917e-06 - accuracy: 1.0000
Epoch 73/100
142/142 [==============================] - 0s 35us/sample - loss: 3.1539e-06 - accuracy: 1.0000
Epoch 74/100
142/142 [==============================] - 0s 42us/sample - loss: 2.5898e-06 - accuracy: 1.0000
Epoch 75/100
142/142 [==============================] - 0s 35us/sample - loss: 2.2977e-06 - accuracy: 1.0000
Epoch 76/100
142/142 [==============================] - 0s 49us/sample - loss: 1.8443e-06 - accuracy: 1.0000
Epoch 77/100
142/142 [==============================] - 0s 42us/sample - loss: 1.3348e-06 - accuracy: 1.0000
Epoch 78/100
142/142 [==============================] - 0s 42us/sample - loss: 1.1350e-06 - accuracy: 1.0000
Epoch 79/100
142/142 [==============================] - 0s 42us/sample - loss: 9.5450e-07 - accuracy: 1.0000
Epoch 80/100
142/142 [==============================] - 0s 35us/sample - loss: 7.9584e-07 - accuracy: 1.0000
Epoch 81/100
142/142 [==============================] - 0s 49us/sample - loss: 6.6152e-07 - accuracy: 1.0000
Epoch 82/100
142/142 [==============================] - 0s 35us/sample - loss: 5.1125e-07 - accuracy: 1.0000
Epoch 83/100
142/142 [==============================] - 0s 42us/sample - loss: 4.2898e-07 - accuracy: 1.0000
Epoch 84/100
142/142 [==============================] - 0s 35us/sample - loss: 3.8365e-07 - accuracy: 1.0000
Epoch 85/100
142/142 [==============================] - 0s 35us/sample - loss: 3.1565e-07 - accuracy: 1.0000
Epoch 86/100
142/142 [==============================] - 0s 42us/sample - loss: 2.6696e-07 - accuracy: 1.0000
Epoch 87/100
142/142 [==============================] - 0s 35us/sample - loss: 2.2415e-07 - accuracy: 1.0000
Epoch 88/100
142/142 [==============================] - 0s 35us/sample - loss: 1.8973e-07 - accuracy: 1.0000
Epoch 89/100
142/142 [==============================] - 0s 42us/sample - loss: 1.5867e-07 - accuracy: 1.0000
Epoch 90/100
142/142 [==============================] - 0s 35us/sample - loss: 1.4943e-07 - accuracy: 1.0000
Epoch 91/100
142/142 [==============================] - 0s 42us/sample - loss: 1.1921e-07 - accuracy: 1.0000
Epoch 92/100
142/142 [==============================] - 0s 35us/sample - loss: 1.0326e-07 - accuracy: 1.0000
Epoch 93/100
142/142 [==============================] - 0s 35us/sample - loss: 9.7382e-08 - accuracy: 1.0000
Epoch 94/100
142/142 [==============================] - 0s 42us/sample - loss: 8.4790e-08 - accuracy: 1.0000
Epoch 95/100
142/142 [==============================] - 0s 42us/sample - loss: 7.3037e-08 - accuracy: 1.0000
Epoch 96/100
142/142 [==============================] - 0s 35us/sample - loss: 6.4642e-08 - accuracy: 1.0000
Epoch 97/100
142/142 [==============================] - 0s 42us/sample - loss: 5.9605e-08 - accuracy: 1.0000
Epoch 98/100
142/142 [==============================] - 0s 42us/sample - loss: 5.6247e-08 - accuracy: 1.0000
Epoch 99/100
142/142 [==============================] - 0s 42us/sample - loss: 4.7012e-08 - accuracy: 1.0000
Epoch 100/100
142/142 [==============================] - 0s 42us/sample - loss: 4.8691e-08 - accuracy: 1.0000
###Markdown
PRINTING RAW OUTPUT
###Code
predictions = model.predict(X_test)
predictions
###Output
_____no_output_____
###Markdown
EVALUATE TESTING SET
###Code
# Evaluate the model on the test data using `evaluate`
results = model.evaluate(X_test, y_test)
print('\nEvaluate on test data \n\n(loss), (accuracy) :\n{}'.format(results))
###Output
36/36 [==============================] - 0s 2ms/sample - loss: 0.0682 - accuracy: 0.9722
Evaluate on test data
(loss), (accuracy) :
[0.0681806140475803, 0.9722222]
###Markdown
PRINTING RESULTS
###Code
dataframe = pd.DataFrame(np.argmax(predictions,1), columns=['Prediction'])
dataframe['Target'] = np.argmax(y_test, 1)
dataframe['Hit'] = np.equal(dataframe.Target, dataframe.Prediction)
print('\n\nPrinting results :\n\n', dataframe)
print(history.history.keys())
###Output
dict_keys(['loss', 'accuracy'])
###Markdown
VISUALIZE THE RESULTS
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
# set up legend
blue_patch = mpatches.Patch(color='blue', label='Train Accuracy [Maximize]')
plt.legend(handles=[blue_patch])
#plot the data
plt.plot(history.history['accuracy'], color='blue')
plt.ylabel('score');
###Output
_____no_output_____
###Markdown
VISUALIZE THE LOSS EVOLUTION
###Code
# set up legend
green_patch = mpatches.Patch(color='green', label='Avg Loss [Minimize]')
plt.legend(handles=[green_patch])
#plot the data
plt.plot(history.history['loss'], color='green')
plt.xlabel('epochs')
plt.ylabel('score');
###Output
_____no_output_____
###Markdown
SAVE MODEL FOR FUTURE RESTORE
###Code
import os
# create dir folders if they don't exist
os.makedirs('output/keras_checkpoints', exist_ok=True)
# save the trained model
model.save('output/keras_checkpoints/mlp_wine_tf_keras.h5')
###Output
_____no_output_____
###Markdown
LOAD PRETRAINED MODEL
###Code
model.load_weights('output/keras_checkpoints/mlp_wine_tf_keras.h5')
###Output
_____no_output_____
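###Markdown
As an aside, `load_weights` above only copies weights into the existing `model` object; since `model.save(...)` stored the full model (architecture, weights and optimizer state), a sketch like the following (assuming the tf.keras API that the saved `.h5` file suggests) restores the whole model without rebuilding the layers:
###Code
# Sketch: restore the entire saved model (architecture + weights) from the .h5 file,
# assuming it was written with tf.keras as in the save cell above.
from tensorflow import keras

restored_model = keras.models.load_model('output/keras_checkpoints/mlp_wine_tf_keras.h5')
restored_model.summary()  # should report the same layer stack as the original model
###Output
_____no_output_____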
###Markdown
TESTING PRETRAINED MODEL
###Code
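# Note: `predictions` below were computed with model.predict(X_test) before the weights
# were reloaded; re-running model.predict(X_test) here would exercise the restored weights directly.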
dataframe = pd.DataFrame(np.argmax(predictions,1), columns=['Prediction'])
dataframe['Target'] = np.argmax(y_test, 1)
dataframe['Hit'] = np.equal(dataframe.Target, dataframe.Prediction)
print('\n\nPrinting results :\n\n', dataframe)
###Output
Printing results :
Prediction Target Hit
0 2 2 True
1 0 0 True
2 2 2 True
3 1 1 True
4 1 1 True
5 1 1 True
6 2 2 True
7 0 0 True
8 1 1 True
9 1 1 True
10 0 0 True
11 0 0 True
12 2 2 True
13 1 1 True
14 2 2 True
15 1 1 True
16 1 1 True
17 0 0 True
18 0 0 True
19 2 1 False
20 2 2 True
21 1 1 True
22 0 0 True
23 0 0 True
24 2 2 True
25 1 1 True
26 2 2 True
27 1 1 True
28 0 0 True
29 2 2 True
30 1 1 True
31 1 1 True
32 0 0 True
33 1 1 True
34 0 0 True
35 0 0 True
|
Machine_Learning_Clustering_ and_ Retrieval/2_kmeans-with-text-data_blank.ipynb | ###Markdown
k-means with text data In this assignment you will* Cluster Wikipedia documents using k-means* Explore the role of random initialization on the quality of the clustering* Explore how results differ after changing the number of clusters* Evaluate clustering, both quantitatively and qualitativelyWhen properly executed, clustering uncovers valuable insights from a set of unlabeled documents. **Note to Amazon EC2 users**: To conserve memory, make sure to stop all the other notebooks before running this notebook. Import necessary packages The following code block will check if you have the correct version of GraphLab Create. Any version later than 1.8.5 will do. To upgrade, read [this page](https://turi.com/download/upgrade-graphlab-create.html).
###Code
import graphlab
import matplotlib.pyplot as plt
import numpy as np
import sys
import os
from scipy.sparse import csr_matrix
%matplotlib inline
'''Check GraphLab Create version'''
from distutils.version import StrictVersion
assert (StrictVersion(graphlab.version) >= StrictVersion('1.8.5')), 'GraphLab Create must be version 1.8.5 or later.'
###Output
[INFO] graphlab.cython.cy_server: GraphLab Create v2.1 started. Logging: /tmp/graphlab_server_1477851738.log
INFO:graphlab.cython.cy_server:GraphLab Create v2.1 started. Logging: /tmp/graphlab_server_1477851738.log
###Markdown
Load data, extract features To work with text data, we must first convert the documents into numerical features. As in the first assignment, let's extract TF-IDF features for each article.
###Code
wiki = graphlab.SFrame('people_wiki.gl/')
wiki['tf_idf'] = graphlab.text_analytics.tf_idf(wiki['text'])
###Output
_____no_output_____
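###Markdown
As a point of reference, roughly the same kind of bag-of-words TF-IDF features can be built with scikit-learn; the sketch below assumes `wiki['text']` can be materialized as a plain list of strings, and its tokenization and IDF weighting differ in detail from GraphLab's `tf_idf`:
###Code
# Alternative TF-IDF computation with scikit-learn (weighting and tokenization
# differ from graphlab.text_analytics.tf_idf, so the numbers will not match exactly).
from sklearn.feature_extraction.text import TfidfVectorizer

vectorizer = TfidfVectorizer()
tf_idf_sklearn = vectorizer.fit_transform(list(wiki['text']))  # sparse (n_docs x n_terms) matrix
print tf_idf_sklearn.shape
###Output
_____no_output_____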
###Markdown
For the remainder of the assignment, we will use sparse matrices. Sparse matrices are matrices that have a small number of nonzero entries. A good data structure for sparse matrices would only store the nonzero entries to save space and speed up computation. SciPy provides a highly-optimized library for sparse matrices. Many matrix operations available for NumPy arrays are also available for SciPy sparse matrices.We first convert the TF-IDF column (in dictionary format) into the SciPy sparse matrix format. We included plenty of comments for the curious; if you'd like, you may skip the next block and treat the function as a black box.
###Code
def sframe_to_scipy(x, column_name):
'''
Convert a dictionary column of an SFrame into a sparse matrix format where
each (row_id, column_id, value) triple corresponds to the value of
x[row_id][column_id], where column_id is a key in the dictionary.
Example
>>> sparse_matrix, map_key_to_index = sframe_to_scipy(sframe, column_name)
'''
assert x[column_name].dtype() == dict, \
'The chosen column must be dict type, representing sparse data.'
# Create triples of (row_id, feature_id, count).
# 1. Add a row number.
x = x.add_row_number()
# 2. Stack will transform x to have a row for each unique (row, key) pair.
x = x.stack(column_name, ['feature', 'value'])
# Map words into integers using a OneHotEncoder feature transformation.
f = graphlab.feature_engineering.OneHotEncoder(features=['feature'])
# 1. Fit the transformer using the above data.
f.fit(x)
# 2. The transform takes 'feature' column and adds a new column 'feature_encoding'.
x = f.transform(x)
# 3. Get the feature mapping.
mapping = f['feature_encoding']
# 4. Get the feature id to use for each key.
x['feature_id'] = x['encoded_features'].dict_keys().apply(lambda x: x[0])
# Create numpy arrays that contain the data for the sparse matrix.
i = np.array(x['id'])
j = np.array(x['feature_id'])
v = np.array(x['value'])
width = x['id'].max() + 1
height = x['feature_id'].max() + 1
# Create a sparse matrix.
mat = csr_matrix((v, (i, j)), shape=(width, height))
return mat, mapping
# The conversion will take about a minute or two.
tf_idf, map_index_to_word = sframe_to_scipy(wiki, 'tf_idf')
tf_idf
###Output
_____no_output_____
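###Markdown
To make the (row_id, feature_id, value) triples concrete, here is a tiny hand-built `csr_matrix` in the same format that `sframe_to_scipy` produces for the full corpus:
###Code
# Tiny illustration of the (row, column, value) triple format used by sframe_to_scipy.
import numpy as np
from scipy.sparse import csr_matrix

rows = np.array([0, 0, 1])        # row index of each nonzero entry
cols = np.array([0, 2, 1])        # column index of each nonzero entry
vals = np.array([1.5, 2.0, 0.5])  # the nonzero values themselves
toy = csr_matrix((vals, (rows, cols)), shape=(2, 3))
print toy.toarray()               # only the three listed entries are nonzero
###Output
_____no_output_____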
###Markdown
The above matrix contains a TF-IDF score for each of the 59071 pages in the data set and each of the 547979 unique words. Normalize all vectors As discussed in the previous assignment, Euclidean distance can be a poor metric of similarity between documents, as it unfairly penalizes long articles. For a reasonable assessment of similarity, we should disregard the length information and use length-agnostic metrics, such as cosine distance.The k-means algorithm does not directly work with cosine distance, so we take an alternative route to remove length information: we normalize all vectors to be unit length. It turns out that Euclidean distance closely mimics cosine distance when all vectors are unit length. In particular, the squared Euclidean distance between any two vectors of length one is directly proportional to their cosine distance.We can prove this as follows. Let $\mathbf{x}$ and $\mathbf{y}$ be normalized vectors, i.e. unit vectors, so that $\|\mathbf{x}\|=\|\mathbf{y}\|=1$. Write the squared Euclidean distance as the dot product of $(\mathbf{x} - \mathbf{y})$ to itself:\begin{align*}\|\mathbf{x} - \mathbf{y}\|^2 &= (\mathbf{x} - \mathbf{y})^T(\mathbf{x} - \mathbf{y})\\ &= (\mathbf{x}^T \mathbf{x}) - 2(\mathbf{x}^T \mathbf{y}) + (\mathbf{y}^T \mathbf{y})\\ &= \|\mathbf{x}\|^2 - 2(\mathbf{x}^T \mathbf{y}) + \|\mathbf{y}\|^2\\ &= 2 - 2(\mathbf{x}^T \mathbf{y})\\ &= 2(1 - (\mathbf{x}^T \mathbf{y}))\\ &= 2\left(1 - \frac{\mathbf{x}^T \mathbf{y}}{\|\mathbf{x}\|\|\mathbf{y}\|}\right)\\ &= 2\left[\text{cosine distance}\right]\end{align*}This tells us that two **unit vectors** that are close in Euclidean distance are also close in cosine distance. Thus, the k-means algorithm (which naturally uses Euclidean distances) on normalized vectors will produce the same results as clustering using cosine distance as a distance metric.We import the [`normalize()` function](http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.normalize.html) from scikit-learn to normalize all vectors to unit length.
###Code
from sklearn.preprocessing import normalize
tf_idf = normalize(tf_idf)
###Output
_____no_output_____
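###Markdown
A quick numerical sanity check of the identity derived above: for unit-length vectors, the squared Euclidean distance equals twice the cosine distance (a minimal sketch using scikit-learn's pairwise metrics on two arbitrary vectors):
###Code
# Check that squared Euclidean distance == 2 * cosine distance for unit-length vectors.
import numpy as np
from sklearn.metrics.pairwise import euclidean_distances, cosine_distances

x = np.array([[1.0, 2.0, 3.0]])
y = np.array([[4.0, 0.5, 2.0]])
x = x / np.linalg.norm(x)  # normalize to unit length
y = y / np.linalg.norm(y)

sq_euclidean = euclidean_distances(x, y)[0, 0]**2
cos_dist = cosine_distances(x, y)[0, 0]
print sq_euclidean, 2*cos_dist  # the two printed numbers should agree
###Output
_____no_output_____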
###Markdown
Implement k-means Let us implement the k-means algorithm. First, we choose an initial set of centroids. A common practice is to choose randomly from the data points.**Note:** We specify a seed here, so that everyone gets the same answer. In practice, we highly recommend to use different seeds every time (for instance, by using the current timestamp).
###Code
def get_initial_centroids(data, k, seed=None):
'''Randomly choose k data points as initial centroids'''
if seed is not None: # useful for obtaining consistent results
np.random.seed(seed)
n = data.shape[0] # number of data points
# Pick K indices from range [0, N).
rand_indices = np.random.randint(0, n, k)
# Keep centroids as dense format, as many entries will be nonzero due to averaging.
# As long as at least one document in a cluster contains a word,
# it will carry a nonzero weight in the TF-IDF vector of the centroid.
centroids = data[rand_indices,:].toarray()
return centroids
###Output
_____no_output_____
###Markdown
After initialization, the k-means algorithm iterates between the following two steps: 1. Assign each data point to the closest centroid.$$z_i \gets \mathrm{argmin}_j \|\mu_j - \mathbf{x}_i\|^2$$ 2. Revise centroids as the mean of the assigned data points.$$\mu_j \gets \frac{1}{n_j}\sum_{i:z_i=j} \mathbf{x}_i$$ In pseudocode, we iteratively do the following:```cluster_assignment = assign_clusters(data, centroids); centroids = revise_centroids(data, k, cluster_assignment)``` Assigning clusters How do we implement Step 1 of the main k-means loop above? First import the `pairwise_distances` function from scikit-learn, which calculates Euclidean distances between rows of given arrays. See [this documentation](http://scikit-learn.org/stable/modules/generated/sklearn.metrics.pairwise.pairwise_distances.html) for more information. For the sake of demonstration, let's look at documents 100 and 101 as query documents and compute the distances between each of these documents and every other document in the corpus. In the k-means algorithm, we will have to compute pairwise distances between the set of centroids and the set of documents.
###Code
from sklearn.metrics import pairwise_distances
# Get the TF-IDF vectors for documents 100 and 101.
queries = tf_idf[100:102,:]
# Compute pairwise distances from every data point to each query vector.
dist = pairwise_distances(tf_idf, queries, metric='euclidean')
print dist
###Output
[[ 1.41000789 1.36894636]
[ 1.40935215 1.41023886]
[ 1.39855967 1.40890299]
...,
[ 1.41108296 1.39123646]
[ 1.41022804 1.31468652]
[ 1.39899784 1.41072448]]
###Markdown
More formally, `dist[i,j]` is assigned the distance between the `i`th row of `X` (i.e., `X[i,:]`) and the `j`th row of `Y` (i.e., `Y[j,:]`). **Checkpoint:** For a moment, suppose that we initialize three centroids with the first 3 rows of `tf_idf`. Write code to compute distances from each of the centroids to all data points in `tf_idf`. Then find the distance between row 430 of `tf_idf` and the second centroid and save it to `dist`.
###Code
# Students should write code here
queries2 = tf_idf[0:3,:]
dist = pairwise_distances(tf_idf[430], queries2[1], metric='euclidean')
print dist
'''Test cell'''
if np.allclose(dist, pairwise_distances(tf_idf[430,:], tf_idf[1,:])):
print('Pass')
else:
print('Check your code again')
###Output
Pass
###Markdown
**Checkpoint:** Next, given the pairwise distances, we take the minimum of the distances for each data point. Fittingly, NumPy provides an `argmin` function. See [this documentation](http://docs.scipy.org/doc/numpy-1.10.1/reference/generated/numpy.argmin.html) for details.Read the documentation and write code to produce a 1D array whose i-th entry indicates the centroid that is the closest to the i-th data point. Use the list of distances from the previous checkpoint and save them as `distances`. The value 0 indicates closeness to the first centroid, 1 indicates closeness to the second centroid, and so forth. Save this array as `closest_cluster`.**Hint:** the resulting array should be as long as the number of data points.
###Code
# Students should write code here
distances = pairwise_distances(tf_idf, queries2, metric='euclidean')
closest_cluster = np.argmin(distances, axis= 1)
'''Test cell'''
reference = [list(row).index(min(row)) for row in distances]
if np.allclose(closest_cluster, reference):
print('Pass')
else:
print('Check your code again')
###Output
Pass
###Markdown
**Checkpoint:** Let's put these steps together. First, initialize three centroids with the first 3 rows of `tf_idf`. Then, compute distances from each of the centroids to all data points in `tf_idf`. Finally, use these distance calculations to compute cluster assignments and assign them to `cluster_assignment`.
###Code
# Students should write code here
queries3 = tf_idf[0:3,:]
queries_distances = pairwise_distances(tf_idf, queries3, metric='euclidean')
cluster_assignment = np.argmin(queries_distances, axis=1)
if len(cluster_assignment)==59071 and \
np.array_equal(np.bincount(cluster_assignment), np.array([23061, 10086, 25924])):
print('Pass') # count number of data points for each cluster
else:
print('Check your code again.')
###Output
Pass
###Markdown
Now we are ready to fill in the blanks in this function:
###Code
def assign_clusters(data, centroids):
# Compute distances between each data point and the set of centroids:
# Fill in the blank (RHS only)
distances_from_centroids = pairwise_distances(data, centroids, metric='euclidean')
# Compute cluster assignments for each data point:
# Fill in the blank (RHS only)
cluster_assignment = np.argmin(distances_from_centroids, axis=1)
return cluster_assignment
###Output
_____no_output_____
###Markdown
**Checkpoint**. For the last time, let us check if Step 1 was implemented correctly. With rows 0, 2, 4, and 6 of `tf_idf` as an initial set of centroids, we assign cluster labels to rows 0, 10, 20, ..., and 90 of `tf_idf`. The resulting cluster labels should be `[0, 1, 1, 0, 0, 2, 0, 2, 2, 1]`.
###Code
if np.allclose(assign_clusters(tf_idf[0:100:10], tf_idf[0:8:2]), np.array([0, 1, 1, 0, 0, 2, 0, 2, 2, 1])):
print('Pass')
else:
print('Check your code again.')
###Output
Pass
###Markdown
Revising clusters Let's turn to Step 2, where we compute the new centroids given the cluster assignments. SciPy and NumPy arrays allow for filtering via Boolean masks. For instance, we filter all data points that are assigned to cluster 0 by writing```data[cluster_assignment==0,:]``` To develop intuition about filtering, let's look at a toy example consisting of 3 data points and 2 clusters.
###Code
data = np.array([[1., 2., 0.],
[0., 0., 0.],
[2., 2., 0.]])
centroids = np.array([[0.5, 0.5, 0.],
[0., -0.5, 0.]])
###Output
_____no_output_____
###Markdown
Let's assign these data points to the closest centroid.
###Code
cluster_assignment = assign_clusters(data, centroids)
print cluster_assignment
###Output
[0 1 0]
###Markdown
The expression `cluster_assignment==1` gives a list of Booleans that says whether each data point is assigned to cluster 1 or not:
###Code
cluster_assignment==1
###Output
_____no_output_____
###Markdown
Likewise for cluster 0:
###Code
cluster_assignment==0
###Output
_____no_output_____
###Markdown
In lieu of indices, we can put in the list of Booleans to pick and choose rows. Only the rows that correspond to a `True` entry will be retained.First, let's look at the data points (i.e., their values) assigned to cluster 1:
###Code
data[cluster_assignment==1]
###Output
_____no_output_____
###Markdown
This makes sense since [0 0 0] is closer to [0 -0.5 0] than to [0.5 0.5 0].Now let's look at the data points assigned to cluster 0:
###Code
data[cluster_assignment==0]
###Output
_____no_output_____
###Markdown
Again, this makes sense since these values are each closer to [0.5 0.5 0] than to [0 -0.5 0].Given all the data points in a cluster, it only remains to compute the mean. Use [np.mean()](http://docs.scipy.org/doc/numpy-1.10.0/reference/generated/numpy.mean.html). By default, the function averages all elements in a 2D array. To compute row-wise or column-wise means, add the `axis` argument. See the linked documentation for details. Use this function to average the data points in cluster 0:
###Code
data[cluster_assignment==0].mean(axis=0)
###Output
_____no_output_____
###Markdown
We are now ready to complete this function:
###Code
def revise_centroids(data, k, cluster_assignment):
new_centroids = []
for i in xrange(k):
# Select all data points that belong to cluster i. Fill in the blank (RHS only)
        member_data_points = data[cluster_assignment==i]
# Compute the mean of the data points. Fill in the blank (RHS only)
centroid = member_data_points.mean(axis=0)
# Convert numpy.matrix type to numpy.ndarray type
centroid = centroid.A1
new_centroids.append(centroid)
new_centroids = np.array(new_centroids)
return new_centroids
###Output
_____no_output_____
###Markdown
**Checkpoint**. Let's check our Step 2 implementation. Letting rows 0, 10, ..., 90 of `tf_idf` as the data points and the cluster labels `[0, 1, 1, 0, 0, 2, 0, 2, 2, 1]`, we compute the next set of centroids. Each centroid is given by the average of all member data points in corresponding cluster.
###Code
result = revise_centroids(tf_idf[0:100:10], 3, np.array([0, 1, 1, 0, 0, 2, 0, 2, 2, 1]))
if np.allclose(result[0], np.mean(tf_idf[[0,30,40,60]].toarray(), axis=0)) and \
np.allclose(result[1], np.mean(tf_idf[[10,20,90]].toarray(), axis=0)) and \
np.allclose(result[2], np.mean(tf_idf[[50,70,80]].toarray(), axis=0)):
print('Pass')
else:
print('Check your code')
###Output
Pass
###Markdown
Assessing convergence How can we tell if the k-means algorithm is converging? We can look at the cluster assignments and see if they stabilize over time. In fact, we'll be running the algorithm until the cluster assignments stop changing at all. To be extra safe, and to assess the clustering performance, we'll be looking at an additional criterion: the sum of all squared distances between data points and centroids. This is defined as$$J(\mathcal{Z},\mu) = \sum_{j=1}^k \sum_{i:z_i = j} \|\mathbf{x}_i - \mu_j\|^2.$$The smaller the distances, the more homogeneous the clusters are. In other words, we'd like to have "tight" clusters.
###Code
def compute_heterogeneity(data, k, centroids, cluster_assignment):
heterogeneity = 0.0
for i in xrange(k):
# Select all data points that belong to cluster i. Fill in the blank (RHS only)
member_data_points = data[cluster_assignment==i, :]
if member_data_points.shape[0] > 0: # check if i-th cluster is non-empty
# Compute distances from centroid to data points (RHS only)
distances = pairwise_distances(member_data_points, [centroids[i]], metric='euclidean')
squared_distances = distances**2
heterogeneity += np.sum(squared_distances)
return heterogeneity
###Output
_____no_output_____
###Markdown
Let's compute the cluster heterogeneity for the 2-cluster example we've been considering based on our current cluster assignments and centroids.
###Code
compute_heterogeneity(data, 2, centroids, cluster_assignment)
###Output
_____no_output_____
###Markdown
Combining into a single function Once the two k-means steps have been implemented, as well as our heterogeneity metric we wish to monitor, it is only a matter of putting these functions together to write a k-means algorithm that* Repeatedly performs Steps 1 and 2* Tracks convergence metrics* Stops if either no assignment changed or we reach a certain number of iterations.
###Code
# Fill in the blanks
def kmeans(data, k, initial_centroids, maxiter, record_heterogeneity=None, verbose=False):
'''This function runs k-means on given data and initial set of centroids.
maxiter: maximum number of iterations to run.
record_heterogeneity: (optional) a list, to store the history of heterogeneity as function of iterations
if None, do not store the history.
verbose: if True, print how many data points changed their cluster labels in each iteration'''
centroids = initial_centroids[:]
prev_cluster_assignment = None
for itr in xrange(maxiter):
if verbose:
print(itr)
# 1. Make cluster assignments using nearest centroids
# YOUR CODE HERE
cluster_assignment = assign_clusters(data, centroids)
# 2. Compute a new centroid for each of the k clusters, averaging all data points assigned to that cluster.
# YOUR CODE HERE
centroids = revise_centroids(data, k, cluster_assignment)
# Check for convergence: if none of the assignments changed, stop
if prev_cluster_assignment is not None and \
(prev_cluster_assignment==cluster_assignment).all():
break
# Print number of new assignments
if prev_cluster_assignment is not None:
num_changed = np.sum(prev_cluster_assignment!=cluster_assignment)
if verbose:
print(' {0:5d} elements changed their cluster assignment.'.format(num_changed))
# Record heterogeneity convergence metric
if record_heterogeneity is not None:
# YOUR CODE HERE
score = compute_heterogeneity(data, k, centroids, cluster_assignment)
record_heterogeneity.append(score)
prev_cluster_assignment = cluster_assignment[:]
return centroids, cluster_assignment
###Output
_____no_output_____
###Markdown
Plotting convergence metric We can use the above function to plot the convergence metric across iterations.
###Code
def plot_heterogeneity(heterogeneity, k):
plt.figure(figsize=(7,4))
plt.plot(heterogeneity, linewidth=4)
plt.xlabel('# Iterations')
plt.ylabel('Heterogeneity')
plt.title('Heterogeneity of clustering over time, K={0:d}'.format(k))
plt.rcParams.update({'font.size': 16})
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Let's consider running k-means with K=3 clusters for a maximum of 400 iterations, recording cluster heterogeneity at every step. Then, let's plot the heterogeneity over iterations using the plotting function above.
###Code
k = 3
heterogeneity = []
initial_centroids = get_initial_centroids(tf_idf, k, seed=0)
centroids, cluster_assignment = kmeans(tf_idf, k, initial_centroids, maxiter=400,
record_heterogeneity=heterogeneity, verbose=True)
plot_heterogeneity(heterogeneity, k)
###Output
0
1
19157 elements changed their cluster assignment.
2
7739 elements changed their cluster assignment.
3
5119 elements changed their cluster assignment.
4
3370 elements changed their cluster assignment.
5
2811 elements changed their cluster assignment.
6
3233 elements changed their cluster assignment.
7
3815 elements changed their cluster assignment.
8
3172 elements changed their cluster assignment.
9
1149 elements changed their cluster assignment.
10
498 elements changed their cluster assignment.
11
265 elements changed their cluster assignment.
12
149 elements changed their cluster assignment.
13
100 elements changed their cluster assignment.
14
76 elements changed their cluster assignment.
15
67 elements changed their cluster assignment.
16
51 elements changed their cluster assignment.
17
47 elements changed their cluster assignment.
18
40 elements changed their cluster assignment.
19
34 elements changed their cluster assignment.
20
35 elements changed their cluster assignment.
21
39 elements changed their cluster assignment.
22
24 elements changed their cluster assignment.
23
16 elements changed their cluster assignment.
24
12 elements changed their cluster assignment.
25
14 elements changed their cluster assignment.
26
17 elements changed their cluster assignment.
27
15 elements changed their cluster assignment.
28
14 elements changed their cluster assignment.
29
16 elements changed their cluster assignment.
30
21 elements changed their cluster assignment.
31
22 elements changed their cluster assignment.
32
33 elements changed their cluster assignment.
33
35 elements changed their cluster assignment.
34
39 elements changed their cluster assignment.
35
36 elements changed their cluster assignment.
36
36 elements changed their cluster assignment.
37
25 elements changed their cluster assignment.
38
27 elements changed their cluster assignment.
39
25 elements changed their cluster assignment.
40
28 elements changed their cluster assignment.
41
35 elements changed their cluster assignment.
42
31 elements changed their cluster assignment.
43
25 elements changed their cluster assignment.
44
18 elements changed their cluster assignment.
45
15 elements changed their cluster assignment.
46
10 elements changed their cluster assignment.
47
8 elements changed their cluster assignment.
48
8 elements changed their cluster assignment.
49
8 elements changed their cluster assignment.
50
7 elements changed their cluster assignment.
51
8 elements changed their cluster assignment.
52
3 elements changed their cluster assignment.
53
3 elements changed their cluster assignment.
54
4 elements changed their cluster assignment.
55
2 elements changed their cluster assignment.
56
3 elements changed their cluster assignment.
57
3 elements changed their cluster assignment.
58
1 elements changed their cluster assignment.
59
1 elements changed their cluster assignment.
60
###Markdown
**Quiz Question**. (True/False) The clustering objective (heterogeneity) is non-increasing for this example. **Quiz Question**. Let's step back from this particular example. If the clustering objective (heterogeneity) would ever increase when running k-means, that would indicate: (choose one) 1. k-means algorithm got stuck in a bad local minimum 2. There is a bug in the k-means code 3. All data points consist of exact duplicates 4. Nothing is wrong. The objective should generally go down sooner or later. **Quiz Question**. Which of the clusters contains the greatest number of data points in the end? Hint: Use [`np.bincount()`](http://docs.scipy.org/doc/numpy-1.11.0/reference/generated/numpy.bincount.html) to count occurrences of each cluster label. 1. Cluster 0 2. Cluster 1 3. Cluster 2
###Code
biggest_cluster = np.argmax(np.bincount(cluster_assignment))
print biggest_cluster
###Output
2
###Markdown
Beware of local minima One weakness of k-means is that it tends to get stuck in a local minimum. To see this, let us run k-means multiple times, with different initial centroids created using different random seeds. **Note:** Again, in practice, you should set different seeds for every run. We give you a list of seeds for this assignment so that everyone gets the same answer. This may take several minutes to run.
###Code
k = 10
heterogeneity = {}
import time
start = time.time()
for seed in [0, 20000, 40000, 60000, 80000, 100000, 120000]:
initial_centroids = get_initial_centroids(tf_idf, k, seed)
centroids, cluster_assignment = kmeans(tf_idf, k, initial_centroids, maxiter=400,
record_heterogeneity=None, verbose=False)
# To save time, compute heterogeneity only once in the end
heterogeneity[seed] = compute_heterogeneity(tf_idf, k, centroids, cluster_assignment)
print('seed={0:06d}, heterogeneity={1:.5f}'.format(seed, heterogeneity[seed]))
print (np.max(np.bincount(cluster_assignment)))
sys.stdout.flush()
end = time.time()
print(end-start)
###Output
seed=000000, heterogeneity=57457.52442
18047
seed=020000, heterogeneity=57533.20100
15779
seed=040000, heterogeneity=57512.69257
18132
seed=060000, heterogeneity=57466.97925
17900
seed=080000, heterogeneity=57494.92990
17582
seed=100000, heterogeneity=57484.42210
16969
seed=120000, heterogeneity=57554.62410
16481
234.233843088
###Markdown
Notice the variation in heterogeneity for different initializations. This indicates that k-means sometimes gets stuck at a bad local minimum. **Quiz Question**. Another way to capture the effect of changing initialization is to look at the distribution of cluster assignments. Add a line to the code above to compute the size ( of member data points) of clusters for each run of k-means. Look at the size of the largest cluster (most of member data points) across multiple runs, with seeds 0, 20000, ..., 120000. How much does this measure vary across the runs? What is the minimum and maximum values this quantity takes? One effective way to counter this tendency is to use **k-means++** to provide a smart initialization. This method tries to spread out the initial set of centroids so that they are not too close together. It is known to improve the quality of local optima and lower average runtime.
###Code
def smart_initialize(data, k, seed=None):
'''Use k-means++ to initialize a good set of centroids'''
if seed is not None: # useful for obtaining consistent results
np.random.seed(seed)
centroids = np.zeros((k, data.shape[1]))
# Randomly choose the first centroid.
# Since we have no prior knowledge, choose uniformly at random
idx = np.random.randint(data.shape[0])
centroids[0] = data[idx,:].toarray()
# Compute distances from the first centroid chosen to all the other data points
squared_distances = pairwise_distances(data, centroids[0:1], metric='euclidean').flatten()**2
for i in xrange(1, k):
# Choose the next centroid randomly, so that the probability for each data point to be chosen
# is directly proportional to its squared distance from the nearest centroid.
        # Roughly speaking, a new centroid should be as far from the other centroids as possible.
idx = np.random.choice(data.shape[0], 1, p=squared_distances/sum(squared_distances))
centroids[i] = data[idx,:].toarray()
# Now compute distances from the centroids to all data points
squared_distances = np.min(pairwise_distances(data, centroids[0:i+1], metric='euclidean')**2,axis=1)
return centroids
###Output
_____no_output_____
###Markdown
Let's now rerun k-means with 10 clusters using the same set of seeds, but always using k-means++ to initialize the algorithm.This may take several minutes to run.
###Code
k = 10
heterogeneity_smart = {}
start = time.time()
for seed in [0, 20000, 40000, 60000, 80000, 100000, 120000]:
initial_centroids = smart_initialize(tf_idf, k, seed)
centroids, cluster_assignment = kmeans(tf_idf, k, initial_centroids, maxiter=400,
record_heterogeneity=None, verbose=False)
# To save time, compute heterogeneity only once in the end
heterogeneity_smart[seed] = compute_heterogeneity(tf_idf, k, centroids, cluster_assignment)
print('seed={0:06d}, heterogeneity={1:.5f}'.format(seed, heterogeneity_smart[seed]))
sys.stdout.flush()
end = time.time()
print(end-start)
###Output
seed=000000, heterogeneity=57468.63808
seed=020000, heterogeneity=57486.94263
seed=040000, heterogeneity=57454.35926
seed=060000, heterogeneity=57530.43659
seed=080000, heterogeneity=57454.51852
seed=100000, heterogeneity=57471.56674
seed=120000, heterogeneity=57523.28839
305.919774771
###Markdown
Let's compare the set of cluster heterogeneities we got from our 7 restarts of k-means using random initialization compared to the 7 restarts of k-means using k-means++ as a smart initialization.The following code produces a [box plot](http://matplotlib.org/api/pyplot_api.html) for each of these methods, indicating the spread of values produced by each method.
###Code
plt.figure(figsize=(8,5))
plt.boxplot([heterogeneity.values(), heterogeneity_smart.values()], vert=False)
plt.yticks([1, 2], ['k-means', 'k-means++'])
plt.rcParams.update({'font.size': 16})
plt.tight_layout()
###Output
_____no_output_____
###Markdown
A few things to notice from the box plot:* On average, k-means++ produces a better clustering than Random initialization.* Variation in clustering quality is smaller for k-means++. **In general, you should run k-means at least a few times with different initializations and then return the run resulting in the lowest heterogeneity.** Let us write a function that runs k-means multiple times and picks the best run that minimizes heterogeneity. The function accepts an optional list of seed values to be used for the multiple runs; if no such list is provided, the current UTC time is used as seed values.
###Code
def kmeans_multiple_runs(data, k, maxiter, num_runs, seed_list=None, verbose=False):
heterogeneity = {}
min_heterogeneity_achieved = float('inf')
best_seed = None
final_centroids = None
final_cluster_assignment = None
for i in xrange(num_runs):
# Use UTC time if no seeds are provided
if seed_list is not None:
seed = seed_list[i]
np.random.seed(seed)
else:
seed = int(time.time())
np.random.seed(seed)
# Use k-means++ initialization
# YOUR CODE HERE
initial_centroids = smart_initialize(data, k, seed=None)
# Run k-means
# YOUR CODE HERE
centroids, cluster_assignment = kmeans(data, k, initial_centroids, maxiter,
record_heterogeneity=None, verbose=True)
# To save time, compute heterogeneity only once in the end
# YOUR CODE HERE
heterogeneity[seed] = compute_heterogeneity(data, k, centroids, cluster_assignment)
if verbose:
print('seed={0:06d}, heterogeneity={1:.5f}'.format(seed, heterogeneity[seed]))
sys.stdout.flush()
# if current measurement of heterogeneity is lower than previously seen,
# update the minimum record of heterogeneity.
if heterogeneity[seed] < min_heterogeneity_achieved:
min_heterogeneity_achieved = heterogeneity[seed]
best_seed = seed
final_centroids = centroids
final_cluster_assignment = cluster_assignment
# Return the centroids and cluster assignments that minimize heterogeneity.
return final_centroids, final_cluster_assignment
###Output
_____no_output_____
###Markdown
How to choose K Since we are measuring the tightness of the clusters, a higher value of K reduces the possible heterogeneity metric by definition. For example, if we have N data points and set K=N clusters, then we could have 0 cluster heterogeneity by setting the N centroids equal to the values of the N data points. (Note: Not all runs for larger K will result in lower heterogeneity than a single run with smaller K due to local optima.) Let's explore this general trend for ourselves by performing the following analysis. Use the `kmeans_multiple_runs` function to run k-means with five different values of K. For each K, use k-means++ and multiple runs to pick the best solution. In what follows, we consider K=2,10,25,50,100 and 7 restarts for each setting.**IMPORTANT: The code block below will take about one hour to finish. We highly suggest that you use the arrays that we have computed for you.**Side note: In practice, a good implementation of k-means would utilize parallelism to run multiple runs of k-means at once. For an example, see [scikit-learn's KMeans](http://scikit-learn.org/stable/modules/generated/sklearn.cluster.KMeans.html).
###Code
#def plot_k_vs_heterogeneity(k_values, heterogeneity_values):
# plt.figure(figsize=(7,4))
# plt.plot(k_values, heterogeneity_values, linewidth=4)
# plt.xlabel('K')
# plt.ylabel('Heterogeneity')
# plt.title('K vs. Heterogeneity')
# plt.rcParams.update({'font.size': 16})
# plt.tight_layout()
#start = time.time()
#centroids = {}
#cluster_assignment = {}
#heterogeneity_values = []
#k_list = [2, 10, 25, 50, 100]
#seed_list = [0, 20000, 40000, 60000, 80000, 100000, 120000]
#for k in k_list:
# heterogeneity = []
# centroids[k], cluster_assignment[k] = kmeans_multiple_runs(tf_idf, k, maxiter=400,
# num_runs=len(seed_list),
# seed_list=seed_list,
# verbose=True)
# score = compute_heterogeneity(tf_idf, k, centroids[k], cluster_assignment[k])
# heterogeneity_values.append(score)
#plot_k_vs_heterogeneity(k_list, heterogeneity_values)
#end = time.time()
#print(end-start)
###Output
_____no_output_____
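###Markdown
As a companion to the scikit-learn pointer above, here is a minimal sketch of how the same experiment could be run with `sklearn.cluster.KMeans`, which bundles k-means++ initialization and multiple restarts behind one call; this is not the assignment's implementation, and its objective is reported as `inertia_`:
###Code
# Minimal sketch using scikit-learn's KMeans: k-means++ init and several restarts in one call.
from sklearn.cluster import KMeans

km = KMeans(n_clusters=10, init='k-means++', n_init=7, max_iter=400, random_state=0)
km.fit(tf_idf)     # accepts the sparse TF-IDF matrix directly
print km.inertia_  # sum of squared distances to the closest centroids (our heterogeneity)
###Output
_____no_output_____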
###Markdown
To use the pre-computed NumPy arrays, first download kmeans-arrays.npz as mentioned in the reading for this assignment and load them with the following code. Make sure the downloaded file is in the same directory as this notebook.
###Code
def plot_k_vs_heterogeneity(k_values, heterogeneity_values):
plt.figure(figsize=(7,4))
plt.plot(k_values, heterogeneity_values, linewidth=4)
plt.xlabel('K')
plt.ylabel('Heterogeneity')
plt.title('K vs. Heterogeneity')
plt.rcParams.update({'font.size': 16})
plt.tight_layout()
filename = 'kmeans-arrays.npz'
heterogeneity_values = []
k_list = [2, 10, 25, 50, 100]
if os.path.exists(filename):
arrays = np.load(filename)
centroids = {}
cluster_assignment = {}
for k in k_list:
print k
sys.stdout.flush()
'''To save memory space, do not load the arrays from the file right away. We use
a technique known as lazy evaluation, where some expressions are not evaluated
until later. Any expression appearing inside a lambda function doesn't get
evaluated until the function is called.
        Lazy evaluation is extremely important in a memory-constrained setting, such as
an Amazon EC2 t2.micro instance.'''
centroids[k] = lambda k=k: arrays['centroids_{0:d}'.format(k)]
cluster_assignment[k] = lambda k=k: arrays['cluster_assignment_{0:d}'.format(k)]
score = compute_heterogeneity(tf_idf, k, centroids[k](), cluster_assignment[k]())
heterogeneity_values.append(score)
plot_k_vs_heterogeneity(k_list, heterogeneity_values)
else:
print('File not found. Skipping.')
###Output
2
10
25
50
100
###Markdown
In the above plot we show that heterogeneity goes down as we increase the number of clusters. Does this mean we should always favor a higher K? **Not at all!** As we will see in the following section, setting K too high may end up separating data points that are actually pretty alike. At the extreme, we can set individual data points to be their own clusters (K=N) and achieve zero heterogeneity, but separating each data point into its own cluster is hardly a desirable outcome. In the following section, we will learn how to detect a K set "too large". Visualize clusters of documents Let's start visualizing some clustering results to see if we think the clustering makes sense. We can use such visualizations to help us assess whether we have set K too large or too small for a given application. Following the theme of this course, we will judge whether the clustering makes sense in the context of document analysis.What are we looking for in a good clustering of documents?* Documents in the same cluster should be similar.* Documents from different clusters should be less similar.So a bad clustering exhibits either of two symptoms:* Documents in a cluster have mixed content.* Documents with similar content are divided up and put into different clusters.To help visualize the clustering, we do the following:* Fetch nearest neighbors of each centroid from the set of documents assigned to that cluster. We will consider these documents as being representative of the cluster.* Print titles and first sentences of those nearest neighbors.* Print top 5 words that have highest tf-idf weights in each centroid.
###Code
def visualize_document_clusters(wiki, tf_idf, centroids, cluster_assignment, k, map_index_to_word, display_content=True):
'''wiki: original dataframe
tf_idf: data matrix, sparse matrix format
    map_index_to_word: SFrame specifying the mapping between words and column indices
display_content: if True, display 8 nearest neighbors of each centroid'''
print('==========================================================')
# Visualize each cluster c
for c in xrange(k):
# Cluster heading
print('Cluster {0:d} '.format(c)),
# Print top 5 words with largest TF-IDF weights in the cluster
idx = centroids[c].argsort()[::-1]
for i in xrange(5): # Print each word along with the TF-IDF weight
print('{0:s}:{1:.3f}'.format(map_index_to_word['category'][idx[i]], centroids[c,idx[i]])),
print('')
if display_content:
# Compute distances from the centroid to all data points in the cluster,
# and compute nearest neighbors of the centroids within the cluster.
distances = pairwise_distances(tf_idf, centroids[c].reshape(1, -1), metric='euclidean').flatten()
distances[cluster_assignment!=c] = float('inf') # remove non-members from consideration
nearest_neighbors = distances.argsort()
# For 8 nearest neighbors, print the title as well as first 180 characters of text.
# Wrap the text at 80-character mark.
for i in xrange(8):
text = ' '.join(wiki[nearest_neighbors[i]]['text'].split(None, 25)[0:25])
print('\n* {0:50s} {1:.5f}\n {2:s}\n {3:s}'.format(wiki[nearest_neighbors[i]]['name'],
distances[nearest_neighbors[i]], text[:90], text[90:180] if len(text) > 90 else ''))
print('==========================================================')
###Output
_____no_output_____
###Markdown
Let us first look at the 2 cluster case (K=2).
###Code
'''Notice the extra pairs of parentheses for centroids and cluster_assignment.
The centroid and cluster_assignment are still inside the npz file,
and we need to explicitly indicate when to load them into memory.'''
visualize_document_clusters(wiki, tf_idf, centroids[2](), cluster_assignment[2](), 2, map_index_to_word)
###Output
==========================================================
Cluster 0 she:0.025 her:0.017 music:0.012 he:0.011 university:0.011
* Anita Kunz 0.97401
anita e kunz oc born 1956 is a canadianborn artist and illustratorkunz has lived in london
new york and toronto contributing to magazines and working
* Janet Jackson 0.97472
janet damita jo jackson born may 16 1966 is an american singer songwriter and actress know
n for a series of sonically innovative socially conscious and
* Madonna (entertainer) 0.97475
madonna louise ciccone tkoni born august 16 1958 is an american singer songwriter actress
and businesswoman she achieved popularity by pushing the boundaries of lyrical
* %C3%81ine Hyland 0.97536
ine hyland ne donlon is emeritus professor of education and former vicepresident of univer
sity college cork ireland she was born in 1942 in athboy co
* Jane Fonda 0.97621
jane fonda born lady jayne seymour fonda december 21 1937 is an american actress writer po
litical activist former fashion model and fitness guru she is
* Christine Robertson 0.97643
christine mary robertson born 5 october 1948 is an australian politician and former austra
lian labor party member of the new south wales legislative council serving
* Pat Studdy-Clift 0.97643
pat studdyclift is an australian author specialising in historical fiction and nonfictionb
orn in 1925 she lived in gunnedah until she was sent to a boarding
* Alexandra Potter 0.97646
alexandra potter born 1970 is a british author of romantic comediesborn in bradford yorksh
ire england and educated at liverpool university gaining an honors degree in
==========================================================
Cluster 1 league:0.040 season:0.036 team:0.029 football:0.029 played:0.028
* Todd Williams 0.95468
todd michael williams born february 13 1971 in syracuse new york is a former major league
baseball relief pitcher he attended east syracuseminoa high school
* Gord Sherven 0.95622
gordon r sherven born august 21 1963 in gravelbourg saskatchewan and raised in mankota sas
katchewan is a retired canadian professional ice hockey forward who played
* Justin Knoedler 0.95639
justin joseph knoedler born july 17 1980 in springfield illinois is a former major league
baseball catcherknoedler was originally drafted by the st louis cardinals
* Chris Day 0.95648
christopher nicholas chris day born 28 july 1975 is an english professional footballer who
plays as a goalkeeper for stevenageday started his career at tottenham
* Tony Smith (footballer, born 1957) 0.95653
anthony tony smith born 20 february 1957 is a former footballer who played as a central de
fender in the football league in the 1970s and
* Ashley Prescott 0.95761
ashley prescott born 11 september 1972 is a former australian rules footballer he played w
ith the richmond and fremantle football clubs in the afl between
* Leslie Lea 0.95802
leslie lea born 5 october 1942 in manchester is an english former professional footballer
he played as a midfielderlea began his professional career with blackpool
* Tommy Anderson (footballer) 0.95818
thomas cowan tommy anderson born 24 september 1934 in haddington is a scottish former prof
essional footballer he played as a forward and was noted for
==========================================================
###Markdown
Both clusters have mixed content, although cluster 1 is much purer than cluster 0:* Cluster 0: artists, songwriters, professors, politicians, writers, etc.* Cluster 1: baseball players, hockey players, soccer (association football) players, etc. Top words of cluster 1 are all related to sports, whereas top words of cluster 0 show no clear pattern. Roughly speaking, the entire dataset was divided into athletes and non-athletes. It would be better if we sub-divided non-athletes into more categories. So let us use more clusters. How about `K=10`?
###Code
k = 10
visualize_document_clusters(wiki, tf_idf, centroids[k](), cluster_assignment[k](), k, map_index_to_word)
###Output
==========================================================
Cluster 0 film:0.020 art:0.014 he:0.011 book:0.010 television:0.010
* Wilson McLean 0.97479
wilson mclean born 1937 is a scottish illustrator and artist he has illustrated primarily
in the field of advertising but has also provided cover art
* Anton Hecht 0.97748
anton hecht is an english artist born in london in 2007 he asked musicians from around the
durham area to contribute to a soundtrack for
* David Salle 0.97800
david salle born 1952 is an american painter printmaker and stage designer who helped defi
ne postmodern sensibility salle was born in norman oklahoma he earned
* Vipin Sharma 0.97805
vipin sharma is an indian actor born in new delhi he is a graduate of national school of d
rama new delhi india and the canadian
* Paul Swadel 0.97823
paul swadel is a new zealand film director and producerhe has directed and produced many s
uccessful short films which have screened in competition at cannes
* Allan Stratton 0.97834
allan stratton born 1951 is a canadian playwright and novelistborn in stratford ontario st
ratton began his professional arts career while he was still in high
* Bill Bennett (director) 0.97848
bill bennett born 1953 is an australian film director producer and screenwriterhe dropped
out of medicine at queensland university in 1972 and joined the australian
* Rafal Zielinski 0.97850
rafal zielinski born 1957 montreal is an independent filmmaker he is best known for direct
ing films such as fun sundance film festival special jury award
==========================================================
Cluster 1 league:0.052 rugby:0.044 club:0.042 cup:0.042 season:0.041
* Chris Day 0.93220
christopher nicholas chris day born 28 july 1975 is an english professional footballer who
plays as a goalkeeper for stevenageday started his career at tottenham
* Gary Hooper 0.93481
gary hooper born 26 january 1988 is an english professional footballer who plays as a forw
ard for norwich cityhooper started his career at nonleague grays
* Tony Smith (footballer, born 1957) 0.93504
anthony tony smith born 20 february 1957 is a former footballer who played as a central de
fender in the football league in the 1970s and
* Jason Roberts (footballer) 0.93527
jason andre davis roberts mbe born 25 january 1978 is a former professional footballer and
now a football punditborn in park royal london roberts was
* Paul Robinson (footballer, born 1979) 0.93587
paul william robinson born 15 october 1979 is an english professional footballer who plays
for blackburn rovers as a goalkeeper he is a former england
* Alex Lawless 0.93732
alexander graham alex lawless born 26 march 1985 is a welsh professional footballer who pl
ays for luton town as a midfielderlawless began his career with
* Neil Grayson 0.93748
neil grayson born 1 november 1964 in york is an english footballer who last played as a st
riker for sutton towngraysons first club was local
* Sol Campbell 0.93759
sulzeer jeremiah sol campbell born 18 september 1974 is a former england international foo
tballer a central defender he had a 19year career playing in the
==========================================================
Cluster 2 championships:0.040 tour:0.037 championship:0.032 world:0.029 won:0.029
* Alessandra Aguilar 0.94505
alessandra aguilar born 1 july 1978 in lugo is a spanish longdistance runner who specialis
es in marathon running she represented her country in the event
* Heather Samuel 0.94529
heather barbara samuel born 6 july 1970 is a retired sprinter from antigua and barbuda who
specialized in the 100 and 200 metres in 1990
* Viola Kibiwot 0.94617
viola jelagat kibiwot born december 22 1983 in keiyo district is a runner from kenya who s
pecialises in the 1500 metres kibiwot won her first
* Ayelech Worku 0.94636
ayelech worku born june 12 1979 is an ethiopian longdistance runner most known for winning
two world championships bronze medals on the 5000 metres she
* Morhad Amdouni 0.94763
morhad amdouni born 21 january 1988 in portovecchio is a french middle and longdistance ru
nner he was european junior champion in track and cross country
* Krisztina Papp 0.94776
krisztina papp born 17 december 1982 in eger is a hungarian long distance runner she is th
e national indoor record holder over 5000 mpapp began
* Petra Lammert 0.94869
petra lammert born 3 march 1984 in freudenstadt badenwrttemberg is a former german shot pu
tter and current bobsledder she was the 2009 european indoor champion
* Hasan Mahboob 0.94880
hasan mahboob ali born silas kirui on 31 december 1981 in kapsabet is a bahraini longdista
nce runner he became naturalized in bahrain and switched from
==========================================================
Cluster 3 baseball:0.110 league:0.103 major:0.052 games:0.047 season:0.045
* Steve Springer 0.89300
steven michael springer born february 11 1961 is an american former professional baseball
player who appeared in major league baseball as a third baseman and
* Dave Ford 0.89547
david alan ford born december 29 1956 is a former major league baseball pitcher for the ba
ltimore orioles born in cleveland ohio ford attended lincolnwest
* Todd Williams 0.89820
todd michael williams born february 13 1971 in syracuse new york is a former major league
baseball relief pitcher he attended east syracuseminoa high school
* Justin Knoedler 0.90035
justin joseph knoedler born july 17 1980 in springfield illinois is a former major league
baseball catcherknoedler was originally drafted by the st louis cardinals
* Kevin Nicholson (baseball) 0.90643
kevin ronald nicholson born march 29 1976 is a canadian baseball shortstop he played part
of the 2000 season for the san diego padres of
* James Baldwin (baseball) 0.90648
james j baldwin jr born july 15 1971 is a former major league baseball pitcher he batted a
nd threw righthanded in his 11season career he
* Joe Strong 0.90655
joseph benjamin strong born september 9 1962 in fairfield california is a former major lea
gue baseball pitcher who played for the florida marlins from 2000
* Javier L%C3%B3pez (baseball) 0.90691
javier alfonso lpez born july 11 1977 is a puerto rican professional baseball pitcher for
the san francisco giants of major league baseball he is
==========================================================
Cluster 4 research:0.038 university:0.035 professor:0.032 science:0.023 institute:0.019
* Lawrence W. Green 0.95957
lawrence w green is best known by health education researchers as the originator of the pr
ecede model and codeveloper of the precedeproceed model which has
* Timothy Luke 0.96057
timothy w luke is university distinguished professor of political science in the college o
f liberal arts and human sciences as well as program chair of
* Ren%C3%A9e Fox 0.96100
rene c fox a summa cum laude graduate of smith college in 1949 earned her phd in sociology
in 1954 from radcliffe college harvard university
* Francis Gavin 0.96323
francis j gavin is first frank stanton chair in nuclear security policy studies and profes
sor of political science at mit before joining mit he was
* Catherine Hakim 0.96374
catherine hakim born 30 may 1948 is a british sociologist who specialises in womens employ
ment and womens issues she is currently a professorial research fellow
* Stephen Park Turner 0.96405
stephen turner is a researcher in social practice social and political theory and the phil
osophy of the social sciences he is graduate research professor in
* Robert Bates (political scientist) 0.96489
robert hinrichs bates born 1942 is an american political scientist he is eaton professor o
f the science of government in the departments of government and
* Georg von Krogh 0.96505
georg von krogh was born in oslo norway he is a professor at eth zurich and holds the chai
r of strategic management and innovation he
==========================================================
Cluster 5 football:0.076 coach:0.060 basketball:0.056 season:0.044 played:0.037
* Todd Curley 0.92731
todd curley born 14 january 1973 is a former australian rules footballer who played for co
llingwood and the western bulldogs in the australian football league
* Ashley Prescott 0.92992
ashley prescott born 11 september 1972 is a former australian rules footballer he played w
ith the richmond and fremantle football clubs in the afl between
* Pete Richardson 0.93204
pete richardson born october 17 1946 in youngstown ohio is a former american football defe
nsive back in the national football league and former college head
* Nathan Brown (Australian footballer born 1976) 0.93561
nathan daniel brown born 14 august 1976 is an australian rules footballer who played for t
he melbourne demons in the australian football leaguehe was drafted
* Earl Spalding 0.93654
earl spalding born 11 march 1965 in south perth is a former australian rules footballer wh
o played for melbourne and carlton in the victorian football
* Bud Grant 0.93766
harry peter bud grant jr born may 20 1927 is a former american football and canadian footb
all head coach grant served as the head coach
* Tyrone Wheatley 0.93885
tyrone anthony wheatley born january 19 1972 is the running backs coach of michigan and a
former professional american football player who played 10 seasons
* Nick Salter 0.93916
nick salter born 30 july 1987 is an australian rules footballer who played for port adelai
de football club in the australian football league aflhe was
==========================================================
Cluster 6 she:0.138 her:0.089 actress:0.014 film:0.013 miss:0.012
* Lauren Royal 0.93445
lauren royal born march 3 circa 1965 is a book writer from california royal has written bo
th historic and novelistic booksa selfproclaimed angels baseball fan
* Barbara Hershey 0.93496
barbara hershey born barbara lynn herzstein february 5 1948 once known as barbara seagull
is an american actress in a career spanning nearly 50 years
* Janet Jackson 0.93559
janet damita jo jackson born may 16 1966 is an american singer songwriter and actress know
n for a series of sonically innovative socially conscious and
* Jane Fonda 0.93759
jane fonda born lady jayne seymour fonda december 21 1937 is an american actress writer po
litical activist former fashion model and fitness guru she is
* Janine Shepherd 0.93833
janine lee shepherd am born 1962 is an australian pilot and former crosscountry skier shep
herds career as an athlete ended when she suffered major injuries
* Ellina Graypel 0.93847
ellina graypel born july 19 1972 is an awardwinning russian singersongwriter she was born
near the volga river in the heart of russia she spent
* Alexandra Potter 0.93858
alexandra potter born 1970 is a british author of romantic comediesborn in bradford yorksh
ire england and educated at liverpool university gaining an honors degree in
* Melissa Hart (actress) 0.93913
melissa hart is an american actress singer and teacher she made her broadway debut in 1966
as an ensemble member in jerry bocks the apple
==========================================================
Cluster 7 music:0.057 album:0.040 band:0.035 orchestra:0.023 released:0.022
* Brenton Broadstock 0.95722
brenton broadstock ao born 1952 is an australian composerbroadstock was born in melbourne
he studied history politics and music at monash university and later composition
* Prince (musician) 0.96057
prince rogers nelson born june 7 1958 known by his mononym prince is an american singerson
gwriter multiinstrumentalist and actor he has produced ten platinum albums
* Will.i.am 0.96066
william adams born march 15 1975 known by his stage name william pronounced will i am is a
n american rapper songwriter entrepreneur actor dj record
* Tom Bancroft 0.96117
tom bancroft born 1967 london is a british jazz drummer and composer he began drumming age
d seven and started off playing jazz with his father
* Julian Knowles 0.96152
julian knowles is an australian composer and performer specialising in new and emerging te
chnologies his creative work spans the fields of composition for theatre dance
* Dan Siegel (musician) 0.96223
dan siegel born in seattle washington is a pianist composer and record producer his earlie
r music has been described as new age while his more
* Tony Mills (musician) 0.96238
tony mills born 7 july 1962 in solihull england is an english rock singer best known for h
is work with shy and tnthailing from birmingham
* Don Robertson (composer) 0.96249
don robertson born 1942 is an american composerdon robertson was born in 1942 in denver co
lorado and began studying music with conductor and pianist antonia
==========================================================
Cluster 8 hockey:0.216 nhl:0.134 ice:0.065 season:0.053 league:0.047
* Gord Sherven 0.83598
gordon r sherven born august 21 1963 in gravelbourg saskatchewan and raised in mankota sas
katchewan is a retired canadian professional ice hockey forward who played
* Eric Brewer 0.83765
eric peter brewer born april 17 1979 is a canadian professional ice hockey defenceman for
the anaheim ducks of the national hockey league nhl he
* Stephen Johns (ice hockey) 0.84580
stephen johns born april 18 1992 is an american professional ice hockey defenceman he is c
urrently playing with the rockford icehogs of the american hockey
* Mike Stevens (ice hockey, born 1965) 0.85320
mike stevens born december 30 1965 in kitchener ontario is a retired professional ice hock
ey player who played 23 games in the national hockey league
* Tanner Glass 0.85484
tanner glass born november 29 1983 is a canadian professional ice hockey winger who plays
for the new york rangers of the national hockey league
* Todd Strueby 0.86053
todd kenneth strueby born june 15 1963 in lanigan saskatchewan and raised in humboldt sask
atchewan is a retired canadian professional ice hockey centre who played
* Steven King (ice hockey) 0.86129
steven andrew king born july 22 1969 in east greenwich rhode island is a former ice hockey
forward who played professionally from 1991 to 2000
* Don Jackson (ice hockey) 0.86661
donald clinton jackson born september 2 1956 in minneapolis minnesota and bloomington minn
esota is an ice hockey coach and a retired professional ice hockey player
==========================================================
Cluster 9 party:0.028 election:0.025 minister:0.025 served:0.021 law:0.019
* Doug Lewis 0.96516
douglas grinslade doug lewis pc qc born april 17 1938 is a former canadian politician a ch
artered accountant and lawyer by training lewis entered the
* David Anderson (British Columbia politician) 0.96530
david a anderson pc oc born august 16 1937 in victoria british columbia is a former canadi
an cabinet minister educated at victoria college in victoria
* Lucienne Robillard 0.96679
lucienne robillard pc born june 16 1945 is a canadian politician and a member of the liber
al party of canada she sat in the house
* Bob Menendez 0.96686
robert bob menendez born january 1 1954 is the senior united states senator from new jerse
y he is a member of the democratic party first
* Mal Sandon 0.96706
malcolm john mal sandon born 16 september 1945 is an australian politician he was an austr
alian labor party member of the victorian legislative council from
* Roger Price (Australian politician) 0.96717
leo roger spurway price born 26 november 1945 is a former australian politician he was ele
cted as a member of the australian house of representatives
* Maureen Lyster 0.96734
maureen anne lyster born 10 september 1943 is an australian politician she was an australi
an labor party member of the victorian legislative assembly from 1985
* Don Bell 0.96739
donald h bell born march 10 1942 in new westminster british columbia is a canadian politic
ian he is currently serving as a councillor for the
==========================================================
###Markdown
Clusters 0, 1, and 5 appear to be still mixed, but others are quite consistent in content.* Cluster 0: artists, actors, film directors, playwrights* Cluster 1: soccer (association football) players, rugby players* Cluster 2: track and field athletes* Cluster 3: baseball players* Cluster 4: professors, researchers, scholars* Cluster 5: Australian rules football players, American football players* Cluster 6: female figures from various fields* Cluster 7: composers, songwriters, singers, music producers* Cluster 8: ice hockey players* Cluster 9: politiciansClusters are now more pure, but some are qualitatively "bigger" than others. For instance, the category of scholars is more general than the category of baseball players. Increasing the number of clusters may split larger clusters. Another way to look at the size of the clusters is to count the number of articles in each cluster.
###Code
np.bincount(cluster_assignment[10]())
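# A quick way to read the largest and smallest clusters off these counts
# (a small sketch; np.argmax / np.argmin return the cluster indices):
sizes = np.bincount(cluster_assignment[10]())
print('largest cluster:', np.argmax(sizes), 'with', sizes.max(), 'articles')
print('smallest cluster:', np.argmin(sizes), 'with', sizes.min(), 'articles')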
###Output
_____no_output_____
###Markdown
**Quiz Question**. Which of the 10 clusters above contains the greatest number of articles?1. Cluster 0: artists, actors, film directors, playwrights2. Cluster 4: professors, researchers, scholars3. Cluster 5: Australian rules football players, American football players4. Cluster 7: composers, songwriters, singers, music producers5. Cluster 9: politicians **Quiz Question**. Which of the 10 clusters contains the smallest number of articles?1. Cluster 1: soccer (association football) players, rugby players2. Cluster 3: baseball players3. Cluster 6: female figures from various fields4. Cluster 7: composers, songwriters, singers, music producers5. Cluster 8: ice hockey players There appears to be at least some connection between the topical consistency of a cluster and the number of its member data points. Let us visualize the case for K=25. For the sake of brevity, we do not print the content of documents. It turns out that the top words with the highest TF-IDF weights in each cluster are representative of the cluster.
###Code
visualize_document_clusters(wiki, tf_idf, centroids[25](), cluster_assignment[25](), 25,
map_index_to_word, display_content=False) # turn off text for brevity
###Output
==========================================================
Cluster 0 law:0.077 district:0.048 court:0.046 republican:0.038 senate:0.038
==========================================================
Cluster 1 research:0.054 professor:0.033 science:0.032 university:0.031 physics:0.029
==========================================================
Cluster 2 hockey:0.216 nhl:0.134 ice:0.065 season:0.052 league:0.047
==========================================================
Cluster 3 party:0.065 election:0.042 elected:0.031 parliament:0.027 member:0.023
==========================================================
Cluster 4 board:0.025 president:0.023 chairman:0.022 business:0.022 executive:0.020
==========================================================
Cluster 5 minister:0.160 prime:0.056 cabinet:0.044 party:0.043 election:0.042
==========================================================
Cluster 6 university:0.044 professor:0.037 studies:0.035 history:0.034 philosophy:0.031
==========================================================
Cluster 7 election:0.066 manitoba:0.058 liberal:0.051 party:0.045 riding:0.043
==========================================================
Cluster 8 racing:0.095 formula:0.056 championship:0.054 race:0.052 poker:0.051
==========================================================
Cluster 9 economics:0.146 economic:0.096 economist:0.053 policy:0.048 research:0.043
==========================================================
Cluster 10 championships:0.075 olympics:0.050 marathon:0.048 metres:0.048 she:0.048
==========================================================
Cluster 11 she:0.144 her:0.092 miss:0.016 actress:0.015 television:0.012
==========================================================
Cluster 12 he:0.011 radio:0.009 show:0.009 that:0.009 his:0.009
==========================================================
Cluster 13 baseball:0.109 league:0.104 major:0.052 games:0.047 season:0.045
==========================================================
Cluster 14 art:0.144 museum:0.076 gallery:0.056 artist:0.033 arts:0.031
==========================================================
Cluster 15 football:0.125 afl:0.060 nfl:0.051 season:0.049 played:0.045
==========================================================
Cluster 16 music:0.097 jazz:0.061 piano:0.033 composer:0.029 orchestra:0.028
==========================================================
Cluster 17 league:0.052 rugby:0.044 club:0.043 cup:0.042 season:0.042
==========================================================
Cluster 18 poetry:0.055 novel:0.045 book:0.042 published:0.039 fiction:0.035
==========================================================
Cluster 19 film:0.095 theatre:0.038 films:0.035 directed:0.029 television:0.028
==========================================================
Cluster 20 album:0.064 band:0.049 music:0.037 released:0.033 song:0.025
==========================================================
Cluster 21 bishop:0.075 air:0.066 force:0.048 church:0.047 command:0.045
==========================================================
Cluster 22 orchestra:0.146 opera:0.116 symphony:0.106 conductor:0.077 music:0.064
==========================================================
Cluster 23 basketball:0.120 coach:0.105 nba:0.065 head:0.042 season:0.040
==========================================================
Cluster 24 tour:0.256 pga:0.213 golf:0.142 open:0.073 golfer:0.062
==========================================================
###Markdown
Looking at the representative examples and top words, we classify each cluster as follows. Notice the bolded items, which indicate the appearance of a new theme.* Cluster 0: **lawyers, judges, legal scholars*** Cluster 1: **professors, researchers, scholars (natural and health sciences)*** Cluster 2: ice hockey players* Cluster 3: politicians* Cluster 4: **government officials*** Cluster 5: politicians* Cluster 6: **professors, researchers, scholars (social sciences and humanities)*** Cluster 7: Canadian politicians* Cluster 8: **car racers*** Cluster 9: **economists*** Cluster 10: track and field athletes* Cluster 11: females from various fields* Cluster 12: (mixed; no clear theme)* Cluster 13: baseball players* Cluster 14: **painters, sculptors, artists*** Cluster 15: Australian rules football players, American football players* Cluster 16: **musicians, composers*** Cluster 17: soccer (association football) players, rugby players* Cluster 18: **poets*** Cluster 19: **film directors, playwrights*** Cluster 20: **songwriters, singers, music producers*** Cluster 21: **generals of U.S. Air Force*** Cluster 22: **music directors, conductors*** Cluster 23: **basketball players*** Cluster 24: **golf players**Indeed, increasing K achieved the desired effect of breaking up large clusters. Depending on the application, this may or may not be preferable to the K=10 analysis.Let's take it to the extreme and set K=100. We have a suspicion that this value is too large. Let us look at the top words from each cluster:
###Code
k=100
visualize_document_clusters(wiki, tf_idf, centroids[k](), cluster_assignment[k](), k,
map_index_to_word, display_content=False)
# turn off text for brevity -- turn it on if you are curious ;)
###Output
==========================================================
Cluster 0 brazilian:0.137 brazil:0.082 de:0.056 rio:0.053 paulo:0.050
==========================================================
Cluster 1 bishop:0.170 diocese:0.085 archbishop:0.083 church:0.072 ordained:0.058
==========================================================
Cluster 2 zealand:0.247 new:0.069 auckland:0.056 wellington:0.031 zealands:0.029
==========================================================
Cluster 3 comics:0.181 comic:0.121 strip:0.042 graphic:0.036 book:0.034
==========================================================
Cluster 4 puerto:0.309 rico:0.220 rican:0.066 juan:0.041 ricos:0.031
==========================================================
Cluster 5 bbc:0.192 radio:0.127 presenter:0.054 show:0.046 news:0.042
==========================================================
Cluster 6 senate:0.059 district:0.053 county:0.051 committee:0.049 state:0.044
==========================================================
Cluster 7 labor:0.105 australian:0.099 liberal:0.071 election:0.067 seat:0.061
==========================================================
Cluster 8 economics:0.065 university:0.048 research:0.045 professor:0.043 economic:0.043
==========================================================
Cluster 9 foreign:0.086 ambassador:0.076 affairs:0.061 nations:0.053 united:0.040
==========================================================
Cluster 10 she:0.188 her:0.052 women:0.026 womens:0.020 council:0.019
==========================================================
Cluster 11 rowing:0.246 sculls:0.097 rower:0.081 olympics:0.073 championships:0.068
==========================================================
Cluster 12 fashion:0.086 photography:0.085 photographer:0.057 photographs:0.038 art:0.025
==========================================================
Cluster 13 republican:0.098 governor:0.051 district:0.044 election:0.043 senate:0.043
==========================================================
Cluster 14 orchestra:0.227 symphony:0.177 philharmonic:0.084 music:0.080 conductor:0.057
==========================================================
Cluster 15 air:0.375 force:0.242 command:0.106 commander:0.094 base:0.080
==========================================================
Cluster 16 baseball:0.098 league:0.097 era:0.083 pitcher:0.083 pitched:0.075
==========================================================
Cluster 17 church:0.114 theology:0.072 theological:0.066 seminary:0.047 christian:0.037
==========================================================
Cluster 18 song:0.071 songs:0.043 music:0.041 album:0.030 singer:0.025
==========================================================
Cluster 19 basketball:0.165 nba:0.113 points:0.067 season:0.044 rebounds:0.044
==========================================================
Cluster 20 art:0.209 museum:0.186 gallery:0.082 arts:0.046 contemporary:0.044
==========================================================
Cluster 21 poetry:0.213 poems:0.083 poet:0.069 poets:0.044 literary:0.040
==========================================================
Cluster 22 guitar:0.215 guitarist:0.045 music:0.045 guitars:0.037 classical:0.028
==========================================================
Cluster 23 novel:0.127 published:0.045 novels:0.044 book:0.039 fiction:0.030
==========================================================
Cluster 24 jazz:0.205 music:0.048 band:0.034 pianist:0.025 recorded:0.023
==========================================================
Cluster 25 polish:0.211 poland:0.097 warsaw:0.091 sejm:0.039 she:0.023
==========================================================
Cluster 26 trinidad:0.259 tobago:0.178 calypso:0.058 caribbean:0.033 soca:0.027
==========================================================
Cluster 27 tour:0.261 pga:0.220 golf:0.140 open:0.073 golfer:0.063
==========================================================
Cluster 28 afl:0.177 football:0.128 australian:0.092 adelaide:0.064 season:0.062
==========================================================
Cluster 29 skating:0.263 skater:0.107 speed:0.095 she:0.066 ice:0.060
==========================================================
Cluster 30 party:0.073 election:0.035 elected:0.029 candidate:0.022 parliament:0.021
==========================================================
Cluster 31 rugby:0.198 cup:0.049 against:0.046 played:0.045 wales:0.040
==========================================================
Cluster 32 book:0.039 books:0.029 published:0.026 editor:0.021 author:0.017
==========================================================
Cluster 33 piano:0.150 music:0.071 orchestra:0.056 competition:0.053 pianist:0.051
==========================================================
Cluster 34 wrestling:0.299 wwe:0.163 wrestler:0.092 championship:0.079 tag:0.078
==========================================================
Cluster 35 opera:0.269 she:0.067 la:0.041 sang:0.040 operatic:0.036
==========================================================
Cluster 36 radio:0.080 show:0.069 host:0.038 sports:0.030 television:0.028
==========================================================
Cluster 37 music:0.131 composition:0.038 composer:0.037 orchestra:0.026 ensemble:0.023
==========================================================
Cluster 38 drummer:0.099 band:0.092 album:0.040 drums:0.039 rock:0.034
==========================================================
Cluster 39 moore:0.306 moores:0.034 her:0.021 she:0.020 sports:0.012
==========================================================
Cluster 40 computer:0.086 engineering:0.072 research:0.045 science:0.044 technology:0.042
==========================================================
Cluster 41 minister:0.164 prime:0.068 cabinet:0.043 party:0.039 government:0.038
==========================================================
Cluster 42 research:0.062 professor:0.035 university:0.034 science:0.031 psychology:0.030
==========================================================
Cluster 43 news:0.127 anchor:0.062 reporter:0.059 she:0.045 correspondent:0.045
==========================================================
Cluster 44 league:0.088 town:0.060 season:0.060 club:0.059 football:0.055
==========================================================
Cluster 45 football:0.046 cup:0.044 club:0.042 team:0.041 league:0.033
==========================================================
Cluster 46 football:0.108 vfl:0.099 australian:0.068 melbourne:0.067 goals:0.064
==========================================================
Cluster 47 design:0.166 architecture:0.119 architectural:0.058 architects:0.038 architect:0.037
==========================================================
Cluster 48 philosophy:0.227 philosophical:0.045 university:0.044 professor:0.041 philosopher:0.041
==========================================================
Cluster 49 physics:0.121 mathematics:0.072 mathematical:0.060 theory:0.053 professor:0.043
==========================================================
Cluster 50 baron:0.070 lord:0.060 lords:0.054 chairman:0.035 british:0.034
==========================================================
Cluster 51 chef:0.143 food:0.136 restaurant:0.095 wine:0.086 cooking:0.064
==========================================================
Cluster 52 fiction:0.138 stories:0.069 short:0.054 fantasy:0.048 writers:0.043
==========================================================
Cluster 53 poker:0.477 wsop:0.121 event:0.091 limit:0.078 winnings:0.072
==========================================================
Cluster 54 canadian:0.122 canada:0.068 toronto:0.053 ontario:0.049 curling:0.028
==========================================================
Cluster 55 sri:0.282 lanka:0.183 lankan:0.094 colombo:0.046 ceylon:0.027
==========================================================
Cluster 56 conductor:0.207 orchestra:0.136 conducting:0.087 music:0.080 symphony:0.073
==========================================================
Cluster 57 prison:0.035 police:0.027 sentenced:0.026 court:0.025 convicted:0.023
==========================================================
Cluster 58 blues:0.234 band:0.047 music:0.039 album:0.037 guitar:0.035
==========================================================
Cluster 59 dj:0.093 hop:0.052 hip:0.051 music:0.048 album:0.037
==========================================================
Cluster 60 de:0.127 la:0.059 el:0.035 mexico:0.026 y:0.025
==========================================================
Cluster 61 jewish:0.193 rabbi:0.132 israel:0.052 hebrew:0.038 jews:0.032
==========================================================
Cluster 62 ballet:0.362 dance:0.109 dancer:0.084 she:0.057 danced:0.044
==========================================================
Cluster 63 hockey:0.220 nhl:0.138 ice:0.067 season:0.053 league:0.048
==========================================================
Cluster 64 law:0.148 court:0.093 judge:0.071 district:0.051 justice:0.043
==========================================================
Cluster 65 coach:0.205 head:0.086 basketball:0.059 coaching:0.052 football:0.046
==========================================================
Cluster 66 armenian:0.278 armenia:0.168 yerevan:0.100 sargsyan:0.055 genocide:0.031
==========================================================
Cluster 67 album:0.088 released:0.044 music:0.040 records:0.033 albums:0.027
==========================================================
Cluster 68 she:0.158 her:0.152 music:0.020 album:0.016 singer:0.013
==========================================================
Cluster 69 theatre:0.194 directed:0.034 production:0.031 play:0.029 actor:0.027
==========================================================
Cluster 70 health:0.099 medical:0.089 medicine:0.086 research:0.039 clinical:0.039
==========================================================
Cluster 71 european:0.145 parliament:0.115 party:0.053 member:0.049 committee:0.048
==========================================================
Cluster 72 marathon:0.459 half:0.087 she:0.082 hours:0.063 championships:0.062
==========================================================
Cluster 73 she:0.147 her:0.105 actress:0.098 film:0.063 role:0.054
==========================================================
Cluster 74 she:0.101 her:0.065 women:0.012 show:0.010 television:0.009
==========================================================
Cluster 75 lds:0.196 church:0.177 churchs:0.099 latterday:0.074 byu:0.073
==========================================================
Cluster 76 quebec:0.242 qubcois:0.064 universit:0.061 minister:0.059 parti:0.051
==========================================================
Cluster 77 film:0.233 festival:0.085 films:0.048 documentary:0.048 feature:0.045
==========================================================
Cluster 78 hong:0.288 kong:0.268 chinese:0.068 china:0.037 wong:0.035
==========================================================
Cluster 79 soccer:0.296 league:0.072 indoor:0.065 team:0.053 season:0.052
==========================================================
Cluster 80 he:0.011 that:0.009 his:0.009 world:0.008 it:0.007
==========================================================
Cluster 81 ireland:0.092 northern:0.072 election:0.072 irish:0.066 gael:0.054
==========================================================
Cluster 82 comedy:0.048 series:0.047 actor:0.043 television:0.038 role:0.037
==========================================================
Cluster 83 racing:0.128 formula:0.080 race:0.066 car:0.061 driver:0.055
==========================================================
Cluster 84 election:0.096 manitoba:0.086 liberal:0.071 party:0.067 conservative:0.060
==========================================================
Cluster 85 business:0.038 company:0.031 chairman:0.027 ceo:0.025 management:0.023
==========================================================
Cluster 86 chess:0.414 grandmaster:0.085 olympiad:0.066 championship:0.064 fide:0.059
==========================================================
Cluster 87 tennis:0.077 doubles:0.068 boxing:0.057 title:0.048 open:0.047
==========================================================
Cluster 88 president:0.038 served:0.028 board:0.028 university:0.026 education:0.022
==========================================================
Cluster 89 campaign:0.061 presidential:0.054 political:0.047 republican:0.037 bush:0.037
==========================================================
Cluster 90 football:0.120 nfl:0.106 yards:0.081 bowl:0.052 quarterback:0.041
==========================================================
Cluster 91 baseball:0.117 league:0.108 runs:0.061 major:0.052 batted:0.044
==========================================================
Cluster 92 album:0.115 her:0.073 billboard:0.066 chart:0.064 singles:0.064
==========================================================
Cluster 93 film:0.087 films:0.050 directed:0.029 television:0.024 actor:0.022
==========================================================
Cluster 94 championships:0.106 metres:0.086 she:0.059 m:0.059 athletics:0.054
==========================================================
Cluster 95 art:0.109 gallery:0.040 artist:0.036 paintings:0.032 painting:0.032
==========================================================
Cluster 96 band:0.120 album:0.040 bands:0.035 bass:0.031 rock:0.030
==========================================================
Cluster 97 miss:0.361 pageant:0.209 usa:0.127 she:0.110 teen:0.063
==========================================================
Cluster 98 freestyle:0.155 swimming:0.120 m:0.119 swimmer:0.090 heat:0.075
==========================================================
Cluster 99 army:0.081 commander:0.080 command:0.076 military:0.076 staff:0.058
==========================================================
###Markdown
The class of soccer (association football) players has been broken into two clusters (44 and 45). The same goes for Australian rules football players (clusters 28 and 46 in the listing above). The class of baseball players has also been broken into two clusters (16 and 91).**A high value of K encourages pure clusters, but we cannot keep increasing K. For large enough K, related documents end up going to different clusters.**That said, the result for K=100 is not entirely bad. After all, it gives us separate clusters for such categories as Brazil, wrestling, computer science and the Mormon Church. If we set K somewhere between 25 and 100, we should be able to avoid breaking up clusters while discovering new ones.Also, we should ask ourselves how much **granularity** we want in our clustering. If we want a rough sketch of Wikipedia, we don't want overly detailed clusters. On the other hand, having many clusters can be valuable when we are zooming into a certain part of Wikipedia.**There is no golden rule for choosing K. It all depends on the particular application and domain we are in.**Another heuristic people use that does not rely on so much visualization, which can be hard in many applications (including here!), is as follows. Track heterogeneity versus K and look for the "elbow" of the curve, where heterogeneity decreases rapidly before this value of K but only gradually for larger values of K. This naturally trades off between minimizing heterogeneity and keeping model complexity low. In the heterogeneity versus K plot made above, we did not yet really see a flattening out of the heterogeneity, which might indicate that K=100 is indeed "reasonable" and we only see real overfitting for larger values of K (which are even harder to visualize using the methods we attempted above). **Quiz Question**. Another sign that K is too large is having lots of small clusters. Look at the distribution of cluster sizes (by number of member data points). How many of the 100 clusters have fewer than 236 articles, i.e. 0.4% of the dataset?Hint: Use `cluster_assignment[100]()`, with the extra pair of parentheses for delayed loading.
###Code
counts = np.bincount(cluster_assignment[100]())
len(counts[counts < 236])
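# Sketch of the "elbow" heuristic mentioned in the markdown above (left as comments
# because it assumes a hypothetical dict, here called `heterogeneity`, mapping each K
# tried earlier in the notebook to its k-means objective; the real variable name may differ):
# import matplotlib.pyplot as plt
# ks = sorted(heterogeneity.keys())
# plt.plot(ks, [heterogeneity[k] for k in ks], marker='o')
# plt.xlabel('K'); plt.ylabel('Heterogeneity'); plt.show()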
###Output
_____no_output_____ |
variance-of-hypothetical-case.ipynb | ###Markdown
variance in denial rate via random sampling```Starting...Done. Compiling results. Trials Run: 500000Results for: n denying:=======================- Mean(statistic=660.001498, minmax=(659.9646687985933, 660.0383272014067))- Variance(statistic=250.66869175599598, minmax=(249.84406514239993, 251.49331836959203))- Std_dev(statistic=15.832520069653977, minmax=(15.806477891593625, 15.85856224771433))Results for: P(deny):=====================- Mean(statistic=0.4000009078787878, minmax=(0.39997858715066253, 0.40002322860691303))- Variance(statistic=9.207298136124737e-05, minmax=(9.177008820657481e-05, 9.237587451591993e-05))- Std_dev(statistic=0.009595466708881199, minmax=(0.009579683570662804, 0.009611249847099594))``````Trials Run: 50000Denying members | Min: 594, Max: 727P(deny) | Min: 0.36, Max: 0.4406060606060606Results for: n denying:=======================- Mean(statistic=660.01426, minmax=(659.8978372145845, 660.1306827854156))- Variance(statistic=250.49061665239998, minmax=(247.88477084380597, 253.096462460994))- Std_dev(statistic=15.82689535734662, minmax=(15.74457201629463, 15.90921869839861))Results for: P(deny):=====================- Mean(statistic=0.40000864242424233, minmax=(0.3999380831603541, 0.40007920168813055))- Variance(statistic=9.200757269142332e-05, minmax=(9.105042087926759e-05, 9.296472450357906e-05))- Std_dev(statistic=0.009592057792331285, minmax=(0.009542164858360382, 0.009641950726302188))``````Trials Run: 500000Denying members | Min: 585, Max: 740P(deny) | Min: 0.35454545454545455, Max: 0.4484848484848485Results for: n denying:=======================- Mean(statistic=660.029864, minmax=(659.9930150989986, 660.0667129010013))- Variance(statistic=250.936924141504, minmax=(250.1114151218836, 251.7624331611244))- Std_dev(statistic=15.840988736234364, minmax=(15.81493262845707, 15.867044844011657))Results for: P(deny):=====================- Mean(statistic=0.40001809939393945, minmax=(0.3999957667266659, 0.400040432061213))- Variance(statistic=9.217150565344503e-05, minmax=(9.186828838269374e-05, 9.247472292419632e-05))- Std_dev(statistic=0.009600599234081434, minmax=(0.009584807653610347, 0.009616390814552522))```
###Code
import math
import numpy as np
import scipy.stats as stats
rand = np.random.Generator(np.random.PCG64DXSM())
def pad_and_print(s, pad=0):
to_print = s + " " * max(0, pad-len(s))
print(to_print, end='')
return len(to_print)
def measure_variance(repeat_times=10_000, n_members=4500, sample_size=1650, overall_denial_rate=0.4):
p_deny = overall_denial_rate
n_deny = round(p_deny * n_members)
n_conf = n_members - n_deny
# since we want to count the number of denying members in samples,
# using `1` for denying members and `0` for confirming members means
# that we can just `sum` the sample.
members = [1] * n_deny + [0] * n_conf
results = []
results_p = []
print(f"Starting...")
progress_mod = repeat_times // 20
max_len = 11
for t in range(repeat_times):
member_list = rand.choice(members, sample_size, replace=False, shuffle=True)
measured_denying = sum(member_list)
measured_p_deny = measured_denying / sample_size
results.append(measured_denying)
results_p.append(measured_p_deny)
if t % progress_mod == 0:
status_msg = f"\r{t} / {repeat_times}: {measured_denying} denying (P = {measured_p_deny:.3f})"
max_len = pad_and_print(status_msg, pad=max_len)
pad_and_print(f"\rDone. Compiling results.", pad=max_len)
print()
print(f"Trials Run: {repeat_times}")
print()
print(f"Denying members | Min: {min(results)}, Max: {max(results)}")
print(f"P(deny) | Min: {min(results_p)}, Max: {max(results_p)}")
print()
for name, res in [('n denying', results), ('P(deny)', results_p)]:
mvs = stats.bayes_mvs(res)
res_title = f"Results for: {name}:"
print(res_title)
print("=" * len(res_title))
for smm in [mvs[0], mvs[2]]:
print(f"- {smm}")
print()
# measure_variance(repeat_times=500_000)
# e.g. AVP -- 0 / 33 denials
print("AVP sample at 0.03 denial")
measure_variance(n_members=1650, sample_size=33, overall_denial_rate=0.03)
# - Mean(statistic=0.9967, minmax=(0.9808556183216671, 1.012544381678333))
# - Std_dev(statistic=0.9632700088760161, minmax=(0.952066339147559, 0.9744736786044732))
print("AVP sample at 0.015 denial")
measure_variance(n_members=1650, sample_size=33, overall_denial_rate=0.015)
# - Mean(statistic=0.4963, minmax=(0.48471356990685976, 0.5078864300931403))
# - Std_dev(statistic=0.7044049332592724, minmax=(0.6962120899706691, 0.7125977765478758))
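# Analytic cross-check (a sketch): sampling without replacement makes the count of
# denying members hypergeometric. For the default run above (4500 members, 40%
# denying, samples of 1650), the closed-form mean/std should match the simulation.
M, K_deny, n_draw = 4500, round(0.4 * 4500), 1650
analytic = stats.hypergeom(M, K_deny, n_draw)
print(f"analytic n denying: mean = {analytic.mean():.3f}, std dev = {analytic.std():.3f}")
print(f"analytic P(deny) std dev = {analytic.std() / n_draw:.6f}")
# Expected: mean 660, std dev ~15.84, and P(deny) std dev ~0.0096 — in line with the
# simulated results quoted in the markdown above.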
###Output
Starting...
Done. Compiling results.
Trials Run: 10000
Denying members | Min: 0, Max: 6
P(deny) | Min: 0.0, Max: 0.18181818181818182
Results for: n denying:
=======================
- Mean(statistic=0.994, minmax=(0.9779631916894328, 1.010036808310567))
- Std_dev(statistic=0.9749687174468727, minmax=(0.9636289815418819, 0.9863084533518636))
Results for: P(deny):
=====================
- Mean(statistic=0.030121212121212118, minmax=(0.029635248233013115, 0.03060717600941112))
- Std_dev(statistic=0.029544506589299177, minmax=(0.02920087822854188, 0.029888134950056475))
Starting...
Done. Compiling results.
Trials Run: 10000
Denying members | Min: 0, Max: 6
P(deny) | Min: 0.0, Max: 0.18181818181818182
Results for: n denying:
=======================
- Mean(statistic=0.5075, minmax=(0.4959655555664573, 0.5190344444335426))
- Std_dev(statistic=0.7012444295678933, minmax=(0.693088345691716, 0.7094005134440707))
Results for: P(deny):
=====================
- Mean(statistic=0.015378787878787879, minmax=(0.015029259259589617, 0.01572831649798614))
- Std_dev(statistic=0.02124983119902707, minmax=(0.021002677142173208, 0.02149698525588093))
|
2. Data Modeling/Introduction to Data Modeling/L1_Exercise_2_Creating_a_Table_with_Apache_Cassandra.ipynb | ###Markdown
Lesson 1 Exercise 2: Creating a Table with Apache Cassandra Walk through the basics of Apache Cassandra. Complete the following tasks: Create a table in Apache Cassandra, Insert rows of data, Run a simple CQL query to validate the information. Placeholder markers in the code cells denote where the code needs to be completed. Import the Apache Cassandra Python package
###Code
import cassandra
###Output
_____no_output_____
###Markdown
Create a connection to the database
###Code
from cassandra.cluster import Cluster
try:
cluster = Cluster(['127.0.0.1']) #If you have a locally installed Apache Cassandra instance
session = cluster.connect()
except Exception as e:
print(e)
###Output
_____no_output_____
###Markdown
TO-DO: Create a keyspace to do the work in
###Code
## TO-DO: Create the keyspace
try:
session.execute("""
CREATE KEYSPACE IF NOT EXISTS test_keyspace
WITH REPLICATION =
{ 'class' : 'SimpleStrategy', 'replication_factor' : 1 }"""
)
except Exception as e:
print(e)
###Output
_____no_output_____
###Markdown
TO-DO: Connect to the Keyspace
###Code
## To-Do: Add in the keyspace you created
try:
session.set_keyspace('test_keyspace')
except Exception as e:
print(e)
###Output
_____no_output_____
###Markdown
Create a Song Library that contains a list of songs, including the song name, artist name, year, album it was from, and if it was a single. Columns: `song_title`, `artist_name`, `year`, `album_name`, `single`. TO-DO: You need to create a table to be able to run the following query: `select * from songs WHERE year=1970 AND artist_name="The Beatles"`
###Code
## TO-DO: Complete the query below
query = "CREATE TABLE IF NOT EXISTS songs "
query = query + "(song_title text, artist_name text, year int, album_name text, single boolean, PRIMARY KEY (year, artist_name))"
try:
session.execute(query)
except Exception as e:
print(e)
###Output
_____no_output_____
###Markdown
TO-DO: Insert the following two rows in your table:`First Row: "1970", "Let It Be", "The Beatles", "Across The Universe", "False"` `Second Row: "1965", "Think For Yourself", "The Beatles", "Rubber Soul", "False"`
###Code
## Add in query and then run the insert statement
query = "INSERT INTO songs (song_title, artist_name, year, album_name, single)"
query = query + " VALUES (%s, %s, %s, %s, %s)"
try:
session.execute(query, ('Let It Be', 'The Beatles', 1970, 'Across The Universe', False))
except Exception as e:
print(e)
try:
session.execute(query, ('Think For Yourself', 'The Beatles', 1965, 'Rubber Soul', False))
except Exception as e:
print(e)
###Output
_____no_output_____
###Markdown
TO-DO: Validate your data was inserted into the table.
###Code
## TO-DO: Complete and then run the select statement to validate the data was inserted into the table
query = 'SELECT * FROM songs'
try:
rows = session.execute(query)
except Exception as e:
print(e)
for row in rows:
print (row.year, row.album_name, row.artist_name)
###Output
1965 Rubber Soul The Beatles
1970 Across The Universe The Beatles
###Markdown
TO-DO: Validate the Data Model with the original query.`select * from songs WHERE YEAR=1970 AND artist_name="The Beatles"`
###Code
##TO-DO: Complete the select statement to run the query
query = "SELECT * FROM songs WHERE YEAR=1970 AND artist_name='The Beatles'"
try:
rows = session.execute(query)
except Exception as e:
print(e)
for row in rows:
print (row.year, row.album_name, row.artist_name)
###Output
1970 Across The Universe The Beatles
###Markdown
And Finally close the session and cluster connection
###Code
session.shutdown()
cluster.shutdown()
###Output
_____no_output_____ |
term-2-concentrations/capstone-project-2-nlp-machine-translation/machine_translation.ipynb | ###Markdown
Artificial Intelligence Nanodegree Machine Translation ProjectIn this notebook, sections that end with **'(IMPLEMENTATION)'** in the header indicate that the following blocks of code will require additional functionality which you must provide. Please be sure to read the instructions carefully! IntroductionIn this notebook, you will build a deep neural network that functions as part of an end-to-end machine translation pipeline. Your completed pipeline will accept English text as input and return the French translation.- **Preprocess** - You'll convert text to sequence of integers.- **Models** Create models which accepts a sequence of integers as input and returns a probability distribution over possible translations. After learning about the basic types of neural networks that are often used for machine translation, you will engage in your own investigations, to design your own model!- **Prediction** Run the model on English text. DatasetWe begin by investigating the dataset that will be used to train and evaluate your pipeline. The most common datasets used for machine translation are from [WMT](http://www.statmt.org/). However, that will take a long time to train a neural network on. We'll be using a dataset we created for this project that contains a small vocabulary. You'll be able to train your model in a reasonable time with this dataset. Load DataThe data is located in `data/small_vocab_en` and `data/small_vocab_fr`. The `small_vocab_en` file contains English sentences with their French translations in the `small_vocab_fr` file. Load the English and French data from these files from running the cell below.
###Code
import helper
# Load English data
english_sentences = helper.load_data('data/small_vocab_en')
# Load French data
french_sentences = helper.load_data('data/small_vocab_fr')
print('Dataset Loaded')
###Output
_____no_output_____
###Markdown
FilesEach line in `small_vocab_en` contains an English sentence with the respective translation in each line of `small_vocab_fr`. View the first two lines from each file.
###Code
for sample_i in range(2):
print('small_vocab_en Line {}: {}'.format(sample_i + 1, english_sentences[sample_i]))
print('small_vocab_fr Line {}: {}'.format(sample_i + 1, french_sentences[sample_i]))
###Output
_____no_output_____
###Markdown
From looking at the sentences, you can see they have been preprocessed already. The punctuation has been delimited using spaces. All the text has been converted to lowercase. This should save you some time, but the text requires more preprocessing. VocabularyThe complexity of the problem is determined by the complexity of the vocabulary. A more complex vocabulary is a more complex problem. Let's look at the complexity of the dataset we'll be working with.
###Code
import collections
english_words_counter = collections.Counter([word for sentence in english_sentences for word in sentence.split()])
french_words_counter = collections.Counter([word for sentence in french_sentences for word in sentence.split()])
print('{} English words.'.format(len([word for sentence in english_sentences for word in sentence.split()])))
print('{} unique English words.'.format(len(english_words_counter)))
print('10 Most common words in the English dataset:')
print('"' + '" "'.join(list(zip(*english_words_counter.most_common(10)))[0]) + '"')
print()
print('{} French words.'.format(len([word for sentence in french_sentences for word in sentence.split()])))
print('{} unique French words.'.format(len(french_words_counter)))
print('10 Most common words in the French dataset:')
print('"' + '" "'.join(list(zip(*french_words_counter.most_common(10)))[0]) + '"')
###Output
_____no_output_____
###Markdown
For comparison, _Alice's Adventures in Wonderland_ contains 2,766 unique words of a total of 15,500 words. PreprocessFor this project, you won't use text data as input to your model. Instead, you'll convert the text into sequences of integers using the following preprocess methods:1. Tokenize the words into ids2. Add padding to make all the sequences the same length.Time to start preprocessing the data... Tokenize (IMPLEMENTATION)For a neural network to predict on text data, it first has to be turned into data it can understand. Text data like "dog" is a sequence of ASCII character encodings. Since a neural network is a series of multiplication and addition operations, the input data needs to be number(s).We can turn each character into a number or each word into a number. These are called character and word ids, respectively. Character ids are used for character level models that generate text predictions for each character. A word level model uses word ids that generate text predictions for each word. Word level models tend to learn better, since they are lower in complexity, so we'll use those.Turn each sentence into a sequence of word ids using Keras's [`Tokenizer`](https://keras.io/preprocessing/text/tokenizer) function. Use this function to tokenize `english_sentences` and `french_sentences` in the cell below.Running the cell will run `tokenize` on sample data and show output for debugging.
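For orientation before filling in `tokenize`, here is a minimal sketch of how Keras's `Tokenizer` is typically used on its own (the `sample_texts` list is hypothetical, and this is not the graded implementation):

```python
from keras.preprocessing.text import Tokenizer

sample_texts = ['the quick brown fox', 'the lazy dog']  # hypothetical data
tk = Tokenizer()
tk.fit_on_texts(sample_texts)                 # builds the word -> id vocabulary
print(tk.word_index)                          # e.g. {'the': 1, 'quick': 2, ...}
print(tk.texts_to_sequences(sample_texts))    # e.g. [[1, 2, 3, 4], [1, 5, 6]]
```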
###Code
import project_tests as tests
from keras.preprocessing.text import Tokenizer
def tokenize(x):
"""
Tokenize x
:param x: List of sentences/strings to be tokenized
:return: Tuple of (tokenized x data, tokenizer used to tokenize x)
"""
# TODO: Implement
return None, None
tests.test_tokenize(tokenize)
# Tokenize Example output
text_sentences = [
'The quick brown fox jumps over the lazy dog .',
'By Jove , my quick study of lexicography won a prize .',
'This is a short sentence .']
text_tokenized, text_tokenizer = tokenize(text_sentences)
print(text_tokenizer.word_index)
print()
for sample_i, (sent, token_sent) in enumerate(zip(text_sentences, text_tokenized)):
print('Sequence {} in x'.format(sample_i + 1))
print(' Input: {}'.format(sent))
print(' Output: {}'.format(token_sent))
###Output
_____no_output_____
###Markdown
Padding (IMPLEMENTATION)When batching the sequence of word ids together, each sequence needs to be the same length. Since sentences are dynamic in length, we can add padding to the end of the sequences to make them the same length.Make sure all the English sequences have the same length and all the French sequences have the same length by adding padding to the **end** of each sequence using Keras's [`pad_sequences`](https://keras.io/preprocessing/sequence/pad_sequences) function.
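As a quick illustration of the padding behavior the project asks for (padding added at the end of each sequence, i.e. `padding='post'`), a small sketch with made-up sequences:

```python
from keras.preprocessing.sequence import pad_sequences

seqs = [[1, 2, 3], [4, 5], [6]]               # hypothetical id sequences
print(pad_sequences(seqs, maxlen=4, padding='post'))
# [[1 2 3 0]
#  [4 5 0 0]
#  [6 0 0 0]]
```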
###Code
import numpy as np
from keras.preprocessing.sequence import pad_sequences
def pad(x, length=None):
"""
Pad x
:param x: List of sequences.
:param length: Length to pad the sequence to. If None, use length of longest sequence in x.
:return: Padded numpy array of sequences
"""
# TODO: Implement
return None
tests.test_pad(pad)
# Pad Tokenized output
test_pad = pad(text_tokenized)
for sample_i, (token_sent, pad_sent) in enumerate(zip(text_tokenized, test_pad)):
print('Sequence {} in x'.format(sample_i + 1))
print(' Input: {}'.format(np.array(token_sent)))
print(' Output: {}'.format(pad_sent))
###Output
_____no_output_____
###Markdown
Preprocess PipelineYour focus for this project is to build neural network architecture, so we won't ask you to create a preprocess pipeline. Instead, we've provided you with the implementation of the `preprocess` function.
###Code
def preprocess(x, y):
"""
Preprocess x and y
:param x: Feature List of sentences
:param y: Label List of sentences
:return: Tuple of (Preprocessed x, Preprocessed y, x tokenizer, y tokenizer)
"""
preprocess_x, x_tk = tokenize(x)
preprocess_y, y_tk = tokenize(y)
preprocess_x = pad(preprocess_x)
preprocess_y = pad(preprocess_y)
# Keras's sparse_categorical_crossentropy function requires the labels to be in 3 dimensions
preprocess_y = preprocess_y.reshape(*preprocess_y.shape, 1)
return preprocess_x, preprocess_y, x_tk, y_tk
preproc_english_sentences, preproc_french_sentences, english_tokenizer, french_tokenizer =\
preprocess(english_sentences, french_sentences)
print('Data Preprocessed')
###Output
_____no_output_____
###Markdown
ModelsIn this section, you will experiment with various neural network architectures.You will begin by training four relatively simple architectures.- Model 1 is a simple RNN- Model 2 is an RNN with Embedding- Model 3 is a Bidirectional RNN- Model 4 is an optional Encoder-Decoder RNNAfter experimenting with the four simple architectures, you will construct a deeper architecture that is designed to outperform all four models. Ids Back to TextThe neural network will be translating the input to word ids, which isn't the final form we want. We want the French translation. The function `logits_to_text` will bridge the gap between the logits from the neural network and the French translation. You'll be using this function to better understand the output of the neural network.
###Code
def logits_to_text(logits, tokenizer):
"""
Turn logits from a neural network into text using the tokenizer
:param logits: Logits from a neural network
:param tokenizer: Keras Tokenizer fit on the labels
:return: String that represents the text of the logits
"""
index_to_words = {id: word for word, id in tokenizer.word_index.items()}
index_to_words[0] = '<PAD>'
return ' '.join([index_to_words[prediction] for prediction in np.argmax(logits, 1)])
print('`logits_to_text` function loaded.')
###Output
_____no_output_____
###Markdown
Model 1: RNN (IMPLEMENTATION)A basic RNN model is a good baseline for sequence data. In this model, you'll build an RNN that translates English to French.
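As orientation only (hypothetical sizes, not the graded implementation), one common Keras shape for this kind of sequence labeller pairs a recurrent layer that returns the full sequence with a time-distributed softmax over the French vocabulary:

```python
from keras.layers import GRU, Input, Dense, TimeDistributed
from keras.models import Model

seq_len, french_vocab = 21, 345                       # hypothetical sizes
inputs = Input(shape=(seq_len, 1))                    # one word id per timestep
rnn = GRU(64, return_sequences=True)(inputs)          # an output at every timestep
probs = TimeDistributed(Dense(french_vocab, activation='softmax'))(rnn)
Model(inputs, probs).summary()
```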
###Code
from keras.layers import GRU, Input, Dense, TimeDistributed
from keras.models import Model
from keras.layers import Activation
from keras.optimizers import Adam
from keras.losses import sparse_categorical_crossentropy
def simple_model(input_shape, output_sequence_length, english_vocab_size, french_vocab_size):
"""
Build and train a basic RNN on x and y
:param input_shape: Tuple of input shape
:param output_sequence_length: Length of output sequence
:param english_vocab_size: Number of unique English words in the dataset
:param french_vocab_size: Number of unique French words in the dataset
:return: Keras model built, but not trained
"""
# TODO: Build the layers
model = None
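# Note: `learning_rate` is referenced in the compile call below but is not defined in
# this template — define it (a hyperparameter you choose) when building the model.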
model.compile(loss=sparse_categorical_crossentropy,
optimizer=Adam(learning_rate),
metrics=['accuracy'])
return model
tests.test_simple_model(simple_model)
# Reshaping the input to work with a basic RNN
tmp_x = pad(preproc_english_sentences, preproc_french_sentences.shape[1])
tmp_x = tmp_x.reshape((-1, preproc_french_sentences.shape[-2], 1))
# Train the neural network
simple_rnn_model = simple_model(
tmp_x.shape,
preproc_french_sentences.shape[1],
len(english_tokenizer.word_index),
len(french_tokenizer.word_index))
simple_rnn_model.fit(tmp_x, preproc_french_sentences, batch_size=1024, epochs=10, validation_split=0.2)
# Print prediction(s)
print(logits_to_text(simple_rnn_model.predict(tmp_x[:1])[0], french_tokenizer))
###Output
_____no_output_____
###Markdown
Model 2: Embedding (IMPLEMENTATION)You've turned the words into ids, but there's a better representation of a word: word embeddings. An embedding is a vector representation of a word that lies close to similar words in n-dimensional space, where n is the size of the embedding vectors.In this model, you'll create an RNN model that uses embeddings.
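To make the idea concrete, a minimal sketch of an `Embedding` layer with hypothetical sizes: it takes a sequence of integer word ids and returns a sequence of dense vectors that the RNN can consume.

```python
from keras.layers import Embedding, Input
from keras.models import Model

seq_len, english_vocab, embed_dim = 15, 200, 64       # hypothetical sizes
ids_in = Input(shape=(seq_len,))                      # integer word ids
vectors = Embedding(input_dim=english_vocab, output_dim=embed_dim,
                    input_length=seq_len)(ids_in)     # -> (batch, seq_len, embed_dim)
# note: input_dim must exceed the largest id that appears in the data
Model(ids_in, vectors).summary()
```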
###Code
from keras.layers.embeddings import Embedding
def embed_model(input_shape, output_sequence_length, english_vocab_size, french_vocab_size):
"""
Build and train a RNN model using word embedding on x and y
:param input_shape: Tuple of input shape
:param output_sequence_length: Length of output sequence
:param english_vocab_size: Number of unique English words in the dataset
:param french_vocab_size: Number of unique French words in the dataset
:return: Keras model built, but not trained
"""
# TODO: Implement
return None
tests.test_embed_model(embed_model)
# TODO: Reshape the input
# TODO: Train the neural network
# TODO: Print prediction(s)
###Output
_____no_output_____
###Markdown
Model 3: Bidirectional RNNs (IMPLEMENTATION)One restriction of an RNN is that it can't see future input, only the past. This is where bidirectional recurrent neural networks come in: they process the sequence in both directions, so they can also use future context.
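A small illustration of the wrapper (hypothetical sizes): `Bidirectional` runs a copy of the wrapped recurrent layer in each direction and concatenates the two outputs, doubling the feature dimension.

```python
from keras.layers import Bidirectional, GRU, Input
from keras.models import Model

seq_len = 21                                                  # hypothetical length
x_in = Input(shape=(seq_len, 1))
bidir = Bidirectional(GRU(64, return_sequences=True))(x_in)   # -> (batch, seq_len, 128)
Model(x_in, bidir).summary()
```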
###Code
from keras.layers import Bidirectional
def bd_model(input_shape, output_sequence_length, english_vocab_size, french_vocab_size):
"""
Build and train a bidirectional RNN model on x and y
:param input_shape: Tuple of input shape
:param output_sequence_length: Length of output sequence
:param english_vocab_size: Number of unique English words in the dataset
:param french_vocab_size: Number of unique French words in the dataset
:return: Keras model built, but not trained
"""
# TODO: Implement
return None
tests.test_bd_model(bd_model)
# TODO: Train and Print prediction(s)
###Output
_____no_output_____
###Markdown
Model 4: Encoder-Decoder (OPTIONAL)Time to look at encoder-decoder models. This model is made up of an encoder and decoder. The encoder creates a matrix representation of the sentence. The decoder takes this matrix as input and predicts the translation as output.Create an encoder-decoder model in the cell below.
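One common way to wire this in Keras (a sketch with hypothetical sizes, not necessarily the model you should submit) is to encode the input into a single state vector, repeat that vector once per output timestep with `RepeatVector`, and decode with a second recurrent layer:

```python
from keras.layers import GRU, Input, Dense, TimeDistributed, RepeatVector
from keras.models import Model

in_len, out_len, french_vocab = 15, 21, 345            # hypothetical sizes
x_in = Input(shape=(in_len, 1))
encoded = GRU(64)(x_in)                                # final state only: (batch, 64)
repeated = RepeatVector(out_len)(encoded)              # (batch, out_len, 64)
decoded = GRU(64, return_sequences=True)(repeated)
probs = TimeDistributed(Dense(french_vocab, activation='softmax'))(decoded)
Model(x_in, probs).summary()
```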
###Code
from keras.layers import RepeatVector
def encdec_model(input_shape, output_sequence_length, english_vocab_size, french_vocab_size):
"""
Build and train an encoder-decoder model on x and y
:param input_shape: Tuple of input shape
:param output_sequence_length: Length of output sequence
:param english_vocab_size: Number of unique English words in the dataset
:param french_vocab_size: Number of unique French words in the dataset
:return: Keras model built, but not trained
"""
# OPTIONAL: Implement
return None
tests.test_encdec_model(encdec_model)
# OPTIONAL: Train and Print prediction(s)
###Output
_____no_output_____
###Markdown
Model 5: Custom (IMPLEMENTATION)Use everything you learned from the previous models to create a model that incorporates embedding and a bidirectional RNN into one model.
###Code
def model_final(input_shape, output_sequence_length, english_vocab_size, french_vocab_size):
"""
Build and train a model that incorporates embedding, encoder-decoder, and bidirectional RNN on x and y
:param input_shape: Tuple of input shape
:param output_sequence_length: Length of output sequence
:param english_vocab_size: Number of unique English words in the dataset
:param french_vocab_size: Number of unique French words in the dataset
:return: Keras model built, but not trained
"""
# TODO: Implement
return None
tests.test_model_final(model_final)
print('Final Model Loaded')
###Output
_____no_output_____
###Markdown
Prediction (IMPLEMENTATION)
###Code
import numpy as np
from keras.preprocessing.sequence import pad_sequences
def final_predictions(x, y, x_tk, y_tk):
"""
Gets predictions using the final model
:param x: Preprocessed English data
:param y: Preprocessed French data
:param x_tk: English tokenizer
:param y_tk: French tokenizer
"""
# TODO: Train neural network using model_final
model = None
## DON'T EDIT ANYTHING BELOW THIS LINE
y_id_to_word = {value: key for key, value in y_tk.word_index.items()}
y_id_to_word[0] = '<PAD>'
sentence = 'he saw a old yellow truck'
sentence = [x_tk.word_index[word] for word in sentence.split()]
sentence = pad_sequences([sentence], maxlen=x.shape[-1], padding='post')
sentences = np.array([sentence[0], x[0]])
predictions = model.predict(sentences, len(sentences))
print('Sample 1:')
print(' '.join([y_id_to_word[np.argmax(x)] for x in predictions[0]]))
print('Il a vu un vieux camion jaune')
print('Sample 2:')
print(' '.join([y_id_to_word[np.argmax(x)] for x in predictions[1]]))
print(' '.join([y_id_to_word[np.argmax(x)] for x in y[0]]))
final_predictions(preproc_english_sentences, preproc_french_sentences, english_tokenizer, french_tokenizer)
###Output
_____no_output_____ |
dff9_src/dff9_HW0.ipynb | ###Markdown
dff9: HW0 and HW1 Step 3: Test File ImportReplace the UNI in the steps with your UNI.
###Code
import dff9_HW0
dff9_HW0.t1()
###Output
_____no_output_____
###Markdown
The text above should look like my example, but with your UNI.__Note:__ Any time you change the underlying Python file, you must restart the kernel using the menu. You must then re-import and rerun any cells. Step 4: Install PyMySQL and iPython-SQL- You run the commands below in an Anaconda terminal window.- [Install](https://anaconda.org/anaconda/pymysql) ```pymysql``` in your Anaconda environment.- [Install](https://anaconda.org/conda-forge/ipython-sql) ```iPython-SQL``` in your Anaconda environment.- Restart the notebook Kernel.- The following cell should execute.
###Code
import pymysql
pymysql.__version__
###Output
_____no_output_____
###Markdown
- In the cell below, replace ```dbuser:dbuserdbuser``` with your MySQL user ID and password.
###Code
%load_ext sql
%sql mysql+pymysql://dbuser:dbuserdbuser@localhost
###Output
_____no_output_____
###Markdown
- The following is a simple test. You should get similar results, but yours might be slightly different.
###Code
%sql show tables from information_schema
###Output
* mysql+pymysql://dbuser:***@localhost
73 rows affected.
###Markdown
Step 5: Load Sample Data- In the directory where you cloned the project, there is a sub-folder ```db_book.```- Start DataGrip.- In DataGrip, choose ```File->New DataSource->MySQL.``` - Accept the default name for the data source. - Set the MySQL user ID and password. - You may see a message stating that you need to install database drivers. Install the drivers. - Select the newly created data source and choose ```Run SQL Script```. Navigate to and choose the file ```DDL_drop.sql```.- Do the same for ```smallRelationsInsertFile.sql```.- You will see an icon/text on the side bar labelled ```db_book.``` It may be greyed-out. Right click on the entry and choose ```New query console.``` You may see a message ```Current schema not introspected``` and ```Introspect schema``` on the far right. Click on ```Introspect schema.```- Enter ```select * from course``` in the query console window. Click on the little green arrow to run the query.- Take a screenshot of your DataGrip window and save the screenshot into the folder of the form ```dff9_src``` using your UNI. Remember the name of the file.- Set your file name in the cell below replacing the example and run the cell. You should see your screenshot below. Yours will look a little different from mine. As long as yours shows the query result, you are fine.
###Code
file_name = 'Screen Shot 2022-01-23 at 10.27.12 AM.png'
print("\n")
from IPython.display import Image
Image(filename=file_name)
###Output
###Markdown
Step 6: Verify %sql- Execute the cell below. Your answer will be similar to mine but may not match exactly.
###Code
%sql select * from db_book.course
###Output
* mysql+pymysql://dbuser:***@localhost
13 rows affected.
###Markdown
Step 7: Pandas, CSV and SQL- Run the cell below.
###Code
import pandas
pandas.__version__
###Output
_____no_output_____
###Markdown
- Install [SQLAlchemy](https://anaconda.org/anaconda/sqlalchemy) using an Anaconda prompt.- Restart the notebook kernel and rerun all cells. Then run the cell below.
###Code
from sqlalchemy import create_engine
###Output
_____no_output_____
###Markdown
- Go into DataGrip. Select your local database, e.g. ```@localhost```.- Open a query console and execute ```create database lahmansdb```. Then execute the cell below.__Note:__ Your answer will be different because I have already loaded tables.
###Code
%sql show tables from lahmansdb;
###Output
* mysql+pymysql://dbuser:***@localhost
3 rows affected.
###Markdown
- There is a folder ```data``` in the project you cloned. There is a file in the folder ```People.csv```.- Execute the following code cell. If you are on Windows, you may have to change the path to the file and may have to replace ```/``` with ```\\``` in paths.- You should see a result similar to mine below.
###Code
df = pandas.read_csv('../data/People.csv')
df
###Output
_____no_output_____
###Markdown
- We will now save the data to MySQL. Run the cells below. You will have to change ```dbuser:dbuserdbuser``` to your MySQL user ID and password.
###Code
engine = create_engine("mysql+pymysql://dbuser:dbuserdbuser@localhost")
df.to_sql('people', con=engine, index=False, if_exists='replace', schema='lahmansdb')
###Output
_____no_output_____
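###Markdown
As a complementary check, the table can also be read back through the same SQLAlchemy engine with pandas. This is only a hedged illustration; it reuses the ```engine```, schema and columns already introduced above.
###Code
# Hedged sketch: read a few rows back from the table we just wrote
people_check = pandas.read_sql("SELECT nameLast, bats FROM lahmansdb.people LIMIT 5", con=engine)
people_check
###Output
_____no_output_____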
###Markdown
- Test that you wrote the information to the databases.
###Code
%sql select * from lahmansdb.people where nameLast='Williams' and bats='L'
###Output
* mysql+pymysql://dbuser:***@localhost
19 rows affected.
###Markdown
Step 7: Done (Non-Programming)- You are done. Programming Track - Include a screen capture of your PyCharm execution of the web application. Yours should look like the one below but may be different.
###Code
file_name = 'pycharm.png'
print("\n")
from IPython.display import Image
Image(filename=file_name)
###Output
###Markdown
- Put a screen capture of accessing the web page. Yours will look similar to mine but may be slightly different.
###Code
file_name = 'browser.png'
print("\n")
from IPython.display import Image
Image(filename=file_name)
###Output
|
heart_final.ipynb | ###Markdown
Heart disease detection Abstract: Cardiopathy, or heart disease, is a non-specific term applied to all diseases of the heart. According to the National Center for Health Statistics (CDC), heart disease was the leading cause of death in the United States of America in 2019, at a rate of 161.5 per 100,000 population, followed by cancer and unintentional injury. Although this number has decreased from 257.6 (per 100,000 population) to 161.5 today, minimizing this rate remains of great importance to the scientific community. To this end, measures are being taken to reduce the risk of heart disease. Here is a non-exhaustive list of risk factors:
- High blood pressure: usually referred to as the "silent killer" because it is asymptomatic. The only way to find out if you have high blood pressure is to measure it. You can lower your blood pressure with lifestyle changes or medication.
- Unhealthy blood cholesterol levels: cholesterol is a waxy, fat-like substance made by your liver or found in certain foods. Your liver produces enough for your body's needs, but we often get more cholesterol from the foods we eat. Low-density lipoprotein (LDL) cholesterol is considered "bad" cholesterol, while high-density lipoprotein (HDL) cholesterol is considered "good" cholesterol.
- Genetics.
- Tobacco use.
- Obesity.
Our goal is to model a cardiopathy detector as a binary classifier, given the discrete nature of our target variable. We will train and test our model using the heart.csv file from the University of California, Irvine (UCI) Machine Learning Repository. Exploratory analysis We will now explore the data, analyse the variables and decide which model we will apply to our data.
###Code
import pandas as pd
import numpy as np
from catboost import CatBoostClassifier
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.metrics import precision_score,recall_score, accuracy_score
import shap
df = pd.read_csv("heart.csv")
df.info()
df.head()
df.count()
df.describe()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 303 entries, 0 to 302
Data columns (total 14 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 age 303 non-null int64
1 sex 303 non-null int64
2 cp 303 non-null int64
3 trestbps 303 non-null int64
4 chol 303 non-null int64
5 fbs 303 non-null int64
6 restecg 303 non-null int64
7 thalach 303 non-null int64
8 exang 303 non-null int64
9 oldpeak 303 non-null float64
10 slope 303 non-null int64
11 ca 303 non-null int64
12 thal 303 non-null int64
13 target 303 non-null int64
dtypes: float64(1), int64(13)
memory usage: 33.3 KB
###Markdown
The data is made of 303 patients; the mean age is 54 (the mean is not robust, but the working hypothesis is that the experiment did not include extreme outliers). The mean cholesterol is 246 mg/dl, which is fairly acceptable since the ideal total is about 200 mg/dl, so the experiment is not biased in this respect. Now we'll define our feature variables and our target variable. Before doing any dimensionality reduction we will do some feature engineering. Let's look into the variables:
| variable | type | min-max | description |
| -------- | ---- | ------- | ----------- |
| age | discrete | 29-77 | Age in years |
| sex | binary | 0-1 | Sex (1 = male, 0 = female) |
| cp | discrete | 0-4 | Experienced chest pain (1: typical angina, 2: atypical angina, 3: non-anginal pain, 4: asymptomatic) |
| trestbps | discrete | 94-200 | Resting blood pressure (mm Hg on admission to the hospital) |
| chol | discrete | 126-56 | Serum cholesterol in mg/dl |
| fbs | binary | 0-1 | Fasting blood sugar (> 120 mg/dl; 1 = true, 0 = false) |
| restecg | discrete | 0-2 | Resting electrocardiogram (0 = normal, 1 = ST-T wave abnormality, 2 = probable or definite left ventricular hypertrophy by Estes' criteria) |
| thalach | discrete | 71-202 | The person's maximum heart rate achieved |
| exang | binary | 0-1 | Exercise-induced angina (1 = yes, 0 = no) |
| oldpeak | continuous | 0-1.6 | ST depression induced by exercise relative to rest |
| slope | discrete | 0-2 | The slope of the peak exercise ST segment (1: upsloping, 2: flat, 3: downsloping) |
| ca | discrete | 0-4 | The number of major vessels (0-3) |
| thal | discrete | 0-3 | A blood disorder called thalassemia (3 = normal; 6 = fixed defect; 7 = reversible defect) |
| target | binary | 0-1 | Heart disease (0 = no, 1 = yes) |
Model Although we intuitively focus on age, chest pain and cholesterol levels, we should also give the other features the benefit of the doubt. To this end, we run a CatBoost classifier and extract the SHAP values, which quantify the influence of each feature on the model's output. To prevent overfitting, we use early stopping: a form of regularisation that halts an iterative training process once a monitored metric stops improving. Since CatBoost is trained iteratively by gradient boosting, it lends itself well to this.
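###Markdown
Before the actual training cell, a small illustrative sketch of how CatBoost's overfitting detector is usually wired to a held-out set; the iteration count and patience below are assumptions for illustration, not the values used in this notebook.
###Code
# Hedged sketch (illustrative values): od_wait stops training once the eval metric
# has not improved for that many rounds on the validation data.
from catboost import CatBoostClassifier
es_demo = CatBoostClassifier(iterations=500, od_type='Iter', od_wait=50, verbose=False)
# es_demo.fit(X_train, y_train, eval_set=(X_test, y_test))  # X_train/X_test are created in the next cell
###Output
_____no_output_____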
###Code
X = df.drop('target', axis=1)
y = df['target']
X_train, X_test, y_train, y_test = train_test_split(
X, y, train_size=0.80, random_state=42)
#model train/fit - Early stoping od_type od _wait
model = CatBoostClassifier(iterations=2,
od_type = "Iter",
od_wait = 100).fit(X_train, y_train)
#store predicted value
y_pred = model.predict(X_test)
#define the confusion matrix (use a new name so the imported confusion_matrix function is not shadowed)
cm = confusion_matrix(y, model.predict(X))
#plot confusion matrix
import seaborn as sns
sns.heatmap(cm, annot=True)
#classification report
print(classification_report(y_test, y_pred))
#recall precision
print('Recall : \n',recall_score(y_test, y_pred,
average='weighted'))
print('Precision: \n',precision_score(y_test, y_pred,
average='weighted'))
print('Accuracy: \n',accuracy_score(y_test, y_pred))
explainer = shap.TreeExplainer(model)
shap_values = explainer.shap_values(X)
# summarize the effects of all the features
shap.summary_plot(shap_values, X)
###Output
_____no_output_____ |
2021_07_15.ipynb | ###Markdown
Machine Learning 2
###Code
x=[{'city':'seoul','temp':10.0},{'city':"Dubai",'temp':33.5},{'city':"LA",'temp':20.0}]
x
from sklearn.feature_extraction import DictVectorizer
vec=DictVectorizer(sparse=False)
vec.fit_transform(x) # convert x into one-hot encoded categorical data
vec1=DictVectorizer() # keep sparse=True (the default) to save memory
x1=vec1.fit_transform(x)
x1
x1.toarray()
vec1.get_feature_names()
#ํ
์คํธ ์๋ฃ์ ์๋ํ
text=["๋ด๋ค ๋ด๋ค ๋นํ๊ธฐ ๋ ์๋ผ ๋ ์๋ผ",
"๋์ด ๋์ด ๋ ์๋ผ ์ฐ๋ฆฌ ๋นํ๊ธฐ",
"๋ด๊ฐ ๋ง๋ ๋นํ๊ธฐ ๋ ์๋ผ ๋ ์๋ผ",
"๋ฉ๋ฆฌ ๋ฉ๋ฆฌ ๋ ์๋ผ ์ฐ๋ฆฌ ๋นํ๊ธฐ"]
text
from sklearn.feature_extraction.text import CountVectorizer
vec2=CountVectorizer()
t=vec2.fit_transform(text).toarray() # convert the text into a one-hot count array
import pandas as pd
t1=pd.DataFrame(t, columns=vec2.get_feature_names())
t1
#TFIDF
from sklearn.feature_extraction.text import TfidfVectorizer
tfid=TfidfVectorizer()
x2=tfid.fit_transform(text).toarray() # frequent terms get low weights, rare terms get high weights
x3=pd.DataFrame(x2,columns=tfid.get_feature_names())
x3
#ํน์ฑ๋ณ์ ์์ฑ
import matplotlib.pyplot as plt
import numpy as np
x=np.array([1,2,3,4,5])
y=np.array([5,3,1,5,8])
plt.plot(x,y,'o') # a plain linear regression would fit this poorly
from sklearn.preprocessing import PolynomialFeatures
fg=PolynomialFeatures(degree=3,include_bias=True)# whether to include the intercept term
x1=fg.fit_transform(x[:,np.newaxis])# generate terms up to degree 3
x1
from sklearn.linear_model import LinearRegression
reg=LinearRegression()
reg.fit(x1,y)
yfit=reg.predict(x1) # fitted values
plt.plot(x,y,'ko',label='origin') # 'ko' = black circles
plt.plot(x,yfit,'rs-',label='fitted') # 'rs-' = red squares joined by a line
plt.legend(loc='best') # place the legend at the most suitable position
plt.show()
x_miss=np.array([[1,2,3,None],[5,np.NaN,7,8],[None,10,11,12],[13,np.nan,15,16]])
x_miss
from sklearn.impute import SimpleImputer
im=SimpleImputer(strategy='mean')
im.fit_transform(x_miss) # replace missing values with the column mean
# missing-value imputation and feature generation using the pipeline library
import pandas as pd
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.impute import SimpleImputer
y=pd.Series([2,5,1,6])
model=make_pipeline(SimpleImputer(strategy="mean"),PolynomialFeatures(degree=2),LinearRegression())
model.fit(x_miss,y)
model.predict(x_miss)
from google.colab import files
myfile = files.upload()
import io
import pandas as pd
#pd.read_csv๋ก csvํ์ผ ๋ถ๋ฌ์ค๊ธฐ
data = pd.read_csv(io.BytesIO(myfile['train2.csv']))
data.head()
#๋๋ผ์ด๋ธ์ ์ ๊ทผํ ์ ์๋๋ก ์๋ ์ฝ๋ ์
๋ ฅ
from google.colab import drive
drive.mount('/content/drive')
#๋ถ๋ฌ์ฌ ํ์ผ์ ๊ฒฝ๋ก๋ฅผ filename ๋ณ์์ ์ ์ฅ
filename = '/train.csv'
#pandas read_csv๋ก ๋ถ๋ฌ์ค๊ธฐ
df1 = pd.read_csv(filename)
df1.head()
from google.colab import drive
drive.mount('/content/drive')
filename="/store.csv"
df2=pd.read_csv(filename)
df2.head()
df=pd.merge(df1,df2,on="Store")
df.shape
df.dtypes #์๋ฃํ ํ์ธ
print(len(df['Store'].unique()))
print(len(df['Date'].unique()))
print(df['DayOfWeek'].value_counts())
import pandas as pd
import numpy as np
#df1=pd.read_csv("D:/Users/user/Desktop/train.csv",low_memory=False)
#df2=pd.read_csv("D:/Users/user/Desktop/store.csv",low_memory=False)
#df=pd.merge(df1,df2,on="Store")
df.shape
df.dtypes
print(len(df['Store'].unique()))
print(len(df['Date'].unique()))
print(df['DayOfWeek'].value_counts())
df['Date']=pd.to_datetime(df['Date'])
df['Month']=df['Date'].dt.month
df['Quarter']=df['Date'].dt.quarter
df['Year']=df['Date'].dt.year
df['Day']=df['Date'].dt.day
df['Week']=df['Date'].dt.week
df['Season']=np.where(df['Month'].isin([3,4,5]),'Spring',
np.where(df['Month'].isin([6,7,8]),'Summer',
np.where(df['Month'].isin([9,10,11]),'Fall',
np.where(df['Month'].isin([12,1,2]),'Winter','None'))))
print(df[['Date','Year','Month','Day','Week','Quarter','Season']].head())
df.hist(figsize=(20,10))
df.isnull().sum()/df.shape[0]*100
###Output
_____no_output_____
###Markdown
๊ฐ ๋ณ์์ ๊ฒฐ์ธก์ ๋ ์ถ๋ ฅ, 10%์ดํ์ด๋ฉด ๊ฒฐ์ธก์น๋ฅผ ๋์ฒดํ๊ธฐ ์ํ ๋
ธ๋ ฅํ์ 30% ์ด์์ด๋ฉด ํด๋น ๋ณ์๋ฅผ ์ ๊ฑฐํ๋ ๊ฒ์ด ๋ชจํ์ ์์์ฑ์ ํผํ ์ ์๋ ๊ฑฐ์ ์ ์ผํ ๋ฐฉ๋ฒ ๋ฐ๋ผ์ ํ์ฌ 0.26%์ ๊ฒฐ์ธก์ ๋ณด์ด๊ณ ์๋ competitionDistance ๋ณ์๋ฅผ ์์ ์ mode๊ฐ์ผ๋ก ๋์ฒด
###Code
df['CompetitionDistance']=df['CompetitionDistance'].fillna(df['CompetitionDistance'].mode()[0]) # mode() returns a Series, so take its first value
df['CompetitionDistance'].isnull().sum()
from sklearn.preprocessing import LabelEncoder,OneHotEncoder
n_columns=['Customers','Open','Promo','Promo2','StateHoliday','SchoolHoliday','CompetitionDistance']
categ_columns=['DayOfWeek','Quarter','Month','Year','StoreType','Assortment','Season']
###Output
_____no_output_____
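###Markdown
A small sketch of how the 10% / 30% rule described above could be applied automatically. The thresholds, the mode-based imputation and the variable names are illustrative assumptions rather than part of the original analysis.
###Code
# Hedged sketch of the missing-value rule (thresholds and names are assumptions)
missing_ratio = df.isnull().mean()
drop_cols = missing_ratio[missing_ratio > 0.30].index                              # too sparse: drop
impute_cols = missing_ratio[(missing_ratio > 0) & (missing_ratio <= 0.10)].index   # few gaps: impute
df_clean = df.drop(columns=drop_cols)
for col in impute_cols:
    df_clean[col] = df_clean[col].fillna(df_clean[col].mode()[0])
###Output
_____no_output_____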
###Markdown
LabelEncoder๋ ๋ฒ์ฃผํ ๋ณ์๋ฅผ 0~(๋ฒ์ฃผ์ -1)๋ก ์์นํํ๊ณ OneHotEncoder๋ Label encoding๋ ์๋ฃ๋ฅผ one-hot encoding์ผ๋ก ์ ํํ๋ classํจ์์ด๋ค.
###Code
def dummy(data,col):
lab=LabelEncoder() #0~c-1๋ก ํด๋์ค๋ถ์ฌ
aa=lab.fit_transform(data[col]).reshape(-1,1)
ohe=OneHotEncoder(sparse=False)
column_names=[col+'_'+str(i) for i in lab.classes_]
return(pd.DataFrame(ohe.fit_transform(aa),columns=column_names))
fdata=df[n_columns]
for column in categ_columns:
temp_df=dummy(df,column)
fdata=pd.concat([fdata,temp_df],axis=1)
fdata.head()
fdata.shape
###Output
_____no_output_____
###Markdown
one-hot encoding ํ ๋ณ์ ๊ฐฏ์ 38๊ฐ๋ก ์ฆ๊ฐ
###Code
fdata.dtypes.unique()
###Output
_____no_output_____
###Markdown
0์ ํด์ผ์ด ์๋๋ a๋ ๊ณตํด์ผ b๋ ๋ถํ์ c๋ ํฌ๋ฆฌ์ค๋ง์ค๋ฅผ ์๋ฏธํ๋ค. **2.4 ๋ถ๊ท ํ์๋ฃ์ ์ฒ๋ฆฌ**
###Code
###Output
_____no_output_____
###Markdown
์ ์น์
###Code
###Output
_____no_output_____ |
3d_data_augmentation.ipynb | ###Markdown
###Code
from google.colab import drive
drive.mount('/content/drive/')
!ls
import os
os.chdir('drive/My Drive/deep_learning')
!pip install tensorflow==2.0.0-rc1
import tensorflow as tf
print(tf.__version__)
!git clone https://github.com/pydicom/pydicom.git
!git clone https://github.com/hiram64/3D-VoxelDataGenerator.git
!git pull
!ls -ltr
!pip install -q VoxelDataGenerator
!pip install -q keras
import matplotlib.pyplot as plt
import io
import pydicom
import tensorflow as tf
import os
import numpy as np
import math
import random
from scipy.ndimage.interpolation import shift, zoom
from scipy.ndimage import rotate
from numpy import expand_dims
from glob import glob
from sklearn.model_selection import train_test_split
from mpl_toolkits.mplot3d import Axes3D
from io import StringIO
from matplotlib import cm
from operator import itemgetter
from voxel_data_generator import VoxelDataGenerator
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Model
from keras.models import Sequential
from keras.callbacks import ModelCheckpoint
from keras.layers import BatchNormalization
from keras.layers import Conv2D, Conv3D
from keras.layers import MaxPooling2D, MaxPool3D, MaxPooling3D
from keras.layers import Flatten
from keras.layers import Dense
from keras.layers import Dropout, Input, BatchNormalization
from skimage.transform import resize
# ๋ฐ์ดํฐ ๋ถ๋ฌ์ค๊ธฐ
# ๋ฐ์ดํฐ ๊ฒฝ๋ก
PathDicom = "C:/Users/codls/Desktop/dcm/1/"
# ๋ฐ์ดํฐ๋ค ๋น list์ ์ ์ฅ
lstFilesDCM = []
for dirName, subdirList, fileList in os.walk(PathDicom):
for filename in fileList:
if ".ima" in filename.lower(): # ํด๋์ .ima ํ์ผ์ด ์์ผ๋ฉด ๋ฆฌ์คํธ์ ์ถ๊ฐ
lstFilesDCM.append(os.path.join(dirName,filename))
print(lstFilesDCM[0]) # sanity check
# Dicom file display
## (1)
# read the first .ima file
RefDs = pydicom.read_file(lstFilesDCM[0])
# Z์ถ์ ๋ฐ๋ผ ํ, ์ด, ์ฌ๋ผ์ด์ค ์ ๊ณ์ฐ
ConstPixelDims = (int(RefDs.Rows), int(RefDs.Columns), len(lstFilesDCM))
# ๊ฐ๊ฒฉ ์ ์ฅ (in mm)
ConstPixelSpacing = (float(RefDs.PixelSpacing[0]), float(RefDs.PixelSpacing[1]), float(RefDs.SliceThickness))
# x,y,z ์ขํ ์ ์ฅ
x = np.arange(0.0, (ConstPixelDims[0]+1)*ConstPixelSpacing[0], ConstPixelSpacing[0])
y = np.arange(0.0, (ConstPixelDims[1]+1)*ConstPixelSpacing[1], ConstPixelSpacing[1])
z = np.arange(0.0, (ConstPixelDims[2]+1)*ConstPixelSpacing[2], ConstPixelSpacing[2])
# ConstPixelDims์ ๋ง๊ฒ array ํฌ๊ธฐ๋ฅผ ๋ง์ถฐ์ค
ArrayDicom = np.zeros(ConstPixelDims, dtype=RefDs.pixel_array.dtype)
# ๋ชจ๋ ๋ค์ด์ฝค ํ์ผ ๋ฃจํ
for filenameDCM in lstFilesDCM:
# ํ์ผ ์ฝ๊ธฐ
ds = pydicom.read_file(filenameDCM)
# ๋ฐ์ดํฐ ์ ์ฅ
ArrayDicom[:, :, lstFilesDCM.index(filenameDCM)] = ds.pixel_array
ArrayDicom.shape
plt.figure(dpi=300)
plt.axes().set_aspect('equal', 'datalim')
plt.set_cmap(plt.gray())
# flipud > flip the array vertically
plt.pcolormesh(x, y, np.flipud(ArrayDicom[:, :, 26])) # the z index can be adjusted
## (2)
ConstPixelDims = (len(lstFilesDCM),int(RefDs.Rows), int(RefDs.Columns))
# The array is sized based on 'ConstPixelDims'
ArrayDicom1 = np.zeros(ConstPixelDims, dtype=RefDs.pixel_array.dtype)
print(ArrayDicom1.shape)
# loop through all the DICOM files
for filenameDCM in lstFilesDCM:
# ํ์ผ ์ฝ๊ธฐ
ds = pydicom.read_file(filenameDCM)
# array ํํ๋ก ์ ์ฅ
ArrayDicom1[lstFilesDCM.index(filenameDCM), :, :] = ds.pixel_array
# 10๋ฒ์งธ slice display
plt.imshow(ArrayDicom1[9,:,:])
# 3D augmentation
## >> display the files converted from .ima to arrays
## ArrayDicom is one 3D data set
## VoxelDataGenerator
# restrict the z range (0-9) to speed up processing
data1 = ArrayDicom[:, :, :10]
# VoxelDataGenerator ์ฌ์ฉ ์ batch๊ฐ ์๊ตฌ๋จ, batch ์ถ๊ฐ
samples= data1.reshape((1,)+data1.shape)
#samples = data
#samples.shape
print(samples.shape)
# test.py์ c, ImageDataGenerator ํจ์, generator
train_datagen = VoxelDataGenerator(flip_axis=None,
shift_axis='random', shift_range='random',
zoom_axis='random', zoom_range='random',
rotate_axis=3, rotate_angle='random')
# ๋์ค ๋ฅ๋ฌ๋ ๋ test์ ์ฌ์ฉํ๋ ๊ฒ, ์ง๊ธ x
test_datagen = VoxelDataGenerator(flip_axis=None,
shift_axis=None, shift_range=0,
zoom_axis='same', zoom_range=0,
rotate_axis=None, rotate_angle=0)
# generator๋ฅผ data์ ์ ์ฉ
training_set = train_datagen.build(data=samples, batch_size = 1)
# ๋์ค ๋ฅ๋ฌ๋ ๋ ์ฌ์ฉํ๋ ๊ฒ
test_set = test_datagen.build(data=samples, batch_size = 1)
print(type(train_datagen)) # ๋ชจ๋
print(type(training_set)) # generator
type(next(training_set)[0])
print(next(training_set)[0].shape)
## generator voxel plot
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, projection='3d')
ax.voxels(next(training_set)[0], edgecolor='k')
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
## generator data 1 slice
plt.imshow(next(training_set)[0,:,:,0])
## original voxel plot
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, projection='3d')
ax.voxels(data1, edgecolor='k')
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
data1.shape
##original data 1 slice
plt.imshow(data1[:,:,0])
plt.xlabel('x');
plt.ylabel('y');
plt.xlim(0,256);
plt.ylim(0,256);
samples.shape
## normalization
def normalize(ArrayDicom):
ArrayDicom_min = np.min(ArrayDicom)
return (ArrayDicom-ArrayDicom_min)/(np.max(ArrayDicom)-ArrayDicom_min)
def show_histogram(values):
n, bins, patches = plt.hist(values.reshape(-1), 50, density=1)
bin_centers = 0.5 * (bins[:-1] + bins[1:])
for c, p in zip(normalize(bin_centers), patches):
plt.setp(p, 'facecolor', cm.viridis(c))
plt.show()
show_histogram(ArrayDicom)
##
def scale_by(ArrayDicom, fac):
mean = np.mean(ArrayDicom)
return (ArrayDicom-mean)*fac + mean
transformed = np.clip(scale_by(np.clip(normalize(ArrayDicom)-0.1, 0, 1)**0.4, 2)-0.1, 0, 1)
show_histogram(transformed)
ArrayDicom.shape
data=ArrayDicom
##
resized = resize(ArrayDicom, (256, 256, 192), mode='constant')
##
def make_ax(grid=False):
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("z")
ax.grid(grid)
return ax
def explode(data):
shape_arr = np.array(data.shape)
size = shape_arr[:3]*2 - 1
exploded = np.zeros((shape_arr), dtype=data.dtype)
exploded[::2, ::2, ::2] = data
return exploded
##
def plot_cube(cube, angle=320):
cube = normalize(cube)
facecolors = cm.viridis(cube)
facecolors[:,:,:,-1] = cube
facecolors = explode(facecolors)
filled = facecolors[:,:,:,-1] != 0
x1, y1, z1 = expand_coordinates(np.indices(np.array(filled.shape) + 1))
fig = plt.figure(figsize=(30/2.54, 30/2.54))
ax = fig.gca(projection='3d')
ax.view_init(30, angle)
ax.set_xlim(right=IMG_DIM*2)
ax.set_ylim(top=IMG_DIM*2)
ax.set_zlim(top=IMG_DIM*2)
ax.voxels(x1, y1, z1, filled, facecolors=facecolors)
plt.show()
plot_cube(resized[:,:,:])
print(training_set)
# Create the model
model = Sequential()
model.add(Conv3D(32, kernel_size=(3, 3, 3), activation='relu', kernel_initializer='he_uniform', input_shape=samples.shape))
model.add(MaxPooling3D(pool_size=(2, 2, 2)))
model.add(Conv3D(64, kernel_size=(3, 3, 3), activation='relu', kernel_initializer='he_uniform'))
model.add(MaxPooling3D(pool_size=(2, 2, 2)))
model.add(Flatten())
model.add(Dense(256, activation='relu', kernel_initializer='he_uniform'))
model.add(Dense(no_classes, activation='softmax'))
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),
activation='relu',
input_shape=(24,24,3)))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(3, activation='softmax'))
###Output
_____no_output_____ |
Model backlog/Train/4-jigsaw-train-distilbert-ml-cased-bias.ipynb | ###Markdown
Dependencies
###Code
import os, glob, warnings
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
from transformers import TFDistilBertModel
import tensorflow as tf
from tensorflow.keras.models import Model
from tensorflow.keras import optimizers, metrics, losses
from tensorflow.keras.callbacks import LearningRateScheduler, EarlyStopping
from tensorflow.keras.layers import Dense, Input, Dropout, GlobalAveragePooling1D
from sklearn.metrics import confusion_matrix, roc_auc_score, classification_report
def seed_everything(seed=0):
np.random.seed(seed)
tf.random.set_seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
os.environ['TF_DETERMINISTIC_OPS'] = '1'
SEED = 0
seed_everything(SEED)
warnings.filterwarnings("ignore")
# Auxiliary functions
def plot_metrics(history, metric_list):
fig, axes = plt.subplots(len(metric_list), 1, sharex='col', figsize=(20, 18))
axes = axes.flatten()
for index, metric in enumerate(metric_list):
axes[index].plot(history[metric], label='Train %s' % metric)
axes[index].plot(history['val_%s' % metric], label='Validation %s' % metric)
axes[index].legend(loc='best', fontsize=16)
axes[index].set_title(metric)
plt.xlabel('Epochs', fontsize=16)
sns.despine()
plt.show()
def plot_confusion_matrix(y_train, train_pred, y_valid, valid_pred, labels=[0, 1]):
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(20, 8))
train_cnf_matrix = confusion_matrix(y_train, train_pred)
validation_cnf_matrix = confusion_matrix(y_valid, valid_pred)
train_cnf_matrix_norm = train_cnf_matrix.astype('float') / train_cnf_matrix.sum(axis=1)[:, np.newaxis]
validation_cnf_matrix_norm = validation_cnf_matrix.astype('float') / validation_cnf_matrix.sum(axis=1)[:, np.newaxis]
train_df_cm = pd.DataFrame(train_cnf_matrix_norm, index=labels, columns=labels)
validation_df_cm = pd.DataFrame(validation_cnf_matrix_norm, index=labels, columns=labels)
sns.heatmap(train_df_cm, annot=True, fmt='.2f', cmap="Blues",ax=ax1).set_title('Train')
sns.heatmap(validation_df_cm, annot=True, fmt='.2f', cmap=sns.cubehelix_palette(8),ax=ax2).set_title('Validation')
plt.show()
# Datasets
def get_training_dataset():
dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
dataset = dataset.repeat()
dataset = dataset.shuffle(2048)
dataset = dataset.batch(BATCH_SIZE, drop_remainder=True)
dataset = dataset.prefetch(AUTO)
return dataset
def get_validation_dataset():
dataset = tf.data.Dataset.from_tensor_slices((x_valid, y_valid))
dataset = dataset.batch(BATCH_SIZE, drop_remainder=True)
dataset = dataset.cache()
dataset = dataset.prefetch(AUTO)
return dataset
###Output
_____no_output_____
###Markdown
TPU configuration
###Code
try:
tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
print('Running on TPU ', tpu.master())
except ValueError:
tpu = None
if tpu:
tf.config.experimental_connect_to_cluster(tpu)
tf.tpu.experimental.initialize_tpu_system(tpu)
strategy = tf.distribute.experimental.TPUStrategy(tpu)
else:
strategy = tf.distribute.get_strategy()
print("REPLICAS: ", strategy.num_replicas_in_sync)
AUTO = tf.data.experimental.AUTOTUNE
###Output
Running on TPU grpc://10.0.0.2:8470
REPLICAS: 8
###Markdown
Load data
###Code
dataset_base_path = '/kaggle/input/jigsaw-dataset-toxic-distilbert/'
dataset_base_bias_pt1_path = '/kaggle/input/jigsaw-dataset-bias-distilbert-pt1/'
dataset_base_bias_pt2_path = '/kaggle/input/jigsaw-dataset-bias-distilbert-pt2/'
x_train_bias_path = glob.glob(dataset_base_bias_pt1_path + 'x_train*.npy')
x_train_bias_path += glob.glob(dataset_base_bias_pt2_path + 'x_train*.npy')
x_train_bias_path.sort()
y_train_bias_path = glob.glob(dataset_base_bias_pt1_path + 'y_train*.npy')
y_train_bias_path += glob.glob(dataset_base_bias_pt2_path + 'y_train*.npy')
y_train_bias_path.sort()
x_valid_path = dataset_base_path + 'x_valid.npy'
y_valid_path = dataset_base_path + 'y_valid.npy'
x_train = np.load(x_train_bias_path[0])
for path_pt in x_train_bias_path[1:3]:
x_train = np.vstack([x_train, np.load(path_pt)])
y_train = np.load(y_train_bias_path[0])
for path_pt in y_train_bias_path[1:3]:
y_train = np.vstack([y_train, np.load(path_pt)])
x_valid = np.load(x_valid_path)
y_valid = np.load(y_valid_path)
print('Train samples %d' % len(x_train))
print('Validation samples %d' % len(x_valid))
###Output
Train samples 300000
Validation samples 8000
###Markdown
Model parameters
###Code
MAX_LEN = 512
BATCH_SIZE = 64 * strategy.num_replicas_in_sync
EPOCHS = 20
LEARNING_RATE = 1e-5 # * strategy.num_replicas_in_sync
ES_PATIENCE = 5
TRAIN_STEPS = len(x_train) // BATCH_SIZE
VALIDATION_STEPS = len(x_valid) // BATCH_SIZE
base_model_path = '/kaggle/input/diltilbert-base-ml-cased-huggingface/distilbert-base-multilingual-cased-tf_model.h5'
config_path = '/kaggle/input/diltilbert-base-ml-cased-huggingface/distilbert-base-multilingual-cased-config.json'
model_path = 'model.h5'
###Output
_____no_output_____
###Markdown
Learning rate schedule
###Code
LR_START = 1e-9
LR_MIN = 1e-6
LR_MAX = LEARNING_RATE
LR_RAMPUP_EPOCHS = 5
LR_SUSTAIN_EPOCHS = 0
LR_EXP_DECAY = .8
def lrfn(epoch):
if epoch < LR_RAMPUP_EPOCHS:
lr = (LR_MAX - LR_START) / LR_RAMPUP_EPOCHS * epoch + LR_START
elif epoch < LR_RAMPUP_EPOCHS + LR_SUSTAIN_EPOCHS:
lr = LR_MAX
else:
lr = (LR_MAX - LR_MIN) * LR_EXP_DECAY**(epoch - LR_RAMPUP_EPOCHS - LR_SUSTAIN_EPOCHS) + LR_MIN
return lr
rng = [i for i in range(EPOCHS)]
y = [lrfn(x) for x in rng]
fig, ax = plt.subplots(figsize=(20, 8))
plt.plot(rng, y)
print("Learning rate schedule: {:.3g} to {:.3g} to {:.3g}".format(y[0], max(y), y[-1]))
###Output
Learning rate schedule: 1e-09 to 1e-05 to 1.4e-06
###Markdown
Model
###Code
def model_fn():
input_word_ids = Input(shape=(MAX_LEN,), dtype=tf.int32, name='input_word_ids')
base_model = TFDistilBertModel.from_pretrained(base_model_path, config=config_path)
sequence_output = base_model(input_word_ids)[0]
x = GlobalAveragePooling1D()(sequence_output)
x = Dropout(0.25)(x)
output = Dense(1, activation='sigmoid', name='output')(x)
model = Model(inputs=input_word_ids, outputs=output)
model.compile(optimizers.Adam(lr=LEARNING_RATE),
loss=losses.BinaryCrossentropy(),
metrics=[metrics.BinaryAccuracy(), metrics.AUC()])
return model
with strategy.scope():
model = model_fn()
model.summary()
###Output
Model: "model"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_word_ids (InputLayer) [(None, 512)] 0
_________________________________________________________________
tf_distil_bert_model (TFDist ((None, 512, 768),) 134734080
_________________________________________________________________
global_average_pooling1d (Gl (None, 768) 0
_________________________________________________________________
dropout_19 (Dropout) (None, 768) 0
_________________________________________________________________
output (Dense) (None, 1) 769
=================================================================
Total params: 134,734,849
Trainable params: 134,734,849
Non-trainable params: 0
_________________________________________________________________
###Markdown
Train
###Code
STEPS_PER_EPOCH = len(x_train) // BATCH_SIZE
es = EarlyStopping(monitor='val_loss', mode='min', patience=ES_PATIENCE,
restore_best_weights=True, verbose=1)
lr_callback = LearningRateScheduler(lrfn, verbose=1)
history = model.fit(x=get_training_dataset(),
steps_per_epoch=STEPS_PER_EPOCH,
validation_data=get_validation_dataset(),
callbacks=[es, lr_callback],
epochs=EPOCHS,
verbose=1).history
model.save_weights(model_path)
###Output
Train for 585 steps, validate for 15 steps
Epoch 00001: LearningRateScheduler reducing learning rate to 1e-09.
Epoch 1/20
585/585 [==============================] - 390s 666ms/step - loss: 0.7553 - binary_accuracy: 0.2667 - auc: 0.4928 - val_loss: 0.7043 - val_binary_accuracy: 0.3934 - val_auc: 0.5089
Epoch 00002: LearningRateScheduler reducing learning rate to 2.0008e-06.
Epoch 2/20
585/585 [==============================] - 341s 582ms/step - loss: 0.3335 - binary_accuracy: 0.7079 - auc: 0.5248 - val_loss: 0.4380 - val_binary_accuracy: 0.8456 - val_auc: 0.5191
Epoch 00003: LearningRateScheduler reducing learning rate to 4.0005999999999994e-06.
Epoch 3/20
585/585 [==============================] - 341s 582ms/step - loss: 0.2935 - binary_accuracy: 0.7088 - auc: 0.7059 - val_loss: 0.3999 - val_binary_accuracy: 0.8413 - val_auc: 0.7060
Epoch 00004: LearningRateScheduler reducing learning rate to 6.000399999999999e-06.
Epoch 4/20
585/585 [==============================] - 339s 580ms/step - loss: 0.2519 - binary_accuracy: 0.7086 - auc: 0.8115 - val_loss: 0.4047 - val_binary_accuracy: 0.8430 - val_auc: 0.7465
Epoch 00005: LearningRateScheduler reducing learning rate to 8.0002e-06.
Epoch 5/20
585/585 [==============================] - 341s 583ms/step - loss: 0.2415 - binary_accuracy: 0.7090 - auc: 0.8325 - val_loss: 0.3950 - val_binary_accuracy: 0.8428 - val_auc: 0.7633
Epoch 00006: LearningRateScheduler reducing learning rate to 1e-05.
Epoch 6/20
585/585 [==============================] - 339s 580ms/step - loss: 0.2361 - binary_accuracy: 0.7091 - auc: 0.8444 - val_loss: 0.4164 - val_binary_accuracy: 0.8436 - val_auc: 0.7622
Epoch 00007: LearningRateScheduler reducing learning rate to 8.200000000000001e-06.
Epoch 7/20
585/585 [==============================] - 339s 580ms/step - loss: 0.2318 - binary_accuracy: 0.7094 - auc: 0.8536 - val_loss: 0.3961 - val_binary_accuracy: 0.8443 - val_auc: 0.7752
Epoch 00008: LearningRateScheduler reducing learning rate to 6.760000000000001e-06.
Epoch 8/20
585/585 [==============================] - 339s 580ms/step - loss: 0.2296 - binary_accuracy: 0.7094 - auc: 0.8586 - val_loss: 0.3994 - val_binary_accuracy: 0.8448 - val_auc: 0.7782
Epoch 00009: LearningRateScheduler reducing learning rate to 5.608000000000001e-06.
Epoch 9/20
585/585 [==============================] - 339s 579ms/step - loss: 0.2275 - binary_accuracy: 0.7095 - auc: 0.8628 - val_loss: 0.4073 - val_binary_accuracy: 0.8444 - val_auc: 0.7761
Epoch 00010: LearningRateScheduler reducing learning rate to 4.6864000000000006e-06.
Epoch 10/20
584/585 [============================>.] - ETA: 0s - loss: 0.2259 - binary_accuracy: 0.7097 - auc: 0.8660Restoring model weights from the end of the best epoch.
585/585 [==============================] - 347s 594ms/step - loss: 0.2260 - binary_accuracy: 0.7097 - auc: 0.8660 - val_loss: 0.4343 - val_binary_accuracy: 0.8448 - val_auc: 0.7730
Epoch 00010: early stopping
###Markdown
Model loss graph
###Code
sns.set(style="whitegrid")
plot_metrics(history, metric_list=['loss', 'binary_accuracy', 'auc'])
###Output
_____no_output_____
###Markdown
Model evaluation
###Code
train_pred = model.predict(get_training_dataset(), steps=TRAIN_STEPS)
valid_pred = model.predict(get_validation_dataset())
print('Train set ROC AUC %.4f' % roc_auc_score(np.round(y_train[:len(train_pred)]), np.round(train_pred)))
print(classification_report(np.round(y_train[:len(train_pred)]), np.round(train_pred)))
print('Validation set ROC AUC %.4f' % roc_auc_score(y_valid[:len(valid_pred)], np.round(valid_pred)))
print(classification_report(y_valid[:len(valid_pred)], np.round(valid_pred)))
###Output
Train set ROC AUC 0.5021
precision recall f1-score support
0.0 0.94 0.92 0.93 281880
1.0 0.06 0.08 0.07 17640
accuracy 0.87 299520
macro avg 0.50 0.50 0.50 299520
weighted avg 0.89 0.87 0.88 299520
Validation set ROC AUC 0.5287
precision recall f1-score support
0.0 0.85 0.98 0.91 6494
1.0 0.45 0.07 0.13 1186
accuracy 0.84 7680
macro avg 0.65 0.53 0.52 7680
weighted avg 0.79 0.84 0.79 7680
###Markdown
Confusion matrix
###Code
plot_confusion_matrix(np.round(y_train[:len(train_pred)]), np.round(train_pred),
y_valid[:len(valid_pred)], np.round(valid_pred))
###Output
_____no_output_____
###Markdown
Visualize predictions
###Code
train = pd.read_csv("/kaggle/input/jigsaw-multilingual-toxic-comment-classification/jigsaw-unintended-bias-train.csv",
usecols=['comment_text', 'toxic'], nrows=10)
valid = pd.read_csv('/kaggle/input/jigsaw-multilingual-toxic-comment-classification/validation.csv',
usecols=['comment_text', 'toxic'], nrows=10)
train['pred'] = train_pred[:len(train)]
valid['pred'] = valid_pred[:len(valid)]
print('Train set')
display(train[['comment_text', 'toxic', 'pred']].head(10))
print('Validation set')
display(valid[['comment_text', 'toxic', 'pred']].head(10))
###Output
Train set
|
Color_map_creation_for_physical_world_exp.ipynb | ###Markdown
Color Mapping Creation for Miniature-Scale Physical-World Experiment
###Code
%matplotlib inline
import numpy as np
import pandas as pd
import cv2
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
STEP1: Capture a gray-scale paletteSince the DRP attack uses only grayscale perturbations, we need to know how the camera perceives each grayscale color. Thus, we prepare a gray-scale palette containing all 256 (8-bit) grayscale colors and capture it.
###Code
### Gray scale pallet
patch = np.zeros((255 * 10, 1000), dtype=np.uint8)
for i in range(0, 255):
patch[i*10: (i+1) * 10] = i
plt.imshow(patch, cmap='gray')
### Capture it in a camera (OpenPilot dashcam)
img = cv2.cvtColor(cv2.imread('data/img_physical_world_pallet.png'), cv2.COLOR_BGR2RGB)
plt.imshow(img)
###Output
_____no_output_____
###Markdown
STEP2: Extract color mappings
###Code
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
bev = img[510:575, 560:600]
plt.imshow(bev)
plt.title('Observed grayscale color pallet')
plt.imshow(bev)
plt.show()
df = pd.DataFrame(bev.mean(axis=1), columns=['R','G','B'])#.plot(style=['r', 'g', 'b'])
df['source'] = np.round((df.index.values / df.shape[0] * 256)).astype(int)
df = df.groupby('source').mean().astype(int)
df['X'] = df.index.values
df['mean'] = np.round(df[['R','G','B']].mean(axis=1)).astype(int)
df.plot(style=['r', 'g', 'b', 'y'])
plt.ylabel('Target color')
plt.xlabel('Source color')
###Output
_____no_output_____
###Markdown
The yellow line shows the case where the observed colors and the printed palette colors are the same. As shown, colors below 50 are observed brighter, while colors above 50 are observed darker. If you change the lighting conditions or the printer, this color mapping will be different. STEP3: Complete all 256 grayscale color mappings
###Code
from scipy.interpolate import UnivariateSpline
tmp = df[['mean']].reset_index().groupby('mean', as_index=False)['source'].min().rename({'mean': 'target'}, axis=1)#.set_index('source')#.reindex(np.arange(256))
sp = UnivariateSpline(tmp['source'], tmp['target'], k=3)
#map_sim2patch = map_sim2patch.fillna(method='ffill').fillna(method='bfill')
source = np.arange(256)
target = sp(source).astype(np.uint8)
df_sim2patch = pd.DataFrame({
'source': source,
'target': target
})
df_sim2patch.set_index('source').plot()
plt.ylabel('Target color')
plt.xlabel('Source color')
###Output
_____no_output_____
###Markdown
STEP4: Replace colors in patch based on color mapping
###Code
raw_patch = cv2.cvtColor(cv2.imread('data/patch_for_physical_before_color_change.png'), cv2.COLOR_BGR2RGB)
# Since above color mapping is not so accurate, I prepare more accurate color mapping for base color.
base_color = np.array([49, 47, 50])
observed_base_color = np.array([93.69449545, 92.59961091, 89.35799455])
plt.title('Raw patch before color replacment with out lane lines')
plt.imshow(raw_patch)
plt.axis('off')
plt.show()
scale = 5
left_lane_pos = 2.5
right_lane_pos = 38
LANE_WIDTH = 1.5
map_sim2patch = df_sim2patch.set_index('target')['source'].to_dict()
patch = raw_patch.clip(df_sim2patch['target'].min(), df_sim2patch['target'].max())
for i in range(patch.shape[0]):
for j in range(patch.shape[1]):
# base color and darker than base color use the base color mapping since the perturbation is always brighter than base color.
if np.abs(base_color - patch[i, j]).sum() > 0 and map_sim2patch[patch[i, j].max()] > np.min(observed_base_color):
patch[i, j] = [map_sim2patch[patch[i, j, 0]]] * 3
else:
patch[i, j] = observed_base_color
# Drawing lane lines
patch[:, int(left_lane_pos * scale)*2:int((left_lane_pos + LANE_WIDTH) * scale)*2] = 220
patch[0*scale * 2:25 * scale * 2, right_lane_pos * scale * 2:int((right_lane_pos + LANE_WIDTH) * scale) *2] = 220
patch[40*scale * 2:60 * scale * 2, right_lane_pos * scale * 2:int((right_lane_pos + LANE_WIDTH) * scale) * 2] = 220
patch[78*scale * 2:96 * scale * 2, right_lane_pos * scale * 2:int((right_lane_pos + LANE_WIDTH) * scale) * 2] = 220
patch = np.uint8(patch)
plt.title('Patch after color replacment')
plt.imshow(patch)
plt.axis('off')
plt.show()
###Output
_____no_output_____ |
downloaded_kernels/loan_data/kernel_184.ipynb | ###Markdown
Understand the different data patterns in the lending data
###Code
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
%matplotlib inline
from matplotlib import style
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
from subprocess import check_output
print(check_output(["ls", "../input"]).decode("utf8"))
# Any results you write to the current directory are saved as output.
df_loan = pd.read_csv('../input/loan.csv', low_memory=False)
df_loan.shape
df_loan.columns
df_loan.describe()
df_loan.head(5)
df_loan.isnull().sum()
###Output
_____no_output_____ |
lijin-THU:notes-python/07-interfacing-with-other-languages/07.04-cython-part-2.ipynb | ###Markdown
Cython๏ผCython ่ฏญๆณ๏ผ่ฐ็จๅ
ถไปCๅบ Cython ่ฏญๆณ cdef ๅ
ณ้ฎ่ฏ `cdef` ๅฎไน `C` ็ฑปๅๅ้ใ ๅฏไปฅๅฎไนๅฑ้จๅ้๏ผ```cythondef fib(int n): cdef int a,b,i ...```ๅฎไนๅฝๆฐ่ฟๅๅผ๏ผ```cythoncdef float distance(float *x, float *y, int n): cdef: int i float d = 0.0 for i in range(n): d += (x[i] - y[i]) ** 2 return d```ๅฎไนๅฝๆฐ๏ผ```cythoncdef class Particle(object): cdef float psn[3], vel[3] cdef int id```ๆณจๆๅฝๆฐ็ๅๆฐไธ้่ฆไฝฟ็จ cdef ็ๅฎไนใ def, cdef, cpdef ๅฝๆฐ `Cython` ไธๅ
ฑๆไธ็งๅฎไนๆนๅผ๏ผ`def, cdef, cpdef` ไธ็ง๏ผ- `def` - Python, Cython ้ฝๅฏไปฅ่ฐ็จ- `cdef` - ๆดๅฟซ๏ผๅช่ฝ Cython ่ฐ็จ๏ผๅฏไปฅไฝฟ็จๆ้- `cpdef` - Python, Cython ้ฝๅฏไปฅ่ฐ็จ๏ผไธ่ฝไฝฟ็จๆ้ cimport
###Code
from math import sin as pysin
from numpy import sin as npsin
%load_ext Cython
###Output
_____no_output_____
###Markdown
ไปๆ ๅ `C` ่ฏญ่จๅบไธญ่ฐ็จๆจกๅ๏ผ`cimport` ๅช่ฝๅจ `Cython` ไธญไฝฟ็จ๏ผ
###Code
%%cython
from libc.math cimport sin
from libc.stdlib cimport malloc, free
###Output
_____no_output_____
###Markdown
cimport ๅ pxd ๆไปถ ๅฆๆๆณๅจๅคไธชๆไปถไธญๅค็จ `Cython` ไปฃ็ ๏ผๅฏไปฅๅฎไนไธไธช `.pxd` ๆไปถ๏ผ็ธๅฝไบๅคดๆไปถ `.h`๏ผๅฎไนๆนๆณ๏ผ่ฟไธชๆไปถๅฏนๅบไบไธไธช `.pyx` ๆไปถ๏ผ็ธๅฝไบๆบๆไปถ `.c`๏ผ๏ผ็ถๅๅจๅ
ถไป็ๆไปถไธญไฝฟ็จ `cimport` ๅฏผๅ
ฅ๏ผ`fib.pxd, fib.pyx` ๆไปถๅญๅจ๏ผ้ฃไนๅฏไปฅ่ฟๆ ท่ฐ็จ๏ผ```cythonfrom fib cimport fib```่ฟๅฏไปฅ่ฐ็จ `C++` ๆ ๅๅบๅ `Numpy C Api` ไธญ็ๆไปถ๏ผ```cythonfrom libcpp.vector cimport vectorcimport numpy as cnp``` ่ฐ็จๅ
ถไปCๅบ ไปๆ ๅๅบ `string.h` ไธญ่ฐ็จ `strlen`๏ผ
###Code
%%file len_extern.pyx
cdef extern from "string.h":
int strlen(char *c)
def get_len(char *message):
return strlen(message)
###Output
Writing len_extern.pyx
###Markdown
ไธ่ฟ `Cython` ไธไผ่ชๅจๆซๆๅฏผๅ
ฅ็ๅคดๆไปถ๏ผๆไปฅ่ฆไฝฟ็จ็ๅฝๆฐๅฟ
้กปๅๅฃฐๆไธ้๏ผ
###Code
%%file setup_len_extern.py
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
setup(
ext_modules=[ Extension("len_extern", ["len_extern.pyx"]) ],
cmdclass = {'build_ext': build_ext}
)
###Output
Writing setup_len_extern.py
###Markdown
็ผ่ฏ๏ผ
###Code
!python setup_len_extern.py build_ext --inplace
###Output
running build_ext
cythoning len_extern.pyx to len_extern.c
building 'len_extern' extension
creating build
creating build\temp.win-amd64-2.7
creating build\temp.win-amd64-2.7\Release
C:\Miniconda\Scripts\gcc.bat -DMS_WIN64 -mdll -O -Wall -IC:\Miniconda\include -IC:\Miniconda\PC -c len_extern.c -o build\temp.win-amd64-2.7\Release\len_extern.o
writing build\temp.win-amd64-2.7\Release\len_extern.def
C:\Miniconda\Scripts\gcc.bat -DMS_WIN64 -shared -s build\temp.win-amd64-2.7\Release\len_extern.o build\temp.win-amd64-2.7\Release\len_extern.def -LC:\Miniconda\libs -LC:\Miniconda\PCbuild\amd64 -lpython27 -lmsvcr90 -o "C:\Users\Jin\Documents\Git\python-tutorial\07. interfacing with other languages\len_extern.pyd"
###Markdown
ไป `Python` ไธญ่ฐ็จ๏ผ
###Code
import len_extern
###Output
_____no_output_____
###Markdown
่ฐ็จ่ฟไธชๆจกๅๅ๏ผๅนถไธ่ฝ็ดๆฅไฝฟ็จ `strlen` ๅฝๆฐ๏ผๅฏไปฅ็ๅฐ๏ผ่ฟไธชๆจกๅไธญๅนถๆฒกๆ `strlen` ่ฟไธชๅฝๆฐ๏ผ
###Code
dir(len_extern)
###Output
_____no_output_____
###Markdown
ไธ่ฟๅฏไปฅ่ฐ็จ `get_len` ๅฝๆฐ๏ผ
###Code
len_extern.get_len('hello')
###Output
_____no_output_____
###Markdown
ๅ ไธบ่ฐ็จ็ๆฏ `C` ๅฝๆฐ๏ผๆไปฅๅฝๆฐ็่กจ็ฐไธ `C` ่ฏญ่จ็็จๆณไธ่ด๏ผไพๅฆ `C` ่ฏญ่จไปฅ `\0` ไธบๅญ็ฌฆไธฒ็็ปๆ็ฌฆ๏ผๆไปฅไผๅบ็ฐ่ฟๆ ท็ๆ
ๅต๏ผ
###Code
len_extern.get_len('hello\0world!')
###Output
_____no_output_____
###Markdown
้คไบๅฏนๅทฒๆ็ `C` ๅฝๆฐ่ฟ่ก่ฐ็จ๏ผ่ฟๅฏไปฅๅฏนๅทฒๆ็ `C` ็ปๆไฝ่ฟ่ก่ฐ็จๅไฟฎๆน๏ผ
###Code
%%file time_extern.pyx
cdef extern from "time.h":
struct tm:
int tm_mday
int tm_mon
int tm_year
ctypedef long time_t
tm* localtime(time_t *timer)
time_t time(time_t *tloc)
def get_date():
"""Return a tuple with the current day, month and year."""
cdef time_t t
cdef tm* ts
t = time(NULL)
ts = localtime(&t)
return ts.tm_mday, ts.tm_mon + 1, ts.tm_year + 1900
###Output
Writing time_extern.pyx
###Markdown
่ฟ้ๆไปฌๅชไฝฟ็จ `tm` ็ปๆไฝ็ๅนดๆๆฅไฟกๆฏ๏ผๆไปฅๅชๅฃฐๆไบ่ฆ็จไบไธไธชๅฑๆงใ
###Code
%%file setup_time_extern.py
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
setup(
ext_modules=[ Extension("time_extern", ["time_extern.pyx"]) ],
cmdclass = {'build_ext': build_ext}
)
###Output
Writing setup_time_extern.py
###Markdown
็ผ่ฏ๏ผ
###Code
!python setup_time_extern.py build_ext --inplace
###Output
running build_ext
cythoning time_extern.pyx to time_extern.c
building 'time_extern' extension
C:\Miniconda\Scripts\gcc.bat -DMS_WIN64 -mdll -O -Wall -IC:\Miniconda\include -IC:\Miniconda\PC -c time_extern.c -o build\temp.win-amd64-2.7\Release\time_extern.o
writing build\temp.win-amd64-2.7\Release\time_extern.def
C:\Miniconda\Scripts\gcc.bat -DMS_WIN64 -shared -s build\temp.win-amd64-2.7\Release\time_extern.o build\temp.win-amd64-2.7\Release\time_extern.def -LC:\Miniconda\libs -LC:\Miniconda\PCbuild\amd64 -lpython27 -lmsvcr90 -o "C:\Users\Jin\Documents\Git\python-tutorial\07. interfacing with other languages\time_extern.pyd"
###Markdown
ๆต่ฏ๏ผ
###Code
import time_extern
time_extern.get_date()
###Output
_____no_output_____
###Markdown
ๆธ
็ๆไปถ๏ผ
###Code
import zipfile
f = zipfile.ZipFile('07-04-extern.zip','w',zipfile.ZIP_DEFLATED)
names = ['setup_len_extern.py',
'len_extern.pyx',
'setup_time_extern.py',
'time_extern.pyx']
for name in names:
f.write(name)
f.close()
!rm -f setup*.*
!rm -f len_extern.*
!rm -f time_extern.*
!rm -rf build
###Output
_____no_output_____ |
examples/Examine_Derecho_Day.ipynb | ###Markdown
This notebook demonstrates how to use wind report data. First, download the wind svrgis file with unique ids and UTC time
###Code
from svrimg.utils.get_tables import get_table
import pandas as pd
df_svrgis = get_table(which='svrgis', haz_type='wind', data_dir="../data/csvs/")
df_svrgis.head()
###Output
<ipython-input-1-d83b000f3f3b>:4: DtypeWarning: Columns (24) have mixed types.Specify dtype option on import or set low_memory=False.
df_svrgis = get_table(which='svrgis', haz_type='wind', data_dir="../data/csvs/")
###Markdown
Subset the dataset for a derecho day--June 29th 2012
###Code
import datetime
import pandas as pd
start_time = datetime.datetime(2012, 6, 29, 14, 0)
end_time = datetime.datetime(2012, 6, 30, 6, 0)
df_svrgis['date_utc'] = pd.to_datetime(df_svrgis.date_utc)
df_sub = df_svrgis[(df_svrgis.date_utc >= start_time) & (df_svrgis.date_utc <= end_time)].copy()
df_sub.head()
###Output
_____no_output_____
###Markdown
Request images from svrimg.niu.edu
###Code
from svrimg.utils.get_images import request_images
file_locs = request_images(df_sub.index.values, haz_type='wind')
file_locs
###Output
_____no_output_____
###Markdown
Summarize the morphology using PM Mean
###Code
import matplotlib.pyplot as plt
%matplotlib inline
import numpy as np
from svrimg.analysis.pmmean import _run_pmm_one_variable
from svrimg.utils.get_images import get_img_list
from svrimg.mapping.map_helper import draw_box_plot
plt.rcParams['figure.figsize'] = 25, 25
plt.rcParams['xtick.labelsize'] = 20
plt.rcParams['ytick.labelsize'] = 20
plt.rcParams['axes.labelsize'] = 20
imgs = get_img_list(df_sub.index.values, haz_type="wind", keep_missing=False)
img_mean = np.mean(imgs, axis=0)
img_median = np.median(imgs, axis=0)
img_pmm = _run_pmm_one_variable(imgs)
ax = plt.subplot(2,3,1)
ax.set_title("Mean", fontsize=20)
ax = draw_box_plot(ax, img_mean)
ax = plt.subplot(2,3,2)
ax.set_title("Median", fontsize=20)
ax = draw_box_plot(ax, img_median)
ax = plt.subplot(2,3,3)
ax.set_title("PMM", fontsize=20)
ax = draw_box_plot(ax, img_pmm)
###Output
_____no_output_____
###Markdown
Show summary over time
###Code
plt.rcParams['figure.figsize'] = 25, 25
plt.rcParams['xtick.labelsize'] = 10
plt.rcParams['ytick.labelsize'] = 10
plt.rcParams['axes.labelsize'] = 10
for ix, dtime in enumerate(pd.date_range(start=start_time, end=end_time, freq='3H')):
period_start = (dtime + datetime.timedelta(hours=1.5)) - datetime.timedelta(hours=1.5)
period_end = (dtime + datetime.timedelta(hours=1.5)) + datetime.timedelta(hours=1.5)
df_ = df_sub[(df_sub.date_utc >= period_start) & (df_sub.date_utc <= period_end)]
imgs = get_img_list(df_.index.values, haz_type="wind", keep_missing=False)
ax = plt.subplot(3, 3, ix+1)
img_pmm = _run_pmm_one_variable(imgs)
ax.set_title("PMM\n{} - {}".format(period_start, period_end), fontsize=20)
ax = draw_box_plot(ax, img_pmm)
ax.text(0, 130, "n={}".format(len(imgs)), fontsize=25)
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Load the keras model to identify only QLCS structures. Run the "Train_Model" notebook first.
###Code
from tensorflow import keras
model = keras.models.load_model("../data/models/morph_model_v02.h5")
keras.utils.plot_model(model, show_shapes=True)
###Output
_____no_output_____
###Markdown
Transform the images into keras-friendly representations and then have the model attempt to classify
###Code
from numpy import expand_dims
import numpy as np
imgs = get_img_list(df_sub.index.values, haz_type="wind", keep_missing=True)
imgs = expand_dims(imgs, 3)
imgs = imgs / 80 #normalize
preds = model.predict(imgs)
lookup = {0:'Cellular', 1:'QLCS', 2:'Tropical', 3:'Other', 4:'Noise', 5:'Missing'}
for index, cls in lookup.items():
df_sub[cls] = preds[:, index]
df_sub['Classification'] = [lookup[x] for x in np.argmax(preds, axis=1)]
###Output
_____no_output_____
###Markdown
Plot the reports based on their classifications
###Code
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
import cartopy.feature as cfeature
%matplotlib inline
plt.rcParams['figure.figsize'] = 20, 20
ax = plt.axes(projection=ccrs.LambertConformal(central_longitude=-88))
ax.set_extent([-95, -70, 35, 43])
ax.coastlines()
ax.add_feature(cfeature.STATES)
for index, cls in lookup.items():
df_ = df_sub[df_sub.Classification==cls]
plt.plot(df_.slon, df_.slat, '.', ms=15, label="{} ({})".format(cls, len(df_)), transform=ccrs.PlateCarree())
plt.legend(prop={"size":20})
###Output
_____no_output_____ |
Examples/12_tacticities/polycarboxybetaine/scripts/rnd_walk.pilot_tutorial.ipynb | ###Markdown
Simulated random walk polymerization with PySIMM using CGenFF Prerequisites To run this tutorial, a few additional Python packages should be set up in your system: - **PySIMM** for managing molecules and forcefield typing: (https://github.com/polysimtools/pysimm). - For PySIMM to work properly it should be **integrated with LAMMPS** (the integration only includes the setup of the environmental variable `$LAMMPS_EXEC` that points towards the LAMMPS binary). - Optional: **NGLView** for in-code molecule visualization (https://github.com/nglviewer/nglview).
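###Markdown
A minimal sketch of the LAMMPS integration mentioned above: PySIMM only needs the `$LAMMPS_EXEC` environment variable to point at the LAMMPS binary. The path used below is an illustrative assumption; adjust it for your system or export the variable in your shell before starting Jupyter.
###Code
# Hedged sketch: point PySIMM at a LAMMPS binary (the path is an assumption)
import os
os.environ.setdefault('LAMMPS_EXEC', '/usr/local/bin/lmp_mpi')
###Output
_____no_output_____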
###Code
from pysimm import lmps
from pysimm import system
from pysimm import forcefield
from pysimm.apps.random_walk import random_walk, random_walk_tacticity, check_tacticity
import matplotlib.pyplot as mplp
import matplotlib.image as mplim
from IPython import display
import numpy
import sys
import os
###Output
_____no_output_____
###Markdown
Additional API: **Function to cap a polymer** Here is a description of a PySIMM method that can cap a polymer chain with methyls (*-CH3*). The method requires as input a PySIMM system that represents an uncapped polymer. Here are the main assumptions of the method: * Assumes that the system atoms are typed with the **CGenFF** forcefield (pysimm.forcefield.charmm). * Requires that the atoms to be capped have their linker attribute set to either _'head'_ or _'tail'_. * Assumes that the atoms to be capped are carbons in **sp3 hybridization**. The method does 3 things: 1. Adds a methyl carbon at the vacant valence position of the linker carbon to be capped. 2. Adds 3 hydrogens at the approximate vacant positions of the tetrahedron. 3. Runs a short conjugate gradient minimization to put all atoms into their minimum-energy positions.
###Code
def cap_with_methyls(input_sst, ff):
'''
A utility method that implements capping of the free ends of polymer chains with methyl
groups in all-atom forcefield representation
'''
# Let's cap the oligomer with the methyl (-CH3) group
captypes = []
for cpn in ['CG331', 'HGA3']:
tmp = input_sst.particle_types.get(cpn)
if tmp:
cpt = tmp[0]
else:
cpt = ff.particle_types.get(cpn)[0].copy()
input_sst.particle_types.add(cpt)
captypes.append(cpt)
for p in input_sst.particles:
if p.linker is not None:
if len(p.bonded_to) < 4:
# assuming that the linker atom is sp3 hybridized C, let's define the last non-occupied direction
# of the tetrahedron
dir = numpy.zeros(3)
for p_ in p.bonded_to:
dir += numpy.array([p.x, p.y, p.z]) - numpy.array([p_.x, p_.y, p_.z])
dir = dir / numpy.linalg.norm(dir)
cap_c = system.Particle(x=p.x + 1.53 * dir[0], y=p.y + 1.53 * dir[1], z=p.z + 1.53 * dir[2],
type=captypes[0])
input_sst.add_particle_bonded_to(cap_c, p, f=ff)
dir_h = numpy.array([1.0, 1.0, 1.0])
dir_h[0] = -(dir_h[1] * dir[1] + dir_h[2] * dir[2]) / dir[0]
dir_h = dir_h / numpy.linalg.norm(dir_h)
dir_h2 = numpy.array([1.0, 1.0, -1.0])
dir_h2[1] = (dir[2] / dir[0] - dir_h[2] / dir_h[0]) / (dir[1] / dir[0] - dir_h[1] / dir_h[0])
dir_h2[0] = dir[2] / dir[0] - dir[1] * dir_h2[1] / dir[0]
dir_h2 = dir_h2 / numpy.linalg.norm(dir_h2)
stretch = 0.78
input_sst.add_particle_bonded_to(system.Particle(x=cap_c.x + stretch * dir[0] + stretch * dir_h[0],
y=cap_c.y + stretch * dir[1] + stretch * dir_h[1],
z=cap_c.z + stretch * dir[2] + stretch * dir_h[2],
type=captypes[1]), cap_c, f=ff)
input_sst.add_particle_bonded_to(system.Particle(x=cap_c.x + stretch * dir[0] + stretch * dir_h2[0],
y=cap_c.y + stretch * dir[1] + stretch * dir_h2[1],
z=cap_c.z + stretch * dir[2] + stretch * dir_h2[2],
type=captypes[1]), cap_c, f=ff)
input_sst.add_particle_bonded_to(system.Particle(x=cap_c.x + stretch * dir[0] - stretch * dir_h2[0],
y=cap_c.y + stretch * dir[1] - stretch * dir_h2[1],
z=cap_c.z + stretch * dir[2] - stretch * dir_h2[2],
type=captypes[1]), cap_c, f=ff)
input_sst.objectify()
input_sst.center(what='particles', at=[0.0, 0.0, 0.0], move_both=False)
sim = lmps.Simulation(input_sst, log='capping_opt.log')
sim.add_min(min_style='cg', name='min_cg', etol=1.0e-6, ftol=1.0e-6, maxiter=int(1e+6), maxeval=int(1e+7))
sim.run()
###Output
_____no_output_____
###Markdown
**Functions to draw results** Method that plots ratio of meso/racemo diads in the system as calculated by the `pysimm.apps.random_walk.check_tacticity()`
###Code
def display_diad_distrib(data, canvas):
canvas.hist(list(map(int, data)), bins=[-0.5, 0.5, 1.5], rwidth=0.2,
range=(-0.5, 1.5), density=False);
mplp.xticks(ticks=[0, 1], labels=['Racemo', 'Meso'], fontsize=16)
canvas.set_ylabel('Number of diads', fontsize=18)
canvas.set_xlabel('Diad type', fontsize=18)
return canvas
###Output
_____no_output_____
###Markdown
This method draws a PySIMM molecular system by creating a temporary .pdb file and using NGLView to show it on the notebook canvas. If the `fname` file name is set, it will also save the temporary .pdb file under that name.
###Code
try:
import nglview
except ImportError as err:
print('No NGLView module installed for this Python kernell')
def display_system(sstm, fname=None, labels_on=False):
is_temp = False
if not fname:
fname = 'tmp_file.pdb'
is_temp = True
sstm.write_pdb(fname)
if 'nglview' in sys.modules.keys():
view = nglview.show_structure_file(fname)
if labels_on:
view.add_label(color='black', scale=1.3, labelType='text',
labelText = [str(pt.tag) for pt in sstm.particles],
zOffset=2.0, attachment='middle_center')
if is_temp:
os.remove(fname)
return view
###Output
_____no_output_____
###Markdown
Step **I**: Preparing the repetitive unit Let's load the repetitive unit of the polymer into a `pysimm.system()` from a `.pdb` file that has a `'CONNECT'` section (PySIMM interprets that section and makes bonds between the atoms of the system accordingly). The `system.read_pdb` method supports an additional string parameter that points to a `.str` (CHARMM stream) file, which can be used to update the charges of the particles of the system. Additionally, using the **nglview** Python package we can visualize the structure directly from the same `.pdb` file.
###Code
data_path = '../../../../pysimm/models/monomers/topologies/'
sst = system.read_pdb(data_path + 'cbma.pdb', str_file=data_path + 'cbma.str')
sst.set_charge()
print('Total charge of the rep. unit is {}q'.format(round(sst.charge, 6)))
display_system(sst, labels_on=True)
###Output
(debug) PySIMM: reading file
(debug) PySIMM: read_pdb: reading file '../../../../pysimm/models/monomers/topologies/cbma.str'
Total charge of the rep. unit is 0.0q
###Markdown
Here is an example of the definition of head and tail atoms in the CBMA repetitive unit. As shown above, the undercoordinated carbon atoms (carbons with incomplete valency) have the indices 1 and 2. They will be the head (the atom with which the current repeating unit connects to the previous repeating unit) and the tail (the atom to which the next repeating unit connects) during the PySIMM polymerization process.
###Code
display.Image('../figures/figure2.png', embed=True, retina=True)
lnkr_atoms = {'head': 1, 'tail': 2}
for nm in lnkr_atoms.keys():
sst.particles[lnkr_atoms[nm]].linker = nm
###Output
_____no_output_____
###Markdown
Let's type the repetitive unit with the basic PySIMM CGenFF automatic typing tool. The partial charges of all particles were read from the .str file, so there is no need to reassign them (in principle, PySIMM can assign them using the Gasteiger method).
###Code
ff = forcefield.Charmm()
sst.apply_forcefield(ff, charges=None)
###Output
PySIMM: Dihedrals assigned successfully.
IMPORTANT: all dihedral weighting factors (coefficients to compensate for double counting in rings) are currently set to 1.0.
If those values are different for your system please multiply corresponding force constants by the weights manually.
###Markdown
Step **II**: Making the polymer and checking its tacticity Once the forcefield types and charges are defined, one can use the forcefield-assisted random walk method. Let's build a short chain of 10 repetitive units.
###Code
sngl_chain = random_walk(sst, 10,
forcefield=ff, density=0.01,
print_to_screen='false', traj=False, unwrap=True)
###Output
PySIMM: Molecule 1 inserted
17:07:14: 1/10 monomers added
17:07:14: 2/10 monomers added
17:07:14: starting relax_002 LAMMPS simulation
17:07:14: relax_002 simulation using LAMMPS successful
17:07:14: 3/10 monomers added
17:07:14: starting relax_003 LAMMPS simulation
17:07:15: relax_003 simulation using LAMMPS successful
17:07:15: 4/10 monomers added
17:07:15: starting relax_004 LAMMPS simulation
17:07:16: relax_004 simulation using LAMMPS successful
17:07:16: 5/10 monomers added
17:07:16: starting relax_005 LAMMPS simulation
17:07:17: relax_005 simulation using LAMMPS successful
17:07:18: 6/10 monomers added
17:07:18: starting relax_006 LAMMPS simulation
17:07:19: relax_006 simulation using LAMMPS successful
17:07:19: 7/10 monomers added
17:07:19: starting relax_007 LAMMPS simulation
17:07:22: relax_007 simulation using LAMMPS successful
17:07:22: 8/10 monomers added
17:07:22: starting relax_008 LAMMPS simulation
17:07:24: relax_008 simulation using LAMMPS successful
17:07:24: 9/10 monomers added
17:07:24: starting relax_009 LAMMPS simulation
17:07:27: relax_009 simulation using LAMMPS successful
17:07:27: 10/10 monomers added
17:07:27: starting relax_010 LAMMPS simulation
17:07:31: relax_010 simulation using LAMMPS successful
###Markdown
The build is successful; let's visualize the oligomer chain we built.
###Code
display_system(sngl_chain, fname='oligomer.10unts.uncapped.vacuum.pdb')
###Output
_____no_output_____
###Markdown
So far, the oligomer chain is uncapped: there are two undercoordinated backbone atoms in the system. Let's use the `cap_with_methyls()` method defined at the top of this notebook to connect methyl groups to those undercoordinated atoms.
###Code
oligomer = sngl_chain.copy()
cap_with_methyls(oligomer, ff)
oligomer.center(what='particles', at=[0.0, 0.0, 0.0], move_both=False)
display_system(oligomer, 'oligomer.10unts.capped.vacuum.pdb')
###Output
_____no_output_____
###Markdown
Finally, let's check the tacticity of the created oligomer. The `check_tacticity()` method of the `random_walk` application analyzes the local geometry of atoms for polymers which can have tacticity. The method returns the distribution of meso/racemo diads along the backbone of the macromolecule (see the image).
###Code
display.Image('../figures/figure3.png', embed=True, retina=True)
###Output
_____no_output_____
###Markdown
The input parameters of the method are:
 - A PySIMM system that represents the macromolecule to analyze.
 - A list of 4 integers that defines the indices of the node atoms in a repetitive unit of the macromolecule. In that order, the indices represent: **(1)** the first atom of the backbone; **(2)** the second atom of the backbone; **(3)** the first atom of the first side chain (or methyl, or hydrogen); **(4)** the first atom of the second side chain. Note: the colors of the atom indices match the colors of the vectors on the figure below.
 - The length of the repetitive unit of the macromolecule.

The second element of the `check_tacticity()` output is a list that shows whether two consecutive repetitive units in the chain form either a meso (True) or a racemo (False) diad. Let's examine the obtained oligomer and print the result as a simple 2-column histogram showing the ratio of meso to racemo diads. The indices for the analyzed repetitive unit are highlighted on the **first image** of the tutorial, and they are 1, 2, 8, and 10.
###Code
display.Image('../figures/figure2.png', embed=True, retina=True)
tacticity_stat = check_tacticity(sngl_chain, [1, 2, 8, 10], len(sst.particles))
fig, ax = mplp.subplots(1, 1, figsize=(8, 4))
display_diad_distrib(tacticity_stat[1], ax);
###Output
_____no_output_____
###Markdown
In this particular case, among 10 monomers (thus 9 diads) we see that most of them have a meso- configuration, meaning that the two monomers in the diad have the same orientation. Only a few monomer pairs (10%-30%, depending on the run) occasionally form racemo- diads. In this implementation of the random walk there is no explicit control over the orientation of the following monomer, and all monomers initially attach forming meso- diads. However, because of the geometry optimization and the short NVE simulations, the orientation of neighbouring monomers can occasionally switch, so we see some racemo- diads. Depending on the strength of the energy barrier, during longer MD simulations the polymer can relax to an atactic state. But PySIMM also allows one to gain more control over polymer tacticity right after construction.

Step **III**: Polymerization with controlled tacticity In this part let's use another method, `random_walk_tacticity()`, which allows one to define the orientation of the next monomer attached during the polymer-building phase. For that, some additional modifications should be made to the repetitive unit we previously used:
 * The `random_walk_tacticity()` method requires a capped monomer, so let's add capping carbon atoms to the linker atoms of our monomer. (The capping atoms will be undercoordinated, but this is not important here, as they are removed during the simulated polymerization anyway.)
 * Both capping atoms should be decorated with an additional field named `rnd_wlk_tag` that contains either the `'head_cap'` or the `'tail_cap'` string value, respectively.
 * An additional label should be assigned to an atom that, together with the backbone linker atoms, forms a plane, which will then define the necessary reflection of the next monomer (see the figure).
###Code
new_sst = sst.copy()
bnd_length = 1.4
# random_walk_tacticity requires a capped molecule, however, the capping atoms will be deleted.
# Those are basically dummy atoms, and can be of any type. Let's define them as carbon backbone atoms,
# which will allow us to maintain the correct backbone atom types.
captype = (new_sst.particle_types.get('CG331') +
new_sst.particle_types.get('CG321') +
new_sst.particle_types.get('CG311'))[0]
# loop through the particles to add caps to linkers
for p in new_sst.particles:
if p.linker:
# define and normalize directional vector for the capping atom
tmp = numpy.array([sum([p.x - p_.x for p_ in p.bonded_to]),
sum([p.y - p_.y for p_ in p.bonded_to]),
sum([p.z - p_.z for p_ in p.bonded_to])])
tmp = bnd_length * tmp / numpy.linalg.norm(tmp)
# add new capping particle along the defined direction
new_p = new_sst.add_particle_bonded_to(system.Particle(x=p.x + tmp[0],
y=p.y + tmp[1],
z=p.z + tmp[2],
type=captype), p, f=ff)
# decorate particle with '****_cap' tag and assign head_cap a property to mark it
# with 'mirror' label as an atom that makes plane for syndiotactic reflection
new_p.rnd_wlk_tag = p.linker + '_cap'
if p.linker == 'head':
setattr(new_p, 'linker', 'mirror')
new_sst.objectified = False
new_sst.objectify()
display_system(new_sst, labels_on=True)
###Output
_____no_output_____
###Markdown
* To control tacticity the method has a `tacticity` keyword argument that accepts a real number $n \in [0; 1]$, which defines the relative number of isotactic insertions, so that $n = 1$ gives a fully isotactic chain, $n = 0$ a syndiotactic one, and $n = 0.5$ a chain with an equal number of isotactic and syndiotactic insertions.
 * The method also accepts specific strings as values of the `tacticity` key: one can use either $n=1$ or `'isotactic'`, $n=0$ or `'syndiotactic'`, and $n=0.5$ or `'atactic'`.

For more details and more options of `random_walk_tacticity()` please see the method description in the PySIMM documentation. First, let's run `random_walk_tacticity()` in no-simulation mode: each next monomer is put at an approximately correct, geometrically calculated position without force field optimization and NVE simulations.
###Code
polymer_nosim = random_walk_tacticity(new_sst, 15, forcefield=ff,
tacticity='syndiotactic', sim='no', density=0.01)
display_system(polymer_nosim, 'straight_chain.pdb')
###Output
_____no_output_____
###Markdown
The result is a syndiotactic chain, and all diads in this case are clearly racemo- diads.
###Code
tacticity_stat = check_tacticity(polymer_nosim, [1, 2, 8, 10], len(sst.particles))
fig, ax = mplp.subplots(1, 1, figsize=(8, 4))
display_diad_distrib(tacticity_stat[1], ax);
###Output
_____no_output_____
###Markdown
Now let's do the same but with the forcefield optimization turned on, and see how many diads will be reconfigured from racemo- to meso- geometry.
###Code
polymer = random_walk_tacticity(new_sst, 15, forcefield=ff, tacticity=0.0, )
display_system(polymer, 'some_chain.pdb')
tacticity_stat = check_tacticity(polymer, [1, 2, 8, 10], len(sst.particles))
fig, ax = mplp.subplots(1, 1, figsize=(8, 4))
display_diad_distrib(tacticity_stat[1], ax);
###Output
_____no_output_____
###Markdown
* The figure confirms that the optimization can change the initial distribution of monomer orientations. However, the chain obtained with `random_walk_tacticity()` has more racemo- than meso- diads (as the example was set up to create a syndiotactic polymer), compared to the chain from the original `random_walk()` (10%-30%).

A setup to construct a polymer chain with exact tacticity In the previous section it was shown that in PySIMM one can easily construct a 'no simulation' polymer chain. This simple (but, for explaining tacticity, illustrative) chain is made by adding the monomers to each other *without* force field optimizations. The tacticity (ratio of meso-/racemo- diads) of that chain is easy to set exactly, unlike the tacticity of a chain built with forcefield optimizations enabled. The robustness of the forcefield functional form allows one to run simulations using the 'no simulation' chain as an initial structure. Sufficiently long MD simulations with the angle between the side radicals fixed (via SHAKE) will result in a relaxed polymer chain with exactly the same tacticity as constructed at the beginning. *Please note that, depending on the geometry of your repetitive unit, the initial structure of the chain of concatenated monomers might be more or less approximate. We recommend (if possible) putting the linker atoms (tail and head atoms) into anti-periplanar positions (see e.g. the pCBMA repetitive unit in this tutorial).* Below is a small code listing that sets up (using PySIMM) MD simulations starting from the 'no simulation' configuration and produces the relaxed polymer chain.
###Code
pmer_shake = polymer_nosim.copy()
sim = lmps.Simulation(pmer_shake, name='shake_relax', log='shake.log')
# This command adds a SHAKE fix to LAMMPS simulations which fixes an angle
# between two side chains of the pCBMA, and bonds that make that angle
sim.add_custom('fix shck_fix all shake 0.001 40 0 b {} {} a {}'.format(
polymer.bond_types.get('CG2O2,CG301')[0].tag,
polymer.bond_types.get('CG301,CG331')[0].tag,
polymer.angle_types.get('CG331,CG301,CG2O2')[0].tag))
sim.add_md(ensemble='nve', limit=0.1, length=30000)
sim.run()
pmer_shake.unwrap()
display_system(pmer_shake)
tacticity_stat = check_tacticity(pmer_shake, [1, 2, 8, 10], len(sst.particles))
fig, ax = mplp.subplots(1, 1, figsize=(8, 4))
display_diad_distrib(tacticity_stat[1], ax);
print(tacticity_stat[1])
###Output
[True, False, False, False, False, False, False, False, False, False, False, False, False, False]
|
AdvancedPython/TheNeedForSpeed.ipynb | ###Markdown
Python - The Need for Speed Python was originally developed by Guido van Rossum as a high-level general purpose scripting language in the 1990s. Its strength has been in particular that it is easy to learn and that algorithms can be coded quickly (Python has sometimes been described as executable pseudo-code). In recent years it has been increasingly used also for scientific computing and mathematics. Here one quickly reaches a point where the time spent waiting for a program to run becomes much longer than the time spent writing the code. Hence the need for speed. In this tutorial a number of ways of speeding Python up will be explored. These will be tried in the context of a simple numerical algorithm. We will start with a simple but slow implementation and then look at what makes it slow and how to speed things up.

Method 0: Gaussian Elimination - A simplistic implementation Consider one of the basic mathematical algorithms that most students learn in their first year of university mathematics: solving a set of simultaneous linear equations using the Gaussian Elimination algorithm. Below is the framework for a function to implement this method. Note the use of comments to describe the intent and usage of the function. Preconditions and post-conditions, in the form of assert statements, are used to help with debugging. Including such comments and conditions as a matter of course is a good practice, even for code that you don't intend to maintain long term, as it can save you a lot of time hunting for bugs or when you invariably end up re-using code for other purposes than originally planned. While this is not the main point of the exercise, you may also want to think about things such as numerical stability and how to deal with any errors. To keep things simple here we are going to assume that the input arguments are a dense matrix $A$ in the form of a dictionary, a right hand side vector $b$, and we return the solution to $A x = b$ as a simple list.

*Hint:* Here is pseudo-code for a very basic Gaussian elimination algorithm
```python
INPUT: A, b
U := A                      # so the input is not modified
x := b
for i = 1,...,n:            # using 1-indexing here as in typical math notation
    # assuming U_ii != 0 here; could add code to cater for this exception
    divide row i of U by U_ii   (this makes U_ii == 1)
    for k = i+1,...,n:
        subtract U_ki times row i from row k
        x_k := x_k - U_ki * x_i
# U is now upper triangular
for i = n,n-1,...,1:        # back substitution
    x_i := (x_i - sum( U_ik * x_k for k=i+1,...,n) ) / U_ii
OUTPUT: x
```
###Code
def gaussSimple(A,b):
"""Solve the set of equations A x = b and return x
Input: b = list of length n, and A = dictionary, with A[i,j] being the entry for every row i, column j
Output: A list of length n giving the solution. (This version throws an assertion failure if there is no solution)
Note: The inputs are not modified by this function."""
n = len(b)
N = range(n)
assert (len(A) == n*n), "Matrix A has wrong size" # simple check of inputs
assert all( (i,j) in A for i in N for j in N), "Cannot handle sparse matrix A"
U = dict(A.items()) # make a copy of A before we transform it to upper triangular form
x = b[:] # copy so we don't modify the orignal
## insert your code here
## for the most basic version we want to:
## For every row i
## Eliminate all entries in column i below i by subtracting some multiple of row i
## Update the right hand side (in x) accordingly
## return [] if A does not have full rank (we come across a coefficient of zero)
## Back-substitute to replace x with the actual solution
error = max( abs(sum(A[i,j]*x[j] for j in N)-b[i]) for i in N)
assert (error < 1e-5 ), f"Incorrect solution: out by {error}" # check that we have reasonable accuracy
return x
###Output
_____no_output_____
###Markdown
To test this we are going to generate some random data. This will also allow us to see how fast it runs as the size of the problem increases
###Code
from random import random
def randProb(n):
"Returns a randomly generated n x n matrix A (as a dictionary) and right hand side vector b (as a list)."
n = int(n)
assert n > 0
N = range(n)
return dict( ((i,j), random()) for i in N for j in N), [random() for i in N]
A,b = randProb(3)
gaussSimple(A,b)
###Output
_____no_output_____
###Markdown
Execution timing Now let's see how fast this thing goes. There are a couple of things to think about when we talk about timing:
 * Is it elapsed (wall-clock) time or CPU time we are measuring? The two should be nearly the same if we are using a single-threaded program on a computer that is not fully loaded. However, once we have multiple threads in our program, or multiple other programs competing for a limited number of CPUs, the results could be quite different.
 * Random variation - running the same code several times can result in small random variations in run-time due to a range of factors (cache misses, variable CPU clock speed, computer load, etc.) even when the function we are testing is entirely deterministic and run with the same inputs.
 * Garbage collection: one of the things that make Python convenient is that we generally don't have to worry about memory management. However, if the automated garbage collection happens to cut in during the function we are testing, this can make a big difference.

For our tests here we are going to use the `timeit` module, which takes care of the last two points by turning off garbage collection and making repeated tests easy. We will specifically ask it to measure CPU time using the `time.process_time()` function, but you can experiment with the `time.perf_counter()` function which is the default. In addition, to get a feeling for how the runtime increases as we double the size of the data, we also print the ratio between successive tests with increasing data. We reduce the random variation between tests of different methods by always generating data with the same random number seed. Since we are going to make use of the speed test functions repeatedly, we write this out as a separate module that we can import into any notebook or Python program.
###Code
%%writefile 'gausstest.py'
"""This module contains a function for testing the speed of a function which solve Ax=b
usage: import gausstest
gausstest.speed(myfunction) # optional second argument, maximum size n
Here myfunction takes arguments A,b,"""
import timeit,time,gc
from statistics import mean,stdev
from random import seed,random
def randProb(n):
"Returns a randomly generated n x n matrix A (as a dictionary) and right hand side vector b (as a list)."
n = int(n)
assert n > 0
N = range(n)
return dict( ((i,j), random()) for i in N for j in N), [random() for i in N]
def speed(method,maxSize=400):
seed(123456) # fix some arbitrary seed so we keep generating the same data
randP = lambda n : randProb(n) # add randProb to locals() namespace
prev,n = 0.0, 50
gc.disable()
while n <= maxSize:
gc.collect() # manual cleanout of garbage before each repeat
t = timeit.repeat(stmt="method(A,b)",setup=f"A,b=randP({n})",
timer=time.process_time,repeat=5,number=1,globals=locals())
        print("%4d %10.4f σ=%.2f sec" % (n,mean(t),stdev(t)),"(x %.2f)"%(mean(t)/prev) if prev > 0 else "")
prev = mean(t)
n *= 2
gc.enable()
###Output
_____no_output_____
###Markdown
Now let's test our function systematically.
###Code
import gausstest
print("Simple test of a single 3-dimensional instance: ",
gaussSimple(*gausstest.randProb(3)),"\n")
# Note that in the above the '*' means we are passing the tuple of outputs
# from randProb as successive arguments to gaussSimple
gausstest.speed(gaussSimple) # full test
###Output
_____no_output_____
###Markdown
Discussion - Complexity
 * What is the theoretical complexity of this algorithm?
 * Does the practical performance match the theoretical expectation?
 * What can be done to make this implementation better?
   * More robust numerically? - (left as an exercise to interested students)
   * Faster? - What makes it slow?

Method 1: changing the data structures As discussed, part of the reason for the slow time is that Python treats every variable, and every element of generic data structures like lists and dictionaries, as an "Any" type that could contain anything. That means that in executing every step of the algorithm, the Python interpreter has to go through a vast list of "if" statements to check every possible type to determine what actually needs to be done at this point of the code. In our next version, we are going to replace the dictionary and list structures with `array` data structures. These are much more like the arrays found in Java/C/Fortran/...: just a big chunk of memory with each element containing the same type. The basic usage for this data type is
```python
from array import array
x = array('i',range(10))
```
This would initialise an array of integers containing the numbers 0,...,9. An array behaves much like a list in Python but contains only elements of one basic type (integer, char, float). For our purposes we will want to create `array('d',[])` where 'd' stands for `double` (C-style 64-bit floating point number) and `[]` should be replaced by a suitable initialiser rather than the empty list.

**A** Write a function `gaussArray` that uses array data structures for all data types (matrix, right hand side and x). To fit the matrix into a single dimensional array, you need to do a bit of index arithmetic (and use `range` with an appropriate step size); a small sketch of this indexing is shown below. Alternatively (if you prefer) you could use the `numpy` version of `array`.
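For instance (a small illustrative sketch, not the exercise solution), the element $(i,j)$ of an $n \times n$ matrix stored row-by-row in a flat array lives at index `i*n + j`:
```python
from array import array

n = 3
# a 3x3 matrix flattened row by row into a 1-D array of doubles
M = array('d', [1, 2, 3,
                4, 5, 6,
                7, 8, 9])

i, j = 1, 2
print(M[i*n + j])             # 6.0 -> the element in row 1, column 2
print(list(M[i*n:(i+1)*n]))   # [4.0, 5.0, 6.0] -> all of row i as a slice
```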
###Code
from array import array
#Solution algorithm
def gaussArray(A,b):
"""Solve the set of equations A x = b and return x
Input: b = list of length n, and A = dictionary, with A[i,j] being the entry for every row i, column j
Output: An array of length n giving the solution.
Note: The inputs are not modified by this function."""
## insert your code here
import gausstest
gausstest.speed(gaussArray)
###Output
_____no_output_____
###Markdown
Discussion Question: Where does the speed-up of this method over the previous one come from? What makes this method quite slow still?

Method 2: Using numpy For numeric computation, a "standard" set of libraries is the [numpy](http://www.numpy.org/) module and friends. This provides vectors and matrices together with basic linear algebra routines implemented largely in C++ and Fortran. These largely mimic the basic functionality built into MATLAB. Many other Python packages are built on top of this, so the numpy data types have become a de facto standard for any kind of scientific computing with Python. We could use numpy to solve our equations directly using the [`numpy.linalg.solve`](https://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.solve.html#numpy.linalg.solve) routine. You may want to try this briefly. However, here we are mainly interested in seeing whether writing our own algorithm on top of the basic matrix methods is going to be faster.

Basic usage for numpy: For convenience we `import numpy as np` (a fairly common practice in the python community)
 * Create matrices using something like `A = np.array( [ [1,2,3],[4,5,6] ] )` to create a 2 x 3 matrix. If all elements are integer this will be int64 type, otherwise float64. You can query the dimensions with `A.shape`.
 * Matrices can be indexed using `A[0,2] == 3` or using slices such as `A[0,1:3] == np.array([2,3.0])`.
 * Arithmetic operations are overloaded to mostly do what you would expect. The only significant exception is multiplication. When multiplying two arrays or matrices together we get the Schur product, that is, the result has each of the corresponding elements from the inputs multiplied together (the operands must have the same dimensions). To get the normal inner product or matrix product use `np.matmul`, e.g. `np.matmul( np.array([[3,0],[0,2]]), A)`, or `A.dot(x)` to get an inner product that is effectively the same as matrix multiply if A is a matrix and x is a vector or matrix (but behaves slightly differently for other types of A & x).
 * Matrices can be built up using `np.hstack`, `np.vstack` (horizontal & vertical stacking).
 * To reshape a numpy array use `np.reshape`; this is particularly useful to turn 1-dimensional matrices into 2-dimensional matrices. `V = np.reshape(v,(len(v),1))` will turn an array v of length n into an n x 1 matrix V.

The task for this method is to write a Gaussian elimination method using matrix multiplications. In a first year maths course you have probably seen that the elementary row operations of Gaussian elimination can be represented by pre-multiplying the matrix we are reducing with a suitable elementary matrix (an identity matrix with one off-diagonal element). So we can rewrite the algorithm to set up a suitable elementary matrix for each row reduction and pre-multiply our matrix with it. For example, for a 5 x 5 matrix $A$, to subtract 3 times the 2nd row from the fourth row we would pre-multiply $A$ by
$$E_{r4-3\times r2}=\begin{bmatrix}1& & & &\\& 1 & & &\\ &&1&&\\&-3&&1&\\&&&&1\end{bmatrix}$$
There is only one problem: naive matrix multiplication requires $O(n^3)$ operations. If we just replace our inner loop with such a matrix multiplication, the overall complexity will be $O(n^5)$ - clearly that would be a bad idea. To fix this we are going to use two "tricks":
 1. Collapsing all of the elementary matrices into a single square matrix that carries out all of the row reductions below the current element. For "zeroing" out the column below element $(i,i)$ this looks like an identity matrix with non-zero elements only below element $(i,i)$. This means we are only carrying out $O(n)$ matrix multiplications.
 2. Using sparse matrices. Since the matrices we are using are mostly zero, we only want to store, and more importantly multiply with, the non-zero entries of the matrix. This reduces the cost of each matrix multiplication from $O(n^3)$ to $O(n^2)$, as the sparse matrix only has at most 2 non-zero entries per row.

Note: sparse matrices are found not in numpy itself but in [scipy.sparse](https://docs.scipy.org/doc/scipy/reference/sparse.html) where there are multiple formats, depending on whether we are storing matrices by row or column or with some (block) diagonal structure. Here it makes sense to use the row based format (as when we are pre-multiplying with our special matrix, each entry in the answer is the inner product of a column of the right hand side with a row of our special matrix). For a compressed row sparse matrix the internal storage representation is:
 * An array `start` of length number of rows + 1, with `start[i]` containing the first index of non-zero elements of row `i` (and `start[i+1]` giving the end)
 * An array `col` of length number of non-zeros where `col[j]` is the column of the j'th non-zero entry in the matrix
 * An array `data` of length number of non-zeros so that `A[i,j] == data[k]` if `col[k]==j` and `start[i]<=k<start[i+1]`

We can set such a sparse matrix up by either providing the `start`, `col` and `data` arrays explicitly, or in various other formats as described [here](https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.html#scipy.sparse.csr_matrix); a small construction sketch follows.
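As a small illustration of the CSR constructor (a sketch only; the helper name `elimination_matrix` is made up here and this is not the exercise solution), the combined elimination matrix for one column can be built from the `data`, `col` and `start` arrays described above:
```python
import numpy as np
from scipy.sparse import csr_matrix

def elimination_matrix(U, i):
    "Sparse matrix E such that E.dot(U) has zeros below the diagonal in column i."
    n = U.shape[0]
    data, col, start = [], [], [0]
    for r in range(n):
        if r <= i:
            data.append(1.0); col.append(r)            # identity part
        else:
            data.extend([-U[r, i] / U[i, i], 1.0])     # row r minus a multiple of row i
            col.extend([i, r])
        start.append(len(data))
    return csr_matrix((data, col, start), shape=(n, n))

U = np.array([[2.0, 1.0], [4.0, 3.0]])
E = elimination_matrix(U, 0)
print(E.dot(U))   # [[2. 1.] [0. 1.]] -- column 0 is now zero below the diagonal
```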
###Code
import numpy as np
def npSolve(A,b):
"just to check that numpy really is faster"
# write an implementation that calls np.linalg.solve
gausstest.speed(npSolve,800)
import numpy as np
from scipy.sparse import csr_matrix
def gaussSparse(A,b):
"Implementation of Gaussian elimination with numpy and sparse matrices"
# write an implementation that uses csr_matrix
gausstest.speed(gaussSparse,1600) # this should be fast enough to allow running up to 1600x1600
###Output
_____no_output_____
###Markdown
Method 3 - Call an external library At this point you might think: for this task Python is more trouble than it's worth. Why not just write the critical parts in a real programming language (say C++)? Just use Python to do what it's good at: pre-processing data, reporting, quick-and-dirty hacking, etc. Fortunately, calling an external library from Python is not hard. Here I will provide you with a simple C-callable library that implements the basic Gaussian elimination, and it will be your task to interface with it. Similar patterns are also useful if you are using a commercial library (no source code available) or if you inherit some code from a colleague who still codes in Fortran. For starters here is the C++ code
###Code
%%writefile gauss.cpp
extern "C" { // initial declaration as C style function (don't want C++ function name mangling)
const char *hello(const char *world); // simple test function
double check(const double **A,int i,int j); // returns A[i][j], check if arguments are passed correctly
double gauss(int n,const double **A,const double *b,double *x); // compute x : A x = b, for n x n matrix A
// assumes that x is already allocated in the right size - returns the maximum error
}
#include <stdio.h>
#include <vector>
#include <math.h>
const char*hello(const char *world) { static char buf[1028]; sprintf(buf,"Hello %s",world); return buf; }
double check(const double **A,int i,int j) { // check if arguments are passed correctly
return A[i][j];
}
double gauss(int n,const double **A,const double *b,double *x) // compute x : A x = b, for n x n matrix A & return max error
{
std::vector<std::vector<double> > U(n);
for(int i=0; i<n; ++i){ // copy input data into U
U[i].resize(n+1);
for(int j=0; j<n; ++j) U[i][j]=A[i][j];
U[i][n]=b[i];
}
for(int i=0; i<n; ++i) // do the row reduction
for(int j=i+1; j<n; ++j){
const double mult = U[j][i]/U[i][i];
U[j][i] = 0;
for(int k=i+1; k<=n; ++k)
U[j][k] -= mult * U[i][k];
}
for(int i=n-1; i>=0; --i){ // back-substitution
x[i] = U[i][n];
for(int j=i+1; j<n; ++j) x[i] -= U[i][j]*x[j];
x[i] /= U[i][i];
}
double error=0;
for(int i=0; i<n; ++i){
double sum=-b[i];
for(int j=0; j<n; ++j) sum += A[i][j]*x[j];
if(fabs(sum)>error) error = fabs(sum);
}
return error;
} // end function gauss()
###Output
_____no_output_____
###Markdown
Let's compile this code. If you are under Windows and are having trouble with this, there is a pre-compiled .dll on the website (just put it in the same folder as this notebook).
###Code
import subprocess
# run: a simple function to execute a command (like os.system) and capture the output
run = lambda cmd: subprocess.run(cmd.split(),stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.decode("utf-8")
print(
run("g++ -o gauss.so -fPIC -shared -O3 -Wall gauss.cpp -lm"), # compile
run("ls -l gauss.so")) # check the library exists
#### Note: in Jupyter notebooks we could just as easily do this with the 'magic' ! commands below
# This is just a bit of magic to compile the C++ code. If you are doing this on Windows you would get a DLL (assuming
# you have a proper windows installation that includes enough of cygwin to make windows look like a proper operating system :-)
!g++ -o gauss.so -fPIC -shared -O3 -Wall gauss.cpp -lm
# you should now have a file gauss.so in the same directory as the notebook
!ls -l gauss.so # just checking that the file is here and has the correct timestamp
###Output
_____no_output_____
###Markdown
Calling an external library Most of the magic required for interacting with the compiled library can be found in the [`ctypes`](https://docs.python.org/3.6/library/ctypes.html) module. The key elements are:
 * `cdll.LoadLibrary("./gauss.so")` allows you to load a library. There are some subtle differences between Windows (loading a .dll library) and Linux (calling a .so library) but it should work on any system (also Macs). Note: a library can only be loaded once. A second call to LoadLibrary with the same filename will return a link to the same (previously loaded) library. If you change the C++ source code and recompile, you need to restart the kernel (or change the filename).
 * Converting to the correct types: ctypes defines the standard C types. Many things will convert automatically, for example a Python `bytes` string to `const char *`.
 * When working with C types explicitly you may need to convert. E.g. `i=c_int(3)` and `i.value` (to go from a Python int to a `c_int` and back again).
 * Sometimes we need to specifically set things up correctly. We can use `ctypes.c_void_p` to get a generic pointer type (`void *`) or use `POINTER(c_int)` to create a pointer to an integer (in this case).
 * Arrays can be declared directly by multiplying a type by an integer size and using an initialiser list. For example `c_int*3` is the same type as `int[3]` in C/C++. So `(c_int*3)()` constructs an uninitialised array and `(c_int * 3)([0,1,2])` will create an initialised array. Just like in C, you can call a function expecting `POINTER(c_int)` with an argument of type `c_int*3`, for example.
 * You may need to declare functions, including their return type, so that Python knows how to call the function. For example, if you had loaded a library `lib` containing a function called `func`, then
   * `lib.func.argtypes = [c_char_p, c_double]` says that `func(char *, double)` is the signature of the arguments
   * `lib.func.restype = c_char` says func returns a `char` (rather than the default `int`)
 * Alternatively numpy provides a simple way to access the pointer to the underlying memory buffer. If `x` is a numpy array then `x.ctypes.data` contains the pointer. See [`numpy.ndarray.ctypes`](https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.ctypes.html) for more information.
 * Yet another option is to modify the C code to have a set of simple helper functions for setting up the inputs and querying the outputs, with all helper functions just receiving or returning basic values (int or double) that can be passed easily.

Have a go at calling the compiled library. To get started you might want to test that you can load and call simple functions. For example try calling the `hello(b"world")` function from the library or use the `check(A,i,j)` function to see if you can pass a matrix and return element `A[i][j]`.
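For instance, a minimal sketch of these first steps (assuming `gauss.so` was compiled as above; the type declarations shown are just one possible way to set things up):
```python
from ctypes import cdll, cast, c_char_p, c_double, POINTER

lib = cdll.LoadLibrary("./gauss.so")

# hello(const char *) returns const char *
lib.hello.restype = c_char_p
print(lib.hello(b"world"))            # b'Hello world'

# check(const double **A, int i, int j) returns A[i][j]
lib.check.restype = c_double
row0 = (c_double * 2)(1.0, 2.0)
row1 = (c_double * 2)(3.0, 4.0)
A = (POINTER(c_double) * 2)(cast(row0, POINTER(c_double)),
                            cast(row1, POINTER(c_double)))  # double** style array of row pointers
print(lib.check(A, 1, 0))             # 3.0 == A[1][0]
```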
###Code
# insert your code here
###Output
_____no_output_____
###Markdown
Now try writing a small wrapper to pass the standard equation data into the C program and return the result as a list; one possible sketch is shown below, and the next cell is the place for your own version.
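A possible sketch of such a wrapper (the name `gaussCtypes` and the explicit `double**` construction are just one way to do it, under the assumption that `gauss.so` has been compiled as above):
```python
from ctypes import cdll, cast, c_double, c_int, POINTER

_lib = cdll.LoadLibrary("./gauss.so")
_lib.gauss.restype = c_double          # gauss() returns the maximum error

def gaussCtypes(A, b):
    "Solve A x = b using gauss() from gauss.so; A is a dict, b a list."
    n = len(b)
    rows = [(c_double * n)(*[A[i, j] for j in range(n)]) for i in range(n)]
    Aptr = (POINTER(c_double) * n)(*[cast(r, POINTER(c_double)) for r in rows])
    bvec = (c_double * n)(*b)
    x = (c_double * n)()               # output buffer allocated by the caller
    err = _lib.gauss(c_int(n), Aptr, bvec, x)
    assert err < 1e-5, f"Incorrect solution: out by {err}"
    return list(x)

print(gaussCtypes({(0, 0): 2.0, (0, 1): 1.0, (1, 0): 1.0, (1, 1): 3.0}, [3.0, 5.0]))
# expected approximately [0.8, 1.4]
```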
###Code
# solution
from ctypes import *
def gaussC(A,b):
"Solve by using the gauss() function from gauss.so/gauss.dll"
# write your code here
import gausstest
gausstest.speed(gaussC,1600) # how fast will this go?
###Output
_____no_output_____
###Markdown
Discussion Why is C fast? Can Python ever be as fast as C?

Method 4 - Using Numba What about compiling Python to give it the speed of C/C++/Fortran, or at least something close to it? There are a number of projects working towards this goal. Some examples include [PyPy](https://pypy.org/), [PyRex](http://www.cosc.canterbury.ac.nz/greg.ewing/python/Pyrex/) (no longer supported?) and [Numba](http://numba.pydata.org/). Each of these makes some attempt to compile Python to native machine code. Note that there are also some other Python implementations such as Jython and IronPython that target the Java Virtual Machine and .NET, but this is more about compatibility with a different software ecosystem than speed. Another issue that can cause confusion is that Python "byte compiles" modules - this is quite different to compiling to machine code with a language such as C or Fortran. It really just turns verbose ASCII source code into a much denser binary format that is still interpreted but much quicker for the interpreter to read (for example it doesn't contain any of the comments from the original source). The difficulty with trying to compile Python, rather than interpreting it, is that Python was _not_ designed for this. Hence most attempts at producing a fast, compiled version of Python tend to only be compatible with a subset of Python. Here we will use the **Numba** package, because it is in very active development, can be mixed seamlessly with interpreted Python code, and particularly targets scientific computing applications.

How does Numba work? It does a just-in-time (JIT) compilation of marked functions into machine code. That is, we include a special library and "tag" functions to be compiled to machine code the first time that they are run. Such tagged functions can only use a subset of the Python language (or else the numba package will fall back to running the same code as the interpreter). Basic usage:
```python
from numba import *   # or just import jit, int32, float64

@jit( float64(int32,float64[:,:]), nopython=True)
def f(n,M):
    return M[n,n]
```
What does all that mean?
 * `@jit` is a special decorator for the function that says this function should be just-in-time compiled by numba. In fact this is the only part that is really necessary; the rest of the declaration in parentheses could be left out.
 * The first optional argument to `@jit` provides a type specification for the function. In this case it says that the function returns a `float64` (64-bit floating point number or `double` in C). It takes two arguments, the first of which is a 32-bit signed integer (as opposed to, say, a `uint8`); the second argument is a numpy matrix. If this is not specified, numba will "guess" the types based on the arguments to the function the first time it is called.
 * The same function may be compiled multiple times with different types (either by being called with different arguments or by specifying a list of type declarations). Note that the compile time makes the first call of the function relatively slow compared to subsequent calls.
 * `nopython=True` tells the compiler that it should not accept any Python code that it cannot compile (where it would need to fall back to the Python interpreter). Without this, @jit will always succeed in "compiling" the function but will potentially produce a function that is no faster than the standard interpreted Python code. With the flag set, the compiler will raise an error if the function includes elements of Python that it can't handle.

That should be enough to get you started (a tiny stand-alone example follows). See the [Numba manual](http://numba.pydata.org/numba-doc/latest/user/index.html) for more information. Go ahead and write a version of the Gaussian elimination solver using Numba - note that you will need a wrapper function to translate the dictionary into numpy arrays first.
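For example, a tiny stand-alone function (unrelated to the exercise) compiled with an explicit signature might look like this:
```python
import numpy as np
from numba import jit, float64

@jit(float64(float64[:, :]), nopython=True)
def trace(M):
    "Sum of the diagonal of a 2-D float64 array, compiled to machine code."
    s = 0.0
    for i in range(M.shape[0]):
        s += M[i, i]
    return s

print(trace(np.eye(4)))   # the first call triggers compilation; prints 4.0
```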
###Code
import numba
from numba import jit,float64
import numpy as np
# add a @jit decorator
def numbaSolve(A,b):
"""Just-in-time compiled gauss-elimination function"""
# write your code here to compute x
return x
def gaussNumba(A,b):
"wrapper to call the function compiled with numba"
# convert argument types and call numbaSolve
return x
gausstest.speed(gaussNumba,1600) # how does this compare with the C++ version?
###Output
_____no_output_____
###Markdown
Some comments on Numba The strategy employed by Numba is virtually identical to that used in [Julia](http://julialang.org) to produce fast code. If you have a project that is entirely focussed on heavy-duty computational work, you may want to give this a go. For small amounts of numerical computation in a larger Python project, Numba is entirely adequate though (and still improving rapidly due to ongoing development). For algorithms that are based less on numerical computations with vectors and matrices, you may also run into the limits of what Numba can compile - this may require rewriting an algorithm in a slightly less natural (more Fortran-ish) way. Note also that Numba does _not_ compile all of the libraries that you import (though it supports a reasonable subset of the numpy libraries). So if you import some module X and call `X.foo()` this function will still run slowly.

Method 5 - Two cores are better than one Computers are getting more and more cores with every generation (the maxima server has 32, the mathprog group's server has 72, even your phone probably has 4). So why are we only using 1 core in our python program so far?

Discussion questions:
 * What's the difference between multi-threading and multi-processing?
 * What is Python's GIL?
 * GPU based parallelisation

Suggested approach Use the Numba library with `@jit(parallel=True,nopython=True)` (you can also explicitly set `nogil=True` for any function to be called in a parallel thread). Indicate that loops are to be done in parallel by using `numba.prange(n)` instead of `range(n)`: that is, a loop `for i in prange(n)` will be carried out automatically by multiple threads. They automatically synchronise (wait for all of the threads to finish) at the end of the for-loop; a tiny illustration follows below. When doing the parallelisation consider:
 * You want reasonable chunks of work in each thread - synchronisation costs time.
 * You need to avoid conflicts where multiple threads want to write to the same memory: simultaneous reads are fine. Avoid needing to use locks.
 * Limit to 2 threads - partly because of the number of people on the server, and because unless we significantly increase the size of the matrices there will be very limited benefit from using many more. To enforce this use:
```python
import os
os.environ["NUMBA_NUM_THREADS"]="2"
import numba
```
Note that this has to be done before you load numba for the first time. If you are working in a notebook where you have used numba previously, please re-start the kernel. If you are working with a command line version of Python you could also set the `NUMBA_NUM_THREADS` environment variable before you even start Python.
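A tiny illustration of `prange` (a generic sketch, not the exercise solution; it assumes numba has already been loaded with the thread limit set as in the next cell):
```python
import numpy as np
from numba import njit, prange

@njit(parallel=True)
def row_sums(M):
    out = np.empty(M.shape[0])
    for i in prange(M.shape[0]):      # iterations may run on different threads
        s = 0.0
        for j in range(M.shape[1]):
            s += M[i, j]
        out[i] = s                    # each thread writes to a different entry: no conflicts
    return out

print(row_sums(np.ones((4, 3))))      # [3. 3. 3. 3.]
```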
###Code
# please restart the kernel before executing this for the first time
# (the thread limit is only enforced when numba is first loaded)
import os
os.environ["NUMBA_NUM_THREADS"]="2"
import numba
from numba import jit,float64,prange
import numpy as np
# add jit decorator here
def parSolve(A,b):
"Parallel gaussian elimination using numba"
# write parallel version of numbaSolve() here
return x
def gaussPar(A,b):
"Call the parallel version compiled with numba"
# write wrapper function to call parSolve()
return x
import gausstest
gaussPar(*gausstest.randProb(5)) # compile & test code
###Output
_____no_output_____
###Markdown
Measuring performance of parallel code The default speed test will only report the CPU time used. This doesn't tell you whether it has been used by 1 CPU or several. In fact, due to the overhead of parallelisation we expect the CPU time to go up - though hopefully not by very much. At the same time the elapsed time should go down.
###Code
gausstest.speed(gaussPar,1600) # by default measures CPU time; need to test elapsed time as well
## measure the cost of parallel threads
import numpy as np
import timeit,time,gc
from statistics import mean,stdev
from random import seed,random
def parspeed(method,maxSize=400):
from gausstest import randProb
seed(123456) # fix some arbitrary seed so we keep generating the same data
prev,n = 0.0, 50
gc.disable()
while n <= maxSize:
gc.collect() # manual cleanout of garbage before each repeat
#CPU = -time.process_time()
T = timeit.repeat(stmt="method(A,b)",setup=f"A,b=randProb({n})",
# need timer to be subtractable so make it a vector
timer=lambda : np.array([time.perf_counter(),time.process_time()]),
repeat=5,number=1,globals=locals())
#CPU += time.process_time()
CPU = [ x[1] for x in T]
t = wall = [x[0] for x in T]
nThread = [ c/s for c,s in zip(CPU,wall)]
        print("%4d elapsed %10.4f σ=%.2f sec" % (
            n,mean(t),stdev(t)),"(x %.2f)"%(mean(t)/prev) if prev > 0 else " "*8,
            "%.2f σ=%.2f threads used"%(mean(nThread),stdev(nThread)))
prev = mean(t)
n *= 2
gc.enable()
parspeed(gaussPar,1600) # need to test elapsed time as well
###Output
_____no_output_____ |
2018_09_01_create_seigel_data_done_3_0.ipynb | ###Markdown
Do it! Delete the individual columns and keep the calculated average.
###Code
scan_id_df.filter(regex='TUGT').columns.tolist()
bad = \
[('day_90', 'TUGT', 'time_taken'),
('day_90', 'TUGT_average', 'time1'),
('day_90', 'TUGT_average', 'time2'),
('day_90', 'TUGT_average', 'time3'),
('day_90', 'TUGT_average', 'TUGT_average'),
('day_365', 'TUGT', 'time1'),
('day_365', 'TUGT', 'time2'),
('day_365', 'TUGT', 'time3'),
('day_365', 'TUGT', 'average')]
scan_id_df.drop(bad,axis='columns',inplace=True)
# drop mri
mri_drop = scan_id_df.filter(regex='MRI').columns.tolist()
scan_id_df.drop(mri_drop,axis='columns',inplace=True)
scan_id_df.columns = scan_id_df.columns.remove_unused_levels()
response_drop = \
[('day_7', 'RAPA', 'resp'),
('day_7', 'RAPA', 'resp_other'),
('day_7', 'RAPA', 'resp_date'),
('day_90', 'mRS', 'resp'),
('day_90', 'mRS', 'resp_other'),
('day_90', 'BI', 'resp.1'),
('day_90', 'BI', 'resp_other.1'),
('day_90', 'SIS', 'resp.2'),
('day_90', 'SIS', 'resp_other.2'),
('day_90', 'RAPA', 'resp'),
('day_90', 'RAPA', 'resp_other'),
('day_90', 'RAPA', 'resp_date'),
('day_90', 'WSAS', 'resp.5'),
('day_90', 'WSAS', 'resp_other.5'),
('day_90', 'WSAS', 'resp_date.2'),
('day_365', 'mRS', 'resp'),
('day_365', 'mRS', 'resp_other'),
('day_365', 'BI', 'resp.1'),
('day_365', 'BI', 'resp_other.1'),
('day_365', 'SIS', 'resp.2'),
('day_365', 'SIS', 'resp_other.2'),
('day_365', 'RAPA', 'resp'),
('day_365', 'RAPA', 'resp_other'),
('day_365', 'RAPA', 'resp_date'),
('day_365', 'WSAS', 'resp.5'),
('day_365', 'WSAS', 'resp_other.5'),
('day_365', 'WSAS', 'resp_date.2')]
scan_id_df.drop(response_drop,axis='columns',inplace=True)
scan_id_df.columns = scan_id_df.columns.remove_unused_levels()
###Output
_____no_output_____
###Markdown
Are these needed?
###Code
# Columns under consideration (kept here as a note rather than as bare expressions):
# ('day_7', 'infarct_type')
# ('day_90', 'TDT', 'lesion_location_confirmed')
# ('day_90', 'TDT', 'Leision_location')
# ('day_90', 'lesion_location_confirmed', 'lesion location confirmed by imaging at 3 months (BC report)')
scan_id_df[('day_7', 'infarct_type')].dropna(how='all')
scan_id_df[('day_90', 'TDT', 'Leision_location')]
scan_id_df[('day_90','lesion_location_confirmed','lesion location confirmed by imaging at 3 months (BC report)')]
infarct_type
scan_id_df.shape
scan_id_df[('day_baseline','NIHSS_multiple')].columns.tolist()
drop_nihss_multiple = [('day_baseline','NIHSS_multiple',c) for c in ['nihss_three_hr',
'nihss_seven_hr',
'nihss_eighteen_hr',
'nihss_other_hr']]
scan_id_df.drop(drop_nihss_multiple,axis = 'columns',inplace= True)
scan_id_df.columns = scan_id_df.columns.remove_unused_levels()
count_dict = scan_id_df.count().to_dict()
scan_id_df.shape
for k in count_dict.keys():
print(k[0],'\n',k[1],'\n',k[2],'\n','\t\t\t\t',round((count_dict[k]/68)*100))
###Output
day_baseline
NIHSS_multiple
nihss_three_hr_score
24.0
day_baseline
NIHSS_multiple
nihss_three_hr_datetime
24.0
day_baseline
NIHSS_multiple
nihss_seven_hr_score
15.0
day_baseline
NIHSS_multiple
nihss_seven_hr_datetime
15.0
day_baseline
NIHSS_multiple
nihss_eighteen_hr_score
26.0
day_baseline
NIHSS_multiple
nihss_eighteen_hr_datetime
26.0
day_baseline
NIHSS_multiple
nihss_other_hr_score
31.0
day_baseline
NIHSS_multiple
nihss_other_hr_datetime
31.0
day_baseline
recent_medical_history
tpa
88.0
day_baseline
recent_medical_history
tpa_datetime
21.0
day_baseline
recent_medical_history
tpa_volume
21.0
day_baseline
NIHSS
loc
99.0
day_baseline
NIHSS
loc_quest
99.0
day_baseline
NIHSS
loc_command
99.0
day_baseline
NIHSS
best_gaze
99.0
day_baseline
NIHSS
vis_field_test
99.0
day_baseline
NIHSS
facial_palsy
99.0
day_baseline
NIHSS
motor_arm_l
99.0
day_baseline
NIHSS
motor_arm_r
99.0
day_baseline
NIHSS
motor_leg_l
99.0
day_baseline
NIHSS
motor_leg_r
99.0
day_baseline
NIHSS
limb_ataxia
99.0
day_baseline
NIHSS
sensory
99.0
day_baseline
NIHSS
best_lang
99.0
day_baseline
NIHSS
dysarthria
99.0
day_baseline
NIHSS
extinction
99.0
day_baseline
NIHSS
score
99.0
day_baseline
NIHSS
valid
100.0
day_baseline
NIHSS
reason
1.0
day_7
NIHSS
loc
99.0
day_7
NIHSS
loc_quest
99.0
day_7
NIHSS
loc_command
99.0
day_7
NIHSS
best_gaze
99.0
day_7
NIHSS
vis_field_test
99.0
day_7
NIHSS
facial_palsy
99.0
day_7
NIHSS
motor_arm_l
99.0
day_7
NIHSS
motor_arm_r
99.0
day_7
NIHSS
motor_leg_l
99.0
day_7
NIHSS
motor_leg_r
99.0
day_7
NIHSS
limb_ataxia
99.0
day_7
NIHSS
sensory
99.0
day_7
NIHSS
best_lang
99.0
day_7
NIHSS
dysarthria
99.0
day_7
NIHSS
extinction
99.0
day_7
NIHSS
score
99.0
day_7
NIHSS
valid
100.0
day_7
NIHSS
reason
1.0
day_7
MoCA
exe_trail
94.0
day_7
MoCA
exe_cube
94.0
day_7
MoCA
exe_clock_cont
94.0
day_7
MoCA
exe_clock_num
94.0
day_7
MoCA
exe_clock_hand
94.0
day_7
MoCA
name_lion
94.0
day_7
MoCA
name_rhino
94.0
day_7
MoCA
name_camel
93.0
day_7
MoCA
mem_face_1
94.0
day_7
MoCA
mem_velvet_1
94.0
day_7
MoCA
mem_church_1
94.0
day_7
MoCA
mem_daisy_1
94.0
day_7
MoCA
mem_red_1
94.0
day_7
MoCA
mem_face_2
93.0
day_7
MoCA
mem_velvet_2
93.0
day_7
MoCA
mem_church_2
93.0
day_7
MoCA
mem_daisy_2
93.0
day_7
MoCA
mem_red_2
93.0
day_7
MoCA
att_forward
94.0
day_7
MoCA
att_backward
94.0
day_7
MoCA
att_a
93.0
day_7
MoCA
att_subtract
94.0
day_7
MoCA
lang1
94.0
day_7
MoCA
lang2
94.0
day_7
MoCA
lang3
94.0
day_7
MoCA
abs_train
94.0
day_7
MoCA
abs_watch
94.0
day_7
MoCA
recall_face
94.0
day_7
MoCA
recall_velvet
94.0
day_7
MoCA
recall_church
94.0
day_7
MoCA
recall_daisy
94.0
day_7
MoCA
recall_red
94.0
day_7
MoCA
orient_day
94.0
day_7
MoCA
orient_month
94.0
day_7
MoCA
orient_year
94.0
day_7
MoCA
orient_day_week
94.0
day_7
MoCA
orient_place
94.0
day_7
MoCA
orient_city
93.0
day_7
MoCA
edu_less_year_12
94.0
day_7
MoCA
MoCA_score
100.0
day_7
MADRS
report_sad
99.0
day_7
MADRS
apparent_sad
99.0
day_7
MADRS
inner_tension
99.0
day_7
MADRS
reduce_sleep
99.0
day_7
MADRS
reduce_appetite
99.0
day_7
MADRS
concent_diff
99.0
day_7
MADRS
lassitude
99.0
day_7
MADRS
inable_feel
99.0
day_7
MADRS
pess_thought
99.0
day_7
MADRS
suicide_thought
97.0
day_7
MADRS
score
99.0
day_7
MADRS
valid
100.0
day_7
MADRS
reason
3.0
day_7
RAPA
never
100.0
day_7
RAPA
light
100.0
day_7
RAPA
light_wk
100.0
day_7
RAPA
mod_less_30m
100.0
day_7
RAPA
vig_less_20m
100.0
day_7
RAPA
mod_more_30m
100.0
day_7
RAPA
vig_more_20m
100.0
day_7
RAPA
score_1
100.0
day_7
RAPA
strength
100.0
day_7
RAPA
flexibility
100.0
day_7
RAPA
score_2
100.0
day_7
infarct_type
is_ich
12.0
day_7
infarct_type
date_time.1
0.0
day_7
infarct_type
symptomatic
0.0
day_7
infarct_type
loc_frontal
12.0
day_7
infarct_type
loc_temporal
12.0
day_7
infarct_type
loc_parietal
12.0
day_7
infarct_type
loc_occipital
12.0
day_7
infarct_type
loc_subcortical
12.0
day_7
infarct_type
loc_brainstem
12.0
day_7
infarct_type
loc_cerebellar
12.0
day_7
infarct_type
loc_subdural
12.0
day_7
infarct_type
loc_subarachnoid
12.0
day_7
infarct_type
treatment
0.0
day_7
infarct_type
treatment_spec
0.0
day_90
mRS
score
100.0
day_90
mRS
date
0.0
day_90
mRS
valid
100.0
day_90
mRS
reason
0.0
day_90
NIHSS
loc
100.0
day_90
NIHSS
loc_quest
100.0
day_90
NIHSS
loc_command
100.0
day_90
NIHSS
best_gaze
100.0
day_90
NIHSS
vis_field_test
100.0
day_90
NIHSS
facial_palsy
100.0
day_90
NIHSS
motor_arm_l
100.0
day_90
NIHSS
motor_arm_r
100.0
day_90
NIHSS
motor_leg_l
100.0
day_90
NIHSS
motor_leg_r
100.0
day_90
NIHSS
limb_ataxia
100.0
day_90
NIHSS
sensory
100.0
day_90
NIHSS
best_lang
100.0
day_90
NIHSS
dysarthria
100.0
day_90
NIHSS
extinction
100.0
day_90
NIHSS
score
100.0
day_90
NIHSS
valid
100.0
day_90
NIHSS
reason
0.0
day_90
BI
feeding
100.0
day_90
BI
bathing
100.0
day_90
BI
grooming
100.0
day_90
BI
dressing
100.0
day_90
BI
bowels
100.0
day_90
BI
bladder
100.0
day_90
BI
toilet_use
100.0
day_90
BI
transfers
100.0
day_90
BI
mobility  100.0
day_90  BI     stairs  100.0
day_90  BI     score  100.0
day_90  SIS    phy_arm  99.0
day_90  SIS    phy_hand  99.0
day_90  SIS    phy_leg  99.0
day_90  SIS    phy_foot  99.0
day_90  SIS    mem_told  99.0
day_90  SIS    mem_day_bef  99.0
day_90  SIS    mem_to_do  99.0
day_90  SIS    mem_wk_day  99.0
day_90  SIS    mem_concert  99.0
day_90  SIS    mem_think_quick  99.0
day_90  SIS    mem_sol_prob  99.0
day_90  SIS    feel_sad  99.0
day_90  SIS    feel_nobody  99.0
day_90  SIS    feel_burden  99.0
day_90  SIS    feel_nothing  99.0
day_90  SIS    feel_blame  99.0
day_90  SIS    feel_enjoy  99.0
day_90  SIS    feel_nervous  99.0
day_90  SIS    feel_life_with  99.0
day_90  SIS    feel_smile  99.0
day_90  SIS    com_say_name  99.0
day_90  SIS    com_under  99.0
day_90  SIS    com_reply  99.0
day_90  SIS    com_correct  99.0
day_90  SIS    com_part  99.0
day_90  SIS    com_ph  99.0
day_90  SIS    com_ph_num  99.0
day_90  SIS    act_knife  99.0
day_90  SIS    act_dress  99.0
day_90  SIS    act_bathe  99.0
day_90  SIS    act_toenail  99.0
day_90  SIS    act_toilet  99.0
day_90  SIS    act_bladder  99.0
day_90  SIS    act_bowel  99.0
day_90  SIS    act_l_task  99.0
day_90  SIS    act_shopping  99.0
day_90  SIS    act_h_task  99.0
day_90  SIS    mob_sit  99.0
day_90  SIS    mob_stand  99.0
day_90  SIS    mob_walk  99.0
day_90  SIS    mob_move  99.0
day_90  SIS    mob_walk_blk  99.0
day_90  SIS    mob_walk_fast  99.0
day_90  SIS    mob_one_stair  99.0
day_90  SIS    mob_sev_stair  99.0
day_90  SIS    mob_car  99.0
day_90  SIS    hand_carry  99.0
day_90  SIS    hand_door_knob  99.0
day_90  SIS    hand_open_jar  99.0
day_90  SIS    hand_shoe_lace  99.0
day_90  SIS    hand_pick_dime  99.0
day_90  SIS    life_work  99.0
day_90  SIS    life_social  99.0
day_90  SIS    life_quiet_rec  99.0
day_90  SIS    life_act_rec  99.0
day_90  SIS    life_family_role  99.0
day_90  SIS    life_religous  99.0
day_90  SIS    life_wish  99.0
day_90  SIS    life_help  99.0
day_90  SIS    stroke_recovery  99.0
day_90  SIS    score_1  99.0
day_90  SIS    score_2  99.0
day_90  SIS    score_3  99.0
day_90  SIS    score_4  99.0
day_90  SIS    score_5  99.0
day_90  SIS    score_6  99.0
day_90  SIS    score_7  99.0
day_90  SIS    score_8  99.0
day_90  SIS    score_perceivedrecovery  99.0
day_90  SIS    16  99.0
day_90  SIS    sum  99.0
day_90  SIS    index  99.0
day_90  RAPA   never  100.0
day_90  RAPA   light  100.0
day_90  RAPA   light_wk  100.0
day_90  RAPA   mod_less_30m  100.0
day_90  RAPA   vig_less_20m  100.0
day_90  RAPA   mod_more_30m  100.0
day_90  RAPA   vig_more_20m  100.0
day_90  RAPA   score_1  100.0
day_90  RAPA   strength  100.0
day_90  RAPA   flexibility  100.0
day_90  RAPA   score_2  100.0
day_90  WSAS   work_ability  100.0
day_90  WSAS   home_manage  100.0
day_90  WSAS   social_leisure  100.0
day_90  WSAS   private_leisure  100.0
day_90  WSAS   relationships  100.0
day_90  WSAS   score  100.0
day_90  WSAS   valid  100.0
day_90  WSAS   reason  100.0
day_90  MoCA   exe_trail  99.0
day_90  MoCA   exe_cube  99.0
day_90  MoCA   exe_clock_cont  99.0
day_90  MoCA   exe_clock_num  99.0
day_90  MoCA   exe_clock_hand  99.0
day_90  MoCA   name_lion  100.0
day_90  MoCA   name_rhino  100.0
day_90  MoCA   name_camel  100.0
day_90  MoCA   mem_face_1  100.0
day_90  MoCA   mem_velvet_1  100.0
day_90  MoCA   mem_church_1  100.0
day_90  MoCA   mem_daisy_1  100.0
day_90  MoCA   mem_red_1  100.0
day_90  MoCA   mem_face_2  100.0
day_90  MoCA   mem_velvet_2  100.0
day_90  MoCA   mem_church_2  100.0
day_90  MoCA   mem_daisy_2  100.0
day_90  MoCA   mem_red_2  100.0
day_90  MoCA   att_forward  100.0
day_90  MoCA   att_backward  100.0
day_90  MoCA   att_a  100.0
day_90  MoCA   att_subtract  100.0
day_90  MoCA   lang1  100.0
day_90  MoCA   lang2  100.0
day_90  MoCA   lang3  100.0
day_90  MoCA   abs_train  100.0
day_90  MoCA   abs_watch  100.0
day_90  MoCA   recall_face  100.0
day_90  MoCA   recall_velvet  100.0
day_90  MoCA   recall_church  100.0
day_90  MoCA   recall_daisy  100.0
day_90  MoCA   recall_red  100.0
day_90  MoCA   orient_day  100.0
day_90  MoCA   orient_month  100.0
day_90  MoCA   orient_year  100.0
day_90  MoCA   orient_day_week  100.0
day_90  MoCA   orient_place  100.0
day_90  MoCA   orient_city  100.0
day_90  MoCA   edu_less_year_12  100.0
day_90  MoCA   MoCA_score  99.0
day_90  MADRS  report_sad  100.0
day_90  MADRS  apparent_sad  100.0
day_90  MADRS  inner_tension  100.0
day_90  MADRS  reduce_sleep  100.0
day_90  MADRS  reduce_appetite  100.0
day_90  MADRS  concent_diff  100.0
day_90  MADRS  lassitude  100.0
day_90  MADRS  inable_feel  100.0
###Markdown
No data in RAVENS
###Code
df[('day_365', 'RAVENs')]
###Output
/Users/alistair/anaconda3/lib/python3.6/site-packages/ipykernel/kernelbase.py:399: PerformanceWarning: indexing past lexsort depth may impact performance.
user_expressions, allow_stdin)
|
Mini-lab 08 - Neural classification.ipynb | ###Markdown
Mini-lab 8: Neural classification You will find code for generating three toy data sets and a neural logistic regression model to perform classification with. Your task is to extend the neural model to: 1. change the learning rate and number of epochs, and compare the loss/accuracy curves; 2. change the model to a multiclass classifier for a predefined number of classes; 3. allow for more complex decision boundaries (hint: read up on nn.Sigmoid and nn.Linear; a more complex network might need more time to train). It might be beneficial to add model parameters for some of the above functionalities (e.g. n_outputs, learning_rate, n_hidden, n_epochs or max_epochs). As usual, we start with some imports.
###Code
import numpy as np
import matplotlib.pyplot as plt
###Output
_____no_output_____
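###Markdown
One possible shape for tasks 2 and 3 above (a sketch only, not the official solution; the parameter names n_hidden, n_classes, n_epochs and learning_rate are just suggestions): widen the output layer to the number of classes and insert a hidden Linear + Sigmoid pair so the decision boundary can bend.
###Code
# Sketch of a configurable multiclass network (assumed extension, not part of the provided lab code)
import torch
import torch.nn as nn

def build_classifier(n_features=2, n_hidden=16, n_classes=3):
    # Hidden layer + sigmoid allows non-linear decision boundaries;
    # the final Softmax outputs one probability per class.
    return nn.Sequential(
        nn.Linear(n_features, n_hidden),
        nn.Sigmoid(),
        nn.Linear(n_hidden, n_classes),
        nn.Softmax(dim=1))

def train(net, X, y, n_epochs=500, learning_rate=0.1):
    # X: FloatTensor of shape (n_samples, n_features), y: LongTensor of class indices
    optimizer = torch.optim.Adam(net.parameters(), lr=learning_rate)
    loss_function = nn.CrossEntropyLoss()
    for epoch in range(n_epochs):
        net.zero_grad()
        loss = loss_function(net(X), y)
        loss.backward()
        optimizer.step()
    return net
###Output
_____no_output_____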
###Markdown
Choose your data
###Code
from sklearn.datasets import make_classification, make_moons, make_blobs
# Load the toy data
X, y = make_blobs(n_samples=200, centers=2, cluster_std=.7, n_features=2, random_state=0)
# X, y = make_moons(n_samples=200, noise=.1, random_state=0)
# X, y = make_classification(n_samples=200, n_features=2, n_informative=2,
# n_redundant=0, n_repeated=0, n_classes=3, class_sep=1.8,
# n_clusters_per_class=1, random_state=0)
###Output
_____no_output_____
###Markdown
It's nice to have a plotting function. This will show us the data and model predictions.
###Code
def plot_classification(model=None):
fig = plt.figure(figsize=(4, 4), dpi=100)
ax = fig.subplots(1, 1)
ax.scatter(X[:, 0], X[:, 1], c=y, s=150, marker='.', edgecolor='k', cmap='coolwarm')
if model is not None:
a = ax.axis()
XX, YY = np.meshgrid(np.linspace(a[0], a[1], 200),
np.linspace(a[2], a[3], 200))
X_test = torch.Tensor(np.concatenate([np.vstack(XX.ravel()),
np.vstack(YY.ravel())], axis=1))
Z = model.predict(X_test)
ax.pcolor(XX, YY, Z.reshape(XX.shape), alpha=.5, cmap='coolwarm', edgecolors='none', snap=True, zorder=-1)
ax.set_xticks([])
ax.set_yticks([])
fig.tight_layout()
fig.show()
plot_classification()
###Output
_____no_output_____
###Markdown
Now for defining the model. It is probably best if you start with running the model, and then try to understand it. Explore its inner workings by reading the manual and adding small pieces of functionality. Note that this is written to comply with the sklearn estimator format and is far from a minimalist implementation.
###Code
import torch
import torch.nn as nn

class MinimalistModel(nn.Module):
    def __init__(self):
        super(MinimalistModel, self).__init__()
        self.net = nn.Sequential(  # Container for sequential operations
            nn.Linear(2, 2),
            nn.Softmax(dim=1))
    def forward(self, X):
        """Forward pass, this is used for making predictions."""
        return self.net(X)
model = MinimalistModel()
print(model)
print(list(model.parameters())[0])
# The model and loss expect torch tensors, so convert the numpy data first
X_t = torch.as_tensor(X, dtype=torch.float32)
y_t = torch.as_tensor(y, dtype=torch.long)
optimizer = torch.optim.Adam(model.parameters(), lr=1)
loss_function = nn.CrossEntropyLoss()
for epoch in range(20):
    model.zero_grad()                        # Reset gradients
    loss = loss_function(model(X_t), y_t)    # Calculate loss
    loss.backward()                          # Backward pass
    optimizer.step()                         # Run the optimizer one step
print(list(model.parameters())[0])
import torch
import torch.nn as nn
class LogisticRegression(nn.Module):
def __init__(self, n_iter=20, verbose=False):
"""Simple classifier for 2D data
This is based on logistic regressing but the output is a softmax."""
super(LogisticRegression, self).__init__()
self.n_iter = n_iter
assert n_iter > 0
self.verbose = verbose
# torch.manual_seed(0) # Forces the same initial weights every time
# Define layers
self.net = nn.Sequential( # Container for sequential operations
nn.Linear(2, 2),
nn.Softmax(dim=1))
# Define optimiser and loss
self.optimizer_ = torch.optim.SGD(self.parameters(), lr=1)
self.loss_function_ = nn.CrossEntropyLoss()
# For making nice plots
self.training_loss_ = list()
self.training_accuracy_ = list()
def forward(self, X):
"""Forward pass, this is used for making predictions."""
return self.net(X)
def _reset(self):
"""Reset the model weights."""
# There has to be a simpler way to do this
        for _, module in self.named_children():
if hasattr(module, 'reset_parameters'):
module.reset_parameters()
if hasattr(module, 'named_children'):
for _, submodule in module.named_children():
if hasattr(submodule, 'reset_parameters'):
submodule.reset_parameters()
def fit(self, X, y):
"""Train the model"""
self._reset()
for epoch in range(self.n_iter):
self._fit(X, y)
if self.verbose:
if self.n_iter < 50 or (epoch % 50) == 0:
print("Epoch %2i: Loss %.7f" % (epoch, self._loss.item()))
def _fit(self, X, y):
"""A single step in the training"""
self.zero_grad() # Reset gradients
forward_pass_output = self(X) # Forward pass
self._loss = self.loss_function_(forward_pass_output, y) # Calculate loss
self._loss.backward() # Backward pass
self.optimizer_.step() # Run the optimizer one step
# Store values for user feedback
self.training_loss_.append(self._loss.item())
self.training_accuracy_.append(self.score(X, y))
def predict(self, X):
"""Predict labels"""
return np.argmax(self.predict_proba(X), axis=1) # Pick max probabilities
def predict_proba(self, X):
"""Predict label probabilities"""
with torch.no_grad(): # Disable gradient calculations
log_probs = self(X) # Forward pass to predict probabilities
return log_probs.detach().numpy()
def score(self, X, y):
"""Get the accuracy of the model given the data"""
return np.sum(self.predict(X)==y.numpy())/len(y)
model = LogisticRegression(n_iter=500)
print(model)
# Convert the data to torch tensors
X = torch.Tensor(X)
y = torch.LongTensor(y)
###Output
LogisticRegression(
(net): Sequential(
(0): Linear(in_features=2, out_features=2, bias=True)
(1): Softmax(dim=1)
)
(loss_function_): CrossEntropyLoss()
)
###Markdown
The model starts out with random values as weights. This should give us a crappy classification.
###Code
plot_classification(model)
###Output
_____no_output_____
###Markdown
Time for training.
###Code
model.fit(X, y)
print("Model training accuracy %.1f%%" % (100*model.score(X, y)))
###Output
Model training accuracy 99.5%
###Markdown
We can ask the model to tell us about its parameters.
###Code
for param in model.parameters():
print(param)
###Output
Parameter containing:
tensor([[-0.6668, 1.8333],
[ 1.7968, -1.6851]], requires_grad=True)
Parameter containing:
tensor([-2.3867, 2.6662], requires_grad=True)
###Markdown
Remember the stored loss and accuracy values. We can plot these too.
###Code
fig = plt.figure(figsize=(6, 4))
ax = plt.subplot()
ax.plot(model.training_loss_, 'b.-')
ax.set_ylabel("Training Loss", color='b')
ax.set_xlabel("Epoch")
# ax.set_yscale('log')
ax.tick_params(axis='y', labelcolor='b')
ax = ax.twinx()
ax.plot(np.asarray(model.training_accuracy_)*100, 'r.-')
ax.set_ylabel("Accuracy [%]", color='r')
ax.tick_params(axis='y', labelcolor='r')
a = list(ax.axis())
a[2] = 0
a[3] = 100
ax.axis(a)
fig.tight_layout()
plt.show()
print("Model accuracy is %.1f%%" % (model.score(X, y)*100))
###Output
_____no_output_____
###Markdown
The classification should work better after training.
###Code
plot_classification(model)
###Output
_____no_output_____ |
Assignment 5/Assignment 5/partC.ipynb | ###Markdown
Part C: Change the cost function. Activation = tanh + relu + softmax; Loss = **mean_squared_error**
###Code
EPOCHS = 10000
BATCH_SIZE = 1000
display_step = 500
with tf.name_scope('Inputs_C'):
X = tf.placeholder("float", [None, num_input],name='Features_C')
Y = tf.placeholder("float", [None, num_classes],name='Label_C')
# using two numpy arrays
features, labels = (X, Y)
# make a simple model
def Neuron(x):
with tf.name_scope('layer1_C'):
net = tf.layers.dense(x, 100, activation=tf.nn.relu)
with tf.name_scope('layer2_C'):
net = tf.layers.dense(net, 50, activation=tf.tanh)
with tf.name_scope('layer3_C'):
net = tf.layers.dense(net, 20, activation=tf.nn.softmax)
with tf.name_scope('out_layer_C'):
prediction = tf.layers.dense(net, 4)
return prediction
prediction = Neuron(X)
#loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=Y))
loss = tf.losses.mean_squared_error(labels=Y, predictions=prediction)
tf.summary.scalar('loss_C',loss)
#tf.losses.mean_squared_error(prediction, y) # pass the second value
correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
tf.summary.scalar('acuracy_C',accuracy)
#from iter.get_net() as label
train_op = tf.train.AdamOptimizer().minimize(loss)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
merge_summary= tf.summary.merge_all()
writer = tf.summary.FileWriter('C:/Users/BoyangWei.LAPTOP-SRSNTDRH/7390/TensorFlow/files/C')
writer.add_graph(sess.graph)
for i in range(EPOCHS):
_, loss_value,acc_value = sess.run([train_op, loss,accuracy],feed_dict={X: x, Y: y})
if i% display_step == 0:
print("Iter: {}, Loss: {:.4f}".format(i+1, loss_value))
print("Accurancy: " +str(acc_value))
summary=sess.run(merge_summary,feed_dict={X: x,Y: y})
writer.add_summary(summary,i)
correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
print("Test accuracy: "+ str(accuracy.eval({X: np.array(temp_x_test), Y: np.array(keras.utils.to_categorical(y_test))})))
###Output
Iter: 1, Loss: 0.2474
Accurancy: 0.25725
Iter: 501, Loss: 0.1716
Accurancy: 0.35775
Iter: 1001, Loss: 0.1614
Accurancy: 0.371
Iter: 1501, Loss: 0.1495
Accurancy: 0.432
Iter: 2001, Loss: 0.1471
Accurancy: 0.43575
Iter: 2501, Loss: 0.1454
Accurancy: 0.43725
Iter: 3001, Loss: 0.1447
Accurancy: 0.438
Iter: 3501, Loss: 0.1443
Accurancy: 0.438
Iter: 4001, Loss: 0.1441
Accurancy: 0.43825
Iter: 4501, Loss: 0.1439
Accurancy: 0.438
Iter: 5001, Loss: 0.1439
Accurancy: 0.4375
Iter: 5501, Loss: 0.1437
Accurancy: 0.4375
Iter: 6001, Loss: 0.1436
Accurancy: 0.43775
Iter: 6501, Loss: 0.1435
Accurancy: 0.438
Iter: 7001, Loss: 0.1435
Accurancy: 0.438
Iter: 7501, Loss: 0.1434
Accurancy: 0.438
Iter: 8001, Loss: 0.1434
Accurancy: 0.4385
Iter: 8501, Loss: 0.1434
Accurancy: 0.4385
Iter: 9001, Loss: 0.1434
Accurancy: 0.4385
Iter: 9501, Loss: 0.1434
Accurancy: 0.43825
Test accuracy: 0.74595267
|
_draft/FinanceDataReader.ipynb | ###Markdown
FinanceDataReader Package I don't know who developed the amazing FinanceDataReader package, but it has been a huge practical help for someone like me who gathers financial data, and I sincerely thank the author. In this post I install and import the package and run a simple stock-price time-series analysis. It is actually my first time using it properly, but I hope this post is useful to someone. The goal is to visualize the closing-price movement of selected stocks or listed ETFs, like the graph below.
###Code
import matplotlib.pyplot as plt
plt.rcParams["font.family"] = 'NanumGothic'
plt.rcParams["axes.grid"] = True
plt.rcParams["figure.figsize"] = (15,15)
plt.rcParams["axes.formatter.useoffset"] = False
plt.rcParams['axes.unicode_minus'] = False
plt.rcParams["axes.formatter.limits"] = -10000, 10000
df.plot()
###Output
_____no_output_____
###Markdown
First, import the package and check its version.
###Code
import FinanceDataReader as fdr
fdr.__version__
###Output
_____no_output_____
###Markdown
Following the usage guide, I will try things out one step at a time. For reference, Korean companies are listed on three markets: KOSPI, KOSDAQ and KONEX. The list of companies listed on these three markets can be loaded at once as a DataFrame with the command below.
###Code
KRX_LISTING = fdr.StockListing('KRX')  # For reference, passing 'NYSE' retrieves the US market listing as well!
###Output
_____no_output_____
###Markdown
An overview of the retrieved information is shown below. It neatly provides about ten fields as a DataFrame.
###Code
KRX_LISTING.info()
KRX_LISTING
###Output
_____no_output_____
###Markdown
About 2,597 companies are returned. I am not sure whether that number is exactly right, but digging into the package code it appears to pull from KRX market data, so the source is presumably kept up to date. In any case, as a next test I plot the price history of two tickers, 005930 and 000660, which are Samsung Electronics and SK Hynix respectively.
###Code
df1 = fdr.DataReader('005930', '2019-10-01', '2020-12-04')  # Samsung Electronics
df2 = fdr.DataReader('000660', '2019-10-01', '2020-12-04')  # SK Hynix
df1['Close'].plot()
df2['Close'].plot()
###Output
_____no_output_____
###Markdown
Prices appear to have risen sharply recently, especially SK Hynix. Let's draw the closing-price movements in a bit more detail, with a label for each ticker over the chosen period. We will plot Samsung Electronics, Hancom, and several ETFs together.
###Code
stock_list = [
["์ผ์ฑ์ ์", "005930"],
["ํ๊ธ๊ณผ์ปดํจํฐ", "030520"],
["TIGER 2์ฐจ์ ์งํ
๋ง", "305540"],
["KODEX 200", "069500"],
["TIGER ์ํํธ์จ์ด" ,"157490"],
["TIGER ํ๋์ฐจ๊ทธ๋ฃน+ํ๋๋ฉํธ ()","138540"],
["KINDEX ๋ฏธ๊ตญS&P 500", "360200"]
]
###Output
_____no_output_____
###Markdown
Enter each ticker name and code into stock_list, then organize the downloaded closing prices into a single DataFrame.
###Code
import pandas as pd
df_list = [fdr.DataReader(code, '2019-11-01', '2020-12-31')['Close'] for name, code in stock_list]
#len(df_list)
# pd.concat()๋ก ํฉ์น๊ธฐ
df = pd.concat(df_list, axis=1)
df.columns = [name for name, code in stock_list]
df.head(10)
###Output
_____no_output_____
###Markdown
The date index and the closing price for each ticker are now neatly organized as a time series. Let's visualize it.
###Code
import matplotlib.pyplot as plt
plt.rcParams["font.family"] = 'NanumGothic'
plt.rcParams["axes.grid"] = True
plt.rcParams["figure.figsize"] = (15,15)
plt.rcParams["axes.formatter.useoffset"] = False
plt.rcParams['axes.unicode_minus'] = False
plt.rcParams["axes.formatter.limits"] = -10000, 10000
df.plot()
###Output
_____no_output_____
###Markdown
Other ETFs can be plotted in the same way, as shown below.
###Code
stock_list2= [
["ARIRANG ์ ํฅ๊ตญMSCI", "195980"],
["KODEX ๊ณจ๋์ ๋ฌผ(H)", "132030"],
["TIGER ๋ฏธ๊ตญS&P500 ์ ๋ฌผ(H)" ,"143850"],
["KODEX 200", "069500"],
#["TIGER ์ํํธ์จ์ด" ,"157490"],
#["KOSEF ๊ตญ๊ณ ์ฑ10๋
","148070"],
#[" KODEX ๋จ๊ธฐ์ฑ๊ถPLUS", "214980"]
]
import pandas as pd
df_list2 = [fdr.DataReader(code, '2019-11-01', '2020-12-31')['Close'] for name, code in stock_list2]
#len(df_list)
# pd.concat()๋ก ํฉ์น๊ธฐ
df2 = pd.concat(df_list2, axis=1)
df2.columns = [name for name, code in stock_list2]
df2.tail(10)
import matplotlib.pyplot as plt
plt.rcParams["font.family"] = 'NanumGothic'
plt.rcParams["axes.grid"] = True
plt.rcParams["figure.figsize"] = (15,15)
plt.rcParams["axes.formatter.useoffset"] = False
plt.rcParams['axes.unicode_minus'] = False
plt.rcParams["axes.formatter.limits"] = -10000, 10000
df2.plot()
###Output
_____no_output_____ |
datapreporcess_replace.ipynb | ###Markdown
###Code
import pandas as pd
df = pd.read_csv('./auto-mpg.csv', header=None)
df.info()
df.columns = ['mpg','cylinders','displacement','horsepower','weight',
'acceleration','model year','origin','name']
df[['horsepower','name']].describe(include='all')
df['horsepower'].value_counts()
# The column contains '?' placeholders, so a direct cast to float raises an error
df['horsepower'].astype('float')
df['horsepower'].unique()
# Try replacing the '?' placeholders so the column can be converted to float
df_horsepower = df['horsepower'].replace(to_replace='?', value=None, inplace=False)
df_horsepower.unique()
df_horsepower = df['horsepower'].replace(to_replace='?', inplace=False)
df_horsepower.unique()
df_horsepower = df_horsepower.astype('float')
df_horsepower.mean()
# Fill any missing values with (roughly) the mean horsepower, ~104, and write the column back
df['horsepower'] = df_horsepower.fillna(104)
df.info()
df['name'].unique()
df.head()
###Output
_____no_output_____ |
demos/Day 2 - exercise regression.ipynb | ###Markdown
Exercise: power demand prediction Problem description: https://archive.ics.uci.edu/ml/datasets/combined+cycle+power+plant - Data is provided in two formats - xlsx and ods. Use pd.read_excel to load the .xlsx data file. The excel file has 5 sheets. Create a dataframe for each and concatenate them into a single dataframe. [Hint: use the pd.concat function]. Note: one column needs to be renamed: PE => EP to be consistent with the problem statement.- Create training and test sets with a 70/30 ratio and random seed = 1. Predict EP based on the other variables as features (AT, RH, V and AP). - Calculate R2 and RMSE for training and test data. [Answer: 0.9287, 0.9284 (r2) 4.55 4.57 (rmse)]- Find the residuals (ypredict - ytrue) on the test data and plot a histogram to see their distribution. Ideally the histogram of the residuals should look "gaussian normal". Do a scatter plot of residual vs actual. Observe whether the residuals behave consistently across the entire range of actual values. - Which features are positively related to the outcome and which are negatively related?- Which feature is the strongest predictor?- Improve your model using a log transformation of the output and a polynomial transformation of the features with degree = 2 [Answer: 0.9371, 0.9369]. Also, plot the residuals on the test data.
###Code
import pandas as pd
sheets = [pd.read_excel("/Users/abasar/Downloads/CCPP/Folds5x2_pp.xlsx", i) for i in range(5)]
len(sheets)
df = pd.concat(sheets)
df.shape
df.head()
d = pd.ExcelFile("/Users/abasar/Downloads/CCPP/Folds5x2_pp.xlsx")
d.sheet_names
sheets = pd.read_excel("/Users/abasar/Downloads/CCPP/Folds5x2_pp.xlsx", sheet_name=None)
pd.concat(sheets).reset_index().iloc[:, 2:]
d = pd.read_excel("/Users/abasar/Downloads/CCPP/Folds5x2_pp.xlsx", sheet_name=None)
df = pd.concat(d)
df.to_excel("sample.xlsx")
from sklearn import *
import numpy as np
target = "PE"
y = np.log(df[target])
X = df.drop(columns=[target])
#X = pd.get_dummies(X, drop_first=True)
X_train, X_test, y_train, y_test = model_selection.train_test_split(X.values.astype("float")
, y , test_size = 0.3, random_state = 1)
pipe = pipeline.Pipeline([
("poly", preprocessing.PolynomialFeatures(degree=2, include_bias=False)),
("scaler", preprocessing.StandardScaler()),
("est",linear_model.SGDRegressor(random_state=1, eta0=0.001, tol=1e-5
, learning_rate="constant"
, max_iter= 10000))
])
pipe.fit(X_train, y_train)
y_train_pred = pipe.predict(X_train)
y_test_pred = pipe.predict(X_test)
print("r2 train", metrics.r2_score(y_train, y_train_pred))
print("r2 test", metrics.r2_score(y_test, y_test_pred))
print("rmse train", np.sqrt(metrics.mean_squared_error(y_train, y_train_pred)))
print("rmse test", np.sqrt(metrics.mean_squared_error(y_test, y_test_pred)))
###Output
r2 train 0.9352738621694812
r2 test 0.9348396383411652
rmse train 0.009505957949204604
rmse test 0.0095535831892362
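###Markdown
A sketch of the residual checks that the exercise asks for (assuming the fitted pipe, X_test/y_test and y_test_pred from above; note that y was log-transformed, so these residuals are in log units):
###Code
import matplotlib.pyplot as plt

residuals = y_test_pred - y_test            # ypredict - ytrue on the test data
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4))
ax1.hist(residuals, bins=40)                # ideally roughly gaussian
ax1.set(title="Residual distribution", xlabel="residual")
ax2.scatter(y_test, residuals, s=5, alpha=0.5)
ax2.axhline(0, color="red")                 # look for patterns across the actual range
ax2.set(title="Residual vs actual", xlabel="actual (log PE)", ylabel="residual")
plt.show()
###Output
_____no_output_____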
|
content/post/matplotlib/matplotlib.ipynb | ###Markdown
Basics pyplot vs object-oriented interface Matplotlib has two interfaces: the object-oriented interface renders Axes instances on Figure instances, while the less flexible state-based MATLAB style-interface keeps track of the current figure and axes and other objects and directs plotting functions accordingly (more [here](https://matplotlib.org/stable/tutorials/introductory/lifecycle.htmla-note-on-the-object-oriented-api-vs-pyplot)). Object-oriented plot
###Code
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
df = sns.load_dataset("diamonds")
fig, ax = plt.subplots()
ax.scatter(x="carat", y="price", data=df)
ax.set(xlabel="Carat", ylabel="Price")
###Output
_____no_output_____
###Markdown
pyplot version
###Code
plt.scatter(x="carat", y="price", data=df)
plt.xlabel("Carat")
plt.ylabel("Price")
###Output
_____no_output_____
###Markdown
Plot lifecycleBased on [this](https://pbpython.com/effective-matplotlib.html) great blog post by Chris Moffitt and the matplotlib [tutorial](https://matplotlib.org/stable/tutorials/introductory/lifecycle.html) that's based on the same post. Reading in raw data of customer sales transactions and keeping sales volume and number of purchases for top 10 customers by sales.
###Code
fp = (
"https://github.com/chris1610/pbpython/blob/master/data/"
"sample-salesv3.xlsx?raw=true"
)
df_raw = pd.read_excel(fp)
print(df_raw.shape)
df_raw.head(2)
df = (
df_raw.groupby("name")
.agg(sales=("ext price", "sum"), purchases=("quantity", "count"))
.sort_values("sales")[-10:]
.reset_index()
)
df
###Output
_____no_output_____
###Markdown
Choosing a style
###Code
plt.style.available
plt.style.use("seaborn-whitegrid")
###Output
_____no_output_____
###Markdown
Prototyping plot with Pandas
###Code
df.plot(kind="barh", x="name", y="sales", legend=None);
###Output
_____no_output_____
###Markdown
Customising plot combining fast Pandas plotting with Matplotlib object-oriented API
###Code
def xlim(x):
"""Set xlim with custom padding."""
return x.max() * np.array([-0.05, 1.3])
fig, ax = plt.subplots()
df.plot(kind="barh", x="name", y="sales", legend=None, ax=ax)
ax.set(
xlim=xlim(df.sales),
xlabel="Sales",
ylabel="Customer",
title="Top customers 2014",
);
###Output
_____no_output_____
###Markdown
Formatting currency values using custom formatter
###Code
def currency(x, pos):
"""Reformat currency amount at position x."""
return f"{x * 1e-3:1.1f}K"
ax.xaxis.set_major_formatter(currency)
fig
###Output
_____no_output_____
###Markdown
Adding a line for average sales
###Code
sales_mean = df.sales.mean()
ax.axvline(sales_mean, linestyle=":", color="green")
lab = f"Mean: {currency(sales_mean, 0)}"
ax.text(
x=1.05 * sales_mean,
y=0,
s=lab,
color="green",
)
fig
###Output
_____no_output_____
###Markdown
Identify new customers
###Code
for customer in [2, 4, 5]:
ax.text(x=1.05 * sales_mean, y=customer, s="New customer")
fig
###Output
_____no_output_____
###Markdown
Show sales and number of purchases, [xkcd](https://xkcd.com)-themed (just because...)
###Code
with plt.xkcd():
fig, (ax0, ax1) = plt.subplots(1, 2, figsize=(8, 4), sharey=True)
df.plot(kind="barh", x="name", y="sales", legend=None, ax=ax0)
sales_mean = df.sales.mean()
ax0.axvline(sales_mean, color="green", linestyle=":")
lab = f"Mean: {currency(sales_mean, 0)}"
ax0.text(1.05 * sales_mean, 0, lab, color="green")
for customer in [2, 4, 5]:
ax0.text(sales_mean, customer, "New customer")
ax0.xaxis.set_major_formatter(currency)
ax0.set(xlim=xlim(df.sales), ylabel="Customer",title="Sales")
df.plot(kind="barh", x="name", y="purchases", legend=None, ax=ax1)
purch_mean = df.purchases.mean()
ax1.axvline(purch_mean, color="green", linestyle=":")
ax1.text(purch_mean, 0, f"Mean: {purch_mean}", color="green")
ax1.set(title="Purchases", xlim=xlim(df.purchases))
fig.suptitle(
"Sales and purchases for top 10 customers in 2022",
fontsize=18,
fontweight="bold",
y=1.05,
)
###Output
_____no_output_____
###Markdown
Save figure
###Code
fig.canvas.get_supported_filetypes()
fp = 'figure-path.png'
fig.savefig(fp, transparent=False, dpi=80, bbox_inches="tight")
###Output
_____no_output_____ |
jwst_validation_notebooks/jwst_dark_quality_test/jwst_dark_quality_test.ipynb | ###Markdown
JWST Pipeline Validation Testing Notebook: Dark Quality Test Table of Contents [Introduction](intro_ID) [Imports](imports_ID) [Getting the Data](data_ID) [Run Dark Correction Pipeline Step](pipeline_ID) [Check the slope of the median ramp for the detector](slope_ID) IntroductionThe dark current step removes dark current from a JWST exposure by subtracting dark current data stored in a dark reference file.For more details, visit the documentation here: https://jwst-pipeline.readthedocs.io/en/latest/jwst/dark_current/description.html Defining TermJWST: James Webb Space TelescopeOUT: Other Useful Terms[Top of Page](title_ID) Imports* jwst.datamodels for building model for JWST Pipeline* jwst.dark_current.dark_sub.do_correction to perform the dark correction* matplotlib.pyplot.plt to generate plot* numpy for array parsing and masking* os for operating system commands.* matplotlib inline for plot rendering in notebook[Top of Page](title_ID)
###Code
from jwst.datamodels import DarkMIRIModel, MIRIRampModel
from jwst.dark_current.dark_sub import do_correction
import matplotlib.pyplot as plt
import numpy as np
import os
%matplotlib inline
###Output
_____no_output_____
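###Markdown
Conceptually, the correction is a group-by-group subtraction of the dark reference from the science ramp. The cell below is an illustration only, not the actual `do_correction` implementation (which also handles frame averaging, group gaps and subarrays):
###Code
import numpy as np

def naive_dark_subtract(science, dark):
    """Illustration only: subtract a dark ramp from a science ramp (both shaped (nints, ngroups, ny, nx))."""
    ngroups = science.shape[1]
    return science - dark[:, :ngroups, :, :]  # trim the dark to the science group count
###Output
_____no_output_____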
###Markdown
Getting the Data We are constructing a fake MIRI ramp and dark model dataset using the datamodels library in JWST, rather than retrieving real data from a shared archive (e.g., Artifactory).[Top of Page](title_ID)
###Code
# size of integration
nints = 1
ngroups = 7
xsize = 1032
ysize = 1024
# Define data's shape
csize = (nints, ngroups, ysize, xsize)
data = np.random.rand(nints, ngroups, ysize, xsize)*1e6
# create a JWST MIRI rampmodel
dm_ramp = MIRIRampModel(data=data)
dm_ramp.meta.instrument.name = 'MIRI'
dm_ramp.meta.observation.date = '2018-01-01'
dm_ramp.meta.observation.time = '00:00:00'
dm_ramp.meta.subarray.xstart = 1
dm_ramp.meta.subarray.xsize = xsize
dm_ramp.meta.subarray.ystart = 1
dm_ramp.meta.subarray.ysize = ysize
dm_ramp.meta.description = 'Fake data.'
# Define shape of dark model
csize = (nints*5, ngroups, ysize, xsize)
data = np.random.rand(nints, ngroups, ysize, xsize) * 1e-3
# Create dark datamodel
dark = DarkMIRIModel(data=data)
dark.meta.instrument.name = 'MIRI'
dark.meta.date = '2018-01-01'
dark.meta.time = '00:00:00'
dark.meta.subarray.xstart = 1
dark.meta.subarray.xsize = xsize
dark.meta.subarray.ystart = 1
dark.meta.subarray.ysize = ysize
dark.meta.exposure.nframes = 1
dark.meta.exposure.groupgap = 0
dark.meta.description = 'Fake data.'
dark.meta.reftype = 'DarkModel'
dark.meta.pedigree = 'Dummy'
dark.meta.useafter = '2015-10-01T00:00:00'
# create raw input data for step
dm_ramp.meta.exposure.nframes = 1
dm_ramp.meta.exposure.groupgap = 0
# populate data array of science cube
for i in range(0, ngroups):
dm_ramp.data[0, i] = i
# populate data array of reference file
for i in range(0, ngroups):
dark.data[0, i] = i * 0.1
###Output
_____no_output_____
###Markdown
Run Dark Correction Pipeline Step Define the output and run the dark current subtraction step of the pipeline.[Top of Page](title_ID)
###Code
# run pipeline
outfile = do_correction(dm_ramp, dark)
###Output
_____no_output_____
###Markdown
Check the slope of the median ramp for the detectorThe count rate of the dark subtracted ramp should be small (< 0.1?)[Top of Page](title_ID)
###Code
med_in = np.median(dm_ramp.data[0, :, :, :], axis=(1, 2))
med_out = np.median(outfile.data[0, :, :, :,], axis=(1,2))
groups = np.arange(med_in.shape[0])
slope_in = np.polyfit(groups, med_in, 1)
slope_out = np.polyfit(groups, med_out, 1)
print(
"Slope of median ramp before dark subtraction: {} counts/group".format(
slope_in[0]))
print(
"Slope of median ramp after dark subtraction: {} counts/group".format(
slope_out[0]))
# Set plot params
plt.rc('font', weight='bold')
plt.rc('xtick.major', size=5, pad=7)
plt.rc('xtick', labelsize=15)
plt.rc('ytick', labelsize=15)
plt.figure(figsize=(20,10))
plt.grid(True, ls='--')
plt.title('Random Data', fontsize=20, fontweight='bold')
plt.plot(med_in, c='k', label= 'Ramp PreDarkCorr')
plt.plot(med_out,c='r', label='Ramp PostDarkCorr')
plt.xlabel('Group Number', fontsize=20, fontweight='bold')
plt.ylabel('Counts', fontsize=20, fontweight='bold')
plt.legend(fontsize='xx-large')
###Output
_____no_output_____ |
src/tutorials/2_basic/basic_classification.ipynb | ###Markdown
Copyright 2018 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#@title MIT License
#
# Copyright (c) 2017 Franรงois Chollet
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###Output
_____no_output_____
###Markdown
Train your first neural network: basic classification View on TensorFlow.org Run in Google Colab View source on GitHub This guide trains a neural network model to classify images of clothing, like sneakers and shirts. It's okay if you don't understand all the details, this is a fast-paced overview of a complete TensorFlow program with the details explained as we go.This guide uses [tf.keras](https://www.tensorflow.org/guide/keras), a high-level API to build and train models in TensorFlow.
###Code
# TensorFlow and tf.keras
import tensorflow as tf
from tensorflow import keras
# Helper libraries
import numpy as np
import matplotlib.pyplot as plt
print(tf.__version__)
###Output
_____no_output_____
###Markdown
Import the Fashion MNIST dataset This guide uses the [Fashion MNIST](https://github.com/zalandoresearch/fashion-mnist) dataset which contains 70,000 grayscale images in 10 categories. The images show individual articles of clothing at low resolution (28 by 28 pixels), as seen here: <img src="https://tensorflow.org/images/fashion-mnist-sprite.png" alt="Fashion MNIST sprite" width="600"> Figure 1. Fashion-MNIST samples (by Zalando, MIT License). Fashion MNIST is intended as a drop-in replacement for the classic [MNIST](http://yann.lecun.com/exdb/mnist/) datasetโoften used as the "Hello, World" of machine learning programs for computer vision. The MNIST dataset contains images of handwritten digits (0, 1, 2, etc) in an identical format to the articles of clothing we'll use here.This guide uses Fashion MNIST for variety, and because it's a slightly more challenging problem than regular MNIST. Both datasets are relatively small and are used to verify that an algorithm works as expected. They're good starting points to test and debug code. We will use 60,000 images to train the network and 10,000 images to evaluate how accurately the network learned to classify images. You can access the Fashion MNIST directly from TensorFlow, just import and load the data:
###Code
fashion_mnist = keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
###Output
_____no_output_____
###Markdown
Loading the dataset returns four NumPy arrays:* The `train_images` and `train_labels` arrays are the *training set*โthe data the model uses to learn.* The model is tested against the *test set*, the `test_images`, and `test_labels` arrays.The images are 28x28 NumPy arrays, with pixel values ranging between 0 and 255. The *labels* are an array of integers, ranging from 0 to 9. These correspond to the *class* of clothing the image represents: Label Class 0 T-shirt/top 1 Trouser 2 Pullover 3 Dress 4 Coat 5 Sandal 6 Shirt 7 Sneaker 8 Bag 9 Ankle boot Each image is mapped to a single label. Since the *class names* are not included with the dataset, store them here to use later when plotting the images:
###Code
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
###Output
_____no_output_____
###Markdown
Explore the dataLet's explore the format of the dataset before training the model. The following shows there are 60,000 images in the training set, with each image represented as 28 x 28 pixels:
###Code
train_images.shape
###Output
_____no_output_____
###Markdown
Likewise, there are 60,000 labels in the training set:
###Code
len(train_labels)
###Output
_____no_output_____
###Markdown
Each label is an integer between 0 and 9:
###Code
train_labels
###Output
_____no_output_____
###Markdown
There are 10,000 images in the test set. Again, each image is represented as 28 x 28 pixels:
###Code
test_images.shape
###Output
_____no_output_____
###Markdown
And the test set contains 10,000 images labels:
###Code
len(test_labels)
###Output
_____no_output_____
###Markdown
Preprocess the dataThe data must be preprocessed before training the network. If you inspect the first image in the training set, you will see that the pixel values fall in the range of 0 to 255:
###Code
plt.figure()
plt.imshow(train_images[0])
plt.colorbar()
plt.grid(False)
###Output
_____no_output_____
###Markdown
We scale these values to a range of 0 to 1 before feeding them to the neural network model. To do this, cast the datatype of the image components from an integer to a float, and divide by 255. It's important that the *training set* and the *testing set* are preprocessed in the same way:
###Code
train_images = train_images / 255.0
test_images = test_images / 255.0
###Output
_____no_output_____
###Markdown
Display the first 25 images from the *training set* and display the class name below each image. Verify that the data is in the correct format and we're ready to build and train the network.
###Code
plt.figure(figsize=(10,10))
for i in range(25):
plt.subplot(5,5,i+1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(train_images[i], cmap=plt.cm.binary)
plt.xlabel(class_names[train_labels[i]])
###Output
_____no_output_____
###Markdown
Build the modelBuilding the neural network requires configuring the layers of the model, then compiling the model. Setup the layersThe basic building block of a neural network is the *layer*. Layers extract representations from the data fed into them. And, hopefully, these representations are more meaningful for the problem at hand.Most of deep learning consists of chaining together simple layers. Most layers, like `tf.keras.layers.Dense`, have parameters that are learned during training.
###Code
model = keras.Sequential([
keras.layers.Flatten(input_shape=(28, 28)),
keras.layers.Dense(128, activation=tf.nn.relu),
keras.layers.Dense(10, activation=tf.nn.softmax)
])
###Output
_____no_output_____
###Markdown
The first layer in this network, `tf.keras.layers.Flatten`, transforms the format of the images from a 2d-array (of 28 by 28 pixels), to a 1d-array of 28 * 28 = 784 pixels. Think of this layer as unstacking rows of pixels in the image and lining them up. This layer has no parameters to learn; it only reformats the data.After the pixels are flattened, the network consists of a sequence of two `tf.keras.layers.Dense` layers. These are densely-connected, or fully-connected, neural layers. The first `Dense` layer has 128 nodes (or neurons). The second (and last) layer is a 10-node *softmax* layerโthis returns an array of 10 probability scores that sum to 1. Each node contains a score that indicates the probability that the current image belongs to one of the 10 classes. Compile the modelBefore the model is ready for training, it needs a few more settings. These are added during the model's *compile* step:* *Loss function* โThis measures how accurate the model is during training. We want to minimize this function to "steer" the model in the right direction.* *Optimizer* โThis is how the model is updated based on the data it sees and its loss function.* *Metrics* โUsed to monitor the training and testing steps. The following example uses *accuracy*, the fraction of the images that are correctly classified.
###Code
model.compile(optimizer=tf.train.AdamOptimizer(),
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
###Output
_____no_output_____
###Markdown
Train the modelTraining the neural network model requires the following steps:1. Feed the training data to the modelโin this example, the `train_images` and `train_labels` arrays.2. The model learns to associate images and labels.3. We ask the model to make predictions about a test setโin this example, the `test_images` array. We verify that the predictions match the labels from the `test_labels` array. To start training, call the `model.fit` methodโthe model is "fit" to the training data:
###Code
model.fit(train_images, train_labels, epochs=5)
###Output
_____no_output_____
###Markdown
As the model trains, the loss and accuracy metrics are displayed. This model reaches an accuracy of about 0.88 (or 88%) on the training data. Evaluate accuracyNext, compare how the model performs on the test dataset:
###Code
test_loss, test_acc = model.evaluate(test_images, test_labels)
print('Test accuracy:', test_acc)
###Output
_____no_output_____
###Markdown
It turns out, the accuracy on the test dataset is a little less than the accuracy on the training dataset. This gap between training accuracy and test accuracy is an example of *overfitting*. Overfitting is when a machine learning model performs worse on new data than on their training data. Make predictionsWith the model trained, we can use it to make predictions about some images.
###Code
predictions = model.predict(test_images)
###Output
_____no_output_____
###Markdown
Here, the model has predicted the label for each image in the testing set. Let's take a look at the first prediction:
###Code
predictions[0]
###Output
_____no_output_____
###Markdown
A prediction is an array of 10 numbers. These describe the "confidence" of the model that the image corresponds to each of the 10 different articles of clothing. We can see which label has the highest confidence value:
###Code
np.argmax(predictions[0])
###Output
_____no_output_____
###Markdown
So the model is most confident that this image is an ankle boot, or `class_names[9]`. And we can check the test label to see this is correct:
###Code
test_labels[0]
###Output
_____no_output_____
###Markdown
We can graph this to look at the full set of 10 channels
###Code
def plot_image(i, predictions_array, true_label, img):
predictions_array, true_label, img = predictions_array[i], true_label[i], img[i]
plt.grid(False)
plt.xticks([])
plt.yticks([])
plt.imshow(img, cmap=plt.cm.binary)
predicted_label = np.argmax(predictions_array)
if predicted_label == true_label:
color = 'blue'
else:
color = 'red'
plt.xlabel("{} {:2.0f}% ({})".format(class_names[predicted_label],
100*np.max(predictions_array),
class_names[true_label]),
color=color)
def plot_value_array(i, predictions_array, true_label):
predictions_array, true_label = predictions_array[i], true_label[i]
plt.grid(False)
plt.xticks([])
plt.yticks([])
thisplot = plt.bar(range(10), predictions_array, color="#777777")
plt.ylim([0, 1])
predicted_label = np.argmax(predictions_array)
thisplot[predicted_label].set_color('red')
thisplot[true_label].set_color('blue')
###Output
_____no_output_____
###Markdown
Let's look at the 0th image, predictions, and prediction array.
###Code
i = 0
plt.figure(figsize=(6,3))
plt.subplot(1,2,1)
plot_image(i, predictions, test_labels, test_images)
plt.subplot(1,2,2)
plot_value_array(i, predictions, test_labels)
i = 12
plt.figure(figsize=(6,3))
plt.subplot(1,2,1)
plot_image(i, predictions, test_labels, test_images)
plt.subplot(1,2,2)
plot_value_array(i, predictions, test_labels)
###Output
_____no_output_____
###Markdown
Let's plot several images with their predictions. Correct prediction labels are blue and incorrect prediction labels are red. The number gives the percent (out of 100) for the predicted label. Note that it can be wrong even when very confident.
###Code
# Plot the first X test images, their predicted label, and the true label
# Color correct predictions in blue, incorrect predictions in red
num_rows = 5
num_cols = 3
num_images = num_rows*num_cols
plt.figure(figsize=(2*2*num_cols, 2*num_rows))
for i in range(num_images):
plt.subplot(num_rows, 2*num_cols, 2*i+1)
plot_image(i, predictions, test_labels, test_images)
plt.subplot(num_rows, 2*num_cols, 2*i+2)
plot_value_array(i, predictions, test_labels)
###Output
_____no_output_____
###Markdown
Finally, use the trained model to make a prediction about a single image.
###Code
# Grab an image from the test dataset
img = test_images[0]
print(img.shape)
###Output
_____no_output_____
###Markdown
`tf.keras` models are optimized to make predictions on a *batch*, or collection, of examples at once. So even though we're using a single image, we need to add it to a list:
###Code
# Add the image to a batch where it's the only member.
img = (np.expand_dims(img,0))
print(img.shape)
###Output
_____no_output_____
###Markdown
Now predict the image:
###Code
predictions_single = model.predict(img)
print(predictions_single)
plot_value_array(0, predictions_single, test_labels)
_ = plt.xticks(range(10), class_names, rotation=45)
###Output
_____no_output_____
###Markdown
`model.predict` returns a list of lists, one for each image in the batch of data. Grab the predictions for our (only) image in the batch:
###Code
np.argmax(predictions_single[0])
###Output
_____no_output_____ |
Sandpit/graph_dfs_recursion.ipynb | ###Markdown
Graph Depth-First Search With Recursion We've done depth-first search previously using an iterative approach (i.e., using a loop). In this notebook, we'll show how to implement a recursive soluton.The basic idea is to select a node and explore all the possible paths from that node, and to apply this recursively to each node we are exploring.You can see some helpful illustrations with various combinations here: https://www.cs.usfca.edu/~galles/visualization/DFS.html
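###Markdown
For comparison with the iterative approach mentioned above, here is a minimal stack-based sketch (my own illustration; it assumes the same Node class defined in the next cell):
###Code
def dfs_iterative(start_node, search_value):
    """Illustrative iterative DFS using an explicit stack."""
    visited = set()
    stack = [start_node]
    while stack:
        node = stack.pop()              # take the most recently added node
        if node in visited:
            continue
        visited.add(node)
        if node.value == search_value:
            return node
        stack.extend(node.children)     # children get explored depth-first
    return None
###Output
_____no_output_____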
###Code
# For this exercise we will be using an Adjacency List representation to store the graph.
# Class Node representation.
class Node:
def __init__(self,val):
self.value = val
self.children = []
def add_child(self,new_node):
self.children.append(new_node)
def remove_child(self,del_node):
if del_node in self.children:
self.children.remove(del_node)
class Graph():
def __init__(self,node_list):
self.nodes = node_list
def add_edge(self,node1,node2):
if(node1 in self.nodes and node2 in self.nodes):
node1.add_child(node2)
node2.add_child(node1)
def remove_edge(self,node1,node2):
if(node1 in self.nodes and node2 in self.nodes):
node1.remove_child(node2)
node2.remove_child(node1)
###Output
_____no_output_____
###Markdown
Initializing Graph with an example Consider the example graph structure over the nodes G, R, A, P, H and S. The following code initializes all the edges of this structure.
###Code
# Creating a graph as above.
nodeG = Node('G')
nodeR = Node('R')
nodeA = Node('A')
nodeP = Node('P')
nodeH = Node('H')
nodeS = Node('S')
graph1 = Graph([nodeS,nodeH,nodeG,nodeP,nodeR,nodeA] )
graph1.add_edge(nodeG,nodeR)
graph1.add_edge(nodeA,nodeR)
graph1.add_edge(nodeA,nodeG)
graph1.add_edge(nodeR,nodeP)
graph1.add_edge(nodeH,nodeG)
graph1.add_edge(nodeH,nodeP)
graph1.add_edge(nodeS,nodeR)
# To verify that the graph is created accurately.
# Let's just print all the parent nodes and child nodes.
for each in graph1.nodes:
print('parent node = ',each.value,end='\nchildren\n')
for each in each.children:
print(each.value,end=' ')
print('\n')
###Output
parent node = S
children
R
parent node = H
children
G P
parent node = G
children
R A H
parent node = P
children
R H
parent node = R
children
G A P S
parent node = A
children
R G
###Markdown
Sample input and output The output would vary based on the implementation of your algorithm, the order in which children are stored within the adjacency list. DFS using recursionNow that we have our example graph initialized, we are ready to do the actual depth-first search. Here's what that looks like:
###Code
def dfs_recursion_start(start_node, search_value):
visited = set()
return dfs_recursion(start_node, search_value, visited)
def dfs_recursion(current_node, search_value, visited):
if current_node not in visited:
visited.add(current_node)
if current_node.value == search_value:
return current_node
for child in current_node.children:
result = dfs_recursion(child, search_value, visited)
if type(result) is Node:
return result
return None
assert nodeA == dfs_recursion_start(nodeG, 'A')
assert nodeA == dfs_recursion_start(nodeS, 'A')
assert nodeS == dfs_recursion_start(nodeP, 'S')
assert nodeR == dfs_recursion_start(nodeH, 'R')
###Output
_____no_output_____ |
SouthwesternAlbertaOpenData/place-commute-work.ipynb | ###Markdown
Data Challenge: Using Pandas Dataframes With Commuting Data Now that we are familiar with data structures, let's tackle a problem with real data. We'll be working with data from the 2016 Canadian Census. Four years ago, an Alberta scientist used DNA extracted from an [Albertosaurus](https://en.wikipedia.org/wiki/Albertosaurus) fossil to create a living dinosaur. It escaped from her lab, and a mutation caused it to eat cars and bicycles. Local governments are asking you to use open data to determine how many people commute by car and bicycle so they can send targeted warning messages. **[Data Science Tip]** Cite your source.
Statistics Canada. 2017. Lethbridge [Census metropolitan area], Alberta and Saskatchewan [Province] (table). Census Profile. 2016 Census. Statistics Canada Catalogue no. 98-316-X2016001. Ottawa. Released November 29, 2017. https://www12.statcan.gc.ca/census-recensement/2016/dp-pd/prof/index.cfm?Lang=E (accessed February 13, 2020). Names Double-click this cell to edit it. Names of group members: ✏️ Name of school: ✏️ In the next section we will use Pandas dataframes to explore this dataset and learn more about where people in southwestern Alberta work, how they get to work, and how long it takes them to commute. Let's create a few lists with data categories that are found in our dataset.
###Code
# Build data subsets
place_of_work_data = [
'Worked at home',
'Worked outside Canada',
'No fixed workplace address',
'Worked at usual place']
commuting_employed_data = [
'Commute within census subdivision (CSD) of residence',
'Commute to a different census subdivision (CSD) within census division (CD) of residence',
'Commute to a different census subdivision (CSD) and census division (CD) within province or territory of residence',
'Commute to a different province or territory']
mode_commute_data = [
'Car, truck, van - as a driver',
'Car, truck, van - as a passenger',
'Public transit',
'Walked',
'Bicycle',
'Other method']
commuting_duration_data =[
'Less than 15 minutes',
'15 to 29 minutes',
'30 to 44 minutes',
'45 to 59 minutes',
'60 minutes and over']
leave_for_work_data = [
'Between 5 a.m. and 5:59 a.m.',
'Between 6 a.m. and 6:59 a.m.',
'Between 7 a.m. and 7:59 a.m.',
'Between 8 a.m. and 8:59 a.m.',
'Between 9 a.m. and 11:59 a.m.',
'Between 12 p.m. and 4:59 a.m.']
print('Data subsets successfully created.')
###Output
_____no_output_____
###Markdown
Let's now create a dictionary using the lists above.
###Code
# Combo datasets
work_environment_dictionary = {"Place of work":place_of_work_data,
"Commuting":commuting_employed_data,
"Mode of commute":mode_commute_data,
"Commuting duration":commuting_duration_data,
"Leaving for work time":leave_for_work_data}
print('Data dictionary created.')
###Output
_____no_output_____
###Markdown
Challenge 1 (Exploratory) Use the `work_environment_dictionary` to access any of the categories that pique your interest. Practice using multiple keys, and try accessing different values within each category using indexing notation.
###Code
# ✏️ your response here
###Output
_____no_output_____
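###Markdown
For example (one possible answer, using only the lists and dictionary defined above):
###Code
# Access a whole category with its key
print(work_environment_dictionary["Mode of commute"])

# Combine a key with indexing notation to reach single values
print(work_environment_dictionary["Mode of commute"][4])       # 'Bicycle'
print(work_environment_dictionary["Commuting duration"][0])    # 'Less than 15 minutes'
print(work_environment_dictionary["Place of work"][-1])        # 'Worked at usual place'
###Output
_____no_output_____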
###Markdown
Next we will import a number of libraries to help us. Run the cell below to import the libraries.
###Code
# Import libraries, get data subsets
import pandas as pd
import os, sys, glob, zipfile
from ipywidgets import widgets
from io import BytesIO
from urllib.request import urlopen
# Override RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility
import warnings
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
# Pandas settings
pd.set_option('display.max_rows', 800)
pd.set_option('display.max_columns', 800)
# load "cufflinks" library under short name "cf"
import cufflinks as cf
# command to display graphics correctly in Jupyter notebook
cf.go_offline()
def enable_plotly_in_cell():
import IPython
from plotly.offline import init_notebook_mode
display(IPython.core.display.HTML('''<script src="/static/components/requirejs/require.js"></script>'''))
init_notebook_mode(connected=False)
get_ipython().events.register('pre_run_cell', enable_plotly_in_cell)
print("Success! Libraries imported as expected and graphing is enabled.")
###Output
_____no_output_____
###Markdown
Now to download the data from Statistics Canada.There is a lot of information in the cell below, but basically what we are doing is:1. Downloading the data 2. Uncompressing the data3. Selecting the downloaded file4. Reading the file as a [Pandas](https://pandas.pydata.org) dataframeThe last step is important, as the `Pandas` code library transforms the contents of the file into a data structure that we can manipulate.Run the cell below. It will take a minute or two, so be patient.
###Code
print("Downloading data. Please wait...")
# Link to zipped data
link_csv = "https://www12.statcan.gc.ca/census-recensement/2016/dp-pd/prof/details/download-telecharger/comp/GetFile.cfm?Lang=E&FILETYPE=CSV&GEONO=069"
# Unzip data in local directory
r = urlopen(link_csv).read()
z = zipfile.ZipFile(BytesIO(r))
print("Download complete. Extracting data files.")
z.extractall()
def hasNumbers(inputString):
return any(char.isdigit() for char in inputString)
print("Extraction complete. Parsing data into pandas dataframe.")
# Get CSV files only from extracted data sets
os.chdir("./")
csv_file = []
for file in glob.glob("*.csv"):
if hasNumbers(file):
census_table = pd.read_csv(file)
else:
continue
print("Success!")
###Output
_____no_output_____
###Markdown
Next we are going to clean the data. Run the cell below.
###Code
# Data cleanup - remove unused columns
census_table = census_table.drop(['GEO_CODE (POR)','GEO_LEVEL', 'GNR','GNR_LF','DATA_QUALITY_FLAG',\
'CSD_TYPE_NAME','ALT_GEO_CODE','Notes: Profile of Census Subdivisions (2247)',
'Member ID: Profile of Census Subdivisions (2247)'], axis=1)
# Data cleanup - Rename columns
census_table = census_table.rename(columns={"Dim: Sex (3): Member ID: [1]: Total - Sex": "Total - Sex",\
"Dim: Sex (3): Member ID: [2]: Male": "Male",\
"Dim: Sex (3): Member ID: [3]: Female":"Female"})
print('Data cleanup complete.')
###Output
_____no_output_____
###Markdown
Time to use our data categories. Run the cell below to print the different categories you can experiment with.
###Code
# Show categories
## Build widgets
# Region of interest
cities = ["Brooks", "Lethbridge","Medicine Hat"]
style = {'description_width': 'initial'}
all_the_widgets = [widgets.Dropdown(
value = work_environment_dictionary["Place of work"],
options = work_environment_dictionary,
description ='Data subsets:',
style = style,
disabled=False),widgets.Dropdown(
value = 'Lethbridge',
options = cities,
description ='City:',
style = style,
disabled=False), widgets.Dropdown(
value = cities[1],
options = cities,
description ='Data subsets:',
style = style,
disabled=False),widgets.Dropdown(
value = 'Lethbridge',
options = cities,
description ='City:',
style = style,
disabled=False)]
display(all_the_widgets[0])
###Output
_____no_output_____
###Markdown
Challenge 2 (Data Science) 1. Run the cell below. 2. Look at the table and the graph. 3. What are your observations about that category? 4. Use the menu above to help you remember the names of the keys. 5. In the cell below, replace "Place of work" with another category you are interested in, then run the cell. 6. Repeat steps 2 and 3. 7. Double-click on this cell to enter your observations about the data. Include numbers, and note how males and females compare across the categories. ✏️ Your observations here:
###Code
# ✏️ Your answer here
data_category = work_environment_dictionary["Place of work"]
# _____ Once that is complete, run the cell
# Display dataset
# Get subsets of the data for Lethbridge and Lethbridge County
region = census_table[census_table["GEO_NAME"]==cities[1]]
# Set index to Profile of Census Subdivisions
region.set_index('DIM: Profile of Census Subdivisions (2247)', inplace=True)
var = data_category
display(region.loc[var])
# Drop Census Year and Geo name as they are the same for this subset
vis_data = region.loc[var].drop(["CENSUS_YEAR","GEO_NAME"],axis=1)
# Visualize data
vis_data.iplot(kind="bar",values="Dim: Sex (3): Member ID: [1]: Total - Sex",\
labels="Dim: Sex (3): Member ID: [1]: Total - Sex",
title="Workers conditions in " + cities[1])
###Output
_____no_output_____
###Markdown
BONUS: Comparing how people in Lethbridge, Brooks, and Medicine Hat get to workContinue exploring the data for three cities: Lethbridge, Brooks, and Medicine Hat. Use the dictionary as you did in previous exercises.
###Code
data_combo_widget = [widgets.Dropdown(
value = work_environment_dictionary["Place of work"],
options = work_environment_dictionary,
description ='Data subsets:',
style = style,
disabled=False)]
display(data_combo_widget[0])
var = work_environment_dictionary["Place of work"]
rows = []
for i in range(len(cities)):
city = census_table[(census_table["GEO_NAME"]==cities[i])]
for i in range(len(var)):
row = city[city["DIM: Profile of Census Subdivisions (2247)"]==var[i]]
rows.append(row)
result = pd.concat(rows)
display(result)
result["Male"] = result["Male"].astype(int)
result["Female"] = result["Female"].astype(int)
by_region = result.pivot_table(columns=["GEO_NAME"],\
index="DIM: Profile of Census Subdivisions (2247)",\
values=["Male","Female"])
by_region.iplot(kind='bar',title="Workers conditions in " + cities[0] +", " + cities[1] + " and "+ cities[2])
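# Note (suggestion, not in the original notebook): the dropdown displayed above is never read.
# To drive this plot from it, you could replace the hard-coded line
#     var = work_environment_dictionary["Place of work"]
# with
#     var = data_combo_widget[0].value
# and re-run this cell after changing the selection.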
###Output
_____no_output_____ |
_notebooks/2021-03-31-Projectile -Motion-II.ipynb | ###Markdown
Projectile Motion - II In my previous post, I discussed how to simulate the vertical motion of a projectile in free fall without considering air resistance. In this section, I am going to simulate another kind of projectile motion, considering the effect of air resistance. Consider a cannonball fired towards a target with a certain velocity, say $100 m/s$. Suppose the target is 500 m from the cannon (the value used in the code below). What should be the angle of fire so that the cannonball hits the target accurately? Take the drag coefficient $k$ to be approximately 0.01. The projectile motion is influenced by the force of gravity and the drag force due to air resistance. The equations of motion are: 1. For motion in the horizontal direction$$\frac{d^2x}{dt^2}=-kv_x$$2. For motion in the vertical direction$$\frac{d^2y}{dt^2}=-g -kv_y$$For solving the differential equations numerically, let us split each second-order equation into two first-order differential equations.$$\frac{dx}{dt}=v_x $$$$\frac{dv_x}{dt}=-kv_x$$$$\frac{dy}{dt}=v_y $$$$\frac{dv_y}{dt}=-g-kv_y$$The initial conditions are$$ x(0) = 0\\ y(0) = 0\\ v_x(0)=v\cos\theta\\ v_y(0)=v\sin\theta $$$\theta$ is the angle of projection. So, the plan of the simulation is as follows: 1. Solve the differential equations by any standard method. I am using the `odeint` function from the `SciPy` package. For a particular angle of projection ($\theta$), the solver gives the horizontal distance ($x$), vertical distance ($y$), horizontal velocity ($v_x$) and vertical velocity ($v_y$) at each time step ($t$). 2. By applying linear interpolation, we can accurately estimate the point where the projectile hits the ground. 3. Then we can find the difference between the target and the point where the projectile hits the ground. 4. The difference between the target and the landing point for an arbitrary angle of projection can be driven below a tolerance limit by searching for an appropriate angle. This can be done with the bisection method, which is commonly used to locate a root of a continuous function.
###Code
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint
from scipy.optimize import bisect
#basic parametrs
k = 0.01
g = 9.8
v = 100
target = 500
t_init, t_final, step_size = 0, 20, 0.001
t = np.arange(t_init, t_final, step_size)
z = np.zeros([len(t),4])
def model(z, t, params):
x, y, vx, vy = z
dx_dt = vx
dy_dt = vy
dvx_dt = -k*vx
dvy_dt = -g-k*vy
dz_dt = np.array([dx_dt, dy_dt, dvx_dt, dvy_dt])
return dz_dt
@np.vectorize
def diff(theta):
theta = np.radians(theta)
params = [k, g, theta]
x0, y0, vx0, vy0 = 0, 0, v*np.cos(theta), v*np.sin(theta)
z0 = [x0, y0, vx0, vy0]
sol = odeint(model, z0, t, args=(params,))
x, y, vx, vy = sol[:, 0], sol[:, 1], sol[:, 2], sol[:, 3]
y = y[y>=0]
x = x[:len(y)]
vx = vx[:len(y)]
vy = vy[:len(y)]
    xground = x[-2] + y[-2]*(x[-1]-x[-2])/(y[-2]-y[-1])  # extrapolate the last two points above ground to y = 0
diff = xground - target
return diff
def plot():
fig, ax = plt.subplots(figsize=(8, 5))
# set the x-spine
ax.spines['left'].set_position('zero')
# turn off the right spine/ticks
ax.spines['right'].set_color('none')
ax.yaxis.tick_left()
# set the y-spine
ax.spines['bottom'].set_position('zero')
# turn off the top spine/ticks
ax.spines['top'].set_color('none')
ax.xaxis.tick_bottom()
plot()
theta = np.arange(10, 90, 0.1)
plt.plot(theta, diff(theta), ':r')
plt.xlabel('$\\theta$', fontsize=14)
plt.ylabel('$\Delta x$', fontsize=14)
plt.xlim(0, 90)
plt.show()
###Output
_____no_output_____
###Markdown
$\Delta x$, which represents the difference between the target and the point where the projectile hits the ground, is plotted against the angle of projection. It is seen that there are two angles of projection for which the projectile can accurately hit the target. The first one lies in the interval $(10^{\circ}, 20^{\circ})$ and the second one in the interval $(70^{\circ}, 80^{\circ})$. Using the `bisect` function from `SciPy`, we can find the exact values of these angles.
###Code
angle1 = bisect(diff, 10, 20)
angle2 = bisect(diff, 70, 80)
print('\n Angle1 = %0.2f'%angle1,'\n Angle2 = %0.2f'%angle2)
diff1 = diff(angle1)
diff2 = diff(angle2)
print('\n Difference for angle1 = %0.3f'%diff1,'\n Difference for angle2 = %0.11f'%diff2)
###Output
Angle1 = 15.26
Angle2 = 73.15
Difference for angle1 = -0.085
Difference for angle2 = -0.00000000002
###Markdown
After getting the exact values of the angle of projection, we can use the `projectile` function to plot the trajectories, as shown in the figure.
###Code
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint
from scipy.optimize import bisect
def model(z, t, params):
x, y, vx, vy = z
dx_dt = vx
dy_dt = vy
dvx_dt = -k*vx
dvy_dt = -g-k*vy
dz_dt = np.array([dx_dt, dy_dt, dvx_dt, dvy_dt])
return dz_dt
def projectile(angle):
theta = np.radians(angle)
params = [k, g, theta]
x0, y0, vx0, vy0 = 0, 0, v*np.cos(theta), v*np.sin(theta)
z0 = [x0, y0, vx0, vy0]
sol = odeint(model, z0, t, args=(params,))
x, y, vx, vy = sol[:, 0], sol[:, 1], sol[:, 2], sol[:, 3]
y = y[y>=0]
x = x[:len(y)]
vx = vx[:len(y)]
vy = vy[:len(y)]
return x, y
plot()
x1, y1 = projectile(angle1)
x2, y2 = projectile(angle2)
plt.plot(x1, y1, ls='--', color='purple', label='$\\theta$ = %d$^{\circ}$'%angle1)
plt.plot(x2, y2, ls='--', color='blue', label='$\\theta$ = %d$^{\circ}$'%angle2)
plt.plot(500, 0, 'ro', markersize=10)
plt.plot(0, 0, 'ko', markersize=10)
plt.ylim(0, 500)
plt.xlabel('x', fontsize=14)
plt.ylabel('y', fontsize=14)
plt.title('Projectile Motion', fontsize=16)
plt.legend(frameon=False)
plt.annotate('Starting point', xy=(0,0), xytext=(50, 100), arrowprops=dict(arrowstyle='->'), fontsize=14)
plt.annotate('Target', xy=(500,0), xytext=(350, 100), arrowprops=dict(arrowstyle='->'), fontsize=14)
plt.show()
###Output
_____no_output_____ |
src/exploratory_data_analysis/chance/python/aws_predict/training/notebooks/USWildfireClassification.ipynb | ###Markdown
Data Preparation Library Imports
###Code
# Base Imports
import sqlite3
import pandas as pd
import numpy as np
# Pre-processing
from sklearn.preprocessing import LabelEncoder
# Metrics and Evaluation
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
# Model Selection
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
# Pipeline
from sklearn.pipeline import Pipeline
import joblib
# Estimators
from sklearn.multiclass import OneVsRestClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
estimator = "decision_tree_classifier"
conn = sqlite3.connect('../../../../../../../data/FPA_FOD_20170508.sqlite')
df_fires = pd.read_sql_query("SELECT LATITUDE, LONGITUDE, DISCOVERY_DATE, FIRE_SIZE, STATE,OWNER_DESCR, STAT_CAUSE_DESCR FROM 'Fires'", conn)
df_fires.info()
df_fires.isna().any()
df_fires["DISCOVERY_DATETIME"] = pd.to_datetime(df_fires["DISCOVERY_DATE"], unit='D', origin='julian')
df_fires['DISCOVERY_DAY_OF_WEEK'] = df_fires["DISCOVERY_DATETIME"].dt.day_name()
# create an instance of LabelEncoder
label_encoder = LabelEncoder()
# map to numerical values in a new variable
df_fires["STATE_CAT"] = label_encoder.fit_transform(df_fires['STATE'])
df_fires["OWNER_DESCR_CAT"] = label_encoder.fit_transform(df_fires['OWNER_DESCR'])
df_fires["DISCOVERY_DAY_OF_WEEK_CAT"] = label_encoder.fit_transform(df_fires['DISCOVERY_DAY_OF_WEEK'])
df_fires.info()
X = df_fires[["LATITUDE", "LONGITUDE", "DISCOVERY_DATE", "FIRE_SIZE", "STATE_CAT", "OWNER_DESCR_CAT", "DISCOVERY_DAY_OF_WEEK_CAT"]]
y = df_fires["STAT_CAUSE_DESCR"]
###Output
_____no_output_____
###Markdown
Train / Test Split
###Code
X_train, X_test, y_train, y_test = train_test_split(X, y,
test_size=0.1,
random_state=1,
stratify=y)
###Output
_____no_output_____
###Markdown
Gaussian Naive Bayes Classifier
###Code
%%time
if estimator == "gaussian_nb":
clf = OneVsRestClassifier(GaussianNB())
clf.fit(X_train, y_train)
###Output
CPU times: user 3 µs, sys: 0 ns, total: 3 µs
Wall time: 5.72 µs
###Markdown
Train Decision Classifier
###Code
%%time
if estimator == "decision_tree_classifier":
clf = OneVsRestClassifier(DecisionTreeClassifier(random_state=1,
splitter='best',
min_samples_split=5,
min_samples_leaf=4,
max_features='auto',
class_weight=None))
clf.fit(X_train, y_train)
%%time
y_pred = clf.predict(X_test)
%%time
print ('accuracy:', accuracy_score(y_test, y_pred))
print(classification_report(y_test, y_pred))
if estimator == "decision_tree_classifier":
joblib.dump(clf, '../models/decission_tree_classifier.pkl', compress=3)
elif estimator == "gaussian_nb":
joblib.dump(clf, '../models/gaussian_nb_classifier.pkl')
elif estimator =="kneighbors_classifier":
joblib.dump(clf, '../models/knn_classifier.pkl')
else:
pass
print(X_test[:1])
print(y_test[:1])
if estimator == "decision_tree_classifier":
classifier = joblib.load('../models/decission_tree_classifier.pkl')
elif estimator == "gaussian_nb":
classifier = joblib.load('../models/gaussian_nb_classifier.pkl')
elif estimator =="kneighbors_classifier":
classifier = joblib.load('../models/knn_classifier.pkl')
else:
pass
# classifier = joblib.load('../models/decission_tree_classifier.pkl')
pred_test = [[43.235833, -122.466944, 2452859.5, 0.1, 37, 15, 0]]
classifier.predict(pred_test)
pred_proba = classifier.predict_proba(pred_test)
pred_proba
max_proba = np.argmax(pred_proba, axis=1)
pred_proba[[0][0]][int(max_proba)]
classifier.classes_
###Output
_____no_output_____ |
modelmaker/resources/templates/default/notebooks/mnist.ipynb | ###Markdown
MNIST digits classification datasethttps://keras.io/api/datasets/mnist/load_data-function**Tuple of Numpy arrays:** (x_train, y_train), (x_test, y_test)**x_train, x_test:** uint8 arrays of grayscale image data with shapes (num_samples, 28, 28)**y_train, y_test:** uint8 arrays of digit labels (integers in range 0-9) with shapes (num_samples,)
###Code
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt

(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data(path="mnist.npz")
print(f"training set as {x_train.shape[0]} samples each with dimensions {x_train[0].shape}")
print(f"test set as {x_test.shape[0]} samples each with dimensions {x_test[0].shape}")
print(f"class labels: {np.unique(y_train)}")
plt.title(f"label = {y_train[0]}")
plt.imshow(x_train[0])
###Output
_____no_output_____ |
.ipynb_checkpoints/XGBoost-checkpoint.ipynb | ###Markdown
Based on [PUBG finish placement prediction](https://www.kaggle.com/c/pubg-finish-placement-prediction/overview) Problem definition> ... given over 65,000 games' worth of anonymized player data, split into training and testing sets, ... predict final placement from final in-game stats and initial player ratings. What's the best strategy to win in PUBG? Should you sit in one spot and hide your way into victory, or do you need to be the top shot? Let's let the data do the talking! PUBG (Battlegrounds) is a battle-royale game in which 100 players drop onto an island, fight each other, and try to survive the shrinking play zone. Players use a variety of strategies to finish in 1st place: some land where the crowds are and go for early kills, while others land in empty areas and save themselves for the late game; some drive around noisily in vehicles, while others quietly move along the rivers. We want to find out which of these factors affect a game's final placement, which strategies high-placing players use, and what it takes to earn a high placement. 0. Import Libraries
###Code
# # For autoreloading modules
# ## IPython extension to reload modules before executing user code.
# ## autoreload reloads modules automatically before entering the execution of code typed at the IPython prompt.
# %load_ext autoreload
# %autoreload 2 # Reload all modules (except those excluded by %aimport) every time before executing the Python code typed.
# # For notebook plotting
# %matplotlib inline
###Output
_____no_output_____
###Markdown
**Standard libraries**
###Code
import os
import numpy as np
import pandas as pd
###Output
_____no_output_____
###Markdown
**Visualization**
###Code
import matplotlib.pyplot as plt
import seaborn as sns
#from pdpbox import pdp
#from plotnine import *
#from pandas_summary import DataFrameSummary
from sklearn.ensemble import RandomForestRegressor
from IPython.display import display
###Output
_____no_output_____
###Markdown
**Machine Learning**
###Code
import sklearn
from sklearn import metrics
from scipy.cluster import hierarchy as hc
#from sklearn_pandas import DataFrameMapper
from sklearn.preprocessing import LabelEncoder, Imputer, StandardScaler
from pandas.api.types import is_string_dtype, is_numeric_dtype
from sklearn.ensemble import forest
from sklearn.tree import export_graphviz
###Output
_____no_output_____
###Markdown
**Random Shuffle**
###Code
from sklearn.utils import shuffle
###Output
_____no_output_____
###Markdown
1. Acquire DataAcquire training and testing datasets into **Pandas** DataFrames
###Code
root = ''
train = pd.read_csv(root + 'train_V2.csv')
#test = pd.read_csv(root + 'test_V2.csv')
train = shuffle(train)
test = train.iloc[3600000:]
train = train.iloc[:3600000]
# The test set must later be adjusted in the same way: add the same new features and drop the same ones.
###Output
_____no_output_____
###Markdown
2. Analyze Data Training data Overview
###Code
# train.info()
# train.describe()
# train.describe(include=['O'])
###Output
_____no_output_____
###Markdown
- Categorical: Id, groupId, matchId, matchType - Numerical: assists, boosts, damageDealt, DBNOs, headshotKills, heals, killPlace, killPoints, kills, killStreaks, longestKill, matchDuration, maxPlace, numGroups, rankPoints, revives, rideDistance, roadKills, swimDistance, teamKills, vehicleDestroys, walkDistance, weaponsAcquired, winPoints - Target: winPlacePerc. Simple Assumptions: features that do not need to be taken into consideration for model training - Id, groupId: these features merely describe the identity of the player, and therefore their values will show no correlation with winning - matchId, matchDuration: these features merely describe the match, and therefore their values will have no correlation with winning - rankPoints, winPoints, killPoints: these features have too many missing values. Drop the row with null value: there exists a row in which the **winPlacePerc** column value is NaN. We must drop this row.
###Code
# Check row with NaN value
train[train['winPlacePerc'].isnull()]
# Drop row with NaN 'winPlacePerc' value
train.drop(2744604, inplace=True)
# The row at index 2744604 will be gone
train[train['winPlacePerc'].isnull()]
###Output
_____no_output_____
###Markdown
Analysis by visualizing data. Feature Distribution Visualization: To understand the dataset, we will visualize each feature as a histogram. **Histograms** show the sample distribution of continuous numerical variables, where value ranges help identify patterns; see the sketch below.
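As a quick illustration, a minimal sketch of this kind of plot (it assumes `train` has already been loaded as above; the choice of columns is mine):

```python
import matplotlib.pyplot as plt

cols = ['kills', 'assists', 'boosts', 'heals']  # illustrative subset of count-like features
fig, axes = plt.subplots(2, 2, figsize=(10, 6))
for ax, col in zip(axes.ravel(), cols):
    ax.hist(train[col], bins=50)   # one histogram per panel
    ax.set_title(col)
plt.tight_layout()
plt.show()
```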
###Code
# plt.hist(train['assists'], bins=50)
# plt.title('assists')
# plt.show()
# Treat players above a certain kill-point threshold as skilled players..... widen the bins, trim the y-axis, and analyse how the characteristics differ across ranges
# Pull one out per DataFrame! To see the best performance!!
###Output
_____no_output_____
###Markdown
3. Wrangle Data
###Code
df_wrangle=train.copy()
###Output
_____no_output_____
###Markdown
Outlier, Anomaly Detection - merge
###Code
df_wrangle['totalDistance'] = df_wrangle['rideDistance'] + df_wrangle['walkDistance'] + df_wrangle['swimDistance']
# create killsWithoutMoving feature
df_wrangle['killsWithoutMoving'] = ((df_wrangle['kills'] > 0) & (df_wrangle['totalDistance'] == 0))
df_wrangle.drop('totalDistance',axis=1)
#create headshot_rate feature
df_wrangle['headshotRate'] = df_wrangle['headshotKills']/df_wrangle['kills']
df_wrangle['headshotRate'] = df_wrangle['headshotRate'].fillna(0)
# shape before outlier removal
df_wrangle.shape
###Output
_____no_output_____
###Markdown
Anomalies in Killing
###Code
#Kills without movement (rows with kills but no movement at all)
display(df_wrangle[df_wrangle['killsWithoutMoving']==True].shape)
df_wrangle=df_wrangle.drop(df_wrangle[df_wrangle['killsWithoutMoving'] == True].index)
df_wrangle=df_wrangle.drop('killsWithoutMoving', axis=1)
#roadKills (10 or more road kills)
display(df_wrangle[df_wrangle['roadKills']>10].shape)
df_wrangle=df_wrangle.drop(df_wrangle[df_wrangle['roadKills']>=10].index)
###Output
_____no_output_____
###Markdown
Anomalies in Aim
###Code
#30 or more kills
# Plot the distribution of kills
plt.figure(figsize=(12,4))
sns.countplot(data=df_wrangle, x=df_wrangle['kills']).set_title('Kills')
#plt.show()
display(df_wrangle[df_wrangle['kills']>=30].shape)
df_wrangle=df_wrangle.drop(df_wrangle[df_wrangle['kills']>=30].index)
#100% headshot rate (too ambiguous to remove)
plt.figure(figsize=(12,4))
sns.distplot(df_wrangle['headshotRate'], bins=10)
#plt.show()
df_wrangle=df_wrangle.drop('headshotRate', axis=1) # helper column, no longer needed
#longestKill (kills made from more than 1 km away)
plt.figure(figsize=(12,4))
sns.distplot(df_wrangle['longestKill'], bins=10)
#plt.show()
display(df_wrangle[df_wrangle['longestKill']>=1000].shape)
df_wrangle=df_wrangle.drop(df_wrangle[df_wrangle['longestKill']>=1000].index)
###Output
_____no_output_____
###Markdown
Anomalies in Distance
###Code
#df_wrangle[['walkDistance', 'rideDistance', 'swimDistance']].describe()
#walkDistance (remove 10 km or more)
plt.figure(figsize=(12,4))
sns.distplot(df_wrangle['walkDistance'], bins=10)
#plt.show()
display(df_wrangle[df_wrangle['walkDistance'] >= 10000].shape)
df_wrangle=df_wrangle.drop(df_wrangle[df_wrangle['walkDistance']>=10000].index)
#rideDistance (remove 20 km or more)
plt.figure(figsize=(12,4))
sns.distplot(df_wrangle['rideDistance'], bins=10)
#plt.show()
display(df_wrangle[df_wrangle['rideDistance'] >= 20000].shape)
df_wrangle=df_wrangle.drop(df_wrangle[df_wrangle['rideDistance']>=20000].index)
#swimDistance (remove 2 km or more)
plt.figure(figsize=(12,4))
sns.distplot(df_wrangle['swimDistance'], bins=10)
#plt.show()
display(df_wrangle[df_wrangle['swimDistance'] >= 2000].shape)
df_wrangle=df_wrangle.drop(df_wrangle[df_wrangle['swimDistance']>=2000].index)
###Output
_____no_output_____
###Markdown
Anomalies in supplies
###Code
#weaponsAcquired (remove 80 or more)
plt.figure(figsize=(12,4))
sns.distplot(df_wrangle['weaponsAcquired'], bins=100)
#plt.show()
display(df_wrangle[df_wrangle['weaponsAcquired'] >= 80].shape)
df_wrangle=df_wrangle.drop(df_wrangle[df_wrangle['weaponsAcquired']>=80].index)
#heals (remove 40 or more)
plt.figure(figsize=(12,4))
sns.distplot(df_wrangle['heals'], bins=10)
#plt.show()
display(df_wrangle[df_wrangle['heals'] >= 40].shape)
df_wrangle=df_wrangle.drop(df_wrangle[df_wrangle['heals']>=40].index)
# shape after outlier removal
df_wrangle.shape
df_wrangle.drop(columns = ['totalDistance'], inplace=True)
#df_wrangle_anomaly = df_wrangle.copy()
###Output
_____no_output_____
###Markdown
Normalize - merge make 'totalPlayer' column
###Code
# count total players in match
df_wrangle['totalPlayers'] = df_wrangle.groupby('matchId')['matchId'].transform('count')
plt.figure(figsize=(15,10))
sns.countplot(df_wrangle[df_wrangle['totalPlayers']>=75]['totalPlayers'])
plt.title('Total Player')
#plt.show()
# Create normalized val.
df_wrangle['killsNorm'] = df_wrangle['kills']*((100-df_wrangle['totalPlayers'])/100 + 1)
df_wrangle['matchDurationNorm'] = df_wrangle['matchDuration']*((100-df_wrangle['totalPlayers'])/100 + 1)
df_wrangle['damageDealtNorm'] = df_wrangle['damageDealt']*((100-df_wrangle['totalPlayers'])/100 + 1)
df_wrangle['maxPlaceNorm'] = df_wrangle['maxPlace']*((100-df_wrangle['totalPlayers'])/100 + 1)
df_wrangle['weaponsAcquiredNorm'] = df_wrangle['weaponsAcquired']*((100-df_wrangle['totalPlayers'])/100 + 1)
#df_wrangle['healsandboostsNorm'] = df_wrangle['healsandboosts']*((100-df_wrangle['totalPlayers'])/100 + 1)
# Compare standard features and normalized features
to_show = ['kills','killsNorm', 'matchDuration', 'matchDurationNorm', 'weaponsAcquired', 'weaponsAcquiredNorm' ]
df_wrangle[to_show][0:11]
#df_wrangle_normalize = df_wrangle.copy()
###Output
_____no_output_____
###Markdown
Feature generation. Based on our domain understanding, we judged it appropriate to generate new features: one that combines heals with boosts, and one that combines rideDistance, walkDistance and swimDistance.
###Code
# # Create new feature healsandboosts
df_wrangle['healsandboosts'] = df_wrangle['heals'] + df_wrangle['boosts']
# # Create feature totalDistance
df_wrangle['totalDistance'] = df_wrangle['rideDistance'] + df_wrangle['walkDistance'] + df_wrangle['swimDistance']
df_wrangle = df_wrangle.drop(['rideDistance', 'walkDistance', 'swimDistance', 'heals', 'boosts' ],axis =1)
# # Create feature killsWithoutMoving
# train['killsWithoutMoving'] = ((train['kills'] > 0) & (train['totalDistance'] == 0))
###Output
_____no_output_____
###Markdown
One hot encoding for matchType**matchType**
###Code
print('There are {} different Match types in the dataset.'.format(df_wrangle['matchType'].nunique()))
# One hot encode matchType
df_wrangle = pd.get_dummies(df_wrangle, columns=['matchType'])
train = pd.get_dummies(train, columns=['matchType'])
# Take a look at the encoding
matchType_encoding = df_wrangle.filter(regex='matchType')
matchType_encoding.head()
matchType_encoding_train = train.filter(regex='matchType')
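# Note: pd.get_dummies above has already replaced matchType with dummy columns in both frames,
# so the two concat calls below append a second, duplicate copy of those dummy columns.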
df_wrangle = pd.concat([df_wrangle, matchType_encoding], axis=1)
train = pd.concat([train, matchType_encoding_train], axis=1)
###Output
There are 16 different Match types in the dataset.
###Markdown
Convert matchId and groupId to the categorical dtype, and drop Id (one-hot encoding the Id columns would be computationally bad).
###Code
# Turn groupId and match Id into categorical types
df_wrangle['groupId'] = df_wrangle['groupId'].astype('category')
df_wrangle['matchId'] = df_wrangle['matchId'].astype('category')
train['groupId'] = train['groupId'].astype('category')
train['matchId'] = train['matchId'].astype('category')
# Get category coding for groupId and matchID
df_wrangle['groupId_cat'] = df_wrangle['groupId'].cat.codes
df_wrangle['matchId_cat'] = df_wrangle['matchId'].cat.codes
train['groupId_cat'] = train['groupId'].cat.codes
train['matchId_cat'] = train['matchId'].cat.codes
# Get rid of old columns
df_wrangle.drop(columns=['groupId', 'matchId'], inplace=True)
train.drop(columns=['groupId', 'matchId'], inplace=True)
# Lets take a look at our newly created features
df_wrangle[['groupId_cat', 'matchId_cat']].head()
# Drop Id column, because it probably won't be useful for our Machine Learning algorithm,
# because the test set contains different Id's
df_wrangle.drop(columns = ['Id'], inplace=True)
train.drop(columns = ['Id'], inplace=True)
###Output
_____no_output_____
###Markdown
4. Model Data Set metrics(MAE)
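As a reminder, for predictions $\hat{y}_i$ and true values $y_i$ over $n$ players the metric is $$MAE = \frac{1}{n}\sum_{i=1}^{n}\left|y_i - \hat{y}_i\right|$$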
###Code
# Metric used for the PUBG competition (Mean Absolute Error (MAE))
from sklearn.metrics import mean_absolute_error
from sklearn.linear_model import LinearRegression
# Function to print the MAE (Mean Absolute Error) score
# This is the metric used by Kaggle in this competition
def print_score(m : LinearRegression):
res = ['mae train: ', mean_absolute_error(m.predict(X_train), y_train),
'mae val: ', mean_absolute_error(m.predict(X_valid), y_valid)]
if hasattr(m, 'oob_score_'): res.append(m.oob_score_)
print(res)
###Output
_____no_output_____
###Markdown
XGBoost Model for train sampling. Import the model.
###Code
import xgboost
sample = train.iloc[0:100000]
original = sample.drop(columns=['winPlacePerc', 'kills','matchDuration','damageDealt','maxPlace','weaponsAcquired'])
target = sample['winPlacePerc']
###Output
_____no_output_____
###Markdown
split data for training and validation
###Code
#Standardization
from sklearn.preprocessing import StandardScaler
sc=StandardScaler()
# Function for splitting training and validation data
def split_vals(a, n : int):
return a[:n].copy(), a[n:].copy()
val_perc = 0.1 # % to use for validation set
n_valid = int(val_perc * 100000)
n_trn = len(original)-n_valid
# Split data
raw_train, raw_valid = split_vals(sample, n_trn)
X_train, X_valid = split_vals(original, n_trn)
y_train, y_valid = split_vals(target, n_trn)
X_train=sc.fit_transform(X_train)
X_valid=sc.transform(X_valid)
# Check dimensions of samples
print('Sample train shape: ', X_train.shape,
'\nSample target shape: ', y_train.shape,
'\nSample validation shape: ', X_valid.shape)
# Train basic model
#m1 = LinearRegression()
#m1.fit(X_train, y_train)
#print_score(m1)
# Train basic model
m1 = xgboost.XGBRegressor(random_state=42,n_estimators=400, subsample = 0.8, colsample_bytree=1,max_depth=7, learning_rate=0.08)
m1.fit(X_train, y_train)
print_score(m1)
###Output
['mae train: ', 0.0550490599384843, 'mae val: ', 0.06475269047975063]
###Markdown
for df_wrangle sampling
###Code
## needs to be changed
sample = df_wrangle.iloc[0:100000]
# Split sample into training data and target variable
original = sample.drop(columns = ['winPlacePerc', 'kills','matchDuration','damageDealt','maxPlace','weaponsAcquired']) #all columns except target
target = sample['winPlacePerc'] # Only target variable
###Output
_____no_output_____
###Markdown
Split data for training and validation. (Notes: convert the csv to pickle/json; split the files per model, or split them by data; make the visualization a bit prettier; analyse the features that are correlated.)
###Code
n_trn = len(original)-n_valid
# Split data
X_train, X_valid = split_vals(original, n_trn)
y_train, y_valid = split_vals(target, n_trn)
X_train=sc.fit_transform(X_train)
X_valid=sc.transform(X_valid)
# Check dimensions of samples
print('Sample train shape: ', X_train.shape,
'\nSample target shape: ', y_train.shape,
'\nSample validation shape: ', X_valid.shape)
# Train basic model
#m2 = LinearRegression()
#m2.fit(X_train, y_train)
#print_score(m2)
# Train basic model
m2 = xgboost.XGBRegressor(random_state=42,n_estimators=400, subsample = 0.8, colsample_bytree=1,max_depth=7, learning_rate=0.08)
m2.fit(X_train, y_train)
print_score(m2)
###Output
['mae train: ', 0.049243022895292705, 'mae val: ', 0.05997060938482284]
###Markdown
Feature Importance
###Code
def xgb_feat_importance(m, df):
return pd.DataFrame({'cols':df.columns, 'imp':m.feature_importances_}).sort_values('imp', ascending=False)
## needs to be changed
sample = df_wrangle.iloc[0:100000]
# Split sample into training data and target variable
original = sample.drop(columns = ['winPlacePerc', 'kills','matchDuration','damageDealt','maxPlace','weaponsAcquired']) #all columns except target
target = sample['winPlacePerc'] # Only target variable
# What are the most predictive features according to our basic random forest model
fi = xgb_feat_importance(m2, original); fi[:10]
# Plot a feature importance graph for the 20 most important features
plot1 = fi[:20].plot('cols', 'imp', figsize=(14,6), legend=False, kind = 'barh', title='Top 20 important features')
# Keep only significant features
#to_keep = fi[fi.imp>0.005].cols
to_keep = fi[fi.imp>0.005].cols
print('Significant features: ', len(to_keep))
to_keep.describe()
to_keep
list(to_keep)
# think about whether keeping the top 20 is the right cutoff
###Output
_____no_output_____
###Markdown
Keep only significant features
###Code
sample = df_wrangle.iloc[0:100000]
# Split sample into training data and target variable
original = sample.drop(columns =['winPlacePerc', 'kills','matchDuration','damageDealt','maxPlace','weaponsAcquired']) #all columns except target
#to_keep_drop_high_corr = list(set(list(to_keep)) - set(['killStreaks', 'damageDealt','maxPlace','numGroups', 'matchId_cat', 'groupId_cat']))
#original = original[to_keep_drop_high_corr]
original = original[to_keep]
target = sample['winPlacePerc'] # Only target variable
n_trn = len(original)-n_valid
# Split data
raw_train, raw_valid = split_vals(sample, n_trn)
X_train, X_valid = split_vals(original, n_trn)
y_train, y_valid = split_vals(target, n_trn)
X_train=sc.fit_transform(X_train)
X_valid=sc.fit_transform(X_valid)
original.columns
# Check dimensions of data
print('Sample train shape: ', X_train.shape,
'\nSample target shape: ', y_train.shape,
'\nSample validation shape: ', X_valid.shape)
# You should get better results by increasing n_estimators
# and by playing around with the parameters
m3 = xgboost.XGBRegressor(random_state=42,n_estimators=400, subsample = 0.8, colsample_bytree=1,max_depth=7, learning_rate=0.08)
m3.fit(X_train, y_train)
print_score(m3)
## needs to be changed
sample = train.iloc[0:100000]
# Split sample into training data and target variable
original = sample.drop(columns = ['winPlacePerc', 'kills','matchDuration','damageDealt','maxPlace','weaponsAcquired']) #all columns except target
target = sample['winPlacePerc'] # Only target variable
# What are the most predictive features according to our basic random forest model
fi = xgb_feat_importance(m1, original); fi[:10]
# Plot a feature importance graph for the 20 most important features
plot1 = fi[:20].plot('cols', 'imp', figsize=(14,6), legend=False, kind = 'barh', title='Top 20 important features')
# Keep only significant features
#to_keep = fi[fi.imp>0.005].cols
to_keep = fi[fi.imp>0.015].cols
print('Significant features: ', len(to_keep))
to_keep.describe()
sample = train.iloc[0:100000]
# Split sample into training data and target variable
original = sample.drop(columns = ['winPlacePerc', 'kills','matchDuration','damageDealt','maxPlace','weaponsAcquired']) #all columns except target
to_keep_drop_high_corr = list(set(list(to_keep)) - set(['killStreaks', 'damageDealt','maxPlace','numGroups', 'matchId_cat', 'groupId_cat']))
#original = original[to_keep_drop_high_corr]
original = original[to_keep]
target = sample['winPlacePerc'] # Only target variable
n_trn = len(original)-n_valid
# Split data
raw_train, raw_valid = split_vals(sample, n_trn)
X_train, X_valid = split_vals(original, n_trn)
y_train, y_valid = split_vals(target, n_trn)
X_train=sc.fit_transform(X_train)
X_valid=sc.fit_transform(X_valid)
original.columns
# Check dimensions of data
print('Sample train shape: ', X_train.shape,
'\nSample target shape: ', y_train.shape,
'\nSample validation shape: ', X_valid.shape)
# You should get better results by increasing n_estimators
# and by playing around with the parameters
m4 = xgboost.XGBRegressor(random_state=42,n_estimators=400, subsample = 0.8, colsample_bytree=1,max_depth=7, learning_rate=0.08)
m4.fit(X_train, y_train)
print_score(m4)
## needs to be changed
sample = df_wrangle.iloc[0:100000]
# Split sample into training data and target variable
original = sample.drop(columns = ['winPlacePerc']) #all columns except target
target = sample['winPlacePerc'] # Only target variable
original = original.drop(columns = ['killStreaks', 'damageDealt','maxPlace','numGroups', 'matchId_cat', 'groupId_cat'])
X_train, X_valid = split_vals(original, n_trn)
y_train, y_valid = split_vals(target, n_trn)
X_train=sc.fit_transform(X_train)
X_valid=sc.transform(X_valid)
# Check dimensions of samples
print('Sample train shape: ', X_train.shape,
'\nSample target shape: ', y_train.shape,
'\nSample validation shape: ', X_valid.shape)
# Train basic model
m5 = xgboost.XGBRegressor(random_state=42,n_estimators=400, subsample = 0.8, colsample_bytree=1,max_depth=7, learning_rate=0.08)
m5.fit(X_train, y_train)
print_score(m5)
## needs to be changed
sample = df_wrangle.iloc[0:100000]
# Split sample into training data and target variable
original = sample.drop(columns = ['winPlacePerc', 'kills','matchDuration','damageDealt','maxPlace','weaponsAcquired']) #all columns except target
target = sample['winPlacePerc'] # Only target variable
to_keep_ = ['assists', 'DBNOs', 'killPlace',
'longestKill', 'numGroups',
'killsNorm', 'damageDealtNorm', 'maxPlaceNorm', 'weaponsAcquiredNorm',
'healsandboosts', 'totalDistance']
original = original[to_keep_]
X_train, X_valid = split_vals(original, n_trn)
y_train, y_valid = split_vals(target, n_trn)
X_train=sc.fit_transform(X_train)
X_valid=sc.transform(X_valid)
# Check dimensions of samples
print('Sample train shape: ', X_train.shape,
'\nSample target shape: ', y_train.shape,
'\nSample validation shape: ', X_valid.shape)
# Train basic model
m6 = xgboost.XGBRegressor(random_state=42,n_estimators=400, subsample = 0.8, colsample_bytree=1,max_depth=7, learning_rate=0.08)
m6.fit(X_train, y_train)
print_score(m6)
# split training / validation using one million rows
# and do the same for the test set
###Output
_____no_output_____
###Markdown
Correlations+ Correlation Heatmap
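The cell below is commented out and refers to a `significant` frame that is not defined in this notebook; a minimal working sketch of the same idea on the wrangled frame's numeric columns could look like this:

```python
import matplotlib.pyplot as plt
import seaborn as sns

# correlation matrix over the numeric columns only
corr = df_wrangle.select_dtypes(include='number').corr()

fig, ax = plt.subplots(figsize=(11, 9))
sns.heatmap(corr, ax=ax)
plt.show()
```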
###Code
# # Correlation heatmap
# corr = significant.corr()
# # Set up the matplotlib figure
# f, ax = plt.subplots(figsize=(11, 9))
# # Create heatmap
# heatmap = sns.heatmap(corr)
###Output
_____no_output_____
###Markdown
Final Random Forest Model. We decided to choose and run the final model on Friday, so we do not need this part yet.
###Code
# # Prepare data
# val_perc_full = 0.12 # % to use for validation set
# n_valid_full = int(val_perc_full * len(df_wrangle))
# n_trn_full = len(df_wrangle)-n_valid_full
# df_full = df_wrangle.drop(columns = ['winPlacePerc']) # all columns except target\
# df_fs = df_full.drop(['killStreaks', 'damageDealt','maxPlace','numGroups', 'matchId_cat', 'groupId_cat'], axis=1)
# df_fs.columns
# y = df_wrangle['winPlacePerc'] # target variable
# df_full = df_full[to_keep] # Keep only relevant features
# X_train, X_valid = split_vals(df_full, n_trn_full)
# y_train, y_valid = split_vals(y, n_trn_full)
# X_train=sc.fit_transform(X_train)
# X_valid=sc.transform(X_valid)
# # Check dimensions of data
# print('Sample train shape: ', X_train.shape,
# '\nSample target shape: ', y_train.shape,
# '\nSample validation shape: ', X_valid.shape)
###Output
_____no_output_____
###Markdown
*** Before that, the normalization and anomaly-removal steps should themselves be validated. Not used from here on. Feature selection: visualize the correlation between features with a dendrogram.
###Code
# # Create a Dendrogram to view highly correlated features
# import scipy
# corr = np.round(scipy.stats.spearmanr(df_keep).correlation, 4)
# corr_condensed = hc.distance.squareform(1-corr)
# z = hc.linkage(corr_condensed, method='average')
# fig = plt.figure(figsize=(14,10))
# dendrogram = hc.dendrogram(z, labels=df_keep.columns, orientation='left', leaf_font_size=16)
# plt.plot()
###Output
_____no_output_____
###Markdown
Visualize the predictive quality of kills, killStreaks and damageDealt + Predictive quality of kills
###Code
# def get_sample(df,n):
# idxs = sorted(np.random.permutation(len(df))[:n])
# return df.iloc[idxs].copy()
# # Plot the predictive quality of kills
# x_all = get_sample(train, 100000)
# ggplot(x_all, aes('kills','winPlacePerc'))+stat_smooth(se=True, colour='red', method='mavg')
# x_all = get_sample(train, 100000)
# ggplot(x_all, aes('killStreaks','winPlacePerc'))+stat_smooth(se=True, colour='red', method='mavg')
# x_all = get_sample(train, 100000)
# ggplot(x_all, aes('damageDealt','winPlacePerc'))+stat_smooth(se=True, colour='red', method='mavg')
###Output
_____no_output_____
###Markdown
Correlation-based and domain-knowledge-based feature drops: 1. Among kills, damageDealt and killStreaks, the most influential feature is kills, so drop the other two. 2. maxPlace and numGroups are also highly correlated; after analysing the domain we judged both features unnecessary and dropped them. 3. Based on the domain analysis, features such as matchId_cat and groupId_cat were also judged unnecessary and dropped. All of 1, 2 and 3 still need to be validated, and if time remains we should also experiment with splitting the data into duo, solo and squad matches.
###Code
#Among the similarly correlated kills, damageDealt and killStreaks, the most influential feature
#is kills, so we decided to drop the other two.
#maxPlace, numGroups and matchType are all much the same and have little influence, so remove them.
#If time permits, also try splitting the data into duo, solo and squad matches.
#train = train.drop(['killStreaks', 'damageDealt', 'maxPlace', 'numGroups', 'Id', 'matchId_cat', 'groupId_cat'], axis=1)
#validation code to add - results of comparing influence and correlation
###Output
_____no_output_____
###Markdown
Correlation-based and domain-knowledge-based feature-drop validation: 1. validate after dropping killStreaks and damageDealt 2. validate after dropping maxPlace and numGroups 3. validate after dropping matchId_cat and groupId_cat. Feature generation validation: 1. validate after generating healsandboosts 2. validate after generating totalDistance
###Code
#validation code to add - if we cover the XGBoost feature part in detail, keep the number of models small
#(about five), with validation before and after the feature work
#the report should show only the results.. not the code parts
#run the model with train, df_wrangle and df_fs and bring over just the validation results
#try several hyper-parameter settings
###Output
_____no_output_____ |
notebooks/climate change jupyter nutebook 1.ipynb | ###Markdown
Different Regions
###Code
world = data[data['Entity'] == 'World']
china = data[data['Entity'] == 'China']
india= data[data['Entity'] == 'India']
asia= data[data['Entity'] == 'Asia (excl. China & India)']
europe= data[data['Entity'] == 'Europe']
northamerica = data[data['Entity'] == 'North America (excl. USA)']
usa= data[data['Entity'] == 'United States']
africa = data[data['Entity'] == 'Africa']
southamerica= data[data['Entity'] == 'South America']
conts = [africa, usa, asia, europe, southamerica, northamerica, china, india, world]
continents = pd.concat(conts)
print(continents['Entity'].unique())
###Output
['Africa' 'United States' 'Asia (excl. China & India)' 'Europe'
'South America' 'North America (excl. USA)' 'China' 'India' 'World']
###Markdown
G20 countries
###Code
g20 = data.query("Entity == ['Argentina', 'Australia', 'Brazil', 'Canada', 'Saudi Arabia','China', 'France', 'Germany', 'India', 'United States','Indonesia', 'Italy', 'Japan', 'Mexico', 'Russia','South Africa', 'South Korea', 'Turkey', 'United Kingdom', 'Spain']")
print(g20['Entity'].unique())
continents.head()
plt.figure(figsize=(20,10))
sns.barplot(x='Entity',y='Annual CO2 emissions',data=continents[continents["Entity"] != "World"])
plt.show()
plt.figure(figsize=(20,10))
sns.barplot(x='Entity',y='Annual CO2 emissions',data=g20)
plt.xticks(rotation=90)
plt.show()
import plotly.express as px
fig = px.area(continents[continents["Entity"] != "World"], x="Year", y="Annual CO2 emissions",
color="Entity")
fig.show()
import plotly.express as px
fig = px.area(g20, x="Year", y="Annual CO2 emissions",
color="Entity")
fig.show()
world.head()
sns.countplot()
###Output
_____no_output_____ |
Feature_Engineering_for_Machine_Learning_in_Python/Conforming_to_Statistical_Assumptions.ipynb | ###Markdown
What does your data look like? (I)Feature engineering can also be used to make the most out of the data that you already have and use it more effectively when creating machine learning models.Many algorithms may assume that your data is normally distributed, or at least that all your columns are on the same scale. This will often not be the case, e.g. one feature may be measured in thousands of dollars while another would be number of years. We'll create plots to examine the distributions of some numeric columns in the in the stack overflow DataFrame, stored as so_numeric_df.
###Code
# import necessary libraries
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
# load the dataset as a dataframe
so_survey_df = pd.read_csv('./Datasets/Combined_DS_v10.csv')
# inspect the dataset
so_survey_df.head()
# check the data types of the columns
so_survey_df.dtypes
###Output
_____no_output_____
###Markdown
The above output shows us that there are 4 numeric columns in `so_survey_df`. Since we're selecting only these numeric columns, we'll assign them to a separate dataframe below:
###Code
# extract all the numeric columns
so_numeric_df = so_survey_df.select_dtypes(include = ['float64', 'int64'])
# check
so_numeric_df.head()
# Generate a histogram of all columns in the so_numeric_df DataFrame.
so_numeric_df.hist()
# display the plot
plt.show()
# Generate box plots of the Age and Years Experience columns in the so_numeric_df DataFrame.
so_numeric_df[['Age', 'Years Experience']].boxplot()
# disolay the plot
plt.show()
# Generate a box plot of the ConvertedSalary column in the so_numeric_df DataFrame.
so_numeric_df[['ConvertedSalary']].boxplot()
###Output
_____no_output_____
###Markdown
As you can see the distrbutions of columns in a dataset can vary quite a bit**Note :** Remember that the `.boxplot()` method is only available for DataFrames. What does your data look like? (II)We looked at the distribution of individual columns. While this is a good start, a more detailed view of how different features interact with each other may be useful as this can impact your decision on what to transform and how.
###Code
# Plot pairwise relationships in the so_numeric_df dataset.
sns.pairplot(so_numeric_df)
# display the plot
plt.show()
so_numeric_df[(so_numeric_df['Years Experience'] >= 20) & (so_numeric_df['Age'] <= 20)]
###Output
_____no_output_____
###Markdown
There are some incorrectly entered information in the dataset. Some respondents who are below 18 years indicated to have over 20 years working experience.
###Code
# get summary statistics of the numeric columns
so_numeric_df.describe()
###Output
_____no_output_____
###Markdown
Understanding these summary statistics of a column can be very valuable when deciding what transformations are necessary. **Note:** There are times when you don't have to transform your data; machine learning models like decision trees split along a single point, so they do not require all the columns to be on the same scale. Normalization: In normalization you linearly scale the entire column between 0 and 1, with 0 corresponding to the lowest value in the column, and 1 to the largest. When using scikit-learn (the most commonly used machine learning library in Python) you can use a `MinMaxScaler` to apply normalization. (It is called this as it scales your values between a minimum and maximum value.)
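In formula form, min-max scaling maps each value $x$ of a column to $$x' = \frac{x - x_{min}}{x_{max} - x_{min}}$$ where $x_{min}$ and $x_{max}$ are the smallest and largest values in that column.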
###Code
# Import MinMaxScaler from sklearn's preprocessing module.
from sklearn.preprocessing import MinMaxScaler
#Instantiate the MinMaxScaler() as mm_scaler.
mm_scaler = MinMaxScaler()
# Fit the MinMaxScaler on the Age column of so_numeric_df.
mm_scaler.fit(so_numeric_df['Age'].values.reshape(-1, 1))
# Transform the same column with the scaler you just fit and assign to a new column
so_numeric_df['Age_normalized'] = mm_scaler.transform(so_numeric_df['Age'].values.reshape(-1, 1))
# Compare the origional and transformed column
so_numeric_df[['Age', 'Age_normalized']]
###Output
C:\Users\ADMIN\Anaconda3\lib\site-packages\ipykernel_launcher.py:11: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead
See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy
# This is added back by InteractiveShellApp.init_path()
###Markdown
Notice that all values have been scaled between 0 and 1. Standardization: While normalization can be useful for scaling a column between two data points, it is hard to compare two scaled columns if even one of them is overly affected by outliers. One commonly used solution to this is called standardization, where instead of having a strict upper and lower bound, you center the data around its mean and calculate how many standard deviations away from the mean each data point is.
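In formula form, standardization replaces each value $x$ with its z-score $$z = \frac{x - \mu}{\sigma}$$ where $\mu$ and $\sigma$ are the column's mean and standard deviation.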
###Code
# Import StandardScaler from sklearn's preprocessing module.
from sklearn.preprocessing import StandardScaler
# Instantiate the StandardScaler() as ss_scaler.
ss_scaler = StandardScaler()
# Fit the StandardScaler on the Age column of so_numeric_df.
fitted = ss_scaler.fit(so_numeric_df['Age'].values.reshape(-1, 1))
# Transform the same column with the scaler you just fit.
so_numeric_df['Age_standardized'] = ss_scaler.transform(so_numeric_df['Age'].values.reshape(-1, 1))
# Compare the origional and transformed column
so_numeric_df[['Age_standardized', 'Age']].head()
###Output
C:\Users\ADMIN\Anaconda3\lib\site-packages\ipykernel_launcher.py:11: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead
See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy
# This is added back by InteractiveShellApp.init_path()
###Markdown
We can see that the values have been scaled linearly, but not between set values. Log transformation: In the previous exercises you scaled the data linearly, which will not affect the data's shape. This works great if your data is normally distributed (or closely normally distributed), an assumption that a lot of machine learning models make. Sometimes you will work with data that closely conforms to normality, e.g. the height or weight of a population. On the other hand, many variables in the real world do not follow this pattern, e.g. wages or age of a population. In this exercise you will use a log transform on the `ConvertedSalary` column in the `so_numeric_df` DataFrame as it has a large amount of its data centered around the lower values, but contains very high values also. These distributions are said to have a long right tail, or to be right-skewed.
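The underlying idea is to replace each value $x$ with something like $x' = \log(x)$, which compresses the long right tail. (Note that scikit-learn's `PowerTransformer` applies a Yeo-Johnson power transform by default, which behaves similarly to a log transform for right-skewed, non-negative data.)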
###Code
# Import PowerTransformer from sklearn's preprocessing module.
from sklearn.preprocessing import PowerTransformer
# Instantiate the PowerTransformer() as pow_trans.
pow_trans = PowerTransformer()
# Fit the PowerTransformer on the ConvertedSalary column of so_numeric_df.
pow_trans.fit(so_numeric_df['ConvertedSalary'].values.reshape(-1, 1))
# Transform the same column with the scaler you just fit.
so_numeric_df['ConvertedSalary_log_transformed'] = pow_trans.transform(so_numeric_df['ConvertedSalary'].values.reshape(-1, 1))
# compare the original and transformed columns
print(so_numeric_df[['ConvertedSalary', 'ConvertedSalary_log_transformed']].head())
# Plot the data before and after the transformation
so_numeric_df[['ConvertedSalary', 'ConvertedSalary_log_transformed']].hist()
# display the plot
plt.show()
###Output
C:\Users\ADMIN\Anaconda3\lib\site-packages\ipykernel_launcher.py:11: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead
See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy
# This is added back by InteractiveShellApp.init_path()
###Markdown
Notice the change in the shape of the distribution. The `ConvertedSalary_log_transformed` column looks much more normal than the original `ConvertedSalary` column. **Note:** we should use `MinMaxScaler` when we know the data has a strict upper and lower bound. Percentage based outlier removal: One way to ensure that a small portion of data does not have an overly adverse effect is by removing a certain percentage of the largest and/or smallest values in the column. This can be achieved by finding the relevant quantile and trimming the data using it with a mask. This approach is particularly useful if you are concerned that the highest values in your dataset should be avoided. When using this approach, you must remember that even if there are no outliers, this will still remove the same top N percentage from the dataset.
###Code
# Find the 95th quantile of the ConvertedSalary column.
quantile = so_numeric_df['ConvertedSalary'].quantile(0.95)
# Trim the so_numeric_df DataFrame to retain all rows where ConvertedSalary is less than it's 95th quantile.
trimmed_df = so_numeric_df[so_numeric_df['ConvertedSalary'] < quantile]
# Plot the histogram of so_numeric_df[['ConvertedSalary']]
# i.e the original histogram
so_numeric_df[['ConvertedSalary']].hist()
# show the plot
plt.show()
# Plot the histogram of trimmed_df[['ConvertedSalary']].
trimmed_df[['ConvertedSalary']].hist()
# show the plot
plt.show()
###Output
_____no_output_____
###Markdown
Statistical outlier removalWhile removing the top N% of your data is useful for ensuring that very spurious points are removed, it does have the disadvantage of always removing the same proportion of points, even if the data is correct. A commonly used alternative approach is to remove data that sits further than three standard deviations from the mean. You can implement this by first calculating the mean and standard deviation of the relevant column to find upper and lower bounds, and applying these bounds as a mask to the DataFrame. This method ensures that only data that is genuinely different from the rest is removed, and will remove fewer points if the data is close together.
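The bounds used here are $$lower = \mu - 3\sigma, \qquad upper = \mu + 3\sigma$$ and any row falling outside $[lower, upper]$ is treated as an outlier.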
###Code
# Calculate the standard deviation and mean of the ConvertedSalary column of so_numeric_df.
std = so_numeric_df['ConvertedSalary'].std()
mean = so_numeric_df['ConvertedSalary'].mean()
# Calculate the upper and lower bounds as three standard deviations away from the mean in both the directions.
# calculate the cut-off
cut_off = std * 3
# get the upper and lower bounds
lower, upper = mean - cut_off, mean + cut_off
# Trim the so_numeric_df DataFrame to retain all rows where ConvertedSalary is within the lower and upper bounds.
trimmed_data = so_numeric_df[(so_numeric_df['ConvertedSalary'] < upper) &
(so_numeric_df['ConvertedSalary'] > lower)]
# check
trimmed_data
# The trimmed box plot
trimmed_data[['ConvertedSalary']].boxplot()
# show the plot
plt.show()
###Output
_____no_output_____
###Markdown
Notice the scale change on the y-axis. Train and testing transformations (I): So far you have created scalers based on a column, and then applied the scaler to the same data that it was trained on. When creating machine learning models you will generally build your models on historic data (train set) and apply your model to new unseen data (test set). In these cases you will need to ensure that the same scaling is being applied to both the training and test data. ***To do this in practice you train the scaler on the train set, and keep the trained scaler to apply it to the test set. You should never retrain a scaler on the test set.*** Below we split the numeric data into train (`model_train`) and test (`model_test`) sets.
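The general pattern, as a short sketch (frame and column names here mirror the cells below):

```python
from sklearn.preprocessing import StandardScaler

scaler = StandardScaler()
# fit the scaler on the training data only ...
train_scaled = scaler.fit_transform(model_train[['Age']])
# ... then reuse the already-fitted scaler on the test data (never refit here)
test_scaled = scaler.transform(model_test[['Age']])
```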
###Code
# remove some columns we don't need
model_data = so_numeric_df.drop(['StackOverflowJobsRecommend', 'Age_normalized',
'Age_standardized', 'ConvertedSalary_log_transformed'], axis = 1)
# get the first 700 rows
model_train = model_data.iloc[:700, :]
# get the last 300 rows
model_test = model_data.iloc[700:, :]
# check
print(model_train.shape)
model_test.shape
# Fit the StandardScaler on the Age column of train data.
ss_scaler.fit(model_train.loc[:, 'Age'].values.reshape(-1, 1))
# Transform the Age column in the test set.
model_test['Age_standardized_transformation'] = ss_scaler.transform(model_test['Age'].values.reshape(-1, 1))
# check
model_test[['Age', 'Age_standardized_transformation']].head()
###Output
C:\Users\ADMIN\Anaconda3\lib\site-packages\ipykernel_launcher.py:5: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead
See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy
"""
###Markdown
Data leakage is one of the most common mistakes data scientists tend to make. Train and testing transformations (II): Similar to applying the same scaler to both your training and test sets, if you have removed outliers from the train set, you probably want to do the same on the test set as well. Once again ***ensure that you use the thresholds calculated only from the train set to remove outliers from the test set***.
###Code
# Calculate the standard deviation and mean of the ConvertedSalary column for train data
train_std = model_train['ConvertedSalary'].std()
train_mean = model_train['ConvertedSalary'].mean()
# get the cut off
cut_off_2 = train_std * 3
# Calculate the upper and lower bounds as three standard deviations away from the mean in both the directions.
train_lower, train_upper = train_mean - cut_off_2, train_mean + cut_off_2
# Trim the so_test_numeric DataFrame to retain all rows where ConvertedSalary is within the lower and upper bounds.
trim_test = model_test[(model_test['ConvertedSalary'] < train_upper) &
(model_test['ConvertedSalary'] > train_lower)]
trim_test
###Output
_____no_output_____ |
notebook/cat_dataset_process.ipynb | ###Markdown
import
###Code
import os
import os.path as osp
from glob import glob
import shutil
import cv2
import matplotlib.pyplot as plt
import numpy as np
from tqdm import tqdm
from sklearn.model_selection import train_test_split
###Output
_____no_output_____
###Markdown
path
###Code
base_dir = '../'
input_dir = osp.join(base_dir, 'input')
dataset_dir = osp.join(input_dir, 'cat-dataset')
save_dir = osp.join(input_dir, 'cat')
os.makedirs(save_dir, exist_ok=True)
###Output
_____no_output_____
###Markdown
copy image to one folder
###Code
data_names = os.listdir(dataset_dir)
data_names
# Collect the cat images, which are split into separate folders by category, into a single folder
image_num = 1
for data_name in tqdm(data_names):
data_dir = osp.join(dataset_dir, data_name)
file_names = os.listdir(data_dir)
file_names = [file_name for file_name in file_names if osp.splitext(file_name)[1] == '.jpg']
file_names.sort()
for file_name in file_names:
image_path = osp.join(data_dir, file_name)
save_image_num = '{:07}.jpg'.format(image_num)
save_path = osp.join(save_dir, save_image_num)
shutil.copy(image_path, save_path)
image_num += 1
###Output
100%|██████████| 7/7 [00:02<00:00, 2.77it/s]
###Markdown
Split and crop data
###Code
image_paths = glob(osp.join(save_dir, '*'))
image_paths.sort()
image_paths[0:10]
train_paths, test_paths = train_test_split(image_paths, test_size=100, random_state=0)
print(len(train_paths))
print(len(test_paths))
def random_crop(image, crop_size):
h, w, _ = image.shape
top = np.random.randint(0, h - crop_size[0])
left = np.random.randint(0, w - crop_size[1])
bottom = top + crop_size[0]
right = left + crop_size[1]
image = image[top:bottom, left:right, :]
return image
def split_save(image_paths, data_type, crop_size=(128, 128), num_aug=1):
shapes = []
for image_path in tqdm(image_paths):
image_name = osp.basename(image_path)
image = cv2.imread(image_path)
for aug_n in range(1, num_aug+1):
image_rsz = random_crop(image, crop_size)
save_image_name = '{}-{:3}.jpg'.format(image_name, aug_n)
image_save_path = osp.join(data_save_dir, 'cat_{}'.format(data_type), save_image_name)
os.makedirs(osp.dirname(image_save_path), exist_ok=True)
cv2.imwrite(image_save_path, image_rsz)
def split_crop(image_paths, data_type, crop=False, crop_size=(128,128), num_aug=1):
shapes = []
for image_path in tqdm(image_paths):
image_name = osp.basename(image_path)
file_name = osp.splitext(image_name)[0]
image = cv2.imread(image_path)
if (image.shape[0]<=crop_size[0]) | (image.shape[1]<=crop_size[1]):
print('size problem', image_path)
continue
if crop:
for aug_n in range(1, num_aug+1):
image_rsz = random_crop(image, crop_size)
save_image_name = '{}{:03}.jpg'.format(file_name, aug_n)
image_save_path = osp.join(input_dir, 'cat_{}'.format(data_type), save_image_name)
os.makedirs(osp.dirname(image_save_path), exist_ok=True)
cv2.imwrite(image_save_path, image_rsz)
else:
save_image_name = '{}.jpg'.format(file_name)
image_save_path = osp.join(input_dir, 'cat_{}'.format(data_type), save_image_name)
os.makedirs(osp.dirname(image_save_path), exist_ok=True)
cv2.imwrite(image_save_path, image)
random_seed = 0
np.random.seed(random_seed)
split_crop(train_paths, 'train', crop=True)
split_crop(test_paths, 'test')
###Output
40%|████      | 3949/9897 [00:22<00:34, 172.08it/s] |
assignment/assignment1/softmax-backprop.ipynb | ###Markdown
Softmax exercise*Complete and hand in this completed worksheet (including its outputs and any supporting code outside of the worksheet) with your assignment submission. For more details see the [assignments page](http://vision.stanford.edu/teaching/cs231n/assignments.html) on the course website.*This exercise is analogous to the SVM exercise. You will:- implement a fully-vectorized **loss function** for the Softmax classifier- implement the fully-vectorized expression for its **analytic gradient**- **check your implementation** with numerical gradient- use a validation set to **tune the learning rate and regularization** strength- **optimize** the loss function with **SGD**- **visualize** the final learned weights
###Code
import random
import numpy as np
from cs231n.data_utils import load_CIFAR10
import matplotlib.pyplot as plt
from __future__ import print_function
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# for auto-reloading extenrnal modules
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
%load_ext autoreload
%autoreload 2
def get_CIFAR10_data(num_training=49000, num_validation=1000, num_test=1000, num_dev=500):
"""
Load the CIFAR-10 dataset from disk and perform preprocessing to prepare
it for the linear classifier. These are the same steps as we used for the
SVM, but condensed to a single function.
"""
# Load the raw CIFAR-10 data
cifar10_dir = 'cs231n/datasets/cifar-10-batches-py'
X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)
# subsample the data
mask = list(range(num_training, num_training + num_validation))
X_val = X_train[mask]
y_val = y_train[mask]
mask = list(range(num_training))
X_train = X_train[mask]
y_train = y_train[mask]
mask = list(range(num_test))
X_test = X_test[mask]
y_test = y_test[mask]
mask = np.random.choice(num_training, num_dev, replace=False)
X_dev = X_train[mask]
y_dev = y_train[mask]
# Preprocessing: reshape the image data into rows
X_train = np.reshape(X_train, (X_train.shape[0], -1))
X_val = np.reshape(X_val, (X_val.shape[0], -1))
X_test = np.reshape(X_test, (X_test.shape[0], -1))
X_dev = np.reshape(X_dev, (X_dev.shape[0], -1))
# Normalize the data: subtract the mean image
mean_image = np.mean(X_train, axis = 0)
X_train -= mean_image
X_val -= mean_image
X_test -= mean_image
X_dev -= mean_image
# add bias dimension and transform into columns
X_train = np.hstack([X_train, np.ones((X_train.shape[0], 1))])
X_val = np.hstack([X_val, np.ones((X_val.shape[0], 1))])
X_test = np.hstack([X_test, np.ones((X_test.shape[0], 1))])
X_dev = np.hstack([X_dev, np.ones((X_dev.shape[0], 1))])
return X_train, y_train, X_val, y_val, X_test, y_test, X_dev, y_dev
# Cleaning up variables to prevent loading data multiple times (which may cause memory issue)
try:
del X_train, y_train
del X_test, y_test
print('Clear previously loaded data.')
except:
pass
# Invoke the above function to get our data.
X_train, y_train, X_val, y_val, X_test, y_test, X_dev, y_dev = get_CIFAR10_data()
print('Train data shape: ', X_train.shape)
print('Train labels shape: ', y_train.shape)
print('Validation data shape: ', X_val.shape)
print('Validation labels shape: ', y_val.shape)
print('Test data shape: ', X_test.shape)
print('Test labels shape: ', y_test.shape)
print('dev data shape: ', X_dev.shape)
print('dev labels shape: ', y_dev.shape)
###Output
Train data shape: (49000, 3073)
Train labels shape: (49000,)
Validation data shape: (1000, 3073)
Validation labels shape: (1000,)
Test data shape: (1000, 3073)
Test labels shape: (1000,)
dev data shape: (500, 3073)
dev labels shape: (500,)
###Markdown
Softmax ClassifierYour code for this section will all be written inside **cs231n/classifiers/softmax.py**.
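For reference, here is a minimal loop-based sketch of what such a loss/gradient computation can look like (this is only an illustration with my own variable names, not the assignment's official solution in `softmax.py`):

```python
import numpy as np

def softmax_loss_naive_sketch(W, X, y, reg):
    # W: (D, C) weights, X: (N, D) data, y: (N,) integer labels, reg: L2 strength
    loss = 0.0
    dW = np.zeros_like(W)
    num_train, num_classes = X.shape[0], W.shape[1]
    for i in range(num_train):
        scores = X[i].dot(W)
        scores -= scores.max()                      # numeric stability
        probs = np.exp(scores) / np.sum(np.exp(scores))
        loss += -np.log(probs[y[i]])
        for c in range(num_classes):
            # gradient of the cross-entropy wrt scores is (p_c - 1{c == y_i})
            dW[:, c] += (probs[c] - (c == y[i])) * X[i]
    loss = loss / num_train + reg * np.sum(W * W)
    dW = dW / num_train + 2 * reg * W
    return loss, dW
```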
###Code
# First implement the naive softmax loss function with nested loops.
# Open the file cs231n/classifiers/softmax.py and implement the
# softmax_loss_naive function.
from cs231n.classifiers.softmax import softmax_loss_naive
import time
# Generate a random softmax weight matrix and use it to compute the loss.
W = np.random.randn(3073, 10) * 0.0001
loss, grad = softmax_loss_naive(W, X_dev, y_dev, 0.0)
# As a rough sanity check, our loss should be something close to -log(0.1).
print('loss: %f' % loss)
print('sanity check: %f' % (-np.log(0.1)))
X = X_dev
y = y_dev
W = np.random.randn(3073, 10) * 0.0001
reg = 0.0
wx = X.dot(W)
p_unnorm = np.exp(wx-wx.max(1)[:,None])
p_sum = np.sum(p_unnorm,1)[:,None]
p = p_unnorm/p_sum
yp = np.choose(y, p.T)
p_log = np.log(yp)
L = -np.mean(p_log)
print(L)
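# A hedged sketch (not the assignment's reference code) of the matching vectorized gradient,
# using dL/dW = X^T (p - one_hot(y)) / N for the softmax cross-entropy computed above:
dscores = p.copy()
dscores[np.arange(len(y)), y] -= 1   # subtract 1 at the correct-class positions
dW_vec = X.T.dot(dscores) / len(y)
print(dW_vec.shape)  # expected to match W.shape, i.e. (3073, 10)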
import torch
X = X_dev
y = y_dev
W = np.random.randn(3073, 10) * 0.0001
W = torch.from_numpy(W)
X = torch.from_numpy(X)
W.requires_grad = True
wx = torch.mm(X,W)
#p_unnorm = torch.exp(wx-wx.max(1)[0][:,None])
p_unnorm = torch.exp(wx)
p_sum = torch.sum(p_unnorm,1)[:,None]
p_sum_inv = 1/p_sum
p = p_unnorm * p_sum_inv
yp = p.gather(1,torch.from_numpy(y).long().view(-1,1))
p_log = torch.log(yp)
L = -torch.mean(p_log)
wx.retain_grad()
p_unnorm.retain_grad()
p_sum.retain_grad()
p_sum_inv.retain_grad()
p.retain_grad()
yp.retain_grad()
p_log.retain_grad()
L.retain_grad()
L.backward()
dL = 1
dp_log = torch.zeros_like(p_log)
dp_log[:] = -1/len(dp_log)
dyp = 1/yp
dp = torch.zeros_like(p)
idx = torch.LongTensor(y)
j = torch.arange(dp.size(0)).long()
dp[j, idx] = 1
dp_sum_inv = p_unnorm
dp_unnorm = p_sum_inv
dp_sum = -p_sum**-2
dp_unnorm2 = torch.ones(p_unnorm.shape, dtype=torch.float64)
dwx = torch.exp(wx)
dw = X
dLp_log = dL * dp_log
dLyp = dLp_log * dyp
dLp = dLyp * dp
dLp_sum_inv = (dLp * dp_sum_inv).sum(1)
dLp_unnorm = dLp * dp_unnorm
dLp_sum = dLp_sum_inv[:,None] * dp_sum
dLp_unnorm2 = dLp_sum * dp_unnorm2
dLp_unnorm += dLp_unnorm2
dLwx = dLp_unnorm * dwx
dLw = torch.mm(dw.t(),dLwx)
dLw[0]
W.grad[0]
p.grad[0]
p_sum.shape
p_unnorm.grad[0]
W.grad[0]
dLp_log = dL * dp_log
a = torch.tensor([2.0,3.0,4.0])
a
a.requires_grad=True
b = torch.mean(a)
b
a.grad
b.backward()
a
a.grad
dL = 1
dLloss_all = np.zeros_like(loss_all)
dLloss_all[:] = 1/len(loss_all)
dLprob_log = dLloss_all * -1
dLprob = dLprob_log * 1/y_prob
dLprob_sum = dLprob[:,None]*(-prob_unnorm/prob_sum[:,None]**2)
prob_sum[:,None]
prob_sum.shape
dLprob_log
import torch
x = torch.from_numpy(np.array([1.0,2.0,3.0]))
x.requires_grad = True
y = torch.mean(x)
y.backward()
x.grad
def softmax_loss_naive(W, X, y, reg):
"""
Softmax loss function, naive implementation (with loops)
Inputs have dimension D, there are C classes, and we operate on minibatches
of N examples.
Inputs:
- W: A numpy array of shape (D, C) containing weights.
- X: A numpy array of shape (N, D) containing a minibatch of data.
- y: A numpy array of shape (N,) containing training labels; y[i] = c means
that X[i] has label c, where 0 <= c < C.
- reg: (float) regularization strength
Returns a tuple of:
- loss as single float
- gradient with respect to weights W; an array of same shape as W
"""
# Initialize the loss and gradient to zero.
loss = 0.0
dW = np.zeros_like(W)
#############################################################################
# TODO: Compute the softmax loss and its gradient using explicit loops. #
# Store the loss in loss and the gradient in dW. If you are not careful #
# here, it is easy to run into numeric instability. Don't forget the #
# regularization! #
#############################################################################
#############################################################################
# END OF YOUR CODE #
#############################################################################
return loss, dW
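# A hedged sketch (an assumption, not the course's reference solution) of what the loop-based
# implementation could look like; it is kept as a separately named function so the original
# template above stays untouched.
def softmax_loss_naive_sketch(W, X, y, reg):
    loss = 0.0
    dW = np.zeros_like(W)
    num_train = X.shape[0]
    num_classes = W.shape[1]
    for i in range(num_train):
        scores = X[i].dot(W)
        scores -= np.max(scores)  # shift for numeric stability
        probs = np.exp(scores) / np.sum(np.exp(scores))
        loss += -np.log(probs[y[i]])
        for c in range(num_classes):
            dW[:, c] += (probs[c] - (c == y[i])) * X[i]
    loss = loss / num_train + reg * np.sum(W * W)
    dW = dW / num_train + 2 * reg * W
    return loss, dW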
###Output
_____no_output_____
###Markdown
Inline Question 1:Why do we expect our loss to be close to -log(0.1)? Explain briefly.****Your answer:** *Fill this in*
###Code
grad
# Complete the implementation of softmax_loss_naive and implement a (naive)
# version of the gradient that uses nested loops.
loss, grad = softmax_loss_naive(W, X_dev, y_dev, 0.0)
# As we did for the SVM, use numeric gradient checking as a debugging tool.
# The numeric gradient should be close to the analytic gradient.
from cs231n.gradient_check import grad_check_sparse
f = lambda w: softmax_loss_naive(w, X_dev, y_dev, 0.0)[0]
grad_numerical = grad_check_sparse(f, W, grad, 10)
# similar to SVM case, do another gradient check with regularization
loss, grad = softmax_loss_naive(W, X_dev, y_dev, 5e1)
f = lambda w: softmax_loss_naive(w, X_dev, y_dev, 5e1)[0]
grad_numerical = grad_check_sparse(f, W, grad, 10)
# Now that we have a naive implementation of the softmax loss function and its gradient,
# implement a vectorized version in softmax_loss_vectorized.
# The two versions should compute the same results, but the vectorized version should be
# much faster.
tic = time.time()
loss_naive, grad_naive = softmax_loss_naive(W, X_dev, y_dev, 0.000005)
toc = time.time()
print('naive loss: %e computed in %fs' % (loss_naive, toc - tic))
from cs231n.classifiers.softmax import softmax_loss_vectorized
tic = time.time()
loss_vectorized, grad_vectorized = softmax_loss_vectorized(W, X_dev, y_dev, 0.000005)
toc = time.time()
print('vectorized loss: %e computed in %fs' % (loss_vectorized, toc - tic))
# As we did for the SVM, we use the Frobenius norm to compare the two versions
# of the gradient.
grad_difference = np.linalg.norm(grad_naive - grad_vectorized, ord='fro')
print('Loss difference: %f' % np.abs(loss_naive - loss_vectorized))
print('Gradient difference: %f' % grad_difference)
# Use the validation set to tune hyperparameters (regularization strength and
# learning rate). You should experiment with different ranges for the learning
# rates and regularization strengths; if you are careful you should be able to
# get a classification accuracy of over 0.35 on the validation set.
from cs231n.classifiers import Softmax
results = {}
best_val = -1
best_softmax = None
learning_rates = [1e-7, 5e-7]
regularization_strengths = [2.5e4, 5e4]
################################################################################
# TODO: #
# Use the validation set to set the learning rate and regularization strength. #
# This should be identical to the validation that you did for the SVM; save #
# the best trained softmax classifer in best_softmax. #
################################################################################
# Your code
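# One possible sketch (an assumption, not the official solution), relying on the same
# LinearClassifier-style train/predict API used in the SVM exercise:
for lr in learning_rates:
    for reg in regularization_strengths:
        softmax = Softmax()
        softmax.train(X_train, y_train, learning_rate=lr, reg=reg, num_iters=1500, verbose=False)
        train_accuracy = np.mean(softmax.predict(X_train) == y_train)
        val_accuracy = np.mean(softmax.predict(X_val) == y_val)
        results[(lr, reg)] = (train_accuracy, val_accuracy)
        if val_accuracy > best_val:
            best_val = val_accuracy
            best_softmax = softmax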
################################################################################
# END OF YOUR CODE #
################################################################################
# Print out results.
for lr, reg in sorted(results):
train_accuracy, val_accuracy = results[(lr, reg)]
print('lr %e reg %e train accuracy: %f val accuracy: %f' % (
lr, reg, train_accuracy, val_accuracy))
print('best validation accuracy achieved during cross-validation: %f' % best_val)
# evaluate on test set
# Evaluate the best softmax on test set
y_test_pred = best_softmax.predict(X_test)
test_accuracy = np.mean(y_test == y_test_pred)
print('softmax on raw pixels final test set accuracy: %f' % (test_accuracy, ))
###Output
_____no_output_____
###Markdown
**Inline Question** - *True or False*It's possible to add a new datapoint to a training set that would leave the SVM loss unchanged, but this is not the case with the Softmax classifier loss.*Your answer*:*Your explanation*:
###Code
# Visualize the learned weights for each class
w = best_softmax.W[:-1,:] # strip out the bias
w = w.reshape(32, 32, 3, 10)
w_min, w_max = np.min(w), np.max(w)
classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
for i in range(10):
plt.subplot(2, 5, i + 1)
# Rescale the weights to be between 0 and 255
wimg = 255.0 * (w[:, :, :, i].squeeze() - w_min) / (w_max - w_min)
plt.imshow(wimg.astype('uint8'))
plt.axis('off')
plt.title(classes[i])
###Output
_____no_output_____ |
FOIP with DPV.ipynb | ###Markdown
This is a work in progress and in no way a complete example of using DPV BackgroundFOIP ACT is the Freedom of Information and Protection of Privacy Act that **public bodies** must abide by in Alberta, Canada.FOIP covers the handling of personal information.In FOIP, personal information is defined to be, "recorded information about an identifiable individual. It is information that can identify an individual (for example, name, home address, home phone number, e-mail address, ID numbers), and information about an individual (for example, physical description, educational qualifications, blood type)."It should be noted that, "the definition of personal information does not include information about a sole proprietorship, partnership, unincorporated association or corporation."The data handling policy is mapped according to "FOIP Guidelines and Practices: 2009 Edition Chapter 7: Protection of Privacy" (https://web.archive.org/web/20160615221611/https://www.servicealberta.ca/foip/documents/chapter7.pdf). Use Case: Municipal CensusCensuses are conducted by municipalities for the planning of public services.The required guidelines set by the Alberta Government in 2019 (https://web.archive.org/web/20190929185839/https://open.alberta.ca/dataset/ebee0c79-a9eb-4bf5-993d-30995a2f7554/resource/61613571-e381-4c4e-9f11-2bfe6823ef81/download/2019-municipal-census-manual.pdf) suggest following a questionnaire similar to this (https://web.archive.org/web/20191218172659/http://www.statcan.gc.ca/eng/statistical-programs/instrument/3901_Q8_V1-eng.pdf).For our use case, we follow the questionnaire in the above paragraph to identify the personal information collected.We assume that the municipal census is conducted in Alberta, and therefore must abide by the FOIP Act. Data Handling PoliciesWe provide 2 different data handling policies: (1) for internal use that contains non-aggregate data, (2) for external publication which only includes aggregate data.These 2 data handling policies contain the same data subject(s), data controller, and legal basis.They differ in the processing, purpose, personal data categories, recipients, and technical organisational measures. Internal Use External Publication 1. Importing the DPV
###Code
from rdflib import Graph, Literal, Namespace, URIRef
from rdflib.namespace import RDF, RDFS
g = Graph()
# import DPV
DPV = Namespace("http://w3.org/ns/dpv#")
g.bind('dpv', DPV)
# g.parse('https://www.w3.org/ns/dpv.ttl', format='turtle')
###Output
_____no_output_____
###Markdown
2. Creating Personal Data Handling Policy
###Code
# Personal Data Handling Policy
# For the use case we consider an internal and external data handling policy
policy = Namespace("http://example.org/policy/")
g.bind('policy', policy)
# Create internal policy
g.add( (policy.Internal, RDF.type, DPV.PersonalDataHandling) )
# Create external policy
g.add( (policy.External, RDF.type, DPV.PersonalDataHandling) )
###Output
_____no_output_____
###Markdown
3. Assigning a Data Subject
###Code
# Data Subject
# According to FOIP, the data subject will be the person that has personal information collected by a public body
# Note that there should be many more data subjects (as more than one persons' data is collected),
# but we stick with using 'Bob' for this example.
# create a people namespace, with Bob
person = Namespace("http://example.org/people/")
g.bind('person', person)
g.add( (person.bob, RDF.type, DPV.DataSubject) )
# Add to the policies
g.add( (policy.Internal, DPV.hasDataSubject, person.bob) )
g.add( (policy.External, DPV.hasDataSubject, person.bob) )
###Output
_____no_output_____
###Markdown
4. Assigning a Data Controller
###Code
# Data Controller
# From DPV: The class of Data Controllers that control this particular data handling,
# any legal entity that is defined by article 4.7 of GDPR.
#
# In the context of a municipal census, under FOIP, the data controller would be the public body
# which is the municipality
# create an organization namespace, with City of Edmonton
org = Namespace("http://example.org/organization/")
g.bind('org', org)
g.add( (org.Edmonton, RDF.type, DPV.DataController) )
# Add to the policies
g.add( (policy.Internal, DPV.hasDataController, org.Edmonton) )
g.add( (policy.External, DPV.hasDataController, org.Edmonton) )
###Output
_____no_output_____
###Markdown
5. Assigning a Legal Basis
###Code
# Legal Basis
# From DPV: A particular legal Basis, which permits personal Data handling (e.g., Consent, etc.)
# There is no ontology for FOIP, so we create a placeholder
legal = Namespace("http://example.org/legal/")
g.bind('legal', legal)
g.add( (legal.FOIP, RDF.type, DPV.Consent) )
# In the census there is an option to make data available 100+ years in the future
# Under FOIP, if you agree to release information for public use then it is allowed
# Assume that https://www.servicealberta.ca/foip/legislation/foip-act.cfm is the consent notice
g.add( (legal.FOIP, DPV.consentNotice, URIRef('https://www.servicealberta.ca/foip/legislation/foip-act.cfm')) )
# Add to the policies
g.add( (policy.Internal, DPV.hasLegalBasis, legal.FOIP) )
g.add( (policy.External, DPV.hasLegalBasis, legal.FOIP) )
###Output
_____no_output_____
###Markdown
6. Determining Purposes
###Code
# Purpose
# Namespace for Purpose
purpose = Namespace("http://example.org/purpose/")
g.bind('purpose', purpose)
# Internal
g.add( (purpose.CitizenStatistics, RDFS.subClassOf,DPV.ServiceProvision) )
g.add( (purpose.CitizenStatistics, RDFS.subClassOf,DPV.ServiceOptimization) )
g.add( (purpose.CitizenStatistics, RDFS.subClassOf,DPV.ResearchAndDevelopment) )
g.add( (purpose.CitizenStatistics, RDFS.subClassOf,DPV.OptimisationForController) )
g.add( (purpose.CitizenStatistics, RDFS.subClassOf,DPV.InternalResourceOptimisation) )
g.add( (purpose.CitizenStatistics, RDFS.label, Literal("Citizen Statistics")) )
g.add( (purpose.CitizenStatistics, RDFS.comment, Literal("Citizen statistics to deliver and optimize municipal services")) )
g.add( (policy.Internal, DPV.hasPurpose, purpose.CitizenStatistics) )
# External
# The main purpose for the external policy is public education through insights, but there is no such class.
# Most closely related is dpv:SellInsightsFromData, even though the insights provided are free
# e.g. https://www.edmonton.ca/city_government/facts_figures/municipal-census-results.aspx
# Perhaps the 'Sell' purposes should be more generalized?
g.add( (purpose.PublicCitizenStatistics, RDFS.subClassOf, DPV.SellInsightsFromData) )
g.add( (purpose.PublicCitizenStatistics, RDFS.label, Literal("Public Citizen Statistics")) )
g.add( (purpose.PublicCitizenStatistics, RDFS.comment, Literal("Provide aggregate statistics on municipal census data")) )
g.add( (policy.External, DPV.hasPurpose, purpose.CitizenStatistics) )
###Output
_____no_output_____
###Markdown
7. Determining how Data Is Processed
###Code
# Processing
# Use the top-level classes of DPV: Disclose, Copy, Obtain, Remove, Store, Transfer, and Transform
# You can also define more specific processing categories for DPV
# Internal
g.add( (policy.Internal, DPV.hasProcessing, DPV.Disclose) )
g.add( (policy.Internal, DPV.hasProcessing, DPV.Copy) )
g.add( (policy.Internal, DPV.hasProcessing, DPV.Obtain) )
g.add( (policy.Internal, DPV.hasProcessing, DPV.Remove) )
g.add( (policy.Internal, DPV.hasProcessing, DPV.Store) )
g.add( (policy.Internal, DPV.hasProcessing, DPV.Transfer) )
g.add( (policy.Internal, DPV.hasProcessing, DPV.Transform) )
# External
g.add( (policy.External, DPV.hasProcessing, DPV.Disclose) )
g.add( (policy.External, DPV.hasProcessing, DPV.Transform) )
###Output
_____no_output_____
###Markdown
8. Determining Personal Data Categories
###Code
# Personal Data Categories
# Personal data collected is based on
# https://web.archive.org/web/20191218172659/http://www.statcan.gc.ca/eng/statistical-programs/instrument/3901_Q8_V1-eng.pdf
# Internal
# missing mode of transportation, date of birth, country of origin
# has some generalized categories
g.add( (policy.Internal, DPV.hasPersonalDataCategory, DPV.HouseOwned) ) # residence
g.add( (policy.Internal, DPV.hasPersonalDataCategory, DPV.LifeHistory) ) # place of origin, work history, birth date
g.add( (policy.Internal, DPV.hasPersonalDataCategory, DPV.Family) ) # individual's family and relationships, marital status
g.add( (policy.Internal, DPV.hasPersonalDataCategory, DPV.Professional) ) # individual's educational or professional career
g.add( (policy.Internal, DPV.hasPersonalDataCategory, DPV.Contact) ) # telephone number, email, address
g.add( (policy.Internal, DPV.hasPersonalDataCategory, DPV.MedicalHealth) ) # disability, health condition
g.add( (policy.Internal, DPV.hasPersonalDataCategory, DPV.Demographic) )
g.add( (policy.Internal, DPV.hasPersonalDataCategory, DPV.Name) )
g.add( (policy.Internal, DPV.hasPersonalDataCategory, DPV.Age) )
g.add( (policy.Internal, DPV.hasPersonalDataCategory, DPV.Language) )
g.add( (policy.Internal, DPV.hasPersonalDataCategory, DPV.Gender) )
# External
# In the external census, not all information collected is released
# Example of information released: https://www.edmonton.ca/city_government/facts_figures/municipal-census-results.aspx
g.add( (policy.External, DPV.hasPersonalDataCategory, DPV.Age) )
g.add( (policy.External, DPV.hasPersonalDataCategory, DPV.Gender) )
g.add( (policy.External, DPV.hasPersonalDataCategory, DPV.HouseOwned) )
###Output
_____no_output_____
###Markdown
9. Determining Recipients
###Code
# Recipient
# From DPV: The entities that can access the result of a data handling action/processing,
# any legal entity that is defined by article 4.9 of GDPR, which states - 'recipient' means a natural
# or legal person, public authority, agency or another body, to which the personal data are disclosed,
# whether a third party or not.
# Internal
# Data is meant for organizational use
g.add( (org.Edmonton, RDF.type, DPV.Recipient) ) # Both controller and recipient
g.add( (policy.Internal, DPV.hasRecipient, org.Edmonton) )
# External
# Assume that General Public is a body and therefore a recipient
g.add( (org.GeneralPublic, RDF.type, DPV.Recipient) )
g.add( (policy.External, DPV.hasRecipient, org.GeneralPublic) )
###Output
_____no_output_____
###Markdown
10. Technical Organisational Measures
###Code
# Technical Organisational Measures
# From DPV: Technical and organisational measures, for instance security measure,
# storage restrictions etc. required/followed when processing data of the declared category
# Internal
g.add( (policy.Internal, DPV.hasTechnicalOrganisationalMeasure, DPV.AuthorisationProcedure) )
g.add( (policy.Internal, DPV.hasTechnicalOrganisationalMeasure, DPV.CodeOfConduct) )
g.add( (policy.Internal, DPV.hasTechnicalOrganisationalMeasure, DPV.Contract) )
g.add( (policy.Internal, DPV.hasTechnicalOrganisationalMeasure, DPV.NDA) )
g.add( (policy.Internal, DPV.hasTechnicalOrganisationalMeasure, DPV.StaffTraining) )
# External
# Privacy is by design, as data is aggregated before release
g.add( (policy.External, DPV.hasTechnicalOrganisationalMeasure, DPV.PrivacyByDesign) )
# Export data handling policies to a graph
g.serialize("dpv-foip.ttl", format="turtle")
# How to query with SPARQL
# qres = g.query(
# """
# SELECT ?policy
# WHERE {
# ?policy a dpv:PersonalDataHandling .
# }
# """)
# for row in qres:
# print(row)
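# A small follow-up query (hedged: it simply reuses the dpv prefix bound with g.bind above)
# that lists the personal data categories attached to each data handling policy:
qres = g.query(
    """
    SELECT ?policy ?category
    WHERE {
        ?policy dpv:hasPersonalDataCategory ?category .
    }
    """)
for policy_uri, category in qres:
    print(policy_uri, "->", category)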
###Output
_____no_output_____ |
Data Extraction.ipynb | ###Markdown
Build DatasetsI made the corpus here: [kaggle.com/mauroebordon/creating-a-qa-corpus-from-askreddit/](https://www.kaggle.com/mauroebordon/creating-a-qa-corpus-from-askreddit/)
###Code
import pandas as pd
import re
import string
from patterns import stop_pattern, emoji_pattern, nostop_pattern
from nltk import word_tokenize, pos_tag
from nltk.stem import WordNetLemmatizer
db_df = pd.read_csv("db/ask-reddit-corpus.csv", index_col=0)
###Output
_____no_output_____
###Markdown
Feature ExtractionOnly Tokens, Lemmas, PoS AND stopword filtering for now.
###Code
def extract_features(df: pd.DataFrame, tags=False):
"""
    Extract the necessary data that we will use to build the different classification models.
"""
    # Remove the emojis
lmtzr = WordNetLemmatizer()
selected_tags = {"DT", "IN", "PRP", "PRP$", "FW","NN","NNS","NNP","NNPS","PDT","RB","RBR","RBS","RP","VB","VBD","VBG","VBN","VBP","VBZ", "WDT", "WP", "WP$", "WRB"}
    # Filter out questions that are too long, and the empty ones
db_df = df.copy()[df.Q.apply(lambda x: len(str(x)) < 50)]
db_df = db_df.copy()[db_df.Q.apply(lambda x: len(str(x)) > 0)]
#sdf = df[df.Qscore > 1]
#sdf = sdf.copy()[sdf.ANS.apply(lambda x: len(str(x)) > 15)]
    # removing the emojis
db_df["Q"] = db_df["Q"].replace(emoji_pattern, "")
    # Work uncased (lowercase everything)
db_df["Q"] = [str.lower(s) for s in list(db_df.Q)]
    # people are not that original and repeat questions
db_df = db_df.groupby("Q", as_index=False).first()
    # Remove the URLs
db_df["Q"] = db_df["Q"].str.replace("http\S+", "")
db_df["ANS"] = db_df["ANS"].str.replace("http\S+", "")
    # Remove the tags from questions that are marked "[Serious], [NSFW], etc."
    # this information could be kept.
db_df["Q"] = db_df["Q"].replace(r"^\[.*\]", "", regex=True)
db_df["Q"] = db_df["Q"].replace(r"[\"\โ\โ]", "", regex=True)
db_df["ANS"] = db_df["ANS"].replace(r"[\"\โ\โ]", "", regex=True)
    # remove punctuation
translator = str.maketrans('','',string.punctuation)
db_df["Qclean"] = db_df.Q.str.translate(translator)
db_df["Aclean"] = db_df.ANS.str.translate(translator)
db_df["Qtoks"] = [word_tokenize(w) for w in db_df["Qclean"]]
db_df["Atoks"] = [word_tokenize(w) for w in db_df["Aclean"]]
db_df["Qtoks"] = [[lmtzr.lemmatize(t) for t in qes] for qes in db_df["Qtoks"]]
db_df["Atoks"] = [[lmtzr.lemmatize(t) for t in qes] for qes in db_df["Atoks"]]
db_df["Qnostp"] = db_df["Qtoks"].apply(lambda x: [re.sub(stop_pattern, "", y) for y in x if re.sub(stop_pattern, "", y) != ""])
db_df["Anostp"] = db_df["Atoks"].apply(lambda x: [re.sub(stop_pattern, "", y) for y in x if re.sub(stop_pattern, "", y) != ""])
db_df["Qstp"] = db_df[['Qtoks','Qnostp']].apply(lambda x: [i for i in x[0] if i not in x[1]], axis=1)
db_df["Astp"] = db_df[['Atoks','Anostp']].apply(lambda x: [i for i in x[0] if i not in x[1]], axis=1)
    # Lemma & PoS pairs
db_df["Qkeys"] = db_df["Qnostp"]
db_df["Akeys"] = db_df["Anostp"]
# Filtered nltk PoS and lemma
if tags:
qtags = []
for ws in list(db_df["Qpos"]):
this_tags = ""
for w in ws:
if w[1] in selected_tags:
this_tags += f"{w[0]}_{w[1]} "
qtags.append(this_tags)
db_df["Qkeys"] = qtags
atags = []
for ws in list(db_df["Apos"]):
this_tags = ""
for w in ws:
if w[1] in selected_tags:
this_tags += f"{w[0]}_{w[1]} "
atags.append(this_tags)
db_df["Akeys"] = atags
db_df = db_df[["id", "Q", "Qclean", "Qtoks", "Qstp", "Qkeys", "ANS", "Aclean", "Atoks", "Astp", "Akeys"]]
return db_df
#using pickle is no use for compressing this
df = extract_features(db_df)
df
df = df.copy()
df["QA-keys"] = df["Qkeys"]
df["QA-keys"] += df["Akeys"]
#Qkeys nonstop lemmas
df["Q-kstr"] = df["Qkeys"].apply(lambda x: " ".join(a for a in x))
df["QA-kstr"] = df["QA-keys"].apply(lambda x: " ".join(a for a in x))
df["Q-stpstr"] = df["Qstp"].apply(lambda x: " ".join(a for a in x))
df.to_csv("db/features.csv")
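# Note: list-valued columns (e.g. Qtoks, Qkeys) are serialized as plain strings in the CSV.
# A hedged example of restoring them when the file is read back:
import ast
reloaded = pd.read_csv("db/features.csv", index_col=0)
reloaded["Qtoks"] = reloaded["Qtoks"].apply(ast.literal_eval)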
###Output
_____no_output_____
###Markdown
Building the Test Dataset for Clustering ComparisonSmall manual clusters to get a little sense of whether clustering is working. This may be very, very silly, but simply sorting the questions alphabetically is a good way to find similarity.
###Code
import pandas as pd
df = pd.read_csv("db/features.csv", index_col=0)
n_clus = 5 #no cambiar sin agregar otro
n_sam = 100
df = df.groupby("Q", as_index=False).first()
#testing a few precarious and conceptual question "types"
q_ids = []
q_ids += list(df[df.Q.str.contains(r'favorite movie')].id.sample(n_sam))
q_ids += list(df[df.Q.str.contains(r'ever seen')].id.sample(n_sam))
q_ids += list(df[df.Q.str.contains(r'advice')].id.sample(n_sam))
q_ids += list(df[df.Q.str.contains(r'history')].id.sample(n_sam))
q_ids += list(df[df.Q.str.contains(r'book')].id.sample(n_sam))
# Show a little of how it looks
# for j in range(n_clus):
# print(f"\ncluster {j}")
# for i in range(n_sam):
# print(f" {df.loc[q_ids[j*n_clus+i]].Q}")
q_ids = [(x, int(i/n_sam)) for i, x in enumerate(q_ids)]
total = n_clus*n_sam
test_db = []
for i in range(total):
for j in range(1, total-i):
este = q_ids[i]
otro = q_ids[j+i]
test_db += [[este[0], otro[0], este[1] == otro[1]]]
test_db = pd.DataFrame(test_db)
test_db = test_db[test_db[0] != test_db[1]]
test_db
test_db.to_csv("db/test_db.csv")
df.Q.head(30)
###Output
_____no_output_____ |
Day1/Test1.ipynb | ###Markdown
IntroductionPython is an interpreted, object-oriented, high-level programming language with dynamic semantics. Its high-level built in data structures, combined with dynamic typing and dynamic binding, make it very attractive for Rapid Application Development, as well as for use as a scripting or glue language to connect existing components together. Python's simple, easy to learn syntax emphasizes readability and therefore reduces the cost of program maintenance. Python supports modules and packages, which encourages program modularity and code reuse. The Python interpreter and the extensive standard library are available in source or binary form without charge for all major platforms, and can be freely distributed.Often, programmers fall in love with Python because of the increased productivity it provides. Since there is no compilation step, the edit-test-debug cycle is incredibly fast. Debugging Python programs is easy: a bug or bad input will never cause a segmentation fault. Instead, when the interpreter discovers an error, it raises an exception. When the program doesn't catch the exception, the interpreter prints a stack trace. A source level debugger allows inspection of local and global variables, evaluation of arbitrary expressions, setting breakpoints, stepping through the code a line at a time, and so on. The debugger is written in Python itself, testifying to Python's introspective power. On the other hand, often the quickest way to debug a program is to add a few print statements to the source: the fast edit-test-debug cycle makes this simple approach very effective.In this series of examples you will learn the programming concepts of Python.Hello World In Python
###Code
print("Hello world")
###Output
Hello world
###Markdown
OverviewOften, programmers fall in love with Python because of the increased productivity it provides. Since there is no compilation step, the edit-test-debug cycle is incredibly fast. Debugging Python programs is easy: a bug or bad input will never cause a segmentation fault. Instead, when the interpreter discovers an error, it raises an exception. When the program doesn't catch the exception, the interpreter prints a stack trace.
###Code
a = 2
print(a)
###Output
2
|
Task7_Stock_Market_Prediction_using_Numerical_and_Textual_Analysis.ipynb | ###Markdown
Author : Shradha Pujari Task 7 : Stock Market Prediction using Numerical and Textual Analysis GRIP @ The Sparks Foundation Objective: Create a hybrid model for stock price/performance prediction using numerical analysis of historical stock prices, and sentimental analysis of news headlinesDownload historical stock prices from finance.yahoo.com
###Code
#importing required libraries
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
from sklearn import metrics
from keras.models import Sequential
from keras.layers import Dense, LSTM
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import math
import re
import nltk
import re
from nltk.corpus import stopwords
nltk.download('stopwords')
nltk.download('vader_lexicon')
from textblob import TextBlob
from nltk.sentiment.vader import SentimentIntensityAnalyzer
from sklearn.ensemble import RandomForestRegressor, AdaBoostRegressor
import xgboost
###Output
[nltk_data] Downloading package stopwords to
[nltk_data] C:\Users\kriti\AppData\Roaming\nltk_data...
[nltk_data] Package stopwords is already up-to-date!
[nltk_data] Downloading package vader_lexicon to
[nltk_data] C:\Users\kriti\AppData\Roaming\nltk_data...
[nltk_data] Package vader_lexicon is already up-to-date!
###Markdown
Step 1 : Importing the Numerical dataset and performing Exploratory Analysis
###Code
# Dataframe for exploratory analysis
df=pd.read_csv('csv\^BSESN.csv')
df.head()
# Extract date frame and plot closing stock price w.r.t time
df['Date'] = pd.to_datetime(df.Date,format='%Y-%m-%d')
df.index = df['Date']
df.dropna(inplace=True)
#plot
plt.figure(figsize=(16,8))
plt.plot(df['Close'], label='Close Price history')
# fix random seed for reproducibility
np.random.seed(7)
###Output
_____no_output_____
###Markdown
Step 2 : Creating a dataframe for storing the Closing stock data per day
###Code
# convert an array of values into a dataset matrix
def create_dataset(df2, look_back=1):
dataX, dataY = [], []
for i in range(len(df2)-look_back-1):
a = df2[i:(i+look_back), 0]
dataX.append(a)
dataY.append(df2[i + look_back, 0])
return np.array(dataX), np.array(dataY)
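# Tiny illustration (made-up numbers, not the BSE data) of the sliding-window pairs
# produced by create_dataset with look_back=3:
_demo = np.array([[10.0], [11.0], [12.0], [13.0], [14.0]], dtype='float32')
_demo_X, _demo_Y = create_dataset(_demo, look_back=3)
print(_demo_X)  # [[10. 11. 12.]]
print(_demo_Y)  # [13.]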
df2 = pd.read_csv('csv\^BSESN.csv', usecols=[5], engine='python')
df2.dropna(inplace=True)
df2 = df2.values
df2 = df2.astype('float32')
###Output
_____no_output_____
###Markdown
Step 3 : Data Normalization and Division into Training and Test sets
###Code
# normalize the dataset
scaler = MinMaxScaler(feature_range=(0, 1))
df2 = scaler.fit_transform(df2)
# split into train and test sets
train_size = int(len(df2) * 0.67)
test_size = len(df2) - train_size
train, test = df2[0:train_size,:], df2[train_size:len(df2),:]
# reshape into X=t and Y=t+1
look_back = 3
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
# reshape input to be [samples, time steps, features]
trainX = np.reshape(trainX, (trainX.shape[0], trainX.shape[1], 1))
testX = np.reshape(testX, (testX.shape[0], testX.shape[1], 1))
###Output
_____no_output_____
###Markdown
Step 4 : Creating a LSTM for Numerical Analysis
###Code
# create and fit the LSTM network
model = Sequential()
model.add(LSTM(7, input_shape=(look_back, 1)))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
model.fit(trainX, trainY, epochs=100, batch_size=1, verbose=2)
# make predictions
trainPredict = model.predict(trainX)
testPredict = model.predict(testX)
# invert predictions
trainPredict = scaler.inverse_transform(trainPredict)
trainY = scaler.inverse_transform([trainY])
testPredict = scaler.inverse_transform(testPredict)
testY = scaler.inverse_transform([testY])
###Output
_____no_output_____
###Markdown
Step 5 : Making Predictions
###Code
# shift train predictions for plotting
trainPredictPlot = np.empty_like(df2)
trainPredictPlot[:, :] = np.nan
trainPredictPlot[look_back:len(trainPredict)+look_back, :] = trainPredict
# shift test predictions for plotting
testPredictPlot = np.empty_like(df2)
testPredictPlot[:, :] = np.nan
testPredictPlot[len(trainPredict)+(look_back*2)+1:len(df2)-1, :] = testPredict
# plot baseline and predictions
plt.plot(scaler.inverse_transform(df2))
plt.plot(trainPredictPlot,color='red')
plt.plot(testPredictPlot,color='green')
plt.show()
# calculate root mean squared error
trainScore = math.sqrt(mean_squared_error(trainY[0], trainPredict[:,0]))
print("Root mean square error = ",trainScore," RMSE")
testScore = math.sqrt(mean_squared_error(testY[0], testPredict[:,0]))
print("Root mean square error = ",testScore," RMSE")
###Output
_____no_output_____
###Markdown
Step 6 : Creating a Hybrid model for Numerical and textual Analysis
###Code
#Text Analysis
columns = ['Date','Category','News']
news = pd.read_csv('csv\india-news-headlines.csv', names = columns)
news
###Output
_____no_output_____
###Markdown
Step 7 : Text preprocessing
###Code
news.drop(0, inplace=True)
news.drop('Category', axis = 1, inplace=True)
news.info()
# Restructuring the date format
news['Date'] = pd.to_datetime(news['Date'],format= '%Y %m %d')
news
#Grouping the headlines for each day
news['News'] = news.groupby(['Date']).transform(lambda x : ' '.join(x))
news = news.drop_duplicates()
news.reset_index(inplace = True, drop = True)
news
news['News']
###Output
_____no_output_____
###Markdown
Step 8 : Adding subjectivity and polarity Scores
###Code
#Functions to get the subjectivity and polarity
def getSubjectivity(text):
return TextBlob(text).sentiment.subjectivity
def getPolarity(text):
return TextBlob(text).sentiment.polarity
#Adding subjectivity and polarity columns
news['Subjectivity'] = news['News'].apply(getSubjectivity)
news['Polarity'] = news['News'].apply(getPolarity)
news
###Output
_____no_output_____
###Markdown
Step 9 : Visualizing the polarity and Subjectivity scores
###Code
plt.figure(figsize = (10,6))
news['Polarity'].hist(color = 'red')
plt.figure(figsize = (10,6))
news['Subjectivity'].hist(color = 'green')
###Output
_____no_output_____
###Markdown
Step 10 : Performing Sentiment Analysis over the news Headlines
###Code
#Adding sentiment score to news
sia = SentimentIntensityAnalyzer()
news['Compound'] = [sia.polarity_scores(v)['compound'] for v in news['News']]
news['Negative'] = [sia.polarity_scores(v)['neg'] for v in news['News']]
news['Neutral'] = [sia.polarity_scores(v)['neu'] for v in news['News']]
news['Positive'] = [sia.polarity_scores(v)['pos'] for v in news['News']]
news
###Output
_____no_output_____
###Markdown
Step 11 : Merging the numerical and textual data
###Code
merge = news
merge
dff = merge[['Subjectivity', 'Polarity', 'Compound', 'Negative', 'Neutral' ,'Positive']]
dff
from sklearn.preprocessing import MinMaxScaler
sc = MinMaxScaler()
new_df = pd.DataFrame(sc.fit_transform(dff))
new_df.columns = dff.columns
new_df.index = dff.index
new_df.head()
X = new_df[0:249]
y =df['Close']
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state = 0)
x_train.shape
x_train[:10]
###Output
_____no_output_____
###Markdown
Step 12 : Training a Random Forest Regressor and Adaboost Regressor for hybrid analysis
###Code
rf = RandomForestRegressor()
rf.fit(x_train, y_train)
prediction=rf.predict(x_test)
###Output
_____no_output_____
###Markdown
Step 13 : Determining the accuracy scores for both the Models
###Code
print(prediction[:10])
print(y_test[:10])
print("Root mean square error = ",math.sqrt(mean_squared_error(prediction,y_test))," RMSE")
adb = AdaBoostRegressor()
adb.fit(x_train, y_train)
predictions = adb.predict(x_test)
print("Root mean square error = ",math.sqrt(mean_squared_error(predictions, y_test))," RMSE")
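# xgboost is imported above but never used; a hedged example of fitting it in the same way
# as the other regressors for comparison:
xgb = xgboost.XGBRegressor()
xgb.fit(x_train, y_train)
xgb_predictions = xgb.predict(x_test)
print("Root mean square error = ", math.sqrt(mean_squared_error(xgb_predictions, y_test)), " RMSE")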
###Output
Root mean square error = 4074.461310273298 RMSE
|
June/baseline-model-in-2-lines-tps-2021-june.ipynb | ###Markdown
Baseline Model using Pywedge What is Pywedge?Pywedge is an open-source python library which is a complete package that helps you visualize the data, pre-process the data and also create some baseline models which can be further tuned to make the best machine learning model for the data.
!pip install pywedge
import pywedge as pw
###Code
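# train_df and test_df are not loaded in this snippet; a minimal, hypothetical loading step
# (the file paths are assumptions based on the usual Kaggle TPS June 2021 layout):
import pandas as pd
train_df = pd.read_csv("../input/tabular-playground-series-jun-2021/train.csv")
test_df = pd.read_csv("../input/tabular-playground-series-jun-2021/test.csv")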
blm = pw.baseline_model(train_df, test_df, c=None, y='target', type='Classification')
blm.classification_summary()
###Output
_____no_output_____ |
notebooks/6-evaluate.ipynb | ###Markdown
Load Dataset
###Code
df = pd.read_csv(os.path.join(DATA_DIR_PATH, DF_NAME), delimiter='\t', index_col=0)
df.head()
decomposed_df = df[(df.entailment_tableau_size > 2) & (df.contradiction_tableau_size > 2)]
decomposed_df
undecomposed_df = df[(df.entailment_tableau_size == 2) & (df.contradiction_tableau_size == 2)]
undecomposed_df
print("Decomposed Sample Rate:", len(decomposed_df) / len(df))
print("Undecomposed Sample Rate:", len(undecomposed_df) / len(df))
###Output
_____no_output_____
###Markdown
Define The Model
###Code
TARGET_DF = decomposed_df
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
device
batch_size = 32
with open("./data/word_index_map.json", "r") as worddict_file:
worddict = json.load(worddict_file)
from esim.data import Preprocessor
preprocessor = Preprocessor(lowercase=False,
ignore_punctuation=False,
num_words=None,
stopwords={},
labeldict=LABEL_DICT,
bos=None,
eos=None)
preprocessor.worddict = worddict
preprocessor
checkpoint = torch.load("./data/checkpoints/best.pth.tar")
# Retrieving model parameters from checkpoint.
vocab_size = checkpoint["model"]["_word_embedding.weight"].size(0)
embedding_dim = checkpoint["model"]['_word_embedding.weight'].size(1)
hidden_size = checkpoint["model"]["_projection.0.weight"].size(0)
num_classes = checkpoint["model"]["_classification.4.weight"].size(0)
from esim.model import ESIM
model = ESIM(vocab_size,
embedding_dim,
hidden_size,
num_classes=num_classes,
device=device).to(device)
model.load_state_dict(checkpoint["model"])
import numpy as np
def predict(premises, hypothesises):
premises_split = []
for premise in premises:
if type(premise) is list:
premises_split.append(premise)
else:
premises_split.append([w for w in premise.rstrip().split()])
hypothesises_split = []
for hypothesis in hypothesises:
if type(hypothesis) is list:
hypothesises_split.append(hypothesis)
else:
hypothesises_split.append([w for w in hypothesis.rstrip().split()])
transformed_premises = [preprocessor.words_to_indices(premise_split) for premise_split in premises_split]
transformed_hypothesises = [preprocessor.words_to_indices(hypothesis_split) for hypothesis_split in hypothesises_split]
results = []
model.eval()
with torch.no_grad():
for start_index in range(0, len(transformed_premises), batch_size):
premises_batch = transformed_premises[start_index: start_index+batch_size]
premises_len_batch = [len(premise) for premise in premises_batch]
max_of_premises_len_batch = max(premises_len_batch)
premises_batch_tensor = torch.ones((len(premises_batch), max_of_premises_len_batch), dtype=torch.long) * 0
for i, premise in enumerate(premises_batch):
end = premises_len_batch[i]
premises_batch_tensor[i][:end] = torch.tensor(premise[:end])
hypothesises_batch = transformed_hypothesises[start_index: start_index+batch_size]
hypothesises_len_batch = [len(hypothesis) for hypothesis in hypothesises_batch]
max_of_hypothesises_len_batch = max(hypothesises_len_batch)
hypothesises_batch_tensor = torch.ones((len(hypothesises_batch), max_of_hypothesises_len_batch), dtype=torch.long) * 0
for i, hypothesis in enumerate(hypothesises_batch):
end = hypothesises_len_batch[i]
hypothesises_batch_tensor[i][:end] = torch.tensor(hypothesis[:end])
_, probs = model(
premises_batch_tensor.to(device),
torch.tensor(premises_len_batch).to(device),
hypothesises_batch_tensor.to(device),
torch.tensor(hypothesises_len_batch).to(device)
)
results_batch = [prob.cpu().numpy() for prob in probs]
results.extend(results_batch)
return np.array(results)
predict(["I like tomatos", ["I", "like", "tomatos"]],
["I do n't like tomatos", ["I", "do", "n't", "like", "tomatos"]])
###Output
_____no_output_____
###Markdown
ANSWER WITH NORMAL ESIM
###Code
premises = [tree2tokenlist(sample.udtree1) for sample in TARGET_DF.itertuples()]
hypothesises = [tree2tokenlist(sample.udtree2) for sample in TARGET_DF.itertuples()]
gold_labels = np.array([LABEL_DICT[sample.gold_label] for sample in TARGET_DF.itertuples()])
simple_predicted_labels = predict(premises, hypothesises).argmax(axis=1)
print("acc: {:.3f}%".format(100 * (simple_predicted_labels == gold_labels).sum() / len(TARGET_DF)))
###Output
_____no_output_____
###Markdown
ANSWER WITH TABLEAU WITH ESIM
###Code
def transform_tableau(tableau, premise_list, hypothesis_list):
entry_list = []
child_entries_list = []
contradictable_entries_pair_list = []
all_branches = []
def append_entry_list(node):
entry_offset = len(entry_list)
entry_size = 0
for entry in node["entries"]:
entry_list.append(entry)
child_entries_list.append([entry_offset + entry_size + 1])
entry_size += 1
childtree = []
for child_node in node["child_nodes"]:
childtree.append(append_entry_list(child_node))
child_entries_list[entry_offset + entry_size - 1] = childtree
return entry_offset
def append_contradictable_entries_pair_list(entry_index):
subtree_entry_indices = []
for child_entry_index in child_entries_list[entry_index]:
subtree_entry_indices.extend(append_contradictable_entries_pair_list(child_entry_index))
if entry_list[entry_index]["exist_eq_entries"] == False:
for subtree_entry_index in subtree_entry_indices:
if entry_list[entry_index]["origin"] != entry_list[subtree_entry_index]["origin"]:
if entry_list[entry_index]["sign"] == True and entry_list[subtree_entry_index]["sign"] == False:
contradictable_entries_pair_list.append((entry_index, subtree_entry_index))
elif entry_list[entry_index]["sign"] == False and entry_list[subtree_entry_index]["sign"] == True:
contradictable_entries_pair_list.append((subtree_entry_index, entry_index))
elif entry_list[entry_index]["sign"] == True and entry_list[subtree_entry_index]["sign"] == True:
contradictable_entries_pair_list.append((entry_index, subtree_entry_index))
subtree_entry_indices.append(entry_index)
return subtree_entry_indices
def calculate_branch(entry_index):
if len(child_entries_list[entry_index]) == 0:
return [{entry_index}]
branches = []
for child_entry_index in child_entries_list[entry_index]:
branches.extend(calculate_branch(child_entry_index))
for branch in branches:
branch.add(entry_index)
return branches
append_entry_list(tableau["root"])
append_contradictable_entries_pair_list(0)
all_branches = calculate_branch(0)
    # compute entry_list, child_entries_list, contradictable_entries_pair_list and all_branches
all_sentence_list = [tree2tokenlist(ET.fromstring(entry["tree"])) for entry in entry_list]
def findadd_sentence_pair(premise, hypothesis):
for i, _premise in enumerate(premise_list):
_hypothesis = hypothesis_list[i]
if premise == _premise and hypothesis == _hypothesis:
return i
premise_list.append(premise)
hypothesis_list.append(hypothesis)
return len(premise_list) - 1
contradiction_labels = []
sentence_pair_row = []
for i, pair in enumerate(contradictable_entries_pair_list):
if entry_list[pair[0]]["sign"] == True and entry_list[pair[1]]["sign"] == False:
contradiction_labels.append(LABEL_DICT["entailment"])
elif entry_list[pair[0]]["sign"] == True and entry_list[pair[1]]["sign"] == True:
contradiction_labels.append(LABEL_DICT["contradiction"])
sentence_pair_row.append(findadd_sentence_pair(all_sentence_list[pair[0]],
all_sentence_list[pair[1]]))
return {
"branches": all_branches,
"pairs": contradictable_entries_pair_list,
"sentence_pair_row": sentence_pair_row,
"contradiction_labels": torch.Tensor(contradiction_labels).to(device)
}
def transform_sample(df):
transformed_sample_list = []
for sample in df.itertuples():
premise_list = []
hypothesis_list = []
transformed_sample = {}
transformed_sample["gold_label"] = LABEL_DICT[sample.gold_label]
transformed_sample["entailment_tableau"] = transform_tableau(json.loads(sample.entailment_tableau),
premise_list,
hypothesis_list)
transformed_sample["contradiction_tableau"] = transform_tableau(json.loads(sample.contradiction_tableau),
premise_list,
hypothesis_list)
transformed_sample["premises"] = premise_list
transformed_sample["hypothesises"] = hypothesis_list
transformed_sample["premise"] = sample.sentence1
transformed_sample["hypothesis"] = sample.sentence2
sentence_pair_size = len(premise_list)
transformed_sample["sentence_pair_size"] = sentence_pair_size
transformed_sample["entailment_tableau"]["sentence_pair_row"] = (torch.eye(sentence_pair_size)[transformed_sample["entailment_tableau"]["sentence_pair_row"]]).to(device)
transformed_sample["contradiction_tableau"]["sentence_pair_row"] = (torch.eye(sentence_pair_size)[transformed_sample["contradiction_tableau"]["sentence_pair_row"]]).to(device)
transformed_sample_list.append(transformed_sample)
return transformed_sample_list
target_dataset = transform_sample(TARGET_DF)
print(view_tableau(TARGET_DF.iloc[0].entailment_tableau))
target_dataset[0]
import copy
def is_close_tableau(tableau, r):
if len(tableau["sentence_pair_row"]) == 0:
return False
is_contradiction_pairs = torch.mv(tableau["sentence_pair_row"], r) == tableau["contradiction_labels"]
branches = copy.copy(tableau["branches"])
for i, pair in enumerate(tableau["pairs"]):
if is_contradiction_pairs[i] == True:
for branch in branches:
if pair[0] in branch and pair[1] in branch:
branches.remove(branch)
return len(branches) == 0
def predict_label(sample, r):
is_close_entailment_tableau = is_close_tableau(sample["entailment_tableau"], r)
is_close_contradiction_tableau = is_close_tableau(sample["contradiction_tableau"], r)
if is_close_entailment_tableau == True and is_close_contradiction_tableau == False:
return 0
elif is_close_entailment_tableau == False and is_close_contradiction_tableau == False:
return 1
elif is_close_entailment_tableau == False and is_close_contradiction_tableau == True:
return 2
else:
return -1
from tqdm.notebook import tqdm
model.eval()
predicted_labels = []
with torch.no_grad():
for sample in tqdm(target_dataset):
if sample["sentence_pair_size"] > 0:
pairs_probs = torch.from_numpy(predict(sample["premises"], sample["hypothesises"])).to(device)
r = torch.argmax(pairs_probs, dim=1).float()
predicted_label = predict_label(sample, r)
else:
predicted_label = 1
predicted_labels.append(predicted_label)
tableau_predicted_labels = np.array(predicted_labels)
tableau_predicted_labels
print("acc: {:.3f}%".format(100 * (tableau_predicted_labels == gold_labels).sum() / len(TARGET_DF)))
print("err: {:.3f}%".format(100 * (tableau_predicted_labels == -1).sum() / len(TARGET_DF)))
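# Optional, hedged breakdown (assuming scikit-learn is available) of how the tableau-based
# predictions line up with the gold labels; samples where no decision was possible are -1.
from sklearn.metrics import confusion_matrix
valid = tableau_predicted_labels != -1
print(confusion_matrix(gold_labels[valid], tableau_predicted_labels[valid]))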
###Output
_____no_output_____ |
figures/code/lecture5-rnn.ipynb | ###Markdown
Prepare data
###Code
# n_samples = 10000
# max_length = 10
# X = []
# y = []
# for i in range(n_samples):
# x_i = np.random.rand(np.random.randint(max_length) + 1)
# if np.random.rand() < 0.5 or np.all(x_i == np.sort(x_i)):
# x_i = np.sort(x_i)
# if np.random.rand() < 0.5:
# x_i = x_i[::-1]
# y.append(1)
# else:
# y.append(0)
# X.append(x_i.reshape(-1,1))
# y = np.array(y).reshape(-1, 1)
# from sklearn.model_selection import train_test_split
# indices = np.array(range(len(X_binary)))
# train, test = train_test_split(indices)
n_samples = 10000
n_symbols = 5
max_length = 10
X = []
X_binary = []
y = []
for i in range(n_samples):
x_i = np.random.randint(n_symbols, size=np.random.randint(max_length) + 1)
len_i = len(x_i)
if np.random.rand() < 0.5:
if len_i % 2 == 0:
x_i[:len_i//2] = x_i[len_i//2:][::-1]
else:
x_i[:len_i//2] = x_i[len_i//2+1:][::-1]
y.append(1)
else:
if len_i % 2 == 0:
if np.all(x_i[:len_i//2] == x_i[len_i//2:][::-1]):
y.append(1)
else:
y.append(0)
else:
if np.all(x_i[:len_i//2] == x_i[len_i//2+1:][::-1]):
y.append(1)
else:
y.append(0)
X.append(x_i)
for x_i in X:
b = np.zeros((len(x_i), n_symbols))
for j, x_ij in enumerate(x_i):
b[j, x_ij] = 1
X_binary.append(b)
from sklearn.model_selection import train_test_split
indices = np.array(range(len(X_binary)))
train, test = train_test_split(indices)
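# Quick sanity check (illustrative only): one generated sequence, its palindrome label,
# and the shape of its one-hot encoding.
print("sequence:", X[0], "label:", y[0], "one-hot shape:", X_binary[0].shape)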
###Output
_____no_output_____
###Markdown
RNN
###Code
class Elman(nn.Module):
def __init__(self, num_features, num_hidden, num_layers=1):
super(Elman, self).__init__()
self.rnn = nn.RNN(num_features, num_hidden, num_layers=num_layers, batch_first=True)
self.fc = nn.Linear(num_hidden, 1)
def forward(self, x):
out, hn = self.rnn(x)
if self.rnn.num_layers > 1:
hn = hn[-1, :]
out = self.fc(hn)
return out.view(-1, 1).sigmoid()
class LSTM(nn.Module):
def __init__(self, num_features, num_hidden, num_layers=1):
super(LSTM, self).__init__()
self.rnn = nn.LSTM(num_features, num_hidden, num_layers=num_layers, batch_first=True)
self.fc = nn.Linear(num_hidden, 1)
def forward(self, x):
out, (hn, cn) = self.rnn(x)
out = self.fc(hn)
return out.view(-1, 1).sigmoid()
class GRU(nn.Module):
def __init__(self, num_features, num_hidden, num_layers=1):
super(GRU, self).__init__()
self.rnn = nn.GRU(num_features, num_hidden, num_layers=num_layers, batch_first=True)
self.fc = nn.Linear(num_hidden, 1)
def forward(self, x):
out, hn = self.rnn(x)
out = self.fc(hn)
return out.view(-1, 1).sigmoid()
class BiGRU(nn.Module):
def __init__(self, num_features, num_hidden, num_layers=1):
super(BiGRU, self).__init__()
self.rnn = nn.GRU(num_features, num_hidden, num_layers=num_layers, batch_first=True, bidirectional=True)
self.fc = nn.Linear(2*num_hidden, 1)
self.num_hidden = num_hidden
def forward(self, x):
out, hn = self.rnn(x)
if self.rnn.num_layers > 1:
hn = hn[-2:, :]
out = self.fc(hn.view(-1, 2*self.num_hidden))
return out.view(-1, 1).sigmoid()
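# Rough, illustrative parameter counts for the recurrent models defined above
# (hidden size 10, matching the configuration used in the experiments below):
for _name, _m in [("Elman", Elman(n_symbols, 10)),
                  ("LSTM", LSTM(n_symbols, 10)),
                  ("GRU", GRU(n_symbols, 10))]:
    print(_name, sum(p.numel() for p in _m.parameters()))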
test_curves = {}
models = {}
for model, name in [(Elman(n_symbols, 10), "elman"),
(Elman(n_symbols, 10, num_layers=2), "elman-stacked"),
(LSTM(n_symbols, 10), "lstm"),
                    (GRU(n_symbols, 10), "gru")]:
                    # (BiGRU(n_symbols, 5), "bigru")
models[name] = model
criterion = nn.BCELoss()
optimizer = torch.optim.Adam(model.parameters(), amsgrad=True)
num_epochs = 25
test_loss = []
l = 0
for i in test:
x_i = torch.Tensor(X_binary[i:i+1])
y_i = torch.Tensor(y[i:i+1])
outputs = model(x_i)
loss = criterion(outputs, y_i)
l += loss
print('Epoch: [%d/%d], Step: Loss: %.4f'
% (0, num_epochs, l / len(test)))
test_loss.append(l / len(test))
for epoch in range(num_epochs):
for i in train:
optimizer.zero_grad()
x_i = torch.Tensor(X_binary[i:i+1])
y_i = torch.Tensor(y[i:i+1])
outputs = model(x_i)
loss = criterion(outputs, y_i)
loss.backward()
optimizer.step()
l = 0
for i in test:
x_i = torch.Tensor(X_binary[i:i+1])
y_i = torch.Tensor(y[i:i+1])
outputs = model(x_i)
loss = criterion(outputs, y_i)
l += loss
print('Epoch: [%d/%d], Step: Loss: %.4f'
% (epoch, num_epochs, l / len(test)))
test_loss.append(l / len(test))
test_curves[name] = np.array([v.detach().numpy() for v in test_loss])
test_curves
###Output
_____no_output_____
###Markdown
Plots
###Code
plt.plot(range(num_epochs+1), test_curves["elman"], c="r", label="Elman")
#plt.plot(range(num_epochs+1), test_curves["elman-stacked"], "r--", label="Elman 2-layer")
#plt.plot(range(num_epochs+1), test_curves["lstm"], c="b", label="LSTM")
#plt.plot(range(num_epochs+1), test_curves["gru"], c="g", label="GRU")
plt.ylim(0,0.75)
plt.grid()
plt.legend()
remove_frame()
plt.savefig("palindrome-1.png")
plt.show()
X_test = []
y_test = []
for i in range(25000):
x_i = np.random.randint(n_symbols, size=np.random.randint(2*max_length) + 1)
len_i = len(x_i)
if np.random.rand() < 0.5:
if len_i % 2 == 0:
x_i[:len_i//2] = x_i[len_i//2:][::-1]
else:
x_i[:len_i//2] = x_i[len_i//2+1:][::-1]
y_test.append(1)
else:
if len_i % 2 == 0:
if np.all(x_i[:len_i//2] == x_i[len_i//2:][::-1]):
y_test.append(1)
else:
y_test.append(0)
else:
if np.all(x_i[:len_i//2] == x_i[len_i//2+1:][::-1]):
y_test.append(1)
else:
y_test.append(0)
X_test.append(x_i)
X_binary_test = []
for x_i in X_test:
b = np.zeros((len(x_i), n_symbols))
for j, x_ij in enumerate(x_i):
b[j, x_ij] = 1
X_binary_test.append(b)
model = models["elman"]
l = np.zeros(2*max_length)
counters = np.zeros(2*max_length)
for i in range(len(X_test)):
x_i = torch.Tensor(X_binary_test[i:i+1])
y_i = torch.Tensor(y_test[i:i+1])
outputs = model(x_i)
loss = criterion(outputs, y_i)
l[len(x_i[0])-1] += loss
counters[len(x_i[0])-1] += 1
plt.plot(range(1,2*max_length+1),l/counters, color="r", marker="o", label="Elman")
model = models["elman-stacked"]
l = np.zeros(2*max_length)
counters = np.zeros(2*max_length)
for i in range(len(X_test)):
x_i = torch.Tensor(X_binary_test[i:i+1])
y_i = torch.Tensor(y_test[i:i+1])
outputs = model(x_i)
loss = criterion(outputs, y_i)
l[len(x_i[0])-1] += loss
counters[len(x_i[0])-1] += 1
plt.plot(range(1,2*max_length+1),l/counters, "r--", marker="o", label="Elman 2-layer")
model = models["lstm"]
l = np.zeros(2*max_length)
counters = np.zeros(2*max_length)
for i in range(len(X_test)):
x_i = torch.Tensor(X_binary_test[i:i+1])
y_i = torch.Tensor(y_test[i:i+1])
outputs = model(x_i)
loss = criterion(outputs, y_i)
l[len(x_i[0])-1] += loss
counters[len(x_i[0])-1] += 1
plt.plot(range(1,2*max_length+1),l/counters, color="b", marker="o", label="LSTM")
model = models["gru"]
l = np.zeros(2*max_length)
counters = np.zeros(2*max_length)
for i in range(len(X_test)):
x_i = torch.Tensor(X_binary_test[i:i+1])
y_i = torch.Tensor(y_test[i:i+1])
outputs = model(x_i)
loss = criterion(outputs, y_i)
l[len(x_i[0])-1] += loss
counters[len(x_i[0])-1] += 1
plt.plot(range(1,2*max_length+1),l/counters, color="g", marker="o", label="GRU")
plt.legend()
plt.ylim(0,0.75)
plt.grid()
remove_frame()
plt.savefig("length-4.png")
plt.show()
###Output
_____no_output_____ |
Text Extraction - Tesseract.ipynb | ###Markdown
Tesseract Tesseract is an optical character recognition engine for various operating systems. It is free, open-source software released under the Apache License. In 2006, Tesseract was considered one of the most accurate open-source OCR engines available.
###Code
import os
import re
import cv2
import glob
import pytesseract
import numpy as np
import pandas as pd
from datetime import date
from pytesseract import Output
from difflib import get_close_matches
pytesseract.pytesseract.tesseract_cmd=r"<local_path>/Tesseract-OCR/tesseract.exe"
imagelink = "<local_path>/Google-Tesseract/Images/"
###Output
_____no_output_____
###Markdown
Before we jump into Tesseract, let us look at some image manipulations that can come in handy while extracting text from any image. OPERATIONS ON IMAGES
###Code
# DISPLAY IMAGE
link = imagelink + "example_1.jpg"
image = cv2.imread(link, 0)
cv2.imshow("Image Displayed", image)
cv2.waitKey(0)
# RESIZE IMAGE
link = imagelink + "example_1.jpg"
image = cv2.imread(link, 0)
image = cv2.resize(image, (500, 700))
cv2.imshow("Image Resized", image)
cv2.waitKey(0)
# CROPPED IMAGE
link = imagelink + "example_1.jpg"
image = cv2.imread(link, 0)
image = image[50:, :200]
cv2.imshow("Image Cropped", image)
cv2.waitKey(0)
# ROTATE IMAGE
link = imagelink + "example_1.jpg"
image = cv2.imread(link, 0)
image = cv2.rotate(image, cv2.ROTATE_90_CLOCKWISE)
cv2.imshow("Image Rotated", image)
cv2.waitKey(0)
# TRANSLATED IMAGE
link = imagelink + "example_1.jpg"
image = cv2.imread(link, 0)
height, width = image.shape[:2]
tx, ty = width / 4, height / 4
translation_matrix = np.array([[1, 0, tx],[0, 1, ty]], dtype=np.float32)
image = cv2.warpAffine(src=image, M=translation_matrix, dsize=(width, height))
cv2.imshow("Image Translated", image)
cv2.waitKey(0)
###Output
_____no_output_____
###Markdown
TEXT EXTRACTION - Simple Extraction A plain vanilla approach to identify and extract the text within any image
###Code
link = imagelink + "example_1.jpg"
image = cv2.imread(link, 0)
data = pytesseract.image_to_string(image)
print(data)
link = imagelink + "example_2.jpg"
image = cv2.imread(link, 0)
data = pytesseract.image_to_string(image)
print(data)
###Output
"You've gotta dance like there's nobody watching,
Love like you'll never be hurt,
Sing like there's nobody listening,
And live like it's heaven on earth."
- William w. Pu rkey
###Markdown
- Text Extraction With Manipulations Let us now increase our level of difficulty. Here we will extract the number of views from some Instagram stories.- First, we will now use simple image manipulations that have been defined earlier with the above examples. - Second, use Tesseract to extract the text.- Lastly, we will look for a pattern that may appear before the views and construct a simple regular expression to process the text.
###Code
link = imagelink + "example_3.jpg"
image = cv2.imread(link,0)
image = cv2.resize(image, (500, 700))
image = image[25:300, :]
thresh = cv2.threshold(image, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
Data = pytesseract.image_to_string(thresh, lang='eng',config='--psm 6')
print("\n{}".format(Data))
print("-"*20)
print("\nWe notice that views on the screenshot are visible after a special character 'ยฉ'.\nTherefore we use regex to extract the number of views.")
Views = re.findall(r'ยฉ .*',Data)[0]
Views = [int(i) for i in Views.split() if i.isdigit()][0]
print("-"*20)
print("\nExample 3 has {} views.".format(Views))
###Output
it © 13 ~*~ wu
C) Kimmy Long
C) Le Fevre Taylor
--------------------
We notice that views on the screenshot are visible after a special character '©'.
Therefore we use regex to extract the number of views.
--------------------
Example 3 has 13 views.
###Markdown
We can't automate a process if there is a dependency on visibility for a pattern. Thus, we use an alternative method to extract the number of views.- The first two steps are similar to the previous example.- Next, using list comprehension, we will filter out the numbers from the text extracted by Tesseract
###Code
link = imagelink + "example_4.jpg"
image = cv2.imread(link,0)
image = cv2.resize(image, (500, 700))
image = image[25:300, :]
rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
results = pytesseract.image_to_data(rgb, output_type=Output.DICT)
Views = [int(i) for i in results["text"] if i.isdigit()][0]
print("{}\n".format(results["text"]))
print("-"*20)
print("\nWe can't automate a process if there is a dependency on visibility for a special character\nThus, we use another method to extract the number of views.")
print("-"*20)
print("\nExample 4 has {} views.".format(Views))
###Output
['', '', '', '', '©', '5616', '', '', '', 'rm', '', '', '', '']
--------------------
We can't automate a process if there is a dependency on visibility for a special character
Thus, we use another method to extract the number of views.
--------------------
Example 4 has 5616 views.
###Markdown
- Using Bounded Box Approach In object detection, we usually use a bounding box to describe the location of an object. The bounding box is a rectangular box that determines the coordinates.- We need to use "results = pytesseract.image_to_data(rgb, output_type=Output.DICT)". This will return a dictionary with the coordinates for each text that has been detected by Tesseract- Next, we extract the coordinates for the text needed to create a bounded box. Once the coordinates are located for the text we perform manipulations to crop the image.
###Code
link = imagelink + "example_5.jpg"
image = cv2.imread(link)
rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
results = pytesseract.image_to_data(rgb, output_type=Output.DICT)
top = results['text'].index("Review")
bottom = results['text'].index("helpful?")
top_cod = results["top"][top]
top_cod = top_cod - round(top_cod/1.5)
bottom_cod = results["top"][bottom]
image = image[top_cod:bottom_cod, :]
rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
results = pytesseract.image_to_data(rgb, output_type=Output.DICT)
review = results['text'][results['text'].index(get_close_matches("/10", results['text'],cutoff=0.6)[0])+1:]
review = " ".join([i for i in review if i != ""])
reviewtext = results['text'][results['text'].index(get_close_matches("/10", results['text'],cutoff=0.6)[0])+1:]
reviewindx = reviewtext.index([i for i in reviewtext if (i.isdigit()) and (int(i) >=1900 and int(i) <= date.today().year)][0])
reviewtime = " ".join(reviewtext[:reviewindx+1][-3:])
reviewheading = " ".join([i for i in reviewtext[:reviewindx-2][:-2] if i != ""])
reviewer = reviewtext[:reviewindx-2][-1]
review = " ".join([i for i in reviewtext[reviewindx+1:] if i != ""])
completereview = [reviewtime,reviewer,reviewheading,review]
print(*completereview,sep="\n--------------\n")
cv2.imshow("Image", image)
cv2.waitKey(0)
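# --- Added sketch (not part of the original notebook) ---
# pytesseract.image_to_data(..., output_type=Output.DICT) also returns 'left', 'top',
# 'width', 'height' and 'conf' lists alongside 'text', so each detected word can be
# drawn as a bounding box for debugging. Assumes `rgb` and `results` from the lines
# above are still in scope.
debug_img = rgb.copy()
for i, word in enumerate(results["text"]):
    if word.strip() == "":
        continue  # skip empty detections
    x, y = results["left"][i], results["top"][i]
    w, h = results["width"][i], results["height"][i]
    cv2.rectangle(debug_img, (x, y), (x + w, y + h), (0, 255, 0), 1)
# cv2.imshow("Detected words", debug_img)
# cv2.waitKey(0)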
#Using rating
link = imagelink + "example_6.jpg"
image = cv2.imread(link)
rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
results = pytesseract.image_to_data(rgb, output_type=Output.DICT)
top_val = results['text'][results['text'].index(get_close_matches("/10", results['text'],cutoff=0.6)[0])]
top = results['text'].index(top_val)
bottom = results['text'].index("helpful?")
top_cod = results["top"][top]
top_cod = top_cod - round(top_cod/6)
bottom_cod = results["top"][bottom]
image = image[top_cod:bottom_cod, :]
rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
results = pytesseract.image_to_data(rgb, output_type=Output.DICT)
review = results['text'][results['text'].index(get_close_matches("/10", results['text'],cutoff=0.6)[0])+1:]
review = " ".join([i for i in review if i != ""])
reviewtext = results['text'][results['text'].index(get_close_matches("/10", results['text'],cutoff=0.6)[0])+1:]
reviewindx = reviewtext.index([i for i in reviewtext if (i.isdigit()) and (int(i) >=1900 and int(i) <= date.today().year)][0])
reviewtime = " ".join(reviewtext[:reviewindx+1][-3:])
reviewheading = " ".join([i for i in reviewtext[:reviewindx-2][:-2] if i != ""])
reviewer = reviewtext[:reviewindx-2][-1]
review = " ".join([i for i in reviewtext[reviewindx+1:] if i != ""])
completereview = [reviewtime,reviewer,reviewheading,review]
print(*completereview,sep="\n--------------\n")
cv2.imshow("Image", image)
cv2.waitKey(0)
#Bulk using rating
Image = os.path.join(imagelink, "*")
Image = glob.glob(Image)[-2:]
for link in Image:
print("\nDetails for Image: {}\n".format(link.split("\\")[-1]))
image = cv2.imread(link)
rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
results = pytesseract.image_to_data(rgb, output_type=Output.DICT)
top_val = results['text'][results['text'].index(get_close_matches("/10", results['text'],cutoff=0.6)[0])]
top = results['text'].index(top_val)
bottom = results['text'].index("helpful?")
top_cod = results["top"][top]
top_cod = top_cod - round(top_cod/6)
bottom_cod = results["top"][bottom]
image = image[top_cod:bottom_cod, :]
rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
results = pytesseract.image_to_data(rgb, output_type=Output.DICT)
review = results['text'][results['text'].index(get_close_matches("/10", results['text'],cutoff=0.6)[0])+1:]
review = " ".join([i for i in review if i != ""])
reviewtext = results['text'][results['text'].index(get_close_matches("/10", results['text'],cutoff=0.6)[0])+1:]
reviewindx = reviewtext.index([i for i in reviewtext if (i.isdigit()) and (int(i) >=1900 and int(i) <= date.today().year)][0])
reviewtime = " ".join(reviewtext[:reviewindx+1][-3:])
reviewheading = " ".join([i for i in reviewtext[:reviewindx-2][:-2] if i != ""])
reviewer = reviewtext[:reviewindx-2][-1]
review = " ".join([i for i in reviewtext[reviewindx+1:] if i != ""])
completereview = [reviewtime,reviewer,reviewheading,review]
print("-"*20)
print(*completereview,sep="\n--------------\n")
print("-"*20)
###Output
Details for Image: example_5.JPG
--------------------
20 July 2017
--------------
TheLittleSongbird
--------------
Spider-Man with a fresh twist
--------------
Really enjoyed the first two films, both contained great scenes/action, acting and the two best villains of the films. Was mixed on the third film, which wasn't that bad but suffered mainly from bloat, and was not totally sold on the “Amazing Spider-Man” films. Whether “Spider-Man: Homecoming” is the best 'Spider-Man' film ever is debatable, some may prefer the first two films, others may prefer this. To me, it is the best 'Spider-Man' film since the second and on par with the first two. It may not have taken as many risks or had sequences/action as memorable as the first two films, and for more of an origin story it's best to stick with the first two films. For a fresh twist on 'Spider-Man' and the superhero genre, “Spider-Man: Homecoming” (one of Marvel's best to date) more than fits the bill.
--------------------
Details for Image: example_6.JPG
--------------------
29 September 2017
--------------
SnoopyStyle
--------------
fun comic book fare
--------------
Salvager Adrian Toomes (Michael Keaton) holds a grudge against Tony Stark (Robert Downey Jr.) after his takeover of the Battle of New York cleanup. Toomes kept some of the Chitauri tech to create new weapons. Eight years later after the events of Civil War, Peter Parker (Tom Holland) returns to his school, Midtown School of Science and Technology. He lives with his sought-after aunt May (Marisa Tomei). He has a crush on classmate Liz. His best friend Ned discovers his secret identity Spider-Man. There is also the sarcastic academic teammate Michelle (Zendaya). This is fun. It's got the comic book action. It weaves into the MCU with ease. RDJ has a supporting role which is more than a simple cameo. This definitely has the John Hughes vibe. It's nice light fun in this overarching comics universe. Holland is a great teen Spider-man as he showed in Civil War. The young cast is terrific and Keaton is an awesome villain. Keaton has real depth which is built over the years. His humanity creates more than a comic book villain. The surprise connection hits it out of the park. This is simply a good movie.
--------------------
|
final work.ipynb | ###Markdown
Preprocessing
###Code
from google.colab import files
uploaded = files.upload()
# Import our dependencies
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import pandas as pd
import tensorflow as tf
import matplotlib.pyplot as plt
# Import and read the charity_data.csv.
import pandas as pd
# application_df = pd.read_csv("../Resources/charity_data.csv")
application_df = pd.read_csv('charity_data.csv')
application_df.head()
# Drop the non-beneficial ID columns, 'EIN' and 'NAME'.
# YOUR CODE GOES HERE
application_df = application_df.drop(["EIN","NAME"],axis=1)
application_df.shape
# Determine the number of unique values in each column.
# YOUR CODE GOES HERE
application_df.nunique()
# Look at APPLICATION_TYPE value counts for binning
# YOUR CODE GOES HERE
application_value = application_df.loc[:,'APPLICATION_TYPE'].value_counts(normalize= True)
application_value
# Choose a cutoff value and create a list of application types to be replaced
# use the variable name `application_types_to_replace`
# YOUR CODE GOES HERE
application_types_to_replace = list(application_value[application_value<=0.031050].index)
# Replace in dataframe
for app in application_types_to_replace:
application_df['APPLICATION_TYPE'] = application_df['APPLICATION_TYPE'].replace(app,"Other")
# Check to make sure binning was successful
application_df['APPLICATION_TYPE'].value_counts()
# Look at CLASSIFICATION value counts for binning
# Trying to keep roughly 90% of the data when choosing the binning cutoff
# YOUR CODE GOES HERE
classification_value = application_df.loc[:,'CLASSIFICATION'].value_counts(normalize = False)
classification_value
# You may find it helpful to look at CLASSIFICATION value counts >1
# YOUR CODE GOES HERE
classification_types_to_replace = list(classification_value[classification_value < 1883].index)
classification_types_to_replace
# Choose a cutoff value and create a list of classifications to be replaced
# use the variable name `classifications_to_replace`
# YOUR CODE GOES HERE
# Binning rare categories ('outliers') so that roughly 90% of the data keeps its original label
# Replace in dataframe
for cls in classification_types_to_replace:
application_df['CLASSIFICATION'] = application_df['CLASSIFICATION'].replace(cls,"Other")
# Check to make sure binning was successful
application_df['CLASSIFICATION'].value_counts()
application_df.info()
x_APP = application_df.drop('IS_SUCCESSFUL', axis=1)
X = pd.get_dummies(x_APP)
X.shape
X.head()
# Convert categorical data to numeric with `pd.get_dummies`
# YOUR CODE GOES HERE
y = application_df.IS_SUCCESSFUL.values
y.shape
application_df.IS_SUCCESSFUL.value_counts()
# Split our preprocessed data into our features and target arrays
# YOUR CODE GOES HERE
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
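# Optional variation (added sketch, commented out): passing `stratify=y` keeps the
# IS_SUCCESSFUL class balance roughly equal in the train and test splits; sklearn's
# train_test_split supports this argument directly.
# X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42, stratify=y)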
# Split the preprocessed data into a training and testing dataset
# YOUR CODE GOES HERE
# Create a StandardScaler instances
scaler = StandardScaler()
# Fit the StandardScaler
X_scaler = scaler.fit(X_train)
# Scale the data
X_train_scaled = X_scaler.transform(X_train)
X_test_scaled = X_scaler.transform(X_test)
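# Optional sanity check (added sketch): after StandardScaler, most training features
# should have mean ~0 and standard deviation ~1 (constant dummy columns simply stay at 0).
print("scaled train means:", X_train_scaled.mean(axis=0)[:5].round(3))
print("scaled train stds :", X_train_scaled.std(axis=0)[:5].round(3))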
###Output
_____no_output_____
###Markdown
Compile, Train and Evaluate the Model
###Code
# Define the model - deep neural net, i.e., the number of input features and hidden nodes for each layer.
# YOUR CODE GOES HERE
# First model: train accuracy = 0.729, test accuracy = 0.734 -- it underfits
# Define the deep learning model
nn_model = tf.keras.models.Sequential()
nn_model.add(tf.keras.layers.Dense(units=50, activation="relu"))
nn_model.add(tf.keras.layers.Dense(units=50, activation="relu"))
nn_model.add(tf.keras.layers.Dense(units=1, activation="sigmoid"))
# Compile the Sequential model together and customize metrics
nn_model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"])
# Train the model
fit_model = nn_model.fit(X_train_scaled, y_train, epochs=60)
# Evaluate the model using the test data
model_loss, model_accuracy = nn_model.evaluate(X_test_scaled,y_test,verbose=2)
print(f"Loss: {model_loss}, Accuracy: {model_accuracy}")
# First hidden layer
# YOUR CODE GOES HERE
# Second hidden layer
# YOUR CODE GOES HERE
# Output layer
# YOUR CODE GOES HERE
# Check the structure of the model
nn_model.summary()
# Create a DataFrame containing training history
application_df = pd.DataFrame(fit_model.history)
# Increase the index by 1 to match the number of epochs
application_df.index += 1
# Plot the loss
application_df.plot(y="loss")
# Plot the accuracy
application_df.plot(y="accuracy")
# Train the model
# YOUR CODE GOES HERE
# Evaluate the model using the test data
model_loss, model_accuracy = nn_model.evaluate(X_test_scaled,y_test,verbose=2)
print(f"Loss: {model_loss}, Accuracy: {model_accuracy}")
# Define the model - deep neural net, i.e., the number of input features and hidden nodes for each layer.
# YOUR CODE GOES HERE
# Second model: train accuracy = 0.7388, test accuracy = 0.7301 -- a good fit
# Define the deep learning model
nn_model = tf.keras.models.Sequential()
nn_model.add(tf.keras.layers.Dense(units=100, activation="relu"))
nn_model.add(tf.keras.layers.Dense(units=100, activation="relu"))
nn_model.add(tf.keras.layers.Dense(units=1, activation="sigmoid"))
# Compile the Sequential model together and customize metrics
nn_model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy" ])
# Train the model
fit_model = nn_model.fit(X_train_scaled, y_train, epochs=75)
# Check the structure of the model
nn_model.summary()
# Create a DataFrame containing training history
application_df = pd.DataFrame(fit_model.history)
# Increase the index by 1 to match the number of epochs
application_df.index += 1
# Plot the loss
application_df.plot(y="loss")
# Plot the accuracy
application_df.plot(y="accuracy")
# Evaluate the model using the test data
model_loss, model_accuracy = nn_model.evaluate(X_test_scaled,y_test,verbose=2)
print(f"Loss: {model_loss}, Accuracy: {model_accuracy}")
# Define the model - deep neural net, i.e., the number of input features and hidden nodes for each layer.
# YOUR CODE GOES HERE
# Third model: activation changed to tanh, giving train accuracy = 0.7393 and test accuracy = 0.7309 -- the best fit
# Define the deep learning model
nn_model = tf.keras.models.Sequential()
nn_model.add(tf.keras.layers.Dense(units=100, activation="tanh"))
nn_model.add(tf.keras.layers.Dense(units=100, activation="tanh"))
nn_model.add(tf.keras.layers.Dense(units=1, activation="sigmoid"))
# Compile the Sequential model together and customize metrics
nn_model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy" ])
# Train the model
fit_model = nn_model.fit(X_train_scaled, y_train, epochs=75)
# Check the structure of the model
nn_model.summary()
#Create a DataFrame containing training history
application_df = pd.DataFrame(fit_model.history)
# Increase the index by 1 to match the number of epochs
application_df.index += 1
# Plot the loss
application_df.plot(y="loss")
# Plot the accuracy
application_df.plot(y="accuracy")
# Evaluate the model using the test data
model_loss, model_accuracy = nn_model.evaluate(X_test_scaled,y_test,verbose=2)
print(f"Loss: {model_loss}, Accuracy: {model_accuracy}")
# In data preprocessing I was able to bin the rare categories and work with roughly 90% of the data.
# Model 1 underfits: it shows high bias on the training data and low variance on the test data.
# Model 2 and model 3 (with the activation changed to tanh) give a good fit.
# Export our model to HDF5 file
from google.colab import files
nn_model.save('/content/Alphabet_Soup_model3.h5')
files.download('/content/Alphabet_Soup_model3.h5')
###Output
_____no_output_____ |
notebooks/SIR_simulation.ipynb | ###Markdown
SIR Modelling
###Code
import pandas as pd
import numpy as np
from datetime import datetime
import matplotlib as mpl
from matplotlib import pyplot as plt
import seaborn as sns
from scipy import optimize
from scipy import integrate
%matplotlib inline
mpl.rcParams['figure.figsize']=(16,9)
pd.set_option('display.max_rows',500)
sns.set(style="whitegrid")
# We start with a small data set initially
df_analyse=pd.read_csv('../data/processed/COVID_JH_data_small_flat_table.csv',sep=';',parse_dates=[0])
df_analyse=df_analyse.sort_values('date',ascending=True)
df_analyse.head()
N0=234494
beta=0.4
gamma=0.1
I0=df_analyse.Germany[35]
S0=N0-I0
R0=0
# Function for the SIR model
def SIR_model(SIR,beta,gamma):
    'SIR model for simulating spread'
'S: Susceptible population'
'I: Infected popuation'
'R: Recovered population'
'S+I+R=N (remains constant)'
'dS+dI+dR=0 model has to satisfy this condition at all time'
S,I,R=SIR
dS_dt=-beta*S*I/N0
dI_dt=beta*S*I/N0-gamma*I
dR_dt=gamma*I
return ([dS_dt,dI_dt,dR_dt])
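# Quick check (added sketch): since dS+dI+dR must be 0, the total population N0 is
# conserved at every step; the returned derivatives should sum to ~0.
_dS, _dI, _dR = SIR_model(np.array([S0, I0, R0]), beta, gamma)
print("dS + dI + dR =", _dS + _dI + _dR)  # expected: ~0 up to floating point error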
SIR=np.array([S0,I0,R0])
propagation_rates=pd.DataFrame(columns={'susceptible':S0,'infected':I0,'recovered':R0})
for each_t in np.arange(100):
new_delta_vec=SIR_model(SIR,beta,gamma)
SIR=SIR+new_delta_vec
propagation_rates=propagation_rates.append({'susceptible':SIR[0],'infected':SIR[1],'recovered':SIR[2]},ignore_index=True)
fig,ax1=plt.subplots(1,1)
ax1.plot(propagation_rates.index,propagation_rates.susceptible,label='susceptible')
ax1.plot(propagation_rates.index,propagation_rates.recovered,label='recovered')
ax1.plot(propagation_rates.index,propagation_rates.infected,label='infected')
ax1.set_ylim(10,N0)
ax1.set_yscale('linear')
ax1.set_title('Scenario SIR simulation',size=16)
ax1.set_xlabel('Time in days',size=16)
ax1.legend(loc='best',prop={'size' :16})
###Output
_____no_output_____
###Markdown
Fitting parameters for the SIR Model
###Code
ydata=np.array(df_analyse.Germany[36:])
t=np.arange(len(ydata))
def SIR_model_t(SIR,t,beta,gamma):
    'SIR model for simulating spread'
'S: Susceptible population'
'I: Infected popuation'
'R: Recovered population'
'S+I+R=N (remains constant)'
'dS+dI+dR=0 model has to satisfy this condition at all time'
S,I,R=SIR
dS_dt=-beta*S*I/N0
dI_dt=beta*S*I/N0-gamma*I
dR_dt=gamma*I
return ([dS_dt,dI_dt,dR_dt])
def fit_odeint(x,beta,gamma):
return integrate.odeint(SIR_model_t,(S0,I0,R0),t,args=(beta,gamma))[:,1]
popt=[beta,gamma]
fit_odeint(t,*popt)
popt,pcov=optimize.curve_fit(fit_odeint,t,ydata)
perr=np.sqrt(np.diag(pcov))
#the diagonal of the covariance matrix gives the variance of the parameters
#sqrt of the variance gives the standard deviation
print('standard deviation:',str(perr), 'Start infection:',ydata[0])
print('optimal parameters: beta',popt[0],' Gamma:',popt[1])
fitted=fit_odeint(t,*popt)
plt.semilogy(t,ydata,'o')
plt.semilogy(t,fitted)
plt.title('Fit of SIR model for Germany')
plt.ylabel('Infected population')
plt.xlabel('Days')
plt.show()
print('Optimal parameters: beta =',popt[0],'and gamma = ',popt[1])
print('Basic Reproduction Number R0 ',popt[0]/ popt[1])
print('This ratio is the expected number of new infections (sometimes called secondary infections) arising from a single infection in a population where all subjects are susceptible. (Wikipedia)')
###Output
_____no_output_____
###Markdown
Dynamic beta in SIR simulation
###Code
t_initial=21
t_intro_measures=14
t_hold=21
t_relax=21
beta_max=0.4
beta_min=0.11
gamma=0.1
pd_beta=np.concatenate((np.array(t_initial*[beta_max]),
np.linspace(beta_max,beta_min,t_intro_measures),
np.array((t_hold)*[beta_min]),
np.linspace(beta_min,beta_max,t_relax),
))
pd_beta
N0
SIR=np.array([S0,I0,R0])
propagation_rates=pd.DataFrame(columns={'susceptible':S0,'infected':I0,'recovered':R0})
for each_beta in pd_beta:
new_delta_vec=SIR_model(SIR,each_beta,gamma)
SIR=SIR+new_delta_vec
propagation_rates=propagation_rates.append({'susceptible':SIR[0],'infected':SIR[1],'recovered':SIR[2]},ignore_index=True)
fig, ax1 = plt.subplots(1, 1)
ax1.plot(propagation_rates.index,propagation_rates.infected,label='infected',linewidth=3)
t_phases=np.array([t_initial,t_intro_measures,t_hold,t_relax]).cumsum()
ax1.bar(np.arange(len(ydata)),ydata, width=0.8,label='Current infected in Germany',color='r')
ax1.axvspan(0,t_phases[0], facecolor='b', alpha=0.2,label='No measures')
ax1.axvspan(t_phases[0],t_phases[1], facecolor='b', alpha=0.3,label='Hard measures introduced')
ax1.axvspan(t_phases[1],t_phases[2], facecolor='b', alpha=0.4,label='Hold measures')
ax1.axvspan(t_phases[2],t_phases[3], facecolor='b', alpha=0.5,label='Relax measures')
ax1.axvspan(t_phases[3],len(propagation_rates.infected), facecolor='b', alpha=0.6,label='repeat hard measures')
ax1.set_ylim(10, 1.5*(ydata[len(ydata)-1]))#propagation_rates.infected))
ax1.set_yscale('log')
ax1.set_title('Scenario SIR simulation',size=16)
ax1.set_xlabel('Time in days',size=16)
ax1.legend(loc='best',prop={'size': 16});
plt.show()
import os
import pandas as pd
import numpy as np
import plotly.graph_objects as go
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input,Output
import plotly.io as pio
df_SIR_large=pd.read_csv('../data/processed/COVID_JH_flat_table_confirmed.csv',sep=';',parse_dates=[0])
df_SIR_large=df_SIR_large.sort_values('date',ascending=True)
fig=go.Figure()
app=dash.Dash()
app.layout=html.Div([
dcc.Markdown('''
# Applied Datascience on COVID-19 Data
This Dashboard shows the confirmed infected cases and the simulated
SIR curve.
'''),
# For Country dropdown menu
dcc.Markdown(''' ## Single-Select Country for Visualization'''),
dcc.Dropdown( id='single_select_country',
options=[{'label':each,'value':each} for each in df_SIR_large.columns[1:]],
value='Germany',
multi=False),
#For changing beta ,gamma, t_initial, t_intro_measures,t_hold,t_relax
dcc.Markdown(''' ## Change the values below(and press enter) to manipulate the SIR curve:'''),
html.Label(["No measures introduced(days):",
dcc.Input(id='t_initial',
type='number',
value=28,debounce=True)],style={"margin-left": "30px"}),
html.Label(["Measures introduced over(days):",
dcc.Input(id='t_intro_measures',
type='number',
value=14,debounce=True)],style={"margin-left": "30px"}),
html.Label(["Introduced measures hold time(days):",
dcc.Input(id='t_hold',
type='number',
value=21,debounce=True)],style={"margin-left": "30px"}),
html.Br(),
html.Br(),
html.Label(["Introduced measures relaxed(days):",
dcc.Input(id='t_relax',
type='number',
value=21,debounce=True)],style={"margin-left": "30px"}),
html.Label(["Beta max:",
dcc.Input(id='beta_max',
type='number',
value=0.4,debounce=True)],style={"margin-left": "30px"}),
html.Label(["Beta min:",
dcc.Input(id='beta_min',
type='number',
value=0.11,debounce=True)],style={"margin-left": "30px"}),
html.Label(["Gamma:",
dcc.Input(id='gamma',
type='number',
value=0.1,debounce=True)],style={"margin-left": "30px"}),
html.Br(),
html.Br(),
# For plotting graph
dcc.Graph(figure=fig,
id='SIR_curve',)
])
@app.callback(
Output('SIR_curve', 'figure'),
[Input('single_select_country', 'value'),
Input('t_initial','value'),
Input('t_intro_measures','value'),
Input('t_hold','value'),
Input('t_relax','value'),
Input('beta_max','value'),
Input('beta_min','value'),
Input('gamma','value')])
def update_figure(country,initial_time,intro_measures,hold_time,relax_time,max_beta,min_beta,gamma_max):
ydata=df_SIR_large[country][df_SIR_large[country]>=30]
xdata=np.arange(len(ydata))
N0=5000000
I0=30
S0=N0-I0
R0=0
gamma=gamma_max
SIR=np.array([S0,I0,R0])
t_initial=initial_time
t_intro_measures=intro_measures
t_hold=hold_time
t_relax=relax_time
beta_max=max_beta
beta_min=min_beta
propagation_rates=pd.DataFrame(columns={'susceptible':S0,'infected':I0,'recovered':R0})
pd_beta=np.concatenate((np.array(t_initial*[beta_max]),
np.linspace(beta_max,beta_min,t_intro_measures),
np.array(t_hold*[beta_min]),
np.linspace(beta_min,beta_max,t_relax),
))
def SIR_model(SIR,beta,gamma):
        'SIR model for simulating spread'
'S: Susceptible population'
'I: Infected popuation'
'R: Recovered population'
'S+I+R=N (remains constant)'
'dS+dI+dR=0 model has to satisfy this condition at all time'
S,I,R=SIR
dS_dt=-beta*S*I/N0
dI_dt=beta*S*I/N0-gamma*I
dR_dt=gamma*I
return ([dS_dt,dI_dt,dR_dt])
for each_beta in pd_beta:
new_delta_vec=SIR_model(SIR,each_beta,gamma)
SIR=SIR+new_delta_vec
propagation_rates=propagation_rates.append({'susceptible':SIR[0],'infected':SIR[1],'recovered':SIR[2]},ignore_index=True)
fig=go.Figure()
fig.add_trace(go.Bar(x=xdata,
y=ydata,
marker_color='crimson',
name="Confirmed Cases '%s'" %country
))
fig.add_trace(go.Scatter(x=xdata,
y=propagation_rates.infected,
mode='lines',
marker_color='blue',
name="Simulated curve for'%s'" %country ))
fig.update_layout(shapes=[
dict(type='rect',xref='x',yref='paper',x0=0,y0=0,x1=t_initial,y1=1,fillcolor="LightSalmon",opacity=0.4,layer="below",line_width=0,),
dict(type='rect',xref='x',yref='paper',x0=t_initial,y0=0,x1=t_initial+t_intro_measures,y1=1,fillcolor="LightSalmon",opacity=0.5,layer="below",line_width=0,),
dict(type='rect',xref='x',yref='paper',x0=t_initial+t_intro_measures,y0=0,x1=t_initial+t_intro_measures+t_hold,y1=1,fillcolor="LightSalmon",opacity=0.6,layer='below',line_width=0,),
dict(type='rect',xref='x',yref='paper',x0=t_initial+t_intro_measures+t_hold,y0=0,x1=t_initial+t_intro_measures+t_hold+t_relax,y1=1,fillcolor='LightSalmon',opacity=0.7,layer='below',line_width=0,)
],
title='SIR Simulation Scenario',
title_x=0.5,
xaxis=dict(title='Time(days)',
titlefont_size=16),
yaxis=dict(title='Confirmed cases[JH Data, log scale] ',
type='log',
titlefont_size=16,
),
width=1280,
height=600,
template='plotly_dark'
)
return fig
if __name__ == '__main__':
app.run_server(debug=True,use_reloader=False)
np.arange(10)
###Output
_____no_output_____ |
4 jigsaw/simple-lstm-fastai.ipynb | ###Markdown
Preface This kernel is a fork of https://www.kaggle.com/bminixhofer/simple-lstm-pytorch-version made to work on Fast.AI Imports & Utility functions
###Code
from fastai.train import Learner
from fastai.train import DataBunch
from fastai.callbacks import *
from fastai.basic_data import DatasetType
import numpy as np
import pandas as pd
import os
import time
import gc
import random
from tqdm._tqdm_notebook import tqdm_notebook as tqdm
from keras.preprocessing import text, sequence
import torch
from torch import nn
from torch.utils import data
from torch.nn import functional as F
# disable progress bars when submitting
def is_interactive():
return 'SHLVL' not in os.environ
if not is_interactive():
def nop(it, *a, **k):
return it
tqdm = nop
def seed_everything(seed=1234):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
seed_everything()
CRAWL_EMBEDDING_PATH = '../input/fasttext-crawl-300d-2m/crawl-300d-2M.vec'
GLOVE_EMBEDDING_PATH = '../input/glove840b300dtxt/glove.840B.300d.txt'
NUM_MODELS = 2
LSTM_UNITS = 128
DENSE_HIDDEN_UNITS = 4 * LSTM_UNITS
MAX_LEN = 220
def get_coefs(word, *arr):
return word, np.asarray(arr, dtype='float32')
def load_embeddings(path):
with open(path) as f:
return dict(get_coefs(*line.strip().split(' ')) for line in tqdm(f))
def build_matrix(word_index, path):
embedding_index = load_embeddings(path)
embedding_matrix = np.zeros((len(word_index) + 1, 300))
unknown_words = []
for word, i in word_index.items():
try:
embedding_matrix[i] = embedding_index[word]
except KeyError:
unknown_words.append(word)
return embedding_matrix, unknown_words
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def train_model(learn,test,output_dim,lr=0.001,
batch_size=512, n_epochs=4,
enable_checkpoint_ensemble=True):
all_test_preds = []
checkpoint_weights = [2 ** epoch for epoch in range(n_epochs)]
test_loader = torch.utils.data.DataLoader(test, batch_size=batch_size, shuffle=False)
n = len(learn.data.train_dl)
phases = [(TrainingPhase(n).schedule_hp('lr', lr * (0.6**(i)))) for i in range(n_epochs)]
sched = GeneralScheduler(learn, phases)
learn.callbacks.append(sched)
for epoch in range(n_epochs):
learn.fit(1)
test_preds = np.zeros((len(test), output_dim))
for i, x_batch in enumerate(test_loader):
X = x_batch[0].cuda()
y_pred = sigmoid(learn.model(X).detach().cpu().numpy())
test_preds[i * batch_size:(i+1) * batch_size, :] = y_pred
all_test_preds.append(test_preds)
if enable_checkpoint_ensemble:
test_preds = np.average(all_test_preds, weights=checkpoint_weights, axis=0)
else:
test_preds = all_test_preds[-1]
return test_preds
class SpatialDropout(nn.Dropout2d):
def forward(self, x):
x = x.unsqueeze(2) # (N, T, 1, K)
x = x.permute(0, 3, 2, 1) # (N, K, 1, T)
x = super(SpatialDropout, self).forward(x) # (N, K, 1, T), some features are masked
x = x.permute(0, 3, 2, 1) # (N, T, 1, K)
x = x.squeeze(2) # (N, T, K)
return x
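# Usage sketch (added, not in the original kernel): SpatialDropout keeps the
# (batch, seq_len, embed_dim) shape and drops whole embedding channels rather than
# individual elements.
_x = torch.randn(2, 5, 8)   # (batch, seq_len, embed_dim)
_drop = SpatialDropout(0.3)
print(_drop(_x).shape)      # torch.Size([2, 5, 8])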
class NeuralNet(nn.Module):
def __init__(self, embedding_matrix, num_aux_targets):
super(NeuralNet, self).__init__()
embed_size = embedding_matrix.shape[1]
self.embedding = nn.Embedding(max_features, embed_size)
self.embedding.weight = nn.Parameter(torch.tensor(embedding_matrix, dtype=torch.float32))
self.embedding.weight.requires_grad = False
self.embedding_dropout = SpatialDropout(0.3)
self.lstm1 = nn.LSTM(embed_size, LSTM_UNITS, bidirectional=True, batch_first=True)
self.lstm2 = nn.LSTM(LSTM_UNITS * 2, LSTM_UNITS, bidirectional=True, batch_first=True)
self.linear1 = nn.Linear(DENSE_HIDDEN_UNITS, DENSE_HIDDEN_UNITS)
self.linear2 = nn.Linear(DENSE_HIDDEN_UNITS, DENSE_HIDDEN_UNITS)
self.linear_out = nn.Linear(DENSE_HIDDEN_UNITS, 1)
self.linear_aux_out = nn.Linear(DENSE_HIDDEN_UNITS, num_aux_targets)
def forward(self, x):
h_embedding = self.embedding(x)
h_embedding = self.embedding_dropout(h_embedding)
h_lstm1, _ = self.lstm1(h_embedding)
h_lstm2, _ = self.lstm2(h_lstm1)
# global average pooling
avg_pool = torch.mean(h_lstm2, 1)
# global max pooling
max_pool, _ = torch.max(h_lstm2, 1)
h_conc = torch.cat((max_pool, avg_pool), 1)
h_conc_linear1 = F.relu(self.linear1(h_conc))
h_conc_linear2 = F.relu(self.linear2(h_conc))
hidden = h_conc + h_conc_linear1 + h_conc_linear2
result = self.linear_out(hidden)
aux_result = self.linear_aux_out(hidden)
out = torch.cat([result, aux_result], 1)
return out
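# Shape note (added sketch, commented out because `embedding_matrix` is only built
# further below): the forward pass returns (batch, 1 + num_aux_targets) -- the main
# toxicity logit concatenated with the auxiliary logits.
# _net = NeuralNet(embedding_matrix, num_aux_targets=6)
# print(_net(x_train_torch[:2]).shape)   # torch.Size([2, 7])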
def preprocess(data):
'''
Credit goes to https://www.kaggle.com/gpreda/jigsaw-fast-compact-solution
'''
    punct = "/-'?!.,#$%\'()*+-/:;<=>@[\\]^_`{|}~`" + '""“”’' + '∞θ÷α•à−β∅³π‘₹´°£€\×™√²—–&'
def clean_special_chars(text, punct):
for p in punct:
text = text.replace(p, ' ')
return text
data = data.astype(str).apply(lambda x: clean_special_chars(x, punct))
return data
###Output
_____no_output_____
###Markdown
Preprocessing
###Code
train = pd.read_csv('../input/jigsaw-unintended-bias-in-toxicity-classification/train.csv')
test = pd.read_csv('../input/jigsaw-unintended-bias-in-toxicity-classification/test.csv')
x_train = preprocess(train['comment_text'])
y_train = np.where(train['target'] >= 0.5, 1, 0)
y_aux_train = train[['target', 'severe_toxicity', 'obscene', 'identity_attack', 'insult', 'threat']]
x_test = preprocess(test['comment_text'])
max_features = None
tokenizer = text.Tokenizer()
tokenizer.fit_on_texts(list(x_train) + list(x_test))
x_train = tokenizer.texts_to_sequences(x_train)
x_test = tokenizer.texts_to_sequences(x_test)
x_train = sequence.pad_sequences(x_train, maxlen=MAX_LEN)
x_test = sequence.pad_sequences(x_test, maxlen=MAX_LEN)
max_features = max_features or len(tokenizer.word_index) + 1
max_features
crawl_matrix, unknown_words_crawl = build_matrix(tokenizer.word_index, CRAWL_EMBEDDING_PATH)
print('n unknown words (crawl): ', len(unknown_words_crawl))
glove_matrix, unknown_words_glove = build_matrix(tokenizer.word_index, GLOVE_EMBEDDING_PATH)
print('n unknown words (glove): ', len(unknown_words_glove))
embedding_matrix = np.concatenate([crawl_matrix, glove_matrix], axis=-1)
embedding_matrix.shape
del crawl_matrix
del glove_matrix
gc.collect()
x_train_torch = torch.tensor(x_train, dtype=torch.long)
y_train_torch = torch.tensor(np.hstack([y_train[:, np.newaxis], y_aux_train]), dtype=torch.float32)
x_test_torch = torch.tensor(x_test, dtype=torch.long)
###Output
_____no_output_____
###Markdown
Training
###Code
batch_size = 512
train_dataset = data.TensorDataset(x_train_torch, y_train_torch)
valid_dataset = data.TensorDataset(x_train_torch[:batch_size], y_train_torch[:batch_size])
test_dataset = data.TensorDataset(x_test_torch)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
valid_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=batch_size, shuffle=False)
databunch = DataBunch(train_dl=train_loader,valid_dl=valid_loader)
all_test_preds = []
for model_idx in range(NUM_MODELS):
print('Model ', model_idx)
seed_everything(1234 + model_idx)
model = NeuralNet(embedding_matrix, y_aux_train.shape[-1])
learn = Learner(databunch,model,loss_func=nn.BCEWithLogitsLoss(reduction='mean'))
test_preds = train_model(learn,test_dataset,output_dim=y_train_torch.shape[-1])
all_test_preds.append(test_preds)
print()
submission = pd.DataFrame.from_dict({
'id': test['id'],
'prediction': np.mean(all_test_preds, axis=0)[:, 0]
})
submission.to_csv('submission.csv', index=False)
###Output
_____no_output_____ |
test/keras/document-denoiser/trainer.ipynb | ###Markdown
Training a Document Denoiser Model with AutoEncoders
###Code
# _WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.24.*, run `git checkout -b 0.24` or switch to the `0.24` branch on GitHub)_
import keras
import cv2
import numpy as np
import pandas as pd
import seaborn as sns
import os
import ntpath
from glob import glob
from matplotlib.pyplot import imshow
from sklearn.model_selection import train_test_split
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential, Model, load_model
from keras.layers import Activation, Flatten, Dropout, SpatialDropout2D, Conv2D, UpSampling2D, MaxPooling2D, add, concatenate, Input, BatchNormalization
from keras.backend import set_image_data_format
from keras.utils import plot_model
###Output
_____no_output_____
###Markdown
Download DatasetDownload the dataset from [kaggle (denoising dirty documents)](https://www.kaggle.com/c/denoising-dirty-documents/data). You will need to be logged in to be able to download the data.Once downloaded run the following commands
###Code
!unzip denoising-dirty-documents.zip && rm denoising-dirty-documents.zip
!mv denoising-dirty-documents/*.zip . && rm -rf denoising-dirty-documents
!unzip '*.zip' > /dev/null && rm *.zip
###Output
_____no_output_____
###Markdown
Define the Data GeneratorInclude data augmentation because the dataset is rather small.
###Code
x_dirty = sorted(glob("train/*.png"))
x_cleaned = sorted(glob("train_cleaned/*.png"))
x_test = sorted(glob("test/*.png"))
input_shape = (260, 540)
height = input_shape[0]
width = input_shape[1]
x_train, x_valid, y_train, y_valid = train_test_split(x_dirty, x_cleaned, test_size=0.20)
set_image_data_format("channels_last")
def model_train_generator(x_train, y_train, epochs, batch_size, resize_shape):
white_fill = 1.0
datagen = ImageDataGenerator(
rotation_range=180,
width_shift_range=0.2,
height_shift_range=0.2,
zoom_range=0.3,
fill_mode="constant",
cval=white_fill,
horizontal_flip=True,
vertical_flip=True,
)
for _ in range(epochs):
for x_file, y_file in zip(x_train, y_train):
x_img = cv2.imread(x_file, cv2.IMREAD_GRAYSCALE) / 255.0
y_img = cv2.imread(y_file, cv2.IMREAD_GRAYSCALE) / 255.0
xs = []
ys = []
for i in range(batch_size):
if i == 0:
x = x_img
y = y_img
else:
params = datagen.get_random_transform(img_shape=x_img.shape)
x = datagen.apply_transform(np.expand_dims(x_img, 2), params)
y = datagen.apply_transform(np.expand_dims(y_img, 2), params)
x = cv2.resize(x, resize_shape[::-1], interpolation=cv2.INTER_AREA)
y = cv2.resize(y, resize_shape[::-1], interpolation=cv2.INTER_AREA)
x = np.expand_dims(x, 2)
y = np.expand_dims(y, 2)
xs.append(x)
ys.append(y)
xs_imgs = np.array(xs)
ys_imgs = np.array(ys)
yield (xs_imgs, ys_imgs)
def model_valid_generator(x_valid, y_valid, epochs, resize_shape):
xs = []
ys = []
for x_file, y_file in zip(x_valid, y_valid):
x_img = cv2.imread(x_file, cv2.IMREAD_GRAYSCALE) / 255.0
y_img = cv2.imread(y_file, cv2.IMREAD_GRAYSCALE) / 255.0
x = cv2.resize(x_img, resize_shape[::-1], interpolation=cv2.INTER_AREA)
y = cv2.resize(y_img, resize_shape[::-1], interpolation=cv2.INTER_AREA)
x = np.expand_dims(x, 2)
x = np.expand_dims(x, 0)
y = np.expand_dims(y, 2)
y = np.expand_dims(y, 0)
xs.append(x)
ys.append(y)
for _ in range(epochs):
for xs_img, ys_img in zip(xs, ys):
yield (xs_img, ys_img)
###Output
_____no_output_____
###Markdown
Create the Model
###Code
def create_encoder(input_shape):
inp = Input(shape=input_shape)
x = Conv2D(filters=64, kernel_size=(3,3), strides=(1,1),
input_shape=input_shape, activation="relu", padding="same")(inp)
x = BatchNormalization()(x)
x = MaxPooling2D(pool_size=(2,2))(x)
x = Conv2D(filters=32, kernel_size=(3,3), strides=(1,1),
activation="relu", padding="same")(x)
x = BatchNormalization()(x)
return inp, x
def create_decoder(inp):
x = Conv2D(filters=32, kernel_size=(3,3), strides=(1,1), activation="relu",
padding="same")(inp)
x = BatchNormalization()(x)
x = UpSampling2D(size=(2,2))(x)
x = Conv2D(filters=64, kernel_size=(3,3), strides=(1,1),
activation="relu", padding="same")(x)
x = BatchNormalization()(x)
x = Conv2D(filters=1, kernel_size=(1,1), strides=(1,1),
activation="sigmoid", padding="same")(x)
x = BatchNormalization()(x)
return inp, x
def create_autoencoder(input_shape):
enc_inp, encoder = create_encoder(input_shape)
dec_inp, autoencoder = create_decoder(encoder)
model = Model(inputs=[enc_inp], outputs=[autoencoder], name='AutoEncoder')
return model
model = create_autoencoder((height, width, 1))
model.summary()
model.compile(optimizer='adam', loss='mse')
epochs = 20
batch_size = 8
samples = len(x_train)
validation_samples = len(x_valid)
train_generator = model_train_generator(x_train, y_train, epochs=epochs, batch_size=batch_size, resize_shape=(height, width))
valid_generator = model_valid_generator(x_valid, y_valid, epochs=epochs, resize_shape=(height, width))
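# Optional check (added sketch, commented out so it does not consume a batch): pulling
# one item from the training generator shows the shapes the model will see --
# (batch_size, height, width, 1) for both the noisy input and the clean target.
# xs_batch, ys_batch = next(train_generator)
# print(xs_batch.shape, ys_batch.shape)   # e.g. (8, 260, 540, 1) (8, 260, 540, 1)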
###Output
_____no_output_____
###Markdown
Train the AutoEncoder Model
###Code
hist_obj = model.fit_generator(train_generator, validation_data=valid_generator, validation_steps=validation_samples, steps_per_epoch=samples, epochs=epochs, shuffle=True)
hist_pd = pd.DataFrame(hist_obj.history, index=np.arange(1, len(hist_obj.history['loss'])+1))
hist_pd.index.name = 'epoch'
sns.lineplot(data=hist_pd)
model_name = "model.h5"
model.save(model_name)
# model = load_model(model_name)
###Output
_____no_output_____
###Markdown
Testing Accuracy
###Code
def test_generator(x_test, resize_shape):
for sample in x_test:
img = cv2.imread(sample, cv2.IMREAD_GRAYSCALE) / 255.0
res_img = cv2.resize(img, resize_shape[::-1], interpolation=cv2.INTER_AREA)
res_img = np.expand_dims(res_img, 0)
res_img = np.expand_dims(res_img, 3)
np_img = np.array(res_img)
yield (np_img, np_img)
steps = len(x_test)
test_gen = test_generator(x_test, input_shape)
loss = model.evaluate_generator(test_gen, steps=steps)
print("MSE Loss:", loss)
###Output
MSE Loss: 0.07084273546934128
###Markdown
Sample Prediction
###Code
img = cv2.imread(x_test[0], cv2.IMREAD_GRAYSCALE)
img = cv2.resize(img, input_shape[::-1], interpolation=cv2.INTER_AREA)
imshow(img, cmap='gray')
def make_prediction(img):
processed = img / 255.0
processed = np.expand_dims(processed, 0)
processed = np.expand_dims(processed, 3)
pred = model.predict(processed)
pred = np.squeeze(pred, 3)
pred = np.squeeze(pred, 0)
out_img = pred * 255
out_img[out_img > 255.0] = 255.0
out_img = out_img.astype(np.uint8)
return out_img
def path_leaf(path):
head, tail = ntpath.split(path)
return tail or ntpath.basename(head)
pred = make_prediction(img)
imshow(pred, cmap='gray')
output_dir = 'test_preds'
if not os.path.exists(output_dir):
os.makedirs(output_dir)
for x_test_file in x_test:
img = cv2.imread(x_test_file, cv2.IMREAD_GRAYSCALE)
img = cv2.resize(img, input_shape[::-1], interpolation=cv2.INTER_AREA)
pred = make_prediction(img)
filename = path_leaf(x_test_file)
filepath = os.path.join(output_dir, filename)
cv2.imwrite(filepath, pred)
###Output
_____no_output_____ |
docs/notebooks/f16-shield.ipynb | ###Markdown
CSAF F16 ExampleThis notebook illustrates how to run CSAF on a model of the F16. Each system in CSAF is comprised of a set of components that communicate with each other by sending messages defined in the ROS format (http://wiki.ros.org/Messages). Creating messagesBefore we can create components, we need to define the message formats that a component uses to communicate over. In this example, we'll look at the messages used by the F16 low-level controller (LLC). It receives state from the F16 plant and outputs a set of control signals. The cell below loads the state output message from the F16 plant. The `version_major`, `version_minor`, `topic` and `time` fields are required for all CSAF messages. The remainder of the values are state variables in the aircraft model. For example, `vt` is the air speed, `theta` is the pitch and `psi` is the yaw.
###Code
!cat /csaf-system/components/msg/f16plant_state.msg
###Output
_____no_output_____
###Markdown
Now let's look at the message used to capture control signals from the low-level controller. In addition to the standard fields, the message defines `delta_e` for the elevator, `delta_a` for the ailerons, `delta_r` for the rudder and `throttle`.
###Code
!cat /csaf-system/components/msg/f16llc_output.msg
###Output
_____no_output_____
###Markdown
Creating componentsA component in CSAF is defined by a TOML configuration file and a model file. The configuration file defines the messages a component consumes, the messages it produces and the parameters of the model. Below is the configuration file for the F16 low-level controller.
###Code
!cat /csaf-system/components/f16llc.toml
###Output
_____no_output_____
###Markdown
Next we need to define the actual implementation of the low-level controller component. CSAF provides a very concise mechanism for doing so. All of the logic needed to generate, serialize and transport ROS messages is handled by the framework itself. This allows component implementations to focus on the core control logic by defining one or more of the following methods `model_output`, `model_state_update`, `model_info` and `model_update`. The full implementation for LLC is below.
###Code
!cat /csaf-system/components/f16llc.py
###Output
_____no_output_____
###Markdown
Creating a control systemFrom collection of components, we can build a full control system. The control system is again defined by a simple TOML configuration that describes the interconnections between components.
###Code
!cat /csaf-system/f16_shield_config.toml
###Output
_____no_output_____
###Markdown
Loading the configuration
###Code
import csaf.config as cconf
import csaf.system as csys
# create a csaf configuration out of toml
my_conf = cconf.SystemConfig.from_toml("/csaf-system/f16_shield_config.toml")
###Output
_____no_output_____
###Markdown
Display the system topology
###Code
from IPython.display import Image
import pathlib
plot_fname = f"pub-sub-plot.png"
# plot configuration pub/sub diagram as a file -- proj specicies a dot executbale and -Gdpi is a valid dot
# argument to change the image resolution
my_conf.plot_config(fname=pathlib.Path(plot_fname).resolve(), prog=["dot", "-Gdpi=400"])
# display written file to notebook
Image(plot_fname, height=600)
###Output
_____no_output_____
###Markdown
Simulating the system
###Code
# create pub/sub components out of the configuration
my_system = csys.System.from_config(my_conf)
simulation_timespan = [0, 35.0]
# simulate and collect time traces out of the components
trajs = my_system.simulate_tspan(simulation_timespan, show_status=True)
# destroy components and unbind all used sockets
my_system.unbind()
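# Optional peek (added sketch): `trajs` maps component names such as "plant" and
# "controller" to their recorded time traces, and each trace exposes `.times` plus
# per-topic arrays like `.states` (used by the plotting cells below). This assumes the
# same dict-like access used later in the notebook.
print(len(trajs["plant"].times), "samples recorded for the plant")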
###Output
_____no_output_____
###Markdown
F16 flight animation
###Code
# if you want to use the notebook engine needed to play animations in the notebook
# uncomment the line below:
# %matplotlib notebook
import sys
sys.path.append('/csaf-system')
from f16_plot import plot3d_anim
video = plot3d_anim(trajs["plant"])
# if the animation doesn't play well--translate the animation
# object to a JS/HTML video and display it
from IPython.display import HTML
HTML(video.to_jshtml())
###Output
_____no_output_____
###Markdown
2D plot of the F16 model
###Code
from f16_plot import plot_simple
plot_simple(trajs)
###Output
_____no_output_____
###Markdown
Plot the state variables
###Code
import matplotlib.pyplot as plt
import numpy as np
# select component to plot
component_name = "plant"
# select topic of component to plot
topic_name = "states"
if component_name in trajs:
# time trace of component
ttrace = trajs[component_name]
if not hasattr(ttrace, topic_name):
raise RuntimeError(f"ERROR! Invalid topic name {topic_name} for component {component_name}")
# collect time and data to plot
t, data = ttrace.times, np.array(getattr(ttrace, topic_name))
# number of dimensions -> number of plots
n_dim = data.shape[1]
# get full component name
component_vname = my_conf.get_component_settings(component_name)["config"]["system_name"]
# get names of topic from ROSmsg -- skip boilerplate
names = my_conf.get_msg_setting(component_name, topic_name, "msg").fields_no_header
# create matplotlib axes and plot
fig, axs = plt.subplots(figsize=(12/2, n_dim*4/2),nrows=n_dim, sharex=True)
for idx, ax in enumerate(axs):
# plot formatting
        ax.plot(t, data[:, idx], linewidth=2)
ax.set_ylabel(names[idx])
ax.set_xlim(min(t), max(t))
ax.grid()
# set figure title
axs[0].set_title(f"{component_vname} - Topic \"{topic_name.title()}\"")
# on last axis, set the time label
axs[-1].set_xlabel("time (s)")
else:
raise RuntimeError(f"ERROR! Invalid component name {component_name}")
###Output
_____no_output_____
###Markdown
Replay simulation in FlightGearIf you have [FlightGear installed](file://../../examples/f16/flightgear/FLIGHTGEAR.md), start it on your host with `examples/f16/flightgear/launch_fg.sh`Once FlightGear fully loads, we can replay the simulation in real-time.
###Code
# uncomment to run
#from f16_plot import render_in_flightgear
#render_in_flightgear(trajs)
###Output
_____no_output_____
###Markdown
Get Component SignalsThis shows how to get the input and output signals associated with a component identified by its component name. Here `get_component_io` is used to get the input topics of the controller and then calculate what the input buffer of the component is at each time step.
###Code
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import csaf.trace as ctr
# get io of controller
cio = ctr.get_component_io("controller", trajs, my_conf)
# transform f16 state for controller input
xequil = np.array([502.0, 0.03887505597600522, 0.0, 0.0, 0.03887505597600522, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1000.0, 9.05666543872074])
uequil = np.array([0.13946204864060271, -0.7495784725828754, 0.0, 0.0])
x_delta = np.array(np.hstack((trajs["plant"].states.copy(), trajs["controller"].states.copy())))
x_delta[:, :len(xequil)] -= np.tile(xequil, (len(trajs["plant"].states), 1))
# llc f16 inputs/ outputs
outs = np.array(cio["outputs"])[:, 1:4]
ins = x_delta[:, (1, 7, 13, 2, 6, 8, 14, 15)]
# uncomment to save to txt files
#np.savetxt("output_csaf.txt", outs)
#np.savetxt("input_csaf.txt", ins)
plt.title("Controller Input Signals for Autopilot")
plt.plot(outs[:, :1])
plt.grid()
plt.show()
# put in dataframe
input_fields = [(p, t) for p, t in my_conf._config["components"]["controller"]["sub"]]
in_fields = np.concatenate([[f"input-{p}-{f}" for f in my_conf.get_msg_setting(p, t, "msg").fields_no_header] for p, t in input_fields])
out_fields = [f"output-controller-{f}" for f in my_conf.get_msg_setting("controller", "outputs", "msg").fields_no_header]
columns = np.concatenate([["times"], in_fields, out_fields])
df = pd.DataFrame(columns=columns, data=np.hstack((cio["times"][:, np.newaxis], cio["inputs"], cio["outputs"])))
#df.to_csv("controller-llc-traces.csv", index=False)
###Output
_____no_output_____ |
sagetrain/RoboNote.ipynb | ###Markdown
Installing donkeyThe Donkey library has several components.It is first and foremost a python library installed where your other python libraries are (e.g. system python or virtualenv). After installation, you can import it as any normal python library:```pythonimport donkeycar as dk```It also has a CLI with tools mainly used to aid training:```bashdonkey --help```A `Vehicle` application, installed to the `d2` directory by default. This is where you'll find the `manage.py` script, which is used for both **driving** and **training**.```bash~/d2/manage.py --help``` Install
###Code
# Make sure we're in SageMaker root
%cd ~/SageMaker
# Install
!pip install donkeycar==2.2.4
# Create a new car using the library CLI
!donkey createcar --path ~/d2
###Output
_____no_output_____
###Markdown
Installation is now finished. You'll find the `manage.py` script in the `d2` directory as usual.**Note** that you need to install the Donkey library for every new [Jupyter notebook](http://jupyter.org/) you create. Cloning the git repository, however, only needs to be done once per *Notebook instance*.
###Code
!aws s3 cp s3://xxxx/xxxx.tar.gz ~/d2/data/
###Output
_____no_output_____
###Markdown
Unzip Data File
###Code
!tar -xvzf ~/d2/data/xxxx.tar.gz
###Output
_____no_output_____
###Markdown
Train Make sure to replace the 'xxx' and 'zzz' with your tub name and with your model name
###Code
!python ~/d2/manage.py train --tub ~/SageMaker/xxxx --model ~/d2/models/zzz
###Output
_____no_output_____
###Markdown
Upload Model to S3 Make sure to replace 'zzz' with your model name and 'xxx' with your S3 path for your model
###Code
!aws s3 cp ~/d2/models/zzz s3://xxx/xxx/zzz
###Output
_____no_output_____ |
Text-Summarisation-for-Covid-Data.ipynb | ###Markdown
Some basic points about NLP Natural Language Processing or NLP is basically a sub-field of artificial intelligence in which we make computer systems learn, analyse and generate natural language. NLP consists of NLU and NLG: NLU - Natural Language Understanding, NLG - Natural Language Generation. There are 5 different phases across NLU and NLG. Lexical Processing:- tokenisation, morphological analysis, processing of individual words. Syntactic Processing:- internal representation of the text, for example a parse tree representation. Semantic Processing:- clarifying the meaning of a word; the meaning may differ with context, for example 'Federal Bank' vs. the 'bank' of a river. Discourse/Pragmatic Processing:- Discourse deals with how sentences relate to each other and Pragmatic deals with interpreting meaning in context (eg John is a monk. He goes to Church daily. He is a Catholic.). Text Summarisation System Condensing a longer document into a short, concise document without losing the core information. Based on input, it can be a single-document or multi-document summary. Based on purpose: some summaries are generic while others are domain-specific (like summarising a covid-19 dataset). Query based: the user asks questions. Extractive (just retains the main sentences) and Abstractive (writes the summary in its own words). Starting with Code (Explained with comments) Text summarisation by frequency of words
###Code
#Importing the Libraries
#NLTK-natural language toolkit for natural language processing
#CORPUS- Collection of Documents, eg Wall Street Journal CORPUS
#using stop-words CORPUS, stop-words are words like of, are, is etc,
#which occur more frequently and have no semantic meaning
#We need to tokenize the words because we need to compute the frequency of each word
import nltk
nltk.download('stopwords')
nltk.download('punkt')
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize,sent_tokenize
#import documents
f = open(('./trial_covid_dataset.txt'),"r")
text = f.read()
f.close()
#So, we have stored the document's text into text variable
#Preprocessing the data : Very Important to avoid overfit or underfit
#Step-1 We tokenize each sentence
sent_tokens = nltk.sent_tokenize(text)
word_tokens = nltk.word_tokenize(text)
#Step-2 We convert to lower case
word_tokens_lower = [word.lower() for word in word_tokens]
#Step-3 remove stopwords
stopWords = list(set(stopwords.words('english'))) #getting all stopwords of English and storing in StopWords
word_tokens_refined = [word for word in word_tokens_lower if word not in stopWords]
FreqTable = {} #defining a dictionary
for word in word_tokens_refined:
if word in FreqTable:
FreqTable[word]+=1
else:
FreqTable[word]=1
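# Equivalent alternative (added sketch, commented out): the same frequency table can be
# built in one line with collections.Counter; the explicit loop above is kept for clarity.
# from collections import Counter
# FreqTable = dict(Counter(word_tokens_refined))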
##This is where the major part of the summarisation happens
sentence_value = {}
for sentence in sent_tokens:
sentence_value[sentence]=0
for word,freq in FreqTable.items():
if word in sentence.lower():
sentence_value[sentence]+=freq
#How it works on the basis of frequency can be visualised in the console
#For finding the score of each sentence
#Finding the average
sum = 0
for sentence in sentence_value:
sum = sum + sentence_value[sentence]
average = sum/len(sentence_value)
#We'll be using it to generate the summary
summary = ''
for sentence in sent_tokens:
if sentence_value[sentence]>average:
summary=summary+sentence
print(summary)
###Output
Success from two leading coronavirus vaccine programs likely means other frontrunners will also show strong protection against COVID-19, Bill Gates said Tuesday.The fact that two coronavirus vaccines recently showed strong protection against COVID-19 bodes well for other leading programs led by AstraZeneca, Novavax, and Johnson & Johnson, Bill Gates said Tuesday.The billionaire Microsoft founder and philanthropist said it will be easier to boost manufacturing and distribute these other shots to the entire world, particularly developing nations.The vaccine space has seen a flurry of good news in recent days, marked by overwhelming success in late-stage trials by both Pfizer and Moderna."With the very good news from Pfizer and Moderna, we think it's now likely that AstraZeneca, Novavax, and Johnson & Johnson will also likely show very strong efficacy," Gates told journalist Andrew Ross Sorkin.
|
1.Study/2. with computer/4.Programming/2.Python/3. Study/02_Numpy_Pandas/exam_01_correlation_coefficient_pearson.ipynb | ###Markdown
Correlation Coefficient Index 1. Variance 1. Covariance 1. Correlation Coefficient 1. Coefficient of Determination 1. Premier League Data Correlation Analysis
###Code
import numpy as np
###Output
_____no_output_____
###Markdown
Create sample data
###Code
data1 = np.array([80, 85, 100, 90, 95])
data2 = np.array([70, 80, 100, 95, 95])
###Output
_____no_output_____
###Markdown
2. Variance - indicates the degree of dispersion of a single variable - the mean of the squared deviations $ variance = \frac{\sum_{i=1}^n{(x_i-\bar{x})^2}}{n}, (\bar{x}: mean) $
###Code
# variance code
def variance(data):
avg = np.average(data)
var = 0
for num in data:
var += (num - avg) ** 2
return var / len(data)
variance(data1), variance(data2), variance(data1) ** 0.5, variance(data2) ** 0.5
np.var(data1), np.var(data2), np.std(data1), np.std(data2)
###Output
_____no_output_____
###Markdown
Performance comparison of the plain Python function and the numpy function
###Code
p_data1 = np.random.randint(60, 100, int(1E5))
p_data2 = np.random.randint(60, 100, int(1E5))
# plain Python function
%%time
variance(p_data1), variance(p_data2)
# numpy
%%time
np.var(p_data1), np.var(p_data2)
###Output
CPU times: user 1.71 ms, sys: 3.43 ms, total: 5.14 ms
Wall time: 2.94 ms
###Markdown
3. Covariance - indicates the degree of correlation between 2 random variables - the mean of the products of deviations - it can show the direction of the relationship but has limits in expressing its strength - it also has the drawback that its value differs greatly with the scale of the sample data $ covariance = \frac{\sum_{i=1}^{n}{(x_i-\bar{x})(y_i-\bar{y})}}{n}, (\bar{x}: mean of x, \bar{y}: mean of y) $
###Code
# covariance function
data1 = np.array([80, 85, 100, 90, 95])
data2 = np.array([70, 80, 100, 95, 95])
np.cov(data1, data2)[0, 1]
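# Added sketch: a manual implementation that mirrors the covariance formula above,
# analogous to the variance() function defined earlier. It matches np.cov(..., ddof=0);
# note that np.cov itself defaults to the sample covariance (ddof=1).
def covariance(x, y):
    x_avg, y_avg = np.average(x), np.average(y)
    cov = 0
    for xi, yi in zip(x, y):
        cov += (xi - x_avg) * (yi - y_avg)
    return cov / len(x)
covariance(data1, data2)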
data3 = np.array([80, 85, 100, 90, 95])
data4 = np.array([100, 90, 70, 90, 80])
np.cov(data3, data4)[0, 1]
data5 = np.array([800, 850, 1000, 900, 950])
data6 = np.array([1000, 900, 700, 900, 800])
np.cov(data5, data6)[0, 1]
###Output
_____no_output_____
###Markdown
4. Correlation coefficient - created to overcome the limitations of covariance - takes values between -1 and 1, and the closer to 0, the weaker the correlation - dividing by the square root of the product of the variance of x and the variance of y means the value approaches 0 as the variation of x or y grows - https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.corrcoef.html $ correlation-coefficient = \frac{covariance}{\sqrt{{x\ variance} \cdot {y\ variance}}} $ The final correlation coefficient: $ r = \frac{\sum(x-\bar{x})(y-\bar{y})}{\sqrt{{\sum(x-\bar{x})^2}\cdot{\sum(y-\bar{y})^2}}} $
###Code
# correlation coefficient function
np.corrcoef(data1, data2)[0,1],\
np.corrcoef(data3, data4)[0,1],\
np.corrcoef(data5, data6)[0,1]
###Output
_____no_output_____
###Markdown
5. Coefficient of determination (R-squared) - the degree to which y can be predicted from x - the square of the correlation coefficient (making the correlation coefficient positive) - the larger the value, the more accurately y can be predicted through regression analysis
###Code
np.corrcoef(data1, data2)[0,1] ** 2,\
np.corrcoef(data1, data4)[0,1] ** 2
###Output
_____no_output_____
###Markdown
6. Premier League data correlation analysis - From the 2016 Premier League results, which of the goals-scored and goals-conceded data had more influence on points?
###Code
import pandas as pd
!ls datas
df = pd.read_csv("datas/premierleague.csv")
df.tail()
# goals scored (gf)
gf = np.array(df["gf"])
gf
# goals conceded (ga)
ga = np.array(df["ga"])
ga
# points
points = np.array(df["points"])
points
data1, data2 = np.corrcoef(gf, points)[0, 1] ** 2, np.corrcoef(ga, points)[0, 1] ** 2
data1, data2
round(data1, 2), round(data2, 2)
###Output
_____no_output_____ |