prompt | completion | api
---|---|---
stringlengths 19 to 1.03M | stringlengths 4 to 2.12k | stringlengths 8 to 90
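Each row below pairs a truncated code prompt with its completion and the pandas API it exercises. As a hedged sketch (the file name and CSV layout here are assumptions, not part of this dump), such a table can be explored with pandas itself:
import pandas as pd

# Hypothetical export of this table; the real storage format may differ.
rows = pd.read_csv("pandas_api_completions.csv")  # assumed columns: prompt, completion, api

# Which pandas APIs show up most often as completion targets?
print(rows["api"].value_counts().head(10))

# Keep only the rows whose completion exercises pandas.read_csv.
read_csv_rows = rows[rows["api"] == "pandas.read_csv"]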
import pytest
import pytz
import dateutil
import numpy as np
from datetime import datetime
from dateutil.tz import tzlocal
import pandas as pd
import pandas.util.testing as tm
from pandas import (DatetimeIndex, date_range, Series, NaT, Index, Timestamp,
Int64Index, Period)
class TestDatetimeIndex(object):
def test_astype(self):
# GH 13149, GH 13209
idx = DatetimeIndex(['2016-05-16', 'NaT', NaT, np.NaN])
result = idx.astype(object)
expected = Index([Timestamp('2016-05-16')] + [NaT] * 3, dtype=object)
tm.assert_index_equal(result, expected)
result = idx.astype(int)
expected = Int64Index([1463356800000000000] +
[-9223372036854775808] * 3, dtype=np.int64)
tm.assert_index_equal(result, expected)
rng = date_range('1/1/2000', periods=10)
result = rng.astype('i8')
tm.assert_index_equal(result, Index(rng.asi8))
tm.assert_numpy_array_equal(result.values, rng.asi8)
def test_astype_with_tz(self):
# with tz
rng = date_range('1/1/2000', periods=10, tz='US/Eastern')
result = rng.astype('datetime64[ns]')
expected = (date_range('1/1/2000', periods=10,
tz='US/Eastern')
.tz_convert('UTC').tz_localize(None))
tm.assert_index_equal(result, expected)
# BUG#10442 : testing astype(str) is correct for Series/DatetimeIndex
result = pd.Series(pd.date_range('2012-01-01', periods=3)).astype(str)
expected = pd.Series(
['2012-01-01', '2012-01-02', '2012-01-03'], dtype=object)
tm.assert_series_equal(result, expected)
result = Series(pd.date_range('2012-01-01', periods=3,
tz='US/Eastern')).astype(str)
expected = Series(['2012-01-01 00:00:00-05:00',
'2012-01-02 00:00:00-05:00',
'2012-01-03 00:00:00-05:00'],
dtype=object)
tm.assert_series_equal(result, expected)
# GH 18951: tz-aware to tz-aware
idx = date_range('20170101', periods=4, tz='US/Pacific')
result = idx.astype('datetime64[ns, US/Eastern]')
expected = date_range('20170101 03:00:00', periods=4, tz='US/Eastern')
tm.assert_index_equal(result, expected)
# GH 18951: tz-naive to tz-aware
idx = date_range('20170101', periods=4)
result = idx.astype('datetime64[ns, US/Eastern]')
expected = date_range('20170101', periods=4, tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_astype_str_compat(self):
# GH 13149, GH 13209
# verify that we are returning NaT as a string (and not unicode)
idx = DatetimeIndex(['2016-05-16', 'NaT', NaT, np.NaN])
result = idx.astype(str)
expected = | Index(['2016-05-16', 'NaT', 'NaT', 'NaT'], dtype=object) | pandas.Index |
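The row above truncates at the expected value of idx.astype(str); its completion is pandas.Index. A minimal, self-contained sketch of the behaviour being tested, with the values copied from the test itself:
import numpy as np
import pandas as pd

idx = pd.DatetimeIndex(['2016-05-16', 'NaT', pd.NaT, np.nan])
# Casting to str renders every missing timestamp as the literal string 'NaT',
# so the expected result is Index(['2016-05-16', 'NaT', 'NaT', 'NaT'], dtype=object).
print(idx.astype(str))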
import pandas as pd
import numpy as np
import pycountry_convert as pc
import pycountry
import os
from iso3166 import countries
PATH_AS_RELATIONSHIPS = '../Datasets/AS-relationships/20210701.as-rel2.txt'
NODE2VEC_EMBEDDINGS = '../Check_for_improvements/Embeddings/Node2Vec_embeddings.emb'
DEEPWALK_EMBEDDINGS_128 = '../Check_for_improvements/Embeddings/DeepWalk_128.csv'
DIFF2VEC_EMBEDDINGS_128 = '../Check_for_improvements/Embeddings/Diff2Vec_128.csv'
NETMF_EMBEDDINGS_128 = '../Check_for_improvements/Embeddings/NetMF_128.csv'
NODESKETCH_EMBEDDINGS_128 = '../Check_for_improvements/Embeddings/NodeSketch_128.csv'
WALKLETS_EMBEDDINGS_256 = '../Check_for_improvements/Embeddings/Walklets_256.csv'
NODE2VEC_EMBEDDINGS_64 = '../Check_for_improvements/Embeddings/Node2Vec_embeddings.emb'
NODE2VEC_LOCAL_EMBEDDINGS_64 = '../Check_for_improvements/Embeddings/Node2Vec_p2_64.csv'
NODE2VEC_GLOBAL_EMBEDDINGS_64 = '../Check_for_improvements/Embeddings/Node2Vec_q2_64.csv'
DIFF2VEC_EMBEDDINGS_64 = '../Check_for_improvements/Embeddings/Diff2Vec_64.csv'
NETMF_EMBEDDINGS_64 = '../Check_for_improvements/Embeddings/NetMF_64.csv'
NODESKETCH_EMBEDDINGS_64 = '../Check_for_improvements/Embeddings/NodeSketch_64.csv'
NODE2VEC_WL5_E3_LOCAL = '../Check_for_improvements/Embeddings/Node2Vec_64_wl5_ws2_ep3_local.csv'
NODE2VEC_WL5_E3_GLOBAL = '../Check_for_improvements/Embeddings/Node2Vec_64_wl5_ws2_ep3_global.csv'
NODE2VEC_64_WL5_E1_GLOBAL = '../Check_for_improvements/Embeddings/Node2Vec_64_wl5_ws2_global.csv'
BGP2VEC_64 = '../Check_for_improvements/Embeddings/Node2Vec_bgp2Vec.csv'
BGP2VEC_32 = '../Check_for_improvements/Embeddings/BGP2VEC_32'
WALKLETS_EMBEDDINGS_128 = '../Check_for_improvements/Embeddings/Walklets_128.csv'
STORE_CSV_TO_FOLDER = '../Embeddings_Visualization/StorePreprocessedEmb'
def country_flag(data):
"""
:param data: A row of the dataframe combining 3 datasets; its 'AS_rank_iso' field holds the 2-letter country code
:return: The full name of the country matching the code, or 'Unknown Code' if it cannot be resolved
"""
list_alpha_2 = [i.alpha2 for i in list(countries)]
if data['AS_rank_iso'] in list_alpha_2:
return pycountry.countries.get(alpha_2=data['AS_rank_iso']).name
else:
return 'Unknown Code'
def country_to_continent(country_name):
"""
This function takes a country name as input and returns the continent that the given country belongs to.
:param country_name: Contains the name of a country
:return: The continent name, or np.nan if the lookup fails
"""
try:
country_alpha2 = pc.country_name_to_country_alpha2(country_name)
country_continent_code = pc.country_alpha2_to_continent_code(country_alpha2)
country_continent_name = pc.convert_continent_code_to_continent_name(country_continent_code)
return country_continent_name
except:
return np.nan
def convert_country_to_continent(data):
"""
The function converts the ISO alpha-2 code (example: US) to the full country name and then maps each country to its continent. Requires the iso3166 package.
:param data: Contains a dataframe combining 4 datasets
:return: The continent for each country
"""
data['AS_rank_iso'] = data.apply(country_flag, axis=1)
temp_list = []
for i in range(0, len(data)):
temp_list.append(country_to_continent(data['AS_rank_iso'][i]))
df = pd.DataFrame(temp_list, columns=['AS_rank_iso'])
data['AS_rank_iso'] = df['AS_rank_iso']
return data['AS_rank_iso']
def merge_datasets(final_df, embeddings_df):
"""
:param final_df: It's the dataset that is generated in the Analysis/aggregate_data folder
:param embeddings_df: Contains pretrained embeddings
:return: A new merged dataset (containing improvement_score and the embedding of each ASN)
"""
print(final_df['ASN'].isin(embeddings_df['ASN']).value_counts())
mergedStuff = pd.merge(embeddings_df, final_df, on=['ASN'], how='left')
mergedStuff.replace('', np.nan, inplace=True)
return mergedStuff
def get_path_and_filename(model, dimensions):
"""
:param model: The model's name
:param dimensions: The number of dimensions of the given model
:return: The full path (folder plus file name) where the preprocessed CSV will be stored
"""
file_name = 'Preprocessed' + str(model) + str(dimensions) + f'.csv'
outdir = STORE_CSV_TO_FOLDER
if not os.path.exists(outdir):
os.mkdir(outdir)
full_name = os.path.join(outdir, file_name)
return full_name
def read_Node2Vec_embeddings_file():
"""
:return: A dataframe containing the ASNs and the embedding of each ASN, created with the Node2Vec algorithm.
"""
emb_df = pd.read_table(NODE2VEC_EMBEDDINGS, skiprows=1, header=None, sep=" ")
# name the columns
rng = range(0, 65)
new_cols = ['dim_' + str(i) for i in rng]
emb_df.columns = new_cols
# rename first column
emb_df.rename(columns={'dim_0': 'ASN'}, inplace=True)
return emb_df
def read_karateClub_embeddings_file(emb, dimensions):
"""
The Karateclub library requires nodes to be labelled with consecutive integers, and its output lists the embeddings
in ascending node order. So in this function we need to reassign each ASN to its own embedding.
:param emb: A dataset containing pretrained embeddings
:param dimensions: The dimensions of the given dataset
:return: A dataframe containing pretrained embeddings
"""
if dimensions == 64:
if emb == 'Diff2Vec':
df = pd.read_csv(DIFF2VEC_EMBEDDINGS_64, sep=',')
elif emb == 'NetMF':
df = pd.read_csv(NETMF_EMBEDDINGS_64, sep=',')
elif emb == 'NodeSketch':
df = pd.read_csv(NODESKETCH_EMBEDDINGS_64, sep=',')
elif emb == 'Walklets':
df = pd.read_csv(WALKLETS_EMBEDDINGS_128, sep=',')
elif emb == 'Node2Vec_Local':
df = pd.read_csv(NODE2VEC_LOCAL_EMBEDDINGS_64, sep=',')
elif emb == 'Node2Vec_Global':
df = | pd.read_csv(NODE2VEC_GLOBAL_EMBEDDINGS_64, sep=',') | pandas.read_csv |
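The docstring of read_karateClub_embeddings_file above notes that KarateClub expects consecutive integer node labels and returns embeddings in ascending node order, so each ASN has to be re-attached to its row. A toy sketch of that remapping (the ASNs and embedding values are made up for illustration):
import pandas as pd

# Node i in the KarateClub graph corresponds to the i-th ASN in this list.
asn_by_node_id = ['AS3356', 'AS1299', 'AS174']
emb = pd.DataFrame([[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]],
                   columns=['dim_1', 'dim_2'])   # rows arrive in node-id order

# Re-attach the real ASN labels so the embeddings can later be merged on 'ASN'.
emb.insert(0, 'ASN', asn_by_node_id)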
# This script runs the RDD models for a paper on the impact of COVID-19 on academic publishing
# Importing required modules
import pandas as pd
import datetime
import numpy as np
import statsmodels.api as stats
from matplotlib import pyplot as plt
import gender_guesser.detector as gender
from ToTeX import restab
# Defining a helper function for identifying COVID-19 related papers
def covid(papers, row):
string = str(papers.Title[row]) + str(papers.Abstract[row]) + str(papers.Keywords[row])
if 'covid' in string.lower():
return 1
else:
return 0
# Defining a helper function for isolating the name of the first author
def first_name(auths):
a = auths.index("'")
try:
b = auths[a+1:].index(' ')
except:
b = auths[a+1:].index("'")
return auths[a+1:b+2]
# Defining a helper function for isolating the national affiliation of the first author
def first_nationality(affils):
if str(affils) == 'nan':
affils = ''
else:
try:
a = affils.index("',")
except:
a = len(affils) - 2
c = affils[:a].count(', ')
for j in range(c):
b = affils[:a].index(', ')
affils = affils[b+2:a]
return affils
# Reading in the data
print('Reading in the data.......')
papers = pd.read_csv('C:/Users/User/Documents/Data/COVID-19/MDPI_data.csv')
# Control for COVID-19 related papers
# Creating the list
print('Creating a flag for COVID-19 related papers.......')
c19 = [covid(papers, row) for row in range(len(papers))]
# Adding COVID data to data set
print('Adding COVID-19 flag to the data set.......')
c19 = pd.Series(c19, name = 'COVID')
papers = pd.concat([papers, c19], axis = 1)
# Checking the number of COVID-19 related papers after the time cut-off as an anecdote:
# Note that this stat does not reflect dropping certain papers due to being published in unestablished journals
post_study_papers = ['lol' for i in range(len(papers)) if datetime.datetime.strptime(papers.Submitted[i], '%Y-%m-%d') > datetime.datetime.strptime('2020-06-30', '%Y-%m-%d')]
poststudy_covid = ['lol' for i in range(len(papers)) if datetime.datetime.strptime(papers.Submitted[i], '%Y-%m-%d') > datetime.datetime.strptime('2020-06-30', '%Y-%m-%d') and papers.COVID[i] == 1]
# Create a list of journals which will be included in the study - those with pubs prior to 2020
print('Removing papers from journals first published post 2020-01-01.......')
journals = []
for journal in papers.Journal.unique():
j = papers[papers.Journal == journal].reset_index()
if datetime.datetime.strptime(min(j.Accepted), '%Y-%m-%d') < datetime.datetime.strptime('2020-01-01', '%Y-%m-%d') and datetime.datetime.strptime(max(j.Accepted), '%Y-%m-%d') > datetime.datetime.strptime('2019-01-01', '%Y-%m-%d'):
journals.append(j.Journal[0])
# Subset data based on journals
df = papers[papers.Journal.isin(journals)].reset_index(drop = True)
# Subset data based on submission date
print('Removing papers from outside of the study time frame.......')
post1812 = [int(datetime.datetime.strptime(df.Submitted[i], '%Y-%m-%d') > datetime.datetime.strptime('2018-12-31', '%Y-%m-%d')) for i in range(len(df))]
pre2007 = [int(datetime.datetime.strptime(df.Submitted[i], '%Y-%m-%d') < datetime.datetime.strptime('2020-07-01', '%Y-%m-%d')) for i in range(len(df))]
study = pd.Series([post1812[i] * pre2007[i] for i in range(len(post1812))], name = 'Study')
df = pd.concat([df, study], axis = 1)
df = df[df.Study == 1].reset_index(drop = True)
# Computing the number of authors
print('Computing the number of authors for each paper.......')
numb_authors = [df.Authors[i].count(',') + 1 for i in range(len(df))]
numb_authors = pd.Series(numb_authors, name = 'Author_Count')
df = pd.concat([df, numb_authors], axis = 1)
# Predict perceived gender of the first author only
print('Predicting the perceived gender of first authors for each paper.......')
gd = gender.Detector()
first_author_gender = [gd.get_gender(first_name(df.Authors[i])) for i in range(len(df))]
first_author_gender = pd.Series(first_author_gender, name = 'Gender')
df = pd.concat([df, first_author_gender], axis = 1)
# Finding the nationality of the first author
print('Finding the nationality of the first author for each paper.......')
first_nat = [first_nationality(df.Affiliations[i]) for i in range(len(df))]
first_nat = pd.Series(first_nat, name = 'Nationality')
df = pd.concat([df, first_nat], axis = 1)
# Estimating the percentage of male / female authors for each paper
# Defining a helper function for the main function below
def inp_trimmer(inp):
a = inp.index("'") # mimic first_name
try:
b = inp[a+1:].index(' ') # mimic first_name
except:
b = inp[a+1:].index("'") # mimic first_name
inp = inp[b+3:] # shorten inp
try:
c = inp.index("',") # find next name or end of inp
inp = inp[c+3:]
except:
inp = ']'
return inp
# Defining a function to parse names and run them through the existing function for first author names
def all_auths(inp,nu):
if nu % 100 == 0: # Just a visual cue because this isn't particularly fast
print('Working on records ' + str(nu+1) + ' through ' + str(nu+101) + ' of 167,703.......')
gd = gender.Detector()
listicle = []
while inp != ']':
listicle.append(gd.get_gender(first_name(inp)))
inp = inp_trimmer(inp)
return listicle
# Applying this function to predict the perceived genders of all authors
# This is currently commented out because it takes quite a long time to run and too many authors are categorized as 'unknown'
#all_genders = [all_auths(df.Authors[i].replace('"',"'"),i) for i in range(len(df))]
# Below are lists of countries categorized by the World Bank Analytical Classification quartiles
high = ['Andorra', 'Antigua and Barbuda', 'Aruba', 'Australia', 'Austria', 'The Bahamas', 'Bahrain',
'Barbados', 'Belgium', 'Bermuda', 'Brunei', 'Canada', 'The Cayman Islands', 'Channel Islands',
'Croatia', 'Cyprus', 'Czech Republic', 'Denmark', 'Equatorial Guinea', 'Estonia', 'Faeroe Islands',
'Finland', 'France', 'French Polynesia', 'Germany', 'Greece', 'Greenland', 'Hong Kong', 'Hungary',
'Iceland', 'Ireland', 'Isle of Man', 'Israel', 'Italy', 'Japan', 'Korea', 'Kuwait', 'Liechtenstein',
'Luxembourg', 'Macao', 'Malta', 'Monaco', 'The Netherlands', 'New Caledonia', 'New Zealand',
'Northern Mariana Islands', 'Norway', 'Oman', 'Portugal', 'Qatar', 'San Marino', 'Saudi Arabia',
'Singapore', 'Slovakia', 'Slovenia', 'Spain', 'Sweden', 'Switzerland', 'Taiwan', 'Trinidad and Tobago',
'United Arab Emirates', 'UK', 'USA']
upper_mid = ['Algeria', 'American Samoa', 'Argentina', 'Belarus', 'Bosnia and Herzegovina', 'Botswana', 'Brazil',
'Bulgaria', 'Chile', 'Colombia', 'Costa Rica', 'Cuba', 'Dominica', 'Dominican Republic', 'Fiji',
'Gabon', 'Grenada', 'Jamaica', 'Kazakhstan', 'Latvia', 'Lebanon', 'Libya', 'Lithuania', 'Macedonia',
'Malaysia', 'Mauritius', 'Mexico', 'Montenegro', 'Namibia', 'Palau', 'Panama', 'Peru', 'Poland',
'Romania', 'Russia', 'Serbia', 'Seychelles', 'South Africa', 'Saint Kitts and Nevis', 'Saint Lucia',
'Saint Vincent and the Grenadines', 'Suriname', 'Turkey', 'Uruguay', 'Venezuela']
lower_mid = ['Albania', 'Angola', 'Armenia', 'Azerbaijan', 'Belize', 'Bhutan', 'Bolivia', 'Cabo Verde', 'Cameroon',
'China', 'Republic of the Congo', 'Ivory Coast', 'Djibouti', 'Ecuador', 'Egypt', 'El Salvador', 'Georgia',
'Guatemala', 'Guyana', 'Honduras', 'India', 'Indonesia', 'Iran', 'Iraq', 'Jordan', 'Kiribati',
'Kosovo', 'Lesotho', 'Maldives', 'Marshall Islands', 'Micronesia', 'Moldova', 'Mongolia', 'Morocco',
'Nicaragua', 'Nigeria', 'Pakistan', 'Papua New Guinea', 'Paraguay', 'Philippines', 'Samoa',
'Sao Tome and Principe', 'Solomon Islands', 'Sri Lanka', 'Sudan', 'Eswatini', 'Syria', 'Palestine',
'Thailand', 'Timor-Leste', 'Tonga', 'Tunisia', 'Turkmenistan', 'Ukraine', 'Vanuatu', 'West Bank and Gaza']
low = ['Afghanistan', 'Bangladesh', 'Benin', 'Burkina Faso', 'Burundi', 'Cambodia', 'Central African Republic',
'Chad', 'Comoros', 'Democratic Republic of the Congo', 'Eritrea', 'Ethiopia', 'The Gambia', 'Ghana', 'Guinea',
'Guinea-Bissau', 'Haiti', 'Kenya', 'Korea, Dem. Rep.', 'Kyrgyzstan', 'Laos', 'Liberia', 'Madagascar', 'Malawi',
'Mali', 'Mauritania', 'Mozambique', 'Myanmar', 'Nepal', 'Niger', 'Rwanda', 'Senegal', 'Sierra Leone', 'Somalia',
'Tajikistan', 'Tanzania', 'Togo', 'Uganda', 'Uzbekistan', 'Vietnam', 'Yemen', 'Zambia', 'Zimbabwe']
# Defining a dictionary for determining the WBAC quartile
qh = {h:'q1' for h in high}
qu = {h:'q2' for h in upper_mid}
qm = {h:'q3' for h in lower_mid}
ql = {h:'q4' for h in low}
qd = {**qh, **qu, **qm, **ql}
# Defining a function for determining the quartile of the first author's nationality
def f_quart(inp):
try:
res = qd[inp]
except:
res = ''
return res
# Determining the quartile of the affiliation of the first author
fq = [f_quart(x) for x in df.Nationality]
fq = pd.Series(fq, name = 'First_Quartile')
df = pd.concat([df, fq], axis = 1)
# Defining a function to determine the 'top quartile' for each paper
def quart(inp,nu):
if nu % 100 == 0: # Just a visual cue because this isn't particularly fast
print('Working on records ' + str(nu+1) + ' through ' + str(nu+101) + ' of 167,703.......')
listicle = []
while inp != ']':
try:
listicle.append(f_quart(first_nationality(inp)))
inp = inp_trimmer(inp)
except:
inp = ']'
if 'q1' in listicle:
res = 'q1'
elif 'q2' in listicle:
res = 'q2'
elif 'q3' in listicle:
res = 'q3'
else:
res = 'q4'
return res
# Determining the 'top quartile' present in each paper
print('Determining the top WBAC quartile present in each paper.......')
quarts = [quart(df.Affiliations[i],i) for i in range(len(df.Affiliations))]
# An indicator variable for whether or not a Q1 (high) nation contributed
q1 = [1 if q == 'q1' else 0 for q in quarts]
# Appending these two lists to the main df
quarts = pd.Series(quarts, name = 'Top_Quartile')
q1 = pd.Series(q1, name = 'Q1')
df = pd.concat([df, quarts, q1], axis = 1)
# 5443 of 167,703 had no discernible Nationality and are dropped here
df = df[df.First_Quartile != ''].reset_index(drop = True)
# Checking the number of COVID-19 related papers after the time cut-off as an anecdote:
# Note that this stat does now reflect dropping certain papers due to being published in unestablished journals
post_study_papers2 = ['lol' for i in range(len(papers)) if datetime.datetime.strptime(papers.Submitted[i], '%Y-%m-%d') > datetime.datetime.strptime('2020-06-30', '%Y-%m-%d')]
poststudy_covid2 = ['lol' for i in range(len(papers)) if datetime.datetime.strptime(papers.Submitted[i], '%Y-%m-%d') > datetime.datetime.strptime('2020-06-30', '%Y-%m-%d') and papers.COVID[i] == 1]
# Determining if the journal uses single blind or double blind peer review
print('Determining if the journal uses single blind or double blind peer review.......')
# Lists of journals with a double blind peer review policy
db_journals = ['Adm. Sci.', 'AgriEngineering', 'Arts', 'Buildings',
'Economies', 'Educ. Sci.', 'Games', 'Genealogy', 'Humanities',
'J. Intell.', 'J. Open Innov. Technol. Mark. Complex.',
'Journal. Media.', 'Languages', 'Laws', 'Psych', 'Religions',
'Soc. Sci.', 'Societies', 'Toxins']
db = [1 if j in db_journals else 0 for j in df.Journal]
db = pd.Series(db, name = 'Double_Blind')
df = pd.concat([df, db], axis = 1)
# Computing the distances
print('Calculating distances from thresholds.......')
# Distance from March 16 (middle of March)
XX = [datetime.datetime.strptime(df.Submitted[i], '%Y-%m-%d') - datetime.datetime.strptime('2020-03-16', '%Y-%m-%d') for i in range(len(df))]
XX = [x.days for x in XX]
XX = pd.Series(XX, name = 'X-c')
df = pd.concat([df, XX], axis = 1)
# Squared distance from March 16 (middle of March)
XX2 = df['X-c']*df['X-c']
XX2 = pd.Series(XX2, name = '(X-c)^2')
df = pd.concat([df, XX2], axis = 1)
# Cubed distance from March 16 (middle of March)
XX3 = df['X-c']*df['X-c']*df['X-c']
XX3 = pd.Series(XX3, name = '(X-c)^3')
df = pd.concat([df, XX3], axis = 1)
# Distance from surrounding days to serve as robustness checks
# Cutoffs for the week after March 16 (March 17 through March 23)
XX01 = [datetime.datetime.strptime(df.Submitted[i], '%Y-%m-%d') - datetime.datetime.strptime('2020-03-17', '%Y-%m-%d') for i in range(len(df))]
XX02 = [datetime.datetime.strptime(df.Submitted[i], '%Y-%m-%d') - datetime.datetime.strptime('2020-03-18', '%Y-%m-%d') for i in range(len(df))]
XX03 = [datetime.datetime.strptime(df.Submitted[i], '%Y-%m-%d') - datetime.datetime.strptime('2020-03-19', '%Y-%m-%d') for i in range(len(df))]
XX04 = [datetime.datetime.strptime(df.Submitted[i], '%Y-%m-%d') - datetime.datetime.strptime('2020-03-20', '%Y-%m-%d') for i in range(len(df))]
XX05 = [datetime.datetime.strptime(df.Submitted[i], '%Y-%m-%d') - datetime.datetime.strptime('2020-03-21', '%Y-%m-%d') for i in range(len(df))]
XX06 = [datetime.datetime.strptime(df.Submitted[i], '%Y-%m-%d') - datetime.datetime.strptime('2020-03-22', '%Y-%m-%d') for i in range(len(df))]
XX07 = [datetime.datetime.strptime(df.Submitted[i], '%Y-%m-%d') - datetime.datetime.strptime('2020-03-23', '%Y-%m-%d') for i in range(len(df))]
XX01 = [x.days for x in XX01]
XX02 = [x.days for x in XX02]
XX03 = [x.days for x in XX03]
XX04 = [x.days for x in XX04]
XX05 = [x.days for x in XX05]
XX06 = [x.days for x in XX06]
XX07 = [x.days for x in XX07]
XX01 = pd.Series(XX01, name = 'X-1-c')
XX02 = pd.Series(XX02, name = 'X-2-c')
XX03 = pd.Series(XX03, name = 'X-3-c')
XX04 = pd.Series(XX04, name = 'X-4-c')
XX05 = pd.Series(XX05, name = 'X-5-c')
XX06 = pd.Series(XX06, name = 'X-6-c')
XX07 = pd.Series(XX07, name = 'X-7-c')
df = pd.concat([df, XX01, XX02, XX03, XX04, XX05, XX06, XX07], axis = 1)
# Cutoffs for the week before March 16 (March 9 through March 15)
XX11 = [datetime.datetime.strptime(df.Submitted[i], '%Y-%m-%d') - datetime.datetime.strptime('2020-03-15', '%Y-%m-%d') for i in range(len(df))]
XX12 = [datetime.datetime.strptime(df.Submitted[i], '%Y-%m-%d') - datetime.datetime.strptime('2020-03-14', '%Y-%m-%d') for i in range(len(df))]
XX13 = [datetime.datetime.strptime(df.Submitted[i], '%Y-%m-%d') - datetime.datetime.strptime('2020-03-13', '%Y-%m-%d') for i in range(len(df))]
XX14 = [datetime.datetime.strptime(df.Submitted[i], '%Y-%m-%d') - datetime.datetime.strptime('2020-03-12', '%Y-%m-%d') for i in range(len(df))]
XX15 = [datetime.datetime.strptime(df.Submitted[i], '%Y-%m-%d') - datetime.datetime.strptime('2020-03-11', '%Y-%m-%d') for i in range(len(df))]
XX16 = [datetime.datetime.strptime(df.Submitted[i], '%Y-%m-%d') - datetime.datetime.strptime('2020-03-10', '%Y-%m-%d') for i in range(len(df))]
XX17 = [datetime.datetime.strptime(df.Submitted[i], '%Y-%m-%d') - datetime.datetime.strptime('2020-03-09', '%Y-%m-%d') for i in range(len(df))]
XX11 = [x.days for x in XX11]
XX12 = [x.days for x in XX12]
XX13 = [x.days for x in XX13]
XX14 = [x.days for x in XX14]
XX15 = [x.days for x in XX15]
XX16 = [x.days for x in XX16]
XX17 = [x.days for x in XX17]
XX11 = pd.Series(XX11, name = 'X+1-c')
XX12 = pd.Series(XX12, name = 'X+2-c')
XX13 = pd.Series(XX13, name = 'X+3-c')
XX14 = pd.Series(XX14, name = 'X+4-c')
XX15 = pd.Series(XX15, name = 'X+5-c')
XX16 = pd.Series(XX16, name = 'X+6-c')
XX17 = pd.Series(XX17, name = 'X+7-c')
df = pd.concat([df, XX11, XX12, XX13, XX14, XX15, XX16, XX17], axis = 1)
# Adding the post-effect variables for the main regression
D = [1 if df['X-c'][i] >= 0 else 0 for i in range(len(df))]
D = pd.Series(D, name = 'D')
DXc = D*df['X-c']
DXc2 = D*df['X-c']*df['X-c']
DXc3 = D*df['X-c']*df['X-c']*df['X-c']
DXc = pd.Series(DXc, name = 'D(X-c)')
DXc2 = pd.Series(DXc2, name = 'D(X-c)^2')
DXc3 = pd.Series(DXc3, name = 'D(X-c)^3')
df = pd.concat([df, D, DXc, DXc2, DXc3], axis = 1)
# Adding the post-effect variables for the robustness checks
D01 = [1 if df['X-1-c'][i] >= 0 else 0 for i in range(len(df))]
D02 = [1 if df['X-2-c'][i] >= 0 else 0 for i in range(len(df))]
D03 = [1 if df['X-3-c'][i] >= 0 else 0 for i in range(len(df))]
D04 = [1 if df['X-4-c'][i] >= 0 else 0 for i in range(len(df))]
D05 = [1 if df['X-5-c'][i] >= 0 else 0 for i in range(len(df))]
D06 = [1 if df['X-6-c'][i] >= 0 else 0 for i in range(len(df))]
D07 = [1 if df['X-7-c'][i] >= 0 else 0 for i in range(len(df))]
D01 = pd.Series(D01, name = 'D-1')
D02 = pd.Series(D02, name = 'D-2')
D03 = pd.Series(D03, name = 'D-3')
D04 = pd.Series(D04, name = 'D-4')
D05 = pd.Series(D05, name = 'D-5')
D06 = pd.Series(D06, name = 'D-6')
D07 = pd.Series(D07, name = 'D-7')
D11 = [1 if df['X+1-c'][i] >= 0 else 0 for i in range(len(df))]
D12 = [1 if df['X+2-c'][i] >= 0 else 0 for i in range(len(df))]
D13 = [1 if df['X+3-c'][i] >= 0 else 0 for i in range(len(df))]
D14 = [1 if df['X+4-c'][i] >= 0 else 0 for i in range(len(df))]
D15 = [1 if df['X+5-c'][i] >= 0 else 0 for i in range(len(df))]
D16 = [1 if df['X+6-c'][i] >= 0 else 0 for i in range(len(df))]
D17 = [1 if df['X+7-c'][i] >= 0 else 0 for i in range(len(df))]
D11 = pd.Series(D11, name = 'D+1')
D12 = pd.Series(D12, name = 'D+2')
D13 = pd.Series(D13, name = 'D+3')
D14 = pd.Series(D14, name = 'D+4')
D15 = pd.Series(D15, name = 'D+5')
D16 = pd.Series(D16, name = 'D+6')
D17 = pd.Series(D17, name = 'D+7')
df = pd.concat([df, D01, D02, D03, D04, D05, D06, D07, D11, D12, D13, D14, D15, D16, D17], axis = 1)
DXc01 = D01*df['X-1-c']
DXc02 = D02*df['X-2-c']
DXc03 = D03*df['X-3-c']
DXc04 = D04*df['X-4-c']
DXc05 = D05*df['X-5-c']
DXc06 = D06*df['X-6-c']
DXc07 = D07*df['X-7-c']
DXc11 = D11*df['X+1-c']
DXc12 = D12*df['X+2-c']
DXc13 = D13*df['X+3-c']
DXc14 = D14*df['X+4-c']
DXc15 = D15*df['X+5-c']
DXc16 = D16*df['X+6-c']
DXc17 = D17*df['X+7-c']
DXc01 = pd.Series(DXc01, name = 'D-1(X-c)')
DXc02 = pd.Series(DXc02, name = 'D-2(X-c)')
DXc03 = pd.Series(DXc03, name = 'D-3(X-c)')
DXc04 = pd.Series(DXc04, name = 'D-4(X-c)')
DXc05 = pd.Series(DXc05, name = 'D-5(X-c)')
DXc06 = pd.Series(DXc06, name = 'D-6(X-c)')
DXc07 = pd.Series(DXc07, name = 'D-7(X-c)')
DXc11 = pd.Series(DXc11, name = 'D+1(X-c)')
DXc12 = pd.Series(DXc12, name = 'D+2(X-c)')
DXc13 = pd.Series(DXc13, name = 'D+3(X-c)')
DXc14 = pd.Series(DXc14, name = 'D+4(X-c)')
DXc15 = pd.Series(DXc15, name = 'D+5(X-c)')
DXc16 = pd.Series(DXc16, name = 'D+6(X-c)')
DXc17 = pd.Series(DXc17, name = 'D+7(X-c)')
df = pd.concat([df, DXc01, DXc02, DXc03, DXc04, DXc05, DXc06, DXc07, DXc11, DXc12, DXc13, DXc14, DXc15, DXc16, DXc17], axis = 1)
# Calculating a total author time to add to the data set as a potential dependent variable
A = [df.Total[i] - df.Editor[i] for i in range(len(df))]
A = pd.Series(A, name = 'Author')
df = pd.concat([df, A], axis = 1)
# Adding natural logarithm transformed arXiv data
ln_arXiv7 = pd.Series(np.log(df.arXiv7.values), name = 'ln_arXiv7')
ln_arXiv14 = pd.Series(np.log(df.arXiv14.values), name = 'ln_arXiv14')
ln_arXiv30 = pd.Series(np.log(df.arXiv30.values), name = 'ln_arXiv30')
ln_new7 = pd.Series(np.log(df.new7.values), name = 'ln_new7')
ln_new14 = pd.Series(np.log(df.new14.values), name = 'ln_new14')
ln_new30 = pd.Series(np.log(df.new30.values), name = 'ln_new30')
df = pd.concat([df, ln_arXiv7, ln_arXiv14, ln_arXiv30, ln_new7, ln_new14, ln_new30], axis = 1)
# Two journals had a bad date resulting in an infeasible value for Stage1 so they are dropped here
df = df[df.Stage1 >= 0].reset_index(drop = True)
# Defining a function for adding a month dummy
def month(m):
md = {'01':'JAN', '02':'FEB', '03':'MAR', '04':'APR', '05':'MAY', '06':'JUN',
'07':'JUL', '08':'AUG', '09':'SEP', '10':'OCT', '11':'NOV', '12':'DEC', } # a month dictionary
s = m[5:7] # the month as a number stored as a string
mon = md[s]# getting the month from the dictionary
return mon
# Add a month dummy using the function
months = [month(m) for m in df.Submitted]
months = pd.Series(months, name = 'Month')
df = pd.concat([df, months], axis = 1)
# Prepping the data for the regressions
Stage1 = np.log(df.Stage1.values)
Stage2 = np.log(df.Stage2.values)
Stage3 = np.log(df.Stage3.values)
Total = np.log(df.Total.values)
Editor = np.log(df.Editor.values)
XX = stats.add_constant(df[['X-c', '(X-c)^2', '(X-c)^3', 'D', 'D(X-c)', 'D(X-c)^2', 'D(X-c)^3',
'COVID', 'Double_Blind', 'Author_Count', 'ln_arXiv14']])
# Creating the fixed effects
dG = pd.get_dummies(df['Gender'])
dF = pd.get_dummies(df['Frascati'])
dQ = pd.get_dummies(df['First_Quartile'])
dN = | pd.get_dummies(df['Nationality']) | pandas.get_dummies |
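The row above truncates while building fixed effects; the completion is pandas.get_dummies. A small illustration of the pattern with made-up category values:
import pandas as pd

toy = pd.DataFrame({'Gender': ['male', 'female', 'unknown', 'female']})
dG = pd.get_dummies(toy['Gender'])       # one indicator column per category
X = pd.concat([toy, dG], axis=1)         # ready to join onto the regressor matrix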
# data from https://www.ssa.gov/oact/babynames/limits.html
import os
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
# add your names to plot here in quotes, separated by commas: 'Joe', 'Joseph'
name_to_plot = ['Sarah', 'Sara']
# M, F, or B for both
sex = "F"
#Change to true/false to show or not show the sum of the names in your list, helpful for spelling variants
show_sum = True
# Set top n to 0 to not display most popular names
show_top_n = 10
# limit the most popular names to a recent timeframe (the most popular names in the 60s were very common)
show_top_n_since = 1880
# Show name neighbors (names that are similar in popularity to the plotted names)
name_neighbors = 0 # will show above and below
name_data = | pd.read_csv("name_data.csv") | pandas.read_csv |
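The script above stops at reading name_data.csv (pandas.read_csv). As a rough sketch only, assuming the file has columns named 'name', 'sex', 'year' and 'count' (the actual layout is not shown in this snippet), the configured filters might be applied like this:
subset = name_data[name_data['name'].isin(name_to_plot)]
if sex != 'B':
    subset = subset[subset['sex'] == sex]
per_year = subset.groupby(['year', 'name'])['count'].sum().unstack()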
import pandas as pd
import requests
from pathlib import Path
from tqdm.auto import tqdm
tqdm.pandas()
rki_to_iso = {0: 'DE',
1: 'DE-SH',
2: 'DE-HH',
3: 'DE-NI',
4: 'DE-HB',
5: 'DE-NW',
6: 'DE-HE',
7: 'DE-RP',
8: 'DE-BW',
9: 'DE-BY',
10: 'DE-SL',
11: 'DE-BE',
12: 'DE-BB',
13: 'DE-MV',
14: 'DE-SN',
15: 'DE-ST',
16: 'DE-TH'}
def process_data(df):
df['location'] = df.Bundesland_Id.replace(rki_to_iso)
df.drop(columns = ['Bundesland', 'Bundesland_Id', '7T_Hospitalisierung_Inzidenz'], inplace = True, errors = 'ignore')
df.rename({'Datum': 'date', 'Altersgruppe': 'age_group','7T_Hospitalisierung_Faelle': 'value'},
axis = 'columns', inplace = True)
df = df[['date', 'location', 'age_group', 'value']]
return df
# get path of all available files
url = "https://api.github.com/repos/robert-koch-institut/COVID-19-Hospitalisierungen_in_Deutschland/git/trees/master?recursive=1"
r = requests.get(url)
res = r.json()
files = [file["path"] for file in res["tree"] if (file["path"].startswith('Archiv/') and file["path"].endswith('Deutschland_COVID-19-Hospitalisierungen.csv'))]
df_files = | pd.DataFrame({'filename':files}) | pandas.DataFrame |
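The snippet above stops after collecting the archive file paths into df_files (pandas.DataFrame). A hedged follow-up sketch: if the archived paths embed a date as 'Archiv/YYYY-MM-DD_Deutschland_COVID-19-Hospitalisierungen.csv' (an assumption about the repository layout), the reporting date can be pulled out like this:
import pandas as pd

sample = pd.DataFrame({'filename': ['Archiv/2021-11-01_Deutschland_COVID-19-Hospitalisierungen.csv']})
sample['date'] = pd.to_datetime(sample['filename'].str.extract(r'(\d{4}-\d{2}-\d{2})')[0])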
import pandas as pd
import requests
import os.path
import bs4
import requests
import urllib3
import csv
from os import path
#Data loader functions belong here. This is where
# information about the data files is found.
def load_proteomics(version='current', level='protein',
prefix="", suffix="Total Intensity",
contains=[], not_contains=[], prepend_label=""):
# Step 1. Which file are we reading from?
file = get_file(key = version)
if file==1:
print("Error with file download.")
return False
# Step 2. Read the file
df = pd.read_csv(file, sep='\t', header=0)
#We want to use "Protein" "Protein IDs" or "Protein ID" as the index,
# or "Sequence" in the case of peptides;
# here we check which exists in this file.
index = False
if level == 'peptide': index = "Sequence"
elif level == "protein":
if 'Protein' in df.columns: index = "Protein"
elif "Protein IDs" in df.columns: index = "Protein IDs"
if index:
df = df.set_index(index)
# Step 3. Filter
headings = df.columns
if suffix:#filter by options such as suffix
headings = [i for i in headings if i.endswith(suffix)]
if prefix:#filter by columns beginning in prefix
headings = [i for i in headings if i.startswith(prefix)]
for req in contains:
headings = [i for i in headings if req in i]
for req in not_contains:
headings = [i for i in headings if req not in i]
# Optional 3b: Filter rows
# This may not be necessary or may be done differently.
# For example, ignoring rows that MaxQuant (MQ) marks as likely contaminants
#drop contaminants and decoys
if 'Potential contaminant' in df.columns:
df = df.drop(df[df['Potential contaminant'] == '+'].index)
if 'Reverse' in df.columns:
df = df.drop(df[df.Reverse == '+'].index)
if level=='protein':
#optionally, discard those that were only identified by site
#this will not work for peptide level analysis
if 'Only identified by site' in df.columns:
df = df.drop(df[df['Only identified by site'] == '+'].index)
df = df[headings]
# Step 4. Clean headers
# Remove the prefix and suffix (e.g., "Total Intensity") from the column names
# optionally prepends a sample type (ie, "HeLa")
new_names={}
for c in df.columns.values:
sample_name = c[len(prefix):].strip()
sample_name = sample_name[:-len(suffix)].strip() if suffix else sample_name
new_names[c] = "{0}_{1}".format(prepend_label, sample_name)
df.rename(columns=new_names, inplace=True)
df.head()
# Return data
return df
def load_max_quant(version = 'current', level='protein',
prefix="Intensity", contains=["_"],
sample_type=""
):
#Takes a file and returns a dataframe.
# file: the file path to read from
# The rest of the paramters are used to select the columns.
# By default, it will look for ones starting with 'Reporter intensity'
# that do not contain 'count' or 'corrected' and use the 'Protein IDs'
# column as the indecies. These will be the raw intensity values.
#file = get_file(key = version)#We need to add max_quant files to the index_url on box so we can use their keys on this
if level=='protein':
path = "data/proteinGroups_{0}.txt".format(version)
url = "data/proteinGroups_{0}_url.txt".format(version)
elif level=="peptide":
path = "data/peptides_{0}.txt".format(version)
url = "data/peptides_{0}_url.txt".format(version)
else:
#unknown level
print ("Please specify either 'protein' or 'peptide' level.")
return False
file = download_file(download_to_path=path, url_file_path=url)
#read in data
df = pd.read_csv(file, sep='\t', header=0, index_col=0)
#filter the columns based on the prefix and other "contains" requirements
headings = df.columns
if prefix:#filter by columns beginning in prefix
headings = [i for i in headings if i.startswith(prefix)]
for req in contains:
headings = [i for i in headings if req in i]
#drop contaminants and decoys
df = df.drop(df[df['Potential contaminant'] == '+'].index)
df = df.drop(df[df.Reverse == '+'].index)
if level=='protein':
#optionally, discard those that were only identified by site
#this will not work for peptide
df = df.drop(df[df['Only identified by site'] == '+'].index)
df = df[headings]
# Remove the prefix (ie, "Total Intensity") from the column names
# optionally prepends a sample type (ie, "HeLa")
new_names={}
for c in df.columns.values:
sample_name = c[len(prefix):].strip()
new_names[c] = "{0}_{1}".format(sample_type, sample_name)
df.rename(columns=new_names, inplace=True)
df.head()
return df
def get_file(key = 'current'):
#Takes the version we are looking for and sets up a table
#from the url file so that we can use the version passed in as
#a key to identify what url from the index table to download.
url_file = open('data/index_url.txt', 'r')
url = url_file.read().strip()
url_file.close()
table_file_path = download_file(download_to_path="data/index_table.tsv", url = url)
table = pd.read_csv(table_file_path, sep='\t', header = 0, index_col = 'key')
file_url = table.loc[key]
file_name="data/{0}.tsv".format(key)
url_name = file_url[0]
return download_file(download_to_path=file_name, url=url_name, redownload = False)
def load_FragPipe(version = 'current', contains=[],level='protein',
suffix="Total Intensity"):
#Takes a file and returns a dataframe.
# file: the file path to read from
# The rest of the paramters are used to select the columns.
# By default, it will look for ones ending with 'Total intensity'
# that do not contain 'count' or 'corrected' and use the 'Protein IDs'
# column as the indecies. These will be the raw intensity values.
file = get_file(key = version)
if file==1:
print("Error with file download.")
return False
if version=='June':not_contains=['15']#drop extra replicate - Yiran said these two weren't good quality, I just forgot to not run it so for now I'll exclude it at this level
else: not_contains=[]
#read in data
if level == 'protein': index_col = 3
else: index_col=0 #for peptides and by default, take the first column as index
df = pd.read_csv(file, sep='\t', header=0, index_col=index_col)
#filter the columns based on the prefix and other "contains" requirements
headings = df.columns
if suffix:#filter by options such as suffix, contains
headings = [i for i in headings if i.endswith(suffix)]
for req in contains:
headings = [i for i in headings if req in i]
for req in not_contains:
headings = [i for i in headings if req not in i]
df = df[headings]
# Remove the "Total Intensity" part of the column names
new_names={}
for c in df.columns.values:
new_names[c] = c.split(' ')[0]
df.rename(columns=new_names, inplace=True)
df.head()
return df
def download_file(download_to_path="data/datafile.txt", url='',
password_file_path="data/password.txt", redownload=False):
"""Download a file from a given url to the specified location.
Parameters:
download_to_path (str): The path on the local machine to save the downloaded file to.
url (str): The url to download the file from.
password_file_path (str): The path to a file containing the password, if the link is password protected.
redownload (bool): If True, download the file again even if it already exists locally.
Returns:
str: The path the file was downloaded to.
"""
if redownload or path.exists(download_to_path) == False: #If the file has not been downloaded yet, or the user wants to re-download it, download the file
if url == '':
print("URL MUST BE SPECIFIED FOR DOWNLOAD")
return 1
for i in range(2):
with requests.Session() as session: # Use a session object to save cookies
# Construct the urls for our GET and POST requests
get_url = url
post_url = get_url.replace("https://byu.box.com/shared", "https://byu.app.box.com/public")
# Send initial GET request and parse the request token out of the response
get_response = session.get(get_url)
soup = bs4.BeautifulSoup(get_response.text, "html.parser")
token_tag = soup.find(id="request_token")
#print (token_tag)
#print (type(token_tag))
#This checks if there is a password file and if a password requirement was found on the file
if token_tag is not None:
#This identifies if the error was with the password file path.
if path.exists(password_file_path) == False:
print("MISSING PASSWORD FILE")
return 1
#print("Checking password...")
password_file = open(password_file_path, 'r')
password = password_file.read().strip()
password_file.close()
token = token_tag.get("value")
# Send a POST request, with the password and token, to get the data
payload = {
'password': password,
'request_token': token}
response = session.post(post_url, data=payload)
with open(download_to_path, 'wb') as dest:
dest.write(response.content)
#This will download the file if it was not password protected
else:
#print("No password needed")
response = requests.get(post_url, allow_redirects=True)
with open(download_to_path, 'wb') as out_file:
out_file.write(response.content)
return download_to_path
def load_fasta(file="data/uniprot-filtered-proteome_3AUP000005640_reviewed_human.fasta"):
#file is formated:
#>sp|Q96IY4|CBPB2_HUMAN Carboxypeptidase B2 OS=Homo sapiens OX=9606 GN=CPB2 PE=1 SV=2
#MKLCS...
headings = {}
with open(file) as f:
for line in f:
if line.startswith('>'):#header line
ID = line.split('|')[1]
name=line.split('|')[2].split('=')[0].strip('OS')
headings[ID]=name
headings = pd.Series(list(headings.values()), index=headings.keys())
return headings
def names_max_quant():
file = download_file(download_to_path="data/proteinGroups.txt", url_file_path="data/proteinGroups_url.txt")
df = | pd.read_csv(file, sep='\t', header=0, index_col=0, usecols=['Protein IDs','Gene names','Fasta headers']) | pandas.read_csv |
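The loader above truncates at a pandas.read_csv call restricted with usecols. A toy illustration of the column-renaming convention the loaders in this module rely on (strip the quantification prefix, prepend a sample label); the column names here are invented:
import pandas as pd

raw = pd.DataFrame({'Intensity A1': [1.0], 'Intensity A2': [2.0]})
prefix, sample_type = 'Intensity', 'HeLa'
renamed = raw.rename(columns={c: f"{sample_type}_{c[len(prefix):].strip()}"
                              for c in raw.columns})
# renamed.columns is now ['HeLa_A1', 'HeLa_A2']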
# -*- coding: utf-8 -*-
import time
import hashlib
import traceback
import pandas as pd
# Configuration
mapping = {
"cy": [(1, 7), (4, 5)], # catering (test)
# "cy": [(1, 187432), (4, 220703)], # catering
"cs": [(2, 48732), (5, 22389), (7, 72084), (8, -1)], # debt collection
"ys": [(3, 193302), (6, -1)] # suspected debt collection
}
def format_tel(tel):
if isinstance(tel, str):
return ''.join(i for i in tel if i.isdigit())
else:
return None
def phone_encode(string, method="md5_sha1"):
try:
if not string:
return None
if 'md5' in method:
m = hashlib.md5()
m.update(string.encode(encoding='utf-8'))
string = m.hexdigest()
string = string[0:32]
if 'sha1' in method:
s = hashlib.sha1()
s.update(string.encode(encoding='utf-8').upper())
string = s.hexdigest().upper()
return string
except:
print(traceback.format_exc())
return None
def cuishou_db_typed(cuishou_file, canyin_file, new_cuishou_db_file):
try:
cuishou = | pd.read_csv(cuishou_file, header=None, names=['tel', 'type']) | pandas.read_csv |
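The script above truncates at reading the collection file with pandas.read_csv. A small usage sketch for the two helpers defined earlier in the same script; the phone number is invented:
raw = '138-0000-0000'
tel = format_tel(raw)                         # digits only: '13800000000'
token = phone_encode(tel, method='md5_sha1')  # md5 hex digest, then upper-cased SHA1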
'''
Created on April 15, 2012
Last update on July 18, 2015
@author: <NAME>
@author: <NAME>
@author: <NAME>
'''
import pandas as pd
import numpy as np
class Columns(object):
OPEN='Open'
HIGH='High'
LOW='Low'
CLOSE='Close'
VOLUME='Volume'
indicators=["MA", "EMA", "MOM", "ROC", "ATR", "BBANDS", "PPSR", "STOK", "STO",
"TRIX", "ADX", "MACD", "MassI", "Vortex", "KST", "RSI", "TSI", "ACCDIST",
"Chaikin", "MFI", "OBV", "FORCE", "EOM", "CCI", "COPP", "KELCH", "ULTOSC",
"DONCH", "STDDEV"]
class Settings(object):
join=True
col=Columns()
SETTINGS=Settings()
def out(settings, df, result):
if not settings.join:
return result
else:
df=df.join(result)
return df
def MA(df, n, price='Close'):
"""
Moving Average
"""
name='MA_{n}'.format(n=n)
result = pd.Series(df[price].rolling(n).mean(), name=name)
return out(SETTINGS, df, result)
def emaHelper(price, n, alphaIn=None):
"""
Algorithm by Stockchart
"""
length_of_df = len(price.axes[0])
initial_sma = price[0:n].mean()
ema = pd.Series(np.nan, index=range(0, length_of_df))
ema.iat[n-1] = initial_sma
if(not alphaIn):
alpha = (2.0/(n + 1.0))
else:
alpha = alphaIn
for i in range(n, length_of_df):
ema.iat[i] = price.iat[i]* alpha + (1-alpha)* ema.iat[i-1]
return ema
def EMA(df, n=5, price='Close'):
"""
Exponential Moving Average
"""
result = emaHelper(df, n)
return result
def MOM(df, n, price='Close'):
"""
Momentum
"""
result=pd.Series(df[price].diff(n), name='Momentum_' + str(n))
return out(SETTINGS, df, result)
def ROC(df, n, price='Close'):
"""
Rate of Change
"""
M = df[price].diff(n - 1)
N = df[price].shift(n - 1)
result = pd.Series(M / N, name='ROC_' + str(n))
return out(SETTINGS, df, result)
def ATR(df, n):
"""
Average True Range
"""
L = len(df['High'])
TR_l = [None]*L
for i in range(1, L):
TR = max(df['High'].iloc[i] - df['Low'].iloc[i], \
abs(df['High'].iloc[i] - df['Close'].iloc[i-1]), \
abs(df['Low'].iloc[i] - df['Close'].iloc[i-1]) )
TR_l[i] = TR
TR_s = pd.Series(TR_l[1::])
alpha = 1.0/n
result = emaHelper(TR_s, n, alpha)
return out(SETTINGS, df, result)
def BBANDS(df, n, price='Close'):
"""
Bollinger Bands
"""
MA = pd.Series(df[price].rolling(n).mean())
MSD = pd.Series(df[price].rolling(n).std())
b1 = 4 * MSD / MA
B1 = pd.Series(b1, name='BollingerB_' + str(n))
b2 = (df[price] - MA + 2 * MSD) / (4 * MSD)
B2 = pd.Series(b2, name='Bollinger%b_' + str(n))
result = pd.DataFrame([B1, B2]).transpose()
return out(SETTINGS, df, result)
def PPSR(df):
"""
Pivot Points, Supports and Resistances
"""
PP = pd.Series((df['High'] + df['Low'] + df['Close']) / 3)
R1 = pd.Series(2 * PP - df['Low'])
S1 = pd.Series(2 * PP - df['High'])
R2 = pd.Series(PP + df['High'] - df['Low'])
S2 = pd.Series(PP - df['High'] + df['Low'])
R3 = pd.Series(df['High'] + 2 * (PP - df['Low']))
S3 = pd.Series(df['Low'] - 2 * (df['High'] - PP))
result = pd.DataFrame([PP, R1, S1, R2, S2, R3, S3]).transpose()
return out(SETTINGS, df, result)
def STOK(df):
"""
Stochastic oscillator %K
"""
result = | pd.Series((df['Close'] - df['Low']) / (df['High'] - df['Low']), name='SO%k') | pandas.Series |
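The module above truncates inside STOK, which completes with pandas.Series. A quick usage sketch for a helper that is already complete; it reuses the MA() function and SETTINGS object defined above and toy prices rather than market data:
import pandas as pd

prices = pd.DataFrame({'Close': [10.0, 11.0, 12.0, 13.0, 14.0]})
with_ma = MA(prices, n=3)      # SETTINGS.join is True, so an 'MA_3' column is joined on
print(with_ma['MA_3'])         # NaN, NaN, 11.0, 12.0, 13.0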
"""Analysis tools."""
import ast
import json
import os
from typing import Any, Dict, Optional, Union
import matplotlib.dates as dates
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
def analyze() -> None:
"""Return info messages as dict, plot prize timeline and save both.
Notes
-----
For some cards, the grade data is of type `str` instead of `int` or `float`.
pd.DataFrames assign the data-types column-wise by using the most compatible
type. For example, the grade "Authentic" for only one card causes pandas
to transform the whole column with grades in {1.0, 1.5, ... 10.0} to be
stored as `str`. Therefore, all grades are converted to `str` for a unified
treatment. These strings are written like floats with one decimal place.
"""
# Read grades. Delimeter has to be semicolon only.
if not os.path.exists("input/input.csv"):
raise ValueError("'input.csv' not found")
df = pd.read_csv("input/input.csv", sep=";")
# Convert List-String in input column to List[Union[float,int]].
grades = df["grades (list)"].apply(ast.literal_eval).tolist()
# Iterate over cards in input DataFrame.
for row in range(len(df)):
# Get card_name from urls
card_name = _get_card_name(df.iloc[row, 0])
# Important: Convert col to `str` as described in the notes.
df_out = pd.read_csv(f"output/data/{card_name}.csv", parse_dates=["date"])
df_out["grade"] = df_out["grade"].astype(str)
# Iterate over grades per card.
for g in grades[row]:
# Parse grade as str due to reasons above.
g = str(float(g))
# Info on grades outside {1.0, 1.5, ... 10.0}.
msg_grades = _str_unusual_grades(df_out)
# Info on the share with this grade.
msg_grades_n = _str_n_grade(df_out, g)
# Drop rows with different grades.
df_filt = _filter_grade_data(df_out, g)
# Compute compund annual growth rate (CAGR).
msg_return = _str_comp_ann_g(df_filt)
# Store infos in dictionary and print.
card_dict: Dict[str, Optional[str]] = {
"ident": f"{card_name}-{g}",
"compound annual growth": msg_return,
"info grades number": msg_grades_n,
}
# Drop the message if there are no unusual grades.
if msg_grades is None:
pass
else:
card_dict["info grades"] = msg_grades
# Print info.
for v in card_dict.values():
print(v)
# Save dictionary.
if not os.path.exists("output/nmbrs"):
os.makedirs("output/nmbrs")
with open(f"output/nmbrs/{card_name}-grade-{g}.json", "w") as fp:
json.dump(card_dict, fp)
# Plot and save prize trend.
_scatter_prize_time(df_filt, f"{card_name}-grade-{g}")
def _str_unusual_grades(df: pd.DataFrame) -> Union[str, None]:
"""Print the number of unusual grades."""
grades = np.arange(0, 10.5, 0.5).astype(float)
catch_grades = []
for item in df["grade"]:
try:
if float(item) not in grades:
catch_grades.append(item)
except ValueError:
catch_grades.append(item)
if catch_grades == []:
return None
else:
return (
f"– Over all grades, {len(catch_grades)} of {len(df)} cards do not receive"
f" standard grades. These grades are in {set(catch_grades)}"
)
def _get_card_name(card_url: str) -> str: # noqa: D102
c_name = card_url.split("-cards/")[1].split("/values")[0].replace("/", "-")
return c_name
def _str_n_grade(df: pd.DataFrame, grade: str) -> str:
"""Print the number of cards with grade `grade`."""
n_cards = len(df)
n_grade = len(df[(df["grade"]) == grade])
perc = round((n_grade / n_cards) * 100, 2)
return (
f"– The number of cards with grade {grade} is {n_grade} "
f"of {n_cards} cards. That is {perc}%."
)
def _filter_grade_data(df: pd.DataFrame, grade: str) -> pd.DataFrame:
"""Reduce df to date and price data for cards with grade `grade`."""
df = df[(df["grade"]) == grade]
df = df[["date", "prize"]]
return df
def _str_comp_ann_g(df: pd.DataFrame) -> str:
"""Print the average annual prize growth."""
if df.empty is True:
return "There is no prize data for this grade."
else:
df["year"] = | pd.DatetimeIndex(df["date"]) | pandas.DatetimeIndex |
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import os
import tempfile
import time
from collections import OrderedDict
from datetime import datetime
from string import printable
import numpy as np
import pandas as pd
import pytest
try:
import pyarrow as pa
except ImportError: # pragma: no cover
pa = None
try:
import fastparquet
except ImportError: # pragma: no cover
fastparquet = None
try:
import sqlalchemy
except ImportError: # pragma: no cover
sqlalchemy = None
from .... import tensor as mt
from .... import dataframe as md
from ....config import option_context
from ....tests.core import require_cudf, require_ray
from ....utils import arrow_array_to_objects, lazy_import, pd_release_version
from ..dataframe import from_pandas as from_pandas_df
from ..series import from_pandas as from_pandas_series
from ..index import from_pandas as from_pandas_index, from_tileable
from ..from_tensor import dataframe_from_tensor, dataframe_from_1d_tileables
from ..from_records import from_records
ray = lazy_import("ray")
_date_range_use_inclusive = pd_release_version[:2] >= (1, 4)
def test_from_pandas_dataframe_execution(setup):
# test empty DataFrame
pdf = pd.DataFrame()
df = from_pandas_df(pdf)
result = df.execute().fetch()
pd.testing.assert_frame_equal(pdf, result)
pdf = pd.DataFrame(columns=list("ab"))
df = from_pandas_df(pdf)
result = df.execute().fetch()
pd.testing.assert_frame_equal(pdf, result)
pdf = pd.DataFrame(
np.random.rand(20, 30), index=[np.arange(20), np.arange(20, 0, -1)]
)
df = from_pandas_df(pdf, chunk_size=(13, 21))
result = df.execute().fetch()
pd.testing.assert_frame_equal(pdf, result)
def test_from_pandas_series_execution(setup):
# test empty Series
ps = pd.Series(name="a")
series = from_pandas_series(ps, chunk_size=13)
result = series.execute().fetch()
pd.testing.assert_series_equal(ps, result)
series = from_pandas_series(ps)
result = series.execute().fetch()
pd.testing.assert_series_equal(ps, result)
ps = pd.Series(
np.random.rand(20), index=[np.arange(20), np.arange(20, 0, -1)], name="a"
)
series = from_pandas_series(ps, chunk_size=13)
result = series.execute().fetch()
pd.testing.assert_series_equal(ps, result)
def test_from_pandas_index_execution(setup):
pd_index = pd.timedelta_range("1 days", periods=10)
index = from_pandas_index(pd_index, chunk_size=7)
result = index.execute().fetch()
pd.testing.assert_index_equal(pd_index, result)
def test_index_execution(setup):
rs = np.random.RandomState(0)
pdf = pd.DataFrame(
rs.rand(20, 10),
index=np.arange(20, 0, -1),
columns=["a" + str(i) for i in range(10)],
)
df = from_pandas_df(pdf, chunk_size=13)
# test df.index
result = df.index.execute().fetch()
pd.testing.assert_index_equal(result, pdf.index)
result = df.columns.execute().fetch()
pd.testing.assert_index_equal(result, pdf.columns)
# df has unknown chunk shape on axis 0
df = df[df.a1 < 0.5]
# test df.index
result = df.index.execute().fetch()
pd.testing.assert_index_equal(result, pdf[pdf.a1 < 0.5].index)
s = pd.Series(pdf["a1"], index=pd.RangeIndex(20))
series = from_pandas_series(s, chunk_size=13)
# test series.index which has value
result = series.index.execute().fetch()
pd.testing.assert_index_equal(result, s.index)
s = pdf["a2"]
series = from_pandas_series(s, chunk_size=13)
# test series.index
result = series.index.execute().fetch()
pd.testing.assert_index_equal(result, s.index)
# test tensor
raw = rs.random(20)
t = mt.tensor(raw, chunk_size=13)
result = from_tileable(t).execute().fetch()
pd.testing.assert_index_equal(result, pd.Index(raw))
def test_initializer_execution(setup):
arr = np.random.rand(20, 30)
pdf = pd.DataFrame(arr, index=[np.arange(20), np.arange(20, 0, -1)])
df = md.DataFrame(pdf, chunk_size=(15, 10))
result = df.execute().fetch()
pd.testing.assert_frame_equal(pdf, result)
df = md.DataFrame(arr, index=md.date_range("2020-1-1", periods=20))
result = df.execute().fetch()
pd.testing.assert_frame_equal(
result, pd.DataFrame(arr, index=pd.date_range("2020-1-1", periods=20))
)
df = md.DataFrame(
{"prices": [100, 101, np.nan, 100, 89, 88]},
index=md.date_range("1/1/2010", periods=6, freq="D"),
)
result = df.execute().fetch()
pd.testing.assert_frame_equal(
result,
pd.DataFrame(
{"prices": [100, 101, np.nan, 100, 89, 88]},
index=pd.date_range("1/1/2010", periods=6, freq="D"),
),
)
s = np.random.rand(20)
ps = pd.Series(s, index=[np.arange(20), np.arange(20, 0, -1)], name="a")
series = md.Series(ps, chunk_size=7)
result = series.execute().fetch()
pd.testing.assert_series_equal(ps, result)
series = md.Series(s, index=md.date_range("2020-1-1", periods=20))
result = series.execute().fetch()
pd.testing.assert_series_equal(
result, pd.Series(s, index=pd.date_range("2020-1-1", periods=20))
)
pi = pd.IntervalIndex.from_tuples([(0, 1), (2, 3), (4, 5)])
index = md.Index(md.Index(pi))
result = index.execute().fetch()
pd.testing.assert_index_equal(pi, result)
def test_index_only(setup):
df = md.DataFrame(index=[1, 2, 3])
pd.testing.assert_frame_equal(df.execute().fetch(), pd.DataFrame(index=[1, 2, 3]))
s = md.Series(index=[1, 2, 3])
pd.testing.assert_series_equal(s.execute().fetch(), pd.Series(index=[1, 2, 3]))
df = md.DataFrame(index=md.Index([1, 2, 3]))
pd.testing.assert_frame_equal(df.execute().fetch(), pd.DataFrame(index=[1, 2, 3]))
s = md.Series(index=md.Index([1, 2, 3]), dtype=object)
pd.testing.assert_series_equal(
s.execute().fetch(), pd.Series(index=[1, 2, 3], dtype=object)
)
def test_series_from_tensor(setup):
data = np.random.rand(10)
series = md.Series(mt.tensor(data), name="a")
pd.testing.assert_series_equal(series.execute().fetch(), pd.Series(data, name="a"))
series = md.Series(mt.tensor(data, chunk_size=3))
pd.testing.assert_series_equal(series.execute().fetch(), pd.Series(data))
series = md.Series(mt.ones((10,), chunk_size=4))
pd.testing.assert_series_equal(
series.execute().fetch(),
pd.Series(np.ones(10)),
)
index_data = np.random.rand(10)
series = md.Series(
mt.tensor(data, chunk_size=3),
name="a",
index=mt.tensor(index_data, chunk_size=4),
)
pd.testing.assert_series_equal(
series.execute().fetch(), pd.Series(data, name="a", index=index_data)
)
series = md.Series(
mt.tensor(data, chunk_size=3),
name="a",
index=md.date_range("2020-1-1", periods=10),
)
pd.testing.assert_series_equal(
series.execute().fetch(),
pd.Series(data, name="a", index=pd.date_range("2020-1-1", periods=10)),
)
def test_from_tensor_execution(setup):
tensor = mt.random.rand(10, 10, chunk_size=5)
df = dataframe_from_tensor(tensor)
tensor_res = tensor.execute().fetch()
pdf_expected = pd.DataFrame(tensor_res)
df_result = df.execute().fetch()
pd.testing.assert_index_equal(df_result.index, pd.RangeIndex(0, 10))
pd.testing.assert_index_equal(df_result.columns, pd.RangeIndex(0, 10))
pd.testing.assert_frame_equal(df_result, pdf_expected)
# test from tensor with unknown shape
tensor2 = tensor[tensor[:, 0] < 0.9]
df = dataframe_from_tensor(tensor2)
df_result = df.execute().fetch()
tensor_res = tensor2.execute().fetch()
pdf_expected = pd.DataFrame(tensor_res)
pd.testing.assert_frame_equal(df_result.reset_index(drop=True), pdf_expected)
# test converted with specified index_value and columns
tensor2 = mt.random.rand(2, 2, chunk_size=1)
df2 = dataframe_from_tensor(
tensor2, index=pd.Index(["a", "b"]), columns=pd.Index([3, 4])
)
df_result = df2.execute().fetch()
pd.testing.assert_index_equal(df_result.index, pd.Index(["a", "b"]))
pd.testing.assert_index_equal(df_result.columns, pd.Index([3, 4]))
# test converted from 1-d tensor
tensor3 = mt.array([1, 2, 3])
df3 = dataframe_from_tensor(tensor3)
result3 = df3.execute().fetch()
pdf_expected = pd.DataFrame(np.array([1, 2, 3]))
pd.testing.assert_frame_equal(pdf_expected, result3)
# test converted from identical chunks
tensor4 = mt.ones((10, 10), chunk_size=3)
df4 = dataframe_from_tensor(tensor4)
result4 = df4.execute().fetch()
pdf_expected = pd.DataFrame(tensor4.execute().fetch())
pd.testing.assert_frame_equal(pdf_expected, result4)
# from tensor with given index
tensor5 = mt.ones((10, 10), chunk_size=3)
df5 = dataframe_from_tensor(tensor5, index=np.arange(0, 20, 2))
result5 = df5.execute().fetch()
pdf_expected = pd.DataFrame(tensor5.execute().fetch(), index=np.arange(0, 20, 2))
pd.testing.assert_frame_equal(pdf_expected, result5)
# from tensor with given index that is a tensor
raw7 = np.random.rand(10, 10)
tensor7 = mt.tensor(raw7, chunk_size=3)
index_raw7 = np.random.rand(10)
index7 = mt.tensor(index_raw7, chunk_size=4)
df7 = dataframe_from_tensor(tensor7, index=index7)
result7 = df7.execute().fetch()
pdf_expected = pd.DataFrame(raw7, index=index_raw7)
pd.testing.assert_frame_equal(pdf_expected, result7)
# from tensor with given index is a md.Index
raw10 = np.random.rand(10, 10)
tensor10 = mt.tensor(raw10, chunk_size=3)
index10 = md.date_range("2020-1-1", periods=10, chunk_size=3)
df10 = dataframe_from_tensor(tensor10, index=index10)
result10 = df10.execute().fetch()
pdf_expected = pd.DataFrame(raw10, index=pd.date_range("2020-1-1", periods=10))
pd.testing.assert_frame_equal(pdf_expected, result10)
# from tensor with given columns
tensor6 = mt.ones((10, 10), chunk_size=3)
df6 = dataframe_from_tensor(tensor6, columns=list("abcdefghij"))
result6 = df6.execute().fetch()
pdf_expected = pd.DataFrame(tensor6.execute().fetch(), columns=list("abcdefghij"))
pd.testing.assert_frame_equal(pdf_expected, result6)
# from 1d tensors
raws8 = [
("a", np.random.rand(8)),
("b", np.random.randint(10, size=8)),
("c", ["".join(np.random.choice(list(printable), size=6)) for _ in range(8)]),
]
tensors8 = OrderedDict((r[0], mt.tensor(r[1], chunk_size=3)) for r in raws8)
raws8.append(("d", 1))
raws8.append(("e", pd.date_range("2020-1-1", periods=8)))
tensors8["d"] = 1
tensors8["e"] = raws8[-1][1]
df8 = dataframe_from_1d_tileables(tensors8, columns=[r[0] for r in raws8])
result = df8.execute().fetch()
pdf_expected = pd.DataFrame(OrderedDict(raws8))
pd.testing.assert_frame_equal(result, pdf_expected)
# from 1d tensors and specify index with a tensor
index_raw9 = np.random.rand(8)
index9 = mt.tensor(index_raw9, chunk_size=4)
df9 = dataframe_from_1d_tileables(
tensors8, columns=[r[0] for r in raws8], index=index9
)
result = df9.execute().fetch()
pdf_expected = pd.DataFrame(OrderedDict(raws8), index=index_raw9)
pd.testing.assert_frame_equal(result, pdf_expected)
# from 1d tensors and specify index
df11 = dataframe_from_1d_tileables(
tensors8,
columns=[r[0] for r in raws8],
index=md.date_range("2020-1-1", periods=8),
)
result = df11.execute().fetch()
pdf_expected = pd.DataFrame(
OrderedDict(raws8), index=pd.date_range("2020-1-1", periods=8)
)
pd.testing.assert_frame_equal(result, pdf_expected)
def test_from_records_execution(setup):
dtype = np.dtype([("x", "int"), ("y", "double"), ("z", "<U16")])
ndarr = np.ones((10,), dtype=dtype)
pdf_expected = pd.DataFrame.from_records(ndarr, index=pd.RangeIndex(10))
# from structured array of mars
tensor = mt.ones((10,), dtype=dtype, chunk_size=3)
df1 = from_records(tensor)
df1_result = df1.execute().fetch()
pd.testing.assert_frame_equal(df1_result, pdf_expected)
# from structured array of numpy
df2 = from_records(ndarr)
df2_result = df2.execute().fetch()
pd.testing.assert_frame_equal(df2_result, pdf_expected)
def test_read_csv_execution(setup):
with tempfile.TemporaryDirectory() as tempdir:
file_path = os.path.join(tempdir, "test.csv")
df = pd.DataFrame(
np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.int64),
columns=["a", "b", "c"],
)
df.to_csv(file_path)
pdf = pd.read_csv(file_path, index_col=0)
r = md.read_csv(file_path, index_col=0)
mdf = r.execute().fetch()
pd.testing.assert_frame_equal(pdf, mdf)
# size_res = self.executor.execute_dataframe(r, mock=True)
# assert sum(s[0] for s in size_res) == os.stat(file_path).st_size
mdf2 = md.read_csv(file_path, index_col=0, chunk_bytes=10).execute().fetch()
pd.testing.assert_frame_equal(pdf, mdf2)
mdf = md.read_csv(file_path, index_col=0, nrows=1).execute().fetch()
pd.testing.assert_frame_equal(df[:1], mdf)
# test names and usecols
with tempfile.TemporaryDirectory() as tempdir:
file_path = os.path.join(tempdir, "test.csv")
df = pd.DataFrame(
np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.int64),
columns=["a", "b", "c"],
)
df.to_csv(file_path, index=False)
mdf = md.read_csv(file_path, usecols=["c", "b"]).execute().fetch()
pd.testing.assert_frame_equal(pd.read_csv(file_path, usecols=["c", "b"]), mdf)
mdf = (
md.read_csv(file_path, names=["a", "b", "c"], usecols=["c", "b"])
.execute()
.fetch()
)
pd.testing.assert_frame_equal(
pd.read_csv(file_path, names=["a", "b", "c"], usecols=["c", "b"]), mdf
)
mdf = (
md.read_csv(file_path, names=["a", "b", "c"], usecols=["a", "c"])
.execute()
.fetch()
)
pd.testing.assert_frame_equal(
pd.read_csv(file_path, names=["a", "b", "c"], usecols=["a", "c"]), mdf
)
mdf = md.read_csv(file_path, usecols=["a", "c"]).execute().fetch()
pd.testing.assert_frame_equal(pd.read_csv(file_path, usecols=["a", "c"]), mdf)
# test sep
with tempfile.TemporaryDirectory() as tempdir:
file_path = os.path.join(tempdir, "test.csv")
df = pd.DataFrame(
np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), columns=["a", "b", "c"]
)
df.to_csv(file_path, sep=";")
pdf = pd.read_csv(file_path, sep=";", index_col=0)
mdf = md.read_csv(file_path, sep=";", index_col=0).execute().fetch()
pd.testing.assert_frame_equal(pdf, mdf)
mdf2 = (
md.read_csv(file_path, sep=";", index_col=0, chunk_bytes=10)
.execute()
.fetch()
)
pd.testing.assert_frame_equal(pdf, mdf2)
# test missing value
with tempfile.TemporaryDirectory() as tempdir:
file_path = os.path.join(tempdir, "test.csv")
df = pd.DataFrame(
{
"c1": [np.nan, "a", "b", "c"],
"c2": [1, 2, 3, np.nan],
"c3": [np.nan, np.nan, 3.4, 2.2],
}
)
df.to_csv(file_path)
pdf = pd.read_csv(file_path, index_col=0)
mdf = md.read_csv(file_path, index_col=0).execute().fetch()
pd.testing.assert_frame_equal(pdf, mdf)
mdf2 = md.read_csv(file_path, index_col=0, chunk_bytes=12).execute().fetch()
pd.testing.assert_frame_equal(pdf, mdf2)
with tempfile.TemporaryDirectory() as tempdir:
file_path = os.path.join(tempdir, "test.csv")
index = pd.date_range(start="1/1/2018", periods=100)
df = pd.DataFrame(
{
"col1": np.random.rand(100),
"col2": np.random.choice(["a", "b", "c"], (100,)),
"col3": np.arange(100),
},
index=index,
)
df.to_csv(file_path)
pdf = pd.read_csv(file_path, index_col=0)
mdf = md.read_csv(file_path, index_col=0).execute().fetch()
pd.testing.assert_frame_equal(pdf, mdf)
mdf2 = md.read_csv(file_path, index_col=0, chunk_bytes=100).execute().fetch()
pd.testing.assert_frame_equal(pdf, mdf2)
# test nan
with tempfile.TemporaryDirectory() as tempdir:
file_path = os.path.join(tempdir, "test.csv")
df = pd.DataFrame(
{
"col1": np.random.rand(100),
"col2": np.random.choice(["a", "b", "c"], (100,)),
"col3": np.arange(100),
}
)
df.iloc[20:, :] = pd.NA
df.to_csv(file_path)
pdf = pd.read_csv(file_path, index_col=0)
mdf = md.read_csv(file_path, index_col=0, head_lines=10, chunk_bytes=200)
result = mdf.execute().fetch()
pd.testing.assert_frame_equal(pdf, result)
# dtypes is inferred as expected
pd.testing.assert_series_equal(
mdf.dtypes, pd.Series(["float64", "object", "int64"], index=df.columns)
)
# test compression
with tempfile.TemporaryDirectory() as tempdir:
file_path = os.path.join(tempdir, "test.gzip")
index = pd.date_range(start="1/1/2018", periods=100)
df = pd.DataFrame(
{
"col1": np.random.rand(100),
"col2": np.random.choice(["a", "b", "c"], (100,)),
"col3": np.arange(100),
},
index=index,
)
df.to_csv(file_path, compression="gzip")
pdf = pd.read_csv(file_path, compression="gzip", index_col=0)
mdf = md.read_csv(file_path, compression="gzip", index_col=0).execute().fetch()
pd.testing.assert_frame_equal(pdf, mdf)
mdf2 = (
md.read_csv(file_path, compression="gzip", index_col=0, chunk_bytes="1k")
.execute()
.fetch()
)
pd.testing.assert_frame_equal(pdf, mdf2)
# test multiple files
for merge_small_file_option in [{"n_sample_file": 1}, None]:
with tempfile.TemporaryDirectory() as tempdir:
df = pd.DataFrame(np.random.rand(300, 3), columns=["a", "b", "c"])
file_paths = [os.path.join(tempdir, f"test{i}.csv") for i in range(3)]
df[:100].to_csv(file_paths[0])
df[100:200].to_csv(file_paths[1])
df[200:].to_csv(file_paths[2])
mdf = (
md.read_csv(
file_paths,
index_col=0,
merge_small_file_options=merge_small_file_option,
)
.execute()
.fetch()
)
pd.testing.assert_frame_equal(df, mdf)
mdf2 = (
md.read_csv(file_paths, index_col=0, chunk_bytes=50).execute().fetch()
)
pd.testing.assert_frame_equal(df, mdf2)
# test wildcards in path
with tempfile.TemporaryDirectory() as tempdir:
df = pd.DataFrame(np.random.rand(300, 3), columns=["a", "b", "c"])
file_paths = [os.path.join(tempdir, f"test{i}.csv") for i in range(3)]
df[:100].to_csv(file_paths[0])
df[100:200].to_csv(file_paths[1])
df[200:].to_csv(file_paths[2])
# As we can not guarantee the order in which these files are processed,
# the result may not keep the original order.
mdf = md.read_csv(f"{tempdir}/*.csv", index_col=0).execute().fetch()
pd.testing.assert_frame_equal(df, mdf.sort_index())
mdf2 = (
md.read_csv(f"{tempdir}/*.csv", index_col=0, chunk_bytes=50)
.execute()
.fetch()
)
pd.testing.assert_frame_equal(df, mdf2.sort_index())
# test read directory
with tempfile.TemporaryDirectory() as tempdir:
testdir = os.path.join(tempdir, "test_dir")
os.makedirs(testdir, exist_ok=True)
df = pd.DataFrame(np.random.rand(300, 3), columns=["a", "b", "c"])
file_paths = [os.path.join(testdir, f"test{i}.csv") for i in range(3)]
df[:100].to_csv(file_paths[0])
df[100:200].to_csv(file_paths[1])
df[200:].to_csv(file_paths[2])
# As we can not guarantee the order in which these files are processed,
# the result may not keep the original order.
mdf = md.read_csv(testdir, index_col=0).execute().fetch()
pd.testing.assert_frame_equal(df, mdf.sort_index())
mdf2 = md.read_csv(testdir, index_col=0, chunk_bytes=50).execute().fetch()
pd.testing.assert_frame_equal(df, mdf2.sort_index())
@pytest.mark.skipif(pa is None, reason="pyarrow not installed")
def test_read_csv_use_arrow_dtype(setup):
rs = np.random.RandomState(0)
df = pd.DataFrame(
{
"col1": rs.rand(100),
"col2": rs.choice(["a" * 2, "b" * 3, "c" * 4], (100,)),
"col3": np.arange(100),
}
)
with tempfile.TemporaryDirectory() as tempdir:
file_path = os.path.join(tempdir, "test.csv")
df.to_csv(file_path, index=False)
pdf = pd.read_csv(file_path)
mdf = md.read_csv(file_path, use_arrow_dtype=True)
result = mdf.execute().fetch()
assert isinstance(mdf.dtypes.iloc[1], md.ArrowStringDtype)
assert isinstance(result.dtypes.iloc[1], md.ArrowStringDtype)
pd.testing.assert_frame_equal(arrow_array_to_objects(result), pdf)
with tempfile.TemporaryDirectory() as tempdir:
with option_context({"dataframe.use_arrow_dtype": True}):
file_path = os.path.join(tempdir, "test.csv")
df.to_csv(file_path, index=False)
pdf = pd.read_csv(file_path)
mdf = md.read_csv(file_path)
result = mdf.execute().fetch()
assert isinstance(mdf.dtypes.iloc[1], md.ArrowStringDtype)
assert isinstance(result.dtypes.iloc[1], md.ArrowStringDtype)
pd.testing.assert_frame_equal(arrow_array_to_objects(result), pdf)
# test compression
with tempfile.TemporaryDirectory() as tempdir:
file_path = os.path.join(tempdir, "test.gzip")
df.to_csv(file_path, compression="gzip", index=False)
pdf = pd.read_csv(file_path, compression="gzip")
mdf = md.read_csv(file_path, compression="gzip", use_arrow_dtype=True)
result = mdf.execute().fetch()
assert isinstance(mdf.dtypes.iloc[1], md.ArrowStringDtype)
assert isinstance(result.dtypes.iloc[1], md.ArrowStringDtype)
pd.testing.assert_frame_equal(arrow_array_to_objects(result), pdf)
@require_cudf
def test_read_csv_gpu_execution(setup_gpu):
with tempfile.TemporaryDirectory() as tempdir:
file_path = os.path.join(tempdir, "test.csv")
df = pd.DataFrame(
{
"col1": np.random.rand(100),
"col2": np.random.choice(["a", "b", "c"], (100,)),
"col3": np.arange(100),
}
)
df.to_csv(file_path, index=False)
pdf = pd.read_csv(file_path)
mdf = md.read_csv(file_path, gpu=True).execute().fetch()
pd.testing.assert_frame_equal(
pdf.reset_index(drop=True), mdf.to_pandas().reset_index(drop=True)
)
mdf2 = md.read_csv(file_path, gpu=True, chunk_bytes=200).execute().fetch()
pd.testing.assert_frame_equal(
pdf.reset_index(drop=True), mdf2.to_pandas().reset_index(drop=True)
)
def test_read_csv_without_index(setup):
# test csv file without storing index
with tempfile.TemporaryDirectory() as tempdir:
file_path = os.path.join(tempdir, "test.csv")
df = pd.DataFrame(
np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), columns=["a", "b", "c"]
)
df.to_csv(file_path, index=False)
pdf = pd.read_csv(file_path)
mdf = md.read_csv(file_path).execute().fetch()
pd.testing.assert_frame_equal(pdf, mdf)
mdf2 = md.read_csv(file_path, chunk_bytes=10).execute().fetch()
pd.testing.assert_frame_equal(pdf, mdf2)
file_path2 = os.path.join(tempdir, "test.csv")
df = pd.DataFrame(
np.random.RandomState(0).rand(100, 10),
columns=[f"col{i}" for i in range(10)],
)
df.to_csv(file_path2, index=False)
mdf3 = md.read_csv(file_path2, chunk_bytes=os.stat(file_path2).st_size / 5)
result = mdf3.execute().fetch()
expected = pd.read_csv(file_path2)
pd.testing.assert_frame_equal(result, expected)
# test incremental_index = False
mdf4 = md.read_csv(
file_path2,
chunk_bytes=os.stat(file_path2).st_size / 5,
incremental_index=False,
)
result = mdf4.execute().fetch()
assert not result.index.is_monotonic_increasing
expected = pd.read_csv(file_path2)
pd.testing.assert_frame_equal(result.reset_index(drop=True), expected)
@pytest.mark.skipif(sqlalchemy is None, reason="sqlalchemy not installed")
def test_read_sql_execution(setup):
import sqlalchemy as sa
rs = np.random.RandomState(0)
test_df = pd.DataFrame(
{
"a": np.arange(10).astype(np.int64, copy=False),
"b": [f"s{i}" for i in range(10)],
"c": rs.rand(10),
"d": [
datetime.fromtimestamp(time.time() + 3600 * (i - 5)) for i in range(10)
],
}
)
with tempfile.TemporaryDirectory() as d:
table_name = "test"
table_name2 = "test2"
uri = "sqlite:///" + os.path.join(d, "test.db")
test_df.to_sql(table_name, uri, index=False)
# test read with table name
r = md.read_sql_table("test", uri, chunk_size=4)
result = r.execute().fetch()
pd.testing.assert_frame_equal(result, test_df)
# test read with sql string and offset method
r = md.read_sql_query(
"select * from test where c > 0.5", uri, parse_dates=["d"], chunk_size=4
)
result = r.execute().fetch()
pd.testing.assert_frame_equal(
result, test_df[test_df.c > 0.5].reset_index(drop=True)
)
# test read with sql string and partition method with integer cols
r = md.read_sql(
"select * from test where b > 's5'",
uri,
parse_dates=["d"],
partition_col="a",
num_partitions=3,
)
result = r.execute().fetch()
pd.testing.assert_frame_equal(
result, test_df[test_df.b > "s5"].reset_index(drop=True)
)
# test read with sql string and partition method with datetime cols
r = md.read_sql_query(
"select * from test where b > 's5'",
uri,
parse_dates={"d": "%Y-%m-%d %H:%M:%S"},
partition_col="d",
num_partitions=3,
)
result = r.execute().fetch()
pd.testing.assert_frame_equal(
result, test_df[test_df.b > "s5"].reset_index(drop=True)
)
# test read with sql string and partition method with datetime cols
r = md.read_sql_query(
"select * from test where b > 's5'",
uri,
parse_dates=["d"],
partition_col="d",
num_partitions=3,
index_col="d",
)
result = r.execute().fetch()
pd.testing.assert_frame_equal(result, test_df[test_df.b > "s5"].set_index("d"))
# test SQL that return no result
r = md.read_sql_query("select * from test where a > 1000", uri)
result = r.execute().fetch()
pd.testing.assert_frame_equal(result, pd.DataFrame(columns=test_df.columns))
engine = sa.create_engine(uri)
m = sa.MetaData()
try:
# test index_col and columns
r = md.read_sql_table(
"test",
engine.connect(),
chunk_size=4,
index_col="a",
columns=["b", "d"],
)
result = r.execute().fetch()
expected = test_df.copy(deep=True)
expected.set_index("a", inplace=True)
del expected["c"]
pd.testing.assert_frame_equal(result, expected)
# do not specify chunk_size
r = md.read_sql_table(
"test", engine.connect(), index_col="a", columns=["b", "d"]
)
result = r.execute().fetch()
pd.testing.assert_frame_equal(result, expected)
table = sa.Table(table_name, m, autoload=True, autoload_with=engine)
r = md.read_sql_table(
table,
engine,
chunk_size=4,
index_col=[table.columns["a"], table.columns["b"]],
columns=[table.columns["c"], "d"],
)
result = r.execute().fetch()
expected = test_df.copy(deep=True)
expected.set_index(["a", "b"], inplace=True)
pd.testing.assert_frame_equal(result, expected)
# test table with primary key
sa.Table(
table_name2,
m,
sa.Column("id", sa.Integer, primary_key=True),
sa.Column("a", sa.Integer),
sa.Column("b", sa.String),
sa.Column("c", sa.Float),
sa.Column("d", sa.DateTime),
)
m.create_all(engine)
test_df = test_df.copy(deep=True)
test_df.index.name = "id"
test_df.to_sql(table_name2, uri, if_exists="append")
r = md.read_sql_table(table_name2, engine, chunk_size=4, index_col="id")
result = r.execute().fetch()
pd.testing.assert_frame_equal(result, test_df)
finally:
engine.dispose()
@pytest.mark.skipif(pa is None, reason="pyarrow not installed")
def test_read_sql_use_arrow_dtype(setup):
rs = np.random.RandomState(0)
test_df = pd.DataFrame(
{
"a": np.arange(10).astype(np.int64, copy=False),
"b": [f"s{i}" for i in range(10)],
"c": rs.rand(10),
"d": [
datetime.fromtimestamp(time.time() + 3600 * (i - 5)) for i in range(10)
],
}
)
with tempfile.TemporaryDirectory() as d:
table_name = "test"
uri = "sqlite:///" + os.path.join(d, "test.db")
test_df.to_sql(table_name, uri, index=False)
r = md.read_sql_table("test", uri, chunk_size=4, use_arrow_dtype=True)
result = r.execute().fetch()
assert isinstance(r.dtypes.iloc[1], md.ArrowStringDtype)
assert isinstance(result.dtypes.iloc[1], md.ArrowStringDtype)
pd.testing.assert_frame_equal(arrow_array_to_objects(result), test_df)
# test read with sql string and offset method
r = md.read_sql_query(
"select * from test where c > 0.5",
uri,
parse_dates=["d"],
chunk_size=4,
use_arrow_dtype=True,
)
result = r.execute().fetch()
assert isinstance(r.dtypes.iloc[1], md.ArrowStringDtype)
assert isinstance(result.dtypes.iloc[1], md.ArrowStringDtype)
pd.testing.assert_frame_equal(
arrow_array_to_objects(result),
test_df[test_df.c > 0.5].reset_index(drop=True),
)
@pytest.mark.pd_compat
def test_date_range_execution(setup):
chunk_sizes = [None, 3]
inclusives = ["both", "neither", "left", "right"]
if _date_range_use_inclusive:
with pytest.warns(FutureWarning, match="closed"):
md.date_range("2020-1-1", periods=10, closed="right")
for chunk_size, inclusive in itertools.product(chunk_sizes, inclusives):
kw = dict()
if _date_range_use_inclusive:
kw["inclusive"] = inclusive
else:
if inclusive == "neither":
continue
elif inclusive == "both":
inclusive = None
kw["closed"] = inclusive
# start, periods, freq
dr = md.date_range("2020-1-1", periods=10, chunk_size=chunk_size, **kw)
result = dr.execute().fetch()
expected = pd.date_range("2020-1-1", periods=10, **kw)
pd.testing.assert_index_equal(result, expected)
# end, periods, freq
dr = md.date_range(end="2020-1-10", periods=10, chunk_size=chunk_size, **kw)
result = dr.execute().fetch()
expected = pd.date_range(end="2020-1-10", periods=10, **kw)
pd.testing.assert_index_equal(result, expected)
# start, end, freq
dr = md.date_range("2020-1-1", "2020-1-10", chunk_size=chunk_size, **kw)
result = dr.execute().fetch()
expected = pd.date_range("2020-1-1", "2020-1-10", **kw)
pd.testing.assert_index_equal(result, expected)
# start, end and periods
dr = md.date_range(
"2020-1-1", "2020-1-10", periods=19, chunk_size=chunk_size, **kw
)
result = dr.execute().fetch()
expected = pd.date_range("2020-1-1", "2020-1-10", periods=19, **kw)
pd.testing.assert_index_equal(result, expected)
# start, end and freq
dr = md.date_range(
"2020-1-1", "2020-1-10", freq="12H", chunk_size=chunk_size, **kw
)
result = dr.execute().fetch()
expected = pd.date_range("2020-1-1", "2020-1-10", freq="12H", **kw)
pd.testing.assert_index_equal(result, expected)
# test timezone
dr = md.date_range("2020-1-1", periods=10, tz="Asia/Shanghai", chunk_size=7)
result = dr.execute().fetch()
expected = pd.date_range("2020-1-1", periods=10, tz="Asia/Shanghai")
pd.testing.assert_index_equal(result, expected)
# test periods=0
dr = md.date_range("2020-1-1", periods=0)
result = dr.execute().fetch()
expected = pd.date_range("2020-1-1", periods=0)
pd.testing.assert_index_equal(result, expected)
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
import base64
from dash import Dash, dcc, html, callback_context
import dash_bootstrap_components as dbc
from dash.dependencies import Input, Output, State
from datetime import date, datetime
import io
import numpy as np
import pandas as pd
import plotly.express as px
import requests
# In[ ]:
# geo-json file transformed from justinelliotmeyers github
url_geo = "https://github.com/beto-Sibileau/ico_bihar_vacc_dist/raw/main/India_District_10_BR.geojson"
response_geo = requests.get(url_geo)
geofile = response_geo.json()
# district names geo-json/df mapping
data_2_map_district = {
"ARARIA": "Araria",
"ARWAL": "Arwal",
"AURANGABAD": "Aurangabad",
"BANKA": "Banka",
"BEGUSARAI": "Begusarai",
"BHAGALPUR": "Bhagalpur",
"BHOJPUR": "Bhojpur",
"BUXAR": "Buxar",
"CHAMPARANE EAST": "Purba Champaran",
"CHAMPARANE WEST": "Pashchim Champaran",
"DARBHANGA": "Darbhanga",
"GAYA": "Gaya",
"GOPALGANJ": "Gopalganj",
"JAMUI": "Jamui",
"JEHANABAD": "Jehanabad",
"KAIMUR": "Kaimur (bhabua)",
"KATIHAR": "Katihar",
"KHAGARIA": "Khagaria",
"KISHANGANJ": "Kishanganj",
"LAKHISARAI": "Lakhisarai",
"MADHEPURA": "Madhepura",
"MADHUBANI": "Madhubani",
"MUNGER": "Munger",
"MUZAFFARPUR": "Muzaffarpur",
"NALANDA": "Nalanda",
"NAWADA": "Nawada",
"PATNA": "Patna",
"PURNIA": "Purnia",
"ROHTAS": "Rohtas",
"SAHARSA": "Saharsa",
"SAMASTIPUR": "Samastipur",
"SARAN": "Saran",
"SHEIKHPURA": "Sheikhpura",
"SHEOHAR": "Sheohar",
"SITAMARHI": "Sitamarhi",
"SIWAN": "Siwan",
"SUPAUL": "Supaul",
"VAISHALI": "Vaishali",
}
# In[ ]:
# geo-json file refactor from google datameet group attach: 7f92b3f234ecf846/india%20subdists%202001.zip
url_geo_block = "https://raw.githubusercontent.com/beto-Sibileau/ico_bihar_vacc_dist/main/admin_census_bihar_blocks_refactor.json"
response_geo_block = requests.get(url_geo_block)
geofile_block = response_geo_block.json()
list_of_dict = [a_dict["properties"] for a_dict in geofile_block["features"]]
block_names = [a_polyg["NAME1_"] for a_polyg in list_of_dict]
fids = [a_polyg["FID"] for a_polyg in list_of_dict]
admin_blocks_df = pd.DataFrame({"Fid": fids, "Block_Id": block_names})
# In[ ]:
# dbc button: upload csv
bt_up = dcc.Upload(
dbc.Button(
html.P(
["Click to Upload ", html.Code("csv"), " File"],
style={
"margin-top": "12px",
"fontWeight": "bold",
},
),
id="btn",
class_name="me-1",
outline=True,
color="info",
),
id="upload-data",
)
# dash date picker
date_picker = dcc.DatePickerRange(
id="my-date-picker-range",
min_date_allowed=date(2021, 10, 26),
max_date_allowed=date(2022, 2, 27),
# initial_visible_month=date(2022, 1, 1),
start_date=date(2021, 10, 26), # date(2022, 1, 1), #
end_date=date(2022, 2, 27), # date(2022, 1, 31), #
display_format="Do MMM YYYY",
style={"fontSize": 20},
)
# dbc data upload row
upload_row = dbc.Container(
dbc.Row(
[
dbc.Col(
html.Div(
[
html.P(
"Load Vaccine Distribution",
style={
"fontWeight": "bold",
"fontSize": "18px",
"marginBottom": "10px",
"textAlign": "center",
},
),
bt_up,
html.Div(
id="uploading-state",
className="output-uploading-state",
style={
"color": "DarkGreen",
"textAlign": "center",
},
),
]
),
width="auto",
),
dbc.Col(
html.Div(
[
html.P(
"Click to Select Dates",
style={
"fontWeight": "bold",
"fontSize": "18px",
"marginBottom": "20px",
"textAlign": "center",
},
),
date_picker,
]
),
width="auto",
),
],
justify="evenly",
align="start",
),
fluid=True,
)
# In[ ]:
# function to return cards layout
# dbc kpi card: https://www.nelsontang.com/blog/2020-07-02-dash-bootstrap-kpi-card/
def create_card(card_title, card_num):
card = dbc.Card(
[
dbc.CardBody(
[
html.H4(
card_title,
className="card-title",
style={"textAlign": "center"},
),
html.P(
children="N/A",
className="card-text",
id=f"card-top-list-{card_num}",
),
# note there's card-text bootstrap class ...
# html.P(
# "Target: $10.0 M",
# className="card-target",
# ),
# html.Span([
# html.I(className="fas fa-arrow-circle-up up"),
# html.Span(" 5.5% vs Last Year",
# className="up")
# ])
]
)
],
color="info",
outline=True,
)
return card
# cards to display top districts and blocks
cards_row = dbc.Container(
dbc.Row(
[
dbc.Col(create_card("Vaccine Top 10 Divisions", 1), width="auto"),
dbc.Col(create_card("Vaccine Top 10 Districts", 2), width="auto"),
dbc.Col(create_card("Vaccine Top 10 Subdivisions", 3), width="auto"),
dbc.Col(create_card("Vaccine Top 10 Blocks", 4), width="auto"),
],
justify="evenly",
),
fluid=True,
)
# In[ ]:
# dictionary for plotly: label with no figure
label_no_fig = {
"layout": {
"xaxis": {"visible": False},
"yaxis": {"visible": False},
"annotations": [
{
"text": "No matching data",
"xref": "paper",
"yref": "paper",
"showarrow": False,
"font": {"size": 28},
}
],
}
}
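# Hedged sketch (this helper is an assumption and is not wired into the callbacks
# below): it shows the intended use of `label_no_fig` — any callback whose filtered
# dataframe comes back empty returns the placeholder instead of a real figure.
def figure_or_placeholder(df, plot_fn):
    """Return plot_fn(df) when df has rows, otherwise the 'No matching data' placeholder."""
    if df is None or df.empty:
        return label_no_fig
    return plot_fn(df)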
# In[ ]:
# dbc select: KPI map
dd_kpi_map = dbc.Select(
id="my-map-dd",
options=[],
value="",
)
# hard-coded columns: presence of officials
col_officials = ["MOIC_pres", "MO_pres", "BHM_pres", "BCM_pres", "CCM_pres"]
# hard-coded options for map
kpi_map_value = ["Sess_plan", "Sess_with_vacc", "Sess_with_vacc_ratio", *col_officials]
kpi_map_label = [
"Total number of planned sessions",
"Total number of sessions where the vaccine distribution was done by 8.00 am",
"Proportion of sessions where the vaccine distribution was done by 8.00 am [%]",
"Proportion of sessions with presence of MOIC officials during the vaccine distribution [%]",
"Proportion of sessions with presence of MO officials during the vaccine distribution [%]",
"Proportion of sessions with presence of BHM officials during the vaccine distribution [%]",
"Proportion of sessions with presence of BCM officials during the vaccine distribution [%]",
"Proportion of sessions with presence of CCM officials during the vaccine distribution [%]",
]
map_options = [{"label": l, "value": v} for l, v in zip(kpi_map_label, kpi_map_value)]
# dbc map/maps row
map_row = dbc.Container(
[
dbc.Row(
[
dbc.Col(
html.Div(
[
html.P(
"Key Performance Indicators: Bihar Administrative Geolocation",
style={
"fontWeight": "bold", # 'normal', #
"textAlign": "left", # 'center', #
# 'paddingTop': '25px',
"color": "DeepSkyBlue",
"fontSize": "18px",
"marginBottom": "10px",
},
),
dd_kpi_map,
]
),
width="auto",
),
],
justify="start",
align="start",
style={
"paddingLeft": "25px",
"marginBottom": "30px",
},
),
dbc.Row(
[
dbc.Col(dcc.Graph(id="district-plot", figure=label_no_fig), width=5),
dbc.Col(dcc.Graph(id="block-plot", figure=label_no_fig), width=5),
],
justify="evenly",
align="start",
),
],
fluid=True,
)
# In[ ]:
# # dbc select: trends
# dd_kpi_trends = dbc.Select(
# id="my-trends-dd",
# options=[],
# value="",
# )
# dcc dropdown: trends --> dcc allows multi, styling not as dbc
dd_kpi_trends = dcc.Dropdown(
id="my-trends-dd",
options=[],
value="",
multi=True,
)
# hard-coded options for trends
kpi_with_trend_value = ["Sess_plan", "Sess_with_vacc", "ALL_pres", *col_officials]
kpi_with_trend_label = [
"Trend in the total number of planned sessions",
"Trend in the total number of sessions where the vaccine distribution was done by 8.00 am",
"Trend in the presence of at least one official during the vaccine distribution",
"Trend in the presence of MOIC officials during the vaccine distribution",
"Trend in the presence of MO officials during the vaccine distribution",
"Trend in the presence of BHM officials during the vaccine distribution",
"Trend in the presence of BCM officials during the vaccine distribution",
"Trend in the presence of CCM officials during the vaccine distribution",
]
trend_options = [
{"label": l, "value": v} for l, v in zip(kpi_with_trend_label, kpi_with_trend_value)
]
# # dbc select: districts
# dd_districts = dbc.Select(
# id="my-districts-dd",
# options=[],
# value="",
# )
# dcc dropdown: districts --> dcc allows multi, styling not as dbc
dd_districts = dcc.Dropdown(
id="my-districts-dd",
options=[],
value="",
multi=True,
)
# hard-coded options for districts
district_options = [{"label": k, "value": k} for k in data_2_map_district]
# dbc ButtonGroup with RadioItems
button_group = html.Div(
[
dbc.RadioItems(
id="radios",
className="btn-group",
inputClassName="btn-check",
labelClassName="btn btn-outline-info",
labelCheckedClassName="active",
options=[
{"label": "Monthly", "value": "M"},
{"label": "Weekly", "value": "W"},
{"label": "Daily", "value": "D"},
],
value="D",
),
],
className="radio-group",
)
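# Hedged sketch (the helper name is an assumption and nothing below calls it): the radio
# value above is a pandas offset alias ("M", "W", "D"), so a district-wise trend can be
# re-aggregated directly with pd.Grouper, mirroring the aggregation district_calc performs later.
def aggregate_trend(df, freq):
    """Aggregate a dataframe with Date/District columns to monthly, weekly or daily totals."""
    return (
        df.set_index("Date")
        .groupby([pd.Grouper(freq=freq), "District"], sort=False)
        .agg("sum")
    )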
# dbc trends row
trends_row = dbc.Container(
[
dbc.Row(
[
dbc.Col(
html.Div(
[
html.P(
"Key Performance Indicators: District-wise trends",
style={
"fontWeight": "bold", # 'normal', #
"textAlign": "left", # 'center', #
# 'paddingTop': '25px',
"color": "DeepSkyBlue",
"fontSize": "18px",
"marginBottom": "10px",
},
),
dd_kpi_trends,
],
style={"font-size": "85%"},
),
width={"size": 5}, # width="auto",
),
dbc.Col(
html.Div(
[dd_districts], style={"font-size": "85%"}
), # , className="dash-bootstrap"
width={"size": 4, "offset": 1},
),
],
justify="start",
align="end",
style={
"paddingLeft": "25px",
"marginBottom": "25px",
},
),
dbc.Row(
[
dbc.Col(button_group, width="auto"),
],
justify="start",
align="start",
style={"paddingLeft": "25px"},
),
dbc.Row(
[
dbc.Col(dcc.Graph(id="trends-plot", figure=label_no_fig), width=7),
],
justify="evenly",
align="start",
),
],
fluid=True, # className="dash-bootstrap"
)
# In[ ]:
# # dbc select: Pie charts for "presence of officials"
# dd_offic_pie = dbc.Select(
# id="my-offic-dd",
# options=[],
# value="",
# )
# # hard-coded options for pie charts dropdown
# offic_value = ["ALL_pres", *col_officials]
# offic_label = [
# "Percentage of at-least-one-official presence during the vaccine distribution",
# "Percentage of MOIC officials presence during the vaccine distribution",
# "Percentage of MO officials presence during the vaccine distribution",
# "Percentage of BHM officials presence during the vaccine distribution",
# "Percentage of BCM officials presence during the vaccine distribution",
# "Percentage of CCM officials presence during the vaccine distribution",
# ]
# offic_options = [{"label": l, "value": v} for l, v in zip(offic_label, offic_value)]
# dbc select: dynamic selector for "presence of officials" Pie charts
dd_dyn_pie = dbc.Select(
id="my-dyn-dd",
options=[],
value="",
)
# dbc ButtonGroup with RadioItems: drill down admin levels
admin_levels_group = html.Div(
[
dbc.RadioItems(
id="admin-radios",
className="btn-group",
inputClassName="btn-check",
labelClassName="btn btn-outline-info",
labelCheckedClassName="active",
options=[
{"label": "Division", "value": 1},
{"label": "District", "value": 2},
{"label": "Subdivision", "value": 3},
{"label": "Block", "value": 4},
],
value=1,
),
],
className="radio-group",
)
# dbc pie chart row: presence of officials
pie_row = dbc.Container(
[
dbc.Row(
[
dbc.Col(
html.Div(
[
html.P(
"Presence of Officials across administrative levels",
style={
"fontWeight": "bold", # 'normal', #
"textAlign": "left", # 'center', #
# 'paddingTop': '25px',
"color": "DeepSkyBlue",
"fontSize": "18px",
"marginBottom": "10px",
},
),
dd_dyn_pie,
]
),
width="auto",
),
# dbc.Col(
# html.Div([dd_dyn_pie]), # , className="dash-bootstrap"
# width="auto",
# ),
],
justify="start",
align="end",
style={
"paddingLeft": "25px",
"marginBottom": "25px",
},
),
dbc.Row(
[
dbc.Col(admin_levels_group, width="auto"),
],
justify="start",
align="start",
style={"paddingLeft": "25px", "marginBottom": "10px"},
),
dbc.Row(
[
dbc.Col(dcc.Graph(id="pie-plot-1", figure=label_no_fig), width="auto"),
dbc.Col(dcc.Graph(id="pie-plot-2", figure=label_no_fig), width="auto"),
],
justify="evenly",
align="start",
),
],
fluid=True, # className="dash-bootstrap"
)
# In[ ]:
# external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
fontawesome_stylesheet = "https://use.fontawesome.com/releases/v5.8.1/css/all.css"
# Build App
# app = JupyterDash(__name__, external_stylesheets=external_stylesheets)
app = Dash(
__name__, external_stylesheets=[dbc.themes.BOOTSTRAP, fontawesome_stylesheet]
)
# app = JupyterDash(__name__)
# to deploy using WSGI server
server = app.server
# app tittle for web browser
app.title = "UNICEF Bihar Vaccine Distribution"
# title row
title_row = dbc.Container(
dbc.Row(
[
dbc.Col(
html.Img(src="assets/logo-unicef-large.svg"),
width=3,
# width={"size": 3, "offset": 1},
style={"paddingLeft": "20px", "paddingTop": "20px"},
),
dbc.Col(
html.Div(
[
html.H6(
"ICO Bihar Vaccine Distribution",
style={
"fontWeight": "bold",
"textAlign": "center",
"paddingTop": "25px",
"color": "white",
"fontSize": "32px",
},
),
]
),
# width='auto',
width={"size": "auto", "offset": 1},
),
],
justify="start",
align="center",
),
fluid=True,
)
# App Layout
app.layout = html.Div(
[
# title Div
html.Div(
[title_row],
style={
"height": "100px",
"width": "100%",
"backgroundColor": "DeepSkyBlue",
"margin-left": "auto",
"margin-right": "auto",
"margin-top": "15px",
},
),
# div upload row
html.Div(
[upload_row],
style={
"paddingTop": "20px",
},
),
html.Hr(
style={
"color": "DeepSkyBlue",
"height": "3px",
"margin-top": "30px",
"margin-bottom": "0",
}
),
# div 4-cards row
dcc.Loading(
children=html.Div(
[cards_row],
style={
"paddingTop": "30px",
# 'paddingBottom': '30px',
},
),
id="loading-kpis",
type="circle",
fullscreen=True,
),
html.Hr(
style={
"color": "DeepSkyBlue",
"height": "3px",
"margin-top": "30px",
"margin-bottom": "0",
}
),
# div map row
dcc.Loading(
children=html.Div(
[map_row],
style={
"paddingTop": "20px",
},
),
id="loading-map",
type="circle",
fullscreen=True,
),
html.Hr(
style={
"color": "DeepSkyBlue",
"height": "3px",
"margin-top": "30px",
"margin-bottom": "0",
}
),
# div trends row (no loading added)
html.Div(
[trends_row],
style={
"paddingTop": "20px",
},
),
html.Hr(
style={
"color": "DeepSkyBlue",
"height": "3px",
"margin-top": "30px",
"margin-bottom": "0",
}
),
# div pies row (no loading added)
html.Div(
[pie_row],
style={
"paddingTop": "20px",
},
),
# dbc Modal: output msg from load button - wraped by Spinner
dcc.Loading(
children=dbc.Modal(
[
dbc.ModalHeader(
dbc.ModalTitle("Upload Message"), close_button=False
),
dbc.ModalBody(id="my-modal-body"),
dbc.ModalFooter(
dbc.Button("Close", id="btn-close", class_name="ms-auto")
),
],
id="my-modal",
is_open=False,
keyboard=False,
backdrop="static",
scrollable=True,
centered=True,
),
id="loading-modal",
type="circle",
fullscreen=True,
),
# hidden div: ouput msg from load button
html.Div(id="output-data-upload", style={"display": "none"}),
# hidden div: share csv-df in Dash
html.Div(id="csv-df", style={"display": "none"}),
# hidden div: share calculated kpis for districts
html.Div(id="df-district-kpis", style={"display": "none"}),
# hidden div: share calculated kpis for blocks
html.Div(id="df-block-kpis", style={"display": "none"}),
# hidden div: share calculations for divisions
html.Div(id="df-div-kpis", style={"display": "none"}),
# hidden div: share calculations for Sub_divisions
html.Div(id="df-subdiv-kpis", style={"display": "none"}),
# hidden div: share calculated kpis for trends (districts)
html.Div(id="df-trends-kpis", style={"display": "none"}),
# hidden div: share selected admin level df for pie chart
html.Div(id="df-admin-selected", style={"display": "none"}),
]
)
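# Hedged sketch of the hidden-div pattern declared above (function names are illustrative
# and not referenced by the callbacks): one callback serialises a dataframe into a hidden
# div as JSON, a downstream callback deserialises it back.
def df_to_store(df):
    return df.to_json(orient="split")


def df_from_store(json_str):
    return pd.read_json(json_str, orient="split")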
# In[ ]:
# showing Loading trick for dcc Upload? - Also displays loaded filename
@app.callback(
Output("uploading-state", "children"),
Input("upload-data", "contents"),
State("upload-data", "filename"),
prevent_initial_call=True,
)
def upload_triggers_spinner(_, filename):
return filename
# In[ ]:
def read_csv_file(contents, filename, date):
# decoded as proposed in Dash Doc
_, content_string = contents.split(",")
decoded = base64.b64decode(content_string)
try:
if "csv" in filename:
# Assume user uploaded a csv file
# read csv into dataframe: appointments
vacc_dist_df = pd.read_csv(io.BytesIO(decoded), parse_dates=["Date"])
else:
# Warn user hasn't uploaded a csv file
return (
[
"Vaccine Distribution must be a ",
html.Code("csv"),
" File",
],
{},
)
except Exception as e:
print(e)
# Warn user csv file hasn't been read
return (
f"There was an error processing {filename}",
{},
)
# simple validation: col_2_check must be in dataframe
col_2_check = [
"Date",
"S_Num",
"District",
"Block",
"Sess_plan",
"Sess_with_vacc",
"Notes",
"Division",
"Sub_division",
"Block_Id",
"Sub_div_Id",
]
col_2_check.extend(col_officials)
col_check = [col in vacc_dist_df.columns for col in col_2_check]
# missing columns
miss_col = [i for (i, v) in zip(col_2_check, col_check) if not v]
# missing columns result
if all(col_check):
# return ingestion message and csv data
return (
[
f"Uploaded File is {filename}",
html.Br(),
f"Last modified datetime is {datetime.fromtimestamp(date)}",
],
# csv to json: sharing data within Dash
vacc_dist_df.to_json(orient="split"),
)
else:
# return ingestion message and no dataframe
return (
[
f"Uploaded File is {filename}",
html.Br(),
f"Last modified datetime is {datetime.fromtimestamp(date)}",
html.Br(),
f"KPIs not calculated. Missing columns: {miss_col}",
],
# no dataframe return
{},
)
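# Hedged example of the expected upload schema (values are illustrative only; the
# validation above checks column names, not content):
# Date,S_Num,District,Block,Sess_plan,Sess_with_vacc,Notes,Division,Sub_division,Block_Id,Sub_div_Id,MOIC_pres,MO_pres,BHM_pres,BCM_pres,CCM_pres
# 2021-10-26,1,PATNA,Patna Sadar,10,8,,Patna,Patna Sadar,Patna Sadar,Patna Sadar,Y,N,Y,N,N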
# In[ ]:
@app.callback(
Output("output-data-upload", "children"),
Output("csv-df", "children"),
Input("upload-data", "contents"),
State("upload-data", "filename"),
State("upload-data", "last_modified"),
prevent_initial_call=True,
)
def wrap_csv_read(loaded_file, file_name, file_last_mod):
# coded as proposed in Dash Doc
# callback sees changes in content only (eg: not same content with different filename)
if loaded_file is not None:
# returned: (msg_out, app_json)
return read_csv_file(loaded_file, file_name, file_last_mod)
# In[ ]:
@app.callback(
Output("my-modal-body", "children"),
Output("my-modal", "is_open"),
Input("output-data-upload", "children"),
Input("btn-close", "n_clicks"),
State("my-modal", "is_open"),
prevent_initial_call=True,
)
def update_modal(msg_in, click_close, is_open):
# identify callback context
triger_id = callback_context.triggered[0]["prop_id"].split(".")[0]
# specify action by trigger
if "data-upload" in triger_id:
return (
html.P(msg_in),
not is_open,
)
else:
# button close
return {}, not is_open
# In[ ]:
# vaccine distribution: aggregate planned sessions in dates
def district_calc(df, ini_date, end_date):
# json to dataframe
vacc_df = pd.read_json(
df,
orient="split",
convert_dates=["Date"],
)
# filter `Date` within dates
query_dates = [
"`Date` >= @ini_date",
"`Date` <= @end_date",
]
df_in_dates = vacc_df.query("&".join(query_dates)).reset_index(drop=True)
# no entry between dates: return empty
if df_in_dates.empty:
return (
"N/A",
"N/A",
"N/A",
"N/A",
{},
{},
{},
{},
[],
"",
[],
"",
[],
"",
{},
) # , [], "", [], "", {}
# kpi: District/Blocks sessions and vacc distribution - Count rows in Notes
top_block_df = (
df_in_dates.groupby("Block_Id", sort=False)
.agg({"Sess_plan": "sum", "Sess_with_vacc": "sum", "Notes": "size"})
.astype({"Sess_plan": "int64", "Sess_with_vacc": "int64"})
)
top_distr_df = (
df_in_dates.groupby("District", sort=False)
.agg({"Sess_plan": "sum", "Sess_with_vacc": "sum", "Notes": "size"})
.astype({"Sess_plan": "int64", "Sess_with_vacc": "int64"})
)
# kpi: Div/Sub_div sessions and vacc distribution - Count rows in Notes
top_div_df = (
df_in_dates.groupby("Division", sort=False)
.agg({"Sess_plan": "sum", "Sess_with_vacc": "sum", "Notes": "size"})
.astype({"Sess_plan": "int64", "Sess_with_vacc": "int64"})
)
top_subdiv_df = (
df_in_dates.groupby("Sub_div_Id", sort=False)
.agg({"Sess_plan": "sum", "Sess_with_vacc": "sum", "Notes": "size"})
.astype({"Sess_plan": "int64", "Sess_with_vacc": "int64"})
)
# kpi: District/Blocks % sessions with vacc distribution
top_block_df["Sess_with_vacc_ratio"] = round(
top_block_df.Sess_with_vacc / top_block_df.Sess_plan * 100
)
top_distr_df["Sess_with_vacc_ratio"] = round(
top_distr_df.Sess_with_vacc / top_distr_df.Sess_plan * 100
)
# kpi: District/Blocks % sessions with presence of officials
is_present_list = []
for off_col in col_officials:
is_y_present = df_in_dates[off_col].str.contains(
"y", case=False, regex=False, na=False
)
is_n_present = df_in_dates[off_col].str.contains(
"n", case=False, regex=False, na=False
)
is_present = is_y_present & ~is_n_present
top_block_df[off_col] = round(
df_in_dates[is_present]
.groupby("Block_Id", sort=False)
.agg({"Notes": "size"})
.Notes
/ top_block_df.Notes
* 100
)
top_distr_df[off_col] = round(
df_in_dates[is_present]
.groupby("District", sort=False)
.agg({"Notes": "size"})
.Notes
/ top_distr_df.Notes
* 100
)
# add distr/block absolutes
off_col_abs = off_col + "_abs"
top_block_df[off_col_abs] = (
df_in_dates[is_present]
.groupby("Block_Id", sort=False)
.agg({"Notes": "size"})
.Notes
)
top_distr_df[off_col_abs] = (
df_in_dates[is_present]
.groupby("District", sort=False)
.agg({"Notes": "size"})
.Notes
)
# add div/sub_div absolutes
top_div_df[off_col_abs] = (
df_in_dates[is_present]
.groupby("Division", sort=False)
.agg({"Notes": "size"})
.Notes
)
top_subdiv_df[off_col_abs] = (
df_in_dates[is_present]
.groupby("Sub_div_Id", sort=False)
.agg({"Notes": "size"})
.Notes
)
# normalize presence of officials
df_in_dates.loc[is_present, off_col] = "YES"
df_in_dates.loc[~is_present, off_col] = "NO"
# store mask for "at least one present"
is_present_list.append(is_present)
# "at least one official present": absolute yes
is_one_or_more = np.logical_or.reduce(is_present_list)
one_plus_col_y = "YES"
top_block_df[one_plus_col_y] = (
df_in_dates[is_one_or_more]
.groupby("Block_Id", sort=False)
.agg({"Notes": "size"})
.Notes
)
top_distr_df[one_plus_col_y] = (
df_in_dates[is_one_or_more]
.groupby("District", sort=False)
.agg({"Notes": "size"})
.Notes
)
# add div/sub_div
top_div_df[one_plus_col_y] = (
df_in_dates[is_one_or_more]
.groupby("Division", sort=False)
.agg({"Notes": "size"})
.Notes
)
top_subdiv_df[one_plus_col_y] = (
df_in_dates[is_one_or_more]
.groupby("Sub_div_Id", sort=False)
.agg({"Notes": "size"})
.Notes
)
# "at least one official present": absolute no
one_plus_col_n = "NO"
top_block_df[one_plus_col_n] = (
df_in_dates[~is_one_or_more]
.groupby("Block_Id", sort=False)
.agg({"Notes": "size"})
.Notes
)
top_distr_df[one_plus_col_n] = (
df_in_dates[~is_one_or_more]
.groupby("District", sort=False)
.agg({"Notes": "size"})
.Notes
)
# add div/sub_div
top_div_df[one_plus_col_n] = (
df_in_dates[~is_one_or_more]
.groupby("Division", sort=False)
.agg({"Notes": "size"})
.Notes
)
top_subdiv_df[one_plus_col_n] = (
df_in_dates[~is_one_or_more]
.groupby("Sub_div_Id", sort=False)
.agg({"Notes": "size"})
.Notes
)
# sort values for kpi TOP District/Blocks
top_block_df.reset_index(inplace=True)
top_block_df.sort_values(
"Sess_with_vacc",
ascending=False,
ignore_index=True,
inplace=True,
)
top_distr_df.reset_index(inplace=True)
top_distr_df.sort_values(
"Sess_with_vacc",
ascending=False,
ignore_index=True,
inplace=True,
)
# sort values for kpi TOP Div/Sub_div
top_div_df.reset_index(inplace=True)
top_div_df.sort_values(
"Sess_with_vacc",
ascending=False,
ignore_index=True,
inplace=True,
)
top_subdiv_df.reset_index(inplace=True)
top_subdiv_df.sort_values(
"Sess_with_vacc",
ascending=False,
ignore_index=True,
inplace=True,
)
# # add new aggregate for sessions: division and subdivision
# top_div_df = (
# df_in_dates.groupby("Division", sort=False, as_index=False)
# .agg({"Sess_plan": "sum", "Sess_with_vacc": "sum"})
# .astype({'Sess_plan': 'int64', 'Sess_with_vacc': 'int64'})
# ).sort_values(
# "Sess_with_vacc",
# ascending=False,
# ignore_index=True,
# )
# top_subdiv_df = (
# df_in_dates.groupby("Sub_div_Id", sort=False, as_index=False)
# .agg({"Sess_plan": "sum", "Sess_with_vacc": "sum"})
# .astype({'Sess_plan': 'int64', 'Sess_with_vacc': 'int64'})
# ).sort_values(
# "Sess_with_vacc",
# ascending=False,
# ignore_index=True,
# )
# map visualization: not reported blocks
not_report_blocks = np.setdiff1d(
admin_blocks_df.Block_Id.values,
top_block_df.Block_Id.values,
assume_unique=True,
)
# concat not_report_blocks, replace NaNs and cast kpis to integer
top_block_df = (
pd.concat(
[top_block_df, pd.DataFrame({"Block_Id": not_report_blocks})],
ignore_index=True,
)
.fillna(-1)
.astype({elem: "int64" for elem in kpi_with_trend_value if elem != "ALL_pres"})
)
# (note all kpi cols cast float)
# kpi: District sessions and vacc distribution - Time-series: df_in_dates refactor
# drop not relevant columns
df_in_dates.drop(
columns=[
"S_Num",
"Block",
"Block_Id",
"Notes",
"Division",
"Sub_division",
"Sub_div_Id",
],
inplace=True,
)
# transform presence of officials to numeric
df_in_dates.replace({"YES": 1, "NO": 0}, inplace=True, regex=False)
# "at least one official present": for percentage in districts
df_in_dates["ALL_pres"] = 0
df_in_dates.loc[is_one_or_more, "ALL_pres"] = 1
# count entries for "at least one official present" percentage in districts
df_in_dates["n_entries"] = 1
# daily aggregate by district
distr_daily_agg_df = (
df_in_dates.groupby(["Date", "District"], sort=False).agg("sum").astype("int64")
)
# rewrite "ALL_pres" with percentages
distr_daily_agg_df.loc[:, "ALL_pres"] = round(
distr_daily_agg_df.ALL_pres / distr_daily_agg_df.n_entries * 100
)
# weekly aggregate by district
distr_weekly_agg_df = (
df_in_dates.set_index("Date")
.groupby([pd.Grouper(freq="W"), "District"], sort=False)
.agg("sum")
.astype("int64")
)
# rewrite "ALL_pres" with percentages
distr_weekly_agg_df.loc[:, "ALL_pres"] = round(
distr_weekly_agg_df.ALL_pres / distr_weekly_agg_df.n_entries * 100
)
# monthly aggregate by district
distr_monthly_agg_df = (
df_in_dates.set_index("Date")
.groupby([pd.Grouper(freq="M"), "District"], sort=False)
.agg("sum")
.astype("int64")
)
# rewrite "ALL_pres" with percentages
distr_monthly_agg_df.loc[:, "ALL_pres"] = round(
distr_monthly_agg_df.ALL_pres / distr_monthly_agg_df.n_entries * 100
)
import pandas as pd
class LSPCResultsFile(object):
"""
A light weight LSPC results parser.
"""
def __init__(self, results_path, summary_path, summary_EOF=-2):
"""
---------
Requires:
- results_path: str, path to the results.csv results file.
- summary_path: str, path to the results.out summary file.
---------
Optional:
- summary_EOF: (-)int, default=-2. Number of end of file comment lines.
----------
Properties:
- results_path: The path of results_path as input in __init__.
- Returns: str
- summary_path: The path of summary_path as input in __init__.
- Returns: str
- raw_results: The unmodified csv file of self.results_path as read
by pandas.read_csv().
- Returns: pandas.DataFrame
- raw_summary: A string representation of the raw summary file.
- Returns: str
- parsed_summary: A parsed version of self.raw_summary containing the
units and description of each variable.
- Returns: pandas.DataFrame
- parsed_results: The `raw_results` joined to `parsed_summary`
- Returns: pandas.DataFrame
"""
if (summary_EOF > 0) or not isinstance(summary_EOF, int):
e = '`summary_EOF` must be a negative int.'
raise(ValueError(e))
self.results_path = results_path
self.summary_path = summary_path
self._summary_EOF = summary_EOF
self._raw_results = None
self._raw_summary = None
self._parsed_summary = None
self._parsed_results = None
@property
def raw_results(self):
"""
The unmodified csv file of self.results_path as read
by pandas.read_csv().
Returns: pandas.DataFrame
"""
if self._raw_results is None:
self._raw_results = pd.read_csv(self.results_path)
return self._raw_results
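# Hedged usage sketch (the file names are placeholders, not real project paths):
if __name__ == "__main__":
    parser = LSPCResultsFile("results.csv", "results.out")
    print(parser.raw_results.head())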
# -*- coding: utf-8 -*-
from argparse import ArgumentParser
import etherscan as eth
import pandas as pd
import numpy as np
from cassandra.cluster import Cluster
from random import sample, choice
from time import sleep
import logging
logging.basicConfig(format="%(asctime)s %(levelname)-8s %(message)s", level=logging.INFO, datefmt="%Y-%m-%d %H:%M:%S")
logging.getLogger("cassandra").setLevel(logging.CRITICAL)
logging.getLogger("Cluster").setLevel(logging.CRITICAL)
class EthValidate(object):
def __init__(self, apikey, cluster_addresses, keyspace):
self.etherscan = eth.Etherscan(apikey)
self.cluster_nodes = cluster_addresses
self.keyspace = keyspace
self.apikey = apikey
def _get_from_es(self, adr: str, sb, eb, sort, offset=5000, blockstep=500_000):
res = []
fromblock = sb
while fromblock < eb:
toblock = min(fromblock+blockstep, eb)
page = 1
try:
while True:
res.extend(self.etherscan.get_normal_txs_by_address_paginated(adr, page=page, startblock=fromblock, endblock=toblock, offset=offset, sort=sort))
page += 1
sleep(0.1)
except AssertionError as e: # thrown by etherscan
if e.__str__().startswith("None"):
raise ValueError(f"API threw error '{e}'. Please fix offset and blockstep size.")
pass # etherscan api did not find any more results, which is fine
fromblock += blockstep
logging.info(f"Collected {len(res)} tx")
return res
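# Hedged note: Etherscan caps the number of paginated results it returns per query, which
# is why the block range is walked in `blockstep` windows above; e.g. a call such as
# self._get_from_es(adr, 0, 15_000_000, "asc", offset=5000, blockstep=500_000)
# issues one paginated sweep per 500k-block window (parameter values are illustrative).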
def get_normal_tx_rom_etherscan_api(self, adr: str, endblock: int, maxlength=10_000) -> pd.DataFrame:
r = self.etherscan.get_normal_txs_by_address(adr, 0, endblock, "asc")
if len(r) >= maxlength:
logging.info(f"API returned its limit of {maxlength} transactions. Trying to retrieve _all_ tx now; this will take a while (and possible fail).")
startblock = r[0]["blockNumber"]
r = self._get_from_es(adr, int(startblock), int(endblock), "asc")
for x in r:
x["value"] = int(x["value"])
# set dtype=object to avoid problems with huge wei amounts and numeric datatypes later on
df = pd.DataFrame.from_dict(r, dtype=object)
return df
from scipy import sparse
from numpy import array
from scipy.sparse import csr_matrix
import os
import copy
import datetime
import warnings
from matplotlib import pyplot as plt
import matplotlib as mpl
import seaborn as sns
import pandas as pd
import numpy as np
import math
from datetime import datetime
import random
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.decomposition import TruncatedSVD
import gc
###############################################
########## Data loading
###############################################
userAll = pd.read_csv('../../data/original_data/user_app_usage.csv', names=['uId', 'appId', 'duration', 'times', 'use_date'], dtype={'uId':np.int32, 'appId':str, 'duration':np.float32,
'times':np.float32, 'use_date':str})
user_app_actived = pd.read_csv('../data/processed_data/user_app_actived.csv')
app_info = pd.read_csv('../../data/processed_data/app_info.csv', dtype={'appId':str, 'category':str})
age_train=pd.read_csv('../../data/processed_data/age_train.csv').sort_values('uId')
age_test=pd.read_csv('../../data/processed_data/age_test.csv').sort_values('uId')
age_train_usage=pd.read_csv('../../data/processed_data/age_train_usage.csv').sort_values('uId')
age_test_usage=pd.read_csv('../../data/processed_data/age_test_usage.csv').sort_values('uId')
###############################################
########## TF-IDF features for all samples from the activated-apps table
##############################################
app_list = user_app_actived.appId.str.split('#')
app_list = [" ".join(app) for app in app_list]
tf_vec = TfidfVectorizer(lowercase=False,ngram_range=(1,1), min_df=0.0008, token_pattern='(?u)\\b\\w+\\b')
full_tfidf = tf_vec.fit_transform(app_list).toarray()
full_tfidf = full_tfidf.astype(np.float16)
print(full_tfidf.shape)
full_tfidf = pd.DataFrame(full_tfidf,dtype='float16')
full_tfidf = pd.concat([user_app_actived[['uId']],full_tfidf],axis=1)
train = pd.merge(age_train[['uId']],full_tfidf, how='inner', on='uId').fillna(0)
test = pd.merge(age_test,full_tfidf, how='inner', on='uId').fillna(0)
train.sort_values('uId', axis=0, ascending=True, inplace=True)
test.sort_values('uId', axis=0, ascending=True, inplace=True)
train.drop('uId',axis=1,inplace=True)
test.drop('uId',axis=1,inplace=True)
del user_app_actived,full_tfidf,app_list
gc.collect()
train = csr_matrix(train)
test = csr_matrix(test)
print(train.shape)
print(test.shape)
gc.collect()
sparse.save_npz('../../data/csr_features_full/actived_app_tfidf_train_3000.npz', train)
sparse.save_npz('../../data/csr_features_full/actived_app_tfidf_test_3000.npz', test)
# actived_app_tfidf_train_3000 = sparse.load_npz('../../data/csr_features_full/actived_app_tfidf_train_3000.npz')
# actived_app_tfidf_test_3000 = sparse.load_npz('../../data/csr_features_full/actived_app_tfidf_test_3000.npz')
###############################################
########## TF-IDF features for the usage-table samples from the activated-apps table
##############################################
app_list = user_app_actived.appId.str.split('#')
app_list = [" ".join(app) for app in app_list]
tf_vec = TfidfVectorizer(lowercase=False,ngram_range=(1,1), min_df=0.0008, token_pattern='(?u)\\b\\w+\\b')
full_tfidf = tf_vec.fit_transform(app_list).toarray()
full_tfidf = full_tfidf.astype(np.float16)
print(full_tfidf.shape)
full_tfidf = pd.DataFrame(full_tfidf,dtype='float16')
full_tfidf = pd.concat([user_app_actived[['uId']],full_tfidf],axis=1)
train = pd.merge(age_train_usage[['uId']],full_tfidf, how='inner', on='uId')
import os
os.environ["MKL_NUM_THREADS"]="1"
print(os.environ["MKL_NUM_THREADS"])
import numpy as np
import turbofats
import pickle
import sys
from pathlib import Path
import pandas as pd
#from joblib import Parallel, delayed, dump
#result = Parallel(n_jobs=10)(delayed(compute_fats_features)(batch_names) for batch_names in split_list_in_chunks(file_list, 100))
def compute_fats_features(df_data):
"""
Receives a dataframe with the detections
Returns a dataframe with the features
"""
# TODO: STUDY BIASED FEATURES
feature_list = ['Amplitude', 'AndersonDarling', 'Autocor_length',
'Beyond1Std',
'Con', 'Eta_e',
'Gskew',
'MaxSlope', 'Mean', 'Meanvariance', 'MedianAbsDev',
'MedianBRP', 'PairSlopeTrend', 'PercentAmplitude', 'Q31',
'PeriodLS_v2',
'Period_fit_v2', 'Psi_CS_v2', 'Psi_eta_v2', 'Rcs',
'Skew', 'SmallKurtosis', 'Std',
'StetsonK', 'Harmonics',
'Pvar', 'ExcessVar',
'GP_DRW_sigma', 'GP_DRW_tau', 'SF_ML_amplitude', 'SF_ML_gamma',
'IAR_phi',
'LinearTrend',
'PeriodPowerRate']
lc_ids = df_data.index.unique()
feature_space = turbofats.NewFeatureSpace(feature_list=feature_list,
data_column_names=["mag", "mjd", "err"])
features_all = []
for k, lc_id in enumerate(lc_ids):
if np.mod(k, 1000):
print(k, len(lc_ids), k/len(lc_ids))
df_lc = df_data.loc[lc_id]
df_lc = df_lc.rename(columns={"flux": "mag", "flux_err": "err"}, errors="raise")
features = []
for fid in range(6):
df_lc_fid = df_lc.loc[df_lc.passband == fid]
#print(df_lc_fid.shape[0])
#if df_lc_fid.shape[0] > 60: # Adjust this
df_features_single_band = feature_space.calculate_features(df_lc_fid[["mag", "mjd", "err"]])
#else:
# df_features_single_band = pd.DataFrame(np.nan,
# index=[lc_id],
# columns=feature_list)
#print(df_features_single_band)
df_features_single_band = df_features_single_band.rename(lambda x: x+"_"+str(fid), axis='columns')
features.append(df_features_single_band)
features_all.append(pd.concat(features, axis=1, sort=True))
# stack the per-object feature rows into a single dataframe
return pd.concat(features_all, sort=True)
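# Hedged usage sketch (the parquet path and index column are assumptions; the input must
# carry flux, flux_err, mjd and passband per detection, indexed by light-curve id):
# detections = pd.read_parquet("detections.parquet").set_index("object_id")
# features_df = compute_fats_features(detections)
# features_df.to_pickle("fats_features.pkl")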
import os
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from .. import read_sql
@pytest.fixture(scope="module") # type: ignore
def mssql_url() -> str:
conn = os.environ["MSSQL_URL"]
return conn
@pytest.mark.xfail
def test_on_non_select(mssql_url: str) -> None:
query = "CREATE TABLE non_select(id INTEGER NOT NULL)"
df = read_sql(mssql_url, query)
def test_aggregation(mssql_url: str) -> None:
query = (
"SELECT test_bool, SUM(test_float) as sum FROM test_table GROUP BY test_bool"
)
df = read_sql(mssql_url, query)
expected = pd.DataFrame(
index=range(3),
data={
"test_bool": pd.Series([None, False, True], dtype="boolean"),
"sum": pd.Series([10.9, 5.2, -10.0], dtype="float64"),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_partition_on_aggregation(mssql_url: str) -> None:
query = (
"SELECT test_bool, SUM(test_int) AS test_int FROM test_table GROUP BY test_bool"
)
df = read_sql(mssql_url, query, partition_on="test_int", partition_num=2)
expected = pd.DataFrame(
index=range(3),
data={
"test_bool": pd.Series([None, False, True], dtype="boolean"),
"test_int": pd.Series([4, 5, 1315], dtype="Int64"),
},
)
df.sort_values(by="test_int", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
def test_aggregation2(mssql_url: str) -> None:
query = "select DISTINCT(test_bool) from test_table"
df = read_sql(mssql_url, query)
expected = pd.DataFrame(
index=range(3),
data={
"test_bool": pd.Series([None, False, True], dtype="boolean"),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_partition_on_aggregation2(mssql_url: str) -> None:
query = "select MAX(test_int) as max, MIN(test_int) as min from test_table"
df = read_sql(mssql_url, query, partition_on="max", partition_num=2)
expected = pd.DataFrame(
index=range(1),
data={
"max": pd.Series([1314], dtype="Int64"),
"min": pd.Series([0], dtype="Int64"),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_udf(mssql_url: str) -> None:
query = (
"SELECT dbo.increment(test_int) AS test_int FROM test_table ORDER BY test_int"
)
df = read_sql(mssql_url, query, partition_on="test_int", partition_num=2)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([1, 2, 3, 4, 5, 1315], dtype="Int64"),
},
)
df.sort_values(by="test_int", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
def test_manual_partition(mssql_url: str) -> None:
queries = [
"SELECT * FROM test_table WHERE test_int < 2",
"SELECT * FROM test_table WHERE test_int >= 2",
]
df = read_sql(mssql_url, query=queries)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([0, 1, 2, 3, 4, 1314], dtype="int64"),
"test_nullint": pd.Series([5, 3, None, 7, 9, 2], dtype="Int64"),
"test_str": pd.Series(
["a", "str1", "str2", "b", "c", None], dtype="object"
),
"test_float": pd.Series([3.1, None, 2.2, 3, 7.8, -10], dtype="float64"),
"test_bool": pd.Series(
[None, True, False, False, None, True], dtype="boolean"
),
},
)
df.sort_values(by="test_int", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
def test_mssql_without_partition(mssql_url: str) -> None:
query = "SELECT * FROM test_table"
df = read_sql(mssql_url, query)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([1, 2, 0, 3, 4, 1314], dtype="int64"),
"test_nullint": pd.Series([3, None, 5, 7, 9, 2], dtype="Int64"),
"test_str": pd.Series(
["str1", "str2", "a", "b", "c", None], dtype="object"
),
"test_float": pd.Series([None, 2.2, 3.1, 3, 7.8, -10], dtype="float64"),
"test_bool": pd.Series(
[True, False, None, False, None, True], dtype="boolean"
),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_mssql_limit_without_partition(mssql_url: str) -> None:
query = "SELECT top 3 * FROM test_table"
df = read_sql(mssql_url, query)
expected = pd.DataFrame(
index=range(3),
data={
"test_int": pd.Series([1, 2, 0], dtype="int64"),
"test_nullint": pd.Series([3, None, 5], dtype="Int64"),
"test_str": pd.Series(["str1", "str2", "a"], dtype="object"),
"test_float": pd.Series([None, 2.2, 3.1], dtype="float64"),
"test_bool": pd.Series([True, False, None], dtype="boolean"),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_mssql_limit_large_without_partition(mssql_url: str) -> None:
query = "SELECT top 10 * FROM test_table"
df = read_sql(mssql_url, query)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([1, 2, 0, 3, 4, 1314], dtype="int64"),
"test_nullint": pd.Series([3, None, 5, 7, 9, 2], dtype="Int64"),
"test_str": pd.Series(
["str1", "str2", "a", "b", "c", None], dtype="object"
),
"test_float": pd.Series([None, 2.2, 3.1, 3, 7.8, -10], dtype="float64"),
"test_bool": pd.Series(
[True, False, None, False, None, True], dtype="boolean"
),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_mssql_with_partition(mssql_url: str) -> None:
query = "SELECT * FROM test_table"
df = read_sql(
mssql_url,
query,
partition_on="test_int",
partition_range=(0, 2000),
partition_num=3,
)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([0, 1, 2, 3, 4, 1314], dtype="int64"),
"test_nullint": pd.Series([5, 3, None, 7, 9, 2], dtype="Int64"),
"test_str": pd.Series(
["a", "str1", "str2", "b", "c", None], dtype="object"
),
"test_float": pd.Series([3.1, None, 2.2, 3, 7.8, -10], dtype="float64"),
"test_bool": pd.Series(
[None, True, False, False, None, True], dtype="boolean"
),
},
)
df.sort_values(by="test_int", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
def test_mssql_limit_with_partition(mssql_url: str) -> None:
query = "SELECT top 3 * FROM test_table"
df = read_sql(
mssql_url,
query,
partition_on="test_int",
partition_range=(0, 2000),
partition_num=3,
)
expected = pd.DataFrame(
index=range(3),
data={
"test_int": pd.Series([0, 1, 2], dtype="int64"),
"test_nullint": pd.Series([5, 3, None], dtype="Int64"),
"test_str": pd.Series(["a", "str1", "str2"], dtype="object"),
"test_float": pd.Series([3.1, None, 2.20], dtype="float64"),
"test_bool": pd.Series([None, True, False], dtype="boolean"),
},
)
df.sort_values(by="test_int", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
def test_mssql_limit_large_with_partition(mssql_url: str) -> None:
query = "SELECT top 10 * FROM test_table"
df = read_sql(
mssql_url,
query,
partition_on="test_int",
partition_range=(0, 2000),
partition_num=3,
)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([0, 1, 2, 3, 4, 1314], dtype="int64"),
"test_nullint": pd.Series([5, 3, None, 7, 9, 2], dtype="Int64"),
"test_str": pd.Series(
["a", "str1", "str2", "b", "c", None], dtype="object"
),
"test_float": pd.Series([3.1, None, 2.2, 3, 7.8, -10], dtype="float64"),
"test_bool": pd.Series(
[None, True, False, False, None, True], dtype="boolean"
),
},
)
df.sort_values(by="test_int", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
def test_mssql_with_partition_without_partition_range(mssql_url: str) -> None:
query = "SELECT * FROM test_table where test_float > 3"
df = read_sql(
mssql_url,
query,
partition_on="test_int",
partition_num=3,
)
expected = pd.DataFrame(
index=range(2),
data={
"test_int": pd.Series([0, 4], dtype="int64"),
"test_nullint": pd.Series([5, 9], dtype="Int64"),
"test_str": pd.Series(["a", "c"], dtype="object"),
"test_float": pd.Series([3.1, 7.8], dtype="float64"),
"test_bool": pd.Series([None, None], dtype="boolean"),
},
)
df.sort_values(by="test_int", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
def test_mssql_with_partition_and_selection(mssql_url: str) -> None:
query = "SELECT * FROM test_table WHERE 1 = 3 OR 2 = 2"
df = read_sql(
mssql_url,
query,
partition_on="test_int",
partition_range=(0, 2000),
partition_num=3,
)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([0, 1, 2, 3, 4, 1314], dtype="int64"),
"test_nullint": pd.Series([5, 3, None, 7, 9, 2], dtype="Int64"),
"test_str": pd.Series(
["a", "str1", "str2", "b", "c", None], dtype="object"
),
"test_float": pd.Series([3.1, None, 2.2, 3, 7.8, -10], dtype="float64"),
"test_bool": pd.Series(
[None, True, False, False, None, True], dtype="boolean"
),
},
)
df.sort_values(by="test_int", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
def test_mssql_with_partition_and_projection(mssql_url: str) -> None:
query = "SELECT test_int, test_float, test_str FROM test_table"
df = read_sql(
mssql_url,
query,
partition_on="test_int",
partition_range=(0, 2000),
partition_num=3,
)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([0, 1, 2, 3, 4, 1314], dtype="int64"),
"test_float": pd.Series([3.1, None, 2.2, 3, 7.8, -10], dtype="float64"),
"test_str": pd.Series(
["a", "str1", "str2", "b", "c", None], dtype="object"
),
},
)
df.sort_values(by="test_int", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
def test_mssql_with_partition_and_spja(mssql_url: str) -> None:
query = """
SELECT test_bool, AVG(test_float) AS avg, SUM(test_int) AS sum
FROM test_table AS a, test_str AS b
WHERE a.test_int = b.id AND test_nullint IS NOT NULL
GROUP BY test_bool
ORDER BY sum
"""
df = read_sql(mssql_url, query, partition_on="sum", partition_num=2)
expected = pd.DataFrame(
index=range(3),
data={
"test_bool": pd.Series([True, False, None], dtype="boolean"),
"avg": pd.Series([None, 3, 5.45], dtype="float64"),
"sum": pd.Series([1, 3, 4], dtype="Int64"),
},
)
df = df.sort_values("sum").reset_index(drop=True)
assert_frame_equal(df, expected, check_names=True)
def test_empty_result(mssql_url: str) -> None:
query = "SELECT * FROM test_table where test_int < -100"
df = read_sql(mssql_url, query)
expected = pd.DataFrame(
data={
"test_int": pd.Series([], dtype="int64"),
"test_nullint": pd.Series([], dtype="Int64"),
"test_str": pd.Series([], dtype="object"),
"test_float": pd.Series([], dtype="float64"),
"test_bool": pd.Series([], dtype="boolean"),
}
)
assert_frame_equal(df, expected, check_names=True)
def test_empty_result_on_partition(mssql_url: str) -> None:
query = "SELECT * FROM test_table where test_int < -100"
df = read_sql(mssql_url, query, partition_on="test_int", partition_num=3)
expected = pd.DataFrame(
data={
"test_int": pd.Series([], dtype="int64"),
"test_nullint": pd.Series([], dtype="Int64"),
"test_str": pd.Series([], dtype="object"),
"test_float": pd.Series([], dtype="float64"),
"test_bool": pd.Series([], dtype="boolean"),
}
)
assert_frame_equal(df, expected, check_names=True)
def test_empty_result_on_some_partition(mssql_url: str) -> None:
query = "SELECT * FROM test_table where test_int < 1"
df = read_sql(mssql_url, query, partition_on="test_int", partition_num=3)
expected = pd.DataFrame(
data={
"test_int": pd.Series([0], dtype="int64"),
"test_nullint": pd.Series([5], dtype="Int64"),
"test_str": pd.Series(["a"], dtype="object"),
"test_float": pd.Series([3.1], dtype="float"),
"test_bool": pd.Series([None], dtype="boolean"),
}
)
assert_frame_equal(df, expected, check_names=True)
def test_mssql_types(mssql_url: str) -> None:
query = "SELECT * FROM test_types"
df = read_sql(mssql_url, query)
expected = pd.DataFrame(
index=range(3),
data={
"test_int1": pd.Series([0, 255, None], dtype="Int64"),
"test_int2": pd.Series([-32768, 32767, None], dtype="Int64"),
"test_int4": pd.Series([-2147483648, 2147483647, None], dtype="Int64"),
"test_int8": pd.Series(
[-9223372036854775808, 9223372036854775807, None], dtype="Int64"
),
"test_float24": pd.Series([None, 1.18e-38, 3.40e38], dtype="float"),
"test_float53": pd.Series([None, -2.23e-308, 1.79e308], dtype="float"),
"test_floatn": pd.Series([None, 0, 123.1234567], dtype="float"),
"test_date": pd.Series(
["1999-07-25", None, "2021-01-28"], dtype="datetime64[ns]"
),
"test_time": pd.Series(["00:00:00", "23:59:59", None], dtype="object"),
"test_datetime": pd.Series(
[None, "2020-12-31 23:59:59", "2021-01-28 10:30:30"],
dtype="datetime64[ns]",
),
"test_smalldatetime": pd.Series(
["1990-01-01 10:00:00", None, "2079-06-05 23:00:00"],
dtype="datetime64[ns]",
),
"test_naivedatetime": pd.Series(
["1753-01-01 12:00:00", "2038-12-31 01:00:00", None],
dtype="datetime64[ns]",
),
"test_naivedatetime2": pd.Series(
["1900-01-01 12:00:00.12345", None, "2027-03-18 14:30:30.54321"],
dtype="datetime64[ns]",
),
"test_new_decimal": | pd.Series([1.1, 2.2, None], dtype="float") | pandas.Series |
# %%
import warnings
warnings.filterwarnings("ignore")
from folktables import (
ACSDataSource,
ACSIncome,
ACSEmployment,
ACSMobility,
ACSPublicCoverage,
ACSTravelTime,
)
import pandas as pd
from collections import defaultdict
from scipy.stats import kstest, wasserstein_distance
import seaborn as sns
sns.set_style("whitegrid")
import numpy as np
import random
import sys
import matplotlib.pyplot as plt
# Scikit-Learn
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.neural_network import MLPClassifier, MLPRegressor
from sklearn.linear_model import LogisticRegression, Lasso, LinearRegression
from sklearn.model_selection import cross_val_predict
from sklearn.metrics import (
accuracy_score,
roc_auc_score,
mean_squared_error,
mean_absolute_error,
mean_absolute_percentage_error,
)
from sklearn.dummy import DummyRegressor
from sklearn.svm import SVR
from sklearn.model_selection import train_test_split
# Specific packages
from xgboost import XGBRegressor, XGBClassifier
import shap
from tqdm import tqdm
# Home made code
import sys
sys.path.append("../")
from fairtools.utils import loop_estimators_fairness, psi, loop_estimators
from ATC_opt import ATC
# Seeding
np.random.seed(0)
random.seed(0)
# %%
# Load data
data_source = ACSDataSource(survey_year="2014", horizon="1-Year", survey="person")
ca_data = data_source.get_data(states=["CA"], download=True)
data_source = ACSDataSource(survey_year="2016", horizon="1-Year", survey="person")
mi_data = data_source.get_data(states=["HI"], download=True)  # note: despite the mi_* names below, this loads Hawaii (HI) data
# %%
states = [
"MI",
"TN",
"CT",
"OH",
"NE",
"IL",
"FL",
]
nooo = [
"OK",
"PA",
"KS",
"IA",
"KY",
"NY",
"LA",
"TX",
"UT",
"OR",
"ME",
"NJ",
"ID",
"DE",
"MN",
"WI",
"CA",
"MO",
"MD",
"NV",
"HI",
"IN",
"WV",
"MT",
"WY",
"ND",
"SD",
"GA",
"NM",
"AZ",
"VA",
"MA",
"AA",
"NC",
"SC",
"DC",
"VT",
"AR",
"WA",
"CO",
"NH",
"MS",
"AK",
"RI",
"AL",
"PR",
]
data_source = ACSDataSource(survey_year="2018", horizon="1-Year", survey="person")
# %%
ca_features, ca_labels, ca_group = ACSEmployment.df_to_numpy(ca_data)
mi_features, mi_labels, mi_group = ACSEmployment.df_to_numpy(mi_data)
## Convert to DF
ca_features = pd.DataFrame(ca_features, columns=ACSEmployment.features)
mi_features = pd.DataFrame(mi_features, columns=ACSEmployment.features)
# %%
# Modeling
# model = XGBClassifier(verbosity=0, silent=True, use_label_encoder=False, njobs=1)
model = LogisticRegression()
# Train on CA data
preds_ca = cross_val_predict(
model, ca_features, ca_labels, cv=3, method="predict_proba"
)[:, 1]
model.fit(ca_features, ca_labels)
# Test on MI data
preds_mi = model.predict_proba(mi_features)[:, 1]
# Threshold classifier
atc = ATC()
atc.fit(model.predict_proba(ca_features), ca_labels)
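# ATC (Average Thresholded Confidence) is assumed here to learn a confidence threshold on the
# source (CA) scores so that, later on, its predicted accuracy on shifted samples can be
# compared against the true accuracy drop.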
# %%
## Can we learn to solve this issue?
################################
####### PARAMETERS #############
SAMPLE_FRAC = 1_000
ITERS = 200
# Init
train_error = accuracy_score(ca_labels, np.round(preds_ca))
train_error_acc = accuracy_score(ca_labels, np.round(preds_ca))
# xAI Train
# explainer = shap.Explainer(model)
explainer = shap.LinearExplainer(
model, ca_features, feature_dependence="correlation_dependent"
)
shap_test = explainer(ca_features)
shap_test = pd.DataFrame(shap_test.values, columns=ca_features.columns)
# Lets add the target to ease the sampling
mi_full = mi_features.copy()
mi_full["group"] = mi_group
mi_full["target"] = mi_labels
def create_meta_data(test, samples, boots):
# Init
train = defaultdict()
train_target_shift = defaultdict()
performance = defaultdict()
train_shap = defaultdict()
atc_scores = defaultdict()
for i in tqdm(range(0, boots), leave=False, desc="Test Bootstrap", position=1):
# Initiate
row = []
row_target_shift = []
row_shap = []
# Sampling
aux = test.sample(n=samples, replace=True)
# Performance calculation
preds = model.predict(aux.drop(columns=["target", "group"]))
performance[i] = train_error - accuracy_score(aux.target, preds)
# ATC
atc_scores[i] = (
atc.predict(model.predict_proba(aux.drop(columns=["target", "group"])))
/ 100
- train_error_acc
)
# Shap values calculation
shap_values = explainer(aux.drop(columns=["target", "group"]))
shap_values = pd.DataFrame(shap_values.values, columns=ca_features.columns)
for feat in ca_features.columns:
# Michigan
ks = ca_features[feat].mean() - aux[feat].mean()
sh = shap_test[feat].mean(), shap_values[feat].mean()
row.append(ks)
row_shap.append(sh)
# Target shift
ks_target_shift = preds_ca.mean() - preds.mean()
row_target_shift.append(ks_target_shift)
# Save results
train_shap[i] = row_shap
train[i] = row
train_target_shift[i] = row_target_shift
## Train (previous test)
train_df = pd.DataFrame(train).T
train_df.columns = ca_features.columns
    train_shap_df = pd.DataFrame(train_shap)
from random import shuffle
import numpy as np
import torch.nn.functional as F
import torch
import pathlib
import pandas as pd
from torch.autograd import Variable
from networks.net_api.losses import CombinedLoss
from torch.optim import lr_scheduler
import os
from tqdm import tqdm
def per_class_dice(y_pred, y_true, num_class):
avg_dice = 0
y_pred = y_pred.data.cpu().numpy()
y_true = y_true.data.cpu().numpy()
for i in range(num_class):
GT = y_true == (i)
Pred = y_pred == (i)
inter = np.sum(np.multiply(GT, Pred)) + 0.0001
union = np.sum(GT) + np.sum(Pred) + 0.0001
t = 2 * inter / union
avg_dice = avg_dice + (t / num_class)
return avg_dice
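# Minimal usage sketch for per_class_dice (illustrative, not part of the original script):
# both arguments must be torch tensors of integer class labels with identical shape, e.g.
#   import torch
#   pred = torch.randint(0, 10, (2, 64, 64))
#   true = torch.randint(0, 10, (2, 64, 64))
#   avg = per_class_dice(pred, true, num_class=10)  # average Dice over the 10 classes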
class Solver(object):
# global optimiser parameters
default_optim_args = {"lr": 0.1,
"momentum" : 0.9,
"weight_decay": 0.0001}
gamma = 0.1
step_size = 30
NumClass = 10 # TO CHANGE
def __init__(self, device, optim=torch.optim.SGD, optim_args={}):
optim_args_merged = self.default_optim_args.copy()
optim_args_merged.update(optim_args)
self.optim_args = optim_args_merged
self.optim = optim
self.loss_func = CombinedLoss(device)
self.device = device
self._reset_histories()
def _reset_histories(self):
"""
Resets train and val histories for the accuracy and the loss.
"""
self.train_loss_history = []
self.train_acc_history = []
def train(self, model, train_loader, model_path, num_epochs=10, log_nth=5, exp_dir_name='exp_default'):
"""
Train a given model with the provided data.
Inputs:
- model: model object initialized from a torch.nn.Module
- train_loader: train data in torch.utils.data.DataLoader
- num_epochs: total number of training epochs
- log_nth: log training accuracy and loss every nth iteration
"""
optim = self.optim(model.parameters(), **self.optim_args)
        # learning rate scheduler
scheduler = lr_scheduler.StepLR(optim, step_size=self.step_size,
gamma=self.gamma) # decay LR by a factor of 0.1 every 30 epochs
iter_per_epoch = 1
# iter_per_epoch = len(train_loader)
model.to(self.device)
print('START TRAIN.')
curr_iter = 0
per_epoch_train_acc = []
for epoch in range(num_epochs):
scheduler.step()
self._reset_histories()
model.train()
iteration = 0
batch = tqdm(enumerate(train_loader), total=len(train_loader))
for i_batch, sample_batched in batch:
X = Variable(sample_batched[0], requires_grad=True)
y = Variable(sample_batched[1])
w = Variable(sample_batched[2])
if model.is_cuda:
X, y, w = X.cuda(), y.cuda(), w.cuda()
optim.zero_grad()
output = model(X)
loss = self.loss_func(output, y, w)
loss.backward()
optim.step()
                _, batch_output = torch.max(F.softmax(output, dim=1), dim=1)
_, y = torch.max(y, dim=1)
avg_dice = per_class_dice(batch_output, y, self.NumClass)
self.train_loss_history.append(loss.detach().item())
self.train_acc_history.append(avg_dice)
per_epoch_train_acc.append(np.sum(np.asarray(self.train_acc_history))/len(train_loader))
print('[Epoch : {} / {}]: {:.2f}'.format(epoch, num_epochs, avg_dice.item()))
full_save_path = os.path.join(model_path, exp_dir_name)
pathlib.Path(full_save_path).mkdir(parents=True, exist_ok=True)
model.save(os.path.join(full_save_path, 'relaynet_epoch'+ str(epoch + 1) + '.model'))
d = {'train_history': per_epoch_train_acc}
        df = pd.DataFrame(data=d)
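# Illustrative usage of Solver (assumption -- the original project wires this up elsewhere):
#   solver = Solver(device=torch.device("cuda:0"), optim_args={"lr": 0.01})
#   solver.train(model, train_loader, model_path="./models", num_epochs=10,
#                exp_dir_name="exp1")
# The model passed in is expected to expose `.is_cuda` and `.save(path)`, as used in train().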
# -*- coding: utf-8 -*-
from __future__ import print_function
import nose
from numpy import nan
from pandas import Timestamp
from pandas.core.index import MultiIndex
from pandas.core.api import DataFrame
from pandas.core.series import Series
from pandas.util.testing import (assert_frame_equal, assert_series_equal
)
from pandas.compat import (lmap)
from pandas import compat
import pandas.core.common as com
import numpy as np
import pandas.util.testing as tm
import pandas as pd
class TestGroupByFilter(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.ts = tm.makeTimeSeries()
self.seriesd = tm.getSeriesData()
self.tsd = tm.getTimeSeriesData()
self.frame = DataFrame(self.seriesd)
self.tsframe = DataFrame(self.tsd)
self.df = DataFrame(
{'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.random.randn(8)})
self.df_mixed_floats = DataFrame(
{'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.array(
np.random.randn(8), dtype='float32')})
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'], ['one', 'two',
'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
self.mframe = DataFrame(np.random.randn(10, 3), index=index,
columns=['A', 'B', 'C'])
self.three_group = DataFrame(
{'A': ['foo', 'foo', 'foo', 'foo', 'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two', 'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull', 'dull', 'shiny', 'shiny',
'dull', 'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
def test_filter_series(self):
s = pd.Series([1, 3, 20, 5, 22, 24, 7])
expected_odd = pd.Series([1, 3, 5, 7], index=[0, 1, 3, 6])
expected_even = pd.Series([20, 22, 24], index=[2, 4, 5])
grouper = s.apply(lambda x: x % 2)
grouped = s.groupby(grouper)
assert_series_equal(
grouped.filter(lambda x: x.mean() < 10), expected_odd)
assert_series_equal(
grouped.filter(lambda x: x.mean() > 10), expected_even)
# Test dropna=False.
assert_series_equal(
grouped.filter(lambda x: x.mean() < 10, dropna=False),
expected_odd.reindex(s.index))
assert_series_equal(
grouped.filter(lambda x: x.mean() > 10, dropna=False),
expected_even.reindex(s.index))
def test_filter_single_column_df(self):
df = pd.DataFrame([1, 3, 20, 5, 22, 24, 7])
expected_odd = pd.DataFrame([1, 3, 5, 7], index=[0, 1, 3, 6])
expected_even = pd.DataFrame([20, 22, 24], index=[2, 4, 5])
grouper = df[0].apply(lambda x: x % 2)
grouped = df.groupby(grouper)
assert_frame_equal(
grouped.filter(lambda x: x.mean() < 10), expected_odd)
assert_frame_equal(
grouped.filter(lambda x: x.mean() > 10), expected_even)
# Test dropna=False.
assert_frame_equal(
grouped.filter(lambda x: x.mean() < 10, dropna=False),
expected_odd.reindex(df.index))
assert_frame_equal(
grouped.filter(lambda x: x.mean() > 10, dropna=False),
expected_even.reindex(df.index))
def test_filter_multi_column_df(self):
df = pd.DataFrame({'A': [1, 12, 12, 1], 'B': [1, 1, 1, 1]})
grouper = df['A'].apply(lambda x: x % 2)
grouped = df.groupby(grouper)
expected = pd.DataFrame({'A': [12, 12], 'B': [1, 1]}, index=[1, 2])
assert_frame_equal(
grouped.filter(lambda x: x['A'].sum() - x['B'].sum() > 10),
expected)
def test_filter_mixed_df(self):
df = pd.DataFrame({'A': [1, 12, 12, 1], 'B': 'a b c d'.split()})
grouper = df['A'].apply(lambda x: x % 2)
grouped = df.groupby(grouper)
expected = pd.DataFrame({'A': [12, 12], 'B': ['b', 'c']}, index=[1, 2])
assert_frame_equal(
grouped.filter(lambda x: x['A'].sum() > 10), expected)
def test_filter_out_all_groups(self):
s = pd.Series([1, 3, 20, 5, 22, 24, 7])
grouper = s.apply(lambda x: x % 2)
grouped = s.groupby(grouper)
assert_series_equal(grouped.filter(lambda x: x.mean() > 1000), s[[]])
df = pd.DataFrame({'A': [1, 12, 12, 1], 'B': 'a b c d'.split()})
grouper = df['A'].apply(lambda x: x % 2)
grouped = df.groupby(grouper)
assert_frame_equal(
grouped.filter(lambda x: x['A'].sum() > 1000), df.loc[[]])
def test_filter_out_no_groups(self):
s = pd.Series([1, 3, 20, 5, 22, 24, 7])
grouper = s.apply(lambda x: x % 2)
grouped = s.groupby(grouper)
filtered = grouped.filter(lambda x: x.mean() > 0)
assert_series_equal(filtered, s)
df = pd.DataFrame({'A': [1, 12, 12, 1], 'B': 'a b c d'.split()})
grouper = df['A'].apply(lambda x: x % 2)
grouped = df.groupby(grouper)
filtered = grouped.filter(lambda x: x['A'].mean() > 0)
assert_frame_equal(filtered, df)
def test_filter_out_all_groups_in_df(self):
# GH12768
df = pd.DataFrame({'a': [1, 1, 2], 'b': [1, 2, 0]})
res = df.groupby('a')
res = res.filter(lambda x: x['b'].sum() > 5, dropna=False)
expected = pd.DataFrame({'a': [nan] * 3, 'b': [nan] * 3})
assert_frame_equal(expected, res)
df = pd.DataFrame({'a': [1, 1, 2], 'b': [1, 2, 0]})
res = df.groupby('a')
res = res.filter(lambda x: x['b'].sum() > 5, dropna=True)
expected = pd.DataFrame({'a': [], 'b': []}, dtype="int64")
assert_frame_equal(expected, res)
def test_filter_condition_raises(self):
def raise_if_sum_is_zero(x):
if x.sum() == 0:
raise ValueError
else:
return x.sum() > 0
s = pd.Series([-1, 0, 1, 2])
grouper = s.apply(lambda x: x % 2)
grouped = s.groupby(grouper)
self.assertRaises(TypeError,
lambda: grouped.filter(raise_if_sum_is_zero))
def test_filter_with_axis_in_groupby(self):
# issue 11041
index = pd.MultiIndex.from_product([range(10), [0, 1]])
data = pd.DataFrame(
np.arange(100).reshape(-1, 20), columns=index, dtype='int64')
result = data.groupby(level=0,
axis=1).filter(lambda x: x.iloc[0, 0] > 10)
expected = data.iloc[:, 12:20]
assert_frame_equal(result, expected)
def test_filter_bad_shapes(self):
df = DataFrame({'A': np.arange(8),
'B': list('aabbbbcc'),
'C': np.arange(8)})
s = df['B']
g_df = df.groupby('B')
g_s = s.groupby(s)
f = lambda x: x
self.assertRaises(TypeError, lambda: g_df.filter(f))
self.assertRaises(TypeError, lambda: g_s.filter(f))
f = lambda x: x == 1
self.assertRaises(TypeError, lambda: g_df.filter(f))
self.assertRaises(TypeError, lambda: g_s.filter(f))
f = lambda x: np.outer(x, x)
self.assertRaises(TypeError, lambda: g_df.filter(f))
self.assertRaises(TypeError, lambda: g_s.filter(f))
def test_filter_nan_is_false(self):
df = DataFrame({'A': np.arange(8),
'B': list('aabbbbcc'),
'C': np.arange(8)})
s = df['B']
g_df = df.groupby(df['B'])
g_s = s.groupby(s)
f = lambda x: np.nan
assert_frame_equal(g_df.filter(f), df.loc[[]])
assert_series_equal(g_s.filter(f), s[[]])
def test_filter_against_workaround(self):
np.random.seed(0)
# Series of ints
s = Series(np.random.randint(0, 100, 1000))
grouper = s.apply(lambda x: np.round(x, -1))
grouped = s.groupby(grouper)
f = lambda x: x.mean() > 10
old_way = s[grouped.transform(f).astype('bool')]
new_way = grouped.filter(f)
assert_series_equal(new_way.sort_values(), old_way.sort_values())
# Series of floats
s = 100 * Series(np.random.random(1000))
grouper = s.apply(lambda x: np.round(x, -1))
grouped = s.groupby(grouper)
f = lambda x: x.mean() > 10
old_way = s[grouped.transform(f).astype('bool')]
new_way = grouped.filter(f)
assert_series_equal(new_way.sort_values(), old_way.sort_values())
# Set up DataFrame of ints, floats, strings.
from string import ascii_lowercase
letters = np.array(list(ascii_lowercase))
N = 1000
random_letters = letters.take(np.random.randint(0, 26, N))
df = DataFrame({'ints': Series(np.random.randint(0, 100, N)),
'floats': N / 10 * Series(np.random.random(N)),
'letters': Series(random_letters)})
# Group by ints; filter on floats.
grouped = df.groupby('ints')
old_way = df[grouped.floats.
transform(lambda x: x.mean() > N / 20).astype('bool')]
new_way = grouped.filter(lambda x: x['floats'].mean() > N / 20)
assert_frame_equal(new_way, old_way)
# Group by floats (rounded); filter on strings.
grouper = df.floats.apply(lambda x: np.round(x, -1))
grouped = df.groupby(grouper)
old_way = df[grouped.letters.
transform(lambda x: len(x) < N / 10).astype('bool')]
new_way = grouped.filter(lambda x: len(x.letters) < N / 10)
assert_frame_equal(new_way, old_way)
# Group by strings; filter on ints.
grouped = df.groupby('letters')
old_way = df[grouped.ints.
transform(lambda x: x.mean() > N / 20).astype('bool')]
new_way = grouped.filter(lambda x: x['ints'].mean() > N / 20)
assert_frame_equal(new_way, old_way)
def test_filter_using_len(self):
# BUG GH4447
df = DataFrame({'A': np.arange(8),
'B': list('aabbbbcc'),
'C': np.arange(8)})
grouped = df.groupby('B')
actual = grouped.filter(lambda x: len(x) > 2)
expected = DataFrame(
{'A': np.arange(2, 6),
'B': list('bbbb'),
'C': np.arange(2, 6)}, index=np.arange(2, 6))
assert_frame_equal(actual, expected)
actual = grouped.filter(lambda x: len(x) > 4)
expected = df.loc[[]]
assert_frame_equal(actual, expected)
# Series have always worked properly, but we'll test anyway.
s = df['B']
grouped = s.groupby(s)
actual = grouped.filter(lambda x: len(x) > 2)
expected = Series(4 * ['b'], index=np.arange(2, 6), name='B')
assert_series_equal(actual, expected)
actual = grouped.filter(lambda x: len(x) > 4)
expected = s[[]]
assert_series_equal(actual, expected)
def test_filter_maintains_ordering(self):
# Simple case: index is sequential. #4621
df = DataFrame({'pid': [1, 1, 1, 2, 2, 3, 3, 3],
'tag': [23, 45, 62, 24, 45, 34, 25, 62]})
s = df['pid']
grouped = df.groupby('tag')
actual = grouped.filter(lambda x: len(x) > 1)
expected = df.iloc[[1, 2, 4, 7]]
assert_frame_equal(actual, expected)
grouped = s.groupby(df['tag'])
actual = grouped.filter(lambda x: len(x) > 1)
expected = s.iloc[[1, 2, 4, 7]]
assert_series_equal(actual, expected)
# Now index is sequentially decreasing.
df.index = np.arange(len(df) - 1, -1, -1)
s = df['pid']
grouped = df.groupby('tag')
actual = grouped.filter(lambda x: len(x) > 1)
expected = df.iloc[[1, 2, 4, 7]]
assert_frame_equal(actual, expected)
grouped = s.groupby(df['tag'])
actual = grouped.filter(lambda x: len(x) > 1)
expected = s.iloc[[1, 2, 4, 7]]
assert_series_equal(actual, expected)
# Index is shuffled.
SHUFFLED = [4, 6, 7, 2, 1, 0, 5, 3]
df.index = df.index[SHUFFLED]
s = df['pid']
grouped = df.groupby('tag')
actual = grouped.filter(lambda x: len(x) > 1)
expected = df.iloc[[1, 2, 4, 7]]
assert_frame_equal(actual, expected)
grouped = s.groupby(df['tag'])
actual = grouped.filter(lambda x: len(x) > 1)
expected = s.iloc[[1, 2, 4, 7]]
        assert_series_equal(actual, expected)
import os
import pandas as pd
import pytest
@pytest.mark.skipif(
os.name == "nt", reason="Skip *nix-specific tests on Windows"
)
def test_convert_unix_date():
unix = [
"1284101485",
1_284_101_486,
"1284101487000",
1_284_101_488_000,
"1284101489",
"1284101490",
-2_147_483_648,
2_147_483_648,
]
    df = pd.DataFrame(unix, columns=["dates"])
from datetime import datetime
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas import (
DataFrame,
DatetimeIndex,
Series,
concat,
isna,
notna,
)
import pandas._testing as tm
import pandas.tseries.offsets as offsets
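# Note (added for orientation): the `series`, `frame` and `raw` arguments used throughout these
# tests are pytest fixtures; they are assumed to be provided by the rolling test suite's
# conftest.py rather than defined in this file.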
@pytest.mark.parametrize(
"compare_func, roll_func, kwargs",
[
[np.mean, "mean", {}],
[np.nansum, "sum", {}],
pytest.param(
lambda x: np.isfinite(x).astype(float).sum(),
"count",
{},
marks=pytest.mark.filterwarnings("ignore:min_periods:FutureWarning"),
),
[np.median, "median", {}],
[np.min, "min", {}],
[np.max, "max", {}],
[lambda x: np.std(x, ddof=1), "std", {}],
[lambda x: np.std(x, ddof=0), "std", {"ddof": 0}],
[lambda x: np.var(x, ddof=1), "var", {}],
[lambda x: np.var(x, ddof=0), "var", {"ddof": 0}],
],
)
def test_series(series, compare_func, roll_func, kwargs):
result = getattr(series.rolling(50), roll_func)(**kwargs)
assert isinstance(result, Series)
tm.assert_almost_equal(result.iloc[-1], compare_func(series[-50:]))
@pytest.mark.parametrize(
"compare_func, roll_func, kwargs",
[
[np.mean, "mean", {}],
[np.nansum, "sum", {}],
pytest.param(
lambda x: np.isfinite(x).astype(float).sum(),
"count",
{},
marks=pytest.mark.filterwarnings("ignore:min_periods:FutureWarning"),
),
[np.median, "median", {}],
[np.min, "min", {}],
[np.max, "max", {}],
[lambda x: np.std(x, ddof=1), "std", {}],
[lambda x: np.std(x, ddof=0), "std", {"ddof": 0}],
[lambda x: np.var(x, ddof=1), "var", {}],
[lambda x: np.var(x, ddof=0), "var", {"ddof": 0}],
],
)
def test_frame(raw, frame, compare_func, roll_func, kwargs):
result = getattr(frame.rolling(50), roll_func)(**kwargs)
assert isinstance(result, DataFrame)
tm.assert_series_equal(
result.iloc[-1, :],
frame.iloc[-50:, :].apply(compare_func, axis=0, raw=raw),
check_names=False,
)
@pytest.mark.parametrize(
"compare_func, roll_func, kwargs, minp",
[
[np.mean, "mean", {}, 10],
[np.nansum, "sum", {}, 10],
[lambda x: np.isfinite(x).astype(float).sum(), "count", {}, 0],
[np.median, "median", {}, 10],
[np.min, "min", {}, 10],
[np.max, "max", {}, 10],
[lambda x: np.std(x, ddof=1), "std", {}, 10],
[lambda x: np.std(x, ddof=0), "std", {"ddof": 0}, 10],
[lambda x: np.var(x, ddof=1), "var", {}, 10],
[lambda x: np.var(x, ddof=0), "var", {"ddof": 0}, 10],
],
)
def test_time_rule_series(series, compare_func, roll_func, kwargs, minp):
win = 25
ser = series[::2].resample("B").mean()
series_result = getattr(ser.rolling(window=win, min_periods=minp), roll_func)(
**kwargs
)
last_date = series_result.index[-1]
prev_date = last_date - 24 * offsets.BDay()
trunc_series = series[::2].truncate(prev_date, last_date)
tm.assert_almost_equal(series_result[-1], compare_func(trunc_series))
@pytest.mark.parametrize(
"compare_func, roll_func, kwargs, minp",
[
[np.mean, "mean", {}, 10],
[np.nansum, "sum", {}, 10],
[lambda x: np.isfinite(x).astype(float).sum(), "count", {}, 0],
[np.median, "median", {}, 10],
[np.min, "min", {}, 10],
[np.max, "max", {}, 10],
[lambda x: np.std(x, ddof=1), "std", {}, 10],
[lambda x: np.std(x, ddof=0), "std", {"ddof": 0}, 10],
[lambda x: np.var(x, ddof=1), "var", {}, 10],
[lambda x: np.var(x, ddof=0), "var", {"ddof": 0}, 10],
],
)
def test_time_rule_frame(raw, frame, compare_func, roll_func, kwargs, minp):
win = 25
frm = frame[::2].resample("B").mean()
frame_result = getattr(frm.rolling(window=win, min_periods=minp), roll_func)(
**kwargs
)
last_date = frame_result.index[-1]
prev_date = last_date - 24 * offsets.BDay()
trunc_frame = frame[::2].truncate(prev_date, last_date)
tm.assert_series_equal(
frame_result.xs(last_date),
trunc_frame.apply(compare_func, raw=raw),
check_names=False,
)
@pytest.mark.parametrize(
"compare_func, roll_func, kwargs",
[
[np.mean, "mean", {}],
[np.nansum, "sum", {}],
[np.median, "median", {}],
[np.min, "min", {}],
[np.max, "max", {}],
[lambda x: np.std(x, ddof=1), "std", {}],
[lambda x: np.std(x, ddof=0), "std", {"ddof": 0}],
[lambda x: np.var(x, ddof=1), "var", {}],
[lambda x: np.var(x, ddof=0), "var", {"ddof": 0}],
],
)
def test_nans(compare_func, roll_func, kwargs):
obj = Series(np.random.randn(50))
obj[:10] = np.NaN
obj[-10:] = np.NaN
result = getattr(obj.rolling(50, min_periods=30), roll_func)(**kwargs)
tm.assert_almost_equal(result.iloc[-1], compare_func(obj[10:-10]))
# min_periods is working correctly
result = getattr(obj.rolling(20, min_periods=15), roll_func)(**kwargs)
assert isna(result.iloc[23])
assert not isna(result.iloc[24])
assert not isna(result.iloc[-6])
assert isna(result.iloc[-5])
obj2 = Series(np.random.randn(20))
result = getattr(obj2.rolling(10, min_periods=5), roll_func)(**kwargs)
assert isna(result.iloc[3])
assert notna(result.iloc[4])
if roll_func != "sum":
result0 = getattr(obj.rolling(20, min_periods=0), roll_func)(**kwargs)
result1 = getattr(obj.rolling(20, min_periods=1), roll_func)(**kwargs)
tm.assert_almost_equal(result0, result1)
def test_nans_count():
obj = Series(np.random.randn(50))
obj[:10] = np.NaN
obj[-10:] = np.NaN
result = obj.rolling(50, min_periods=30).count()
tm.assert_almost_equal(
result.iloc[-1], np.isfinite(obj[10:-10]).astype(float).sum()
)
@pytest.mark.parametrize(
"roll_func, kwargs",
[
["mean", {}],
["sum", {}],
["median", {}],
["min", {}],
["max", {}],
["std", {}],
["std", {"ddof": 0}],
["var", {}],
["var", {"ddof": 0}],
],
)
@pytest.mark.parametrize("minp", [0, 99, 100])
def test_min_periods(series, minp, roll_func, kwargs):
result = getattr(series.rolling(len(series) + 1, min_periods=minp), roll_func)(
**kwargs
)
expected = getattr(series.rolling(len(series), min_periods=minp), roll_func)(
**kwargs
)
nan_mask = isna(result)
tm.assert_series_equal(nan_mask, isna(expected))
nan_mask = ~nan_mask
tm.assert_almost_equal(result[nan_mask], expected[nan_mask])
def test_min_periods_count(series):
result = series.rolling(len(series) + 1, min_periods=0).count()
expected = series.rolling(len(series), min_periods=0).count()
nan_mask = isna(result)
tm.assert_series_equal(nan_mask, isna(expected))
nan_mask = ~nan_mask
tm.assert_almost_equal(result[nan_mask], expected[nan_mask])
@pytest.mark.parametrize(
"roll_func, kwargs, minp",
[
["mean", {}, 15],
["sum", {}, 15],
["count", {}, 0],
["median", {}, 15],
["min", {}, 15],
["max", {}, 15],
["std", {}, 15],
["std", {"ddof": 0}, 15],
["var", {}, 15],
["var", {"ddof": 0}, 15],
],
)
def test_center(roll_func, kwargs, minp):
obj = Series(np.random.randn(50))
obj[:10] = np.NaN
obj[-10:] = np.NaN
result = getattr(obj.rolling(20, min_periods=minp, center=True), roll_func)(
**kwargs
)
expected = getattr(
concat([obj, Series([np.NaN] * 9)]).rolling(20, min_periods=minp), roll_func
)(**kwargs)[9:].reset_index(drop=True)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"roll_func, kwargs, minp, fill_value",
[
["mean", {}, 10, None],
["sum", {}, 10, None],
["count", {}, 0, 0],
["median", {}, 10, None],
["min", {}, 10, None],
["max", {}, 10, None],
["std", {}, 10, None],
["std", {"ddof": 0}, 10, None],
["var", {}, 10, None],
["var", {"ddof": 0}, 10, None],
],
)
def test_center_reindex_series(series, roll_func, kwargs, minp, fill_value):
# shifter index
s = [f"x{x:d}" for x in range(12)]
series_xp = (
getattr(
series.reindex(list(series.index) + s).rolling(window=25, min_periods=minp),
roll_func,
)(**kwargs)
.shift(-12)
.reindex(series.index)
)
series_rs = getattr(
series.rolling(window=25, min_periods=minp, center=True), roll_func
)(**kwargs)
if fill_value is not None:
series_xp = series_xp.fillna(fill_value)
tm.assert_series_equal(series_xp, series_rs)
@pytest.mark.parametrize(
"roll_func, kwargs, minp, fill_value",
[
["mean", {}, 10, None],
["sum", {}, 10, None],
["count", {}, 0, 0],
["median", {}, 10, None],
["min", {}, 10, None],
["max", {}, 10, None],
["std", {}, 10, None],
["std", {"ddof": 0}, 10, None],
["var", {}, 10, None],
["var", {"ddof": 0}, 10, None],
],
)
def test_center_reindex_frame(frame, roll_func, kwargs, minp, fill_value):
# shifter index
s = [f"x{x:d}" for x in range(12)]
frame_xp = (
getattr(
frame.reindex(list(frame.index) + s).rolling(window=25, min_periods=minp),
roll_func,
)(**kwargs)
.shift(-12)
.reindex(frame.index)
)
frame_rs = getattr(
frame.rolling(window=25, min_periods=minp, center=True), roll_func
)(**kwargs)
if fill_value is not None:
frame_xp = frame_xp.fillna(fill_value)
tm.assert_frame_equal(frame_xp, frame_rs)
@pytest.mark.parametrize(
"f",
[
lambda x: x.rolling(window=10, min_periods=5).cov(x, pairwise=False),
lambda x: x.rolling(window=10, min_periods=5).corr(x, pairwise=False),
lambda x: x.rolling(window=10, min_periods=5).max(),
lambda x: x.rolling(window=10, min_periods=5).min(),
lambda x: x.rolling(window=10, min_periods=5).sum(),
lambda x: x.rolling(window=10, min_periods=5).mean(),
lambda x: x.rolling(window=10, min_periods=5).std(),
lambda x: x.rolling(window=10, min_periods=5).var(),
lambda x: x.rolling(window=10, min_periods=5).skew(),
lambda x: x.rolling(window=10, min_periods=5).kurt(),
lambda x: x.rolling(window=10, min_periods=5).quantile(quantile=0.5),
lambda x: x.rolling(window=10, min_periods=5).median(),
lambda x: x.rolling(window=10, min_periods=5).apply(sum, raw=False),
lambda x: x.rolling(window=10, min_periods=5).apply(sum, raw=True),
pytest.param(
lambda x: x.rolling(win_type="boxcar", window=10, min_periods=5).mean(),
marks=td.skip_if_no_scipy,
),
],
)
def test_rolling_functions_window_non_shrinkage(f):
# GH 7764
s = Series(range(4))
s_expected = Series(np.nan, index=s.index)
df = DataFrame([[1, 5], [3, 2], [3, 9], [-1, 0]], columns=["A", "B"])
df_expected = DataFrame(np.nan, index=df.index, columns=df.columns)
s_result = f(s)
tm.assert_series_equal(s_result, s_expected)
df_result = f(df)
tm.assert_frame_equal(df_result, df_expected)
def test_rolling_max_gh6297():
"""Replicate result expected in GH #6297"""
indices = [datetime(1975, 1, i) for i in range(1, 6)]
# So that we can have 2 datapoints on one of the days
indices.append(datetime(1975, 1, 3, 6, 0))
series = Series(range(1, 7), index=indices)
# Use floats instead of ints as values
series = series.map(lambda x: float(x))
# Sort chronologically
series = series.sort_index()
expected = Series(
[1.0, 2.0, 6.0, 4.0, 5.0],
index=DatetimeIndex([datetime(1975, 1, i, 0) for i in range(1, 6)], freq="D"),
)
x = series.resample("D").max().rolling(window=1).max()
tm.assert_series_equal(expected, x)
def test_rolling_max_resample():
indices = [datetime(1975, 1, i) for i in range(1, 6)]
# So that we can have 3 datapoints on last day (4, 10, and 20)
indices.append(datetime(1975, 1, 5, 1))
indices.append(datetime(1975, 1, 5, 2))
series = Series(list(range(0, 5)) + [10, 20], index=indices)
# Use floats instead of ints as values
series = series.map(lambda x: float(x))
# Sort chronologically
series = series.sort_index()
# Default how should be max
expected = Series(
[0.0, 1.0, 2.0, 3.0, 20.0],
index=DatetimeIndex([datetime(1975, 1, i, 0) for i in range(1, 6)], freq="D"),
)
x = series.resample("D").max().rolling(window=1).max()
tm.assert_series_equal(expected, x)
# Now specify median (10.0)
expected = Series(
[0.0, 1.0, 2.0, 3.0, 10.0],
index=DatetimeIndex([datetime(1975, 1, i, 0) for i in range(1, 6)], freq="D"),
)
x = series.resample("D").median().rolling(window=1).max()
tm.assert_series_equal(expected, x)
# Now specify mean (4+10+20)/3
v = (4.0 + 10.0 + 20.0) / 3.0
expected = Series(
[0.0, 1.0, 2.0, 3.0, v],
index=DatetimeIndex([datetime(1975, 1, i, 0) for i in range(1, 6)], freq="D"),
)
x = series.resample("D").mean().rolling(window=1).max()
tm.assert_series_equal(expected, x)
def test_rolling_min_resample():
indices = [datetime(1975, 1, i) for i in range(1, 6)]
# So that we can have 3 datapoints on last day (4, 10, and 20)
indices.append(datetime(1975, 1, 5, 1))
indices.append(datetime(1975, 1, 5, 2))
series = Series(list(range(0, 5)) + [10, 20], index=indices)
# Use floats instead of ints as values
series = series.map(lambda x: float(x))
# Sort chronologically
series = series.sort_index()
# Default how should be min
expected = Series(
[0.0, 1.0, 2.0, 3.0, 4.0],
index=DatetimeIndex([datetime(1975, 1, i, 0) for i in range(1, 6)], freq="D"),
)
r = series.resample("D").min().rolling(window=1)
tm.assert_series_equal(expected, r.min())
def test_rolling_median_resample():
indices = [datetime(1975, 1, i) for i in range(1, 6)]
# So that we can have 3 datapoints on last day (4, 10, and 20)
indices.append(datetime(1975, 1, 5, 1))
indices.append(datetime(1975, 1, 5, 2))
series = Series(list(range(0, 5)) + [10, 20], index=indices)
# Use floats instead of ints as values
series = series.map(lambda x: float(x))
# Sort chronologically
series = series.sort_index()
# Default how should be median
expected = Series(
[0.0, 1.0, 2.0, 3.0, 10],
index=DatetimeIndex([datetime(1975, 1, i, 0) for i in range(1, 6)], freq="D"),
)
x = series.resample("D").median().rolling(window=1).median()
tm.assert_series_equal(expected, x)
def test_rolling_median_memory_error():
# GH11722
n = 20000
Series(np.random.randn(n)).rolling(window=2, center=False).median()
Series(np.random.randn(n)).rolling(window=2, center=False).median()
@pytest.mark.parametrize(
"data_type",
[np.dtype(f"f{width}") for width in [4, 8]]
+ [np.dtype(f"{sign}{width}") for width in [1, 2, 4, 8] for sign in "ui"],
)
def test_rolling_min_max_numeric_types(data_type):
# GH12373
# Just testing that these don't throw exceptions and that
# the return type is float64. Other tests will cover quantitative
# correctness
result = DataFrame(np.arange(20, dtype=data_type)).rolling(window=5).max()
assert result.dtypes[0] == np.dtype("f8")
result = DataFrame(np.arange(20, dtype=data_type)).rolling(window=5).min()
assert result.dtypes[0] == np.dtype("f8")
@pytest.mark.parametrize(
"f",
[
lambda x: x.rolling(window=10, min_periods=0).count(),
lambda x: x.rolling(window=10, min_periods=5).cov(x, pairwise=False),
lambda x: x.rolling(window=10, min_periods=5).corr(x, pairwise=False),
lambda x: x.rolling(window=10, min_periods=5).max(),
lambda x: x.rolling(window=10, min_periods=5).min(),
lambda x: x.rolling(window=10, min_periods=5).sum(),
lambda x: x.rolling(window=10, min_periods=5).mean(),
lambda x: x.rolling(window=10, min_periods=5).std(),
lambda x: x.rolling(window=10, min_periods=5).var(),
lambda x: x.rolling(window=10, min_periods=5).skew(),
lambda x: x.rolling(window=10, min_periods=5).kurt(),
lambda x: x.rolling(window=10, min_periods=5).quantile(0.5),
lambda x: x.rolling(window=10, min_periods=5).median(),
lambda x: x.rolling(window=10, min_periods=5).apply(sum, raw=False),
lambda x: x.rolling(window=10, min_periods=5).apply(sum, raw=True),
pytest.param(
lambda x: x.rolling(win_type="boxcar", window=10, min_periods=5).mean(),
marks=td.skip_if_no_scipy,
),
],
)
def test_moment_functions_zero_length(f):
# GH 8056
    s = Series(dtype=np.float64)
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2020, Exa Analytics Development Team
# Distributed under the terms of the Apache License 2.0
from unittest import TestCase
import h5py
import numpy as np
import pandas as pd
from exatomic import Universe
from exatomic.base import resource
from exatomic.molcas.output import Output, Orb, HDF
# TODO : change df.shape[0] == num to len(df.index) == num everywhere
class TestOutput(TestCase):
"""Test the Molcas output file editor."""
def setUp(self):
self.cdz = Output(resource('mol-carbon-dz.out'))
self.uo2sp = Output(resource('mol-uo2-anomb.out'))
self.mamcart = Output(resource('mol-ch3nh2-631g.out'))
self.mamsphr = Output(resource('mol-ch3nh2-anovdzp.out'))
self.c2h6 = Output(resource('mol-c2h6-basis.out'))
def test_add_orb(self):
"""Test adding orbital file functionality."""
self.mamcart.add_orb(resource('mol-ch3nh2-631g.scforb'))
self.assertTrue(hasattr(self.mamcart, 'momatrix'))
self.assertTrue(hasattr(self.mamcart, 'orbital'))
with self.assertRaises(ValueError):
self.mamcart.add_orb(resource('mol-ch3nh2-631g.scforb'))
self.mamcart.add_orb(resource('mol-ch3nh2-631g.scforb'),
mocoefs='same')
self.assertTrue('same' in self.mamcart.momatrix.columns)
self.assertTrue('same' in self.mamcart.orbital.columns)
self.mamcart.add_orb(resource('mol-ch3nh2-631g.scforb'),
mocoefs='diff', orbocc='diffocc')
self.assertTrue('diff' in self.mamcart.momatrix.columns)
self.assertTrue('diffocc' in self.mamcart.orbital.columns)
uni = self.mamcart.to_universe()
self.assertTrue(hasattr(uni, 'momatrix'))
self.assertTrue(hasattr(uni, 'orbital'))
def test_add_overlap(self):
"""Test adding an overlap matrix."""
self.cdz.add_overlap(resource('mol-carbon-dz.overlap'))
self.assertTrue(hasattr(self.cdz, 'overlap'))
uni = self.cdz.to_universe()
self.assertTrue(hasattr(uni, 'overlap'))
def test_parse_atom(self):
"""Test the atom table parser."""
self.uo2sp.parse_atom()
self.assertEqual(self.uo2sp.atom.shape[0], 3)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.uo2sp.atom))))
self.mamcart.parse_atom()
self.assertEqual(self.mamcart.atom.shape[0], 7)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mamcart.atom))))
self.mamsphr.parse_atom()
self.assertEqual(self.mamsphr.atom.shape[0], 7)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mamsphr.atom))))
def test_parse_basis_set_order(self):
"""Test the basis set order table parser."""
self.uo2sp.parse_basis_set_order()
self.assertEqual(self.uo2sp.basis_set_order.shape[0], 69)
cols = list(set(self.uo2sp.basis_set_order._columns))
test = pd.DataFrame(self.uo2sp.basis_set_order[cols])
self.assertTrue(np.all(pd.notnull(test)))
self.mamcart.parse_basis_set_order()
self.assertEqual(self.mamcart.basis_set_order.shape[0], 28)
cols = list(set(self.mamcart.basis_set_order._columns))
test = pd.DataFrame(self.mamcart.basis_set_order[cols])
self.assertTrue(np.all(pd.notnull(test)))
self.mamsphr.parse_basis_set_order()
self.assertEqual(self.mamsphr.basis_set_order.shape[0], 53)
cols = list(set(self.mamsphr.basis_set_order._columns))
test = pd.DataFrame(self.mamsphr.basis_set_order[cols])
self.assertTrue(np.all(pd.notnull(test)))
def test_parse_basis_set(self):
"""Test the gaussian basis set table parser."""
self.uo2sp.parse_basis_set()
self.assertEqual(self.uo2sp.basis_set.shape[0], 451)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.uo2sp.basis_set))))
self.mamcart.parse_basis_set()
self.assertEqual(self.mamcart.basis_set.shape[0], 84)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mamcart.basis_set))))
self.mamsphr.parse_basis_set()
self.assertEqual(self.mamsphr.basis_set.shape[0], 148)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mamsphr.basis_set))))
self.c2h6.parse_basis_set()
self.assertTrue(hasattr(self.c2h6, 'basis_set'))
def test_to_universe(self):
"""Test that the Outputs can be converted to universes."""
uni = self.uo2sp.to_universe()
self.assertIs(type(uni), Universe)
uni = self.mamcart.to_universe()
self.assertIs(type(uni), Universe)
uni = self.mamsphr.to_universe()
self.assertIs(type(uni), Universe)
class TestOrb(TestCase):
"""Test the Molcas Orb file parser."""
def test_parse_old_uhf(self):
sym = Orb(resource('mol-c2h6-old-sym.uhforb'))
nym = Orb(resource('mol-c2h6-old-nosym.uhforb'))
sym.parse_momatrix()
nym.parse_momatrix()
self.assertTrue(sym.momatrix.shape[0] == 274)
self.assertTrue(nym.momatrix.shape[0] == 900)
def test_parse_old_orb(self):
sym = Orb(resource('mol-c2h6-old-sym.scforb'))
nym = Orb(resource('mol-c2h6-old-nosym.scforb'))
sym.parse_momatrix()
nym.parse_momatrix()
self.assertTrue(sym.momatrix.shape[0] == 274)
self.assertTrue(nym.momatrix.shape[0] == 900)
def test_parse_uhf(self):
sym = Orb(resource('mol-c2h6-sym.uhforb'))
nym = Orb(resource('mol-c2h6-nosym.uhforb'))
sym.parse_momatrix()
nym.parse_momatrix()
self.assertTrue(sym.momatrix.shape[0] == 274)
self.assertTrue(nym.momatrix.shape[0] == 900)
def test_parse_orb(self):
sym = Orb(resource('mol-c2h6-sym.scforb'))
nym = Orb(resource('mol-c2h6-nosym.scforb'))
sym.parse_momatrix()
nym.parse_momatrix()
self.assertTrue(sym.momatrix.shape[0] == 274)
self.assertTrue(nym.momatrix.shape[0] == 900)
def test_parse_momatrix(self):
"""Test the momatrix table parser."""
uo2sp = Orb(resource('mol-uo2-anomb.scforb'))
uo2sp.parse_momatrix()
self.assertEqual(uo2sp.momatrix.shape[0], 4761)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(uo2sp.momatrix))))
self.assertTrue(np.all(pd.notnull(pd.DataFrame(uo2sp.orbital))))
mamcart = Orb(resource('mol-ch3nh2-631g.scforb'))
mamcart.parse_momatrix()
self.assertEqual(mamcart.momatrix.shape[0], 784)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(mamcart.momatrix))))
self.assertTrue(np.all(pd.notnull(pd.DataFrame(mamcart.orbital))))
mamsphr = Orb(resource('mol-ch3nh2-anovdzp.scforb'))
mamsphr.parse_momatrix()
self.assertEqual(mamsphr.momatrix.shape[0], 2809)
        self.assertTrue(np.all(pd.notnull(pd.DataFrame(mamsphr.momatrix))))
import pandas as pd
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
import warnings
import itertools
import datetime
import os
from math import sqrt
#import seaborn as sns
class ContagionAnalysis():
def __init__(self, world):
self.world = world
# time as lable to write files
now = datetime.datetime.now()
self.now = now.strftime("%Y-%m-%d_%H:%M")
def run_contaigon_analysis(self, opinion_type, analysis="expo_frac", n_bins = 20, binning = True, save_plots = False, show_plot=True, write_data = True, output_folder = ""):
        ''' Run a full contagion analysis.
            Parameters:
                opinion_type: (str) name of the opinion/trait column
                analysis: (str) analysis type ("expo_frac" or "expo_nmb")
                n_bins: (int) number of bins
                binning: (bool) whether to bin the exposure values
                save_plots: (bool) whether to save plots to disk
                show_plot: (bool) whether to display the plots
                write_data: (bool) whether to write intermediate data to disk
                output_folder: (str) folder for data and plots
        '''
        # name to label files
name = self.world.name + \
"_" + analysis + \
"_" + self.now
self.output_folder = output_folder
print("Write into: " + self.TEMP_DIR + output_folder)
if not os.path.exists(self.TEMP_DIR + output_folder):
os.makedirs(self.TEMP_DIR + output_folder)
# calc exposure
exposure = self.calc_exposure(analysis, opinion_type)
#write data
if write_data:
exposure.to_pickle(self.TEMP_DIR + output_folder + "exposure_" + name + ".pkl")
# calc trait change
data, expo_agg = self.opinion_change_per_exposure(exposure, opinion_type)
#write data
if write_data:
data.to_pickle(self.TEMP_DIR + output_folder + "data_" + name + ".pkl")
# plot
plot_data = self.plot_opinion_change_per_exposure_number(data, analysis, binning, n_bins, \
save_plots, show_plot)
return [data, plot_data]
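    # Illustrative call (assumptions: `world` provides `name`, `type`, `cc`, `op_nodes`, `time`
    # and `get_relation_graph_t`, and `TEMP_DIR` is defined on the class or instance elsewhere):
    #   ca = ContagionAnalysis(world)
    #   data, plot_data = ca.run_contaigon_analysis("opinion", analysis="expo_frac",
    #                                               n_bins=20, output_folder="runs/")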
def _get_neighbors(self,g,i):
''' returns neighbors of node i in graph g '''
try:
return [n for n in g[i]]
except KeyError:
return []
def _calc_expo_frac(self, node_id, opinion_type, t, op_nodes, graph, all_opinions):
        ''' Calculate exposure as the fraction of a node's neighbours holding each opinion '''
neighbors = self._get_neighbors(graph, node_id)
opinions = op_nodes.loc[neighbors]
nmb_1 = opinions.loc[opinions[opinion_type] == True, opinion_type].count()
nmb_2 = opinions.loc[opinions[opinion_type] == False, opinion_type].count()
exposure = pd.DataFrame({ opinion_type: [True, False],\
'n_influencer': [nmb_1, nmb_2],\
'frac_influencer': [nmb_1, nmb_2] })
        if len(neighbors) <= 2 and self.world.type == "SYN":
            if self.world.cc:
exposure *= 0
# normalize exposure
if len(neighbors) > 0:
exposure.frac_influencer /= len(neighbors)
exposure['n_nbs'] = len(neighbors)
exposure['node_id'] = node_id
exposure['time'] = t
return exposure
def calc_exposure(self, analysis, opinion_type, exposure_time = 7):
''' Calculate exposure for opinion type, distinguish between different analysis types '''
print("INFO: Calc exposure...")
        # prepare some variables for later use
all_opinions = pd.DataFrame( self.world.op_nodes[opinion_type].unique(), \
columns=[opinion_type])
nodes = self.world.op_nodes.node_id.unique()
self.world.op_nodes.time = pd.to_datetime(self.world.op_nodes.time)
op_nodes = [self.world.op_nodes[self.world.op_nodes.time == t].set_index('node_id') \
for t in self.world.time.time]
# distinguish between analysis types and calc exposure
if analysis == "expo_frac":
print("INFO: Calc expo frac")
expo = []
for t in self.world.time.time:
rel_graph = self.world.get_relation_graph_t(t = t)
op_nodes_t = self.world.op_nodes.loc[self.world.op_nodes.time == t].set_index('node_id')
expo += [ self._calc_expo_frac( node_id, opinion_type, t, op_nodes_t, rel_graph, all_opinions) \
for node_id in nodes]
expo = pd.concat(expo)
# calc mean over last exposure_time days
sigma = pd.to_timedelta(exposure_time, unit='d').total_seconds() #seconds
two_sigma_sqr = 2* sigma * sigma
expo.time = pd.to_datetime(expo.time)
        expo = expo.groupby(['node_id', opinion_type])[["time", "n_influencer", "n_nbs", "frac_influencer"]].apply( \
lambda p: self._agg_expo(p, two_sigma_sqr, analysis) \
).reset_index()
if analysis == "expo_frac":
expo.set_index(['node_id','time',opinion_type],inplace=True)
expo["exposure"] = expo.n_influencer_mean / expo.n_nbs_mean
expo.reset_index(inplace=True)
expo.set_index(['node_id','time'],inplace=True)
return expo
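    # The frame returned above is indexed by (node_id, time) and holds, per opinion value, the
    # temporally smoothed neighbour counts (n_influencer_mean, n_nbs_mean) and the resulting
    # `exposure` fraction consumed by opinion_change_per_exposure().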
def _agg_expo(self, expo_slice, two_sigma_sqr, analysis):
''' weighted temporal mean of expo_slice '''
expo_slice = expo_slice.copy()
expo_slice.time = expo_slice.time.astype('int')/1000000000.0 # to seconds
time_matrix = np.array([expo_slice.time.values]*len(expo_slice.time))
diff = (time_matrix - time_matrix.transpose()) #seconds
matrix = np.exp(-(diff * diff)/two_sigma_sqr)
filter_past = np.tril(np.ones_like(matrix))
matrix *= filter_past
if analysis == "expo_nmb":
expo_slice["exposure"] = np.dot(matrix, expo_slice.exposure)
else:
norm = np.dot(matrix, np.ones_like(expo_slice.frac_influencer))
expo_slice["frac_influencer_mean"] = np.dot(matrix, expo_slice.frac_influencer)
expo_slice["frac_influencer_mean"] /= norm
expo_slice["n_influencer_summed"] = np.dot(matrix, expo_slice.n_influencer)
expo_slice["n_influencer_mean"] = expo_slice["n_influencer_summed"] / norm
expo_slice["n_nbs_summed"] = np.dot(matrix, expo_slice.n_nbs)
expo_slice["n_nbs_mean"] = expo_slice["n_nbs_summed"] / norm
expo_slice.time = pd.to_datetime(expo_slice.time, unit="s")
return expo_slice.set_index("time")
def opinion_change_per_exposure(self, exposure, opinion_type, opinion_change_time = 1):
        ''' Compute whether each node's opinion changed, given its exposure '''
print("INFO: Calc op-change")
exposure = exposure.copy()
op_nodes = self.world.op_nodes.copy()
        op_nodes.time = pd.to_datetime(op_nodes.time)
import Dataset
from Estimators import XGB
from Utils import Profiler
import pandas as pd
from IPython.display import display
import xgboost as xgb
import gc
profile = Profiler()
profile.Start()
# Gather Data
train_X, test_X, train_Y = Dataset.Load('AllData_v3')
# Convert data to DMatrix
dtrain = xgb.DMatrix(train_X, train_Y)
dtest = xgb.DMatrix(test_X)
# Define estimator parameters
params = {'eta' :0.3,
'gamma' :0,
'max_depth' :6,
'min_child_weight' :10,
'subsample' :1,
'colsample_bytree' :1,
'colsample_bylevel' :1,
'lambda' :1,
'alpha' :0,
'scale_pos_weight' :1,
          'objective' :'gpu_binary:logistic', # for cpu choose 'binary:logistic'
'eval_metric' :'auc',
'tree_method' :'gpu_hist', # for cpu choose 'hist'
'silent' :1
}
# Parameters that are to be supplied to cross-validation
cv_params = {
"dtrain" : dtrain,
"num_boost_round" : 10000,
"nfold" : 5,
"maximize" : True,
"early_stopping_rounds" : 30,
"verbose_eval" : 10
}
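# Assumption about the home-grown Estimators.XGB API (not shown in this file):
# model.gridsearch(param_grid, cv_params) is expected to return the cross-validation summary as
# a DataFrame together with the updated best parameters, which are carried into the next tuning
# step below.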
# Step 1
print("Creating Step-1 param grid and running GridSearch")
param_grid = { "max_depth" : range(3,9,1),
'min_child_weight' : range(10,71,10)}
model = XGB(params)
gs_results, params = model.gridsearch(param_grid, cv_params)
gs_summary = gs_results
del model
gc.collect()
# Step 2
print("Creating Step-2 param grid and running GridSearch")
param_grid = { "gamma" : [i/10.0 for i in range(0,5)]}
model = XGB(params)
gs_results, params = model.gridsearch(param_grid, cv_params)
gs_summary = pd.concat([gs_summary, gs_results], ignore_index=True)
del model
gc.collect()
# Step 3
print("Creating Step-3 param grid and running GridSearch")
param_grid = { "subsample" : [i/10.0 for i in range(6,10)],
'colsample_bytree' : [i/10.0 for i in range(6,10)]}
model = XGB(params)
gs_results, params = model.gridsearch(param_grid, cv_params)
gs_summary = pd.concat([gs_summary, gs_results], ignore_index=True)
del model
gc.collect()
# Step 4
print("Creating Step-4 param grid and running GridSearch")
param_grid = { "lambda" : [0.01, 0.03, 0.1, 0.3, 1]}
model = XGB(params)
gs_results, params = model.gridsearch(param_grid, cv_params)
gs_summary = pd.concat([gs_summary, gs_results], ignore_index=True)
# *****************************************************************************
# Copyright (c) 2020, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
"""
| This file contains function templates used by the auto-generation script
"""
# below imports are copied into the auto-generated source file as-is
# for the auto-generation script to work ensure they are not mixed up with code
import numba
import numpy
import operator
import pandas
from numba.core.errors import TypingError
from numba import types
from sdc.utilities.sdc_typing_utils import (TypeChecker, check_index_is_numeric, check_types_comparable,
find_common_dtype_from_numpy_dtypes)
from sdc.datatypes.common_functions import (sdc_join_series_indexes, )
from sdc.hiframes.pd_series_type import SeriesType
from sdc.str_arr_ext import (string_array_type, str_arr_is_na)
from sdc.utilities.utils import sdc_overload, sdc_overload_method
from sdc.functions import numpy_like
from sdc.datatypes.range_index_type import RangeIndexType
def sdc_pandas_series_binop(self, other, level=None, fill_value=None, axis=0):
"""
Intel Scalable Dataframe Compiler User Guide
********************************************
Pandas API: pandas.Series.binop
Limitations
-----------
Parameters ``level`` and ``axis`` are currently unsupported by Intel Scalable Dataframe Compiler
Examples
--------
.. literalinclude:: ../../../examples/series/series_binop.py
:language: python
:lines: 27-
:caption:
:name: ex_series_binop
.. command-output:: python ./series/series_binop.py
:cwd: ../../../examples
Intel Scalable Dataframe Compiler Developer Guide
*************************************************
Pandas Series method :meth:`pandas.Series.binop` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5
"""
_func_name = 'Method binop().'
ty_checker = TypeChecker(_func_name)
self_is_series, other_is_series = isinstance(self, SeriesType), isinstance(other, SeriesType)
if not (self_is_series or other_is_series):
return None
# this overload is not for string series
self_is_string_series = self_is_series and isinstance(self.dtype, types.UnicodeType)
other_is_string_series = other_is_series and isinstance(other.dtype, types.UnicodeType)
if self_is_string_series or other_is_string_series:
return None
if not isinstance(self, (SeriesType, types.Number)):
ty_checker.raise_exc(self, 'pandas.series or scalar', 'self')
if not isinstance(other, (SeriesType, types.Number)):
ty_checker.raise_exc(other, 'pandas.series or scalar', 'other')
operands_are_series = self_is_series and other_is_series
if operands_are_series:
none_or_numeric_indexes = ((isinstance(self.index, types.NoneType) or check_index_is_numeric(self))
and (isinstance(other.index, types.NoneType) or check_index_is_numeric(other)))
series_indexes_comparable = check_types_comparable(self.index, other.index) or none_or_numeric_indexes
if not series_indexes_comparable:
raise TypingError('{} Not implemented for series with not-comparable indexes. \
Given: self.index={}, other.index={}'.format(_func_name, self.index, other.index))
series_data_comparable = check_types_comparable(self, other)
if not series_data_comparable:
raise TypingError('{} Not supported for not-comparable operands. \
Given: self={}, other={}'.format(_func_name, self, other))
if not isinstance(level, types.Omitted) and level is not None:
ty_checker.raise_exc(level, 'None', 'level')
if not isinstance(fill_value, (types.Omitted, types.Number, types.NoneType)) and fill_value is not None:
ty_checker.raise_exc(fill_value, 'number', 'fill_value')
fill_value_is_none = isinstance(fill_value, (types.NoneType, types.Omitted)) or fill_value is None
if not isinstance(axis, types.Omitted) and axis != 0:
ty_checker.raise_exc(axis, 'int', 'axis')
# specializations for numeric series only
if not operands_are_series:
def _series_binop_scalar_impl(self, other, level=None, fill_value=None, axis=0):
if self_is_series == True: # noqa
numpy_like.fillna(self._data, inplace=True, value=fill_value)
result_data = numpy.empty(len(self._data), dtype=numpy.float64)
result_data[:] = self._data + numpy.float64(other)
return pandas.Series(result_data, index=self._index, name=self._name)
else:
numpy_like.fillna(other._data, inplace=True, value=fill_value)
result_data = numpy.empty(len(other._data), dtype=numpy.float64)
result_data[:] = numpy.float64(self) + other._data
return pandas.Series(result_data, index=other._index, name=other._name)
return _series_binop_scalar_impl
else: # both operands are numeric series
# optimization for series with default indexes, that can be aligned differently
if (isinstance(self.index, types.NoneType) and isinstance(other.index, types.NoneType)):
def _series_binop_none_indexes_impl(self, other, level=None, fill_value=None, axis=0):
numpy_like.fillna(self._data, inplace=True, value=fill_value)
numpy_like.fillna(other._data, inplace=True, value=fill_value)
if (len(self._data) == len(other._data)):
result_data = numpy_like.astype(self._data, numpy.float64)
result_data = result_data + other._data
return pandas.Series(result_data)
else:
left_size, right_size = len(self._data), len(other._data)
min_data_size = min(left_size, right_size)
max_data_size = max(left_size, right_size)
result_data = numpy.empty(max_data_size, dtype=numpy.float64)
_fill_value = numpy.nan if fill_value_is_none == True else fill_value # noqa
if (left_size == min_data_size):
result_data[:min_data_size] = self._data
for i in range(min_data_size, len(result_data)):
result_data[i] = _fill_value
result_data = result_data + other._data
else:
result_data[:min_data_size] = other._data
for i in range(min_data_size, len(result_data)):
result_data[i] = _fill_value
result_data = self._data + result_data
return pandas.Series(result_data)
return _series_binop_none_indexes_impl
else:
left_index_is_range = isinstance(self.index, (RangeIndexType, types.NoneType))
right_index_is_range = isinstance(other.index, (RangeIndexType, types.NoneType))
check_index_equal = left_index_is_range and right_index_is_range
self_index_dtype = RangeIndexType.dtype if isinstance(self.index, types.NoneType) else self.index.dtype
other_index_dtype = RangeIndexType.dtype if isinstance(other.index, types.NoneType) else other.index.dtype
index_dtypes_match = self_index_dtype == other_index_dtype
if not index_dtypes_match:
numba_index_common_dtype = find_common_dtype_from_numpy_dtypes(
[self_index_dtype, other_index_dtype], [])
else:
numba_index_common_dtype = self_index_dtype
def _series_binop_common_impl(self, other, level=None, fill_value=None, axis=0):
left_index, right_index = self.index, other.index
numpy_like.fillna(self._data, inplace=True, value=fill_value)
numpy_like.fillna(other._data, inplace=True, value=fill_value)
if check_index_equal == True: # noqa
equal_indexes = numpy_like.array_equal(left_index, right_index)
else:
equal_indexes = False
if (left_index is right_index or equal_indexes):
result_data = numpy.empty(len(self._data), dtype=numpy.float64)
result_data[:] = self._data + other._data
if index_dtypes_match == False: # noqa
result_index = numpy_like.astype(left_index, numba_index_common_dtype)
else:
result_index = left_index.values if left_index_is_range == True else left_index # noqa
return pandas.Series(result_data, index=result_index)
# TODO: replace below with core join(how='outer', return_indexers=True) when implemented
joined_index, left_indexer, right_indexer = sdc_join_series_indexes(left_index, right_index)
result_size = len(joined_index)
left_values = numpy.empty(result_size, dtype=numpy.float64)
right_values = numpy.empty(result_size, dtype=numpy.float64)
_fill_value = numpy.nan if fill_value_is_none == True else fill_value # noqa
for i in range(result_size):
left_pos, right_pos = left_indexer[i], right_indexer[i]
left_values[i] = self._data[left_pos] if left_pos != -1 else _fill_value
right_values[i] = other._data[right_pos] if right_pos != -1 else _fill_value
result_data = left_values + right_values
return pandas.Series(result_data, joined_index)
return _series_binop_common_impl
return None
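# Summary of the overload above: it returns one of three specializations -- a
# series/scalar implementation, a fast path for two series with default (positional)
# indexes, and a general implementation that aligns the indexes with
# sdc_join_series_indexes before applying the element-wise operation.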
def sdc_pandas_series_comp_binop(self, other, level=None, fill_value=None, axis=0):
"""
Intel Scalable Dataframe Compiler User Guide
********************************************
Pandas API: pandas.Series.comp_binop
Limitations
-----------
Parameters ``level`` and ``axis`` are currently unsupported by Intel Scalable Dataframe Compiler
Examples
--------
.. literalinclude:: ../../../examples/series/series_comp_binop.py
:language: python
:lines: 27-
:caption:
:name: ex_series_comp_binop
.. command-output:: python ./series/series_comp_binop.py
:cwd: ../../../examples
Intel Scalable Dataframe Compiler Developer Guide
*************************************************
Pandas Series method :meth:`pandas.Series.comp_binop` implementation.
.. only:: developer
Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_op8
"""
_func_name = 'Method comp_binop().'
ty_checker = TypeChecker(_func_name)
ty_checker.check(self, SeriesType)
if not (isinstance(level, types.Omitted) or level is None):
ty_checker.raise_exc(level, 'None', 'level')
if not isinstance(fill_value, (types.Omitted, types.Number, types.NoneType)) and fill_value is not None:
ty_checker.raise_exc(fill_value, 'number', 'fill_value')
if not (isinstance(axis, types.Omitted) or axis == 0):
ty_checker.raise_exc(axis, 'int', 'axis')
self_is_series, other_is_series = isinstance(self, SeriesType), isinstance(other, SeriesType)
if not (self_is_series or other_is_series):
return None
if not isinstance(self, (SeriesType, types.Number, types.UnicodeType)):
ty_checker.raise_exc(self, 'pandas.series or scalar', 'self')
if not isinstance(other, (SeriesType, types.Number, types.UnicodeType)):
ty_checker.raise_exc(other, 'pandas.series or scalar', 'other')
operands_are_series = self_is_series and other_is_series
if operands_are_series:
none_or_numeric_indexes = ((isinstance(self.index, types.NoneType) or check_index_is_numeric(self))
and (isinstance(other.index, types.NoneType) or check_index_is_numeric(other)))
series_indexes_comparable = check_types_comparable(self.index, other.index) or none_or_numeric_indexes
if not series_indexes_comparable:
raise TypingError('{} Not implemented for series with not-comparable indexes. \
Given: self.index={}, other.index={}'.format(_func_name, self.index, other.index))
series_data_comparable = check_types_comparable(self, other)
if not series_data_comparable:
raise TypingError('{} Not supported for not-comparable operands. \
Given: self={}, other={}'.format(_func_name, self, other))
fill_value_is_none = isinstance(fill_value, (types.NoneType, types.Omitted)) or fill_value is None
if not operands_are_series:
def _series_comp_binop_scalar_impl(self, other, level=None, fill_value=None, axis=0):
if self_is_series == True: # noqa
numpy_like.fillna(self._data, inplace=True, value=fill_value)
return pandas.Series(self._data < other, index=self._index, name=self._name)
else:
numpy_like.fillna(other._data, inplace=True, value=fill_value)
return | pandas.Series(self < other._data, index=other._index, name=other._name) | pandas.Series |
#!/usr/bin/env python3
"""
Copyright 2020 <NAME>
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
"""
__name__: pay_slip_analysis.py
__description__: Process & parse PaySlips & merge them into a CSV and create visualization
__author__: <NAME>
__version__: 1.0
__created__: 2020-03-15
__updated__: 2020-04-19
"""
import os
import datetime
from dateutil import parser
import calendar
import locale
import logging
import functools
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
import tabula
from constants import COL_OUT_CSV_LIST
from constants import EARNING_COLS_TUP
from constants import DEDUCTION_COLS_TUP
from constants import DEDUCTION_OLD_COLS_TUP
from constants import DEDUCTION_NEW_COLS_TUP
from constants import PDF_NET_PAY_LABEL
from environment import Env
locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
DF_COLUMN_DICT = {
"value_index": 2,
"net_pay_index": 4,
"df_year_month_index": 0,
"earning_title_column": None,
"earning_current_column": "Unnamed: 1",
"earning_all_column": "Unnamed: 2",
"deduction_title_column": "Unnamed: 3",
"deduction_current_column": "Unnamed: 4",
"deduction_all_column": "Unnamed: 5",
"net_pay_column": None
}
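# DF_COLUMN_DICT maps the logical payslip fields to the unnamed columns produced by
# tabula when parsing the PDF table, plus the row indices at which the earning and
# deduction values and the net-pay line appear.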
def main():
"""
1. Read and parse PDF files into month-wise DF
2. Create CSV by combining all the DFs
3. Create visualizations from the CSV
:return: None
"""
logger = Env.setup_logging()
path_base = os.path.join(os.getcwd(), "PDF")
csv_path = os.path.join(os.getcwd(), "output", "Salary_Slips_Merged.csv")
pdf_path_list = sorted([os.path.join(path_base, file_name) for file_name in os.listdir(path_base) if
(os.path.isfile(os.path.join(path_base, file_name)) and
os.path.basename(file_name).endswith(".pdf") and ("Pay" in file_name))])
combined_payslip_df = pd.DataFrame()
for pdf_path in pdf_path_list:
monthly_df = pdf_to_df(pdf_path, logger)
combined_payslip_df = combined_payslip_df.append(monthly_df)
if not os.path.exists(os.path.dirname(csv_path)):
os.mkdir(os.path.dirname(csv_path))
combined_payslip_df.to_csv(csv_path, index=False)
side_by_side_bar_plot(csv_path)
line_plot(csv_path)
def side_by_side_bar_plot(csv_path):
"""
Create month wise side by side bar plot of deduction, net pay and total salary and persist the same
:param csv_path: Absolute CSV path of the combined payslips
:return: None
"""
csv_df = pd.read_csv(csv_path)
csv_df["YearMonth"] = csv_df["YearMonthDate"].apply(
lambda ymd: str(datetime.datetime.strftime(parser.parse(ymd), "%b-%y")))
# Get desired columns
df = csv_df[["YearMonth", "Deductions", "NetPay", "Total"]]
# Melt the DF into long (tidy) format for seaborn
melt_df = df.melt(id_vars='YearMonth').rename(columns=str.title)
# Create color palette
rgb_palette = ["#e74c3c", "#2ecc71", "#3498db"]
# Create subplots
sns.set()
fig, ax = plt.subplots(figsize=(15, 10))
# Create bar plot
graph = sns.barplot(x='Yearmonth', y='Value', hue='Variable', data=melt_df, ax=ax, palette=rgb_palette)
# Enhance graph, label, font and alignment
beautify_graph(graph, ax)
sns.despine(fig)
plt.savefig(os.path.join(os.getcwd(), "output", "side_by_side_bar_graph.png"))
plt.show()
def line_plot(csv_path):
"""
Create month wise trend line graph of deduction, net pay and total salary and persist the same
:param csv_path: Absolute CSV path of the combined payslips
:return: None
"""
csv_df = pd.read_csv(csv_path)
df = csv_df[["YearMonthDate", "Deductions", "NetPay", "Total"]]
tidy = df.melt(id_vars='YearMonthDate').rename(columns=str.title)
sns.set()
fig, ax = plt.subplots(figsize=(15, 10))
# Create color palette
rgb_palette = ["#e74c3c", "#2ecc71", "#3498db"]
graph = sns.lineplot(x='Yearmonthdate', y='Value', hue='Variable', data=tidy, palette=rgb_palette)
for item in graph.get_xticklabels():
item.set_rotation(90)
beautify_graph(graph, ax)
sns.despine(fig)
plt.savefig(os.path.join(os.getcwd(), "output", "trend_line_graph.png"))
plt.show()
def beautify_graph(graph, ax):
"""
Set ticks, labels and fonts
:param graph: Graph object
:param ax: Axis object
:return: None
"""
# Set label padding
ax.xaxis.labelpad = 15
ax.yaxis.labelpad = 15
# Change tick font size
ax.xaxis.set_tick_params(labelsize=10)
ax.yaxis.set_tick_params(labelsize=10)
# Change tick padding
ax.tick_params(axis='x', which='major', pad=3)
ax.tick_params(axis='y', which='major', pad=3)
# Change rotation of X tick labels
for item in graph.get_xticklabels():
item.set_rotation(90)
# Get rid of title in legend
leg = graph.axes.get_legend()
leg.set_title(None)
# Set thousand separator for y axis
ax.get_yaxis().set_major_formatter(matplotlib.ticker.FuncFormatter(lambda x, p: format(int(x), ',')))
# Set labels and title
ax.set_ylabel('Amount (₹)')
ax.set_xlabel('Year Month')
ax.set_title('Monthly Salary Analysis')
def pdf_to_df(pdf_path: str, logger: logging.Logger) -> pd.DataFrame:
"""
Read PDF into unorganized DF using tabula; extract and organize the columns and values into a DF
:param pdf_path: Absolute path of the salary PDF
:param logger: Logger object
:return: DF of monthly salary details
"""
# Don't output warnings if there are any
pdf_df = tabula.read_pdf(pdf_path, silent=True)
column_month_name = pdf_df.columns[0]
year_month_date = get_year_month(pdf_df)
logger.info(f"{year_month_date}")
DF_COLUMN_DICT["earning_title_column"] = column_month_name
DF_COLUMN_DICT["net_pay_column"] = column_month_name
emp_title = pdf_df[column_month_name].iloc[0].splitlines()
emp_value = pdf_df["Unnamed: 1"].iloc[0].splitlines()
"""
Earning Current Month
"""
earning_current_title = pdf_df[column_month_name].iloc[2].splitlines()
earning_current_value = pdf_df["Unnamed: 1"].iloc[2].splitlines()
earning_current_list = to_float_list(earning_current_value)
earning_to_date_value = pdf_df["Unnamed: 2"].iloc[2].splitlines()
"""
Deduction Current Month
"""
deduction_title = pdf_df["Unnamed: 3"].iloc[2].splitlines()
deduction_current_value = pdf_df["Unnamed: 4"].iloc[2].splitlines()
deduction_current_list = to_float_list(deduction_current_value)
deduction_to_date_value = pdf_df["Unnamed: 5"].iloc[2].splitlines()
check_ded_col = validate_deduction_cols(year_month_date, deduction_title)
if not check_ded_col:
logger.critical(f"Deduction column check failed")
deduction_current_dict = dict(zip(deduction_title, deduction_current_list))
net_pay_lines = pdf_df[DF_COLUMN_DICT["net_pay_column"]].iloc[DF_COLUMN_DICT["net_pay_index"]].splitlines()
net_pay_amount = locale.atof(str(net_pay_lines[0]).replace(PDF_NET_PAY_LABEL, ""))
current_total = functools.reduce(lambda x, y: x + y, earning_current_list)
current_deduction = functools.reduce(lambda x, y: x + y, deduction_current_list)
earning_current_dict = dict(zip(earning_current_title, earning_current_list))
earning_current_dict["YearMonthDate"] = year_month_date
earning_current_dict["CalculatedTotal"] = current_total
earning_deduction_dict_list = [{**earning_current_dict, **deduction_current_dict}]
earning_current_df = | pd.DataFrame(earning_deduction_dict_list) | pandas.DataFrame |
# -*- coding: utf-8 -*-
# pylint: disable-msg=W0612,E1101
import itertools
import warnings
from warnings import catch_warnings
from datetime import datetime
from pandas.types.common import (is_integer_dtype,
is_float_dtype,
is_scalar)
from pandas.compat import range, lrange, lzip, StringIO, lmap
from pandas.tslib import NaT
from numpy import nan
from numpy.random import randn
import numpy as np
import pandas as pd
from pandas import option_context
from pandas.core.indexing import _non_reducing_slice, _maybe_numeric_slice
from pandas.core.api import (DataFrame, Index, Series, Panel, isnull,
MultiIndex, Timestamp, Timedelta, UInt64Index)
from pandas.formats.printing import pprint_thing
from pandas import concat
from pandas.core.common import PerformanceWarning
from pandas.tests.indexing.common import _mklbl
import pandas.util.testing as tm
from pandas import date_range
_verbose = False
# ------------------------------------------------------------------------
# Indexing test cases
def _generate_indices(f, values=False):
""" generate the indicies
if values is True , use the axis values
is False, use the range
"""
axes = f.axes
if values:
axes = [lrange(len(a)) for a in axes]
return itertools.product(*axes)
def _get_value(f, i, values=False):
""" return the value for the location i """
# check against values
if values:
return f.values[i]
# this is equiv of f[col][row].....
# v = f
# for a in reversed(i):
# v = v.__getitem__(a)
# return v
with catch_warnings(record=True):
return f.ix[i]
def _get_result(obj, method, key, axis):
""" return the result for this obj with this key and this axis """
if isinstance(key, dict):
key = key[axis]
# use an artificial conversion to map the key as integers to the labels
# so ix can work for comparisons
if method == 'indexer':
method = 'ix'
key = obj._get_axis(axis)[key]
# in case we actually want 0 index slicing
try:
xp = getattr(obj, method).__getitem__(_axify(obj, key, axis))
except:
xp = getattr(obj, method).__getitem__(key)
return xp
def _axify(obj, key, axis):
# create a tuple accessor
axes = [slice(None)] * obj.ndim
axes[axis] = key
return tuple(axes)
class TestIndexing(tm.TestCase):
_objs = set(['series', 'frame', 'panel'])
_typs = set(['ints', 'uints', 'labels', 'mixed',
'ts', 'floats', 'empty', 'ts_rev'])
def setUp(self):
self.series_ints = Series(np.random.rand(4), index=lrange(0, 8, 2))
self.frame_ints = DataFrame(np.random.randn(4, 4),
index=lrange(0, 8, 2),
columns=lrange(0, 12, 3))
self.panel_ints = Panel(np.random.rand(4, 4, 4),
items=lrange(0, 8, 2),
major_axis=lrange(0, 12, 3),
minor_axis=lrange(0, 16, 4))
self.series_uints = Series(np.random.rand(4),
index=UInt64Index( | lrange(0, 8, 2) | pandas.compat.lrange |
from pandas import read_csv, DataFrame, concat, Series, set_option, reset_option, option_context
from os import path
import matplotlib.pyplot as plt
from time import time
start_total = time()
df = read_csv(path.join("Output", "url_frequency.csv"), names=["url", "frequency"])
category_urls = read_csv("url_categories_copy.csv", names=["category", "url"])
#categories = DataFrame(columns=["category", "url"])
categories_only = []
urls_only = []
start = time()
for url in df["url"][:-1]:
c = category_urls[category_urls["url"] == url] #.str.contains(str(url))
if not c.empty:# and len(str(url)) > 5:
categories_only.append(c["category"].iloc[0])
urls_only.append(c["url"].iloc[0])
#categories = concat([categories, c])
categories = DataFrame({"category": categories_only, "url": urls_only})
print("Time to concat:", round(time()-start, 2))
start = time()
#print(categories)
categories["frequency"] = | Series() | pandas.Series |
#!/usr/bin/env python
'''
Convert a list to a Series and concatenate it to a DataFrame as a new column.
'''
import pandas as pd
class ConcatFeature:
'''
Parameters:
-------------------------
feature: Takes in a list or series
df: Takes in a dictionary or dataframe
Returns:
-------------------------
a pandas DataFrame with the feature concatenated as an additional column
'''
# feature is a list or Series of values; df is a dict or pandas DataFrame
def __init__(self, feature, df):
self.feature = feature
self.df = df
def concat(self):
# Change list to a Series
feature_series = pd.Series(self.feature)
# Convert the input dict (or DataFrame) to a DataFrame
df = pd.DataFrame(self.df)
# Return a new DataFrame with the feature concatenated as a column
return | pd.concat([df, feature_series], axis=1) | pandas.concat |
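# Hypothetical usage sketch (not in the original source): the list becomes an extra,
# integer-named column appended to the DataFrame built from the dict.
example_result = ConcatFeature(feature=[1, 2, 3], df={"a": [4, 5, 6]}).concat()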
"""
general utilities for re-use
"""
from configparser import ConfigParser
import os
import pandas as pd
import pickle
from timelogging.timeLog import log
from typing import List, Tuple, Union, Optional
config_parser = ConfigParser()
def assertDirExistent(path):
if not os.path.exists(path):
raise IOError(f'{path} does not exist')
def assertFileInxestent(file_path):
""" assert if file is inexistent
:param: filePath"""
if os.path.isfile(file_path):
raise FileExistsError(f'{file_path} already exists')
def read_data(
in_path: str,
text_name: Optional[str] = None,
summary_name: Optional[str] = None,
limit: Optional[int] = None
) -> List[Union[Tuple, str]]:
"""general function to call all types of import functions
Args:
in_path (str): file path to read from
text_name (Optional[str], optional): name of the text file.
Defaults to None.
summary_name (Optional[str], optional): name of the summary file.
Defaults to None.
limit (Optional[int], optional): data limitation.
Defaults to None.
Returns:
List[Union[Tuple, str]]: all textes as list
"""
if text_name is None \
and summary_name is None:
return read_single_txt(in_path)
else:
if all(".txt" in item for item in [text_name, summary_name]):
return read_txt(in_path, text_name, summary_name, limit)
elif all(".csv" in item for item in [text_name, summary_name]):
return read_csv(in_path, text_name, summary_name, limit)
elif all(".pickle" in item for item in [text_name, summary_name]):
return read_pickle(in_path, text_name, summary_name, limit)
else:
log(f"{text_name} or {summary_name} is not supported!")
exit()
def read_csv(
in_path: str,
text_name: str,
summary_name: str,
limit: Optional[int] = None
) -> List[Tuple[str, str]]:
"""read data from csv file
Args:
in_path (str): file path to read from
text_name (Optional[str], optional): name of the text file.
Defaults to None.
summary_name (Optional[str], optional): name of the summary file.
Defaults to None.
limit (Optional[int], optional): data limitation.
Defaults to None.
Returns:
List[Tuple[str, str]]: listed text and assigned summary
"""
df = | pd.read_csv(in_path, escapechar="\\") | pandas.read_csv |
import requests
from bs4 import BeautifulSoup
import pandas as pd
# TODO: remove staticmethod
class GetDaumNews:
def __init__(self):
pass
@staticmethod
def get_url(page, date):
return 'http://media.daum.net/breakingnews/politics?page={}®Date={}'.format(page, date)
@staticmethod
def get_ind_news(urls):
news_urls = []
for url in urls:
response = requests.get(url)
contents = BeautifulSoup(response.content, "html.parser")
news_list = contents.find_all('div', 'cont_thumb')
news_list = news_list[:-2]
for news in news_list:
news_urls.append(news.a['href'])
return news_urls
@staticmethod
def get_news_text(urls):
df = | pd.DataFrame(columns=['sentence']) | pandas.DataFrame |
from pynwb import NWBFile, NWBHDF5IO, TimeSeries, ProcessingModule
from pynwb.core import MultiContainerInterface, NWBDataInterface
from scipy.stats import mode
from glob import glob
import numpy as np
import pandas as pd
import scipy.signal as signal
import scipy.interpolate as interpolate
import multiprocessing
import itertools
import os
import logging
logger = logging.getLogger(__name__)
class NWBDataset:
"""A class for loading/preprocessing data from NWB files for
the NLB competition
"""
def __init__(self, fpath, prefix='', split_heldout=True, skip_fields=[]):
"""Initializes an NWBDataset, loading data from
the indicated file(s)
Parameters
----------
fpath : str
Either the path to an NWB file or to a directory
containing NWB files
prefix : str, optional
A pattern used to filter the NWB files in directory
by name. By default, prefix='' loads all .nwb files in
the directory. Please refer to documentation for
the `glob` module for more details:
https://docs.python.org/3/library/glob.html
split_heldout : bool, optional
Whether to load heldin units and heldout units
to separate fields or not, by default True
skip_fields : list, optional
List of field names to skip during loading,
which may be useful if memory is an issue.
Field names must match the names automatically
assigned in the loading process. Spiking data
can not be skipped. Field names in the list
that are not found in the dataset are
ignored
"""
fpath = os.path.expanduser(fpath)
self.fpath = fpath
self.prefix = prefix
# Check if file/directory exists
if not os.path.exists(fpath):
raise FileNotFoundError(f"Specified file or directory not found")
# If directory, look for files with matching prefix
if os.path.isdir(fpath):
filenames = sorted(glob(os.path.join(fpath, prefix + "*.nwb")))
else:
filenames = [fpath]
# If no files found
if len(filenames) == 0:
raise FileNotFoundError(f"No matching files with prefix {prefix} found in directory {fpath}")
# If multiple files found
elif len(filenames) > 1:
loaded = [self.load(fname, split_heldout=split_heldout, skip_fields=skip_fields) for fname in filenames]
datas, trial_infos, descriptions, bin_widths = [list(out) for out in zip(*loaded)]
assert np.all(np.array(bin_widths) == bin_widths[0]), "Bin widths of loaded datasets must be the same"
# Shift loaded files to stack them into continuous array
def trial_shift(x, shift_ms, trial_offset):
if x.name.endswith('_time'):
return x + pd.to_timedelta(shift_ms, unit='ms')
elif x.name == 'trial_id':
return x + trial_offset
else:
return x
# Loop through files, shifting continuous data
past_end = datas[0].index[-1].total_seconds() + round(50 * bin_widths[0] / 1000, 4)
descriptions_full = descriptions[0]
tcount = len(trial_infos[0])
for i in range(1, len(datas)):
block_start_ms = np.ceil(past_end * 10) * 100
datas[i] = datas[i].shift(block_start_ms, freq='ms')
trial_infos[i] = trial_infos[i].apply(trial_shift, shift_ms=block_start_ms, trial_offset=tcount)
descriptions_full.update(descriptions[i])
past_end = datas[i].index[-1].total_seconds() + round(50 * bin_widths[i] / 1000, 4)
tcount += len(trial_infos[i])
# Stack data and reindex to continuous
self.data = pd.concat(datas, axis=0, join='outer')
self.trial_info = pd.concat(trial_infos, axis=0, join='outer').reset_index(drop=True)
self.descriptions = descriptions_full
self.bin_width = bin_widths[0]
new_index = pd.to_timedelta((np.arange(round(self.data.index[-1].total_seconds() * 1000 / self.bin_width) + 1) * self.bin_width).round(4), unit='ms')
self.data = self.data.reindex(new_index)
self.data.index.name = 'clock_time'
# If single file found
else:
data, trial_info, descriptions, bin_width = self.load(filenames[0], split_heldout=split_heldout, skip_fields=skip_fields)
self.data = data
self.trial_info = trial_info
self.descriptions = descriptions
self.bin_width = bin_width
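# Loading flow in brief: load() below parses a single NWB file into a continuous
# time-indexed DataFrame plus trial metadata; when several files are given, the
# constructor above shifts each file's clock times and trial ids so the recordings
# stack into one continuous timeline.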
def load(self, fpath, split_heldout=True, skip_fields=[]):
"""Loads data from an NWB file into two dataframes,
one for trial info and one for time-varying data
Parameters
----------
fpath : str
Path to the NWB file
split_heldout : bool, optional
Whether to load heldin units and heldout units
to separate fields or not, by default True
skip_fields : list, optional
List of field names to skip during loading,
which may be useful if memory is an issue.
Field names must match the names automatically
assigned in the loading process. Spiking data
can not be skipped. Field names in the list
that are not found in the dataset are
ignored
Returns
-------
tuple
Tuple containing a pd.DataFrame of continuous loaded
data, a pd.DataFrame with trial metadata, a dict
with descriptions of fields in the DataFrames, and
the bin width of the loaded data in ms
"""
logger.info(f"Loading {fpath}")
# Open NWB file
io = NWBHDF5IO(fpath, 'r')
nwbfile = io.read()
# Load trial info and units
trial_info = (
nwbfile.trials.to_dataframe()
.reset_index()
.rename({'id': 'trial_id', 'stop_time': 'end_time'}, axis=1))
units = nwbfile.units.to_dataframe()
# Load descriptions of trial info fields
descriptions = {}
for name, info in zip(nwbfile.trials.colnames, nwbfile.trials.columns):
descriptions[name] = info.description
# Find all timeseries
def make_df(ts):
"""Converts TimeSeries into pandas DataFrame"""
if ts.timestamps is not None:
index = ts.timestamps[()]
else:
index = np.arange(ts.data.shape[0]) / ts.rate + ts.starting_time
columns = ts.comments.split('[')[-1].split(']')[0].split(',') if 'columns=' in ts.comments else None
df = pd.DataFrame(ts.data[()], index=pd.to_timedelta(index, unit='s'), columns=columns)
return df
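# make_df derives the time index either from explicit timestamps or from
# rate + starting_time, and recovers column names from a 'columns=[...]'
# annotation in the TimeSeries comments when present.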
def find_timeseries(nwbobj):
"""Recursively searches the NWB file for time series data"""
ts_dict = {}
for child in nwbobj.children:
if isinstance(child, TimeSeries):
if child.name in skip_fields:
continue
ts_dict[child.name] = make_df(child)
descriptions[child.name] = child.description
elif isinstance(child, ProcessingModule):
pm_dict = find_timeseries(child)
ts_dict.update(pm_dict)
elif isinstance(child, MultiContainerInterface):
for field in child.children:
if isinstance(field, TimeSeries):
name = child.name + "_" + field.name
if name in skip_fields:
continue
ts_dict[name] = make_df(field)
descriptions[name] = field.description
return ts_dict
# Create a dictionary containing DataFrames for all time series
data_dict = find_timeseries(nwbfile)
# Calculate data index
start_time = 0.0
bin_width = 1 # in ms, this will be the case for all provided datasets
rate = round(1000. / bin_width, 2) # in Hz
# Use obs_intervals, or last trial to determine data end
end_time = round(max(units.obs_intervals.apply(lambda x: x[-1][-1])) * rate) * bin_width
if (end_time < trial_info['end_time'].iloc[-1]):
print("obs_interval ends before trial end") # TO REMOVE
end_time = round(trial_info['end_time'].iloc[-1] * rate) * bin_width
timestamps = (np.arange(start_time, end_time, bin_width) / 1000).round(6)
timestamps_td = pd.to_timedelta(timestamps, unit='s')
# Check that all timeseries match with calculated timestamps
for key, val in list(data_dict.items()):
if not np.all(np.isin(np.round(val.index.total_seconds(), 6), timestamps)):
logger.warning(f"Dropping {key} due to timestamp mismatch.")
data_dict.pop(key)
def make_mask(obs_intervals):
"""Creates boolean mask to indicate when spiking data is not in obs_intervals"""
mask = np.full(timestamps.shape, True)
for start, end in obs_intervals:
start_idx = np.ceil(round((start - timestamps[0]) * rate, 6)).astype(int)
end_idx = np.floor(round((end - timestamps[0]) * rate, 6)).astype(int)
mask[start_idx:end_idx] = False
return mask
# Prepare variables for spike binning
masks = [(~units.heldout).to_numpy(), units.heldout.to_numpy()] if split_heldout else [np.full(len(units), True)]
for mask, name in zip(masks, ['spikes', 'heldout_spikes']):
# Check if there are any units
if not np.any(mask):
continue
# Allocate array to fill with spikes
spike_arr = np.full((len(timestamps), np.sum(mask)), 0.0, dtype='float16')
# Bin spikes using decimal truncation and np.unique - faster than np.histogram with same results
for idx, (_, unit) in enumerate(units[mask].iterrows()):
spike_idx, spike_cnt = np.unique(((unit.spike_times - timestamps[0]) * rate).round(6).astype(int), return_counts=True)
spike_arr[spike_idx, idx] = spike_cnt
# Replace invalid intervals in spike recordings with NaNs
if 'obs_intervals' in units.columns:
neur_mask = make_mask(units[mask].iloc[0].obs_intervals)
if np.any(spike_arr[neur_mask]):
logger.warning("Spikes found outside of observed interval.")
spike_arr[neur_mask] = np.nan
# Create DataFrames with spike arrays
data_dict[name] = pd.DataFrame(spike_arr, index=timestamps_td, columns=units[mask].index).astype('float16', copy=False)
# Create MultiIndex column names
data_list = []
for key, val in data_dict.items():
chan_names = None if type(val.columns) == pd.RangeIndex else val.columns
val.columns = self._make_midx(key, chan_names=chan_names, num_channels=val.shape[1])
data_list.append(val)
# Assign time-varying data to `self.data`
data = pd.concat(data_list, axis=1)
data.index.name = 'clock_time'
data.sort_index(axis=1, inplace=True)
# Convert time fields in trial info to timedelta
# and assign to `self.trial_info`
def to_td(x):
if x.name.endswith('_time'):
return pd.to_timedelta(x, unit='s')
else:
return x
trial_info = trial_info.apply(to_td, axis=0)
io.close()
return data, trial_info, descriptions, bin_width
def make_trial_data(self,
start_field='start_time',
end_field='end_time',
align_field=None,
align_range=(None, None),
margin=0,
ignored_trials=None,
allow_overlap=False,
allow_nans=False):
"""Makes a DataFrame of trialized data based on
an alignment field
Parameters
----------
start_field : str, optional
The field in `trial_info` to use as the beginning of
each trial, by default 'start_time'
end_field : str, optional
The field in `trial_info` to use as the end of each trial,
by default 'end_time'
align_field : str, optional
The field in `trial_info` to use for alignment,
by default None, which does not align trials and
instead takes them in their entirety
align_range : tuple of int, optional
The offsets to add to the alignment field to
calculate the alignment window, by default (None, None)
uses `trial_start` and `trial_end`
margin : int, optional
The number of ms of extra data to include on either end of
each trial, labeled with the `margin` column for easy
removal. Margins are useful for decoding and smoothing
ignored_trials : pd.Series or np.ndarray, optional
A boolean pd.Series or np.ndarray of the same length
as trial_info with True for the trials to ignore, by
default None ignores no trials. This is useful for
rejecting trials outside of the alignment process
allow_overlap : bool, optional
Whether to allow overlap between trials, by default False
truncates each trial at the start of the subsequent trial
allow_nans : bool, optional
Whether to allow NaNs within trials, by default False
drops all timestamps containing NaNs in any column
Returns
-------
pd.DataFrame
A DataFrame containing trialized data. It has the same
fields as the continuous `self.data` DataFrame, but
adds `trial_id`, `trial_time`, and `align_time`. It also
resets the index so `clock_time` is a column rather than
an index. This DataFrame can be pivoted to plot its
various fields across trials, aligned relative to
`align_time`, `trial_time`, or `clock_time`
"""
# Allow rejection of trials by passing a boolean series
trial_info = self.trial_info.copy()
trial_info['next_start'] = trial_info['start_time'].shift(-1)
if ignored_trials is not None:
trial_info = trial_info.loc[~ignored_trials]
if len(trial_info) == 0:
logger.warning("All trials ignored. No trial data made")
return
# Find alignment points
bin_width = pd.to_timedelta(self.bin_width, unit='ms')
trial_start = trial_info[start_field]
trial_end = trial_info[end_field]
next_start = trial_info['next_start']
if align_field is not None:
align_left = align_right = trial_info[align_field]
else:
align_field = f'{start_field} and {end_field}' # for logging
align_left = trial_start
align_right = trial_end
# Find start and end points based on the alignment range
start_offset, end_offset = pd.to_timedelta(align_range, unit='ms')
if not pd.isnull(start_offset) and not pd.isnull(end_offset):
if not ((end_offset - start_offset) / bin_width).is_integer():
# Round align offsets if alignment range is not multiple of bin width
end_offset = start_offset + (end_offset - start_offset).round(bin_width)
align_range = (
int(round(start_offset.total_seconds() * 1000)),
int(round(end_offset.total_seconds() * 1000))
)
logger.warning('Alignment window not integer multiple of bin width. '
f'Rounded to {align_range}')
if pd.isnull(start_offset):
align_start = trial_start
else:
align_start = align_left + start_offset
if pd.isnull(end_offset):
# Subtract small interval to prevent inclusive timedelta .loc indexing
align_end = trial_end - pd.to_timedelta(1, unit='us')
else:
align_end = align_right + end_offset - pd.to_timedelta(1, unit='us')
# Add margins to either end of the data
margin_delta = pd.to_timedelta(margin, unit='ms')
margin_start = align_start - margin_delta
margin_end = align_end + margin_delta
trial_ids = trial_info['trial_id']
# Store the alignment data in a dataframe
align_data = pd.DataFrame({
'trial_id': trial_ids,
'margin_start': margin_start,
'margin_end': margin_end,
'align_start': align_start,
'align_end': align_end,
'trial_start': trial_start,
'align_left': align_left}).dropna()
# Bound the end by the next trial / alignment start
align_data['end_bound'] = (
pd.concat([next_start, align_start], axis=1)
.min(axis=1)
.shift(-1))
trial_dfs = []
num_overlap_trials = 0
def make_trial_df(args):
idx, row = args
# Handle overlap with the start of the next trial
endpoint = row.margin_end
trial_id = row.trial_id
overlap = False
if not pd.isnull(row.end_bound) and \
row.align_end > row.end_bound:
overlap = True
if not allow_overlap:
# Allow overlapping margins, but not aligned data
endpoint = row.end_bound + margin_delta - pd.to_timedelta(1, unit='us')
# Take a slice of the continuous data
trial_idx = pd.Series(self.data.index[self.data.index.slice_indexer(row.margin_start, endpoint)])
# Add trial identifiers
trial_df = pd.DataFrame({
('trial_id', ''): np.repeat(trial_id, len(trial_idx)),
('trial_time', ''): (trial_idx - row.trial_start.ceil(bin_width)),
('align_time', ''): (trial_idx - row.align_left.ceil(bin_width)),
('margin', ''): ((trial_idx < row.align_start) | (row.align_end < trial_idx))})
trial_df.index = trial_idx
return overlap, trial_df
overlaps, trial_dfs = zip(*[make_trial_df(args) for args in align_data.iterrows()])
num_overlap_trials = sum(overlaps)
# Summarize alignment
logger.info(f'Aligned {len(trial_dfs)} trials to '
f'{align_field} with offset of {align_range} ms '
f'and margin of {margin}.')
# Report any overlapping trials to the user.
if num_overlap_trials > 0:
if allow_overlap:
logger.warning(
f'Allowed {num_overlap_trials} overlapping trials.')
else:
logger.warning(
f'Shortened {num_overlap_trials} trials to prevent overlap.')
# Combine all trials into one DataFrame
trial_data = pd.concat(trial_dfs)
trial_data.reset_index(inplace=True)
trial_data = trial_data.merge(self.data, how='left', left_on=[('clock_time', '')], right_index=True)
# Sanity check to make sure there are no duplicated `clock_time`'s
if not allow_overlap:
# Duplicated points in the margins are allowed
td_nonmargin = trial_data[~trial_data.margin]
assert td_nonmargin.clock_time.duplicated().sum() == 0, \
'Duplicated points still found. Double-check overlap code.'
# Make sure NaN's caused by adding trialized data to self.data are ignored
nans_found = trial_data.isnull().sum().max()
if nans_found > 0:
pct_nan = (nans_found / len(trial_data)) * 100
if allow_nans:
logger.warning(f'NaNs found in {pct_nan:.2f}% of `trial_data`.')
else:
logger.warning(f'NaNs found in `self.data`. Dropping {pct_nan:.2f}% '
'of points to remove NaNs from `trial_data`.')
trial_data = trial_data.dropna()
trial_data.sort_index(axis=1, inplace=True)
return trial_data
def resample(self, target_bin):
"""Rebins spikes and performs antialiasing + downsampling on
continuous signals
Parameters
----------
target_bin : int
The target bin size in milliseconds. Note that it must be an
integer multiple of self.bin_width
"""
logger.info(f'Resampling data to {target_bin} ms.')
# Check that resample_factor is an integer
if target_bin == self.bin_width:
logger.warning(f'Dataset already at {target_bin} ms resolution, skipping resampling...')
return
assert target_bin % self.bin_width == 0, \
'target_bin must be an integer multiple of bin_width.'
resample_factor = int(round(target_bin / self.bin_width))
# Resample data based on signal type
cols = self.data.columns
data_list = []
for signal_type in cols.get_level_values(0).unique():
if 'spikes' in signal_type:
# Rebin spikes, preserving original nan locations
arr = self.data[signal_type].to_numpy()
dtype = self.data[signal_type].dtypes.iloc[0]
nan_mask = np.isnan(arr[::resample_factor])
if arr.shape[0] % resample_factor != 0:
extra = arr[-(arr.shape[0] % resample_factor):]
arr = arr[:-(arr.shape[0] % resample_factor)]
else:
extra = None
arr = np.nan_to_num(arr, copy=False).reshape((arr.shape[0] // resample_factor, resample_factor, -1)).sum(axis=1)
if extra is not None:
arr = np.vstack([arr, np.nan_to_num(extra, copy=False).sum(axis=0)])
arr[nan_mask] = np.nan
resamp = pd.DataFrame(arr, index=self.data.index[::resample_factor], dtype=dtype)
elif signal_type == 'target_pos':
# Resample target pos for MC_RTT
resamp = self.data[signal_type].iloc[::resample_factor]
else:
# Resample with Chebyshev for other data types
dtype = self.data[signal_type].dtypes.iloc[0]
nan_mask = self.data[signal_type].iloc[::resample_factor].isna()
if np.any(self.data[signal_type].isna()):
self.data[signal_type] = self.data[signal_type].apply(lambda x: x.interpolate(limit_direction='both'))
decimated_df = signal.decimate(
self.data[signal_type], resample_factor, axis=0, n=500, ftype='fir')
decimated_df[nan_mask] = np.nan
resamp = pd.DataFrame(decimated_df, index=self.data.index[::resample_factor], dtype=dtype)
resamp.columns = pd.MultiIndex.from_product([[signal_type], self.data[signal_type].columns], names=('signal_type', 'channel'))
data_list.append(resamp)
# Replace old data
self.data = pd.concat(data_list, axis=1)
self.data.index.freq = f'{target_bin}ms'
self.bin_width = target_bin
def smooth_spk(self,
gauss_width,
signal_type=None,
name=None,
overwrite=False,
ignore_nans=False,
parallelized=True,
dtype="float64"):
"""Applies Gaussian smoothing to the data. Most often
applied to spikes
Parameters
----------
gauss_width : int
The standard deviation of the Gaussian to use for
smoothing, in ms
signal_type : str or list of str, optional
The group of signals to smooth, by default
None, which smooths 'spikes' and 'heldout_spikes'
if present in the DataFrame
name : str, optional
The name to use for the smoothed data when adding
it back to the DataFrame, by default None. If
provided, the new signal_type name will be
the original name + '_' + `name`. Must be provided
if overwrite is False
overwrite : bool, optional
Whether to overwrite the original data,
by default False
ignore_nans : bool, optional
Whether to ignore NaNs when smoothing, by default
False. When NaNs are not ignored, they propagate
into valid data during convolution, but ignoring
NaNs is much slower
parallelized : bool, optional
Whether to parallelize the smoothing operation
with multiprocessing.Pool.map(). This may cause
issues on certain systems, so it can be disabled
dtype : str or dtype
Data type for the smoothing output to be cast to,
in case of memory issues or precision concerns.
By default 'float64'. Only other float dtypes are
recommended
"""
assert name or overwrite, \
('You must either provide a name for the smoothed '
'data or specify to overwrite the existing data.')
if signal_type is None:
signal_type = [field for field in ['spikes', 'heldout_spikes'] if field in self.data.columns]
logger.info(f'Smoothing {signal_type} with a '
f'{gauss_width} ms Gaussian.')
# Compute Gauss window and std with respect to bins
gauss_bin_std = gauss_width / self.bin_width
# the window extends 3 x std in either direction
win_len = int(6 * gauss_bin_std)
# Create Gaussian kernel
window = signal.gaussian(win_len, gauss_bin_std, sym=True)
window /= np.sum(window)
# Extract spiking data
spike_vals = self.data[signal_type].to_numpy()
# Parallelized implementation for smoothing data
if parallelized:
spike_vals_list = [spike_vals[:,i] for i in range(spike_vals.shape[1])]
y_list = _poolmap(
smooth_column, itertools.product(spike_vals_list, [window], [ignore_nans], [dtype]))
smoothed_spikes = np.vstack(y_list).T
else:
smoothed_spikes = np.apply_along_axis(lambda x: smooth_column((x, window, ignore_nans, dtype)), 0, spike_vals)
# Create list of column names
col_names = []
if isinstance(signal_type, str):
signal_type = [signal_type]
for st in signal_type:
columns = self.data[st].columns
if overwrite:
smoothed_name = st
else:
smoothed_name = st + '_' + name
col_names += list(zip([smoothed_name]*len(columns), columns))
# Write data to DataFrame
if overwrite:
self.data.drop(col_names, axis=1, inplace=True)
smoothed_df = pd.DataFrame(smoothed_spikes, index=self.data.index, columns=pd.MultiIndex.from_tuples(col_names))
self.data = pd.concat([self.data, smoothed_df], axis=1)
self.data.sort_index(axis=1, inplace=True)
# deleting and concatenating new data is much faster than overwriting, but less memory efficient
# can replace with:
# if overwrite:
# self.data[col_names] = smoothed_spikes
# else:
# smoothed_df = pd.DataFrame(smoothed_spikes, index=self.data.index, columns=pd.MultiIndex.from_tuples(col_names))
# self.data = pd.concat([self.data, smoothed_df], axis=1)
# self.data.sort_index(axis=1, inplace=True)
# if memory is an issue
def add_continuous_data(self, cts_data, signal_type, chan_names=None):
"""Adds a continuous data field to the main DataFrame
Parameters
----------
cts_data : np.ndarray
A numpy array whose first dimension matches the DataFrame
at self.data
signal_name : str
The label for this group of signals
chan_names : list of str, optional
The channel names for this data
"""
logger.info(f'Adding continuous {signal_type} to the main DataFrame.')
# Make MultiIndex columns
midx = self._make_midx(signal_type, chan_names, cts_data.shape[1])
# Build the DataFrame and attach it to the current dataframe
new_data = pd.DataFrame(
cts_data, index=self.data.index, columns=midx)
self.data = pd.concat([self.data, new_data], axis=1)
def add_trialized_data(self, trial_data, signal_type, chan_names=None):
"""Adds a trialized data field to the main DataFrame
Parameters
----------
trial_data : pd.DataFrame
A trial_data dataframe containing a data field
that will be added to the continuous dataframe
signal_type : str
The label for the data to be added
chan_names : list of str, optional
The channel names for the data when added
"""
logger.info(f'Adding trialized {signal_type} to the main DataFrame')
new_data = trial_data[['clock_time', signal_type]].set_index('clock_time')
self.data = pd.concat([self.data, new_data], axis=1)
def _make_midx(self, signal_type, chan_names=None, num_channels=None):
"""Creates a pd.MultiIndex for a given signal_type
Parameters
----------
signal_type : str
Name of the signal type, to be used as the first level value
of MultiIndex
chan_names : list, optional
Name of individual channels. If not provided,
channel names will be automatically generated as
['0000', '0001', etc.]
num_channels : int, optional
Number of channels to create names for. Required if
`chan_names` is not provided
"""
if chan_names is None:
if 'rates' in signal_type:
# If merging rates, use the same names as the spikes
chan_names = self.data.spikes.columns
else:
# Otherwise, generate names for the channels
assert num_channels is not None, "`num_channels` must be provided if `chan_names` is not provided"
chan_names = [f'{i:04d}' for i in range(num_channels)]
# Create the MultiIndex for this data
midx = pd.MultiIndex.from_product(
[[signal_type], chan_names], names=('signal_type', 'channel'))
return midx
def calculate_onset(self,
field_name,
onset_threshold,
peak_prominence=0.1,
peak_distance_s=0.1,
multipeak_threshold=0.2):
"""Calculates onset for a given field by finding
peaks and threshold crossings. Developed for
speed onset calculation
Parameters
----------
field_name : str
The field to use for onset calculation, used
with recursive getattr on self.data
onset_threshold : float
The threshold for onset as a percentage of the
peak height
peak_prominence : float, optional
Minimum prominence of peaks. Passed to
`scipy.signal.find_peaks`, by default 0.1
peak_distance_s : float, optional
Minimum distance between peaks. Passed to
`scipy.signal.find_peaks`, by default 0.1
multipeak_threshold : float, optional
Subsequent peaks within a trial must be no
larger than this percentage of the first peak,
otherwise the onset calculation fails, by default 0.2
Returns
-------
pd.Series
The times of identified peaks
"""
import functools
def rgetattr(obj, attr, *args):
"""A recursive drop-in replacement for getattr,
which also handles dotted attr strings
"""
def _getattr(obj, attr):
return getattr(obj, attr, *args)
return functools.reduce(_getattr, [obj] + attr.split('.'))
logger.info(f'Calculating {field_name} onset.')
sig = rgetattr(self.data, field_name)
# Find peaks
peaks, properties = signal.find_peaks(
sig,
prominence=peak_prominence,
distance=peak_distance_s / (self.bin_width / 1000.0))
peak_times = pd.Series(self.data.index[peaks])
# Find the onset for each trial
onset, onset_index = [], []
for index, row in self.trial_info.iterrows():
trial_start, trial_end = row['start_time'], row['end_time']
# Find the peaks within the trial boundaries
trial_peaks = peak_times[
(trial_start < peak_times) & (peak_times < trial_end)]
peak_signals = sig.loc[trial_peaks]
# Remove trials with multiple larger peaks
if multipeak_threshold is not None and len(trial_peaks) > 1:
# Make sure the first peak is relatively large
if peak_signals[0]*multipeak_threshold < peak_signals[1:].max():
continue
elif len(trial_peaks) == 0:
# If no peaks are found for this trial, skip it
continue
# Find the point just before speed crosses the threshold
signal_threshold = onset_threshold * peak_signals[0]
under_threshold = sig[trial_start:trial_peaks.iloc[0]] < signal_threshold
if under_threshold.sum() > 0:
onset.append(under_threshold[::-1].idxmax())
onset_index.append(index)
# Add the movement onset for each trial to the DataFrame
onset_name = field_name.split('.')[-1] + '_onset'
logger.info(f'`{onset_name}` field created in trial_info.')
self.trial_info[onset_name] = | pd.Series(onset, index=onset_index) | pandas.Series |
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 13 16:29:34 2020
@author: <NAME>
"""
from sqlalchemy import create_engine
import pandas as pd
import os
import sys
################################################################################
# > QUERY TO POSTGRESQL DATABASE
################################################################################
def get_atc_icd_data(conn):
SQL_query = """select c.code, ac.code, ac.name, cngc.type
from commonnamegroup cng
inner join commonnamegroup_indication cngi on cngi.commonnamegroupid = cng.commonnamegroupid
inner join indicationgroup_indication igi on igi.indicationid = cngi.indicationid
inner join commonnamegroup_composition cngc on cngc.commonnamegroupid = cng.commonnamegroupid
inner join cim10_indicationgroup cig on cig.indicationgroupid = igi.indicationgroupid
inner join cim10 c on c.cim10id = cig.cim10id
inner join commonnamegroup_atc cnga on cnga.commonnamegroupid = cng.commonnamegroupid
inner join atcclass ac on ac.atcclassid = cnga.atcclassid
group by c.code, ac.code, ac.name, cngc.type order by c.code asc
;"""
df = pd.read_sql_query(SQL_query, conn)
df.to_csv('data_with_atc_icd_codes.csv', index = False, encoding = 'utf-8-sig')
return df
def get_sideeffects_atc_data(conn):
SQL_query = """select se.sideeffectid, se.name, ac.code, ac.name
from commonnamegroup cng
inner join commonnamegroup_indication cngi on cngi.commonnamegroupid = cng.commonnamegroupid
inner join commonnamegroup_sideeffect cngs on cngs.commonnamegroupid = cngi.commonnamegroupid
inner join sideeffect se on se.sideeffectid = cngs.sideeffectid
inner join commonnamegroup_atc cnga on cnga.commonnamegroupid = cng.commonnamegroupid
inner join atcclass ac on ac.atcclassid = cnga.atcclassid
group by se.sideeffectid, se.name, ac.code, ac.name
;"""
df = pd.read_sql_query(SQL_query, conn)
df.to_csv('data_with_atc_sideeffects.csv', index = False, encoding = 'utf-8-sig')
return df
################################################################################
# > CONNECTION SETUP POSTGRESQL DATABASE
################################################################################
def init_db(database='local'):
"""
OPTIONS:
local :: LOCAL DATABASE
"""
global dbengine, conn
# __SQL CONNECTION SETUP__
if database == 'local':
user = "username"
password = "*******"
database = "dbname"
host = "localhost"
dbengine = create_engine(f'postgresql://{user}:{password}@{host}/{database}',
echo=False)
conn = dbengine.connect()
if __name__ == "__main__":
if len(sys.argv) > 1:
flag = sys.argv[1]
else:
flag = 0
print("Give one option to create the datasets: 1-PostgreSQL or 2-Pandas")
################################################################################
# > LOAD DATA TO POSTGRESQL DATABASE AND QUERY RESULTS
################################################################################
if flag == "1":
init_db('local')
entries = os.listdir('latam_csv/')
for entry in entries:
data = pd.read_csv('latam_csv/' + entry, encoding = 'utf-8-sig')
data.to_sql(entry.split('.', 2)[0], dbengine, index=False, if_exists='append')
get_atc_icd_data(conn)
get_sideeffects_atc_data(conn)
conn.close()
################################################################################
# > COMBINE DATA USING PANDAS INNER JOINS WITHOUT POSTGRESQL DATABASE
################################################################################
elif flag == "2":
# ATC AND CIE
commonnamegroup = pd.read_csv('latam_csv/commonnamegroup.csv', encoding = 'utf-8-sig')
commonnamegroup_indication = pd.read_csv('latam_csv/commonnamegroup_indication.csv', encoding = 'utf-8-sig')
merged_inner = | pd.merge(left=commonnamegroup, right=commonnamegroup_indication, left_on='commonnamegroupid', right_on='commonnamegroupid') | pandas.merge |
# Reviews Counts for Non-US Regions (raw & normalized)
import pandas as pd
import numpy as np
import os
folder = 'non_us_reviews/'
files = os.listdir(folder)
master = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
from dplypy.dplyframe import DplyFrame
from dplypy.pipeline import join
def test_join():
df_l = DplyFrame(
pd.DataFrame(
data={
"common": [1, 2, 3, 4],
"left_index": ["a", "b", "c", "d"],
"left_key": [3, 4, 7, 6],
"col3": [6, 7, 8, 9],
"col4": [9, 10, 11, 12],
}
)
)
df_r = DplyFrame(
pd.DataFrame(
data={
"common": [1, 2, 3, 4],
"right_index": ["a", "b", "foo", "d"],
"right_key": [3, 4, 5, 6],
"col3": [1, 2, 3, 4],
"col4": [5, 6, 7, 8],
}
)
)
# on
output1 = df_l + join(df_r, on="common")
expected1 = df_l.pandas_df.merge(df_r.pandas_df, on="common")
pd.testing.assert_frame_equal(output1.pandas_df, expected1)
# sort
output2 = df_l + join(df_r, on="common", sort=True)
expected2 = df_l.pandas_df.merge(df_r.pandas_df, on="common", sort=True)
pd.testing.assert_frame_equal(output2.pandas_df, expected2)
# left_on, right_on, suffixes
output3 = df_l + join(
df_r, left_on="left_key", right_on="right_key", suffixes=("_foo_x", "_foo_y")
)
expected3 = df_l.pandas_df.merge(
df_r.pandas_df,
left_on="left_key",
right_on="right_key",
suffixes=("_foo_x", "_foo_y"),
)
pd.testing.assert_frame_equal(output3.pandas_df, expected3)
try:
df_l + join(df_r, left_on="left_key")
except pd.errors.MergeError:
pass
else:
raise AssertionError("MergeError was not raised")
try:
df_l + join(df_r, right_on="right_key")
except pd.errors.MergeError:
pass
else:
raise AssertionError("MergeError was not raised")
try:
df_l + join(
df_r, left_on="left_key", right_on="right_key", suffixes=(None, None)
)
except ValueError:
pass
else:
raise AssertionError("ValueError was not raised")
# left_index, right_index
output4 = df_l + join(df_r, left_index=True, right_index=True)
expected4 = df_l.pandas_df.merge(df_r.pandas_df, left_index=True, right_index=True)
pd.testing.assert_frame_equal(output4.pandas_df, expected4)
output5 = df_l + join(df_r, left_on="left_key", right_index=True)
expected5 = df_l.pandas_df.merge(
df_r.pandas_df, left_on="left_key", right_index=True
)
| pd.testing.assert_frame_equal(output5.pandas_df, expected5) | pandas.testing.assert_frame_equal |
import os, unittest, pandas as pd, numpy as np
from saspt.trajectory_group import TrajectoryGroup
from saspt.constants import TRACK, FRAME, PY, PX, TRACK_LENGTH, JUMPS_PER_TRACK, DFRAMES, DR2, DY, DX, RBME
from saspt.utils import track_length
from saspt.io import is_detections
TEST_DIR = os.path.dirname(os.path.abspath(__file__))
FIXTURES = os.path.join(TEST_DIR, "fixtures")
class TestTrajectoryGroup(unittest.TestCase):
def setUp(self):
# Simple set of trajectories
self.sample_detections = pd.DataFrame({
TRACK: [ 0, 1, 1, -1, 3, 3, 3, 4, 4],
FRAME: [ 0, 0, 1, 1, 1, 2, 3, 6, 7],
PY: [ 0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8],
PX: [ 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.0]
})
# More complex set of trajectories
self.track_csv = os.path.join(FIXTURES, "sample_tracks.csv")
# Sample set of TrajectoryGroup initialization kwargs
self.init_kwargs = dict(pixel_size_um=0.160, frame_interval=0.00748,
splitsize=10, start_frame=0)
def tearDown(self):
pass
def test_split_tracks(self):
# Test 1: Small set of trajectories with ground truth answer
splitsize = 1
old_indices = np.array([0, 0, 1, 3, 3, 3, 5, 5])
new_indices = TrajectoryGroup.split_tracks(old_indices, splitsize)
assert (new_indices == np.array([0, 0, 1, 2, 2, 3, 4, 4])).all(), new_indices
# Test 2: Large set of trajectories
splitsize = 4
detections = | pd.read_csv(self.track_csv) | pandas.read_csv |
import pandas as pd
import numpy as np
from sklearn import svm
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import ElasticNetCV
from sklearn.model_selection import KFold
from sklearn.cross_validation import KFold as kfo  # legacy API, removed in scikit-learn >= 0.20
import xgboost as xgb
import matplotlib.pyplot as plt
from sklearn.preprocessing import RobustScaler
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import LassoCV
from sklearn.linear_model import RidgeCV
from mlxtend.regressor import StackingCVRegressor
from lightgbm import LGBMRegressor
import feature_list
def select_drop_standand(traindata, testdata, num):
# select features, from feature_list.py
    select_func = getattr(feature_list, 'select_feature{}'.format(num))
    selected, select_list = select_func(traindata, True)
    selected_testB_features, select_list_testB = select_func(testdata, False)
selected.reset_index(drop=True, inplace=True)
selected_testB_features.reset_index(drop=True, inplace=True)
# clear empty row
selected_nonan = selected.dropna(axis=0, how='any')
train_targets = pd.DataFrame(selected_nonan['charge_energy'], columns=['charge_energy'])
train_nonan_features = selected_nonan.drop(['charge_energy'], axis=1)
train_test_features = pd.concat([train_nonan_features, selected_testB_features], axis=0)
train_test_features.reset_index(drop=True, inplace=True)
# RobustScaler quantile_range=(25.0, 75.0) # Standardization based on quantile
select_list.remove('charge_energy')
x_scaler = RobustScaler()
y_scaler = RobustScaler()
n_X_train_test = x_scaler.fit_transform(np.array(train_test_features))
# n_y_train = y_scaler.fit_transform(np.log1p(np.array(train_targets))) # ln(x+1) Transformation
n_y_train = y_scaler.fit_transform(np.array(train_targets))
n_X_train_test_pd = pd.DataFrame(n_X_train_test, columns=select_list)
n_X_train_test_mer = n_X_train_test_pd.copy()
# Time dimension sparse matrix
# chargemode_dummies = pd.get_dummies(train_test_features['charge_mode'], prefix='mode', prefix_sep='_')
# hour_dummies = pd.get_dummies(train_test_features['hour'], prefix='hour', prefix_sep='_')
# week_dummies = pd.get_dummies(train_test_features['week'], prefix='week', prefix_sep='_')
# month_dummies = pd.get_dummies(train_test_features['month'], prefix='month', prefix_sep='_')
# if 'phase' in select_list:
# phase_dummies = pd.get_dummies(train_test_features['phase'], prefix='phase', prefix_sep='_')
# n_X_train_test_mer = pd.concat([n_X_train_test_pd, chargemode_dummies, hour_dummies, week_dummies, month_dummies,phase_dummies], axis=1)
# n_X_train_test_mer.drop(['charge_mode', 'hour', 'week', 'month', 'phase'], axis=1, inplace=True)
# else:
# n_X_train_test_mer = pd.concat([n_X_train_test_pd, chargemode_dummies, hour_dummies, week_dummies, month_dummies], axis=1)
# n_X_train_test_mer.drop(['charge_mode', 'hour', 'week', 'month'], axis=1, inplace=True)
n_testB = n_X_train_test_mer.tail(selected_testB_features.shape[0])
n_X_train = n_X_train_test_mer.drop(n_testB.index.tolist())
return n_X_train, n_y_train, n_testB, y_scaler
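# Illustrative usage sketch (hypothetical `model`; the real call sites live in
# the __main__ block below): prepare scaled matrices for one car / feature set.
#   n_X_train, n_y_train, n_testB, y_scaler = select_drop_standand(car_train, car_test, 1)
#   preds = y_scaler.inverse_transform(model.predict(n_testB).reshape(-1, 1))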
ram_num = 5
kfolds = KFold(n_splits=10, shuffle=True, random_state=ram_num)
def cv_rmse(model, train, y_train):
rmse = np.sqrt(-cross_val_score(model, train, y_train, scoring="neg_mean_squared_error", cv = kfolds))
return(rmse)
def ridge_selector(k, X, y):
model = make_pipeline(RidgeCV(alphas = [k], cv=kfolds)).fit(X, y)
rmse = cv_rmse(model, X, y).mean()
return(rmse)
def lasso_selector(k, X, y):
model = make_pipeline(LassoCV(max_iter=1e7, alphas = [k],
cv = kfolds)).fit(X, y)
rmse = cv_rmse(model, X, y).mean()
return(rmse)
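# Illustrative sketch (not part of the original pipeline): scan a few candidate
# regularization strengths with the selector helpers above and keep the alpha
# with the lowest cross-validated RMSE. The candidate grid here is arbitrary.
def example_alpha_scan(X, y, alphas=(0.01, 0.1, 1.0, 10.0)):
    # ridge_selector returns the mean CV RMSE for a single alpha value
    scores = {alpha: ridge_selector(alpha, X, y) for alpha in alphas}
    return min(scores, key=scores.get)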
def stack_level1(clf, x_train, y_train, x_test, kf):
num_train = x_train.shape[0]
num_test = x_test.shape[0]
level1_train = np.zeros((num_train,))
level1_test = np.zeros((num_test,))
level1_test_kfold = np.empty((10, num_test)) # kfold = 10
for i, (train_index, test_index) in enumerate(kf):
x_tr = x_train[train_index]
y_tr = y_train[train_index]
x_te = x_train[test_index]
clf.fit(x_tr, y_tr)
level1_train[test_index] = clf.predict(x_te)
level1_test_kfold[i, :] = clf.predict(x_test)
level1_test[:] = level1_test_kfold.mean(axis=0)
return level1_train.reshape(-1, 1), level1_test.reshape(-1, 1)
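# Illustrative sketch (not from the original script): combine out-of-fold
# predictions from several base models into level-2 stacking features using
# stack_level1. The base-model choices are placeholders and x_train/x_test
# are assumed to be numpy arrays.
def example_stack_level2_features(x_train, y_train, x_test):
    splits = list(KFold(n_splits=10, shuffle=True, random_state=ram_num).split(x_train))
    base_models = [GradientBoostingRegressor(), xgb.XGBRegressor(), LGBMRegressor()]
    train_parts, test_parts = [], []
    for clf in base_models:
        level1_train, level1_test = stack_level1(clf, x_train, y_train, x_test, splits)
        train_parts.append(level1_train)
        test_parts.append(level1_test)
    # Each column of the level-2 matrices is one base model's OOF prediction
    return np.hstack(train_parts), np.hstack(test_parts)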
if __name__ == '__main__':
# Read the data of 16 cars separately. features + target
readFile_carfeatures = []
readFile_testfeatures = []
car_train_list = []
car_test_list = []
filenum = 17
for i in range(1,filenum):
readFile_carfeatures.append('../dataset/feature/train_feature/car' + str(i) + '_features.csv')
for i in range(1,filenum):
readFile_testfeatures.append('../dataset/feature/test_feature/car' + str(i) + 'testB_features.csv')
# train features + target
for i in range(len(readFile_carfeatures)):
car_train = pd.read_csv(readFile_carfeatures[i], dtype={'charge_start_time': str, 'charge_end_time': str})
car_train_list.append(car_train)
# test features
for i in range(len(readFile_carfeatures)):
car_test = pd.read_csv(readFile_testfeatures[i])
car_test_list.append(car_test)
car_index = 9 # 0 = car1
car_train = pd.read_csv(readFile_carfeatures[car_index], dtype={'charge_start_time': str, 'charge_end_time': str})
car_test = | pd.read_csv(readFile_testfeatures[car_index]) | pandas.read_csv |
from Kernel import Kernel
from agent.ExchangeAgent import ExchangeAgent
from agent.NoiseAgent import NoiseAgent
from agent.ValueAgent import ValueAgent
from agent.market_makers.MarketMakerAgent import MarketMakerAgent
from util.order import LimitOrder
from util.oracle.SparseMeanRevertingOracle import SparseMeanRevertingOracle
from util import util
import numpy as np
import pandas as pd
import sys
# Some config files require additional command line parameters to easily
# control agent or simulation hyperparameters during coarse parallelization.
import argparse
parser = argparse.ArgumentParser(description='Detailed options for sparse_zi config.')
parser.add_argument('-b', '--book_freq', default=None,
help='Frequency at which to archive order book for visualization')
parser.add_argument('-c', '--config', required=True,
help='Name of config file to execute')
parser.add_argument('-l', '--log_dir', default=None,
help='Log directory name (default: unix timestamp at program start)')
parser.add_argument('-n', '--obs_noise', type=float, default=1000000,
help='Observation noise variance for zero intelligence agents (sigma^2_n)')
parser.add_argument('-o', '--log_orders', action='store_true',
help='Log every order-related action by every agent.')
parser.add_argument('-s', '--seed', type=int, default=None,
help='numpy.random.seed() for simulation')
parser.add_argument('-v', '--verbose', action='store_true',
help='Maximum verbosity!')
parser.add_argument('--config_help', action='store_true',
help='Print argument options for this config file')
args, remaining_args = parser.parse_known_args()
if args.config_help:
parser.print_help()
sys.exit()
# Historical date to simulate. Required even if not relevant.
historical_date = pd.to_datetime('2019-06-28')
# Requested log directory.
log_dir = args.log_dir
# Requested order book snapshot archive frequency.
book_freq = args.book_freq
# Observation noise variance for zero intelligence agents.
# This is a property of the agents, not the stock.
# Later, it could be a matrix across both.
sigma_n = args.obs_noise
# Random seed specification on the command line. Default: None (by clock).
# If none, we select one via a specific random method and pass it to seed()
# so we can record it for future use. (You cannot reasonably obtain the
# automatically generated seed when seed() is called without a parameter.)
# Note that this seed is used to (1) make any random decisions within this
# config file itself and (2) to generate random number seeds for the
# (separate) Random objects given to each agent. This ensures that when
# the agent population is extended, prior agents will continue to behave
# in the same manner save for influences by the new agents. (i.e. all prior
# agents still have their own separate PRNG sequence, and it is the same as
# before)
seed = args.seed
if not seed: seed = int(pd.Timestamp.now().timestamp() * 1000000) % (2 ** 32 - 1)
np.random.seed(seed)
# Config parameter that causes util.util.print to suppress most output.
# Also suppresses formatting of limit orders (which is time consuming).
util.silent_mode = not args.verbose
LimitOrder.silent_mode = not args.verbose
# Config parameter that causes every order-related action to be logged by
# every agent. Activate only when really needed as there is a significant
# time penalty to all that object serialization!
log_orders = args.log_orders
print("Silent mode: {}".format(util.silent_mode))
print("Logging orders: {}".format(log_orders))
print("Book freq: {}".format(book_freq))
print("ZeroIntelligenceAgent noise: {:0.4f}".format(sigma_n))
print("Configuration seed: {}\n".format(seed))
# Since the simulator often pulls historical data, we use a real-world
# nanosecond timestamp (pandas.Timestamp) for our discrete time "steps",
# which are considered to be nanoseconds. For other (or abstract) time
# units, one can either configure the Timestamp interval, or simply
# interpret the nanoseconds as something else.
# What is the earliest available time for an agent to act during the
# simulation?
midnight = historical_date
kernelStartTime = midnight
# When should the Kernel shut down? (This should be after market close.)
# Here we go for 5 PM the same day.
kernelStopTime = midnight + pd.to_timedelta('17:00:00')
# This will configure the kernel with a default computation delay
# (time penalty) for each agent's wakeup and recvMsg. An agent
# can change this at any time for itself. (nanoseconds)
defaultComputationDelay = 1000000000 # one second
# IMPORTANT NOTE CONCERNING AGENT IDS: the id passed to each agent must:
# 1. be unique
# 2. equal its index in the agents list
# This is to avoid having to call an extra getAgentListIndexByID()
# in the kernel every single time an agent must be referenced.
# This is a list of symbols the exchange should trade. It can handle any number.
# It keeps a separate order book for each symbol. The example data includes
# only JPM. This config uses generated data, so the symbol doesn't really matter.
# megashock_lambda_a is used to select spacing for megashocks (using an exponential
# distribution equivalent to a centralized Poisson process). Megashock mean
# and variance control the size (bimodal distribution) of the individual shocks.
# Note: sigma_s is no longer used by the agents or the fundamental (for sparse discrete simulation).
symbols = {'JPM': {'r_bar': 1e5, 'kappa': 1.67e-12, 'agent_kappa': 1.67e-15, 'sigma_s': 0, 'fund_vol': 1e-8,
'megashock_lambda_a': 2.77778e-13, 'megashock_mean': 1e3, 'megashock_var': 5e4,
'random_state': np.random.RandomState(seed=np.random.randint(low=0, high=2 ** 32, dtype='uint64'))}}
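# Illustrative note (not required by the config): with exponential megashock
# spacing, the expected gap between megashocks is 1 / megashock_lambda_a,
# i.e. roughly 3.6e12 ns (about one hour) for the value configured above.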
### Configure the Kernel.
kernel = Kernel("Base Kernel",
random_state=np.random.RandomState(seed=np.random.randint(low=0, high=2 ** 32, dtype='uint64')))
### Configure the agents. When conducting "agent of change" experiments, the
### new agents should be added at the END only.
agent_count = 0
agents = []
agent_types = []
### Configure an exchange agent.
# Let's open the exchange at 9:30 AM.
mkt_open = midnight + | pd.to_timedelta('09:30:00') | pandas.to_timedelta |
import random
import pandas as pd
from queue import CircularQueue
import nltk
#nltk.download()
from nltk.tree import ParentedTree as Tree
import en
class negation:
def __init__(self,max_length):
df = pd.read_csv(r'snli_1.0_train.txt', delimiter='\t')
self.df = df[df['sentence1'].apply(lambda x: len(x) < max_length)]
        self.df = self.df.reset_index(drop=True)
self.entailment_df=df[df['gold_label']=='entailment']
#breadth first search which returns the first
#node that's in the list of search parameters
def bfs(self,tree, search=None):
l=len([sub for sub in tree.subtrees()])
q = CircularQueue()
if tree.label()=='ROOT':
q.enqueue(tree[0])
else:
q.enqueue(tree)
i=0
while not q.is_empty():
t = q.dequeue()
if t.label() in search:
return t
for child in t:
if isinstance(child, nltk.tree.Tree) and child.label()!='S':
q.enqueue(child)
i+=1
if i>l+1:
print('BFS error')
return False
return False
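    # Illustrative usage (not part of the original class): for a parsed sentence
    #   t = Tree.fromstring('(ROOT (S (NP (DT The) (NN dog)) (VP (VBZ runs))))')
    # bfs(t, ['VP']) returns the (VP (VBZ runs)) subtree, or False when no
    # node with a matching label exists.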
#Negates the main verb in a sentence
def negate_verb(self,t):
verbs = ['VBP', 'VBD', 'VBZ', 'VBN', 'VB', 'VBG']
tr = self.bfs(t, ['VP'])
if tr is False:
print('tr false')
return False
# tr = self.bfs(t, ['NP'])
# if tr is None:
# return None
# else:
# tr.insert('RB', ['not'])
for index2, c in enumerate(tr.subtrees()):
if c.label() in verbs:
print('input word:',c.leaves()[0])
print('input label:',c.label())
word_negation=self.word_negation(c.leaves()[0],c.label())
print('word negation:',word_negation)
if word_negation is None:
return False
print('word negation:',word_negation)
c[0]=word_negation
break
return True
#Searches for an easily negated preposition
    def negate_noun(self, t):
        np_tree = self.bfs(t, ['NP'])
        if np_tree is False:
            return False
        pp = self.bfs(np_tree, ['PP'])
        if pp is False:
            return False
        for child in pp:
            if child.label() == 'IN':
                word = child.leaves()[0]
                if word == 'with':
                    # Assign through the tree node so the change sticks
                    child[0] = 'without'
                else:
                    child[0] = 'not ' + word
                return True
        return False
#Uses NodeBox to return the present tense of a verb
def present_tense(self,word):
return en.verb.present(word)
#Take a sentence and returns its negation
#This is pretty much the main function
def negate_sentence(self,t,prep=False):
one=False
b=False
for index,c in enumerate(t.subtrees()):
if c.label()=="S" and c!=t:
b= (b or self.negate_verb(c))
one=True
if one is False:
b=(b or self.negate_verb(t))
#self.negate_noun(t)
if prep:
            sub = self.bfs(t, ['NP'])
if sub is not False:
for i, child in enumerate(sub):
if child.label() == 'PP':
v=False
for grandchild in sub:
if grandchild.label()=='VP':
v=True
if not v:
del sub[i]
break
return b
#Takes a verb and returns its negation or None
#if it cannot find it
def word_negation(self, word, label):
print('word:',word)
print('label:',label)
if label == 'VP':
if word == 'was':
return 'was not'
elif word == 'is':
return 'is not'
elif word == 'did':
return 'did not'
elif word=='be':
return "don't be"
else:
try:
tense=en.verb.tense(word)
except:
tense=en.verb.tense(en.verb.infinitive(word))
print('tense:', tense)
if tense=='infinitive':
return 'do not '+ word
elif tense=='past':
                    return 'did not ' + self.present_tense(word)
elif tense=='present participle':
return 'not '+word
else:
                    return 'did not ' + self.present_tense(word)
if label == 'VBZ':
if word == 'is':
return 'is not'
else:
return 'does not ' + self.present_tense(word)
if label== 'VBP':
if word=='have':
print('happened')
return 'have not'
if word=='are':
return 'are not'
return 'do not ' + en.verb.infinitive(word)
if label == 'VBG':
if len(word) > 3 and word[-3:] == 'ing':
return 'not ' + word
if label == 'VBN':
            return 'not ' + word
if label=='VB':
return 'do not '+ word
def random_row(self):
i = random.randint(1,self.entailment_df.shape[0])
row = self.entailment_df.iloc[i]
id=row['pairID']
return id,row
def negate_row(self,row,i):
p_tree=Tree.fromstring(row['sentence1_parse'])
h_tree=Tree.fromstring(row['sentence2_parse'])
p_tree.pretty_print()
h_tree.pretty_print()
b=self.negate_sentence(p_tree)
if b is False:
return False
b=self.negate_sentence(h_tree)
if b is False:
print("B FALSE")
return False
for sub in p_tree.subtrees():
if sub.label()=='CC' or sub.label()=='PP':
return False
for sub in h_tree.subtrees():
if sub.label()=='CC' or sub.label()=='PP':
return False
p_sent=row['sentence1']
h_sent = row['sentence2']
neg_p_sent=' '.join([word for word in p_tree.flatten()[:]])
neg_h_sent = ' '.join([word for word in h_tree.flatten()[:]])
return i,p_sent,h_sent,neg_p_sent,neg_h_sent
def feeder(self):
while True:
i,row = self.random_row()
print('row:',row['sentence1'])
n=self.negate_row(row,i)
print('n:',n)
if not n is False:
return n
def contradiction_feeder(self):
while True:
i,row = self.random_row()
print('row:',row['sentence1'])
n=self.negate_row(row,i)
print('n:',n)
if not n is False:
return n
def create_df(self):
df=pd.DataFrame({'sentence1':["first"],
'sentence2':["second"]})
df.to_csv('contrapositives.csv')
def add_contradiction_sentences(self):
d = {}
df = pd.read_csv(r'contradiction_train.csv')
count = df.shape[0]
for row in range(count):
d[df.iloc[row]['index']] = 1
a = True
print("HELLO AND WELCOME")
while a:
try:
i, p, h, neg_p, neg_h = self.feeder()
if i in d:
print('REPEAT')
continue
except KeyError:
print('EXCEPTION OCCURRED')
continue
print('=' * 8)
print('Original Premise:')
print(p)
print()
print('Negated Premise:')
print(neg_p)
print()
print()
print('Original Hypothesis:')
print(h)
print()
print('Negated Hypothesis:')
print(neg_h)
s = str(raw_input('Does this negation make sense?\n'))
if s.strip() == 'yes':
print('OG df:')
print(df)
print(df.columns)
mini = | pd.DataFrame({'sentence1': [p], 'sentence2': [h], 'index': [i],'sentence1_negation':[neg_p],'sentence2_negation':[neg_h]}) | pandas.DataFrame |
import pandas as pd
import woodwork as ww
from sklearn.datasets import load_diabetes as load_diabetes_sk
def load_diabetes(return_pandas=False):
"""Load diabetes dataset. Regression problem
Returns:
        Union[(ww.DataTable, ww.DataColumn), (pd.DataFrame, pd.Series)]: X and y
"""
data = load_diabetes_sk()
X = | pd.DataFrame(data.data, columns=data.feature_names) | pandas.DataFrame |
from __future__ import absolute_import
import functools as ft
import warnings
from logging_helpers import _L
from lxml.etree import QName, Element
import lxml.etree
import networkx as nx
import numpy as np
import pandas as pd
from .core import ureg
from .load import draw, load
from six.moves import zip
__all__ = ['detect_neighbours', 'draw_with_segment_rays',
'write_connections_layer']
DEFAULT_DISTANCE_THRESHOLD = 0.175 * ureg.mm
def detect_neighbours(chip_info,
distance_threshold=DEFAULT_DISTANCE_THRESHOLD):
segments = get_segment_rays(chip_info, magnitude=distance_threshold)
return get_all_intersections(segments)
def draw_with_segment_rays(chip_info,
distance_threshold=DEFAULT_DISTANCE_THRESHOLD,
axis=None):
import matplotlib.pyplot as plt
if axis is None:
fig, axis = plt.subplots(figsize=(50, 50))
result = draw(chip_info, ax=axis)
# result = draw(chip_info)
axis = result['axis']
for p in result['patches'].values():
p.set_alpha(.3)
light_green = '#90cd97'
dark_green = '#059748'
    df_intersections = detect_neighbours(chip_info,
                                         distance_threshold=distance_threshold)
for idx_i, segment_i in df_intersections.iterrows():
axis.arrow(segment_i['x_mid'], segment_i['y_mid'],
segment_i['x_normal'], segment_i['y_normal'],
width=.25,
edgecolor=dark_green, facecolor=light_green)
def get_all_intersections(df_rays):
'''
Parameters
----------
segment_rays : pandas.DataFrame
See return type of :func:`get_segment_rays()`.
'''
intersections = []
for i, ((id_i, vertex_i), segment_i) in enumerate(df_rays.iterrows()):
p = segment_i[['x_mid', 'y_mid']].values
r = segment_i[['x_normal', 'y_normal']].values
df_intersections_i = get_intersections(df_rays, p, r)
# Do not include self electrode in consideration for neighbours.
self_mask = df_intersections_i.index.get_level_values('id') == id_i
df_intersections_i = df_intersections_i.loc[~self_mask]
if df_intersections_i.shape[0]:
intersections.append(((id_i, vertex_i), df_intersections_i))
if not intersections:
return | pd.DataFrame() | pandas.DataFrame |
import unittest
import logging
import summer2020py.setup_logger as setup_logger
import summer2020py.make_genebody_coverage_graphs.make_genebody_coverage_graphs as mgcg
import pandas
import tempfile
import os
temp_wkdir_prefix = "TestMakeGeneBodyCoverageGraphs"
logger = logging.getLogger(setup_logger.LOGGER_NAME)
# Some notes on testing conventions (more in cuppers convention doc):
# (1) Use "self.assert..." over "assert"
# - self.assert* methods: https://docs.python.org/2.7/library/unittest.html#assert-methods
# - This will ensure that if one assertion fails inside a test method,
# exectution won't halt and the rest of the test method will be executed
# and other assertions are also verified in the same run.
# (2) For testing exceptions use:
# with self.assertRaises(some_exception) as context:
# [call method that should raise some_exception]
# self.assertEqual(str(context.exception), "expected exception message")
#
# self.assertAlmostEquals(...) for comparing floats
class TestMakeGeneBodyCoverageGraphs(unittest.TestCase):
def test_main(self):
logger.debug("\n \n \n test_main \n \n ")
input_dir = os.path.join("assets", "notebook_inputs", "output_gbdy_cov")
logger.debug("input_dir: {}".format(input_dir))
with tempfile.TemporaryDirectory(prefix=temp_wkdir_prefix) as wkdir:
logger.debug("wkdir: {}".format(wkdir))
args = mgcg.build_parser().parse_args([
#"-s", source_dir,
"-i", input_dir,
"-o", wkdir,
"-of", "MYEXPERIMENTID"
])
mgcg.main(args)
#check that html files were outputted
self.assertTrue(os.path.exists(os.path.join(wkdir, "MYEXPERIMENTID_genebody_histogram_coverage_diff.html")))
self.assertTrue(os.path.exists(os.path.join(wkdir, "MYEXPERIMENTID_genebody_histogram_cov_diff_pct.html")))
self.assertTrue(os.path.exists(os.path.join(wkdir, "MYEXPERIMENTID_genebody_coverage_percentile.html")))
self.assertTrue(os.path.exists(os.path.join(wkdir, "MYEXPERIMENTID_genebody_coverage_counts.html")))
#check that the text files are the same as example outputss
#doesn't work for html files
outputted_files = [
os.path.join(wkdir, "MYEXPERIMENTID_all_genebody_coverage_r1200x6.txt"),
os.path.join(wkdir, "MYEXPERIMENTID_asymmetry_compare_80_20_r12x6.txt")
]
expected_files = [
os.path.join("assets", "example_notebook_outputs", "MYEXPERIMENTID_all_genebody_coverage_r1200x6.txt"),
os.path.join("assets", "example_notebook_outputs", "MYEXPERIMENTID_asymmetry_compare_80_20_r12x6.txt")
]
for i in range(0, len(outputted_files)):
opened_output = open(outputted_files[i], "r")
opened_expected = open(expected_files[i], "r")
logger.debug("checking {} against expected".format(outputted_files[i]))
self.assertEqual(opened_output.read(), opened_expected.read())
opened_output.close()
opened_expected.close()
def test_input_file_search(self):
logger.debug("\n \n \n test_input_file_search\n \n ")
input_dir = os.path.join("assets","notebook_inputs", "output_gbdy_cov")
logger.debug("input_dir: {}".format(input_dir))
input_files = mgcg.input_file_search(input_dir)
self.assertEqual(len(input_files), 12)
#check that the first 3 files are the correct ones
self.assertEqual(
os.path.join('assets','notebook_inputs','output_gbdy_cov','D121','D121.geneBodyCoverage.txt'),
input_files[0]
)
self.assertEqual(
os.path.join('assets','notebook_inputs','output_gbdy_cov','D122','D122.geneBodyCoverage.txt'),
input_files[1]
)
self.assertEqual(
os.path.join('assets','notebook_inputs','output_gbdy_cov','D123','D123.geneBodyCoverage.txt'),
input_files[2]
)
def test_load_genebody_coverage_data(self):
input_files = [
os.path.join("assets", "notebook_inputs", "output_gbdy_cov", "D121", "D121.geneBodyCoverage.txt"),
os.path.join("assets", "notebook_inputs", "output_gbdy_cov", "D122", "D122.geneBodyCoverage.txt")
]
inp_df_list = mgcg.load_genebody_coverage_data(input_files)
#check that there are two data frames
self.assertEqual(len(inp_df_list), 2)
#check that first df is the right shape
self.assertEqual(inp_df_list[0].shape[0], 100)
self.assertEqual(inp_df_list[0].shape[1], 2)
#check that second df is the right shape
self.assertEqual(inp_df_list[1].shape[0], 100)
self.assertEqual(inp_df_list[1].shape[1], 2)
#check that sample id are the right ones
self.assertEqual(inp_df_list[0].sample_id[0], "D121")
self.assertEqual(inp_df_list[1].sample_id[0], "D122")
def test_merge_dfs_into_one(self):
logger.debug("\n \n \n test_merge_dfs_into_one\n \n ")
#create first fake data frame
df = pandas.DataFrame({"coverage_counts":range(100000,500000, 4000), "sample_id":"FAKE"})
df.index.name = "genebody_pct"
df.index += 1
#create second fake data frame
df2 = pandas.DataFrame({"coverage_counts":range(120000,520000, 4000), "sample_id":"FACE"})
df2.index.name = "genebody_pct"
df2.index += 1
counts_df = mgcg.merge_dfs_into_one([df, df2])
logger.debug("counts_df: {}".format(counts_df))
#check that df is the right shape
self.assertEqual(counts_df.shape[0], 200)
self.assertEqual(counts_df.shape[1], 3)
#check that first sample id is fake and that 11th is face
self.assertEqual(counts_df.sample_id[0], "FAKE")
self.assertEqual(counts_df.sample_id[100], "FACE")
def test_sum_counts(self):
logger.debug("\n \n \n test_sum_counts\n \n ")
sample_ids =[]
for i in range(0, 200):
if i < 100:
sample_ids.append("FAKE")
else:
sample_ids.append("FACE")
#create fake data frame
counts_df = pandas.DataFrame({"coverage_counts":list(range(100000,500000, 4000)) + list(range(120000,520000, 4000)), "sample_id":sample_ids})
sum_counts_df = mgcg.sum_counts(counts_df)
logger.debug("counts_df: {}".format(counts_df))
logger.debug("sum_counts_df: {}".format(sum_counts_df))
#check that df is the right shape
self.assertEqual(sum_counts_df.shape[0], 2)
self.assertEqual(sum_counts_df.shape[1], 1)
#check that the sums are correct
self.assertEqual(sum_counts_df.total_coverage_counts[0], 31800000)
self.assertEqual(sum_counts_df.total_coverage_counts[1], 29800000)
def test_calculate_percentile_df(self):
logger.debug("\n \n \n test_jcalculate_percentile_df\n \n ")
sample_ids =[]
for i in range(0, 200):
if i < 100:
sample_ids.append("FAKE")
else:
sample_ids.append("FACE")
counts_df = pandas.DataFrame({"coverage_counts":list(range(100000,500000, 4000)) + list(range(120000,520000, 4000)), "sample_id":sample_ids})
sum_counts_df = pandas.DataFrame(data = {"total_coverage_counts":[31800000, 29800000]}, index = ["FACE", "FAKE"])
sum_counts_df.index.name = "sample_id"
percentile_df = mgcg.calculate_percentile_df(counts_df, sum_counts_df)
#check that df is the right shape
self.assertEqual(percentile_df.shape[0], 200)
self.assertEqual(percentile_df.shape[1], 4)
#check that first sample id is fake and that 11th is face
self.assertEqual(percentile_df.sample_id[0], "FAKE")
self.assertEqual(percentile_df.sample_id[100], "FACE")
#check that FAKE coveragecounts are 2.8 mil and FACE are 3 mil
self.assertEqual(percentile_df.total_coverage_counts[0], 29800000)
self.assertEqual(percentile_df.total_coverage_counts[100], 31800000)
#check first twenty percentiles to make sure they are correct
for i in range(0, 20):
self.assertEqual(percentile_df.coverage_percentile[i], percentile_df.coverage_counts[i] / percentile_df.total_coverage_counts[i])
def test_create_pct_df_list(self):
logger.debug("\n \n \n test_create_pct_df_list\n \n ")
sample_ids =[]
for i in range(0, 200):
if i < 100:
sample_ids.append("FAKE")
else:
sample_ids.append("FACE")
coverage_percentile = list(range(3356,16756,134)) + list(range(2726,16226, 135))
for num in range(len(coverage_percentile)):
coverage_percentile[num] = coverage_percentile[num] / 1000000
percentile_df = pandas.DataFrame({"coverage_percentile":coverage_percentile, "sample_id":sample_ids, "genebody_pct":list(range(1,101))+ list(range(1,101))})
pct_df_list = mgcg.create_pct_df_list(percentile_df)
logger.debug("pct_df_list: {}".format(pct_df_list))
#checking 20th
self.assertEqual(pct_df_list[0].coverage_20pct[0], 0.005902)
self.assertEqual(pct_df_list[0].coverage_20pct[1], 0.005291)
#checking 50th
self.assertEqual(pct_df_list[1].coverage_50pct[0], 0.009922)
self.assertEqual(pct_df_list[1].coverage_50pct[1], 0.009341)
#checking 80th
self.assertEqual(pct_df_list[2].coverage_80pct[0], 0.013942)
self.assertEqual(pct_df_list[2].coverage_80pct[1], 0.013391)
def test_create_pct_comp_df(self):
logger.debug("\n \n \n test_create_pct_comp_df\n \n ")
df20 = pandas.DataFrame(data = {"coverage_20pct":[0.005902,0.005291]}, index = ["FAKE", "FACE"])
df20.index.name = "sample_id"
df50 = pandas.DataFrame(data = {"coverage_50pct":[0.009922,0.009341]}, index = ["FAKE", "FACE"])
df50.index.name = "sample_id"
df80 = pandas.DataFrame(data = {"coverage_80pct":[0.013942,0.013391]}, index = ["FAKE", "FACE"])
df80.index.name = "sample_id"
pct_comp_df = mgcg.create_pct_comp_df([df20, df50, df80])
logger.debug("pct_comp_df: {}".format(pct_comp_df))
self.assertAlmostEqual(pct_comp_df.cov_diff_pct[0], 0.810320, places=5)
self.assertAlmostEqual(pct_comp_df.cov_diff_pct[1], 0.867145, places=5)
def test_add_label_col(self):
logger.debug("\n \n \n test_add_label_col\n \n ")
pct_comp_df = pandas.DataFrame(data = {"cov_diff_pct":[0.810320,0.867145]}, index = ["FAKE", "FACE"])
pct_comp_df.index.name = "sample_id"
pct_comp_df = mgcg.add_label_col(pct_comp_df)
logger.debug("pct_comp_df: {}".format(pct_comp_df))
self.assertEqual(pct_comp_df.label[0], "FAKE 0.81")
self.assertEqual(pct_comp_df.label[1], "FACE 0.87")
def test_add_labels_based_on_sample_id(self):
logger.debug("\n \n \n test_add_labels_based_on_sample_id\n \n ")
sample_ids =[]
for i in range(0, 200):
if i < 100:
sample_ids.append("FAKE")
else:
sample_ids.append("FACE")
pct_comp_df = | pandas.DataFrame(data = {"cov_diff_pct":[0.810320,0.867145], "label":["FAKE 0.81", "FACE 0.87"]}, index = ["FAKE", "FACE"]) | pandas.DataFrame |
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import struct
import h5py
import time
import os
class Database():
"""Connection to an HDF5 database storing message and order book data.
Parameters
----------
path : string
Specifies location of the HDF5 file
names : list
Contains the stock tickers to include in the database
nlevels : int
Specifies the number of levels to include in the order book data
"""
def __init__(self, path, names, nlevels, method):
self.method = method
if self.method == 'hdf5':
try:
self.file = h5py.File(path, 'r+') # read/write, file must exist
print('Appending existing HDF5 file.')
for name in names:
if name in self.file['messages'].keys():
print('Overwriting message data for {}'.format(name))
del self.file['messages'][name]
if name in self.file['orderbooks'].keys():
print('Overwriting orderbook data for {}'.format(name))
del self.file['orderbooks'][name]
if name in self.file['trades'].keys():
print('Overwriting trades data for {}'.format(name))
del self.file['trades'][name]
if name in self.file['noii'].keys():
print('Overwriting noii data for {}'.format(name))
del self.file['noii'][name]
except OSError as e:
print('HDF5 file does not exist. Creating a new one.')
self.file = h5py.File(path, 'x') # create file, fail if exists
self.messages = self.file.require_group('messages')
self.orderbooks = self.file.require_group('orderbooks')
self.trades = self.file.require_group('trades')
self.noii = self.file.require_group('noii')
for name in names:
self.messages.require_dataset(name,
shape=(0, 8),
maxshape=(None, None),
dtype='i')
self.orderbooks.require_dataset(name,
shape=(0, 4 * nlevels + 2),
maxshape=(None, None),
dtype='i')
self.trades.require_dataset(name,
shape=(0, 5),
maxshape=(None, None),
dtype='i')
self.noii.require_dataset(name,
shape=(0, 14),
maxshape=(None, None),
dtype='i')
elif self.method == 'csv':
if os.path.exists('{}'.format(path)):
response = input('A database with that path already exists. Are you sure you want to proceed? [Y/N] ')
if response == 'Y':
proceed = True
for item in os.listdir('{}/messages/'.format(path)):
os.remove('{}/messages/{}'.format(path, item))
for item in os.listdir('{}/books/'.format(path)):
os.remove('{}/books/{}'.format(path, item))
for item in os.listdir('{}/trades/'.format(path)):
os.remove('{}/trades/{}'.format(path, item))
for item in os.listdir('{}/noii/'.format(path)):
os.remove('{}/noii/{}'.format(path, item))
os.rmdir('{}/messages/'.format(path))
os.rmdir('{}/books/'.format(path))
os.rmdir('{}/trades/'.format(path))
os.rmdir('{}/noii/'.format(path))
for item in os.listdir('{}'.format(path)):
os.remove('{}/{}'.format(path, item))
os.rmdir('{}'.format(path))
else:
# TODO: Need to exit the program
proceed = False
print('Process cancelled.')
else:
proceed = True
if proceed:
print('Creating a new database in directory: {}/'.format(path))
self.messages_path = '{}/messages/'.format(path)
self.books_path = '{}/books/'.format(path)
self.trades_path = '{}/trades/'.format(path)
self.noii_path = '{}/noii/'.format(path)
os.makedirs(path)
os.makedirs(self.messages_path)
os.makedirs(self.books_path)
os.makedirs(self.trades_path)
os.makedirs(self.noii_path)
columns = ['sec', 'nano', 'name']
columns.extend(['bidprc{}'.format(i) for i in range(nlevels)])
columns.extend(['askprc{}'.format(i) for i in range(nlevels)])
columns.extend(['bidvol{}'.format(i) for i in range(nlevels)])
columns.extend(['askvol{}'.format(i) for i in range(nlevels)])
for name in names:
with open(self.messages_path + 'messages_{}.txt'.format(name), 'w') as messages_file:
messages_file.write('sec,nano,name,type,refno,side,shares,price,mpid\n')
with open(self.books_path + 'books_{}.txt'.format(name), 'w') as books_file:
books_file.write(','.join(columns) + '\n')
with open(self.trades_path + 'trades_{}.txt'.format(name), 'w') as trades_file:
trades_file.write('sec,nano,name,side,shares,price\n')
with open(self.noii_path + 'noii_{}.txt'.format(name), 'w') as noii_file:
noii_file.write('sec,nano,name,type,cross,shares,price,paired,imb,dir,far,near,curr\n')
def close(self):
if self.method == 'hdf5':
self.file.close()
else:
pass
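# Illustrative usage sketch (hypothetical file name, not part of the original
# module):
#   db = Database(path='itch_20130404.h5', names=['AAPL', 'GOOG'],
#                 nlevels=10, method='hdf5')
#   ... parse an ITCH day file, writing batches via Messagelist/Book ...
#   db.close()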
class Message():
"""A class representing out-going messages from the NASDAQ system.
Parameters
----------
sec : int
Seconds
nano : int
Nanoseconds
type : string
Message type
event : string
System event
name : string
Stock ticker
buysell : string
Trade position
price : int
Trade price
shares : int
Shares
refno : int
Unique reference number of order
newrefno : int
Replacement reference number
mpid: string
MPID attribution
"""
def __init__(self, date='.', sec=-1, nano=-1, type='.', event='.', name='.',
buysell='.', price=-1, shares=0, refno=-1, newrefno=-1, mpid='.'):
self.date = date
self.name = name
self.sec = sec
self.nano = nano
self.type = type
self.event = event
self.buysell = buysell
self.price = price
self.shares = shares
self.refno = refno
self.newrefno = newrefno
self.mpid = mpid
def __str__(self):
sep = ', '
line = ['sec=' + str(self.sec),
'nano=' + str(self.nano),
'type=' + str(self.type),
'event=' + str(self.event),
'name=' + str(self.name),
'buysell=' + str(self.buysell),
'price=' + str(self.price),
'shares=' + str(self.shares),
'refno=' + str(self.refno),
'newrefno=' + str(self.newrefno),
'mpid= {}'.format(self.mpid)]
return sep.join(line)
def __repr__(self):
sep = ', '
line = ['sec: ' + str(self.sec),
'nano: ' + str(self.nano),
'type: ' + str(self.type),
'event: ' + str(self.event),
'name: ' + str(self.name),
'buysell: ' + str(self.buysell),
'price: ' + str(self.price),
'shares: ' + str(self.shares),
'refno: ' + str(self.refno),
'newrefno: ' + str(self.newrefno),
'mpid: {}'.format(self.mpid)]
return 'Message(' + sep.join(line) + ')'
def split(self):
"""Converts a replace message to an add and a delete."""
assert self.type == 'U', "ASSERT-ERROR: split method called on non-replacement message."
if self.type == 'U':
new_message = Message(date=self.date,
sec=self.sec,
nano=self.nano,
type='U',
price=self.price,
shares=self.shares,
refno=self.refno,
newrefno=self.newrefno)
del_message = Message(date=self.date,
sec=self.sec,
nano=self.nano,
type='D',
refno=self.refno,
newrefno=-1)
add_message = Message(date=self.date,
sec=self.sec,
nano=self.nano,
type='U+',
price=self.price,
shares=self.shares,
refno=self.refno,
newrefno=self.newrefno)
return (new_message, del_message, add_message)
def to_list(self):
"""Returns message as a list."""
values = []
values.append(str(self.date))
values.append(str(self.name))
values.append(int(self.sec))
values.append(int(self.nano))
values.append(str(self.type))
values.append(str(self.event))
values.append(str(self.buysell))
values.append(int(self.price))
values.append(int(self.shares))
values.append(int(self.refno))
values.append(int(self.newrefno))
        values.append(str(self.mpid))
return values
def to_array(self):
"""Returns message as an np.array of integers."""
if self.type == 'P':
if self.buysell == 'B':
side = -1
else:
side = 1
values = [self.sec, self.nano, side, self.price, self.shares]
return np.array(values)
else:
if self.type == 'A': # add
type = 0
elif self.type == 'F': # add w/mpid
type = 1
elif self.type == 'X': # cancel
type = 2
elif self.type == 'D': # delete
type = 3
elif self.type == 'E': # execute
type = 4
elif self.type == 'C': # execute w/price
type = 5
elif self.type == 'U': # replace
type = 6
else:
type = -1
if self.buysell == 'B': # bid
side = 1
elif self.buysell == 'S': # ask
side = -1
else:
side = 0
values = [self.sec,
self.nano,
type,
side,
self.price,
np.abs(self.shares),
self.refno,
self.newrefno]
return np.array(values)
def to_txt(self, path=None):
if self.type in ('S', 'H'):
sep = ','
line = [str(self.sec),
str(self.nano),
str(self.name),
str(self.event)]
elif self.type in ('A', 'F', 'E', 'C', 'X', 'D', 'U'):
sep = ','
line = [str(self.sec),
str(self.nano),
str(self.name),
str(self.type),
str(self.refno),
str(self.buysell),
str(self.shares),
str(self.price / 10 ** 4),
str(self.mpid)]
elif self.type == 'P':
sep = ','
line = [str(self.sec),
str(self.nano),
str(self.name),
str(self.buysell),
str(self.shares),
str(self.price / 10 ** 4)]
if path is None:
return sep.join(line) + '\n'
else:
with open(path, 'a') as fout:
fout.write(sep.join(line) + '\n')
class NOIIMessage():
"""A class representing out-going messages from the NASDAQ system.
This class is specific to net order imbalance indicator messages and
cross trade messages.
Parameters
----------
sec: int
Seconds
nano: int
Nanoseconds
name: string
Stock ticker
type: string
Message type
cross: string
Cross type
buysell: string
Trade position
price: int
Trade price
shares: int
Shares
matchno: int
Unique reference number of trade
paired: int
Shares paired
imbalance: int
Shares imbalance
direction: string
Imbalance direction
far: int
Far price
near: int
Near price
current: int
        Current reference price
"""
def __init__(self, date='.', sec=-1, nano=-1, name='.', type='.', cross='.',
buysell='.', price=-1, shares=0, matchno=-1, paired=-1,
imbalance=-1, direction='.', far=-1, near=-1, current=-1):
self.date = date
self.sec = sec
self.nano = nano
self.name = name
self.type = type
self.cross = cross
self.buysell = buysell
self.price = price
self.shares = shares
self.matchno = matchno
self.paired = paired
self.imbalance = imbalance
self.direction = direction
self.far = far
self.near = near
self.current = current
def __str__(self):
sep = ', '
line = ['date=' + str(self.date),
'sec=' + str(self.sec),
'nano=' + str(self.nano),
'name=' + str(self.name),
'type=' + str(self.type),
'cross=' + str(self.cross),
'buysell=' + str(self.buysell),
'price=' + str(self.price),
'shares=' + str(self.shares),
'matchno=' + str(self.matchno),
'paired=' + str(self.paired),
'imbalance=' + str(self.imbalance),
'direction=' + str(self.direction),
'far=' + str(self.far),
'near=' + str(self.near),
'current=' + str(self.current)]
return sep.join(line)
def __repr__(self):
sep = ', '
line = ['date=' + str(self.date),
'sec=' + str(self.sec),
'nano=' + str(self.nano),
'name=' + str(self.name),
'type=' + str(self.type),
'cross=' + str(self.cross),
'buysell=' + str(self.buysell),
'price=' + str(self.price),
'shares=' + str(self.shares),
'matchno=' + str(self.matchno),
'paired=' + str(self.paired),
'imbalance=' + str(self.imbalance),
'direction=' + str(self.direction),
'far=' + str(self.far),
'near=' + str(self.near),
'current=' + str(self.current)]
return 'Message(' + sep.join(line) + ')'
def to_list(self):
"""Returns message as a list."""
values = []
values.append(str(self.date))
values.append(int(self.sec))
values.append(int(self.nano))
values.append(str(self.name))
values.append(str(self.type))
values.append(str(self.cross))
values.append(str(self.buysell))
values.append(int(self.price))
values.append(int(self.shares))
values.append(int(self.matchno))
values.append(int(self.paired))
values.append(int(self.imbalance))
        values.append(str(self.direction))
values.append(int(self.far))
values.append(int(self.near))
values.append(int(self.current))
return values
def to_array(self):
"""Returns message as an np.array of integers."""
if self.type == 'Q': # cross trade
type = 0
elif self.type == 'I': # noii
type = 1
else:
type = -1
print('Unexpected NOII message type: {}'.format(self.type))
if self.cross == 'O': # opening cross
cross = 0
elif self.cross == 'C': # closing cross
cross = 1
elif self.cross == 'H': # halted cross
cross = 2
elif self.cross == 'I': # intraday cross
cross = 3
else:
cross = -1
print('Unexpected cross type: {}'.format(self.cross))
if self.buysell == 'B': # bid
side = 1
elif self.buysell == 'S': # ask
side = -1
else:
side = 0
if self.direction == 'B': # bid
dir = 1
elif self.direction == 'S': # ask
dir = -1
else:
dir = 0
values = [self.sec,
self.nano,
type,
cross,
side,
self.price,
self.shares,
self.matchno,
self.paired,
self.imbalance,
dir,
self.far,
self.near,
self.current]
return np.array(values)
def to_txt(self, path=None):
sep = ','
if self.type == 'Q':
line = [str(self.sec),
str(self.nano),
str(self.name),
str(self.type),
str(self.cross),
str(self.shares),
str(self.price / 10 ** 4),
str(self.paired),
str(self.imbalance),
str(self.direction),
str(self.far),
str(self.near),
str(self.current)]
elif self.type == 'I':
line = [str(self.sec),
str(self.nano),
str(self.name),
str(self.type),
str(self.cross),
str(self.shares),
str(self.price),
str(self.paired),
str(self.imbalance),
str(self.direction),
str(self.far / 10 ** 4),
str(self.near / 10 ** 4),
str(self.current / 10 ** 4)]
if path is None:
return sep.join(line) + '\n'
else:
with open(path, 'a') as fout:
fout.write(sep.join(line) + '\n')
class Trade():
"""A class representing trades on the NASDAQ system.
Parameters
----------
date: int
Date
sec : int
Seconds
nano : int
Nanoseconds
name : string
Stock ticker
side : string
Buy or sell
price : int
Trade price
shares : int
Shares
"""
def __init__(self, date='.', sec=-1, nano=-1, name='.', side='.', price=-1, shares=0):
self.date = date
self.name = name
self.sec = sec
self.nano = nano
self.side = side
self.price = price
self.shares = shares
def __str__(self):
sep = ', '
line = ['sec: ' + str(self.sec),
'nano: ' + str(self.nano),
'name: ' + str(self.name),
                'side: ' + str(self.side),
'price: ' + str(self.price),
'shares: ' + str(self.shares)]
return sep.join(line)
def __repr__(self):
sep = ', '
line = ['sec: ' + str(self.sec),
'nano: ' + str(self.nano),
'name: ' + str(self.name),
'side: ' + str(self.buysell),
'price: ' + str(self.price),
'shares: ' + str(self.shares)]
return 'Trade(' + sep.join(line) + ')'
def to_list(self):
"""Returns message as a list."""
values = []
values.append(str(self.date))
values.append(str(self.name))
values.append(int(self.sec))
values.append(int(self.nano))
values.append(str(self.side))
values.append(int(self.price))
values.append(int(self.shares))
return values
def to_array(self):
"""Returns message as an np.array of integers."""
if self.side == 'B':
side = -1
else:
side = 1
return np.array([self.sec, self.nano, side, self.price, self.shares])
def to_txt(self, path=None):
sep = ','
line = [str(self.sec),
str(self.nano),
str(self.name),
str(self.side),
str(self.shares),
str(self.price / 10 ** 4)]
if path is None:
return sep.join(line) + '\n'
else:
with open(path, 'a') as fout:
fout.write(sep.join(line) + '\n')
class Messagelist():
"""A class to store messages.
Provides methods for writing to HDF5 and PostgreSQL databases.
Parameters
----------
date : string
Date to be assigned to data
names : list
Contains the stock tickers to include in the database
Attributes
----------
messages : dict
        Contains a list of Message objects for each name in names
Examples
--------
    Create a Messagelist::

        >>> msglist = pk.Messagelist(date='112013', names=['GOOG', 'AAPL'])
"""
def __init__(self, date, names):
self.messages = {}
self.date = date
for name in names:
self.messages[name] = []
def add(self, message):
"""Add a message to the list."""
try:
self.messages[message.name].append(message)
except KeyError as e:
print("KeyError: Could not find {} in the message list".format(message.name))
def to_hdf5(self, name, db, grp):
"""Write messages to HDF5 file."""
assert db.method == 'hdf5', 'Attempted to write to non-HDF5 database'
m = self.messages[name]
if len(m) > 0:
listed = [message.to_array() for message in m]
array = np.array(listed)
if grp == 'messages':
db_size, db_cols = db.messages[name].shape # rows
array_size, array_cols = array.shape
db_resize = db_size + array_size
db.messages[name].resize((db_resize, db_cols))
db.messages[name][db_size:db_resize, :] = array
if grp == 'trades':
db_size, db_cols = db.trades[name].shape # rows
array_size, array_cols = array.shape
db_resize = db_size + array_size
db.trades[name].resize((db_resize, db_cols))
db.trades[name][db_size:db_resize, :] = array
if grp == 'noii':
db_size, db_cols = db.noii[name].shape # rows
array_size, array_cols = array.shape
db_resize = db_size + array_size
db.noii[name].resize((db_resize, db_cols))
db.noii[name][db_size:db_resize, :] = array
self.messages[name] = [] # reset
print('wrote {} messages to dataset (name={}, group={})'.format(len(m), name, grp))
def to_txt(self, name, db, grp):
assert db.method == 'csv', 'Attempted to write to non-CSV database'
message_list = self.messages[name]
if len(message_list) > 0:
texted = [message.to_txt() for message in message_list]
if grp == 'messages':
with open('{}/messages_{}.txt'.format(db.messages_path, name), 'a') as fout:
fout.writelines(texted)
if grp == 'trades':
with open('{}/trades_{}.txt'.format(db.trades_path, name), 'a') as fout:
fout.writelines(texted)
if grp == 'noii':
with open('{}/noii_{}.txt'.format(db.noii_path, name), 'a') as fout:
fout.writelines(texted)
self.messages[name] = []
print('wrote {} messages to dataset (name={}, group={})'.format(len(message_list), name, grp))
class Order():
"""A class to represent limit orders.
Stores essential message data for order book reconstruction.
Attributes
----------
name : string
Stock ticker
buysell : string
Trade position
price : int
Trade price
shares : int
Shares
"""
def __init__(self, name='.', buysell='.', price='.', shares='.'):
self.name = name
self.buysell = buysell
self.price = price
self.shares = shares
def __str__(self):
sep = ', '
line = ['name=' + str(self.name),
'buysell=' + str(self.buysell),
'price=' + str(self.price),
'shares=' + str(self.shares)]
return sep.join(line)
def __repr__(self):
sep = ', '
line = ['name=' + str(self.name),
'buysell=' + str(self.buysell),
'price=' + str(self.price),
'shares=' + str(self.shares)]
return 'Order(' + sep.join(line) + ')'
class Orderlist():
"""A class to store existing orders and process incoming messages.
This class handles the matching of messages to standing orders. Incoming messages are first matched to standing orders so that missing message data can be completed, and then the referenced order is updated based on the message.
Attributes
----------
orders : dict
Keys are reference numbers, values are Orders.
"""
def __init__(self):
self.orders = {}
def __str__(self):
sep = '\n'
line = []
for key in self.orders.keys():
line.append(str(key) + ': ' + str(self.orders[key]))
return sep.join(line)
# updates message by reference.
def complete_message(self, message):
"""Look up Order for Message and fill in missing data."""
if message.refno in self.orders.keys():
# print('complete_message received message: {}'.format(message.type))
ref_order = self.orders[message.refno]
if message.type == 'U':
message.name = ref_order.name
message.buysell = ref_order.buysell
elif message.type == 'U+': # ADD from a split REPLACE order
message.type = 'A'
message.name = ref_order.name
message.buysell = ref_order.buysell
message.refno = message.newrefno
message.newrefno = -1
elif message.type in ('E', 'C', 'X'):
message.name = ref_order.name
message.buysell = ref_order.buysell
message.price = ref_order.price
message.shares = -message.shares
elif message.type == 'D':
message.name = ref_order.name
message.buysell = ref_order.buysell
message.price = ref_order.price
message.shares = -ref_order.shares
def add(self, message):
"""Add a new Order to the list."""
order = Order()
order.name = message.name
order.buysell = message.buysell
order.price = message.price
order.shares = message.shares
self.orders[message.refno] = order
def update(self, message):
"""Update an existing Order based on incoming Message."""
if message.refno in self.orders.keys():
if message.type == 'E': # execute
self.orders[message.refno].shares += message.shares
elif message.type == 'X': # execute w/ price
self.orders[message.refno].shares += message.shares
elif message.type == 'C': # cancel
self.orders[message.refno].shares += message.shares
elif message.type == 'D': # delete
self.orders.pop(message.refno)
else:
pass
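# Illustrative message lifecycle (not part of the original module): an 'A'/'F'
# message adds an Order keyed by refno; later 'E', 'C', 'X' and 'D' messages are
# first filled in from that Order via complete_message() (with sign-flipped
# shares for removals) and then applied with update().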
class Book():
"""A class to represent an order book.
This class provides a method for updating the state of an order book from an
incoming message.
Attributes
----------
bids : dict
Keys are prices, values are shares
asks : dict
Keys are prices, values are shares
levels : int
Number of levels of the the order book to track
sec : int
Seconds
nano : int
Nanoseconds
"""
def __init__(self, date, name, levels):
self.bids = {}
self.asks = {}
self.min_bid = -np.inf
self.max_ask = np.inf
self.levels = levels
self.sec = -1
self.nano = -1
self.date = date
self.name = name
def __str__(self):
sep = ', '
sorted_bids = sorted(self.bids.keys(), reverse=True) # high-to-low
sorted_asks = sorted(self.asks.keys()) # low-to-high
bid_list = []
ask_list = []
nbids = len(self.bids)
nasks = len(self.asks)
for i in range(0, self.levels):
if i < nbids:
bid_list.append(str(self.bids[sorted_bids[i]]) + '@' + str(sorted_bids[i]))
else:
pass
if i < nasks:
ask_list.append(str(self.asks[sorted_asks[i]]) + '@' + str(sorted_asks[i]))
else:
pass
return 'bids: ' + sep.join(bid_list) + '\n' + 'asks: ' + sep.join(ask_list)
def __repr__(self):
sep = ', '
sorted_bids = sorted(self.bids.keys(), reverse=True) # high-to-low
sorted_asks = sorted(self.asks.keys()) # low-to-high
bid_list = []
ask_list = []
nbids = len(self.bids)
nasks = len(self.asks)
for i in range(0, self.levels):
if i < nbids:
bid_list.append(str(self.bids[sorted_bids[i]]) + '@' + str(sorted_bids[i]))
else:
pass
if i < nasks:
ask_list.append(str(self.asks[sorted_asks[i]]) + '@' + str(sorted_asks[i]))
else:
pass
return 'Book( \n' + 'bids: ' + sep.join(bid_list) + '\n' + 'asks: ' + sep.join(ask_list) + ' )'
def update(self, message):
"""Update Book using incoming Message data."""
self.sec = message.sec
self.nano = message.nano
updated = False
if message.buysell == 'B':
if message.price in self.bids.keys():
self.bids[message.price] += message.shares
if self.bids[message.price] == 0:
self.bids.pop(message.price)
elif message.type in ('A', 'F'):
self.bids[message.price] = message.shares
elif message.buysell == 'S':
if message.price in self.asks.keys():
self.asks[message.price] += message.shares
if self.asks[message.price] == 0:
self.asks.pop(message.price)
elif message.type in ('A', 'F'):
self.asks[message.price] = message.shares
return self
def to_list(self):
"""Return Order as a list."""
values = []
values.append(self.date)
values.append(self.name)
values.append(int(self.sec))
values.append(int(self.nano))
sorted_bids = sorted(self.bids.keys(), reverse=True)
sorted_asks = sorted(self.asks.keys())
for i in range(0, self.levels): # bid price
if i < len(self.bids):
values.append(sorted_bids[i])
else:
values.append(-1)
for i in range(0, self.levels): # ask price
if i < len(self.asks):
values.append(sorted_asks[i])
else:
values.append(-1)
for i in range(0, self.levels): # bid depth
if i < len(self.bids):
values.append(self.bids[sorted_bids[i]])
else:
values.append(0)
for i in range(0, self.levels): # ask depth
if i < len(self.asks):
values.append(self.asks[sorted_asks[i]])
else:
values.append(0)
return values
def to_array(self):
'''Return Order as numpy array.'''
values = []
values.append(int(self.sec))
values.append(int(self.nano))
sorted_bids = sorted(self.bids.keys(), reverse=True)
sorted_asks = sorted(self.asks.keys())
for i in range(0, self.levels): # bid price
if i < len(self.bids):
values.append(sorted_bids[i])
else:
values.append(-1)
for i in range(0, self.levels): # ask price
if i < len(self.asks):
values.append(sorted_asks[i])
else:
values.append(-1)
for i in range(0, self.levels): # bid depth
if i < len(self.bids):
values.append(self.bids[sorted_bids[i]])
else:
values.append(0)
for i in range(0, self.levels): # ask depth
if i < len(self.asks):
values.append(self.asks[sorted_asks[i]])
else:
values.append(0)
return np.array(values)
def to_txt(self):
values = []
values.append(int(self.sec))
values.append(int(self.nano))
values.append(self.name)
sorted_bids = sorted(self.bids.keys(), reverse=True)
sorted_asks = sorted(self.asks.keys())
for i in range(0, self.levels): # bid price
if i < len(self.bids):
values.append(sorted_bids[i] / 10 ** 4)
else:
values.append(-1)
for i in range(0, self.levels): # ask price
if i < len(self.asks):
values.append(sorted_asks[i] / 10 ** 4)
else:
values.append(-1)
for i in range(0, self.levels): # bid depth
if i < len(self.bids):
values.append(self.bids[sorted_bids[i]])
else:
values.append(0)
for i in range(0, self.levels): # ask depth
if i < len(self.asks):
values.append(self.asks[sorted_asks[i]])
else:
values.append(0)
return ','.join([str(v) for v in values]) + '\n'
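# Illustrative sketch (placeholder values; assumes a Message as produced by protocol() below):
# Book.update() accumulates signed share counts per price level, dropping a level once its
# depth reaches zero, and to_list() flattens the state as
# [date, name, sec, nano, <levels> bid prices, <levels> ask prices, <levels> bid depths,
# <levels> ask depths]; to_array() is the same without date/name.
#   book = Book(date='2013-07-01', name='AAPL', levels=5)
#   book.update(add_msg)   # add_msg.buysell == 'B' inserts depth at add_msg.price
#   row = book.to_list()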
class Booklist():
"""A class to store Books.
Provides methods for writing to external databases.
Examples
--------
Create a Booklist::
>>> booklist = Booklist(date, ['GOOG', 'AAPL'], levels=10, method='csv')
Attributes
----------
books : dict
Keys are names; values are dicts holding the current Book ('cur') and a history buffer ('hist')
method : string
Specifies the type of database to create ('hdf5' or 'csv')
"""
def __init__(self, date, names, levels, method):
self.books = {}
self.method = method
for name in names:
self.books[name] = {'hist': [], 'cur': Book(date, name, levels)}
def update(self, message):
"""Update Book data from message."""
b = self.books[message.name]['cur'].update(message)
if self.method == 'hdf5':
self.books[message.name]['hist'].append(b.to_array())
if self.method == 'csv':
self.books[message.name]['hist'].append(b.to_txt())
def to_hdf5(self, name, db):
"""Write Book data to HDF5 file."""
hist = self.books[name]['hist']
if len(hist) > 0:
array = np.array(hist)
db_size, db_cols = db.orderbooks[name].shape # rows
array_size, array_cols = array.shape
db_resize = db_size + array_size
db.orderbooks[name].resize((db_resize, db_cols))
db.orderbooks[name][db_size:db_resize, :] = array
self.books[name]['hist'] = [] # reset
print('wrote {} books to dataset (name={})'.format(len(hist), name))
def to_txt(self, name, db):
hist = self.books[name]['hist']
if len(hist) > 0:
with open('{}/books_{}.txt'.format(db.books_path, name), 'a') as fout:
fout.writelines(hist)
self.books[name]['hist'] = [] # reset
print('wrote {} books to dataset (name={})'.format(len(hist), name))
def get_message_size(size_in_bytes):
"""Return number of bytes in binary message as an integer."""
(message_size,) = struct.unpack('>H', size_in_bytes)
return message_size
def get_message_type(type_in_bytes):
"""Return the type of a binary message as a string."""
return type_in_bytes.decode('ascii')
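# Worked example for the two framing helpers above (illustrative bytes): every ITCH record
# is preceded by a 2-byte big-endian length and a 1-byte message type.
#   get_message_size(b'\x00\x24')   # struct.unpack('>H', ...) -> 36
#   get_message_type(b'A')          # b'A'.decode('ascii')     -> 'A'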
def get_message(message_bytes, message_type, date, time, version):
"""Return binary message data as a Message."""
if message_type in ('T', 'S', 'H', 'A', 'F', 'E', 'C', 'X', 'D', 'U', 'P', 'Q', 'I'):
message = protocol(message_bytes, message_type, time, version)
if version == 5.0:
message.sec = int(message.nano / 10 ** 9)
message.nano = message.nano % 10 ** 9
message.date = date
return message
else:
return None
def protocol(message_bytes, message_type, time, version):
"""Decode binary message data and return as a Message."""
if message_type in ('T', 'S', 'H', 'A', 'F', 'E', 'C', 'X', 'D', 'U', 'P'):
message = Message()
elif message_type in ('Q', 'I'):
message = NOIIMessage()
# elif message_type in ('H'):
# message = TradingActionMessage()
message.type = message_type
if version == 4.0:
if message.type == 'T': # time
temp = struct.unpack('>I', message_bytes)
message.sec = temp[0]
message.nano = 0
elif message_type == 'S': # systems
temp = struct.unpack('>Is', message_bytes)
message.sec = time
message.nano = temp[0]
message.event = temp[1].decode('ascii')
elif message_type == 'H': # trade-action
temp = struct.unpack('>I6sss4s', message_bytes)
message.sec = time
message.nano = temp[0]
message.name = temp[1].decode('ascii').rstrip(' ')
message.event = temp[2].decode('ascii')
elif message.type == 'A': # add
temp = struct.unpack('>IQsI6sI', message_bytes)
message.sec = time
message.nano = temp[0]
message.refno = temp[1]
message.buysell = temp[2].decode('ascii')
message.shares = temp[3]
message.name = temp[4].decode('ascii').rstrip(' ')
message.price = temp[5]
elif message.type == 'F': # add w/mpid
temp = struct.unpack('>IQsI6sI4s', message_bytes)
message.sec = time
message.nano = temp[0]
message.refno = temp[1]
message.buysell = temp[2].decode('ascii')
message.shares = temp[3]
message.name = temp[4].decode('ascii').rstrip(' ')
message.price = temp[5]
elif message.type == 'E': # execute
temp = struct.unpack('>IQIQ', message_bytes)
message.sec = time
message.nano = temp[0]
message.refno = temp[1]
message.shares = temp[2]
elif message.type == 'C': # execute w/price (actually don't need price...)
temp = struct.unpack('>IQIQsI', message_bytes)
message.sec = time
message.nano = temp[0]
message.refno = temp[1]
message.shares = temp[2]
message.price = temp[5]
elif message.type == 'X': # cancel
temp = struct.unpack('>IQI', message_bytes)
message.sec = time
message.nano = temp[0]
message.refno = temp[1]
message.shares = temp[2]
elif message.type == 'D': # delete
temp = struct.unpack('>IQ', message_bytes)
message.sec = time
message.nano = temp[0]
message.refno = temp[1]
elif message.type == 'U': # replace
temp = struct.unpack('>IQQII', message_bytes)
message.sec = time
message.nano = temp[0]
message.refno = temp[1]
message.newrefno = temp[2]
message.shares = temp[3]
message.price = temp[4]
elif message.type == 'Q':
temp = struct.unpack('>IQ6sIQs', message_bytes)
message.sec = time
message.nano = temp[0]
message.shares = temp[1]
message.name = temp[2].decode('ascii').rstrip(' ')
message.price = temp[3]
message.event = temp[5].decode('ascii')
return message
elif version == 4.1:
if message.type == 'T': # time
temp = struct.unpack('>I', message_bytes)
message.sec = temp[0]
message.nano = 0
elif message.type == 'S': # systems
temp = struct.unpack('>Is', message_bytes)
message.sec = time
message.nano = temp[0]
message.name = '.'
message.event = temp[1].decode('ascii')
elif message.type == 'H': # trade-action
temp = struct.unpack('>I8sss4s', message_bytes)
message.sec = time
message.nano = temp[0]
message.name = temp[1].decode('ascii').rstrip(' ')
message.event = temp[2].decode('ascii')
elif message.type == 'A': # add
temp = struct.unpack('>IQsI8sI', message_bytes)
message.sec = time
message.nano = temp[0]
message.refno = temp[1]
message.buysell = temp[2].decode('ascii')
message.shares = temp[3]
message.name = temp[4].decode('ascii').rstrip(' ')
message.price = temp[5]
elif message.type == 'F': # add w/mpid
temp = struct.unpack('>IQsI8sI4s', message_bytes)
message.sec = time
message.nano = temp[0]
message.refno = temp[1]
message.buysell = temp[2].decode('ascii')
message.shares = temp[3]
message.name = temp[4].decode('ascii').rstrip(' ')
message.price = temp[5]
message.mpid = temp[6].decode('ascii').rstrip(' ')
elif message.type == 'E': # execute
temp = struct.unpack('>IQIQ', message_bytes)
message.sec = time
message.nano = temp[0]
message.refno = temp[1]
message.shares = temp[2]
elif message.type == 'C': # execute w/price
temp = struct.unpack('>IQIQsI', message_bytes)
message.sec = time
message.nano = temp[0]
message.refno = temp[1]
message.shares = temp[2]
message.price = temp[5]
elif message.type == 'X': # cancel
temp = struct.unpack('>IQI', message_bytes)
message.sec = time
message.nano = temp[0]
message.refno = temp[1]
message.shares = temp[2]
elif message.type == 'D': # delete
temp = struct.unpack('>IQ', message_bytes)
message.sec = time
message.nano = temp[0]
message.refno = temp[1]
elif message.type == 'U': # replace
temp = struct.unpack('>IQQII', message_bytes)
message.sec = time
message.nano = temp[0]
message.refno = temp[1]
message.newrefno = temp[2]
message.shares = temp[3]
message.price = temp[4]
elif message.type == 'Q': # cross-trade
temp = struct.unpack('>IQ8sIQs', message_bytes)
message.sec = time
message.nano = temp[0]
message.shares = temp[1]
message.name = temp[2].decode('ascii').rstrip(' ')
message.price = temp[3]
message.event = temp[5].decode('ascii')
elif message.type == 'P': # trade message
temp = struct.unpack('>IQsI8sIQ', message_bytes)
message.sec = time
message.nano = temp[0]
message.refno = temp[1]
message.buysell = temp[2].decode('ascii')
message.shares = temp[3]
message.name = temp[4].decode('ascii').rstrip(' ')
message.price = temp[5]
# message.matchno = temp[6]
elif message.type == 'I':
temp = struct.unpack('>IQQs8sIIIss', message_bytes)
message.sec = time
message.nano = temp[0]
message.paired = temp[1]
message.imbalance = temp[2]
message.direction = temp[3].decode('ascii')
message.name = temp[4].decode('ascii').rstrip(' ')
message.far = temp[5]
message.near = temp[6]
message.current = temp[7]
message.cross = temp[8].decode('ascii')
# message.pvar = temp[9].decode('ascii'])
return message
elif version == 5.0:
if message.type == 'T': # time
raise ValueError('Time messages not supported in ITCHv5.0.')
elif message_type == 'S': # systems
temp = struct.unpack('>HHHIs', message_bytes)
message.sec = time
message.nano = (temp[2] << 32) | temp[3]  # 48-bit ns timestamp: high 16 bits then low 32 bits
message.event = temp[4].decode('ascii')
elif message.type == 'H':
temp = struct.unpack('>HHHI8sss4s', message_bytes)
message.sec = time
message.nano = (temp[2] << 32) | temp[3]
message.name = temp[4].decode('ascii').rstrip(' ')
message.event = temp[5].decode('ascii')
elif message.type == 'A': # add
temp = struct.unpack('>HHHIQsI8sI', message_bytes)
message.sec = time
message.nano = (temp[2] << 32) | temp[3]
message.refno = temp[4]
message.buysell = temp[5].decode('ascii')
message.shares = temp[6]
message.name = temp[7].decode('ascii').rstrip(' ')
message.price = temp[8]
elif message.type == 'F': # add w/mpid
temp = struct.unpack('>HHHIQsI8sI4s', message_bytes)
message.sec = time
message.nano = (temp[2] << 32) | temp[3]
message.refno = temp[4]
message.buysell = temp[5].decode('ascii')
message.shares = temp[6]
message.name = temp[7].decode('ascii').rstrip(' ')
message.price = temp[8]
elif message.type == 'E': # execute
temp = struct.unpack('>HHHIQIQ', message_bytes)
message.sec = time
message.nano = (temp[2] << 32) | temp[3]
message.refno = temp[4]
message.shares = temp[5]
elif message.type == 'C': # execute w/price
temp = struct.unpack('>HHHIQIQsI', message_bytes)
message.sec = time
message.nano = (temp[2] << 32) | temp[3]
message.refno = temp[4]
message.shares = temp[5]
message.price = temp[8]
elif message.type == 'X': # cancel
temp = struct.unpack('>HHHIQI', message_bytes)
message.sec = time
message.nano = (temp[2] << 32) | temp[3]
message.refno = temp[4]
message.shares = temp[5]
elif message.type == 'D': # delete
temp = struct.unpack('>HHHIQ', message_bytes)
message.sec = time
message.nano = (temp[2] << 32) | temp[3]
message.refno = temp[4]
elif message.type == 'U': # replace
temp = struct.unpack('>HHHIQQII', message_bytes)
message.sec = time
message.nano = (temp[2] << 32) | temp[3]
message.refno = temp[4]
message.newrefno = temp[5]
message.shares = temp[6]
message.price = temp[7]
elif message.type == 'Q': # cross-trade
temp = struct.unpack('>HHHIQ8sIQ1s', message_bytes)
message.sec = time
message.nano = (temp[2] << 32) | temp[3]
message.shares = temp[4]
message.name = temp[5].decode('ascii').rstrip(' ')
message.price = temp[6]
message.event = temp[8].decode('ascii')
return message
else:
raise ValueError('ITCH version ' + str(version) + ' is not supported')
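# Worked example (illustrative values; assumes the Message class defined earlier in this
# module): packing and decoding an ITCH 4.1 add-order payload with the same struct layout
# used in the 'A' branch above.
#   payload = struct.pack('>IQsI8sI', 123456789, 42, b'B', 100, b'AAPL    ', 1750000)
#   m = protocol(payload, 'A', time=34200, version=4.1)
#   # m.sec == 34200, m.nano == 123456789, m.refno == 42, m.buysell == 'B',
#   # m.shares == 100, m.name == 'AAPL', m.price == 1750000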
def unpack(fin, ver, date, nlevels, names, method='csv', fout=None, host=None, user=None):
"""Read ITCH data file, construct LOB, and write to database.
This method reads binary data from an ITCH data file, converts it into human-readable data, then saves time series of outgoing messages as well as reconstructed order book snapshots to a research database.
The version number of the ITCH data is specified as a float. Supported versions are: 4.0, 4.1, and 5.0.
"""
BUFFER_SIZE = 10 ** 4
orderlist = Orderlist()
booklist = Booklist(date, names, nlevels, method)
messagelist = Messagelist(date, names)
tradeslist = Messagelist(date, names)
noiilist = Messagelist(date, names)
if method == 'hdf5':
db = Database(path=fout, names=names, nlevels=nlevels, method='hdf5')
log_path = os.path.abspath('{}/../system.log'.format(fout))
with open(log_path, 'w') as system_file:
system_file.write('sec,nano,name,event\n')
elif method == 'csv':
db = Database(path=fout, names=names, nlevels=nlevels, method='csv')
log_path = '{}/system.log'.format(fout)
with open(log_path, 'w') as system_file:
system_file.write('sec,nano,name,event\n')
data = open(fin, 'rb')
message_reads = 0
message_writes = 0
trade_writes = 0
noii_writes = 0
reading = True
clock = 0
start = time.time()
while reading:
# read message
message_size = get_message_size(data.read(2))
message_type = get_message_type(data.read(1))
message_bytes = data.read(message_size - 1)
message = get_message(message_bytes, message_type, date, clock, ver)
message_reads += 1
# update clock
if message_type == 'T':
if message.sec % 1800 == 0:
print('TIME={}'.format(message.sec))
clock = message.sec
# update system
if message_type == 'S':
print('SYSTEM MESSAGE: {}'.format(message.event))
message.to_txt(log_path)
if message.event == 'C': # end messages
reading = False
if message_type == 'H':
if message.name in names:
print('TRADING MESSAGE ({}): {}'.format(message.name, message.event))
message.to_txt(log_path)
# TODO: What to do about halts?
if message.event == 'H': # halted (all US)
pass
elif message.event == 'P': # paused (all US)
pass
elif message.event == 'Q': # quotation only
pass
elif message.event == 'T': # trading on nasdaq
pass
# complete message
if message_type == 'U':
message, del_message, add_message = message.split()
orderlist.complete_message(message)
orderlist.complete_message(del_message)
orderlist.complete_message(add_message)
if message.name in names:
message_writes += 1
orderlist.update(del_message)
booklist.update(del_message)
orderlist.add(add_message)
booklist.update(add_message)
messagelist.add(message)
# print('ORDER MESSAGE <REPLACE>')
elif message_type in ('E', 'C', 'X', 'D'):
orderlist.complete_message(message)
if message.name in names:
message_writes += 1
orderlist.update(message)
booklist.update(message)
messagelist.add(message)
# print('ORDER MESSAGE')
elif message_type in ('A', 'F'):
if message.name in names:
message_writes += 1
orderlist.add(message)
booklist.update(message)
messagelist.add(message)
# print('ORDER MESSAGE')
elif message_type == 'P':
if message.name in names:
trade_writes += 1
tradeslist.add(message)
# print('TRADE MESSAGE')
elif message_type in ('Q', 'I'):
if message.name in names:
noii_writes += 1
noiilist.add(message)
# print('NOII MESSAGE')
# write message
if method == 'hdf5':
if message_type in ('U', 'A', 'F', 'E', 'C', 'X', 'D'):
if message.name in names:
if len(messagelist.messages[message.name]) == BUFFER_SIZE:
messagelist.to_hdf5(name=message.name, db=db, grp='messages')
if len(booklist.books[message.name]['hist']) == BUFFER_SIZE:
booklist.to_hdf5(name=message.name, db=db)
elif message_type == 'P':
if message.name in names:
if len(tradeslist.messages[message.name]) == BUFFER_SIZE:
tradeslist.to_hdf5(name=message.name, db=db, grp='trades')
elif message_type in ('Q', 'I'):
if message.name in names:
if len(noiilist.messages[message.name]) == BUFFER_SIZE:
noiilist.to_hdf5(name=message.name, db=db, grp='noii')
elif method == 'csv':
if message_type in ('U', 'A', 'F', 'E', 'C', 'X', 'D'):
if message.name in names:
if len(messagelist.messages[message.name]) == BUFFER_SIZE:
messagelist.to_txt(name=message.name, db=db, grp='messages')
if len(booklist.books[message.name]['hist']) == BUFFER_SIZE:
booklist.to_txt(name=message.name, db=db)
elif message_type == 'P':
if message.name in names:
if len(tradeslist.messages[message.name]) == BUFFER_SIZE:
tradeslist.to_txt(name=message.name, db=db, grp='trades')
elif message_type in ('Q', 'I'):
if message.name in names:
if len(noiilist.messages[message.name]) == BUFFER_SIZE:
noiilist.to_txt(name=message.name, db=db, grp='noii')
# clean up
print('Cleaning up...')
for name in names:
if method == 'hdf5':
messagelist.to_hdf5(name=name, db=db, grp='messages')
booklist.to_hdf5(name=name, db=db)
tradeslist.to_hdf5(name=name, db=db, grp='trades')
noiilist.to_hdf5(name=name, db=db, grp='noii')
elif method == 'csv':
messagelist.to_txt(name=name, db=db, grp='messages')
booklist.to_txt(name=name, db=db)
tradeslist.to_txt(name=name, db=db, grp='trades')
noiilist.to_txt(name=name, db=db, grp='noii')
stop = time.time()
data.close()
db.close()
print('Elapsed time: {} seconds'.format(stop - start))
print('Messages read: {}'.format(message_reads))
print('Messages written: {}'.format(message_writes))
print('Trades written: {}'.format(trade_writes))
print('NOII written: {}'.format(noii_writes))
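# Illustrative call (file path, date string and tickers are placeholders, not real data):
#   unpack(fin='itch/S070113-v41.bin', ver=4.1, date='070113', nlevels=10,
#          names=['AAPL', 'GOOG'], method='csv', fout='output')
# This replays the raw feed once, flushing per-name message, book, trade and NOII buffers
# every BUFFER_SIZE rows through the Database and Messagelist helpers defined elsewhere
# in this module.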
def load_hdf5(db, name, grp):
"""Read data from database and return pd.DataFrames."""
if grp == 'messages':
try:
with h5py.File(db, 'r') as f:
try:
messages = f['/messages/' + name]
data = messages[:, :]
T, N = data.shape
columns = ['sec',
'nano',
'type',
'side',
'price',
'shares',
'refno',
'newrefno']
df = pd.DataFrame(data, index=np.arange(0, T), columns=columns)
return df
except KeyError as e:
print('Could not find name {} in messages'.format(name))
except OSError as e:
print('Could not find file {}'.format(db))
if grp == 'books':
try:
with h5py.File(db, 'r') as f:
try:
data = f['/orderbooks/' + name]
nlevels = int((data.shape[1] - 2) / 4)
pidx = list(range(2, 2 + nlevels))
pidx.extend(list(range(2 + nlevels, 2 + 2 * nlevels)))
vidx = list(range(2 + 2 * nlevels, 2 + 3 * nlevels))
vidx.extend(list(range(2 + 3 * nlevels, 2 + 4 * nlevels)))
timestamps = data[:, 0:2]
prices = data[:, pidx]
volume = data[:, vidx]
base_columns = [str(i) for i in list(range(1, nlevels + 1))]
price_columns = ['bidprc.' + i for i in base_columns]
volume_columns = ['bidvol.' + i for i in base_columns]
price_columns.extend(['askprc.' + i for i in base_columns])
volume_columns.extend(['askvol.' + i for i in base_columns])
df_time = pd.DataFrame(timestamps, columns=['sec', 'nano'])
df_price = pd.DataFrame(prices, columns=price_columns)
df_volume = pd.DataFrame(volume, columns=volume_columns)
df_price = | pd.concat([df_time, df_price], axis=1) | pandas.concat |
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import scipy.stats as stats
from scipy.optimize import brentq
from concentration import *
from uniform_concentration import *
import pdb
def plot_upper_tail(ns,s,ms,delta,maxiter):
plt.figure()
# Plot upper tail
for m in ms:
eta_star_upper = get_eta_star_upper(ns[0], m, alpha, delta, maxiter)
shats = [shat_upper_tail(s, n, m, delta, eta_star_upper, maxiter) for n in ns]
plt.plot(ns,shats,label=f'm={m}')
plt.xscale('log')
plt.axhline(y=s, xmin=0, xmax=1, linestyle='dashed')
plt.ylim([s-0.02,1])
plt.legend()
plt.savefig('../outputs/shat_upper_tail.pdf')
def plot_required_fdp(ns, m, alphas, deltas, maxiter):
sns.set(font_scale=1.5)
columns = ['alpha_plus','n','m','alpha','delta']
concat_list = []
# Plot upper tail
for i in range(len(alphas)):
alpha = alphas[i]
for j in range(len(deltas)):
delta = deltas[j]
local_list = [pd.DataFrame.from_dict({'alpha_plus': [required_empirical_risk(alpha, n, m, alpha, delta, maxiter, 100),],
'n': [n,],
'm': [m,],
'alpha': [alpha,],
'delta': [delta,]})
for n in ns]
concat_list = concat_list + local_list
df = | pd.concat(concat_list, ignore_index=True) | pandas.concat |
from collections import OrderedDict
from functools import partial
import matplotlib.pyplot as plt
from scipy.linalg import toeplitz
import scipy.sparse as sps
import numpy as np
import pandas as pd
import bioframe
import cooler
from .lib.numutils import LazyToeplitz
def make_bin_aligned_windows(binsize, chroms, centers_bp, flank_bp=0,
region_start_bp=0, ignore_index=False):
"""
Convert genomic loci into bin spans on a fixed bin-size segmentation of a
genomic region. Window limits are adjusted to align with bin edges.
Parameters
-----------
binsize : int
Bin size (resolution) in base pairs.
chroms : 1D array-like
Column of chromosome names.
centers_bp : 1D or nx2 array-like
If 1D, center points of each window. If 2D, the starts and ends.
flank_bp : int
Distance in base pairs to extend windows on either side.
region_start_bp : int, optional
If region is a subset of a chromosome, shift coordinates by this amount.
Default is 0.
ignore_index : bool, optional
If True, do not propagate the index of chroms when it is a Series.
Default is False.
Returns
-------
DataFrame with columns:
'chrom' - chromosome
'start', 'end' - window limits in base pairs
'lo', 'hi' - window limits in bins
"""
if not (flank_bp % binsize == 0):
raise ValueError(
"Flanking distance must be divisible by the bin size.")
if isinstance(chroms, pd.Series) and not ignore_index:
index = chroms.index
else:
index = None
chroms = np.asarray(chroms)
centers_bp = np.asarray(centers_bp)
if len(centers_bp.shape) == 2:
left_bp = centers_bp[:, 0]
right_bp = centers_bp[:, 1]
else:
left_bp = right_bp = centers_bp
if np.any(left_bp > right_bp):
raise ValueError("Found interval with end > start.")
left = left_bp - region_start_bp
right = right_bp - region_start_bp
left_bin = (left / binsize).astype(int)
right_bin = (right / binsize).astype(int)
flank_bin = flank_bp // binsize
lo = left_bin - flank_bin
hi = right_bin + flank_bin + 1
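# Worked example of the bin arithmetic above (illustrative numbers): with binsize=1000,
# flank_bp=2000 and a point at 12345 bp, left_bin = right_bin = 12 and flank_bin = 2,
# so the window spans bins lo=10 to hi=15 (hi exclusive), i.e. 10,000-15,000 bp.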
windows = | pd.DataFrame(index=index) | pandas.DataFrame |
from datetime import datetime, timedelta
import inspect
import numpy as np
import pytest
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_interval_dtype,
is_object_dtype,
)
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
IntervalIndex,
MultiIndex,
RangeIndex,
Series,
Timestamp,
cut,
date_range,
to_datetime,
)
import pandas.util.testing as tm
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
class TestDataFrameAlterAxes:
def test_set_index_directly(self, float_string_frame):
df = float_string_frame
idx = Index(np.arange(len(df))[::-1])
df.index = idx
tm.assert_index_equal(df.index, idx)
with pytest.raises(ValueError, match="Length mismatch"):
df.index = idx[::2]
def test_set_index(self, float_string_frame):
df = float_string_frame
idx = Index(np.arange(len(df))[::-1])
df = df.set_index(idx)
tm.assert_index_equal(df.index, idx)
with pytest.raises(ValueError, match="Length mismatch"):
df.set_index(idx[::2])
def test_set_index_cast(self):
# issue casting an index then set_index
df = DataFrame(
{"A": [1.1, 2.2, 3.3], "B": [5.0, 6.1, 7.2]}, index=[2010, 2011, 2012]
)
df2 = df.set_index(df.index.astype(np.int32))
tm.assert_frame_equal(df, df2)
# A has duplicate values, C does not
@pytest.mark.parametrize("keys", ["A", "C", ["A", "B"], ("tuple", "as", "label")])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_drop_inplace(self, frame_of_index_cols, drop, inplace, keys):
df = frame_of_index_cols
if isinstance(keys, list):
idx = MultiIndex.from_arrays([df[x] for x in keys], names=keys)
else:
idx = Index(df[keys], name=keys)
expected = df.drop(keys, axis=1) if drop else df
expected.index = idx
if inplace:
result = df.copy()
result.set_index(keys, drop=drop, inplace=True)
else:
result = df.set_index(keys, drop=drop)
tm.assert_frame_equal(result, expected)
# A has duplicate values, C does not
@pytest.mark.parametrize("keys", ["A", "C", ["A", "B"], ("tuple", "as", "label")])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_append(self, frame_of_index_cols, drop, keys):
df = frame_of_index_cols
keys = keys if isinstance(keys, list) else [keys]
idx = MultiIndex.from_arrays(
[df.index] + [df[x] for x in keys], names=[None] + keys
)
expected = df.drop(keys, axis=1) if drop else df.copy()
expected.index = idx
result = df.set_index(keys, drop=drop, append=True)
tm.assert_frame_equal(result, expected)
# A has duplicate values, C does not
@pytest.mark.parametrize("keys", ["A", "C", ["A", "B"], ("tuple", "as", "label")])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_append_to_multiindex(self, frame_of_index_cols, drop, keys):
# append to existing multiindex
df = frame_of_index_cols.set_index(["D"], drop=drop, append=True)
keys = keys if isinstance(keys, list) else [keys]
expected = frame_of_index_cols.set_index(["D"] + keys, drop=drop, append=True)
result = df.set_index(keys, drop=drop, append=True)
tm.assert_frame_equal(result, expected)
def test_set_index_after_mutation(self):
# GH1590
df = DataFrame({"val": [0, 1, 2], "key": ["<KEY>"]})
expected = DataFrame({"val": [1, 2]}, Index(["b", "c"], name="key"))
df2 = df.loc[df.index.map(lambda indx: indx >= 1)]
result = df2.set_index("key")
tm.assert_frame_equal(result, expected)
# MultiIndex constructor does not work directly on Series -> lambda
# Add list-of-list constructor because list is ambiguous -> lambda
# also test index name if append=True (name is duplicate here for B)
@pytest.mark.parametrize(
"box",
[
Series,
Index,
np.array,
list,
lambda x: [list(x)],
lambda x: MultiIndex.from_arrays([x]),
],
)
@pytest.mark.parametrize(
"append, index_name", [(True, None), (True, "B"), (True, "test"), (False, None)]
)
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_pass_single_array(
self, frame_of_index_cols, drop, append, index_name, box
):
df = frame_of_index_cols
df.index.name = index_name
key = box(df["B"])
if box == list:
# list of strings gets interpreted as list of keys
msg = "['one', 'two', 'three', 'one', 'two']"
with pytest.raises(KeyError, match=msg):
df.set_index(key, drop=drop, append=append)
else:
# np.array/list-of-list "forget" the name of B
name_mi = getattr(key, "names", None)
name = [getattr(key, "name", None)] if name_mi is None else name_mi
result = df.set_index(key, drop=drop, append=append)
# only valid column keys are dropped
# since B is always passed as array above, nothing is dropped
expected = df.set_index(["B"], drop=False, append=append)
expected.index.names = [index_name] + name if append else name
tm.assert_frame_equal(result, expected)
# MultiIndex constructor does not work directly on Series -> lambda
# also test index name if append=True (name is duplicate here for A & B)
@pytest.mark.parametrize(
"box", [Series, Index, np.array, list, lambda x: MultiIndex.from_arrays([x])]
)
@pytest.mark.parametrize(
"append, index_name",
[(True, None), (True, "A"), (True, "B"), (True, "test"), (False, None)],
)
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_pass_arrays(
self, frame_of_index_cols, drop, append, index_name, box
):
df = frame_of_index_cols
df.index.name = index_name
keys = ["A", box(df["B"])]
# np.array/list "forget" the name of B
names = ["A", None if box in [np.array, list, tuple, iter] else "B"]
result = df.set_index(keys, drop=drop, append=append)
# only valid column keys are dropped
# since B is always passed as array above, only A is dropped, if at all
expected = df.set_index(["A", "B"], drop=False, append=append)
expected = expected.drop("A", axis=1) if drop else expected
expected.index.names = [index_name] + names if append else names
tm.assert_frame_equal(result, expected)
# MultiIndex constructor does not work directly on Series -> lambda
# We also emulate a "constructor" for the label -> lambda
# also test index name if append=True (name is duplicate here for A)
@pytest.mark.parametrize(
"box2",
[
Series,
Index,
np.array,
list,
iter,
lambda x: MultiIndex.from_arrays([x]),
lambda x: x.name,
],
)
@pytest.mark.parametrize(
"box1",
[
Series,
Index,
np.array,
list,
iter,
lambda x: MultiIndex.from_arrays([x]),
lambda x: x.name,
],
)
@pytest.mark.parametrize(
"append, index_name", [(True, None), (True, "A"), (True, "test"), (False, None)]
)
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_pass_arrays_duplicate(
self, frame_of_index_cols, drop, append, index_name, box1, box2
):
df = frame_of_index_cols
df.index.name = index_name
keys = [box1(df["A"]), box2(df["A"])]
result = df.set_index(keys, drop=drop, append=append)
# if either box is iter, it has been consumed; re-read
keys = [box1(df["A"]), box2(df["A"])]
# need to adapt first drop for case that both keys are 'A' --
# cannot drop the same column twice;
# use "is" because == would give ambiguous Boolean error for containers
first_drop = (
False if (keys[0] is "A" and keys[1] is "A") else drop # noqa: F632
)
# to test against already-tested behaviour, we add sequentially,
# hence second append always True; must wrap keys in list, otherwise
# box = list would be interpreted as keys
expected = df.set_index([keys[0]], drop=first_drop, append=append)
expected = expected.set_index([keys[1]], drop=drop, append=True)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_pass_multiindex(self, frame_of_index_cols, drop, append):
df = frame_of_index_cols
keys = MultiIndex.from_arrays([df["A"], df["B"]], names=["A", "B"])
result = df.set_index(keys, drop=drop, append=append)
# setting with a MultiIndex will never drop columns
expected = df.set_index(["A", "B"], drop=False, append=append)
tm.assert_frame_equal(result, expected)
def test_set_index_verify_integrity(self, frame_of_index_cols):
df = frame_of_index_cols
with pytest.raises(ValueError, match="Index has duplicate keys"):
df.set_index("A", verify_integrity=True)
# with MultiIndex
with pytest.raises(ValueError, match="Index has duplicate keys"):
df.set_index([df["A"], df["A"]], verify_integrity=True)
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_raise_keys(self, frame_of_index_cols, drop, append):
df = frame_of_index_cols
with pytest.raises(KeyError, match="['foo', 'bar', 'baz']"):
# column names are A-E, as well as one tuple
df.set_index(["foo", "bar", "baz"], drop=drop, append=append)
# non-existent key in list with arrays
with pytest.raises(KeyError, match="X"):
df.set_index([df["A"], df["B"], "X"], drop=drop, append=append)
msg = "[('foo', 'foo', 'foo', 'bar', 'bar')]"
# tuples always raise KeyError
with pytest.raises(KeyError, match=msg):
df.set_index(tuple(df["A"]), drop=drop, append=append)
# also within a list
with pytest.raises(KeyError, match=msg):
df.set_index(["A", df["A"], tuple(df["A"])], drop=drop, append=append)
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("drop", [True, False])
@pytest.mark.parametrize("box", [set], ids=["set"])
def test_set_index_raise_on_type(self, frame_of_index_cols, box, drop, append):
df = frame_of_index_cols
msg = 'The parameter "keys" may be a column key, .*'
# forbidden type, e.g. set
with pytest.raises(TypeError, match=msg):
df.set_index(box(df["A"]), drop=drop, append=append)
# forbidden type in list, e.g. set
with pytest.raises(TypeError, match=msg):
df.set_index(["A", df["A"], box(df["A"])], drop=drop, append=append)
# MultiIndex constructor does not work directly on Series -> lambda
@pytest.mark.parametrize(
"box",
[Series, Index, np.array, iter, lambda x: MultiIndex.from_arrays([x])],
ids=["Series", "Index", "np.array", "iter", "MultiIndex"],
)
@pytest.mark.parametrize("length", [4, 6], ids=["too_short", "too_long"])
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_raise_on_len(
self, frame_of_index_cols, box, length, drop, append
):
# GH 24984
df = frame_of_index_cols # has length 5
values = np.random.randint(0, 10, (length,))
msg = "Length mismatch: Expected 5 rows, received array of length.*"
# wrong length directly
with pytest.raises(ValueError, match=msg):
df.set_index(box(values), drop=drop, append=append)
# wrong length in list
with pytest.raises(ValueError, match=msg):
df.set_index(["A", df.A, box(values)], drop=drop, append=append)
def test_set_index_custom_label_type(self):
# GH 24969
class Thing:
def __init__(self, name, color):
self.name = name
self.color = color
def __str__(self):
return "<Thing {self.name!r}>".format(self=self)
# necessary for pretty KeyError
__repr__ = __str__
thing1 = Thing("One", "red")
thing2 = Thing("Two", "blue")
df = DataFrame({thing1: [0, 1], thing2: [2, 3]})
expected = DataFrame({thing1: [0, 1]}, index=Index([2, 3], name=thing2))
# use custom label directly
result = df.set_index(thing2)
tm.assert_frame_equal(result, expected)
# custom label wrapped in list
result = df.set_index([thing2])
tm.assert_frame_equal(result, expected)
# missing key
thing3 = Thing("Three", "pink")
msg = "<Thing 'Three'>"
with pytest.raises(KeyError, match=msg):
# missing label directly
df.set_index(thing3)
with pytest.raises(KeyError, match=msg):
# missing label in list
df.set_index([thing3])
def test_set_index_custom_label_hashable_iterable(self):
# GH 24969
# actual example discussed in GH 24984 was e.g. for shapely.geometry
# objects (e.g. a collection of Points) that can be both hashable and
# iterable; using frozenset as a stand-in for testing here
class Thing(frozenset):
# need to stabilize repr for KeyError (due to random order in sets)
def __repr__(self):
tmp = sorted(list(self))
# double curly brace prints one brace in format string
return "frozenset({{{}}})".format(", ".join(map(repr, tmp)))
thing1 = Thing(["One", "red"])
thing2 = Thing(["Two", "blue"])
df = DataFrame({thing1: [0, 1], thing2: [2, 3]})
expected = DataFrame({thing1: [0, 1]}, index=Index([2, 3], name=thing2))
# use custom label directly
result = df.set_index(thing2)
tm.assert_frame_equal(result, expected)
# custom label wrapped in list
result = df.set_index([thing2])
tm.assert_frame_equal(result, expected)
# missing key
thing3 = Thing(["Three", "pink"])
msg = r"frozenset\(\{'Three', 'pink'\}\)"
with pytest.raises(KeyError, match=msg):
# missing label directly
df.set_index(thing3)
with pytest.raises(KeyError, match=msg):
# missing label in list
df.set_index([thing3])
def test_set_index_custom_label_type_raises(self):
# GH 24969
# purposefully inherit from something unhashable
class Thing(set):
def __init__(self, name, color):
self.name = name
self.color = color
def __str__(self):
return "<Thing {self.name!r}>".format(self=self)
thing1 = Thing("One", "red")
thing2 = Thing("Two", "blue")
df = DataFrame([[0, 2], [1, 3]], columns=[thing1, thing2])
msg = 'The parameter "keys" may be a column key, .*'
with pytest.raises(TypeError, match=msg):
# use custom label directly
df.set_index(thing2)
with pytest.raises(TypeError, match=msg):
# custom label wrapped in list
df.set_index([thing2])
def test_construction_with_categorical_index(self):
ci = tm.makeCategoricalIndex(10)
ci.name = "B"
# with Categorical
df = DataFrame({"A": np.random.randn(10), "B": ci.values})
idf = df.set_index("B")
tm.assert_index_equal(idf.index, ci)
# from a CategoricalIndex
df = DataFrame({"A": np.random.randn(10), "B": ci})
idf = df.set_index("B")
tm.assert_index_equal(idf.index, ci)
# round-trip
idf = idf.reset_index().set_index("B")
tm.assert_index_equal(idf.index, ci)
def test_set_index_cast_datetimeindex(self):
df = DataFrame(
{
"A": [datetime(2000, 1, 1) + timedelta(i) for i in range(1000)],
"B": np.random.randn(1000),
}
)
idf = df.set_index("A")
assert isinstance(idf.index, DatetimeIndex)
def test_convert_dti_to_series(self):
# don't cast a DatetimeIndex WITH a tz, leave as object
# GH 6032
idx = DatetimeIndex(
to_datetime(["2013-1-1 13:00", "2013-1-2 14:00"]), name="B"
).tz_localize("US/Pacific")
df = DataFrame(np.random.randn(2, 1), columns=["A"])
expected = Series(
np.array(
[
Timestamp("2013-01-01 13:00:00-0800", tz="US/Pacific"),
Timestamp("2013-01-02 14:00:00-0800", tz="US/Pacific"),
],
dtype="object",
),
name="B",
)
# convert index to series
result = Series(idx)
tm.assert_series_equal(result, expected)
# assign to frame
df["B"] = idx
result = df["B"]
tm.assert_series_equal(result, expected)
# convert to series while keeping the timezone
result = idx.to_series(keep_tz=True, index=[0, 1])
tm.assert_series_equal(result, expected)
# convert to utc
with tm.assert_produces_warning(FutureWarning):
df["B"] = idx.to_series(keep_tz=False, index=[0, 1])
result = df["B"]
comp = Series(DatetimeIndex(expected.values).tz_localize(None), name="B")
tm.assert_series_equal(result, comp)
with tm.assert_produces_warning(FutureWarning) as m:
result = idx.to_series(index=[0, 1])
tm.assert_series_equal(result, expected.dt.tz_convert(None))
msg = (
"The default of the 'keep_tz' keyword in "
"DatetimeIndex.to_series will change to True in a future "
"release."
)
assert msg in str(m[0].message)
with tm.assert_produces_warning(FutureWarning):
result = idx.to_series(keep_tz=False, index=[0, 1])
tm.assert_series_equal(result, expected.dt.tz_convert(None))
# list of datetimes with a tz
df["B"] = idx.to_pydatetime()
result = df["B"]
tm.assert_series_equal(result, expected)
# GH 6785
# set the index manually
import pytz
df = DataFrame([{"ts": datetime(2014, 4, 1, tzinfo=pytz.utc), "foo": 1}])
expected = df.set_index("ts")
df.index = df["ts"]
df.pop("ts")
tm.assert_frame_equal(df, expected)
def test_reset_index_tz(self, tz_aware_fixture):
# GH 3950
# reset_index with single level
tz = tz_aware_fixture
idx = date_range("1/1/2011", periods=5, freq="D", tz=tz, name="idx")
df = DataFrame({"a": range(5), "b": ["A", "B", "C", "D", "E"]}, index=idx)
expected = DataFrame(
{
"idx": [
datetime(2011, 1, 1),
datetime(2011, 1, 2),
datetime(2011, 1, 3),
datetime(2011, 1, 4),
datetime(2011, 1, 5),
],
"a": range(5),
"b": ["A", "B", "C", "D", "E"],
},
columns=["idx", "a", "b"],
)
expected["idx"] = expected["idx"].apply(lambda d: Timestamp(d, tz=tz))
tm.assert_frame_equal(df.reset_index(), expected)
def test_set_index_timezone(self):
# GH 12358
# tz-aware Series should retain the tz
idx = to_datetime(["2014-01-01 10:10:10"], utc=True).tz_convert("Europe/Rome")
df = DataFrame({"A": idx})
assert df.set_index(idx).index[0].hour == 11
assert DatetimeIndex(Series(df.A))[0].hour == 11
assert df.set_index(df.A).index[0].hour == 11
def test_set_index_dst(self):
di = date_range("2006-10-29 00:00:00", periods=3, freq="H", tz="US/Pacific")
df = DataFrame(data={"a": [0, 1, 2], "b": [3, 4, 5]}, index=di).reset_index()
# single level
res = df.set_index("index")
exp = DataFrame(
data={"a": [0, 1, 2], "b": [3, 4, 5]}, index=Index(di, name="index")
)
tm.assert_frame_equal(res, exp)
# GH 12920
res = df.set_index(["index", "a"])
exp_index = MultiIndex.from_arrays([di, [0, 1, 2]], names=["index", "a"])
exp = DataFrame({"b": [3, 4, 5]}, index=exp_index)
tm.assert_frame_equal(res, exp)
def test_reset_index_with_intervals(self):
idx = IntervalIndex.from_breaks(np.arange(11), name="x")
original = DataFrame({"x": idx, "y": np.arange(10)})[["x", "y"]]
result = original.set_index("x")
expected = DataFrame({"y": np.arange(10)}, index=idx)
tm.assert_frame_equal(result, expected)
result2 = result.reset_index()
tm.assert_frame_equal(result2, original)
def test_set_index_multiindexcolumns(self):
columns = MultiIndex.from_tuples([("foo", 1), ("foo", 2), ("bar", 1)])
df = DataFrame(np.random.randn(3, 3), columns=columns)
result = df.set_index(df.columns[0])
expected = df.iloc[:, 1:]
expected.index = df.iloc[:, 0].values
expected.index.names = [df.columns[0]]
tm.assert_frame_equal(result, expected)
def test_set_index_empty_column(self):
# GH 1971
df = DataFrame(
[
{"a": 1, "p": 0},
{"a": 2, "m": 10},
{"a": 3, "m": 11, "p": 20},
{"a": 4, "m": 12, "p": 21},
],
columns=("a", "m", "p", "x"),
)
result = df.set_index(["a", "x"])
expected = df[["m", "p"]]
expected.index = MultiIndex.from_arrays([df["a"], df["x"]], names=["a", "x"])
tm.assert_frame_equal(result, expected)
def test_set_columns(self, float_string_frame):
cols = Index(np.arange(len(float_string_frame.columns)))
float_string_frame.columns = cols
with pytest.raises(ValueError, match="Length mismatch"):
float_string_frame.columns = cols[::2]
def test_dti_set_index_reindex(self):
# GH 6631
df = DataFrame(np.random.random(6))
idx1 = date_range("2011/01/01", periods=6, freq="M", tz="US/Eastern")
idx2 = date_range("2013", periods=6, freq="A", tz="Asia/Tokyo")
df = df.set_index(idx1)
tm.assert_index_equal(df.index, idx1)
df = df.reindex(idx2)
tm.assert_index_equal(df.index, idx2)
# GH 11314
# with tz
index = date_range(
datetime(2015, 10, 1), datetime(2015, 10, 1, 23), freq="H", tz="US/Eastern"
)
df = DataFrame(np.random.randn(24, 1), columns=["a"], index=index)
new_index = date_range(
datetime(2015, 10, 2), datetime(2015, 10, 2, 23), freq="H", tz="US/Eastern"
)
result = df.set_index(new_index)
assert result.index.freq == index.freq
# Renaming
def test_rename(self, float_frame):
mapping = {"A": "a", "B": "b", "C": "c", "D": "d"}
renamed = float_frame.rename(columns=mapping)
renamed2 = float_frame.rename(columns=str.lower)
tm.assert_frame_equal(renamed, renamed2)
tm.assert_frame_equal(
renamed2.rename(columns=str.upper), float_frame, check_names=False
)
# index
data = {"A": {"foo": 0, "bar": 1}}
# gets sorted alphabetical
df = DataFrame(data)
renamed = df.rename(index={"foo": "bar", "bar": "foo"})
tm.assert_index_equal(renamed.index, Index(["foo", "bar"]))
renamed = df.rename(index=str.upper)
tm.assert_index_equal(renamed.index, Index(["BAR", "FOO"]))
# have to pass something
with pytest.raises(TypeError, match="must pass an index to rename"):
float_frame.rename()
# partial columns
renamed = float_frame.rename(columns={"C": "foo", "D": "bar"})
tm.assert_index_equal(renamed.columns, Index(["A", "B", "foo", "bar"]))
# other axis
renamed = float_frame.T.rename(index={"C": "foo", "D": "bar"})
tm.assert_index_equal(renamed.index, Index(["A", "B", "foo", "bar"]))
# index with name
index = Index(["foo", "bar"], name="name")
renamer = DataFrame(data, index=index)
renamed = renamer.rename(index={"foo": "bar", "bar": "foo"})
tm.assert_index_equal(renamed.index, Index(["bar", "foo"], name="name"))
assert renamed.index.name == renamer.index.name
def test_rename_axis_inplace(self, float_frame):
# GH 15704
expected = float_frame.rename_axis("foo")
result = float_frame.copy()
no_return = result.rename_axis("foo", inplace=True)
assert no_return is None
tm.assert_frame_equal(result, expected)
expected = float_frame.rename_axis("bar", axis=1)
result = float_frame.copy()
no_return = result.rename_axis("bar", axis=1, inplace=True)
assert no_return is None
tm.assert_frame_equal(result, expected)
def test_rename_axis_raises(self):
# https://github.com/pandas-dev/pandas/issues/17833
df = DataFrame({"A": [1, 2], "B": [1, 2]})
with pytest.raises(ValueError, match="Use `.rename`"):
df.rename_axis(id, axis=0)
with pytest.raises(ValueError, match="Use `.rename`"):
df.rename_axis({0: 10, 1: 20}, axis=0)
with pytest.raises(ValueError, match="Use `.rename`"):
df.rename_axis(id, axis=1)
with pytest.raises(ValueError, match="Use `.rename`"):
df["A"].rename_axis(id)
def test_rename_axis_mapper(self):
# GH 19978
mi = MultiIndex.from_product([["a", "b", "c"], [1, 2]], names=["ll", "nn"])
df = DataFrame(
{"x": [i for i in range(len(mi))], "y": [i * 10 for i in range(len(mi))]},
index=mi,
)
# Test for rename of the Index object of columns
result = df.rename_axis("cols", axis=1)
tm.assert_index_equal(result.columns, Index(["x", "y"], name="cols"))
# Test for rename of the Index object of columns using dict
result = result.rename_axis(columns={"cols": "new"}, axis=1)
tm.assert_index_equal(result.columns, Index(["x", "y"], name="new"))
# Test for renaming index using dict
result = df.rename_axis(index={"ll": "foo"})
assert result.index.names == ["foo", "nn"]
# Test for renaming index using a function
result = df.rename_axis(index=str.upper, axis=0)
assert result.index.names == ["LL", "NN"]
# Test for renaming index providing complete list
result = df.rename_axis(index=["foo", "goo"])
assert result.index.names == ["foo", "goo"]
# Test for changing index and columns at same time
sdf = df.reset_index().set_index("nn").drop(columns=["ll", "y"])
result = sdf.rename_axis(index="foo", columns="meh")
assert result.index.name == "foo"
assert result.columns.name == "meh"
# Test different error cases
with pytest.raises(TypeError, match="Must pass"):
df.rename_axis(index="wrong")
with pytest.raises(ValueError, match="Length of names"):
df.rename_axis(index=["wrong"])
with pytest.raises(TypeError, match="bogus"):
df.rename_axis(bogus=None)
@pytest.mark.parametrize(
"kwargs, rename_index, rename_columns",
[
({"mapper": None, "axis": 0}, True, False),
({"mapper": None, "axis": 1}, False, True),
({"index": None}, True, False),
({"columns": None}, False, True),
({"index": None, "columns": None}, True, True),
({}, False, False),
],
)
def test_rename_axis_none(self, kwargs, rename_index, rename_columns):
# GH 25034
index = Index(list("abc"), name="foo")
columns = Index(["col1", "col2"], name="bar")
data = np.arange(6).reshape(3, 2)
df = DataFrame(data, index, columns)
result = df.rename_axis(**kwargs)
expected_index = index.rename(None) if rename_index else index
expected_columns = columns.rename(None) if rename_columns else columns
expected = DataFrame(data, expected_index, expected_columns)
tm.assert_frame_equal(result, expected)
def test_rename_multiindex(self):
tuples_index = [("foo1", "bar1"), ("foo2", "bar2")]
tuples_columns = [("fizz1", "buzz1"), ("fizz2", "buzz2")]
index = MultiIndex.from_tuples(tuples_index, names=["foo", "bar"])
columns = MultiIndex.from_tuples(tuples_columns, names=["fizz", "buzz"])
df = DataFrame([(0, 0), (1, 1)], index=index, columns=columns)
#
# without specifying level -> across all levels
renamed = df.rename(
index={"foo1": "foo3", "bar2": "bar3"},
columns={"fizz1": "fizz3", "buzz2": "buzz3"},
)
new_index = MultiIndex.from_tuples(
[("foo3", "bar1"), ("foo2", "bar3")], names=["foo", "bar"]
)
new_columns = MultiIndex.from_tuples(
[("fizz3", "buzz1"), ("fizz2", "buzz3")], names=["fizz", "buzz"]
)
tm.assert_index_equal(renamed.index, new_index)
tm.assert_index_equal(renamed.columns, new_columns)
assert renamed.index.names == df.index.names
assert renamed.columns.names == df.columns.names
#
# with specifying a level (GH13766)
# dict
new_columns = MultiIndex.from_tuples(
[("fizz3", "buzz1"), ("fizz2", "buzz2")], names=["fizz", "buzz"]
)
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level=0)
tm.assert_index_equal(renamed.columns, new_columns)
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level="fizz")
tm.assert_index_equal(renamed.columns, new_columns)
new_columns = MultiIndex.from_tuples(
[("fizz1", "buzz1"), ("fizz2", "buzz3")], names=["fizz", "buzz"]
)
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level=1)
tm.assert_index_equal(renamed.columns, new_columns)
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level="buzz")
tm.assert_index_equal(renamed.columns, new_columns)
# function
func = str.upper
new_columns = MultiIndex.from_tuples(
[("FIZZ1", "buzz1"), ("FIZZ2", "buzz2")], names=["fizz", "buzz"]
)
renamed = df.rename(columns=func, level=0)
tm.assert_index_equal(renamed.columns, new_columns)
renamed = df.rename(columns=func, level="fizz")
tm.assert_index_equal(renamed.columns, new_columns)
new_columns = MultiIndex.from_tuples(
[("fizz1", "BUZZ1"), ("fizz2", "BUZZ2")], names=["fizz", "buzz"]
)
renamed = df.rename(columns=func, level=1)
tm.assert_index_equal(renamed.columns, new_columns)
renamed = df.rename(columns=func, level="buzz")
tm.assert_index_equal(renamed.columns, new_columns)
# index
new_index = MultiIndex.from_tuples(
[("foo3", "bar1"), ("foo2", "bar2")], names=["foo", "bar"]
)
renamed = df.rename(index={"foo1": "foo3", "bar2": "bar3"}, level=0)
tm.assert_index_equal(renamed.index, new_index)
def test_rename_nocopy(self, float_frame):
renamed = float_frame.rename(columns={"C": "foo"}, copy=False)
renamed["foo"] = 1.0
assert (float_frame["C"] == 1.0).all()
def test_rename_inplace(self, float_frame):
float_frame.rename(columns={"C": "foo"})
assert "C" in float_frame
assert "foo" not in float_frame
c_id = id(float_frame["C"])
float_frame = float_frame.copy()
float_frame.rename(columns={"C": "foo"}, inplace=True)
assert "C" not in float_frame
assert "foo" in float_frame
assert id(float_frame["foo"]) != c_id
def test_rename_bug(self):
# GH 5344
# rename set ref_locs, and set_index was not resetting
df = DataFrame({0: ["foo", "bar"], 1: ["bah", "bas"], 2: [1, 2]})
df = df.rename(columns={0: "a"})
df = df.rename(columns={1: "b"})
df = df.set_index(["a", "b"])
df.columns = ["2001-01-01"]
expected = DataFrame(
[[1], [2]],
index=MultiIndex.from_tuples(
[("foo", "bah"), ("bar", "bas")], names=["a", "b"]
),
columns=["2001-01-01"],
)
tm.assert_frame_equal(df, expected)
def test_rename_bug2(self):
# GH 19497
# rename was changing Index to MultiIndex if Index contained tuples
df = DataFrame(data=np.arange(3), index=[(0, 0), (1, 1), (2, 2)], columns=["a"])
df = df.rename({(1, 1): (5, 4)}, axis="index")
expected = DataFrame(
data=np.arange(3), index=[(0, 0), (5, 4), (2, 2)], columns=["a"]
)
tm.assert_frame_equal(df, expected)
def test_rename_errors_raises(self):
df = DataFrame(columns=["A", "B", "C", "D"])
with pytest.raises(KeyError, match="'E'] not found in axis"):
df.rename(columns={"A": "a", "E": "e"}, errors="raise")
@pytest.mark.parametrize(
"mapper, errors, expected_columns",
[
({"A": "a", "E": "e"}, "ignore", ["a", "B", "C", "D"]),
({"A": "a"}, "raise", ["a", "B", "C", "D"]),
(str.lower, "raise", ["a", "b", "c", "d"]),
],
)
def test_rename_errors(self, mapper, errors, expected_columns):
# GH 13473
# rename now works with errors parameter
df = DataFrame(columns=["A", "B", "C", "D"])
result = df.rename(columns=mapper, errors=errors)
expected = DataFrame(columns=expected_columns)
tm.assert_frame_equal(result, expected)
def test_reorder_levels(self):
index = MultiIndex(
levels=[["bar"], ["one", "two", "three"], [0, 1]],
codes=[[0, 0, 0, 0, 0, 0], [0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1]],
names=["L0", "L1", "L2"],
)
df = DataFrame({"A": np.arange(6), "B": np.arange(6)}, index=index)
# no change, position
result = df.reorder_levels([0, 1, 2])
tm.assert_frame_equal(df, result)
# no change, labels
result = df.reorder_levels(["L0", "L1", "L2"])
tm.assert_frame_equal(df, result)
# rotate, position
result = df.reorder_levels([1, 2, 0])
e_idx = MultiIndex(
levels=[["one", "two", "three"], [0, 1], ["bar"]],
codes=[[0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1], [0, 0, 0, 0, 0, 0]],
names=["L1", "L2", "L0"],
)
expected = DataFrame({"A": np.arange(6), "B": np.arange(6)}, index=e_idx)
tm.assert_frame_equal(result, expected)
result = df.reorder_levels([0, 0, 0])
e_idx = MultiIndex(
levels=[["bar"], ["bar"], ["bar"]],
codes=[[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]],
names=["L0", "L0", "L0"],
)
expected = DataFrame({"A": np.arange(6), "B": np.arange(6)}, index=e_idx)
tm.assert_frame_equal(result, expected)
result = df.reorder_levels(["L0", "L0", "L0"])
tm.assert_frame_equal(result, expected)
def test_reset_index(self, float_frame):
stacked = float_frame.stack()[::2]
stacked = DataFrame({"foo": stacked, "bar": stacked})
names = ["first", "second"]
stacked.index.names = names
deleveled = stacked.reset_index()
for i, (lev, level_codes) in enumerate(
zip(stacked.index.levels, stacked.index.codes)
):
values = lev.take(level_codes)
name = names[i]
tm.assert_index_equal(values, Index(deleveled[name]))
stacked.index.names = [None, None]
deleveled2 = stacked.reset_index()
tm.assert_series_equal(
deleveled["first"], deleveled2["level_0"], check_names=False
)
tm.assert_series_equal(
deleveled["second"], deleveled2["level_1"], check_names=False
)
# default name assigned
rdf = float_frame.reset_index()
exp = Series(float_frame.index.values, name="index")
tm.assert_series_equal(rdf["index"], exp)
# default name assigned, corner case
df = float_frame.copy()
df["index"] = "foo"
rdf = df.reset_index()
exp = Series(float_frame.index.values, name="level_0")
tm.assert_series_equal(rdf["level_0"], exp)
# but this is ok
float_frame.index.name = "index"
deleveled = float_frame.reset_index()
tm.assert_series_equal(deleveled["index"], Series(float_frame.index))
tm.assert_index_equal(deleveled.index, Index(np.arange(len(deleveled))))
# preserve column names
float_frame.columns.name = "columns"
resetted = float_frame.reset_index()
assert resetted.columns.name == "columns"
# only remove certain columns
df = float_frame.reset_index().set_index(["index", "A", "B"])
rs = df.reset_index(["A", "B"])
# TODO should reset_index check_names ?
tm.assert_frame_equal(rs, float_frame, check_names=False)
rs = df.reset_index(["index", "A", "B"])
tm.assert_frame_equal(rs, float_frame.reset_index(), check_names=False)
rs = df.reset_index(["index", "A", "B"])
tm.assert_frame_equal(rs, float_frame.reset_index(), check_names=False)
rs = df.reset_index("A")
xp = float_frame.reset_index().set_index(["index", "B"])
tm.assert_frame_equal(rs, xp, check_names=False)
# test resetting in place
df = float_frame.copy()
resetted = float_frame.reset_index()
df.reset_index(inplace=True)
tm.assert_frame_equal(df, resetted, check_names=False)
df = float_frame.reset_index().set_index(["index", "A", "B"])
rs = df.reset_index("A", drop=True)
xp = float_frame.copy()
del xp["A"]
xp = xp.set_index(["B"], append=True)
tm.assert_frame_equal(rs, xp, check_names=False)
def test_reset_index_name(self):
df = DataFrame(
[[1, 2, 3, 4], [5, 6, 7, 8]],
columns=["A", "B", "C", "D"],
index=Index(range(2), name="x"),
)
assert df.reset_index().index.name is None
assert df.reset_index(drop=True).index.name is None
df.reset_index(inplace=True)
assert df.index.name is None
def test_reset_index_level(self):
df = DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]], columns=["A", "B", "C", "D"])
for levels in ["A", "B"], [0, 1]:
# With MultiIndex
result = df.set_index(["A", "B"]).reset_index(level=levels[0])
tm.assert_frame_equal(result, df.set_index("B"))
result = df.set_index(["A", "B"]).reset_index(level=levels[:1])
tm.assert_frame_equal(result, df.set_index("B"))
result = df.set_index(["A", "B"]).reset_index(level=levels)
tm.assert_frame_equal(result, df)
result = df.set_index(["A", "B"]).reset_index(level=levels, drop=True)
tm.assert_frame_equal(result, df[["C", "D"]])
# With single-level Index (GH 16263)
result = df.set_index("A").reset_index(level=levels[0])
tm.assert_frame_equal(result, df)
result = df.set_index("A").reset_index(level=levels[:1])
tm.assert_frame_equal(result, df)
result = df.set_index(["A"]).reset_index(level=levels[0], drop=True)
tm.assert_frame_equal(result, df[["B", "C", "D"]])
# Missing levels - for both MultiIndex and single-level Index:
for idx_lev in ["A", "B"], ["A"]:
with pytest.raises(KeyError, match="Level E "):
df.set_index(idx_lev).reset_index(level=["A", "E"])
with pytest.raises(IndexError, match="Too many levels"):
df.set_index(idx_lev).reset_index(level=[0, 1, 2])
def test_reset_index_right_dtype(self):
time = np.arange(0.0, 10, np.sqrt(2) / 2)
s1 = Series(
(9.81 * time ** 2) / 2, index=Index(time, name="time"), name="speed"
)
df = DataFrame(s1)
resetted = s1.reset_index()
assert resetted["time"].dtype == np.float64
resetted = df.reset_index()
assert resetted["time"].dtype == np.float64
def test_reset_index_multiindex_col(self):
vals = np.random.randn(3, 3).astype(object)
idx = ["x", "y", "z"]
full = np.hstack(([[x] for x in idx], vals))
df = DataFrame(
vals,
Index(idx, name="a"),
columns=[["b", "b", "c"], ["mean", "median", "mean"]],
)
rs = df.reset_index()
xp = DataFrame(
full, columns=[["a", "b", "b", "c"], ["", "mean", "median", "mean"]]
)
tm.assert_frame_equal(rs, xp)
rs = df.reset_index(col_fill=None)
xp = DataFrame(
full, columns=[["a", "b", "b", "c"], ["a", "mean", "median", "mean"]]
)
tm.assert_frame_equal(rs, xp)
rs = df.reset_index(col_level=1, col_fill="blah")
xp = DataFrame(
full, columns=[["blah", "b", "b", "c"], ["a", "mean", "median", "mean"]]
)
tm.assert_frame_equal(rs, xp)
df = DataFrame(
vals,
MultiIndex.from_arrays([[0, 1, 2], ["x", "y", "z"]], names=["d", "a"]),
columns=[["b", "b", "c"], ["mean", "median", "mean"]],
)
rs = df.reset_index("a")
xp = DataFrame(
full,
Index([0, 1, 2], name="d"),
columns=[["a", "b", "b", "c"], ["", "mean", "median", "mean"]],
)
tm.assert_frame_equal(rs, xp)
rs = df.reset_index("a", col_fill=None)
xp = DataFrame(
full,
Index(range(3), name="d"),
columns=[["a", "b", "b", "c"], ["a", "mean", "median", "mean"]],
)
tm.assert_frame_equal(rs, xp)
rs = df.reset_index("a", col_fill="blah", col_level=1)
xp = DataFrame(
full,
Index(range(3), name="d"),
columns=[["blah", "b", "b", "c"], ["a", "mean", "median", "mean"]],
)
tm.assert_frame_equal(rs, xp)
def test_reset_index_multiindex_nan(self):
# GH6322, testing reset_index on MultiIndexes
# when we have a nan or all nan
df = DataFrame(
{"A": ["a", "b", "c"], "B": [0, 1, np.nan], "C": np.random.rand(3)}
)
rs = df.set_index(["A", "B"]).reset_index()
tm.assert_frame_equal(rs, df)
df = DataFrame(
{"A": [np.nan, "b", "c"], "B": [0, 1, 2], "C": np.random.rand(3)}
)
rs = df.set_index(["A", "B"]).reset_index()
tm.assert_frame_equal(rs, df)
df = DataFrame({"A": ["a", "b", "c"], "B": [0, 1, 2], "C": [np.nan, 1.1, 2.2]})
rs = df.set_index(["A", "B"]).reset_index()
tm.assert_frame_equal(rs, df)
df = DataFrame(
{
"A": ["a", "b", "c"],
"B": [np.nan, np.nan, np.nan],
"C": np.random.rand(3),
}
)
rs = df.set_index(["A", "B"]).reset_index()
tm.assert_frame_equal(rs, df)
def test_reset_index_with_datetimeindex_cols(self):
# GH5818
#
df = DataFrame(
[[1, 2], [3, 4]],
columns=date_range("1/1/2013", "1/2/2013"),
index=["A", "B"],
)
result = df.reset_index()
expected = DataFrame(
[["A", 1, 2], ["B", 3, 4]],
columns=["index", datetime(2013, 1, 1), datetime(2013, 1, 2)],
)
tm.assert_frame_equal(result, expected)
def test_reset_index_range(self):
# GH 12071
df = DataFrame([[0, 0], [1, 1]], columns=["A", "B"], index=RangeIndex(stop=2))
result = df.reset_index()
assert isinstance(result.index, RangeIndex)
expected = DataFrame(
[[0, 0, 0], [1, 1, 1]],
columns=["index", "A", "B"],
index=RangeIndex(stop=2),
)
tm.assert_frame_equal(result, expected)
def test_set_index_names(self):
df = tm.makeDataFrame()
df.index.name = "name"
assert df.set_index(df.index).index.names == ["name"]
mi = MultiIndex.from_arrays(df[["A", "B"]].T.values, names=["A", "B"])
mi2 = MultiIndex.from_arrays(
df[["A", "B", "A", "B"]].T.values, names=["A", "B", "C", "D"]
)
df = df.set_index(["A", "B"])
assert df.set_index(df.index).index.names == ["A", "B"]
# Check that set_index isn't converting a MultiIndex into an Index
assert isinstance(df.set_index(df.index).index, MultiIndex)
# Check actual equality
tm.assert_index_equal(df.set_index(df.index).index, mi)
idx2 = df.index.rename(["C", "D"])
# Check that [MultiIndex, MultiIndex] yields a MultiIndex rather
# than a pair of tuples
assert isinstance(df.set_index([df.index, idx2]).index, MultiIndex)
# Check equality
tm.assert_index_equal(df.set_index([df.index, idx2]).index, mi2)
def test_rename_objects(self, float_string_frame):
renamed = float_string_frame.rename(columns=str.upper)
assert "FOO" in renamed
assert "foo" not in renamed
def test_rename_axis_style(self):
# https://github.com/pandas-dev/pandas/issues/12392
df = DataFrame({"A": [1, 2], "B": [1, 2]}, index=["X", "Y"])
expected = DataFrame({"a": [1, 2], "b": [1, 2]}, index=["X", "Y"])
result = df.rename(str.lower, axis=1)
tm.assert_frame_equal(result, expected)
result = df.rename(str.lower, axis="columns")
tm.assert_frame_equal(result, expected)
result = df.rename({"A": "a", "B": "b"}, axis=1)
tm.assert_frame_equal(result, expected)
result = df.rename({"A": "a", "B": "b"}, axis="columns")
tm.assert_frame_equal(result, expected)
# Index
expected = DataFrame({"A": [1, 2], "B": [1, 2]}, index=["x", "y"])
result = df.rename(str.lower, axis=0)
tm.assert_frame_equal(result, expected)
result = df.rename(str.lower, axis="index")
tm.assert_frame_equal(result, expected)
result = df.rename({"X": "x", "Y": "y"}, axis=0)
tm.assert_frame_equal(result, expected)
result = df.rename({"X": "x", "Y": "y"}, axis="index")
tm.assert_frame_equal(result, expected)
result = df.rename(mapper=str.lower, axis="index")
tm.assert_frame_equal(result, expected)
def test_rename_mapper_multi(self):
df = DataFrame({"A": ["a", "b"], "B": ["c", "d"], "C": [1, 2]}).set_index(
["A", "B"]
)
result = df.rename(str.upper)
expected = df.rename(index=str.upper)
tm.assert_frame_equal(result, expected)
def test_rename_positional_named(self):
# https://github.com/pandas-dev/pandas/issues/12392
df = DataFrame({"a": [1, 2], "b": [1, 2]}, index=["X", "Y"])
result = df.rename(str.lower, columns=str.upper)
expected = DataFrame({"A": [1, 2], "B": [1, 2]}, index=["x", "y"])
tm.assert_frame_equal(result, expected)
def test_rename_axis_style_raises(self):
# see gh-12392
df = DataFrame({"A": [1, 2], "B": [1, 2]}, index=["0", "1"])
# Named target and axis
over_spec_msg = "Cannot specify both 'axis' and any of 'index' or 'columns'"
with pytest.raises(TypeError, match=over_spec_msg):
df.rename(index=str.lower, axis=1)
with pytest.raises(TypeError, match=over_spec_msg):
df.rename(index=str.lower, axis="columns")
with pytest.raises(TypeError, match=over_spec_msg):
df.rename(columns=str.lower, axis="columns")
with pytest.raises(TypeError, match=over_spec_msg):
df.rename(index=str.lower, axis=0)
# Multiple targets and axis
with pytest.raises(TypeError, match=over_spec_msg):
df.rename(str.lower, str.lower, axis="columns")
# Too many targets
over_spec_msg = "Cannot specify all of 'mapper', 'index', 'columns'."
with pytest.raises(TypeError, match=over_spec_msg):
df.rename(str.lower, str.lower, str.lower)
# Duplicates
with pytest.raises(TypeError, match="multiple values"):
df.rename(id, mapper=id)
def test_reindex_api_equivalence(self):
# equivalence of the labels/axis and index/columns API's
df = DataFrame(
[[1, 2, 3], [3, 4, 5], [5, 6, 7]],
index=["a", "b", "c"],
columns=["d", "e", "f"],
)
res1 = df.reindex(["b", "a"])
res2 = df.reindex(index=["b", "a"])
res3 = df.reindex(labels=["b", "a"])
res4 = df.reindex(labels=["b", "a"], axis=0)
res5 = df.reindex(["b", "a"], axis=0)
for res in [res2, res3, res4, res5]:
tm.assert_frame_equal(res1, res)
res1 = df.reindex(columns=["e", "d"])
res2 = df.reindex(["e", "d"], axis=1)
res3 = df.reindex(labels=["e", "d"], axis=1)
for res in [res2, res3]:
tm.assert_frame_equal(res1, res)
res1 = df.reindex(index=["b", "a"], columns=["e", "d"])
res2 = df.reindex(columns=["e", "d"], index=["b", "a"])
res3 = df.reindex(labels=["b", "a"], axis=0).reindex(labels=["e", "d"], axis=1)
for res in [res2, res3]:
tm.assert_frame_equal(res1, res)
def test_rename_positional(self):
df = DataFrame(columns=["A", "B"])
with tm.assert_produces_warning(FutureWarning) as rec:
result = df.rename(None, str.lower)
expected = DataFrame(columns=["a", "b"])
tm.assert_frame_equal(result, expected)
assert len(rec) == 1
message = str(rec[0].message)
assert "rename" in message
assert "Use named arguments" in message
def test_assign_columns(self, float_frame):
float_frame["hi"] = "there"
df = float_frame.copy()
df.columns = ["foo", "bar", "baz", "quux", "foo2"]
tm.assert_series_equal(float_frame["C"], df["baz"], check_names=False)
tm.assert_series_equal(float_frame["hi"], df["foo2"], check_names=False)
def test_set_index_preserve_categorical_dtype(self):
# GH13743, GH13854
df = DataFrame(
{
"A": [1, 2, 1, 1, 2],
"B": [10, 16, 22, 28, 34],
"C1": Categorical(list("abaab"), categories=list("bac"), ordered=False),
"C2": Categorical(list("abaab"), categories=list("bac"), ordered=True),
}
)
for cols in ["C1", "C2", ["A", "C1"], ["A", "C2"], ["C1", "C2"]]:
result = df.set_index(cols).reset_index()
result = result.reindex(columns=df.columns)
tm.assert_frame_equal(result, df)
def test_ambiguous_warns(self):
df = DataFrame({"A": [1, 2]})
with tm.assert_produces_warning(FutureWarning):
df.rename(id, id)
with tm.assert_produces_warning(FutureWarning):
df.rename({0: 10}, {"A": "B"})
def test_rename_signature(self):
sig = inspect.signature(DataFrame.rename)
parameters = set(sig.parameters)
assert parameters == {
"self",
"mapper",
"index",
"columns",
"axis",
"inplace",
"copy",
"level",
"errors",
}
def test_reindex_signature(self):
sig = inspect.signature(DataFrame.reindex)
parameters = set(sig.parameters)
assert parameters == {
"self",
"labels",
"index",
"columns",
"axis",
"limit",
"copy",
"level",
"method",
"fill_value",
"tolerance",
}
def test_droplevel(self):
# GH20342
df = DataFrame([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
df = df.set_index([0, 1]).rename_axis(["a", "b"])
df.columns = MultiIndex.from_tuples(
[("c", "e"), ("d", "f")], names=["level_1", "level_2"]
)
# test that dropping of a level in index works
expected = df.reset_index("a", drop=True)
result = df.droplevel("a", axis="index")
tm.assert_frame_equal(result, expected)
# test that dropping of a level in columns works
expected = df.copy()
expected.columns = Index(["c", "d"], name="level_1")
result = df.droplevel("level_2", axis="columns")
tm.assert_frame_equal(result, expected)
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
class TestIntervalIndex:
def test_setitem(self):
df = DataFrame({"A": range(10)})
s = cut(df.A, 5)
assert isinstance(s.cat.categories, IntervalIndex)
# B & D end up as Categoricals
# the remainder are converted to in-line objects
# containing an IntervalIndex.values
df["B"] = s
df["C"] = np.array(s)
df["D"] = s.values
df["E"] = np.array(s.values)
assert is_categorical_dtype(df["B"])
assert is_interval_dtype(df["B"].cat.categories)
assert is_categorical_dtype(df["D"])
assert | is_interval_dtype(df["D"].cat.categories) | pandas.core.dtypes.common.is_interval_dtype |
import logging
import joblib
import seaborn
import scipy.stats
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn.utils import shuffle
all_feature_names = [
'slope',
'slope0',
'slope1',
'slope2',
'slope3',
'correlation',
'correlation0',
'correlation1',
'correlation2',
'correlation3',
'percent_duplicate_reads',
'mean_insert_size',
'unpaired_mapped_reads',
'standard_deviation_insert_size',
'ploidy',
'breakpoints',
]
align_metrics_columns = [
'cell_id',
'unpaired_mapped_reads', 'paired_mapped_reads',
'unpaired_duplicate_reads', 'paired_duplicate_reads',
'unmapped_reads', 'percent_duplicate_reads',
'total_reads', 'total_mapped_reads',
'total_duplicate_reads', 'total_properly_paired', 'coverage_breadth',
'coverage_depth', 'median_insert_size', 'mean_insert_size',
'standard_deviation_insert_size',
]
def subset_by_cell_cycle(cn_data, proportion_s):
cell_states = cn_data[['cell_id', 'cell_cycle_state']].drop_duplicates()
state_cell_ids = {}
for cell_cycle_state, df in cell_states.groupby('cell_cycle_state'):
state_cell_ids[cell_cycle_state] = shuffle(df['cell_id'].values.astype(str))
num_cells = len(state_cell_ids['S'])
cell_ids = (
list(state_cell_ids['S'][:int(proportion_s * num_cells)]) +
list(state_cell_ids['G1'][:int((1. - proportion_s) * num_cells)])
)
return cell_ids
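# Illustrative sketch (not part of the original pipeline): subset_by_cell_cycle
# returns a shuffled mix of cell ids whose S-phase share is roughly
# `proportion_s`; the column names below match the ones used in this module.
#   cn = pd.DataFrame({'cell_id': ['c1', 'c2', 'c3', 'c4'],
#                      'cell_cycle_state': ['S', 'S', 'G1', 'G1']})
#   subset_by_cell_cycle(cn, proportion_s=0.5)
#   # -> one 'S' cell id plus one 'G1' cell id, in shuffled order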
def calculate_features(cn_data, metrics_data, align_metrics_data, agg_proportion_s=None, figures_prefix=None):
""" Calculate features based on copy number data
Args:
cn_data (pandas.DataFrame): HMMCopy reads data
metrics_data (pandas.DataFrame): HMMCopy metrics data
align_metrics_data (pandas.DataFrame): Alignment metrics data
agg_proportion_s (float, optional): Proportion of s to use in aggregate correction. Defaults to None, all available.
figures_prefix (str, optional): Prefix for figures. Defaults to None, no figures.
Returns:
pandas.DataFrame: Feature data
"""
cn_data = cn_data.merge(align_metrics_data[['cell_id', 'median_insert_size']])
corr_data = []
for library_id, library_cn_data in cn_data.groupby('library_id'):
logging.info(f'calculating features for {library_id}')
library_cn_data = library_cn_data[library_cn_data['gc'] < 1.]
library_cn_data = library_cn_data[library_cn_data['gc'] > 0.]
library_cn_data = library_cn_data[library_cn_data['state'] < 9]
library_cn_data = library_cn_data.merge(
library_cn_data.groupby('cell_id')['reads'].sum().rename('total_reads').reset_index())
library_cn_data['norm_reads'] = 1e6 * library_cn_data['reads'] / library_cn_data['total_reads']
library_cn_data = library_cn_data.query('state > 0').copy()
library_cn_data['norm_reads'] = library_cn_data['norm_reads'] / library_cn_data['state']
if len(library_cn_data.index) == 0:
logging.warning(f'library {library_id} filtered entirely')
continue
#
# Correct GC with aggregate data
#
logging.info(f'calculating aggregate features')
for use_norm_reads in (True, False):
if use_norm_reads:
reads_col = 'norm_reads'
else:
reads_col = 'reads'
if agg_proportion_s is not None:
cell_ids = subset_by_cell_cycle(library_cn_data, agg_proportion_s)
agg_data = library_cn_data[library_cn_data['cell_id'].isin(cell_ids)]
else:
agg_data = library_cn_data
agg_data = agg_data.groupby(['chr', 'start'])[reads_col].sum().reset_index()
agg_data = agg_data.merge(cn_data[['chr', 'start', 'gc']].drop_duplicates())
z = np.polyfit(agg_data['gc'].values, agg_data[reads_col].astype(float).values, 3)
p = np.poly1d(z)
if figures_prefix is not None:
fig = plt.figure(figsize=(3, 3))
ax = plt.gca()
seaborn.scatterplot(
'gc', reads_col,
data=agg_data,
alpha=0.01,
ax=ax)
x = np.linspace(agg_data['gc'].min(), agg_data['gc'].max(), 100)
plt.plot(x, p(x))
plt.title('agg fit on ' + library_id)
fig.savefig(figures_prefix + f'{library_id}_norm{use_norm_reads}_agg_fit.pdf')
library_cn_data['copy2_{}'.format(use_norm_reads * 1)] = library_cn_data[reads_col] / p(library_cn_data['gc'].values)
#
# Correct GC with per cell data
#
logging.info(f'calculating independent features')
for use_insert_size in (True, False):
if agg_proportion_s is not None:
cell_ids = subset_by_cell_cycle(library_cn_data, agg_proportion_s)
agg_data = library_cn_data[library_cn_data['cell_id'].isin(cell_ids)]
else:
agg_data = library_cn_data
if use_insert_size:
X = agg_data[['gc', 'median_insert_size']].values
else:
X = agg_data[['gc']].values
y = agg_data['norm_reads']
poly = PolynomialFeatures(3)
X_poly = poly.fit_transform(X)
reg = LinearRegression().fit(X_poly, y)
logging.info(
'Library {}, R^2 of linear regression GC fit on training set: {:.4f}'
.format(library_id, reg.score(X_poly, y)))
if use_insert_size:
X = library_cn_data[['gc', 'median_insert_size']].values
else:
X = library_cn_data[['gc']].values
X_poly = poly.fit_transform(X)
corrected_column = 'copy3_{}'.format(use_insert_size * 1)
library_cn_data[corrected_column] = library_cn_data['norm_reads'] / reg.predict(X_poly)
cell_id = library_cn_data.sort_values('total_reads')['cell_id'].iloc[0]
plot_data = library_cn_data.query('cell_id == "{}"'.format(cell_id))
median_insert_size = plot_data['median_insert_size'].values[0]
if 'cell_cycle_state' in plot_data:
cell_cycle_state = plot_data['cell_cycle_state'].values[0]
else:
cell_cycle_state = 'unknown'
x = np.linspace(plot_data['gc'].min(), plot_data['gc'].max(), 100)
if use_insert_size:
x = np.array([x, median_insert_size * np.ones(x.shape)]).T
else:
x = np.array([x]).T
if figures_prefix is not None:
fig = plt.figure(figsize=(6, 6))
ax = plt.gca()
seaborn.scatterplot(
'gc', 'norm_reads',
data=plot_data,
alpha=0.1,
ax=ax)
plt.plot(x[:, 0], reg.predict(poly.fit_transform(x)))
plt.title('gc norm reads ' + corrected_column + ' ' + cell_id + ' ' + cell_cycle_state)
fig.savefig(figures_prefix + f'{library_id}_useinsert{use_insert_size}_gc_norm_reads.pdf')
if figures_prefix is not None:
fig = plt.figure(figsize=(6, 6))
ax = plt.gca()
seaborn.scatterplot(
'gc', corrected_column,
data=plot_data,
alpha=0.1,
ax=ax)
plt.title('gc corrected ' + corrected_column + ' ' + cell_id + ' ' + cell_cycle_state)
fig.savefig(figures_prefix + f'{library_id}_useinsert{use_insert_size}_gc_corrected.pdf')
logging.info(f'statistical tests and tabulation')
library_corr_data = []
for cell_id, cell_data in library_cn_data.groupby('cell_id'):
if cell_data.empty:
continue
correlation0, pvalue = scipy.stats.spearmanr(cell_data['gc'], cell_data['copy2_0'])
correlation1, pvalue = scipy.stats.spearmanr(cell_data['gc'], cell_data['copy2_1'])
correlation2, pvalue = scipy.stats.spearmanr(cell_data['gc'], cell_data['copy3_0'])
correlation3, pvalue = scipy.stats.spearmanr(cell_data['gc'], cell_data['copy3_1'])
correlation, pvalue = scipy.stats.spearmanr(cell_data['gc'], cell_data['norm_reads'])
# np.polyfit(x, y, 1) returns [slope, intercept]; index 0 is the slope
slope0 = np.polyfit(cell_data['gc'].values, cell_data['copy2_0'].values, 1)[0]
slope1 = np.polyfit(cell_data['gc'].values, cell_data['copy2_1'].values, 1)[0]
slope3 = np.polyfit(cell_data['gc'].values, cell_data['copy3_0'].values, 1)[0]
slope2 = np.polyfit(cell_data['gc'].values, cell_data['copy3_1'].values, 1)[0]
slope = np.polyfit(cell_data['gc'].values, cell_data['norm_reads'].values, 1)[0]
library_corr_data.append(dict(
correlation=correlation,
correlation0=correlation0,
correlation1=correlation1,
correlation2=correlation2,
correlation3=correlation3,
pvalue=pvalue,
cell_id=cell_id,
slope0=slope0,
slope1=slope1,
slope2=slope2,
slope3=slope3,
slope=slope,
))
library_corr_data = pd.DataFrame(library_corr_data)
library_corr_data['library_id'] = library_id
corr_data.append(library_corr_data)
if figures_prefix is not None:
fig = plt.figure(figsize=(6, 6))
library_corr_data['correlation'].hist(bins=100)
plt.title('correlation hist ' + library_id)
fig.savefig(figures_prefix + f'{library_id}_correlation_hist.pdf')
plt.close('all')
corr_data = pd.concat(corr_data, sort=True, ignore_index=True)
corr_data = corr_data.dropna()
ploidy = cn_data.groupby('cell_id')['state'].mean().rename('ploidy').reset_index()
corr_data = corr_data.merge(align_metrics_data[align_metrics_columns].drop_duplicates())
corr_data = corr_data.merge(metrics_data[['cell_id', 'breakpoints']].drop_duplicates())
if 'cell_cycle_state' in metrics_data:
corr_data = corr_data.merge(metrics_data[['cell_id', 'cell_cycle_state']].drop_duplicates())
corr_data = corr_data.merge(ploidy)
return corr_data
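# Minimal usage sketch (assumes dataframes shaped like the HMMCopy reads,
# HMMCopy metrics and alignment metrics tables loaded by get_data below);
# figures_prefix is optional and only controls the diagnostic plots:
#   features = calculate_features(cn_data, metrics_data, align_metrics_data,
#                                  agg_proportion_s=None, figures_prefix=None)
#   features[['cell_id'] + all_feature_names]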
cn_data_urls = [
'https://singlecelldata.blob.core.windows.net/results/SC-1563/results/results/hmmcopy_autoploidy/A90553C_multiplier0_reads.csv.gz',
'https://singlecelldata.blob.core.windows.net/results/SC-1561/results/results/hmmcopy_autoploidy/A73044A_multiplier0_reads.csv.gz',
'https://singlecelldata.blob.core.windows.net/results/SC-1583/results/results/hmmcopy_autoploidy/A96139A_multiplier0_reads.csv.gz',
'https://singlecelldata.blob.core.windows.net/results/SC-1585/results/results/hmmcopy_autoploidy/A96147A_multiplier0_reads.csv.gz',
]
metrics_data_urls = [
'https://singlecelldata.blob.core.windows.net/results/SC-1563/results/results/hmmcopy_autoploidy/A90553C_multiplier0_metrics.csv.gz',
'https://singlecelldata.blob.core.windows.net/results/SC-1561/results/results/hmmcopy_autoploidy/A73044A_multiplier0_metrics.csv.gz',
'https://singlecelldata.blob.core.windows.net/results/SC-1583/results/results/hmmcopy_autoploidy/A96139A_multiplier0_metrics.csv.gz',
'https://singlecelldata.blob.core.windows.net/results/SC-1585/results/results/hmmcopy_autoploidy/A96147A_multiplier0_metrics.csv.gz',
]
align_metrics_data_urls = [
'https://singlecelldata.blob.core.windows.net/results/SC-1563/results/results/alignment/A90553C_alignment_metrics.csv.gz',
'https://singlecelldata.blob.core.windows.net/results/SC-1561/results/results/alignment/A73044A_alignment_metrics.csv.gz',
'https://singlecelldata.blob.core.windows.net/results/SC-1583/results/results/alignment/A96139A_alignment_metrics.csv.gz',
'https://singlecelldata.blob.core.windows.net/results/SC-1585/results/results/alignment/A96147A_alignment_metrics.csv.gz',
]
cache_dir = './cachedir'
memory = joblib.Memory(cache_dir, verbose=10)
@memory.cache
def get_data(sas):
cn_data = []
for cn_data_url in cn_data_urls:
cn_data.append(pd.read_csv(cn_data_url + sas, compression='gzip'))
cn_data = pd.concat(cn_data, sort=True, ignore_index=True)
metrics_data = []
for metrics_data_url in metrics_data_urls:
metrics_data.append(pd.read_csv(metrics_data_url + sas, compression='gzip'))
metrics_data = pd.concat(metrics_data, sort=True, ignore_index=True)
align_metrics_data = []
for align_metrics_data_url in align_metrics_data_urls:
align_metrics_data.append( | pd.read_csv(align_metrics_data_url + sas, compression='gzip') | pandas.read_csv |
#
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests for the zipline.assets package
"""
import sys
from unittest import TestCase
from datetime import (
datetime,
timedelta,
)
import pickle
import pprint
import pytz
import uuid
import pandas as pd
from nose_parameterized import parameterized
from zipline.finance.trading import with_environment
from zipline.assets import Asset, Equity, Future, AssetFinder
from zipline.errors import (
SymbolNotFound,
MultipleSymbolsFound,
SidAssignmentError,
)
class FakeTable(object):
def __init__(self, name, count, dt, fuzzy_str):
self.name = name
self.count = count
self.dt = dt
self.fuzzy_str = fuzzy_str
self.df = pd.DataFrame.from_records(
[
{
'sid': i,
'file_name': 'TEST%s%s' % (self.fuzzy_str, i),
'company_name': self.name + str(i),
'start_date_nano': pd.Timestamp(dt, tz='UTC').value,
'end_date_nano': pd.Timestamp(dt, tz='UTC').value,
'exchange': self.name,
}
for i in range(1, self.count + 1)
]
)
def read(self, *args, **kwargs):
return self.df.to_records()
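# Example of the records FakeTable fabricates (arguments are hypothetical):
#   FakeTable(name='NYSE', count=2, dt='2014-01-01', fuzzy_str='@')
# produces sids 1..2 with file_name 'TEST@1'/'TEST@2', company_name
# 'NYSE1'/'NYSE2', identical start/end nanos derived from dt, and
# exchange 'NYSE'.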
class FakeTableIdenticalSymbols(object):
def __init__(self, name, as_of_dates):
self.name = name
self.as_of_dates = as_of_dates
self.df = pd.DataFrame.from_records(
[
{
'sid': i,
'file_name': self.name,
'company_name': self.name,
'start_date_nano': date.value,
'end_date_nano': (date + timedelta(days=1)).value,
'exchange': self.name,
}
for i, date in enumerate(self.as_of_dates)
]
)
def read(self, *args, **kwargs):
return self.df.to_records()
class FakeTableFromRecords(object):
def __init__(self, records):
self.records = records
self.df = pd.DataFrame.from_records(self.records)
def read(self, *args, **kwargs):
return self.df.to_records()
@with_environment()
def build_lookup_generic_cases(env=None):
"""
Generate test cases for AssetFinder test_lookup_generic.
"""
unique_start = pd.Timestamp('2013-01-01', tz='UTC')
unique_end = pd.Timestamp('2014-01-01', tz='UTC')
dupe_0_start = pd.Timestamp('2013-01-01', tz='UTC')
dupe_0_end = dupe_0_start + timedelta(days=1)
dupe_1_start = pd.Timestamp('2013-01-03', tz='UTC')
dupe_1_end = dupe_1_start + timedelta(days=1)
table = FakeTableFromRecords(
[
{
'sid': 0,
'file_name': 'duplicated',
'company_name': 'duplicated_0',
'start_date_nano': dupe_0_start.value,
'end_date_nano': dupe_0_end.value,
'exchange': '',
},
{
'sid': 1,
'file_name': 'duplicated',
'company_name': 'duplicated_1',
'start_date_nano': dupe_1_start.value,
'end_date_nano': dupe_1_end.value,
'exchange': '',
},
{
'sid': 2,
'file_name': 'unique',
'company_name': 'unique',
'start_date_nano': unique_start.value,
'end_date_nano': unique_end.value,
'exchange': '',
},
],
)
env.update_asset_finder(asset_metadata=table.df)
dupe_0, dupe_1, unique = assets = [
env.asset_finder.retrieve_asset(i)
for i in range(3)
]
# This expansion code is run at module import time, which means we have to
# clear the AssetFinder here or else it will interfere with the cache
# for other tests.
env.update_asset_finder(clear_metadata=True)
dupe_0_start = dupe_0.start_date
dupe_1_start = dupe_1.start_date
cases = [
##
# Scalars
# Asset object
(table, assets[0], None, assets[0]),
(table, assets[1], None, assets[1]),
(table, assets[2], None, assets[2]),
# int
(table, 0, None, assets[0]),
(table, 1, None, assets[1]),
(table, 2, None, assets[2]),
# Duplicated symbol with resolution date
(table, 'duplicated', dupe_0_start, dupe_0),
(table, 'duplicated', dupe_1_start, dupe_1),
# Unique symbol, with or without resolution date.
(table, 'unique', unique_start, unique),
(table, 'unique', None, unique),
##
# Iterables
# Iterables of Asset objects.
(table, assets, None, assets),
(table, iter(assets), None, assets),
# Iterables of ints
(table, (0, 1), None, assets[:-1]),
(table, iter((0, 1)), None, assets[:-1]),
# Iterables of symbols.
(table, ('duplicated', 'unique'), dupe_0_start, [dupe_0, unique]),
(table, ('duplicated', 'unique'), dupe_1_start, [dupe_1, unique]),
# Mixed types
(table,
('duplicated', 2, 'unique', 1, dupe_1),
dupe_0_start,
[dupe_0, assets[2], unique, assets[1], dupe_1]),
]
return cases
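# Each generated case is a (fake table, lookup query, as_of date, expected
# result) tuple; the lookup_generic test this helper feeds (not shown in this
# excerpt) passes the query and as_of date to AssetFinder and compares the
# result against the expected asset(s).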
class AssetTestCase(TestCase):
def test_asset_object(self):
self.assertEquals({5061: 'foo'}[Asset(5061)], 'foo')
self.assertEquals(Asset(5061), 5061)
self.assertEquals(5061, Asset(5061))
self.assertEquals(Asset(5061), Asset(5061))
self.assertEquals(int(Asset(5061)), 5061)
self.assertEquals(str(Asset(5061)), 'Asset(5061)')
def test_asset_is_pickleable(self):
# Very wow
s = Asset(
1337,
symbol="DOGE",
asset_name="DOGECOIN",
start_date= | pd.Timestamp('2013-12-08 9:31AM', tz='UTC') | pandas.Timestamp |
from datetime import datetime
import numpy as np
import pytest
from pandas.core.dtypes.cast import find_common_type, is_dtype_equal
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series
import pandas._testing as tm
class TestDataFrameCombineFirst:
def test_combine_first_mixed(self):
a = Series(["a", "b"], index=range(2))
b = Series(range(2), index=range(2))
f = DataFrame({"A": a, "B": b})
a = Series(["a", "b"], index=range(5, 7))
b = Series(range(2), index=range(5, 7))
g = DataFrame({"A": a, "B": b})
exp = DataFrame({"A": list("abab"), "B": [0, 1, 0, 1]}, index=[0, 1, 5, 6])
combined = f.combine_first(g)
tm.assert_frame_equal(combined, exp)
def test_combine_first(self, float_frame):
# disjoint
head, tail = float_frame[:5], float_frame[5:]
combined = head.combine_first(tail)
reordered_frame = float_frame.reindex(combined.index)
tm.assert_frame_equal(combined, reordered_frame)
assert tm.equalContents(combined.columns, float_frame.columns)
tm.assert_series_equal(combined["A"], reordered_frame["A"])
# same index
fcopy = float_frame.copy()
fcopy["A"] = 1
del fcopy["C"]
fcopy2 = float_frame.copy()
fcopy2["B"] = 0
del fcopy2["D"]
combined = fcopy.combine_first(fcopy2)
assert (combined["A"] == 1).all()
tm.assert_series_equal(combined["B"], fcopy["B"])
tm.assert_series_equal(combined["C"], fcopy2["C"])
tm.assert_series_equal(combined["D"], fcopy["D"])
# overlap
head, tail = reordered_frame[:10].copy(), reordered_frame
head["A"] = 1
combined = head.combine_first(tail)
assert (combined["A"][:10] == 1).all()
# reverse overlap
tail["A"][:10] = 0
combined = tail.combine_first(head)
assert (combined["A"][:10] == 0).all()
# no overlap
f = float_frame[:10]
g = float_frame[10:]
combined = f.combine_first(g)
tm.assert_series_equal(combined["A"].reindex(f.index), f["A"])
tm.assert_series_equal(combined["A"].reindex(g.index), g["A"])
# corner cases
comb = float_frame.combine_first(DataFrame())
tm.assert_frame_equal(comb, float_frame)
comb = DataFrame().combine_first(float_frame)
tm.assert_frame_equal(comb, float_frame)
comb = float_frame.combine_first(DataFrame(index=["faz", "boo"]))
assert "faz" in comb.index
# #2525
df = DataFrame({"a": [1]}, index=[datetime(2012, 1, 1)])
df2 = DataFrame(columns=["b"])
result = df.combine_first(df2)
assert "b" in result
def test_combine_first_mixed_bug(self):
idx = Index(["a", "b", "c", "e"])
ser1 = Series([5.0, -9.0, 4.0, 100.0], index=idx)
ser2 = Series(["a", "b", "c", "e"], index=idx)
ser3 = Series([12, 4, 5, 97], index=idx)
frame1 = DataFrame({"col0": ser1, "col2": ser2, "col3": ser3})
idx = Index(["a", "b", "c", "f"])
ser1 = Series([5.0, -9.0, 4.0, 100.0], index=idx)
ser2 = Series(["a", "b", "c", "f"], index=idx)
ser3 = Series([12, 4, 5, 97], index=idx)
frame2 = DataFrame({"col1": ser1, "col2": ser2, "col5": ser3})
combined = frame1.combine_first(frame2)
assert len(combined.columns) == 5
def test_combine_first_same_as_in_update(self):
# gh 3016 (same as in update)
df = DataFrame(
[[1.0, 2.0, False, True], [4.0, 5.0, True, False]],
columns=["A", "B", "bool1", "bool2"],
)
other = DataFrame([[45, 45]], index=[0], columns=["A", "B"])
result = df.combine_first(other)
tm.assert_frame_equal(result, df)
df.loc[0, "A"] = np.nan
result = df.combine_first(other)
df.loc[0, "A"] = 45
tm.assert_frame_equal(result, df)
def test_combine_first_doc_example(self):
# doc example
df1 = DataFrame(
{"A": [1.0, np.nan, 3.0, 5.0, np.nan], "B": [np.nan, 2.0, 3.0, np.nan, 6.0]}
)
df2 = DataFrame(
{
"A": [5.0, 2.0, 4.0, np.nan, 3.0, 7.0],
"B": [np.nan, np.nan, 3.0, 4.0, 6.0, 8.0],
}
)
result = df1.combine_first(df2)
expected = DataFrame({"A": [1, 2, 3, 5, 3, 7.0], "B": [np.nan, 2, 3, 4, 6, 8]})
tm.assert_frame_equal(result, expected)
def test_combine_first_return_obj_type_with_bools(self):
# GH3552
df1 = DataFrame(
[[np.nan, 3.0, True], [-4.6, np.nan, True], [np.nan, 7.0, False]]
)
df2 = DataFrame([[-42.6, np.nan, True], [-5.0, 1.6, False]], index=[1, 2])
expected = Series([True, True, False], name=2, dtype=bool)
result_12 = df1.combine_first(df2)[2]
tm.assert_series_equal(result_12, expected)
result_21 = df2.combine_first(df1)[2]
tm.assert_series_equal(result_21, expected)
@pytest.mark.parametrize(
"data1, data2, data_expected",
(
(
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
[pd.NaT, pd.NaT, pd.NaT],
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
),
(
[pd.NaT, pd.NaT, pd.NaT],
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
),
(
[datetime(2000, 1, 2), pd.NaT, pd.NaT],
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
[datetime(2000, 1, 2), datetime(2000, 1, 2), datetime(2000, 1, 3)],
),
(
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
[datetime(2000, 1, 2), pd.NaT, pd.NaT],
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
),
),
)
def test_combine_first_convert_datatime_correctly(
self, data1, data2, data_expected
):
# GH 3593
df1, df2 = DataFrame({"a": data1}), DataFrame({"a": data2})
result = df1.combine_first(df2)
expected = DataFrame({"a": data_expected})
tm.assert_frame_equal(result, expected)
def test_combine_first_align_nan(self):
# GH 7509 (not fixed)
dfa = DataFrame([[pd.Timestamp("2011-01-01"), 2]], columns=["a", "b"])
dfb = DataFrame([[4], [5]], columns=["b"])
assert dfa["a"].dtype == "datetime64[ns]"
assert dfa["b"].dtype == "int64"
res = dfa.combine_first(dfb)
exp = DataFrame(
{"a": [pd.Timestamp("2011-01-01"), pd.NaT], "b": [2, 5]},
columns=["a", "b"],
)
tm.assert_frame_equal(res, exp)
assert res["a"].dtype == "datetime64[ns]"
# ToDo: this must be int64
assert res["b"].dtype == "int64"
res = dfa.iloc[:0].combine_first(dfb)
exp = DataFrame({"a": [np.nan, np.nan], "b": [4, 5]}, columns=["a", "b"])
tm.assert_frame_equal(res, exp)
# ToDo: this must be datetime64
assert res["a"].dtype == "float64"
# ToDo: this must be int64
assert res["b"].dtype == "int64"
def test_combine_first_timezone(self):
# see gh-7630
data1 = pd.to_datetime("20100101 01:01").tz_localize("UTC")
df1 = DataFrame(
columns=["UTCdatetime", "abc"],
data=data1,
index=pd.date_range("20140627", periods=1),
)
data2 = pd.to_datetime("20121212 12:12").tz_localize("UTC")
df2 = DataFrame(
columns=["UTCdatetime", "xyz"],
data=data2,
index=pd.date_range("20140628", periods=1),
)
res = df2[["UTCdatetime"]].combine_first(df1)
exp = DataFrame(
{
"UTCdatetime": [
pd.Timestamp("2010-01-01 01:01", tz="UTC"),
pd.Timestamp("2012-12-12 12:12", tz="UTC"),
],
"abc": [pd.Timestamp("2010-01-01 01:01:00", tz="UTC"), pd.NaT],
},
columns=["UTCdatetime", "abc"],
index=pd.date_range("20140627", periods=2, freq="D"),
)
assert res["UTCdatetime"].dtype == "datetime64[ns, UTC]"
assert res["abc"].dtype == "datetime64[ns, UTC]"
tm.assert_frame_equal(res, exp)
# see gh-10567
dts1 = pd.date_range("2015-01-01", "2015-01-05", tz="UTC")
df1 = DataFrame({"DATE": dts1})
dts2 = pd.date_range("2015-01-03", "2015-01-05", tz="UTC")
df2 = DataFrame({"DATE": dts2})
res = df1.combine_first(df2)
tm.assert_frame_equal(res, df1)
assert res["DATE"].dtype == "datetime64[ns, UTC]"
dts1 = pd.DatetimeIndex(
["2011-01-01", "NaT", "2011-01-03", "2011-01-04"], tz="US/Eastern"
)
df1 = DataFrame({"DATE": dts1}, index=[1, 3, 5, 7])
dts2 = pd.DatetimeIndex(
["2012-01-01", "2012-01-02", "2012-01-03"], tz="US/Eastern"
)
df2 = DataFrame({"DATE": dts2}, index=[2, 4, 5])
res = df1.combine_first(df2)
exp_dts = pd.DatetimeIndex(
[
"2011-01-01",
"2012-01-01",
"NaT",
"2012-01-02",
"2011-01-03",
"2011-01-04",
],
tz="US/Eastern",
)
exp = DataFrame({"DATE": exp_dts}, index=[1, 2, 3, 4, 5, 7])
tm.assert_frame_equal(res, exp)
# different tz
dts1 = pd.date_range("2015-01-01", "2015-01-05", tz="US/Eastern")
df1 = DataFrame({"DATE": dts1})
dts2 = pd.date_range("2015-01-03", "2015-01-05")
df2 = DataFrame({"DATE": dts2})
# if df1 doesn't have NaN, keep its dtype
res = df1.combine_first(df2)
tm.assert_frame_equal(res, df1)
assert res["DATE"].dtype == "datetime64[ns, US/Eastern]"
dts1 = pd.date_range("2015-01-01", "2015-01-02", tz="US/Eastern")
df1 = DataFrame({"DATE": dts1})
dts2 = pd.date_range("2015-01-01", "2015-01-03")
df2 = DataFrame({"DATE": dts2})
res = df1.combine_first(df2)
exp_dts = [
pd.Timestamp("2015-01-01", tz="US/Eastern"),
pd.Timestamp("2015-01-02", tz="US/Eastern"),
pd.Timestamp("2015-01-03"),
]
exp = DataFrame({"DATE": exp_dts})
tm.assert_frame_equal(res, exp)
assert res["DATE"].dtype == "object"
def test_combine_first_timedelta(self):
data1 = pd.TimedeltaIndex(["1 day", "NaT", "3 day", "4day"])
df1 = DataFrame({"TD": data1}, index=[1, 3, 5, 7])
data2 = pd.TimedeltaIndex(["10 day", "11 day", "12 day"])
df2 = DataFrame({"TD": data2}, index=[2, 4, 5])
res = df1.combine_first(df2)
exp_dts = pd.TimedeltaIndex(
["1 day", "10 day", "NaT", "11 day", "3 day", "4 day"]
)
exp = DataFrame({"TD": exp_dts}, index=[1, 2, 3, 4, 5, 7])
tm.assert_frame_equal(res, exp)
assert res["TD"].dtype == "timedelta64[ns]"
def test_combine_first_period(self):
data1 = pd.PeriodIndex(["2011-01", "NaT", "2011-03", "2011-04"], freq="M")
df1 = DataFrame({"P": data1}, index=[1, 3, 5, 7])
data2 = pd.PeriodIndex(["2012-01-01", "2012-02", "2012-03"], freq="M")
df2 = DataFrame({"P": data2}, index=[2, 4, 5])
res = df1.combine_first(df2)
exp_dts = pd.PeriodIndex(
["2011-01", "2012-01", "NaT", "2012-02", "2011-03", "2011-04"], freq="M"
)
exp = DataFrame({"P": exp_dts}, index=[1, 2, 3, 4, 5, 7])
tm.assert_frame_equal(res, exp)
assert res["P"].dtype == data1.dtype
# different freq
dts2 = pd.PeriodIndex(["2012-01-01", "2012-01-02", "2012-01-03"], freq="D")
df2 = DataFrame({"P": dts2}, index=[2, 4, 5])
res = df1.combine_first(df2)
exp_dts = [
pd.Period("2011-01", freq="M"),
pd.Period("2012-01-01", freq="D"),
pd.NaT,
pd.Period("2012-01-02", freq="D"),
pd.Period("2011-03", freq="M"),
pd.Period("2011-04", freq="M"),
]
exp = DataFrame({"P": exp_dts}, index=[1, 2, 3, 4, 5, 7])
tm.assert_frame_equal(res, exp)
assert res["P"].dtype == "object"
def test_combine_first_int(self):
# GH14687 - integer series that do no align exactly
df1 = DataFrame({"a": [0, 1, 3, 5]}, dtype="int64")
df2 = DataFrame({"a": [1, 4]}, dtype="int64")
result_12 = df1.combine_first(df2)
expected_12 = DataFrame({"a": [0, 1, 3, 5]})
tm.assert_frame_equal(result_12, expected_12)
result_21 = df2.combine_first(df1)
expected_21 = DataFrame({"a": [1, 4, 3, 5]})
tm.assert_frame_equal(result_21, expected_21)
@pytest.mark.parametrize("val", [1, 1.0])
def test_combine_first_with_asymmetric_other(self, val):
# see gh-20699
df1 = DataFrame({"isNum": [val]})
df2 = DataFrame({"isBool": [True]})
res = df1.combine_first(df2)
exp = DataFrame({"isBool": [True], "isNum": [val]})
tm.assert_frame_equal(res, exp)
def test_combine_first_string_dtype_only_na(self):
# GH: 37519
df = DataFrame({"a": ["962", "85"], "b": [pd.NA] * 2}, dtype="string")
df2 = DataFrame({"a": ["85"], "b": [pd.NA]}, dtype="string")
df.set_index(["a", "b"], inplace=True)
df2.set_index(["a", "b"], inplace=True)
result = df.combine_first(df2)
expected = DataFrame(
{"a": ["962", "85"], "b": [pd.NA] * 2}, dtype="string"
).set_index(["a", "b"])
| tm.assert_frame_equal(result, expected) | pandas._testing.assert_frame_equal |
from __future__ import absolute_import, division, unicode_literals
import unittest
import jsonpickle
from helper import SkippableTest
try:
import pandas as pd
import numpy as np
from pandas.testing import assert_series_equal
from pandas.testing import assert_frame_equal
from pandas.testing import assert_index_equal
except ImportError:
np = None
class PandasTestCase(SkippableTest):
def setUp(self):
if np is None:
self.should_skip = True
return
self.should_skip = False
import jsonpickle.ext.pandas
jsonpickle.ext.pandas.register_handlers()
def tearDown(self):
if self.should_skip:
return
import jsonpickle.ext.pandas
jsonpickle.ext.pandas.unregister_handlers()
def roundtrip(self, obj):
return jsonpickle.decode(jsonpickle.encode(obj))
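# The round trip goes through a plain JSON string, e.g. (sketch):
#   payload = jsonpickle.encode(pd.Series([1, 2]))  # JSON text
#   restored = jsonpickle.decode(payload)           # pandas Series again
# The tests below then compare the restored object to the original with
# pandas' assert_series_equal / assert_frame_equal / assert_index_equal.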
def test_series_roundtrip(self):
if self.should_skip:
return self.skip('pandas is not importable')
ser = pd.Series(
{
'an_int': np.int_(1),
'a_float': np.float_(2.5),
'a_nan': np.nan,
'a_minus_inf': -np.inf,
'an_inf': np.inf,
'a_str': np.str_('foo'),
'a_unicode': np.unicode_('bar'),
'date': np.datetime64('2014-01-01'),
'complex': np.complex_(1 - 2j),
# TODO: the following dtypes are not currently supported.
# 'object': np.object_({'a': 'b'}),
}
)
decoded_ser = self.roundtrip(ser)
assert_series_equal(decoded_ser, ser)
def test_dataframe_roundtrip(self):
if self.should_skip:
return self.skip('pandas is not importable')
df = pd.DataFrame(
{
'an_int': np.int_([1, 2, 3]),
'a_float': np.float_([2.5, 3.5, 4.5]),
'a_nan': np.array([np.nan] * 3),
'a_minus_inf': np.array([-np.inf] * 3),
'an_inf': np.array([np.inf] * 3),
'a_str': np.str_('foo'),
'a_unicode': np.unicode_('bar'),
'date': np.array([np.datetime64('2014-01-01')] * 3),
'complex': np.complex_([1 - 2j, 2 - 1.2j, 3 - 1.3j]),
# TODO: the following dtypes are not currently supported.
# 'object': np.object_([{'a': 'b'}]*3),
}
)
decoded_df = self.roundtrip(df)
assert_frame_equal(decoded_df, df)
def test_multindex_dataframe_roundtrip(self):
if self.should_skip:
return self.skip('pandas is not importable')
df = pd.DataFrame(
{
'idx_lvl0': ['a', 'b', 'c'],
'idx_lvl1': np.int_([1, 1, 2]),
'an_int': np.int_([1, 2, 3]),
'a_float': np.float_([2.5, 3.5, 4.5]),
'a_nan': np.array([np.nan] * 3),
'a_minus_inf': np.array([-np.inf] * 3),
'an_inf': np.array([np.inf] * 3),
'a_str': np.str_('foo'),
'a_unicode': np.unicode_('bar'),
}
)
df = df.set_index(['idx_lvl0', 'idx_lvl1'])
decoded_df = self.roundtrip(df)
assert_frame_equal(decoded_df, df)
def test_dataframe_with_interval_index_roundtrip(self):
if self.should_skip:
return self.skip('pandas is not importable')
df = pd.DataFrame(
{'a': [1, 2], 'b': [3, 4]}, index=pd.IntervalIndex.from_breaks([1, 2, 4])
)
decoded_df = self.roundtrip(df)
assert_frame_equal(decoded_df, df)
def test_index_roundtrip(self):
if self.should_skip:
return self.skip('pandas is not importable')
idx = pd.Index(range(5, 10))
decoded_idx = self.roundtrip(idx)
assert_index_equal(decoded_idx, idx)
def test_datetime_index_roundtrip(self):
if self.should_skip:
return self.skip('pandas is not importable')
idx = pd.date_range(start='2019-01-01', end='2019-02-01', freq='D')
decoded_idx = self.roundtrip(idx)
assert_index_equal(decoded_idx, idx)
def test_ragged_datetime_index_roundtrip(self):
if self.should_skip:
return self.skip('pandas is not importable')
idx = pd.DatetimeIndex(['2019-01-01', '2019-01-02', '2019-01-05'])
decoded_idx = self.roundtrip(idx)
assert_index_equal(decoded_idx, idx)
def test_timedelta_index_roundtrip(self):
if self.should_skip:
return self.skip('pandas is not importable')
idx = pd.timedelta_range(start='1 day', periods=4, closed='right')
decoded_idx = self.roundtrip(idx)
assert_index_equal(decoded_idx, idx)
def test_period_index_roundtrip(self):
if self.should_skip:
return self.skip('pandas is not importable')
idx = pd.period_range(start='2017-01-01', end='2018-01-01', freq='M')
decoded_idx = self.roundtrip(idx)
assert_index_equal(decoded_idx, idx)
def test_int64_index_roundtrip(self):
if self.should_skip:
return self.skip('pandas is not importable')
idx = pd.Int64Index([-1, 0, 3, 4])
decoded_idx = self.roundtrip(idx)
assert_index_equal(decoded_idx, idx)
def test_uint64_index_roundtrip(self):
if self.should_skip:
return self.skip('pandas is not importable')
idx = pd.UInt64Index([0, 3, 4])
decoded_idx = self.roundtrip(idx)
assert_index_equal(decoded_idx, idx)
def test_float64_index_roundtrip(self):
if self.should_skip:
return self.skip('pandas is not importable')
idx = pd.Float64Index([0.1, 3.7, 4.2])
decoded_idx = self.roundtrip(idx)
assert_index_equal(decoded_idx, idx)
def test_interval_index_roundtrip(self):
if self.should_skip:
return self.skip('pandas is not importable')
idx = pd.IntervalIndex.from_breaks(range(5))
decoded_idx = self.roundtrip(idx)
assert_index_equal(decoded_idx, idx)
def test_datetime_interval_index_roundtrip(self):
if self.should_skip:
return self.skip('pandas is not importable')
idx = pd.IntervalIndex.from_breaks(pd.date_range('2019-01-01', '2019-01-10'))
decoded_idx = self.roundtrip(idx)
assert_index_equal(decoded_idx, idx)
def test_multi_index_roundtrip(self):
if self.should_skip:
return self.skip('pandas is not importable')
idx = pd.MultiIndex.from_product(((1, 2, 3), ('a', 'b')))
decoded_idx = self.roundtrip(idx)
assert_index_equal(decoded_idx, idx)
def test_timestamp_roundtrip(self):
if self.should_skip:
return self.skip('pandas is not importable')
obj = pd.Timestamp('2019-01-01')
decoded_obj = self.roundtrip(obj)
assert decoded_obj == obj
def test_period_roundtrip(self):
if self.should_skip:
return self.skip('pandas is not importable')
obj = pd.Timestamp('2019-01-01')
decoded_obj = self.roundtrip(obj)
assert decoded_obj == obj
def test_interval_roundtrip(self):
if self.should_skip:
return self.skip('pandas is not importable')
obj = pd.Interval(2, 4, closed=str('left'))
decoded_obj = self.roundtrip(obj)
assert decoded_obj == obj
def test_b64(self):
"""Test the binary encoding"""
if self.should_skip:
return self.skip('pandas is not importable')
# array of substantial size is stored as b64
a = np.random.rand(20, 10)
index = ['Row' + str(i) for i in range(1, a.shape[0] + 1)]
columns = ['Col' + str(i) for i in range(1, a.shape[1] + 1)]
df = pd.DataFrame(a, index=index, columns=columns)
decoded_df = self.roundtrip(df)
assert_frame_equal(decoded_df, df)
def test_series_list_index(self):
"""Test pandas using series with a list index"""
expect = pd.Series(0, index=[1, 2, 3])
actual = self.roundtrip(expect)
self.assertEqual(expect.values[0], actual.values[0])
self.assertEqual(0, actual.values[0])
self.assertEqual(expect.index[0], actual.index[0])
self.assertEqual(expect.index[1], actual.index[1])
self.assertEqual(expect.index[2], actual.index[2])
def test_series_multi_index(self):
"""Test pandas using series with a multi-index"""
expect = | pd.Series(0, index=[[1], [2], [3]]) | pandas.Series |
"""Take Excel file from plate reader and conver to fraction infectivity."""
import argparse
import itertools
import os
import numpy as np
import pandas as pd
def parse_args():
"""Parse command line arguments."""
parser = argparse.ArgumentParser(description='Convert plate reader '
'Excel file to fraction infectivity '
'csv.')
parser.add_argument('infile', type=str, help='Path to excel file '
'to convert to fraction infectivity.')
parser.add_argument('outfile', type=str, help="Path for output "
"fraction infectivity csvs.")
parser.add_argument('sheet_map', type=str, help="File to map "
"plate number and samples. Must have columns:"
"'Plate', 'Sample', 'Virus', 'SampleNum', "
"'PlateLayout', 'StartDil', and 'DilFactor'")
parser.add_argument('plate_layouts_dir', type=str, help='Directory '
'containing csv files specifying plate layouts.')
return parser.parse_args()
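# Example invocation (script name and all paths are hypothetical):
#   python plate_to_frac_infectivity.py plate_reader_export.xlsx \
#       fraction_infectivity.csv sheet_map.csv plate_layouts/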
def get_locs(layout, value):
"""Get (index, column) tuples for location of value in layout df."""
locs_list = []
locs = layout.isin([value])
series = locs.any()
columns = list(series[series].index)
for col in columns:
rows = list(locs[col][locs[col]].index)
for row in rows:
locs_list.append((row, col))
return locs_list
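# Sketch of get_locs on a toy layout (column labels are strings, as in the
# layout csvs this script expects, so they can later be cast with int()):
#   layout = pd.DataFrame({'1': ['neg_ctrl', '1'], '2': ['pos_ctrl', '1']})
#   get_locs(layout, '1')   # -> [(1, '1'), (1, '2')]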
def main():
"""Write fraction infectivity csv from plate reader data.
Use user-defined inputs to appropriately determine controls
and properly write fraction infectivity file.
"""
args = vars(parse_args())
excelfile = args['infile']
outfile = args['outfile']
sheet_map_file = args['sheet_map']
layouts_dir = args['plate_layouts_dir']
if not os.path.isfile(excelfile):
raise ValueError(f"Cannot find `excelfile`{excelfile}.")
sheet_data = pd.read_excel(excelfile,
sheet_name=None, # read all sheets
engine='xlrd',
skiprows=range(0, 22),
index_col=0,
nrows=8
)
sheet_map_df = pd.read_csv(sheet_map_file)
required_cols = ['Plate', 'Sample', 'SampleNum', 'Virus', 'PlateLayout',
'StartDil', 'DilFactor']
for col in required_cols:
if col not in sheet_map_df.columns:
raise ValueError(f"Required column {col} not in sample map.")
extra_sheets = set(sheet_data) - set(sheet_map_df['Plate'])
if extra_sheets:
raise ValueError(f"`excelfile` {excelfile} has the following extra "
f"sheets not in `sheet_mapping`: {extra_sheets}")
fract_infect_dict = {'serum': [], 'virus': [], 'replicate': [],
'concentration': [], 'fraction infectivity': []}
for plate in sheet_data:
plate_map = sheet_map_df[sheet_map_df['Plate'] == plate]
layout_df = pd.read_csv(f"{layouts_dir}/" +
f"{plate_map['PlateLayout'].iloc[0]}")
plate_df = sheet_data[plate].reset_index(drop=True)
plate_fract_infect = pd.DataFrame(index=plate_df.index,
columns=plate_df.columns)
# get background locations
bg_locs = get_locs(layout_df, 'neg_ctrl')
bg_rlus = []
for loc in bg_locs:
bg_rlus.append(plate_df.at[loc[0], int(loc[1])])
# get average of bg RLUs and subtract from plate readings
bg = np.average(bg_rlus)
plate_bg_sub = plate_df - bg
# Get locations for positive control (no serum) wells
pos_locs = get_locs(layout_df, 'pos_ctrl')
pos_cols = set(loc[1] for loc in pos_locs)
pos_idxs = set(loc[0] for loc in pos_locs)
# Determine plate orientation from positive control layout
if len(pos_cols) < len(pos_idxs):
orientation = 'V'
elif len(pos_cols) > len(pos_idxs):
orientation = 'H'
else:
raise ValueError("Unable to determine plate orientation from "
f"positive control locations ({pos_locs}).")
# Get values for pos ctrl wells and put in own df
pos_ctrl_values = pd.DataFrame(index=plate_df.index,
columns=plate_df.columns)
for pos_loc in pos_locs:
pos_ctrl_values.at[pos_loc[0], int(pos_loc[1])] = \
plate_bg_sub.at[pos_loc[0], int(pos_loc[1])]
# Calculate fraction infectivity based on positive ctrl values
if orientation == 'V':
pos_ctrl_series = pos_ctrl_values.mean(axis=1, skipna=True)
for row in plate_fract_infect.index:
plate_fract_infect.loc[row] = plate_bg_sub.loc[row] /\
pos_ctrl_series[row]
elif orientation == 'H':
pos_ctrl_series = pos_ctrl_values.mean(axis=0, skipna=True)
for col in plate_fract_infect.columns:
plate_fract_infect[col] = plate_bg_sub[col] /\
pos_ctrl_series[col]
else:
raise ValueError(f"Invalid orientation of {orientation}")
# Get locations for samples and add info to fract_infect_dict
sample_nums = plate_map['SampleNum'].tolist()
for sample in sample_nums:
sample_locs = get_locs(layout_df, str(sample))
sample_count = len(sample_locs)
start_dil = plate_map[plate_map['SampleNum'] == sample]\
['StartDil'].iloc[0]
dil_factor = plate_map[plate_map['SampleNum'] == sample]\
['DilFactor'].iloc[0]
sample_cols = set(loc[1] for loc in sample_locs)
sample_idxs = set(loc[0] for loc in sample_locs)
# Assuming all of one replicate is either in the same row or col
reps = min(len(sample_cols), len(sample_idxs))
if not sample_count % reps == 0:
raise ValueError("Sample number not evenly divisible by"
f"assumed number of reps {reps}.")
# Add sample, virus, replicate, and concentration info
fract_infect_dict['serum'].extend(
plate_map[plate_map['SampleNum'] == sample]['Sample']
.to_list()*sample_count)
fract_infect_dict['virus'].extend(
plate_map[plate_map['SampleNum'] == sample]['Virus']
.to_list()*sample_count)
for i in range(1, reps+1):
fract_infect_dict['replicate'].extend(
list(itertools.repeat(i, sample_count//reps)))
fract_infect_dict['concentration'].extend(
[(start_dil/(dil_factor**x))
for x in range(sample_count//reps)])
# Add fraction infectivities
if orientation == 'V':
for col in sample_cols:
fract_infect_dict['fraction infectivity'].extend(
plate_fract_infect[int(col)].dropna().to_list())
elif orientation == 'H':
for row in sample_idxs:
fract_infect_dict['fraction infectivity'].extend(
plate_fract_infect.loc[row].dropna().to_list())
else:
raise ValueError(f"Invalid orientation: {orientation}")
assert len(fract_infect_dict['serum']) == \
len(fract_infect_dict['virus']) == \
len(fract_infect_dict['replicate']) == \
len(fract_infect_dict['concentration']) == \
len(fract_infect_dict['fraction infectivity']), \
"Error in making fract_infect_dict"
fract_infect_df = | pd.DataFrame.from_dict(fract_infect_dict) | pandas.DataFrame.from_dict |
# Copyright 2021 Rosalind Franklin Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
import subprocess
import yaml
import pandas as pd
from icecream import ic
from . import metadata as mdMod
class ctffind():
"""
Class encapsulating a ctffind object
"""
def __init__(self,
project_name,
md_in,
params_in,
logger_in,
):
"""
Initialising a ctffind object
ARGS:
project_name (str) :: name of current project
md_in (Metadata) :: metadata containing information of tilt-series to be processed
params_in (Params) :: params object containing configurations for ctffind
logger_in (Logger) :: logger object for recording ctffind process
"""
self.proj_name = project_name
self.prmObj = params_in
self.params = self.prmObj.params
self.logObj = logger_in
self.log = []
self._process_list = self.params['System']['process_list']
self.meta = pd.DataFrame(md_in.metadata)
self.meta = self.meta[self.meta['ts'].isin(self._process_list)]
self._get_images()
self.no_processes = False
self._check_processed_images()
self._set_output_path()
# Check if output folder exists, create if not
if not os.path.isdir(self.params['System']['output_path']):
subprocess.run(['mkdir', self.params['System']['output_path']],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
encoding='ascii')
def _get_images(self):
"""
Method to extract images for ctffind
Note: one image per tilt-series, criterion: image closest to 0 tilt angle
"""
self.ctf_images = pd.DataFrame(columns=self.meta.columns)
for curr_ts in self._process_list:
temp = self.meta[self.meta['ts']==curr_ts]
# ts_image = temp.loc[temp['angles'].abs().idxmin(axis=0)]
self.ctf_images = self.ctf_images.append(temp,
ignore_index=True)
def _set_output_path(self):
"""
Subroutine to set input and output path for "ctffound" images
"""
# copy values from output column to file_paths (input) column
self.ctf_images['file_paths'] = self.ctf_images.apply(lambda df: df['output'], axis=1)
# update output column
self.ctf_images['output'] = self.ctf_images.apply(
lambda row: f"{self.params['System']['output_path']}"
f"{self.params['System']['output_prefix']}_{row['ts']:03}_{row['angles']}_ctffind.mrc", axis=1)
def _check_processed_images(self):
"""
Method to check images which have already been processed before
"""
# Create new empty internal output metadata if no record exists
if not os.path.isfile(self.proj_name + '_ctffind_mdout.yaml'):
self.meta_out = pd.DataFrame(columns=self.meta.columns)
# Read in serialised metadata and turn into DataFrame if record exists
else:
_meta_record = mdMod.read_md_yaml(project_name=self.proj_name,
job_type='ctffind',
filename=self.proj_name + '_ctffind_mdout.yaml')
self.meta_out = pd.DataFrame(_meta_record.metadata)
self.meta_out.drop_duplicates(inplace=True)
# Compare output metadata and output folder
# If a file (in specified TS) is in record but missing, remove from record
if len(self.meta_out) > 0:
self._missing = self.meta_out.loc[~self.meta_out['output'].apply(lambda x: os.path.isfile(x))]
self._missing_specified = | pd.DataFrame(columns=self.meta.columns) | pandas.DataFrame |
from datetime import datetime
import numpy as np
import pytest
import pandas as pd
from pandas import Series, date_range
import pandas._testing as tm
from pandas.tseries.offsets import BDay
class TestTruncate:
def test_truncate(self, datetime_series):
offset = BDay()
ts = datetime_series[::3]
start, end = datetime_series.index[3], datetime_series.index[6]
start_missing, end_missing = datetime_series.index[2], datetime_series.index[7]
# neither specified
truncated = ts.truncate()
tm.assert_series_equal(truncated, ts)
# both specified
expected = ts[1:3]
truncated = ts.truncate(start, end)
tm.assert_series_equal(truncated, expected)
truncated = ts.truncate(start_missing, end_missing)
tm.assert_series_equal(truncated, expected)
# start specified
expected = ts[1:]
truncated = ts.truncate(before=start)
tm.assert_series_equal(truncated, expected)
truncated = ts.truncate(before=start_missing)
tm.assert_series_equal(truncated, expected)
# end specified
expected = ts[:3]
truncated = ts.truncate(after=end)
tm.assert_series_equal(truncated, expected)
truncated = ts.truncate(after=end_missing)
tm.assert_series_equal(truncated, expected)
# corner case, empty series returned
truncated = ts.truncate(after=datetime_series.index[0] - offset)
assert len(truncated) == 0
truncated = ts.truncate(before=datetime_series.index[-1] + offset)
assert len(truncated) == 0
msg = "Truncate: 1999-12-31 00:00:00 must be after 2000-02-14 00:00:00"
with pytest.raises(ValueError, match=msg):
ts.truncate(
before=datetime_series.index[-1] + offset,
after=datetime_series.index[0] - offset,
)
def test_truncate_nonsortedindex(self):
# GH#17935
s = pd.Series(["a", "b", "c", "d", "e"], index=[5, 3, 2, 9, 0])
msg = "truncate requires a sorted index"
with pytest.raises(ValueError, match=msg):
s.truncate(before=3, after=9)
rng = pd.date_range("2011-01-01", "2012-01-01", freq="W")
ts = pd.Series(np.random.randn(len(rng)), index=rng)
msg = "truncate requires a sorted index"
with pytest.raises(ValueError, match=msg):
ts.sort_values(ascending=False).truncate(before="2011-11", after="2011-12")
@pytest.mark.parametrize(
"before, after, indices",
[(1, 2, [2, 1]), (None, 2, [2, 1, 0]), (1, None, [3, 2, 1])],
)
@pytest.mark.parametrize("klass", [pd.Int64Index, pd.DatetimeIndex])
def test_truncate_decreasing_index(self, before, after, indices, klass):
# https://github.com/pandas-dev/pandas/issues/33756
idx = klass([3, 2, 1, 0])
if klass is pd.DatetimeIndex:
before = pd.Timestamp(before) if before is not None else None
after = pd.Timestamp(after) if after is not None else None
indices = [pd.Timestamp(i) for i in indices]
values = pd.Series(range(len(idx)), index=idx)
result = values.truncate(before=before, after=after)
expected = values.loc[indices]
tm.assert_series_equal(result, expected)
def test_truncate_datetimeindex_tz(self):
# GH 9243
idx = date_range("4/1/2005", "4/30/2005", freq="D", tz="US/Pacific")
s = Series(range(len(idx)), index=idx)
result = s.truncate(datetime(2005, 4, 2), datetime(2005, 4, 4))
expected = | Series([1, 2, 3], index=idx[1:4]) | pandas.Series |
import numpy as np
import pandas as pd
import os
import sys
import matplotlib.pyplot as plt
import matplotlib
import sklearn.datasets, sklearn.decomposition
from sklearn.cluster import KMeans
from sklearn_extra.cluster import KMedoids
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
import sklearn_extra
from scipy import stats
from scipy.stats import kurtosis, skew
from collections import defaultdict
import statistics
from itertools import chain
from scipy.interpolate import interp1d
from collections import defaultdict
from nested_dict import nested_dict
def kmedoid_clusters():
editable_data_path =os.path.join(sys.path[0], 'editable_values.csv')
editable_data = pd.read_csv(editable_data_path, header=None, index_col=0, squeeze=True).to_dict()[1]
city = editable_data['city']
save_path = os.path.join(sys.path[0],'Scenario Generation', city)
representative_days_path = os.path.join(save_path,'Representative days')
if not os.path.exists(representative_days_path):
os.makedirs(representative_days_path)
folder_path = os.path.join(sys.path[0],str(city))
GTI_distribution = pd.read_csv(os.path.join(folder_path,'best_fit_GTI.csv'))
wind_speed_distribution = pd.read_csv(os.path.join(folder_path,'best_fit_wind_speed.csv'))
range_data = ['low','medium','high']
scenario_genrated = {}
scenario_probability = defaultdict(list)
solar_probability = defaultdict(list)
wind_probability = defaultdict(list)
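#assign low/medium/high probabilities to every hour according to the best-fit distribution of solar GTI and wind speed (values follow the Rice & Miller three-point approximation cited in the comments below)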
for i in range(8760):
if GTI_distribution['Mean'][i] == 0:
solar_probability['low'].append(1/3)
solar_probability['medium'].append(1/3)
solar_probability['high'].append(1/3)
## If Solar GTI is normal: from Rice & Miller low = 0.112702 = (x-loc)/scale --> =tick
elif GTI_distribution['Best fit'][i] == 'norm':
solar_probability['low'].append(0.166667)
solar_probability['medium'].append(0.666667)
solar_probability['high'].append(0.166667)
## If Solar GTI is uniform: from Rice & Miller low = 0.112702 (i - loc)/scale
elif GTI_distribution['Best fit'][i] == 'uniform':
solar_probability['low'].append(0.277778)
solar_probability['medium'].append(0.444444)
solar_probability['high'].append(0.277778)
## If Solar GTI is expon: from Rice & Miller low = 0.415775 (i - loc)/scale, scale/scale)
elif GTI_distribution['Best fit'][i] == 'expon':
solar_probability['low'].append(0.711093)
solar_probability['medium'].append(0.278518)
solar_probability['high'].append(0.010389)
if wind_speed_distribution['Mean'][i] == 0:
wind_probability['low'].append(1/3)
wind_probability['medium'].append(1/3)
wind_probability['high'].append(1/3)
## If Solar GTI is normal: from Rice & Miller low = 0.112702 = (x-loc)/scale --> =tick
elif wind_speed_distribution['Best fit'][i] == 'norm':
wind_probability['low'].append(0.166667)
wind_probability['medium'].append(0.666667)
wind_probability['high'].append(0.166667)
## If Solar GTI is uniform: from Rice & Miller low = 0.112702 (i - loc)/scale
elif wind_speed_distribution['Best fit'][i] == 'uniform':
wind_probability['low'].append(0.277778)
wind_probability['medium'].append(0.444444)
wind_probability['high'].append(0.277778)
## If Solar GTI is expon: from Rice & Miller low = 0.415775 (i - loc)/scale, scale/scale)
elif wind_speed_distribution['Best fit'][i] == 'expon':
wind_probability['low'].append(0.711093)
wind_probability['medium'].append(0.278518)
wind_probability['high'].append(0.010389)
p_solar = nested_dict()
p_wind = nested_dict()
scenario_number = {}
num_scenario = 0
#load the energy demand, solar, wind, and electricity emissions from the scenario generation files
for i_demand in range_data:
for i_solar in range_data:
for i_wind in range_data:
for i_emission in range_data:
if i_demand=='low':
p_demand = 0.277778
elif i_demand=='medium':
p_demand = 0.444444
elif i_demand=='high':
p_demand = 0.277778
if i_emission=='low':
p_emission = 0.166667
elif i_emission=='medium':
p_emission = 0.666667
elif i_emission=='high':
p_emission = 0.166667
for day in range(365):
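#normalize the hourly level probabilities into a daily probability for each level, then combine demand, solar, wind and emission probabilities into one scenario probability per day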
p_solar[i_solar][day] = sum(solar_probability[i_solar][day*24:(day+1)*24])/(sum(solar_probability[range_data[0]][day*24:(day+1)*24])+sum(solar_probability[range_data[1]][day*24:(day+1)*24])+sum(solar_probability[range_data[2]][day*24:(day+1)*24]))
p_wind[i_wind][day] = sum(wind_probability[i_wind][day*24:(day+1)*24])/(sum(wind_probability[range_data[0]][day*24:(day+1)*24])+sum(wind_probability[range_data[1]][day*24:(day+1)*24])+sum(wind_probability[range_data[2]][day*24:(day+1)*24]))
scenario_probability['D:'+i_demand+'/S:'+i_solar+'/W:'+i_wind+'/C:'+i_emission].append(p_demand*p_solar[i_solar][day]*p_wind[i_wind][day]*p_emission)
scenario_number['D:'+i_demand+'/S:'+i_solar+'/W:'+i_wind+'/C:'+i_emission]= num_scenario
num_scenario = num_scenario + 1
scenario_genrated['D:'+i_demand+'/S:'+i_solar+'/W:'+i_wind+'/C:'+i_emission] = pd.read_csv(os.path.join(save_path, 'D_'+i_demand+'_S_'+i_solar+'_W_'+i_wind+'_C_'+i_emission+'.csv'), header=None)
features_scenarios = defaultdict(list)
features_scenarios_list = []
features_probability_list = []
features_scenarios_nested = nested_dict()
k=0
days= 365
for scenario in scenario_genrated.keys():
scenario_genrated[scenario]=scenario_genrated[scenario]
for i in range(days):
if i==0:
data = scenario_genrated[scenario][1:25]
else:
data = scenario_genrated[scenario][25+(i-1)*24:25+(i)*24]
#Total electricity, heating, solar, wind, EF.
daily_list =list(chain(data[0].astype('float', copy=False),data[1].astype('float', copy=False),
data[2].astype('float', copy=False),data[3].astype('float', copy=False),data[6].astype('float', copy=False)))
features_scenarios[k*days+i] = daily_list
features_scenarios_nested[scenario][i] = features_scenarios[k*days+i]
features_scenarios_list.append(features_scenarios[k*days+i])
features_probability_list.append(scenario_probability[scenario][i])
k = k+1
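#each row of A is one daily scenario: 24 h electricity, 24 h heating, 24 h GTI, 24 h wind speed and 24 h electricity emission factor (120 features)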
A = np.asarray(features_scenarios_list)
#Standardize the scenario feature matrix (zero mean, unit variance) before PCA
standardization_data = StandardScaler()
A_scaled = standardization_data.fit_transform(A)
# Create a PCA instance: pca
pca = PCA(n_components=int(editable_data['PCA numbers']))
principalComponents = pca.fit(A_scaled)
scores_pca = pca.transform(A_scaled)
#print('Score of features', scores_pca)
#print('Explained variance ratio',pca.explained_variance_ratio_)
# Plot the explained variances
# Save components to a DataFrame
features = range(pca.n_components_)
search_optimum_feature= editable_data['Search optimum PCA']
SMALL_SIZE = 10
MEDIUM_SIZE = 12
BIGGER_SIZE = 14
plt.rcParams['axes.facecolor'] = 'white'
plt.rcParams['axes.grid'] = False
plt.rcParams['axes.edgecolor'] = 'black'
if search_optimum_feature == 'yes':
print('Defining the optimum number of features in the PCA method: ')
fig, ax = plt.subplots(figsize=(12, 6))
ax.bar(features, pca.explained_variance_ratio_.cumsum(), color='tab:blue')
ax.set_xlabel('PCA features',fontsize=BIGGER_SIZE)
ax.set_ylabel('Cumulative explained variance',fontsize=BIGGER_SIZE)
ax.set_xticks(features)
ax.set_title('The user should set a limit on the explained variance value and then, select the optimum number of PCA features',fontsize=BIGGER_SIZE)
plt.savefig(os.path.join(sys.path[0],'Explained variance vs PCA features.png'),dpi=300,facecolor='w')
plt.close()
print('"Explained variance vs PCA features" figure is saved in the directory folder')
print('You can use the figure to select the optimum number of features' )
print('You should enter the new optimum number of features in EditableFile.csv file and re-run this part')
plt.close()
PCA_components = pd.DataFrame(scores_pca)
inertia_list = []
search_optimum_cluster = editable_data['Search optimum clusters'] # if I want to search for the optimum number of clusters: 1 is yes, 0 is no
cluster_range = range(2,20,1)
if search_optimum_cluster=='yes':
print('Defining the optimum number of clusters: ')
fig, ax = plt.subplots(figsize=(12, 6))
for cluster_numbers in cluster_range:
kmedoids = KMedoids(n_clusters=cluster_numbers, init="random",max_iter=1000,random_state=0).fit(scores_pca)
inertia_list.append(kmedoids.inertia_)
plt.scatter(cluster_numbers,kmedoids.inertia_)
print('Cluster number:', cluster_numbers, ' Inertia of the cluster:', int(kmedoids.inertia_))
ax.set_xlabel('Number of clusters',fontsize=BIGGER_SIZE)
ax.set_ylabel('Inertia',fontsize=BIGGER_SIZE)
ax.set_title('The user should use "Elbow method" to select the number of optimum clusters',fontsize=BIGGER_SIZE)
ax.plot(list(cluster_range),inertia_list)
ax.set_xticks(np.arange(2,20,1))
plt.savefig(os.path.join(sys.path[0], 'Inertia vs Clusters.png'),dpi=300,facecolor='w')
plt.close()
print('"Inertia vs Clusters" figure is saved in the directory folder')
print('You can use the figure to select the optimum number of clusters' )
print('You should enter the new optimum number of clusters in EditableFile.csv file and re-run this part')
cluster_numbers= int(editable_data['Cluster numbers'])
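#k-medoids is fitted both on the original feature matrix (kmedoids_org) and on the PCA scores (kmedoids); the PCA-space labels are used below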
kmedoids_org = KMedoids(n_clusters=cluster_numbers, init="random",max_iter=1000,random_state=4).fit(A)
kmedoids = KMedoids(n_clusters=cluster_numbers, init="random",max_iter=1000,random_state=4).fit(scores_pca)
label = kmedoids.fit_predict(scores_pca)
#filter rows of original data
probability_label = defaultdict(list)
index_label = defaultdict(list)
index_label_all = []
filtered_label={}
for i in range(cluster_numbers):
filtered_label[i] = scores_pca[label == i]
index_cluster=np.where(label==i)
if len(filtered_label[i])!=0:
index_cluster = index_cluster[0]
for j in index_cluster:
probability_label[i].append(features_probability_list[j])
index_label[i].append(j)
index_label_all.append(j)
else:
probability_label[i].append(0)
sum_probability = []
for key in probability_label.keys():
sum_probability.append(sum(probability_label[key]))
plt.scatter(filtered_label[i][:,0] , filtered_label[i][:,1] )
plt.xlabel('PCA 1')
plt.ylabel('PCA 2')
#plt.show()
plt.close()
plt.scatter(PCA_components[0], PCA_components[1], alpha=.1, color='black')
plt.xlabel('PCA 1')
plt.ylabel('PCA 2')
#plt.show()
plt.close()
#print(kmedoids.predict([[0,0,0], [4,4,4]]))
#print(kmedoids.cluster_centers_,kmedoids.cluster_centers_[0],len(kmedoids.cluster_centers_))
scores_pca_list={}
clusters={}
clusters_list = []
data_labels={}
for center in range(len(kmedoids.cluster_centers_)):
clusters['cluster centers '+str(center)]= kmedoids.cluster_centers_[center]
clusters_list.append(kmedoids.cluster_centers_[center].tolist())
for scenario in range(len(scores_pca)):
scores_pca_list[scenario]=scores_pca[scenario].tolist()
scores_pca_list[scenario].insert(0,kmedoids.labels_[scenario])
data_labels['labels '+str(scenario)]= scores_pca_list[scenario]
df_clusters= pd.DataFrame(clusters)
df_labels = pd.DataFrame(data_labels)
df_clusters.to_csv(os.path.join(representative_days_path, 'cluster_centers_C_'+str(len(kmedoids.cluster_centers_))+'_L_'+str(len(kmedoids.labels_))+'.csv'), index=False)
df_labels.to_csv(os.path.join(representative_days_path, 'labels_C_'+str(len(kmedoids.cluster_centers_))+'_L_'+str(len(kmedoids.labels_))+'.csv'), index=False)
#Reversing PCA using two methods:
#Reversing the cluster centers using method 1 (their results are the same)
clusters_reverse = pca.inverse_transform(kmedoids.cluster_centers_)
cluster_reverse_new = []
#Reversing the cluster centers using method 2 (their results are the same)
scores_pca_reverse = pca.inverse_transform(scores_pca)
for cluster_iterate in range(len(clusters_list)):
for pca_days in range(len(scores_pca)):
results_comparison = np.array_equal(np.array(clusters_list[cluster_iterate]), np.array(scores_pca[pca_days]))
if results_comparison:
cluster_reverse_new.append(scores_pca_reverse[pca_days])
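#undo the standardization so the representative-day profiles are back in physical units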
Scenario_generated_new = standardization_data.inverse_transform(clusters_reverse)
#print('15 representative days',clusters_reverse[0][0],Scenario_generated_new[0][0],standardization_data.mean_[0],standardization_data.var_[0])
representative_day_all = {}
total_labels = []
represent_gaps = {}
scenario_data = {}
for key in filtered_label.keys():
total_labels.append(len(filtered_label[key]))
#print(len(probability_label[0])) 1990
#print(len(filtered_label[0])) 1990
for representative_day in range(len(Scenario_generated_new)):
represent_gaps = {}
scenario_data = {}
for i in range(120):
if Scenario_generated_new[representative_day][i]<0:
Scenario_generated_new[representative_day][i] = 0
for k in range(5): # 5 uncertain inputs
scenario_data[k] = Scenario_generated_new[representative_day][24*k:24*(k+1)].copy()
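#locate runs of zero values (gaps) between the first and last nonzero hour of this 24-h profile, then fill them below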
min_non_z = np.min(np.nonzero(scenario_data[k]))
max_non_z = np.max(np.nonzero(scenario_data[k]))
represent_gaps[k]= [i for i, x in enumerate(scenario_data[k][min_non_z:max_non_z+1]) if x == 0]
ranges = sum((list(t) for t in zip(represent_gaps[k], represent_gaps[k][1:]) if t[0]+1 != t[1]), [])
iranges = iter(represent_gaps[k][0:1] + ranges + represent_gaps[k][-1:])
#print('Present gaps are: ', representative_day,k, 'gaps', ', '.join([str(n) + '-' + str(next(iranges)) for n in iranges]))
iranges = iter(represent_gaps[k][0:1] + ranges + represent_gaps[k][-1:])
for n in iranges:
next_n = next(iranges)
if (next_n-n) == 0: #for data gaps of 1 hour, get the average value
scenario_data[k][n+min_non_z] = (scenario_data[k][min_non_z+n+1]+scenario_data[k][min_non_z+n-1])/2
elif (next_n-n) > 0 and (next_n-n) <= 6: #for data gaps of up to six hours, fill by linear interpolation between the bounding nonzero hours
f_interpol_short= interp1d([n-1,next_n+1], [scenario_data[k][min_non_z+n-1],scenario_data[k][min_non_z+next_n+1]])
for m in range(n,next_n+1):
scenario_data[k][m+min_non_z] = f_interpol_short(m)
data_represent_days_modified={'Electricity total (kWh)': scenario_data[0],
'Heating (kWh)': scenario_data[1],
'GTI (Wh/m^2)': scenario_data[2],
'Wind Speed (m/s)': scenario_data[3],
'Electricity EF (kg/kWh)': scenario_data[4],
'Labels': len(filtered_label[representative_day]),
'Percent %': round(sum_probability[representative_day]*100/sum(sum_probability),4)}
#print(np.mean(Scenario_generated_new[representative_day][0:24]))
df_represent_days_modified=pd.DataFrame(data_represent_days_modified)
df_represent_days_modified.to_csv(os.path.join(representative_days_path,'Represent_days_modified_'+str(representative_day)+ '.csv'), index=False)
print('cluster evaluation starts')
max_heating_scenarios_nested = nested_dict()
max_electricity_scenarios_nested = nested_dict()
total_heating_scenarios = []
total_electricity_scenarios = []
max_electricity_scenarios_nested_list = defaultdict(list)
max_heating_scenarios_nested_list = defaultdict(list)
accuracy_design_day = 0.99
design_day_heating = []
design_day_electricity = []
representative_day_max = {}
electricity_design_day = {}
heating_design_day = {}
i_demand=range_data[2]
i_solar=range_data[1]
i_wind=range_data[1]
i_emission=range_data[1]
scenario='D:'+i_demand+'/S:'+i_solar+'/W:'+i_wind+'/C:'+i_emission
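#for the high-demand / medium-solar / medium-wind / medium-emission scenario, flag hours whose electricity or heating demand exceeds every representative day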
for day in range(365):
for i in range(24):
k_elect=0
list_k_electricity = []
k_heat=0
list_k_heating = []
for represent in range(cluster_numbers):
representative_day_max[represent] = pd.read_csv(os.path.join(representative_days_path,'Represent_days_modified_'+str(represent)+ '.csv'))
electricity_demand = representative_day_max[represent]['Electricity total (kWh)'] #kWh
heating_demand = representative_day_max[represent]['Heating (kWh)'] #kWh
if features_scenarios_nested[scenario][day][0:24][i]>electricity_demand[i]:
k_elect=1
list_k_electricity.append(k_elect)
k_elect=0
if features_scenarios_nested[scenario][day][24:48][i]>heating_demand[i]:
k_heat=1
list_k_heating.append(k_heat)
k_heat=0
if sum(list_k_electricity)==cluster_numbers: #This hour does not meet by any of the representative days
max_electricity_scenarios_nested_list[i].append(features_scenarios_nested[scenario][day][0:24][i])
total_electricity_scenarios.append(features_scenarios_nested[scenario][day][0:24][i])
if sum(list_k_heating)==cluster_numbers: #This hour does not meet by any of the representative days
max_heating_scenarios_nested_list[i].append(features_scenarios_nested[scenario][day][24:48][i])
total_heating_scenarios.append(features_scenarios_nested[scenario][day][24:48][i])
total_electricity_scenarios.sort(reverse=True)
total_heating_scenarios.sort(reverse=True)
max_electricity_hour = total_electricity_scenarios[35]
max_heating_hour = total_heating_scenarios[35]
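#the 36th-largest uncovered hourly value is used as a cutoff when building the electricity and heating design days below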
i_demand=range_data[2]
i_solar=range_data[1]
i_wind=range_data[1]
i_emission=range_data[1]
scenario='D:'+i_demand+'/S:'+i_solar+'/W:'+i_wind+'/C:'+i_emission
design_day_heating = []
design_day_electricity = []
for i in range(24):
design_day_electricity.append(np.max([j for j in max_electricity_scenarios_nested_list[i] if j<max_electricity_hour]))
design_day_heating.append(np.max([j for j in max_heating_scenarios_nested_list[i] if j<max_heating_hour]))
representative_day_max = {}
electricity_demand_total = defaultdict(list)
heating_demand_total = defaultdict(list)
heating_demand_max = {}
electricity_demand_max = {}
for represent in range(cluster_numbers):
representative_day_max[represent] = pd.read_csv(os.path.join(representative_days_path,'Represent_days_modified_'+str(represent)+ '.csv'))
electricity_demand = representative_day_max[represent]['Electricity total (kWh)'] #kWh
heating_demand = representative_day_max[represent]['Heating (kWh)'] #kWh
#hours_representative_day= round(sum_probability[representative_day]/sum(sum_probability),4)*8760
heating_demand_max[represent]= np.mean(heating_demand)
electricity_demand_max[represent]= np.mean(electricity_demand)
high_electricity_index = []
high_heating_index = []
high_electricity_value = []
high_heating_value = []
key_max_electricity=max(electricity_demand_max, key=electricity_demand_max.get)
key_max_heating=max(heating_demand_max, key=heating_demand_max.get)
for key, value in max_electricity_scenarios_nested.items():
for inner_key, inner_value in max_electricity_scenarios_nested[key].items():
if inner_value>electricity_demand_max[key_max_electricity]:
high_electricity_index.append(scenario_number[key]*365+inner_key)
high_electricity_value.append(inner_value)
for key, value in max_heating_scenarios_nested.items():
for inner_key, inner_value in max_heating_scenarios_nested[key].items():
if inner_value>heating_demand_max[key_max_heating]:
high_heating_index.append(scenario_number[key]*365+inner_key)
high_heating_value.append(inner_value)
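#append the two design days (peak electricity and peak heating) as extra representative days with their own weights and labels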
sum_probability.append(0.5*len(total_electricity_scenarios)/len(index_label_all)*365)
sum_probability.append(len(total_heating_scenarios)/len(index_label_all)*365)
filtered_label[cluster_numbers]=len(total_electricity_scenarios)
filtered_label[cluster_numbers+1]=len(total_heating_scenarios)
representative_day = cluster_numbers
data_represent_days_modified={'Electricity total (kWh)': design_day_electricity,
'Heating (kWh)': representative_day_max[key_max_electricity]['Heating (kWh)'],
'GTI (Wh/m^2)': representative_day_max[key_max_electricity]['GTI (Wh/m^2)'],
'Wind Speed (m/s)': representative_day_max[key_max_electricity]['Wind Speed (m/s)'],
'Electricity EF (kg/kWh)':representative_day_max[key_max_electricity]['Electricity EF (kg/kWh)'],
'Labels': filtered_label[cluster_numbers],
'Percent %': round(sum_probability[representative_day]*100/sum(sum_probability),4)}
df_represent_days_modified=pd.DataFrame(data_represent_days_modified)
df_represent_days_modified.to_csv(os.path.join(representative_days_path,'Represent_days_modified_'+str(representative_day)+ '.csv'), index=False)
representative_day = cluster_numbers+1
data_represent_days_modified={'Electricity total (kWh)': representative_day_max[key_max_heating]['Electricity total (kWh)'],
'Heating (kWh)': design_day_heating,
'GTI (Wh/m^2)': representative_day_max[key_max_heating]['GTI (Wh/m^2)'],
'Wind Speed (m/s)': representative_day_max[key_max_heating]['Wind Speed (m/s)'],
'Electricity EF (kg/kWh)':representative_day_max[key_max_heating]['Electricity EF (kg/kWh)'],
'Labels': filtered_label[cluster_numbers+1],
'Percent %': round(sum_probability[representative_day]*100/sum(sum_probability),4)}
df_represent_days_modified= | pd.DataFrame(data_represent_days_modified) | pandas.DataFrame |
#Import libraries
from sklearn.model_selection import train_test_split
import sys, os, re, csv, codecs, numpy as np, pandas as pd
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Dense, Input, LSTM, Embedding, Dropout, Activation
from keras.layers import Bidirectional, GlobalMaxPool1D
from keras.models import Model
from keras import initializers, regularizers, constraints, optimizers, layers
from keras.utils import to_categorical
from keras.regularizers import l2,l1
# Keras
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers import Dense, Flatten, LSTM, Conv1D, MaxPooling1D, Dropout, Activation
from keras.layers.embeddings import Embedding
from keras.layers.normalization import BatchNormalization
import matplotlib.pyplot as plt
from keras.callbacks import History,EarlyStopping,ModelCheckpoint
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.preprocessing import Imputer
# Others
import nltk
import string
from sklearn.manifold import TSNE
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
path = 'data/'
EMBEDDING_FILE=f'glove.6B.300d.txt'
DATA_FILE=f'{path}Clean_Disasters_T_79187_.csv'
TRAIN_DATA_FILE=f'{path}train.csv'
TEST_DATA_FILE=f'{path}Clean_MODEL2__Earth_Hurr_for_27434.csv'
def read_dataset():
dataset = | pd.read_csv('data/Clean_Disasters_T_79187_.csv',delimiter = ',' ,converters={'text': str}, encoding = "ISO-8859-1") | pandas.read_csv |
from io import StringIO
import operator
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series, date_range
import pandas._testing as tm
from pandas.core.computation.check import _NUMEXPR_INSTALLED
PARSERS = "python", "pandas"
ENGINES = "python", pytest.param("numexpr", marks=td.skip_if_no_ne)
@pytest.fixture(params=PARSERS, ids=lambda x: x)
def parser(request):
return request.param
@pytest.fixture(params=ENGINES, ids=lambda x: x)
def engine(request):
return request.param
def skip_if_no_pandas_parser(parser):
if parser != "pandas":
pytest.skip(f"cannot evaluate with parser {repr(parser)}")
class TestCompat:
def setup_method(self, method):
self.df = DataFrame({"A": [1, 2, 3]})
self.expected1 = self.df[self.df.A > 0]
self.expected2 = self.df.A + 1
def test_query_default(self):
# GH 12749
# this should always work, whether _NUMEXPR_INSTALLED or not
df = self.df
result = df.query("A>0")
tm.assert_frame_equal(result, self.expected1)
result = df.eval("A+1")
tm.assert_series_equal(result, self.expected2, check_names=False)
def test_query_None(self):
df = self.df
result = df.query("A>0", engine=None)
tm.assert_frame_equal(result, self.expected1)
result = df.eval("A+1", engine=None)
tm.assert_series_equal(result, self.expected2, check_names=False)
def test_query_python(self):
df = self.df
result = df.query("A>0", engine="python")
tm.assert_frame_equal(result, self.expected1)
result = df.eval("A+1", engine="python")
tm.assert_series_equal(result, self.expected2, check_names=False)
def test_query_numexpr(self):
df = self.df
if _NUMEXPR_INSTALLED:
result = df.query("A>0", engine="numexpr")
tm.assert_frame_equal(result, self.expected1)
result = df.eval("A+1", engine="numexpr")
tm.assert_series_equal(result, self.expected2, check_names=False)
else:
with pytest.raises(ImportError):
df.query("A>0", engine="numexpr")
with pytest.raises(ImportError):
df.eval("A+1", engine="numexpr")
class TestDataFrameEval:
# smaller hits python, larger hits numexpr
@pytest.mark.parametrize("n", [4, 4000])
@pytest.mark.parametrize(
"op_str,op,rop",
[
("+", "__add__", "__radd__"),
("-", "__sub__", "__rsub__"),
("*", "__mul__", "__rmul__"),
("/", "__truediv__", "__rtruediv__"),
],
)
def test_ops(self, op_str, op, rop, n):
# tst ops and reversed ops in evaluation
# GH7198
df = DataFrame(1, index=range(n), columns=list("abcd"))
df.iloc[0] = 2
m = df.mean()
base = DataFrame( # noqa
np.tile(m.values, n).reshape(n, -1), columns=list("abcd")
)
expected = eval(f"base {op_str} df")
# ops as strings
result = eval(f"m {op_str} df")
tm.assert_frame_equal(result, expected)
# these are commutative
if op in ["+", "*"]:
result = getattr(df, op)(m)
tm.assert_frame_equal(result, expected)
# these are not
elif op in ["-", "/"]:
result = getattr(df, rop)(m)
tm.assert_frame_equal(result, expected)
def test_dataframe_sub_numexpr_path(self):
# GH7192: Note we need a large number of rows to ensure this
# goes through the numexpr path
df = DataFrame(dict(A=np.random.randn(25000)))
df.iloc[0:5] = np.nan
expected = 1 - np.isnan(df.iloc[0:25])
result = (1 - np.isnan(df)).iloc[0:25]
tm.assert_frame_equal(result, expected)
def test_query_non_str(self):
# GH 11485
df = pd.DataFrame({"A": [1, 2, 3], "B": ["a", "b", "b"]})
msg = "expr must be a string to be evaluated"
with pytest.raises(ValueError, match=msg):
df.query(lambda x: x.B == "b")
with pytest.raises(ValueError, match=msg):
df.query(111)
def test_query_empty_string(self):
# GH 13139
df = pd.DataFrame({"A": [1, 2, 3]})
msg = "expr cannot be an empty string"
with pytest.raises(ValueError, match=msg):
df.query("")
def test_eval_resolvers_as_list(self):
# GH 14095
df = DataFrame(np.random.randn(10, 2), columns=list("ab"))
dict1 = {"a": 1}
dict2 = {"b": 2}
assert df.eval("a + b", resolvers=[dict1, dict2]) == dict1["a"] + dict2["b"]
assert pd.eval("a + b", resolvers=[dict1, dict2]) == dict1["a"] + dict2["b"]
class TestDataFrameQueryWithMultiIndex:
def test_query_with_named_multiindex(self, parser, engine):
skip_if_no_pandas_parser(parser)
a = np.random.choice(["red", "green"], size=10)
b = np.random.choice(["eggs", "ham"], size=10)
index = MultiIndex.from_arrays([a, b], names=["color", "food"])
df = DataFrame(np.random.randn(10, 2), index=index)
ind = Series(
df.index.get_level_values("color").values, index=index, name="color"
)
# equality
res1 = df.query('color == "red"', parser=parser, engine=engine)
res2 = df.query('"red" == color', parser=parser, engine=engine)
exp = df[ind == "red"]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
# inequality
res1 = df.query('color != "red"', parser=parser, engine=engine)
res2 = df.query('"red" != color', parser=parser, engine=engine)
exp = df[ind != "red"]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
# list equality (really just set membership)
res1 = df.query('color == ["red"]', parser=parser, engine=engine)
res2 = df.query('["red"] == color', parser=parser, engine=engine)
exp = df[ind.isin(["red"])]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
res1 = df.query('color != ["red"]', parser=parser, engine=engine)
res2 = df.query('["red"] != color', parser=parser, engine=engine)
exp = df[~ind.isin(["red"])]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
# in/not in ops
res1 = df.query('["red"] in color', parser=parser, engine=engine)
res2 = df.query('"red" in color', parser=parser, engine=engine)
exp = df[ind.isin(["red"])]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
res1 = df.query('["red"] not in color', parser=parser, engine=engine)
res2 = df.query('"red" not in color', parser=parser, engine=engine)
exp = df[~ind.isin(["red"])]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
def test_query_with_unnamed_multiindex(self, parser, engine):
skip_if_no_pandas_parser(parser)
a = np.random.choice(["red", "green"], size=10)
b = np.random.choice(["eggs", "ham"], size=10)
index = MultiIndex.from_arrays([a, b])
df = DataFrame(np.random.randn(10, 2), index=index)
ind = Series(df.index.get_level_values(0).values, index=index)
res1 = df.query('ilevel_0 == "red"', parser=parser, engine=engine)
res2 = df.query('"red" == ilevel_0', parser=parser, engine=engine)
exp = df[ind == "red"]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
# inequality
res1 = df.query('ilevel_0 != "red"', parser=parser, engine=engine)
res2 = df.query('"red" != ilevel_0', parser=parser, engine=engine)
exp = df[ind != "red"]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
# list equality (really just set membership)
res1 = df.query('ilevel_0 == ["red"]', parser=parser, engine=engine)
res2 = df.query('["red"] == ilevel_0', parser=parser, engine=engine)
exp = df[ind.isin(["red"])]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
res1 = df.query('ilevel_0 != ["red"]', parser=parser, engine=engine)
res2 = df.query('["red"] != ilevel_0', parser=parser, engine=engine)
exp = df[~ind.isin(["red"])]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
# in/not in ops
res1 = df.query('["red"] in ilevel_0', parser=parser, engine=engine)
res2 = df.query('"red" in ilevel_0', parser=parser, engine=engine)
exp = df[ind.isin(["red"])]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
res1 = df.query('["red"] not in ilevel_0', parser=parser, engine=engine)
res2 = df.query('"red" not in ilevel_0', parser=parser, engine=engine)
exp = df[~ind.isin(["red"])]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
# ## LEVEL 1
ind = Series(df.index.get_level_values(1).values, index=index)
res1 = df.query('ilevel_1 == "eggs"', parser=parser, engine=engine)
res2 = df.query('"eggs" == ilevel_1', parser=parser, engine=engine)
exp = df[ind == "eggs"]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
# inequality
res1 = df.query('ilevel_1 != "eggs"', parser=parser, engine=engine)
res2 = df.query('"eggs" != ilevel_1', parser=parser, engine=engine)
exp = df[ind != "eggs"]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
# list equality (really just set membership)
res1 = df.query('ilevel_1 == ["eggs"]', parser=parser, engine=engine)
res2 = df.query('["eggs"] == ilevel_1', parser=parser, engine=engine)
exp = df[ind.isin(["eggs"])]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
res1 = df.query('ilevel_1 != ["eggs"]', parser=parser, engine=engine)
res2 = df.query('["eggs"] != ilevel_1', parser=parser, engine=engine)
exp = df[~ind.isin(["eggs"])]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
# in/not in ops
res1 = df.query('["eggs"] in ilevel_1', parser=parser, engine=engine)
res2 = df.query('"eggs" in ilevel_1', parser=parser, engine=engine)
exp = df[ind.isin(["eggs"])]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
res1 = df.query('["eggs"] not in ilevel_1', parser=parser, engine=engine)
res2 = df.query('"eggs" not in ilevel_1', parser=parser, engine=engine)
exp = df[~ind.isin(["eggs"])]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
def test_query_with_partially_named_multiindex(self, parser, engine):
skip_if_no_pandas_parser(parser)
a = np.random.choice(["red", "green"], size=10)
b = np.arange(10)
index = MultiIndex.from_arrays([a, b])
index.names = [None, "rating"]
df = DataFrame(np.random.randn(10, 2), index=index)
res = df.query("rating == 1", parser=parser, engine=engine)
ind = Series(
df.index.get_level_values("rating").values, index=index, name="rating"
)
exp = df[ind == 1]
tm.assert_frame_equal(res, exp)
res = df.query("rating != 1", parser=parser, engine=engine)
ind = Series(
df.index.get_level_values("rating").values, index=index, name="rating"
)
exp = df[ind != 1]
tm.assert_frame_equal(res, exp)
res = df.query('ilevel_0 == "red"', parser=parser, engine=engine)
ind = Series(df.index.get_level_values(0).values, index=index)
exp = df[ind == "red"]
tm.assert_frame_equal(res, exp)
res = df.query('ilevel_0 != "red"', parser=parser, engine=engine)
ind = Series(df.index.get_level_values(0).values, index=index)
exp = df[ind != "red"]
tm.assert_frame_equal(res, exp)
def test_query_multiindex_get_index_resolvers(self):
df = tm.makeCustomDataframe(
10, 3, r_idx_nlevels=2, r_idx_names=["spam", "eggs"]
)
resolvers = df._get_index_resolvers()
def to_series(mi, level):
level_values = mi.get_level_values(level)
s = level_values.to_series()
s.index = mi
return s
col_series = df.columns.to_series()
expected = {
"index": df.index,
"columns": col_series,
"spam": to_series(df.index, "spam"),
"eggs": to_series(df.index, "eggs"),
"C0": col_series,
}
for k, v in resolvers.items():
if isinstance(v, Index):
assert v.is_(expected[k])
elif isinstance(v, Series):
tm.assert_series_equal(v, expected[k])
else:
raise AssertionError("object must be a Series or Index")
@td.skip_if_no_ne
class TestDataFrameQueryNumExprPandas:
@classmethod
def setup_class(cls):
cls.engine = "numexpr"
cls.parser = "pandas"
@classmethod
def teardown_class(cls):
del cls.engine, cls.parser
def test_date_query_with_attribute_access(self):
engine, parser = self.engine, self.parser
skip_if_no_pandas_parser(parser)
df = DataFrame(np.random.randn(5, 3))
df["dates1"] = date_range("1/1/2012", periods=5)
df["dates2"] = date_range("1/1/2013", periods=5)
df["dates3"] = date_range("1/1/2014", periods=5)
res = df.query(
"@df.dates1 < 20130101 < @df.dates3", engine=engine, parser=parser
)
expec = df[(df.dates1 < "20130101") & ("20130101" < df.dates3)]
tm.assert_frame_equal(res, expec)
def test_date_query_no_attribute_access(self):
engine, parser = self.engine, self.parser
df = DataFrame(np.random.randn(5, 3))
df["dates1"] = date_range("1/1/2012", periods=5)
df["dates2"] = date_range("1/1/2013", periods=5)
df["dates3"] = date_range("1/1/2014", periods=5)
res = df.query("dates1 < 20130101 < dates3", engine=engine, parser=parser)
expec = df[(df.dates1 < "20130101") & ("20130101" < df.dates3)]
tm.assert_frame_equal(res, expec)
def test_date_query_with_NaT(self):
engine, parser = self.engine, self.parser
n = 10
df = DataFrame(np.random.randn(n, 3))
df["dates1"] = date_range("1/1/2012", periods=n)
df["dates2"] = date_range("1/1/2013", periods=n)
df["dates3"] = date_range("1/1/2014", periods=n)
df.loc[np.random.rand(n) > 0.5, "dates1"] = pd.NaT
df.loc[np.random.rand(n) > 0.5, "dates3"] = pd.NaT
res = df.query("dates1 < 20130101 < dates3", engine=engine, parser=parser)
expec = df[(df.dates1 < "20130101") & ("20130101" < df.dates3)]
tm.assert_frame_equal(res, expec)
def test_date_index_query(self):
engine, parser = self.engine, self.parser
n = 10
df = DataFrame(np.random.randn(n, 3))
df["dates1"] = date_range("1/1/2012", periods=n)
df["dates3"] = date_range("1/1/2014", periods=n)
df.set_index("dates1", inplace=True, drop=True)
res = df.query("index < 20130101 < dates3", engine=engine, parser=parser)
expec = df[(df.index < "20130101") & ("20130101" < df.dates3)]
tm.assert_frame_equal(res, expec)
def test_date_index_query_with_NaT(self):
engine, parser = self.engine, self.parser
n = 10
df = DataFrame(np.random.randn(n, 3))
df["dates1"] = date_range("1/1/2012", periods=n)
df["dates3"] = date_range("1/1/2014", periods=n)
df.iloc[0, 0] = pd.NaT
df.set_index("dates1", inplace=True, drop=True)
res = df.query("index < 20130101 < dates3", engine=engine, parser=parser)
expec = df[(df.index < "20130101") & ("20130101" < df.dates3)]
tm.assert_frame_equal(res, expec)
def test_date_index_query_with_NaT_duplicates(self):
engine, parser = self.engine, self.parser
n = 10
d = {}
d["dates1"] = date_range("1/1/2012", periods=n)
d["dates3"] = date_range("1/1/2014", periods=n)
df = DataFrame(d)
df.loc[np.random.rand(n) > 0.5, "dates1"] = pd.NaT
df.set_index("dates1", inplace=True, drop=True)
res = df.query("dates1 < 20130101 < dates3", engine=engine, parser=parser)
expec = df[(df.index.to_series() < "20130101") & ("20130101" < df.dates3)]
tm.assert_frame_equal(res, expec)
def test_date_query_with_non_date(self):
engine, parser = self.engine, self.parser
n = 10
df = DataFrame(
{"dates": date_range("1/1/2012", periods=n), "nondate": np.arange(n)}
)
result = df.query("dates == nondate", parser=parser, engine=engine)
assert len(result) == 0
result = df.query("dates != nondate", parser=parser, engine=engine)
tm.assert_frame_equal(result, df)
for op in ["<", ">", "<=", ">="]:
with pytest.raises(TypeError):
df.query(f"dates {op} nondate", parser=parser, engine=engine)
def test_query_syntax_error(self):
engine, parser = self.engine, self.parser
df = DataFrame({"i": range(10), "+": range(3, 13), "r": range(4, 14)})
with pytest.raises(SyntaxError):
df.query("i - +", engine=engine, parser=parser)
def test_query_scope(self):
from pandas.core.computation.ops import UndefinedVariableError
engine, parser = self.engine, self.parser
skip_if_no_pandas_parser(parser)
df = DataFrame(np.random.randn(20, 2), columns=list("ab"))
a, b = 1, 2 # noqa
res = df.query("a > b", engine=engine, parser=parser)
expected = df[df.a > df.b]
tm.assert_frame_equal(res, expected)
res = df.query("@a > b", engine=engine, parser=parser)
expected = df[a > df.b]
tm.assert_frame_equal(res, expected)
# no local variable c
with pytest.raises(
UndefinedVariableError, match="local variable 'c' is not defined"
):
df.query("@a > b > @c", engine=engine, parser=parser)
# no column named 'c'
with pytest.raises(UndefinedVariableError, match="name 'c' is not defined"):
df.query("@a > b > c", engine=engine, parser=parser)
def test_query_doesnt_pickup_local(self):
from pandas.core.computation.ops import UndefinedVariableError
engine, parser = self.engine, self.parser
n = m = 10
df = DataFrame(np.random.randint(m, size=(n, 3)), columns=list("abc"))
# we don't pick up the local 'sin'
with pytest.raises(UndefinedVariableError, match="name 'sin' is not defined"):
df.query("sin > 5", engine=engine, parser=parser)
def test_query_builtin(self):
from pandas.core.computation.engines import NumExprClobberingError
engine, parser = self.engine, self.parser
n = m = 10
df = DataFrame(np.random.randint(m, size=(n, 3)), columns=list("abc"))
df.index.name = "sin"
msg = "Variables in expression.+"
with pytest.raises(NumExprClobberingError, match=msg):
df.query("sin > 5", engine=engine, parser=parser)
def test_query(self):
engine, parser = self.engine, self.parser
df = DataFrame(np.random.randn(10, 3), columns=["a", "b", "c"])
tm.assert_frame_equal(
df.query("a < b", engine=engine, parser=parser), df[df.a < df.b]
)
tm.assert_frame_equal(
df.query("a + b > b * c", engine=engine, parser=parser),
df[df.a + df.b > df.b * df.c],
)
def test_query_index_with_name(self):
engine, parser = self.engine, self.parser
df = DataFrame(
np.random.randint(10, size=(10, 3)),
index=Index(range(10), name="blob"),
columns=["a", "b", "c"],
)
res = df.query("(blob < 5) & (a < b)", engine=engine, parser=parser)
expec = df[(df.index < 5) & (df.a < df.b)]
tm.assert_frame_equal(res, expec)
res = df.query("blob < b", engine=engine, parser=parser)
expec = df[df.index < df.b]
tm.assert_frame_equal(res, expec)
def test_query_index_without_name(self):
engine, parser = self.engine, self.parser
df = DataFrame(
np.random.randint(10, size=(10, 3)),
index=range(10),
columns=["a", "b", "c"],
)
# "index" should refer to the index
res = df.query("index < b", engine=engine, parser=parser)
expec = df[df.index < df.b]
tm.assert_frame_equal(res, expec)
# test against a scalar
res = df.query("index < 5", engine=engine, parser=parser)
expec = df[df.index < 5]
tm.assert_frame_equal(res, expec)
def test_nested_scope(self):
engine = self.engine
parser = self.parser
skip_if_no_pandas_parser(parser)
df = DataFrame(np.random.randn(5, 3))
df2 = DataFrame(np.random.randn(5, 3))
expected = df[(df > 0) & (df2 > 0)]
result = df.query("(@df > 0) & (@df2 > 0)", engine=engine, parser=parser)
tm.assert_frame_equal(result, expected)
result = pd.eval("df[df > 0 and df2 > 0]", engine=engine, parser=parser)
tm.assert_frame_equal(result, expected)
result = pd.eval(
"df[df > 0 and df2 > 0 and df[df > 0] > 0]", engine=engine, parser=parser
)
expected = df[(df > 0) & (df2 > 0) & (df[df > 0] > 0)]
tm.assert_frame_equal(result, expected)
result = pd.eval("df[(df>0) & (df2>0)]", engine=engine, parser=parser)
expected = df.query("(@df>0) & (@df2>0)", engine=engine, parser=parser)
tm.assert_frame_equal(result, expected)
def test_nested_raises_on_local_self_reference(self):
from pandas.core.computation.ops import UndefinedVariableError
df = DataFrame(np.random.randn(5, 3))
# can't reference ourself b/c we're a local so @ is necessary
with pytest.raises(UndefinedVariableError, match="name 'df' is not defined"):
df.query("df > 0", engine=self.engine, parser=self.parser)
def test_local_syntax(self):
skip_if_no_pandas_parser(self.parser)
engine, parser = self.engine, self.parser
df = DataFrame(np.random.randn(100, 10), columns=list("abcdefghij"))
b = 1
expect = df[df.a < b]
result = df.query("a < @b", engine=engine, parser=parser)
tm.assert_frame_equal(result, expect)
expect = df[df.a < df.b]
result = df.query("a < b", engine=engine, parser=parser)
tm.assert_frame_equal(result, expect)
def test_chained_cmp_and_in(self):
skip_if_no_pandas_parser(self.parser)
engine, parser = self.engine, self.parser
cols = list("abc")
df = DataFrame(np.random.randn(100, len(cols)), columns=cols)
res = df.query(
"a < b < c and a not in b not in c", engine=engine, parser=parser
)
ind = (
(df.a < df.b) & (df.b < df.c) & ~df.b.isin(df.a) & ~df.c.isin(df.b)
) # noqa
expec = df[ind]
tm.assert_frame_equal(res, expec)
def test_local_variable_with_in(self):
engine, parser = self.engine, self.parser
skip_if_no_pandas_parser(parser)
a = Series(np.random.randint(3, size=15), name="a")
b = Series(np.random.randint(10, size=15), name="b")
df = DataFrame({"a": a, "b": b})
expected = df.loc[(df.b - 1).isin(a)]
result = df.query("b - 1 in a", engine=engine, parser=parser)
tm.assert_frame_equal(expected, result)
b = Series(np.random.randint(10, size=15), name="b")
expected = df.loc[(b - 1).isin(a)]
result = df.query("@b - 1 in a", engine=engine, parser=parser)
tm.assert_frame_equal(expected, result)
def test_at_inside_string(self):
engine, parser = self.engine, self.parser
skip_if_no_pandas_parser(parser)
c = 1 # noqa
df = DataFrame({"a": ["a", "a", "b", "b", "@c", "@c"]})
result = df.query('a == "@c"', engine=engine, parser=parser)
expected = df[df.a == "@c"]
tm.assert_frame_equal(result, expected)
def test_query_undefined_local(self):
from pandas.core.computation.ops import UndefinedVariableError
engine, parser = self.engine, self.parser
skip_if_no_pandas_parser(parser)
df = DataFrame(np.random.rand(10, 2), columns=list("ab"))
with pytest.raises(
UndefinedVariableError, match="local variable 'c' is not defined"
):
df.query("a == @c", engine=engine, parser=parser)
def test_index_resolvers_come_after_columns_with_the_same_name(self):
n = 1 # noqa
a = np.r_[20:101:20]
df = DataFrame({"index": a, "b": np.random.randn(a.size)})
df.index.name = "index"
result = df.query("index > 5", engine=self.engine, parser=self.parser)
expected = df[df["index"] > 5]
tm.assert_frame_equal(result, expected)
df = DataFrame({"index": a, "b": np.random.randn(a.size)})
result = df.query("ilevel_0 > 5", engine=self.engine, parser=self.parser)
expected = df.loc[df.index[df.index > 5]]
tm.assert_frame_equal(result, expected)
df = DataFrame({"a": a, "b": np.random.randn(a.size)})
df.index.name = "a"
result = df.query("a > 5", engine=self.engine, parser=self.parser)
expected = df[df.a > 5]
tm.assert_frame_equal(result, expected)
result = df.query("index > 5", engine=self.engine, parser=self.parser)
expected = df.loc[df.index[df.index > 5]]
tm.assert_frame_equal(result, expected)
def test_inf(self):
n = 10
df = DataFrame({"a": np.random.rand(n), "b": np.random.rand(n)})
df.loc[::2, 0] = np.inf
d = {"==": operator.eq, "!=": operator.ne}
for op, f in d.items():
q = f"a {op} inf"
expected = df[f(df.a, np.inf)]
result = df.query(q, engine=self.engine, parser=self.parser)
tm.assert_frame_equal(result, expected)
@td.skip_if_no_ne
class TestDataFrameQueryNumExprPython(TestDataFrameQueryNumExprPandas):
@classmethod
def setup_class(cls):
super().setup_class()
cls.engine = "numexpr"
cls.parser = "python"
def test_date_query_no_attribute_access(self):
engine, parser = self.engine, self.parser
df = DataFrame(np.random.randn(5, 3))
df["dates1"] = date_range("1/1/2012", periods=5)
df["dates2"] = date_range("1/1/2013", periods=5)
df["dates3"] = date_range("1/1/2014", periods=5)
res = df.query(
"(dates1 < 20130101) & (20130101 < dates3)", engine=engine, parser=parser
)
expec = df[(df.dates1 < "20130101") & ("20130101" < df.dates3)]
tm.assert_frame_equal(res, expec)
def test_date_query_with_NaT(self):
engine, parser = self.engine, self.parser
n = 10
df = DataFrame(np.random.randn(n, 3))
df["dates1"] = date_range("1/1/2012", periods=n)
df["dates2"] = date_range("1/1/2013", periods=n)
df["dates3"] = date_range("1/1/2014", periods=n)
df.loc[np.random.rand(n) > 0.5, "dates1"] = pd.NaT
df.loc[np.random.rand(n) > 0.5, "dates3"] = pd.NaT
res = df.query(
"(dates1 < 20130101) & (20130101 < dates3)", engine=engine, parser=parser
)
expec = df[(df.dates1 < "20130101") & ("20130101" < df.dates3)]
| tm.assert_frame_equal(res, expec) | pandas._testing.assert_frame_equal |
import os
import numpy as np
import pandas as pd
def create_list_simu_by_degree(degree, input_dir):
"""Create two list containing names for topographies and simulatins"""
degree_str = str(degree) + 'degree/'
# path to topographies files
topo_dir = input_dir + "dem/" + degree_str
# path to wind files
wind_comp_list = ["ucompwind/", "vcompwind/", "wcompwind/"]
wind_dir = input_dir + "Wind/" + "U_V_W/"
wind_comp_dirs = [wind_dir + wind_comp_dir for wind_comp_dir in wind_comp_list]
# List of filenames (sorted)
topo_list = sorted(os.listdir(topo_dir))
wind_list = list(zip(*(sorted(os.listdir(wind_comp_dirs[i] + degree_str)) for i in range(3))))
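# zip the sorted u/v/w component file lists so each tuple points to the same simulation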
return (topo_list, wind_list)
def get_name_ARPS_simulation(degree, simulation):
"""Get short name of ARPS files"""
[topo_or_wind, N, dx, xi, sigma, ext] = simulation.split('_')
name = str(degree) + 'degree' + '_' + xi + '_' + ext
return (name)
def get_xi_from_ARPS_simulation(simulation):
"""Extract xi from full name of ARPS files"""
[topo_or_wind, N, dx, xi, sigma, ext] = simulation.split('_')
xi = xi.split('xi')[1]
return (xi)
def check_names(degree, topo_list, wind_list):
""""Check that we extract corresponding names"""
topo_names = [get_name_ARPS_simulation(degree, topo) for topo in topo_list]
no_duplicate_in_topo = len(set(topo_names)) == len(topo_names)
u_v_w_names = [(get_name_ARPS_simulation(degree, u), get_name_ARPS_simulation(degree, v), get_name_ARPS_simulation(degree, w)) for (u, v, w)
in wind_list]
wind_names = [get_name_ARPS_simulation(degree, u) for (u, v, w) in wind_list]
assert no_duplicate_in_topo
for (u, v, w) in u_v_w_names: assert u == v == w
assert topo_names == wind_names
def create_df_with_simulation_name(input_dir):
"""
Create an array with the name of the files
Output:
degree xi degree_xi topo_name wind_name
0 5 1000 degree5_xi1000 gaussiandem_N5451_dx30_xi1000_sigma88_r000.txt (gaussianu_N5451_dx30_xi1000_sigma88_r000.txt,...
1 5 1000 degree5_xi1000 gaussiandem_N5451_dx30_xi1000_sigma88_r001.txt (gaussianu_N5451_dx30_xi1000_sigma88_r001.txt,...
"""
degrees = [5, 10, 13, 16, 20]
all_info = np.array([])
for index, degree in enumerate(degrees):
# Create list of names for topographies and winds
topo_names, wind_names = create_list_simu_by_degree(degree, input_dir)
# Create list of degrees
list_degree = [degree] * len(topo_names)
# Check names are not duplicated and are well organized
check_names(degree, topo_names, wind_names)
name_simu = [get_name_ARPS_simulation(degree, topo) for topo in topo_names]
# Create list of xi
list_xi = [get_xi_from_ARPS_simulation(simu) for simu in topo_names]
# Create list of degree_xi
list_degree_xi = ['degree' + str(degree) + '_' + 'xi' + str(xi) for (degree, xi) in zip(list_degree, list_xi)]
# Store the result
array_to_add = np.array([list_degree, list_xi, list_degree_xi, topo_names, wind_names])
if index == 0: all_info = np.array([list_degree, list_xi, list_degree_xi, topo_names, wind_names])
if index >= 1: all_info = np.concatenate((all_info, array_to_add), axis=1)
all_info = np.transpose(all_info)
df_all = | pd.DataFrame(all_info, columns=['degree', 'xi', 'degree_xi', 'topo_name', 'wind_name']) | pandas.DataFrame |
import numpy as np
import pandas as pd
data=pd.read_csv('iris.csv')
data=np.array(data)
data=np.mat(data[:,0:4])
#data length (number of samples)
length=len(data)
#compute the kernel matrix in input space via the kernel function (squared dot product)
k=np.mat(np.zeros((length,length)))
for i in range(0,length):
for j in range(i,length):
k[i,j]=(np.dot(data[i],data[j].T))**2
k[j,i]=k[i,j]
name=range(length)
test=pd.DataFrame(columns=name,data=k)
print('Kernel matrix\n',test)
len_k=len(k)
#center the kernel matrix
I=np.eye(len_k)
one=np.ones((len_k,len_k))
A=I-1.0/len_k*one
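#centering matrix A = I - (1/n) * ones; the centered kernel is A K A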
centered_k=np.dot(np.dot(A,k),A)
test=pd.DataFrame(columns=name,data=centered_k)
print('Centered kernel matrix\n',test)
#normalize the kernel matrix
W_2=np.zeros((len_k,len_k))
for i in range(0,len_k):
W_2[i,i]=k[i,i]**(-0.5)
normalized_k=np.dot(np.dot(W_2,k),W_2)
test=pd.DataFrame(columns=name,data=normalized_k)
print('Normalized kernel matrix\n',test)
#normalize the centered kernel matrix
W_3=np.zeros((len_k,len_k))
for i in range(0,len_k):
W_3[i,i]=centered_k[i,i]**(-0.5)
normalized_centered_k=np.dot(np.dot(W_3,centered_k),W_3)
test= | pd.DataFrame(columns=name,data=normalized_centered_k) | pandas.DataFrame |
from flask import abort, jsonify
from config import db
from models import (
Product,
ProductSchema,
Article,
Inventory
)
import pandas as pd
# Handler function for GET Products endpoint
def read_all_products():
# Query the db to return all products
products = Product.query.order_by(Product.product_id).all()
if len(products) < 1:
abort(404, 'No products exist in database')
# Serialize the query results to produce a response
product_schema = ProductSchema(many=True)
return product_schema.dump(products)
# Handler function for POST /api/products endpoint
def create_products(body):
products = body.get('products', None)
if products == None:
abort(400, 'Invalid schema')
add_log = []
# Loop through list of products and add each to database if not exists
for product in products:
name = product.get('name', None)
price = product.get('price', None)
components = product.get('contain_articles', None)
# Data input validation
if name == None or price == None:
abort(400, 'Products must include a name and price')
if len(components) < 1:
abort(400, 'Each product must be composed of at least one article')
else:
for component in components:
article_id = component.get('art_id', None)
required_article_qty = component.get('amount_of', None)
if article_id == None or required_article_qty == None:
abort(400, 'Each component article must include a numerical ID and a required quantity')
# Check if product already exists
existing_product = Product.query \
.filter(Product.name == name) \
.filter(Product.price == price) \
.one_or_none()
# If it does not already exist, then add it
if existing_product is None:
# Add product to database
p = Product(name=name, price=price)
db.session.add(p)
# Commit changes to add new product
db.session.commit()
# Log which new product was added
add_log.append(p)
# Get the ID of the newly created Product so that records can be added to Inventory table
stmt = db.select(Product).where(
db.and_(
Product.name == name,
Product.price == price
)
)
result = db.session.execute(stmt)
product_id = result.fetchone().Product.product_id
# Iterate over component articles and add records to Inventory table
for component in components:
article_id = component.get('art_id', None)
required_article_qty = component.get('amount_of', None)
if isinstance(article_id, str):
article_id = int(article_id)
if isinstance(required_article_qty, str):
required_article_qty = int(required_article_qty)
i = Inventory(product_id=product_id, article_id=article_id, required_article_qty=required_article_qty)
db.session.add(i)
# Commit Inventory table db changes
db.session.commit()
# Return serialized version of the products added to db
product_schema = ProductSchema(many=True)
if len(add_log) > 0:
added_products = jsonify(product_schema.dump(add_log))
return added_products, 201
else:
abort(409, 'All products already exist in database')
def sell_product(productId):
# "Sell" a product and decrement the stock of its component articles by the required amount
select_stmt = db.select(Inventory, Article).join(Inventory.product).join(Inventory.article).where(Inventory.product_id == productId)
with db.engine.connect() as conn:
results = conn.execute(select_stmt)
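# Load the product's required articles (from the Inventory/Article join) into a DataFrame for the stock update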
df = | pd.DataFrame(data=results) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 9 20:13:44 2020
@author: Adam
"""
#%% Heatmap generator "Barcode"
import os
os.chdir(r'C:\Users\Ben\Desktop\T7_primase_Recognition_Adam\adam\paper\code_after_meating_with_danny')
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import re
def join_cols(row):
return ''.join(list(row))
def find_favor(seq):
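# return the start index of every occurrence of seq in the global DNA string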
t = []
for m in re.finditer(seq, DNA):
t += [m.start()]
return t
DNA = np.loadtxt('./data/DNA.txt', str)
DNA = ''.join(DNA)
print('DNA Length = {} '.format(len(DNA)) )
start_idxs = []
for m in re.finditer('GTC', DNA):
start_idxs += [m.start()]
start_idxs = np.array(start_idxs)
df = | pd.DataFrame() | pandas.DataFrame |
from datetime import datetime
import warnings
import numpy as np
from numpy.random import randn
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DataFrame, DatetimeIndex, Index, Series
import pandas._testing as tm
from pandas.core.window.common import flex_binary_moment
from pandas.tests.window.common import (
moments_consistency_cov_data,
moments_consistency_is_constant,
moments_consistency_mock_mean,
moments_consistency_series_data,
moments_consistency_std_data,
moments_consistency_var_data,
moments_consistency_var_debiasing_factors,
)
def _rolling_consistency_cases():
for window in [1, 2, 3, 10, 20]:
for min_periods in {0, 1, 2, 3, 4, window}:
if min_periods and (min_periods > window):
continue
for center in [False, True]:
yield window, min_periods, center
# binary moments
def test_rolling_cov(series):
A = series
B = A + randn(len(A))
result = A.rolling(window=50, min_periods=25).cov(B)
tm.assert_almost_equal(result[-1], np.cov(A[-50:], B[-50:])[0, 1])
def test_rolling_corr(series):
A = series
B = A + randn(len(A))
result = A.rolling(window=50, min_periods=25).corr(B)
tm.assert_almost_equal(result[-1], np.corrcoef(A[-50:], B[-50:])[0, 1])
# test for correct bias correction
a = tm.makeTimeSeries()
b = tm.makeTimeSeries()
a[:5] = np.nan
b[:10] = np.nan
result = a.rolling(window=len(a), min_periods=1).corr(b)
tm.assert_almost_equal(result[-1], a.corr(b))
@pytest.mark.parametrize("func", ["cov", "corr"])
def test_rolling_pairwise_cov_corr(func, frame):
result = getattr(frame.rolling(window=10, min_periods=5), func)()
result = result.loc[(slice(None), 1), 5]
result.index = result.index.droplevel(1)
expected = getattr(frame[1].rolling(window=10, min_periods=5), func)(frame[5])
expected.index = expected.index._with_freq(None)
tm.assert_series_equal(result, expected, check_names=False)
@pytest.mark.parametrize("method", ["corr", "cov"])
def test_flex_binary_frame(method, frame):
series = frame[1]
res = getattr(series.rolling(window=10), method)(frame)
res2 = getattr(frame.rolling(window=10), method)(series)
exp = frame.apply(lambda x: getattr(series.rolling(window=10), method)(x))
tm.assert_frame_equal(res, exp)
tm.assert_frame_equal(res2, exp)
frame2 = frame.copy()
frame2.values[:] = np.random.randn(*frame2.shape)
res3 = getattr(frame.rolling(window=10), method)(frame2)
exp = DataFrame(
{k: getattr(frame[k].rolling(window=10), method)(frame2[k]) for k in frame}
)
tm.assert_frame_equal(res3, exp)
@pytest.mark.slow
@pytest.mark.parametrize(
"window,min_periods,center", list(_rolling_consistency_cases())
)
def test_rolling_apply_consistency(
consistency_data, base_functions, no_nan_functions, window, min_periods, center
):
x, is_constant, no_nans = consistency_data
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore", message=".*(empty slice|0 for slice).*", category=RuntimeWarning
)
# test consistency between rolling_xyz() and either (a)
# rolling_apply of Series.xyz(), or (b) rolling_apply of
# np.nanxyz()
functions = base_functions
# GH 8269
if no_nans:
functions = no_nan_functions + base_functions
for (f, require_min_periods, name) in functions:
rolling_f = getattr(
x.rolling(window=window, center=center, min_periods=min_periods), name
)
if (
require_min_periods
and (min_periods is not None)
and (min_periods < require_min_periods)
):
continue
if name == "count":
rolling_f_result = rolling_f()
rolling_apply_f_result = x.rolling(
window=window, min_periods=min_periods, center=center
).apply(func=f, raw=True)
else:
if name in ["cov", "corr"]:
rolling_f_result = rolling_f(pairwise=False)
else:
rolling_f_result = rolling_f()
rolling_apply_f_result = x.rolling(
window=window, min_periods=min_periods, center=center
).apply(func=f, raw=True)
# GH 9422
if name in ["sum", "prod"]:
tm.assert_equal(rolling_f_result, rolling_apply_f_result)
@pytest.mark.parametrize("window", range(7))
def test_rolling_corr_with_zero_variance(window):
# GH 18430
s = Series(np.zeros(20))
other = Series(np.arange(20))
assert s.rolling(window=window).corr(other=other).isna().all()
def test_flex_binary_moment():
# GH3155
# don't blow the stack
msg = "arguments to moment function must be of type np.ndarray/Series/DataFrame"
with pytest.raises(TypeError, match=msg):
flex_binary_moment(5, 6, None)
def test_corr_sanity():
# GH 3155
df = DataFrame(
np.array(
[
[0.87024726, 0.18505595],
[0.64355431, 0.3091617],
[0.92372966, 0.50552513],
[0.00203756, 0.04520709],
[0.84780328, 0.33394331],
[0.78369152, 0.63919667],
]
)
)
res = df[0].rolling(5, center=True).corr(df[1])
assert all(np.abs(np.nan_to_num(x)) <= 1 for x in res)
# and some fuzzing
for _ in range(10):
df = DataFrame(np.random.rand(30, 2))
res = df[0].rolling(5, center=True).corr(df[1])
try:
assert all(np.abs(np.nan_to_num(x)) <= 1 for x in res)
except AssertionError:
print(res)
def test_rolling_cov_diff_length():
# GH 7512
s1 = Series([1, 2, 3], index=[0, 1, 2])
s2 = Series([1, 3], index=[0, 2])
result = s1.rolling(window=3, min_periods=2).cov(s2)
expected = Series([None, None, 2.0])
tm.assert_series_equal(result, expected)
s2a = Series([1, None, 3], index=[0, 1, 2])
result = s1.rolling(window=3, min_periods=2).cov(s2a)
tm.assert_series_equal(result, expected)
def test_rolling_corr_diff_length():
# GH 7512
s1 = Series([1, 2, 3], index=[0, 1, 2])
s2 = Series([1, 3], index=[0, 2])
result = s1.rolling(window=3, min_periods=2).corr(s2)
expected = Series([None, None, 1.0])
tm.assert_series_equal(result, expected)
s2a = Series([1, None, 3], index=[0, 1, 2])
result = s1.rolling(window=3, min_periods=2).corr(s2a)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"f",
[
lambda x: x.rolling(window=10, min_periods=5).cov(x, pairwise=False),
lambda x: x.rolling(window=10, min_periods=5).corr(x, pairwise=False),
lambda x: x.rolling(window=10, min_periods=5).max(),
lambda x: x.rolling(window=10, min_periods=5).min(),
lambda x: x.rolling(window=10, min_periods=5).sum(),
lambda x: x.rolling(window=10, min_periods=5).mean(),
lambda x: x.rolling(window=10, min_periods=5).std(),
lambda x: x.rolling(window=10, min_periods=5).var(),
lambda x: x.rolling(window=10, min_periods=5).skew(),
lambda x: x.rolling(window=10, min_periods=5).kurt(),
lambda x: x.rolling(window=10, min_periods=5).quantile(quantile=0.5),
lambda x: x.rolling(window=10, min_periods=5).median(),
lambda x: x.rolling(window=10, min_periods=5).apply(sum, raw=False),
lambda x: x.rolling(window=10, min_periods=5).apply(sum, raw=True),
lambda x: x.rolling(win_type="boxcar", window=10, min_periods=5).mean(),
],
)
@td.skip_if_no_scipy
def test_rolling_functions_window_non_shrinkage(f):
# GH 7764
s = Series(range(4))
s_expected = Series(np.nan, index=s.index)
df = DataFrame([[1, 5], [3, 2], [3, 9], [-1, 0]], columns=["A", "B"])
df_expected = DataFrame(np.nan, index=df.index, columns=df.columns)
s_result = f(s)
tm.assert_series_equal(s_result, s_expected)
df_result = f(df)
tm.assert_frame_equal(df_result, df_expected)
def test_rolling_functions_window_non_shrinkage_binary():
# corr/cov return a MI DataFrame
df = DataFrame(
[[1, 5], [3, 2], [3, 9], [-1, 0]],
columns=Index(["A", "B"], name="foo"),
index=Index(range(4), name="bar"),
)
df_expected = DataFrame(
columns=Index(["A", "B"], name="foo"),
index=pd.MultiIndex.from_product([df.index, df.columns], names=["bar", "foo"]),
dtype="float64",
)
functions = [
lambda x: (x.rolling(window=10, min_periods=5).cov(x, pairwise=True)),
lambda x: (x.rolling(window=10, min_periods=5).corr(x, pairwise=True)),
]
for f in functions:
df_result = f(df)
tm.assert_frame_equal(df_result, df_expected)
def test_rolling_skew_edge_cases():
all_nan = Series([np.NaN] * 5)
# yields all NaN (0 variance)
d = | Series([1] * 5) | pandas.Series |
# %%
import pandas as pd
from zhconv import convert
from scipy import stats
from fuzzywuzzy import fuzz
from time import perf_counter
import searchconsole
from datetime import datetime
from datetime import timedelta
# --------------DATA RETRIEVING---------------
# no credentials saved, do not save credentials
#account = searchconsole.authenticate(client_config='client_secrets.json')
# no credentials saved, want to save credentials
#account = searchconsole.authenticate(client_config='client_secrets.json', serialize = 'credentials.json')
# credentials saved as credentials.json
account = searchconsole.authenticate(client_config='client_secrets.json',
credentials='credentials.json')
# webproperty must match what's shown on Google Search Console
webproperty = account['******'] # website url
start = datetime.strptime("******", "%Y-%m-%d") # modify start date
end = datetime.strptime("******", "%Y-%m-%d") # modify end date
df = pd.DataFrame()
while start != end:
start_datetime = datetime.strftime(
start, "%Y-%m-%d")
# interval = 1 day
shifted_datetime = datetime.strftime(start + timedelta(days=1), "%Y-%m-%d")
report = webproperty.query.range(
start_datetime, shifted_datetime).dimension("query").get()
df1 = pd.DataFrame(report.rows)
df1['date'] = datetime.strftime(start, "%Y-%m-%d")
df = pd.concat([df, df1])
print(f"Trend of {start} retrived")
start = start + timedelta(days=1)
print("=========================")
print("ALL DATA RETRIVED")
print("=========================")
df
# df.to_csv('trend.csv', index=False)
# --------------DATA RETRIEVING FINISHED---------------
# %%
# --------------DATA PREPARATION---------------
# unify characters, merge similar keywords
# Merge split words and convert Traditional Chinese to Simplified Chinese -> 'modified_query'
df['modified_query'] = df['query'].apply(lambda x: convert(x, 'zh-cn'))
df['modified_query'] = df['modified_query'].apply(lambda x: x.replace(" ", ""))
# Identify similar keywords
# option 1: fuzzy match words
# TODO: use process instead of iteration:
# https://towardsdatascience.com/fuzzywuzzy-find-similar-strings-within-one-column-in-a-pandas-data-frame-99f6c2a0c212
# http://jonathansoma.com/lede/algorithms-2017/classes/fuzziness-matplotlib/fuzzing-matching-in-pandas-with-fuzzywuzzy/
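# Sketch of the vectorized alternative hinted at above (illustrative only, not executed here;
# `choices` and `best_match` are hypothetical names): fuzzywuzzy's process module can score a
# query against the whole deduplicated list in one call instead of the nested iterrows loops.
#   from fuzzywuzzy import process
#   choices = similar['modified_query'].tolist()
#   best_match = similar['modified_query'].apply(
#       lambda q: process.extractOne(q, choices, scorer=fuzz.token_sort_ratio))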
similar = df[['modified_query', 'query']
].drop_duplicates(subset='modified_query')
similar1 = similar
# record time
timer1 = perf_counter()
for index, row in similar1.iterrows(): # for each row in second df
for index1, row1 in similar1.iterrows(): # loop through the whole second df
r = fuzz.token_sort_ratio(row['modified_query'],
row1['modified_query'])
        if r > 80:  # 80 is used for a conservative result
# if match, add it into main df
similar.loc[index1, 'aggregated_query'] = row['modified_query']
# and drop the row in second df
similar1 = similar1.drop(index1, axis=0)
print(f"{len(similar1)} rows remain")
print("=========================")
print(
f"{len(similar['aggregated_query'].unique())} unique keywords identified")
print("=========================")
timer2 = perf_counter()
print(f"Identifying Keywords: {timer2 - timer1} Seconds")
# put identified keywords back to df
df = | pd.merge(df, similar, how='left', on='modified_query') | pandas.merge |
from __future__ import print_function
import os
import pandas as pd
import xgboost as xgb
import time
import shutil
from sklearn import preprocessing
from sklearn.cross_validation import train_test_split
import numpy as np
from sklearn.utils import shuffle
from sklearn import metrics
import sys
def archive_results(filename,results,algo,script):
"""
:type algo: basestring
:type script: basestring
:type results: DataFrame
"""
#assert results == pd.DataFrame
now=time.localtime()[0:5]
dirname='../archive'
subdirfmt='%4d-%02d-%02d-%02d-%02d'
subdir=subdirfmt %now
if not os.path.exists(os.path.join(dirname,str(algo))):
os.mkdir(os.path.join(dirname,str(algo)))
dir_to_create=os.path.join(dirname,str(algo),subdir)
if not os.path.exists(dir_to_create):
os.mkdir(dir_to_create)
os.chdir(dir_to_create)
results.to_csv(filename,index=False,float_format='%.6f')
shutil.copy2(script,'.')
return
###############################################################################################
# support class to redirect stderr
class flushfile():
def __init__(self, f):
self.f = f
def __getattr__(self,name):
return object.__getattribute__(self.f, name)
def write(self, x):
self.f.write(x)
self.f.flush()
def flush(self):
self.f.flush()
# Stderr
oldstderr = sys.stderr # global
def capture_stderr(log):
oldstderr = sys.stderr
sys.stderr = open(log, 'w')
sys.stderr = flushfile(sys.stderr)
return log
def restore_stderr():
sys.stderr = oldstderr
def parse_xgblog(xgblog):
import re
pattern = re.compile(r'^\[(?P<round>\d+)\]\s*\D+:(?P<validation>\d+.\d+)\s*\D+:(?P<train>\d+.\d+)')
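    # expects xgboost eval lines shaped like "[12]\teval-error:0.123456\ttrain-error:0.234567"
    # (metric names assumed; only the round number and the two floats are captured)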
xgb_list = []
with open(xgblog, "r") as ins:
next(ins)
for line in ins:
match = pattern.match(line)
if match:
idx = int(match.group("round"))
validation = float(match.group("validation"))
training = float(match.group("train"))
xgb_list.append([idx, validation, training])
else:
pass # raise Exception("Failed to parse!")
return xgb_list
def preprocess_data(train,test):
id_test=test['patient_id']
train=train.drop(['patient_id'],axis=1)
test=test.drop(['patient_id'],axis=1)
y=train['is_screener']
train=train.drop(['is_screener'],axis=1)
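    # label-encode every object (categorical) column, fitting on train+test combined
    # so that the integer codes are consistent across both sets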
for f in train.columns:
if train[f].dtype == 'object':
print(f)
lbl = preprocessing.LabelEncoder()
lbl.fit(list(train[f].values) + list(test[f].values))
train[f] = lbl.transform(list(train[f].values))
test[f] = lbl.transform(list(test[f].values))
return id_test,test,train,y
os.chdir(os.getcwd())
train_file = '../input/patients_train.csv.gz'
test_file = '../input/patients_test.csv.gz'
train = pd.read_csv(train_file)
test = pd.read_csv(test_file)
train.drop( 'patient_gender', axis = 1, inplace = True )
test.drop( 'patient_gender', axis = 1, inplace = True )
########## last activity files
activity_file=('../input/activity_selected_last.csv.gz')
diagnosis_file=('../input/diagnosis_selected_last.csv.gz')
procedure_file=('../input/procedure_selected_last.csv.gz')
surgical_file=('../input/surgical_selected_last.csv.gz')
prescription_file=('../input/prescription_selected_last.csv.gz')
physicians_file=('../input/physicians.csv.gz')
drugs_file=('../input/drugs.csv.gz')
############ first activity files
activity_file_first=('../input/activity_selected_last.csv.gz')
diagnosis_file_first=('../input/diagnosis_selected_last.csv.gz')
procedure_file_first=('../input/procedure_selected_last.csv.gz')
surgical_file_first=('../input/surgical_selected_last.csv.gz')
prescription_file=('../input/prescription_selected_last.csv.gz')
activity=pd.read_csv(activity_file )
#Fa=pd.read_csv(activity_file_first,usecols=['activity_year'])
#print(Fa)
#activity['activity_first_year']=Fa['activity_year']
#activity['delta_time_activity']=activity['activity_year']-activity['activity_first_year']
#print(activity[activity['delta_time_activity']!=0,'delta_time_activity'])
train=pd.merge(train,activity, on='patient_id',how='left')
test=pd.merge(test,activity, on='patient_id',how='left')
print('after merging activity')
print(train.shape,test.shape)
procedure=pd.read_csv(procedure_file )
diagnosis=pd.read_csv(diagnosis_file)
diagnosis=pd.merge(diagnosis,procedure,on=['patient_id','claim_id'],how='left')
train=pd.merge(train,diagnosis, on='patient_id',how='left')
test= | pd.merge(test,diagnosis, on='patient_id',how='left') | pandas.merge |
import os
import sys
import time
import sqlite3
import pyupbit
import pandas as pd
from PyQt5.QtCore import QThread
from pyupbit import WebSocketManager
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from utility.setting import *
from utility.static import now, timedelta_sec, strf_time, timedelta_hour, strp_time
class TraderUpbit(QThread):
def __init__(self, windowQ, coinQ, queryQ, soundQ, cstgQ, teleQ):
super().__init__()
self.windowQ = windowQ
self.coinQ = coinQ
self.queryQ = queryQ
self.soundQ = soundQ
self.cstgQ = cstgQ
self.teleQ = teleQ
        self.upbit = None        # object used to place buy/sell orders and confirm fills
        self.buy_uuid = None     # pending buy order stored as a list: [ticker, uuid]
        self.sell_uuid = None    # pending sell order stored as a list: [ticker, uuid]
        self.websocketQ = None   # websocket queue for receiving real-time data
        self.df_cj = pd.DataFrame(columns=columns_cj)  # filled-order list
        self.df_jg = pd.DataFrame(columns=columns_jg)  # holdings list
        self.df_tj = pd.DataFrame(columns=columns_tj)  # holdings valuation
        self.df_td = pd.DataFrame(columns=columns_td)  # trade list
        self.df_tt = pd.DataFrame(columns=columns_tt)  # realized profit and loss
self.str_today = strf_time('%Y%m%d', timedelta_hour(-9))
        self.dict_jcdt = {}  # stores the last trade time per ticker
self.dict_intg = {
'예수금': 0,
            '종목당투자금': 0,  # investment per ticker, computed as int(deposit / max number of tickers)
'최대매수종목수': 10,
            '업비트수수료': 0.  # Upbit fee; enter 0.005 for 0.5%
}
self.dict_bool = {
'모의투자': True,
'알림소리': True
}
self.dict_time = {
            '매수체결확인': now(),  # used to check buy order fills every second
            '매도체결확인': now(),  # used to check sell order fills every second
'거래정보': now()
}
def run(self):
self.LoadDatabase()
self.GetKey()
self.GetBalances()
self.EventLoop()
def LoadDatabase(self):
"""
        On startup, load today's filled orders, today's trades and the current holdings.
        The filled-order and trade lists are refreshed immediately; the holdings list is refreshed after the deposit balance is loaded.
"""
con = sqlite3.connect(db_tradelist)
df = pd.read_sql(f"SELECT * FROM chegeollist WHERE 체결시간 LIKE '{self.str_today}%'", con)
self.df_cj = df.set_index('index').sort_values(by=['체결시간'], ascending=False)
df = pd.read_sql(f'SELECT * FROM jangolist', con)
self.df_jg = df.set_index('index').sort_values(by=['매입금액'], ascending=False)
df = pd.read_sql(f"SELECT * FROM tradelist WHERE 체결시간 LIKE '{self.str_today}%'", con)
self.df_td = df.set_index('index').sort_values(by=['체결시간'], ascending=False)
con.close()
if len(self.df_cj) > 0:
self.windowQ.put([ui_num['체결목록'], self.df_cj])
if len(self.df_td) > 0:
self.windowQ.put([ui_num['거래목록'], self.df_td])
self.windowQ.put([ui_num['C로그텍스트'], '시스템 명령 실행 알림 - 데이터베이스 불러오기 완료'])
def GetKey(self):
"""
        Read the Upbit access key and secret key from the DB and create the self.upbit object.
        This object is used for placing buy/sell orders and confirming fills.
"""
con = sqlite3.connect(db_setting)
df = pd.read_sql('SELECT * FROM coin', con)
df = df.set_index('index')
self.dict_bool['모의투자'] = df['모의투자'][0]
self.dict_bool['알림소리'] = df['알림소리'][0]
df = pd.read_sql('SELECT * FROM upbit', con)
df = df.set_index('index')
con.close()
if len(df) > 0 and df['Access_key'][0] != '':
access_key = df['Access_key'][0]
secret_key = df['Secret_key'][0]
self.upbit = pyupbit.Upbit(access_key, secret_key)
self.windowQ.put([ui_num['C로그텍스트'], '시스템 명령 실행 알림 - 주문 및 체결확인용 업비트 객체 생성 완료'])
else:
self.windowQ.put([ui_num['C로그텍스트'], '시스템 명령 오류 알림 - 업비트 키값이 설정되지 않았습니다.'])
def GetBalances(self):
""" 예수금 조회 및 종목당투자금 계산 """
if self.dict_bool['모의투자']:
self.dict_intg['예수금'] = 100000000 - self.df_jg['매입금액'].sum() + self.df_jg['평가손익'].sum()
self.dict_intg['종목당투자금'] = int(100000000 / self.dict_intg['최대매수종목수'])
elif self.upbit is not None:
self.dict_intg['예수금'] = int(float(self.upbit.get_balances()[0]['balance']))
self.dict_intg['종목당투자금'] = int(self.dict_intg['예수금'] / self.dict_intg['최대매수종목수'])
else:
self.windowQ.put([ui_num['C로그텍스트'], '시스템 명령 오류 알림 - 업비트 키값이 설정되지 않았습니다.'])
if len(self.df_td) > 0:
self.UpdateTotaltradelist(first=True)
self.windowQ.put([ui_num['C로그텍스트'], '시스템 명령 실행 알림 - 예수금 조회 완료'])
def EventLoop(self):
int_time = int(strf_time('%H%M%S'))
tickers = pyupbit.get_tickers(fiat="KRW")
self.cstgQ.put(['관심종목초기화', tickers])
websocketQ = WebSocketManager('ticker', tickers)
while True:
"""
            Watch the order queue.
            Everything placed on the order queue comes from the strategy computation process.
"""
if not self.coinQ.empty():
data = self.coinQ.get()
if data[0] == '매수':
self.Buy(data[1], data[2], data[3])
elif data[0] == '매도':
self.Sell(data[1], data[2], data[3])
"""
            Trading runs only from 10:00 until 09:00 the next day.
            When data arrives on the real-time websocket queue, extract the ticker and trade time;
            if the time differs from the last one stored per ticker in self.dict_jcdt, send the data to the strategy process.
"""
data = websocketQ.get()
ticker = data['code']
t = data['trade_time']
if int_time < coin_exit_time or coin_trad_time < int_time:
try:
last_jcct = self.dict_jcdt[ticker]
except KeyError:
last_jcct = None
if last_jcct is None or t != last_jcct:
self.dict_jcdt[ticker] = t
c = data['trade_price']
h = data['high_price']
low = data['low_price']
per = round(data['signed_change_rate'] * 100, 2)
dm = data['acc_trade_price']
bid = data['acc_bid_volume']
ask = data['acc_ask_volume']
uuidnone = self.buy_uuid is None
injango = ticker in self.df_jg.index
data = [ticker, c, h, low, per, dm, bid, ask, t, uuidnone, injango, self.dict_intg['종목당투자금']]
self.cstgQ.put(data)
""" 잔고목록 갱신 및 매도조건 확인 """
if injango:
ch = round(bid / ask * 100, 2)
self.UpdateJango(ticker, c, ch)
""" 주문의 체결확인은 1초마다 반복한다. """
if not self.dict_bool['모의투자']:
if self.buy_uuid is not None and ticker == self.buy_uuid[0] and now() > self.dict_time['매수체결확인']:
self.CheckBuyChegeol(ticker)
self.dict_time['매수체결확인'] = timedelta_sec(1)
if self.sell_uuid is not None and ticker == self.sell_uuid[0] and now() > self.dict_time['매도체결확인']:
self.CheckSellChegeol(ticker)
self.dict_time['매도체결확인'] = timedelta_sec(1)
""" 잔고평가 및 잔고목록 갱신도 1초마다 반복한다. """
if now() > self.dict_time['거래정보']:
self.UpdateTotaljango()
self.dict_time['거래정보'] = timedelta_sec(1)
""" coin_csan_time에 잔고청산 주문, coin_exit_time에 트레이더가 종료된다. """
if int_time < coin_csan_time < int(strf_time('%H%M%S')):
self.JangoCheongsan()
if int_time < coin_exit_time < int(strf_time('%H%M%S')):
self.queryQ.put([2, self.df_tt, 'totaltradelist', 'append'])
break
int_time = int(strf_time('%H%M%S'))
self.windowQ.put([ui_num['C로그텍스트'], '시스템 명령 실행 알림 - 트레이더 종료합니다.'])
if self.dict_bool['알림소리']:
self.soundQ.put('코인 트레이더를 종료합니다.')
self.teleQ.put('코인 트레이더를 종료하였습니니다.')
sys.exit()
"""
    In simulated trading no real orders are sent; the filled-order list, holdings list, etc. are updated immediately.
    In live trading the order uuid and ticker are stored in separate buy/sell variables, and while such a
    variable is not None the fill is checked every second with the get_order function.
    Once the order is filled the related lists are updated and the variable is reset to None.
    After a fill is confirmed the holdings list is updated and only then is a fill-complete signal sent
    to the strategy computation process.
    Every list is forwarded to the query process whenever it changes, so the DB is updated in real time.
    A buy order first checks whether the deposit is sufficient; if it is not, the order type is recorded in
    the filled-order list as 'seed shortage' and only a buy-complete signal is sent so the strategy process
    can drop the order from its list.
    While the deposit is insufficient and the ticker is not yet in the holdings, the strategy process may keep
    emitting buy signals, so a ticker that runs into a seed shortage again is not logged when its latest
    entry in the filled-order list is less than 3 minutes old.
"""
def Buy(self, ticker, c, oc):
if self.buy_uuid is not None:
self.cstgQ.put(['매수완료', ticker])
return
if self.dict_intg['예수금'] < c * oc:
df = self.df_cj[(self.df_cj['주문구분'] == '시드부족') & (self.df_cj['종목명'] == ticker)]
if len(df) == 0 or timedelta_hour(-9) > timedelta_sec(180, strp_time('%Y%m%d%H%M%S%f', df['체결시간'][0])):
self.UpdateBuy(ticker, c, oc, cancle=True)
self.cstgQ.put(['매수완료', ticker])
return
if self.dict_bool['모의투자']:
self.UpdateBuy(ticker, c, oc)
elif self.upbit is not None:
ret = self.upbit.buy_market_order(ticker, self.dict_intg['종목당투자금'])
self.buy_uuid = [ticker, ret[0]['uuid']]
            self.dict_time['매수체결확인'] = timedelta_sec(1)  # first buy fill check one second after ordering
else:
text = '시스템 명령 오류 알림 - 업비트 키값이 설정되지 않아 주문을 전송할 수 없습니다.'
self.windowQ.put([ui_num['C로그텍스트'], text])
def Sell(self, ticker, c, oc):
if self.sell_uuid is not None:
self.cstgQ.put(['매도완료', ticker])
return
if self.dict_bool['모의투자']:
self.UpdateSell(ticker, c, oc)
elif self.upbit is not None:
ret = self.upbit.sell_market_order(ticker, oc)
self.sell_uuid = [ticker, ret[0]['uuid']]
            self.dict_time['매도체결확인'] = timedelta_sec(1)  # first sell fill check one second after ordering
else:
text = '시스템 명령 오류 알림 - 업비트 키값이 설정되지 않아 주문을 전송할 수 없습니다.'
self.windowQ.put([ui_num['C로그텍스트'], text])
def UpdateJango(self, ticker, c, ch):
prec = self.df_jg['현재가'][ticker]
if prec != c:
bg = self.df_jg['매입금액'][ticker]
jc = int(self.df_jg['보유수량'][ticker])
pg, sg, sp = self.GetPgSgSp(bg, jc * c)
columns = ['현재가', '수익률', '평가손익', '평가금액']
self.df_jg.at[ticker, columns] = c, sp, sg, pg
data = [ticker, sp, jc, ch, c]
self.cstgQ.put(data)
def JangoCheongsan(self):
for ticker in self.df_jg.index:
c = self.df_jg['현재가'][ticker]
oc = self.df_jg['보유수량'][ticker]
if self.dict_bool['모의투자']:
self.UpdateSell(ticker, c, oc)
elif self.upbit is not None:
self.upbit.sell_market_order(ticker, oc)
time.sleep(0.2)
else:
text = '시스템 명령 오류 알림 - 업비트 키값이 설정되지 않아 주문을 전송할 수 없습니다.'
self.windowQ.put([ui_num['C로그텍스트'], text])
self.windowQ.put([ui_num['C로그텍스트'], '시스템 명령 실행 알림 - 잔고청산 주문 전송 완료'])
if self.dict_bool['알림소리']:
self.soundQ.put('코인 잔고청산 주문을 전송하였습니다.')
def CheckBuyChegeol(self, ticker):
ret = self.upbit.get_order(self.buy_uuid[1])
if ret is not None and ret['state'] == 'done':
cp = ret['price']
cc = ret['executed_volume']
self.UpdateBuy(ticker, cp, cc)
self.cstgQ.put(['매수완료', ticker])
self.buy_uuid = None
def CheckSellChegeol(self, ticker):
ret = self.upbit.get_order(self.sell_uuid[1])
if ret is not None and ret['state'] == 'done':
cp = ret['price']
cc = ret['executed_volume']
self.UpdateSell(ticker, cp, cc)
self.cstgQ.put(['매도완료', ticker])
self.sell_uuid = None
def UpdateBuy(self, ticker, cp, cc, cancle=False):
dt = strf_time('%Y%m%d%H%M%S%f')
order_gubun = '매수' if not cancle else '시드부족'
self.df_cj.at[dt] = ticker, order_gubun, cc, 0, cp, cp, dt
self.df_cj.sort_values(by='체결시간', ascending=False, inplace=True)
self.windowQ.put([ui_num['체결목록'], self.df_cj])
if not cancle:
bg = cp * cc
pg, sg, sp = self.GetPgSgSp(bg, bg)
self.dict_intg['예수금'] -= bg
self.df_jg.at[ticker] = ticker, cp, cp, sp, sg, bg, pg, cc
self.df_jg.sort_values(by=['매입금액'], ascending=False, inplace=True)
self.queryQ.put([2, self.df_jg, 'jangolist', 'replace'])
text = f'매매 시스템 체결 알림 - {ticker} {cc}코인 매수'
self.windowQ.put([ui_num['C로그텍스트'], text])
if self.dict_bool['알림소리']:
self.soundQ.put(f'{ticker} {cc}코인을 매수하였습니다.')
self.teleQ.put(f'매수 알림 - {ticker} {cp} {cc}')
df = pd.DataFrame([[ticker, order_gubun, cc, 0, cp, cp, dt]], columns=columns_cj, index=[dt])
self.queryQ.put([2, df, 'chegeollist', 'append'])
def UpdateSell(self, ticker, cp, cc):
dt = strf_time('%Y%m%d%H%M%S%f')
bp = self.df_jg['매입가'][ticker]
bg = bp * cc
pg, sg, sp = self.GetPgSgSp(bg, cp * cc)
self.dict_intg['예수금'] += bg + sg
self.df_jg.drop(index=ticker, inplace=True)
self.df_cj.at[dt] = ticker, '매도', cc, 0, cp, cp, dt
self.df_td.at[dt] = ticker, bg, pg, cc, sp, sg, dt
self.df_td.sort_values(by=['체결시간'], ascending=False, inplace=True)
self.windowQ.put([ui_num['체결목록'], self.df_cj])
self.windowQ.put([ui_num['거래목록'], self.df_td])
text = f'매매 시스템 체결 알림 - {ticker} {bp}코인 매도'
self.windowQ.put([ui_num['C로그텍스트'], text])
if self.dict_bool['알림소리']:
self.soundQ.put(f'{ticker} {cc}코인을 매도하였습니다.')
self.queryQ.put([2, self.df_jg, 'jangolist', 'replace'])
df = pd.DataFrame([[ticker, '매도', cc, 0, cp, cp, dt]], columns=columns_cj, index=[dt])
self.queryQ.put([2, df, 'chegeollist', 'append'])
df = pd.DataFrame([[ticker, bg, pg, cc, sp, sg, dt]], columns=columns_td, index=[dt])
self.queryQ.put([2, df, 'tradelist', 'append'])
self.teleQ.put(f'매도 알림 - {ticker} {cp} {cc}')
self.UpdateTotaltradelist()
def UpdateTotaltradelist(self, first=False):
tsg = self.df_td['매도금액'].sum()
tbg = self.df_td['매수금액'].sum()
tsig = self.df_td[self.df_td['수익금'] > 0]['수익금'].sum()
tssg = self.df_td[self.df_td['수익금'] < 0]['수익금'].sum()
sg = self.df_td['수익금'].sum()
sp = round(sg / tbg * 100, 2)
tdct = len(self.df_td)
self.df_tt = | pd.DataFrame([[tdct, tbg, tsg, tsig, tssg, sp, sg]], columns=columns_tt, index=[self.str_today]) | pandas.DataFrame |
# --------------
#Importing header files
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
data= | pd.read_csv(path) | pandas.read_csv |
# Copyright (c) 2020, eQualit.ie inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import datetime
import json
import os
import time
import traceback
import pandas as pd
from baskerville.util.helpers import get_logger, lines_in_file
from dateutil.tz import tzutc
logger = get_logger(__name__)
COUNTER = 0
SESSION_COUNTER = 0
topic_name = 'deflect.logs'
def load_logs(path):
"""
Load json logs from a path
:param str path: the path to file.json
:return: a pandas Dataframe with the logs
:rtype: pandas.DataFrame
"""
return pd.read_json(path, orient='records', lines=True, encoding='utf-8')
def simulation(
path,
time_window,
kafka_url='0.0.0.0:9092',
zookeeper_url='localhost:2181',
topic_name='deflect.logs',
sleep=True,
verbose=False,
spark=None,
use_spark=False
):
"""
Loads raw logs, groups them by the defined time window and publishes
the grouped raw logs in Kafka if a producer is given, else, it prints out
the groups. After publishing the logs line by line, it will sleep for the
    remaining seconds of the time window, if any.
:param str path: the path to raw logs as they are stored in ELS
:param timedelta time_window: the time window for the interval
:param str kafka_url: the url to kafka, defaults to '0.0.0.0:9092'
:param str zookeeper_url: the url to zookeeper, defaults to
'localhost:2181'
:param bytes topic_name: the topic name to publish to
:param bool sleep: if True, the program will sleep after publishing each
group of time windowed logs, for the remaining seconds until a time window
is complete.
:param bool verbose: verbose flag
:return: None
"""
# a short delay for warming up the pipeline
time.sleep(30)
producer = None
if topic_name:
from confluent_kafka import Producer
producer = Producer({'bootstrap.servers': kafka_url})
if not use_spark and lines_in_file(path) < 1e6:
        # pandas can usually handle files under 1M lines well - but that
# depends on the machine running the script (amount of RAM)
df = load_logs(path)
publish_df_split_in_time_windows(
time_window, producer, topic_name, df, verbose, sleep
)
else:
from pyspark.sql import functions as F
active_columns = [
'@timestamp', 'timestamp', 'client_request_host', 'client_ip',
'client_ua', 'client_url', 'content_type',
'http_response_code', 'querystring',
'reply_length_bytes'
]
if not spark:
from baskerville.spark import get_spark_session
spark = get_spark_session()
spark.conf.set('spark.driver.memory', '8G')
print('Starting...')
df = spark.read.json(path).cache()
df = df.withColumn('timestamp', F.col('@timestamp').cast('timestamp'))
common_active_cols = [c for c in active_columns if c in df.columns]
df = df.select(*common_active_cols).sort('@timestamp')
print('Dataframe read...')
min_max_df = df.agg(
F.min(F.col('timestamp')).alias('min_ts'),
F.max(F.col('timestamp')).alias('max_ts')
).collect()[0]
current_window = min_max_df[0]
max_window = min_max_df[1]
window_df = None
try:
while True:
filter_ = (
(F.col('timestamp') >= current_window) &
(F.col('timestamp') <= current_window + time_window)
)
if verbose:
logger.info(f'Current window: {current_window}, '
f'Max window: {max_window}')
logger.info(f'Running for {str(filter_._jc)}')
window_df = df.select(
*common_active_cols).where(filter_).cache()
pandas_df = window_df.toPandas()
if not pandas_df.empty:
publish_df_split_in_time_windows(
time_window, producer, topic_name, pandas_df, verbose, sleep
)
current_window = current_window + time_window
logger.info(f'{current_window} {max_window} {time_window}')
if current_window > max_window:
logger.info(
f'>> EOF for Simulation, {current_window} {max_window}'
)
break
except Exception:
traceback.print_exc()
pass
finally:
if df:
df.unpersist()
if window_df:
window_df.unpersist()
if spark:
spark.catalog.clearCache()
def publish_df_split_in_time_windows(
time_window, producer, topic_name, df, verbose=False, sleep=True
):
"""
Publish the dataframe split in time_window seconds.
:param int time_window: the duration of the time window in seconds
:param confluent_kafka.Producer producer: the kafka producer
    :param str topic_name: the kafka topic name to publish to
    :param pandas.DataFrame df: the dataframe to publish
    :param boolean verbose: verbose flag
    :param boolean sleep: if True, sleep for the remaining seconds of the
    time window
:return: None
"""
global COUNTER, SESSION_COUNTER
# load logs and set the timestamp index
df = df.set_index(pd.DatetimeIndex(df['@timestamp']))
df.index = pd.to_datetime(df['@timestamp'], utc=True)
# sort by time
df.sort_index(inplace=True)
# group by timeframe - supporting minutes for now
groupped_df = df.groupby( | pd.Grouper(freq=time_window) | pandas.Grouper |
"""PandasMoveDataFrame class."""
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Callable
import numpy as np
from pandas import DataFrame, DateOffset, Series, Timedelta
from pymove.core.dataframe import MoveDataFrame
from pymove.core.grid import Grid
from pymove.utils.constants import (
DATE,
DATETIME,
DAY,
DAY_PERIODS,
DIST_PREV_TO_NEXT,
DIST_TO_NEXT,
DIST_TO_PREV,
HOUR,
HOUR_COS,
HOUR_SIN,
LATITUDE,
LOCAL_LABEL,
LONGITUDE,
MOVE,
PERIOD,
SITUATION,
SPEED_PREV_TO_NEXT,
SPEED_TO_NEXT,
SPEED_TO_PREV,
STOP,
TID,
TIME_PREV_TO_NEXT,
TIME_TO_NEXT,
TIME_TO_PREV,
TRAJ_ID,
TYPE_DASK,
TYPE_PANDAS,
UID,
WEEK_DAYS,
WEEK_END,
)
from pymove.utils.conversions import lat_meters
from pymove.utils.distances import haversine
from pymove.utils.log import logger, progress_bar
from pymove.utils.mem import begin_operation, end_operation
from pymove.utils.trajectories import shift
if TYPE_CHECKING:
from pymove.core.dask import DaskMoveDataFrame
class PandasMoveDataFrame(DataFrame):
"""PyMove dataframe extending Pandas DataFrame."""
def __init__(
self,
data: DataFrame | list | dict,
latitude: str = LATITUDE,
longitude: str = LONGITUDE,
datetime: str = DATETIME,
traj_id: str = TRAJ_ID,
):
"""
Checks whether past data has 'lat', 'lon', 'datetime' columns.
Renames it with the PyMove lib standard. After starts the
attributes of the class.
- self._mgr : Represents trajectory data.
- self._type : Represents the type of layer below the data structure.
- self.last_operation : Represents the last operation performed.
Parameters
----------
data : DataFrame or list or dict
Input trajectory data
latitude : str, optional
Represents column name latitude, by default LATITUDE
longitude : str, optional
Represents column name longitude, by default LONGITUDE
datetime : str, optional
Represents column name datetime, by default DATETIME
traj_id : str, optional
Represents column name trajectory id, by default TRAJ_ID
Raises
------
KeyError
If missing one of lat, lon, datetime columns
ValueError, ParserError
If the data types can't be converted
"""
if isinstance(data, dict):
data = DataFrame.from_dict(data)
elif isinstance(data, DataFrame):
data = | DataFrame(data) | pandas.DataFrame |
import os
os.environ["OMP_NUM_THREADS"] = "32"
from contextlib import contextmanager
import argparse
import os.path
import csv
import time
import sys
from functools import partial
import shutil as sh
import dill
from graph_tool.all import *
import pandas as pd
import numpy as np
import scipy as sp
from sklearn.covariance import LedoitWolf, OAS
import statsmodels.api as sm
from multipy.fdr import lsu
def ensure_dir(file_path):
directory = os.path.dirname(file_path)
if not os.path.exists(directory):
os.makedirs(directory)
if not os.path.exists(file_path):
os.makedirs(file_path)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--verbose', '-v', action='store_true',
help='Show more information on the screen.')
parser.add_argument('--correlation',
choices=('pearson', 'precision', 'spearman', 'correlation'),
default='spearman',
help=("Compute correlation coefficients using either "
"'pearson' (standard correlation coefficient), "
"'correlation' (shrinkage correlation estimate), "
"'precision' (shrinkage inverse correlation estimate), or "
"'spearman' (Spearman rank correlation)."))
parser.add_argument('--tissue', required=True,
choices=('head', 'body'),
help='Tissue being analysed.')
parser.add_argument('--graph',
help=('Path to the full input graph generated by graph-tool.'))
parser.add_argument('--label', required=True,
help=('Nested block partition csv and dill output file.'))
parser.add_argument('--path', dest='input_path', default = '../data/output/SBM/clustering/',
                        help=('Folder containing the nested block partition csv and dill files.'))
parser.add_argument('--out', dest='out_path', default = None,
                        help=('Output folder name.'))
args = parser.parse_args()
block_df = pd.read_csv(args.input_path + args.label + ".csv")
print("Loading graph...")
g = load_graph(args.graph)
corr = g.edge_properties[args.correlation]
g.ep.positive = g.new_edge_property("int", (np.sign(corr.a) + 1)/2)
g.ep.layer = g.new_edge_property("int16_t", np.sign(corr.a).astype(np.int16))
g.ep.layer.a = np.sign(corr.a).astype(np.int16)
g.ep.z_s = g.new_edge_property("double", (2*np.arctanh(corr.a)))
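    # 2*arctanh(r) is (twice) the Fisher z-transform of the correlations, used below as the
    # real-normal edge covariate for the nested block model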
print("Loading blocks...")
with open (args.input_path + args.label + ".dill", "rb") as fh:
bs = dill.load(fh)
print("Reconstructing BlockState...")
state = minimize_nested_blockmodel_dl(g, init_bs=bs,
state_args=dict(recs=[g.ep.z_s],
rec_types=["real-normal"]))
print("State entropy: " + str(state.entropy()))
if args.out_path is None:
out_folder = args.input_path + args.label + '_gene-blocks'
else:
out_folder = args.out_path
ensure_dir(out_folder)
print("Clearing output folder...")
for filename in os.listdir(out_folder):
file_path = os.path.join(out_folder, filename)
try:
if os.path.isfile(file_path) or os.path.islink(file_path):
os.unlink(file_path)
elif os.path.isdir(file_path):
sh.rmtree(file_path)
except Exception as e:
print('Failed to delete %s. Reason: %s' % (file_path, e))
print("Calculating block sizes...")
blocks = [list(set(block_df[b])) for b in block_df.filter(like='B', axis=1)]
block_sizes = [len(b) for b in blocks]
block_sizes = -np.sort(-np.array(list(set(block_sizes))))
block_sizes = [x for x in block_sizes if x >= 2]
block_df["Gene"].to_csv(out_folder + "/background.csv", header=False, index=False )
print("Creating gene lists...")
n_levels = len(block_sizes)
output_df = | pd.DataFrame(columns=('Nested_Level', 'Block', 'File', 'N_genes', 'Internal_degree', 'Assortatitvity')) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 by <NAME> (www.robo.guru)
# All rights reserved.
# This file is part of Agenoria and is released under the MIT License.
# Please see the LICENSE file that should have been included as part of
# this package.
import datetime as dt
from dateutil.relativedelta import relativedelta
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from pandas.plotting import register_matplotlib_converters
from .parse_config import parse_json_config
from .plot_settings import format_monthly_plot, export_figure
# Debug option
DEBUG = False
DEBUG_START_DATE = dt.datetime(2019, 8, 17, 0, 0, 0)
DEBUG_END_DATE = dt.datetime(2019, 9, 27, 0, 0, 0)
ALPHA_VALUE = 0.3
# Parameters from JSON
config = []
def parse_glow_sleep_data(file_name):
# Import file
data_sleep = pd.read_csv(file_name, parse_dates=['Begin time', 'End time'])
# Make a new column with date component only
data_sleep['Date'] = data_sleep['Begin time'].dt.normalize()
# Find first and last entry in column
start_date = data_sleep['Date'].iloc[-1]
end_date = data_sleep['Date'].iloc[0]
if (DEBUG):
start_date = DEBUG_START_DATE
end_date = DEBUG_END_DATE
sleep_data_list = []
offset = 0
# For some reason raw sleep data is not sorted by time
data_sleep = data_sleep.sort_values(['Begin time'], ascending=False)
# Label each daytime nap session
nap_index = (data_sleep['Begin time'].dt.hour > 7) & (
data_sleep['Begin time'].dt.hour < 19)
data_sleep.loc[nap_index, 'daytime_nap'] = 1
# Get duration for each row, then convert to hours
data_sleep['duration'] = data_sleep['End time'] - data_sleep['Begin time']
data_sleep['duration'] = data_sleep['duration'] / np.timedelta64(1, 'h')
# Find the index of session that extend into the next day
index = data_sleep['End time'].dt.normalize() > data_sleep['Date']
# Compute the offset duration to be plotted the next day
sleep_offset = data_sleep.loc[index, 'End time']
data_sleep.loc[index, 'offset'] = sleep_offset.dt.hour + \
sleep_offset.dt.minute / 60
for current_date in pd.date_range(start_date, end_date):
        # Get all entries on this date
rows_on_date = data_sleep[data_sleep['Date'].isin([current_date])]
# Compute number of nap sessions
nap_sessions_on_date = rows_on_date['daytime_nap'].count()
# Get total sleep duration
total_sleep_duration = rows_on_date['duration'].sum()
# Add offset from previous day
total_sleep_duration += offset
# Catch session that extend past midnight, subtract from duration
offset = rows_on_date['offset'].sum()
total_sleep_duration -= offset
# Longest session
longest_session = rows_on_date['duration'].max()
# Remove all sleep sessions less than two minutes
SLEEP_THRESHOLD = 0.0333333 # two minutes -> hours
filtered = rows_on_date[rows_on_date['duration'] > SLEEP_THRESHOLD]
# Compute longest awake time - begin (current time) - end (next row)
end_time_shifted = filtered['End time'].shift(-1)
awake_duration = filtered['Begin time'] - end_time_shifted
max_awake_duration = awake_duration.max() / np.timedelta64(1, 'h')
# Put stats in a list
sleep_data_list.append(
[current_date, nap_sessions_on_date, total_sleep_duration,
longest_session, max_awake_duration])
# Convert list to dataframe
data_sleep_daily = pd.DataFrame(
sleep_data_list, columns=['date', 'total_naps', 'total_sleep_duration',
'longest_session', 'max_awake_duration'])
return data_sleep_daily
def parse_glow_feeding_data(file_name, key_amount):
# Import file
data = pd.read_csv(file_name, parse_dates=['Time of feeding'])
# Make a new column with date component only
data['Date'] = data['Time of feeding'].dt.normalize()
# Find first and last entry in column
start_date = data['Date'].iloc[-1]
end_date = data['Date'].iloc[0]
if (DEBUG):
start_date = DEBUG_START_DATE
end_date = DEBUG_END_DATE
# Final data
feeding_data_list = []
for current_date in | pd.date_range(start_date, end_date) | pandas.date_range |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
READ IN:
1) <NAME> Data "../../../AKJ_Replication/Replication/data/data_replication.csv"
2) Alternative data "../output/alternativedata.csv"
EXPORT:
"../output/alternativedata.csv"
@author: olivergiesecke
"""
import pandas as pd
import numpy as np
import os
import re
ref_df = pd.read_csv("../output/alternativedata.csv")
ref_df["start_date"] = pd.to_datetime(ref_df["start_date"])
ref_df["end_date"] = pd.to_datetime(ref_df["end_date"])
dates = ref_df.drop_duplicates("start_date", "first")["start_date"].reset_index()
dates["year"] = dates.start_date.apply(lambda x:x.year)
dates["month"] =dates.start_date.apply(lambda x:x.month)
dates["dup"] = dates.duplicated(subset=["year","month"])
print(f"Number of duplicates in therms of year and month: {dates['dup'].sum()}")
dates.drop(columns="dup",inplace=True)
econ_df = pd.read_csv("../../../AKJ_Replication/Replication/data/data_replication.csv")
econ_df["start_date"] = | pd.to_datetime(econ_df["date"]) | pandas.to_datetime |
import pytest
import numpy as np
from scipy import linalg
import pandas as pd
from linkalman.core.utils import *
# Test mask_nan
def test_mask_nan():
"""
Test when input is a matrix with column size > 1
"""
mat = np.ones((4,4))
is_nan = np.array([True, False, True, False])
expected_result = np.array([[0, 0, 0, 0],
[0, 1, 0, 1],
[0, 0, 0, 0],
[0, 1, 0, 1]])
result = mask_nan(is_nan, mat)
np.testing.assert_array_equal(expected_result, result)
def test_mask_nan_vector():
"""
Test when input is a matrix with column size == 1
"""
mat = np.ones((4, 1))
is_nan = np.array([True, False, True, False])
expected_result = np.array([[0], [1], [0], [1]])
result = mask_nan(is_nan, mat)
np.testing.assert_array_equal(expected_result, result)
def test_mask_nan_wrong_dim_input():
"""
    Test that an exception is raised for an invalid dim input
"""
mat = np.ones((4, 1))
is_nan = np.array([True, False, True, False])
with pytest.raises(ValueError):
result = mask_nan(is_nan, mat, 'Col')
def test_mask_nan_row_only():
"""
    Test that only rows are masked when dim=='row'
"""
mat = np.ones((4,4))
is_nan = np.array([True, False, True, False])
expected_result = np.array([[0, 0, 0, 0],
[1, 1, 1, 1],
[0, 0, 0, 0],
[1, 1, 1, 1]])
result = mask_nan(is_nan, mat, dim='row')
np.testing.assert_array_equal(expected_result, result)
# Test inv
def test_inv():
"""
Test normal behavior of inv
"""
mat = np.array([[2, 1], [1, 4]])
expected_result =np.array([[1/2 + 1/14, -1/7], [-1/7, 2/7]])
result = inv(mat)
np.testing.assert_array_almost_equal(result, expected_result)
def test_inv_0():
"""
Test pseudo-inverse of zero matrix
"""
mat = np.zeros([3, 3])
expected_result =np.zeros([3, 3])
result = inv(mat)
np.testing.assert_array_almost_equal(result, expected_result)
def test_inv_not_full_rank():
"""
Test pseudo-inverse if not full rank
"""
mat = np.array([[2, 0], [0, 0]])
expected_result = np.array([[0.5, 0], [0, 0]])
result = inv(mat)
np.testing.assert_array_almost_equal(result, expected_result)
def test_inv_not_PSD():
"""
Test result if input matrix not PSD
"""
mat = np.array([[2, 4], [3, 1]])
expected_result = linalg.pinv(mat)
result = inv(mat)
np.testing.assert_array_almost_equal(result, expected_result)
# Test gen_PSD
def test_gen_PSD():
"""
Test normal behavior
"""
theta = [0,2,0,4,5,0]
dim = 3
expected_results = np.array([[1, 2, 4],
[2, 5, 13],
[4, 13, 42]])
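    # expected_results equals L @ L.T for the lower-triangular L implied by theta:
    # unit diagonal (exp(0) for theta[0], theta[2], theta[5]) and off-diagonals 2, 4, 5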
results = gen_PSD(theta, dim)
assert(np.array_equal(expected_results, results))
def test_gen_PSD_wrong_theta_size():
"""
    Test that an exception is raised when theta has the wrong size
"""
theta = [1,2,3,4,5,6]
dim = 2
with pytest.raises(ValueError):
PSD = gen_PSD(theta, dim)
# Test df_to_tensor
def test_df_to_tensor(df1):
"""
Test normal behaviors
"""
expected_result = np.array([[[1.], [4.]], [[2.], [5.]], [[3.], [6.]]])
col_list = ['a', 'b']
result = df_to_tensor(df1, col_list)
np.testing.assert_array_equal(expected_result, result)
def test_df_to_tensor_nan(df1):
"""
    Test that None is returned when col_list is not specified
"""
expected_result = None
result = df_to_tensor(df1)
assert result == expected_result
def test_df_to_tensor_NaN():
"""
Test partially missing observations
"""
df = pd.DataFrame({'a': [1., 2., 3.], 'b': [2., np.nan, 4.], 'c': [1, 2, 3]})
expected_result = np.array([[[1.], [2.]], [[2.], [np.nan]], [[3.], [4.]]])
col_list = ['c', 'b']
result = df_to_tensor(df, col_list)
np.testing.assert_array_equal(expected_result, result)
def test_df_to_tensor_all_NaN():
"""
    Test an observation with all of its values missing
"""
df = pd.DataFrame({'a': [1., np.nan, 3.], 'b': [2., np.nan, 4.]})
col_list = ['a', 'b']
expected_result = np.array([[[1.], [2.]], [[np.nan], [np.nan]], [[3.], [4.]]])
result = df_to_tensor(df, col_list)
np.testing.assert_array_equal(expected_result, result)
def test_df_to_tensor_string():
"""
Test str input exceptions
"""
df = pd.DataFrame({'a': [1., 2., 3.], 'b': [1, 'str2', 'str3']})
col_list = ['a', 'b']
with pytest.raises(TypeError):
df_to_tensor(df, col_list)
def test_df_to_list_not_df(df1):
with pytest.raises(TypeError):
df_to_tensor(df1['a'], ['a'])
# Test tensor_to_df
def test_tensor_to_df():
"""
Test normal behaviors
"""
input_array = np.array([[[1.], [2.]], [[2.], [3.]], [[3.], [4.]]])
col = ['a', 'b']
expected_result = | pd.DataFrame({'a': [1., 2., 3.], 'b': [2., 3., 4.]}) | pandas.DataFrame |
from cytopy.data import gate
from cytopy.data.geometry import *
from scipy.spatial.distance import euclidean
from shapely.geometry import Polygon
from sklearn.datasets import make_blobs
from KDEpy import FFTKDE
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import pytest
np.random.seed(42)
def test_child_init():
test_child = gate.Child(name="test",
signature={"x": 2423, "y": 2232, "z": 4543})
assert test_child.name == "test"
assert test_child.signature.get("x") == 2423
assert test_child.signature.get("y") == 2232
assert test_child.signature.get("z") == 4543
def test_childthreshold_init():
test_child = gate.ChildThreshold(name="test",
signature={"x": 2423, "y": 2232, "z": 4543},
definition="+",
geom=ThresholdGeom(x="x",
y="y",
x_threshold=0.5,
y_threshold=0.5,
transform_x="logicle",
transform_y="logicle"))
assert test_child.name == "test"
assert test_child.signature.get("x") == 2423
assert test_child.signature.get("y") == 2232
assert test_child.signature.get("z") == 4543
assert test_child.definition == "+"
assert test_child.geom.x == "x"
assert test_child.geom.y == "y"
assert test_child.geom.x_threshold == 0.5
assert test_child.geom.y_threshold == 0.5
assert test_child.geom.transform_x == "logicle"
assert test_child.geom.transform_x == "logicle"
@pytest.mark.parametrize("definition,expected", [("+", True),
("-", False)])
def test_childthreshold_match_definition_1d(definition, expected):
test_child = gate.ChildThreshold(name="test",
signature={"x": 2423, "y": 2232, "z": 4543},
definition=definition,
geom=ThresholdGeom(x="x",
y="y",
x_threshold=0.5,
y_threshold=0.5,
transform_x="logicle",
transform_y="logicle"))
assert test_child.match_definition("+") == expected
@pytest.mark.parametrize("definition,expected", [("++", True),
("--", False),
("++,+-", True),
("--,-+", False),
("+-,-+,++", True)])
def test_childthreshold_match_definition_2d(definition, expected):
test_child = gate.ChildThreshold(name="test",
signature={"x": 2423, "y": 2232, "z": 4543},
definition=definition,
geom=ThresholdGeom(x="x",
y="y",
x_threshold=0.5,
y_threshold=0.5,
transform_x="logicle",
transform_y="logicle"))
assert test_child.match_definition("++") == expected
def test_childpolygon_init():
test_child = gate.ChildPolygon(name="test",
signature={"x": 2423, "y": 2232, "z": 4543},
geom=PolygonGeom(x="x", y="y"))
assert test_child.name == "test"
assert test_child.signature.get("x") == 2423
assert test_child.signature.get("y") == 2232
assert test_child.signature.get("z") == 4543
assert test_child.geom.x == "x"
assert test_child.geom.y == "y"
@pytest.mark.parametrize("klass,method", [(gate.Gate, "manual"),
(gate.ThresholdGate, "density"),
(gate.PolygonGate, "manual"),
(gate.EllipseGate, "GaussianMixture")])
def test_gate_init(klass, method):
g = klass(gate_name="test",
parent="test parent",
x="X",
y="Y",
method=method,
dim_reduction=dict(method="UMAP", kwargs={"n_neighbours": 100}))
assert g.gate_name == "test"
assert g.parent == "test parent"
assert g.x == "X"
assert g.y == "Y"
assert g.method == method
assert g.dim_reduction.get("method") == "UMAP"
assert g.dim_reduction.get("kwargs").get("n_neighbours") == 100
def test_transform_none():
g = gate.Gate(gate_name="test",
parent="test parent",
x="X",
y="Y",
method="manual")
data = pd.DataFrame({"X": np.random.normal(1, scale=0.5, size=1000),
"Y": np.random.normal(1, scale=0.5, size=1000)})
transformed = g.transform(data)
assert isinstance(transformed, pd.DataFrame)
assert transformed.shape[0] == 1000
assert transformed.shape[1] == 2
for i in ["X", "Y"]:
assert transformed[i].mean() == pytest.approx(1., 0.1)
assert transformed[i].std() == pytest.approx(0.5, 0.1)
def test_transform_x():
g = gate.Gate(gate_name="test",
parent="test parent",
x="X",
y="Y",
method="manual",
transform_x="logicle")
data = pd.DataFrame({"X": np.random.normal(1, scale=0.5, size=1000),
"Y": np.random.normal(1, scale=0.5, size=1000)})
transformed = g.transform(data)
assert isinstance(transformed, pd.DataFrame)
assert transformed.shape[0] == 1000
assert transformed.shape[1] == 2
assert transformed["X"].mean() != pytest.approx(1., 0.1)
assert transformed["X"].std() != pytest.approx(0.5, 0.1)
assert transformed["Y"].mean() == pytest.approx(1., 0.1)
assert transformed["Y"].std() == pytest.approx(0.5, 0.1)
def test_transform_xy():
g = gate.Gate(gate_name="test",
parent="test parent",
x="X",
y="Y",
method="manual",
transform_x="logicle",
transform_y="logicle")
data = pd.DataFrame({"X": np.random.normal(1, scale=0.5, size=1000),
"Y": np.random.normal(1, scale=0.5, size=1000)})
transformed = g.transform(data)
assert isinstance(transformed, pd.DataFrame)
assert transformed.shape[0] == 1000
assert transformed.shape[1] == 2
assert transformed["X"].mean() != pytest.approx(1., 0.1)
assert transformed["X"].std() != pytest.approx(0.5, 0.1)
assert transformed["Y"].mean() != pytest.approx(1., 0.1)
assert transformed["Y"].std() != pytest.approx(0.5, 0.1)
@pytest.mark.parametrize("kwargs", [{"method": "uniform",
"n": 500},
{"method": "faithful"},
{"method": "density"}])
def test_downsample(kwargs):
g = gate.Gate(gate_name="test",
parent="test parent",
x="X",
y="Y",
method="manual",
sampling=kwargs)
data = pd.DataFrame({"X": np.random.normal(1, scale=0.5, size=1000),
"Y": np.random.normal(1, scale=0.5, size=1000)})
sample = g._downsample(data=data)
if kwargs.get("method") is None:
assert sample is None
else:
assert sample.shape[0] < data.shape[0]
def test_upsample():
data, labels = make_blobs(n_samples=3000,
n_features=2,
centers=3,
random_state=42)
data = pd.DataFrame(data, columns=["X", "Y"])
g = gate.Gate(gate_name="test",
parent="test parent",
x="X",
y="Y",
method="manual",
sampling={"method": "uniform",
"frac": 0.5})
sample = g._downsample(data=data)
sample_labels = labels[sample.index.values]
pops = list()
for x in np.unique(sample_labels):
idx = sample.index.values[np.where(sample_labels == x)[0]]
pops.append(gate.Population(population_name=f"Pop_{x}",
parent="root",
index=idx[:498]))
pops = g._upsample(data=data, sample=sample, populations=pops)
assert isinstance(pops, list)
assert all([isinstance(p, gate.Population) for p in pops])
assert all([len(p.index) == 1000 for p in pops])
for x in np.unique(labels):
p = [i for i in pops if i.population_name == f"Pop_{x}"][0]
assert np.array_equal(p.index, np.where(labels == x)[0])
def test_dim_reduction():
g = gate.Gate(gate_name="test",
parent="test parent",
x="X",
y="Y",
method="manual",
dim_reduction={"method": "UMAP",
"n_neighbors": 100})
data = pd.DataFrame({"X": np.random.normal(1, 0.5, 1000),
"Y": np.random.normal(1, 0.5, 1000),
"Z": np.random.normal(1, 0.5, 1000),
"W": np.random.normal(1, 0.5, 1000)})
data = g._dim_reduction(data=data)
assert g.x == "UMAP1"
assert g.y == "UMAP2"
assert data.shape == (1000, 6)
assert all([f"UMAP{i + 1}" in data.columns for i in range(2)])
@pytest.mark.parametrize("d", ["++", "--", "+-", "+++", "+ -"])
def test_threshold_add_child_invalid_1d(d):
threshold = gate.ThresholdGate(gate_name="test",
parent="test parent",
method="manual",
x="X")
child = gate.ChildThreshold(name="test child",
definition=d,
geom=ThresholdGeom(x="X", x_threshold=0.56, y_threshold=0.75))
with pytest.raises(AssertionError) as err:
threshold.add_child(child)
assert str(err.value) == "Invalid child definition, should be either '+' or '-'"
@pytest.mark.parametrize("d", ["+", "-", "+--", "+++", "+ -"])
def test_threshold_add_child_invalid_2d(d):
threshold = gate.ThresholdGate(gate_name="test",
parent="test parent",
x="X",
y="Y",
method="manual")
child = gate.ChildThreshold(name="test child",
definition=d,
geom=ThresholdGeom(x_threshold=0.56, y_threshold=0.75))
with pytest.raises(AssertionError) as err:
threshold.add_child(child)
assert str(err.value) == "Invalid child definition, should be one of: '++', '+-', '-+', or '--'"
def test_threshold_add_child():
threshold = gate.ThresholdGate(gate_name="test",
parent="test parent",
x="X",
y="Y",
method="manual",
transform_x="logicle")
child = gate.ChildThreshold(name="test child",
definition="++",
geom=ThresholdGeom(x_threshold=0.56, y_threshold=0.75))
threshold.add_child(child)
assert len(threshold.children)
assert threshold.children[0].geom.x == threshold.x
assert threshold.children[0].geom.y == threshold.y
assert threshold.children[0].geom.transform_x == "logicle"
assert not threshold.children[0].geom.transform_y
def test_threshold_match_children_1d():
threshold = gate.ThresholdGate(gate_name="test",
parent="test parent",
x="X",
method="density")
data = np.random.normal(loc=1., scale=1.5, size=1000)
threshold.add_child(gate.ChildThreshold(name="positive",
definition="+",
geom=ThresholdGeom(x_threshold=0.5)))
threshold.add_child(gate.ChildThreshold(name="negative",
definition="-",
geom=ThresholdGeom(x_threshold=0.5)))
pos = gate.Population(population_name="p1",
parent="root",
definition="+",
geom=ThresholdGeom(x_threshold=0.6),
index=data[np.where(data >= 0.6)])
neg = gate.Population(population_name="p2",
parent="root",
definition="-",
geom=ThresholdGeom(x_threshold=0.6),
index=data[np.where(data >= 0.6)])
pops = threshold._match_to_children([neg, pos])
pos = [p for p in pops if p.definition == "+"][0]
assert pos.population_name == "positive"
neg = [p for p in pops if p.definition == "-"][0]
assert neg.population_name == "negative"
def test_threshold_match_children_2d():
threshold = gate.ThresholdGate(gate_name="test",
parent="test parent",
x="X",
y="Y",
method="density")
x = np.random.normal(loc=1., scale=1.5, size=1000)
y = np.random.normal(loc=1., scale=1.5, size=1000)
data = pd.DataFrame({"X": x, "Y": y})
threshold.add_child(gate.ChildThreshold(name="positive",
definition="++,+-",
geom=ThresholdGeom(x_threshold=0.5)))
threshold.add_child(gate.ChildThreshold(name="negative",
definition="--,-+",
geom=ThresholdGeom(x_threshold=0.5)))
pos = gate.Population(population_name="p1",
parent="root",
definition="++",
geom=ThresholdGeom(x_threshold=0.6),
index=data[data.X >= 0.6].index.values)
neg = gate.Population(population_name="p2",
parent="root",
definition="--,-+",
geom=ThresholdGeom(x_threshold=0.6),
index=data[data.X < 0.6].index.values)
pops = threshold._match_to_children([neg, pos])
pos = [p for p in pops if p.definition == "++"][0]
assert pos.population_name == "positive"
neg = [p for p in pops if p.definition == "--,-+"][0]
assert neg.population_name == "negative"
def test_threshold_1d():
x = np.random.normal(loc=1., scale=1.5, size=1000)
data = pd.DataFrame({"X": x})
results = gate.threshold_1d(data=data, x="X", x_threshold=0.5)
assert len(results.keys()) == 2
assert all(isinstance(df, pd.DataFrame) for df in results.values())
assert len(np.where(x >= 0.5)[0]) == results.get("+").shape[0]
assert len(np.where(x < 0.5)[0]) == results.get("-").shape[0]
def test_threshold_2d():
x = np.random.normal(loc=1., scale=1.5, size=1000)
y = np.random.normal(loc=1., scale=1.5, size=1000)
data = pd.DataFrame({"X": x,
"Y": y})
results = gate.threshold_2d(data=data, x="X", y="Y", x_threshold=0.5, y_threshold=0.5)
assert len(results.keys()) == 4
assert all(isinstance(df, pd.DataFrame) for df in results.values())
x_pos, y_pos = np.where(x >= 0.5)[0], np.where(y >= 0.5)[0]
x_neg, y_neg = np.where(x < 0.5)[0], np.where(y < 0.5)[0]
assert len(np.intersect1d(x_pos, y_pos)) == results.get("++").shape[0]
assert len(np.intersect1d(x_pos, y_neg)) == results.get("+-").shape[0]
assert len(np.intersect1d(x_neg, y_pos)) == results.get("-+").shape[0]
assert len(np.intersect1d(x_neg, y_neg)) == results.get("--").shape[0]
def test_smoothed_peak_finding():
n1 = np.random.normal(loc=0.2, scale=1, size=500)
n2 = np.random.normal(loc=2.5, scale=0.2, size=250)
n3 = np.random.normal(loc=6.5, scale=0.5, size=500)
data = np.hstack([n1, n2, n3])
smoothed, peaks = gate.smoothed_peak_finding(p=data)
assert isinstance(smoothed, np.ndarray)
assert isinstance(peaks, np.ndarray)
assert len(peaks) == 2
def test_find_local_minima():
n1 = np.random.normal(loc=2, scale=1, size=1000)
n2 = np.random.normal(loc=10, scale=0.5, size=1000)
data = np.hstack([n1, n2])
x, y = FFTKDE(kernel='gaussian', bw='silverman').fit(data).evaluate()
peak1 = np.where(y == np.max(y[np.where(x < 6)]))[0][0]
peak2 = np.where(y == np.max(y[np.where(x > 6)]))[0][0]
minima = x[np.where(y == np.min(y[np.where((x > 4) & (x < 7))]))[0][0]]
assert gate.find_local_minima(p=y, x=x, peaks=np.array([peak1, peak2])) == minima
def test_find_inflection_point():
np.random.seed(42)
n1 = np.random.normal(loc=2, scale=1, size=1000)
x, y = FFTKDE(kernel='gaussian', bw='silverman').fit(n1).evaluate()
inflection_point = gate.find_inflection_point(x=x, p=y, peak_idx=int(np.argmax(y)),
incline=False)
plt.plot(x, y)
plt.axvline(inflection_point, c="r")
plt.title("Test inflection point; incline=False")
plt.show()
assert 3 < inflection_point < 4
inflection_point = gate.find_inflection_point(x=x, p=y, peak_idx=int(np.argmax(y)),
incline=True)
plt.plot(x, y)
plt.axvline(inflection_point, c="r")
plt.title("Test inflection point; incline=True")
plt.show()
assert 0 < inflection_point < 1
def test_threshold_fit_1d():
np.random.seed(42)
n1 = np.random.normal(loc=0.2, scale=1, size=500)
n2 = np.random.normal(loc=2.5, scale=0.2, size=250)
n3 = np.random.normal(loc=6.5, scale=0.5, size=500)
data = pd.DataFrame({"X": np.hstack([n1, n2, n3])})
threshold = gate.ThresholdGate(gate_name="test",
parent="test parent",
x="X",
method="density")
threshold.fit(data=data)
assert len(threshold.children) == 2
assert threshold.children[0].geom.x_threshold == threshold.children[1].geom.x_threshold
assert round(threshold.children[0].geom.x_threshold) == 4
assert all([i in [c.definition for c in threshold.children] for i in ["+", "-"]])
def test_threshold_fit_2d():
data, labels = make_blobs(n_samples=3000,
n_features=2,
centers=[(1., 1.), (1., 5.), (5., 0.2)],
random_state=42)
data = pd.DataFrame({"X": data[:, 0], "Y": data[:, 1]})
threshold = gate.ThresholdGate(gate_name="test",
parent="test parent",
x="X",
y="Y",
method="density")
threshold.fit(data)
assert len(threshold.children) == 4
assert len(set([c.geom.x_threshold for c in threshold.children])) == 1
assert len(set([c.geom.y_threshold for c in threshold.children])) == 1
assert all([i in [c.definition for c in threshold.children] for i in ["++", "--",
"+-", "-+"]])
assert 2 < threshold.children[0].geom.x_threshold < 4
assert 2 < threshold.children[0].geom.y_threshold < 4
def test_threshold_predict_1d():
n1 = np.random.normal(loc=0.2, scale=1, size=500)
n2 = np.random.normal(loc=2.5, scale=0.2, size=250)
n3 = np.random.normal(loc=6.5, scale=0.5, size=500)
data = pd.DataFrame({"X": np.hstack([n1, n2, n3])})
threshold = gate.ThresholdGate(gate_name="test",
parent="test parent",
x="X",
method="density")
threshold.fit(data=data)
new_data = pd.DataFrame({"X": np.hstack([np.random.normal(loc=0.2, scale=1, size=500),
np.random.normal(loc=6.5, scale=0.5, size=500)])})
pops = threshold.predict(new_data)
assert len(pops) == 2
assert all([isinstance(p, gate.Population) for p in pops])
assert all([isinstance(p.geom, ThresholdGeom) for p in pops])
assert all([p.geom.x == threshold.x for p in pops])
assert all([p.geom.y == threshold.y for p in pops])
assert all(p.geom.transform_x == threshold.transform_x for p in pops)
assert all(p.geom.transform_y == threshold.transform_y for p in pops)
assert all(i in [p.definition for p in pops] for i in ["+", "-"])
neg_idx = new_data[new_data.X < threshold.children[0].geom.x_threshold].index.values
pos_idx = new_data[new_data.X >= threshold.children[0].geom.x_threshold].index.values
pos_pop = [p for p in pops if p.definition == "+"][0]
neg_pop = [p for p in pops if p.definition == "-"][0]
assert np.array_equal(neg_pop.index, neg_idx)
assert np.array_equal(pos_pop.index, pos_idx)
def test_threshold_predict_2d():
data, _ = make_blobs(n_samples=3000,
n_features=2,
centers=[(1., 1.), (1., 5.), (5., 0.2)],
random_state=42)
data = pd.DataFrame({"X": data[:, 0], "Y": data[:, 1]})
threshold = gate.ThresholdGate(gate_name="test",
parent="test parent",
x="X",
y="Y",
method="density")
threshold.fit(data=data)
new_data, _ = make_blobs(n_samples=3000,
n_features=2,
centers=[(1., 1.), (5., 0.2)],
random_state=42)
new_data = pd.DataFrame({"X": new_data[:, 0], "Y": new_data[:, 1]})
pops = threshold.predict(new_data)
assert len(pops) == 4
assert all([isinstance(p, gate.Population) for p in pops])
assert all([isinstance(p.geom, ThresholdGeom) for p in pops])
assert all([p.geom.x == threshold.x for p in pops])
assert all([p.geom.y == threshold.y for p in pops])
assert all(p.geom.transform_x == threshold.transform_x for p in pops)
assert all(p.geom.transform_y == threshold.transform_y for p in pops)
assert all(i in [p.definition for p in pops] for i in ["++", "--", "-+", "+-"])
neg_idx = new_data[(new_data.X < threshold.children[0].geom.x_threshold) &
(new_data.Y < threshold.children[0].geom.y_threshold)].index.values
pos_idx = new_data[(new_data.X >= threshold.children[0].geom.x_threshold) &
(new_data.Y >= threshold.children[0].geom.y_threshold)].index.values
negpos_idx = new_data[(new_data.X < threshold.children[0].geom.x_threshold) &
(new_data.Y >= threshold.children[0].geom.y_threshold)].index.values
posneg_idx = new_data[(new_data.X >= threshold.children[0].geom.x_threshold) &
(new_data.Y < threshold.children[0].geom.y_threshold)].index.values
pos_pop = [p for p in pops if p.definition == "++"][0]
neg_pop = [p for p in pops if p.definition == "--"][0]
posneg_pop = [p for p in pops if p.definition == "+-"][0]
negpos_pop = [p for p in pops if p.definition == "-+"][0]
assert np.array_equal(neg_pop.index, neg_idx)
assert np.array_equal(pos_pop.index, pos_idx)
assert np.array_equal(negpos_pop.index, negpos_idx)
assert np.array_equal(posneg_pop.index, posneg_idx)
def test_threshold_fit_predict_1d():
n1 = np.random.normal(loc=0.2, scale=1, size=500)
n2 = np.random.normal(loc=2.5, scale=0.2, size=250)
n3 = np.random.normal(loc=6.5, scale=0.5, size=500)
data = pd.DataFrame({"X": np.hstack([n1, n2, n3])})
threshold = gate.ThresholdGate(gate_name="test",
parent="test parent",
x="X",
method="density")
threshold.fit(data=data)
threshold.label_children({"+": "Positive",
"-": "Negative"})
new_data = pd.DataFrame({"X": np.hstack([np.random.normal(loc=0.2, scale=1, size=200),
np.random.normal(loc=6.5, scale=0.5, size=1000)])})
pops = threshold.fit_predict(new_data)
assert len(pops) == 2
assert all([isinstance(p, gate.Population) for p in pops])
assert all([isinstance(p.geom, ThresholdGeom) for p in pops])
assert all([p.geom.x == threshold.x for p in pops])
assert all([p.geom.y == threshold.y for p in pops])
assert all(p.geom.transform_x == threshold.transform_x for p in pops)
assert all(p.geom.transform_y == threshold.transform_y for p in pops)
assert all(i in [p.definition for p in pops] for i in ["+", "-"])
pos_pop = [p for p in pops if p.definition == "+"][0]
assert pos_pop.population_name == "Positive"
neg_pop = [p for p in pops if p.definition == "-"][0]
assert neg_pop.population_name == "Negative"
assert len(pos_pop.index) > len(neg_pop.index)
assert len(pos_pop.index) > 800
assert len(neg_pop.index) < 300
def test_threshold_fit_predict_2d():
data, _ = make_blobs(n_samples=4000,
n_features=2,
centers=[(1., 1.), (1., 7.), (7., 2.), (7., 6.2)],
random_state=42)
data = pd.DataFrame({"X": data[:, 0], "Y": data[:, 1]})
threshold = gate.ThresholdGate(gate_name="test",
parent="test parent",
x="X",
y="Y",
method="density")
threshold.fit(data)
threshold.label_children({"++": "Top left",
"--": "Other",
"-+": "Other",
"+-": "Other"})
data, _ = make_blobs(n_samples=3000,
n_features=2,
centers=[(1., 1.), (1., 7.), (7., 6.2)],
random_state=42)
data = | pd.DataFrame({"X": data[:, 0], "Y": data[:, 1]}) | pandas.DataFrame |
import os
import shutil
import errno  # needed for the EEXIST check in clean_folder
import numpy as np
import pandas as pd
import scipy.integrate, scipy.stats, scipy.optimize, scipy.signal
from scipy.stats import mannwhitneyu
import statsmodels.formula.api as smf
import pystan
def clean_folder(folder):
"""Create a new folder, or if the folder already exists,
delete all files it contains
Args:
folder (string): Path to folder
"""
if os.path.isdir(folder):
shutil.rmtree(folder)
try:
os.makedirs(folder)
except OSError as e:
if e.errno != errno.EEXIST:
raise
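# A minimal usage sketch for clean_folder (the path below is hypothetical):
# calling it repeatedly is safe because an existing folder is wiped and recreated,
# so downstream code can always assume an empty output directory.
#
#   clean_folder('output/example_run')
#   os.listdir('output/example_run')   # -> []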
def get_data_for_stan(y):
"""Convenience function for
collecting data for STAN estimation
Args:
y (np vector): Data series for Bayesian filtering
Returns:
dict: Data for Stan estimation
"""
assert y.ndim == 1, \
"y must be a vector"
assert len(y) > 0, \
"y must have positive length"
assert isinstance(y, np.ndarray), \
"y must be a numpy array"
N_obs = len(pd.Series(y).dropna())
N_mis = np.sum(np.isnan(y))
ii_obs = list(range(1, N_obs + N_mis + 1))
ii_mis = []
if N_mis > 0:
for ii in np.argwhere(np.isnan(y)):
ii_mis.append(ii[0] + 1)
ii_obs.remove(ii[0] + 1)
return {'N_obs': N_obs,
'N_mis': N_mis,
'ii_obs': ii_obs,
'ii_mis': ii_mis,
'y_obs': | pd.Series(y) | pandas.Series |
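# A small worked sketch of the missing-data bookkeeping above (values are
# illustrative). Stan uses 1-based indices, so the NaN in Python position 1
# becomes missing index 2:
#
#   y = np.array([1.0, np.nan, 3.0])
#   get_data_for_stan(y)   # -> N_obs=2, N_mis=1, ii_obs=[1, 3], ii_mis=[2]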
"""
This example shows how to join multiple pandas Series to a DataFrame
For further information take a look at the pandas documentation:
https://pandas.pydata.org/pandas-docs/stable/merging.html
"""
import wapi
import pandas as pd
import matplotlib.pyplot as plt
############################################
# Insert the path to your config file here!
my_config_file = 'path/to/your/config.ini'
############################################
# Create a session to connect to the Wattsight database
session = wapi.Session(config_file=my_config_file)
## Combine Series with same time index
######################################
# To combine pandas Series that all have the same time index, the simplest
# option is to add a new column for every series
# We first create an empty dataframe
df1 = pd.DataFrame()
# now we read temperature data for 3 different regions for the same time horizon
regions = ['fr','es','de']
# start_date
start_date = pd.Timestamp('2018-6-1 00:00')
# end_date
end_date = pd.Timestamp('2018-6-8 00:00')
# we loop through the regions and get the data for each region
for r in regions:
# define curve name to read, based on the region
curve_name = 'tt ' + r + ' con °c cet min15 s'
# get the curve
curve = session.get_curve(name=curve_name)
# read curve data from start_date to end_date to ts object
ts = curve.get_data(data_from=start_date, data_to=end_date)
# convert to pandas.Series object
s = ts.to_pandas()
# add the series as a new column to the DataFrame, set the region as name
df1[r] = s
# plot the dataframe using the plotting function from pandas
df1.plot()
### Combine Series with different time index
############################################
# Combining pandas Series that have different time indexes is a bit more
# complicated and there are multiple options. Here we use the pandas.concat()
# function
# We first create an empty dataframe
df2 = pd.DataFrame()
# now we read temperature forecasts for Germany for 3 different issue dates
# we create the issue_dates: yesterday, and one and two days before yesterday
yesterday = pd.Timestamp.now().floor('D') - pd.Timedelta(days=1)
yesterday_1before = yesterday - pd.Timedelta(days=1)
yesterday_2before = yesterday - pd.Timedelta(days=2)
# put them together in a list
issue_dates = [yesterday, yesterday_1before, yesterday_2before]
# define curve name to read, in this case temperature forecast for Germany
curve_name = 'tt de con ec00 °c cet min15 f'
# get the curve
curve = session.get_curve(name=curve_name)
# we loop through the issue_dates and get the forecast data for each
for issue_date in issue_dates:
# read curve data for this issue_date
ts = curve.get_instance(issue_date=issue_date)
# convert to pandas.Series object
s = ts.to_pandas()
# use the pandas.concat() function to add a new column and keep all indexes
df2 = | pd.concat([df2,s], axis=1) | pandas.concat |
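# Note on the pd.concat(axis=1) pattern used above: concatenation aligns on the
# union of the DatetimeIndexes, so timestamps present in one forecast but not in
# another simply become NaN in the other columns. A minimal standalone sketch:
#
#   a = pd.Series([1, 2], index=pd.to_datetime(['2018-06-01', '2018-06-02']))
#   b = pd.Series([3], index=pd.to_datetime(['2018-06-02']))
#   pd.concat([a, b], axis=1)   # the 2018-06-01 row holds NaN in the second column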
# Copyright (c) 2021-2022, NVIDIA CORPORATION.
import numpy as np
import pandas as pd
import pytest
import cudf
from cudf.testing._utils import NUMERIC_TYPES, assert_eq
from cudf.utils.dtypes import np_dtypes_to_pandas_dtypes
def test_can_cast_safely_same_kind():
# 'i' -> 'i'
data = cudf.Series([1, 2, 3], dtype="int32")._column
to_dtype = np.dtype("int64")
assert data.can_cast_safely(to_dtype)
data = cudf.Series([1, 2, 3], dtype="int64")._column
to_dtype = np.dtype("int32")
assert data.can_cast_safely(to_dtype)
data = cudf.Series([1, 2, 2**31], dtype="int64")._column
assert not data.can_cast_safely(to_dtype)
# 'u' -> 'u'
data = cudf.Series([1, 2, 3], dtype="uint32")._column
to_dtype = np.dtype("uint64")
assert data.can_cast_safely(to_dtype)
data = cudf.Series([1, 2, 3], dtype="uint64")._column
to_dtype = np.dtype("uint32")
assert data.can_cast_safely(to_dtype)
data = cudf.Series([1, 2, 2**33], dtype="uint64")._column
assert not data.can_cast_safely(to_dtype)
# 'f' -> 'f'
data = cudf.Series([np.inf, 1.0], dtype="float64")._column
to_dtype = np.dtype("float32")
assert data.can_cast_safely(to_dtype)
data = cudf.Series(
[np.finfo("float32").max * 2, 1.0], dtype="float64"
)._column
to_dtype = np.dtype("float32")
assert not data.can_cast_safely(to_dtype)
def test_can_cast_safely_mixed_kind():
data = cudf.Series([1, 2, 3], dtype="int32")._column
to_dtype = np.dtype("float32")
assert data.can_cast_safely(to_dtype)
# too big to fit into f32 exactly
data = cudf.Series([1, 2, 2**24 + 1], dtype="int32")._column
assert not data.can_cast_safely(to_dtype)
data = cudf.Series([1, 2, 3], dtype="uint32")._column
to_dtype = np.dtype("float32")
assert data.can_cast_safely(to_dtype)
# too big to fit into f32 exactly
data = cudf.Series([1, 2, 2**24 + 1], dtype="uint32")._column
assert not data.can_cast_safely(to_dtype)
to_dtype = np.dtype("float64")
assert data.can_cast_safely(to_dtype)
data = cudf.Series([1.0, 2.0, 3.0], dtype="float32")._column
to_dtype = np.dtype("int32")
assert data.can_cast_safely(to_dtype)
# not integer float
data = cudf.Series([1.0, 2.0, 3.5], dtype="float32")._column
assert not data.can_cast_safely(to_dtype)
data = cudf.Series([10.0, 11.0, 2000.0], dtype="float64")._column
assert data.can_cast_safely(to_dtype)
# float out of int range
data = cudf.Series([1.0, 2.0, 1.0 * (2**31)], dtype="float32")._column
assert not data.can_cast_safely(to_dtype)
# negative signed integers casting to unsigned integers
data = cudf.Series([-1, 0, 1], dtype="int32")._column
to_dtype = np.dtype("uint32")
assert not data.can_cast_safely(to_dtype)
def test_to_pandas_nullable_integer():
gsr_not_null = cudf.Series([1, 2, 3])
gsr_has_null = cudf.Series([1, 2, None])
psr_not_null = pd.Series([1, 2, 3], dtype="int64")
psr_has_null = pd.Series([1, 2, None], dtype="Int64")
assert_eq(gsr_not_null.to_pandas(), psr_not_null)
assert_eq(gsr_has_null.to_pandas(nullable=True), psr_has_null)
def test_to_pandas_nullable_bool():
gsr_not_null = cudf.Series([True, False, True])
gsr_has_null = cudf.Series([True, False, None])
psr_not_null = pd.Series([True, False, True], dtype="bool")
psr_has_null = pd.Series([True, False, None], dtype="boolean")
assert_eq(gsr_not_null.to_pandas(), psr_not_null)
assert_eq(gsr_has_null.to_pandas(nullable=True), psr_has_null)
def test_can_cast_safely_has_nulls():
data = cudf.Series([1, 2, 3, None], dtype="float32")._column
to_dtype = np.dtype("int64")
assert data.can_cast_safely(to_dtype)
data = cudf.Series([1, 2, 3.1, None], dtype="float32")._column
assert not data.can_cast_safely(to_dtype)
@pytest.mark.parametrize(
"data",
[
[1, 2, 3],
(1.0, 2.0, 3.0),
[float("nan"), None],
np.array([1, 2.0, -3, float("nan")]),
pd.Series(["123", "2.0"]),
pd.Series(["1.0", "2.", "-.3", "1e6"]),
pd.Series(
["1", "2", "3"],
dtype= | pd.CategoricalDtype(categories=["1", "2", "3"]) | pandas.CategoricalDtype |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.animation import FFMpegWriter
import copy
from . import otherfunctions
from pathlib import Path
import warnings
import os
from skimage import feature
# Implement the data structure
class BaseMeasurement:
# Store parameters of the measurement
@property
def params(self):
return self._params
# Store flags identifying bad chirps
@property
def flags(self):
return self._flags
# Store flags identifying bad acquisitions
@property
def acq_flags(self):
return self._acq_flags
# Store actual data
@property
def data(self):
return self._data
@property
def meas_type(self):
return self._meas_type
def __init__(self, shodata=None, parameters=None, xaxis=None, adjustphase=True):
if shodata is None:
self._data = 0
self._params = 0
self._flags = 0
self._acq_flags = 0
else:
self._params = parameters.transpose()
temp_plotgroup = shodata["PlotGroup"].xs(0)
in_out = shodata['InOut'].unstack().xs(0)
self._flags = shodata['Flag'].unstack()
shodata['PR'] = np.zeros(shodata.shape[0])
data = shodata[["Amp", "errA", "Phase", "errP", "Res", "errRes", "Q", "errQ", "PR"]].unstack()
if adjustphase:
temp = data['Phase'].replace([np.inf, -np.inf], np.nan).copy()
phaseMean = temp.fillna(0).mean()
phaseMean = phaseMean.replace([np.inf, -np.inf], np.nan)
phaseMean = phaseMean.fillna(0).mean()
data['Phase'] = data['Phase'] - phaseMean
data['Phase'] = data['Phase'].applymap(lambda x: np.mod(x + np.pi, 2*np.pi) - np.pi)
data['PR'] = data.apply(lambda row: row['Amp'] * np.sin(row['Phase']), axis=1)
data = data.transpose()
data['InOut'] = np.tile(in_out.values, 9)
data.set_index('InOut', append=True, inplace=True)
data['PlotGroup'] = np.tile(temp_plotgroup.values, 9)
data.set_index('PlotGroup', append=True, inplace=True)
if xaxis is not None:
data['xaxis'] = np.tile(xaxis.values, 9)
data.set_index('xaxis', append=True, inplace=True)
data = data.transpose()
self._data = data
self.clean()
def GetDataSubset(self, inout=0.0, plotGroup=None, insert=None, stack=None, clean=False):
inout_vals=self._data.columns.get_level_values(level='InOut')
plotGroup_vals=self._data.columns.get_level_values(level='PlotGroup')
if stack is None:
stack = ['Amp', 'Phase', 'Res', 'Q']
if inout is None:
inout_mask = np.ones(inout_vals.shape)
else:
inout_mask = inout_vals == inout
if plotGroup is None:
pg_mask = np.ones(plotGroup_vals.shape)
else:
pg_mask = plotGroup_vals == plotGroup
mask = np.logical_and(inout_mask, pg_mask)
if clean:
cleanmask = self._acq_flags
else:
cleanmask = np.full(self._acq_flags.shape, False)
return_data = copy.deepcopy(self._data)
return_data = return_data[~cleanmask]
if insert is None:
return return_data.T[mask].T[stack]
else:
return_data.T[mask] = insert
return return_data[stack]
def SetDataSubset(self, set_vals, inout=0.0, plotGroup=None, stack=None, clean=False):
if stack is None:
stack = ['Amp', 'Phase', 'Res', 'Q']
inout_vals = self._data[stack].columns.get_level_values(level='InOut')
plotGroup_vals = self._data[stack].columns.get_level_values(level='PlotGroup')
if inout is None:
inout_mask = np.ones(inout_vals.shape)
else:
inout_mask = inout_vals == inout
if plotGroup is None:
pg_mask = np.ones(plotGroup_vals.shape)
else:
pg_mask = plotGroup_vals == plotGroup
mask = np.logical_and(inout_mask, pg_mask)
if clean:
cleanmask = self._acq_flags
else:
cleanmask = np.full(self._acq_flags.shape, False)
old = self.data[stack].loc[:, mask]
new = | pd.DataFrame(set_vals, index=old.index, columns=old.columns) | pandas.DataFrame |
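# Illustrative sketch of how GetDataSubset/SetDataSubset pair up (construction of
# the measurement is omitted; `meas` is a hypothetical BaseMeasurement instance):
#
#   amp = meas.GetDataSubset(inout=1.0, stack=['Amp'])             # amplitude for one InOut state
#   meas.SetDataSubset(amp.values * 2, inout=1.0, stack=['Amp'])   # write back a transformed copy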
import json
import os
import geopandas
import numpy as np
import pandas as pd
import cea.plots.cache
from cea.constants import HOURS_IN_YEAR
from cea.plots.variable_naming import get_color_array
from cea.utilities.standardize_coordinates import get_geographic_coordinate_system
"""
Implements py:class:`cea.plots.ThermalNetworksPlotBase` as a base class for all plots in the category
"thermal-networks" and also set's the label for that category.
"""
__author__ = "<NAME>"
__copyright__ = "Copyright 2019, Architecture and Building Systems - ETH Zurich"
__credits__ = ["<NAME>"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
# identifies this package as a plots category and sets the label name for the category
label = 'Thermal networks'
class ThermalNetworksPlotBase(cea.plots.PlotBase):
"""Implements properties / methods used by all plots in this category"""
category_name = "thermal-networks"
# default parameters for plots in this category - override if your plot differs
expected_parameters = {
'scenario-name': 'general:scenario-name',
'network-type': 'plots:network-type',
'network-name': 'plots:network-name',
}
def __init__(self, project, parameters, cache):
super(ThermalNetworksPlotBase, self).__init__(project, parameters, cache)
self.category_path = os.path.join('new_basic', self.category_name)
self.network_name = parameters['network-name'] if parameters['network-name'] else ''
self.network_type = parameters['network-type']
@property
def title(self):
"""Override the version in PlotBase"""
if not self.network_name: # different plot titles if a network name is specified, here without network name
return '{name} for {network_type}'.format(name=self.name, network_type=self.network_type)
else:
# plot title including network name
return '{name} for {network_type} in {network_name}'.format(name=self.name, network_type=self.network_type,
network_name=self.network_name)
@property
def output_path(self):
file_name = '{network_type}_{network_name}_{name}'.format(network_type=self.network_type,
network_name=self.network_name, name=self.id())
return self.locator.get_timeseries_plots_file(file_name, self.category_path)
@property
@cea.plots.cache.cached
def buildings_hourly(self):
thermal_demand_df = pd.read_csv(self.locator.get_thermal_demand_csv_file(self.network_type, self.network_name))
thermal_demand_df = thermal_demand_df.set_index(self.date)  # set_index returns a new frame; keep the result
thermal_demand_df = thermal_demand_df / 1000
return thermal_demand_df
@property
@cea.plots.cache.cached
def hourly_loads(self):
hourly_loads = pd.DataFrame(self.buildings_hourly.sum(axis=1))
if self.network_type == 'DH':
hourly_loads.columns = ['Q_dem_heat']
else:
hourly_loads.columns = ['Q_dem_cool']
return hourly_loads
@property
@cea.plots.cache.cached
def date(self):
"""Read in the date information from demand results of the first building in the zone"""
buildings = self.locator.get_zone_building_names()
df_date = pd.read_csv(self.locator.get_demand_results_file(buildings[0]))
return df_date["DATE"]
@property
@cea.plots.cache.cached
def plant_pumping_requirement_kWh(self):
hourly_pressure_loss = pd.read_csv(
self.locator.get_network_energy_pumping_requirements_file(self.network_type, self.network_name))
hourly_pressure_loss = hourly_pressure_loss['pressure_loss_total_kW']
return | pd.DataFrame(hourly_pressure_loss) | pandas.DataFrame |
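# Hedged note on the @property + @cea.plots.cache.cached pattern above: each
# expensive CSV read is wrapped in a cached property, so repeated plot builds for
# the same scenario reuse the parsed DataFrame instead of re-reading the file.
#
#   plot = SomeThermalNetworkPlot(project, parameters, cache)   # hypothetical subclass
#   plot.hourly_loads   # first access parses the demand CSVs
#   plot.hourly_loads   # later accesses are served from the cache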
# *****************************************************************************
# Copyright (c) 2019, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
import unittest
import pandas as pd
import numpy as np
from math import sqrt
import numba
import sdc
from sdc.tests.test_utils import (count_array_REPs, count_parfor_REPs,
count_parfor_OneDs, count_array_OneDs,
count_parfor_OneD_Vars, count_array_OneD_Vars,
dist_IR_contains)
from datetime import datetime
import random
class TestDate(unittest.TestCase):
@unittest.skip("needs support for boxing/unboxing DatetimeIndex")
def test_datetime_index_in(self):
def test_impl(dti):
return dti
hpat_func = sdc.jit(test_impl)
df = self._gen_str_date_df()
dti = pd.DatetimeIndex(df['str_date'])
np.testing.assert_array_equal(hpat_func(dti).values, test_impl(dti).values)
def test_datetime_index(self):
def test_impl(df):
return pd.DatetimeIndex(df['str_date']).values
hpat_func = sdc.jit(test_impl)
df = self._gen_str_date_df()
np.testing.assert_array_equal(hpat_func(df), test_impl(df))
def test_datetime_index_kw(self):
def test_impl(df):
return pd.DatetimeIndex(data=df['str_date']).values
hpat_func = sdc.jit(test_impl)
df = self._gen_str_date_df()
np.testing.assert_array_equal(hpat_func(df), test_impl(df))
def test_datetime_arg(self):
def test_impl(A):
return A
hpat_func = sdc.jit(test_impl)
df = self._gen_str_date_df()
A = pd.DatetimeIndex(df['str_date']).to_series()
np.testing.assert_array_equal(hpat_func(A), test_impl(A))
def test_datetime_getitem(self):
def test_impl(A):
return A[0]
hpat_func = sdc.jit(test_impl)
df = self._gen_str_date_df()
A = pd.DatetimeIndex(df['str_date']).to_series()
self.assertEqual(hpat_func(A), test_impl(A))
def test_ts_map(self):
def test_impl(A):
return A.map(lambda x: x.hour)
hpat_func = sdc.jit(test_impl)
df = self._gen_str_date_df()
A = pd.DatetimeIndex(df['str_date']).to_series()
np.testing.assert_array_equal(hpat_func(A), test_impl(A))
def test_ts_map_date(self):
def test_impl(A):
return A.map(lambda x: x.date())[0]
hpat_func = sdc.jit(test_impl)
df = self._gen_str_date_df()
A = pd.DatetimeIndex(df['str_date']).to_series()
np.testing.assert_array_equal(hpat_func(A), test_impl(A))
def test_ts_map_date2(self):
def test_impl(df):
return df.apply(lambda row: row.dt_ind.date(), axis=1)[0]
hpat_func = sdc.jit(test_impl)
df = self._gen_str_date_df()
df['dt_ind'] = pd.DatetimeIndex(df['str_date'])
np.testing.assert_array_equal(hpat_func(df), test_impl(df))
def test_ts_map_date_set(self):
def test_impl(df):
df['hpat_date'] = df.dt_ind.map(lambda x: x.date())
hpat_func = sdc.jit(test_impl)
df = self._gen_str_date_df()
df['dt_ind'] = pd.DatetimeIndex(df['str_date'])
hpat_func(df)
df['pd_date'] = df.dt_ind.map(lambda x: x.date())
np.testing.assert_array_equal(df['hpat_date'], df['pd_date'])
def test_date_series_unbox(self):
def test_impl(A):
return A[0]
hpat_func = sdc.jit(test_impl)
df = self._gen_str_date_df()
A = pd.DatetimeIndex(df['str_date']).to_series().map(lambda x: x.date())
self.assertEqual(hpat_func(A), test_impl(A))
def test_date_series_unbox2(self):
def test_impl(A):
return A[0]
hpat_func = sdc.jit(test_impl)
df = self._gen_str_date_df()
A = pd.DatetimeIndex(df['str_date']).map(lambda x: x.date())
self.assertEqual(hpat_func(A), test_impl(A))
def test_datetime_index_set(self):
def test_impl(df):
df['sdc'] = pd.DatetimeIndex(df['str_date']).values
hpat_func = sdc.jit(test_impl)
df = self._gen_str_date_df()
hpat_func(df)
df['std'] = pd.DatetimeIndex(df['str_date'])
allequal = (df['std'].equals(df['sdc']))
self.assertTrue(allequal)
def test_timestamp(self):
def test_impl():
dt = datetime(2017, 4, 26)
ts = pd.Timestamp(dt)
return ts.day + ts.hour + ts.microsecond + ts.month + ts.nanosecond + ts.second + ts.year
hpat_func = sdc.jit(test_impl)
self.assertEqual(hpat_func(), test_impl())
def test_extract(self):
def test_impl(s):
return s.month
hpat_func = sdc.jit(test_impl)
ts = pd.Timestamp(datetime(2017, 4, 26).isoformat())
month = hpat_func(ts)
self.assertEqual(month, 4)
def test_timestamp_date(self):
def test_impl(s):
return s.date()
hpat_func = sdc.jit(test_impl)
ts = pd.Timestamp(datetime(2017, 4, 26).isoformat())
self.assertEqual(hpat_func(ts), test_impl(ts))
def test_datetimeindex_str_comp(self):
def test_impl(df):
return (df.A >= '2011-10-23').values
df = pd.DataFrame({'A': pd.DatetimeIndex(['2015-01-03', '2010-10-11'])})
hpat_func = sdc.jit(test_impl)
np.testing.assert_array_equal(hpat_func(df), test_impl(df))
def test_datetimeindex_str_comp2(self):
def test_impl(df):
return ('2011-10-23' <= df.A).values
df = pd.DataFrame({'A': pd.DatetimeIndex(['2015-01-03', '2010-10-11'])})
hpat_func = sdc.jit(test_impl)
np.testing.assert_array_equal(hpat_func(df), test_impl(df))
def test_datetime_index_df(self):
def test_impl(df):
df = pd.DataFrame({'A': | pd.DatetimeIndex(df['str_date']) | pandas.DatetimeIndex |
# -*- coding: utf-8 -*-
# Loading libraries
import os
import sys
import time
from networkx.algorithms.centrality import group
import pandas as pd
import re
import csv
from swmmtoolbox import swmmtoolbox as swmm
from datetime import datetime
from os import listdir
from concurrent import futures
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session
from sqlalchemy.orm import sessionmaker
import multiprocessing
import pyproj
event_id = input('4-digit event_id like: 0123: ')
model_id = f'model_{event_id}'# + input('4-digit model_id like: 0123: ' )
precipitation_id = f'precipitation_{event_id}'# + input('4-digit raingage_id like 0123: ')
epsg_modelo = input('EPSG (example: 5348): ')
project_folder = os.path.abspath(os.path.join(os.getcwd(),"../.."))
data_raw_folder = os.path.join(project_folder,'data', 'raw_swmm')
event_folder = os.path.join(data_raw_folder, 'Run_[' + event_id + ']')
model_inp = os.path.join(event_folder, 'model.inp')
model_out = os.path.join(event_folder, 'model.out')
# Connection to database
engine_base_ina = create_engine('postgresql://postgres:[email protected]:5555/base-ina')
RELEVANT_GROUP_TYPES_OUT = [
'link',
'node',
'subcatchment',
# 'system'
]
RELEVANT_GROUP_TYPES_INP = [
'coordinates',
'subcatchments',
'raingages',
'conduits',
'orifices',
'weirs',
'outfalls',
# 'vertices',
# 'polygons',
'subareas',
# 'losses',
'infiltration',
'junctions',
'storage',
# 'properties',
# "curves",
]
RELEVANT_LINKS = [
# 'channel10944',
# 'channel24416',
# 'channel60443',
# 'channel17459',
# 'channel87859',
# 'channel14380',
# 'channel55414',
# 'channel77496',
# 'channel83013',
# 'channel52767',
# 'channel12818',
# 'conduit11698',
# 'channel6317',
# 'conduit18801',
# 'conduit50317',
# 'conduit528',
# 'conduit36611',
# 'conduit50827',
# 'conduit78108',
# 'conduit57848',
# 'conduit42638',
# 'conduit34157',
# 'conduit29340',
# 'conduit19715',
# 'conduit23023',
# 'conduit37130',
# 'conduit21772',
# 'channel52598',
# 'conduit75783',
# 'conduit62715',
# 'conduit48979',
# 'conduit82544',
# 'conduit83110',
# 'conduit33678',
# 'conduit18303',
# 'conduit40724',
# 'conduit13927'
]
RELEVANT_SUBCATCHMENTS = []
RELEVANT_NODES = []
RELEVANT_SUBAREAS = []
RELEVANT_OUTFALLS = []
RELEVANT_VERTICES = []
RELEVANT_POLYGNOS = []
RELEVANT_LINKS_CONDUITS = []
RELEVANT_LINKS_ORIFICES = []
RELEVANT_LINKS_WEIRS = []
RELEVANT_LOSSES = []
RELEVANT_INFILTRATION = []
RELEVANT_JUNCTIONS = []
RELEVANT_STORAGE = []
MODEL_OUT_COLS = {
'SUBCATCHMENTS_COLS' : [
'event_id',
'elapsed_time',
'subcatchment_id',
'rainfall',
'elapsed_time',
'snow_depth',
'evaporation_loss',
'infiltration_loss',
'runoff_rate',
'groundwater_outflow',
'groundwater_elevation',
'soil_moisture'
],
'LINKS_COLS' : [
'event_id',
'elapsed_time',
'link_id',
'flow_rate',
'flow_depth',
'flow_velocity',
'froude_number',
'capacity'
],
'NODES_COLS' : [
'event_id',
'elapsed_time',
'node_id',
'depth_above_invert',
'hydraulic_head',
'volume_stored_ponded',
'lateral_inflow',
'total_inflow',
'flow_lost_flooding'
]
}
MODEL_INP_COLS = {
'NODES_COORDINATES' : [
'node_id',
'x_coord',
'y_coord',
],
"SUBCATCHMENTS" : [
"subcatchment_id",
"raingage_id",
"outlet",
"area",
"imperv",
"width",
"slope",
"curb_len"
],
"LINKS_CONDUITS" : [
"conduit_id",
"from_node",
"to_node",
"length",
"roughness",
"in_offset",
"out_offset",
"init_flow",
"max_flow"
],
"LINKS_ORIFICES" : [
"orifice_id",
"from_node",
"to_node",
"type",
"offset",
"q_coeff",
"gated",
"close_time"
],
"LINKS_WEIRS" : [
"weir_id",
"from_node",
"to_node",
"type",
"crest_ht",
"q_coeff",
"gated",
"end_con",
"end_coeff",
"surcharge"
],
"SUBAREAS" : [
"subcatchment_id",
"n_imperv",
"n_perv",
"s_imperv",
"s_perv",
"pct_zero",
"route_to"
],
"NODES_STORAGE" : [
"storage_id",
"elevation",
"max_depth",
"init_depth",
"shape",
"curve_name_params",
"n_a",
"f_evap"
],
"NODES_OUTFALLS" : [
"outfall_id",
"elevation",
"type",
# "stage_data",
"gated",
# "route_to"
],
"NODES_JUNCTIONS" : [
"junction_id",
"elevation",
"max_depth",
"init_depth",
"sur_depth",
"aponded"
],
"INFILTRATION": [
"subcatchment_id",
"max_rate",
"min_rate",
"decay",
"dry_time",
"max_infil",
],
# "POLYGONS": [
# "subcatchment_id",
# "x_coord",
# "y_coord"
# ],
# "VERICES": [
# "link_id",
# "x_coord",
# "y_coord"
# ],
"PROPERTIES": [
"model_name",
"model_version",
"flow_units",
"infiltration",
"flow_routing",
"link_offsets",
"min_slope",
"allow_ponding",
"skip_steady_state",
"start_date",
"start_time",
"report_start_date",
"report_start_time",
"end_date",
"end_time",
"sweep_start",
"sweep_end",
"report_step",
"wet_step",
"dry_step",
"routing_step",
"inertial_damping",
"normal_flow_limited",
"force_main_equation",
"variable_step",
"lengthening_step",
"min_surfarea",
"max_trials",
"head_tolerance",
"sys_flow",
"lat_flow_tol",
"minimum_step",
"threads"
]
}
# dictionary to store data
groups = {}
# Definition of starting postiion of each element
def group_start_line(model):
with open(model, 'r') as inp:
groups = {}
count = 0
lines = inp.readlines()
for line in lines:
if ('[' in line) & (']' in line):
groups.update({line[1:-2].lower() : {'start': count}})
count += 1
# subselection of elements from MODEL_ELEMENTS
groups = {key:value for key,value in groups.items() if key in RELEVANT_GROUP_TYPES_INP}
LINK_TYPES = ['orifices', 'conduits', 'weirs']
NODE_TYPES = ['outfalls', 'junctions', 'storage']
for key in [key for key in groups.keys() if key in LINK_TYPES]:
groups['links_' + key] = groups.pop(key)
for key in [key for key in groups.keys() if key in NODE_TYPES]:
groups['nodes_' + key] = groups.pop(key)
groups['nodes_coordinates'] = groups.pop('coordinates')
return groups
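# Sketch of the structure returned by group_start_line for a typical model.inp
# (line numbers are illustrative): keys are the relevant [SECTION] names,
# normalised to the links_*/nodes_* naming used by the parsers below.
#
#   {'subcatchments': {'start': 120},
#    'links_conduits': {'start': 850},
#    'nodes_coordinates': {'start': 2300},
#    ...}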
# adding header and skip-lines to elements dict
def build_groups_dicts(model):
groups = group_start_line(model)
count = 0
for element, start_dict in groups.items():
start = start_dict['start']
with open(model, 'r') as inp:
lines = inp.readlines()
for index, line in enumerate(lines):
if (index - start == 1) & (';;' in line) & (';;--' not in line):
groups[element].update({'header':[col for col in re.split("\s\s+", line[2:-1]) if len(col) > 1]})
elif (index - start == 2) & (';;--------------' in line):
groups[element].update({'line_to_skip': index})
elif (index - start == 3):
break
# some corrrections on header because of mismatches on inp file
# groups['properties'].update({'header': MODEL_INP_COLS['PROPERTIES']})
groups['subcatchments'].update({'header': MODEL_INP_COLS['SUBCATCHMENTS']})
groups['subareas'].update({'header': MODEL_INP_COLS['SUBAREAS']})
groups['infiltration'].update({'header': MODEL_INP_COLS['INFILTRATION']})
groups['links_conduits'].update({'header': MODEL_INP_COLS['LINKS_CONDUITS']})
groups['links_weirs'].update({'header': MODEL_INP_COLS['LINKS_WEIRS']})
groups['links_orifices'].update({'header': MODEL_INP_COLS['LINKS_ORIFICES']})
groups['nodes_coordinates'].update({'header': MODEL_INP_COLS['NODES_COORDINATES']})
groups['nodes_outfalls'].update({'header': MODEL_INP_COLS['NODES_OUTFALLS']})
groups['nodes_storage'].update({'header': MODEL_INP_COLS['NODES_STORAGE']})
groups['nodes_junctions'].update({'header': MODEL_INP_COLS['NODES_JUNCTIONS']})
return groups
# %%
def list_files(directory, extension, prefix):
return (f for f in listdir(directory) if (f.endswith('.' + extension)) & (f.startswith(prefix)))
def raingages_meta_to_dfs(model, model_id):
""" Read a .CSV into a Pandas DataFrame until a blank line is found, then stop.
"""
start = build_groups_dicts(model)['raingages']['start']
skip_rows = build_groups_dicts(model)['raingages']['line_to_skip']
header = ['raingage_id', 'format', 'interval', 'unit']
df = pd.DataFrame()
with open(model, newline='') as f:
contents = []
r = csv.reader(f)
for i, line in enumerate(r):
if i > start + 1:
if i != skip_rows:
if not line:
break
# elif i == start + 1:
# headers = line
else:
formatted_line = [line[0].split()[0], line[0].split()[1], line[0].split()[2],line[0].split()[7]]
contents.append(formatted_line)
df = pd.DataFrame(data = contents, columns= header,)
df['interval'] = df['interval'].map( lambda x: datetime.strptime(x, '%H:%M'))
df.insert(0, 'precipitation_id', precipitation_id)
print('raingages','df created!')
return df
def date_parser(line):
year = line[0].split()[1]
month = line[0].split()[2].zfill(2)
day = line[0].split()[3].zfill(2)
hour = line[0].split()[4].zfill(2)
minute = line[0].split()[5].zfill(2)
str_date = '-'.join([year, month, day, hour, minute] )
date_format = '%Y-%m-%d-%H-%M'
return datetime.strptime(str_date, date_format)
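# Worked sketch of date_parser on one raingage record (the raw line layout is
# inferred from the split() indices used above: station, year, month, day, hour,
# minute, value):
#
#   line = ['P1 2018 6 1 0 5 0.2']
#   date_parser(line)   # -> datetime(2018, 6, 1, 0, 5)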
# %%
def raingages_to_df(event_folder, event_id, model, model_id):
contents = []
for file in list_files(event_folder, 'txt', 'P'):
raingage_id = file.split('.')[0]
with open(os.path.join(event_folder, file), newline='') as f:
r = csv.reader(f)
for i, line in enumerate(r):
try:
formatted_line = [
raingage_id,
date_parser(line),
line[0].split()[6]
]
contents.append(formatted_line)
except:
print('error')
df_timeseries = pd.DataFrame(data = contents, columns= ['raingage_id', 'elapsed_time', 'value'])
df_timeseries.insert(0, 'precipitation_id', precipitation_id)
df_metadata = raingages_meta_to_dfs(model, model_id)
return df_metadata, df_timeseries
# %%
def load_raingages_to_db(event_folder, event_id, model, model_id):
raingage_metadata, raingage_timeseries = raingages_to_df(event_folder, event_id, model, model_id)
table_metadata = 'raingages_metadata'
table_timeseries = 'raingages_timeseries'
try:
raingage_metadata.to_sql(table_metadata, engine_base_ina, index=False, if_exists='append')
except Exception as e:
print(e)
try:
raingage_timeseries.to_sql(table_timeseries, engine_base_ina, index=False, if_exists='append')
except Exception as e:
print(e)
# def group_type_to_dfs(model, model_id, group, id_col, col_to_check, own_relevant__list, relevant_dependent_list):
# """ Read a .CSV into a Pandas DataFrame until a blank line is found, then stop.
# """
# start = build_groups_dicts(model)[group]['start']
# skip_rows = build_groups_dicts(model)[group]['line_to_skip']
# header = build_groups_dicts(model)[group]['header']
# global own_relevant__list
# own_relevant_list = []
# df = pd.DataFrame()
# with open(model, newline='') as f:
# contents = []
# r = csv.reader(f)
# for i, line in enumerate(r):
# if i >= start + 1:
# if i != skip_rows:
# if not line:
# break
# # elif i == start + 1:
# # headers = line
# else:
# if len(relevant_dependecy_list) == 0:
# own_relevant__list.append(line[0].split()[id_col])
# contents.append(line[0].split())
# else:
# if line[0].split()[col_to_check].lower() in relevant_dependent_list:
# own_relevant__list.append(line[0].split()[id_col])
# contents.append(line[0].split())
# df = pd.DataFrame(data = contents, columns= [col.lower().replace("-", "_").replace("%", "").replace(" ", "_") for col in header],)
# df.insert(0, 'model_id', model_id)
# print(group,'df created!')
# return df
def conduits_to_dfs(model, model_id):
""" Read a .CSV into a Pandas DataFrame until a blank line is found, then stop.
"""
start = build_groups_dicts(model)['links_conduits']['start']
skip_rows = build_groups_dicts(model)['links_conduits']['line_to_skip']
header = build_groups_dicts(model)['links_conduits']['header']
global RELEVANT_LINKS_CONDUITS
RELEVANT_LINKS_CONDUITS = []
df = pd.DataFrame()
with open(model, newline='') as f:
contents = []
r = csv.reader(f)
for i, line in enumerate(r):
if i > start + 1:
if i != skip_rows:
if not line:
break
# elif i == start + 1:
# headers = line
else:
if len(RELEVANT_LINKS) == 0:
contents.append(line[0].split())
else:
if line[0].split()[0].lower() in RELEVANT_LINKS:
RELEVANT_LINKS_CONDUITS.append(line[0].split()[0])
contents.append(line[0].split())
df = pd.DataFrame(data = contents, columns= [col.lower().replace("-", "_").replace("%", "").replace(" ", "_") for col in header],)
df.insert(0, 'model_id', model_id)
print('conduits','df created!')
return df
def weirs_to_dfs(model, model_id):
""" Read a .CSV into a Pandas DataFrame until a blank line is found, then stop.
"""
start = build_groups_dicts(model)['links_weirs']['start']
skip_rows = build_groups_dicts(model)['links_weirs']['line_to_skip']
header = build_groups_dicts(model)['links_weirs']['header']
global RELEVANT_LINKS_WEIRS
RELEVANT_LINKS_WEIRS = []
df = pd.DataFrame()
with open(model, newline='') as f:
contents = []
r = csv.reader(f)
for i, line in enumerate(r):
if i > start + 1:
if i != skip_rows:
if not line:
break
# elif i == start + 1:
# headers = line
else:
if len(RELEVANT_LINKS) == 0:
contents.append(line[0].split())
else:
if line[0].split()[0].lower() in RELEVANT_LINKS:
RELEVANT_LINKS_WEIRS.append(line[0].split()[0])
contents.append(line[0].split())
df = pd.DataFrame(data = contents, columns= [col.lower().replace("-", "_").replace("%", "").replace(" ", "_") for col in header],)
df.insert(0, 'model_id', model_id)
print('weirs','df created!')
return df
def orifices_to_dfs(model, model_id):
""" Read a .CSV into a Pandas DataFrame until a blank line is found, then stop.
"""
start = build_groups_dicts(model)['links_orifices']['start']
skip_rows = build_groups_dicts(model)['links_orifices']['line_to_skip']
header = build_groups_dicts(model)['links_orifices']['header']
global RELEVANT_LINKS_ORIFICES
RELEVANT_LINKS_ORIFICES = []
df = pd.DataFrame()
with open(model, newline='') as f:
contents = []
r = csv.reader(f)
for i, line in enumerate(r):
if i > start + 1:
if i != skip_rows:
if not line:
break
# elif i == start + 1:
# headers = line
else:
if len(RELEVANT_LINKS) == 0:
contents.append(line[0].split())
else:
if line[0].split()[0].lower() in RELEVANT_LINKS:
RELEVANT_LINKS_ORIFICES.append(line[0].split()[0])
contents.append(line[0].split())
df = pd.DataFrame(data = contents, columns= [col.lower().replace("-", "_").replace("%", "").replace(" ", "_") for col in header],)
df.insert(0, 'model_id', model_id)
print('orifices','df created!')
return df
def get_nodes_from_links(model, model_id):
conduits_df = conduits_to_dfs(model, model_id)
orifices_df = orifices_to_dfs(model, model_id)
weirs_df = weirs_to_dfs(model, model_id)
links_dfs = [
conduits_df,
orifices_df,
weirs_df
]
nodes = []
for df in links_dfs:
for col in [col for col in df.columns if 'node' in col]:
nodes += df[col].unique().tolist()
return nodes
# coordinate system conversion
def convert_coords(coord_tuple):
transformer = pyproj.Transformer.from_crs(crs_from='epsg:' + epsg_modelo, crs_to='epsg:4326')
lon, lat = transformer.transform(coord_tuple[0], coord_tuple[1])
return (lon,lat)
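# Hedged note on convert_coords: pyproj returns coordinates in the axis order of
# the target CRS, and EPSG:4326 is latitude-first by default, so the (lon, lat)
# unpacking above may actually hold (lat, lon) unless always_xy=True is passed.
# The coordinates read from model.inp are also strings, so casting them is the
# safer pattern, e.g.:
#
#   transformer = pyproj.Transformer.from_crs('epsg:' + epsg_modelo, 'epsg:4326', always_xy=True)
#   lon, lat = transformer.transform(float(x_coord), float(y_coord))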
def nodes_to_dfs(model, model_id):
""" Read a .CSV into a Pandas DataFrame until a blank line is found, then stop.
"""
global RELEVANT_NODES
RELEVANT_NODES = get_nodes_from_links(model, model_id)
start = build_groups_dicts(model)['nodes_coordinates']['start']
skip_rows = build_groups_dicts(model)['nodes_coordinates']['line_to_skip']
header = build_groups_dicts(model)['nodes_coordinates']['header']
df = pd.DataFrame()
with open(model, newline='') as f:
contents = []
r = csv.reader(f)
for i, line in enumerate(r):
if i > start + 1:
if i != skip_rows:
if not line:
break
# elif (i == start + 1):
# headers = line
else:
if len(RELEVANT_NODES) == 0:
contents.append(line[0].split())
else:
if line[0].split()[0] in RELEVANT_NODES:
contents.append(line[0].split())
df = pd.DataFrame(data = contents, columns= [col.lower().replace("-", "_").replace("%", "").replace(" ", "_") for col in header],)
df.insert(0, 'model_id', model_id)
cols =['lat', 'lon']
coords = []
coordinates = [(j[0], j[1]) for i,j in df[['x_coord', 'y_coord']].iterrows()]
pool = multiprocessing.Pool(8)
coords.append(pool.map(convert_coords, coordinates))
pool.close()
pool.join()
# for i in df[['x_coord', 'y_coord']].iterrows():
# coords.append(convert_coords(i[1]))
# from pyproj import Transformer
# def convert_coords(coord_tuple):
# global coords
# transformer = Transformer.from_crs(crs_from='epsg:5348' , crs_to='epsg:4326')
# lon, lat = transformer.transform(coord_tuple[0], coord_tuple[1])
# coords.append((lon, lat, coord_tuple[2]))
# return coords
# import concurrent.futures
# coords = []
# with concurrent.futures.ProcessPoolExecutor(max_workers=8) as executor:
# for result in executor.map(convert_coords, [(i[1], i[2], i[3]) for i in coordinates]):
# pass
# coords = result
df = pd.concat([df, pd.DataFrame(coords[0], columns=cols)], axis=1)
print('nodes','df created!')
return df
def outfalls_to_dfs(model, model_id):
""" Read a .CSV into a Pandas DataFrame until a blank line is found, then stop.
"""
global RELEVANT_NODES
RELEVANT_NODES = get_nodes_from_links(model, model_id)
start = build_groups_dicts(model)['nodes_outfalls']['start']
skip_rows = build_groups_dicts(model)['nodes_outfalls']['line_to_skip']
header = build_groups_dicts(model)['nodes_outfalls']['header']
df = pd.DataFrame()
with open(model, newline='') as f:
contents = []
r = csv.reader(f)
for i, line in enumerate(r):
if i > start + 1:
if i != skip_rows:
if not line:
break
# elif (i == start + 1):
# headers = line
else:
if len(RELEVANT_NODES) == 0:
contents.append(line[0].split())
else:
if line[0].split()[0] in RELEVANT_NODES:
contents.append(line[0].split())
df = pd.DataFrame(data = contents, columns= [col.lower().replace("-", "_").replace("%", "").replace(" ", "_") for col in header],)
df.insert(0, 'model_id', model_id)
print('outfalls','df created!')
return df
def junctions_to_dfs(model, model_id):
""" Read a .CSV into a Pandas DataFrame until a blank line is found, then stop.
"""
global RELEVANT_NODES
RELEVANT_NODES = get_nodes_from_links(model, model_id)
start = build_groups_dicts(model)['nodes_junctions']['start']
skip_rows = build_groups_dicts(model)['nodes_junctions']['line_to_skip']
header = build_groups_dicts(model)['nodes_junctions']['header']
df = pd.DataFrame()
with open(model, newline='') as f:
contents = []
r = csv.reader(f)
for i, line in enumerate(r):
if i > start + 1:
if i != skip_rows:
if not line:
break
# elif (i == start + 1):
# headers = line
else:
if len(RELEVANT_NODES) == 0:
contents.append(line[0].split())
else:
if line[0].split()[0] in RELEVANT_NODES:
contents.append(line[0].split())
df = pd.DataFrame(data = contents, columns= [col.lower().replace("-", "_").replace("%", "").replace(" ", "_") for col in header],)
df.insert(0, 'model_id', model_id)
print('junctions','df created!')
return df
def storage_to_dfs(model, model_id):
""" Read a .CSV into a Pandas DataFrame until a blank line is found, then stop.
"""
global RELEVANT_NODES
RELEVANT_NODES = get_nodes_from_links(model, model_id)
start = build_groups_dicts(model)['nodes_storage']['start']
skip_rows = build_groups_dicts(model)['nodes_storage']['line_to_skip']
header = build_groups_dicts(model)['nodes_storage']['header']
df = pd.DataFrame()
with open(model, newline='') as f:
contents = []
r = csv.reader(f)
for i, line in enumerate(r):
if i > start + 1:
if i != skip_rows:
if not line:
break
# elif (i == start + 1):
# headers = line
else:
if len(RELEVANT_NODES) == 0:
contents.append(line[0].split())
else:
if line[0].split()[0] in RELEVANT_NODES:
contents.append(line[0].split())
df = pd.DataFrame(data = contents, columns= [col.lower().replace("-", "_").replace("%", "").replace(" ", "_") for col in header],)
df.insert(0, 'model_id', model_id)
print('storage','df created!')
return df
def subcatch_to_dfs(model, model_id):
""" Read a .CSV into a Pandas DataFrame until a blank line is found, then stop.
"""
start = build_groups_dicts(model)['subcatchments']['start']
skip_rows = build_groups_dicts(model)['subcatchments']['line_to_skip']
header = build_groups_dicts(model)['subcatchments']['header']
global RELEVANT_SUBCATCHMENTS
RELEVANT_SUBCATCHMENTS = []
df = pd.DataFrame()
with open(model, newline='') as f:
contents = []
r = csv.reader(f)
for i, line in enumerate(r):
if i > start + 1:
if i != skip_rows:
if not line:
break
# elif i == start + 1:
# headers = line
else:
relevant_nodes = [node for node in RELEVANT_NODES]
if len(relevant_nodes) == 0:
contents.append(line[0].split())
else:
if line[0].split()[2] in relevant_nodes:
RELEVANT_SUBCATCHMENTS.append(line[0].split()[0])
contents.append(line[0].split())
df = pd.DataFrame(data = contents, columns= [col.lower().replace("-", "_").replace("%", "").replace(" ", "_") for col in header],)
df.insert(0, 'model_id', model_id)
print('subcatch','df created!')
return df
def infiltration_to_dfs(model, model_id):
""" Read a .CSV into a Pandas DataFrame until a blank line is found, then stop.
"""
start = build_groups_dicts(model)['infiltration']['start']
skip_rows = build_groups_dicts(model)['infiltration']['line_to_skip']
header = build_groups_dicts(model)['infiltration']['header']
df = pd.DataFrame()
with open(model, newline='') as f:
contents = []
r = csv.reader(f)
for i, line in enumerate(r):
if i > start + 1:
if i != skip_rows:
if not line:
break
# elif i == start + 1:
# headers = line
else:
relevant_nodes = [node for node in RELEVANT_NODES]
if len(relevant_nodes) == 0:
contents.append(line[0].split())
else:
if line[0].split()[2] in relevant_nodes:
contents.append(line[0].split())
df = pd.DataFrame(data = contents, columns= [col.lower().replace("-", "_").replace("%", "").replace(" ", "_") for col in header],)
df.insert(0, 'model_id', model_id)
print('infiltration','df created!')
return df
def subareas_to_dfs(model, model_id):
""" Read a .CSV into a Pandas DataFrame until a blank line is found, then stop.
"""
start = build_groups_dicts(model)['subareas']['start']
skip_rows = build_groups_dicts(model)['subareas']['line_to_skip']
header = build_groups_dicts(model)['subareas']['header']
df = | pd.DataFrame() | pandas.DataFrame |
from typing import List, Tuple
import pandas as pd
from src.preprocessing.config import RANKING_COLS, RESULTS_COLS
from src.db.manager import DBManager
from src.db.data import Results, GeneralRanking, HomeRanking, AwayRanking
class DataRetriever:
def __init__(self, db_config : str) -> None:
self._db_manager = DBManager(db_config)
def get_historical_data(self):
results = self._db_manager.select(Results)
general_ranking = self._db_manager.select(GeneralRanking)
home_ranking = self._db_manager.select(HomeRanking)
away_ranking = self._db_manager.select(AwayRanking)
results_df = self.get_result_dataframe(results)
general_df, home_df, away_df = self.get_ranking_dataframes([general_ranking,
home_ranking,
away_ranking])
return results_df, general_df, home_df, away_df
def get_result_dataframe(self, raw_results : List[Tuple]) -> pd.DataFrame:
data = []
for result in raw_results:
data.append((result.season, result.league_match, result.home,
result.team_1, result.team_2, result.outcome))
results_df = | pd.DataFrame(data, columns=RESULTS_COLS) | pandas.DataFrame |
"""
Use the ``MNLDiscreteChoiceModel`` class to train a choice module using
multinomial logit and make subsequent choice predictions.
"""
from __future__ import print_function, division
import abc
import logging
import numpy as np
import pandas as pd
from patsy import dmatrix
from prettytable import PrettyTable
from zbox import toolz as tz
from . import util
from ..exceptions import ModelEvaluationError
from ..urbanchoice import interaction, mnl
from ..utils import yamlio
from ..utils.logutil import log_start_finish
from urbansim_defaults.randomfile import fixedrandomseed,seednum
logger = logging.getLogger(__name__)
def unit_choice(chooser_ids, alternative_ids, probabilities):
"""
Have a set of choosers choose from among alternatives according
to a probability distribution. Choice is binary: each
alternative can only be chosen once.
Parameters
----------
chooser_ids : 1d array_like
Array of IDs of the agents that are making choices.
alternative_ids : 1d array_like
Array of IDs of alternatives among which agents are making choices.
probabilities : 1d array_like
The probability that an agent will choose an alternative.
Must be the same shape as `alternative_ids`. Unavailable
alternatives should have a probability of 0.
Returns
-------
choices : pandas.Series
Mapping of chooser ID to alternative ID. Some choosers
will map to a nan value when there are not enough alternatives
for all the choosers.
"""
chooser_ids = np.asanyarray(chooser_ids)
alternative_ids = np.asanyarray(alternative_ids)
probabilities = np.asanyarray(probabilities)
logger.debug(
'start: unit choice with {} choosers and {} alternatives'.format(
len(chooser_ids), len(alternative_ids)))
choices = pd.Series(index=chooser_ids)
if probabilities.sum() == 0:
# return all nan if there are no available units
return choices
# probabilities need to sum to 1 for np.random.choice
probabilities = probabilities / probabilities.sum()
# need to see if there are as many available alternatives as choosers
n_available = np.count_nonzero(probabilities)
n_choosers = len(chooser_ids)
n_to_choose = n_choosers if n_choosers < n_available else n_available
if fixedrandomseed==0: np.random.seed(seednum)
chosen = np.random.choice(
alternative_ids, size=n_to_choose, replace=False, p=probabilities)
# if there are fewer available units than choosers we need to pick
# which choosers get a unit
if n_to_choose == n_available:
if fixedrandomseed==0: np.random.seed(seednum)
chooser_ids = np.random.choice(
chooser_ids, size=n_to_choose, replace=False)
choices[chooser_ids] = chosen
logger.debug('finish: unit choice')
return choices
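# A minimal usage sketch for unit_choice (numbers are illustrative): two choosers
# compete for three units, one of which is unavailable (probability 0), so each
# chooser ends up with a distinct available unit.
#
#   unit_choice(chooser_ids=[10, 11],
#               alternative_ids=[100, 101, 102],
#               probabilities=[0.5, 0.0, 0.5])
#   # -> Series mapping choosers 10 and 11 to units 100 and 102 (assignment order
#   #    depends on the random draw)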
# define the minimum interface a class must have in order to
# look like we expect DCMs to look
class DiscreteChoiceModel(object):
"""
Abstract base class for discrete choice models.
"""
__metaclass__ = abc.ABCMeta
@staticmethod
def _check_prob_choice_mode_compat(probability_mode, choice_mode):
"""
Check that the probability and choice modes are compatibly with
each other. Currently 'single_chooser' must be paired with
'aggregate' and 'full_product' must be paired with 'individual'.
"""
if (probability_mode == 'full_product' and
choice_mode == 'aggregate'):
raise ValueError(
"'full_product' probability mode is not compatible with "
"'aggregate' choice mode")
if (probability_mode == 'single_chooser' and
choice_mode == 'individual'):
raise ValueError(
"'single_chooser' probability mode is not compatible with "
"'individual' choice mode")
@staticmethod
def _check_prob_mode_interaction_compat(
probability_mode, interaction_predict_filters):
"""
The 'full_product' probability mode is currently incompatible with
post-interaction prediction filters, so make sure we don't have
both of those.
"""
if (interaction_predict_filters is not None and
probability_mode == 'full_product'):
raise ValueError(
"interaction filters may not be used in "
"'full_product' mode")
@abc.abstractmethod
def apply_fit_filters(self, choosers, alternatives):
choosers = util.apply_filter_query(choosers, self.choosers_fit_filters)
alternatives = util.apply_filter_query(
alternatives, self.alts_fit_filters)
return choosers, alternatives
@abc.abstractmethod
def apply_predict_filters(self, choosers, alternatives):
choosers = util.apply_filter_query(
choosers, self.choosers_predict_filters)
alternatives = util.apply_filter_query(
alternatives, self.alts_predict_filters)
return choosers, alternatives
@abc.abstractproperty
def fitted(self):
pass
@abc.abstractmethod
def probabilities(self):
pass
@abc.abstractmethod
def summed_probabilities(self):
pass
@abc.abstractmethod
def fit(self):
pass
@abc.abstractmethod
def predict(self):
pass
@abc.abstractmethod
def choosers_columns_used(self):
pass
@abc.abstractmethod
def alts_columns_used(self):
pass
@abc.abstractmethod
def interaction_columns_used(self):
pass
@abc.abstractmethod
def columns_used(self):
pass
class MNLDiscreteChoiceModel(DiscreteChoiceModel):
"""
A discrete choice model with the ability to store an estimated
model and predict new data based on the model.
Based on multinomial logit.
Parameters
----------
model_expression : str, iterable, or dict
A patsy model expression. Should contain only a right-hand side.
sample_size : int
Number of choices to sample for estimating the model.
probability_mode : str, optional
Specify the method to use for calculating probabilities
during prediction.
Available string options are 'single_chooser' and 'full_product'.
In "single chooser" mode one agent is chosen for calculating
probabilities across all alternatives. In "full product" mode
probabilities are calculated for every chooser across all alternatives.
Currently "single chooser" mode must be used with a `choice_mode`
of 'aggregate' and "full product" mode must be used with a
`choice_mode` of 'individual'.
choice_mode : str, optional
Specify the method to use for making choices among alternatives.
Available string options are 'individual' and 'aggregate'.
In "individual" mode choices will be made separately for each chooser.
In "aggregate" mode choices are made for all choosers at once.
Aggregate mode implies that an alternative chosen by one agent
is unavailable to other agents and that the same probabilities
can be used for all choosers.
Currently "individual" mode must be used with a `probability_mode`
of 'full_product' and "aggregate" mode must be used with a
`probability_mode` of 'single_chooser'.
choosers_fit_filters : list of str, optional
Filters applied to choosers table before fitting the model.
choosers_predict_filters : list of str, optional
Filters applied to the choosers table before calculating
new data points.
alts_fit_filters : list of str, optional
Filters applied to the alternatives table before fitting the model.
alts_predict_filters : list of str, optional
Filters applied to the alternatives table before calculating
new data points.
interaction_predict_filters : list of str, optional
Filters applied to the merged choosers/alternatives table
before predicting agent choices.
estimation_sample_size : int, optional
Whether to sample choosers during estimation
(needs to be applied after choosers_fit_filters).
prediction_sample_size : int, optional
Whether (and how much) to sample alternatives during prediction.
Note that this can lead to multiple choosers picking the same
alternative.
choice_column : optional
Name of the column in the `alternatives` table that choosers
should choose. e.g. the 'building_id' column. If not provided
the alternatives index is used.
name : optional
Optional descriptive name for this model that may be used
in output.
"""
def __init__(
self, model_expression, sample_size,
probability_mode='full_product', choice_mode='individual',
choosers_fit_filters=None, choosers_predict_filters=None,
alts_fit_filters=None, alts_predict_filters=None,
interaction_predict_filters=None,
estimation_sample_size=None,
prediction_sample_size=None,
choice_column=None, name=None):
self._check_prob_choice_mode_compat(probability_mode, choice_mode)
self._check_prob_mode_interaction_compat(
probability_mode, interaction_predict_filters)
self.model_expression = model_expression
self.sample_size = sample_size
self.probability_mode = probability_mode
self.choice_mode = choice_mode
self.choosers_fit_filters = choosers_fit_filters
self.choosers_predict_filters = choosers_predict_filters
self.alts_fit_filters = alts_fit_filters
self.alts_predict_filters = alts_predict_filters
self.interaction_predict_filters = interaction_predict_filters
self.estimation_sample_size = estimation_sample_size
self.prediction_sample_size = prediction_sample_size
self.choice_column = choice_column
self.name = name if name is not None else 'MNLDiscreteChoiceModel'
self.sim_pdf = None
self.log_likelihoods = None
self.fit_parameters = None
@classmethod
def from_yaml(cls, yaml_str=None, str_or_buffer=None):
"""
Create a DiscreteChoiceModel instance from a saved YAML configuration.
        Arguments are mutually exclusive.
Parameters
----------
yaml_str : str, optional
A YAML string from which to load model.
str_or_buffer : str or file like, optional
File name or buffer from which to load YAML.
Returns
-------
MNLDiscreteChoiceModel
"""
cfg = yamlio.yaml_to_dict(yaml_str, str_or_buffer)
model = cls(
cfg['model_expression'],
cfg['sample_size'],
probability_mode=cfg.get('probability_mode', 'full_product'),
choice_mode=cfg.get('choice_mode', 'individual'),
choosers_fit_filters=cfg.get('choosers_fit_filters', None),
choosers_predict_filters=cfg.get('choosers_predict_filters', None),
alts_fit_filters=cfg.get('alts_fit_filters', None),
alts_predict_filters=cfg.get('alts_predict_filters', None),
interaction_predict_filters=cfg.get(
'interaction_predict_filters', None),
estimation_sample_size=cfg.get('estimation_sample_size', None),
prediction_sample_size=cfg.get('prediction_sample_size', None),
choice_column=cfg.get('choice_column', None),
name=cfg.get('name', None)
)
if cfg.get('log_likelihoods', None):
model.log_likelihoods = cfg['log_likelihoods']
if cfg.get('fit_parameters', None):
model.fit_parameters = pd.DataFrame(cfg['fit_parameters'])
logger.debug('loaded LCM model {} from YAML'.format(model.name))
return model
@property
def str_model_expression(self):
"""
Model expression as a string suitable for use with patsy/statsmodels.
"""
return util.str_model_expression(
self.model_expression, add_constant=False)
def apply_fit_filters(self, choosers, alternatives):
"""
Filter `choosers` and `alternatives` for fitting.
Parameters
----------
choosers : pandas.DataFrame
Table describing the agents making choices, e.g. households.
alternatives : pandas.DataFrame
Table describing the things from which agents are choosing,
e.g. buildings.
Returns
-------
filtered_choosers, filtered_alts : pandas.DataFrame
"""
return super(MNLDiscreteChoiceModel, self).apply_fit_filters(
choosers, alternatives)
def apply_predict_filters(self, choosers, alternatives):
"""
Filter `choosers` and `alternatives` for prediction.
Parameters
----------
choosers : pandas.DataFrame
Table describing the agents making choices, e.g. households.
alternatives : pandas.DataFrame
Table describing the things from which agents are choosing,
e.g. buildings.
Returns
-------
filtered_choosers, filtered_alts : pandas.DataFrame
"""
return super(MNLDiscreteChoiceModel, self).apply_predict_filters(
choosers, alternatives)
def fit(self, choosers, alternatives, current_choice):
"""
Fit and save model parameters based on given data.
Parameters
----------
choosers : pandas.DataFrame
Table describing the agents making choices, e.g. households.
alternatives : pandas.DataFrame
Table describing the things from which agents are choosing,
e.g. buildings.
current_choice : pandas.Series or any
A Series describing the `alternatives` currently chosen
by the `choosers`. Should have an index matching `choosers`
and values matching the index of `alternatives`.
If a non-Series is given it should be a column in `choosers`.
Returns
-------
log_likelihoods : dict
            Dict of log-likelihood values describing the quality of the
model fit. Will have keys 'null', 'convergence', and 'ratio'.
"""
logger.debug('start: fit LCM model {}'.format(self.name))
if not isinstance(current_choice, pd.Series):
current_choice = choosers[current_choice]
choosers, alternatives = self.apply_fit_filters(choosers, alternatives)
if self.estimation_sample_size:
if fixedrandomseed==0: np.random.seed(seednum)
choosers = choosers.loc[np.random.choice(
choosers.index,
min(self.estimation_sample_size, len(choosers)),
replace=False)]
current_choice = current_choice.loc[choosers.index]
_, merged, chosen = interaction.mnl_interaction_dataset(
choosers, alternatives, self.sample_size, current_choice)
model_design = dmatrix(
self.str_model_expression, data=merged, return_type='dataframe')
if len(merged) != model_design.values.shape[0]:
raise ModelEvaluationError(
'Estimated data does not have the same length as input. '
'This suggests there are null values in one or more of '
'the input columns.')
self.log_likelihoods, self.fit_parameters = mnl.mnl_estimate(
model_design.values, chosen, self.sample_size)
self.fit_parameters.index = model_design.columns
logger.debug('finish: fit LCM model {}'.format(self.name))
return self.log_likelihoods
@property
def fitted(self):
"""
True if model is ready for prediction.
"""
return self.fit_parameters is not None
def assert_fitted(self):
"""
Raises `RuntimeError` if the model is not ready for prediction.
"""
if not self.fitted:
raise RuntimeError('Model has not been fit.')
def report_fit(self):
"""
Print a report of the fit results.
"""
if not self.fitted:
print('Model not yet fit.')
return
        print('Null Log-likelihood: {0:.3f}'.format(
            self.log_likelihoods['null']))
        print('Log-likelihood at convergence: {0:.3f}'.format(
            self.log_likelihoods['convergence']))
        print('Log-likelihood Ratio: {0:.3f}\n'.format(
            self.log_likelihoods['ratio']))
        tbl = PrettyTable()
tbl.add_column('Component', self.fit_parameters.index.values)
for col in ('Coefficient', 'Std. Error', 'T-Score'):
tbl.add_column(col, self.fit_parameters[col].values)
tbl.align['Component'] = 'l'
tbl.float_format = '.3'
print(tbl)
def probabilities(self, choosers, alternatives, filter_tables=True):
"""
Returns the probabilities for a set of choosers to choose
from among a set of alternatives.
Parameters
----------
choosers : pandas.DataFrame
Table describing the agents making choices, e.g. households.
alternatives : pandas.DataFrame
Table describing the things from which agents are choosing.
filter_tables : bool, optional
If True, filter `choosers` and `alternatives` with prediction
filters before calculating probabilities.
Returns
-------
probabilities : pandas.Series
Probability of selection associated with each chooser
and alternative. Index will be a MultiIndex with alternative
            IDs in the inner index and chooser IDs in the outer index.
"""
logger.debug('start: calculate probabilities for LCM model {}'.format(
self.name))
self.assert_fitted()
if filter_tables:
choosers, alternatives = self.apply_predict_filters(
choosers, alternatives)
if self.prediction_sample_size is not None:
sample_size = self.prediction_sample_size
else:
sample_size = len(alternatives)
if self.probability_mode == 'single_chooser':
_, merged, _ = interaction.mnl_interaction_dataset(
choosers.head(1), alternatives, sample_size)
elif self.probability_mode == 'full_product':
_, merged, _ = interaction.mnl_interaction_dataset(
choosers, alternatives, sample_size)
else:
raise ValueError(
'Unrecognized probability_mode option: {}'.format(
self.probability_mode))
merged = util.apply_filter_query(
merged, self.interaction_predict_filters)
model_design = dmatrix(
self.str_model_expression, data=merged, return_type='dataframe')
if len(merged) != model_design.values.shape[0]:
raise ModelEvaluationError(
'Simulated data does not have the same length as input. '
'This suggests there are null values in one or more of '
'the input columns.')
# get the order of the coefficients in the same order as the
# columns in the design matrix
coeffs = [self.fit_parameters['Coefficient'][x]
for x in model_design.columns]
# probabilities are returned from mnl_simulate as a 2d array
# with choosers along rows and alternatives along columns
if self.probability_mode == 'single_chooser':
numalts = len(merged)
else:
numalts = sample_size
probabilities = mnl.mnl_simulate(
model_design.values,
coeffs,
numalts=numalts, returnprobs=True)
# want to turn probabilities into a Series with a MultiIndex
# of chooser IDs and alternative IDs.
# indexing by chooser ID will get you the probabilities
# across alternatives for that chooser
mi = pd.MultiIndex.from_arrays(
[merged['join_index'].values, merged.index.values],
names=('chooser_id', 'alternative_id'))
probabilities = pd.Series(probabilities.flatten(), index=mi)
logger.debug('finish: calculate probabilities for LCM model {}'.format(
self.name))
return probabilities
def summed_probabilities(self, choosers, alternatives):
"""
Calculate total probability associated with each alternative.
Parameters
----------
choosers : pandas.DataFrame
Table describing the agents making choices, e.g. households.
alternatives : pandas.DataFrame
Table describing the things from which agents are choosing.
Returns
-------
probs : pandas.Series
Total probability associated with each alternative.
"""
def normalize(s):
return s / s.sum()
choosers, alternatives = self.apply_predict_filters(
choosers, alternatives)
probs = self.probabilities(choosers, alternatives, filter_tables=False)
        # group by the alternative IDs and sum
if self.probability_mode == 'single_chooser':
return (
normalize(probs) * len(choosers)
).reset_index(level=0, drop=True)
elif self.probability_mode == 'full_product':
return probs.groupby(level=0).apply(normalize)\
.groupby(level=1).sum()
else:
raise ValueError(
'Unrecognized probability_mode option: {}'.format(
self.probability_mode))
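    # Worked example (added for clarity): in 'full_product' mode, suppose two
    # choosers and two alternatives give probabilities (c1, a1)=0.2,
    # (c1, a2)=0.8, (c2, a1)=0.6, (c2, a2)=0.4. Normalizing within each
    # chooser leaves them unchanged (each chooser already sums to 1), and
    # summing over choosers per alternative yields a1 -> 0.8 and a2 -> 1.2,
    # i.e. the expected number of choosers for each alternative.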
def predict(self, choosers, alternatives, debug=False):
"""
Choose from among alternatives for a group of agents.
Parameters
----------
choosers : pandas.DataFrame
Table describing the agents making choices, e.g. households.
alternatives : pandas.DataFrame
Table describing the things from which agents are choosing.
debug : bool
If debug is set to true, will set the variable "sim_pdf" on
the object to store the probabilities for mapping of the
outcome.
Returns
-------
choices : pandas.Series
Mapping of chooser ID to alternative ID. Some choosers
will map to a nan value when there are not enough alternatives
for all the choosers.
"""
self.assert_fitted()
logger.debug('start: predict LCM model {}'.format(self.name))
choosers, alternatives = self.apply_predict_filters(
choosers, alternatives)
if len(choosers) == 0:
return pd.Series()
if len(alternatives) == 0:
return pd.Series(index=choosers.index)
probabilities = self.probabilities(
choosers, alternatives, filter_tables=False)
if debug:
self.sim_pdf = probabilities
if self.choice_mode == 'aggregate':
choices = unit_choice(
choosers.index.values,
probabilities.index.get_level_values('alternative_id').values,
probabilities.values)
elif self.choice_mode == 'individual':
def mkchoice(probs):
probs.reset_index(0, drop=True, inplace=True)
if fixedrandomseed==0: np.random.seed(seednum)
return np.random.choice(
probs.index.values, p=probs.values / probs.sum())
choices = probabilities.groupby(level='chooser_id', sort=False)\
.apply(mkchoice)
else:
raise ValueError(
'Unrecognized choice_mode option: {}'.format(self.choice_mode))
logger.debug('finish: predict LCM model {}'.format(self.name))
return choices
def to_dict(self):
"""
        Return a dict representation of an MNLDiscreteChoiceModel
instance.
"""
return {
'model_type': 'discretechoice',
'model_expression': self.model_expression,
'sample_size': self.sample_size,
'name': self.name,
'probability_mode': self.probability_mode,
'choice_mode': self.choice_mode,
'choosers_fit_filters': self.choosers_fit_filters,
'choosers_predict_filters': self.choosers_predict_filters,
'alts_fit_filters': self.alts_fit_filters,
'alts_predict_filters': self.alts_predict_filters,
'interaction_predict_filters': self.interaction_predict_filters,
'estimation_sample_size': self.estimation_sample_size,
'prediction_sample_size': self.prediction_sample_size,
'choice_column': self.choice_column,
'fitted': self.fitted,
'log_likelihoods': self.log_likelihoods,
'fit_parameters': (yamlio.frame_to_yaml_safe(self.fit_parameters)
if self.fitted else None)
}
def to_yaml(self, str_or_buffer=None):
"""
        Save a model representation to YAML.
Parameters
----------
str_or_buffer : str or file like, optional
By default a YAML string is returned. If a string is
given here the YAML will be written to that file.
If an object with a ``.write`` method is given the
YAML will be written to that object.
Returns
-------
j : str
            YAML as a string if `str_or_buffer` is not given.
"""
logger.debug('serializing LCM model {} to YAML'.format(self.name))
if (not isinstance(self.probability_mode, str) or
not isinstance(self.choice_mode, str)):
raise TypeError(
'Cannot serialize model with non-string probability_mode '
'or choice_mode attributes.')
return yamlio.convert_to_yaml(self.to_dict(), str_or_buffer)
def choosers_columns_used(self):
"""
Columns from the choosers table that are used for filtering.
"""
return list(tz.unique(tz.concatv(
util.columns_in_filters(self.choosers_predict_filters),
util.columns_in_filters(self.choosers_fit_filters))))
def alts_columns_used(self):
"""
Columns from the alternatives table that are used for filtering.
"""
return list(tz.unique(tz.concatv(
util.columns_in_filters(self.alts_predict_filters),
util.columns_in_filters(self.alts_fit_filters))))
def interaction_columns_used(self):
"""
Columns from the interaction dataset used for filtering and in
the model. These may come originally from either the choosers or
alternatives tables.
"""
return list(tz.unique(tz.concatv(
util.columns_in_filters(self.interaction_predict_filters),
util.columns_in_formula(self.model_expression))))
def columns_used(self):
"""
Columns from any table used in the model. May come from either
the choosers or alternatives tables.
"""
return list(tz.unique(tz.concatv(
self.choosers_columns_used(),
self.alts_columns_used(),
self.interaction_columns_used())))
@classmethod
def fit_from_cfg(cls, choosers, chosen_fname, alternatives, cfgname, outcfgname=None):
"""
Parameters
----------
choosers : DataFrame
A dataframe in which rows represent choosers.
chosen_fname : string
A string indicating the column in the choosers dataframe which
gives which alternatives the choosers have chosen.
alternatives : DataFrame
A table of alternatives. It should include the choices
from the choosers table as well as other alternatives from
which to sample. Values in choosers[chosen_fname] should index
into the alternatives dataframe.
cfgname : string
The name of the yaml config file from which to read the discrete
choice model.
outcfgname : string, optional (default cfgname)
The name of the output yaml config file where estimation results are written into.
Returns
-------
lcm : MNLDiscreteChoiceModel which was used to fit
"""
logger.debug('start: fit from configuration {}'.format(cfgname))
lcm = cls.from_yaml(str_or_buffer=cfgname)
lcm.fit(choosers, alternatives, choosers[chosen_fname])
lcm.report_fit()
outcfgname = outcfgname or cfgname
lcm.to_yaml(str_or_buffer=outcfgname)
logger.debug('finish: fit into configuration {}'.format(outcfgname))
return lcm
@classmethod
def predict_from_cfg(cls, choosers, alternatives, cfgname=None, cfg=None,
alternative_ratio=2.0, debug=False):
"""
Simulate choices for the specified choosers
Parameters
----------
choosers : DataFrame
A dataframe of agents doing the choosing.
alternatives : DataFrame
A dataframe of locations which the choosers are locating in and
which have a supply.
cfgname : string
The name of the yaml config file from which to read the discrete
choice model.
cfg: string
            an ordered YAML string of the discrete choice model configuration.
Used to read config from memory in lieu of loading cfgname from disk.
alternative_ratio : float, optional
Above the ratio of alternatives to choosers (default of 2.0),
the alternatives will be sampled to meet this ratio
(for performance reasons).
debug : boolean, optional (default False)
Whether to generate debug information on the model.
Returns
-------
choices : pandas.Series
Mapping of chooser ID to alternative ID. Some choosers
will map to a nan value when there are not enough alternatives
for all the choosers.
lcm : MNLDiscreteChoiceModel which was used to predict
"""
logger.debug('start: predict from configuration {}'.format(cfgname))
if cfgname:
lcm = cls.from_yaml(str_or_buffer=cfgname)
elif cfg:
lcm = cls.from_yaml(yaml_str=cfg)
else:
msg = 'predict_from_cfg requires a configuration via the cfgname or cfg arguments'
logger.error(msg)
raise ValueError(msg)
if len(alternatives) > len(choosers) * alternative_ratio:
logger.info(
("Alternative ratio exceeded: %d alternatives "
"and only %d choosers") %
(len(alternatives), len(choosers)))
if fixedrandomseed==0: np.random.seed(seednum)
idxes = np.random.choice(
alternatives.index, size=int(len(choosers) *
alternative_ratio),
replace=False)
alternatives = alternatives.loc[idxes]
logger.info(
" after sampling %d alternatives are available\n" %
len(alternatives))
new_units = lcm.predict(choosers, alternatives, debug=debug)
print("Assigned %d choosers to new units" % len(new_units.dropna()))
logger.debug('finish: predict from configuration {}'.format(cfgname))
return new_units, lcm
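# --- Illustrative usage sketch (added for clarity; not part of the original
# module) --- A minimal end-to-end example of the fit/probabilities workflow
# above, using tiny synthetic tables. The column names ('income', 'price',
# 'chosen_id') are invented for illustration, and the sketch assumes the
# numpy/pandas imports and the MNL estimation backend used by this module are
# available.
def _example_mnl_dcm_usage():
    n_alts, n_choosers = 20, 50
    alternatives = pd.DataFrame(
        {'price': np.random.lognormal(12, 1, n_alts)},
        index=np.arange(n_alts))
    choosers = pd.DataFrame(
        {'income': np.random.lognormal(10, 1, n_choosers),
         'chosen_id': np.random.choice(np.arange(n_alts), n_choosers)})
    dcm = MNLDiscreteChoiceModel(model_expression='price', sample_size=10)
    dcm.fit(choosers, alternatives, 'chosen_id')
    dcm.report_fit()
    # Series indexed by (chooser_id, alternative_id)
    return dcm.probabilities(choosers, alternatives)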
class MNLDiscreteChoiceModelGroup(DiscreteChoiceModel):
"""
Manages a group of discrete choice models that refer to different
segments of choosers.
Model names must match the segment names after doing a pandas groupby.
Parameters
----------
segmentation_col : str
Name of a column in the table of choosers. Will be used to perform
a pandas groupby on the choosers table.
remove_alts : bool, optional
Specify how to handle alternatives between prediction for different
models. If False, the alternatives table is not modified between
predictions. If True, alternatives that have been chosen
are removed from the alternatives table before doing another
round of prediction.
name : str, optional
A name that may be used in places to identify this group.
"""
def __init__(self, segmentation_col, remove_alts=False, name=None):
self.segmentation_col = segmentation_col
self.remove_alts = remove_alts
self.name = name if name is not None else 'MNLDiscreteChoiceModelGroup'
self.models = {}
def add_model(self, model):
"""
Add an MNLDiscreteChoiceModel instance.
Parameters
----------
model : MNLDiscreteChoiceModel
Should have a ``.name`` attribute matching one of the segments
in the choosers table.
"""
logger.debug(
'adding model {} to LCM group {}'.format(model.name, self.name))
self.models[model.name] = model
def add_model_from_params(
self, name, model_expression, sample_size,
probability_mode='full_product', choice_mode='individual',
choosers_fit_filters=None, choosers_predict_filters=None,
alts_fit_filters=None, alts_predict_filters=None,
interaction_predict_filters=None, estimation_sample_size=None,
prediction_sample_size=None, choice_column=None):
"""
Add a model by passing parameters through to MNLDiscreteChoiceModel.
Parameters
----------
name
Must match a segment in the choosers table.
model_expression : str, iterable, or dict
A patsy model expression. Should contain only a right-hand side.
sample_size : int
Number of choices to sample for estimating the model.
probability_mode : str, optional
Specify the method to use for calculating probabilities
during prediction.
Available string options are 'single_chooser' and 'full_product'.
In "single chooser" mode one agent is chosen for calculating
probabilities across all alternatives. In "full product" mode
probabilities are calculated for every chooser across all
alternatives.
choice_mode : str or callable, optional
Specify the method to use for making choices among alternatives.
Available string options are 'individual' and 'aggregate'.
In "individual" mode choices will be made separately for each
chooser. In "aggregate" mode choices are made for all choosers at
once. Aggregate mode implies that an alternative chosen by one
agent is unavailable to other agents and that the same
probabilities can be used for all choosers.
choosers_fit_filters : list of str, optional
Filters applied to choosers table before fitting the model.
choosers_predict_filters : list of str, optional
Filters applied to the choosers table before calculating
new data points.
alts_fit_filters : list of str, optional
Filters applied to the alternatives table before fitting the model.
alts_predict_filters : list of str, optional
Filters applied to the alternatives table before calculating
new data points.
interaction_predict_filters : list of str, optional
Filters applied to the merged choosers/alternatives table
before predicting agent choices.
estimation_sample_size : int, optional
Whether to sample choosers during estimation
(needs to be applied after choosers_fit_filters)
prediction_sample_size : int, optional
Whether (and how much) to sample alternatives during prediction.
Note that this can lead to multiple choosers picking the same
alternative.
choice_column : optional
Name of the column in the `alternatives` table that choosers
should choose. e.g. the 'building_id' column. If not provided
the alternatives index is used.
"""
logger.debug('adding model {} to LCM group {}'.format(name, self.name))
self.models[name] = MNLDiscreteChoiceModel(
model_expression, sample_size,
probability_mode, choice_mode,
choosers_fit_filters, choosers_predict_filters,
alts_fit_filters, alts_predict_filters,
interaction_predict_filters, estimation_sample_size,
prediction_sample_size, choice_column, name)
def _iter_groups(self, data):
"""
Iterate over the groups in `data` after grouping by
`segmentation_col`. Skips any groups for which there
is no model stored.
Yields tuples of (name, df) where name is the group key
and df is the group DataFrame.
Parameters
----------
data : pandas.DataFrame
Must have a column with the same name as `segmentation_col`.
"""
groups = data.groupby(self.segmentation_col)
for name, group in groups:
if name not in self.models:
continue
logger.debug(
'returning group {} in LCM group {}'.format(name, self.name))
yield name, group
def apply_fit_filters(self, choosers, alternatives):
"""
Filter `choosers` and `alternatives` for fitting.
This is done by filtering each submodel and concatenating
the results.
Parameters
----------
choosers : pandas.DataFrame
Table describing the agents making choices, e.g. households.
alternatives : pandas.DataFrame
Table describing the things from which agents are choosing,
e.g. buildings.
Returns
-------
filtered_choosers, filtered_alts : pandas.DataFrame
"""
ch = []
alts = []
for name, df in self._iter_groups(choosers):
filtered_choosers, filtered_alts = \
self.models[name].apply_fit_filters(df, alternatives)
ch.append(filtered_choosers)
alts.append(filtered_alts)
return pd.concat(ch), pd.concat(alts)
def apply_predict_filters(self, choosers, alternatives):
"""
Filter `choosers` and `alternatives` for prediction.
This is done by filtering each submodel and concatenating
the results.
Parameters
----------
choosers : pandas.DataFrame
Table describing the agents making choices, e.g. households.
alternatives : pandas.DataFrame
Table describing the things from which agents are choosing,
e.g. buildings.
Returns
-------
filtered_choosers, filtered_alts : pandas.DataFrame
"""
ch = []
alts = []
for name, df in self._iter_groups(choosers):
filtered_choosers, filtered_alts = \
self.models[name].apply_predict_filters(df, alternatives)
ch.append(filtered_choosers)
alts.append(filtered_alts)
filtered_choosers = | pd.concat(ch) | pandas.concat |
# Function 0
def cleaning_func_0(loan):
# core cleaning code
import numpy as np
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['90day_worse_rating'] = np.where(loan['mths_since_last_major_derog'].isnull(), 0, 1)
return loan
#=============
# Function 1
def cleaning_func_1(loan):
# core cleaning code
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['revol_util'] = loan['revol_util'].fillna(loan['revol_util'].median())
return loan
#=============
# Function 2
def cleaning_func_2(loan):
# core cleaning code
import numpy as np
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['emp_title'] = np.where(loan['emp_title'].isnull(), 'Job title not given', loan['emp_title'])
return loan
#=============
# Function 3
def cleaning_func_3(loan):
# core cleaning code
import numpy as np
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['acc_now_delinq'] = np.where(loan['acc_now_delinq'].isnull(), 0, loan['acc_now_delinq'])
return loan
#=============
# Function 4
def cleaning_func_4(loan):
# core cleaning code
import numpy as np
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['delinq_2yrs'] = np.where(loan['delinq_2yrs'].isnull(), 0, loan['delinq_2yrs'])
return loan
#=============
# Function 5
def cleaning_func_5(loan):
# core cleaning code
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['tot_coll_amt'] = loan['tot_coll_amt'].fillna(loan['tot_coll_amt'].median())
return loan
#=============
# Function 6
def cleaning_func_6(loan):
# core cleaning code
import numpy as np
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['title'] = np.where(loan['title'].isnull(), 0, loan['title'])
return loan
#=============
# Function 7
def cleaning_func_7(loan):
# core cleaning code
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['total_rev_hi_lim'] = loan['total_rev_hi_lim'].fillna(loan['total_rev_hi_lim'].median())
return loan
#=============
# Function 8
def cleaning_func_8(loan):
# core cleaning code
import numpy as np
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['inq_last_6mths'] = np.where(loan['inq_last_6mths'].isnull(), 0, loan['inq_last_6mths'])
return loan
#=============
# Function 9
def cleaning_func_9(loan):
# core cleaning code
import numpy as np
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['total_acc'] = np.where(loan['total_acc'].isnull(), 0, loan['total_acc'])
return loan
#=============
# Function 10
def cleaning_func_10(loan):
# core cleaning code
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['annual_inc'] = loan['annual_inc'].fillna(loan['annual_inc'].median())
return loan
#=============
# Function 11
def cleaning_func_11(loan):
# core cleaning code
import numpy as np
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['open_acc'] = np.where(loan['open_acc'].isnull(), 0, loan['open_acc'])
return loan
#=============
# Function 12
def cleaning_func_12(loan):
# core cleaning code
import numpy as np
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['collections_12_mths_ex_med'] = np.where(loan['collections_12_mths_ex_med'].isnull(), 0, loan['collections_12_mths_ex_med'])
return loan
#=============
# Function 13
def cleaning_func_13(loan):
# core cleaning code
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['tot_cur_bal'] = loan['tot_cur_bal'].fillna(loan['tot_cur_bal'].median())
return loan
#=============
# Function 14
def cleaning_func_14(loan):
# core cleaning code
import numpy as np
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['pub_rec'] = np.where(loan['pub_rec'].isnull(), 0, loan['pub_rec'])
return loan
#=============
# Function 15
def cleaning_func_15(loan):
# core cleaning code
import numpy as np
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['mths_since_last_delinq'] = np.where(loan['mths_since_last_delinq'].isnull(), 188, loan['mths_since_last_delinq'])
return loan
#=============
# Function 16
def cleaning_func_0(ld):
# core cleaning code
import pandas as pd
# ld = pd.read_csv('../input/loan.csv', low_memory=False, parse_dates=True)
pct_full = (ld.count() / len(ld))
names = list(pct_full[(pct_full > 0.75)].index)
loan = ld[names]
loan['pct_paid'] = (loan.out_prncp / loan.loan_amnt)
return loan
#=============
# Function 17
def cleaning_func_1(ld):
# core cleaning code
import pandas as pd
# ld = pd.read_csv('../input/loan.csv', low_memory=False, parse_dates=True)
pct_full = (ld.count() / len(ld))
names = list(pct_full[(pct_full > 0.75)].index)
loan = ld[names]
loan['issue_mo'] = loan.issue_d.str[slice(0, 3, None)]
return loan
#=============
# Function 18
def cleaning_func_2(ld):
# core cleaning code
import pandas as pd
# ld = pd.read_csv('../input/loan.csv', low_memory=False, parse_dates=True)
pct_full = (ld.count() / len(ld))
names = list(pct_full[(pct_full > 0.75)].index)
loan = ld[names]
loan['issue_year'] = loan.issue_d.str[slice(4, None, None)]
return loan
#=============
# Function 19
def cleaning_func_0(data):
# core cleaning code
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data['bad_loan'] = 0
return data
#=============
# Function 20
def cleaning_func_1(data):
# core cleaning code
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
bad_indicators = ['Charged Off ', 'Default', 'Does not meet the credit policy. Status:Charged Off', 'In Grace Period', 'Default Receiver', 'Late (16-30 days)', 'Late (31-120 days)']
data.loc[(data.loan_status.isin(bad_indicators), 'bad_loan')] = 1
return data
#=============
# Function 21
def cleaning_func_2(data):
# core cleaning code
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data['issue_dt'] = pd.to_datetime(data.issue_d)
return data
#=============
# Function 22
def cleaning_func_3(data):
# core cleaning code
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data['issue_dt'] = pd.to_datetime(data.issue_d)
data['month'] = data['issue_dt'].dt.month
return data
#=============
# Function 23
def cleaning_func_4(data):
# core cleaning code
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data['issue_dt'] = pd.to_datetime(data.issue_d)
data['year'] = data['issue_dt'].dt.year
return data
#=============
# Function 24
def cleaning_func_0(loans):
# core cleaning code
import pandas as pd
date = ['issue_d', 'last_pymnt_d']
cols = ['issue_d', 'term', 'int_rate', 'loan_amnt', 'total_pymnt', 'last_pymnt_d', 'sub_grade', 'grade', 'loan_status']
# loans = pd.read_csv('../input/loan.csv', low_memory=False, parse_dates=date, usecols=cols, infer_datetime_format=True)
latest = loans['issue_d'].max()
finished_bool = (((loans['issue_d'] < (latest - pd.DateOffset(years=3))) & (loans['term'] == ' 36 months')) | ((loans['issue_d'] < (latest - pd.DateOffset(years=5))) & (loans['term'] == ' 60 months')))
finished_loans = loans.loc[finished_bool]
finished_loans['roi'] = (((finished_loans.total_pymnt / finished_loans.loan_amnt) - 1) * 100)
return finished_loans
#=============
# Function 25
def cleaning_func_0(df):
# core cleaning code
import pandas as pd
badLoan = ['Charged Off', 'Default', 'Late (31-120 days)', 'Late (16-30 days)', 'In Grace Period', 'Does not meet the credit policy. Status:Charged Off']
# df = pd.read_csv('../input/loan.csv', usecols=['loan_status', 'addr_state'])
df['isBad'] = [(1 if (x in badLoan) else 0) for x in df.loan_status]
return df
#=============
# Function 26
def cleaning_func_4(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', usecols=['loan_amnt', 'addr_state'])
perStatedf = df.groupby('addr_state', as_index=False).count().sort_values(by='loan_amnt', ascending=False)
perStatedf.columns = ['State', 'Num_Loans']
return perStatedf
#=============
# Function 27
def cleaning_func_5(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', usecols=['loan_amnt', 'addr_state'])
return df.groupby('addr_state', as_index=False).count()
#=============
# Function 28
def cleaning_func_6(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', usecols=['loan_amnt', 'addr_state'])
perStatedf = df.groupby('addr_state', as_index=False).sum().sort_values(by='loan_amnt', ascending=False)
perStatedf.columns = ['State', 'loan_amt']
return perStatedf
#=============
# Function 29
def cleaning_func_8(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', usecols=['loan_status', 'addr_state'])
perStatedf = df.groupby('addr_state', as_index=False).sum().sort_values(by='isBad', ascending=False)
perStatedf.columns = ['State', 'badLoans']
return perStatedf
#=============
# Function 30
def cleaning_func_10(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', usecols=['loan_status', 'addr_state'])
perStatedf = df.groupby('addr_state', as_index=False).count().sort_values(by='loan_status', ascending=False)
perStatedf.columns = ['State', 'totalLoans']
return perStatedf
#=============
# Function 31
def cleaning_func_14(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', usecols=['loan_amnt', 'addr_state'])
perStatedf = df.groupby('addr_state', as_index=False).count().sort_values(by='loan_amnt', ascending=False)
return perStatedf
#=============
# Function 32
def cleaning_func_15(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', usecols=['loan_amnt', 'addr_state'])
perStatedf = df.groupby('addr_state', as_index=False).count().sort_values(by='loan_amnt', ascending=False)
statePop = {'CA': 39144818, 'TX': 27469144, 'FL': 20271878, 'NY': 19795791, 'IL': 12859995, 'PA': 12802503, 'OH': 11613423, 'GA': 10214860, 'NC': 10042802, 'MI': 9922576, 'NJ': 8958013, 'VA': 8382993, 'WA': 7170351, 'AZ': 6828065, 'MA': 6794422, 'IN': 6619680, 'TN': 6600299, 'MO': 6083672, 'MD': 6006401, 'WI': 5771337, 'MN': 5489594, 'CO': 5456574, 'SC': 4896146, 'AL': 4858979, 'LA': 4670724, 'KY': 4425092, 'OR': 4028977, 'OK': 3911338, 'CT': 3890886, 'IA': 3123899, 'UT': 2995919, 'MS': 2992333, 'AK': 2978204, 'KS': 2911641, 'NV': 2890845, 'NM': 2085109, 'NE': 1896190, 'WV': 1844128, 'ID': 1654930, 'HI': 1431603, 'NH': 1330608, 'ME': 1329328, 'RI': 1053298, 'MT': 1032949, 'DE': 945934, 'SD': 858469, 'ND': 756927, 'AK': 738432, 'DC': 672228, 'VT': 626042, 'WY': 586107}
statePopdf = pd.DataFrame.from_dict(statePop, orient='index').reset_index()
perStatedf = pd.merge(perStatedf, statePopdf, on=['State'], how='inner')
perStatedf['PerCaptia'] = (perStatedf.Num_Loans / perStatedf.Pop)
return perStatedf
#=============
# Function 33
def cleaning_func_16(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', usecols=['loan_amnt', 'addr_state'])
perStatedf = df.groupby('addr_state', as_index=False).count().sort_values(by='loan_amnt', ascending=False)
statePop = {'CA': 39144818, 'TX': 27469144, 'FL': 20271878, 'NY': 19795791, 'IL': 12859995, 'PA': 12802503, 'OH': 11613423, 'GA': 10214860, 'NC': 10042802, 'MI': 9922576, 'NJ': 8958013, 'VA': 8382993, 'WA': 7170351, 'AZ': 6828065, 'MA': 6794422, 'IN': 6619680, 'TN': 6600299, 'MO': 6083672, 'MD': 6006401, 'WI': 5771337, 'MN': 5489594, 'CO': 5456574, 'SC': 4896146, 'AL': 4858979, 'LA': 4670724, 'KY': 4425092, 'OR': 4028977, 'OK': 3911338, 'CT': 3890886, 'IA': 3123899, 'UT': 2995919, 'MS': 2992333, 'AK': 2978204, 'KS': 2911641, 'NV': 2890845, 'NM': 2085109, 'NE': 1896190, 'WV': 1844128, 'ID': 1654930, 'HI': 1431603, 'NH': 1330608, 'ME': 1329328, 'RI': 1053298, 'MT': 1032949, 'DE': 945934, 'SD': 858469, 'ND': 756927, 'AK': 738432, 'DC': 672228, 'VT': 626042, 'WY': 586107}
return pd.DataFrame.from_dict(statePop, orient='index')
#=============
# Function 34
def cleaning_func_17(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', usecols=['loan_amnt', 'addr_state'])
perStatedf = df.groupby('addr_state', as_index=False).count().sort_values(by='loan_amnt', ascending=False)
statePop = {'CA': 39144818, 'TX': 27469144, 'FL': 20271878, 'NY': 19795791, 'IL': 12859995, 'PA': 12802503, 'OH': 11613423, 'GA': 10214860, 'NC': 10042802, 'MI': 9922576, 'NJ': 8958013, 'VA': 8382993, 'WA': 7170351, 'AZ': 6828065, 'MA': 6794422, 'IN': 6619680, 'TN': 6600299, 'MO': 6083672, 'MD': 6006401, 'WI': 5771337, 'MN': 5489594, 'CO': 5456574, 'SC': 4896146, 'AL': 4858979, 'LA': 4670724, 'KY': 4425092, 'OR': 4028977, 'OK': 3911338, 'CT': 3890886, 'IA': 3123899, 'UT': 2995919, 'MS': 2992333, 'AK': 2978204, 'KS': 2911641, 'NV': 2890845, 'NM': 2085109, 'NE': 1896190, 'WV': 1844128, 'ID': 1654930, 'HI': 1431603, 'NH': 1330608, 'ME': 1329328, 'RI': 1053298, 'MT': 1032949, 'DE': 945934, 'SD': 858469, 'ND': 756927, 'AK': 738432, 'DC': 672228, 'VT': 626042, 'WY': 586107}
statePopdf = pd.DataFrame.from_dict(statePop, orient='index').reset_index()
return statePopdf
#=============
# Function 35
def cleaning_func_18(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', usecols=['loan_amnt', 'addr_state'])
statePop = {'CA': 39144818, 'TX': 27469144, 'FL': 20271878, 'NY': 19795791, 'IL': 12859995, 'PA': 12802503, 'OH': 11613423, 'GA': 10214860, 'NC': 10042802, 'MI': 9922576, 'NJ': 8958013, 'VA': 8382993, 'WA': 7170351, 'AZ': 6828065, 'MA': 6794422, 'IN': 6619680, 'TN': 6600299, 'MO': 6083672, 'MD': 6006401, 'WI': 5771337, 'MN': 5489594, 'CO': 5456574, 'SC': 4896146, 'AL': 4858979, 'LA': 4670724, 'KY': 4425092, 'OR': 4028977, 'OK': 3911338, 'CT': 3890886, 'IA': 3123899, 'UT': 2995919, 'MS': 2992333, 'AK': 2978204, 'KS': 2911641, 'NV': 2890845, 'NM': 2085109, 'NE': 1896190, 'WV': 1844128, 'ID': 1654930, 'HI': 1431603, 'NH': 1330608, 'ME': 1329328, 'RI': 1053298, 'MT': 1032949, 'DE': 945934, 'SD': 858469, 'ND': 756927, 'AK': 738432, 'DC': 672228, 'VT': 626042, 'WY': 586107}
statePopdf = pd.DataFrame.from_dict(statePop, orient='index').reset_index()
perStatedf = df.groupby('addr_state', as_index=False).sum().sort_values(by='loan_amnt', ascending=False)
return perStatedf
#=============
# Function 36
def cleaning_func_19(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', usecols=['loan_amnt', 'addr_state'])
statePop = {'CA': 39144818, 'TX': 27469144, 'FL': 20271878, 'NY': 19795791, 'IL': 12859995, 'PA': 12802503, 'OH': 11613423, 'GA': 10214860, 'NC': 10042802, 'MI': 9922576, 'NJ': 8958013, 'VA': 8382993, 'WA': 7170351, 'AZ': 6828065, 'MA': 6794422, 'IN': 6619680, 'TN': 6600299, 'MO': 6083672, 'MD': 6006401, 'WI': 5771337, 'MN': 5489594, 'CO': 5456574, 'SC': 4896146, 'AL': 4858979, 'LA': 4670724, 'KY': 4425092, 'OR': 4028977, 'OK': 3911338, 'CT': 3890886, 'IA': 3123899, 'UT': 2995919, 'MS': 2992333, 'AK': 2978204, 'KS': 2911641, 'NV': 2890845, 'NM': 2085109, 'NE': 1896190, 'WV': 1844128, 'ID': 1654930, 'HI': 1431603, 'NH': 1330608, 'ME': 1329328, 'RI': 1053298, 'MT': 1032949, 'DE': 945934, 'SD': 858469, 'ND': 756927, 'AK': 738432, 'DC': 672228, 'VT': 626042, 'WY': 586107}
statePopdf = pd.DataFrame.from_dict(statePop, orient='index').reset_index()
perStatedf = df.groupby('addr_state', as_index=False).sum().sort_values(by='loan_amnt', ascending=False)
perStatedf = pd.merge(perStatedf, statePopdf, on=['State'], how='inner')
perStatedf['PerCaptia'] = (perStatedf.loan_amt / perStatedf.Pop)
return perStatedf
#=============
# Function 37
def cleaning_func_20(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', usecols=['loan_amnt', 'addr_state'])
statePop = {'CA': 39144818, 'TX': 27469144, 'FL': 20271878, 'NY': 19795791, 'IL': 12859995, 'PA': 12802503, 'OH': 11613423, 'GA': 10214860, 'NC': 10042802, 'MI': 9922576, 'NJ': 8958013, 'VA': 8382993, 'WA': 7170351, 'AZ': 6828065, 'MA': 6794422, 'IN': 6619680, 'TN': 6600299, 'MO': 6083672, 'MD': 6006401, 'WI': 5771337, 'MN': 5489594, 'CO': 5456574, 'SC': 4896146, 'AL': 4858979, 'LA': 4670724, 'KY': 4425092, 'OR': 4028977, 'OK': 3911338, 'CT': 3890886, 'IA': 3123899, 'UT': 2995919, 'MS': 2992333, 'AK': 2978204, 'KS': 2911641, 'NV': 2890845, 'NM': 2085109, 'NE': 1896190, 'WV': 1844128, 'ID': 1654930, 'HI': 1431603, 'NH': 1330608, 'ME': 1329328, 'RI': 1053298, 'MT': 1032949, 'DE': 945934, 'SD': 858469, 'ND': 756927, 'AK': 738432, 'DC': 672228, 'VT': 626042, 'WY': 586107}
statePopdf = pd.DataFrame.from_dict(statePop, orient='index').reset_index()
return df.groupby('addr_state', as_index=False).sum()
#=============
# Function 38
def cleaning_func_21(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', usecols=['loan_amnt', 'addr_state'])
statePop = {'CA': 39144818, 'TX': 27469144, 'FL': 20271878, 'NY': 19795791, 'IL': 12859995, 'PA': 12802503, 'OH': 11613423, 'GA': 10214860, 'NC': 10042802, 'MI': 9922576, 'NJ': 8958013, 'VA': 8382993, 'WA': 7170351, 'AZ': 6828065, 'MA': 6794422, 'IN': 6619680, 'TN': 6600299, 'MO': 6083672, 'MD': 6006401, 'WI': 5771337, 'MN': 5489594, 'CO': 5456574, 'SC': 4896146, 'AL': 4858979, 'LA': 4670724, 'KY': 4425092, 'OR': 4028977, 'OK': 3911338, 'CT': 3890886, 'IA': 3123899, 'UT': 2995919, 'MS': 2992333, 'AK': 2978204, 'KS': 2911641, 'NV': 2890845, 'NM': 2085109, 'NE': 1896190, 'WV': 1844128, 'ID': 1654930, 'HI': 1431603, 'NH': 1330608, 'ME': 1329328, 'RI': 1053298, 'MT': 1032949, 'DE': 945934, 'SD': 858469, 'ND': 756927, 'AK': 738432, 'DC': 672228, 'VT': 626042, 'WY': 586107}
return pd.DataFrame.from_dict(statePop, orient='index')
#=============
# Function 39
def cleaning_func_23(df):
# core cleaning code
import pandas as pd
statePop = {'CA': 39144818, 'TX': 27469144, 'FL': 20271878, 'NY': 19795791, 'IL': 12859995, 'PA': 12802503, 'OH': 11613423, 'GA': 10214860, 'NC': 10042802, 'MI': 9922576, 'NJ': 8958013, 'VA': 8382993, 'WA': 7170351, 'AZ': 6828065, 'MA': 6794422, 'IN': 6619680, 'TN': 6600299, 'MO': 6083672, 'MD': 6006401, 'WI': 5771337, 'MN': 5489594, 'CO': 5456574, 'SC': 4896146, 'AL': 4858979, 'LA': 4670724, 'KY': 4425092, 'OR': 4028977, 'OK': 3911338, 'CT': 3890886, 'IA': 3123899, 'UT': 2995919, 'MS': 2992333, 'AK': 2978204, 'KS': 2911641, 'NV': 2890845, 'NM': 2085109, 'NE': 1896190, 'WV': 1844128, 'ID': 1654930, 'HI': 1431603, 'NH': 1330608, 'ME': 1329328, 'RI': 1053298, 'MT': 1032949, 'DE': 945934, 'SD': 858469, 'ND': 756927, 'AK': 738432, 'DC': 672228, 'VT': 626042, 'WY': 586107}
statePopdf = | pd.DataFrame.from_dict(statePop, orient='index') | pandas.DataFrame.from_dict |
# coding=utf-8
# Copyright 2016-2018 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__author__ = "<NAME>"
__copyright__ = "Copyright 2018, <NAME>"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__date__ = "23/09/18"
import logging
import os
import json
import sys
import pandas as pd
import numpy as np
import random
import math
import itertools
import scipy.stats
from sklearn import linear_model
from math import exp, sqrt
import ai4materials.utils.unit_conversion as uc
logger = logging.getLogger('ai4materials')
def choose_atomic_features(selected_feature_list=None,
atomic_data_file=None, binary_data_file=None):
"""Choose primary features for the extended lasso procedure."""
df1 = pd.read_csv(atomic_data_file, index_col=False)
df2 = pd.read_csv(binary_data_file, index_col=False)
# merge two dataframes on Material
df = pd.merge(df1, df2, on='Mat')
# calculate r_sigma and r_pi [Phys. Rev. Lett. 33, 1095(1974)]
radii_s_p = ['rp(A)', 'rs(A)', 'rp(B)', 'rs(B)']
df['r_sigma'] = df[radii_s_p].apply(r_sigma, axis=1)
df['r_pi'] = df[radii_s_p].apply(r_pi, axis=1)
# calculate Es/sqrt(Zval) and Ep/sqrt(Zval)
e_val_z = ['Es(A)', 'val(A)']
df['Es(A)/sqrt(Zval(A))'] = df[e_val_z].apply(e_sqrt_z, axis=1)
e_val_z = ['Es(B)', 'val(B)']
df['Es(B)/sqrt(Zval(B))'] = df[e_val_z].apply(e_sqrt_z, axis=1)
e_val_z = ['Ep(A)', 'val(A)']
df['Ep(A)/sqrt(Zval(A))'] = df[e_val_z].apply(e_sqrt_z, axis=1)
e_val_z = ['Ep(B)', 'val(B)']
df['Ep(B)/sqrt(Zval(B))'] = df[e_val_z].apply(e_sqrt_z, axis=1)
column_list = df.columns.tolist()
feature_list = column_list
if 'Mat' in feature_list:
feature_list.remove('Mat')
if 'Edim' in feature_list:
feature_list.remove('Edim')
logger.debug("Available features: \n {}".format(feature_list))
df_selected = df[selected_feature_list]
df_selected.insert(0, 'Mat', df['Mat'])
if selected_feature_list:
logger.info("Primary features selected: \n {}".format(selected_feature_list))
else:
logger.error("No selected features.")
sys.exit(1)
return df_selected
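# Illustrative sketch (added for clarity; not part of the original module):
# how choose_atomic_features is typically called. The CSV file names below are
# placeholders; both files must contain a 'Mat' column plus the atomic and
# binary columns referenced above (rs/rp radii, Es/Ep energies, val, ...).
def _example_choose_atomic_features():
    selected = ['r_sigma', 'r_pi',
                'Es(A)/sqrt(Zval(A))', 'Ep(B)/sqrt(Zval(B))']
    return choose_atomic_features(selected_feature_list=selected,
                                  atomic_data_file='atomic_data.csv',
                                  binary_data_file='binary_data.csv')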
def classify_rs_zb(structure):
"""Classify if a structure is rocksalt of zincblend from a list of NoMaD structure.
(one json file). Supports multiple frames (TO DO: check that). Hard-coded.
rocksalt:
atom_frac1 0.0 0.0 0.0
atom_frac2 0.5 0.5 0.5
zincblende:
atom_frac1 0.0 0.0 0.0
atom_frac2 0.25 0.25 0.25
zincblende --> label=0
rocksalt --> label=1
"""
energy = {}
chemical_formula = {}
label = {}
# gIndexRun=0
# gIndexDesc=1
    for (gIndexRun, gIndexDesc), atoms in structure.atoms.items():
if atoms is not None:
energy[gIndexRun, gIndexDesc] = structure.energy_eV[(gIndexRun, gIndexDesc)]
# energy=1.0
chemical_formula[gIndexRun, gIndexDesc] = structure.chemical_formula[(gIndexRun, gIndexDesc)]
# get labels, works only for RS/ZB dataset
pos_atom_2 = np.asarray(list(structure.scaled_positions.values())).reshape(2, 3)[1, :]
if all(i < 0.375 for i in pos_atom_2):
# label='zincblend'
label[gIndexRun, gIndexDesc] = 0
else:
# label='rocksalt'
label[gIndexRun, gIndexDesc] = 1
break
return chemical_formula, energy, label
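# Worked illustration of the classification rule above (added for clarity):
# in an ideal zincblende cell the second atom sits at fractional coordinates
# (0.25, 0.25, 0.25), all below the 0.375 threshold, so it is labelled 0;
# in rocksalt it sits at (0.5, 0.5, 0.5), so it is labelled 1. For example:
#     all(i < 0.375 for i in (0.25, 0.25, 0.25))  # True  -> zincblende (0)
#     all(i < 0.375 for i in (0.5, 0.5, 0.5))     # False -> rocksalt   (1)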
def get_energy_diff(chemical_formula_list, energy_list, label_list):
""" Obtain difference in energy (eV) between rocksalt and zincblend structures of a given binary.
From a list of chemical formulas, energies and labels returns a dictionary
with {`material`: `delta_e`} where `delta_e` is the difference between the energy
with label 1 and energy with label 0, grouped by material.
Each element of such list corresponds to a json file.
    The `delta_e` is exactly the quantity reported in PRL 114, 105503 (2015).
.. todo:: Check if it works for multiple frames.
"""
energy_ = []
chemical_formula_ = []
label_ = []
# energy and chemical formula are lists even if only one frame is present
for i, energy_i in enumerate(energy_list):
energy_.append(energy_i.values())
for i, chemical_formula_i in enumerate(chemical_formula_list):
chemical_formula_.append(chemical_formula_i.values())
for i, label_i in enumerate(label_list):
label_.append(label_i.values())
# flatten the lists
energy = list(itertools.chain(*energy_))
chemical_formula = list(itertools.chain(*chemical_formula_))
label = list(itertools.chain(*label_))
df = pd.DataFrame()
df['Mat'] = chemical_formula
df['Energy'] = energy
df['Label'] = label
# generate summary dataframe with lowest zincblend and rocksalt energy
# zincblend --> label=0
# rocksalt --> label=1
df_summary = df.sort_values(by='Energy').groupby(['Mat', 'Label'], as_index=False).first()
groupby_mat = df_summary.groupby('Mat')
dict_delta_e = {}
for mat, df in groupby_mat:
# calculate the delta_e (E_RS - E_ZB)
energy_label_1 = df.loc[df['Label'] == 1].Energy.values
energy_label_0 = df.loc[df['Label'] == 0].Energy.values
# if energy_diff>0 --> rs
# if energy_diff<0 --> zb
if (energy_label_0 and energy_label_1):
# single element numpy array --> convert to scalar
energy_diff = (energy_label_1 - energy_label_0).item(0)
# divide by 2 because it is the energy_diff for each atom
energy_diff = energy_diff / 2.0
else:
logger.error(
"Could not find all the energies needed to calculate required property for material '{0}'".format(mat))
sys.exit(1)
dict_delta_e.update({mat: (energy_diff, energy_label_0, energy_label_1)})
return dict_delta_e
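# Minimal sketch (added for clarity; not part of the original module) of the
# data layout expected by get_energy_diff: one dict per json file, keyed by
# (gIndexRun, gIndexDesc), as produced by classify_rs_zb. The formula and
# energies below are invented for illustration.
def _example_get_energy_diff():
    chemical_formulas = [{(0, 1): 'GaAs'}, {(0, 1): 'GaAs'}]
    energies = [{(0, 1): -8.0}, {(0, 1): -7.4}]  # eV for the 2-atom cell
    labels = [{(0, 1): 0}, {(0, 1): 1}]          # 0: zincblende, 1: rocksalt
    # returns {'GaAs': (delta_e_per_atom, energy_label_0, energy_label_1)}
    return get_energy_diff(chemical_formulas, energies, labels)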
def get_lowest_energy_structures(structure, dict_delta_e):
"""Get lowest energy structure for each material and label type.
Works only with two possible labels for a given material.
.. todo:: Check if it works for multiple frames.
"""
energy = {}
chemical_formula = {}
is_lowest_energy = {}
for (gIndexRun, gIndexDesc), atoms in structure.atoms.items():
if atoms is not None:
energy[gIndexRun, gIndexDesc] = structure.energy_eV[gIndexRun, gIndexDesc]
chemical_formula[gIndexRun, gIndexDesc] = structure.chemical_formula[gIndexRun, gIndexDesc]
lowest_energy_label_0 = dict_delta_e.get(chemical_formula[gIndexRun, gIndexDesc])[1]
lowest_energy_label_1 = dict_delta_e.get(chemical_formula[gIndexRun, gIndexDesc])[2]
if lowest_energy_label_0 > lowest_energy_label_1:
lowest_energy_label_01 = lowest_energy_label_1
else:
lowest_energy_label_01 = lowest_energy_label_0
if energy[gIndexRun, gIndexDesc] == lowest_energy_label_01:
is_lowest_energy[gIndexRun, gIndexDesc] = True
else:
is_lowest_energy[gIndexRun, gIndexDesc] = False
return is_lowest_energy
def write_atomic_features(structure, selected_feature_list, df, dict_delta_e=None,
path=None, filename_suffix='.json', json_file=None):
"""Given the chemical composition, build the descriptor made of atomic features only.
Includes all the frames in the same json file.
.. todo:: Check if it works for multiple frames.
"""
# make dictionary {primary_feature: value} for each structure
# dictionary of a dictionary, key: Mat, value: atomic_features
dict_features = df.set_index('chemical_formula').T.to_dict()
    # label=0: zincblende, label=1: rocksalt (see classify_rs_zb)
    chemical_formula_, energy_, label_ = classify_rs_zb(structure)
    #is_lowest_energy_ = get_lowest_energy_structures(structure, dict_delta_e)
    if structure.isPeriodic:
for (gIndexRun, gIndexDesc), atoms in structure.atoms.items():
if atoms is not None:
# filename is the normalized absolute path
filename = os.path.abspath(os.path.normpath(os.path.join(path,
'{0}{1}'.format(structure.name, filename_suffix))))
                outF = open(filename, 'w')
outF.write("""
{
"data":[""")
cell = structure.atoms[gIndexRun, gIndexDesc].get_cell()
cell = np.transpose(cell)
atoms = structure.atoms[gIndexRun, gIndexDesc]
                chemical_formula = chemical_formula_[gIndexRun, gIndexDesc]
energy = structure.energy_eV[gIndexRun, gIndexDesc]
label = label_[gIndexRun, gIndexDesc]
#target = dict_delta_e.get(chemical_formula_[gIndexRun, gIndexDesc])[0]
target = dict_delta_e.get(chemical_formula)
atomic_features = dict_features[structure.chemical_formula[gIndexRun, gIndexDesc]]
#is_lowest_energy = is_lowest_energy_[gIndexRun,gIndexDesc]
res = {
"checksum": structure.name,
"label": label,
"energy": energy,
#"is_lowest_energy": is_lowest_energy,
"delta_e_rs_zb": target,
"chemical_formula": chemical_formula,
"gIndexRun": gIndexRun,
"gIndexDesc": gIndexDesc,
"cell": cell.tolist(),
"particle_atom_number": map(lambda x: x.number, atoms),
"particle_position": map(lambda x: [x.x, x.y, x.z], atoms),
"atomic_features": atomic_features,
"main_json_file_name": json_file,
}
json.dump(res, outF, indent=2)
outF.write("""
] }""")
outF.flush()
return filename
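# For reference (added comment): each file written by write_atomic_features
# wraps a single record of the form
# {"data": [{"checksum": ..., "label": ..., "energy": ..., "delta_e_rs_zb": ...,
#            "chemical_formula": ..., "gIndexRun": ..., "gIndexDesc": ...,
#            "cell": [[...]], "particle_atom_number": [...],
#            "particle_position": [[...]], "atomic_features": {...},
#            "main_json_file_name": ...}]}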
def r_sigma(row):
"""Calculates r_sigma.
John-Bloch's indicator1: |rp(A) + rs(A) - rp(B) -rs(B)| from Phys. Rev. Lett. 33, 1095 (1974).
Input rp(A), rs(A), rp(B), rs(B)
They need to be given in this order.
"""
    return abs(row[0] + row[1] - row[2] - row[3])
def r_pi(row):
"""Calculates r_pi.
    John-Bloch's indicator2: |rp(A) - rs(A)| + |rp(B) - rs(B)| from Phys. Rev. Lett. 33, 1095 (1974).
Input rp(A), rs(A), rp(B), rs(B)
They need to be given in this order.
    See also: combine_features.
"""
return abs(row[0] - row[1]) + abs(row[2] - row[3])
def e_sqrt_z(row):
"""Calculates e/sqrt(val_Z).
Es/sqrt(Zval) and Ep/sqrt(Zval) from Phys. Rev. B 85, 104104 (2012).
Input Es(A) or Ep(A), val(A) (A-->B)
They need to be given in this order.
"""
return row[0] / math.sqrt(row[1])
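# Minimal sketch (added for clarity; not part of the original module):
# applying the derived descriptors above row-wise, in the same way as
# choose_atomic_features and combine_features do. The radii and energies are
# arbitrary numbers chosen only to show the expected call pattern.
def _example_derived_descriptors():
    row = pd.DataFrame({'rp(A)': [0.62], 'rs(A)': [0.94],
                        'rp(B)': [0.51], 'rs(B)': [0.77],
                        'Es(A)': [-9.0], 'val(A)': [3.0]})
    r_s = row[['rp(A)', 'rs(A)', 'rp(B)', 'rs(B)']].apply(r_sigma, axis=1)
    r_p = row[['rp(A)', 'rs(A)', 'rp(B)', 'rs(B)']].apply(r_pi, axis=1)
    es_z = row[['Es(A)', 'val(A)']].apply(e_sqrt_z, axis=1)
    return r_s, r_p, es_z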
def _get_scaling_factors(columns, metadata_info, energy_unit, length_unit):
"""Calculates characteristic energy and length, given an atomic metadata"""
scaling_factor = []
if columns is not None:
for col in columns:
try:
col_unit = metadata_info[col.split('(', 1)[0]]['units']
                # check allowed values, to avoid problems with the substance - NOT IDEAL
if col_unit == 'J':
scaling_factor.append(uc.convert_unit(1, energy_unit, target_unit='eV'))
# divide all column by e_0
#df.loc[:, col] *= e_0
elif col_unit == 'm':
scaling_factor.append(uc.convert_unit(1, length_unit, target_unit='angstrom'))
# divide all column by e_0
#df.loc[:, col] *= d_0
else:
scaling_factor.append(1.0)
logger.debug("Feature units are not energy nor lengths. "
"No scale to characteristic length.")
except BaseException:
scaling_factor.append(1.0)
logger.debug("Feature units not included in metadata")
return scaling_factor
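# Minimal sketch (added for clarity; not part of the original module) of the
# metadata layout expected by _get_scaling_factors: a dict mapping the bare
# feature name (the part before '(') to a dict with a 'units' entry of 'J' or
# 'm'. The feature names below are placeholders, and the unit strings must be
# understood by ai4materials.utils.unit_conversion.
def _example_scaling_factors():
    metadata_info = {'atomic_homo': {'units': 'J'},
                     'atomic_rs_max': {'units': 'm'}}
    cols = ['atomic_homo(A)', 'atomic_rs_max(B)']
    return _get_scaling_factors(cols, metadata_info, 'eV', 'angstrom')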
def _my_power_2(row):
return pow(row[0], 2)
def _my_power_3(row):
return pow(row[0], 3)
def _my_power_m1(row):
return pow(row[0], -1)
def _my_power_m2(row):
return pow(row[0], -2)
def _my_power_m3(row):
return pow(row[0], -3)
def _my_abs_sqrt(row):
    return math.sqrt(abs(row[0]))
def _my_exp(row):
return exp(row[0])
def _my_exp_power_2(row):
return exp(pow(row[0], 2))
def _my_exp_power_3(row):
return exp(pow(row[0], 3))
def _my_sum(row):
return row[0] + row[1]
def _my_abs_sum(row):
return abs(row[0] + row[1])
def _my_abs_diff(row):
return abs(row[0] - row[1])
def _my_diff(row):
return row[0] - row[1]
def _my_div(row):
return row[0] / row[1]
def _my_sum_power_2(row):
return pow((row[0] + row[1]), 2)
def _my_sum_power_3(row):
return pow((row[0] + row[1]), 3)
def _my_sum_exp(row):
return exp(row[0] + row[1])
def _my_sum_exp_power_2(row):
return exp(pow(row[0] + row[1], 2))
def _my_sum_exp_power_3(row):
return exp(pow(row[0] + row[1], 3))
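# Minimal sketch (added for clarity; not part of the original module): the
# binary helpers above are applied pairwise over feature columns, which is
# exactly what combine_features does below. Column names and values are
# invented for illustration.
def _example_pairwise_combination():
    demo = pd.DataFrame({'atomic_homo(A)': [-5.0, -4.2],
                         'atomic_lumo(B)': [-1.0, -0.8]})
    out = {}
    for subset in itertools.combinations(demo.columns.tolist(), 2):
        cols = '|' + subset[0] + '-' + subset[1] + '|'
        out[cols] = demo[list(subset)].apply(_my_abs_diff, axis=1)
    return pd.DataFrame(out)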
def combine_features(df=None, energy_unit=None, length_unit=None,
metadata_info=None, allowed_operations=None, derived_features=None):
"""Generate combination of features given a dataframe and a list of allowed operations.
For the exponentials, we introduce a characteristic energy/length
converting the
..todo:: Fix under/overflow errors, and introduce handling of exceptions.
"""
if allowed_operations:
logger.info('Selected operations:\n {0}'.format(allowed_operations))
else:
logger.warning('No allowed operations selected.')
# make derived features
if derived_features is not None:
if 'r_sigma' in derived_features:
# calculate r_sigma and r_pi [Phys. Rev. Lett. 33, 1095(1974)]
logger.info('Including rs and rp to allow r_sigma calculation')
radii_s_p = ['atomic_rp_max(A)', 'atomic_rs_max(A)', 'atomic_rp_max(B)', 'atomic_rs_max(B)']
df['r_sigma'] = df[radii_s_p].apply(r_sigma, axis=1)
if 'r_pi' in derived_features:
logger.info('Including rs and rp to allow r_pi calculation')
radii_s_p = ['atomic_rp_max(A)', 'atomic_rs_max(A)', 'atomic_rp_max(B)', 'atomic_rs_max(B)']
df['r_pi'] = df[radii_s_p].apply(r_pi, axis=1)
# calculate Es/sqrt(Zval) and Ep/sqrt(Zval)
# e_val_z = ['Es(A)', 'val(A)']
# df['Es(A)/sqrt(Zval(A))'] = df[e_val_z].apply(e_sqrt_z, axis=1)
# e_val_z = ['Es(B)', 'val(B)']
# df['Es(B)/sqrt(Zval(B))'] = df[e_val_z].apply(e_sqrt_z, axis=1)
#
# e_val_z = ['Ep(A)', 'val(A)']
# df['Ep(A)/sqrt(Zval(A))'] = df[e_val_z].apply(e_sqrt_z, axis=1)
# e_val_z = ['Ep(B)', 'val(B)']
# df['Ep(B)/sqrt(Zval(B))'] = df[e_val_z].apply(e_sqrt_z, axis=1)
columns_ = df.columns.tolist()
# define subclasses of features (see Phys. Rev. Lett. 114, 105503(2015) Supp. info. pag.1)
# make a dictionary {feature: subgroup}
# features belonging to a0 will not be combined, just added at the end
# dict_features = {
# u'val(B)': 'a0', u'val(A)': 'a0',
#
# u'period__el0':'a0',
# u'period__el1':'a0',
# u'atomic_number__el0': 'a0',
# u'atomic_number__el1': 'a0',
# u'group__el0': 'a0',
# u'group__el1': 'a0',
#
# u'atomic_ionization_potential__el0': 'a1',
# u'atomic_ionization_potential__el1': 'a1',
# u'atomic_electron_affinity__el0': 'a1',
# u'atomic_electron_affinity__el1': 'a1',
# u'atomic_homo_lumo_diff__el0': 'a1',
# u'atomic_homo_lumo_diff__el1': 'a1',
# u'atomic_electronic_binding_energy_el0': 'a1',
# u'atomic_electronic_binding_energy_el1': 'a1',
#
#
# u'HOMO(A)': 'a2', u'LUMO(A)': 'a2', u'HOMO(B)': 'a2', u'LUMO(B)': 'a2',
# u'HL_gap_AB': 'a2',
# u'Ebinding_AB': 'a2',
#
# u'atomic_rs_max__el0': 'a3',
# u'atomic_rs_max__el1': 'a3',
# u'atomic_rp_max__el0': 'a3',
# u'atomic_rp_max__el1': 'a3',
# u'atomic_rd_max__el0': 'a3',
# u'atomic_rd_max__el1': 'a3',
# u'atomic_r_by_2_dimer__el0': 'a3',
# u'atomic_r_by_2_dimer__el1': 'a3',
#
# u'd_AB': 'a3',
# u'r_sigma': 'a3', u'r_pi': 'a3',
#
# u'Eh': 'a4', u'C': 'a4'
# }
dict_features = {
u'period': 'a0',
u'atomic_number': 'a0',
u'group': 'a0',
u'atomic_ionization_potential': 'a1',
u'atomic_electron_affinity': 'a1',
u'atomic_homo_lumo_diff': 'a1',
u'atomic_electronic_binding_energy': 'a1',
u'atomic_homo': 'a2', u'atomic_lumo': 'a2',
u'atomic_rs_max': 'a3',
u'atomic_rp_max': 'a3',
u'atomic_rd_max': 'a3',
u'atomic_r_by_2_dimer': 'a3',
u'r_sigma': 'a3', u'r_pi': 'a3'
}
# standardize the data -
# we cannot reproduce the PRL if we standardize the data
#df_a0 = (df_a0 - df_a0.mean()) / (df_a0.max() - df_a0.min())
#df_a1 = (df_a1 - df_a1.mean()) / (df_a1.max() - df_a1.min())
#df_a2 = (df_a2 - df_a2.mean()) / (df_a2.max() - df_a2.min())
#df_a3 = (df_a3 - df_a3.mean()) / (df_a3.max() - df_a3.min())
#df_a4 = (df_a4 - df_a4.mean()) / (df_a4.max() - df_a4.min())
# df_a0 = df[[col for col in columns_ if dict_features.get(col)=='a0']].astype('float32')
df_a0 = df[[col for col in columns_ if dict_features.get(col.split('(', 1)[0]) == 'a0']].astype('float32')
df_a1 = df[[col for col in columns_ if dict_features.get(col.split('(', 1)[0]) == 'a1']].astype('float32')
df_a2 = df[[col for col in columns_ if dict_features.get(col.split('(', 1)[0]) == 'a2']].astype('float32')
df_a3 = df[[col for col in columns_ if dict_features.get(col.split('(', 1)[0]) == 'a3']].astype('float32')
df_a4 = df[[col for col in columns_ if dict_features.get(col.split('(', 1)[0]) == 'a4']].astype('float32')
col_a0 = df_a0.columns.tolist()
col_a1 = df_a1.columns.tolist()
col_a2 = df_a2.columns.tolist()
col_a3 = df_a3.columns.tolist()
col_a4 = df_a4.columns.tolist()
    # this list will at the end contain all the dataframes created
df_list = []
df_b0_list = []
df_b1_list = []
df_b2_list = []
df_b3_list = []
df_c3_list = []
df_d3_list = []
df_e3_list = []
df_f1_list = []
df_f2_list = []
df_f3_list = []
df_x1_list = []
df_x2_list = []
df_x_list = []
# create b0: absolute differences and sums of a0
# this is not in the PRL.
for subset in itertools.combinations(col_a0, 2):
if '+' in allowed_operations:
cols = ['(' + subset[0] + '+' + subset[1] + ')']
data = df_a0[list(subset)].apply(_my_sum, axis=1)
df_b0_list.append(pd.DataFrame(data, columns=cols))
if '-' in allowed_operations:
cols = ['(' + subset[0] + '-' + subset[1] + ')']
data = df_a0[list(subset)].apply(_my_diff, axis=1)
df_b0_list.append(pd.DataFrame(data, columns=cols))
cols = ['(' + subset[1] + '-' + subset[0] + ')']
            data = df_a0[list(subset)[::-1]].apply(_my_diff, axis=1)
df_b0_list.append(pd.DataFrame(data, columns=cols))
if '|+|' in allowed_operations:
cols = ['|' + subset[0] + '+' + subset[1] + '|']
data = df_a0[list(subset)].apply(_my_abs_sum, axis=1)
df_b0_list.append(pd.DataFrame(data, columns=cols))
if '|-|' in allowed_operations:
cols = ['|' + subset[0] + '-' + subset[1] + '|']
data = df_a0[list(subset)].apply(_my_abs_diff, axis=1)
df_b0_list.append(pd.DataFrame(data, columns=cols))
if '/' in allowed_operations:
cols = [subset[0] + '/' + subset[1]]
data = df_a0[list(subset)].apply(_my_div, axis=1)
df_b0_list.append(pd.DataFrame(data, columns=cols))
cols = [subset[1] + '/' + subset[0]]
            data = df_a0[list(subset)[::-1]].apply(_my_div, axis=1)
df_b0_list.append(pd.DataFrame(data, columns=cols))
# we kept itertools.combinations to make the code more uniform with the binary operations
for subset in itertools.combinations(col_a0, 1):
if '^2' in allowed_operations:
cols = [subset[0] + '^2']
data = df_a0[list(subset)].apply(_my_power_2, axis=1)
df_b0_list.append(pd.DataFrame(data, columns=cols))
if '^3' in allowed_operations:
cols = [subset[0] + '^3']
data = df_a0[list(subset)].apply(_my_power_3, axis=1)
df_b0_list.append(pd.DataFrame(data, columns=cols))
if 'exp' in allowed_operations:
cols = ['exp(' + subset[0] + ')']
data = df_a0[list(subset)].apply(_my_exp, axis=1)
df_b0_list.append(pd.DataFrame(data, columns=cols))
# create b1: absolute differences and sums of a1
for subset in itertools.combinations(col_a1, 2):
if '+' in allowed_operations:
cols = ['(' + subset[0] + '+' + subset[1] + ')']
data = df_a1[list(subset)].apply(_my_sum, axis=1)
df_b1_list.append(pd.DataFrame(data, columns=cols))
if '-' in allowed_operations:
cols = ['(' + subset[0] + '-' + subset[1] + ')']
data = df_a1[list(subset)].apply(_my_diff, axis=1)
df_b1_list.append(pd.DataFrame(data, columns=cols))
if '|+|' in allowed_operations:
cols = ['|' + subset[0] + '+' + subset[1] + '|']
data = df_a1[list(subset)].apply(_my_abs_sum, axis=1)
df_b1_list.append(pd.DataFrame(data, columns=cols))
if '|-|' in allowed_operations:
cols = ['|' + subset[0] + '-' + subset[1] + '|']
data = df_a1[list(subset)].apply(_my_abs_diff, axis=1)
df_b1_list.append(pd.DataFrame(data, columns=cols))
# create b2: absolute differences and sums of a2
for subset in itertools.combinations(col_a2, 2):
if '+' in allowed_operations:
cols = ['(' + subset[0] + '+' + subset[1] + ')']
data = df_a2[list(subset)].apply(_my_sum, axis=1)
df_b2_list.append(pd.DataFrame(data, columns=cols))
if '-' in allowed_operations:
cols = ['(' + subset[0] + '-' + subset[1] + ')']
data = df_a2[list(subset)].apply(_my_diff, axis=1)
df_b2_list.append(pd.DataFrame(data, columns=cols))
if '|+|' in allowed_operations:
cols = ['|' + subset[0] + '+' + subset[1] + '|']
data = df_a2[list(subset)].apply(_my_abs_sum, axis=1)
df_b2_list.append(pd.DataFrame(data, columns=cols))
if '|-|' in allowed_operations:
cols = ['|' + subset[0] + '-' + subset[1] + '|']
data = df_a2[list(subset)].apply(_my_abs_diff, axis=1)
df_b2_list.append(pd.DataFrame(data, columns=cols))
# create b3: absolute differences and sums of a3
for subset in itertools.combinations(col_a3, 2):
if '+' in allowed_operations:
cols = ['(' + subset[0] + '+' + subset[1] + ')']
data = df_a3[list(subset)].apply(_my_sum, axis=1)
df_b3_list.append(pd.DataFrame(data, columns=cols))
if '-' in allowed_operations:
cols = ['(' + subset[0] + '-' + subset[1] + ')']
data = df_a3[list(subset)].apply(_my_diff, axis=1)
df_b3_list.append(pd.DataFrame(data, columns=cols))
if '|+|' in allowed_operations:
cols = ['|' + subset[0] + '+' + subset[1] + '|']
data = df_a3[list(subset)].apply(_my_abs_sum, axis=1)
df_b3_list.append(pd.DataFrame(data, columns=cols))
if '|-|' in allowed_operations:
cols = ['|' + subset[0] + '-' + subset[1] + '|']
data = df_a3[list(subset)].apply(_my_abs_diff, axis=1)
df_b3_list.append(pd.DataFrame(data, columns=cols))
# create c3: two steps:
# 1) squares of a3 - unary operations
# we kept itertools.combinations to make the code more uniform with the binary operations
for subset in itertools.combinations(col_a3, 1):
if '^2' in allowed_operations:
cols = [subset[0] + '^2']
data = df_a3[list(subset)].apply(_my_power_2, axis=1)
df_c3_list.append(pd.DataFrame(data, columns=cols))
if '^3' in allowed_operations:
cols = [subset[0] + '^3']
data = df_a3[list(subset)].apply(_my_power_3, axis=1)
df_c3_list.append(pd.DataFrame(data, columns=cols))
# 2) squares of b3 (only sums) --> sum squared of a3
for subset in itertools.combinations(col_a3, 2):
if '^2' in allowed_operations:
cols = ['(' + subset[0] + '+' + subset[1] + ')^2']
data = df_a3[list(subset)].apply(_my_sum_power_2, axis=1)
df_c3_list.append(pd.DataFrame(data, columns=cols))
if '^3' in allowed_operations:
cols = ['(' + subset[0] + '+' + subset[1] + ')^3']
data = df_a3[list(subset)].apply(_my_sum_power_3, axis=1)
df_c3_list.append(pd.DataFrame(data, columns=cols))
# create d3: two steps:
# 1) exponentials of a3 - unary operations
# we kept itertools.combinations to make the code more uniform with the binary operations
for subset in itertools.combinations(col_a3, 1):
if 'exp' in allowed_operations:
cols = ['exp(' + subset[0] + ')']
# find scaling factor for e_0 or d_0 for scaling
# and multiply each column by the scaling factor
scaling_factors = _get_scaling_factors(list(subset), metadata_info, energy_unit, length_unit)
df_subset = df_a3[list(subset)] * scaling_factors
data = df_subset.apply(_my_exp, axis=1)
df_d3_list.append(pd.DataFrame(data, columns=cols))
# 2) exponentials of b3 (only sums) --> exponential of sum of a3
for subset in itertools.combinations(col_a3, 2):
if 'exp' in allowed_operations:
cols = ['exp(' + subset[0] + '+' + subset[1] + ')']
# find scaling factor for e_0 or d_0 for scaling
# and multiply each column by the scaling factor
scaling_factors = _get_scaling_factors(list(subset), metadata_info, energy_unit, length_unit)
df_subset = df_a3[list(subset)] * scaling_factors
data = df_subset.apply(_my_sum_exp, axis=1)
df_d3_list.append(pd.DataFrame(data, columns=cols))
# create e3: two steps:
# 1) exponentials of squared a3 - unary operations
# we kept itertools.combinations to make the code more uniform with the binary operations
for subset in itertools.combinations(col_a3, 1):
operations = {'exp', '^2'}
if operations <= set(allowed_operations):
cols = ['exp(' + subset[0] + '^2)']
# find scaling factor for e_0 or d_0 for scaling
# and multiply each column by the scaling factor
scaling_factors = _get_scaling_factors(list(subset), metadata_info, energy_unit, length_unit)
df_subset = df_a3[list(subset)] * scaling_factors
data = df_subset.apply(_my_exp_power_2, axis=1)
df_e3_list.append(pd.DataFrame(data, columns=cols))
operations = {'exp', '^3'}
if operations <= set(allowed_operations):
try:
cols = ['exp(' + subset[0] + '^3)']
# find scaling factor for e_0 or d_0 for scaling
# and multiply each column by the scaling factor
scaling_factors = _get_scaling_factors(list(subset), metadata_info, energy_unit, length_unit)
df_subset = df_a3[list(subset)] * scaling_factors
data = df_subset.apply(_my_exp_power_3, axis=1)
df_e3_list.append(pd.DataFrame(data, columns=cols))
except OverflowError as e:
logger.warning('Dropping feature combination that caused under/overflow.\n')
# 2) exponentials of b3 (only sums) --> exponential of sum of a3
for subset in itertools.combinations(col_a3, 2):
operations = {'exp', '^2'}
if operations <= set(allowed_operations):
cols = ['exp((' + subset[0] + '+' + subset[1] + ')^2)']
# find scaling factor for e_0 or d_0 for scaling
# and multiply each column by the scaling factor
scaling_factors = _get_scaling_factors(list(subset), metadata_info, energy_unit, length_unit)
df_subset = df_a3[list(subset)] * scaling_factors
data = df_subset.apply(_my_sum_exp_power_2, axis=1)
df_e3_list.append(pd.DataFrame(data, columns=cols))
operations = {'exp', '^3'}
if operations <= set(allowed_operations):
try:
cols = ['exp((' + subset[0] + '+' + subset[1] + ')^3)']
# find scaling factor for e_0 or d_0 for scaling
# and multiply each column by the scaling factor
scaling_factors = _get_scaling_factors(list(subset), metadata_info, energy_unit, length_unit)
df_subset = df_a3[list(subset)] * scaling_factors
data = df_subset.apply(_my_sum_exp_power_3, axis=1)
df_e3_list.append(pd.DataFrame(data, columns=cols))
except OverflowError as e:
logger.warning('Dropping feature combination that caused under/overflow.\n')
# make dataframes from lists, check if they are not empty
    # we make them here because they are going to be used to further
# combine the features
if not df_a0.empty:
df_list.append(df_a0)
if not df_a1.empty:
df_x1_list.append(df_a1)
df_list.append(df_a1)
if not df_a2.empty:
df_x1_list.append(df_a2)
df_list.append(df_a2)
if not df_a3.empty:
df_x1_list.append(df_a3)
df_list.append(df_a3)
if not df_a4.empty:
df_list.append(df_a4)
if df_b0_list:
df_b0 = pd.concat(df_b0_list, axis=1)
col_b0 = df_b0.columns.tolist()
df_b0.to_csv('./df_b0.csv', index=True)
df_list.append(df_b0)
if df_b1_list:
df_b1 = pd.concat(df_b1_list, axis=1)
col_b1 = df_b1.columns.tolist()
df_x1_list.append(df_b1)
df_list.append(df_b1)
if df_b2_list:
df_b2 = pd.concat(df_b2_list, axis=1)
col_b2 = df_b2.columns.tolist()
df_x1_list.append(df_b2)
df_list.append(df_b2)
if df_b3_list:
df_b3 = pd.concat(df_b3_list, axis=1)
col_b3 = df_b3.columns.tolist()
df_x1_list.append(df_b3)
df_list.append(df_b3)
if df_c3_list:
df_c3 = pd.concat(df_c3_list, axis=1)
col_c3 = df_c3.columns.tolist()
df_x2_list.append(df_c3)
df_list.append(df_c3)
if df_d3_list:
df_d3 = pd.concat(df_d3_list, axis=1)
col_d3 = df_d3.columns.tolist()
df_x2_list.append(df_d3)
df_list.append(df_d3)
if df_e3_list:
df_e3 = | pd.concat(df_e3_list, axis=1) | pandas.concat |
# -*- coding: utf-8 -*-
__author__ = '<NAME>'
"""
This python file contains the class BioforskStation which is a class
to handle the time series and general info of each station.
A BioforskStation can be generated with the use of BioforskStation('Aas'),
where 'Aas' is the name of one of the Bioforsk stations. The list of available
stations at this time is found in ../config/stations.cfg.
"""
import pandas as pd
import numpy as np
import configparser
from datetime import datetime
import os
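# Hedged usage sketch ('Aas' is one of the stations listed in
# ../config/stations.cfg; the quality-control tests that populate
# `station.flags` are assumed to have been run beforehand):
#
#   station = BioforskStation('Aas')
#   station.zero_out()
#   yearly = station.count_flags_per_year(pesd=True)
#   monthly = station.count_flags_per_month()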
class BioforskStation(object):
"""
A container for all applications applied to the data from the
Bioforsk Stations.
:param name_of_station: The name of the location of the station.
:type name_of_station: string
:param remove_partial_years: If you want your data to not show
the first and last year if they are incomplete.
:type remove_partial_years: boolean, default True
"""
conf = configparser.RawConfigParser()
def __init__(self, name_of_station, remove_partial_years=True, path='..'):
"""Sets up an instance"""
self.conf.read(os.path.join(path, 'config', 'stations.cfg'))
self.name = name_of_station
self.data = pd.read_csv(os.path.join(path, 'data', 'raw_data',
'{}.csv'.format(
name_of_station)),
sep=';', parse_dates=True,
index_col='time_measured', dayfirst=True)
self.longitude = self.conf.getfloat(self.name, 'lon')
self.latitude = self.conf.getfloat(self.name, 'lat')
self.altitude = self.conf.getfloat(self.name, 'hgt')
self.station_id = self.conf.getfloat(self.name, 'id')
# Makes sure to fill in NaN data where there are none
# registered in the data file
self.data = self.data.asfreq('H')
self.raw = self.data.copy() # Keeps the raw files
if remove_partial_years:
self.remove_partial_year()
# Sometimes the instrument records -6999 og 6999 instead of NaN
self.data.loc[(self.data['qo'] == -6999) |
(self.data['qo'] == 6999), 'qo'] = np.nan
# Import extraterrestrial irradiation
self.data.loc[:, 'toa'] = pd.read_csv(os.path.join(path, 'data', 'toa',
'{}toa.csv'.format(
self.name)),
header=None).values
# Import clear sky irradiation
clear_sky = pd.read_csv(os.path.join(path, 'data', 'clear_sky',
'{}clear.txt'.format(
self.name)),
header=None, delim_whitespace=True,
parse_dates=True, index_col=0, dayfirst=True)
clear_sky.columns = ['sza', 'qo']
self.data.loc[:, 'sza'] = clear_sky['sza'].values
self.data.loc[:, 'clear_sky'] = clear_sky['qo'].values
# Initializes the flags DataFrame
self.flags = pd.DataFrame(index=self.data.index)
def remove_partial_year(self):
"""Returns removed start and end year if not fully complete"""
if not self.data.index[0].is_year_start:
self.data = self.data[self.data.index.year
!= self.data.index[0].year]
if not self.data.index[-1].is_year_end:
self.data = self.data[self.data.index.year
!= self.data.index[-1].year]
def count_flags_per_year(self, pesd=False):
"""
Returns a count of all flags registered from every test in yearly sum
        :param pesd: If true the data will be the percentage of erroneous data.
:type pesd: boolean, default False
:returns: Pandas DataFrame
"""
if pesd:
flagged = self.flags[self.data['toa'] > 0].groupby(self.flags[
self.data['toa'] > 0].index.year).aggregate(np.mean) * 100
# Offset flag is a percentage of every data
flagged.loc[:, 'Offset'] = self.flags.loc[:, 'Offset'].groupby(
self.flags.index.year).aggregate(np.mean) * 100
else:
flagged = self.flags.groupby(self.flags.index.year).aggregate(sum)
flagged['Sum'] = flagged.sum(axis=1)
# Change column names for easier read. Changed some of the names according
# to thesis.
flagged.name = self.name
flagged.index.name = 'Year'
return flagged
def count_flags_per_month(self, pesd=False):
"""
        Returns a count of all flags registered from every test in monthly sums
        :param pesd: If true the data will be the percentage of erroneous data.
:type pesd: boolean, default False
:returns: Pandas DataFrame
"""
if pesd:
flagged = self.flags[self.data['toa'] > 0].groupby(self.flags[
self.data['toa'] > 0].index.month).aggregate(np.mean) * 100
# Offset flag is a percentage of every data
flagged.loc[:, 'Offset'] = self.flags.loc[:, 'Offset'].groupby(
self.flags.index.month).aggregate(np.mean) * 100
else:
flagged = self.flags.groupby(
self.flags.index.month).aggregate(sum)
flagged.loc[:, 'Offset'] = self.flags.loc[:, 'Offset'].groupby(
self.flags.index.month).aggregate(sum)
flagged['Sum'] = flagged.sum(axis=1)
flagged.index = ['January', 'February', 'March', 'April', 'May',
'June', 'July', 'August', 'September', 'October',
'November', 'December']
flagged.name = self.name
flagged.index.name = 'Month'
return flagged
def zero_out(self):
"""
Sets all of the global irradiance values to zero if the corresponding
toa value is zero or the registered value is negative
"""
self.data.loc[(self.data['toa'] == 0) |
(self.data['qo'] < 0), 'qo'] = 0
def flag_info(self, pesd=True, start_date='', end_date=''):
"""
Returns info about the flagged data
        :param pesd: If true the data will be the percentage of erroneous data.
:type pesd: boolean, default True
:param start_date: Start date on the format 'yyyy-mm-dd hh:mm',
recursive need from years.
:type start_date: string, default ''
:param end_date: End date on the format 'yyyy-mm-dd hh:mm',
recursive need from years.
:type end_date: string, default ''
:returns: Pandas DataFrame
"""
if start_date == '':
start_date = self.flags.index[0]
if end_date == '':
end_date = self.flags.index[-1]
if pesd:
offset = np.mean(self.flags[start_date:
end_date].iloc[:, :1]) * 100
flagged = offset.append(np.mean(
self.flags[start_date:end_date][self.data[
'toa'] > 0].iloc[:, 1:]) * 100)
else:
flagged = np.sum(self.flags[start_date:end_date])
flag_table = | pd.DataFrame(flagged.values, flagged.index) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 10 16:42:22 2018
@author: Ifrana
"""
import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import precision_recall_curve
import matplotlib.pyplot as plt
from sklearn.utils.fixes import signature
from sklearn.metrics import average_precision_score, roc_curve, auc
df_csv = | pd.read_csv('D:\Data TimeSeries (Hourly)\Classification/for_RF_model.csv', delimiter=',') | pandas.read_csv |
#%%
import pandas as pd
dtypes = {"placekey":"object", "safegraph_place_id":"object", "parent_placekey":"object", "parent_safegraph_place_id":"object",
"location_name":"category", "street_address":"category", "city":"category", "region":"category", "postal_code":"int32",
"safegraph_brand_ids":"object", "brands":"category", "date_range_start":"object", "date_range_end":"object"}
df = pd.read_csv('PATH', dtype=dtypes, encoding='utf-8') #csv name: mobility-patterns-backfilled_2020-01-01_2021-04-13.csv
df.head()
#%%
cadenas = ["Chick-fil-A", 'Shake Shack', 'Five Guys', "Wendy's", "KFC", "Burger King", "Popeyes Louisiana Kitchen", "Subway", "Pizza Hut", "Taco Bell", "Starbucks", "Domino's Pizza", "7-Eleven"]
for cad in cadenas:
data = df[df['brands'] == cad]
print(cad, len(set(data['safegraph_place_id']))) #Para saber cuántas tiendas hay en Houston de cada cadena.
#%%
len(set(data['safegraph_place_id']))
# %%
lats = list(set(data['latitude']))
lons = []
for lat in lats:
dt = | pd.DataFrame(data[data['latitude'] == lat]) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Mon 9/2/14
Using python pandas to post process csv output from Pacejka Tire model
@author: <NAME>, 2014
"""
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
import pylab as py
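# Hedged usage sketch (csv file names are placeholders for the steady-state
# and optional transient-slip output of the Pacejka tire model):
#
#   tire = PacTire_panda('pacTire_steadyState.csv', 'pacTire_transient.csv')
#   tire.plot_kappa_FMpure()
#   tire.plot_alpha_FMpure()
#   plt.show()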
class PacTire_panda:
'''
@class: loads, manages and plots various output from Pacejka Tire model
'''
def __init__(self,fileName_steadyState,fileName_transient="none"):
'''
Input:
filename: full filename w/ input data in csv form
'''
# first file is the steady state magic formula output
self._m_filename_SS = fileName_steadyState
# optional: compare against the transient slip output
if( fileName_transient=="none"):
self._use_transient_slip = False
else:
self._use_transient_slip = True
self._m_filename_T = fileName_transient
df_T = pd.read_csv(self._m_filename_T, header=0, sep=',') # index_col=0,
self._m_df_T = df_T
# Header Form:
# time,kappa,alpha,gamma,kappaP,alphaP,gammaP,Vx,Vy,Fx,Fy,Fz,Mx,My,Mz,Fxc,Fyc,Mzc
df = pd.read_csv(self._m_filename_SS, header=0, sep=',') # index_col=0,
self._m_df = df
# @brief plot Forces, moments, pure slip vs. kappa
def plot_kappa_FMpure(self,adams_Fx_tab_filename="none",adams_Mz_tab_filename="none"):
# force v. kappa
figF = plt.figure()
df_kappa = pd.DataFrame(self._m_df, columns = ['kappa','Fx','Fz','Fxc'])
# always create the plot, axes handle
axF = df_kappa.plot(linewidth=2.0,x='kappa',y=['Fxc','Fz'])
axF.set_xlabel(r'$\kappa $ ')
axF.set_ylabel('Force [N]')
axF.set_title(r'$\kappa *$, pure long. slip')
if( adams_Fx_tab_filename != "none"):
# load in the adams junk data, Fx vs. kappa %
dfx_adams = pd.read_table(adams_Fx_tab_filename,sep='\t',header=0)
dfx_adams['Longitudinal Slip'] = dfx_adams['Longitudinal Slip'] / 100.0
axF.plot(dfx_adams['Longitudinal Slip'],dfx_adams['Longitudinal Force'],'r--',linewidth=1.5,label="Fx Adams")
# plt.legend(loc='best')
# compare transient slip output also?
if( self._use_transient_slip):
df_ts = pd.DataFrame(self._m_df_T, columns = ['kappa','Fx','Fxc'])
axF.plot(df_ts['kappa'],df_ts['Fx'],'c--',linewidth=1.0,label='Fx transient')
axF.plot(df_ts['kappa'],df_ts['Fxc'],'k-*',linewidth=1.0,label='Fxc transient')
axF.legend(loc='best')
figM = plt.figure()
df_kappaM = pd.DataFrame(self._m_df, columns = ['kappa','Mx','My','Mz','Mzc'])
axM = df_kappaM.plot(linewidth=2.0,x='kappa',y=['Mx','My','Mzc'])
if( adams_Mz_tab_filename != "none"):
dfmz_adams = pd.read_table(adams_Mz_tab_filename,sep='\t',header=0)
dfmz_adams['longitudinal_slip'] = dfmz_adams['longitudinal_slip']/100.
axM.plot(dfmz_adams['longitudinal_slip'], dfmz_adams['aligning_moment'],'r--',linewidth=1.5,label="Mz Adams")
if( self._use_transient_slip):
df_tsM = pd.DataFrame(self._m_df_T, columns = ['kappa','Mzc','Mzx','Mzy','M_zrc','t','s'])
axM.plot(df_tsM['kappa'],df_tsM['Mzc'],'k-*',linewidth=1.0,label='Mzc transient')
axM.plot(df_tsM['kappa'], df_tsM['Mzx'],'b--',linewidth=2,label='Mz,x')
axM.plot(df_tsM['kappa'], df_tsM['Mzy'],'g--',linewidth=2,label='Mz,y')
axM.plot(df_tsM['kappa'], df_tsM['M_zrc'],'y--',linewidth=2,label='M_zrc')
axM2 = axM.twinx()
axM2.plot(df_tsM['kappa'], df_tsM['t'],'b.',linewidth=1.0,label='t trail')
axM2.plot(df_tsM['kappa'], df_tsM['s'],'c.',linewidth=1.0,label='s arm')
axM2.set_ylabel('length [m]')
axM2.legend(loc='lower right')
axM.set_xlabel(r'$\kappa $ ')
axM.set_ylabel('Moment [N-m]')
axM.legend(loc='best')
axM.set_title(r'$\kappa $, pure long. slip')
# @brief plot Forces, Moments, pure slip vs. alpha
def plot_alpha_FMpure(self,adams_Fy_tab_filename="none",adams_Mz_tab_filename="none"):
figF = plt.figure()
df_sy = pd.DataFrame(self._m_df, columns = ['alpha','Fy','Fz','Fyc'])
axF = df_sy.plot(linewidth=2.0,x='alpha', y=['Fyc','Fz'])# y=['Fy','Fyc','Fz'])
if( adams_Fy_tab_filename != "none"):
# load in adams tabular data for Fy vs. alpha [deg]
dfy_adams = pd.read_table(adams_Fy_tab_filename,sep='\t',header=0)
axF.plot(dfy_adams['slip_angle'],dfy_adams['lateral_force'],'r--',linewidth=1.5,label="Fy Adams")
# compare transient slip output
if(self._use_transient_slip):
# already have this in df_T
df_ts = pd.DataFrame(self._m_df_T, columns = ['alpha','Fyc','Fy'] )
# axF.plot(df_ts['alpha'], df_ts['Fy'],'c-*',linewidth=2.0,label="Fy Transient")
axF.plot(df_ts['alpha'], df_ts['Fyc'],'k-*',linewidth=1.0,label="Fyc Transient")
axF.set_xlabel(r'$\alpha $[deg]')
axF.set_ylabel('Force [N]')
axF.legend(loc='best')
axF.set_title(r'$\alpha $, pure lateral slip')
figM = plt.figure()
df_M = pd.DataFrame(self._m_df, columns = ['alpha','Mx','My','Mz','Mzc'])
axM = df_M.plot(linewidth=2.0,x='alpha',y=['Mx','My','Mzc']) # ,'Mz'])
if( adams_Mz_tab_filename != "none"):
dfmz_adams = pd.read_table(adams_Mz_tab_filename,sep='\t',header=0)
axM.plot(dfmz_adams['slip_angle'], dfmz_adams['aligning_moment'],'r--',linewidth=1.5,label="Mz Adams")
# also plot transient slip outputs
if(self._use_transient_slip):
# already have this in df_T
df_tsM = | pd.DataFrame(self._m_df_T, columns = ['alpha','Mz','Mzc','MP_z','M_zr','t','s'] ) | pandas.DataFrame |
import pandas as pd
from pandas._testing import assert_frame_equal
import pytest
import numpy as np
from scripts.normalize_data import (
remove_whitespace_from_column_names,
normalize_expedition_section_cols,
remove_bracket_text,
remove_whitespace,
ddm2dec,
remove_empty_unnamed_columns,
normalize_columns
)
class TestRemoveSpacesFromColumns:
def test_replaces_leading_and_trailing_spaces_from_columns(self):
df = pd.DataFrame(columns=[' Aa', 'Bb12 ', ' Cc', 'Dd ', ' Ed Ed ', ' 12 ' ])
res = remove_whitespace_from_column_names(df)
        assert res == ['Aa', 'Bb12', 'Cc', 'Dd', 'Ed Ed', '12']
def test_returns_columns_if_no_leading_and_trailing_spaces(self):
df = pd.DataFrame(columns=['Aa', 'Bb', 'Cc', 'Dd', 'Ed Ed'])
res = remove_whitespace_from_column_names(df)
        assert res == ['Aa', 'Bb', 'Cc', 'Dd', 'Ed Ed']
class TestNormalizeExpeditionSectionCols:
    def test_dataframe_does_not_change_if_expedition_section_columns_exist(self):
data = {
"Col": [0, 1],
"Exp": ["1", "10"],
"Site": ["U1", "U2"],
"Hole": ["h", "H"],
"Core": ["2", "20"],
"Type": ["t", "T"],
"Section": ["3", "3"],
"A/W": ["a", "A"],
}
df = pd.DataFrame(data)
expected = pd.DataFrame(data)
df = normalize_expedition_section_cols(df)
assert_frame_equal(df, expected)
    def test_dataframe_does_not_change_if_expedition_section_Sample_exist(self):
data = {
"Col": [0, 1],
"Sample": ["1-U1h-2t-3-a", "10-U2H-20T-3-A"],
"Exp": ["1", "10"],
"Site": ["U1", "U2"],
"Hole": ["h", "H"],
"Core": ["2", "20"],
"Type": ["t", "T"],
"Section": ["3", "3"],
"A/W": ["a", "A"],
}
df = pd.DataFrame(data)
expected = pd.DataFrame(data)
df = normalize_expedition_section_cols(df)
assert_frame_equal(df, expected)
    def test_dataframe_does_not_change_if_expedition_section_Label_exist(self):
data = {
"Col": [0, 1],
"Label ID": ["1-U1h-2t-3-a", "10-U2H-20T-3-A"],
"Exp": ["1", "10"],
"Site": ["U1", "U2"],
"Hole": ["h", "H"],
"Core": ["2", "20"],
"Type": ["t", "T"],
"Section": ["3", "3"],
"A/W": ["a", "A"],
}
df = pd.DataFrame(data)
expected = pd.DataFrame(data)
df = normalize_expedition_section_cols(df)
assert_frame_equal(df, expected)
    def test_adds_missing_expedition_section_using_Label(self):
data = {
"Col": [0, 1],
"Label ID": ["1-U1h-2t-3-a", "10-U2H-20T-3-A"],
}
df = pd.DataFrame(data)
data = {
"Col": [0, 1],
"Label ID": ["1-U1h-2t-3-a", "10-U2H-20T-3-A"],
"Exp": ["1", "10"],
"Site": ["U1", "U2"],
"Hole": ["h", "H"],
"Core": ["2", "20"],
"Type": ["t", "T"],
"Section": ["3", "3"],
"A/W": ["a", "A"],
}
expected = pd.DataFrame(data)
df = normalize_expedition_section_cols(df)
assert_frame_equal(df, expected)
    def test_adds_missing_expedition_section_using_Sample(self):
data = {
"Col": [0, 1],
"Sample": ["1-U1h-2t-3-a", "10-U2H-20T-3-A"],
}
df = pd.DataFrame(data)
data = {
"Col": [0, 1],
"Sample": ["1-U1h-2t-3-a", "10-U2H-20T-3-A"],
"Exp": ["1", "10"],
"Site": ["U1", "U2"],
"Hole": ["h", "H"],
"Core": ["2", "20"],
"Type": ["t", "T"],
"Section": ["3", "3"],
"A/W": ["a", "A"],
}
expected = pd.DataFrame(data)
df = normalize_expedition_section_cols(df)
assert_frame_equal(df, expected)
def test_handles_missing_aw_col(self):
data = {
"Col": [0, 1],
"Sample": ["1-U1h-2t-3", "10-U2H-20T-3"],
"Exp": ["1", "10"],
"Site": ["U1", "U2"],
"Hole": ["h", "H"],
"Core": ["2", "20"],
"Type": ["t", "T"],
"Section": ["3", "3"],
}
df = pd.DataFrame(data)
expected = pd.DataFrame(data)
df = normalize_expedition_section_cols(df)
assert_frame_equal(df, expected)
def test_handles_no_data(self):
data = {
"Col": [0],
"Sample": ["No data this hole"],
}
df = pd.DataFrame(data)
data = {
"Col": [0],
"Sample": ["No data this hole"],
"Exp": [None],
"Site": [None],
"Hole": [None],
"Core": [None],
"Type": [None],
"Section": [None],
"A/W": [None],
}
expected = pd.DataFrame(data)
df = normalize_expedition_section_cols(df)
assert_frame_equal(df, expected)
def test_otherwise_raise_error(self):
df = pd.DataFrame({"foo": [1]})
message = "File does not have the expected columns."
with pytest.raises(ValueError, match=message):
normalize_expedition_section_cols(df)
class TestRemoveBracketText:
def test_removes_text_within_brackets_at_end_of_cell(self):
df = pd.DataFrame(['aa [A]', 'bb [BB]', 'cc [C] ', 'dd [dd] '])
expected = pd.DataFrame(['aa', 'bb', 'cc', 'dd'])
remove_bracket_text(df)
assert_frame_equal(df, expected)
def test_does_not_remove_text_within_brackets_at_start_of_cell(self):
df = pd.DataFrame(['[A] aa', '[BB] bb', '[C] cc ', ' [dd] dd '])
expected = df.copy()
remove_bracket_text(df)
assert_frame_equal(df, expected)
def test_does_not_remove_text_within_brackets_in_middle_of_cell(self):
df = pd.DataFrame(['aa [A] aa', 'bb [BB] bb', ' cc [C] cc ', ' dd [dd] dd '])
expected = df.copy()
remove_bracket_text(df)
assert_frame_equal(df, expected)
def test_removes_letters_numbers_punctuation_within_brackets(self):
df = pd.DataFrame(['aa [A A]', 'bb [BB 123]', 'cc [123-456.] '])
expected = pd.DataFrame(['aa', 'bb', 'cc'])
remove_bracket_text(df)
assert_frame_equal(df, expected)
class TestRemoveWhitespaceFromDataframe:
def test_remove_leading_and_trailing_spaces_from_dataframe(self):
data = {
'A': ['A', 'B ', ' C', 'D ', ' Ed ', ' 1 '],
'B': ['Aa', 'Bb ', ' Cc', 'Dd ', ' Ed Ed ', ' 11 '],
}
df = pd.DataFrame(data)
data2 = {
'A': ['A', 'B', 'C', 'D', 'Ed', '1'],
'B': ['Aa', 'Bb', 'Cc', 'Dd', 'Ed Ed', '11'],
}
expected = pd.DataFrame(data2)
remove_whitespace(df)
assert_frame_equal(df, expected)
def test_ignores_numeric_columns(self):
data = {
'A': ['A', 'B ', ' C'],
'B': [1, 2, 3],
'C': [1.1, 2.2, 3.3],
}
df = pd.DataFrame(data)
data2 = {
'A': ['A', 'B', 'C'],
'B': [1, 2, 3],
'C': [1.1, 2.2, 3.3],
}
expected = pd.DataFrame(data2)
remove_whitespace(df)
assert_frame_equal(df, expected)
def test_handles_empty_strings(self):
data = {'A': ['A', 'B ', ' C', ' ']}
df = pd.DataFrame(data)
data2 = {'A': ['A', 'B', 'C', '']}
expected = pd.DataFrame(data2)
remove_whitespace(df)
assert_frame_equal(df, expected)
def test_converts_nan_to_empty_strings(self):
data = {'A': ['A', 'B ', ' C', np.nan]}
df = pd.DataFrame(data)
data2 = {'A': ['A', 'B', 'C', '']}
expected = pd.DataFrame(data2)
remove_whitespace(df)
assert_frame_equal(df, expected)
class Testddm2dec:
    def test_returns_decimal_degree_for_degree_decimal_minute(self):
string = '25 51.498 N'
assert ddm2dec(string) == 25.8583
def test_works_with_decimal(self):
string = '25 .498 N'
assert ddm2dec(string) == 25.0083
def test_works_with_integer(self):
string = '25 20 N'
assert ddm2dec(string) == 25.333333333333332
def test_works_with_direction_first(self):
string = 'N 25 51.498'
assert ddm2dec(string) == 25.8583
@pytest.mark.parametrize("string,result", [("25° 51.498'N", 25.8583), ("25°51.498'N", 25.8583)])
def test_works_with_degree_minute_notation(self, string, result):
assert ddm2dec(string) == result
@pytest.mark.parametrize("string,result", [('25 51.498 e', 25.8583), ('25 51.498 w', -25.8583), ('25 51.498 S', -25.8583), ('25 51.498 n', 25.8583)])
def test_adds_correct_sign_for_direction(self, string, result):
assert ddm2dec(string) == result
class TestRemoveEmptyUnnamedColumns:
def test_remove_unnamed_columns_with_no_content(self):
data = {'A': [1, 2, 3], 'B': ['a', 'b', 'c'], 'Unnamed: 12': [None, None, None]}
df = pd.DataFrame(data)
data = {'A': [1, 2, 3], 'B': ['a', 'b', 'c']}
expected = pd.DataFrame(data)
remove_empty_unnamed_columns(df)
assert_frame_equal(df, expected)
def test_does_change_named_columns_without_content(self):
data = {'A': [1, 2, 3], 'B': ['a', 'b', 'c'], 'C': [None, None, None]}
df = pd.DataFrame(data)
expected = pd.DataFrame(data)
remove_empty_unnamed_columns(df)
assert_frame_equal(df, expected)
def test_does_change_unnamed_columns_with_content(self):
data = {'A': [1, 2, 3], 'B': ['a', 'b', 'c'], 'Unnamed: 12': ['a', None, None]}
df = pd.DataFrame(data)
expected = pd.DataFrame(data)
remove_empty_unnamed_columns(df)
assert_frame_equal(df, expected)
class TestNormalizeColumns:
def test_replace_column_name_with_value_from_columns_mapping(self):
columns_mapping = {"aa": "A"}
data = {"aa": [1]}
df = pd.DataFrame(data)
data = {"A": [1]}
expected = pd.DataFrame(data)
normalize_columns(df, columns_mapping)
assert_frame_equal(df, expected)
def test_replace_multiple_column_name_with_value_from_columns_mapping(self):
columns_mapping = {"aa": "A", "b b": "B"}
data = {"aa": [1], "b b": [2]}
df = pd.DataFrame(data)
data = {"A": [1], "B": [2]}
expected = pd.DataFrame(data)
normalize_columns(df, columns_mapping)
assert_frame_equal(df, expected)
def test_does_not_affect_columns_not_in_columns_mapping(self):
columns_mapping = {"aa": "A", "b b": "B"}
data = {"aa": [1], "b b": [2], "cc": [3]}
df = pd.DataFrame(data)
data = {"A": [1], "B": [2], "cc": [3]}
expected = pd.DataFrame(data)
normalize_columns(df, columns_mapping)
assert_frame_equal(df, expected)
def test_does_not_affect_columns_if_columns_mapping_has_no_value(self):
columns_mapping = {"aa": None, "bb": "", "cc": np.nan}
data = {"aa": [1], "b b": [2], "cc": [3]}
df = pd.DataFrame(data)
expected = pd.DataFrame(data)
normalize_columns(df, columns_mapping)
| assert_frame_equal(df, expected) | pandas._testing.assert_frame_equal |
#!/usr/bin/env python
import json
import logging
import sys
import pandas as pd
import numpy as np
from functools import reduce
# from typing import Optional
from cascade_at.executor.args.arg_utils import ArgumentList
from cascade_at.core.log import get_loggers, LEVELS
from cascade_at.executor.args.args import ModelVersionID, BoolArg, LogLevel, StrArg
LOG = get_loggers(__name__)
ARG_LIST = ArgumentList([
ModelVersionID(),
BoolArg('--make', help='whether or not to make the file structure for the cascade'),
BoolArg('--configure', help='whether or not to configure for the IHME cluster'),
LogLevel(),
StrArg('--json-file', help='for testing, pass a json file directly by filepath'
'instead of referencing a model version ID.'),
StrArg('--test-dir', help='if set, will save files to the directory specified.'
'Invalidated if --configure is set.')
])
class CovariateReference:
def __init__(self, inputs):
self.inputs = inputs
self.cov_ids = {c.covariate_id: c.name
for c in inputs.covariate_specs.covariate_specs
if c.study_country == 'country'}
self.loc_df = inputs.location_dag.df
self.pop_df = inputs.population.configure_for_dismod()
def configure_for_dismod(self, covariate_data):
from cascade_at.inputs.utilities.covariate_weighting import CovariateInterpolator
def handle_exception(**kwds):
            try:
                return cov.interpolate(**kwds)
            except Exception:
                return None
cov_df = covariate_data.configure_for_dismod(self.pop_df, self.loc_df)
cov = CovariateInterpolator(cov_df, self.pop_df)
cov_id = covariate_data.covariate_id
cov_name = self.cov_ids[cov_id]
cov_df = pd.DataFrame([{'location_id': loc_id, 'sex_id': sex_id,
cov_name: handle_exception(loc_id = loc_id,sex_id = sex_id, age_lower=0, age_upper=100, time_lower = 1970, time_upper = 2020)}
for loc_id in sorted(cov_df.location_id.unique())
for sex_id in (1,2,3)])
self.inputs.transform_country_covariates(cov_df)
return cov_df
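def _example_covariate_reference(inputs, covariate_data):
    # Hedged usage sketch (not called by the pipeline): `inputs` is a
    # configured MeasurementInputs object and `covariate_data` one of its
    # country-covariate data objects; both arguments are placeholders
    # for illustration only.
    cov_ref = CovariateReference(inputs)
    return cov_ref.configure_for_dismod(covariate_data)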
def all_locations(inputs, settings):
import json
import dill
from cascade_at.inputs.measurement_inputs import MeasurementInputs
covariate_id = [i.country_covariate_id for i in settings.country_covariate]
inputs2 = MeasurementInputs(model_version_id=settings.model.model_version_id,
gbd_round_id=settings.gbd_round_id,
decomp_step_id=settings.model.decomp_step_id,
csmr_cause_id = settings.model.add_csmr_cause,
crosswalk_version_id=settings.model.crosswalk_version_id,
country_covariate_id=covariate_id,
conn_def='epi',
location_set_version_id=settings.location_set_version_id)
inputs2.get_raw_inputs()
inputs2.configure_inputs_for_dismod(settings)
return inputs2
def main():
args = ARG_LIST.parse_args(sys.argv[1:])
logging.basicConfig(level=LEVELS[args.log_level])
from cascade_at.settings.settings import load_settings
with open(args.json_file) as f:
settings_json = json.load(f)
settings = load_settings(settings_json=settings_json)
if 0:
from cascade_at.executor.configure_inputs import configure_inputs
global context, inputs
context, inputs = configure_inputs(
model_version_id = args.model_version_id,
make = False,
configure = False,
test_dir=args.test_dir,
json_file=args.json_file,
)
inputs2 = all_locations(inputs, settings)
for d in inputs, inputs2:
print()
for integrand in sorted(d.dismod_data.measure.unique()):
print (integrand, len(d.dismod_data[d.dismod_data.measure == integrand]), 'locations', len(d.dismod_data.loc[d.dismod_data.measure == integrand].location_id.unique()))
if 1:
import shutil
import dill
with open(f'/tmp/cascade_dir/data/{args.model_version_id}/inputs/inputs1.p', 'wb') as stream:
dill.dump(inputs, stream)
with open(f'/tmp/cascade_dir/data/{args.model_version_id}/inputs/inputs2.p', 'wb') as stream:
dill.dump(inputs2, stream)
shutil.copy2(f'/tmp/cascade_dir/data/{args.model_version_id}/inputs/inputs2.p', f'/tmp/cascade_dir/data/{args.model_version_id}/inputs/inputs.p')
from cascade_at.executor.dismod_db import dismod_db
# It seems that dismod_db gets mtall/mtspecific from inputs.p for just the parent and the parents children
# And it seems that the entire set of locations is in inputs.p for mtall and mtspecific.
dismod_db(model_version_id = args.model_version_id,
parent_location_id=inputs.drill_location_start,
fill=True,
test_dir=args.test_dir,
save_fit = False,
save_prior = False)
from cascade_at.executor.run import run
run(model_version_id = args.model_version_id,
jobmon = False,
make = False,
skip_configure = True,
json_file = args.json_file,
test_dir = args.test_dir,
execute_dag = False)
else:
import dill
with open(f'/tmp/cascade_dir/data/{args.model_version_id}/inputs/inputs1.p', 'rb') as stream:
inputs = dill.load(stream)
global covariate_reference, data, asdr, csmr
cov_ref = CovariateReference(inputs)
covariate_reference = reduce(lambda x, y: | pd.merge(x, y) | pandas.merge |
"""
.. module:: repeats
   :synopsis: Repeats (transposon) related functions
.. moduleauthor:: <NAME> <<EMAIL>>
"""
import csv
import subprocess
import os
import gzip
import glob
import logging
logging.basicConfig(level=logging.DEBUG)
LOG = logging.getLogger(__name__)
import uuid
import pandas as PD
import numpy as N
import matplotlib.pylab as P
from jgem import utils as UT
from jgem import fasta as FA
from jgem import filenames as FN
from jgem import bedtools as BT
from jgem import gtfgffbed as GGB
from jgem import assembler2 as A2
from jgem import assembler3 as A3
RMSKPARAMS = dict(
np = 4,
th_uexon=4,
th_bp_ovl=50,
th_ex_ovl=50,
datacode='',
gname='gname',
)
def filter_paths(mdstpre, rdstpre):
ex = UT.read_pandas(rdstpre+'.ex.txt.gz')
def select_chromwise(paths, ex):
npchrs = []
for chrom in paths['chr'].unique():
pchr = paths[paths['chr']==chrom]
echr = ex[ex['chr']==chrom]
exnames = set(echr['name'].values)
#e2gname = UT.df2dict(echr,'name','gname')
idx = [all([x in exnames for x in y.split('|')]) for y in pchr['name']]
npchrs.append(pchr[idx])
return PD.concat(npchrs, ignore_index=True)
paths = GGB.read_bed(mdstpre+'.paths.withse.bed.gz')
npaths = select_chromwise(paths, ex)
GGB.write_bed(npaths, rdstpre+'.paths.withse.bed.gz', ncols=12)
paths = GGB.read_bed(mdstpre+'.paths.txt.gz')
npaths = select_chromwise(paths, ex)
GGB.write_bed(npaths, rdstpre+'.paths.txt.gz', ncols=12)
def filter_sjexdf(mdstpre, rdstpre):
exdf = UT.read_pandas(mdstpre+'.exdf.txt.gz', names=A3.EXDFCOLS)
sedf = UT.read_pandas(mdstpre+'.sedf.txt.gz', names=A3.EXDFCOLS)
exdf = PD.concat([exdf, sedf], ignore_index=True)
sjdf = UT.read_pandas(mdstpre+'.sjdf.txt.gz', names=A3.SJDFCOLS)
ex = UT.read_pandas(rdstpre+'.ex.txt.gz')
sj = UT.read_pandas(rdstpre+'.sj.txt.gz')
def select_chromwise_df(exdf, ex):
npchrs = []
for chrom in exdf['chr'].unique():
pchr = exdf[exdf['chr']==chrom]
echr = ex[ex['chr']==chrom]
exnames = set(echr['name'].values)
idx = [x in exnames for x in pchr['name']]
npchrs.append(pchr[idx])
return PD.concat(npchrs, ignore_index=True)
nexdf = select_chromwise_df(exdf, ex)
nsjdf = select_chromwise_df(sjdf, sj)
UT.write_pandas(nexdf, rdstpre+'.exdf.txt.gz', '')
UT.write_pandas(nsjdf, rdstpre+'.sjdf.txt.gz', '')
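# Hedged usage sketch (prefixes are placeholders): both helpers take the
# merged-assembly prefix and the repeat-filtered prefix and keep only the
# paths/exdf/sjdf entries whose exons survive the filtering.
#
#   filter_paths('merged/assembly', 'filtered/assembly')
#   filter_sjexdf('merged/assembly', 'filtered/assembly')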
class RmskFilter(object):
"""Filter genes with by overlap to repeat masker.
Args:
sjexpre: path prefix to assembled ex.txt.gz, sj.txt.gz files (optionally unionex.txt.gz )
code: identifier
chromdir: direcotry which contains chromosomes sequences in FASTA format
rmskviz: RepeatMasker viz track (UCSC) converted in BED7 (using jgem.repeats.rmskviz2bed7)
outdir: output directory
"""
def __init__(self, sjexpre, code, chromdir, rmskviz, outdir, **kw):
self.sjexpre = sjexpre
self.prefix = prefix = os.path.join(outdir, code)
self.fnobj = FN.FileNamesBase(prefix)
self.chromdir = chromdir
self.rmskviz = rmskviz
self.gfc = FA.GenomeFASTAChroms(chromdir)
self.params = RMSKPARAMS.copy()
self.params.update(kw)
self.ex = UT.read_pandas(sjexpre+'.ex.txt.gz')
self.sj = UT.read_pandas(sjexpre+'.sj.txt.gz')
if 'glen' not in self.ex or 'tlen' not in self.ex:
if not os.path.exists(sjexpre+'.ci.txt.gz'):
                ci = UT.chopintervals(self.ex, sjexpre+'.ci.txt.gz')
else:
ci = UT.read_ci(sjexpre+'.ci.txt.gz')
UT.set_glen_tlen(self.ex,ci,gidx='_gidx')
UT.write_pandas(self.ex, sjexpre+'.ex.txt.gz', 'h')
uexpath = sjexpre+'.unionex.txt.gz'
if os.path.exists(uexpath):
self.uex = UT.read_pandas(uexpath)
else:
LOG.info('making union exons...saving to {0}'.format(uexpath))
self.uex = UT.make_unionex(self.ex, '_gidx')
UT.write_pandas(self.uex, uexpath, 'h')
def calculate(self):
""" Calculate base pair overlap to repeat using UCSC genome mask of repeats to lower case,
and exon level overlap to repeat using UCSC RepeatMaskerViz track.
        Also make a dataframe containing a summary.
"""
pr = self.params
fn = self.fnobj
uex = count_repeats_mp(self.uex, self.gfc, np=pr['np'], col='#repbp')
uex = count_repeats_viz_mp(uex, self.rmskviz, np=pr['np'], idcol='_id', expand=0, col='repnames')
self.ugb = ugb = self._make_gbed(self.ex, self.sj, uex, datacode=pr['datacode'], gname=pr['gname'])
UT.write_pandas(ugb, fn.txtname('all.genes.stats', category='output'), 'h')
def _make_gbed(self, ex, sj, ugb, datacode='', gname='gname'):
# rep%
gr = ugb.groupby('_gidx')
gb2 = gr[['chr',gname,'tlen','glen']].first()
gb2['#repbp'] = gr['#repbp'].sum()
gb2['rep%'] = 100.*gb2['#repbp']/gb2['tlen']
# rmskviz, exon%
gb2['#uexons'] = gr.size()
        gbsub = ugb[ugb['repnames']!='.(-1)'] # .(-1) == no overlap with rmskviz
gb2['#uexons_rmsk'] = gbsub.groupby('_gidx').size() # num exons overlapping rmskviz
gb2.loc[gb2['#uexons_rmsk'].isnull(),'#uexons_rmsk'] = 0
gb2['rviz%'] = 100.*gb2['#uexons_rmsk']/gb2['#uexons']
gb2['repnames'] = gbsub.groupby('_gidx')['repnames'].apply(lambda x: ';'.join(list(x)))
# locus
gb2['st'] = gr['st'].min()
gb2['ed'] = gr['ed'].max()
gb2['glocus'] = UT.calc_locus(gb2,'chr','st','ed')
# rmskviz, class=[Simple_repeats, LINE, SINE, LTR, DNA]
rcols = ['Simple_repeat','LINE','SINE','LTR','DNA']
for k in rcols:
gb2[k] = gb2['repnames'].str.contains('#'+k)
dc = '_'+datacode if datacode else ''
egr = ex.groupby('_gidx')
gb2['#exons'] = egr.size()
gb2['avgecov'] = egr['ecov'+dc].mean()
if 'gcov' in egr:
gb2['gcov'] = egr['gcov'+dc].first()
sgr = sj.groupby('_gidx')
if 'ucnt' in sgr and 'mcnt' in sgr:
gb2['ucnt'] = sgr['ucnt'+dc].sum()
gb2['mcnt'] = sgr['mcnt'+dc].sum()
gb2['minjcnt'] = sgr['mcnt'+dc].min()
elif 'tcnt' in sgr:
gb2['tcnt'] = sgr['tcnt'+dc].sum()
gb2['#junc'] = sgr.size()
# gb2['lscore'] = N.log10(gb2['tlen']) - N.log10(gb2['glen']) + 2
# gb2['jscore'] = N.log10(gb2['ucnt']) - N.log10(gb2['mcnt']) - 1.5
return gb2
def filter(self, **kw):
""" Filter genes.
base pair repeat overlap % >= th_bp_ovl (default 50)
exon_repeat_overlap % >= th_ex_ovl (default 50)
#union exon < th_uexon (default 4)
        That is, by default, it filters out 2-3 exon genes whose base pair and exon level
        overlaps with repeats are both greater than or equal to 50%. It does not apply to single exon genes.
"""
d = self.ugb
pr = self.params
fn = self.fnobj
pr.update(kw)
idx1 = (d['rep%']>=pr['th_bp_ovl'])&(d['rviz%']>pr['th_ex_ovl'])
idx2 = (d['#junc'].notnull())&(d['#uexons']<pr['th_uexon'])
idx = ~(idx1&idx2)
self.ugb2 = ugb2 = d[idx] # filtered
self.ugb3 = ugb3 = d[~idx]
gids = ugb2.index.values
ex0 = self.ex
sj0 = self.sj
uex = self.uex
# filter ex,sj,uex
self.ex2 = ex2 = ex0[ex0['_gidx'].isin(gids)].sort_values(['chr','st','ed'])
self.sj2 = sj2 = sj0[sj0['_gidx'].isin(gids)].sort_values(['chr','st','ed'])
self.uex2 = uex2 = uex[uex['_gidx'].isin(gids)].sort_values(['chr','st','ed'])
gcovfld = 'gcov_'+pr['datacode'] if pr['datacode'] else 'gcov'
self.gbed2 = gbed2 = GGB.unionex2bed12(uex2,name=pr['gname'],sc2=gcovfld,sc1='tlen')
gbed2['sc2'] = gbed2['sc2'].astype(int)
# write out filtered ex,sj,ci,unionex,gbed
UT.write_pandas(ex2, fn.txtname('ex', category='output'), 'h')
UT.write_pandas(sj2, fn.txtname('sj', category='output'), 'h')
UT.chopintervals(ex2, fn.txtname('ci', category='output'))
GGB.write_bed(ex2, fn.bedname('ex', category='output'))
GGB.write_bed(sj2, fn.bedname('sj', category='output'))
UT.write_pandas(uex2, fn.txtname('unionex', category='output'), 'h')
UT.write_pandas(ugb2, fn.txtname('genes.stats', category='output'), 'h')
UT.write_pandas(gbed2, fn.bedname('genes', category='output'), '') # BED12
# also write filtered out genes
self.ex3 = ex3 = ex0[~ex0['_gidx'].isin(gids)].sort_values(['chr','st','ed'])
self.sj3 = sj3 = sj0[~sj0['_gidx'].isin(gids)].sort_values(['chr','st','ed'])
self.uex3 = uex3 = uex[~uex['_gidx'].isin(gids)].sort_values(['chr','st','ed'])
gcovfld = 'gcov_'+pr['datacode'] if pr['datacode'] else 'gcov'
self.gbed3 = gbed3 = GGB.unionex2bed12(uex3,name=pr['gname'],sc2=gcovfld,sc1='tlen')
gbed3['sc2'] = gbed3['sc2'].astype(int)
# write out filtered ex,sj,ci,unionex,gbed
UT.write_pandas(ex3, fn.txtname('removed.ex', category='output'), 'h')
UT.write_pandas(sj3, fn.txtname('removed.sj', category='output'), 'h')
UT.chopintervals(ex3, fn.txtname('removed.ci', category='output'))
UT.write_pandas(uex3, fn.txtname('removed.unionex', category='output'), 'h')
UT.write_pandas(ugb3, fn.txtname('removed.genes.stats', category='output'), 'h')
UT.write_pandas(gbed3, fn.bedname('removed.genes', category='output'), '') # BED12
def save_params(self):
UT.save_json(self.params, self.fnobj.fname('params.json', category='output'))
def __call__(self):
self.calculate()
self.filter()
self.save_params()
filter_paths(self.sjexpre, self.prefix)
filter_sjexdf(self.sjexpre, self.prefix)
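def _example_rmsk_filter(sjexpre, chromdir, rmskviz, outdir):
    # Hedged usage sketch (not part of the module API): all paths are
    # placeholders supplied by the caller; keyword parameters default to
    # RMSKPARAMS defined above.
    rf = RmskFilter(sjexpre, 'rmsk_filtered', chromdir, rmskviz, outdir, np=2)
    rf()  # calculate overlaps, filter genes, and write filtered ex/sj/paths
    return rf.ugb2  # summary stats of the genes that passed the filter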
def plot_tlen_vs_glen_panels(gbed, fld='rep%', alpha=0.1, ms=0.8):
fig,axr = P.subplots(2,5,figsize=(15,6), sharex=True, sharey=True)
for i,t in enumerate(range(0,100,10)):
ax = axr[int(i/5)][i % 5]
_tvl(gbed, t, t+10, ax=ax, fld=fld, alpha=alpha, ms=ms)
def plot_tlen_vs_glen(gbed, title='', ax=None, mk='b.', ms=0.5, alpha=0.1):
x = N.log10(gbed['glen'])
y = N.log10(gbed['tlen'])
if ax is None:
fig, ax = P.subplots(1,1,figsize=(3,3))
ax.plot(x.values, y.values, mk, ms=ms, alpha=alpha)
ax.set_title('{0} (#{1})'.format(title,len(gbed)))
ax.set_xlabel('log10(glen)')
ax.set_ylabel('log10(tlen)')
ax.set_xlim(1,7)
ax.set_ylim(1.5,5.5)
def _tvl(gbed, t0, t1, alpha=0.1, ax=None, fld='rep%',ms=0.1):
idx10 = (gbed[fld]>=t0)&(gbed[fld]<=t1)
x = N.log10(gbed['glen'])
y = N.log10(gbed['tlen'])
if ax is None:
fig, ax = P.subplots(1,1,figsize=(3,3))
ax.plot(x[idx10].values, y[idx10].values, 'b.', ms=ms, alpha=alpha)
ax.set_title('{0}<={3}<={1} ({2})'.format(t0,t1,N.sum(idx10),fld))
#ax.axhline(3.2)
ax.set_xlabel('log10(glen)')
ax.set_ylabel('log10(tlen)')
ax.set_xlim(1,7)
ax.set_ylim(1.5,5.5)
def count_repeats(beddf, genomefastaobj, col='#repbp', returnseq=False, seqcol='seq'):
"""Looks up genome sequence and counts the number of lower characters.
(RepeatMaker masked sequence are set to lower characters in UCSC genome)
Args:
beddf: Pandas DataFrame with chr,st,ed columns, when calculating repeats bp
for genes, unioned bed should be used (use utils.make_unionex)
genomefastaobj: an object with get(chr,st,ed) method that returns sequence
(use fasta.GenomeFASTAChroms).
col: column names where counts will be put in
returnseq (bool): whether to return sequence or not (default False)
seqcol: column where sequences are put in (default seq)
Outputs:
are put into beddf columns with colname col(default #repbp)
"""
def _cnt(chrom,st,ed):
seq = genomefastaobj.get(chrom,st,ed)
return N.sum([x.islower() for x in seq])
if returnseq:
beddf[seqcol] = [genomefastaobj.get(*x) for x in beddf[['chr','st','ed']].values]
beddf[col] = beddf[seqcol].apply(lambda x: N.sum([y.islower() for y in x]))
else:
beddf[col] = [_cnt(*x) for x in beddf[['chr','st','ed']].values]
return beddf
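def _example_count_repeats(genomefastaobj):
    # Hedged sketch (illustration only): counts masked (lowercase) bases for
    # a tiny BED-like frame; chromosome name and coordinates are placeholders,
    # and `genomefastaobj` is expected to be a fasta.GenomeFASTAChroms instance.
    beddf = PD.DataFrame({'chr': ['chr1'], 'st': [10000], 'ed': [10500]})
    beddf = count_repeats(beddf, genomefastaobj, col='#repbp')
    beddf['rep%'] = 100. * beddf['#repbp'] / (beddf['ed'] - beddf['st'])
    return beddf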
def count_repeats_mp(beddf, genomefastaobj, col='#repbp', returnseq=False, seqcol='seq', idfld='_id', np=4):
""" MultiCPU version of counts_repeats """
# only send relevant part i.e. chr,st,ed,id
if not idfld in beddf:
beddf[idfld] = N.arange(len(beddf))
# number per CPU
n = int(N.ceil(len(beddf)/float(np))) # per CPU
args = [(beddf.iloc[i*n:(i+1)*n],genomefastaobj,col,returnseq,seqcol) for i in range(np)]
rslts = UT.process_mp(count_repeats, args, np=np, doreduce=False)
df = PD.concat(rslts, ignore_index=True)
i2c = UT.df2dict(df, idfld, col)
beddf[col] = [i2c[x] for x in beddf[idfld]]
if returnseq:
i2s = UT.df2dict(df, idfld, seqcol)
beddf[seqcol] = [i2s[x] for x in beddf[idfld]]
return beddf
def count_repeats_viz_mp(beddf, rmskvizpath, idcol='_id', np=3, prefix=None, expand=0, col='repnames'):
"""Use rmsk-viz track and check each (unioned) exon overlaps with repeats and report repeat name(s).
Uses Bedtools and calculates chromosome-wise.
Args:
beddf: Pandas DataFrame with chr,st,ed cols, when calculating repeats bp
for genes, unioned bed should be used (use utils.make_unionex)
idcol: colname for unique row id (default _id)
rmskvizpath: path to repeat masker viz BED7 file (created using rmskviz2bed7)
np: number of CPU to use
prefix: path prefix for temp file, if not None temp files are kept. (default None)
expand: how many bases to expand exon region in each side (default 0)
col: column name to put in overlapping repeat names (if multiple comma separated)
Outputs:
are put into beddf columns with colname col(default repnames)
"""
cleanup = False
if prefix is None:
cleanup = True
prefix = os.path.join(os.path.dirname(rmskvizpath), str(uuid.uuid4())+'_')
# chrom-wise
chroms = sorted(beddf['chr'].unique())
# check whether rmskviz is already split
splitrmsk=False
for chrom in chroms:
rpath = rmskvizpath+'.{0}.bed.gz'.format(chrom) # reuse
if not os.path.exists(rpath):
splitrmsk=True
break
if splitrmsk:
rmsk = GGB.read_bed(rmskvizpath)
args = []
bfiles = []
ofiles = []
for chrom in chroms:
bpath = prefix+'tgt.{0}.bed'.format(chrom) # don't compress
rpath = rmskvizpath+'.{0}.bed.gz'.format(chrom) # reuse
if expand>0:
bchr = beddf[beddf['chr']==chrom].copy()
bchr['st'] = bchr['st'] - expand
bchr['ed'] = bchr['ed'] + expand
bchr.loc[bchr['st']<0,'st'] = 0
else:
bchr = beddf[beddf['chr']==chrom]
UT.write_pandas(bchr[['chr','st','ed',idcol]], bpath, '')
bfiles.append(bpath)
if splitrmsk:
rchr = rmsk[rmsk['chr']==chrom]
UT.write_pandas(rchr[['chr','st','ed','name','strand']], rpath, '')
opath = prefix+'out.{0}.bed'.format(chrom)
ofiles.append(opath)
args.append([bpath, rpath, opath])
rslts = UT.process_mp(count_repeats_viz_chr, args, np=np, doreduce=False)
# gather outputs
cols = ['name','repnames']
outs = [UT.read_pandas(f, names=cols) for f in ofiles]
df = | PD.concat(outs, ignore_index=True) | pandas.concat |
import pandas as pd
import numpy as np
import warnings
warnings.filterwarnings("ignore")
result_pred = pd.read_csv("./sub_xgb_16_2_h.csv")
print("begin process gt10..")
gt10_prob = pd.read_csv("../classification/result/gt10.csv")
gt10_prob.sort_values(by='gt10_prob',inplace=True)
gt10_25 = gt10_prob.tail(25)
result_pred = | pd.merge(result_pred,gt10_25,on='id',how='left') | pandas.merge |
import pandas
import scipy.interpolate
import numpy as np
from ..j_utils.string import str2time, time2str
from ..j_utils.path import format_filepath
from collections import OrderedDict
class History:
"""
Store dataseries by iteration and epoch.
Data are index through timestamp: the number of iteration since the first iteration of the first epoch.
"""
def __init__(self):
self._timeline_series = OrderedDict()
self._timestamps = pandas.DataFrame(columns=['date', 'time'])
self._events = []
self._nb_iterations_by_epoch = [0]
self._current_epoch = 1
self._current_epoch_iteration = -1
def save(self, path):
path = format_filepath(path)
df = self.export_dataframe()
def load(self, path):
path = format_filepath(path)
# --- Current Iteration ---
@property
def epoch(self):
return self._current_epoch
@property
def iteration(self):
return self._current_epoch_iteration
@property
def last_timeid(self):
return sum(self._nb_iterations_by_epoch)
def __len__(self):
return self.last_timeid + 1
def next_iteration(self, time, date=None):
self._current_epoch_iteration += 1
self._nb_iterations_by_epoch[-1] = self._current_epoch_iteration
self._update_timestamp(time, date)
def next_epoch(self, time, date=None):
self._current_epoch += 1
self._current_epoch_iteration = 0
self._nb_iterations_by_epoch.append(0)
self._update_timestamp(time, date)
def _update_timestamp(self, time, date):
if date is None:
date = pandas.Timestamp.now()
date = pandas.to_datetime(date)
df = pandas.DataFrame([[time, date]], index=[self.last_timeid], columns=['time', 'date'])
self._timestamps = self._timestamps.append(df)
def __setitem__(self, key, value):
if not isinstance(key, str):
raise KeyError('History key should be a serie name not (%s, type:%s).'
% (str(key), type(key)))
if key not in self._timeline_series:
serie = pandas.Series(data=[value], index=[self.last_timeid], name=key)
self._timeline_series[key] = serie
else:
self._timeline_series[key][self.last_timeid] = value
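    # Hedged usage sketch (illustration only):
    #
    #   h = History()
    #   h.next_iteration(time=0.0)
    #   h['loss'] = 0.7
    #   h.next_iteration(time=1.0)
    #   h['loss'] = 0.5
    #   last = h['loss']                  # most recent value
    #   trace = h['loss', 0:len(h):1]     # values sampled along the timeline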
# --- Store/Read Data ---
def keys(self):
return self._timeline_series.keys()
def series(self, only_number=False):
keys = list(self.keys())
if only_number:
return [k for k in keys if self._timeline_series[k].dtype != 'O']
return keys
def __getitem__(self, item):
if isinstance(item, str):
if item not in self.keys():
raise KeyError('%s is an unknown serie name.' % item)
return self._timeline_series[item].iloc[-1]
elif isinstance(item, tuple):
if len(item) != 2 or item[0] not in self.keys():
raise KeyError("Invalid history index: %s\n"
"Index should follow the form: ['series name', time_index]" % repr(item))
series = item[0]
timeid = item[1]
if isinstance(timeid, slice):
df = self.read(series=series, start=timeid.start, stop=timeid.stop, step=timeid.step,
interpolation='previous', averaged=True, std=False)
return df[series].values
else:
return self.get(series=series, timeid=timeid, interpolation='previous')
raise IndexError('Invalid index: unable to read from history series')
def get(self, series, timeid=-1, interpolation='previous', default='raise exception'):
try:
t = self.interpret_timeid(timeid)
if series not in self.keys():
raise KeyError('%s is an unknown serie name.' % series)
except LookupError as e:
if default != 'raise exception':
return default
raise e from None
serie = self._timeline_series[series]
if interpolation is None:
try:
return serie.loc[t]
except KeyError:
if default != 'raise exception':
return default
raise IndexError("Serie %s doesn't store any data at time: %s.\n"
"The interpolation parameter may be use to remove this exception."
% (series, repr(timeid)))
else:
serie = scipy.interpolate.interp1d(x=serie.index, y=serie.values,
kind=interpolation, fill_value='extrapolate',
assume_sorted=True, copy=False)
return serie(timeid)
def read(self, series=None, start=0, stop=0, step=1, timestamp=None,
interpolation='previous', smooth=None, averaged=True, std=False):
"""
Read one or more recorded series over a range of time ids, optionally interpolating, smoothing and/or averaging values within each step.
:param series: Keys of the variables to read
:type series: str or tuple or set
:param start: timestamp from which data should be read
:type start: int, TimeStamp, ...
:param stop: timestamp until which data should be read
:type stop: int, TimeStamp, ...
:param step: Interval between two consecutive samples
:type step: int, TimeStamp, ...
:param timestamp: Additional timestamp related columns. Acceptable values are:
- epoch
- iteration
- time
- date
:param interpolation: Specify which number series should be interpolated and how.
NaN in number series can automatically be replaced by interpolated values using pandas interpolation algorithms.
This parameter must be one of the following:
- True: All numbers series are interpolated linearly
- False: No interpolation is applied (NaN are not replaced)
- List of series name: The specified series are interpolated linearly
- Dictionary associating an interpolation method to a series name.
:param smooth: Specify which number series should be smoothed and how much.
Specified series are smoothed with a Savitzky-Golay filter of order 3. The window size may be chosen (default is 15).
:param averaged: Names of the time series whose values should be averaged along each step
instead of being naively down-sampled. Can only be applied on number series.
True means that all number series are averaged and False means no series are.
:param std: Names of the averaged time series whose standard deviation should be computed.
A new column is created for each of these time series with the name 'STD columnName'.
:return: time series
:rtype: pandas.DataFrame
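Example (illustrative sketch, assuming an instance `history` with a recorded 'loss' series):
    history.read(series='loss', start=0, stop=None, step=10, averaged=True, std=True)
returns a DataFrame with a 'loss' column and a 'STD loss' column, one row per step of 10 time ids.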
"""
if stop is None:
stop = len(self)
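# Build the sampled time ids and the [start, next) interval covered by each sample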
indexes = np.array(list(self.timeid_iterator(start=start, stop=stop, step=step)), dtype=np.uint32)
intervals = np.stack((indexes, np.concatenate((indexes[1:], [stop]))), axis=1)
series_name = self.interpret_series_name(series)
if isinstance(averaged, bool):
averaged = self.series(only_number=True) if averaged else []
else:
averaged = self.interpret_series_name(averaged, only_number=True)
if isinstance(std, bool):
std = averaged if std else []
else:
if isinstance(std, str):
std = [std]
not_averaged_series = set(std).difference(averaged)
if not_averaged_series:
raise ValueError("Can't compute standard deviation of: %s.\n"
"Those series are not averaged." % repr(not_averaged_series))
if not interpolation:
interpolation = {}
elif isinstance(interpolation, bool):
interpolation = {_: 'linear' for _ in self.series(only_number=True)}
elif isinstance(interpolation, str):
if interpolation in self.series(only_number=True):
interpolation = {interpolation: 'linear'}
else:
interpolation = {_: interpolation for _ in self.series(only_number=True)}
elif isinstance(interpolation, (dict, OrderedDict)):
unknown_keys = set(interpolation.keys()).difference(self.series(only_number=True))
if unknown_keys:
raise ValueError("Can't interpolate series: %s.\n"
"Those series are either unknown or don't contain numbers!" % repr(unknown_keys))
else:
unknown_keys = set(interpolation).difference(self.series(only_number=True))
if unknown_keys:
raise ValueError("Can't interpolate series: %s.\n"
"Those series are either unknown or don't contain numbers!" % repr(unknown_keys))
interpolation = {_: 'linear' for _ in interpolation}
if not smooth:
smooth = {}
elif isinstance(smooth, bool):
smooth = {_: 15 for _ in self.series(only_number=True)}
elif isinstance(smooth, str):
if smooth not in self.series(only_number=True):
raise ValueError("Can't smooth series %s. It is either unknown or doesn't contain number!"
% smooth)
smooth = {smooth: 15}
elif isinstance(smooth, (dict, OrderedDict)):
unknown_keys = set(smooth.keys()).difference(self.series(only_number=True))
if unknown_keys:
raise ValueError("Can't smooth series: %s.\n"
"Those series are either unknown or don't contain numbers!" % repr(unknown_keys))
else:
unknown_keys = set(smooth).difference(self.series(only_number=True))
if unknown_keys:
raise ValueError("Can't smooth series: %s.\n"
"Those series are either unknown or don't contain numbers!" % repr(unknown_keys))
smooth = {_: 15 for _ in smooth}
if smooth:
import scipy.signal
df = []
for k in series_name:
series = self._timeline_series[k]
std_series = None
# Sample
if k in self.series(only_number=True):
if k not in averaged:
series = series.reindex(indexes, copy=False)
else:
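# Down-sample by averaging the raw values falling inside each sampling interval (NaN-aware),
# optionally tracking their spread for the 'STD' column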
mean_series = np.zeros(shape=(intervals.shape[0],))
std_series = np.zeros(shape=(intervals.shape[0],)) if k in std else None
for i, (start_id, end_id) in enumerate(intervals):
s = series.loc[start_id:end_id-1]
mean_series[i] = np.nanmean(s) if len(s) else np.nan
if std_series is not None:
std_series[i] = np.nanvar(s) if len(s) else np.nan
series = pandas.Series(index=indexes, data=mean_series, name=series.name)
if std_series is not None:
std_series = | pandas.Series(index=indexes, data=std_series, name='STD '+series.name) | pandas.Series |
"""
<NAME> 2017
PanCancer Classifier
scripts/pancancer_classifier.py
Usage: Run in command line with required command argument:
python pancancer_classifier.py --genes $GENES
Where GENES is a comma separated string. There are also optional arguments:
--diseases comma separated string of disease types for classifier
default: Auto (will pick diseases from filter args)
--folds number of cross validation folds
default: 5
--drop drop the input genes from the X matrix
default: False if flag omitted
--copy_number optional flag to supplement copy number to define Y
default: False if flag omitted
--filter_count int of low count of mutation to include disease
default: 15
--filter_prop float of low proportion of mutated samples per disease
default: 0.05
--num_features int of number of genes to include in classifier
default: 8000
--alphas comma separated string of alphas to test in pipeline
default: '0.1,0.15,0.2,0.5,0.8,1'
--l1_ratios comma separated string of l1 parameters to test
default: '0,0.1,0.15,0.18,0.2,0.3'
--alt_genes comma separated string of alternative genes to test
default: None
--alt_diseases comma separated string of alternative diseases to test
default: Auto
--alt_filter_count int of low count of mutations to include alt_diseases
default: 15
--alt_filter_prop float of low proportion of mutated samples alt_disease
default: 0.05
--alt_folder string of where to save the classifier figures
default: Auto
--remove_hyper store_true: remove hypermutated samples
default: False if flag omitted
--keep_intermediate store_true: keep intermediate roc curve items
default: False if flag omitted
--x_matrix string of which feature matrix to use
default: raw
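Example (illustrative invocation; the gene symbols below are placeholders):
python pancancer_classifier.py --genes TP53,KRAS --drop --copy_number --remove_hyper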
Output:
ROC curves, AUROC across diseases, and classifier coefficients
"""
import os
import sys
import warnings
import pandas as pd
import csv
import argparse
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.linear_model import SGDClassifier
from sklearn.model_selection import train_test_split, cross_val_predict
from dask_searchcv import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from statsmodels.robust.scale import mad
sys.path.insert(0, os.path.join('scripts', 'util'))
from tcga_util import get_args, get_threshold_metrics, integrate_copy_number
from tcga_util import shuffle_columns
# Load command arguments
args = get_args()
genes = args.genes.split(',')
diseases = args.diseases.split(',')
folds = int(args.folds)
drop = args.drop
drop_rasopathy = args.drop_rasopathy
copy_number = args.copy_number
filter_count = int(args.filter_count)
filter_prop = float(args.filter_prop)
num_features_kept = args.num_features
alphas = [float(x) for x in args.alphas.split(',')]
l1_ratios = [float(x) for x in args.l1_ratios.split(',')]
alt_genes = args.alt_genes.split(',')
alt_filter_count = int(args.alt_filter_count)
alt_filter_prop = float(args.alt_filter_prop)
alt_diseases = args.alt_diseases.split(',')
alt_folder = args.alt_folder
remove_hyper = args.remove_hyper
keep_inter = args.keep_intermediate
x_matrix = args.x_matrix
shuffled = args.shuffled
shuffled_before_training = args.shuffled_before_training
no_mutation = args.no_mutation
drop_expression = args.drop_expression
drop_covariates = args.drop_covariates
warnings.filterwarnings('ignore',
message='Changing the shape of non-C contiguous array')
# Generate file names for output
genes_folder = args.genes.replace(',', '_')
base_folder = os.path.join('classifiers', genes_folder)
if alt_folder != 'Auto':
base_folder = alt_folder
if not os.path.exists(base_folder):
os.makedirs(base_folder)
else:
warnings.warn('Classifier may have already been built! Classifier results'
' will be overwritten!', category=Warning)
disease_folder = os.path.join(base_folder, 'disease')
if not os.path.exists(disease_folder):
os.makedirs(disease_folder)
count_table_file = os.path.join(base_folder, 'summary_counts.csv')
cv_heatmap_file = os.path.join(base_folder, 'cv_heatmap.pdf')
full_roc_file = os.path.join(base_folder, 'all_disease_roc.pdf')
full_pr_file = os.path.join(base_folder, 'all_disease_pr.pdf')
disease_roc_file = os.path.join(base_folder, 'disease', 'classifier_roc_')
disease_pr_file = os.path.join(base_folder, 'disease', 'classifier_pr_')
dis_summary_auroc_file = os.path.join(base_folder, 'disease_auroc.pdf')
dis_summary_aupr_file = os.path.join(base_folder, 'disease_aupr.pdf')
classifier_file = os.path.join(base_folder, 'classifier_coefficients.tsv')
roc_results_file = os.path.join(base_folder, 'pancan_roc_results.tsv')
alt_gene_base = 'alt_gene_{}_alt_disease_{}'.format(
args.alt_genes.replace(',', '_'),
args.alt_diseases.replace(',', '_'))
alt_count_table_file = os.path.join(base_folder, 'alt_summary_counts.csv')
alt_gene_auroc_file = os.path.join(base_folder,
'{}_auroc_bar.pdf'.format(alt_gene_base))
alt_gene_aupr_file = os.path.join(base_folder,
'{}_aupr_bar.pdf'.format(alt_gene_base))
alt_gene_summary_file = os.path.join(base_folder,
'{}_summary.tsv'.format(alt_gene_base))
# Load Datasets
if x_matrix == 'raw':
expr_file = os.path.join('data', 'pancan_rnaseq_freeze.tsv.gz')
else:
expr_file = x_matrix
mut_file = os.path.join('data', 'pancan_mutation_freeze.tsv.gz')
mut_burden_file = os.path.join('data', 'mutation_burden_freeze.tsv')
sample_freeze_file = os.path.join('data', 'sample_freeze.tsv')
rnaseq_full_df = pd.read_table(expr_file, index_col=0)
mutation_df = pd.read_table(mut_file, index_col=0)
sample_freeze = pd.read_table(sample_freeze_file, index_col=0)
mut_burden = pd.read_table(mut_burden_file)
# Construct data for classifier
common_genes = set(mutation_df.columns).intersection(genes)
if x_matrix == 'raw':
common_genes = list(common_genes.intersection(rnaseq_full_df.columns))
else:
common_genes = list(common_genes)
y = mutation_df[common_genes]
missing_genes = set(genes).difference(common_genes)
if len(common_genes) != len(genes):
warnings.warn('All input genes were not found in data. The missing genes '
'are {}'.format(missing_genes), category=Warning)
if drop:
if x_matrix == 'raw':
rnaseq_full_df.drop(common_genes, axis=1, inplace=True)
if drop_rasopathy:
rasopathy_genes = set(['BRAF', 'CBL', 'HRAS', 'KRAS', 'MAP2K1', 'MAP2K2',
'NF1', 'NRAS', 'PTPN11', 'RAF1', 'SHOC2', 'SOS1',
'SPRED1', 'RIT1'])
rasopathy_drop = list(rasopathy_genes.intersection(rnaseq_full_df.columns))
rnaseq_full_df.drop(rasopathy_drop, axis=1, inplace=True)
# Incorporate copy number for gene activation/inactivation
if copy_number:
# Load copy number matrices
copy_loss_file = os.path.join('data', 'copy_number_loss_status.tsv.gz')
copy_loss_df = pd.read_table(copy_loss_file, index_col=0)
copy_gain_file = os.path.join('data', 'copy_number_gain_status.tsv.gz')
copy_gain_df = pd.read_table(copy_gain_file, index_col=0)
# Load cancer gene classification table
vogel_file = os.path.join('data', 'vogelstein_cancergenes.tsv')
cancer_genes = pd.read_table(vogel_file)
y = integrate_copy_number(y=y, cancer_genes_df=cancer_genes,
genes=common_genes, loss_df=copy_loss_df,
gain_df=copy_gain_df,
include_mutation=no_mutation)
# Process y matrix
y = y.assign(total_status=y.max(axis=1))
y = y.reset_index().merge(sample_freeze,
how='left').set_index('SAMPLE_BARCODE')
count_df = y.groupby('DISEASE').sum()
prop_df = count_df.divide(y['DISEASE'].value_counts(sort=False).sort_index(),
axis=0)
count_table = count_df.merge(prop_df, left_index=True, right_index=True,
suffixes=('_count', '_proportion'))
count_table.to_csv(count_table_file)
# Filter diseases
mut_count = count_df['total_status']
prop = prop_df['total_status']
if diseases[0] == 'Auto':
filter_disease = (mut_count > filter_count) & (prop > filter_prop)
diseases = filter_disease.index[filter_disease].tolist()
# Load mutation burden and process covariates
y_df = y[y.DISEASE.isin(diseases)].total_status
common_samples = list(set(y_df.index) & set(rnaseq_full_df.index))
y_df = y_df.loc[common_samples]
rnaseq_df = rnaseq_full_df.loc[y_df.index, :]
if remove_hyper:
burden_filter = mut_burden['log10_mut'] < 5 * mut_burden['log10_mut'].std()
mut_burden = mut_burden[burden_filter]
y_matrix = mut_burden.merge(pd.DataFrame(y_df), right_index=True,
left_on='SAMPLE_BARCODE')\
.set_index('SAMPLE_BARCODE')
# Add covariate information
y_sub = y.loc[y_matrix.index]['DISEASE']
covar_dummy = pd.get_dummies(sample_freeze['DISEASE']).astype(int)
covar_dummy.index = sample_freeze['SAMPLE_BARCODE']
covar = covar_dummy.merge(y_matrix, right_index=True, left_index=True)
covar = covar.drop('total_status', axis=1)
# How cross validation splits will be balanced and stratified
y_df = y_df.loc[y_sub.index]
strat = y_sub.str.cat(y_df.astype(str))
x_df = rnaseq_df.loc[y_df.index, :]
# Subset x matrix to MAD genes and scale
if x_matrix == 'raw':
med_dev = pd.DataFrame(mad(x_df), index=x_df.columns)
mad_genes = med_dev.sort_values(by=0, ascending=False)\
.iloc[0:num_features_kept].index.tolist()
x_df = x_df.loc[:, mad_genes]
fitted_scaler = StandardScaler().fit(x_df)
x_df_update = pd.DataFrame(fitted_scaler.transform(x_df),
columns=x_df.columns)
x_df_update.index = x_df.index
x_df = x_df_update.merge(covar, left_index=True, right_index=True)
# Remove information from the X matrix given input arguments
if drop_expression:
x_df = x_df.iloc[:, num_features_kept:]
elif drop_covariates:
x_df = x_df.iloc[:, 0:num_features_kept]
# Shuffle expression matrix _before_ training - this can be used as NULL model
if shuffled_before_training:
# Shuffle genes
x_train_genes = x_df.iloc[:, range(num_features_kept)]
rnaseq_shuffled_df = x_train_genes.apply(shuffle_columns, axis=1,
result_type='broadcast')
x_train_cov = x_df.iloc[:, num_features_kept:]
x_df = pd.concat([rnaseq_shuffled_df, x_train_cov], axis=1)
# Build classifier pipeline
x_train, x_test, y_train, y_test = train_test_split(x_df, y_df,
test_size=0.1,
random_state=0,
stratify=strat)
clf_parameters = {'classify__loss': ['log'],
'classify__penalty': ['elasticnet'],
'classify__alpha': alphas, 'classify__l1_ratio': l1_ratios}
estimator = Pipeline(steps=[('classify', SGDClassifier(random_state=0,
class_weight='balanced',
loss='log',
max_iter=5,
tol=None))])
cv_pipeline = GridSearchCV(estimator=estimator, param_grid=clf_parameters,
n_jobs=-1, cv=folds, scoring='roc_auc',
return_train_score=True)
cv_pipeline.fit(X=x_train, y=y_train)
cv_results = pd.concat([pd.DataFrame(cv_pipeline.cv_results_)
.drop('params', axis=1),
pd.DataFrame.from_records(cv_pipeline
.cv_results_['params'])],
axis=1)
# Cross-validated performance heatmap
cv_score_mat = pd.pivot_table(cv_results, values='mean_test_score',
index='classify__l1_ratio',
columns='classify__alpha')
ax = sns.heatmap(cv_score_mat, annot=True, fmt='.1%')
ax.set_xlabel('Regularization strength multiplier (alpha)')
ax.set_ylabel('Elastic net mixing parameter (l1_ratio)')
plt.tight_layout()
plt.savefig(cv_heatmap_file, dpi=600, bbox_inches='tight')
plt.close()
# Get predictions
y_predict_train = cv_pipeline.decision_function(x_train)
y_predict_test = cv_pipeline.decision_function(x_test)
metrics_train = get_threshold_metrics(y_train, y_predict_train,
drop_intermediate=keep_inter)
metrics_test = get_threshold_metrics(y_test, y_predict_test,
drop_intermediate=keep_inter)
# Rerun "cross validation" for the best hyperparameter set to define
# cross-validation disease-specific performance. Each sample prediction is
# based on the fold that the sample was in the testing partition
y_cv = cross_val_predict(cv_pipeline.best_estimator_, X=x_train, y=y_train,
cv=folds, method='decision_function')
metrics_cv = get_threshold_metrics(y_train, y_cv,
drop_intermediate=keep_inter)
# Determine shuffled predictive ability of shuffled gene expression matrix
# representing a test of inflation of ROC metrics. Be sure to only shuffle
# gene names, retain covariate information (tissue type and log10 mutations)
if shuffled:
# Shuffle genes
x_train_genes = x_train.iloc[:, range(num_features_kept)]
rnaseq_shuffled_df = x_train_genes.apply(shuffle_columns, axis=1,
result_type='broadcast')
x_train_cov = x_train.iloc[:, num_features_kept:]
rnaseq_shuffled_df = pd.concat([rnaseq_shuffled_df, x_train_cov], axis=1)
y_predict_shuffled = cv_pipeline.decision_function(rnaseq_shuffled_df)
metrics_shuffled = get_threshold_metrics(y_train, y_predict_shuffled,
drop_intermediate=keep_inter)
# Decide to save ROC results to file
if keep_inter:
train_roc = metrics_train['roc_df']
train_roc = train_roc.assign(train_type='train')
test_roc = metrics_test['roc_df']
test_roc = test_roc.assign(train_type='test')
cv_roc = metrics_cv['roc_df']
cv_roc = cv_roc.assign(train_type='cv')
full_roc_df = pd.concat([train_roc, test_roc, cv_roc])
if shuffled:
shuffled_roc = metrics_shuffled['roc_df']
shuffled_roc = shuffled_roc.assign(train_type='shuffled')
full_roc_df = pd.concat([full_roc_df, shuffled_roc])
full_roc_df = full_roc_df.assign(disease='PanCan')
# Plot ROC
sns.set_style("whitegrid")
plt.figure(figsize=(3, 3))
total_auroc = {}
colors = ['blue', 'green', 'orange', 'grey']
idx = 0
metrics_list = [('Training', metrics_train), ('Testing', metrics_test),
('CV', metrics_cv)]
if shuffled:
metrics_list += [('Random', metrics_shuffled)]
for label, metrics in metrics_list:
roc_df = metrics['roc_df']
plt.plot(roc_df.fpr, roc_df.tpr,
label='{} (AUROC = {:.1%})'.format(label, metrics['auroc']),
linewidth=1, c=colors[idx])
total_auroc[label] = metrics['auroc']
idx += 1
plt.axis('equal')
plt.plot([0, 1], [0, 1], color='navy', linewidth=1, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate', fontsize=8)
plt.ylabel('True Positive Rate', fontsize=8)
plt.title('')
plt.tick_params(labelsize=8)
lgd = plt.legend(bbox_to_anchor=(1.03, 0.85),
loc=2,
borderaxespad=0.,
fontsize=7.5)
plt.savefig(full_roc_file, dpi=600, bbox_extra_artists=(lgd,),
bbox_inches='tight')
plt.close()
# Plot PR
sns.set_style("whitegrid")
plt.figure(figsize=(3, 3))
total_aupr = {}
colors = ['blue', 'green', 'orange', 'grey']
idx = 0
metrics_list = [('Training', metrics_train), ('Testing', metrics_test),
('CV', metrics_cv)]
if shuffled:
metrics_list += [('Random', metrics_shuffled)]
for label, metrics in metrics_list:
pr_df = metrics['pr_df']
plt.plot(pr_df.recall, pr_df.precision,
label='{} (AUPR = {:.1%})'.format(label, metrics['aupr']),
linewidth=1, c=colors[idx])
total_aupr[label] = metrics['aupr']
idx += 1
plt.axis('equal')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('Recall', fontsize=8)
plt.ylabel('Precision', fontsize=8)
plt.title('')
plt.tick_params(labelsize=8)
lgd = plt.legend(bbox_to_anchor=(1.03, 0.85),
loc=2,
borderaxespad=0.,
fontsize=7.5)
plt.savefig(full_pr_file, dpi=600, bbox_extra_artists=(lgd,),
bbox_inches='tight')
plt.close()
# disease specific performance
disease_metrics = {}
for disease in diseases:
# Get all samples in current disease
sample_sub = y_sub[y_sub == disease].index
# Get true and predicted training labels
y_disease_train = y_train[y_train.index.isin(sample_sub)]
if y_disease_train.sum() < 1:
continue
y_disease_predict_train = y_predict_train[y_train.index.isin(sample_sub)]
# Get true and predicted testing labels
y_disease_test = y_test[y_test.index.isin(sample_sub)]
if y_disease_test.sum() < 1:
continue
y_disease_predict_test = y_predict_test[y_test.index.isin(sample_sub)]
# Get predicted labels for samples when they were in cross validation set
# The true labels are y_pred_train
y_disease_predict_cv = y_cv[y_train.index.isin(sample_sub)]
# Get classifier performance metrics for three scenarios for each disease
met_train_dis = get_threshold_metrics(y_disease_train,
y_disease_predict_train,
disease=disease,
drop_intermediate=keep_inter)
met_test_dis = get_threshold_metrics(y_disease_test,
y_disease_predict_test,
disease=disease,
drop_intermediate=keep_inter)
met_cv_dis = get_threshold_metrics(y_disease_train,
y_disease_predict_cv,
disease=disease,
drop_intermediate=keep_inter)
# Get predictions and metrics with shuffled gene expression
if shuffled:
y_dis_predict_shuf = y_predict_shuffled[y_train.index.isin(sample_sub)]
met_shuff_dis = get_threshold_metrics(y_disease_train,
y_dis_predict_shuf,
disease=disease,
drop_intermediate=keep_inter)
if keep_inter:
train_roc = met_train_dis['roc_df']
train_roc = train_roc.assign(train_type='train')
test_roc = met_test_dis['roc_df']
test_roc = test_roc.assign(train_type='test')
cv_roc = met_cv_dis['roc_df']
cv_roc = cv_roc.assign(train_type='cv')
full_dis_roc_df = train_roc.append(test_roc).append(cv_roc)
if shuffled:
shuffled_roc = met_shuff_dis['roc_df']
shuffled_roc = shuffled_roc.assign(train_type='shuffled')
full_dis_roc_df = full_dis_roc_df.append(shuffled_roc)
full_dis_roc_df = full_dis_roc_df.assign(disease=disease)
full_roc_df = full_roc_df.append(full_dis_roc_df)
# Store results in disease indexed dictionary
disease_metrics[disease] = [met_train_dis, met_test_dis, met_cv_dis]
if shuffled:
disease_metrics[disease] += [met_shuff_dis]
disease_auroc = {}
disease_aupr = {}
for disease, metrics_val in disease_metrics.items():
labels = ['Training', 'Testing', 'CV', 'Random']
met_list = []
idx = 0
for met in metrics_val:
lab = labels[idx]
met_list.append((lab, met))
idx += 1
disease_pr_sub_file = '{}_pred_{}.pdf'.format(disease_pr_file, disease)
disease_roc_sub_file = '{}_pred_{}.pdf'.format(disease_roc_file, disease)
# Plot disease specific PR
plt.figure(figsize=(3, 3))
aupr = []
idx = 0
for label, metrics in met_list:
pr_df = metrics['pr_df']
plt.plot(pr_df.recall, pr_df.precision,
label='{} (AUPR = {:.1%})'.format(label, metrics['aupr']),
linewidth=1, c=colors[idx])
aupr.append(metrics['aupr'])
idx += 1
disease_aupr[disease] = aupr
plt.axis('equal')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('Recall', fontsize=8)
plt.ylabel('Precision', fontsize=8)
plt.title('')
plt.tick_params(labelsize=8)
lgd = plt.legend(bbox_to_anchor=(1.03, 0.85),
loc=2,
borderaxespad=0.,
fontsize=7.5)
plt.savefig(disease_pr_sub_file, dpi=600, bbox_extra_artists=(lgd,),
bbox_inches='tight')
plt.close()
# Plot disease specific ROC
plt.figure(figsize=(3, 3))
auroc = []
idx = 0
for label, metrics in met_list:
roc_df = metrics['roc_df']
plt.plot(roc_df.fpr, roc_df.tpr,
label='{} (AUROC = {:.1%})'.format(label, metrics['auroc']),
linewidth=1, c=colors[idx])
auroc.append(metrics['auroc'])
idx += 1
disease_auroc[disease] = auroc
plt.axis('equal')
plt.plot([0, 1], [0, 1], color='navy', linewidth=1, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate', fontsize=8)
plt.ylabel('True Positive Rate', fontsize=8)
plt.title('')
plt.tick_params(labelsize=8)
lgd = plt.legend(bbox_to_anchor=(1.03, 0.85),
loc=2,
borderaxespad=0.,
fontsize=7.5)
plt.savefig(disease_roc_sub_file, dpi=600, bbox_extra_artists=(lgd,),
bbox_inches='tight')
plt.close()
index_lab = ['Train', 'Test', 'Cross Validation']
if shuffled:
index_lab += ['Random']
disease_auroc_df = pd.DataFrame(disease_auroc, index=index_lab).T
disease_auroc_df = disease_auroc_df.sort_values('Cross Validation',
ascending=False)
ax = disease_auroc_df.plot(kind='bar', title='Disease Specific Performance')
ax.set_ylabel('AUROC')
plt.tight_layout()
plt.savefig(dis_summary_auroc_file, dpi=600, bbox_inches='tight')
plt.close()
disease_aupr_df = pd.DataFrame(disease_aupr, index=index_lab).T
disease_aupr_df = disease_aupr_df.sort_values('Cross Validation',
ascending=False)
ax = disease_aupr_df.plot(kind='bar', title='Disease Specific Performance')
ax.set_ylabel('AUPR')
plt.tight_layout()
plt.savefig(dis_summary_aupr_file, dpi=600, bbox_inches='tight')
plt.close()
# Save classifier coefficients
final_pipeline = cv_pipeline.best_estimator_
final_classifier = final_pipeline.named_steps['classify']
coef_df = pd.DataFrame.from_dict(
{'feature': x_df.columns,
'weight': final_classifier.coef_[0]})
coef_df['abs'] = coef_df['weight'].abs()
coef_df = coef_df.sort_values('abs', ascending=False)
coef_df.to_csv(classifier_file, sep='\t')
if keep_inter:
full_roc_df.to_csv(roc_results_file, sep='\t')
# Apply the same classifier previously built to predict alternative genes
if alt_genes[0] != 'None':
# Classifying alternative mutations
y_alt = mutation_df[alt_genes]
# Add copy number info if applicable
if copy_number:
y_alt = integrate_copy_number(y=y_alt, cancer_genes_df=cancer_genes,
genes=alt_genes, loss_df=copy_loss_df,
gain_df=copy_gain_df)
# Append disease id
y_alt = y_alt.assign(total_status=y_alt.max(axis=1))
y_alt = y_alt.reset_index().merge(sample_freeze,
how='left').set_index('SAMPLE_BARCODE')
# Filter data
alt_count_df = y_alt.groupby('DISEASE').sum()
alt_prop_df = alt_count_df.divide(y_alt['DISEASE'].value_counts(sort=False)
.sort_index(), axis=0)
alt_count_table = alt_count_df.merge(alt_prop_df,
left_index=True,
right_index=True,
suffixes=('_count', '_proportion'))
alt_count_table.to_csv(alt_count_table_file)
mut_co = alt_count_df['total_status']
prop = alt_prop_df['total_status']
if alt_diseases[0] == 'Auto':
alt_filter_dis = (mut_co > alt_filter_count) & (prop > alt_filter_prop)
alt_diseases = alt_filter_dis.index[alt_filter_dis].tolist()
# Subset data
y_alt_df = y_alt[y_alt.DISEASE.isin(alt_diseases)].total_status
common_alt_samples = list(set(y_alt_df.index) & set(rnaseq_full_df.index))
y_alt_df = y_alt_df.loc[common_alt_samples]
rnaseq_alt_df = rnaseq_full_df.loc[y_alt_df.index, :]
y_alt_matrix = mut_burden.merge(pd.DataFrame(y_alt_df), right_index=True,
left_on='SAMPLE_BARCODE')\
.set_index('SAMPLE_BARCODE')
# Add Covariate Info to alternative y matrix
y_alt_sub = y_alt.loc[y_alt_matrix.index]['DISEASE']
covar_dummy_alt = pd.get_dummies(sample_freeze['DISEASE']).astype(int)
covar_dummy_alt.index = sample_freeze['SAMPLE_BARCODE']
covar_alt = covar_dummy_alt.merge(y_alt_matrix, right_index=True,
left_index=True)
covar_alt = covar_alt.drop('total_status', axis=1)
y_alt_df = y_alt_df.loc[y_alt_sub.index]
# Process alternative x matrix
x_alt_df = rnaseq_alt_df.loc[y_alt_df.index, :]
if x_matrix == 'raw':
x_alt_df = x_alt_df.loc[:, mad_genes]
x_alt_df_update = pd.DataFrame(fitted_scaler.transform(x_alt_df),
columns=x_alt_df.columns)
x_alt_df_update.index = x_alt_df.index
x_alt_df = x_alt_df_update.merge(covar_alt, left_index=True,
right_index=True)
# Apply the previously fit model to predict the alternate Y matrix
y_alt_cv = cv_pipeline.decision_function(X=x_alt_df)
alt_metrics_cv = get_threshold_metrics(y_alt_df, y_alt_cv,
drop_intermediate=keep_inter)
validation_metrics = {}
val_x_type = {}
for disease in alt_diseases:
sample_dis = y_alt_sub[y_alt_sub == disease].index
# Subset full data if it has not been trained on
if disease not in diseases:
x_sub = x_alt_df.loc[sample_dis]
y_sub = y_alt_df[sample_dis]
category = 'Full'
# Only subset to the holdout set if data was trained on
else:
x_sub = x_test.loc[x_test.index.isin(sample_dis)]
y_sub = y_test[y_test.index.isin(sample_dis)]
category = 'Holdout'
# If there are not enough classes do not proceed to plot
if y_sub.sum() < 1:
continue
neg, pos = y_sub.value_counts()
val_x_type[disease] = [category, neg, pos]
y_pred_alt = cv_pipeline.decision_function(x_sub)
y_pred_alt_cv = y_alt_cv[y_alt_df.index.isin(y_sub.index)]
alt_metrics_dis = get_threshold_metrics(y_sub, y_pred_alt,
disease=disease,
drop_intermediate=keep_inter)
alt_metrics_di_cv = get_threshold_metrics(y_sub, y_pred_alt_cv,
disease=disease,
drop_intermediate=keep_inter)
validation_metrics[disease] = [alt_metrics_dis, alt_metrics_di_cv]
# Compile a summary dataframe
val_x_type = | pd.DataFrame.from_dict(val_x_type) | pandas.DataFrame.from_dict |
import itertools
import time
import glob as gb
import librosa
import matplotlib.pyplot as plt
import librosa.display
import pickle
import pandas as pd
from sklearn.metrics import confusion_matrix, accuracy_score
import os
import soundfile as sf
import sys
import warnings
from keras.utils.vis_utils import plot_model
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
import tensorflow.keras as keras
from sklearn.svm import LinearSVC
from tensorflow.keras.layers import Input
from tensorflow.keras.regularizers import l2, l1_l2
import seaborn as sns
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
from sklearn.metrics import classification_report
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import statistics
from sklearn import tree
from sklearn.dummy import DummyClassifier
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.models import Sequential
import random
from numpy import inf
import audioread
import librosa.segment
import numpy as np
import data_utils as du
import data_utils_input as dus
from data_utils_input import normalize_image, padding_MLS, padding_SSLM, borders
from keras import backend as k
from shutil import copyfile
import fnmatch
from sklearn import preprocessing
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import f_classif
from ast import literal_eval
from sklearn.feature_selection import RFE
from skimage.transform import resize
from tensorflow.python.ops.init_ops_v2 import glorot_uniform
import lightgbm as lgb
from treegrad import TGDClassifier
from sklearn.preprocessing import MultiLabelBinarizer
import logging
# import tensorflow_decision_forests as tfdf # linux only
from tensorflow.keras.layers.experimental import RandomFourierFeatures
from XBNet.training_utils import training, predict
from XBNet.models import XBNETClassifier
from XBNet.run import run_XBNET
import autokeras as ak
from djinn import djinn
import hyperas
from hyperopt import Trials, STATUS_OK, tpe
from hyperas.distributions import choice, uniform
from os import listdir, walk, getcwd, sep
from sklearn.neighbors import NearestNeighbors
from sklearn.preprocessing import normalize
import math
from scipy import signal
import tensorflow.keras.layers as kl
import tensorflow.keras.applications as ka
import tensorflow.keras.optimizers as ko
import tensorflow.keras.models as km
import skimage.measure
import scipy
from scipy.spatial import distance
from tensorflow.keras.layers import Flatten, Dropout, Activation, BatchNormalization, Dense
from sklearn.model_selection import GridSearchCV
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier
from tensorflow.keras.optimizers import SGD
from sklearn.preprocessing import OneHotEncoder
from tensorflow.keras.layers import Conv1D, MaxPooling1D, AveragePooling1D
from tensorflow.keras.regularizers import l1
from keras.utils import np_utils
from pydub import AudioSegment
from tensorflow.keras.models import load_model
from sklearn.metrics import roc_curve, roc_auc_score, auc
import datetime
import glob
import math
import re
import pyaudio
import wave
import torch
from matplotlib.pyplot import specgram
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.metrics import multilabel_confusion_matrix
tf.get_logger().setLevel(logging.ERROR)
k.set_image_data_format('channels_last')
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
gpus = tf.config.experimental.list_physical_devices('GPU')
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
if not sys.warnoptions:
warnings.simplefilter("ignore") # ignore warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
# region Directories
MASTER_DIR = 'D:/Google Drive/Resources/Dev Stuff/Python/Machine Learning/Master Thesis/'
MASTER_INPUT_DIR = 'F:/Master Thesis Input/'
MASTER_LABELPATH = os.path.join(MASTER_INPUT_DIR, 'Labels/')
WEIGHT_DIR = os.path.join(MASTER_DIR, 'Weights/')
MIDI_Data_Dir = np.array(gb.glob(os.path.join(MASTER_DIR, 'Data/MIDIs/*')))
FULL_DIR = os.path.join(MASTER_INPUT_DIR, 'Full/')
FULL_MIDI_DIR = os.path.join(FULL_DIR, 'MIDI/')
FULL_LABELPATH = os.path.join(MASTER_LABELPATH, 'Full/')
# endregion
"""=================================================================================================================="""
# region DEPRECATED
# Deprecated
Train_Data_Dir = np.array(gb.glob(os.path.join(MASTER_INPUT_DIR, 'Train/*'))) # os.path.join(MASTER_DIR, 'Data/Train/*'
Test_Data_Dir = np.array(gb.glob(os.path.join(MASTER_INPUT_DIR, 'Test/*'))) # os.path.join(MASTER_DIR, 'Data/Test/*')))
Validate_Data_Dir = np.array(gb.glob(os.path.join(MASTER_INPUT_DIR, 'Validate/*'))) # os.path.join(MASTER_DIR,'Data/Val
MLS_Data_Dir = os.path.join(MASTER_DIR, 'Images/Train/MLS/')
SSLMCOS_Data_Dir = os.path.join(MASTER_DIR, 'Images/Train/SSLMCOS/')
SSLMEUC_Data_Dir = os.path.join(MASTER_DIR, 'Images/Train/SSLMEUC/')
SSLMCRM_Data_Dir = os.path.join(MASTER_DIR, 'Images/Train/SSLMCRM/')
TRAIN_DIR = os.path.join(MASTER_INPUT_DIR, 'Train/')
TEST_DIR = os.path.join(MASTER_INPUT_DIR, 'Test/')
VAL_DIR = os.path.join(MASTER_INPUT_DIR, 'Validate/')
TRAIN_LABELPATH = os.path.join(MASTER_LABELPATH, 'Train/')
TEST_LABELPATH = os.path.join(MASTER_LABELPATH, 'Test/')
VAL_LABELPATH = os.path.join(MASTER_LABELPATH, 'Validate/')
# Deprecated
def validate_directories():
print("Validating Training Directory...")
dus.validate_folder_contents(TRAIN_LABELPATH, os.path.join(TRAIN_DIR, 'MIDI/'), os.path.join(TRAIN_DIR, 'MLS/'),
os.path.join(TRAIN_DIR, 'SSLM_CRM_COS/'), os.path.join(TRAIN_DIR, 'SSLM_CRM_EUC/'),
os.path.join(TRAIN_DIR, 'SSLM_MFCC_COS/'), os.path.join(TRAIN_DIR, 'SSLM_MFCC_EUC/'))
print("Succes.\n")
print("Validating Validation Directory...")
dus.validate_folder_contents(VAL_LABELPATH, os.path.join(VAL_DIR, 'MIDI/'), os.path.join(VAL_DIR, 'MLS/'),
os.path.join(VAL_DIR, 'SSLM_CRM_COS/'), os.path.join(VAL_DIR, 'SSLM_CRM_EUC/'),
os.path.join(VAL_DIR, 'SSLM_MFCC_COS/'), os.path.join(VAL_DIR, 'SSLM_MFCC_EUC/'))
print("Succes.\n")
print("Validating Testing Directory...")
dus.validate_folder_contents(TEST_LABELPATH, os.path.join(TEST_DIR, 'MIDI/'), os.path.join(TEST_DIR, 'MLS/'),
os.path.join(TEST_DIR, 'SSLM_CRM_COS/'), os.path.join(TEST_DIR, 'SSLM_CRM_EUC/'),
os.path.join(TEST_DIR, 'SSLM_MFCC_COS/'), os.path.join(TEST_DIR, 'SSLM_MFCC_EUC/'))
print("Succes.\n")
# Deprecated
def get_class_weights(labels, one_hot=False):
if one_hot is False:
n_classes = max(labels) + 1
else:
n_classes = len(labels[0])
class_counts = [0 for _ in range(int(n_classes))]
if one_hot is False:
for label in labels:
class_counts[label] += 1
else:
for label in labels:
class_counts[np.where(label == 1)[0][0]] += 1
return {i: (1. / class_counts[i]) * float(len(labels)) / float(n_classes) for i in range(int(n_classes))}
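# Worked example (illustrative): labels [0, 0, 1, 2] with one_hot=False give
# class_counts = [2, 1, 1], so the returned weights are
# {0: (1/2)*(4/3) ~ 0.67, 1: (1/1)*(4/3) ~ 1.33, 2: (1/1)*(4/3) ~ 1.33},
# i.e. under-represented classes are weighted up.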
# Deprecated
def buildValidationSet():
cnt = 1
numtrainfiles = len(fnmatch.filter(os.listdir(os.path.join(TRAIN_DIR, "MLS/")), '*.npy'))
for file in os.listdir(os.path.join(TRAIN_DIR, "MLS/")):
numvalfiles = len(fnmatch.filter(os.listdir(os.path.join(VAL_DIR, "MLS/")), '*.npy'))
if numvalfiles >= numtrainfiles * 0.2:
print(f"Validation set >= 20% of training set: {numvalfiles}/{numtrainfiles}")
break
filename, name = file, file.split('/')[-1].split('.')[0]
print(f"\nWorking on {os.path.basename(name)}, file #" + str(cnt))
formfolder = "" # Start search for correct form to search for label
for root, dirs, files in os.walk(os.path.join(MASTER_DIR, 'Labels/')):
flag = False
for tfile in files:
if tfile.split('/')[-1].split('.')[0] == name:
formfolder = os.path.join(root, file).split('/')[-1].split('\\')[0]
flag = True
if flag:
break
path = os.path.join(os.path.join(MASTER_DIR, 'Labels/'), formfolder) + '/' + os.path.basename(name) + '.txt'
num_lines = sum(1 for _ in open(path))
if num_lines <= 2:
print("File has not been labeled with ground truth yet. Skipping...")
cnt += 1
continue
else:
src1 = os.path.join(TRAIN_DIR, "MLS/") + '/' + filename
src2 = os.path.join(TRAIN_DIR, "SSLM_CRM_COS/") + '/' + filename
src3 = os.path.join(TRAIN_DIR, "SSLM_CRM_EUC/") + '/' + filename
src4 = os.path.join(TRAIN_DIR, "SSLM_MFCC_COS/") + '/' + filename
src5 = os.path.join(TRAIN_DIR, "SSLM_MFCC_EUC/") + '/' + filename
dst1 = os.path.join(VAL_DIR, "MLS/") + '/' + filename
dst2 = os.path.join(VAL_DIR, "SSLM_CRM_COS/") + '/' + filename
dst3 = os.path.join(VAL_DIR, "SSLM_CRM_EUC/") + '/' + filename
dst4 = os.path.join(VAL_DIR, "SSLM_MFCC_COS/") + '/' + filename
dst5 = os.path.join(VAL_DIR, "SSLM_MFCC_EUC/") + '/' + filename
if os.path.exists(dst1) and os.path.exists(dst2) and os.path.exists(dst3) and os.path.exists(dst4) \
and os.path.exists(dst5):
print("File has already been prepared for training material. Skipping...")
cnt += 1
continue
else:
copyfile(src1, dst1)
copyfile(src2, dst2)
copyfile(src3, dst3)
copyfile(src4, dst4)
copyfile(src5, dst5)
cnt += 1
pass
# Deprecated
def findBestShape(mls_train, sslm_train):
dim1_mls = [i.shape[0] for i in mls_train.getImages()]
dim2_mls = [i.shape[1] for i in mls_train.getImages()]
print(dim1_mls)
print(dim2_mls)
dim1_sslm = [i.shape[0] for i in sslm_train.getImages()]
dim2_sslm = [i.shape[1] for i in sslm_train.getImages()]
print(dim1_sslm)
print(dim2_sslm)
dim1_mean = min(statistics.mean(dim1_mls), statistics.mean(dim2_sslm))
dim2_mean = min(statistics.mean(dim1_mls), statistics.mean(dim2_sslm))
dim1_median = min(statistics.median(dim1_mls), statistics.median(dim2_sslm))
dim2_median = min(statistics.median(dim1_mls), statistics.median(dim2_sslm))
dim1_mode = min(statistics.mode(dim1_mls), statistics.mode(dim2_sslm))
dim2_mode = min(statistics.mode(dim1_mls), statistics.mode(dim2_sslm))
print(f"Dimension 0:\nMean: {dim1_mean}\t\tMedian: {dim1_median}\t\tMode: {dim1_mode}")
print(f"Dimension 1:\nMean: {dim2_mean}\t\tMedian: {dim2_median}\t\tMode: {dim2_mode}")
# Deprecated WORKING FUSE MODEL
def old_formnn_fuse(output_channels=32, lrval=0.00001, numclasses=12):
cnn1_mel = formnn_mls(output_channels, lrval=lrval)
cnn1_sslm = formnn_sslm(output_channels, lrval=lrval)
combined = layers.concatenate([cnn1_mel.output, cnn1_sslm.output], axis=2)
cnn2_in = formnn_pipeline(combined, output_channels, lrval=lrval, numclasses=numclasses)
cnn2_in = layers.Dense(numclasses, activation='sigmoid')(cnn2_in)
opt = keras.optimizers.Adam(lr=lrval)
model = keras.models.Model(inputs=[cnn1_mel.input, cnn1_sslm.input], outputs=[cnn2_in])
model.compile(loss=keras.losses.BinaryCrossentropy(from_logits=True), optimizer=opt, metrics=['accuracy'])
model.summary() # Try categorical_crossentropy, metrics=[tf.keras.metrics.Precision(), tf.keras.metrics.Recall()])
if not os.path.isfile(os.path.join(MASTER_DIR, 'FormNN_Model_Diagram.png')):
plot_model(model, to_file=os.path.join(MASTER_DIR, 'FormNN_Model_Diagram.png'),
show_shapes=True, show_layer_names=True, expand_nested=True, dpi=300)
return model
# Deprecated WORKING PIPELINE MODEL
def old_formnn_pipeline(combined, output_channels=32, lrval=0.0001):
z = layers.ZeroPadding2D(padding=((1, 1), (6, 6)))(combined)
z = layers.Conv2D(filters=(output_channels * 2), kernel_size=(3, 5), strides=(1, 1),
padding='same', dilation_rate=(1, 3))(z)
z = layers.LeakyReLU(alpha=lrval)(z)
z = layers.SpatialDropout2D(rate=0.5)(z)
# z = layers.Reshape(target_shape=(-1, 1, output_channels * 152))(z)
z = layers.Conv2D(filters=output_channels * 4, kernel_size=(1, 1), strides=(1, 1), padding='same')(z)
z = layers.LeakyReLU(alpha=lrval)(z)
z = layers.SpatialDropout2D(rate=0.5)(z)
z = layers.Conv2D(filters=1, kernel_size=(1, 1), strides=(1, 1), padding='same')(z)
z = layers.GlobalMaxPooling2D()(z)
return z
# Deprecated MLS MODEL
def cnn_mls(output_channels, lrval=0.0001):
model = tf.keras.Sequential()
model.add(layers.Conv2D(filters=output_channels,
kernel_size=(5, 7), strides=(1, 1),
padding='same', # ((5 - 1) // 2, (7 - 1) // 2),
activation=layers.LeakyReLU(alpha=lrval), input_shape=(200, 1150, 4) # (1,)
))
model.add(layers.MaxPooling2D(pool_size=(5, 3), strides=(5, 1), padding='same')) # (1, 1)))
# opt = keras.optimizers.Adam(lr=lrval)
# model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
return model
# Deprecated SSLM MODEL
def cnn_sslm(output_channels, lrval=0.0001):
model = tf.keras.Sequential()
model.add(layers.Conv2D(filters=output_channels,
kernel_size=(5, 7), strides=(1, 1),
padding='same', # ((5 - 1) // 2, (7 - 1) // 2),
activation=layers.LeakyReLU(alpha=lrval), input_shape=(200, 1150, 4) # (3,)
))
model.add(layers.MaxPooling2D(pool_size=(5, 3), strides=(5, 1), padding='same')) # (1, 1)))
# opt = keras.optimizers.Adam(lr=lrval)
# model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
return model
# Deprecated PIPELINE MODEL
def cnn2(output_channels, lrval=0.0001):
model = tf.keras.Sequential()
model.add(layers.Conv2D(filters=(output_channels * 2),
kernel_size=(3, 5), strides=(1, 1),
padding='same', # ((3 - 1) // 2, (5 - 1) * 3 // 2),
dilation_rate=(1, 3),
activation=layers.LeakyReLU(alpha=lrval), input_shape=(40, 1150, 8)
))
model.add(layers.SpatialDropout2D(rate=0.5))
model.add(
layers.Conv2D(output_channels * 152, 128, (1, 1), activation=layers.LeakyReLU(alpha=lrval), padding='same'))
# *72 for 6-pool, *152 for 2-pool-3
model.add(layers.SpatialDropout2D(rate=0.5))
model.add(layers.Conv2D(128, 1, (1, 1), padding='same')) # , padding='same'))
# x = np.reshape(x, -1, x.shape[1] * x.shape[2], 1, x.shape[3]) # reshape model?
# model = keras.layers.Reshape((-1, model.shape))(model)
# Feature maps are joined with the column dimension (frequency)
# opt = keras.optimizers.Adam(lr=lrval) # learning rate
# model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
# model.summary()
return model
# Deprecated
def fuse_model(output_channels, lrval=0.0001):
cnn1_mel = cnn_mls(output_channels, lrval=lrval)
cnn1_sslm = cnn_sslm(output_channels, lrval=lrval)
combined = keras.layers.concatenate([cnn1_mel.output, cnn1_sslm.output])
cnn2_in = cnn2(output_channels, lrval=lrval)(combined)
opt = keras.optimizers.Adam(lr=lrval) # learning rate
model = keras.models.Model(inputs=[cnn1_mel.input, cnn1_sslm.input], outputs=[cnn2_in])
model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
model.summary()
model.get_layer(name='sequential_2').summary()
if not os.path.isfile(os.path.join(MASTER_DIR, 'Model_Diagram.png')):
plot_model(model, to_file=os.path.join(MASTER_DIR, 'Model_Diagram.png'),
show_shapes=True, show_layer_names=True, expand_nested=True)
# if not os.path.isfile(os.path.join(MASTER_DIR, 'Model_Diagram_Inner.png')):
# plot_model(model.get_layer(name='sequential_2'), to_file=os.path.join(MASTER_DIR, 'Model_Diagram_Inner.png'),
# show_shapes=True, show_layer_names=True, expand_nested=True)
return model
# Probably deprecated
def prepare_train_data():
"""
Retrieve analysis of the following audio data for each training file:
- Log-scaled Mel Spectrogram (MLS)
- Self-Similarity Lag Matrix (Mel-Frequency Cepstral Coefficients/MFCCs - Cosine Distance, SSLMCOS)
- Self-Similarity Lag Matrix (MFCCs - Euclidean Distance, SSLMEUC)
- Self-Similarity Matrix (Chromas, SSLMCRM)
Checks to ensure that each file has been fully analyzed/labeled with ground truth
and not yet prepared for training material.
"""
cnt = 1
for folder in MIDI_Data_Dir:
for file in os.listdir(folder):
foldername = folder.split('\\')[-1]
filename, name = file, file.split('/')[-1].split('.')[0]
print(f"\nWorking on {os.path.basename(name)}, file #" + str(cnt))
path = os.path.join(os.path.join(MASTER_DIR, 'Labels/'), foldername) + '/' + os.path.basename(name) + '.txt'
num_lines = sum(1 for _ in open(path))
if num_lines <= 2:
print("File has not been labeled with ground truth yet. Skipping...")
cnt += 1
continue
# elif os.path.basename(name) != "INSERT_DEBUG_NAME_HERE": # Debug output of specified file
else:
png1 = os.path.join(MASTER_DIR, 'Images/Train/') + "MLS/" + os.path.basename(name) + 'mls.png'
png2 = os.path.join(MASTER_DIR, 'Images/Train/') + "SSLMCOS/" + os.path.basename(name) + 'cos.png'
png3 = os.path.join(MASTER_DIR, 'Images/Train/') + "SSLMEUC/" + os.path.basename(name) + 'euc.png'
png4 = os.path.join(MASTER_DIR, 'Images/Train/') + "SSLMCRM/" + os.path.basename(name) + 'crm.png'
if os.path.exists(png1) and os.path.exists(png2) and os.path.exists(png3) and os.path.exists(png4):
print("File has already been prepared for training material. Skipping...")
cnt += 1
continue
fullfilename = folder + '/' + filename
du.create_mls_sslm(fullfilename, name, foldername)
du.peak_picking(fullfilename, name, foldername)
cnt += 1
# Deprecated
def old_prepare_train_data():
"""
Retrieve analysis of the following audio data for each training file:
- Log-scaled Mel Spectrogram (MLS)
- Self-Similarity Lag Matrix (Mel-Frequency Cepstral Coefficients/MFCCs - Cosine Distance, SSLMCOS)
- Self-Similarity Lag Matrix (MFCCs - Euclidean Distance, SSLMEUC)
- Self-Similarity Matrix (Chromas, SSLMCRM)
"""
cnt = 1
for file in Train_Data_Dir:
filename, name = file, file.split('/')[-1].split('.')[0]
print(f"\nWorking on {os.path.basename(name)}, file #" + str(cnt))
du.create_mls_sslm(filename, name)
du.create_mls_sslm2(filename, name)
cnt += 1
# Deprecated
def old_prepare_model_training_input():
"""
Read in the input data for the model, return: images [MLS, SSLMCOS, EUC, and CRM] labels (phrases), labels (seconds)
"""
mls_images = np.asarray(du.ReadImagesFromFolder(MLS_Data_Dir), dtype=np.float32)
sslmcos_images = np.asarray(du.ReadImagesFromFolder(SSLMCOS_Data_Dir), dtype=np.float32)
sslmeuc_images = np.asarray(du.ReadImagesFromFolder(SSLMEUC_Data_Dir), dtype=np.float32)
sslmcrm_images = du.ReadImagesFromFolder(SSLMCRM_Data_Dir)
lbls_seconds, lbls_phrases = du.ReadLabelSecondsPhrasesFromFolder()
# print(lbls_seconds)
# print([i for i, x in enumerate(lbls_seconds) if len(x) != 560])
# lbls_seconds = np.array(lbls_seconds).flatten()
# lbls_seconds = [item for sublist in lbls_seconds for item in sublist]
# for i in range(len(lbls_seconds)):
# lbls_seconds[i] = np.asarray(lbls_seconds[i]).flatten()
lbls_seconds = padMatrix(lbls_seconds) # matrix must not be jagged in order to convert to ndarray of float32
# print(lbls_seconds)
lbls_seconds = np.asarray(lbls_seconds, dtype=np.float32)
mdl_images = [mls_images, sslmcos_images, sslmeuc_images, sslmcrm_images]
return mdl_images, lbls_seconds, lbls_phrases
# Probably deprecated
def padMatrix(a):
b = []
width = max(len(r) for r in a)
for i in range(len(a)):
if len(a[i]) != width:
x = np.pad(a[i], (width - len(a[i]), 0), 'constant', constant_values=0)
else:
x = a[i]
b.append(x)
return b
# Probably deprecated
def debugInput(mimg, lbls, lblp):
# model_images = [0 => mls, 1 => sslmcos, 2 => sslmeuc, 3 => sslmcrm]
print("Model images:", mimg)
print("Model images length:", len(mimg))
for i in range(len(mimg)):
print("M_Imgs[" + str(i) + "] length:", len(mimg[i]))
print("Label seconds:", lbls)
print("Label phrases:", lblp)
print("Image shape:", mimg[0][0].shape) # returns (height, width, channels) := (216, 1162, 4)
# Deprecated
def old_trainModel():
model_images, labels_seconds, labels_phrases = old_prepare_model_training_input()
# debugInput(model_images, labels_seconds, labels_phrases)
# FIT MODEL AND USE CHECKPOINT TO SAVE BEST MODEL
trmodel = fuse_model(4) # (32) CNN Layer 1 Output Characteristic Maps
checkpoint = ModelCheckpoint("best_initial_model.hdf5", monitor='val_accuracy', verbose=1,
save_best_only=True, mode='max', save_freq='epoch', save_weights_only=True)
model_history = trmodel.fit((np.array([model_images[0]], dtype=np.float32),
np.array([model_images[1], model_images[2], model_images[3]], dtype=np.float32)),
# np.asarray([tf.stack(model_images[1:2]), model_images[3]],
# (np.array([model_images[1], model_images[2]], dtype=np.float32),
# np.array(model_images[3])),
np.array(labels_seconds, dtype=np.float32),
batch_size=32, epochs=2000,
validation_data=(labels_seconds,),
callbacks=[checkpoint])
print(model_history)
# PLOT MODEL HISTORY OF ACCURACY AND LOSS OVER EPOCHS
plt.plot(model_history.history['accuracy'])
plt.plot(model_history.history['val_accuracy'])
plt.title('Model Accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.savefig('Initial_Model_Accuracy.png')
plt.show()
# pd.DataFrame(model_history.history).plot() # figsize=(8, 5)
# plt.show()
# summarize history for loss
plt.plot(model_history.history['loss'])
plt.plot(model_history.history['val_loss'])
plt.title('Model Loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.savefig('Initial_Model_loss.png')
plt.show()
# Probably deprecated
def combine_generator(gen1, gen2):
while True:
yield next(gen1), next(gen2)
# endregion
# region OldModelDefinition
# MIDI MODEL -- Try switching activation to ELU instead of RELU. Mimic visual/aural analysis using ensemble method
def formnn_midi(output_channels=32, numclasses=12):
inputC = layers.Input(shape=(None, 1))
w = layers.Conv1D(output_channels * 2, kernel_size=10, activation='relu', input_shape=(None, 1))(inputC)
w = layers.Conv1D(output_channels * 4, kernel_size=10, activation='relu', kernel_regularizer=l2(0.01),
bias_regularizer=l2(0.01))(w)
w = layers.MaxPooling1D(pool_size=6)(w)
w = layers.Dropout(0.4)(w)
w = layers.Conv1D(output_channels * 4, kernel_size=10, activation='relu')(w)
w = layers.MaxPooling1D(pool_size=6)(w)
w = layers.Dropout(0.4)(w)
w = layers.GlobalMaxPooling1D()(w)
w = layers.Dense(output_channels * 8, activation='relu')(w)
w = layers.Dropout(0.4)(w)
w = layers.Dense(numclasses)(w)
w = layers.Softmax()(w)
w = keras.models.Model(inputs=inputC, outputs=w)
return w
def formnn_mls2(output_channels=32):
inputA = layers.Input(batch_input_shape=(None, None, None, 1))
x = layers.Conv2D(filters=output_channels, kernel_size=(5, 7), padding='same',
kernel_regularizer=l2(0.01), bias_regularizer=l2(0.01), activation='relu')(inputA)
x = layers.MaxPooling2D(pool_size=(5, 3), strides=(5, 1), padding='same')(x)
x = keras.models.Model(inputs=inputA, outputs=x)
return x
def formnn_sslm2(output_channels=32):
inputB = layers.Input(batch_input_shape=(None, None, None, 1)) # (None, None, None, 4)
y = layers.Conv2D(filters=output_channels, kernel_size=(5, 7), padding='same',
kernel_regularizer=l2(0.01), bias_regularizer=l2(0.01), activation='relu')(inputB)
y = layers.MaxPooling2D(pool_size=(5, 3), strides=(5, 1), padding='same')(y)
y = layers.AveragePooling2D(pool_size=(1, 4))(y)
y = keras.models.Model(inputs=inputB, outputs=y)
return y
def formnn_pipeline2(combined, output_channels=32, numclasses=12):
z = layers.Conv2D(filters=(output_channels * 2), kernel_size=(3, 5),
padding='same', dilation_rate=(1, 3), kernel_regularizer=l2(0.01),
bias_regularizer=l2(0.01), activation='relu')(combined)
z = layers.Conv2D(filters=output_channels * 4, kernel_size=(1, 1), padding='same',
kernel_regularizer=l2(0.01), bias_regularizer=l2(0.01), activation='relu')(z)
z = layers.MaxPooling2D(pool_size=3)(z)
z = layers.SpatialDropout2D(rate=0.3)(z)
z = layers.Conv2D(filters=output_channels * 4, kernel_size=(1, 1), padding='same',
kernel_regularizer=l2(0.01), bias_regularizer=l2(0.01), activation='relu')(z)
z = layers.MaxPooling2D(pool_size=3)(z)
z = layers.SpatialDropout2D(rate=0.3)(z)
z = layers.GlobalMaxPooling2D()(z)
# z = layers.Dense(output_channels * 8, activation='relu')(z)
# z = layers.Dropout(rate=0.3)(z)
z = layers.Dense(numclasses)(z)
z = layers.Softmax()(z)
return z
"""=======================ORIGINAL MODEL======================="""
# MLS MODEL
def formnn_mls(output_channels=32, lrval=0.0001):
inputA = layers.Input(batch_input_shape=(None, None, None, 1))
x = layers.ZeroPadding2D(padding=((2, 2), (3, 3)))(inputA)
x = layers.Conv2D(filters=output_channels, kernel_size=(5, 7), strides=(1, 1), padding='same',
kernel_regularizer=l2(0.01), bias_regularizer=l2(0.01))(x)
x = layers.LeakyReLU(alpha=lrval)(x)
x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)))(x)
x = layers.MaxPooling2D(pool_size=(5, 3), strides=(5, 1), padding='same')(x)
x = keras.models.Model(inputs=inputA, outputs=x)
return x
# SSLM MODEL
def formnn_sslm(output_channels=32, lrval=0.0001):
inputB = layers.Input(batch_input_shape=(None, None, None, 1)) # (None, None, None, 4)
y = layers.ZeroPadding2D(padding=((2, 2), (3, 3)))(inputB)
y = layers.Conv2D(filters=output_channels, kernel_size=(5, 7), strides=(1, 1), padding='same',
kernel_regularizer=l2(0.01), bias_regularizer=l2(0.01))(y)
y = layers.LeakyReLU(alpha=lrval)(y)
y = layers.ZeroPadding2D(padding=((1, 1), (1, 1)))(y)
y = layers.MaxPooling2D(pool_size=(5, 3), strides=(5, 1), padding='same')(y)
y = layers.AveragePooling2D(pool_size=(1, 4))(y)
y = keras.models.Model(inputs=inputB, outputs=y)
return y
# PIPELINE MODEL
def formnn_pipeline(combined, output_channels=32, lrval=0.0001, numclasses=12):
z = layers.ZeroPadding2D(padding=((1, 1), (6, 6)))(combined)
z = layers.Conv2D(filters=(output_channels * 2), kernel_size=(3, 5), strides=(1, 1),
padding='same', dilation_rate=(1, 3), kernel_regularizer=l2(0.01),
bias_regularizer=l2(0.01))(z)
z = layers.LeakyReLU(alpha=lrval)(z)
z = layers.SpatialDropout2D(rate=0.3)(z)
# z = layers.Reshape(target_shape=(-1, 1, output_channels * 152))(z)
z = layers.Conv2D(filters=output_channels * 4, kernel_size=(1, 1), strides=(1, 1), padding='same',
kernel_regularizer=l2(0.01), bias_regularizer=l2(0.01))(z)
z = layers.LeakyReLU(alpha=lrval)(z)
# z = layers.SpatialDropout2D(rate=0.5)(z)
z = layers.Conv2D(filters=output_channels * 8, kernel_size=(1, 1), strides=(1, 1), padding='same',
kernel_regularizer=l2(0.01), bias_regularizer=l2(0.01))(z)
z = layers.LeakyReLU(alpha=lrval)(z)
z = layers.GlobalAveragePooling2D()(z)
# z = layers.Flatten()(z)
z = layers.Dense(numclasses)(z)
z = layers.Softmax()(z)
# Softmax -> Most likely class where sum(probabilities) = 1, Sigmoid -> Multiple likely classes, sum != 1
return z
def formnn_fuse(output_channels=32, lrval=0.0001, numclasses=12):
cnn1_mel = formnn_mls(output_channels, lrval=lrval)
cnn1_sslm = formnn_sslm(output_channels, lrval=lrval)
combined = layers.concatenate([cnn1_mel.output, cnn1_sslm.output], axis=2)
cnn2_in = formnn_pipeline(combined, output_channels, lrval=lrval, numclasses=numclasses)
# opt = keras.optimizers.SGD(lr=lrval, decay=1e-6, momentum=0.9, nesterov=True)
opt = keras.optimizers.Adam(lr=lrval, epsilon=1e-6)
imgmodel = keras.models.Model(inputs=[cnn1_mel.input, cnn1_sslm.input], outputs=[cnn2_in])
midmodel = formnn_midi(output_channels, numclasses=numclasses)
averageOut = layers.Average()([imgmodel.output, midmodel.output])
model = keras.models.Model(inputs=[imgmodel.input[0], imgmodel.input[1], midmodel.input], outputs=averageOut)
model.compile(loss=['categorical_crossentropy'], optimizer=opt, metrics=['accuracy'])
# model.compile(loss=keras.losses.BinaryCrossentropy(from_logits=True), optimizer=opt, metrics=['accuracy'])
model.summary() # Try categorical_crossentropy, metrics=[tf.keras.metrics.Precision(), tf.keras.metrics.Recall()])
if not os.path.isfile(os.path.join(MASTER_DIR, 'FormNN_Model_Diagram.png')):
plot_model(model, to_file=os.path.join(MASTER_DIR, 'FormNN_Model_Diagram.png'),
show_shapes=True, show_layer_names=True, expand_nested=True, dpi=300)
return model
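# Illustrative usage sketch (shapes and variable names are assumptions): the fused model takes
# three inputs, e.g. model.fit([mls_batch, sslm_batch, midi_batch], one_hot_labels, ...),
# where the two image branches expect (batch, height, width, 1) arrays, the MIDI branch expects
# a (batch, timesteps, 1) array, and labels are one-hot encoded over the form classes.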
def old_trainFormModel():
batch_size = 10
# region MODEL_DIRECTORIES
mls_train = dus.BuildDataloader(os.path.join(TRAIN_DIR, 'MLS/'), label_path=TRAIN_LABELPATH, # end=90,
transforms=[padding_MLS, normalize_image, borders], batch_size=batch_size)
sslm_cmcos_train = dus.BuildDataloader(os.path.join(TRAIN_DIR, 'SSLM_CRM_COS/'), label_path=TRAIN_LABELPATH,
transforms=[padding_SSLM, normalize_image, borders], batch_size=batch_size)
sslm_cmeuc_train = dus.BuildDataloader(os.path.join(TRAIN_DIR, 'SSLM_CRM_EUC/'), label_path=TRAIN_LABELPATH,
transforms=[padding_SSLM, normalize_image, borders], batch_size=batch_size)
sslm_mfcos_train = dus.BuildDataloader(os.path.join(TRAIN_DIR, 'SSLM_MFCC_COS/'), label_path=TRAIN_LABELPATH,
transforms=[padding_SSLM, normalize_image, borders], batch_size=batch_size)
sslm_mfeuc_train = dus.BuildDataloader(os.path.join(TRAIN_DIR, 'SSLM_MFCC_EUC/'), label_path=TRAIN_LABELPATH,
transforms=[padding_SSLM, normalize_image, borders], batch_size=batch_size)
midi_train = dus.BuildMIDIloader(os.path.join(TRAIN_DIR, 'MIDI/'), label_path=TRAIN_LABELPATH,
batch_size=batch_size)
mls_val = dus.BuildDataloader(os.path.join(VAL_DIR, 'MLS/'), label_path=VAL_LABELPATH,
transforms=[padding_MLS, normalize_image, borders], batch_size=batch_size)
sslm_cmcos_val = dus.BuildDataloader(os.path.join(VAL_DIR, 'SSLM_CRM_COS/'), label_path=VAL_LABELPATH,
transforms=[padding_SSLM, normalize_image, borders], batch_size=batch_size)
sslm_cmeuc_val = dus.BuildDataloader(os.path.join(VAL_DIR, 'SSLM_CRM_EUC/'), label_path=VAL_LABELPATH,
transforms=[padding_SSLM, normalize_image, borders], batch_size=batch_size)
sslm_mfcos_val = dus.BuildDataloader(os.path.join(VAL_DIR, 'SSLM_MFCC_COS/'), label_path=VAL_LABELPATH,
transforms=[padding_SSLM, normalize_image, borders], batch_size=batch_size)
sslm_mfeuc_val = dus.BuildDataloader(os.path.join(VAL_DIR, 'SSLM_MFCC_EUC/'), label_path=VAL_LABELPATH,
transforms=[padding_SSLM, normalize_image, borders], batch_size=batch_size)
midi_val = dus.BuildMIDIloader(os.path.join(VAL_DIR, 'MIDI/'), label_path=VAL_LABELPATH, batch_size=batch_size)
mls_test = dus.BuildDataloader(os.path.join(TEST_DIR, 'MLS/'), label_path=TEST_LABELPATH,
transforms=[padding_MLS, normalize_image, borders], batch_size=batch_size)
sslm_cmcos_test = dus.BuildDataloader(os.path.join(TEST_DIR, 'SSLM_CRM_COS/'), label_path=TEST_LABELPATH,
transforms=[padding_SSLM, normalize_image, borders], batch_size=batch_size)
sslm_cmeuc_test = dus.BuildDataloader(os.path.join(TEST_DIR, 'SSLM_CRM_EUC/'), label_path=TEST_LABELPATH,
transforms=[padding_SSLM, normalize_image, borders], batch_size=batch_size)
sslm_mfcos_test = dus.BuildDataloader(os.path.join(TEST_DIR, 'SSLM_MFCC_COS/'), label_path=TEST_LABELPATH,
transforms=[padding_SSLM, normalize_image, borders], batch_size=batch_size)
sslm_mfeuc_test = dus.BuildDataloader(os.path.join(TEST_DIR, 'SSLM_MFCC_EUC/'), label_path=TEST_LABELPATH,
transforms=[padding_SSLM, normalize_image, borders], batch_size=batch_size)
midi_test = dus.BuildMIDIloader(os.path.join(TEST_DIR, 'MIDI/'), label_path=TEST_LABELPATH, batch_size=batch_size)
# endregion
# findBestShape(mls_train, sslm_cmcos_train)
train_datagen = multi_input_generator(mls_train, sslm_cmcos_train, sslm_cmeuc_train, sslm_mfcos_train,
sslm_mfeuc_train, midi_train)
valid_datagen = multi_input_generator(mls_val,
sslm_cmcos_val, sslm_cmeuc_val, sslm_mfcos_val, sslm_mfeuc_val, midi_val)
test_datagen = multi_input_generator(mls_test,
sslm_cmcos_test, sslm_cmeuc_test, sslm_mfcos_test, sslm_mfeuc_test, midi_test)
steps_per_epoch = len(list(mls_train)) // batch_size
steps_per_valid = len(list(mls_val)) // batch_size
label_encoder = LabelEncoder()
label_encoder.classes_ = np.load(os.path.join(MASTER_DIR, 'form_classes.npy'))
if mls_train.getNumClasses() != mls_val.getNumClasses() or mls_train.getNumClasses() != mls_test.getNumClasses():
print(f"Train and validation or testing datasets have differing numbers of classes: "
f"{mls_train.getNumClasses()} vs. {mls_val.getNumClasses()} vs. {mls_test.getNumClasses()}")
# classweights = get_class_weights(mls_train.getLabels().numpy().squeeze(axis=-1), one_hot=True)
"""
# Show class weights as bar graph
barx, bary = zip(*sorted(classweights.items()))
plt.figure(figsize=(12, 8))
plt.bar(label_encoder.inverse_transform(barx), bary, color='green')
for i in range(len(barx)):
plt.text(i, bary[i]//2, round(bary[i], 3), ha='center', color='white')
plt.title('Train Class Weights')
plt.ylabel('Weight')
plt.xlabel('Class')
plt.savefig('Initial_Model_Class_Weights.png')
plt.show()
"""
model = formnn_fuse(output_channels=32, lrval=0.00005, numclasses=mls_train.getNumClasses()) # Try 'val_loss'?
# model.load_weights('best_initial_model.hdf5')
early_stopping = EarlyStopping(patience=5, verbose=5, mode="auto")
checkpoint = ModelCheckpoint(os.path.join(MASTER_DIR, 'best_formNN_model.hdf5'), monitor='val_accuracy', verbose=0,
save_best_only=True, mode='max', save_freq='epoch', save_weights_only=True)
model_history = model.fit(train_datagen, epochs=100, verbose=1, validation_data=valid_datagen, shuffle=False,
callbacks=[checkpoint, early_stopping], batch_size=batch_size, # class_weight=classweight
steps_per_epoch=steps_per_epoch, validation_steps=steps_per_valid)
print("Training complete!\n")
# region LossAccuracyGraphs
plt.plot(model_history.history['loss'])
plt.plot(model_history.history['val_loss'])
plt.title('Model Loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.savefig('Initial_Model_Loss.png')
plt.show()
plt.plot(model_history.history['accuracy'])
plt.plot(model_history.history['val_accuracy'])
plt.title('Model Accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.savefig('Initial_Model_Accuracy.png')
plt.show()
pd.DataFrame(model_history.history).plot()
plt.show()
# endregion
predictions = model.predict_generator(valid_datagen, steps=1, verbose=1, workers=0)
print(predictions)
print("Prediction complete!")
inverted = label_encoder.inverse_transform([np.argmax(predictions[0, :])])
print("Predicted: ", end="")
    print(inverted, end="")
print("\tActual: ", end="")
print(label_encoder.inverse_transform([np.argmax(mls_val.getFormLabel(mls_val.getCurrentIndex()-1))]))
print("Name: " + mls_val.getSong(mls_val.getCurrentIndex()-1))
print("\nEvaluating...")
score = model.evaluate_generator(test_datagen, steps=len(list(mls_test)), verbose=1)
print("Evaluation complete!\nScore:")
print(f"Loss: {score[0]}\tAccuracy: {score[1]}")
# region EvaluationGraphs
predictions = model.predict(test_datagen, steps=len(list(mls_test)), verbose=1)
predictions = predictions.argmax(axis=1)
predictions = predictions.astype(int).flatten()
predictions = (label_encoder.inverse_transform(predictions))
predictions = pd.DataFrame({'Predicted Values': predictions})
actual = mls_test.getLabels().numpy().argmax(axis=1)
actual = actual.astype(int).flatten()
actual = (label_encoder.inverse_transform(actual))
actual = pd.DataFrame({'Actual Values': actual})
cm = confusion_matrix(actual, predictions)
plt.figure(figsize=(12, 10))
cm = pd.DataFrame(cm, index=[i for i in label_encoder.classes_[0:mls_test.getNumClasses()]],
columns=[i for i in label_encoder.classes_[0:mls_test.getNumClasses()]])
ax = sns.heatmap(cm, linecolor='white', cmap='Blues', linewidth=1, annot=True, fmt='')
bottom, top = ax.get_ylim()
ax.set_ylim(bottom + 0.5, top - 0.5)
plt.title('Confusion Matrix', size=20)
plt.xlabel('Predicted Labels', size=14)
plt.ylabel('Actual Labels', size=14)
plt.savefig('Initial_Model_Confusion_Matrix.png')
plt.show()
clf_report = classification_report(actual, predictions, output_dict=True,
target_names=[i for i in label_encoder.classes_[0:mls_test.getNumClasses()]])
sns.heatmap(pd.DataFrame(clf_report).iloc[:, :].T, annot=True, cmap='viridis')
plt.title('Classification Report', size=20)
plt.savefig('Initial_Model_Classification_Report.png')
plt.show()
# endregion
def formnn_cnn_mod(input_dim_1, filters=64, lrval=0.0001, numclasses=12):
model = tf.keras.Sequential()
model.add(layers.Conv1D(filters, kernel_size=10, activation='relu', input_shape=(input_dim_1, 1)))
model.add(layers.Dropout(0.4)) # ?
model.add(layers.Conv1D(filters*2, kernel_size=10, activation='relu', kernel_regularizer=l2(0.01),
bias_regularizer=l2(0.01)))
model.add(layers.MaxPooling1D(pool_size=6))
model.add(layers.BatchNormalization())
model.add(layers.Dropout(0.4))
model.add(layers.Conv1D(filters*2, kernel_size=10, activation='relu'))
model.add(layers.MaxPooling1D(pool_size=6))
# model.add(layers.BatchNormalization())
model.add(layers.Dropout(0.4))
model.add(layers.Flatten())
model.add(layers.Dense(filters*4, activation='relu'))
# model.add(layers.BatchNormalization())
model.add(layers.Dropout(0.4))
model.add(layers.Dense(numclasses, activation='softmax')) # Try softmax?
opt = keras.optimizers.Adam(lr=lrval, epsilon=1e-6)
# opt = keras.optimizers.SGD(lr=lrval, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
return model
def formnn_cnn_old(input_dim_1, filters=64, lrval=0.0001, numclasses=12):
model = tf.keras.Sequential()
model.add(layers.Conv1D(filters, kernel_size=10, activation='relu', input_shape=(input_dim_1, 1)))
model.add(layers.Conv1D(filters*2, kernel_size=10, activation='relu', kernel_regularizer=l2(0.01),
bias_regularizer=l2(0.01)))
model.add(layers.MaxPooling1D(pool_size=6))
model.add(layers.Dropout(0.4))
model.add(layers.Conv1D(filters*2, kernel_size=10, activation='relu'))
model.add(layers.MaxPooling1D(pool_size=6))
model.add(layers.Dropout(0.4))
model.add(layers.Flatten())
model.add(layers.Dense(filters*4, activation='relu'))
model.add(layers.Dropout(0.4))
model.add(layers.Dense(numclasses, activation='softmax')) # Try softmax?
# opt = keras.optimizers.Adam(lr=lrval, epsilon=1e-6)
opt = keras.optimizers.SGD(lr=lrval, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
return model
# endregion
# region OldWorkingModelDefinition
def formnn_cnn(input_dim_1, filters=8, lrval=0.0001, numclasses=12, kernelsize=3, l1reg=0.01, l2reg=0.01, dropout=0.6):
np.random.seed(9)
X_input = Input(shape=(input_dim_1, 1))
X = layers.Conv1D(filters, kernel_size=kernelsize, strides=1, kernel_initializer=glorot_uniform(seed=9),
bias_regularizer=l1_l2(l1=l1reg, l2=l2reg), kernel_regularizer=l1_l2(l1=l1reg, l2=l2reg))(X_input)
X = layers.BatchNormalization(axis=2)(X)
X = layers.Activation('relu')(X)
X = layers.MaxPooling1D(numclasses, padding='same')(X)
X = layers.Dropout(dropout)(X)
# X = layers.GaussianNoise(0.1)(X)
X = layers.Conv1D(filters * 2, kernel_size=kernelsize, strides=1, kernel_initializer=glorot_uniform(seed=9),
bias_regularizer=l1_l2(l1=l1reg, l2=l2reg), kernel_regularizer=l1_l2(l1=l1reg, l2=l2reg))(X)
X = layers.BatchNormalization(axis=2)(X)
X = layers.Activation('relu')(X)
X = layers.MaxPooling1D(numclasses, padding='same')(X)
X = layers.Dropout(dropout)(X)
# X = layers.GaussianNoise(0.1)(X)
X = layers.Conv1D(filters * 4, kernel_size=kernelsize, strides=1, kernel_initializer=glorot_uniform(seed=9),
bias_regularizer=l1_l2(l1=l1reg, l2=l2reg), kernel_regularizer=l1_l2(l1=l1reg, l2=l2reg))(X)
X = layers.BatchNormalization(axis=2)(X)
X = layers.Activation('relu')(X)
X = layers.MaxPooling1D(numclasses, padding='same')(X)
X = layers.Dropout(dropout)(X)
# X = layers.GaussianNoise(0.1)(X)
X = layers.Flatten()(X)
# X = layers.Conv1D(filters * 8, kernel_size=kernelsize, strides=1, kernel_initializer=glorot_uniform(seed=9),
# bias_regularizer=l2(0.5))(X)
X = layers.Dense(filters * 8, kernel_initializer=glorot_uniform(seed=9), # 256
bias_regularizer=l1_l2(l1=l1reg, l2=l2reg), kernel_regularizer=l1_l2(l1=l1reg, l2=l2reg))(X)
X = layers.BatchNormalization(axis=-1)(X)
X = layers.Activation('relu')(X)
# X = layers.MaxPooling1D(numclasses, padding='same')(X)
X = layers.Dropout(dropout)(X)
# X = layers.GaussianNoise(0.1)(X)
# X = layers.Flatten()(X)
X = layers.Dense(numclasses, activation='sigmoid', kernel_initializer=glorot_uniform(seed=9),
bias_regularizer=l1_l2(l1=l1reg, l2=l2reg), kernel_regularizer=l1_l2(l1=l1reg, l2=l2reg))(X)
# opt = keras.optimizers.Adam(lr=lrval)
opt = keras.optimizers.SGD(lr=lrval, decay=1e-6, momentum=0.9, nesterov=True)
model = keras.models.Model(inputs=X_input, outputs=X, name='FormModel')
model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])
return model
def oldWorkingtrainFormModel():
# region DataPreProcessing
df = pd.read_excel(os.path.join(MASTER_DIR, 'Data/full_augmented_dataset.xlsx'))
# df = pd.read_excel(os.path.join(MASTER_DIR, 'full_dataset.xlsx'))
names = df[['piece_name', 'composer', 'filename']]
y = df['formtype']
# """
df = df.drop(columns=['sslm_chroma_cos_mean', 'sslm_chroma_cos_var', 'sslm_chroma_euc_mean', 'sslm_chroma_euc_var',
'sslm_mfcc_cos_mean', 'sslm_mfcc_cos_var', 'sslm_mfcc_euc_mean', 'sslm_mfcc_euc_var'])
# """
df.drop(columns=['spectral_bandwidth_var', 'spectral_centroid_var', 'spectral_flatness_var', 'spectral_rolloff_var',
'zero_crossing_var', 'fourier_tempo_mean', 'fourier_tempo_var'], inplace=True) # Remove useless
# nonlist = df[['duration', 'spectral_contrast_var']]
nonlist = df[['duration']]
df.drop(columns=['piece_name', 'composer', 'filename', 'duration', 'spectral_contrast_var', 'formtype'],
inplace=True)
# df = df[['ssm_log_mel_mean', 'ssm_log_mel_var', 'mel_mean', 'mel_var', 'chroma_stft_mean', 'chroma_stft_var']]
# df = df[['ssm_log_mel_mean', 'ssm_log_mel_var']]
df = df[['ssm_log_mel_mean']] # best decision tree accuracy
print("Fixing broken array cells as needed...")
def fix_broken_arr(strx):
if '[' in strx:
if ']' in strx:
return strx
else:
return strx + ']'
for col in df.columns:
df[col] = df[col].apply(lambda x: fix_broken_arr(x))
# print("Headers:", pd.concat([pd.concat([names, pd.concat([nonlist, df], axis=1)], axis=1), y], axis=1).columns)
# Headers: Index(['piece_name', 'composer', 'filename', 'duration', 'ssm_log_mel_mean', 'formtype'], dtype='object')
print("Done processing cells, building training set...")
# d = [pd.DataFrame(df[col].astype(str).apply(literal_eval).values.tolist()).add_prefix(col) for col in df.columns]
d = [pd.DataFrame(df[col].astype(str).apply(literal_eval).values.tolist()) for col in df.columns]
df = pd.concat(d, axis=1).fillna(0)
df = pd.concat([pd.concat([names, pd.concat([nonlist, df], axis=1)], axis=1), y], axis=1) # print(df)
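    # Each stringified array cell is parsed with literal_eval and expanded into one column per element;
    # rows whose arrays are shorter are zero-padded by the fillna(0) above.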
train, test = train_test_split(df, test_size=0.169, random_state=0, stratify=df['formtype']) # test_s=.169 gave 50%
# df.to_csv(os.path.join(MASTER_DIR, 'full_modified_dataset.csv'))
X_train = train.iloc[:, 3:-1]
# X_train_names = train.iloc[:, 0:3]
y_train = train.iloc[:, -1]
print("Train shape:", X_train.shape)
X_test = test.iloc[:, 3:-1]
# X_test_names = test.iloc[:, 0:3]
y_test = test.iloc[:, -1]
print("Test shape:", X_test.shape)
# Normalize Data
"""
min_max_scaler = preprocessing.MinMaxScaler()
X_train = min_max_scaler.fit_transform(X_train) # Good for decision tree
X_test = min_max_scaler.fit_transform(X_test)
"""
# X_train = preprocessing.scale(X_train)
# X_test = preprocessing.scale(X_test)
# """
mean = np.mean(X_train, axis=0)
std = np.std(X_train, axis=0)
X_train = (X_train - mean) / std # Good for decision tree
X_test = (X_test - mean) / std
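    # The test set is standardized with the training-set mean/std so no test statistics leak into training.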
# """
print("Normalized Train shape:", X_train.shape)
print("Normalized Test shape:", X_test.shape)
# Convert to arrays for keras
X_train = np.array(X_train)
y_train = np.array(y_train)
X_test = np.array(X_test)
y_test = np.array(y_test)
label_encoder = LabelEncoder()
old_y_train = y_train
# old_y_test = y_test
int_y_train = label_encoder.fit_transform(y_train)
print(int_y_train.shape)
# int_y_train = int_y_train.reshape(len(int_y_train), 1)
# int_y_test = label_encoder.fit_transform(y_test)
# int_y_test = int_y_test.reshape(len(int_y_test), 1)
y_train = to_categorical(label_encoder.fit_transform(y_train))
y_test = to_categorical(label_encoder.fit_transform(y_test))
print(y_train.shape, y_test.shape)
print(label_encoder.classes_, "\n")
""" BASE MODEL """
# DummyClassifier makes predictions while ignoring input features
dummy_clf = DummyClassifier(strategy="stratified")
dummy_clf.fit(X_train, y_train)
DummyClassifier(strategy='stratified')
dummy_clf.predict(X_test)
print("Dummy classifier accuracy:", dummy_clf.score(X_test, y_test))
clf = tree.DecisionTreeClassifier()
clf = clf.fit(X_train, y_train)
clf.predict(X_test)
print("Decision tree accuracy:", clf.score(X_test, y_test))
""" FEATURE TUNING """
selector = SelectKBest(f_classif, k=15) # 1000 if using RFE
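    # f_classif ranks every feature with a one-way ANOVA F-test against the class labels;
    # SelectKBest keeps only the k highest-scoring features.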
Z_train = selector.fit_transform(X_train, old_y_train)
skb_values = selector.get_support()
Z_test = X_test[:, skb_values]
np.save(os.path.join(MASTER_DIR, "selectkbest_indices.npy"), skb_values)
print(Z_train.shape)
print(Z_test.shape)
"""
plt.title('Feature Importance')
plt.ylabel('Score')
plt.xlabel('Feature')
plt.plot(selector.scores_)
plt.savefig('Initial_Feature_Importance.png')
plt.show()
"""
print("Indices of top 10 features:", (-selector.scores_).argsort()[:10])
""" KBEST MODEL """
clf = tree.DecisionTreeClassifier()
clf = clf.fit(Z_train, y_train)
clf.predict(Z_test)
# treedepth = clf.tree_.max_depth
skb_score = clf.score(Z_test, y_test)
print("K-Best Decision tree accuracy:", skb_score) # Highest score: 84.3% accuracy
# """
# Accuracy 0.211, stick with SKB? Gives good loss though
clf = LinearSVC(C=0.01, penalty="l1", dual=False)
clf.fit(X_train, old_y_train)
rfe_selector = RFE(clf, 15, verbose=5)
rfe_selector = rfe_selector.fit(Z_train, old_y_train)
# rfe_selector = rfe_selector.fit(X_train, old_y_train)
rfe_values = rfe_selector.get_support()
# np.save(os.path.join(MASTER_DIR, "rfebest_indices.npy"), rfe_values)
print("Indices of RFE important features:", np.where(rfe_values)[0])
W_train = Z_train[:, rfe_values]
W_test = Z_test[:, rfe_values]
# "" " RFE MODEL " ""
clf = tree.DecisionTreeClassifier()
clf = clf.fit(W_train, y_train)
clf.predict(W_test)
rfe_score = clf.score(W_test, y_test)
print("RFE Decision tree accuracy:", rfe_score) # Highest score: 83.7% accuracy, typically better than SKB
"""
plt.figure(figsize=(30, 20)) # set plot size (denoted in inches)
tree.plot_tree(clf, fontsize=10)
plt.show()
plt.savefig('tree_high_dpi', dpi=100)
"""
# """
# endregion
# Reshape to 3D tensor for keras
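    # Keep whichever feature subset (SelectKBest vs. RFE) scored better with the decision tree,
    # and add a trailing channel axis so the Conv1D model receives (samples, features, 1).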
if skb_score > rfe_score:
X_train = Z_train[:, :, np.newaxis]
X_test = Z_test[:, :, np.newaxis]
# X1_train = Z_train
# X1_test = Z_test
else:
X_train = W_train[:, :, np.newaxis]
X_test = W_test[:, :, np.newaxis]
X1_train = W_train
X1_test = W_test
treedepth = clf.tree_.max_depth
# print(treedepth)
    # X_train and X_test already carry the trailing channel axis expected by the Conv1D input
"""
# Autokeras Model - 32% accuracy
clf = ak.StructuredDataClassifier(overwrite=True, max_trials=10)
model_history = clf.fit(W_train, y_train, epochs=100)
predicted_y = clf.predict(W_test)
print(predicted_y)
print(clf.evaluate(W_test, y_test))
model = clf.export_model()
model.summary()
# model.save('best_auto_model.h5', save_format='tf')
if not os.path.isfile(os.path.join(MASTER_DIR, 'FormNN_CNN_AutoModel_Diagram.png')):
plot_model(model, to_file=os.path.join(MASTER_DIR, 'FormNN_CNN_AutoModel_Diagram.png'),
show_shapes=True, show_layer_names=True, expand_nested=True, dpi=300)
"""
"""
# Deep CNN Decision Tree - 50% accuracy
feature_extractor = Sequential()
feature_extractor.add(layers.Conv1D(16, 3, padding='valid', activation='relu', input_shape=(X_train.shape[1], 1),
strides=1, kernel_regularizer=l1_l2(l1=0.01, l2=0.01)))
feature_extractor.add(layers.MaxPooling1D(2))
feature_extractor.add(layers.Dropout(0.6))
feature_extractor.add(layers.BatchNormalization())
feature_extractor.add(layers.Conv1D(32, 3, padding='valid', activation='relu',
kernel_regularizer=l1_l2(l1=0.01, l2=0.01), strides=1))
# New layers for prediction outside of feature extraction model
x = feature_extractor.output
x = layers.MaxPooling1D(4)(x)
x = layers.Dropout(0.6)(x)
x = layers.BatchNormalization()(x)
x = layers.Flatten()(x)
prediction_layer = layers.Dense(len(label_encoder.classes_), activation='softmax')(x)
# New model combining both layer sets
lrval = 0.1
# opt = keras.optimizers.Adam(lr=lrval)
opt = keras.optimizers.SGD(lr=lrval, decay=1e-6, momentum=0.9, nesterov=True)
cnn_model = keras.models.Model(inputs=feature_extractor.input, outputs=prediction_layer)
cnn_model.compile(optimizer=opt, loss='categorical_crossentropy')
for i in range(10):
cnn_model.fit(X_train, y_train, verbose=1)
# Predict only the output of the feature extraction model
X_ext = feature_extractor.predict(X_train)
dtc = tree.DecisionTreeClassifier() # criterion='entropy'
nsamples, nx, ny = X_ext.shape
X_ext = X_ext.reshape((nsamples, nx * ny))
# Train the decision tree on the extracted features
dtc.fit(X_ext, y_train)
# Evaluate decision tree
X_ext = feature_extractor.predict(X_test)
nsamples, nx, ny = X_ext.shape
X_ext = X_ext.reshape((nsamples, nx * ny))
dtc.predict(X_ext)
dtc_score = dtc.score(X_ext, y_test)
print("Deep CNN Decision tree accuracy:", dtc_score)
# """
"""
# Deep SVM-NN - 23% accuracy
model = keras.Sequential([
keras.Input(shape=(X_train.shape[1],)),
RandomFourierFeatures(output_dim=4096, scale=10.0, kernel_initializer="gaussian"),
layers.Dense(units=len(label_encoder.classes_)),
])
model.compile(
optimizer=keras.optimizers.Adam(learning_rate=1e-3),
loss=keras.losses.hinge,
metrics=[keras.metrics.CategoricalAccuracy(name="acc")],
)
model.fit(X_train, y_train, epochs=100, batch_size=32, validation_data=(X_test, y_test), verbose=1)
"""
"""
# Deep ANN Decision Tree - 53% accuracy
model = Sequential()
model.add(layers.Dense(128, activation='relu', input_shape=(X_train.shape[1],)))
model.add(layers.Dropout(0.3))
model.add(layers.Dense(256, activation='relu'))
model.add(layers.Dropout(0.3))
model.add(layers.Dense(512, activation='relu'))
model.add(layers.Dropout(0.3))
model.add(layers.Dense(len(label_encoder.classes_), activation='softmax'))
model.compile(optimizer='Adam', loss='categorical_crossentropy', metrics=['accuracy'])
# model.fit(X_train, y_train, epochs=100, batch_size=32, validation_data=(X_test, y_test), verbose=1)
model.fit(X_train, y_train, epochs=10000)
score, acc = model.evaluate(X_test, y_test, verbose=1) # ~26-35% accuracy
feature_vectors_model = keras.models.Model(model.input, model.get_layer('dense_3').output)
X_ext = feature_vectors_model.predict(X_train)
dtc = tree.DecisionTreeClassifier()
dtc.fit(X_ext, y_train)
X_ext = feature_vectors_model.predict(X_test)
dtc.predict(X_ext)
dtc_score = dtc.score(X_ext, y_test)
print("Deep ANN Decision Tree accuracy:", dtc_score)
"""
"""
# Deep Jointly-Informed Neural Network (DJINN) - 45% accuracy
modelname = "class_djinn_test"
ntrees = 1 # number of trees = number of neural nets in ensemble
maxdepth = 18 # 4 or 20-25; max depth of tree -- optimize this for each data set
dropout_keep = 1.0 # dropout typically set to 1 for non-Bayesian models
model = djinn.DJINN_Classifier(ntrees, maxdepth, dropout_keep)
optimal = model.get_hyperparameters(X1_train, y_train, random_state=1)
batchsize = optimal['batch_size']
learnrate = optimal['learn_rate']
epochs = optimal['epochs']
model.train(X1_train, int_y_train, epochs=epochs, learn_rate=learnrate, batch_size=batchsize,
display_step=1, save_files=True, file_name=modelname,
save_model=True, model_name=modelname, random_state=1)
m = model.predict(X1_test)
acc = accuracy_score(int_y_test, m.flatten())
print('DJINN Accuracy: ', acc)
model.close_model()
"""
"""
# XGBoosted Neural Network - 24% accuracy
model = XBNETClassifier(X1_train, int_y_train, num_layers=2)
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
m, acc, lo, val_ac, val_lo = run_XBNET(X1_train, X1_test, int_y_train, int_y_test, model,
criterion, optimizer, batch_size=32, epochs=100)
print(predict(m, X1_test))
plt.figure(figsize=(10, 5))
plt.subplot(1, 2, 1)
plt.plot(acc, label='XBNET Training Accuracy')
plt.plot(val_ac, label='XBNET Validation Accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
plt.subplot(1, 2, 2)
plt.plot(lo, label='XBNET Training Loss')
plt.plot(val_lo, label='XBNET Validation Loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
"""
"""
# TreeGrad Deep Neural Decision Forest - 83% accuracy
model = TGDClassifier(num_leaves=31, max_depth=-1, learning_rate=0.1, n_estimators=100,
autograd_config={'refit_splits': True})
model.fit(X1_train, int_y_train)
acc = accuracy_score(int_y_test, model.predict(X1_test))
print('TreeGrad Deep Neural Decision Forest accuracy: ', acc)
predictions = model.predict(X1_test)
# predictions = predictions.argmax(axis=1)
predictions = predictions.astype(int).flatten()
predictions = (label_encoder.inverse_transform(predictions))
predictions = pd.DataFrame({'Predicted Values': predictions})
# actual = y_test.argmax(axis=1)
actual = int_y_test.astype(int).flatten()
actual = (label_encoder.inverse_transform(actual))
actual = pd.DataFrame({'Actual Values': actual})
cm = confusion_matrix(actual, predictions)
plt.figure(figsize=(12, 10))
cm = pd.DataFrame(cm, index=[i for i in label_encoder.classes_], columns=[i for i in label_encoder.classes_])
ax = sns.heatmap(cm, linecolor='white', cmap='Blues', linewidth=1, annot=True, fmt='')
bottom, top = ax.get_ylim()
ax.set_ylim(bottom + 0.5, top - 0.5)
plt.title('Confusion Matrix', size=20)
plt.xlabel('Predicted Labels', size=14)
plt.ylabel('Actual Labels', size=14)
plt.savefig('TreeGrad_Confusion_Matrix.png')
plt.show()
clf_report = classification_report(actual, predictions, output_dict=True,
target_names=[i for i in label_encoder.classes_])
sns.heatmap(pd.DataFrame(clf_report).iloc[:, :].T, annot=True, cmap='viridis')
plt.title('Classification Report', size=20)
plt.savefig('TreeGrad_Classification_Report.png')
plt.show()
with open('treegrad_model_save.pkl', 'wb') as f:
pickle.dump(model, f)
with open('treegrad_model_save.pkl', 'rb') as f:
model2 = pickle.load(f)
acc = accuracy_score(int_y_test, model2.predict(X1_test))
print('TreeGrad Deep Neural Decision Forest accuracy from save: ', acc)
"""
# """
model = formnn_cnn(X_train.shape[1], filters=32, lrval=0.003, numclasses=len(label_encoder.classes_),
kernelsize=10, l1reg=0.000001, l2reg=0.000001, dropout=0.6)
model.summary()
if not os.path.isfile(os.path.join(MASTER_DIR, 'FormNN_CNN_Model_Diagram.png')):
plot_model(model, to_file=os.path.join(MASTER_DIR, 'FormNN_CNN_Model_Diagram.png'),
show_shapes=True, show_layer_names=True, expand_nested=True, dpi=300)
history_loss = []
history_val_loss = []
history_accuracy = []
history_val_accuracy = []
num_epochs = 0
"""
# Try predict
model.load_weights('best_form_model_50p.hdf5')
result = model.predict(X_test)
percent_correct = 0
pred_table = pd.DataFrame(columns=["Piece", "Predicted", "Actual"])
X_test_names = np.array(X_test_names)
for i in range(len(result)):
resultlbl = label_encoder.inverse_transform([np.argmax(result[i, :])])
actuallbl = label_encoder.inverse_transform([np.argmax(y_test[i, :])])
pred_table.loc[i] = ([X_test_names[i][2], resultlbl, actuallbl])
percent_correct += 1 if resultlbl == actuallbl else 0
print(pred_table.to_string(index=False))
print("Accuracy: " + str(float(percent_correct/len(result))*100) + "%")
return
"""
# model.load_weights('best_form_model_44p.hdf5')
model.load_weights('best_form_new_model40p.hdf5')
# while True:
for i in range(0, 3000):
# early_stopping = EarlyStopping(monitor='val_loss', patience=5, verbose=5, mode="auto")
checkpoint = ModelCheckpoint("best_form_new_model.hdf5", monitor='val_accuracy', verbose=0,
save_best_only=False, mode='max', save_freq='epoch', save_weights_only=True)
model_history = model.fit(X_train, y_train, batch_size=32, epochs=1, validation_data=(X_test, y_test),
callbacks=[checkpoint]) # , early_stopping epochs=2000 loss hits 0.7
history_loss.append(model_history.history['loss'])
history_val_loss.append(model_history.history['val_loss'])
history_accuracy.append(model_history.history['accuracy'])
history_val_accuracy.append(model_history.history['val_accuracy'])
num_epochs += 1
print("Epochs completed:", num_epochs)
print("\nEvaluating...")
score = model.evaluate(X_test, y_test, verbose=1)
print("Evaluation complete!\n__________Score__________")
print(f"Loss: {score[0]}\tAccuracy: {score[1]}")
feature_vectors_model = keras.models.Model(model.input, model.get_layer('dense').output)
X_ext = feature_vectors_model.predict(X_train)
dtc = tree.DecisionTreeClassifier()
"""
    # More trees perform worse: rfc0 28%, everything else 12-15%
rfc0 = RandomForestClassifier(n_estimators=1)
rfc1 = RandomForestClassifier(n_estimators=10)
rfc2 = RandomForestClassifier(n_estimators=100)
rfc3 = RandomForestClassifier(n_estimators=1000)
rfc4 = RandomForestClassifier(n_estimators=int(np.sqrt(X_train.shape[1])))
rfc5 = RandomForestClassifier(n_estimators=int(X_train.shape[1]/2))
"""
dtc.fit(X_ext, y_train)
X_ext = feature_vectors_model.predict(X_test)
dtc.predict(X_ext)
dtc_score = dtc.score(X_ext, y_test)
print("Deep CNN Decision Tree 2 accuracy:", dtc_score) # ^ 26%, 29%
# if score[1] >= 0.51:
# region EvaluationGraphs
plt.plot(history_loss) # plt.plot(model_history.history['loss'])
plt.plot(history_val_loss) # plt.plot(model_history.history['val_loss'])
plt.title('Model Loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.savefig('Initial_Model_Loss.png')
plt.show()
plt.plot(history_accuracy) # plt.plot(model_history.history['accuracy'])
plt.plot(history_val_accuracy) # plt.plot(model_history.history['val_accuracy'])
plt.title('Model Accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.savefig('Initial_Model_Accuracy.png')
plt.show()
# pd.DataFrame(model_history.history).plot()
# plt.show()
predictions = model.predict(X_test, verbose=1)
predictions = predictions.argmax(axis=1)
predictions = predictions.astype(int).flatten()
predictions = (label_encoder.inverse_transform(predictions))
predictions = | pd.DataFrame({'Predicted Values': predictions}) | pandas.DataFrame |
import pandas as pd
import numpy as np
import multiprocessing
from functools import partial
def _df_split(tup_arg, **kwargs):
split_ind, df_split, df_f_name = tup_arg
return (split_ind, getattr(df_split, df_f_name)(**kwargs))
def df_multicores(df, df_f_name, subset=None, njobs=-1, **kwargs):
'''
    Run a dataframe operation across multiple processes.
    args:
        df: dataframe on which to perform the operation
        df_f_name: name of the dataframe method to apply (looked up via getattr)
        subset: column of the dataframe on which to compute the method
        njobs: number of processes (-1 uses all available CPU cores)
'''
if njobs == -1:
njobs = multiprocessing.cpu_count()
pool = multiprocessing.Pool(processes=njobs-1)
try:
splits = np.array_split(df[subset], njobs)
except ValueError:
splits = np.array_split(df, njobs)
pool_data = [(split_ind, df_split, df_f_name) for split_ind, df_split in enumerate(splits)]
results = pool.map(partial(_df_split, **kwargs), pool_data)
pool.close()
pool.join()
# order results back
results = sorted(results, key=lambda x:x[0])
# concatenate the results in a dataframe
results = | pd.concat([split[1] for split in results]) | pandas.concat |
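
# A minimal usage sketch (hypothetical data; assumes df_multicores returns the concatenated result).
# The __main__ guard is required because multiprocessing re-imports this module in the worker processes.
if __name__ == '__main__':
    demo_df = pd.DataFrame({'text': ['alpha', 'beta', 'gamma', 'delta']})
    # apply len() to the 'text' column in parallel, i.e. Series.apply(func=len) per chunk
    text_lengths = df_multicores(demo_df, 'apply', subset='text', njobs=2, func=len)
    print(text_lengths)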
import logging
import os
import pandas as pd
import sys
from . import settings
# Logging
logging.basicConfig(stream=sys.stdout)
logger = logging.getLogger('avocado')
# Exceptions
class AvocadoException(Exception):
"""The base class for all exceptions raised in avocado."""
pass
def _verify_hdf_chunks(store, keys):
"""Verify that a pandas table that was written to an HDF5 file in chunks
was fully written out.
If successful, this will return normally. Otherwise, it will raise an
exception indicating which chunks missing in the file.
Parameters
----------
store : `pandas.HDFStore`
The HDF5 file to verify.
keys : list
A list of keys to verify in the HDF5 file.
"""
try:
chunk_info = | pd.read_hdf(store, 'chunk_info') | pandas.read_hdf |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.3.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from rdkit import Chem
from rdkit.Chem import AllChem
import argparse
import os
# +
#Load all pre-requisites
from torch.utils.data import DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
from torch.nn.functional import softmax, relu, selu, elu
from torchtext.data import Field, BucketIterator, TabularDataset, Iterator
import torch.nn.init as init
import torch
import torch.nn as nn
import torch.optim as optim
import inspect
import random
import math
import time
from torchtext.datasets import TranslationDataset, Multi30k
from torch.utils.data import DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
from tokeniser import tokenize_drug
from seq2seq import Encoder, Decoder, Seq2Seq, init_weights, count_parameters, train, evaluate, epoch_time
SEED = 123
random.seed(SEED)
np.random.seed(SEED)
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
torch.backends.cudnn.deterministic = True
torch.cuda.is_available()
cudaid = int(0)
DEVICE = torch.device("cuda:%d" % (cudaid) if torch.cuda.is_available() else "cpu")
print(DEVICE)
# -
#Define the src and target for torchtext to process
def run_smiles_generator(test_file):
src = Field(sequential=True,
tokenize = tokenize_drug,
init_token = '<sos>',
eos_token = '<eos>'
)
trg = Field(sequential=True,
tokenize = tokenize_drug,
init_token = '<sos>',
eos_token = '<eos>'
)
#Get the train and test set in torchtext format
datafields = [("src", src), # we won't be needing the id, so we pass in None as the field
("trg", trg)]
train,test = TabularDataset.splits(
path='../data/SMILES_Autoencoder/', train='all_smiles_revised_final.csv',
test=test_file,
format='csv',
skip_header=True,
fields=datafields)
#Split the dataset into train and validation set
train_data,valid_data = train.split(split_ratio=0.99)
print(f"Number of examples: {len(train_data.examples)}")
src.build_vocab(train_data, min_freq = 2)
trg.build_vocab(train_data, min_freq = 2)
#Total no of unique words in our vocabulary
print(f"Unique tokens in source vocabulary: {len(src.vocab)}")
print(f"Unique tokens in target vocabulary: {len(trg.vocab)}")
TRG_PAD_IDX = trg.vocab.stoi[trg.pad_token]
print("Padding Id: ",TRG_PAD_IDX)
#Create the iterator to traverse over test samples for which we need to generate latent space
BATCH_SIZE = 128
(train_iterator, test_iterator) = BucketIterator.splits((train_data,test),
batch_size = BATCH_SIZE,
device = DEVICE,
sort = False,
shuffle = False)
print(src.vocab.stoi)
print(trg.vocab.stoi)
#Define the model once again
INPUT_DIM = len(src.vocab)
OUTPUT_DIM = len(trg.vocab)
ENC_EMB_DIM = 128
DEC_EMB_DIM = 128
HID_DIM = 256
N_LAYERS = 1
ENC_DROPOUT = 0.0
DEC_DROPOUT = 0.0
enc = Encoder(INPUT_DIM, ENC_EMB_DIM, HID_DIM, N_LAYERS, ENC_DROPOUT)
dec = Decoder(OUTPUT_DIM, DEC_EMB_DIM, HID_DIM, N_LAYERS, DEC_DROPOUT)
model = Seq2Seq(enc, dec, device=DEVICE).to(DEVICE)
model.apply(init_weights)
optimizer = optim.Adam(model.parameters())
criterion = nn.CrossEntropyLoss().to(DEVICE)
model.load_state_dict(torch.load('../models/lstm_out/torchtext_checkpoint.pt',map_location=torch.device('cpu')))
#Get latent space for all drugs
model.eval()
epoch_loss = 0
ls_list = []
encode_list = []
decode_list = []
error_list = []
with torch.no_grad():
for j, batch in enumerate(test_iterator):
new_src = batch.src
new_trg = batch.trg
#Get output
outputs = model(new_src, new_trg, 1) #turn on teacher forcing
output = outputs[0]
hidden = outputs[1]
cell_state = outputs[2]
#Get latent space
o1 = torch.argmax(torch.softmax(output,dim=2),dim=2)
h1 = torch.mean(hidden,dim=0).cpu().detach().tolist()
c1 = torch.mean(cell_state,dim=0).cpu().detach().tolist()
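            # hidden/cell have shape (num_layers, batch, hid_dim); averaging over the layer axis
            # (a single layer here) gives one 256-d vector per molecule, saved below as the LS_* columns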
for i in range(len(h1)):
temp_ls = h1[i]
temp_encode = new_trg[:,i].cpu().detach().tolist()
temp_decode = o1[:,i].cpu().detach().tolist()
try:
index_1 = temp_decode.index(1)
except:
index_1 = len(temp_decode)
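                # token id 1 is the <pad> index (printed above as TRG_PAD_IDX); the first pad marks the
                # effective end of the decoded sequence, falling back to the full length if no pad appears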
temp_error = np.array(temp_encode)-np.array(temp_decode)
error = sum(np.absolute(temp_error[1:index_1])>0)/len(temp_error)
error_list.append(error)
ls_list.append(temp_ls)
encode_list.append(temp_encode)
decode_list.append(temp_decode)
output_dim = output.shape[-1]
output = output[1:].view(-1, output_dim)
rev_trg = new_trg[1:].view(-1)
loss = criterion(output, rev_trg)
print("Reconstruction Loss for iteration "+str(j)+" is :"+str(round(loss.item(),3)))
epoch_loss += loss.item()
#Print overall average error
print("Average reconstruction error: ",epoch_loss/len(test_iterator));
torch.cuda.empty_cache()
final_list, only_smiles_list =[],[]
for i in range(len(encode_list)):
temp_encode = encode_list[i]
temp_decode = decode_list[i]
temp_encode_str,temp_decode_str, temp_mol_str, temp_error_str = '','','',''
#Get original string
for j in range(1,len(temp_encode)):
#Break when it sees padding
if (temp_encode[j]==1):
break
#Don't pad end of sentence
if (temp_encode[j]!=3):
temp_encode_str+=src.vocab.itos[temp_encode[j]]
#Get decoded string
for j in range(1,len(temp_decode)):
if (temp_decode[j]==1):
break;
if (temp_decode[j]!=3):
temp_decode_str+=src.vocab.itos[temp_decode[j]]
#m = Chem.MolFromSmiles(temp_decode_str)
#if (m is not None):
# temp_mol_str = '1'
#else:
# temp_mol_str = '0'
#string_list = [temp_encode_str, temp_decode_str, temp_mol_str, str(error_list[i])]
#only_smiles_list.append(string_list)
#string_list_with_ls = string_list + ls_list[i]
#final_list.append(string_list_with_ls)
colids = ['LS_'+str(x) for x in range(len(ls_list[0]))]
final_out_df = pd.DataFrame(ls_list, columns = colids)
return(final_out_df)
if __name__=='__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--input", help = "input filename")
parser.add_argument("--output", help = "output filename")
args = parser.parse_args()
print('Inputfile:',args.input)
print('Outputfile:',args.output)
#Read input file containing uniprot_id, protein sequence, inchikey, smiles, pchembl value
input_df = pd.read_csv("../data/"+args.input,header='infer')
all_sequences = input_df['canonical_smiles'].values.tolist()
temp_df = | pd.DataFrame({'src':all_sequences,'trg':all_sequences}) | pandas.DataFrame |
import pandas as pd
file1 = r'1_search_standard_box_spacer_0_16_greedy.csv'
file2 = r'2_search_specific_box_spacer_0_16_greedy.csv'
file3 = r'3_search_Epsilonproteobacteria_box_spacer_0_16_greedy.csv'
with open(file1, 'r') as f1:
data1 = pd.read_csv(f1)
with open(file2, 'r') as f2:
data2 = pd.read_csv(f2)
with open(file3, 'r') as f3:
data3 = | pd.read_csv(f3) | pandas.read_csv |
# -*- coding:utf-8 -*-
"""
AHMath module.
Project: alphahunter
Author: HJQuant
Description: Asynchronous driven quantitative trading framework
"""
import copy
import collections
import warnings
import math
import numpy as np
import pandas as pd
import statsmodels.api as sm
from scipy.stats import norm
class AHMath(object):
    """ alphahunter commonly used math utility functions
"""
@staticmethod
    def array(num_list):
        """ Convert a list to a numpy array
"""
return np.array(num_list)
@staticmethod
    def multiply(a, b):
        """ Return the product of two numbers; return None if either value is null
"""
if pd.isnull(a) or pd.isnull(b):
return None
else:
return a * b
@staticmethod
    def power(a, b):
        """ a raised to the power b
"""
return math.pow(a, b)
@staticmethod
    def exp(a):
        """ e raised to the power a
"""
return math.exp(a)
@staticmethod
    def expm1(a):
        """ e raised to the power a, minus 1 (i.e. exp(a) - 1)
"""
return math.expm1(a)
@staticmethod
    def log(a):
        """ Natural (base-e) logarithm of a
"""
return math.log(a)
@staticmethod
def log1p(a):
""" log(1 + a)
"""
return math.log1p(a)
@staticmethod
    def sqrt(a):
        """ Square root of a
"""
return math.sqrt(a)
@staticmethod
    def abs(a):
        """ Absolute value of a
"""
return math.fabs(a)
@staticmethod
    def copysign(a, b):
        """ Return a value with the magnitude of a and the sign of b
"""
return math.copysign(a, b)
@staticmethod
    def zeros(a):
        """ numpy array of length a with every element 0
"""
return np.zeros(a)
@staticmethod
    def ones(a):
        """ numpy array of length a with every element 1
"""
return np.ones(a)
@staticmethod
    def max(a):
        """ Return the largest element of a list (nulls ignored); return None if the list is empty or all-null
"""
if (a is None) or (len(a) == 0):
return None
a_array = np.array([i for i in a if pd.notnull(i)])
count = len(a_array)
if count == 0:
return None
else:
return a_array.max()
@staticmethod
    def min(a):
        """ Return the smallest element of a list (nulls ignored); return None if the list is empty or all-null
"""
if (a is None) or (len(a) == 0):
return None
a_array = np.array([i for i in a if pd.notnull(i)])
count = len(a_array)
if count == 0:
return None
else:
return a_array.min()
@staticmethod
    def sum(a):
        """ Return the sum of all elements of a list; return 0.0 if the list is empty or None
"""
if (a is None) or (len(a) == 0):
return 0.0
result = 0.0 if | pd.isnull(a[0]) | pandas.isnull |
# -*- coding: utf-8 -*-
# pylint: disable=W0612,E1101
from datetime import datetime
import operator
import nose
from functools import wraps
import numpy as np
import pandas as pd
from pandas import Series, DataFrame, Index, isnull, notnull, pivot, MultiIndex
from pandas.core.datetools import bday
from pandas.core.nanops import nanall, nanany
from pandas.core.panel import Panel
from pandas.core.series import remove_na
import pandas.core.common as com
from pandas import compat
from pandas.compat import range, lrange, StringIO, OrderedDict, signature
from pandas import SparsePanel
from pandas.util.testing import (assert_panel_equal, assert_frame_equal,
assert_series_equal, assert_almost_equal,
assert_produces_warning, ensure_clean,
assertRaisesRegexp, makeCustomDataframe as
mkdf, makeMixedDataFrame)
import pandas.core.panel as panelm
import pandas.util.testing as tm
def ignore_sparse_panel_future_warning(func):
"""
decorator to ignore FutureWarning if we have a SparsePanel
can be removed when SparsePanel is fully removed
"""
@wraps(func)
def wrapper(self, *args, **kwargs):
if isinstance(self.panel, SparsePanel):
with assert_produces_warning(FutureWarning,
check_stacklevel=False):
return func(self, *args, **kwargs)
else:
return func(self, *args, **kwargs)
return wrapper
class PanelTests(object):
panel = None
def test_pickle(self):
unpickled = self.round_trip_pickle(self.panel)
assert_frame_equal(unpickled['ItemA'], self.panel['ItemA'])
def test_rank(self):
self.assertRaises(NotImplementedError, lambda: self.panel.rank())
def test_cumsum(self):
cumsum = self.panel.cumsum()
assert_frame_equal(cumsum['ItemA'], self.panel['ItemA'].cumsum())
def not_hashable(self):
c_empty = Panel()
c = Panel(Panel([[[1]]]))
self.assertRaises(TypeError, hash, c_empty)
self.assertRaises(TypeError, hash, c)
class SafeForLongAndSparse(object):
_multiprocess_can_split_ = True
def test_repr(self):
repr(self.panel)
@ignore_sparse_panel_future_warning
def test_copy_names(self):
for attr in ('major_axis', 'minor_axis'):
getattr(self.panel, attr).name = None
cp = self.panel.copy()
getattr(cp, attr).name = 'foo'
self.assertIsNone(getattr(self.panel, attr).name)
def test_iter(self):
tm.equalContents(list(self.panel), self.panel.items)
def test_count(self):
f = lambda s: notnull(s).sum()
self._check_stat_op('count', f, obj=self.panel, has_skipna=False)
def test_sum(self):
self._check_stat_op('sum', np.sum)
def test_mean(self):
self._check_stat_op('mean', np.mean)
def test_prod(self):
self._check_stat_op('prod', np.prod)
def test_median(self):
def wrapper(x):
if isnull(x).any():
return np.nan
return np.median(x)
self._check_stat_op('median', wrapper)
def test_min(self):
self._check_stat_op('min', np.min)
def test_max(self):
self._check_stat_op('max', np.max)
def test_skew(self):
try:
from scipy.stats import skew
except ImportError:
raise nose.SkipTest("no scipy.stats.skew")
def this_skew(x):
if len(x) < 3:
return np.nan
return skew(x, bias=False)
self._check_stat_op('skew', this_skew)
# def test_mad(self):
# f = lambda x: np.abs(x - x.mean()).mean()
# self._check_stat_op('mad', f)
def test_var(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.var(x, ddof=1)
self._check_stat_op('var', alt)
def test_std(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1)
self._check_stat_op('std', alt)
def test_sem(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1) / np.sqrt(len(x))
self._check_stat_op('sem', alt)
# def test_skew(self):
# from scipy.stats import skew
# def alt(x):
# if len(x) < 3:
# return np.nan
# return skew(x, bias=False)
# self._check_stat_op('skew', alt)
def _check_stat_op(self, name, alternative, obj=None, has_skipna=True):
if obj is None:
obj = self.panel
# # set some NAs
# obj.ix[5:10] = np.nan
# obj.ix[15:20, -2:] = np.nan
f = getattr(obj, name)
if has_skipna:
def skipna_wrapper(x):
nona = remove_na(x)
if len(nona) == 0:
return np.nan
return alternative(nona)
def wrapper(x):
return alternative(np.asarray(x))
for i in range(obj.ndim):
result = f(axis=i, skipna=False)
assert_frame_equal(result, obj.apply(wrapper, axis=i))
else:
skipna_wrapper = alternative
wrapper = alternative
for i in range(obj.ndim):
result = f(axis=i)
if not tm._incompat_bottleneck_version(name):
assert_frame_equal(result, obj.apply(skipna_wrapper, axis=i))
self.assertRaises(Exception, f, axis=obj.ndim)
# Unimplemented numeric_only parameter.
if 'numeric_only' in signature(f).args:
self.assertRaisesRegexp(NotImplementedError, name, f,
numeric_only=True)
class SafeForSparse(object):
_multiprocess_can_split_ = True
@classmethod
def assert_panel_equal(cls, x, y):
assert_panel_equal(x, y)
def test_get_axis(self):
assert (self.panel._get_axis(0) is self.panel.items)
assert (self.panel._get_axis(1) is self.panel.major_axis)
assert (self.panel._get_axis(2) is self.panel.minor_axis)
def test_set_axis(self):
new_items = Index(np.arange(len(self.panel.items)))
new_major = Index(np.arange(len(self.panel.major_axis)))
new_minor = Index(np.arange(len(self.panel.minor_axis)))
# ensure propagate to potentially prior-cached items too
item = self.panel['ItemA']
self.panel.items = new_items
if hasattr(self.panel, '_item_cache'):
self.assertNotIn('ItemA', self.panel._item_cache)
self.assertIs(self.panel.items, new_items)
# TODO: unused?
item = self.panel[0] # noqa
self.panel.major_axis = new_major
self.assertIs(self.panel[0].index, new_major)
self.assertIs(self.panel.major_axis, new_major)
# TODO: unused?
item = self.panel[0] # noqa
self.panel.minor_axis = new_minor
self.assertIs(self.panel[0].columns, new_minor)
self.assertIs(self.panel.minor_axis, new_minor)
def test_get_axis_number(self):
self.assertEqual(self.panel._get_axis_number('items'), 0)
self.assertEqual(self.panel._get_axis_number('major'), 1)
self.assertEqual(self.panel._get_axis_number('minor'), 2)
def test_get_axis_name(self):
self.assertEqual(self.panel._get_axis_name(0), 'items')
self.assertEqual(self.panel._get_axis_name(1), 'major_axis')
self.assertEqual(self.panel._get_axis_name(2), 'minor_axis')
def test_get_plane_axes(self):
# what to do here?
index, columns = self.panel._get_plane_axes('items')
index, columns = self.panel._get_plane_axes('major_axis')
index, columns = self.panel._get_plane_axes('minor_axis')
index, columns = self.panel._get_plane_axes(0)
@ignore_sparse_panel_future_warning
def test_truncate(self):
dates = self.panel.major_axis
start, end = dates[1], dates[5]
trunced = self.panel.truncate(start, end, axis='major')
expected = self.panel['ItemA'].truncate(start, end)
assert_frame_equal(trunced['ItemA'], expected)
trunced = self.panel.truncate(before=start, axis='major')
expected = self.panel['ItemA'].truncate(before=start)
assert_frame_equal(trunced['ItemA'], expected)
trunced = self.panel.truncate(after=end, axis='major')
expected = self.panel['ItemA'].truncate(after=end)
assert_frame_equal(trunced['ItemA'], expected)
# XXX test other axes
def test_arith(self):
self._test_op(self.panel, operator.add)
self._test_op(self.panel, operator.sub)
self._test_op(self.panel, operator.mul)
self._test_op(self.panel, operator.truediv)
self._test_op(self.panel, operator.floordiv)
self._test_op(self.panel, operator.pow)
self._test_op(self.panel, lambda x, y: y + x)
self._test_op(self.panel, lambda x, y: y - x)
self._test_op(self.panel, lambda x, y: y * x)
self._test_op(self.panel, lambda x, y: y / x)
self._test_op(self.panel, lambda x, y: y ** x)
self._test_op(self.panel, lambda x, y: x + y) # panel + 1
self._test_op(self.panel, lambda x, y: x - y) # panel - 1
self._test_op(self.panel, lambda x, y: x * y) # panel * 1
self._test_op(self.panel, lambda x, y: x / y) # panel / 1
self._test_op(self.panel, lambda x, y: x ** y) # panel ** 1
self.assertRaises(Exception, self.panel.__add__, self.panel['ItemA'])
@staticmethod
def _test_op(panel, op):
result = op(panel, 1)
assert_frame_equal(result['ItemA'], op(panel['ItemA'], 1))
def test_keys(self):
tm.equalContents(list(self.panel.keys()), self.panel.items)
def test_iteritems(self):
# Test panel.iteritems(), aka panel.iteritems()
# just test that it works
for k, v in self.panel.iteritems():
pass
self.assertEqual(len(list(self.panel.iteritems())),
len(self.panel.items))
@ignore_sparse_panel_future_warning
def test_combineFrame(self):
def check_op(op, name):
# items
df = self.panel['ItemA']
func = getattr(self.panel, name)
result = func(df, axis='items')
assert_frame_equal(result['ItemB'], op(self.panel['ItemB'], df))
# major
xs = self.panel.major_xs(self.panel.major_axis[0])
result = func(xs, axis='major')
idx = self.panel.major_axis[1]
assert_frame_equal(result.major_xs(idx),
op(self.panel.major_xs(idx), xs))
# minor
xs = self.panel.minor_xs(self.panel.minor_axis[0])
result = func(xs, axis='minor')
idx = self.panel.minor_axis[1]
assert_frame_equal(result.minor_xs(idx),
op(self.panel.minor_xs(idx), xs))
ops = ['add', 'sub', 'mul', 'truediv', 'floordiv']
if not compat.PY3:
ops.append('div')
# pow, mod not supported for SparsePanel as flex ops (for now)
if not isinstance(self.panel, SparsePanel):
ops.extend(['pow', 'mod'])
else:
idx = self.panel.minor_axis[1]
with assertRaisesRegexp(ValueError, "Simple arithmetic.*scalar"):
self.panel.pow(self.panel.minor_xs(idx), axis='minor')
with assertRaisesRegexp(ValueError, "Simple arithmetic.*scalar"):
self.panel.mod(self.panel.minor_xs(idx), axis='minor')
for op in ops:
try:
check_op(getattr(operator, op), op)
except:
com.pprint_thing("Failing operation: %r" % op)
raise
if compat.PY3:
try:
check_op(operator.truediv, 'div')
except:
com.pprint_thing("Failing operation: %r" % 'div')
raise
@ignore_sparse_panel_future_warning
def test_combinePanel(self):
result = self.panel.add(self.panel)
self.assert_panel_equal(result, self.panel * 2)
@ignore_sparse_panel_future_warning
def test_neg(self):
self.assert_panel_equal(-self.panel, self.panel * -1)
# issue 7692
def test_raise_when_not_implemented(self):
p = Panel(np.arange(3 * 4 * 5).reshape(3, 4, 5),
items=['ItemA', 'ItemB', 'ItemC'],
major_axis=pd.date_range('20130101', periods=4),
minor_axis=list('ABCDE'))
d = p.sum(axis=1).ix[0]
ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'div', 'mod', 'pow']
for op in ops:
with self.assertRaises(NotImplementedError):
getattr(p, op)(d, axis=0)
@ignore_sparse_panel_future_warning
def test_select(self):
p = self.panel
# select items
result = p.select(lambda x: x in ('ItemA', 'ItemC'), axis='items')
expected = p.reindex(items=['ItemA', 'ItemC'])
self.assert_panel_equal(result, expected)
# select major_axis
result = p.select(lambda x: x >= datetime(2000, 1, 15), axis='major')
new_major = p.major_axis[p.major_axis >= datetime(2000, 1, 15)]
expected = p.reindex(major=new_major)
self.assert_panel_equal(result, expected)
# select minor_axis
result = p.select(lambda x: x in ('D', 'A'), axis=2)
expected = p.reindex(minor=['A', 'D'])
self.assert_panel_equal(result, expected)
# corner case, empty thing
result = p.select(lambda x: x in ('foo', ), axis='items')
self.assert_panel_equal(result, p.reindex(items=[]))
def test_get_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
result = self.panel.get_value(item, mjr, mnr)
expected = self.panel[item][mnr][mjr]
assert_almost_equal(result, expected)
@ignore_sparse_panel_future_warning
def test_abs(self):
result = self.panel.abs()
result2 = abs(self.panel)
expected = np.abs(self.panel)
self.assert_panel_equal(result, expected)
self.assert_panel_equal(result2, expected)
df = self.panel['ItemA']
result = df.abs()
result2 = abs(df)
expected = np.abs(df)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
s = df['A']
result = s.abs()
result2 = abs(s)
expected = np.abs(s)
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
self.assertEqual(result.name, 'A')
self.assertEqual(result2.name, 'A')
class CheckIndexing(object):
_multiprocess_can_split_ = True
def test_getitem(self):
self.assertRaises(Exception, self.panel.__getitem__, 'ItemQ')
def test_delitem_and_pop(self):
expected = self.panel['ItemA']
result = self.panel.pop('ItemA')
assert_frame_equal(expected, result)
self.assertNotIn('ItemA', self.panel.items)
del self.panel['ItemB']
self.assertNotIn('ItemB', self.panel.items)
self.assertRaises(Exception, self.panel.__delitem__, 'ItemB')
values = np.empty((3, 3, 3))
values[0] = 0
values[1] = 1
values[2] = 2
panel = Panel(values, lrange(3), lrange(3), lrange(3))
# did we delete the right row?
panelc = panel.copy()
del panelc[0]
assert_frame_equal(panelc[1], panel[1])
assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[1]
assert_frame_equal(panelc[0], panel[0])
assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[2]
assert_frame_equal(panelc[1], panel[1])
assert_frame_equal(panelc[0], panel[0])
def test_setitem(self):
# LongPanel with one item
lp = self.panel.filter(['ItemA', 'ItemB']).to_frame()
with tm.assertRaises(ValueError):
self.panel['ItemE'] = lp
# DataFrame
df = self.panel['ItemA'][2:].filter(items=['A', 'B'])
self.panel['ItemF'] = df
self.panel['ItemE'] = df
df2 = self.panel['ItemF']
assert_frame_equal(df, df2.reindex(index=df.index, columns=df.columns))
# scalar
self.panel['ItemG'] = 1
self.panel['ItemE'] = True
self.assertEqual(self.panel['ItemG'].values.dtype, np.int64)
self.assertEqual(self.panel['ItemE'].values.dtype, np.bool_)
# object dtype
self.panel['ItemQ'] = 'foo'
self.assertEqual(self.panel['ItemQ'].values.dtype, np.object_)
# boolean dtype
self.panel['ItemP'] = self.panel['ItemA'] > 0
self.assertEqual(self.panel['ItemP'].values.dtype, np.bool_)
self.assertRaises(TypeError, self.panel.__setitem__, 'foo',
self.panel.ix[['ItemP']])
# bad shape
p = Panel(np.random.randn(4, 3, 2))
with tm.assertRaisesRegexp(ValueError,
"shape of value must be \(3, 2\), "
"shape of given object was \(4, 2\)"):
p[0] = np.random.randn(4, 2)
def test_setitem_ndarray(self):
from pandas import date_range, datetools
timeidx = date_range(start=datetime(2009, 1, 1),
end=datetime(2009, 12, 31),
freq=datetools.MonthEnd())
lons_coarse = np.linspace(-177.5, 177.5, 72)
lats_coarse = np.linspace(-87.5, 87.5, 36)
P = Panel(items=timeidx, major_axis=lons_coarse,
minor_axis=lats_coarse)
data = np.random.randn(72 * 36).reshape((72, 36))
key = datetime(2009, 2, 28)
P[key] = data
assert_almost_equal(P[key].values, data)
def test_set_minor_major(self):
# GH 11014
df1 = DataFrame(['a', 'a', 'a', np.nan, 'a', np.nan])
df2 = DataFrame([1.0, np.nan, 1.0, np.nan, 1.0, 1.0])
panel = Panel({'Item1': df1, 'Item2': df2})
newminor = notnull(panel.iloc[:, :, 0])
panel.loc[:, :, 'NewMinor'] = newminor
assert_frame_equal(panel.loc[:, :, 'NewMinor'],
newminor.astype(object))
newmajor = notnull(panel.iloc[:, 0, :])
panel.loc[:, 'NewMajor', :] = newmajor
assert_frame_equal(panel.loc[:, 'NewMajor', :],
newmajor.astype(object))
def test_major_xs(self):
ref = self.panel['ItemA']
idx = self.panel.major_axis[5]
xs = self.panel.major_xs(idx)
result = xs['ItemA']
assert_series_equal(result, ref.xs(idx), check_names=False)
self.assertEqual(result.name, 'ItemA')
# not contained
idx = self.panel.major_axis[0] - bday
self.assertRaises(Exception, self.panel.major_xs, idx)
def test_major_xs_mixed(self):
self.panel['ItemD'] = 'foo'
xs = self.panel.major_xs(self.panel.major_axis[0])
self.assertEqual(xs['ItemA'].dtype, np.float64)
self.assertEqual(xs['ItemD'].dtype, np.object_)
def test_minor_xs(self):
ref = self.panel['ItemA']
idx = self.panel.minor_axis[1]
xs = self.panel.minor_xs(idx)
assert_series_equal(xs['ItemA'], ref[idx], check_names=False)
# not contained
self.assertRaises(Exception, self.panel.minor_xs, 'E')
def test_minor_xs_mixed(self):
self.panel['ItemD'] = 'foo'
xs = self.panel.minor_xs('D')
self.assertEqual(xs['ItemA'].dtype, np.float64)
self.assertEqual(xs['ItemD'].dtype, np.object_)
def test_xs(self):
itemA = self.panel.xs('ItemA', axis=0)
expected = self.panel['ItemA']
assert_frame_equal(itemA, expected)
# get a view by default
itemA_view = self.panel.xs('ItemA', axis=0)
itemA_view.values[:] = np.nan
self.assertTrue(np.isnan(self.panel['ItemA'].values).all())
# mixed-type yields a copy
self.panel['strings'] = 'foo'
result = self.panel.xs('D', axis=2)
self.assertIsNotNone(result.is_copy)
def test_getitem_fancy_labels(self):
p = self.panel
items = p.items[[1, 0]]
dates = p.major_axis[::2]
cols = ['D', 'C', 'F']
# all 3 specified
assert_panel_equal(p.ix[items, dates, cols],
p.reindex(items=items, major=dates, minor=cols))
# 2 specified
assert_panel_equal(p.ix[:, dates, cols],
p.reindex(major=dates, minor=cols))
assert_panel_equal(p.ix[items, :, cols],
p.reindex(items=items, minor=cols))
assert_panel_equal(p.ix[items, dates, :],
p.reindex(items=items, major=dates))
# only 1
assert_panel_equal(p.ix[items, :, :], p.reindex(items=items))
assert_panel_equal(p.ix[:, dates, :], p.reindex(major=dates))
assert_panel_equal(p.ix[:, :, cols], p.reindex(minor=cols))
def test_getitem_fancy_slice(self):
pass
def test_getitem_fancy_ints(self):
p = self.panel
# #1603
result = p.ix[:, -1, :]
expected = p.ix[:, p.major_axis[-1], :]
assert_frame_equal(result, expected)
def test_getitem_fancy_xs(self):
p = self.panel
item = 'ItemB'
date = p.major_axis[5]
col = 'C'
# get DataFrame
# item
assert_frame_equal(p.ix[item], p[item])
assert_frame_equal(p.ix[item, :], p[item])
assert_frame_equal(p.ix[item, :, :], p[item])
# major axis, axis=1
assert_frame_equal(p.ix[:, date], p.major_xs(date))
assert_frame_equal(p.ix[:, date, :], p.major_xs(date))
# minor axis, axis=2
assert_frame_equal(p.ix[:, :, 'C'], p.minor_xs('C'))
# get Series
assert_series_equal(p.ix[item, date], p[item].ix[date])
assert_series_equal(p.ix[item, date, :], p[item].ix[date])
assert_series_equal(p.ix[item, :, col], p[item][col])
assert_series_equal(p.ix[:, date, col], p.major_xs(date).ix[col])
def test_getitem_fancy_xs_check_view(self):
item = 'ItemB'
date = self.panel.major_axis[5]
# make sure it's always a view
NS = slice(None, None)
# DataFrames
comp = assert_frame_equal
self._check_view(item, comp)
self._check_view((item, NS), comp)
self._check_view((item, NS, NS), comp)
self._check_view((NS, date), comp)
self._check_view((NS, date, NS), comp)
self._check_view((NS, NS, 'C'), comp)
# Series
comp = assert_series_equal
self._check_view((item, date), comp)
self._check_view((item, date, NS), comp)
self._check_view((item, NS, 'C'), comp)
self._check_view((NS, date, 'C'), comp)
def test_ix_setitem_slice_dataframe(self):
a = Panel(items=[1, 2, 3], major_axis=[11, 22, 33],
minor_axis=[111, 222, 333])
b = DataFrame(np.random.randn(2, 3), index=[111, 333],
columns=[1, 2, 3])
a.ix[:, 22, [111, 333]] = b
assert_frame_equal(a.ix[:, 22, [111, 333]], b)
def test_ix_align(self):
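        # a Series assigned via .ix should be aligned on its index, not by
        # position, regardless of which axis it is written along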
from pandas import Series
b = Series(np.random.randn(10), name=0)
b.sort()
df_orig = Panel(np.random.randn(3, 10, 2))
df = df_orig.copy()
df.ix[0, :, 0] = b
assert_series_equal(df.ix[0, :, 0].reindex(b.index), b)
df = df_orig.swapaxes(0, 1)
df.ix[:, 0, 0] = b
assert_series_equal(df.ix[:, 0, 0].reindex(b.index), b)
df = df_orig.swapaxes(1, 2)
df.ix[0, 0, :] = b
assert_series_equal(df.ix[0, 0, :].reindex(b.index), b)
def test_ix_frame_align(self):
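        # assigning an item's DataFrame back through .ix, .iloc, .loc or []
        # should leave the Panel unchanged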
p_orig = tm.makePanel()
df = p_orig.ix[0].copy()
assert_frame_equal(p_orig['ItemA'], df)
p = p_orig.copy()
p.ix[0, :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.ix[0] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.iloc[0, :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.iloc[0] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.loc['ItemA'] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.loc['ItemA', :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p['ItemA'] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.ix[0, [0, 1, 3, 5], -2:] = df
out = p.ix[0, [0, 1, 3, 5], -2:]
assert_frame_equal(out, df.iloc[[0, 1, 3, 5], [2, 3]])
        # GH 3830: panel assignment by values/frame
for dtype in ['float64', 'int64']:
panel = Panel(np.arange(40).reshape((2, 4, 5)),
items=['a1', 'a2'], dtype=dtype)
df1 = panel.iloc[0]
df2 = panel.iloc[1]
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
# Assignment by Value Passes for 'a2'
panel.loc['a2'] = df1.values
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df1)
# Assignment by DataFrame Ok w/o loc 'a2'
panel['a2'] = df2
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
            # Assignment by DataFrame via .loc for 'a2' previously failed (GH 3830)
panel.loc['a2'] = df2
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
def _check_view(self, indexer, comp):
cp = self.panel.copy()
obj = cp.ix[indexer]
obj.values[:] = 0
self.assertTrue((obj.values == 0).all())
comp(cp.ix[indexer].reindex_like(obj), obj)
def test_logical_with_nas(self):
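        # NaN in one operand propagates through |; filling the NaNs first
        # yields a plain boolean result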
d = Panel({'ItemA': {'a': [np.nan, False]},
'ItemB': {'a': [True, True]}})
result = d['ItemA'] | d['ItemB']
expected = DataFrame({'a': [np.nan, True]})
assert_frame_equal(result, expected)
# this is autodowncasted here
result = d['ItemA'].fillna(False) | d['ItemB']
expected = DataFrame({'a': [True, True]})
assert_frame_equal(result, expected)
def test_neg(self):
# what to do?
assert_panel_equal(-self.panel, -1 * self.panel)
def test_invert(self):
assert_panel_equal(-(self.panel < 0), ~(self.panel < 0))
def test_comparisons(self):
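        # comparison operators work element-wise against an aligned Panel or a
        # scalar and raise for misaligned or mismatched objects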
p1 = tm.makePanel()
p2 = tm.makePanel()
tp = p1.reindex(items=p1.items + ['foo'])
df = p1[p1.items[0]]
def test_comp(func):
# versus same index
result = func(p1, p2)
self.assert_numpy_array_equal(result.values,
func(p1.values, p2.values))
# versus non-indexed same objs
self.assertRaises(Exception, func, p1, tp)
# versus different objs
self.assertRaises(Exception, func, p1, df)
# versus scalar
result3 = func(self.panel, 0)
self.assert_numpy_array_equal(result3.values,
func(self.panel.values, 0))
test_comp(operator.eq)
test_comp(operator.ne)
test_comp(operator.lt)
test_comp(operator.gt)
test_comp(operator.ge)
test_comp(operator.le)
def test_get_value(self):
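        # get_value(item, major, minor) should match chained [] lookups and
        # require an argument for every axis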
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
result = self.panel.get_value(item, mjr, mnr)
expected = self.panel[item][mnr][mjr]
assert_almost_equal(result, expected)
with tm.assertRaisesRegexp(TypeError,
"There must be an argument for each axis"):
self.panel.get_value('a')
def test_set_value(self):
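        # set_value writes in place for existing labels; new labels return an
        # enlarged copy rather than mutating the original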
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
self.panel.set_value(item, mjr, mnr, 1.)
assert_almost_equal(self.panel[item][mnr][mjr], 1.)
# resize
res = self.panel.set_value('ItemE', 'foo', 'bar', 1.5)
tm.assertIsInstance(res, Panel)
self.assertIsNot(res, self.panel)
self.assertEqual(res.get_value('ItemE', 'foo', 'bar'), 1.5)
res3 = self.panel.set_value('ItemE', 'foobar', 'baz', 5)
self.assertTrue(com.is_float_dtype(res3['ItemE'].values))
with tm.assertRaisesRegexp(TypeError,
"There must be an argument for each axis"
" plus the value provided"):
self.panel.set_value('a')
_panel = tm.makePanel()
tm.add_nans(_panel)
class TestPanel(tm.TestCase, PanelTests, CheckIndexing, SafeForLongAndSparse,
SafeForSparse):
_multiprocess_can_split_ = True
@classmethod
def assert_panel_equal(cls, x, y):
assert_panel_equal(x, y)
def setUp(self):
self.panel = _panel.copy()
self.panel.major_axis.name = None
self.panel.minor_axis.name = None
self.panel.items.name = None
def test_panel_warnings(self):
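        # shift(lags=...) is deprecated: it should raise a FutureWarning but
        # behave exactly like shift(periods=...)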
with tm.assert_produces_warning(FutureWarning):
shifted1 = self.panel.shift(lags=1)
with tm.assert_produces_warning(False):
shifted2 = self.panel.shift(periods=1)
tm.assert_panel_equal(shifted1, shifted2)
with tm.assert_produces_warning(False):
shifted3 = self.panel.shift()
tm.assert_panel_equal(shifted1, shifted3)
def test_constructor(self):
# with BlockManager
wp = Panel(self.panel._data)
self.assertIs(wp._data, self.panel._data)
wp = Panel(self.panel._data, copy=True)
self.assertIsNot(wp._data, self.panel._data)
assert_panel_equal(wp, self.panel)
        # strings handled properly
wp = Panel([[['foo', 'foo', 'foo', ], ['foo', 'foo', 'foo']]])
self.assertEqual(wp.values.dtype, np.object_)
vals = self.panel.values
# no copy
wp = Panel(vals)
self.assertIs(wp.values, vals)
# copy
wp = Panel(vals, copy=True)
self.assertIsNot(wp.values, vals)
# GH #8285, test when scalar data is used to construct a Panel
# if dtype is not passed, it should be inferred
value_and_dtype = [(1, 'int64'), (3.14, 'float64'),
('foo', np.object_)]
for (val, dtype) in value_and_dtype:
wp = Panel(val, items=range(2), major_axis=range(3),
minor_axis=range(4))
vals = np.empty((2, 3, 4), dtype=dtype)
vals.fill(val)
assert_panel_equal(wp, Panel(vals, dtype=dtype))
# test the case when dtype is passed
wp = Panel(1, items=range(2), major_axis=range(3), minor_axis=range(4),
dtype='float32')
vals = np.empty((2, 3, 4), dtype='float32')
vals.fill(1)
assert_panel_equal(wp, Panel(vals, dtype='float32'))
def test_constructor_cast(self):
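        # passing dtype to the constructor should cast the underlying values;
        # data that cannot be cast raises ValueError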
zero_filled = self.panel.fillna(0)
casted = Panel(zero_filled._data, dtype=int)
casted2 = Panel(zero_filled.values, dtype=int)
exp_values = zero_filled.values.astype(int)
assert_almost_equal(casted.values, exp_values)
assert_almost_equal(casted2.values, exp_values)
casted = Panel(zero_filled._data, dtype=np.int32)
casted2 = Panel(zero_filled.values, dtype=np.int32)
exp_values = zero_filled.values.astype(np.int32)
assert_almost_equal(casted.values, exp_values)
assert_almost_equal(casted2.values, exp_values)
# can't cast
data = [[['foo', 'bar', 'baz']]]
self.assertRaises(ValueError, Panel, data, dtype=float)
def test_constructor_empty_panel(self):
empty = Panel()
self.assertEqual(len(empty.items), 0)
self.assertEqual(len(empty.major_axis), 0)
self.assertEqual(len(empty.minor_axis), 0)
def test_constructor_observe_dtype(self):
# GH #411
panel = Panel(items=lrange(3), major_axis=lrange(3),
minor_axis=lrange(3), dtype='O')
self.assertEqual(panel.values.dtype, np.object_)
def test_constructor_dtypes(self):
# GH #797
def _check_dtype(panel, dtype):
for i in panel.items:
self.assertEqual(panel[i].values.dtype.name, dtype)
# only nan holding types allowed here
for dtype in ['float64', 'float32', 'object']:
panel = Panel(items=lrange(2), major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(np.array(np.random.randn(2, 10, 5), dtype=dtype),
items=lrange(2),
major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(np.array(np.random.randn(2, 10, 5), dtype='O'),
items=lrange(2),
major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(np.random.randn(2, 10, 5), items=lrange(
2), major_axis=lrange(10), minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
df1 = DataFrame(np.random.randn(2, 5),
index=lrange(2), columns=lrange(5))
df2 = DataFrame(np.random.randn(2, 5),
index=lrange(2), columns=lrange(5))
panel = Panel.from_dict({'a': df1, 'b': df2}, dtype=dtype)
_check_dtype(panel, dtype)
def test_constructor_fails_with_not_3d_input(self):
with tm.assertRaisesRegexp(ValueError,
"The number of dimensions required is 3"):
Panel(np.random.randn(10, 2))
def test_consolidate(self):
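        # adding an item de-consolidates the block manager; consolidate()
        # restores a consolidated state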
self.assertTrue(self.panel._data.is_consolidated())
self.panel['foo'] = 1.
self.assertFalse(self.panel._data.is_consolidated())
panel = self.panel.consolidate()
self.assertTrue(panel._data.is_consolidated())
def test_ctor_dict(self):
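        # from_dict accepts DataFrames or nested dicts of Series; the major
        # axis is the union by default and the intersection with intersect=True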
itema = self.panel['ItemA']
itemb = self.panel['ItemB']
d = {'A': itema, 'B': itemb[5:]}
d2 = {'A': itema._series, 'B': itemb[5:]._series}
d3 = {'A': None,
'B': DataFrame(itemb[5:]._series),
'C': DataFrame(itema._series)}
wp = Panel.from_dict(d)
wp2 = Panel.from_dict(d2) # nested Dict
# TODO: unused?
wp3 = Panel.from_dict(d3) # noqa
self.assertTrue(wp.major_axis.equals(self.panel.major_axis))
assert_panel_equal(wp, wp2)
# intersect
wp = Panel.from_dict(d, intersect=True)
self.assertTrue(wp.major_axis.equals(itemb.index[5:]))
# use constructor
assert_panel_equal( | Panel(d) | pandas.core.panel.Panel |