from functools import partial
from textwrap import dedent
from io import StringIO
import pytest
import pandas.testing as pdtest
import numpy
import pandas
from wqio.utils import misc
from wqio.tests import helpers
@pytest.fixture
def basic_data():
testcsv = """\
Date,A,B,C,D
X,1,2,3,4
Y,5,6,7,8
Z,9,0,1,2
"""
return pandas.read_csv(StringIO(dedent(testcsv)), index_col=["Date"])
@pytest.fixture
def multiindex_df():
index = pandas.MultiIndex.from_product(
[["A", "B", "C"], ["mg/L"]], names=["loc", "units"]
)
return pandas.DataFrame([[1, 2], [3, 4], [5, 6]], index=index, columns=["a", "b"])
class mockDataset(object):
def __init__(self, inflow, outflow):
self.inflow = mockLocation(inflow)
self.outflow = mockLocation(outflow)
class mockLocation(object):
def __init__(self, data):
self.data = data
self.stats = mockSummary(data)
class mockSummary(object):
def __init__(self, data):
self.N = len(data)
self.max = max(data)
self.min = min(data)
self.nonething = None
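# Hedged usage sketch (not part of the original tests): illustrates how the mock
# classes above mirror dataset.inflow.stats.N style attribute access.
def _example_mock_dataset():
    ds = mockDataset([1, 2, 3], [4, 5])
    assert ds.inflow.stats.N == 3       # three inflow observations
    assert ds.outflow.stats.max == 5    # largest outflow value
    return ds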
def test_add_column_level(basic_data):
known_cols = pandas.MultiIndex.from_tuples(
[(u"test", u"A"), (u"test", u"B"), (u"test", u"C"), (u"test", u"D")]
)
newdata = misc.add_column_level(basic_data, "test", "testlevel")
assert known_cols.tolist() == newdata.columns.tolist()
# can only add levels to non-MultiIndex columns
with helpers.raises(ValueError):
misc.add_column_level(newdata, "test2", "testlevel2")
@pytest.mark.parametrize("L1", [0, "loc"])
@pytest.mark.parametrize("L2", [2, "units"])
def test_swap_column_levels(multiindex_df, L1, L2):
columns = pandas.MultiIndex.from_product(
[["A", "B", "C"], ["res", "cen"], ["mg/L"]], names=["loc", "value", "units"]
)
data = numpy.arange(len(columns) * 10).reshape((10, len(columns)))
df = pandas.DataFrame(data, columns=columns).pipe(misc.swap_column_levels, L1, L2)
expected_columns = pandas.MultiIndex.from_product(
[["mg/L"], ["cen", "res"], ["A", "B", "C"]], names=["units", "value", "loc"]
)
pdtest.assert_index_equal(df.columns, expected_columns)
def test_flatten_columns(multiindex_df, basic_data):
expected = ["A_mg/L", "B_mg/L", "C_mg/L"]
flat = misc.flatten_columns(multiindex_df.T)
assert flat.columns.tolist() == expected
assert (
misc.flatten_columns(basic_data).columns.tolist() == basic_data.columns.tolist()
)
def test_expand_columns():
x = numpy.arange(12).reshape(3, 4)
df = pandas.DataFrame(x, columns=("A_a", "A_b", "B_a", "B_c"))
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 9 13:56:10 2018
@author: dugj2403
"""
import os
from glob import glob
import pandas as pd
import numpy as np
from scipy.interpolate import griddata
from copy import deepcopy
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from pandas import DataFrame
from mpl_toolkits.mplot3d import Axes3D
def rename_axis(df,axis_in,axis_out):
"""
Replace axis_in[i] with axis_out[i]
"""
df=deepcopy(df)
for count, column in enumerate(axis_in):
df.rename(columns={column:axis_out[count]}, inplace=True)
return df
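# Hedged usage sketch: the column names below are hypothetical, chosen only to
# illustrate the axis_in -> axis_out mapping performed by rename_axis().
def _example_rename_axis():
    demo = pd.DataFrame({'x [mm]': [0.0, 1.0], 'z [mm]': [2.0, 3.0]})
    # Map raw exported axis names onto the short labels used downstream.
    return rename_axis(demo, ['x [mm]', 'z [mm]'], ['x', 'z'])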
def get_PIV_files(path,**keyword_parameters):
"""Recursively look in directory specified as path and its subdirectories
for PIV .dat files. All '.dat' files are found by looking recursively
in the path, these are returned in the 'full_list'. The treatment
Args:
path (str): Directory to look in.
treatment (str): keyword used in file name to identify treatment
Returns:
treatment_1 (list): containing file paths for treatment 1
treatment_2 (list): containing file paths for treatment identified by "treatment" keyword
"""
result = [y for x in os.walk(path) for y in glob(os.path.join(x[0], '*.dat'))]
return_list=[]
for i in result:
if ('treatment' in keyword_parameters):
treatment=keyword_parameters['treatment']
if treatment in i:
return_list.append(i)
else:
return_list.append(i)
return return_list
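# Hedged usage sketch: 'PIV_data' and 'ramp' are hypothetical directory/treatment
# names, shown only to illustrate the call pattern of get_PIV_files().
def _example_get_PIV_files():
    all_files = get_PIV_files('PIV_data')
    ramp_files = get_PIV_files('PIV_data', treatment='ramp')
    return all_files, ramp_files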
def load_PIV_dataframes(file_list,plane_labels,shot_labels,fieldNames,fieldKeys,**keyword_parameters):
"""Load data contained in .dat into pandas dataframes, neatly organized in a
heirachical dictionary. Must contain 'x', 'y' and 'scalar' data.
Your scalar field should be exported in DaVis as a Tecplot .dat file. Planes
are physically different locations of the laser light sheet. Shots are repetitions
at the same plane. If you only have 1 plane and 1 shot, name your file something
like 'Plane_1_shot_1' anyway, and then use these to populate the plane_labels
and shot_labels lists.
Args:
file_list [list]: list from find_PIV_files() containing file paths
Plane_labels [list]: strings of keywords used in file path to identify 'planes'
shot_labels [list]: strings of keywords used in file path to identify 'shots'
fieldKeys [list]: strings of keywords used in file path to identify each scalar field (e.g. "B0001" - crappy DaVis output)
fieldNames [list]: strings of keywords corresponding to fieldKeys ("B0001" corresponds to 'u')
Returns:
shot_dic {dict}: two-level dictionary. shot_labels are outer keys and fieldNames are inner keys.
each inner key is attributed a dataframe with corresponding dataframe containing scalar data
"""
print(shot_labels)
plane_dic={}
for file in file_list:
for plane in plane_labels:
if plane in file:
if plane not in plane_dic:
plane_dic[plane] = {}
for shot in shot_labels:
if shot in file:
if shot not in plane_dic[plane]:
plane_dic[plane][shot] = {}
for index, fieldKey in enumerate(fieldKeys):
if fieldNames[index] not in plane_dic[plane][shot]:
plane_dic[plane][shot][fieldNames[index]]={}
if fieldKey in file:
df=pd.read_table(file,skiprows=[0,1,2],delim_whitespace=True,names=['x','y',fieldNames[index],'valid'])
print(shot,fieldKey,index,fieldNames[index])
plane_dic[plane][shot][fieldNames[index]]=df
if ('rename_axis' in keyword_parameters):
if keyword_parameters['rename_axis']==True:
axis_in=keyword_parameters['axis_in']
axis_out=keyword_parameters['axis_out']
for plane in plane_dic:
for shot in plane_dic[plane]:
for frame in plane_dic[plane][shot]:
plane_dic[plane][shot][frame]=rename_axis(plane_dic[plane][shot][frame],axis_in,axis_out)
return plane_dic
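# Hedged usage sketch: the plane/shot/field labels are assumptions that follow the
# naming convention described in the docstring, not values from the original project.
def _example_load_PIV_dataframes(file_list):
    dic = load_PIV_dataframes(file_list,
                              plane_labels=['Plane_1'],
                              shot_labels=['shot_1', 'shot_2'],
                              fieldNames=['u'],
                              fieldKeys=['B0001'])
    # Access the dataframe for one plane/shot/scalar combination.
    return dic['Plane_1']['shot_1']['u']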
def process_piv_dictionary(dictionary,scalar_name,plane_labels,plane_positions,**keyword_parameters):
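"""Post-process the nested plane/shot dictionary produced by load_PIV_dataframes.
Depending on the keyword arguments, each plane is averaged across shots
('average'/'axis'/'prefix'), georeferenced ('geoRef'/'geoRefCoords') and/or
cropped ('crop'/'limits_x'/'limits_y'/'limits_z').
Returns a dictionary mapping each plane label to its processed dataframe.
"""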
print("\n")
print("Processing dictionary containing PIV planes and shots:")
return_dict={}
for counter, plane in enumerate(plane_labels):
print("\n")
print("Processing %s" %plane)
if 'average' in keyword_parameters:
if keyword_parameters['average']:
axis=keyword_parameters['axis']
if 'prefix' in keyword_parameters:
prefix=keyword_parameters['prefix']
df=average_PIV_field(dictionary[plane],axis,scalar_name,prefix=prefix)
else:
df=average_PIV_field(dictionary[plane],axis,scalar_name)
if 'geoRef' in keyword_parameters:
if keyword_parameters['geoRef']:
geoRefCoords=keyword_parameters['geoRefCoords']
#get current geoRefCoord
geoRefCoord=[0,0,0]
for i, axis in enumerate(geoRefCoords):
if type(axis) is int:
geoRefCoord[i]=axis
if type(axis) is list:
geoRefCoord[i]=axis[counter]
if type(axis) is float:
geoRefCoord[i]=axis
print("Georeferencing %s with %s" %(plane,str(geoRefCoord)))
try:
#only if 'average'
df=piv.georeference(df,geoRefCoord)
except NameError:
single_shot_ID=list(dictionary[plane].keys())[0]
df=georeference(dictionary[plane][single_shot_ID][scalar_name],geoRefCoord)
if 'crop' in keyword_parameters:
if keyword_parameters['crop']:
try:
limits_x=keyword_parameters['limits_x']
except KeyError:
pass
try:
limits_y=keyword_parameters['limits_y']
except KeyError:
pass
try:
limits_z=keyword_parameters['limits_z']
except KeyError:
pass
if 'limits_x' in keyword_parameters:
if 'limits_y' in keyword_parameters:
print('Cropping %s along x and y' % plane)
df=crop_scalar_field(df,limits_x=limits_x,limits_y=limits_y)
if 'limits_x' in keyword_parameters:
if 'limits_z' in keyword_parameters:
print('Cropping %s along x and z' % plane)
df=crop_scalar_field(df,limits_x=limits_x,limits_z=limits_z)
if 'limits_y' in keyword_parameters:
if 'limits_z' in keyword_parameters:
print('Cropping %s along y and z' % plane)
df=crop_scalar_field(df,limits_y=limits_y,limits_z=limits_z)
return_dict.update({plane : df})
del df
del keyword_parameters
return return_dict
def average_PIV_field(dictionary,axis,field, **keyword_parameters):
"""Average 2D scalar values across multiple 'shots', where a 'shot' represents
a sequence of PIV images lasting a duration of time. Multiple shots make up a PIV
measurement of plane. Example, five 20 s shots make a total of 100 s of PIV. This function
takes the average of each of the five 20 s shots.
Args:
dictionary (str): heirachical dictionary containing 'shot_xx' entries as the master key
each scalar field for that shot is contained within a subdictionary as
a dataframe that can be accessed with a keyword (e.g. "u" or "TKE")
field (str): keyword corresponding to both subdictionary key and header of dataframe
Returns:
df (dataframe): containing data from all shots and the averaged scalar field under
the header "mean_xx", where xx = field
"""
df = pd.DataFrame()
import os
import sys
import urllib.request
import json
import pprint
import pandas as pd
import re
import numpy as np
from konlpy.tag import Okt
from gensim import corpora, models
import time
import pymysql
def __main__():
"""
Runs politic_crawler for each keyword in the list file.
"""
f = open("crawler\list.txt", 'r',encoding='UTF8')
lines = f.readlines()
for line in lines:
input = line.replace("\n","")
politic_crawler(input).main()
class politic_crawler:
def __init__(self, keyword):
"""
method : __init__
explain : Constructor. Connects to the database and initializes each data structure.
"""
self.start = time.time()
try:
self.db = pymysql.connect(host='127.0.0.1',port=3306, user='root', passwd="<PASSWORD>@"
,db='test', charset='utf8')
self.cursor = self.db.cursor()
except Exception as e:
print("db is not connection")
self.keyword = keyword
self.client_id = "agXsz69GSRrBcAIVoLhV"
self.client_secret = "<KEY>"
self.url_list = []
def main(self):
"""
method : main
explain : Runs the news_api function and LDA modeling. Responsible for executing the main features.
"""
jsonobject = self.news_api(self.keyword)
try:
result_keywords = self.lda_modeling(jsonobject)
except Exception as e:
print(e)
result_keywords = "LDA_MODELING_ERROR"
print("LDA_MODELING_ERROR")
time_required = time.time() - self.start
# print("결과", result_keywords)
# print("실행 시간 :", time_required)
# print("url_list : " self.url_list)
# print("입력 키워드 : ", self.keyword)
result_url_list = ""
result_keyword = ""
# print(self.url_list)
# print(result_keywords)
cnt = 0
try:
for i in range(len(result_keywords)-1):
if result_keywords[i] == result_keywords[len(result_keywords)-2]:
result_keyword += result_keywords[i]
else:
result_keyword += result_keywords[i] + ","
cnt += 1
except Exception as e:
print("키워드 담는 문제")
#print(cnt)
try:
for i in range(0,3):
if self.url_list[i] == self.url_list[2]:
result_url_list += self.url_list[i]
else:
result_url_list += self.url_list[i] + ","
except Exception as e:
print("url 담는 문제")
# except Exception as e:
# print(e)
#print(result_url_list, result_keyword, self.keyword)
try:
self.mysql_connection(result_url_list, result_keyword, self.keyword)
except Exception as e:
print("mysql error")
print(e)
def mysql_connection(self, url_list, result_keywords, search_keyword):
'''
method : mysql_connection
explain : Function for using MySQL (creates the table and inserts the results).
'''
create_sql = """
CREATE TABLE SRESULT(
result_keywords VARCHAR(1000) NOT NULL,
input_keyword VARCHAR(1000) NOT NULL,
NEWS_URL VARCHAR(1000) NOT NULL
)
"""
input_sql = """
INSERT INTO SRESULT(result_keywords, input_keyword, NEWS_URL) VALUES (%s,%s,%s)
"""
try:
self.cursor.execute(create_sql)
except Exception as e:
print("테이블이 있습니다.")
try:
self.cursor.execute(input_sql,(result_keywords, search_keyword, url_list))
self.db.commit()
self.cursor.close()
print(result_keywords, self.keyword, url_list)
except Exception as e:
print("수집 안됨", e)
def news_api(self, keywords):
'''
api url : https://developers.naver.com/docs/serviceapi/search/news/news.md#%EB%89%B4%EC%8A%A4
method : Function that calls the News API directly. Returns the descriptions and titles in JSON form.
'''
start = 1
itemdict = {}
title = []
description = []
urldata = []
encText = urllib.parse.quote(self.keyword)
# number of pages to fetch
for i in range(3):
add = str(50 * i + start)
url = "https://openapi.naver.com/v1/search/news?query=" + encText+"&display=50&start=" +add+"&sort=sim"# json 결과
request = urllib.request.Request(url)
request.add_header("X-Naver-Client-Id",self.client_id)
request.add_header("X-Naver-Client-Secret",self.client_secret)
response = urllib.request.urlopen(request)
rescode = response.getcode()
if(rescode==200):
response_body = response.read()
jsonobject = json.loads(response_body.decode('utf-8'))
for i in jsonobject['items']:
title.append(i['title'])
description.append(i['description'])
urldata.append(i['link'])
else:
print("Error Code:" + rescode)
itemdict['title'] = title
itemdict['description'] = description
itemdict['link'] = urldata
# print(itemdict)
return itemdict
def lda_modeling(self, jsonobject):
'''
method : lda_modeling
explain : LDA modeling pipeline
1. stemming (morphological analysis) - konlpy
2. bag-of-words - gensim
3. tf-idf - gensim
4. lda - gensim
'''
dfPapers = pd.DataFrame(columns=['papers'])
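# Hedged sketch of the four-step pipeline named in the docstring above
# (stemming -> bag-of-words -> tf-idf -> LDA). This is illustrative only and is
# not the original implementation; it only uses Okt and gensim calls that the
# module already imports.
def _example_lda_pipeline(documents, num_topics=3):
    okt = Okt()
    # 1. morphological analysis: keep the nouns of each document
    token_lists = [okt.nouns(doc) for doc in documents]
    # 2. bag-of-words representation
    dictionary = corpora.Dictionary(token_lists)
    corpus = [dictionary.doc2bow(tokens) for tokens in token_lists]
    # 3. tf-idf weighting
    tfidf = models.TfidfModel(corpus)
    corpus_tfidf = tfidf[corpus]
    # 4. LDA topic model
    lda = models.LdaModel(corpus_tfidf, num_topics=num_topics, id2word=dictionary)
    return lda.show_topics(num_topics=num_topics)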
"""Utilities for creating data."""
from microbepy.common import constants as cn
from microbepy.common import util
from microbepy.common.isolate import Isolate
import numpy as np
import pandas as pd
import scipy.stats as stats
MAX_STD = 3.0
MUTATION_GROUP_STRING = "--" # Mutation group string
RATE_AVG = 'rate_avg'
YIELD_AVG = 'yield_avg'
RATE_RES = 'rate_res'
YIELD_RES = 'yield_res'
def makeCultureIsolateMutationDF(is_separate_species=True):
"""
Retrieves the data required for making plots
:return pd.DataFrame:
cn.LINE, cn.KEY_CULTURE, KEY_ISOLATE_DVH, KEY_ISOLATE_MMP
cn.KEY_MUTATION, cn.GGENE_ID, cn.GENE_ID, cn.POSITION
cn.RATE, cn.YIELD
"""
query1 = '''
select distinct key_culture, line, key_isolate,
gene_id, ggene_id, position, key_mutation, effect,
rate, yield
from genotype_phenotype
where species_mix = 'B'
and is_an_mutation = 0
'''
df1 = util.readSQL(query1)
if is_separate_species:
# Separate columns for species
del df1[cn.KEY_ISOLATE]
query2 = '''
select distinct key_culture,
key_isolate as key_isolate_dvh,
key_isolate_mmp
from genotype_phenotype,
(select distinct key_isolate as key_isolate_mmp,
line as line_mmp,
key_culture as key_culture_mmp from genotype_phenotype
where species='M'
and line_mmp != 'AN') sub
where species_mix = 'B'
and species = 'D'
and key_culture_mmp = key_culture
and line != 'AN'
'''
df2 = util.readSQL(query2)
sel = [Isolate.create(i).experiment == cn.EXPERIMENT_CI
for i in df2[cn.KEY_ISOLATE_DVH]]
df2 = df2.loc[sel]
df = df1.merge(df2, on=cn.KEY_CULTURE, how='inner')
else:
sel = [Isolate.create(i).experiment == cn.EXPERIMENT_CI
for i in df1[cn.KEY_ISOLATE]]
df1 = df1.loc[sel]
df = df1
df[cn.POSITION] = df[cn.POSITION].apply(lambda v: int(v))
return df
def makeStandardizedResidualsForPairings():
"""
Calculates the standardized residuals from the means of each pairing.
:return pd.DataFrame: cn.KEY_CULTURE, RATE_RES, YIELD_RES
"""
def standardizeDF(df, columns):
for col in columns:
df[col] = (df[col] - df[col].mean()) / df[col].std()
#
columns = [cn.KEY_CULTURE, cn.KEY_ISOLATE_DVH, cn.KEY_ISOLATE_MMP,
cn.RATE, cn.YIELD]
df_full = makeCultureIsolateMutationDF()[columns].copy()
df_full = df_full.drop_duplicates()
df_mean = df_full.groupby(
[cn.KEY_ISOLATE_DVH, cn.KEY_ISOLATE_MMP]).mean()
df_mean = df_mean.rename(columns={cn.RATE: RATE_AVG,
cn.YIELD: YIELD_AVG})
df_merge = df_full.merge(df_mean,
on=[cn.KEY_ISOLATE_DVH, cn.KEY_ISOLATE_MMP], how='inner')
df_merge[RATE_RES] = df_merge[cn.RATE] - df_merge[RATE_AVG]
df_merge[YIELD_RES] = df_merge[cn.YIELD] - df_merge[YIELD_AVG]
df_result = df_merge[[cn.KEY_CULTURE, RATE_RES, YIELD_RES]].copy()
standardizeDF(df_result, [RATE_RES, YIELD_RES])
return df_result
def filterOutlierCultures(df, max_std=MAX_STD):
"""
Removes cultures that have large residuals for the mean
of their pairings.
:param pd.DataFrame df: has column cn.KEY_CULTURE
:return pd.DataFrame:
"""
df_res = makeStandardizedResidualsForPairings()
cultures = []
for _, row in df_res.iterrows():
if ((np.abs(row[RATE_RES]) >= max_std)
or (np.abs(row[YIELD_RES]) >= max_std)):
cultures.append(row[cn.KEY_CULTURE])
# Delete the outlier rows
rows = []
for _, row in df.iterrows():
if not row[cn.KEY_CULTURE] in cultures:
rows.append(row)
return pd.DataFrame(rows)
def makeGausianMixture(means, stds, num_rows, num_extracols,
prob=0.5):
"""
Creates predictor and dependent variable dataframes.
There is a predictor column for each mean and each
extra column. Predictor values are binary and are assigned
randomly. The n-th dependent variable is a Gaussian mixture of the means
that have a binary 1 in their predictor column.
:param list-float means:
:param list-float stds:
:param int num_rows: number of rows in result
:param int num_extracols: number of extra columns that do
not affect the dependent variable
:param float prob: probability of a 1 in df_X
:return pd.DataFrame, pd.DataFrame: df_X, df_y
"""
COL_Y = 'y'
num_cols = len(means) + num_extracols
# construct the predictors
df_X = pd.DataFrame(
stats.bernoulli.rvs(prob, size=(num_rows, num_cols)))
predictor_columns = df_X.columns.tolist()[0:len(means)]
extra_columns = df_X.columns.tolist()[len(means):]
for idx, col in enumerate(predictor_columns):
df_X.rename(columns={col: "X_%2.4f" % means[idx]}, inplace=True)
for idx, col in enumerate(extra_columns):
df_X.rename(columns={col: "E_%d" % idx}, inplace=True)
predictor_columns = df_X.columns.tolist()[0:len(means)]
extra_columns = df_X.columns.tolist()[len(means):]
# Construct the dependent variables
df_y = pd.DataFrame({
COL_Y: np.repeat(0, num_rows)
})
for idx, col in enumerate(predictor_columns):
sel = df_X[col] == 1
values = np.random.normal(means[idx], stds[idx], num_rows)
y_values = [y + v if s else y
for s, v, y in zip(sel, values, df_y[COL_Y])]
df_y[COL_Y] = y_values
#
return df_X, df_y
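# Hedged usage sketch: the means, standard deviations and sizes below are
# illustrative values only.
def _exampleMakeGausianMixture():
    # Two informative predictor columns (centered at 1.0 and 5.0) plus three
    # extra columns that do not influence the dependent variable.
    df_X, df_y = makeGausianMixture([1.0, 5.0], [0.5, 0.5],
                                    num_rows=100, num_extracols=3)
    return df_X, df_y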
def makeNoisyOr(predictor_probs, num_rows, noise_prob):
"""
Creates a dependent variable (y) that is the OR of
predictor variables (x_i) with noise added. The resulting
binary variable has the value of the OR with probability
1 - p_noise and the value of a Bernoulli(0.5) with probability
p_noise. The dependent variable is distributed as a
Bernoulli(0.5). P(y | x_i, p_i) = 1, where p_i is the fraction
of the data in which y_i = x_i.
:param list-float predictor_probs: probability distribution
for the predictor variables.
:param int num_rows: number of rows in result
:param float noise_prob:
:return pd.DataFrame, pd.DataFrame, float: df_X, df_y, score
score - maximum accuracy achievable by a classifier
"""
def concat(mat1, mat2):
if mat1 is None:
return mat2
if mat2 is None:
return mat1
return np.concatenate((mat1, mat2), axis=1)
def randomizeIndex(dfs):
"""
Randomizes a set of dataframes in the same way.
:param list-DataFrame dfs:
:return list-DataFrame:
"""
indices = np.random.permutation(range(len(dfs[0])))
results = []
for df in dfs:
df[cn.INDEX] = indices
df = df.sort_values(cn.INDEX)
del df[cn.INDEX]
df.index = range(len(df))
results.append(df)
return results
#
# Check the inputs
if not np.isclose(np.sum(predictor_probs), 1.0):
msg = "Predictor probabilities must be a distribution."
raise ValueError(msg)
# Constants used
col_y = 'y'
num_cols = len(predictor_probs)
rand_prob = 0.5
num_nonnoise_rows = int(np.round(num_rows*(1 -noise_prob)))
num_noise_rows = num_rows - num_nonnoise_rows
# Construct the dependent variable
ys = stats.bernoulli.rvs(rand_prob, size=(num_rows, 1))
# construct the predictors
columns = ["P_%d" % n for n,_ in enumerate(predictor_probs)]
dfs = []
cur_row = 0
for idx, prob in enumerate(predictor_probs):
cur_num = int(np.round(num_rows*prob*(1 -noise_prob)))
# First block
if idx > 0:
mat1 = stats.bernoulli.rvs(rand_prob, size=(cur_num,
idx))
mat1.reshape(idx, cur_num)
else:
mat1 = None
# Predicator variable equal to y
mat2 = np.array(ys[cur_row:(cur_row+cur_num)])
mat2.reshape(cur_num, 1)
# Third block
if num_cols - idx > 1:
num_cols3 = num_cols - idx - 1
mat3 = stats.bernoulli.rvs(rand_prob, size=(cur_num, num_cols3))
mat3.reshape(num_cols3, cur_num)
else:
mat3 = None
# Assemble this section
mat = concat(mat1, mat2)
mat = concat(mat, mat3)
df = pd.DataFrame(mat, columns=columns)
dfs.append(df)
cur_row += cur_num
# Add the remaining rows
rand_mat = stats.bernoulli.rvs(rand_prob,
size=(num_noise_rows, num_cols))
df = pd.DataFrame(rand_mat, columns=columns)
dfs.append(df)
# Calculate the maximum accuracy of a classifier of these data
# The accuracy is the rand_prob for the rows in which predictor
# values are assigned randomly. The accuracy is 1 where one
# predictor has a value equal to the dependent variable.
score = rand_prob*noise_prob + 1 - noise_prob
#
df_X = pd.concat(dfs, sort=True)
df_y = | pd.DataFrame(ys, columns=[col_y]) | pandas.DataFrame |
import asyncio
import os
import queue
import time
import traceback
from datetime import date, datetime, timedelta
from typing import Dict, List, Optional, Tuple
import pandas as pd
import requests
from alpaca_trade_api.entity import Order as AlpacaOrder
from alpaca_trade_api.rest import REST, URL, Entity
from alpaca_trade_api.stream import Stream
from pytz import timezone
from requests.auth import HTTPBasicAuth
from liualgotrader.common import config
from liualgotrader.common.tlog import tlog
from liualgotrader.common.types import Order, QueueMapper, Trade
from liualgotrader.trading.base import Trader
nyc = timezone("America/New_York")
class AlpacaTrader(Trader):
def __init__(self, qm: QueueMapper = None):
self.market_open: Optional[datetime]
self.market_close: Optional[datetime]
self.alpaca_brokage_api_baseurl = os.getenv(
"ALPACA_BROKER_API_BASEURL", None
)
self.alpaca_brokage_api_key = os.getenv("ALPACA_BROKER_API_KEY", None)
self.alpaca_brokage_api_secret = os.getenv(
"ALPACA_BROKER_API_SECRET", None
)
self.alpaca_rest_client = REST(
base_url=URL(config.alpaca_base_url),
key_id=config.alpaca_api_key,
secret_key=config.alpaca_api_secret,
)
if qm:
self.alpaca_ws_client = Stream(
base_url=URL(config.alpaca_base_url),
key_id=config.alpaca_api_key,
secret_key=config.alpaca_api_secret,
)
if not self.alpaca_ws_client:
raise AssertionError(
"Failed to authenticate Alpaca web_socket client"
)
self.alpaca_ws_client.subscribe_trade_updates(
AlpacaTrader.trade_update_handler
)
self.running_task: Optional[asyncio.Task] = None
now = datetime.now(nyc)
calendar = self.alpaca_rest_client.get_calendar(
start=now.strftime("%Y-%m-%d"), end=now.strftime("%Y-%m-%d")
)[0]
if now.date() >= calendar.date.date():
self.market_open = now.replace(
hour=calendar.open.hour,
minute=calendar.open.minute,
second=0,
microsecond=0,
)
self.market_close = now.replace(
hour=calendar.close.hour,
minute=calendar.close.minute,
second=0,
microsecond=0,
)
else:
self.market_open = self.market_close = None
super().__init__(qm)
async def _is_personal_order_completed(
self, order_id: str
) -> Tuple[Order.EventType, float, float, float]:
alpaca_order = self.alpaca_rest_client.get_order(order_id=order_id)
event = (
Order.EventType.canceled
if alpaca_order.status in ["canceled", "expired", "replaced"]
else Order.EventType.pending
if alpaca_order.status in ["pending_cancel", "pending_replace"]
else Order.EventType.fill
if alpaca_order.status == "filled"
else Order.EventType.partial_fill
if alpaca_order.status == "partially_filled"
else Order.EventType.other
)
return (
event,
float(alpaca_order.filled_avg_price or 0.0),
float(alpaca_order.filled_qty or 0.0),
0.0,
)
async def is_fractionable(self, symbol: str) -> bool:
asset_details = self.alpaca_rest_client.get_asset(symbol)
return asset_details.fractionable
async def _is_brokerage_account_order_completed(
self, order_id: str, external_order_id: Optional[str] = None
) -> Tuple[Order.EventType, float, float, float]:
if not self.alpaca_brokage_api_baseurl:
raise AssertionError(
"order_on_behalf can't be called, if brokerage configs incomplete"
)
endpoint: str = (
f"/v1/trading/accounts/{external_order_id}/orders/{order_id}"
)
tlog(f"_is_brokerage_account_order_completed:{endpoint}")
url: str = self.alpaca_brokage_api_baseurl + endpoint
response = await self._get_request(url)
tlog(f"_is_brokerage_account_order_completed: response: {response}")
event = (
Order.EventType.canceled
if response["status"] in ["canceled", "expired", "replaced"]
else Order.EventType.pending
if response["status"] in ["pending_cancel", "pending_replace"]
else Order.EventType.fill
if response["status"] == "filled"
else Order.EventType.partial_fill
if response["status"] == "partially_filled"
else Order.EventType.other
)
return (
event,
float(response.get("filled_avg_price") or 0.0),
float(response.get("filled_qty") or 0.0),
0.0,
)
async def is_order_completed(
self, order_id: str, external_order_id: Optional[str] = None
) -> Tuple[Order.EventType, float, float, float]:
if not external_order_id:
return await self._is_personal_order_completed(order_id)
return await self._is_brokerage_account_order_completed(
order_id, external_order_id
)
def get_market_schedule(
self,
) -> Tuple[Optional[datetime], Optional[datetime]]:
return self.market_open, self.market_close
def get_trading_days(
self, start_date: date, end_date: date = date.today()
) -> pd.DataFrame:
calendars = self.alpaca_rest_client.get_calendar(
start=str(start_date), end=str(end_date)
)
_df = pd.DataFrame.from_dict([calendar._raw for calendar in calendars])
_df["date"] = | pd.to_datetime(_df.date) | pandas.to_datetime |
#!/usr/bin/env python
import json
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
def load(path):
def read(path):
with open(path) as f:
for line in f:
line = line.strip()
split = line.split('\t', 2)
while len(split) < 3:
split.append("")
time, id, msg = split
time, subject = time.split(' ', 1)
subject = subject[1:-1] # remove brackets
data = dict(time=time, subject=subject, id=id)
if subject == 'LOG':
data['log'] = msg
if msg.startswith("REQ_LATENCY"):
data['req_latency'] = int(msg.split(' ')[1]) // 1000 / 1000
if subject == 'EVENT':
msg = json.loads(msg)
if msg['Type'] != 'container':
continue
attr = msg['Actor']['Attributes']
data['service'] = attr.get('com.docker.swarm.service.name')
data['signal'] = attr.get('signal')
data['status'] = msg['status']
if subject == 'STATS':
msg = json.loads(msg)
preread = msg['preread']
if preread.startswith("0001"):
cpu = 0
else:
dt = (pd.to_datetime(msg['read']) -
pd.to_datetime(preread)
from datetime import datetime, timedelta
import dateutil
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs.ccalendar import DAYS, MONTHS
from pandas._libs.tslibs.period import IncompatibleFrequency
from pandas.compat import lrange, range, zip
import pandas as pd
from pandas import DataFrame, Series, Timestamp
from pandas.core.indexes.base import InvalidIndexError
from pandas.core.indexes.datetimes import date_range
from pandas.core.indexes.period import Period, PeriodIndex, period_range
from pandas.core.resample import _get_period_range_edges
import pandas.util.testing as tm
from pandas.util.testing import (
assert_almost_equal, assert_frame_equal, assert_series_equal)
import pandas.tseries.offsets as offsets
@pytest.fixture()
def _index_factory():
return period_range
@pytest.fixture
def _series_name():
return 'pi'
class TestPeriodIndex(object):
@pytest.mark.parametrize('freq', ['2D', '1H', '2H'])
@pytest.mark.parametrize('kind', ['period', None, 'timestamp'])
def test_asfreq(self, series_and_frame, freq, kind):
# GH 12884, 15944
# make sure .asfreq() returns PeriodIndex (except kind='timestamp')
obj = series_and_frame
if kind == 'timestamp':
expected = obj.to_timestamp().resample(freq).asfreq()
else:
start = obj.index[0].to_timestamp(how='start')
end = (obj.index[-1] + obj.index.freq).to_timestamp(how='start')
new_index = date_range(start=start, end=end, freq=freq,
closed='left')
expected = obj.to_timestamp().reindex(new_index).to_period(freq)
result = obj.resample(freq, kind=kind).asfreq()
assert_almost_equal(result, expected)
def test_asfreq_fill_value(self, series):
# test for fill value during resampling, issue 3715
s = series
new_index = date_range(s.index[0].to_timestamp(how='start'),
(s.index[-1]).to_timestamp(how='start'),
freq='1H')
expected = s.to_timestamp().reindex(new_index, fill_value=4.0)
result = s.resample('1H', kind='timestamp').asfreq(fill_value=4.0)
assert_series_equal(result, expected)
frame = s.to_frame('value')
new_index = date_range(frame.index[0].to_timestamp(how='start'),
(frame.index[-1]).to_timestamp(how='start'),
freq='1H')
expected = frame.to_timestamp().reindex(new_index, fill_value=3.0)
result = frame.resample('1H', kind='timestamp').asfreq(fill_value=3.0)
assert_frame_equal(result, expected)
@pytest.mark.parametrize('freq', ['H', '12H', '2D', 'W'])
@pytest.mark.parametrize('kind', [None, 'period', 'timestamp'])
@pytest.mark.parametrize('kwargs', [dict(on='date'), dict(level='d')])
def test_selection(self, index, freq, kind, kwargs):
# This is a bug, these should be implemented
# GH 14008
rng = np.arange(len(index), dtype=np.int64)
df = DataFrame({'date': index, 'a': rng},
index=pd.MultiIndex.from_arrays([rng, index],
names=['v', 'd']))
msg = ("Resampling from level= or on= selection with a PeriodIndex is"
r" not currently supported, use \.set_index\(\.\.\.\) to"
" explicitly set index")
with pytest.raises(NotImplementedError, match=msg):
df.resample(freq, kind=kind, **kwargs)
@pytest.mark.parametrize('month', MONTHS)
@pytest.mark.parametrize('meth', ['ffill', 'bfill'])
@pytest.mark.parametrize('conv', ['start', 'end'])
@pytest.mark.parametrize('targ', ['D', 'B', 'M'])
def test_annual_upsample_cases(self, targ, conv, meth, month,
simple_period_range_series):
ts = simple_period_range_series(
'1/1/1990', '12/31/1991', freq='A-%s' % month)
result = getattr(ts.resample(targ, convention=conv), meth)()
expected = result.to_timestamp(targ, how=conv)
expected = expected.asfreq(targ, meth).to_period()
assert_series_equal(result, expected)
def test_basic_downsample(self, simple_period_range_series):
ts = simple_period_range_series('1/1/1990', '6/30/1995', freq='M')
result = ts.resample('a-dec').mean()
expected = ts.groupby(ts.index.year).mean()
expected.index = period_range('1/1/1990', '6/30/1995', freq='a-dec')
assert_series_equal(result, expected)
# this is ok
assert_series_equal(ts.resample('a-dec').mean(), result)
assert_series_equal(ts.resample('a').mean(), result)
@pytest.mark.parametrize('rule,expected_error_msg', [
('a-dec', '<YearEnd: month=12>'),
('q-mar', '<QuarterEnd: startingMonth=3>'),
('M', '<MonthEnd>'),
('w-thu', '<Week: weekday=3>')
])
def test_not_subperiod(
self, simple_period_range_series, rule, expected_error_msg):
# These are incompatible period rules for resampling
ts = simple_period_range_series('1/1/1990', '6/30/1995', freq='w-wed')
msg = ("Frequency <Week: weekday=2> cannot be resampled to {}, as they"
" are not sub or super periods").format(expected_error_msg)
with pytest.raises(IncompatibleFrequency, match=msg):
ts.resample(rule).mean()
@pytest.mark.parametrize('freq', ['D', '2D'])
def test_basic_upsample(self, freq, simple_period_range_series):
ts = simple_period_range_series('1/1/1990', '6/30/1995', freq='M')
result = ts.resample('a-dec').mean()
resampled = result.resample(freq, convention='end').ffill()
expected = result.to_timestamp(freq, how='end')
expected = expected.asfreq(freq, 'ffill').to_period(freq)
assert_series_equal(resampled, expected)
def test_upsample_with_limit(self):
rng = period_range('1/1/2000', periods=5, freq='A')
ts = Series(np.random.randn(len(rng)), rng)
result = ts.resample('M', convention='end').ffill(limit=2)
expected = ts.asfreq('M').reindex(result.index, method='ffill',
limit=2)
assert_series_equal(result, expected)
def test_annual_upsample(self, simple_period_range_series):
ts = simple_period_range_series('1/1/1990', '12/31/1995', freq='A-DEC')
df = DataFrame({'a': ts})
rdf = df.resample('D').ffill()
exp = df['a'].resample('D').ffill()
assert_series_equal(rdf['a'], exp)
rng = period_range('2000', '2003', freq='A-DEC')
ts = Series([1, 2, 3, 4], index=rng)
result = ts.resample('M').ffill()
ex_index = period_range('2000-01', '2003-12', freq='M')
expected = ts.asfreq('M', how='start').reindex(ex_index,
method='ffill')
assert_series_equal(result, expected)
@pytest.mark.parametrize('month', MONTHS)
@pytest.mark.parametrize('target', ['D', 'B', 'M'])
@pytest.mark.parametrize('convention', ['start', 'end'])
def test_quarterly_upsample(self, month, target, convention,
simple_period_range_series):
freq = 'Q-{month}'.format(month=month)
ts = simple_period_range_series('1/1/1990', '12/31/1995', freq=freq)
result = ts.resample(target, convention=convention).ffill()
expected = result.to_timestamp(target, how=convention)
expected = expected.asfreq(target, 'ffill').to_period()
assert_series_equal(result, expected)
@pytest.mark.parametrize('target', ['D', 'B'])
@pytest.mark.parametrize('convention', ['start', 'end'])
def test_monthly_upsample(self, target, convention,
simple_period_range_series):
ts = simple_period_range_series('1/1/1990', '12/31/1995', freq='M')
result = ts.resample(target, convention=convention).ffill()
expected = result.to_timestamp(target, how=convention)
expected = expected.asfreq(target, 'ffill').to_period()
assert_series_equal(result, expected)
def test_resample_basic(self):
# GH3609
s = Series(range(100), index=date_range(
'20130101', freq='s', periods=100, name='idx'), dtype='float')
s[10:30] = np.nan
index = PeriodIndex([
Period('2013-01-01 00:00', 'T'),
Period('2013-01-01 00:01', 'T')], name='idx')
expected = Series([34.5, 79.5], index=index)
result = s.to_period().resample('T', kind='period').mean()
assert_series_equal(result, expected)
result2 = s.resample('T', kind='period').mean()
assert_series_equal(result2, expected)
@pytest.mark.parametrize('freq,expected_vals', [('M', [31, 29, 31, 9]),
('2M', [31 + 29, 31 + 9])])
def test_resample_count(self, freq, expected_vals):
# GH12774
series = Series(1, index=pd.period_range(start='2000', periods=100))
result = series.resample(freq).count()
expected_index = pd.period_range(start='2000', freq=freq,
periods=len(expected_vals))
expected = Series(expected_vals, index=expected_index)
assert_series_equal(result, expected)
def test_resample_same_freq(self, resample_method):
# GH12770
series = Series(range(3), index=pd.period_range(
start='2000', periods=3, freq='M'))
expected = series
result = getattr(series.resample('M'), resample_method)()
assert_series_equal(result, expected)
def test_resample_incompat_freq(self):
msg = ("Frequency <MonthEnd> cannot be resampled to <Week: weekday=6>,"
" as they are not sub or super periods")
with pytest.raises(IncompatibleFrequency, match=msg):
Series(range(3), index=pd.period_range(
start='2000', periods=3, freq='M')).resample('W').mean()
def test_with_local_timezone_pytz(self):
# see gh-5430
local_timezone = pytz.timezone('America/Los_Angeles')
start = datetime(year=2013, month=11, day=1, hour=0, minute=0,
tzinfo=pytz.utc)
# 1 day later
end = datetime(year=2013, month=11, day=2, hour=0, minute=0,
tzinfo=pytz.utc)
index = pd.date_range(start, end, freq='H')
series = Series(1, index=index)
series = series.tz_convert(local_timezone)
result = series.resample('D', kind='period').mean()
# Create the expected series
# Index is moved back a day with the timezone conversion from UTC to
# Pacific
expected_index = (pd.period_range(start=start, end=end, freq='D') -
offsets.Day())
expected = Series(1, index=expected_index)
assert_series_equal(result, expected)
def test_resample_with_pytz(self):
# GH 13238
s = Series(2, index=pd.date_range('2017-01-01', periods=48, freq="H",
tz="US/Eastern"))
result = s.resample("D").mean()
expected = Series(2, index=pd.DatetimeIndex(['2017-01-01',
'2017-01-02'],
tz="US/Eastern"))
assert_series_equal(result, expected)
# Especially assert that the timezone is LMT for pytz
assert result.index.tz == pytz.timezone('US/Eastern')
def test_with_local_timezone_dateutil(self):
# see gh-5430
local_timezone = 'dateutil/America/Los_Angeles'
start = datetime(year=2013, month=11, day=1, hour=0, minute=0,
tzinfo=dateutil.tz.tzutc())
# 1 day later
end = datetime(year=2013, month=11, day=2, hour=0, minute=0,
tzinfo=dateutil.tz.tzutc())
index = pd.date_range(start, end, freq='H', name='idx')
series = Series(1, index=index)
series = series.tz_convert(local_timezone)
result = series.resample('D', kind='period').mean()
# Create the expected series
# Index is moved back a day with the timezone conversion from UTC to
# Pacific
expected_index = (pd.period_range(start=start, end=end, freq='D',
name='idx') - offsets.Day())
expected = Series(1, index=expected_index)
assert_series_equal(result, expected)
def test_resample_nonexistent_time_bin_edge(self):
# GH 19375
index = date_range('2017-03-12', '2017-03-12 1:45:00', freq='15T')
s = Series(np.zeros(len(index)), index=index)
expected = s.tz_localize('US/Pacific')
result = expected.resample('900S').mean()
tm.assert_series_equal(result, expected)
# GH 23742
index = date_range(start='2017-10-10', end='2017-10-20', freq='1H')
index = index.tz_localize('UTC').tz_convert('America/Sao_Paulo')
df = DataFrame(data=list(range(len(index))), index=index)
result = df.groupby(pd.Grouper(freq='1D')).count()
expected = date_range(start='2017-10-09', end='2017-10-20', freq='D',
tz="America/Sao_Paulo",
nonexistent='shift_forward', closed='left')
tm.assert_index_equal(result.index, expected)
def test_resample_ambiguous_time_bin_edge(self):
# GH 10117
idx = pd.date_range("2014-10-25 22:00:00", "2014-10-26 00:30:00",
freq="30T", tz="Europe/London")
expected = Series(np.zeros(len(idx)), index=idx)
result = expected.resample('30T').mean()
tm.assert_series_equal(result, expected)
def test_fill_method_and_how_upsample(self):
# GH2073
s = Series(np.arange(9, dtype='int64'),
index=date_range('2010-01-01', periods=9, freq='Q'))
last = s.resample('M').ffill()
both = s.resample('M').ffill().resample('M').last().astype('int64')
assert_series_equal(last, both)
@pytest.mark.parametrize('day', DAYS)
@pytest.mark.parametrize('target', ['D', 'B'])
@pytest.mark.parametrize('convention', ['start', 'end'])
def test_weekly_upsample(self, day, target, convention,
simple_period_range_series):
freq = 'W-{day}'.format(day=day)
ts = simple_period_range_series('1/1/1990', '12/31/1995', freq=freq)
result = ts.resample(target, convention=convention).ffill()
expected = result.to_timestamp(target, how=convention)
expected = expected.asfreq(target, 'ffill').to_period()
assert_series_equal(result, expected)
def test_resample_to_timestamps(self, simple_period_range_series):
ts = simple_period_range_series('1/1/1990', '12/31/1995', freq='M')
result = ts.resample('A-DEC', kind='timestamp').mean()
expected = ts.to_timestamp(how='start').resample('A-DEC').mean()
assert_series_equal(result, expected)
def test_resample_to_quarterly(self, simple_period_range_series):
for month in MONTHS:
ts = simple_period_range_series(
'1990', '1992', freq='A-%s' % month)
quar_ts = ts.resample('Q-%s' % month).ffill()
stamps = ts.to_timestamp('D', how='start')
qdates = period_range(ts.index[0].asfreq('D', 'start'),
ts.index[-1].asfreq('D', 'end'),
freq='Q-%s' % month)
expected = stamps.reindex(qdates.to_timestamp('D', 's'),
method='ffill')
expected.index = qdates
assert_series_equal(quar_ts, expected)
# conforms, but different month
ts = simple_period_range_series('1990', '1992', freq='A-JUN')
for how in ['start', 'end']:
result = ts.resample('Q-MAR', convention=how).ffill()
expected = ts.asfreq('Q-MAR', how=how)
expected = expected.reindex(result.index, method='ffill')
# .to_timestamp('D')
# expected = expected.resample('Q-MAR').ffill()
assert_series_equal(result, expected)
def test_resample_fill_missing(self):
rng = PeriodIndex([2000, 2005, 2007, 2009], freq='A')
s = Series(np.random.randn(4), index=rng)
stamps = s.to_timestamp()
filled = s.resample('A').ffill()
expected = stamps.resample('A').ffill().to_period('A')
assert_series_equal(filled, expected)
def test_cant_fill_missing_dups(self):
rng = PeriodIndex([2000, 2005, 2005, 2007, 2007], freq='A')
s = Series(np.random.randn(5), index=rng)
msg = "Reindexing only valid with uniquely valued Index objects"
with pytest.raises(InvalidIndexError, match=msg):
s.resample('A').ffill()
@pytest.mark.parametrize('freq', ['5min'])
@pytest.mark.parametrize('kind', ['period', None, 'timestamp'])
def test_resample_5minute(self, freq, kind):
rng = period_range('1/1/2000', '1/5/2000', freq='T')
ts = Series(np.random.randn(len(rng)), index=rng)
expected = ts.to_timestamp().resample(freq).mean()
if kind != 'timestamp':
expected = expected.to_period(freq)
result = ts.resample(freq, kind=kind).mean()
assert_series_equal(result, expected)
def test_upsample_daily_business_daily(self, simple_period_range_series):
ts = simple_period_range_series('1/1/2000', '2/1/2000', freq='B')
result = ts.resample('D').asfreq()
expected = ts.asfreq('D').reindex(period_range('1/3/2000', '2/1/2000'))
assert_series_equal(result, expected)
ts = simple_period_range_series('1/1/2000', '2/1/2000')
result = ts.resample('H', convention='s').asfreq()
exp_rng = period_range('1/1/2000', '2/1/2000 23:00', freq='H')
expected = ts.asfreq('H', how='s').reindex(exp_rng)
assert_series_equal(result, expected)
def test_resample_irregular_sparse(self):
dr = date_range(start='1/1/2012', freq='5min', periods=1000)
s = Series(np.array(100), index=dr)
# subset the data.
subset = s[:'2012-01-04 06:55']
result = subset.resample('10min').apply(len)
expected = s.resample('10min').apply(len).loc[result.index]
assert_series_equal(result, expected)
def test_resample_weekly_all_na(self):
rng = date_range('1/1/2000', periods=10, freq='W-WED')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.resample('W-THU').asfreq()
assert result.isna().all()
result = ts.resample('W-THU').asfreq().ffill()[:-1]
expected = ts.asfreq('W-THU').ffill()
assert_series_equal(result, expected)
def test_resample_tz_localized(self):
dr = date_range(start='2012-4-13', end='2012-5-1')
ts = Series(lrange(len(dr)), dr)
ts_utc = ts.tz_localize('UTC')
ts_local = ts_utc.tz_convert('America/Los_Angeles')
result = ts_local.resample('W').mean()
ts_local_naive = ts_local.copy()
ts_local_naive.index = [x.replace(tzinfo=None)
for x in ts_local_naive.index.to_pydatetime()]
exp = ts_local_naive.resample(
'W').mean().tz_localize('America/Los_Angeles')
assert_series_equal(result, exp)
# it works
result = ts_local.resample('D').mean()
# #2245
idx = date_range('2001-09-20 15:59', '2001-09-20 16:00', freq='T',
tz='Australia/Sydney')
s = Series([1, 2], index=idx)
result = s.resample('D', closed='right', label='right').mean()
ex_index = date_range('2001-09-21', periods=1, freq='D',
tz='Australia/Sydney')
expected = Series([1.5], index=ex_index)
assert_series_equal(result, expected)
# for good measure
result = s.resample('D', kind='period').mean()
ex_index = period_range('2001-09-20', periods=1, freq='D')
expected = Series([1.5], index=ex_index)
assert_series_equal(result, expected)
# GH 6397
# comparing an offset that doesn't propagate tz's
rng = date_range('1/1/2011', periods=20000, freq='H')
rng = rng.tz_localize('EST')
ts = DataFrame(index=rng)
ts['first'] = np.random.randn(len(rng))
ts['second'] = np.cumsum(np.random.randn(len(rng)))
expected = DataFrame(
{
'first': ts.resample('A').sum()['first'],
'second': ts.resample('A').mean()['second']},
columns=['first', 'second'])
result = ts.resample(
'A').agg({'first': np.sum,
'second': np.mean}).reindex(columns=['first', 'second'])
assert_frame_equal(result, expected)
def test_closed_left_corner(self):
# #1465
s = Series(np.random.randn(21),
index=date_range(start='1/1/2012 9:30',
freq='1min', periods=21))
s[0] = np.nan
result = s.resample('10min', closed='left', label='right').mean()
exp = s[1:].resample('10min', closed='left', label='right').mean()
assert_series_equal(result, exp)
result = s.resample('10min', closed='left', label='left').mean()
exp = s[1:].resample('10min', closed='left', label='left').mean()
ex_index = date_range(start='1/1/2012 9:30', freq='10min', periods=3)
tm.assert_index_equal(result.index, ex_index)
assert_series_equal(result, exp)
def test_quarterly_resampling(self):
rng = period_range('2000Q1', periods=10, freq='Q-DEC')
ts = Series(np.arange(10), index=rng)
result = ts.resample('A').mean()
exp = ts.to_timestamp().resample('A').mean().to_period()
assert_series_equal(result, exp)
def test_resample_weekly_bug_1726(self):
# 8/6/12 is a Monday
ind = date_range(start="8/6/2012", end="8/26/2012", freq="D")
n = len(ind)
data = [[x] * 5 for x in range(n)]
df = DataFrame(data, columns=['open', 'high', 'low', 'close', 'vol'],
index=ind)
# it works!
df.resample('W-MON', closed='left', label='left').first()
def test_resample_with_dst_time_change(self):
# GH 15549
index = (
pd.DatetimeIndex([1457537600000000000, 1458059600000000000])
.tz_localize("UTC").tz_convert('America/Chicago')
)
df = pd.DataFrame([1, 2], index=index)
result = df.resample('12h', closed='right',
label='right').last().ffill()
expected_index_values = ['2016-03-09 12:00:00-06:00',
'2016-03-10 00:00:00-06:00',
'2016-03-10 12:00:00-06:00',
'2016-03-11 00:00:00-06:00',
'2016-03-11 12:00:00-06:00',
'2016-03-12 00:00:00-06:00',
'2016-03-12 12:00:00-06:00',
'2016-03-13 00:00:00-06:00',
'2016-03-13 13:00:00-05:00',
'2016-03-14 01:00:00-05:00',
'2016-03-14 13:00:00-05:00',
'2016-03-15 01:00:00-05:00',
'2016-03-15 13:00:00-05:00']
index = pd.to_datetime(expected_index_values, utc=True).tz_convert(
'America/Chicago')
expected = pd.DataFrame([1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 2.0], index=index)
assert_frame_equal(result, expected)
def test_resample_bms_2752(self):
# GH2753
foo = Series(index=pd.bdate_range('20000101', '20000201'))
res1 = foo.resample("BMS").mean()
res2 = foo.resample("BMS").mean().resample("B").mean()
assert res1.index[0] == Timestamp('20000103')
assert res1.index[0] == res2.index[0]
# def test_monthly_convention_span(self):
# rng = period_range('2000-01', periods=3, freq='M')
# ts = Series(np.arange(3), index=rng)
# # hacky way to get same thing
# exp_index = period_range('2000-01-01', '2000-03-31', freq='D')
# expected = ts.asfreq('D', how='end').reindex(exp_index)
# expected = expected.fillna(method='bfill')
# result = ts.resample('D', convention='span').mean()
# assert_series_equal(result, expected)
def test_default_right_closed_label(self):
end_freq = ['D', 'Q', 'M', 'D']
end_types = ['M', 'A', 'Q', 'W']
for from_freq, to_freq in zip(end_freq, end_types):
idx = date_range(start='8/15/2012', periods=100, freq=from_freq)
df = DataFrame(np.random.randn(len(idx), 2), idx)
resampled = df.resample(to_freq).mean()
assert_frame_equal(resampled, df.resample(to_freq, closed='right',
label='right').mean())
def test_default_left_closed_label(self):
others = ['MS', 'AS', 'QS', 'D', 'H']
others_freq = ['D', 'Q', 'M', 'H', 'T']
for from_freq, to_freq in zip(others_freq, others):
idx = date_range(start='8/15/2012', periods=100, freq=from_freq)
df = DataFrame(np.random.randn(len(idx), 2), idx)
resampled = df.resample(to_freq).mean()
assert_frame_equal(resampled, df.resample(to_freq, closed='left',
label='left').mean())
def test_all_values_single_bin(self):
# 2070
index = period_range(start="2012-01-01", end="2012-12-31", freq="M")
s = Series(np.random.randn(len(index)), index=index)
result = s.resample("A").mean()
tm.assert_almost_equal(result[0], s.mean())
def test_evenly_divisible_with_no_extra_bins(self):
# 4076
# when the frequency is evenly divisible, sometimes extra bins
df = DataFrame(np.random.randn(9, 3),
index=date_range('2000-1-1', periods=9))
result = df.resample('5D').mean()
expected = pd.concat(
[df.iloc[0:5].mean(), df.iloc[5:].mean()], axis=1).T
expected.index = [Timestamp('2000-1-1'), Timestamp('2000-1-6')]
assert_frame_equal(result, expected)
index = date_range(start='2001-5-4', periods=28)
df = DataFrame(
[{'REST_KEY': 1, 'DLY_TRN_QT': 80, 'DLY_SLS_AMT': 90,
'COOP_DLY_TRN_QT': 30, 'COOP_DLY_SLS_AMT': 20}] * 28 +
[{'REST_KEY': 2, 'DLY_TRN_QT': 70, 'DLY_SLS_AMT': 10,
'COOP_DLY_TRN_QT': 50, 'COOP_DLY_SLS_AMT': 20}] * 28,
index=index.append(index)).sort_index()
index = date_range('2001-5-4', periods=4, freq='7D')
expected = DataFrame(
[{'REST_KEY': 14, 'DLY_TRN_QT': 14, 'DLY_SLS_AMT': 14,
'COOP_DLY_TRN_QT': 14, 'COOP_DLY_SLS_AMT': 14}] * 4,
index=index)
result = df.resample('7D').count()
assert_frame_equal(result, expected)
expected = DataFrame(
[{'REST_KEY': 21, 'DLY_TRN_QT': 1050, 'DLY_SLS_AMT': 700,
'COOP_DLY_TRN_QT': 560, 'COOP_DLY_SLS_AMT': 280}] * 4,
index=index)
result = df.resample('7D').sum()
assert_frame_equal(result, expected)
@pytest.mark.parametrize('kind', ['period', None, 'timestamp'])
@pytest.mark.parametrize('agg_arg', ['mean', {'value': 'mean'}, ['mean']])
def test_loffset_returns_datetimeindex(self, frame, kind, agg_arg):
# make sure passing loffset returns DatetimeIndex in all cases
# basic method taken from Base.test_resample_loffset_arg_type()
df = frame
expected_means = [df.values[i:i + 2].mean()
for i in range(0, len(df.values), 2)]
expected_index = period_range(
df.index[0], periods=len(df.index) / 2, freq='2D')
# loffset coerces PeriodIndex to DateTimeIndex
expected_index = expected_index.to_timestamp()
expected_index += timedelta(hours=2)
expected = DataFrame({'value': expected_means}, index=expected_index)
result_agg = df.resample('2D', loffset='2H', kind=kind).agg(agg_arg)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result_how = df.resample('2D', how=agg_arg, loffset='2H',
kind=kind)
if isinstance(agg_arg, list):
expected.columns = pd.MultiIndex.from_tuples([('value', 'mean')])
assert_frame_equal(result_agg, expected)
assert_frame_equal(result_how, expected)
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
from datetime import datetime, timedelta
from functools import reduce
import pickle
import os
import pymssql
from virgo import market
startDate_default = '20060101'
endDate_default = (datetime.now() + timedelta(days=-1)).strftime('%Y%m%d')
# endDate_default = datetime.now().strftime('%Y%m%d')
indexTickerUnivSR_default = np.array(['000300.SH', '000016.SH', '000905.SH'])
indexTickerNameUnivSR_default = np.array(['沪深300', '上证50', '中证500'])
# Global val
conn243 = pymssql.connect(server='192.168.1.243', user="yuman.hu", password="<PASSWORD>")
conn247 = pymssql.connect(server='192.168.1.247', user="yuman.hu", password="<PASSWORD>")
# daily data download
class dailyQuant(object):
def __init__(self, startDate=startDate_default, endDate=endDate_default,
indexTickerUnivSR=indexTickerUnivSR_default, indexTickerNameUnivSR=indexTickerNameUnivSR_default):
self.startDate = startDate
self.endDate = endDate
self.rawData_path = '../data/rawData/'
self.fundamentalData_path = '../data/fundamentalData/'
self.indexTickerUnivSR = indexTickerUnivSR
self.indexTickerNameUnivSR = indexTickerNameUnivSR
self.tradingDateV, self.timeSeries = self.get_dateData()
self.tickerUnivSR, self.stockTickerUnivSR, self.tickerNameUnivSR, self.stockTickerNameUnivSR, self.tickerUnivTypeR = self.get_tickerData()
def get_dateData(self):
sql = '''
SELECT [tradingday]
FROM [Group_General].[dbo].[TradingDayList]
where tradingday>='20060101'
order by tradingday asc
'''
dateSV = pd.read_sql(sql, conn247)
tradingdays = dateSV.tradingday.unique()
tradingDateV = np.array([x.replace('-', '') for x in tradingdays])
timeSeries = pd.Series(pd.to_datetime(tradingDateV))
pd.Series(tradingDateV).to_csv(self.rawData_path+ 'tradingDateV.csv', index=False)
return tradingDateV, timeSeries
def get_tickerData(self):
# and B.[SecuAbbr] not like '%%ST%%'
# where ChangeDate>='%s'
sql = '''
SELECT A.[ChangeDate],A.[ChangeType],B.[SecuCode],B.[SecuMarket],B.[SecuAbbr]
FROM [JYDB].[dbo].[LC_ListStatus] A
inner join [JYDB].[dbo].SecuMain B
on A.[InnerCode]=B.[InnerCode]
and B.SecuMarket in (83,90)
and B.SecuCategory=1
order by SecuCode asc
'''
dataV = pd.read_sql(sql, conn243)
flagMarket = dataV.SecuMarket == 83
dataV['SecuCode'][flagMarket] = dataV['SecuCode'].map(lambda x: x + '.SH')
dataV['SecuCode'][~flagMarket] = dataV['SecuCode'].map(lambda x: x + '.SZ')
# dataV.ChangeDate = pd.Series([x.strftime('%Y%m%d') for x in dataV.ChangeDate.values])
dataV.ChangeDate = dataV.ChangeDate.map(lambda x: x.strftime('%Y%m%d'))
flagV = np.full(len(dataV), True)
flagList = []
for i in range(len(dataV)):
if dataV.iat[i, 1] == 4:
if dataV.iat[i, 0] < self.tradingDateV[0]:
flagList.append(dataV.iat[i, 2])
for i in range(len(dataV)):
if dataV.iat[i, 2] in flagList:
flagV[i] = False
dataV = dataV[flagV]
stockTickerUnivSR = dataV.SecuCode.unique()
tickerUnivSR = np.append(self.indexTickerUnivSR, stockTickerUnivSR)
stockTickerNameUnivSR = dataV.SecuAbbr.unique()
tickerNameUnivSR = np.append(self.indexTickerNameUnivSR, stockTickerNameUnivSR)
tickerUnivTypeR = np.append(np.full(len(self.indexTickerUnivSR), 3), np.ones(len(dataV)))
pd.DataFrame(self.indexTickerUnivSR).T.to_csv(self.rawData_path+'indexTickerUnivSR.csv', header=False, index=False)
pd.DataFrame(stockTickerUnivSR).T.to_csv(self.rawData_path+'stockTickerUnivSR.csv', header=False, index=False)
pd.DataFrame(tickerUnivSR).T.to_csv(self.rawData_path+'tickerUnivSR.csv', header=False, index=False)
pd.DataFrame(self.indexTickerNameUnivSR).T.to_csv(self.rawData_path+'indexTickerNameUnivSR.csv', header=False, index=False)
pd.DataFrame(stockTickerNameUnivSR).T.to_csv(self.rawData_path+'stockTickerNameUnivSR.csv', header=False, index=False)
pd.DataFrame(tickerNameUnivSR).T.to_csv(self.rawData_path+'tickerNameUnivSR.csv', header=False, index=False)
pd.DataFrame(tickerUnivTypeR).T.to_csv(self.rawData_path+'tickerUnivTypeR.csv', header=False, index=False)
return tickerUnivSR, stockTickerUnivSR, tickerNameUnivSR, stockTickerNameUnivSR, tickerUnivTypeR
def __tradingData(self,tradingDay):
sql = '''
SELECT A.[TradingDay], B.[SecuMarket], B.[SecuCode], A.[PrevClosePrice],
A.[OpenPrice],A.[HighPrice],A.[LowPrice],A.[ClosePrice], A.[TurnoverVolume],A.[TurnoverValue]
FROM [JYDB].[dbo].[QT_DailyQuote] A
inner join [JYDB].[dbo].[SecuMain] B
on A.[InnerCode]=B.[InnerCode]
and B.SecuMarket in (83,90)
and B.SecuCategory=1
where A.tradingday='%s'
''' % tradingDay
dataStock = pd.read_sql_query(sql, conn243)
sql = '''
SELECT A.[TradingDay], B.[SecuMarket], B.[SecuCode], A.[PrevClosePrice],
A.[OpenPrice],A.[HighPrice],A.[LowPrice],A.[ClosePrice], A.[TurnoverVolume],A.[TurnoverValue]
FROM [JYDB].[dbo].[QT_IndexQuote] A
inner join [JYDB].[dbo].[SecuMain] B
on A.[InnerCode]=B.[InnerCode]
and B.SecuMarket in (83,90)
and (B.SecuCode = '000300' or B.SecuCode = '000016' or B.SecuCode = '000905')
and B.SecuCategory=4
where A.tradingday='%s'
''' % tradingDay
dataIndex = pd.read_sql_query(sql, conn243)
dataV = pd.concat([dataIndex,dataStock])
sql = '''
SELECT [TradingDay], [SecuCode], [StockReturns]
FROM [Group_General].[dbo].[DailyQuote]
where tradingday='%s'
''' % tradingDay
dataStock = pd.read_sql_query(sql, conn247)
sql = '''
SELECT A.[TradingDay], B.[SecuCode], A.[ChangePCT]
FROM [JYDB].[dbo].[QT_IndexQuote] A
inner join [JYDB].[dbo].[SecuMain] B
on A.[InnerCode]=B.[InnerCode]
and B.SecuMarket in (83,90)
and (B.SecuCode = '000300' or B.SecuCode = '000016' or B.SecuCode = '000905')
and B.SecuCategory=4
where A.tradingday='%s'
''' % tradingDay
dataIndex = pd.read_sql_query(sql, conn243)
dataIndex.ChangePCT = dataIndex.ChangePCT / 100
dataIndex = dataIndex.rename({'ChangePCT': 'StockReturns'}, axis='columns')
dataR = pd.concat([dataIndex, dataStock])
data = pd.merge(dataV,dataR)
flagMarket = data.SecuMarket==83
data['SecuCode'][flagMarket] = data['SecuCode'].map(lambda x: x + '.SH')
data['SecuCode'][~flagMarket] = data['SecuCode'].map(lambda x: x + '.SZ')
data.TradingDay = data.TradingDay.map(lambda x: x.strftime('%Y%m%d'))
preCloseM = pd.DataFrame(pd.pivot_table(data,values='PrevClosePrice',index='TradingDay',columns='SecuCode'),index=[str(tradingDay)],columns=self.tickerUnivSR)
openM = pd.DataFrame(pd.pivot_table(data,values='OpenPrice',index='TradingDay',columns='SecuCode'),index=[str(tradingDay)],columns=self.tickerUnivSR)
highM = pd.DataFrame(pd.pivot_table(data,values='HighPrice',index='TradingDay',columns='SecuCode'),index=[str(tradingDay)],columns=self.tickerUnivSR)
lowM =pd.DataFrame(pd.pivot_table(data,values='LowPrice',index='TradingDay',columns='SecuCode'),index=[str(tradingDay)],columns=self.tickerUnivSR)
closeM = pd.DataFrame(pd.pivot_table(data,values='ClosePrice',index='TradingDay',columns='SecuCode'),index=[str(tradingDay)],columns=self.tickerUnivSR)
volumeM = pd.DataFrame(pd.pivot_table(data,values='TurnoverVolume',index='TradingDay',columns='SecuCode'),index=[str(tradingDay)],columns=self.tickerUnivSR)
amountM = pd.DataFrame(pd.pivot_table(data,values='TurnoverValue',index='TradingDay',columns='SecuCode'),index=[str(tradingDay)],columns=self.tickerUnivSR)
retM = pd.DataFrame(pd.pivot_table(data,values='StockReturns',index='TradingDay',columns='SecuCode'),index=[str(tradingDay)], columns=self.tickerUnivSR)
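# Adjusting factors: forward-fill ex-dividend-date factors over the trading
# calendar, then keep the row for the current trading day.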
sql = '''
SELECT A.[ExDiviDate], B.[SecuMarket], B.[SecuCode], A.[AdjustingFactor]
FROM [JYDB].[dbo].[QT_AdjustingFactor] A
inner join [JYDB].[dbo].[SecuMain] B
on A.[InnerCode]=B.[InnerCode]
and B.SecuMarket in (83,90)
and B.SecuCategory=1
'''
dataAF = pd.read_sql_query(sql, conn243)
dataAF = dataAF.rename({'ExDiviDate':'TradingDay'},axis=1)
flagMarket = dataAF.SecuMarket == 83
dataAF['SecuCode'][flagMarket] = dataAF['SecuCode'].map(lambda x: x + '.SH')
dataAF['SecuCode'][~flagMarket] = dataAF['SecuCode'].map(lambda x: x + '.SZ')
dataAF.TradingDay = dataAF.TradingDay.map(lambda x: x.strftime('%Y%m%d'))
adjFactorM = pd.pivot_table(dataAF, values='AdjustingFactor', index='TradingDay', columns='SecuCode')
adjFactorM.fillna(method='pad', inplace=True)
adjFactorM = pd.DataFrame(adjFactorM ,index=self.tradingDateV, columns=self.tickerUnivSR)
adjFactorM.fillna(method='pad', inplace=True)
adjFactorM = pd.DataFrame(adjFactorM, index=[str(tradingDay)])
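# Listing status: ChangeType 1 = listed, 4 = delisted; forward-fill the status
# changes to build a 0/1 listed flag per ticker as of the current trading day.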
sql = '''
SELECT A.[ChangeDate],A.[ChangeType],B.[SecuCode],B.[SecuMarket]
FROM [JYDB].[dbo].[LC_ListStatus] A
inner join [JYDB].[dbo].SecuMain B
on A.[InnerCode]=B.[InnerCode]
and B.SecuMarket in (83,90)
and B.SecuCategory=1
where (A.ChangeType = 1 or A.ChangeType = 4)
'''
dataStock = pd.read_sql_query(sql, conn243)
sql = '''
SELECT A.[PubDate],B.[SecuCode],B.[SecuMarket]
FROM [JYDB].[dbo].[LC_IndexBasicInfo] A
inner join [JYDB].[dbo].[SecuMain] B
on A.[IndexCode]=B.[InnerCode]
and B.SecuMarket in (83,90)
and (B.SecuCode = '000300' or B.SecuCode = '000016' or B.SecuCode = '000905')
and B.SecuCategory=4
'''
dataIndex = pd.read_sql_query(sql, conn243)
dataIndex['ChangeType'] = 1
dataIndex = dataIndex.rename({'PubDate': 'ChangeDate'}, axis='columns')
dataV = pd.concat([dataIndex, dataStock])
flagMarket = dataV.SecuMarket == 83
dataV['SecuCode'][flagMarket] = dataV['SecuCode'].map(lambda x: x + '.SH')
dataV['SecuCode'][~flagMarket] = dataV['SecuCode'].map(lambda x: x + '.SZ')
# dataV.ChangeDate = pd.Series([x.strftime('%Y%m%d') for x in dataV.ChangeDate.values])
dataV.ChangeDate = dataV.ChangeDate.map(lambda x: x.strftime('%Y%m%d'))
listedM = pd.pivot_table(dataV, values='ChangeType', index='ChangeDate', columns='SecuCode')
dateTotal = np.union1d(listedM.index.values, [str(tradingDay)])
listedM = pd.DataFrame(listedM, index=dateTotal, columns=self.tickerUnivSR)
listedM[listedM == 4] = 0
listedM.fillna(method='pad', inplace=True)
listedM = pd.DataFrame(listedM,index= [str(tradingDay)])
listedM = listedM.fillna(0)
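# Suspension flags: a ticker is treated as suspended when it has a suspension
# record covering this day or zero turnover; unlisted tickers are also flagged.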
sql = '''
SELECT A.[SuspendDate],A.[ResumptionDate],A.[SuspendTime], A.[ResumptionTime], B.[SecuCode],B.[SecuMarket]
FROM [JYDB].[dbo].[LC_SuspendResumption] A
inner join [JYDB].[dbo].[SecuMain] B
on A.[InnerCode]=B.[InnerCode]
and B.SecuMarket in (83,90)
and B.SecuCategory=1
where A.[SuspendDate] = '%s'
'''%tradingDay
if tradingDay == self.tradingDateV[0]:
sql = sql.replace('A.[SuspendDate] = ','A.[SuspendDate] <= ')
dataSusp = pd.read_sql_query(sql, conn243)
flagMarket = dataSusp.SecuMarket == 83
dataSusp['SecuCode'][flagMarket] = dataSusp['SecuCode'].map(lambda x: x + '.SH')
dataSusp['SecuCode'][~flagMarket] = dataSusp['SecuCode'].map(lambda x: x + '.SZ')
dataSusp.SuspendDate = dataSusp.SuspendDate.map(lambda x: x.strftime('%Y%m%d'))
dataSusp['flag'] = 1
startFlag = pd.pivot_table(dataSusp, values='flag',index='SuspendDate', columns='SecuCode')
try:
startFlag = pd.DataFrame(startFlag, index=[str(tradingDay)], columns=self.tickerUnivSR)
except:
startFlag = pd.DataFrame(index=[str(tradingDay)], columns=self.tickerUnivSR)
endFlag = pd.DataFrame(index=[str(tradingDay)], columns=self.tickerUnivSR)
amount = amountM.fillna(0)
flag = (amount == 0)
endFlag[startFlag == 1] = 1
endFlag[flag] = 1
suspM = endFlag.fillna(0)
suspM[(listedM==0)] = 1
else:
dataSusp = | pd.read_sql_query(sql, conn243) | pandas.read_sql_query |
from datetime import datetime, timedelta
import numpy as np
import pytest
from pandas._libs.tslibs import period as libperiod
import pandas as pd
from pandas import DatetimeIndex, Period, PeriodIndex, Series, notna, period_range
import pandas._testing as tm
class TestGetItem:
def test_ellipsis(self):
# GH#21282
idx = period_range("2011-01-01", "2011-01-31", freq="D", name="idx")
result = idx[...]
assert result.equals(idx)
assert result is not idx
def test_getitem(self):
idx1 = pd.period_range("2011-01-01", "2011-01-31", freq="D", name="idx")
for idx in [idx1]:
result = idx[0]
assert result == pd.Period("2011-01-01", freq="D")
result = idx[-1]
assert result == pd.Period("2011-01-31", freq="D")
result = idx[0:5]
expected = pd.period_range("2011-01-01", "2011-01-05", freq="D", name="idx")
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == "D"
result = idx[0:10:2]
expected = pd.PeriodIndex(
["2011-01-01", "2011-01-03", "2011-01-05", "2011-01-07", "2011-01-09"],
freq="D",
name="idx",
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == "D"
result = idx[-20:-5:3]
expected = pd.PeriodIndex(
["2011-01-12", "2011-01-15", "2011-01-18", "2011-01-21", "2011-01-24"],
freq="D",
name="idx",
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == "D"
result = idx[4::-1]
expected = PeriodIndex(
["2011-01-05", "2011-01-04", "2011-01-03", "2011-01-02", "2011-01-01"],
freq="D",
name="idx",
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == "D"
def test_getitem_index(self):
idx = period_range("2007-01", periods=10, freq="M", name="x")
result = idx[[1, 3, 5]]
exp = pd.PeriodIndex(["2007-02", "2007-04", "2007-06"], freq="M", name="x")
tm.assert_index_equal(result, exp)
result = idx[[True, True, False, False, False, True, True, False, False, False]]
exp = pd.PeriodIndex(
["2007-01", "2007-02", "2007-06", "2007-07"], freq="M", name="x"
)
tm.assert_index_equal(result, exp)
def test_getitem_partial(self):
rng = period_range("2007-01", periods=50, freq="M")
ts = Series(np.random.randn(len(rng)), rng)
with pytest.raises(KeyError, match=r"^'2006'$"):
ts["2006"]
result = ts["2008"]
assert (result.index.year == 2008).all()
result = ts["2008":"2009"]
assert len(result) == 24
result = ts["2008-1":"2009-12"]
assert len(result) == 24
result = ts["2008Q1":"2009Q4"]
assert len(result) == 24
result = ts[:"2009"]
assert len(result) == 36
result = ts["2009":]
assert len(result) == 50 - 24
exp = result
result = ts[24:]
tm.assert_series_equal(exp, result)
ts = ts[10:].append(ts[10:])
msg = "left slice bound for non-unique label: '2008'"
with pytest.raises(KeyError, match=msg):
ts[slice("2008", "2009")]
def test_getitem_datetime(self):
rng = period_range(start="2012-01-01", periods=10, freq="W-MON")
ts = Series(range(len(rng)), index=rng)
dt1 = datetime(2011, 10, 2)
dt4 = datetime(2012, 4, 20)
rs = ts[dt1:dt4]
tm.assert_series_equal(rs, ts)
def test_getitem_nat(self):
idx = pd.PeriodIndex(["2011-01", "NaT", "2011-02"], freq="M")
assert idx[0] == pd.Period("2011-01", freq="M")
assert idx[1] is pd.NaT
s = pd.Series([0, 1, 2], index=idx)
assert s[pd.NaT] == 1
s = pd.Series(idx, index=idx)
assert s[pd.Period("2011-01", freq="M")] == pd.Period("2011-01", freq="M")
assert s[pd.NaT] is pd.NaT
def test_getitem_list_periods(self):
# GH 7710
rng = period_range(start="2012-01-01", periods=10, freq="D")
ts = Series(range(len(rng)), index=rng)
exp = ts.iloc[[1]]
tm.assert_series_equal(ts[[Period("2012-01-02", freq="D")]], exp)
def test_getitem_seconds(self):
# GH#6716
didx = pd.date_range(start="2013/01/01 09:00:00", freq="S", periods=4000)
pidx = period_range(start="2013/01/01 09:00:00", freq="S", periods=4000)
for idx in [didx, pidx]:
# getitem against index should raise ValueError
values = [
"2014",
"2013/02",
"2013/01/02",
"2013/02/01 9H",
"2013/02/01 09:00",
]
for v in values:
# GH7116
# these show deprecations as we are trying
# to slice with non-integer indexers
# with pytest.raises(IndexError):
# idx[v]
continue
s = Series(np.random.rand(len(idx)), index=idx)
tm.assert_series_equal(s["2013/01/01 10:00"], s[3600:3660])
tm.assert_series_equal(s["2013/01/01 9H"], s[:3600])
for d in ["2013/01/01", "2013/01", "2013"]:
tm.assert_series_equal(s[d], s)
def test_getitem_day(self):
# GH#6716
# Confirm DatetimeIndex and PeriodIndex works identically
didx = pd.date_range(start="2013/01/01", freq="D", periods=400)
pidx = period_range(start="2013/01/01", freq="D", periods=400)
for idx in [didx, pidx]:
# getitem against index should raise ValueError
values = [
"2014",
"2013/02",
"2013/01/02",
"2013/02/01 9H",
"2013/02/01 09:00",
]
for v in values:
# GH7116
# these show deprecations as we are trying
# to slice with non-integer indexers
# with pytest.raises(IndexError):
# idx[v]
continue
s = Series(np.random.rand(len(idx)), index=idx)
tm.assert_series_equal(s["2013/01"], s[0:31])
tm.assert_series_equal(s["2013/02"], s[31:59])
tm.assert_series_equal(s["2014"], s[365:])
invalid = ["2013/02/01 9H", "2013/02/01 09:00"]
for v in invalid:
with pytest.raises(KeyError, match=v):
s[v]
class TestWhere:
@pytest.mark.parametrize("klass", [list, tuple, np.array, Series])
def test_where(self, klass):
i = period_range("20130101", periods=5, freq="D")
cond = [True] * len(i)
expected = i
result = i.where(klass(cond))
tm.assert_index_equal(result, expected)
cond = [False] + [True] * (len(i) - 1)
expected = PeriodIndex([pd.NaT] + i[1:].tolist(), freq="D")
result = i.where(klass(cond))
tm.assert_index_equal(result, expected)
def test_where_other(self):
i = period_range("20130101", periods=5, freq="D")
for arr in [np.nan, pd.NaT]:
result = i.where(notna(i), other=np.nan)
expected = i
tm.assert_index_equal(result, expected)
i2 = i.copy()
i2 = pd.PeriodIndex([pd.NaT, pd.NaT] + i[2:].tolist(), freq="D")
result = i.where(notna(i2), i2)
tm.assert_index_equal(result, i2)
i2 = i.copy()
i2 = pd.PeriodIndex([pd.NaT, pd.NaT] + i[2:].tolist(), freq="D")
result = i.where(notna(i2), i2.values)
tm.assert_index_equal(result, i2)
def test_where_invalid_dtypes(self):
pi = period_range("20130101", periods=5, freq="D")
i2 = pi.copy()
i2 = pd.PeriodIndex([pd.NaT, pd.NaT] + pi[2:].tolist(), freq="D")
with pytest.raises(TypeError, match="Where requires matching dtype"):
pi.where(notna(i2), i2.asi8)
with pytest.raises(TypeError, match="Where requires matching dtype"):
pi.where(notna(i2), i2.asi8.view("timedelta64[ns]"))
with pytest.raises(TypeError, match="Where requires matching dtype"):
pi.where(notna(i2), i2.to_timestamp("S"))
class TestTake:
def test_take(self):
# GH#10295
idx1 = pd.period_range("2011-01-01", "2011-01-31", freq="D", name="idx")
for idx in [idx1]:
result = idx.take([0])
assert result == pd.Period("2011-01-01", freq="D")
result = idx.take([5])
assert result == pd.Period("2011-01-06", freq="D")
result = idx.take([0, 1, 2])
expected = pd.period_range("2011-01-01", "2011-01-03", freq="D", name="idx")
tm.assert_index_equal(result, expected)
assert result.freq == "D"
assert result.freq == expected.freq
result = idx.take([0, 2, 4])
expected = pd.PeriodIndex(
["2011-01-01", "2011-01-03", "2011-01-05"], freq="D", name="idx"
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == "D"
result = idx.take([7, 4, 1])
expected = pd.PeriodIndex(
["2011-01-08", "2011-01-05", "2011-01-02"], freq="D", name="idx"
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == "D"
result = idx.take([3, 2, 5])
expected = PeriodIndex(
["2011-01-04", "2011-01-03", "2011-01-06"], freq="D", name="idx"
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == "D"
result = idx.take([-3, 2, 5])
expected = PeriodIndex(
["2011-01-29", "2011-01-03", "2011-01-06"], freq="D", name="idx"
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == "D"
def test_take_misc(self):
index = period_range(start="1/1/10", end="12/31/12", freq="D", name="idx")
expected = PeriodIndex(
[
datetime(2010, 1, 6),
datetime(2010, 1, 7),
datetime(2010, 1, 9),
datetime(2010, 1, 13),
],
freq="D",
name="idx",
)
taken1 = index.take([5, 6, 8, 12])
taken2 = index[[5, 6, 8, 12]]
for taken in [taken1, taken2]:
tm.assert_index_equal(taken, expected)
assert isinstance(taken, PeriodIndex)
assert taken.freq == index.freq
assert taken.name == expected.name
def test_take_fill_value(self):
# GH#12631
idx = pd.PeriodIndex(
["2011-01-01", "2011-02-01", "2011-03-01"], name="xxx", freq="D"
)
result = idx.take(np.array([1, 0, -1]))
expected = pd.PeriodIndex(
["2011-02-01", "2011-01-01", "2011-03-01"], name="xxx", freq="D"
)
tm.assert_index_equal(result, expected)
# fill_value
result = idx.take(np.array([1, 0, -1]), fill_value=True)
expected = pd.PeriodIndex(
["2011-02-01", "2011-01-01", "NaT"], name="xxx", freq="D"
)
tm.assert_index_equal(result, expected)
# allow_fill=False
result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True)
expected = pd.PeriodIndex(
["2011-02-01", "2011-01-01", "2011-03-01"], name="xxx", freq="D"
)
tm.assert_index_equal(result, expected)
msg = (
"When allow_fill=True and fill_value is not None, "
"all indices must be >= -1"
)
with pytest.raises(ValueError, match=msg):
idx.take(np.array([1, 0, -2]), fill_value=True)
with pytest.raises(ValueError, match=msg):
idx.take(np.array([1, 0, -5]), fill_value=True)
msg = "index -5 is out of bounds for( axis 0 with)? size 3"
with pytest.raises(IndexError, match=msg):
idx.take(np.array([1, -5]))
class TestIndexing:
def test_get_loc_msg(self):
idx = period_range("2000-1-1", freq="A", periods=10)
bad_period = Period("2012", "A")
with pytest.raises(KeyError, match=r"^Period\('2012', 'A-DEC'\)$"):
idx.get_loc(bad_period)
try:
idx.get_loc(bad_period)
except KeyError as inst:
assert inst.args[0] == bad_period
def test_get_loc_nat(self):
didx = DatetimeIndex(["2011-01-01", "NaT", "2011-01-03"])
pidx = PeriodIndex(["2011-01-01", "NaT", "2011-01-03"], freq="M")
# check DatetimeIndex compat
for idx in [didx, pidx]:
assert idx.get_loc(pd.NaT) == 1
assert idx.get_loc(None) == 1
assert idx.get_loc(float("nan")) == 1
assert idx.get_loc(np.nan) == 1
def test_get_loc(self):
# GH 17717
p0 = pd.Period("2017-09-01")
p1 = pd.Period("2017-09-02")
p2 = pd.Period("2017-09-03")
# get the location of p1/p2 from
# monotonic increasing PeriodIndex with non-duplicate
idx0 = pd.PeriodIndex([p0, p1, p2])
expected_idx1_p1 = 1
expected_idx1_p2 = 2
assert idx0.get_loc(p1) == expected_idx1_p1
assert idx0.get_loc(str(p1)) == expected_idx1_p1
assert idx0.get_loc(p2) == expected_idx1_p2
assert idx0.get_loc(str(p2)) == expected_idx1_p2
msg = "Cannot interpret 'foo' as period"
with pytest.raises(KeyError, match=msg):
idx0.get_loc("foo")
with pytest.raises(KeyError, match=r"^1\.1$"):
idx0.get_loc(1.1)
msg = (
r"'PeriodIndex\(\['2017-09-01', '2017-09-02', '2017-09-03'\],"
r" dtype='period\[D\]', freq='D'\)' is an invalid key"
)
with pytest.raises(TypeError, match=msg):
idx0.get_loc(idx0)
# get the location of p1/p2 from
# monotonic increasing PeriodIndex with duplicate
idx1 = pd.PeriodIndex([p1, p1, p2])
expected_idx1_p1 = slice(0, 2)
expected_idx1_p2 = 2
assert idx1.get_loc(p1) == expected_idx1_p1
assert idx1.get_loc(str(p1)) == expected_idx1_p1
assert idx1.get_loc(p2) == expected_idx1_p2
assert idx1.get_loc(str(p2)) == expected_idx1_p2
msg = "Cannot interpret 'foo' as period"
with pytest.raises(KeyError, match=msg):
idx1.get_loc("foo")
with pytest.raises(KeyError, match=r"^1\.1$"):
idx1.get_loc(1.1)
msg = (
r"'PeriodIndex\(\['2017-09-02', '2017-09-02', '2017-09-03'\],"
r" dtype='period\[D\]', freq='D'\)' is an invalid key"
)
with pytest.raises(TypeError, match=msg):
idx1.get_loc(idx1)
# get the location of p1/p2 from
# non-monotonic increasing/decreasing PeriodIndex with duplicate
idx2 = pd.PeriodIndex([p2, p1, p2])
expected_idx2_p1 = 1
expected_idx2_p2 = np.array([True, False, True])
assert idx2.get_loc(p1) == expected_idx2_p1
assert idx2.get_loc(str(p1)) == expected_idx2_p1
tm.assert_numpy_array_equal(idx2.get_loc(p2), expected_idx2_p2)
tm.assert_numpy_array_equal(idx2.get_loc(str(p2)), expected_idx2_p2)
def test_is_monotonic_increasing(self):
# GH 17717
p0 = pd.Period("2017-09-01")
p1 = pd.Period("2017-09-02")
p2 = pd.Period("2017-09-03")
idx_inc0 = pd.PeriodIndex([p0, p1, p2])
idx_inc1 = pd.PeriodIndex([p0, p1, p1])
idx_dec0 = pd.PeriodIndex([p2, p1, p0])
idx_dec1 = pd.PeriodIndex([p2, p1, p1])
idx = pd.PeriodIndex([p1, p2, p0])
assert idx_inc0.is_monotonic_increasing is True
assert idx_inc1.is_monotonic_increasing is True
assert idx_dec0.is_monotonic_increasing is False
assert idx_dec1.is_monotonic_increasing is False
assert idx.is_monotonic_increasing is False
def test_is_monotonic_decreasing(self):
# GH 17717
p0 = pd.Period("2017-09-01")
p1 = pd.Period("2017-09-02")
p2 = pd.Period("2017-09-03")
idx_inc0 = pd.PeriodIndex([p0, p1, p2])
idx_inc1 = pd.PeriodIndex([p0, p1, p1])
idx_dec0 = pd.PeriodIndex([p2, p1, p0])
idx_dec1 = pd.PeriodIndex([p2, p1, p1])
idx = pd.PeriodIndex([p1, p2, p0])
assert idx_inc0.is_monotonic_decreasing is False
assert idx_inc1.is_monotonic_decreasing is False
assert idx_dec0.is_monotonic_decreasing is True
assert idx_dec1.is_monotonic_decreasing is True
assert idx.is_monotonic_decreasing is False
def test_contains(self):
# GH 17717
p0 = pd.Period("2017-09-01")
p1 = pd.Period("2017-09-02")
p2 = pd.Period("2017-09-03")
p3 = pd.Period("2017-09-04")
ps0 = [p0, p1, p2]
idx0 = pd.PeriodIndex(ps0)
for p in ps0:
assert p in idx0
assert str(p) in idx0
assert "2017-09-01 00:00:01" in idx0
assert "2017-09" in idx0
assert p3 not in idx0
def test_get_value(self):
# GH 17717
p0 = pd.Period("2017-09-01")
p1 = pd.Period("2017-09-02")
p2 = pd.Period("2017-09-03")
idx0 = pd.PeriodIndex([p0, p1, p2])
input0 = np.array([1, 2, 3])
expected0 = 2
result0 = idx0.get_value(input0, p1)
assert result0 == expected0
idx1 = pd.PeriodIndex([p1, p1, p2])
input1 = np.array([1, 2, 3])
expected1 = np.array([1, 2])
result1 = idx1.get_value(input1, p1)
tm.assert_numpy_array_equal(result1, expected1)
idx2 = pd.PeriodIndex([p1, p2, p1])
input2 = np.array([1, 2, 3])
expected2 = np.array([1, 3])
result2 = idx2.get_value(input2, p1)
tm.assert_numpy_array_equal(result2, expected2)
def test_get_indexer(self):
# GH 17717
p1 = pd.Period("2017-09-01")
p2 = pd.Period("2017-09-04")
p3 = pd.Period("2017-09-07")
tp0 = pd.Period("2017-08-31")
tp1 = pd.Period("2017-09-02")
tp2 = pd.Period("2017-09-05")
tp3 = pd.Period("2017-09-09")
idx = pd.PeriodIndex([p1, p2, p3])
tm.assert_numpy_array_equal(
idx.get_indexer(idx), np.array([0, 1, 2], dtype=np.intp)
)
target = pd.PeriodIndex([tp0, tp1, tp2, tp3])
tm.assert_numpy_array_equal(
idx.get_indexer(target, "pad"), np.array([-1, 0, 1, 2], dtype=np.intp)
)
tm.assert_numpy_array_equal(
idx.get_indexer(target, "backfill"), np.array([0, 1, 2, -1], dtype=np.intp)
)
tm.assert_numpy_array_equal(
idx.get_indexer(target, "nearest"), np.array([0, 0, 1, 2], dtype=np.intp)
)
res = idx.get_indexer(target, "nearest", tolerance=pd.Timedelta("1 day"))
tm.assert_numpy_array_equal(res, np.array([0, 0, 1, -1], dtype=np.intp))
def test_get_indexer_mismatched_dtype(self):
# Check that we return all -1s and do not raise or cast incorrectly
dti = pd.date_range("2016-01-01", periods=3)
pi = dti.to_period("D")
pi2 = dti.to_period("W")
expected = np.array([-1, -1, -1], dtype=np.intp)
result = pi.get_indexer(dti)
tm.assert_numpy_array_equal(result, expected)
# This should work in both directions
result = dti.get_indexer(pi)
tm.assert_numpy_array_equal(result, expected)
result = pi.get_indexer(pi2)
tm.assert_numpy_array_equal(result, expected)
# We expect the same from get_indexer_non_unique
result = pi.get_indexer_non_unique(dti)[0]
tm.assert_numpy_array_equal(result, expected)
result = dti.get_indexer_non_unique(pi)[0]
tm.assert_numpy_array_equal(result, expected)
result = pi.get_indexer_non_unique(pi2)[0]
tm.assert_numpy_array_equal(result, expected)
def test_get_indexer_non_unique(self):
# GH 17717
p1 = pd.Period("2017-09-02")
p2 = pd.Period("2017-09-03")
p3 = pd.Period("2017-09-04")
p4 = pd.Period("2017-09-05")
idx1 = pd.PeriodIndex([p1, p2, p1])
idx2 = | pd.PeriodIndex([p2, p1, p3, p4]) | pandas.PeriodIndex |
import logging
import os
from pathlib import Path
import click
import pandas as pd
from scipy import stats
from tqdm import tqdm
logging.basicConfig(level=logging.INFO)
CORRECT_NER_ENTAILS = "Entails"
CORRECT_NER_NOT_ENTAILS = "Not Entails/Error"
CORRECT_NER_VALS = [CORRECT_NER_ENTAILS, CORRECT_NER_NOT_ENTAILS]
AGG_BY_SAFE_FIRST = [
"pmid",
"input",
"premise",
"gold_label",
"hypothesis",
"proba_entails",
"hypothesis_food",
"proba_ambiguity",
"proba_not_entails",
"hypothesis_chemical",
"foodb_food_name",
"foodb_food_id",
"foodb_chemical_name",
"foodb_chemical_id",
]
def calculate_annotator_agreement_matrix(df: pd.DataFrame):
pass
# TODO
def first(x):
# return x.values[0]
return None
def safe_first(x):
if not (len(x.unique()) == 1 or x.isna().all()):
print(x)
raise ValueError
return x.values[0]
def match_food_by_name(hypothesis_food: str, foods: pd.DataFrame):
food_match = foods[
foods.name.str.lower() == hypothesis_food.lower()
] # try exact match by lowercase name
if len(food_match) > 0:
food_match = food_match.iloc[0, :]
return food_match["name"], food_match.id
return None, None
def match_food_by_taxid(hypothesis_food_id: str, foods: pd.DataFrame):
if hypothesis_food_id is not None and str(hypothesis_food_id) != "nan":
food_match = foods[
foods.ncbi_taxonomy_id.astype(str) == str(int(hypothesis_food_id))
]
if len(food_match) > 0:
food_match = food_match.iloc[0, :]
return food_match["name"], food_match.id
return None, None
def match_food(row, foods):
hypothesis_food = row["hypothesis_food"]
hypothesis_food_id = row["hypothesis_food_id"]
# we prefer id-based matches first
food_name, food_id = match_food_by_taxid(hypothesis_food_id, foods)
if food_name is not None:
return food_name, food_id, "id"
else: # if we can't match by id, match by name
food_name, food_id = match_food_by_name(hypothesis_food, foods)
if food_name is not None:
return food_name, food_id, "name"
return None, None, None
def find_compound_synonym(hypothesis_chemical, compound_synonyms):
synonym_match = compound_synonyms[
compound_synonyms.synonym.str.lower() == hypothesis_chemical.lower()
]
if len(synonym_match) > 0:
synonym_match = synonym_match.iloc[0, :]
return (
synonym_match.synonym,
synonym_match.source_id,
synonym_match.source_type,
)
else:
return None, None, None
def match_chemical(
row, compounds, nutrients, compound_synonyms, synonym_matching: bool
):
hypothesis_chemical = row["hypothesis_chemical"]
def _match_chemical_inner(hypothesis_chemical):
chemical_match = compounds[
compounds.name.str.lower() == hypothesis_chemical.lower()
] # exact match compound name
if len(chemical_match) > 0:
chemical_match = chemical_match.iloc[0, :]
return (
chemical_match["name"],
chemical_match.id,
"name",
"compound",
)
return None, None, None, None
def match_nutrient(hypothesis_chemical):
chemical_match = nutrients[
nutrients.name.str.lower() == hypothesis_chemical.lower()
] # exact match compound name
if len(chemical_match) > 0:
chemical_match = chemical_match.iloc[0, :]
return (
chemical_match["name"],
chemical_match.id,
"name",
"nutrient",
)
return None, None, None, None
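# Matching order: exact compound name, then exact nutrient name, then (if
# synonym_matching is enabled) compound synonyms resolved back to their
# source compound or nutrient record.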
res = _match_chemical_inner(hypothesis_chemical)
if res[0] is not None:
return res
else: # if that doesn't work, exact match nutrients
res = match_nutrient(hypothesis_chemical)
if res[0] is not None:
return res
elif synonym_matching: # if that doesn't work, try matching by synonym
syn_name, syn_source_id, syn_source_type = find_compound_synonym(
hypothesis_chemical, compound_synonyms
)
chemical_match = []
if syn_source_type == "Compound":
chemical_match = compounds[compounds.id == syn_source_id]
elif syn_source_type == "Nutrient":
chemical_match = nutrients[nutrients.id == syn_source_id]
if len(chemical_match) > 0:
chemical_match = chemical_match.iloc[0, :]
return (
chemical_match["name"],
chemical_match.id,
"synonym",
syn_source_type.lower(),
)
return None, None, None, None
def match_row(
row, foods, compounds, nutrients, compound_synonyms, synonym_matching: bool
):
food_match_name, food_match_id, food_match_type = match_food(row, foods)
(
chemical_match_name,
chemical_match_id,
chemical_match_type,
chemical_foodb_compound_status,
) = match_chemical(
row, compounds, nutrients, compound_synonyms, synonym_matching
)
return (
food_match_name,
food_match_id,
food_match_type,
chemical_match_name,
chemical_match_id,
chemical_match_type,
chemical_foodb_compound_status,
)
def get_citation_info(content_row):
if len(content_row) > 0:
if len(content_row) > 1:
return (
content_row.citation.unique(),
content_row.citation_type.unique(),
)
else:
return content_row.citation, content_row.citation_type
return None, None
def match_df(
data,
foods,
compounds,
nutrients,
compound_synonyms,
contents,
synonym_matching: bool,
):
newrows = []
for idx, row in tqdm(data.iterrows(), total=len(data)):
(
food_match_name,
food_match_id,
food_match_type,
chemical_match_name,
chemical_match_id,
chemical_match_type,
chemical_foodb_compound_status,
) = match_row(
row,
foods,
compounds,
nutrients,
compound_synonyms,
synonym_matching,
)
newrow = row.copy()
(
newrow["foodb_food_name"],
newrow["foodb_food_id"],
newrow["foodb_food_match_type"],
newrow["foodb_chemical_name"],
newrow["foodb_chemical_id"],
newrow["foodb_chemical_match_type"],
newrow["foodb_compound_status"],
) = (
food_match_name,
food_match_id,
food_match_type,
chemical_match_name,
chemical_match_id,
chemical_match_type,
chemical_foodb_compound_status,
)
if food_match_id is not None and chemical_match_id is not None:
content_row = contents[
(contents.food_id == food_match_id)
& (contents.source_id == chemical_match_id)
]
(
newrow["foodb_citation"],
newrow["foodb_citation_type"],
) = get_citation_info(content_row)
newrows.append(newrow)
results = pd.DataFrame(newrows)
return results
def load_foodb_data(foodb_location, nrows=None):
foodb_base = Path(foodb_location)
logging.info("loading Foodb Contents")
contents = pd.read_csv(
foodb_base / "Content.csv",
dtype={
"standard_content": object
}, # load as object so .isna() works - otherwise empties are loaded as zeros
nrows=nrows,
)
logging.info("loading Foodb Synonyms")
synonyms = | pd.read_csv(foodb_base / "CompoundSynonym.csv", nrows=nrows) | pandas.read_csv |
"""
Copyright 2020 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from janos import *
import pandas as pd
import numpy as np
import sys
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
from datetime import datetime
import time
from sklearn.neural_network import MLPRegressor
from sklearn.linear_model import LinearRegression
pd.options.mode.chained_assignment = None
"""
load data
"""
# This is the data frame for training the predictive models.
historical_student_data = pd.read_csv("./data/college_student_enroll-s1-1.csv")
# This is information of applicants, whose financial aid is to be determined.
# We will use these numbers (SAT, GPA) later in the objective function.
applications = | pd.read_csv("./data/college_applications6000.csv") | pandas.read_csv |
"""Instantiate a Dash app."""
import datetime
import numpy as np
import pandas as pd
import dash
import dash_table
import dash_html_components as html
import dash_core_components as dcc
from dash.dependencies import Input, Output
from .layout import html_layout
import sqlite3
import plotly.express as px
app_colors = {
'background': '#0C0F0A',
'text': '#FFFFFF',
'sentiment-plot':'#41EAD4',
'volume-bar':'#FBFC74',
'someothercolor':'#FF206E',
}
def create_dashboard(server):
"""Create a Plotly Dash dashboard."""
dash_app = dash.Dash(server=server,
routes_pathname_prefix='/dashapp/',
external_stylesheets=['/static/dist/css/styles.css',
'https://fonts.googleapis.com/css?family=Lato']
)
#connect to the main database
conn = sqlite3.connect('data/alldata.db', isolation_level=None, check_same_thread=False)
c = conn.cursor()
# Prepare a youtube DataFrame------------------------------------------- ytdf
ytdf = pd.read_sql('select * from ytsentiment', conn)
ytdf['date'] = pd.to_datetime(ytdf['unix'])
num_entries = ytdf['id'].value_counts()
# Prepare a twitter DataFrame------------------------------------------- twdf
twdf = pd.read_sql('select * from twsentiment', conn)
twdf['date'] = pd.to_datetime(twdf['timestamp'])
num_entries = twdf['id'].value_counts()
# Prepare a reddit DataFrame-------------------------------------------- rddf
rddf = pd.read_sql('select * from rdsentiment', conn)
rddf['date'] = | pd.to_datetime(rddf['c_date']) | pandas.to_datetime |
"""Provides data for models of prediction phenotype from genotype."""
from microbepy.common import constants as cn
from microbepy.common import util
from microbepy.common.isolate import Isolate
from microbepy.common.study_context import StudyContext
from microbepy.data import util_data as ud
from microbepy.data.predictor_transformer import PredictorTransformer
import numpy as np
import pandas as pd
PERCENTILE_THRESHOLD = 50.0
MAX_PREDICTOR_COLUMNS = 100 # Maximum number of predictor columns
IS_DEBUG = True
################## CLASSES ###########################
class PredictorProvider(object):
pass
class ModelDataProvider(PredictorProvider):
"""
Provides predictor and dependent variables based on constraints
and specifications of data transformations.
Data are filtered for outlier cultures. Data are aggregated
by isolate pair.
The results are the instance variables df_X, df_y
"""
#cn.KEY_CULTURE, cn.LINE, cn.GENE_ID, cn.GGENE_ID,
#cn.POSITION, cn.KEY_MUTATION, cn.RATE, cn.YIELD,
#cn.KEY_ISOLATE_DVH, cn.KEY_ISOLATE_MMP
df_data = None # Base data
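# Minimal usage sketch (illustrative only, not from the original source; the
# context object and cn.* constants come from microbepy.common):
#   provider = ModelDataProvider(context, is_standardize=True)
#   provider.do(transform_type=cn.TRANSFORM_DEFAULT)
#   df_X, df_y = provider.df_X, provider.df_y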
def __init__(self, context,
is_standardize=True, constraints=None, rc_vector=None,
is_standardize_by_line=False, **kwargs):
"""
:param MutationContext context: instance variables are:
depvar, mutation_column
:param bool is_standardize: standardize the data
:param list-of-booleanFunction constraints: constraints are predicates on cls.df_data
:param RangeConstraintVector rc_vector: Range constraint on
dependent variables
:param bool is_standardize_by_line: data are standardized by line
"""
cls = self.__class__
self.context = context
self._constraints = util.setNoneList(constraints)
self._rc_vector = rc_vector
self._is_standardize = is_standardize
self._is_standardize_by_line = is_standardize_by_line
#
if cls.df_data is None:
cls.df_data = ud.makeIsolateData()
#
self.df_X = None
self.df_y = None
self.df_y_std = None
@staticmethod
def getIsolatesFromIndices(indices):
"""
Extracts the isolates from the indices of a df_X.
:param pandas.index indices:
cn.KEY_ISOLATE_DVH, cn.KEY_ISOLATE_MMP
:return dict: keyed by cn.KEY_ISOLATE_DVH, cn.KEY_ISOLATE_MMP
values are lists aligned with the rows of the index
"""
keys = [n for n in indices.names]
result = {}
for idx, key in enumerate(keys):
result[key] = [v[idx] for v in indices.values]
return result
@classmethod
def getLinesForRows(cls, df):
"""
Gets the lines for rows in the data.
:return list-str:
"""
isolate_dict = cls.getIsolatesFromIndices(df.index)
return [Isolate.create(v).line for v in
isolate_dict[cn.KEY_ISOLATE_DVH]]
def _makeXyDF(self):
"""
Updates state: self.df_X, self.df_y
Columns of self.df_X should be strings
"""
cls = self.__class__
df = util.selectRows(cls.df_data, self._constraints)
if len(df) == 0:
raise ValueError("No data returned with constraints.")
#
self.df_y = pd.DataFrame(df[[cn.KEY_CULTURE, self.context.depvar]])
self.df_y = self.df_y.drop_duplicates()
self.df_y = self.df_y.set_index(cn.KEY_CULTURE)
#
df[cn.COUNT] = 1
df = util.cleanDF(df, is_reset_index=True)
self.df_X = util.makeMatrix(df, row_name=cn.KEY_CULTURE,
column_name=self.context.mutation_column)
for col in self.df_X.columns:
self.df_X.rename(columns={col: str(col)}, inplace=True)
def _aggregateByIsolatePair(self):
"""
Creates X, y aggregated by and indexed by isolate pair.
Uses self.df_X and self.df_y (indexed by culture) and re-indexes them
by cn.KEY_ISOLATE_DVH, cn.KEY_ISOLATE_MMP.
"""
cls = self.__class__
# Map from culture to isolate pair
df_merge = cls.df_data[
[cn.KEY_CULTURE, cn.KEY_ISOLATE_DVH, cn.KEY_ISOLATE_MMP]]
df_merge = df_merge.copy()
df_merge = util.cleanDF(df_merge, is_reset_index=True)
#
def aggregate(df, aggregate_type=cn.AVG):
df_result = df.copy()
df_result = df_result.reset_index()
df_result = df_result.merge(df_merge, on=cn.KEY_CULTURE, how='inner')
del df_result[cn.KEY_CULTURE]
df_grp = df_result.groupby([cn.KEY_ISOLATE_DVH, cn.KEY_ISOLATE_MMP])
if aggregate_type == cn.AVG:
df_result = df_grp.mean()
elif aggregate_type == cn.STD:
df_count = df_grp.count()
df_count = df_count.applymap(lambda v: np.sqrt(v))
df_result = df_grp.std()
df_result = df_result.divide(df_count, fill_value=0)
else:
raise ValueError("Invalid aggreate_type %s" % aggregate_type)
df_result = df_result.copy()
return df_result
#
self.df_X = aggregate(self.df_X)
df_y = self.df_y.copy()
self.df_y = aggregate(df_y)
self.df_y_std = aggregate(df_y, aggregate_type=cn.STD)
def do(self, transform_type=cn.TRANSFORM_NONE,
**kwargs):
"""
Creates the predictor and dependent variable for the regression.
The columns are:
X - mutations, line
y - depvar
Both DFs are indexed by cn.KEY_ISOLATE_DVH, cn.KEY_ISOLATE_MMP.
:param str transform_type: how the predictor variables are transformed
:raises ValueError: if empty dataframe with constraints
Results are in self.df_X, self.df_y
"""
self._makeXyDF()
#
transformer = PredictorTransformer(self.df_X,
self.context.mutation_column)
if transform_type == cn.TRANSFORM_DEFAULT:
transformer.transformDefault(**kwargs)
elif transform_type == cn.TRANSFORM_LOW_FREQUENCY_ISOLATES:
transformer.transformLowFrequencyColumns(MAX_PREDICTOR_COLUMNS)
elif transform_type == cn.TRANSFORM_ONLY_LOW_FREQUENCY_ISOLATES:
transformer.transformOnlyLowFrequencyColumns(
MAX_PREDICTOR_COLUMNS)
else:
pass # no transformation of the predictors
self.df_X = transformer.df_X
# Aggregate data by isolate pairs
self._aggregateByIsolatePair()
# Standardize the values
if self._is_standardize_by_line:
self.df_y = self._standardizeByLine()
elif self._is_standardize:
util.standardize(self.df_y)
# Prune based on aggregated values of dependent variable
if self._rc_vector is not None:
indices = self._rc_vector.findSatisfiedRows(self.df_y)
self.df_X = self.df_X.loc[indices]
self.df_y = self.df_y.loc[indices]
self.df_y_std = self.df_y_std.loc[indices]
def getMutations(self):
return [str(c) for c in self.df_X.columns]
def _standardizeByLine(self):
"""
Standardizes values separately for each line
:return pd.DataFrame: dependent variable values standardized within each line
"""
lines = [Isolate.create(v[0]).line
for v in self.df_X.index.tolist()]
df_old = self.df_y.copy()
df_old[cn.LINE] = lines
dfs = []
for line in set(lines):
df = df_old[df_old[cn.LINE] == line].copy()
del df[cn.LINE]
util.standardize(df)
dfs.append(df)
return | pd.concat(dfs, sort=True) | pandas.concat |
import snippets.basic_operations as bo
import requests
import csv
import os
import math
from random import uniform
import pandas as pd
from datetime import datetime, timedelta
import time
from tqdm import tqdm
# -----------------------------------------------------------------------
# DataFetch class
class DataFetch:
def __init__(self, api_keys):
self.keys = api_keys
self.auth = bo.CoinbaseExchangeAuth(
api_key=self.keys['public'],
secret_key=self.keys['secret'],
passphrase=self.keys['pass']
)
self.meta = None
self.response = None
self.prices = None
self._max_candles = 300
def build_meta(self):
r = requests.get(
url='https://api.pro.coinbase.com/products',
auth=self.auth
)
self.meta = pd.DataFrame.from_dict(r.json()).set_index('id').sort_index()
self.response = r.status_code
pass
def candles(self, sid, start_dt, end_dt, frequency):
params = {
'start': start_dt.isoformat(),
'end': end_dt.isoformat(),
'granularity': frequency
}
r = requests.get(
url='https://api.pro.coinbase.com/products/' + sid + '/candles',
auth=self.auth,
params=params
)
self.response = r.status_code
candles = pd.DataFrame(r.json(), columns=['dt', 'low', 'high', 'open', 'close', 'volume'])
candles['dt'] = candles['dt'].apply(datetime.fromtimestamp)
candles.set_index('dt', inplace=True)
candles.sort_index(inplace=True)
self.prices = candles
return candles
def build_historical(self, sid, start_dt, end_dt, frequency):
length = (end_dt - start_dt).total_seconds()
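# Coinbase caps each candles request at 300 rows (self._max_candles), so the
# requested window is split into `pulls` sub-windows of at most
# _max_candles * frequency seconds each.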
pulls = math.ceil(length / (self._max_candles * frequency)) # Make sure the 300 cap is met
step_size = math.ceil(length / pulls)
if step_size > self._max_candles * frequency:
pulls += 1
step_size = math.ceil(length / pulls)
fetch = []
dts = [end_dt - timedelta(seconds=x * step_size) for x in range(pulls + 1)]
dts = dts[::-1]
dts[0] -= timedelta(seconds=frequency)
dts[-1] = end_dt
for i in tqdm(range(pulls)):
prices = self.candles(
sid=sid,
start_dt=dts[i] + timedelta(seconds=frequency),
end_dt=dts[i + 1],
frequency=frequency
)
fetch += [prices]
time.sleep(1 + uniform(.025, .5))
self.prices = | pd.concat(fetch, axis=0) | pandas.concat |
'''
Project: WGU Data Management/Analytics Undergraduate Capstone
<NAME>
August 2021
GDELTbase.py
Class for creating/maintaining data directory structure, bulk downloading of
GDELT files with column reduction, parsing/cleaning to JSON format, and export
of cleaned records to MongoDB.
Basic use should be by import and implementation within an IDE, or by editing
section # C00 and running this script directly.
Primary member functions include descriptive docstrings for their intent and
use.
See license.txt for information related to each open-source library used.
WARNING: project file operations are based on relative pathing from the
'scripts' directory this Python script is located in, given the creation of
directories 'GDELTdata' and 'EDAlogs' parallel to 'scripts' upon first
GDELTbase and GDELTeda class initializations.
If those directories are not already present, a fallback method for
string-literal directory reorientation may be found in GDELTbase shared class
data at this tag: # A01a - backup path specification.
Any given user's project directory must be specified there.
See also GDELTeda.py, tag # A02b - Project directory path, as any given user's
project directory must be specified for that os.chdir() call, also.
Contents:
A00 - GDELTbase
A01 - shared class data (toolData, localDb)
A01a - backup path specification
Note: Specification at A01a should be changed to suit a user's desired
directory structure, given their local filesystem.
A02 - __init__ w/ instanced data (localFiles)
B00 - class methods
B01 - updateLocalFilesIndex
B02 - clearLocalFilesIndex
B03 - showLocalFiles
B04 - wipeLocalFiles
B05 - extensionToTableName
B06 - isFileDownloaded
B07 - downloadGDELTFile
B08 - downloadGDELTDay
B09 - cleanFile (includes the following field/subfield parser functions)
B09a - themeSplitter
B09b - locationsSplitter
B09c - personsSplitter
B09d - organizationsSplitter
B09e - toneSplitter
B09f - countSplitter
B09g - One-liner date conversion function for post-read_csv use
B09h - llConverter
B10 - cleanTable
B11 - mongoFile
B12 - mongoTable
C00 - main w/ testing
'''
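# Example use (a sketch only; argument details such as the date string passed
# to downloadGDELTDay are assumptions, not taken from this file):
#   gBase = GDELTbase()
#   gBase.downloadGDELTDay('2021/08/01', 'events')  # hypothetical arguments
#   gBase.cleanTable('events')
#   gBase.mongoTable('events')
#   gBase.showLocalFiles()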
import pandas as pd
import numpy as np
import os
import pymongo
import wget
import json
from time import time
from datetime import datetime, tzinfo
from zipfile import ZipFile as zf
from pprint import pprint as pp
from urllib.error import HTTPError
# A00
class GDELTbase:
'''Base object for GDELT data acquisition, cleaning, and storage.
Shared class data:
-----------------
toolData - dict with these key - value pairs:
URLbase - "http://data.gdeltproject.org/gdeltv2/"
path - os.path path objects, 'raw' and 'clean', per-table
names - lists of string column names, per-table, original and reduced
extensions - dict mapping table names to file extensions, per-table
columnTypes - dicts mapping table column names to appropriate types
localDb - dict with these key - value pairs:
client - pymongo.MongoClient()
database - pymongo.MongoClient().capstone
collections - dict mapping table names to suitable mongoDB collections
Instanced class data:
--------------------
localFiles - dict, per-table keys for lists of local 'raw' and 'clean'
filenames
Class methods:
-------------
updateLocalFilesIndex()
clearLocalFilesIndex()
showLocalFiles()
wipeLocalFiles()
extensionToTableName()
isFileDownloaded()
downloadGDELTFile()
downloadGDELTDay()
cleanFile()
cleanTable()
mongoFile()
mongoTable()
'''
# A01 - shared class data
toolData = {}
# A01a - backup path specification
# Failsafe path for local main project directory. Must be changed to suit
# location of any given end-user's 'script' directory in case directory
# 'GDELTdata' is not present one directory up.
toolData['projectPath'] = 'C:\\Users\\urf\\Projects\\WGU capstone'
# Controls generation of datafile download URLs in downloadGDELTDay()/File()
toolData['URLbase'] = "http://data.gdeltproject.org/gdeltv2/"
# Used in forming URLs for datafile download
toolData['extensions'] = {
'events' : "export.CSV.zip",
'gkg' : "gkg.csv.zip",
'mentions' : "mentions.CSV.zip",
}
# These paths are set relative to the location of this script, one directory
# up, in 'GDELTdata', parallel to the script directory.
toolData['path'] = {}
toolData['path']['base']= os.path.join(os.path.abspath(__file__),
os.path.realpath('..'),
'GDELTdata')
toolData['path']['events'] = {
'table': os.path.join(toolData['path']['base'], 'events'),
'raw': os.path.join(toolData['path']['base'], 'events', 'raw'),
'clean': os.path.join(toolData['path']['base'], 'events', 'clean'),
'realtimeR' : os.path.join(toolData['path']['base'], 'events',
'realtimeRaw'),
'realtimeC' : os.path.join(toolData['path']['base'], 'events',
'realtimeClean')
}
toolData['path']['gkg'] = {
'table': os.path.join(toolData['path']['base'], 'gkg'),
'raw': os.path.join(toolData['path']['base'], 'gkg', 'raw'),
'clean': os.path.join(toolData['path']['base'], 'gkg', 'clean'),
'realtimeR' : os.path.join(toolData['path']['base'], 'gkg',
'realtimeRaw'),
'realtimeC' : os.path.join(toolData['path']['base'], 'gkg',
'realtimeClean')
}
toolData['path']['mentions'] = {
'table': os.path.join(toolData['path']['base'], 'mentions'),
'raw': os.path.join(toolData['path']['base'], 'mentions', 'raw'),
'clean': os.path.join(toolData['path']['base'], 'mentions', 'clean'),
'realtimeR' : os.path.join(toolData['path']['base'], 'mentions',
'realtimeRaw'),
'realtimeC' : os.path.join(toolData['path']['base'], 'mentions',
'realtimeClean')
}
# These mappings and lists are for recognition of all possible
# column names, and the specific discarding of a number of columns
# which have been predetermined as unnecessary in the context of
# simple EDA.
toolData['names'] = {}
toolData['names']['events'] = {
'original' : [
'GLOBALEVENTID',
'Day',
'MonthYear',
'Year',
'FractionDate',
'Actor1Code',
'Actor1Name',
'Actor1CountryCode',
'Actor1KnownGroupCode',
'Actor1EthnicCode',
'Actor1Religion1Code',
'Actor1Religion2Code',
'Actor1Type1Code',
'Actor1Type2Code',
'Actor1Type3Code',
'Actor2Code',
'Actor2Name',
'Actor2CountryCode',
'Actor2KnownGroupCode',
'Actor2EthnicCode',
'Actor2Religion1Code',
'Actor2Religion2Code',
'Actor2Type1Code',
'Actor2Type2Code',
'Actor2Type3Code',
'IsRootEvent',
'EventCode',
'EventBaseCode',
'EventRootCode',
'QuadClass',
'GoldsteinScale',
'NumMentions',
'NumSources',
'NumArticles',
'AvgTone',
'Actor1Geo_Type',
'Actor1Geo_FullName',
'Actor1Geo_CountryCode',
'Actor1Geo_ADM1Code',
'Actor1Geo_ADM2Code',
'Actor1Geo_Lat',
'Actor1Geo_Long',
'Actor1Geo_FeatureID',
'Actor2Geo_Type',
'Actor2Geo_FullName',
'Actor2Geo_CountryCode',
'Actor2Geo_ADM1Code',
'Actor2Geo_ADM2Code',
'Actor2Geo_Lat',
'Actor2Geo_Long',
'Actor2Geo_FeatureID',
'ActionGeo_Type',
'ActionGeo_FullName',
'ActionGeo_CountryCode',
'ActionGeo_ADM1Code',
'ActionGeo_ADM2Code',
'ActionGeo_Lat',
'ActionGeo_Long',
'ActionGeo_FeatureID',
'DATEADDED',
'SOURCEURL',
],
'reduced' : [
'GLOBALEVENTID',
'Actor1Code',
'Actor1Name',
'Actor1CountryCode',
'Actor1Type1Code',
'Actor1Type2Code',
'Actor1Type3Code',
'Actor2Code',
'Actor2Name',
'Actor2CountryCode',
'Actor2Type1Code',
'Actor2Type2Code',
'Actor2Type3Code',
'IsRootEvent',
'EventCode',
'EventBaseCode',
'EventRootCode',
'QuadClass',
'AvgTone',
'Actor1Geo_Type',
'Actor1Geo_FullName',
'Actor1Geo_Lat',
'Actor1Geo_Long',
'Actor2Geo_Type',
'Actor2Geo_FullName',
'Actor2Geo_Lat',
'Actor2Geo_Long',
'ActionGeo_Type',
'ActionGeo_FullName',
'ActionGeo_Lat',
'ActionGeo_Long',
'DATEADDED',
'SOURCEURL',
],
}
toolData['names']['gkg'] = {
'original' : [
'GKGRECORDID',
'V21DATE',
'V2SourceCollectionIdentifier',
'V2SourceCommonName',
'V2DocumentIdentifier',
'V1Counts',
'V21Counts',
'V1Themes',
'V2EnhancedThemes',
'V1Locations',
'V2EnhancedLocations',
'V1Persons',
'V2EnhancedPersons',
'V1Organizations',
'V2EnhancedOrganizations',
'V15Tone',
'V21EnhancedDates',
'V2GCAM',
'V21SharingImage',
'V21RelatedImages',
'V21SocialImageEmbeds',
'V21SocialVideoEmbeds',
'V21Quotations',
'V21AllNames',
'V21Amounts',
'V21TranslationInfo',
'V2ExtrasXML',
],
'reduced' : [
'GKGRECORDID',
'V21DATE',
'V2SourceCommonName',
'V2DocumentIdentifier',
'V1Counts',
'V1Themes',
'V1Locations',
'V1Persons',
'V1Organizations',
'V15Tone',
],
}
toolData['names']['mentions'] = {
'original' : [
'GLOBALEVENTID',
'EventTimeDate',
'MentionTimeDate',
'MentionType',
'MentionSourceName',
'MentionIdentifier',
'SentenceID', #
'Actor1CharOffset',#
'Actor2CharOffset',#
'ActionCharOffset',#
'InRawText',
'Confidence',
'MentionDocLen', #
'MentionDocTone',
'MentionDocTranslationInfo', #
'Extras', #
],
'reduced' : [
'GLOBALEVENTID',
'EventTimeDate',
'MentionTimeDate',
'MentionType',
'MentionSourceName',
'MentionIdentifier',
'InRawText',
'Confidence',
'MentionDocTone',
],
}
# These mappings are used in automated dtype application to Pandas
# DataFrame collections of GDELT records, part of preprocessing.
toolData['columnTypes'] = {}
toolData['columnTypes']['events'] = {
'GLOBALEVENTID' : type(1),
'Actor1Code': pd.StringDtype(),
'Actor1Name': pd.StringDtype(),
'Actor1CountryCode': pd.StringDtype(),
'Actor1Type1Code' : pd.StringDtype(),
'Actor1Type2Code' : pd.StringDtype(),
'Actor1Type3Code' : pd.StringDtype(),
'Actor2Code': pd.StringDtype(),
'Actor2Name': pd.StringDtype(),
'Actor2CountryCode': pd.StringDtype(),
'Actor2Type1Code' : pd.StringDtype(),
'Actor2Type2Code' : pd.StringDtype(),
'Actor2Type3Code' : pd.StringDtype(),
'IsRootEvent': type(True),
'EventCode': | pd.StringDtype() | pandas.StringDtype |
from urllib.parse import urlparse
import pytest
import pandas as pd
import numpy as np
from visions.core.implementations.types import *
from visions.application.summaries.summary import CompleteSummary
@pytest.fixture(scope="class")
def summary():
return CompleteSummary()
def validate_summary_output(test_series, visions_type, correct_output, summary):
trial_output = summary.summarize_series(test_series, visions_type)
for metric, result in correct_output.items():
assert metric in trial_output, "Metric `{metric}` is missing".format(
metric=metric
)
assert (
trial_output[metric] == result
), "Expected value {result} for metric `{metric}`, got {output}".format(
result=result, metric=metric, output=trial_output[metric]
)
def test_integer_summary(summary, visions_type=visions_integer):
test_series = pd.Series([0, 1, 2, 3, 4])
correct_output = {
"n_unique": 5,
"mean": 2,
"median": 2,
"std": pytest.approx(1.58113, 0.00001),
"max": 4,
"min": 0,
"n_records": 5,
"n_zeros": 1,
}
validate_summary_output(test_series, visions_type, correct_output, summary)
def test_integer_missing_summary(summary, visions_type=visions_integer):
test_series = pd.Series([0, 1, 2, 3, 4])
correct_output = {
"n_unique": 5,
"mean": 2,
"median": 2,
"std": pytest.approx(1.58113, 0.00001),
"max": 4,
"min": 0,
"n_records": 5,
"n_zeros": 1,
"na_count": 0,
}
validate_summary_output(test_series, visions_type, correct_output, summary)
def test_float_missing_summary(summary, visions_type=visions_float):
test_series = pd.Series([0.0, 1.0, 2.0, 3.0, 4.0, np.nan])
correct_output = {
"n_unique": 5,
"median": 2,
"mean": 2,
"std": pytest.approx(1.58113, 0.00001),
"max": 4,
"min": 0,
"n_records": 6,
"n_zeros": 1,
"na_count": 1,
}
validate_summary_output(test_series, visions_type, correct_output, summary)
def test_bool_missing_summary(summary, visions_type=visions_bool):
test_series = pd.Series([True, False, True, True, np.nan])
correct_output = {"n_records": 5, "na_count": 1}
validate_summary_output(test_series, visions_type, correct_output, summary)
def test_categorical_missing_summary(summary, visions_type=visions_categorical):
test_series = pd.Series(
pd.Categorical(
[True, False, np.nan, "test"],
categories=[True, False, "test", "missing"],
ordered=True,
)
)
correct_output = {
"n_unique": 3,
"n_records": 4,
"na_count": 1,
"category_size": 4,
"missing_categorical_values": True,
}
validate_summary_output(test_series, visions_type, correct_output, summary)
def test_complex_missing_summary(summary, visions_type=visions_complex):
test_series = pd.Series([0 + 0j, 0 + 1j, 1 + 0j, 1 + 1j, np.nan])
correct_output = {"n_unique": 4, "mean": 0.5 + 0.5j, "na_count": 1, "n_records": 5}
validate_summary_output(test_series, visions_type, correct_output, summary)
def test_datetime_missing_summary(summary, visions_type=visions_datetime):
test_series = pd.Series(
[
pd.datetime(2010, 1, 1),
pd.datetime(2010, 8, 2),
pd.datetime(2011, 2, 1),
np.nan,
]
)
correct_output = {
"n_unique": 3,
"max": pd.datetime(2011, 2, 1),
"min": | pd.datetime(2010, 1, 1) | pandas.datetime |
from collections import Counter
import pandas as pd
import pytest
from simplekv import KeyValueStore
from kartothek.api.discover import (
discover_cube,
discover_datasets,
discover_datasets_unchecked,
discover_ktk_cube_dataset_ids,
)
from kartothek.core.cube.constants import (
KTK_CUBE_DF_SERIALIZER,
KTK_CUBE_METADATA_DIMENSION_COLUMNS,
KTK_CUBE_METADATA_KEY_IS_SEED,
KTK_CUBE_METADATA_PARTITION_COLUMNS,
KTK_CUBE_METADATA_STORAGE_FORMAT,
KTK_CUBE_METADATA_SUPPRESS_INDEX_ON,
KTK_CUBE_METADATA_VERSION,
)
from kartothek.core.cube.cube import Cube
from kartothek.core.uuid import gen_uuid
from kartothek.io.eager import (
store_dataframes_as_dataset,
update_dataset_from_dataframes,
)
from kartothek.io_components.metapartition import MetaPartition
@pytest.fixture
def cube():
return Cube(
dimension_columns=["x", "y"],
partition_columns=["p", "q"],
uuid_prefix="cube",
index_columns=["i1"],
seed_dataset="myseed",
)
def store_data(
cube,
function_store,
df,
name,
partition_on="default",
metadata_version=KTK_CUBE_METADATA_VERSION,
metadata_storage_format=KTK_CUBE_METADATA_STORAGE_FORMAT,
metadata=None,
overwrite=False,
new_ktk_cube_metadata=True,
write_suppress_index_on=True,
):
if partition_on == "default":
partition_on = cube.partition_columns
if isinstance(df, pd.DataFrame):
mp = MetaPartition(label=gen_uuid(), data=df, metadata_version=metadata_version)
indices_to_build = set(cube.index_columns) & set(df.columns)
if name == cube.seed_dataset:
indices_to_build |= set(cube.dimension_columns) - set(
cube.suppress_index_on
)
mp = mp.build_indices(indices_to_build)
dfs = mp
else:
assert isinstance(df, MetaPartition)
assert df.metadata_version == metadata_version
dfs = df
if metadata is None:
metadata = {
KTK_CUBE_METADATA_DIMENSION_COLUMNS: cube.dimension_columns,
KTK_CUBE_METADATA_KEY_IS_SEED: (name == cube.seed_dataset),
}
if new_ktk_cube_metadata:
metadata.update(
{KTK_CUBE_METADATA_PARTITION_COLUMNS: cube.partition_columns}
)
if write_suppress_index_on:
metadata.update(
{KTK_CUBE_METADATA_SUPPRESS_INDEX_ON: list(cube.suppress_index_on)}
)
return store_dataframes_as_dataset(
store=function_store,
dataset_uuid=cube.ktk_dataset_uuid(name),
dfs=dfs,
partition_on=list(partition_on) if partition_on else None,
metadata_storage_format=metadata_storage_format,
metadata_version=metadata_version,
df_serializer=KTK_CUBE_DF_SERIALIZER,
metadata=metadata,
overwrite=overwrite,
)
def assert_datasets_equal(left, right):
assert set(left.keys()) == set(right.keys())
for k in left.keys():
ds_l = left[k]
ds_r = right[k]
assert ds_l.uuid == ds_r.uuid
def assert_dataset_issubset(superset, subset):
assert set(subset.keys()).issubset(set(superset.keys()))
for k in subset.keys():
assert subset[k].uuid == superset[k].uuid
def test_discover_ktk_cube_dataset_ids(function_store):
cube = Cube(
dimension_columns=["dim"],
partition_columns=["part"],
uuid_prefix="cube",
seed_dataset="seed",
)
ktk_cube_dataset_ids = ["A", "B", "C"]
for ktk_cube_id in ktk_cube_dataset_ids:
store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"dim": [0], "part": [0]}),
name=ktk_cube_id,
)
collected_ktk_cube_dataset_ids = discover_ktk_cube_dataset_ids(
cube.uuid_prefix, function_store()
)
assert collected_ktk_cube_dataset_ids == set(ktk_cube_dataset_ids)
class TestDiscoverDatasetsUnchecked:
def test_simple(self, cube, function_store):
expected = {
cube.seed_dataset: store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}),
name=cube.seed_dataset,
),
"enrich": store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}),
name="enrich",
),
}
actual = discover_datasets_unchecked(cube.uuid_prefix, function_store)
assert_datasets_equal(actual, expected)
def test_no_seed(self, cube, function_store):
expected = {
"enrich": store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}),
name="enrich",
)
}
actual = discover_datasets_unchecked(cube.uuid_prefix, function_store)
assert_datasets_equal(actual, expected)
def test_other_files(self, cube, function_store):
expected = {
cube.seed_dataset: store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}),
name=cube.seed_dataset,
)
}
function_store().put(cube.ktk_dataset_uuid("enrich") + "/foo", b"")
actual = discover_datasets_unchecked(cube.uuid_prefix, function_store)
assert_datasets_equal(actual, expected)
def test_no_common_metadata(self, cube, function_store):
expected = {
cube.seed_dataset: store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}),
name=cube.seed_dataset,
)
}
store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}),
name="enrich",
)
keys = set(function_store().keys())
metadata_key = cube.ktk_dataset_uuid("enrich") + ".by-dataset-metadata.json"
assert metadata_key in keys
for k in keys:
if (k != metadata_key) and k.startswith(cube.ktk_dataset_uuid("enrich")):
function_store().delete(k)
actual = discover_datasets_unchecked(cube.uuid_prefix, function_store)
assert_datasets_equal(actual, expected)
def test_filter_partial_datasets_found(self, cube, function_store):
enrich_dataset = store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}),
name="enrich",
)
store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}),
name="mytable",
)
expected = {"enrich": enrich_dataset}
actual = discover_datasets_unchecked(
cube.uuid_prefix, function_store, filter_ktk_cube_dataset_ids=["enrich"]
)
assert_dataset_issubset(actual, expected)
def test_filter_no_datasets_found(self, cube, function_store):
actual = discover_datasets_unchecked(
cube.uuid_prefix, function_store, filter_ktk_cube_dataset_ids=["enrich"]
)
assert actual == {}
def test_msgpack_clean(self, cube, function_store):
expected = {
cube.seed_dataset: store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}),
name=cube.seed_dataset,
),
"enrich": store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}),
name="enrich",
metadata_storage_format="msgpack",
),
}
actual = discover_datasets_unchecked(cube.uuid_prefix, function_store)
assert_datasets_equal(actual, expected)
def test_msgpack_priority(self, cube, function_store):
"""
json metadata files have priority in kartothek, so the discovery should respect this
"""
store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0], "v1": [0]}),
name=cube.seed_dataset,
metadata_storage_format="msgpack",
)
expected = {
cube.seed_dataset: store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0], "v2": [0]}),
name=cube.seed_dataset,
overwrite=True,
)
}
store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0], "v3": [0]}),
name=cube.seed_dataset,
metadata_storage_format="msgpack",
overwrite=True,
)
actual = discover_datasets_unchecked(cube.uuid_prefix, function_store)
assert_datasets_equal(actual, expected)
def test_msgpack_efficiency(self, cube, function_store):
"""
We should only iterate over the store once, even though we are looking for 2 suffixes.
Furthermore, we must only load every dataset once.
"""
store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}),
name=cube.seed_dataset,
metadata_storage_format="msgpack",
)
store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}),
name=cube.seed_dataset,
overwrite=True,
)
class StoreMock(KeyValueStore):
def __init__(self, store):
self._store = store
self._iter_keys_called = 0
self._iter_prefixes_called = 0
self._get_called = Counter()
def iter_keys(self, prefix=""):
self._iter_keys_called += 1
return self._store.iter_keys(prefix)
def iter_prefixes(self, delimiter, prefix=""):
self._iter_prefixes_called += 1
return self._store.iter_prefixes(delimiter, prefix)
def get(self, key):
self._get_called[key] += 1
return self._store.get(key)
store = StoreMock(function_store())
discover_datasets_unchecked(cube.uuid_prefix, store)
assert store._iter_keys_called == 0
assert store._iter_prefixes_called == 1
assert max(store._get_called.values()) == 1
class TestDiscoverDatasets:
def test_seed_only(self, cube, function_store):
expected = {
cube.seed_dataset: store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}),
name=cube.seed_dataset,
)
}
actual = discover_datasets(cube, function_store)
assert_datasets_equal(actual, expected)
def test_2_datasets(self, cube, function_store):
expected = {
cube.seed_dataset: store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}),
name=cube.seed_dataset,
),
"enrich": store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0], "v1": 100}),
name="enrich",
),
}
actual = discover_datasets(cube, function_store)
assert_datasets_equal(actual, expected)
def test_partitions_superset(self, cube, function_store):
expected = {
cube.seed_dataset: store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}),
name=cube.seed_dataset,
),
"enrich": store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0], "v1": 100}),
name="enrich",
partition_on=["p", "q", "v1"],
),
}
actual = discover_datasets(cube, function_store)
assert_datasets_equal(actual, expected)
def test_raises_no_seed(self, cube, function_store):
store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}),
name="enrich",
)
with pytest.raises(ValueError) as exc:
discover_datasets(cube, function_store)
assert str(exc.value) == 'Seed data ("myseed") is missing.'
def test_raises_wrong_partition_on_seed_other(self, cube, function_store):
store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0]}),
name=cube.seed_dataset,
partition_on=["p"],
)
with pytest.raises(ValueError) as exc:
discover_datasets(cube, function_store)
assert (
str(exc.value) == 'Seed dataset "myseed" has missing partition columns: q'
)
def test_partition_on_nonseed_no_part(self, cube, function_store):
expected = {
cube.seed_dataset: store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}),
name=cube.seed_dataset,
),
"enrich": store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "v1": [0]}),
name="enrich",
partition_on=[],
),
}
actual = discover_datasets(cube, function_store)
assert_datasets_equal(actual, expected)
def test_raises_wrong_metadata_version(self, cube, function_store):
with pytest.raises(
NotImplementedError, match="Minimal supported metadata version is"
):
store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}),
name=cube.seed_dataset,
metadata_version=2,
partition_on=None,
)
def test_raises_dtypes(self, cube, function_store):
store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}),
name=cube.seed_dataset,
)
store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0.0], "p": [0], "q": [0], "v1": 100}),
name="enrich",
)
with pytest.raises(ValueError) as exc:
discover_datasets(cube, function_store)
assert 'Found incompatible entries for column "y"' in str(exc.value)
def test_raises_overlap(self, cube, function_store):
store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0], "v1": 100}),
name=cube.seed_dataset,
)
store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0], "v1": 100}),
name="enrich",
)
with pytest.raises(ValueError) as exc:
discover_datasets(cube, function_store)
assert "Found columns present in multiple datasets" in str(exc.value)
def test_raises_partition_on_overlap(self, cube, function_store):
store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0], "v1": 100}),
name=cube.seed_dataset,
)
store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "v1": 100}),
name="enrich",
partition_on=["v1"],
)
with pytest.raises(ValueError) as exc:
discover_datasets(cube, function_store)
assert "Found columns present in multiple datasets" in str(exc.value)
def test_raises_missing_dimension_columns(self, cube, function_store):
store_data(
cube=cube,
function_store=function_store,
df=MetaPartition(
label=gen_uuid(),
data=pd.DataFrame({"x": [0], "p": [0], "q": [0]}),
metadata_version=KTK_CUBE_METADATA_VERSION,
).build_indices(["x"]),
name=cube.seed_dataset,
)
with pytest.raises(ValueError) as exc:
discover_datasets(cube, function_store)
assert (
str(exc.value) == 'Seed dataset "myseed" has missing dimension columns: y'
)
def test_raises_no_dimension_columns(self, cube, function_store):
store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0], "v1": 100}),
name=cube.seed_dataset,
)
store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"p": [0], "q": [0], "v2": 100}),
name="enrich",
)
with pytest.raises(ValueError) as exc:
discover_datasets(cube, function_store)
assert (
str(exc.value)
== 'Dataset "enrich" must have at least 1 of the following dimension columns: x, y'
)
def test_raises_dimension_index_missing(self, cube, function_store):
store_data(
cube=cube,
function_store=function_store,
df=MetaPartition(
label=gen_uuid(),
data=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}),
metadata_version=KTK_CUBE_METADATA_VERSION,
),
name=cube.seed_dataset,
)
with pytest.raises(ValueError) as exc:
discover_datasets(cube, function_store)
assert (
str(exc.value)
== 'ExplicitSecondaryIndex "x" is missing in dataset "myseed".'
)
def test_raises_other_index_missing(self, cube, function_store):
store_data(
cube=cube,
function_store=function_store,
df=MetaPartition(
label=gen_uuid(),
data=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}),
metadata_version=KTK_CUBE_METADATA_VERSION,
).build_indices(["x", "y"]),
name=cube.seed_dataset,
)
store_data(
cube=cube,
function_store=function_store,
df=MetaPartition(
label=gen_uuid(),
data=pd.DataFrame(
{"x": [0], "y": [0], "p": [0], "q": [0], "i1": [1337]}
),
metadata_version=KTK_CUBE_METADATA_VERSION,
),
name="enrich",
)
with pytest.raises(ValueError) as exc:
discover_datasets(cube, function_store)
assert (
str(exc.value)
== 'ExplicitSecondaryIndex or PartitionIndex "i1" is missing in dataset "enrich".'
)
def test_accepts_additional_indices(self, cube, function_store):
expected = {
cube.seed_dataset: store_data(
cube=cube,
function_store=function_store,
df=MetaPartition(
label=gen_uuid(),
data=pd.DataFrame(
{"x": [0], "y": [0], "p": [0], "q": [0], "v1": [0]}
),
metadata_version=KTK_CUBE_METADATA_VERSION,
).build_indices(["x", "y", "v1"]),
name=cube.seed_dataset,
),
"enrich": store_data(
cube=cube,
function_store=function_store,
df=MetaPartition(
label=gen_uuid(),
data=pd.DataFrame(
{
"x": [0],
"y": [0],
"p": [0],
"q": [0],
"i1": [1337],
"v2": [42],
}
),
metadata_version=KTK_CUBE_METADATA_VERSION,
).build_indices(["i1", "x", "v2"]),
name="enrich",
),
}
actual = discover_datasets(cube, function_store)
assert_datasets_equal(actual, expected)
def test_accepts_partition_index_for_index(self, cube, function_store):
expected = {
cube.seed_dataset: store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0], "v1": [0]}),
name=cube.seed_dataset,
),
"enrich": store_data(
cube=cube,
function_store=function_store,
df=MetaPartition(
label=gen_uuid(),
data=pd.DataFrame({"x": [0], "y": [0], "i1": [1337], "v2": [42]}),
metadata_version=KTK_CUBE_METADATA_VERSION,
),
name="enrich",
partition_on=["i1"],
),
}
actual = discover_datasets(cube, function_store)
assert_datasets_equal(actual, expected)
def test_raises_unspecified_partition_columns(self, cube, function_store):
store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}),
name=cube.seed_dataset,
partition_on=["p", "q"],
)
store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0], "v1": [0]}),
name="enrich",
partition_on=["q"],
)
with pytest.raises(ValueError) as exc:
discover_datasets(cube, function_store)
assert (
str(exc.value) == "Unspecified but provided partition columns in enrich: p"
)
def test_accepts_projected_datasets(self, cube, function_store):
expected = {
cube.seed_dataset: store_data(
cube=cube,
function_store=function_store,
df=MetaPartition(
label=gen_uuid(),
data=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}),
metadata_version=KTK_CUBE_METADATA_VERSION,
).build_indices(["x", "y"]),
name=cube.seed_dataset,
),
"x": store_data(
cube=cube,
function_store=function_store,
df=MetaPartition(
label=gen_uuid(),
data=pd.DataFrame({"x": [0], "p": [0], "q": [0], "v1": [42]}),
metadata_version=KTK_CUBE_METADATA_VERSION,
),
name="x",
),
"y": store_data(
cube=cube,
function_store=function_store,
df=MetaPartition(
label=gen_uuid(),
data=pd.DataFrame({"y": [0], "p": [0], "q": [0], "v2": [42]}),
metadata_version=KTK_CUBE_METADATA_VERSION,
),
name="y",
),
}
actual = discover_datasets(cube, function_store)
assert_datasets_equal(actual, expected)
def test_filter_basic(self, cube, function_store):
expected = {
cube.seed_dataset: store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}),
name=cube.seed_dataset,
),
"enrich": store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0], "v1": 100}),
name="enrich",
),
}
store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0], "v2": 100}),
name="foo",
)
actual = discover_datasets(cube, function_store, {"myseed", "enrich"})
assert_datasets_equal(actual, expected)
def test_filter_ignores_invalid(self, cube, function_store):
expected = {
cube.seed_dataset: store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}),
name=cube.seed_dataset,
),
"enrich": store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0], "v1": 100}),
name="enrich",
),
}
store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame(
{
"x": [0],
"y": [0],
"p": [0],
"q": [0],
"v1": 100, # overlapping payload
}
),
name="foo",
)
actual = discover_datasets(cube, function_store, {"myseed", "enrich"})
assert_datasets_equal(actual, expected)
def test_filter_missing(self, cube, function_store):
store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}),
name=cube.seed_dataset,
)
with pytest.raises(ValueError) as exc:
discover_datasets(cube, function_store, {"myseed", "enrich"})
assert (
str(exc.value) == "Could not find the following requested datasets: enrich"
)
def test_filter_empty(self, cube, function_store):
store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}),
name=cube.seed_dataset,
)
with pytest.raises(ValueError) as exc:
discover_datasets(cube, function_store, {})
assert str(exc.value) == 'Seed data ("myseed") is missing.'
def test_raises_partial_datasets_found(self, cube, function_store):
store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}),
name="enrich",
)
with pytest.raises(ValueError) as exc:
discover_datasets(
cube,
function_store,
filter_ktk_cube_dataset_ids=["enrich", "non_existing_table"],
)
assert (
str(exc.value)
== "Could not find the following requested datasets: non_existing_table"
)
def test_raises_no_datasets_found(self, cube, function_store):
with pytest.raises(ValueError) as exc:
discover_datasets(
cube,
function_store,
filter_ktk_cube_dataset_ids=["enrich", "non_existing_table"],
)
assert (
str(exc.value)
== "Could not find the following requested datasets: enrich, non_existing_table"
)
def test_msgpack(self, cube, function_store):
expected = {
cube.seed_dataset: store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}),
name=cube.seed_dataset,
),
"enrich": store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0], "v1": 100}),
name="enrich",
metadata_storage_format="msgpack",
),
}
actual = discover_datasets(cube, function_store)
assert_datasets_equal(actual, expected)
def test_empty_dataset(self, cube, function_store):
expected = {
cube.seed_dataset: store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}),
name=cube.seed_dataset,
),
"enrich": store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0], "v1": 100}),
name="enrich",
metadata_storage_format="msgpack",
),
}
expected = {
filter_ktk_cube_dataset_id: update_dataset_from_dataframes(
[], store=function_store, dataset_uuid=ds.uuid, delete_scope=[{}]
)
for filter_ktk_cube_dataset_id, ds in expected.items()
}
actual = discover_datasets(cube, function_store)
assert_datasets_equal(actual, expected)
class TestDiscoverCube:
def test_seed_only(self, cube, function_store):
store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0], "i1": [0]})
'''
Class for a bipartite network
'''
from pandas.core.indexes.base import InvalidIndexError
from tqdm.auto import tqdm
import numpy as np
# from numpy_groupies.aggregate_numpy import aggregate
import pandas as pd
from pandas import DataFrame, Int64Dtype
# from scipy.sparse.csgraph import connected_components
import warnings
import bipartitepandas as bpd
from bipartitepandas import col_order, update_dict, to_list, logger_init, col_dict_optional_cols, aggregate_transform, ParamsDict
import igraph as ig
def recollapse_loop(force=False):
'''
Decorator function that accounts for issues with selecting ids under particular restrictions for collapsed data. In particular, looking at a restricted set of observations can require recollapsing data, which can then change which observations meet the given restrictions. This function loops until stability is achieved.
Arguments:
force (bool): if True, force loop for non-collapsed data
'''
def recollapse_loop_inner(func):
def recollapse_loop_inner_inner(*args, **kwargs):
# Do function
self = args[0]
frame = func(*args, **kwargs)
if force or isinstance(self, (bpd.BipartiteLongCollapsed, bpd.BipartiteEventStudyCollapsed)):
kwargs['copy'] = False
if len(frame) != len(self):
# If the frame changes, we have to re-loop until stability
frame_prev = frame
frame = func(frame_prev, *args[1:], **kwargs)
while len(frame) != len(frame_prev):
frame_prev = frame
frame = func(frame_prev, *args[1:], **kwargs)
return frame
return recollapse_loop_inner_inner
return recollapse_loop_inner
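# Illustrative sketch (the method below is hypothetical, not part of this module):
# recollapse_loop is meant to wrap frame-restricting methods of BipartiteBase
# subclasses. Dropping rows from collapsed data can change how spells collapse,
# so the wrapper re-applies the wrapped method until the row count stabilises.
#
#     @recollapse_loop(force=False)
#     def _keep_large_firms(self, min_obs, copy=True):
#         # keep only observations at firms with at least `min_obs` rows
#         return self.loc[self.groupby('j')['i'].transform('size') >= min_obs]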
# Define default parameter dictionaries
_clean_params_default = ParamsDict({
'connectedness': ('connected', 'set', ['connected', 'leave_one_observation_out', 'leave_one_firm_out', None],
'''
(default='connected') When computing largest connected set of firms: if 'connected', keep observations in the largest connected set of firms; if 'leave_one_observation_out', keep observations in the largest leave-one-observation-out connected set; if 'leave_one_firm_out', keep observations in the largest leave-one-firm-out connected set; if None, keep all observations.
'''),
'component_size_variable': ('firms', 'set', ['len', 'length', 'firms', 'workers', 'stayers', 'movers'],
'''
(default='firms') How to determine largest connected component. Options are 'len'/'length' (length of frame), 'firms' (number of unique firms), 'workers' (number of unique workers), 'stayers' (number of unique stayers), and 'movers' (number of unique movers).
'''),
'i_t_how': ('max', 'set', ['max', 'sum', 'mean'],
'''
(default='max') When dropping i-t duplicates: if 'max', keep max paying job; if 'sum', sum over duplicate worker-firm-year observations, then take the highest paying worker-firm sum; if 'mean', average over duplicate worker-firm-year observations, then take the highest paying worker-firm average. Note that if multiple time and/or firm columns are included (as in event study format), then data is converted to long, cleaned, then reconverted to its original format.
'''),
'drop_multiples': (False, 'type', bool,
'''
(default=False) If True, rather than collapsing over spells, drop any spells with multiple observations (this is for computational efficiency when re-collapsing data for biconnected components).
'''),
'is_sorted': (False, 'type', bool,
'''
(default=False) If False, dataframe will be sorted by i (and t, if included). Set to True if already sorted.
'''),
'force': (True, 'type', bool,
'''
(default=True) If True, force all cleaning methods to run; much faster if set to False.
'''),
'copy': (True, 'type', bool,
'''
(default=True) If False, avoid copying data when possible.
''')
})
def clean_params(update_dict={}):
'''
Dictionary of default clean_params.
Arguments:
update_dict (dict): user parameter values
Returns:
(ParamsDict) dictionary of clean_params
'''
new_dict = _clean_params_default.copy()
new_dict.update(update_dict)
return new_dict
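# Example (illustrative): start from the defaults and override only the keys of
# interest; unspecified keys keep their documented defaults.
#
#     params = clean_params({'connectedness': 'leave_one_firm_out', 'copy': False})
#     # `params` can then be passed wherever a clean_params dictionary is expected.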
_cluster_params_default = ParamsDict({
'measures': (bpd.measures.cdfs(), 'list_of_type', (bpd.measures.cdfs, bpd.measures.moments),
'''
(default=bpd.measures.cdfs()) How to compute measures for clustering. Options can be seen in bipartitepandas.measures.
'''),
'grouping': (bpd.grouping.kmeans(), 'type', (bpd.grouping.kmeans, bpd.grouping.quantiles),
'''
(default=bpd.grouping.kmeans()) How to group firms based on measures. Options can be seen in bipartitepandas.grouping.
'''),
'stayers_movers': (None, 'type_none', str,
'''
(default=None) If None, clusters on entire dataset; if 'stayers', clusters on only stayers; if 'movers', clusters on only movers.
'''),
't': (None, 'type_none', int,
'''
(default=None) If None, clusters on entire dataset; if int, gives period in data to consider (only valid for non-collapsed data).
'''),
'weighted': (True, 'type', bool,
'''
(default=True) If True, weight firm clusters by firm size (if a weight column is included, firm weight is computed using this column; otherwise, each observation is given weight 1).
'''),
'dropna': (False, 'type', bool,
'''
(default=False) If True, drop observations where firms aren't clustered; if False, keep all observations.
'''),
'clean_params': (None, 'type_none', bpd.ParamsDict,
'''
(default=None) Dictionary of parameters for cleaning. This is used when observations get dropped because they were not clustered. Default is None, which sets connectedness to be the connectedness measure previously used. Run bpd.clean_params().describe_all() for descriptions of all valid parameters.
'''),
'is_sorted': (False, 'type', bool,
'''
(default=False) For event study format. If False, dataframe will be sorted by i (and t, if included). Set to True if already sorted.
'''),
'copy': (True, 'type', bool,
'''
(default=True) If False, avoid copy.
''')
})
def cluster_params(update_dict={}):
'''
Dictionary of default cluster_params.
Arguments:
update_dict (dict): user parameter values
Returns:
(ParamsDict) dictionary of cluster_params
'''
new_dict = _cluster_params_default.copy()
new_dict.update(update_dict)
return new_dict
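# Example (illustrative): cluster movers only and group firms by quantiles instead
# of the default k-means (bpd.grouping.quantiles is listed above as a valid option).
#
#     params = cluster_params({'grouping': bpd.grouping.quantiles(),
#                              'stayers_movers': 'movers'})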
class BipartiteBase(DataFrame):
'''
Base class for BipartitePandas, where BipartitePandas gives a bipartite network of firms and workers. Contains generalized methods. Inherits from DataFrame.
Arguments:
*args: arguments for Pandas DataFrame
columns_req (list): required columns (only put general column names for joint columns, e.g. put 'j' instead of 'j1', 'j2'; then put the joint columns in reference_dict)
columns_opt (list): optional columns (only put general column names for joint columns, e.g. put 'g' instead of 'g1', 'g2'; then put the joint columns in reference_dict)
columns_contig (dictionary): columns requiring contiguous ids linked to boolean of whether those ids are contiguous, or None if column(s) not included, e.g. {'i': False, 'j': False, 'g': None} (only put general column names for joint columns)
reference_dict (dict): clarify which columns are associated with a general column name, e.g. {'i': 'i', 'j': ['j1', 'j2']}
col_dtype_dict (dict): link column to datatype
col_dict (dict or None): make data columns readable. Keep None if column names already correct
include_id_reference_dict (bool): if True, create dictionary of Pandas dataframes linking original id values to contiguous id values
log (bool): if True, will create log file(s)
**kwargs: keyword arguments for Pandas DataFrame
'''
# Attributes, required for Pandas inheritance
_metadata = ['col_dict', 'reference_dict', 'id_reference_dict', 'col_dtype_dict', 'columns_req', 'columns_opt', 'columns_contig', 'default_cluster', 'dtype_dict', 'default_clean', 'connectedness', 'no_na', 'no_duplicates', 'i_t_unique', '_log_on_indicator', '_level_fn_dict']
def __init__(self, *args, columns_req=[], columns_opt=[], columns_contig=[], reference_dict={}, col_dtype_dict={}, col_dict=None, include_id_reference_dict=False, log=True, **kwargs):
# Initialize DataFrame
super().__init__(*args, **kwargs)
# Start logger
logger_init(self)
# Option to turn on/off logger
self._log_on_indicator = log
# self.log('initializing BipartiteBase object', level='info')
if len(args) > 0 and isinstance(args[0], BipartiteBase):
# Note that isinstance works for subclasses
self._set_attributes(args[0], include_id_reference_dict)
else:
self.columns_req = ['i', 'j', 'y'] + columns_req
self.columns_opt = ['g', 'm'] + columns_opt
self.columns_contig = update_dict({'i': False, 'j': False, 'g': None}, columns_contig)
self.reference_dict = update_dict({'i': 'i', 'm': 'm'}, reference_dict)
self._reset_id_reference_dict(include_id_reference_dict) # Link original id values to contiguous id values
self.col_dtype_dict = update_dict({'i': 'int', 'j': 'int', 'y': 'float', 't': 'int', 'g': 'int', 'm': 'int'}, col_dtype_dict)
default_col_dict = {}
for col in to_list(self.columns_req):
for subcol in to_list(self.reference_dict[col]):
default_col_dict[subcol] = subcol
for col in to_list(self.columns_opt):
for subcol in to_list(self.reference_dict[col]):
default_col_dict[subcol] = None
# Create self.col_dict
self.col_dict = col_dict_optional_cols(default_col_dict, col_dict, self.columns, optional_cols=[self.reference_dict[col] for col in self.columns_opt])
# Set attributes
self._reset_attributes()
# Dictionary of logger functions based on level
self._level_fn_dict = {
'debug': self.logger.debug,
'info': self.logger.info,
'warning': self.logger.warning,
'error': self.logger.error,
'critical': self.logger.critical
}
self.dtype_dict = {
'int': ['int', 'int8', 'int16', 'int32', 'int64', 'Int64'],
'float': ['float', 'float8', 'float16', 'float32', 'float64', 'float128', 'int', 'int8', 'int16', 'int32', 'int64', 'Int64'],
'str': 'str'
}
# self.log('BipartiteBase object initialized', level='info')
@property
def _constructor(self):
'''
For inheritance from Pandas.
'''
return BipartiteBase
def copy(self):
'''
Return copy of self.
Returns:
bdf_copy (BipartiteBase): copy of instance
'''
df_copy = DataFrame(self, copy=True)
# Set logging on/off depending on current selection
bdf_copy = self._constructor(df_copy, log=self._log_on_indicator)
# This copies attribute dictionaries, default copy does not
bdf_copy._set_attributes(self)
return bdf_copy
def log_on(self, on=True):
'''
Toggle logger on or off.
Arguments:
on (bool): if True, turn logger on; if False, turn logger off
'''
self._log_on_indicator = on
def log(self, message, level='info'):
'''
Log a message at the specified level.
Arguments:
message (str): message to log
level (str): logger level. Options, in increasing severity, are 'debug', 'info', 'warning', 'error', and 'critical'.
'''
if self._log_on_indicator:
# Log message
self._level_fn_dict[level](message)
def summary(self):
'''
Print summary statistics. This uses class attributes. To run a diagnostic to verify these values, run `.diagnostic()`.
'''
ret_str = ''
y = self.loc[:, self.reference_dict['y']].to_numpy()
mean_wage = np.mean(y)
median_wage = np.median(y)
max_wage = np.max(y)
min_wage = np.min(y)
var_wage = np.var(y)
ret_str += 'format: {}\n'.format(type(self).__name__)
ret_str += 'number of workers: {}\n'.format(self.n_workers())
ret_str += 'number of firms: {}\n'.format(self.n_firms())
ret_str += 'number of observations: {}\n'.format(len(self))
ret_str += 'mean wage: {}\n'.format(mean_wage)
ret_str += 'median wage: {}\n'.format(median_wage)
ret_str += 'min wage: {}\n'.format(min_wage)
ret_str += 'max wage: {}\n'.format(max_wage)
ret_str += 'var(wage): {}\n'.format(var_wage)
ret_str += 'no NaN values: {}\n'.format(self.no_na)
ret_str += 'no duplicates: {}\n'.format(self.no_duplicates)
ret_str += 'i-t (worker-year) observations unique (None if t column(s) not included): {}\n'.format(self.i_t_unique)
for contig_col, is_contig in self.columns_contig.items():
ret_str += 'contiguous {} ids (None if not included): {}\n'.format(contig_col, is_contig)
ret_str += 'connectedness (None if ignoring connectedness): {}'.format(self.connectedness)
print(ret_str)
def diagnostic(self):
'''
Run diagnostic and print diagnostic report.
'''
ret_str = '----- General Diagnostic -----\n'
##### Sorted by i (and t, if included) #####
sort_order = ['i']
if self._col_included('t'):
# If t column
sort_order.append(to_list(self.reference_dict['t'])[0])
is_sorted = (self.loc[:, sort_order] == self.loc[:, sort_order].sort_values(sort_order)).to_numpy().all()
ret_str += 'sorted by i (and t, if included): {}\n'.format(is_sorted)
##### No NaN values #####
# Source: https://stackoverflow.com/a/29530601/17333120
no_na = (not self.isnull().to_numpy().any())
ret_str += 'no NaN values: {}\n'.format(no_na)
##### No duplicates #####
# https://stackoverflow.com/a/50243108/17333120
no_duplicates = (not self.duplicated().any())
ret_str += 'no duplicates: {}\n'.format(no_duplicates)
##### i-t unique #####
no_i_t_duplicates = (not self.duplicated(subset=sort_order).any())
ret_str += 'i-t (worker-year) observations unique (if t column(s) not included, then i observations unique): {}\n'.format(no_i_t_duplicates)
##### Contiguous ids #####
for contig_col in self.columns_contig.keys():
if self._col_included(contig_col):
contig_ids = self.unique_ids(contig_col)
is_contig = (len(contig_ids) == (max(contig_ids) + 1))
ret_str += 'contiguous {} ids (None if not included): {}\n'.format(contig_col, is_contig)
else:
ret_str += 'contiguous {} ids (None if not included): {}\n'.format(contig_col, None)
##### Connectedness #####
is_connected_dict = {
None: lambda : None,
'connected': lambda : self._construct_graph(self.connectedness).is_connected(),
'leave_one_observation_out': lambda: (len(self) == len(self._conset(connectedness=self.connectedness))),
'leave_one_firm_out': lambda: (len(self) == len(self._conset(connectedness=self.connectedness)))
}
is_connected = is_connected_dict[self.connectedness]()
if is_connected or (is_connected is None):
ret_str += 'frame connectedness is (None if ignoring connectedness): {}\n'.format(self.connectedness)
else:
ret_str += 'frame failed connectedness: {}\n'.format(self.connectedness)
if self._col_included('m'):
##### m column #####
m_correct = (self.loc[:, 'm'] == self.gen_m(force=True).loc[:, 'm']).to_numpy().all()
ret_str += "'m' column correct (None if not included): {}\n".format(m_correct)
else:
ret_str += "'m' column correct (None if not included): {}".format(None)
print(ret_str)
def unique_ids(self, id_col):
'''
Unique ids in column.
Arguments:
id_col (str): column to check ids ('i', 'j', or 'g'). Use general column names for joint columns, e.g. put 'j' instead of 'j1', 'j2'
Returns:
(NumPy Array): unique ids
'''
id_lst = []
for id_subcol in to_list(self.reference_dict[id_col]):
id_lst += list(self.loc[:, id_subcol].unique())
return np.array(list(set(id_lst)))
def n_unique_ids(self, id_col):
'''
Number of unique ids in column.
Arguments:
id_col (str): column to check ids ('i', 'j', or 'g'). Use general column names for joint columns, e.g. put 'j' instead of 'j1', 'j2'
Returns:
(int): number of unique ids
'''
return len(self.unique_ids(id_col))
def n_workers(self):
'''
Get the number of unique workers.
Returns:
(int): number of unique workers
'''
return self.loc[:, 'i'].nunique()
def n_firms(self):
'''
Get the number of unique firms.
Returns:
(int): number of unique firms
'''
return self.n_unique_ids('j')
def n_clusters(self):
'''
Get the number of unique clusters.
Returns:
(int or None): number of unique clusters, None if not clustered
'''
if not self._col_included('g'): # If cluster column not in dataframe
return None
return self.n_unique_ids('g')
def original_ids(self, copy=True):
'''
Return self merged with original column ids.
Arguments:
copy (bool): if False, avoid copy
Returns:
(BipartiteBase or None): copy of self merged with original column ids, or None if id_reference_dict is empty
'''
frame = pd.DataFrame(self, copy=copy)
if self.id_reference_dict:
for id_col, reference_df in self.id_reference_dict.items():
if len(reference_df) > 0: # Make sure non-empty
for id_subcol in to_list(self.reference_dict[id_col]):
try:
frame = frame.merge(reference_df.loc[:, ['original_ids', 'adjusted_ids_' + str(len(reference_df.columns) - 1)]].rename({'original_ids': 'original_' + id_subcol, 'adjusted_ids_' + str(len(reference_df.columns) - 1): id_subcol}, axis=1), how='left', on=id_subcol)
except TypeError: # Int64 error with NaNs
frame.loc[:, id_col] = frame.loc[:, id_col].astype('Int64', copy=False)
frame = frame.merge(reference_df.loc[:, ['original_ids', 'adjusted_ids_' + str(len(reference_df.columns) - 1)]].rename({'original_ids': 'original_' + id_subcol, 'adjusted_ids_' + str(len(reference_df.columns) - 1): id_subcol}, axis=1), how='left', on=id_subcol)
# else:
# # If no changes, just make original_id be the same as the current id
# for id_subcol in to_list(self.reference_dict[id_col]):
# frame['original_' + id_subcol] = frame[id_subcol]
return frame
else:
warnings.warn('id_reference_dict is empty. Either your id columns are already correct, or you did not specify `include_id_reference_dict=True` when initializing your BipartitePandas object')
return None
def _set_attributes(self, frame, no_dict=False, include_id_reference_dict=False):
'''
Set class attributes to equal those of another BipartitePandas object.
Arguments:
frame (BipartitePandas): BipartitePandas object whose attributes to use
no_dict (bool): if True, only set booleans, no dictionaries
include_id_reference_dict (bool): if True, create dictionary of Pandas dataframes linking original id values to contiguous id values
'''
# Dictionaries
if not no_dict:
self.columns_req = frame.columns_req.copy()
self.columns_opt = frame.columns_opt.copy()
self.reference_dict = frame.reference_dict.copy()
self.col_dtype_dict = frame.col_dtype_dict.copy()
self.col_dict = frame.col_dict.copy()
self.columns_contig = frame.columns_contig.copy() # Required, even if no_dict
if frame.id_reference_dict:
self.id_reference_dict = {}
# Must do a deep copy
for id_col, reference_df in frame.id_reference_dict.items():
self.id_reference_dict[id_col] = reference_df.copy()
else:
# This is if the original dataframe DIDN'T have an id_reference_dict (but the new dataframe may or may not)
self._reset_id_reference_dict(include_id_reference_dict)
# # Logger
# self.logger = frame.logger
# Booleans
self.connectedness = frame.connectedness # If False, not connected; if 'connected', all observations are in the largest connected set of firms; if 'leave_one_observation_out', observations are in the largest leave-one-observation-out connected set; if 'leave_one_firm_out', observations are in the largest leave-one-firm-out connected set; if None, connectedness ignored
self.no_na = frame.no_na # If True, no NaN observations in the data
self.no_duplicates = frame.no_duplicates # If True, no duplicate rows in the data
self.i_t_unique = frame.i_t_unique # If True, each worker has at most one observation per period
def _reset_attributes(self, columns_contig=True, connected=True, no_na=True, no_duplicates=True, i_t_unique=True):
'''
Reset class attributes conditions to be False/None.
Arguments:
columns_contig (bool): if True, reset self.columns_contig
connected (bool): if True, reset self.connectedness
no_na (bool): if True, reset self.no_na
no_duplicates (bool): if True, reset self.no_duplicates
i_t_unique (bool): if True, reset self.i_t_unique
Returns:
self (BipartiteBase): self with reset class attributes
'''
if columns_contig:
for contig_col in self.columns_contig.keys():
if self._col_included(contig_col):
self.columns_contig[contig_col] = False
else:
self.columns_contig[contig_col] = None
if connected:
self.connectedness = None # If False, not connected; if 'connected', all observations are in the largest connected set of firms; if 'leave_one_observation_out', observations are in the largest leave-one-observation-out connected set; if 'leave_one_firm_out', observations are in the largest leave-one-firm-out connected set; if None, connectedness ignored
if no_na:
self.no_na = False # If True, no NaN observations in the data
if no_duplicates:
self.no_duplicates = False # If True, no duplicate rows in the data
if i_t_unique:
self.i_t_unique = None # If True, each worker has at most one observation per period; if None, t column not included (set to False later in method if t column included)
# Verify whether period included
if self._col_included('t'):
self.i_t_unique = False
# logger_init(self)
return self
def _reset_id_reference_dict(self, include=False):
'''
Reset id_reference_dict.
Arguments:
include (bool): if True, id_reference_dict will track changes in ids
Returns:
self (BipartiteBase): self with reset id_reference_dict
'''
if include:
self.id_reference_dict = {id_col: pd.DataFrame() for id_col in self.reference_dict.keys()}
else:
self.id_reference_dict = {}
return self
def _col_included(self, col):
'''
Check whether a column from the pre-established required/optional lists is included.
Arguments:
col (str): column to check. Use general column names for joint columns, e.g. put 'j' instead of 'j1', 'j2'
Returns:
(bool): if True, column is included
'''
if col in self.columns_req + self.columns_opt:
for subcol in to_list(self.reference_dict[col]):
if self.col_dict[subcol] is None:
return False
return True
return False
def _included_cols(self, flat=False):
'''
Get all columns included from the pre-established required/optional lists.
Arguments:
flat (bool): if False, uses general column names for joint columns, e.g. returns 'j' instead of 'j1', 'j2'.
Returns:
all_cols (list): included columns
'''
all_cols = []
for col in self.columns_req + self.columns_opt:
include = True
for subcol in to_list(self.reference_dict[col]):
if self.col_dict[subcol] is None:
include = False
break
if include:
if flat:
all_cols += to_list(self.reference_dict[col])
else:
all_cols.append(col)
return all_cols
def drop(self, indices, axis=0, inplace=False, allow_required=False):
'''
Drop indices along axis.
Arguments:
indices (int or str, optionally as a list): row(s) or column(s) to drop. For columns, use general column names for joint columns, e.g. put 'g' instead of 'g1', 'g2'. Only optional columns may be dropped
axis (int): 0 to drop rows, 1 to drop columns
inplace (bool): if True, modify in-place
allow_required (bool): if True, allow to drop required columns
Returns:
frame (BipartiteBase): BipartiteBase with dropped indices
'''
frame = self
if axis == 1:
for col in to_list(indices):
if col in frame.columns or col in frame.columns_req or col in frame.columns_opt:
if col in frame.columns_opt: # If column optional
for subcol in to_list(frame.reference_dict[col]):
if inplace:
DataFrame.drop(frame, subcol, axis=1, inplace=True)
else:
frame = DataFrame.drop(frame, subcol, axis=1, inplace=False)
frame.col_dict[subcol] = None
if col in frame.columns_contig.keys(): # If column contiguous
frame.columns_contig[col] = None
if frame.id_reference_dict: # If id_reference_dict has been initialized
frame.id_reference_dict[col] = pd.DataFrame()
elif col not in frame._included_cols() and col not in frame._included_cols(flat=True): # If column is not pre-established
if inplace:
DataFrame.drop(frame, col, axis=1, inplace=True)
else:
frame = DataFrame.drop(frame, col, axis=1, inplace=False)
else:
if not allow_required:
warnings.warn("{} is either (a) a required column and cannot be dropped or (b) a subcolumn that can be dropped, but only by specifying the general column name (e.g. use 'g' instead of 'g1' or 'g2')".format(col))
else:
if inplace:
DataFrame.drop(frame, col, axis=1, inplace=True)
else:
frame = DataFrame.drop(frame, col, axis=1, inplace=False)
else:
warnings.warn('{} is not in data columns'.format(col))
elif axis == 0:
if inplace:
DataFrame.drop(frame, indices, axis=0, inplace=True)
else:
frame = DataFrame.drop(frame, indices, axis=0, inplace=False)
frame._reset_attributes()
# frame.clean_data({'connectedness': frame.connectedness})
return frame
def rename(self, rename_dict, inplace=True):
'''
Rename a column.
Arguments:
rename_dict (dict): key is current column name, value is new column name. Use general column names for joint columns, e.g. put 'g' instead of 'g1', 'g2'. Only optional columns may be renamed
inplace (bool): if True, modify in-place
Returns:
frame (BipartiteBase): BipartiteBase with renamed columns
'''
if inplace:
frame = self
else:
frame = self.copy()
for col_cur, col_new in rename_dict.items():
if col_cur in frame.columns or col_cur in frame.columns_req or col_cur in frame.columns_opt:
if col_cur in self.columns_opt: # If column optional
if len(to_list(self.reference_dict[col_cur])) > 1:
for i, subcol in enumerate(to_list(self.reference_dict[col_cur])):
DataFrame.rename(frame, {subcol: col_new + str(i + 1)}, axis=1, inplace=True)
frame.col_dict[subcol] = None
else:
DataFrame.rename(frame, {col_cur: col_new}, axis=1, inplace=True)
frame.col_dict[col_cur] = None
if col_cur in frame.columns_contig.keys(): # If column contiguous
frame.columns_contig[col_cur] = None
if frame.id_reference_dict: # If id_reference_dict has been initialized
frame.id_reference_dict[col_cur] = pd.DataFrame()
elif col_cur not in frame._included_cols() and col_cur not in frame._included_cols(flat=True): # If column is not pre-established
DataFrame.rename(frame, {col_cur: col_new}, axis=1, inplace=True)
else:
warnings.warn("{} is either (a) a required column and cannot be renamed or (b) a subcolumn that can be renamed, but only by specifying the general column name (e.g. use 'g' instead of 'g1' or 'g2')".format(col_cur))
else:
warnings.warn('{} is not in data columns'.format(col_cur))
return frame
def merge(self, *args, **kwargs):
'''
Merge two BipartiteBase objects.
Arguments:
*args: arguments for Pandas merge
**kwargs: keyword arguments for Pandas merge
Returns:
frame (BipartiteBase): merged dataframe
'''
frame = DataFrame.merge(self, *args, **kwargs)
frame = self._constructor(frame) # Use correct constructor
if kwargs.get('how', 'inner') == 'left': # Non-left merge could cause issues with data, so attributes are reset by default ('how' defaults to 'inner' when not given)
frame._set_attributes(self)
return frame
def _contiguous_ids(self, id_col, copy=True):
'''
Make column of ids contiguous.
Arguments:
id_col (str): column to make contiguous ('i', 'j', or 'g'). Use general column names for joint columns, e.g. put 'j' instead of 'j1', 'j2'
copy (bool): if False, avoid copy
Returns:
frame (BipartiteBase): BipartiteBase with contiguous ids
'''
if copy:
frame = self.copy()
else:
frame = self
cols = to_list(frame.reference_dict[id_col])
n_cols = len(cols)
n_rows = len(frame)
all_ids = frame.loc[:, cols].to_numpy().reshape(n_cols * n_rows)
# Source: https://stackoverflow.com/questions/16453465/multi-column-factorize-in-pandas
factorized = pd.factorize(all_ids)
import streamlit as st
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import scipy.stats as ss
import numpy as np
import itertools
def cramers_corrected_stat(confusion_matrix):
""" calculate Cramers V statistic for categorical-categorical association.
uses correction from Bergsma and Wicher,
Journal of the Korean Statistical Society 42 (2013): 323-328
"""
chi2 = ss.chi2_contingency(confusion_matrix)[0]
n = confusion_matrix.sum().sum()
phi2 = chi2/n
r,k = confusion_matrix.shape
phi2corr = max(0, phi2 - ((k-1)*(r-1))/(n-1))
rcorr = r - ((r-1)**2)/(n-1)
kcorr = k - ((k-1)**2)/(n-1)
return np.sqrt(phi2corr / min( (kcorr-1), (rcorr-1)))
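# Illustrative usage (assumes a DataFrame `data` with two categorical columns,
# here hypothetically named 'gender' and 'churn'):
#
#     confusion = pd.crosstab(data['gender'], data['churn'])
#     v = cramers_corrected_stat(confusion)  # ~0 = no association, 1 = perfect
#
# The Bergsma-Wicher correction keeps V from being inflated for small samples or
# tables with many categories.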
# Page layout
## Page expands to full width
st.set_page_config(page_title='Data Science App',
layout='wide')
# Model building
def build_model(data):
sns.set_style('darkgrid')
global target_variable
st.markdown('**1.2- Dataset general info**')
st.text('Dataset shape:')
st.text(df.shape)
categorical_attributes = list(data.select_dtypes(include=['object']).columns)
st.text("Categorical Variables:")
st.text(categorical_attributes)
numerical_attributes = list(data.select_dtypes(include=['float64', 'int64']).columns)
st.text("Numerical Variables:")
st.text(numerical_attributes)
st.markdown('**1.3- Duplicated values**')
st.text(data.duplicated().sum())
st.markdown('**1.4- Missing values**')
st.text(data.isnull().sum())
st.markdown('**1.5- Unique values in the Categorical Variables**')
for col_name in data.columns:
if data[col_name].dtypes == 'object':
unique_cat = len(data[col_name].unique())
st.text("Feature '{col_name}' has {unique_cat} unique categories".format(col_name=col_name, unique_cat=unique_cat))
st.subheader('2- Exploratory Data Analysis (EDA)')
hue = target_variable
st.markdown('**2.1- Descriptive Statistics**')
st.text(data.describe())
st.markdown('**2.2- Outlier detection by Boxplot**')
if len(numerical_attributes) == 0:
st.text('There is no numerical variable')
else:
for a in numerical_attributes:
st.text(a)
fig = plt.figure(figsize = (20,10))
sns.boxplot(data[a])
st.pyplot(fig)
if data[target_variable].dtypes == 'O':
catplots(data)
else:
if len(data[target_variable].unique()) > 5:
numplots(data)
else:
catplots(data)
def catplots(data):
sns.set_style('darkgrid')
global target_variable
hue = target_variable
categorical_attributes = list(data.select_dtypes(include=['object']).columns)
numerical_attributes = list(data.select_dtypes(include=['float64', 'int64']).columns)
st.markdown('**2.3- Target Variable plot**')
st.text("Target variable:" + hue)
fig = plt.figure(figsize = (20,10))
ax = sns.countplot(data[hue])
for p in ax.patches:
height = p.get_height()
ax.text(x = p.get_x()+(p.get_width()/2), y = height*1.01, s = '{:.0f}'.format(height), ha = 'center')
st.pyplot(fig)
st.markdown('**2.4- Numerical Variables**')
#fig = plt.figure(figsize = (5,5))
#sns.pairplot(data, hue = hue)
#st.pyplot(fig)
st.markdown('***2.4.1- Correlation***')
try:
fig = plt.figure(figsize = (20,10))
sns.heatmap(data.corr(), cmap = 'Blues', annot = True)
st.pyplot(fig)
except:
st.text('There is no numerical variable')
st.markdown('***2.4.2- Distributions***')
for a in numerical_attributes:
st.text(a)
fig = plt.figure(figsize = (20,10))
sns.histplot(data = data , x =a , kde = True, hue = hue)
st.pyplot(fig)
st.markdown('**2.5- Categorical Variables**')
if len(categorical_attributes) == 0:
st.text('There is no categorical variable')
else:
for a in categorical_attributes:
if a == hue:
pass
else:
if len(data[a].unique()) < 13:
st.text(a)
fig = plt.figure()
g = sns.catplot(data = data, x = a, kind = 'count', col = hue, sharey=False)
for i in range(data[hue].nunique()):
ax = g.facet_axis(0,i)
for p in ax.patches:
height = p.get_height()
ax.text(x = p.get_x()+(p.get_width()/2), y = height * 1.01 , s = '{:.0f}'.format(height), ha = 'center')
g.set_xticklabels(rotation=90)
st.pyplot(g)
st.markdown('***2.5.1- Correlation between categorical variables***')
corrM = np.zeros((len(categorical_attributes),len(categorical_attributes)))
for col1, col2 in itertools.combinations(categorical_attributes, 2):
idx1, idx2 = categorical_attributes.index(col1), categorical_attributes.index(col2)
corrM[idx1, idx2] = cramers_corrected_stat(pd.crosstab(data[col1], data[col2]))
""" Module containing functions to convert between time formats """
import warnings
import pandas as pd
import numpy as np
from gnssmapper.common.constants import gps_epoch, leap_seconds, nanos_in_day
def gps_to_utc(time: pd.Series) -> pd.Series:
"""Converts nanos since gps epoch into utc datetime format."""
_check_nanos(time)
time_int = time.astype('Int64')
# origin defined indirectly due to incompatibility with Int64
ts_gps = pd.to_datetime(time_int+gps_epoch.value, unit='ns')
# iterates leapsecond calculation to avoid problems at rollover.
ts_estimate = ts_gps - pd.to_timedelta(leap_seconds(ts_gps), unit='s')
return ts_gps - pd.to_timedelta(leap_seconds(ts_estimate), unit='s')
def utc_to_gps(time: pd.Series) -> pd.Series:
"""Converts utc datetime into nanos since gps epoch."""
delta = (time
+ pd.to_timedelta(leap_seconds(time), unit='s', errors='coerce')
- gps_epoch)
return utc_to_int(delta)
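# Round-trip sketch (illustrative): leap seconds are subtracted on the way to UTC
# and added on the way back, so converting there and back is expected to recover
# the original nanosecond count.
#
#     nanos = pd.Series([1_300_000_000_000_000_000], dtype='Int64')
#     recovered = utc_to_gps(gps_to_utc(nanos))  # expected to equal `nanos`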
def gps_to_doy(time: pd.Series) -> pd.DataFrame:
"""Turn nanos from gps epoch into a (YYYYDOY, nanos) format."""
_check_nanos(time)
time_int = time.astype('Int64')
ts = pd.to_datetime(time_int + gps_epoch.value, unit='ns')
year = ts.dt.year.astype('Int64').astype("string")
day = ts.dt.dayofyear.astype('Int64').astype("string")
yeardoy = year.str.cat(day.str.pad(3, side='left', fillchar="0"))
time_in_day = ts - pd.to_datetime(yeardoy, format='%Y%j', errors='coerce')
ns = utc_to_int(time_in_day)
return pd.DataFrame({'date': yeardoy, 'time': ns})
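# Illustrative example: 0 nanoseconds after the GPS epoch (1980-01-06) splits into
# date '1980006' (year + zero-padded day-of-year) and time 0 nanoseconds into that
# day; doy_to_gps below reverses the split.
#
#     gps_to_doy(pd.Series([0], dtype='Int64'))  # -> date '1980006', time 0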
def doy_to_gps(date: pd.Series, time: pd.Series) -> pd.Series:
"""Turn (YYYYDOY, nanos) format into nanos from gps epoch"""
_check_nanos(time)
time_int = time.astype('Int64')
dt = pd.to_datetime(date, format='%Y%j', errors='coerce')
delta = dt + pd.to_timedelta(time_int, unit='ns')
import os
import sys
import time
from collections import defaultdict
import pandas as pd
import requests
from tqdm import tqdm
pd.options.mode.chained_assignment = None # default='warn'
# Fill in API key here
MOTIVE_API_URL = "https://api-data.motivesoftware.com/scores"
MOTIVE_API_KEY = "ADD-API-KEY-HERE"
def batch_apply_func_to_df(df, func, batch_size=250):
""" Applies the given function to the dataframe
in batches of batch_size rows, returns
concatenated dataframe of results
"""
in_dfs = [df[i:i + batch_size] for i in range(0, df.shape[0], batch_size)]
out_dfs = []
for batch_df in tqdm(in_dfs):
out_dfs.append(func(batch_df))
df = pd.concat(out_dfs)
return df
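# Illustrative usage (a sketch; assumes a DataFrame `df` with a 'text' column):
# functools.partial can pin arguments of the scorer defined below so the whole
# frame is scored in API-sized chunks.
#
#     from functools import partial
#     scorer = partial(score_with_motive_api, models=['sentiment'])
#     scored_df = batch_apply_func_to_df(df, scorer, batch_size=500)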
def score_with_motive_api(df, models=None, domain='cx', channel='surveys'):
""" Score a dataframe with the Motive API.
Expects text to be scored to be in 'text' column in dataframe.
Unless models are specified, default to returning sentiment and emotion.
Domain and channel parameters are also defaulted for ease of use.
"""
if not models:
models = ['sentiment', 'emotion']
if isinstance(models, str):
models = [models]
if df.shape[0] > 1000:
print("Max batch size is 1000, truncating")
df = df.head(n=1000)
# Format documents appropriately
docs_to_score = [{'document_id': str(i), 'text': s} for i, s in enumerate(df['text'].values)]
# Set up scoring call
headers = {"X-API-Key": MOTIVE_API_KEY}
payload = {
"correlation_id": "0",
"domain": domain,
"data_channel": channel,
"models": models,
"documents": docs_to_score
}
response = requests.post(MOTIVE_API_URL, json=payload, headers=headers)
# Poll for response
if response.status_code == requests.codes.accepted:
job_id = response.json()['job_id']
# get results
results_url = f"{MOTIVE_API_URL}/{job_id}"
response = requests.get(results_url, headers=headers)
# get responses
while response.status_code == requests.codes.accepted \
and response.json()['status'] not in ['ERROR', 'DONE']:
time.sleep(1)
response = requests.get(results_url, headers=headers)
all_responses = response.json()
else:
print(f'Error {response.content.decode("utf-8")}')
return df # bail out early; the parsing below would otherwise fail because no results were fetched
# Get the top classification for each model,
# ignoring secondary classifications (eg, emotions 2 & 3)
# or sentence-level scores for simplicity:
classifications = defaultdict(dict)
for doc in all_responses["documents"]:
idx = doc['document_id']
for model in doc["models"]:
model_name = model['model']
if not model['scores']:
continue
top_label = max(model['scores'], key=lambda x: x['score'])
label, score = top_label['label'], top_label['score']
classifications[idx][model_name] = label
classifications[idx]['%s_score' % model_name] = score
# Add classifications to original dataframe:
doc_ids = [d['document_id'] for d in docs_to_score]
for model in models:
df[model] = [classifications.get(idx, {}).get(model, None) for idx in doc_ids]
score_col = '%s_score' % model
df[score_col] = [classifications.get(idx, {}).get(score_col, None) for idx in doc_ids]
# Return scored dataframe
return df
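# Illustrative result (requires a valid MOTIVE_API_KEY): with the default models,
# the returned frame gains 'sentiment', 'sentiment_score', 'emotion' and
# 'emotion_score' columns holding each document's top label and score.
#
#     scored = score_with_motive_api(pd.DataFrame({'text': ['Great service!']}))
#     scored[['text', 'sentiment', 'sentiment_score']]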
if __name__ == '__main__':
# Pass in filename to score via command-line:
# (Should we upgrade to named parameters?)
if len(sys.argv) == 2:
file_name = sys.argv[1]
if not os.path.exists(file_name):
print("Error: Unrecognized file '%s'" % file_name)
sys.exit(1)
else:
print(f"You need to call {sys.argv[0]} with the name of the file to process (csv, xlsx)")
sys.exit(1)
# Check to make sure this is a CSV or an XLSX
if not file_name.endswith(".xlsx") and not file_name.endswith(".csv"):
print("Error: Expecting CSV of XLSX file")
sys.exit(1)
filetype = '.csv' if file_name.endswith('.csv') else '.xlsx'
# Create scored file name by appending
# "_Motive" to the filename:
scored_file_name = file_name.replace(filetype, '_Motive' + filetype)
# Assume that text to score is in 'text' column,
# or specify a different column here:
text_column = "text"
# Read the file
if filetype == '.csv':
df = pd.read_csv(file_name)
"""
Simulate the spread of sentiment amongst a population from a comment
Created by <NAME>, <NAME>, <NAME>, and <NAME>.
"""
import random
import plotly
import plotly.express as px
import pandas as pd
import numpy as np
from sentiment_model.sentiment_analysis_naive_bayes import SentimentAnalyzer
class PopulationSentimentSimulation:
"""
This abstract class represents a single simulation instance for a population
Instance Attributes:
- comment: The comment whose effect will be simulated
- comment_sentiment: The sentiment of the comment
- population: The size of the population to be simulated
- population_sentiment: The sentiments of each person in the population
Representation Invariants:
- self.population > 0
"""
comment: str
comment_sentiment: float
population: int
population_sentiment: list[float]
def compute_comment_sentiment(self, comment: str) -> None:
"""
Utilises the Sentiment Analysis model to compute the sentiment of the comment
"""
raise NotImplementedError
def generate_responses_sentiment(self, sentiment: float) -> None:
"""
Simulate the impact of the comment sentiment on the sentiment of the public
"""
raise NotImplementedError
def calc_impact(self, sentiment, index):
"""
Calculates the sentiment impact based on the sentiment value
"""
raise NotImplementedError
def generate_sentiment_impact(self) -> list[float]:
"""
Generate the sentiment impact using the past sentiment values
"""
raise NotImplementedError
def run_simulation(self, comment_raised: str) -> None:
"""
Manage the simulation and run it on the comment raised and generate the visualisation
"""
raise NotImplementedError
class OpinionSimulation(PopulationSentimentSimulation):
"""
This class represents a single opinion-spread simulation instance for a population
Instance Attributes:
- comment: The comment whose effect will be simulated
- comment_sentiment: The sentiment of the comment
- population: The size of the population to be simulated
- population_sentiment: The sentiments of each person in the population
Representation Invariants:
- self.population > 0
- self.j_max > 0
"""
comment: str
comment_sentiment: float
population: int
population_sentiment: list[float]
past_values: list[float]
j_max: int
def __init__(self, population: int) -> None:
self.population = population
self.population_sentiment = [0.0 for person in range(population)]
self.comment = ""
self.comment_sentiment = 0
self.past_values = list(self.population_sentiment)
self.j_max = 0
def compute_comment_sentiment(self, comment: str) -> None:
"""
Utilises the Sentiment Analysis model to compute the sentiment of the comment
"""
self.comment = comment
sentiment_analyzer = SentimentAnalyzer(pretrained=True)
probability_positive = sentiment_analyzer.classify(self.comment)[2]["0"][0] - 0.5
self.comment_sentiment = probability_positive * -100
print(self.comment_sentiment)
def generate_responses_sentiment(self, sentiment: float) -> None:
"""
Simulate the impact of the comment sentiment on the sentiment of the public
"""
sentiment_impact = []
sentiment_impact.append(self.calc_impact(sentiment, 0))
already_impacted = []
not_impacted = list(range(self.population))
for sentiments in sentiment_impact:
j = sentiment_impact.index(sentiments)
impacted = []
if not_impacted == []:
already_impacted = []
not_impacted = list(range(self.population))
for i in range(((self.population * 2) // 5) - j):
if not_impacted == []:
already_impacted = []
not_impacted = list(range(self.population))
temp = random.choice(not_impacted)
impacted.append(temp)
not_impacted.remove(temp)
already_impacted.append(temp)
if sum([abs(x) for x in sentiment_impact[j]]) <= 0.04 or j >= self.population * 3:
if j <= self.population:
sentiment_impact[j] = self.generate_sentiment_impact()
else:
self.j_max += j
return
for i in range(((self.population * 2) // 5) - j):
self.population_sentiment[impacted[i]] += sentiment_impact[j][i]
sentiment_impact.append(self.calc_impact(self.population_sentiment[impacted[i]], j))
self.past_values += self.population_sentiment
sentiment_impact.remove(sentiment_impact[j])
def calc_impact(self, sentiment, index):
"""
Calculates the sentiment impact based on the sentiment value
"""
sentiment_impact = []
if sentiment == 0:
sentiment_impact = [random.uniform(-abs(sentiment / 2), abs(sentiment / 2))
for a in range(((self.population * 2) // 5) - index)]
elif sentiment < 0:
sentiment_impact = [random.uniform(-abs(sentiment / 2), abs(sentiment / 4))
for a in range(((self.population * 2) // 5) - index)]
elif sentiment > 0:
sentiment_impact = [random.uniform(-abs(sentiment / 4), abs(sentiment / 2))
for a in range(((self.population * 2) // 5) - index)]
return sentiment_impact
def generate_sentiment_impact(self) -> list[float]:
"""
Generate the sentiment impact using the past sentiment values
"""
return ([random.uniform(-abs(max([abs(x) for x in [max(self.past_values),
min(self.past_values)]]) / 2),
abs(max([abs(x)
for x in [max(self.past_values),
min(self.past_values)]]) / 2)) for i in
range((self.population * 2) // 5)])
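# Illustrative note (added, not part of the original logic): calc_impact draws each response
# from an asymmetric uniform range, e.g. calc_impact(-10, 0) yields (population * 2) // 5 draws
# from U(-5.0, 2.5), so replies skew in the same direction as the triggering comment.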
def run_simulation(self, comment_raised: str) -> None:
"""
Manage the simulation and run it on the comment raised and generate the visualisation
"""
self.compute_comment_sentiment(comment_raised)
self.generate_responses_sentiment(self.comment_sentiment)
temps = []
for i in range(self.j_max + 1):
temps += [i] * self.population
dict_temp = {"Population": np.array(
list(range(1, self.population + 1))
* (len(self.past_values) // self.population)),
"Sentiment": np.array(self.past_values), "j": np.array(temps)}
df = | pd.DataFrame.from_dict(dict_temp, orient="index") | pandas.DataFrame.from_dict |
# Importing essential libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from math import floor
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import ShuffleSplit
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Lasso
from sklearn.tree import DecisionTreeRegressor
from sklearn.model_selection import train_test_split
import pickle
# Loading the dataset
df = pd.read_csv('Bengluru_House_Data.csv')
# Removing the columns of society
df = df.drop('society', axis='columns')
''' Data Cleaning Process '''
# Applying median to the balcony and bath column
balcony_median = float(floor(df.balcony.median()))
bath_median = float(floor(df.bath.median()))
df.balcony = df.balcony.fillna(balcony_median)
df.bath = df.bath.fillna(bath_median)
# Dropping the rows with null values because the dataset is large compared to the number of null values.
df = df.dropna()
# Converting the size column to bhk
df['bhk'] = df['size'].apply(lambda x: int(x.split(' ')[0]))
df = df.drop('size', axis='columns')
# Since the total_sqft column contains range values such as 1133-1384, let's filter out these values
def isFloat(x):
try:
float(x)
except:
return False
return True
# Displaying all the rows where total_sqft is not a plain number
df[~df['total_sqft'].apply(isFloat)]
# Converting the range values to integer values and removing other types of error
def convert_sqft_to_num(x):
tokens = x.split('-')
if len(tokens) == 2:
return (float(tokens[0])+float(tokens[1]))/2
try:
return float(x)
except:
return None
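# Quick sanity checks (illustrative additions, not part of the original notebook):
# a range string is averaged, a plain number is cast, and unparseable units return None.
assert convert_sqft_to_num('1133-1384') == 1258.5
assert convert_sqft_to_num('2100') == 2100.0
assert convert_sqft_to_num('34.46Sq. Meter') is None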
df['new_total_sqft'] = df.total_sqft.apply(convert_sqft_to_num)
df = df.drop('total_sqft', axis='columns')
# Removing the rows in the new_total_sqft column that have None values
df = df.dropna()
''' Feature Engineering '''
# Adding a new column of price_per_sqft
df1 = df.copy()
# In our dataset the price column is in Lakhs
df1['price_per_sqft'] = (df1['price']*100000)/df1['new_total_sqft']
# Checking unique values of 'location' column
locations = list(df['location'].unique())
# Removing the extra spaces at the end
df1.location = df1.location.apply(lambda x: x.strip())
# Calculating the count of each unique value in the 'location' column
location_stats = df1.groupby('location')['location'].agg('count').sort_values(ascending=False)
# Labelling the locations with less than or equal to 10 occurrences as 'other'
locations_less_than_10 = location_stats[location_stats<=10]
df1.location = df1.location.apply(lambda x: 'other' if x in locations_less_than_10 else x)
# Labelling the infrequent availability dates as 'Not Ready'
dates = df1.groupby('availability')['availability'].agg('count').sort_values(ascending=False)
dates_not_ready = dates[dates<10000]
df1.availability = df1.availability.apply(lambda x: 'Not Ready' if x in dates_not_ready else x)
''' Removing Outliers '''
df2 = df1[~(df1.new_total_sqft/df1.bhk<300)]
# Since there is a wide range for 'price_per_sqft' column with min = Rs.267/sqft till max = Rs. 127470/sqft, we remove the extreme ends using the SD
def remove_pps_outliers(df):
df_out = pd.DataFrame()
for key, sub_df in df.groupby('location'):
m = np.mean(sub_df.price_per_sqft)
sd = np.std(sub_df.price_per_sqft)
reduce_df = sub_df[(sub_df.price_per_sqft>(m-sd)) & (sub_df.price_per_sqft<(m+sd))]
df_out = pd.concat([df_out, reduce_df], ignore_index=True)
return df_out
df3 = remove_pps_outliers(df2)
def remove_bhk_outliers(df):
exclude_indices = np.array([])
for location, location_df in df.groupby('location'):
bhk_stats = {}
for bhk, bhk_df in location_df.groupby('bhk'):
bhk_stats[bhk] = {
'mean': np.mean(bhk_df.price_per_sqft),
'std': np.std(bhk_df.price_per_sqft),
'count': bhk_df.shape[0]
}
for bhk, bhk_df in location_df.groupby('bhk'):
stats = bhk_stats.get(bhk-1)
if stats and stats['count']>5:
exclude_indices = np.append(exclude_indices, bhk_df[bhk_df.price_per_sqft<(stats['mean'])].index.values)
return df.drop(exclude_indices, axis='index')
df4 = remove_bhk_outliers(df3)
# Removing the rows that have 'bath' greater than 'bhk'+2
df5 = df4[df4.bath<(df4.bhk+2)]
''' Model '''
# Removing the unnecessary columns (columns that were added only for removing the outliers)
df6 = df5.copy()
df6 = df6.drop('price_per_sqft', axis='columns')
# Converting the categorical_value into numerical_values using get_dummies method
dummy_cols = pd.get_dummies(df6.location).drop('other', axis='columns')
df6 = | pd.concat([df6,dummy_cols], axis='columns') | pandas.concat |
import pandas as pd
import numpy as np
import datetime
class Durations(object):
@classmethod
def set(cls, X, extract_cols, dataset):
print("... ... Durations")
all_df = dataset["all_df"]
# duration from first action to clickout
dffac_df = all_df[["session_id", "timestamp", "timestamp_dt"]].groupby(
"session_id").first().reset_index()
dffac_df = dffac_df[["session_id", "timestamp_dt"]]
dffac_df.columns = ["session_id", "first_timestamp_dt"]
X = pd.merge(X, dffac_df, on="session_id", how="left")
X["session_duration"] = X.apply(lambda x: (x.timestamp_dt - x.first_timestamp_dt).seconds, axis=1)
extract_cols = extract_cols + ["session_duration"]
del dffac_df
# duration from last destination to clickout
dflsc_df = all_df[["session_id", "_session_id", "timestamp", "timestamp_dt"]].groupby(
"_session_id").first().reset_index()
dflsc_df = dflsc_df[dflsc_df._session_id.isin(X._session_id)]
dflsc_df = dflsc_df[["session_id", "timestamp_dt"]]
dflsc_df.columns = ["session_id", "step_first_timestamp_dt"]
X = pd.merge(X, dflsc_df, on="session_id", how="left")
X["step_duration"] = X.apply(lambda x: (x.timestamp_dt - x.step_first_timestamp_dt).seconds, axis=1)
extract_cols = extract_cols + ["step_duration"]
del dflsc_df
return (X, extract_cols)
class JustClickout(object):
@classmethod
def set(cls, X, extract_cols):
print("... ... JustClickout")
# append current filters
def get_cf_features(x):
sbp = 1 if "Sort by Price" in x.current_filters else 0
sbd = 1 if "Sort By Distance" in x.current_filters else 0
sbr = 1 if "Sort By Rating" in x.current_filters else 0
fod = 1 if "Focus on Distance" in x.current_filters else 0
fsr = 1 if "Focus on Rating" in x.current_filters else 0
bev = 1 if "Best Value" in x.current_filters else 0
return pd.Series({'cf_sbp': sbp
, 'cf_sbd': sbd
, 'cf_sbr': sbr
, 'cf_fod': fod
, 'cf_fsr': fsr
, 'cf_bev': bev})
X["current_filters"] = X["current_filters"].fillna("")
curf_df = X[["current_filters"]].apply(lambda x: get_cf_features(x), axis=1)
X = pd.concat([X, curf_df], axis=1)
extract_cols = extract_cols + list(curf_df.columns)
del curf_df
return (X, extract_cols)
class JustBeforeClickout(object):
@classmethod
def set(cls, X, dataset):
print("... ... JustBeforeClickout")
all_df = dataset["all_df"]
# last action_type
lasttype_df = all_df[["session_id", "action_type", "is_y"]].copy()
lasttype_df["lat"] = lasttype_df["action_type"].shift(1)
lasttype_df["last_session_id"] = lasttype_df["session_id"].shift(1)
lasttype_df = lasttype_df[lasttype_df.is_y == 1]
lasttype_df = lasttype_df[lasttype_df.session_id == lasttype_df.last_session_id]
lasttype_df = lasttype_df[["session_id", "lat"]]
onehot_lat = pd.get_dummies(lasttype_df, columns=['lat'])
X = pd.merge(X, onehot_lat, on="session_id", how="left")
lat_cols = list(onehot_lat.columns)
lat_cols.remove("session_id")
for lat_col in lat_cols:
X[lat_col] = X[lat_col].fillna(0)
del lasttype_df
del onehot_lat
return X
class Record2Impression(object):
@classmethod
def expand(cls, X, extract_cols, dataset):
print("... ... Record2Impression")
# create expanded
X = X.reset_index()
X["gid"] = X.index
X["n_imps"] = X[["impressions"]].apply(lambda x: len(str(x.impressions).split("|")), axis=1)
X["price_mean"] = X[["prices"]].apply(lambda x: np.mean(np.array(str(x.prices).split("|")).astype(int)), axis=1)
X["price_std"] = X[["prices"]].apply(lambda x: np.std(np.array(str(x.prices).split("|")).astype(int)), axis=1)
X["impression"] = X[["impressions"]].apply(lambda x: str(x.impressions).split("|"), axis=1)
X["price"] = X[["prices"]].apply(lambda x: str(x.prices).split("|"), axis=1)
X_impression = X[["gid", "impression"]].set_index('gid').impression.apply(pd.Series).stack().reset_index(
level=0).rename(columns={0: 'impression'})
X_price = X[["gid", "price"]].set_index('gid').price.apply(pd.Series).stack().reset_index(level=0).rename(
columns={0: 'price'})
X_position = X[["gid", "impression"]].set_index('gid').impression.apply(
lambda x: pd.Series(range(len(x)))).stack().reset_index(level=0).rename(columns={0: 'position'})
X_expanded = pd.concat([X_impression, X_price], axis=1)
X_expanded = pd.concat([X_expanded, X_position], axis=1)
X_expanded.columns = ["gid", "impression", "gid2", "price", "gid3", "position"]
X_expanded = X_expanded[["gid", "impression", "price", "position"]]
# join expanded
X = pd.merge(X_expanded, X[["gid", "n_imps", "price_mean", "price_std"] + extract_cols], on="gid", how="left")
# to normalize position and price
X["pos_rate"] = X["position"] / X["n_imps"]
X["pos"] = X["position"] + 1
X["price_norm"] = (X["price"].astype(float) - X["price_mean"].astype(float)) / X["price_std"].astype(float)
# join price_norm rank
pnorm_rank_df = X[["session_id", "price_norm"]].copy()
pnorm_rank_df = pnorm_rank_df[["session_id", "price_norm"]].groupby("session_id").rank(ascending=False)
pnorm_rank_df.columns = ["price_norm_rank"]
X = pd.concat([X, pnorm_rank_df], axis=1)
del pnorm_rank_df
# calc discount rate
X["price"] = X["price"].astype(float)
prices_df = X[["impression", "price"]].groupby("impression").agg({'price': np.mean}).reset_index()
prices_df.columns = ["impression", "item_price_mean"]
X = pd.merge(X, prices_df, on="impression", how="left")
X["discount_rate"] = X["price"] / X["item_price_mean"]
del prices_df
# append some important props and other props with over 0.2 coverage
sum_item_props_df = dataset["sum_item_props_df"]
item_props = dataset["item_props"]
prop_cols = ["pGood Rating"
, "pVery Good Rating"
, "pExcellent Rating"
, "pSatisfactory Rating"
, "p1 Star"
, "p2 Star"
, "p3 Star"
, "p4 Star"
, "p5 Star"
, "pBusiness Centre"
, "pBusiness Hotel"
, "pConference Rooms"]
c02over_prop_cols = sum_item_props_df[sum_item_props_df.coverage >= 0.2]["prop"].tolist()
prop_cols = prop_cols + c02over_prop_cols
prop_cols = list(set(prop_cols))
X = pd.merge(X, item_props[["item_id"] + prop_cols], left_on="impression", right_on="item_id", how="left")
X[prop_cols] = X[prop_cols].fillna(0)
return (X, extract_cols)
class DecisionMakingProcess(object):
@classmethod
def detect(cls, X, dataset):
print("... ... Decision Making Process")
print("... ... ... Attention and Perceptual Encoding")
print("... ... ... Information Acquisition and Evaluation")
all_df = dataset["all_df"]
# join pos stats
copos_df = all_df[all_df.action_type == "clickout item"][
["session_id", "reference", "impressions", "is_y"]].copy()
copos_df = copos_df[copos_df.is_y == 0]
copos_df["impression"] = copos_df[["impressions"]].apply(lambda x: str(x.impressions).split("|"), axis=1)
copos_df["co_pos"] = copos_df[["impression", "reference"]].apply(
lambda x: x.impression.index(x.reference) + 1 if x.reference in x.impression else 1, axis=1)
copos_df_stats = copos_df[["session_id", "co_pos"]].groupby("session_id").agg(
{'co_pos': [np.min, np.max, np.mean]}).reset_index()
copos_df_stats.columns = ["session_id", "co_pos_min", "co_pos_max", "co_pos_mean"]
X = pd.merge(X, copos_df_stats, on="session_id", how="left")
X["co_pos_min"] = X["co_pos_min"].fillna(1)
X["co_pos_mean"] = X["co_pos_mean"].fillna(1)
X["co_pos_max"] = X["co_pos_max"].fillna(1)
X["co_pos_min_diff"] = X["pos"] - X["co_pos_min"]
X["co_pos_mean_diff"] = X["pos"] - X["co_pos_mean"]
X["clickouted_pos_max_diff"] = X["co_pos_max"] - X["pos"]
del copos_df
del copos_df_stats
# is_last and is_last_elapsed_time
action_types = ["interaction item rating"
, "interaction item info"
, "interaction item image"
, "interaction item deals"
, "search for item"
, "clickout item"]
lastref_df = all_df[["session_id", "action_type", "reference", "timestamp", "is_y"]].copy()
lastref_df["is_target"] = 0
lastref_df.loc[lastref_df.is_y == 1, "is_target"] = 1
lastref_df = lastref_df[lastref_df.action_type.isin(action_types)]
lastref_df["last_session_id"] = lastref_df["session_id"].shift(1)
lastref_df["last_reference"] = lastref_df["reference"].shift(1)
lastref_df["last_timestamp"] = lastref_df["timestamp"].shift(1)
lastref_df = lastref_df[lastref_df.session_id == lastref_df.last_session_id]
lastref_df = lastref_df[lastref_df.is_target == 1][["session_id", "last_reference", "last_timestamp"]]
X = pd.merge(X, lastref_df, on="session_id", how="left")
X[["last_reference"]] = X[["last_reference"]].fillna("-1")
X[["last_timestamp"]] = X[["last_timestamp"]].fillna(-1)
X["is_last"] = X[["impression", "last_reference"]].apply(lambda x: 1 if x.impression == x.last_reference else 0,
axis=1)
X["elapsed_time_between_is_last"] = X[["impression", "last_reference", "timestamp", "last_timestamp"]].apply(
lambda x: int(x.timestamp) - int(x.last_timestamp) if x.impression == x.last_reference else np.nan, axis=1)
lastdur_df = X[["session_id", "elapsed_time_between_is_last"]].copy()
lastdur_df = lastdur_df.dropna(axis=0, how='any')
X.drop("elapsed_time_between_is_last", axis=1, inplace=True)
X = pd.merge(X, lastdur_df, on="session_id", how="left")
del lastref_df
del lastdur_df
# join is_last_last
lastref_df = all_df[["session_id", "action_type", "reference", "is_y"]].copy()
lastref_df["last_last_session_id"] = lastref_df["session_id"].shift(2)
lastref_df["last_last_reference"] = lastref_df["reference"].shift(2)
lastref_df = lastref_df[lastref_df.is_y == 1]
lastref_df = lastref_df[lastref_df.session_id == lastref_df.last_last_session_id]
lastref_df = lastref_df[["session_id", "last_last_reference"]]
lastref_df = lastref_df[~lastref_df.duplicated()]
X = | pd.merge(X, lastref_df, on="session_id", how="left") | pandas.merge |
#!/usr/bin/env python
# coding: utf-8
# Author : <NAME>
# Initial Date: Feb 17, 2020
# About: strymread class to read CAN data from CSV file captured using
# libpanda (https://jmscslgroup.github.io/libpanda/) or from `strym` class.
# Read associated README for full description
# License: MIT License
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject
# to the following conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
# ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
# TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHORS, COPYRIGHT HOLDERS OR ARIZONA BOARD OF REGENTS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
# AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
__author__ = '<NAME>'
__email__ = '<EMAIL>'
# For System and OS level task
import sys, getopt
## General Data processing and visualization Import
import time
import ntpath
import datetime
import numpy as np
import matplotlib.pyplot as plt
plt.rcParams["figure.figsize"] = (16,8)
plt.rcParams["image.cmap"] = "Dark2"
# to change default color cycle
plt.rcParams['axes.prop_cycle'] = plt.cycler(color=plt.cm.Dark2.colors)
from scipy.interpolate import interp1d
from scipy import signal
import pandas as pd # Note that this is not commai Panda, but Database Pandas
from scipy import integrate
import pickle
import os
from os.path import expanduser
import seaborn as sea
import plotly.express as px
import csv
import copy
import scipy.stats
# cantools import
import cantools
import strym.DBC_Read_Tools as dbc
import pkg_resources
from subprocess import Popen, PIPE
from .utils import configure_logworker
LOGGER = configure_logworker()
dbc_resource = ''
try:
import importlib.resources as pkg_resources
with pkg_resources.path('strym', 'dbc') as rsrc:
dbc_resource = rsrc
except ImportError:
# Try backported to PY<37 `importlib_resources`.
print("Python older than 3.7 detected. ")
try:
import importlib_resources as pkg_resources
with pkg_resources.path('strym', 'dbc') as rsrc:
dbc_resource = rsrc
except ImportError:
print("importlib_resources not found. Install backported importlib_resources through `pip install importlib-resources`")
import vin_parser as vp
# from sqlalchemy import create_engine
import sqlite3
import matplotlib.colors as colors
def truncate_colormap(cmap, minval=0.0, maxval=1.0, n=100):
new_cmap = colors.LinearSegmentedColormap.from_list(
'trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name, a=minval, b=maxval),
cmap(np.linspace(minval, maxval, n)))
return new_cmap
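# Illustrative use (added sketch): the helper works for any registered colormap,
# e.g. keeping only the lower 70% of 'viridis'.
_viridis_lower = truncate_colormap(plt.get_cmap('viridis'), 0.0, 0.7)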
import IPython
shell_type = IPython.get_ipython().__class__.__name__
if shell_type in ['ZMQInteractiveShell', 'TerminalInteractiveShell']:
import plotly.offline as pyo
# Set notebook mode to work in offline
pyo.init_notebook_mode()
from plotly.subplots import make_subplots
import plotly.graph_objects as go
from .config import config
class strymread:
'''
`strymread` reads the logged CAN data from the given CSV file.
This class provides several utility functions
Parameters
----------------
csvfile: `str`, `pandas.DataFrame`, default = None
The CSV file to be read. If `pandas.DataFrame` is supplied, then csvfile is set to None
PandasDataFrame, if provided, must have columns ["Time", "Message", "MessageID", "Bus"]
dbcfile: `str`, default = ""
The DBC file which will provide codec for decoding CAN messages
kwargs: variable list of argument in the dictionary format
bus: `list` | default = None
A list of integer correspond to Bus ID.
dbcfolder: `str` | default = None
Specifies a folder path where to look for appropriate dbc if dbcfile='' or dbcfile = None
Appropriate dbc file can be inferred from <brand>_<model>_<year>.dbc
If dbcfolder is None or empty string, then by default, strymread will look for dbc file in the dbc folder of the package where we ship sample dbc file to work with.
verbose: `bool`
Option for verbosity, prints some information when True
createdb: `bool`
If True, creates a sqlite3 database for raw CAN data if the database doesn't exist
dbdir: `str`
Optional argument that specifies where sqlite3 database will be stored.
The default location is `~/.strym/`
Attributes
---------------
dbcfile: `str`, default = ""
The filepath of DBC file
csvfile:`str` | `pandas.DataFrame`
The filepath of CSV Data file, or, raw CAN Message DataFrame
dataframe: `pandas.Dataframe`
Pandas dataframe that stores content of csvfile as dataframe
dataframe_raw: `pandas.Dataframe`
Pandas original dataframe with all bus IDs. When `bus=` is passed to the constructor to filter out the dataframe based on bus ID, then the original dataframe is saved
in dataframe_raw
candb: `cantools.db`
CAN database fetched from DBC file
burst: `bool`
A boolean flag that checks if CAN data came in burst. If `True`, then CAN Data was captured in burst, else
`False`. If CAN Data came in burst (as in say 64 messages at a time or so)
then any further analysis might not be reliable. Always check that.
success: `bool`
A boolean flag, if `True`, tells that reading of CSV file was successful.
bus: `list` | default = None
A list of integers corresponding to bus IDs.
dbcfolder: `str` | default = None
Specifies a folder path where to look for appropriate dbc if `dbcfile=""` or `dbcfile = None`
Appropriate dbc file can be inferred from <brand>_<model>_<year>.dbc
If dbcfolder is None or empty string, then by default, strymread will look for dbc file in package's dbcfolder
where we ship sample dbc file to work with.
dbdir:`str`
Location of database where sqlite3 database for CAN Dataframe will stored.
Default location: `~/.strym/`
database: `str`
The name of the database corresponding to the model/make of the vehicle from which the CAN data
was captured
inferred_dbc: `str`
DBC file inferred from the name of the csvfile passed.
Returns
---------------
`strymread`
Returns an object of type `strymread` upon successful reading or else return None
Example
----------------
>>> import strym
>>> from strym import strymread
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> dbcfile = 'newToyotacode.dbc'
>>> csvdata = '2020-03-20.csv'
>>> r0 = strymread(csvfile=csvdata, dbcfile=dbcfile)
'''
sunset = truncate_colormap(plt.get_cmap('magma'), 0.0, 0.7) # truncated color map from magma
def __init__(self, csvfile, dbcfile = "", **kwargs):
# success attributes will be set to True ultimately if everything goes well and csvfile is read successfully
self.success = False
if csvfile is None:
print("csvfile is None. Unable to proceed with further analysis. See https://jmscslgroup.github.io/strym/api_docs.html#module-strym for further details.")
return
if isinstance(csvfile, pd.DataFrame):
self.dataframe = csvfile
self.csvfile = ''
if ((len(dbcfile) == 0) or (dbcfile is None)):
print("Please provide a valid dbcfile using argument `dbcfile` to strymread if you intend to supply a dataframe to strymread")
return
elif isinstance(csvfile, str):
# Check if file exists
if not os.path.exists(csvfile):
print("Provided csvfile: {} doesn't exist, or read permission error".format(csvfile))
return
# if file size is less than 60 bytes, return without processing
if os.path.getsize(csvfile) < 60:
print("Nothing significant to read in {}. No further analysis is warranted.".format(csvfile))
return
self.csvfile = csvfile
self.basefile = ntpath.basename(csvfile)
else:
print("Unsupported type for csvfile. Please see https://jmscslgroup.github.io/strym/api_docs.html#module-strym for further details.")
return
# Optional argument for verbosity
self.verbose = kwargs.get("verbose", False)
# Optional argument for bus ID
self.bus = kwargs.get("bus", None)
# Optional argument for dbcfolder where to look for dbc files
self.dbcfolder = kwargs.get("dbcfolder", None)
# Optional argument to tell strymread whether to create a table of the raw count in the db
self.createdb = kwargs.get("createdb", False)
default_db_dir = expanduser("~") + "/.strym/"
# Optional argument for where TIMESERIES DB will be saved
self.dbdir = kwargs.get("dbdir", default_db_dir)
if not os.path.exists(self.dbdir):
if self.verbose:
print("The directory {} for timeseries db doesn't exist, creating one".format(self.dbdir ))
try:
os.mkdir(self.dbdir)
except OSError as error:
print(error)
# If a single bus ID is passed, convert it to a list of one item; multiple bus IDs
# must be passed as a list of ints
if isinstance(self.bus, int):
self.bus = [self.bus]
# If data were recorded in burst, the burst attribute will be set to True. In practice, we won't proceed
# with further analysis when data comes in burst; however, if the csvfile has data in burst, no error will be raised. It
# is up to the user to check the boolean attribute for True/False
self.burst = False
if len(self.csvfile) > 0:
# All CAN messages will be saved as pandas dataframe
try:
# Get the number of rows using Unix `wc` word count function
is_windows = sys.platform.startswith('win')
if not is_windows:
word_counts = Popen(['wc', '-l', self.csvfile], stdin=PIPE, stdout=PIPE, stderr=PIPE)
output, err = word_counts.communicate()
output = output.decode("utf-8")
output = output.strip()
output = output.split(' ')
n_lines = int(output[0])
if n_lines < 5:
print("Not enough data to read in the provided csvfile {}".format(ntpath.basename(self.csvfile)))
return
self.dataframe = pd.read_csv(self.csvfile,dtype={'Time': np.float64,'Bus':np.uint8, 'MessageID': np.uint32, 'Message': str, 'MessageLength': np.uint16}, nrows=n_lines - 2)
else:
self.dataframe = | pd.read_csv(self.csvfile,dtype={'Time': np.float64,'Bus':np.uint8, 'MessageID': np.uint32, 'Message': str, 'MessageLength': np.uint16}, skipfooter=2) | pandas.read_csv |
from typing import Tuple, List, Dict, Any
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder
from sklearn.pipeline import Pipeline
from sklearn.base import BaseEstimator, TransformerMixin, RegressorMixin
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split, GridSearchCV, cross_validate, KFold
from sklearn.metrics import mean_squared_error, make_scorer
import joblib
import mlflow
pd.options.display.max_columns = None
CURRENT_EXPERIMENT_NAME = 'feature engineering'
def filter_by(df: pd.DataFrame, **kwargs) -> pd.DataFrame:
df_out = df
for key, value in kwargs.items():
if type(value) is list:
df_out = df_out[df_out[key].isin(value)]
else:
df_out = df_out[df_out[key] == value]
return df_out
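# Minimal sketch of filter_by on a toy frame (toy column names, not the competition data):
# scalar kwargs become equality filters, list kwargs become isin() filters.
_filter_demo = pd.DataFrame({'site_id': [0, 0, 1], 'meter': [0, 1, 0]})
assert len(filter_by(_filter_demo, site_id=0, meter=[0, 1])) == 2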
def missing_rate(df: pd.DataFrame) -> pd.Series:
return df.isnull().sum() / len(df)
def reduce_mem_usage(df: pd.DataFrame, verbose: bool = True):
numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
start_mem = df.memory_usage().sum() / (1024 ** 2)
for col in df.columns:
col_type = df[col].dtypes
if col_type in numerics:
c_min = df[col].min()
c_max = df[col].max()
if str(col_type)[:3] == 'int':
if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
df[col] = df[col].astype(np.int8)
elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
df[col] = df[col].astype(np.int16)
elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
df[col] = df[col].astype(np.int32)
elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
df[col] = df[col].astype(np.int64)
else:
if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:
df[col] = df[col].astype(np.float16)
elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:
df[col] = df[col].astype(np.float32)
else:
df[col] = df[col].astype(np.float64)
end_mem = df.memory_usage().sum() / 1024**2
if verbose:
print('Mem. usage decreased to {:5.2f} Mb ({:.1f}% reduction)'.format(
end_mem, 100 * (start_mem - end_mem) / start_mem)
)
return df
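# Example (added sketch): integer and float columns are downcast to the narrowest dtype
# that still holds the observed value range.
_mem_demo = pd.DataFrame({'a': np.arange(100, dtype=np.int64), 'b': np.random.rand(100)})
assert reduce_mem_usage(_mem_demo, verbose=False)['a'].dtype == np.int8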
def rmse(y_true, y_pred) -> float:
return np.sqrt(mean_squared_error(y_true, y_pred))
rmse_score = make_scorer(rmse, greater_is_better=False)
def add_key_prefix(d: Dict, prefix = 'best_') -> Dict:
return {prefix + key: value for key, value in d.items()}
def df_from_cv_results(d: Dict):
df = pd.DataFrame(d)
score_columns = ['mean_test_score', 'mean_train_score']
param_columns = [c for c in df.columns if c.startswith('param_')]
return pd.concat([
-df.loc[:, score_columns],
df.loc[:, param_columns],
], axis=1).sort_values(by='mean_test_score')
def sample(*args, frac: float = 0.01) -> np.ndarray:
n_rows = args[0].shape[0]
random_index = np.random.choice(n_rows, int(n_rows * frac), replace=False)
gen = (
a[random_index] for a in args
)
if len(args) == 1:
return next(gen)
else:
return gen
class BaseTransformer(BaseEstimator, TransformerMixin):
def fit(self, x: pd.DataFrame, y = None):
return self
def transform(self, x: pd.DataFrame) -> pd.DataFrame:
return x
class ColumnTransformer(BaseTransformer):
def __init__(self, defs: Dict[str, BaseTransformer]):
self.defs = defs
def fit(self, x: pd.DataFrame, y: np.ndarray = None):
for col, transformer in self.defs.items():
transformer.fit(x[col], y)
return self
def transform(self, x: pd.DataFrame) -> pd.DataFrame:
xp = x.copy()
for col, transformer in self.defs.items():
xp[col] = transformer.transform(x[col])
return xp
def fit_transform(self, x: pd.DataFrame, y: np.ndarray = None) -> pd.DataFrame:
xp = x.copy()
for col, transformer in self.defs.items():
if hasattr(transformer, 'fit_transform'):
xp[col] = transformer.fit_transform(x[col], y)
else:
xp[col] = transformer.fit(x[col], y).transform(x[col])
return xp
class WrappedLabelEncoder(BaseTransformer):
def __init__(self):
self.le = LabelEncoder()
def fit(self, x, y = None):
self.le.fit(x)
return self
def transform(self, x):
return self.le.transform(x)
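# Illustrative combination of ColumnTransformer and WrappedLabelEncoder on a toy frame
# (toy values; the real pipeline applies this to the 'primary_use' column).
_le_demo = pd.DataFrame({'primary_use': ['Education', 'Office', 'Education']})
_le_encoded = ColumnTransformer({'primary_use': WrappedLabelEncoder()}).fit_transform(_le_demo)
assert set(_le_encoded['primary_use']) == {0, 1}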
class WeatherImputer(BaseTransformer):
def transform(self, w: pd.DataFrame) -> pd.DataFrame:
# add missing datetime
dt_min, dt_max = w['timestamp'].min(), w['timestamp'].max()
empty_df = pd.DataFrame({'timestamp': pd.date_range(start=dt_min, end=dt_max, freq='H')})
w_out = pd.concat([
ws.merge(
empty_df, on='timestamp', how='outer'
).sort_values(
by='timestamp'
).assign(
site_id=site_id
) for site_id, ws in w.groupby('site_id')
], ignore_index=True)
# large missing rate columns; fill by -999
w_out['cloud_coverage'] = w_out['cloud_coverage'].fillna(-999).astype(np.int16)
# small missing rate columns; fill by same value forward and backward
w_out = pd.concat([
ws.fillna(method='ffill').fillna(method='bfill') for _, ws in w_out.groupby('site_id')
], ignore_index=True)
# fill nan by mean over all sites
w_mean = w_out.groupby('timestamp').mean().drop(columns=['site_id']).reset_index()
w_mean = w_out.loc[:, ['site_id', 'timestamp']].merge(w_mean, on='timestamp', how='left')
w_out = w_out.where(~w_out.isnull(), w_mean)
# float -> uint
w_out['site_id'] = w_out['site_id'].astype(np.uint8)
return w_out
class WeatherEngineerer(BaseTransformer):
@staticmethod
def shift_by(wdf: pd.DataFrame, n: int) -> pd.DataFrame:
method = 'bfill' if n > 0 else 'ffill'
return pd.concat([
ws.iloc[:, [2, 4, 8]].shift(n).fillna(method=method) for _, ws in wdf.groupby('site_id')
], axis=0)
def weather_weighted_average(self, w: pd.DataFrame, hours: int = 5) -> pd.DataFrame:
ahours = abs(hours)
sign = int(hours / ahours)
w_weighted_average = sum(
[self.shift_by(w, (i+1)*sign) * (ahours-i) for i in range(ahours)]
) / (np.arange(ahours) + 1).sum()
w_weighted_average.columns = ['{0}_wa{1}'.format(c, hours) for c in w_weighted_average.columns]
return pd.concat([w, w_weighted_average], axis=1)
@staticmethod
def dwdt(df: pd.DataFrame, base_col: str) -> pd.DataFrame:
df_out = df.copy()
df_out[base_col + '_dt_wa1'] = df[base_col] - df[base_col + '_wa1']
df_out[base_col + '_dt_wa-1'] = df[base_col] - df[base_col + '_wa-1']
df_out[base_col + '_dt_wa5'] = df[base_col] - df[base_col + '_wa5']
df_out[base_col + '_dt_wa-5'] = df[base_col] - df[base_col + '_wa-5']
return df_out
@staticmethod
def wet(df: pd.DataFrame, suffix: str) -> pd.DataFrame:
df_out = df.copy()
df_out['wet' + suffix] = df['air_temperature' + suffix] - df['dew_temperature' + suffix]
return df_out
def transform(self, w_in: pd.DataFrame) -> pd.DataFrame:
w = w_in.pipe(self.weather_weighted_average, hours=1) \
.pipe(self.weather_weighted_average, hours=-1) \
.pipe(self.weather_weighted_average) \
.pipe(self.weather_weighted_average, hours=-5)
w = w.pipe(self.dwdt, base_col='air_temperature') \
.pipe(self.dwdt, base_col='dew_temperature') \
.pipe(self.dwdt, base_col='wind_speed') \
.pipe(self.wet, suffix='') \
.pipe(self.wet, suffix='_wa1') \
.pipe(self.wet, suffix='_wa-1') \
.pipe(self.wet, suffix='_wa5') \
.pipe(self.wet, suffix='_wa-5')
return w
class WindDirectionEncoder(BaseTransformer):
@staticmethod
def _from_degree(degree: int) -> int:
val = int((degree / 22.5) + 0.5)
arr = [i for i in range(0,16)]
return arr[(val % 16)]
def transform(self, x: pd.Series) -> pd.Series:
return x.apply(self._from_degree)
class WindSpeedEncoder(BaseTransformer):
def transform(self, x: pd.Series) -> pd.Series:
return pd.cut(
x,
bins=[0, 0.3, 1.6, 3.4, 5.5, 8, 10.8, 13.9, 17.2, 20.8, 24.5, 28.5, 33, 1000],
right=False, labels=False,
)
weather_pipeline = Pipeline(steps=[
('impute_missing_value', WeatherImputer()),
('feature_engineering', WeatherEngineerer()),
('label_encode', ColumnTransformer({
'wind_direction': WindDirectionEncoder(),
'wind_speed': WindSpeedEncoder(),
'wind_speed_wa1': WindSpeedEncoder(),
'wind_speed_wa-1': WindSpeedEncoder(),
'wind_speed_wa5': WindSpeedEncoder(),
'wind_speed_wa-5': WindSpeedEncoder(),
}))
])
class BuildingMetadataEngineerer(BaseTransformer):
def transform(self, bm_in: pd.DataFrame) -> pd.DataFrame:
bm = bm_in.copy()
bm['log_square_feet'] = np.log(bm['square_feet'])
bm['square_feet_per_floor'] = bm['square_feet'] / bm['floor_count']
bm['log_square_feet_per_floor'] = bm['log_square_feet'] / bm['floor_count']
bm['building_age'] = 2019 - bm['year_built']
bm['square_feet_per_age'] = bm['square_feet'] / bm['building_age']
bm['log_square_feet_per_age'] = bm['log_square_feet'] / bm['building_age']
return bm
class BuildingMetadataImputer(BaseTransformer):
def transform(self, bm: pd.DataFrame) -> pd.DataFrame:
return bm.fillna(-999)
building_metadata_pipeline = Pipeline(steps=[
('label_encode', ColumnTransformer({
'primary_use': WrappedLabelEncoder(),
})),
('feature_engineering', BuildingMetadataEngineerer()),
('impute_missing_value', BuildingMetadataImputer()),
])
class BuildingMetaJoiner(BaseTransformer):
def __init__(self, bm: pd.DataFrame = None):
self.bm = bm
def transform(self, x: pd.DataFrame) -> pd.DataFrame:
if self.bm is None:
return x
else:
return x.merge(
self.bm,
on='building_id',
how='left',
)
class WeatherJoiner(BaseTransformer):
def __init__(self, w: pd.DataFrame = None):
self.w = w
def transform(self, x: pd.DataFrame) -> pd.DataFrame:
if self.w is None:
return x
else:
return x.merge(
self.w,
on=['site_id', 'timestamp'],
how='left',
)
class DatetimeFeatureEngineerer(BaseTransformer):
def __init__(self, col: str = 'timestamp'):
self.col = col
def transform(self, x: pd.DataFrame) -> pd.DataFrame:
xp = x.copy()
ts = x[self.col]
xp['month'] = ts.dt.month.astype(np.int8)
xp['week'] = ts.dt.week.astype(np.int8)
xp['day_of_week'] = ts.dt.weekday.astype(np.int8)
xp['time_period'] = pd.cut(
ts.dt.hour,
bins=[0, 3, 6, 9, 12, 15, 18, 21, 25],
right=False, labels=False,
)
holidays = [
'2016-01-01', '2016-01-18', '2016-02-15', '2016-05-30', '2016-07-04',
'2016-09-05', '2016-10-10', '2016-11-11', '2016-11-24', '2016-12-26',
'2017-01-01', '2017-01-16', '2017-02-20', '2017-05-29', '2017-07-04',
'2017-09-04', '2017-10-09', '2017-11-10', '2017-11-23', '2017-12-25',
'2018-01-01', '2018-01-15', '2018-02-19', '2018-05-28', '2018-07-04',
'2018-09-03', '2018-10-08', '2018-11-12', '2018-11-22', '2018-12-25',
'2019-01-01'
]
xp['is_holiday'] = (ts.dt.date.astype('str').isin(holidays)).astype(np.int8)
return xp
class TargetEncoder(BaseTransformer):
def __init__(self, cv: int = 5, smoothing: int = 1):
self.agg = None
self.cv = cv
self.smoothing = smoothing
def transform(self, x: pd.Series):
if self.agg is None:
raise ValueError('you should call fit() before transform()')
encoded = pd.merge(x, self.agg, left_on=x.name, right_index=True, how='left')
encoded = encoded.fillna(encoded.mean())
xp = encoded['y']
xp.name = x.name
return xp
def fit_transform(self, x: pd.Series, y: np.ndarray = None) -> pd.Series:
df = pd.DataFrame({'x': x, 'y': y})
self.agg = df.groupby('x').mean()
fold = KFold(n_splits=self.cv, shuffle=True)
xp = x.copy()
for idx_train, idx_test in fold.split(x):
df_train = df.loc[idx_train, :]
df_test = df.loc[idx_test, :]
agg_train = df_train.groupby('x').mean()
encoded = pd.merge(df_test, agg_train, left_on='x', right_index=True, how='left', suffixes=('', '_mean'))['y_mean']
encoded = encoded.fillna(encoded.mean())
xp[encoded.index] = encoded
return xp
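# Sketch of the out-of-fold target encoding on a toy categorical column (toy values;
# cv is lowered so the example runs on a handful of rows).
_te_cat = pd.Series(['a', 'a', 'b', 'b', 'a', 'b'], name='cat')
_te_y = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
_te_oof = TargetEncoder(cv=2).fit_transform(_te_cat, _te_y)
assert _te_oof.shape == _te_cat.shape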
class ColumnDropper(BaseTransformer):
def __init__(self, cols: List[str]):
self.cols = cols
def transform(self, x: pd.DataFrame, y = None) -> pd.DataFrame:
return x.drop(columns=self.cols)
class ArrayTransformer(BaseTransformer):
def transform(self, x: pd.DataFrame, y = None) -> np.ndarray:
return x.values
def pipeline_factory() -> Pipeline:
return Pipeline(steps=[
# join
('join_building_meta', BuildingMetaJoiner(
building_metadata_pipeline.fit_transform(
building_metadata
)
)),
('join_weather', WeatherJoiner(
weather_pipeline.fit_transform(
pd.concat([weather_train, weather_test], axis=0, ignore_index=True)
)
)),
# feature engineering
('feature_engineering_from_datetime', DatetimeFeatureEngineerer()),
('target_encode', ColumnTransformer({
'primary_use': TargetEncoder(),
'meter': TargetEncoder(),
'cloud_coverage': TargetEncoder(),
'time_period': TargetEncoder(),
'wind_direction': TargetEncoder(),
'wind_speed': TargetEncoder(),
'wind_speed_wa1': TargetEncoder(),
'wind_speed_wa-1': TargetEncoder(),
'wind_speed_wa5': TargetEncoder(),
'wind_speed_wa-5': TargetEncoder(),
})),
# drop columns
('drop_columns', ColumnDropper([
'building_id', 'timestamp', 'site_id', 'precip_depth_1_hr',
])),
# pd.DataFrame -> np.ndarray
('df_to_array', ArrayTransformer()),
# regressor
('regressor', RandomForestRegressor()),
])
def cv(pipeline: Pipeline, df: pd.DataFrame, n_jobs: int = -1, **params) -> Tuple[float, float]:
x = df.drop(columns='meter_reading')
y = np.log1p(df['meter_reading'].values)
default_params = dict(
n_estimators=10,
max_depth=None,
max_features='auto',
min_samples_leaf=1,
)
merged_params = {**default_params, **params}
pipeline_params = {**merged_params, 'n_jobs': n_jobs}
pipeline_params = add_key_prefix(pipeline_params, 'regressor__')
pipeline.set_params(**pipeline_params)
mlflow.set_experiment(CURRENT_EXPERIMENT_NAME)
with mlflow.start_run():
mlflow.log_params(merged_params)
scores = cross_validate(
pipeline, x, y,
cv=3,
scoring=rmse_score,
return_train_score=True,
verbose=2,
)
rmse_val = - np.mean(scores['test_score'])
rmse_train = - np.mean(scores['train_score'])
mlflow.log_metrics(dict(
rmse_val=rmse_val,
rmse_train=rmse_train,
))
return rmse_val, rmse_train
def oneshot(pipeline: Pipeline, df: pd.DataFrame, **params):
x = df.drop(columns='meter_reading')
y = np.log1p(df['meter_reading'].values)
default_params = dict(
n_estimators=10,
max_depth=None,
max_features='auto',
min_samples_leaf=1,
)
merged_params = {**default_params, **params}
pipeline_params = {**merged_params, 'n_jobs': -1, 'verbose': 2}
pipeline_params = add_key_prefix(pipeline_params, 'regressor__')
pipeline.set_params(**pipeline_params)
mlflow.set_experiment(CURRENT_EXPERIMENT_NAME)
with mlflow.start_run():
mlflow.log_params(merged_params)
pipeline.fit(x, y)
joblib.dump(pipeline, 'out/pipeline.sav', compress=1)
score = rmse(y, pipeline.predict(x))
mlflow.log_metrics(dict(rmse_train=score))
mlflow.log_artifact('out/pipeline.sav')
return pipeline
def grid_search(pipeline: Pipeline, df: pd.DataFrame, n_jobs: int = -1, **param_grid):
x = df.drop(columns='meter_reading')
y = np.log1p(df['meter_reading'].values)
default_param_grid = dict(
n_estimators=[80],
max_depth=[None],
max_features=['auto'],
min_samples_leaf=[0.00003],
)
merged_param_grid = {**default_param_grid, **param_grid}
pipeline_param_grid = add_key_prefix(merged_param_grid, 'regressor__')
pipeline.set_params(regressor__n_jobs=n_jobs)
mlflow.set_experiment(CURRENT_EXPERIMENT_NAME)
with mlflow.start_run():
mlflow.log_params(merged_param_grid)
regressor = GridSearchCV(
pipeline,
param_grid=pipeline_param_grid,
cv=3,
scoring=rmse_score,
verbose=2,
refit=True,
)
regressor.fit(x, y)
best_model = regressor.best_estimator_
best_param = add_key_prefix(regressor.best_params_)
best_rmse = - regressor.best_score_
cv_results = df_from_cv_results(regressor.cv_results_)
joblib.dump(best_model, 'out/model.sav')
cv_results.to_csv('out/cv_results.csv', index=False)
mlflow.log_params(best_param)
mlflow.log_metrics(dict(
rmse=best_rmse,
))
mlflow.log_artifact('./out/model.sav')
mlflow.log_artifact('./out/cv_results.csv')
mlflow.end_run()
return cv_results
def load_model(run_id: str = None):
if run_id is None:
model_path = 'out/model.joblib'
else:
mlflow_client = mlflow.tracking.MlflowClient()
model_path = mlflow_client.download_artifacts(run_id, 'model.joblib')
return joblib.load(model_path)
def predict(df: pd.DataFrame, pipeline: Pipeline) -> pd.DataFrame:
x = df.iloc[:, 1:]
y_log1p = pipeline.predict(x)
y = np.expm1(y_log1p)
return pd.DataFrame({
'row_id': df.iloc[:, 0],
'meter_reading': y,
})[['row_id', 'meter_reading']]
if __name__ == '__main__':
train = pd.read_csv('data/train.csv', parse_dates=['timestamp']).pipe(reduce_mem_usage)
building_metadata = | pd.read_csv('data/building_metadata.csv') | pandas.read_csv |
import batman
import ellc
import torch
import numpy as np
import pickle
import matplotlib.pyplot as plt
import pandas as pd
from time import time
from pytransit import OblateStarModel, QuadraticModel
from data_preparation.data_processing_utils import min_max_norm_vectorized, resize, standardize
R_SUN2JUPYTER = 1.0 / 0.10045
M_EARTH_KG = 5.9723e24
M_SUN_KG = 1.9884e30
M_JUPYTER = 1.89813e27
M_JUPYTER2Sun = M_JUPYTER / M_SUN_KG
M_EARTH2SUN = M_EARTH_KG / M_SUN_KG
AU2kKM = 149597870.7
R_SUN = 696340.0
R_EARTH2SUN = 6371.0 / R_SUN
def prob_decrease(max_spots, mass=0.5, decay=0.5):
assert 0.0 < decay < 1.0, "Invalid decay value! Must be between 0 and 1."
probs = []
current = mass
for i in range(max_spots):
current -= current * decay
probs.append(current)
remaining_mass = mass-sum(probs)
return [prob + (remaining_mass/len(probs)) for prob in probs]
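# Quick check (illustrative): with unit mass the per-spot-count probabilities decay
# geometrically and, after redistributing the leftover mass, still sum to the mass given.
_p_demo = prob_decrease(4, mass=1.0, decay=0.5)
assert abs(sum(_p_demo) - 1.0) < 1e-9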
def sample_spot_parameters_realistic(b=0., max_spots=4, max_size=20., spotless_prob=0.0, latitude_offset_prob=0.5,
latitude_offset_std=0.1):
p = prob_decrease(max_spots, mass=1 - spotless_prob, decay=0.5)
p.insert(0, spotless_prob)
num_spots = np.random.choice(range(max_spots + 1), p=p)
if num_spots == 0:
return None
spot_params = np.empty((4, num_spots))
longitude_range = np.linspace(-60., 60, 360)
for s, spot in enumerate(range(num_spots)):
if np.random.choice([True, False], p=[latitude_offset_prob, 1.-latitude_offset_prob]):
offset = np.random.normal(0, latitude_offset_std)
else:
offset = 0.
#latitude = (b+offset)*90.
latitude = -b * 60. + np.random.uniform(-5., 5.)
longitude = np.random.choice(longitude_range)
size = np.random.uniform(2., max_size-(5.*s))
used_longitude = np.logical_and(longitude_range >= longitude - size, longitude_range <= longitude + size)
longitude_range = longitude_range[~used_longitude]
brightness = np.random.uniform(0.7, 1.3)
spot_params[0, s] = longitude
spot_params[1, s] = latitude
spot_params[2, s] = size
spot_params[3, s] = brightness
return spot_params
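# Illustrative call (added sketch): for an equatorial transit (b = 0) the sampler returns
# either None (spot-free star) or a (4, n_spots) array of [longitude, latitude, size, brightness].
_spots_demo = sample_spot_parameters_realistic(b=0., max_spots=3, spotless_prob=0.2)
assert _spots_demo is None or _spots_demo.shape[0] == 4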
def extract_parameters(path="All_Exoplanets_Params.csv", transit__method_only=True,
params_essential=('pl_orbper', 'pl_rade', 'pl_orbsmax', 'pl_orbincl', 'st_rad'),
params_optional=('pl_trandur', 'pl_orbeccen', 'pl_orblper', 'st_teff', 'st_logg', 'st_met')):
exos = pd.read_csv(path, comment='#', sep=',')
if transit__method_only:
exos = exos[exos['discoverymethod'] == 'Transit']
if params_essential is None:
params_essential = ['pl_orbper', 'pl_trandur', 'pl_rade', 'pl_orbsmax', 'pl_orbeccen',
'pl_orbincl', 'st_rad']
if params_optional is None:
params_optional = []
param_names = list(params_essential) + list(params_optional)
exos_selected = exos.loc[:, param_names]
# convert unit of 'a' from AU to ratio of stellar radii
exos_selected.loc[:, 'pl_orbsmax'] = (AU2kKM * exos_selected['pl_orbsmax']) / (R_SUN * exos_selected['st_rad'])
valid_exos = exos_selected.dropna(subset=params_essential)
return valid_exos.where(pd.notnull(valid_exos), None)
def bin_parameters_by_impact(param_df, uniform_impact_bins=10):
bin_edges = np.linspace(0, 1, uniform_impact_bins)
bins = {bin_edge: [] for bin_edge in bin_edges}
for r_idx, row in param_df.iterrows():
a = row.get('pl_orbsmax')
i = row.get('pl_orbincl')
if a is None:
a = np.random.uniform(2.3, 30.)
if i is None:
b = np.random.uniform(0., 1.)
i = np.arccos(b / a) * 180 / np.pi
# calculate impact parameter
b = a * np.cos(i * np.pi / 180)
# determine closest bin edge
bin_idx = np.abs(bin_edges - b).argmin()
bins[bin_edges[bin_idx]].append(row)
return bins
def get_valid_range(value, constraints=('t_eff',), quantity='logg', limb_model='quad'):
assert all([constraint in ('t_eff', 'logg', 'met') for constraint in constraints]),\
f"Unknown value in constraints for argument constraints! Must only contain ('t_eff', 'logg', 'met')."
assert quantity in ('t_eff', 'logg', 'met'),\
f"Unknown value {quantity} for argument quantity! Must be one of ('t_eff', 'logg', 'met')."
assert all([constraint != quantity for constraint in constraints]), "Argument constraints must not contain quantity!"
assert len(value) == len(constraints), f"Arguments value and constraints have different lengths {len(value)} and " \
f"{len(constraints)}. You need to provide a value for each constraint!"
if limb_model == 'claret':
table = pd.read_csv("TESS_Nonlinear_Limb_Darkening_Atlas.csv", comment='#', sep=',')
elif limb_model == 'quad':
table = pd.read_csv("TESS_Quadratic_Limb_Darkening_Atlas.csv", comment='#', sep=',')
else:
raise RuntimeError("Unknown limb-darkening model use one of ['quad', 'claret']")
translate_arguments = lambda x: 'Teff [K]' if x == 't_eff' else 'logg [cm/s2]' if x == 'logg' else 'Z [Sun]'
constraints_translated = [translate_arguments(constraint) for constraint in constraints]
quantity_translated = translate_arguments(quantity)
constraint_results = table
for c, constraint in enumerate(constraints_translated):
match_idx = (constraint_results[constraint] - value[c]).abs().argmin()
matched_value = constraint_results[constraint].iloc[match_idx]
constraint_results = constraint_results[constraint_results[constraint] == matched_value]
joined_constraints = set(constraint_results[quantity_translated])
if len(joined_constraints) > 0:
return list(joined_constraints)
else:
return [None]
def match_stellar_params_with_table(table, T_eff, logg, met):
T_eff_match = (table['Teff [K]'] - T_eff).abs().argmin()
T_eff = table['Teff [K]'].iloc[T_eff_match]
logg_match = (table['logg [cm/s2]'] - logg).abs().argmin()
logg = table['logg [cm/s2]'].iloc[logg_match]
met_match = (table['Z [Sun]'] - met).abs().argmin()
met = table['Z [Sun]'].iloc[met_match]
candidates = table.loc[(table['Teff [K]'] == T_eff) & (table['logg [cm/s2]'] == logg) & (table['Z [Sun]'] == met)]
if len(candidates) > 1:
candidates = candidates.loc[candidates['xi [km/s]'] == 2.0]
return candidates
def lookup_limb_darkening(T_eff, logg, met, limb_model='claret', model='Atlas'):
if model == 'Atlas':
if limb_model == 'claret':
table = pd.read_csv("TESS_Nonlinear_Limb_Darkening_Atlas.csv", comment='#', sep=',')
candidates = match_stellar_params_with_table(table, T_eff, logg, met)
if candidates.empty:
return None
else:
return candidates['a1LSM'].item(), candidates['a2LSM'].item(), candidates['a3LSM'].item(), candidates['a4LSM'].item()
elif limb_model == 'quad':
table = pd.read_csv("TESS_Quadratic_Limb_Darkening_Atlas.csv", comment='#', sep=',')
candidates = match_stellar_params_with_table(table, T_eff, logg, met)
if candidates.empty:
return None
else:
return candidates['aLSM'].item(), candidates['bLSM'].item()
else:
raise RuntimeError("Unknown limb-darkening model use one of ['quad', 'claret']")
else:
raise RuntimeError("Currently Atlas is the only Model implemented! Please use model='Atlas'")
def lookup_gravity_darkening(T_eff, logg, met):
table = pd.read_csv("TESS_Gravity_Darkening.csv", comment='#', sep=',')
candidates = match_stellar_params_with_table(table, T_eff, logg, met)
if candidates.empty:
return None
else:
return candidates['y'].item()
def batman_model(R_ratio, a, i, ecc, t0=0.0, period=2.5, duration=2.5, w=0.0, size=256,
limb_dark="quadratic", limb_coeff=(0.1, 0.3), mult=3.):
params = batman.TransitParams()
params.t0 = t0 # time of inferior conjunction
params.per = period # orbital period
params.rp = R_ratio # planet radius (in units of stellar radii)
params.a = a # semi-major axis (in units of stellar radii)
params.inc = i # orbital inclination (in degrees)
params.ecc = ecc # eccentricity
params.w = w # longitude of periastron (in degrees)
params.u = limb_coeff # limb darkening coefficients [u1, u2]
params.limb_dark = limb_dark
time = (duration / 24.) * (mult / 2.)
# print(R_ratio, a)
t = np.linspace(-time, time, size, endpoint=True)
m = batman.TransitModel(params, t) # initializes model
return m.light_curve(params)
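# Sketch: light curve of a hypothetical hot Jupiter (Rp/Rs = 0.1, a = 10 R*, i = 89 deg);
# the parameter values here are illustrative only.
_bm_demo = batman_model(0.1, 10., 89., ecc=0., period=3.0, duration=2.5, size=256)
assert _bm_demo.shape == (256,) and _bm_demo.min() < 1.0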
def ellc_transit(r1, r2, m_star=1.0, m_planet=None, period=1.0, duration=None, incl=90.0, ecc=None, w=None, a_sun=None,
star_shape='sphere', rot_per=None, T_eff=None, logg=None, met=None, limb_model="quad",
lambda_1=None, spots_1 = None, include_gd=True,
sbratio=0.0, t_zero=0, mult=3, size=256, accuracy='default', plot=False):
if period == 1:
window = 0.5
a_sun = None
else:
window = (duration / 24.) * (mult / 2.)
t_obs = np.linspace(-window, window, size, endpoint=True)
# eccentricity
if ecc is None or w is None:
f_c = None
f_s = None
else:
f_c = np.sqrt(ecc) * np.cos(w * np.pi / 180)
f_s = np.sqrt(ecc) * np.sin(w * np.pi / 180)
# limb and gravity darkening
if T_eff is None or logg is None or met is None:
ldc_1 = None
gdc_1 = None
else:
ldc_1 = lookup_limb_darkening(T_eff, logg, met, limb_model=limb_model, model='Atlas')
if include_gd:
gdc_1 = lookup_gravity_darkening(T_eff, logg, met)
else:
gdc_1 = None
grav = True if gdc_1 is not None else False
# mass relation
if m_planet is None:
q = (M_JUPYTER * (r2 * R_SUN2JUPYTER) ** 3) / m_star
else:
q = m_planet / m_star
# stellar shape
if rot_per is None:
async_rot = 1
else:
async_rot = period / rot_per
f1 = ellc.lc(t_obs, r1, r2, sbratio, incl,
t_zero=t_zero, period=period,
a=a_sun,
q=q,
f_c=f_c, f_s=f_s,
ldc_1=ldc_1, ldc_2=None,
gdc_1=gdc_1, gdc_2=None,
didt=None,
domdt=None,
rotfac_1=async_rot, rotfac_2=1,
hf_1=1.5, hf_2=1.5,
bfac_1=None, bfac_2=None,
heat_1=None, heat_2=None,
lambda_1=lambda_1, lambda_2=None,
vsini_1=None, vsini_2=None,
t_exp=None, n_int=None,
grid_1=accuracy, grid_2=accuracy,
ld_1=limb_model, ld_2=None,
shape_1=star_shape, shape_2='sphere',
spots_1=spots_1, spots_2=None,
exact_grav=grav, verbose=0)
if plot:
plt.plot(t_obs, f1)
plt.show()
return f1
def allign_model_with_residuals(model, res_ingress, res_egress, res_size):
model_ingress = min(np.argwhere(model < 1.))[0]
model_egress = max(np.argwhere(model < 1.))[0]
scale_factor = (res_egress - res_ingress) / (model_egress - model_ingress)
model_size = len(model)
new_model_size = int(model_size*scale_factor)
resized = resize(model, new_model_size)
resized_ingress = min(np.argwhere(resized < 1))[0]
start_diff = res_ingress-resized_ingress
if start_diff > 0:
resized = np.insert(resized, 0, np.ones(start_diff))
elif start_diff < 0:
resized = resized[abs(start_diff):]
size_diff = res_size-len(resized)
if size_diff > 0:
resized = np.append(resized, np.ones(size_diff))
elif size_diff < 0:
resized = resized[:res_size]
return resized, scale_factor, start_diff
def fill_missing_params(row, max_repetitions=10):
period = row.get('pl_orbper') or np.random.uniform(0.2, 18.)
r_ratio = np.inf
radii_iter = 0
while r_ratio >= 1.:
r_st = row.get('st_rad') or np.random.uniform(0.3, 4.)
if radii_iter > 3:
r_pl = 0.08*r_st
else:
r_pl = row.get('pl_rade') or np.random.uniform(0.3, 7.)
r_pl *= R_EARTH2SUN
r_ratio = r_pl / r_st
radii_iter += 1
repeat = 0
while repeat < max_repetitions:
rot_per = row.get('st_rotp') or np.random.uniform(10., 40.)
st_mass = row.get('st_mass') or np.random.uniform(0.5, 2.5)
pl_mass = row.get('pl_massj') or np.random.uniform(0.005, 2.5)
pl_mass *= M_JUPYTER2Sun
# impact parameter
a = row.get('pl_orbsmax')
i = row.get('pl_orbincl')
if a is None:
a = np.random.uniform(2.3, 30.)
b_valid = np.random.uniform(0., 1.)
if i is None:
i = np.arccos(b_valid/a) * 180/np.pi
else:
if i is None:
b_valid = np.random.uniform(0., 1.)
i = np.arccos(b_valid/a) * 180/np.pi
else:
b_valid = a * np.cos(i * np.pi/180)
# eccentricity
ecc = row.get('pl_orbeccen') or np.random.uniform(0., 1.) if repeat < max_repetitions - 1 else 0.
w = row.get('pl_orblper') or np.random.uniform(0., 90.)
# limb darkening
t_eff = row.get('st_teff') or np.random.uniform(3500., 12500.)
logg = row.get('st_logg') or np.random.choice(
get_valid_range((t_eff,), constraints=('t_eff',), quantity='logg'))
met = row.get('st_met') or np.random.choice(
get_valid_range((t_eff, logg), constraints=('t_eff', 'logg'), quantity='met'))
limb_coeff = lookup_limb_darkening(t_eff, logg, met, limb_model='quad', model='Atlas') or \
np.random.uniform(-1., 1., size=2)
duration = row.get('pl_trandur') or period * 24. / np.pi * np.arcsin(
np.sqrt((r_st + r_pl) ** 2 - (b_valid * r_st) ** 2) / a)
flux_original = ellc_transit(1./a, (1./a)*r_ratio, m_star=st_mass, m_planet=pl_mass, period=1.0, duration=duration,
incl=i, ecc=ecc, w=w, a_sun=a*r_st, star_shape='sphere', rot_per=rot_per,
T_eff=t_eff, logg=logg, met=met, limb_model="quad", lambda_1=None, spots_1=None)
if max(flux_original) > min(flux_original):
return {'a':a, 'i':i, 'b':b_valid, 'e':ecc, 'w':w, 'period':period, 'duration':duration,
'r_star':r_st, 'r_planet':r_pl, 'm_star':st_mass, 'm_planet':pl_mass, 'rot_per':rot_per,
't_eff':t_eff, 'logg':logg, 'met':met, 'ldc':limb_coeff}
else:
repeat += 1
return {'a':a, 'i':i, 'b':b_valid, 'e':ecc, 'w':w, 'period':period, 'duration':duration,
'r_star':r_st, 'r_planet':r_pl, 'm_star':st_mass, 'm_planet':pl_mass, 'rot_per':rot_per,
't_eff':t_eff, 'logg':logg, 'met':met, 'ldc':limb_coeff}
def sample_horizontal_scale(mean_gaussian=3., std_gaussian=1., uniform_range=(1., 8.), uniform_prob=0.0):
gaussian = np.random.choice([True, False], p=(1-uniform_prob, uniform_prob))
if gaussian:
return max(1., np.random.normal(mean_gaussian, std_gaussian))
else:
return np.random.uniform(*uniform_range)
def py_transit_model(radii_ratio, a, i, period, duration, ldc, e=0., w=0., t0=0., mult=3., size=256):
tmc = QuadraticModel(interpolate=False)
window = (duration / 24.) * (mult / 2.)
times = np.linspace(-window, window, size, endpoint=True)
tmc.set_data(times)
k = np.array([radii_ratio])
i *= np.pi / 180
return tmc.evaluate_ps(k, ldc, t0, period, a, i, e, w)
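# Sketch: the same geometry evaluated through the PyTransit QuadraticModel wrapper above;
# the numbers are illustrative, ldc is the (u1, u2) quadratic limb-darkening pair.
_pt_demo = py_transit_model(0.1, 10., 89., period=3.0, duration=2.5, ldc=np.array([0.3, 0.2]))
assert _pt_demo.shape == (256,)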
def py_transit_gd(radii_ratio, a, i, period, duration, ldc, gdc, e=0., w=0.,
r_star=1., density=1., phi=90., az=0., rot_per=12., t_pole=6500., t0=0., mult=3., size=256):
degree2rad = np.pi / 180
tmo = OblateStarModel(sres=100, pres=8, rstar=r_star)
window = (duration / 24.) * (mult / 2.)
times = np.linspace(-window, window, size, endpoint=True)
tmo.set_data(times)
k = np.array([radii_ratio])
i *= degree2rad
phi *= degree2rad
az *= degree2rad
return tmo.evaluate_ps(k, density, rot_per, t_pole, phi, gdc, ldc, t0, period, a, i, l=az, e=e, w=w)
def sample_gd_params():
gd_params = {}
gd_params['rot_per'] = np.random.uniform(0.5, 2.5)
gd_params['st_density'] = np.random.uniform(0.1, 1.9)
gd_params['st_obliquity'] = np.random.uniform(0., 90.)
gd_params['spin_orbit_angle'] = np.random.uniform(0., 90.)
gd_params['t_pole'] = np.random.uniform(5700., 12000.)
return gd_params
def create_transit_model(params, mode, feature=False, t0=0., size=256, res_data=None):
if mode == 'normal' or (mode == 'dre' and not feature):
flux = batman_model(params['r_planet'] / params['r_star'], params['a'], params['i'],
ecc=params['e'], w=params['w'], period=params['period'], duration=params['duration'],
limb_dark="quadratic", limb_coeff=params['ldc'], t0=t0, size=size)
if feature:
return flux, {}
else:
return flux
elif mode == 'spots':
if feature:
spots = sample_spot_parameters_realistic(params['b'])
apply_gd = False # True
else:
spots = None
apply_gd = False
r1 = 1./params['a'] # radius of star in units of semi-major-axis (a is in units of stellar radii)
flux = ellc_transit(r1, r1*params['r_planet']/params['r_star'], m_star=params['m_star'],
m_planet=params['m_planet'], period=params['period'],
duration=params['duration'], incl=params['i'], ecc=params['e'], w=params['w'],
a_sun=params['a'] * params['r_star'], star_shape='sphere',
rot_per=max(params['period'],params['rot_per']), T_eff=params['t_eff'], logg=params['logg'],
met=params['met'], limb_model="quad", lambda_1=None, include_gd=apply_gd, spots_1=spots,
t_zero=t0)
# fix numerical issue: pre-transit flux can come out slightly smaller than 1.0
flux += 1e-8
gdc = lookup_gravity_darkening(params['t_eff'], params['logg'], params['met'])
if feature:
if spots is None:
spot_params = {'num_spots':0}
else:
spot_params = {'num_spots':spots.shape[-1], 'longitudes':spots[0,:], 'latitudes':spots[1,:],
'spot_sizes':spots[2,:], 'spot_brightnesses':spots[3,:], 'gdc':gdc}
return flux, spot_params
else:
return flux
elif mode == 'gd':
flux_no_gd = py_transit_model(params['r_planet'] / params['r_star'], params['a'], params['i'], params['period'],
params['duration'], params['ldc'], params['e'], params['w'], t0=t0, size=size)
if feature:
mean_effect = 0.
max_effect = 0.
gdc = lookup_gravity_darkening(params['t_eff'], params['logg'], params['met'])
gd_iter = 0
while not 5e-5 < mean_effect < 5e-4 and not 1e-4 < max_effect < 1e-3:
if gd_iter > 5:
return None, gd_params
gd_params = sample_gd_params()
gd_params['gdc'] = gdc or np.random.uniform(0., 0.6)
flux = py_transit_gd(params['r_planet'] / params['r_star'], params['a'], params['i'], params['period'],
params['duration'], params['ldc'], gd_params['gdc'], params['e'], params['w'],
params['r_star'], gd_params['st_density'], gd_params['st_obliquity'],
gd_params['spin_orbit_angle'], gd_params['rot_per'], gd_params['t_pole'],
t0=t0, size=size)
in_transit_idxs = np.nonzero(flux < 1.)[0]
if len(in_transit_idxs) == 0:
gd_iter += 1
continue
oot_idxs = np.nonzero(flux == 1.)[0]
if np.logical_and(oot_idxs > min(in_transit_idxs), oot_idxs < max(in_transit_idxs)).any():
gd_iter += 1
continue
abs_diff = abs(flux - flux_no_gd)
diff_idxs = abs_diff > 0
mean_effect = sum(abs_diff[diff_idxs]) / (sum(diff_idxs) + 1)
c_mean_effect = sum(abs_diff) / len(abs_diff)
max_effect = max(abs_diff)
gd_iter += 1
gd_params['gd_effect'] = mean_effect
return flux, gd_params
else:
return flux_no_gd
elif mode == 'dre' and feature:
flux = batman_model(params['r_planet'] / params['r_star'], params['a'], params['i'],
ecc=params['e'], w=params['w'], period=params['period'], duration=params['duration'],
limb_dark="quadratic", limb_coeff=params['ldc'], t0=t0, size=size)
residual = res_data[0][np.random.choice(len(res_data))]
#transit_model = min_max_norm(flux)
transit_model = flux
alligned_model, dre_scale, dre_shift = allign_model_with_residuals(transit_model, residual['ingress'], residual['egress'],
len(residual['residual']))
# scale and shift
res_to_depth = max(abs(residual['residual'])) / (1.-min(alligned_model))
tran_dur = (residual['egress'] - residual['ingress'])
if res_data[1] is None:
in_transit_ratio = tran_dur * 1. / len(residual['residual'])
sampled_mult = max(1.1, sample_horizontal_scale())
dre_mult = min(sampled_mult, 1./in_transit_ratio)
else:
dre_mult = res_data[1]
select_size = int(dre_mult * tran_dur)
epoch_idx = residual['ingress'] + (tran_dur // 2)
start_dre = epoch_idx-(select_size//2)
end_dre = start_dre + select_size
dre_shift = 0
if res_data[2]:
left_in_data_constraint = residual['ingress'] - start_dre
right_in_data_constraint = end_dre - residual['egress']
left_out_data_constraint = start_dre
right_out_data_constraint = len(residual['residual']) - 1 - residual['egress']
left_constraint = min(left_in_data_constraint, left_out_data_constraint)
right_constraint = min(right_in_data_constraint, right_out_data_constraint)
#shift_range = max(1, int((dre_mult-1.)/2. * tran_dur))
#dre_shift = np.random.randint(-shift_range, shift_range)
dre_shift = np.random.randint(-left_constraint, right_constraint)
start_dre += dre_shift
end_dre += dre_shift
feature_model = alligned_model[start_dre:end_dre]
res_scale_factor = np.random.uniform(0.001, 0.5) / res_to_depth
dre = feature_model + residual['residual'][start_dre:end_dre] * res_scale_factor
        # resize both models back to the requested output length and return them
        feature_model = resize(feature_model, size)
        dre = resize(dre, size)
        return feature_model, dre, dre_mult, np.argmin(feature_model) - feature_model.shape[-1] // 2
else:
        raise RuntimeError(f"Unknown mode {mode} for transit creation; use one of ('normal', 'spots', 'gd', 'dre').")
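# Minimal usage sketch for create_transit_model (assumes `params` has already been filled by
# fill_missing_params, exactly as sample_artificial_transits does below):
#   >>> clean_flux = create_transit_model(params, mode='normal')                    # label model only
#   >>> flux, spot_info = create_transit_model(params, mode='spots', feature=True)  # model + spot metadata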
def sample_artificial_transits(num_transits, mode='normal', size=256, snr_min=1., snr_max=30., max_repetitions=10,
multiplier=3., horizontal_scale=False, horizontal_shift=False, uniform_params=False,
num_bins=20):
assert mode in ('normal', 'spots', 'gd', 'dre')
function_start = time()
if mode == 'dre':
res_data = (pickle.load(open("data/Kepler1520/residuals_dv_model_plain.pkl", "rb")),
multiplier, horizontal_shift)
else:
res_data = None
known_exos_params = extract_parameters()
bins = bin_parameters_by_impact(known_exos_params, uniform_impact_bins=num_bins)
results = {'artificial_data':np.empty((num_transits, size)), 'transit_model':np.empty((num_transits, size)),
'feature_model':np.empty((num_transits, size))}
    meta_columns = ['a', 'i', 'b', 'e', 'w', 'period', 'duration', 'multiplier',
'r_star', 'r_planet', 'm_star', 'm_planet', 'rot_per', 'st_density', 't_pole',
't_eff', 'logg', 'met', 'limb_model', 'ldc', 'gdc', 'st_obliquity', 'spin_orbit_angle', 'gd_effect',
'num_spots', 'longitudes', 'latitudes', 'spot_sizes', 'spot_brightnesses', 'mode',
'uniform_params', 'transit_depth', 'shift', 'ingress_idx', 'egress_idx', 'snr']
    meta = pd.DataFrame(columns=meta_columns, index=range(num_transits))
transits_generated = 0
loop_start = time()
while transits_generated < num_transits:
        params = dict.fromkeys(meta_columns, np.nan)
uniform_param_iter = 0
# get theoretical model
if uniform_params:
raise NotImplementedError("This functionality is deprecated and not implemented anymore!")
else:
bin_idx = np.random.choice(num_bins)
selected_bin = bins[list(bins.keys())[bin_idx]]
row = selected_bin[np.random.choice(len(selected_bin))]
params.update(fill_missing_params(row))
params['mode'] = mode
params['uniform_params'] = uniform_params
params['limb_model'] = "quad"
params['num_spots'] = 0
flux_original = create_transit_model(params, mode=mode, feature=False, size=size)
# skip invalid configurations
if max(flux_original) - min(flux_original) <= 1e-8:
continue
in_transit_ratio = sum(flux_original < 1.) / len(flux_original)
        # if we are zoomed in too close on the transit, zoom out until the ingress/egress edges become visible
while in_transit_ratio == 1.:
params['duration'] *= 5
flux_original = create_transit_model(params, mode=mode, feature=False, size=size)
in_transit_ratio = sum(flux_original < 1.) / len(flux_original)
if params['duration'] >= params['period']/2.:
break
# if correction fails skip transit
if in_transit_ratio >= 1.:
print(f"Inaccurate parameters detected! Transit model will be skipped.")
continue
if mode == 'dre':
feature_params = {}
flux_feature, dre, dre_mult, dre_shift = create_transit_model(params, mode=mode, feature=True,
size=size, res_data=res_data)
results['artificial_data'][transits_generated, :] = dre
# fix horizontal scale to given number of transit durations (multiplier)
duration_correction = 1.
if horizontal_scale:
# always scale original (label) to 3 transit durations
duration_correction_original = in_transit_ratio / (1. / 3.)
params['duration'] *= duration_correction_original
flux_original = create_transit_model(params, mode=mode, feature=False, size=size)
params['duration'] /= duration_correction_original
excess_in_transit = round(sum(flux_original < 1.) - len(flux_original)*(1./3.))
if excess_in_transit > 1:
fill_num = round(excess_in_transit/2.)
flux_original = np.insert(flux_original, 0, np.ones(fill_num))
flux_original = np.append(flux_original, np.ones(fill_num))
flux_original = resize(flux_original, size=size)
elif excess_in_transit < -1:
discard_num = abs(excess_in_transit) - 1
flux_original = resize(flux_original[discard_num:-discard_num], size=size)
# scale feature model to specified ratio
fix_scale = multiplier or sample_horizontal_scale()
duration_correction = in_transit_ratio / (1. / fix_scale)
if mode != 'dre':
params['duration'] *= duration_correction
flux_feature, feature_params = create_transit_model(params, mode=mode, feature=True, size=size)
params['duration'] /= duration_correction
params['multiplier'] = fix_scale
else:
params['multiplier'] = dre_mult
else:
if not mode == 'dre':
flux_feature, feature_params = create_transit_model(params, mode=mode, feature=True, size=size)
if flux_feature is None:
continue
in_transit_indices = np.argwhere(flux_original < 1.).flatten()
if len(in_transit_indices) == 0:
print("No cadences below 1 in theoretical transit model! Skip example.")
continue
ingress_idx = in_transit_indices.min()
        egress_idx = in_transit_indices.max()
# add horizontal shift
shift = 0
if horizontal_shift:
            shift = np.random.randint(-ingress_idx, (size - 1) - egress_idx)
if mode == 'spots':
params['duration'] *= duration_correction
window_size = (params['duration'] / 24.) * (params['multiplier'] / 2.)
time_crop = np.linspace(-window_size, window_size, size, endpoint=True)
t0 = time_crop[size // 2 + shift]
flux_feature, feature_params = create_transit_model(params, mode=mode, feature=True, t0=t0, size=size)
params['duration'] /= duration_correction
else:
if mode != 'dre':
flux_feature = np.roll(flux_feature, shift=shift)
if np.isnan(flux_original).any() or np.isnan(flux_feature).any() or min(flux_feature) == max(flux_feature):
continue
params.update(feature_params)
params['shift'] = shift if mode != 'dre' else dre_shift
params['ingress_idx'] = ingress_idx + shift
        params['egress_idx'] = egress_idx + shift
params['transit_depth'] = min(flux_original)
# add noise to model to create artificial data
if not mode == 'dre':
if mode == 'gd':
tran_sig = 1.-params['transit_depth']
gd_snr = tran_sig/params['gd_effect']
sampled_snr = np.random.uniform(gd_snr, gd_snr*2)
else:
sampled_snr = np.random.uniform(snr_min, snr_max)
params['snr'] = sampled_snr
results['artificial_data'][transits_generated, :] = add_random_noise(flux_feature, inplace=False,
snr=sampled_snr, size=size)
else:
noise_scale = (sum(abs(results['artificial_data'][transits_generated, :]-flux_feature))/len(flux_feature))/2.
dre_snr = (max(flux_feature)-min(flux_feature)) / noise_scale
params['snr'] = dre_snr
add_random_noise(results['artificial_data'][transits_generated, :], inplace=True, snr=dre_snr, size=size)
results['transit_model'][transits_generated, :] = flux_original
results['feature_model'][transits_generated, :] = flux_feature
meta.iloc[transits_generated] = | pd.Series(params) | pandas.Series |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib as mpl
import sys
import os
import csv
import re
from matplotlib.patches import Rectangle
gem5root = os.environ.get('GEM5_ROOT')
specroot = os.environ.get('SPEC_ROOT')
assert(gem5root is not None)
assert(specroot is not None)
df = pd.DataFrame(columns = ["cycles", "weight", "para", "ipc", "insts", "total_nodes", "fake_reads", "fake_writes", "bandwidth"])
| pd.set_option("display.max_rows", None, "display.max_columns", None) | pandas.set_option |
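# Example only: a parsed benchmark record could be appended to `df` like this; the values are
# placeholders, not real gem5 output, and the column order must match the definition above.
#   df.loc[len(df)] = [1000000, 0.5, 4, 1.2, 1200000, 0, 0, 0, 12.5]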
""" test the scalar Timestamp """
import pytz
import pytest
import dateutil
import calendar
import locale
import numpy as np
from dateutil.tz import tzutc
from pytz import timezone, utc
from datetime import datetime, timedelta
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from pandas.tseries import offsets
from pandas._libs.tslibs import conversion
from pandas._libs.tslibs.timezones import get_timezone, dateutil_gettz as gettz
from pandas.errors import OutOfBoundsDatetime
from pandas.compat import long, PY3
from pandas.compat.numpy import np_datetime64_compat
from pandas import Timestamp, Period, Timedelta, NaT
class TestTimestampProperties(object):
def test_properties_business(self):
ts = Timestamp('2017-10-01', freq='B')
control = Timestamp('2017-10-01')
assert ts.dayofweek == 6
assert not ts.is_month_start # not a weekday
assert not ts.is_quarter_start # not a weekday
# Control case: non-business is month/qtr start
assert control.is_month_start
assert control.is_quarter_start
ts = Timestamp('2017-09-30', freq='B')
control = Timestamp('2017-09-30')
assert ts.dayofweek == 5
assert not ts.is_month_end # not a weekday
assert not ts.is_quarter_end # not a weekday
# Control case: non-business is month/qtr start
assert control.is_month_end
assert control.is_quarter_end
def test_fields(self):
def check(value, equal):
# that we are int/long like
assert isinstance(value, (int, long))
assert value == equal
# GH 10050
ts = Timestamp('2015-05-10 09:06:03.000100001')
check(ts.year, 2015)
check(ts.month, 5)
check(ts.day, 10)
check(ts.hour, 9)
check(ts.minute, 6)
check(ts.second, 3)
pytest.raises(AttributeError, lambda: ts.millisecond)
check(ts.microsecond, 100)
check(ts.nanosecond, 1)
check(ts.dayofweek, 6)
check(ts.quarter, 2)
check(ts.dayofyear, 130)
check(ts.week, 19)
check(ts.daysinmonth, 31)
check(ts.daysinmonth, 31)
# GH 13303
ts = Timestamp('2014-12-31 23:59:00-05:00', tz='US/Eastern')
check(ts.year, 2014)
check(ts.month, 12)
check(ts.day, 31)
check(ts.hour, 23)
check(ts.minute, 59)
check(ts.second, 0)
pytest.raises(AttributeError, lambda: ts.millisecond)
check(ts.microsecond, 0)
check(ts.nanosecond, 0)
check(ts.dayofweek, 2)
check(ts.quarter, 4)
check(ts.dayofyear, 365)
check(ts.week, 1)
check(ts.daysinmonth, 31)
ts = Timestamp('2014-01-01 00:00:00+01:00')
starts = ['is_month_start', 'is_quarter_start', 'is_year_start']
for start in starts:
assert getattr(ts, start)
ts = Timestamp('2014-12-31 23:59:59+01:00')
ends = ['is_month_end', 'is_year_end', 'is_quarter_end']
for end in ends:
assert getattr(ts, end)
# GH 12806
@pytest.mark.parametrize('data',
[Timestamp('2017-08-28 23:00:00'),
Timestamp('2017-08-28 23:00:00', tz='EST')])
@pytest.mark.parametrize('time_locale', [
None] if tm.get_locales() is None else [None] + tm.get_locales())
def test_names(self, data, time_locale):
# GH 17354
# Test .weekday_name, .day_name(), .month_name
with tm.assert_produces_warning(DeprecationWarning,
check_stacklevel=False):
assert data.weekday_name == 'Monday'
if time_locale is None:
expected_day = 'Monday'
expected_month = 'August'
else:
with tm.set_locale(time_locale, locale.LC_TIME):
expected_day = calendar.day_name[0].capitalize()
expected_month = calendar.month_name[8].capitalize()
assert data.day_name(time_locale) == expected_day
assert data.month_name(time_locale) == expected_month
# Test NaT
nan_ts = Timestamp(NaT)
assert np.isnan(nan_ts.day_name(time_locale))
assert np.isnan(nan_ts.month_name(time_locale))
@pytest.mark.parametrize('tz', [None, 'UTC', 'US/Eastern', 'Asia/Tokyo'])
def test_is_leap_year(self, tz):
# GH 13727
dt = Timestamp('2000-01-01 00:00:00', tz=tz)
assert dt.is_leap_year
assert isinstance(dt.is_leap_year, bool)
dt = Timestamp('1999-01-01 00:00:00', tz=tz)
assert not dt.is_leap_year
dt = Timestamp('2004-01-01 00:00:00', tz=tz)
assert dt.is_leap_year
dt = Timestamp('2100-01-01 00:00:00', tz=tz)
assert not dt.is_leap_year
def test_woy_boundary(self):
# make sure weeks at year boundaries are correct
d = datetime(2013, 12, 31)
result = Timestamp(d).week
expected = 1 # ISO standard
assert result == expected
d = datetime(2008, 12, 28)
result = Timestamp(d).week
expected = 52 # ISO standard
assert result == expected
d = datetime(2009, 12, 31)
result = Timestamp(d).week
expected = 53 # ISO standard
assert result == expected
d = datetime(2010, 1, 1)
result = Timestamp(d).week
expected = 53 # ISO standard
assert result == expected
d = datetime(2010, 1, 3)
result = Timestamp(d).week
expected = 53 # ISO standard
assert result == expected
result = np.array([Timestamp(datetime(*args)).week
for args in [(2000, 1, 1), (2000, 1, 2), (
2005, 1, 1), (2005, 1, 2)]])
assert (result == [52, 52, 53, 53]).all()
class TestTimestampConstructors(object):
def test_constructor(self):
base_str = '2014-07-01 09:00'
base_dt = datetime(2014, 7, 1, 9)
base_expected = 1404205200000000000
# confirm base representation is correct
import calendar
assert (calendar.timegm(base_dt.timetuple()) * 1000000000 ==
base_expected)
tests = [(base_str, base_dt, base_expected),
('2014-07-01 10:00', datetime(2014, 7, 1, 10),
base_expected + 3600 * 1000000000),
('2014-07-01 09:00:00.000008000',
datetime(2014, 7, 1, 9, 0, 0, 8),
base_expected + 8000),
('2014-07-01 09:00:00.000000005',
Timestamp('2014-07-01 09:00:00.000000005'),
base_expected + 5)]
timezones = [(None, 0), ('UTC', 0), (pytz.utc, 0), ('Asia/Tokyo', 9),
('US/Eastern', -4), ('dateutil/US/Pacific', -7),
(pytz.FixedOffset(-180), -3),
(dateutil.tz.tzoffset(None, 18000), 5)]
for date_str, date, expected in tests:
for result in [Timestamp(date_str), Timestamp(date)]:
# only with timestring
assert result.value == expected
assert conversion.pydt_to_i8(result) == expected
# re-creation shouldn't affect to internal value
result = Timestamp(result)
assert result.value == expected
assert conversion.pydt_to_i8(result) == expected
# with timezone
for tz, offset in timezones:
for result in [Timestamp(date_str, tz=tz), Timestamp(date,
tz=tz)]:
expected_tz = expected - offset * 3600 * 1000000000
assert result.value == expected_tz
assert conversion.pydt_to_i8(result) == expected_tz
# should preserve tz
result = Timestamp(result)
assert result.value == expected_tz
assert conversion.pydt_to_i8(result) == expected_tz
# should convert to UTC
result = Timestamp(result, tz='UTC')
expected_utc = expected - offset * 3600 * 1000000000
assert result.value == expected_utc
assert conversion.pydt_to_i8(result) == expected_utc
def test_constructor_with_stringoffset(self):
# GH 7833
base_str = '2014-07-01 11:00:00+02:00'
base_dt = datetime(2014, 7, 1, 9)
base_expected = 1404205200000000000
# confirm base representation is correct
import calendar
assert (calendar.timegm(base_dt.timetuple()) * 1000000000 ==
base_expected)
tests = [(base_str, base_expected),
('2014-07-01 12:00:00+02:00',
base_expected + 3600 * 1000000000),
('2014-07-01 11:00:00.000008000+02:00', base_expected + 8000),
('2014-07-01 11:00:00.000000005+02:00', base_expected + 5)]
timezones = [(None, 0), ('UTC', 0), (pytz.utc, 0), ('Asia/Tokyo', 9),
('US/Eastern', -4), ('dateutil/US/Pacific', -7),
(pytz.FixedOffset(-180), -3),
(dateutil.tz.tzoffset(None, 18000), 5)]
for date_str, expected in tests:
for result in [Timestamp(date_str)]:
# only with timestring
assert result.value == expected
assert conversion.pydt_to_i8(result) == expected
# re-creation shouldn't affect to internal value
result = Timestamp(result)
assert result.value == expected
assert conversion.pydt_to_i8(result) == expected
# with timezone
for tz, offset in timezones:
result = Timestamp(date_str, tz=tz)
expected_tz = expected
assert result.value == expected_tz
assert conversion.pydt_to_i8(result) == expected_tz
# should preserve tz
result = Timestamp(result)
assert result.value == expected_tz
assert conversion.pydt_to_i8(result) == expected_tz
# should convert to UTC
result = | Timestamp(result, tz='UTC') | pandas.Timestamp |
import pandas as pd
import os
import numpy as np
from pandas.core.frame import DataFrame
import lightgbm as lgb
class predict:
def __init__(self):
self.setConstants()
def setConstants(self):
self.houses = []
self.noMatchHouses = []
from ..models import models_logs
model = models_logs.objects.get(inUseFlag=1, trainSuccess=1)
        print("Using model #%s" % model.id)
model_id = model.id
self.beginDate = model.startMonth.strftime('%Y-%m')
self.endDate = model.endMonth.strftime('%Y-%m')
'''
        For testing:
        print("Using model #%s" % 57)
model_id = 57
self.beginDate = '2017-02'
self.endDate = '2017-03'
'''
        # Path to the new-property (AD_NewDisk) data table
self.newdisk_path = os.path.dirname(os.path.realpath(__file__)) + '/data/AD_NewDisk.csv'
        # Path to the property-attributes data table
self.property_path = os.path.dirname(os.path.realpath(__file__)) + '/data/AD_Property.csv'
        # Path to the address data table
self.address_path = os.path.dirname(os.path.realpath(__file__)) + '/data/AD_NewDiskAddress.csv'
        # Path to the listings data directory
self.data_path = os.path.dirname(os.path.realpath(__file__)) + '/data/'
self.model_dir = os.path.dirname(os.path.realpath(__file__)) + '/cache/model_%s/' % (model_id)
if not os.path.exists(self.model_dir):
            print("Model training has a problem")
            return "Model training has a problem"
        # Path to the per-estate median price table
self.medprice_path = self.model_dir + '/medprice.csv'
        # Path to the district-name label encoding
self.arealabel_path = self.model_dir + '/arealabel.csv'
        # Path to the plate (sub-district) name label encoding
self.platelabel_path = self.model_dir + '/platelabel.csv'
        # Path to the inner/middle/outer ring label encoding
self.modulelabel_path = self.model_dir + 'modulelabel.csv'
        # Path to the cached model
self.cache_path_model = self.model_dir + '/model.txt'
        # Path to the cached listing data
self.cache_path_guapai = os.path.dirname(os.path.realpath(__file__)) + '/cache/guapai_%s-%s.hdf' % (self.beginDate, self.endDate)
        # Path to the cached preprocessed features
self.cache_path_feats = os.path.dirname(os.path.realpath(__file__)) + '/cache/feats_%s-%s.hdf' % (self.beginDate, self.endDate)
self.meta_df = pd.read_hdf(self.cache_path_guapai, 'meta')
self.gbm = lgb.Booster(model_file=self.cache_path_model)
self.med_price = pd.read_csv(self.medprice_path)
self.arealabel = pd.read_csv(self.arealabel_path, usecols=["label", "area"])
self.arealabel.set_index(["area"], inplace=True)
self.arealabel = self.arealabel.to_dict()["label"]
self.platelabel = pd.read_csv(self.platelabel_path, usecols=["label", "plate"])
self.platelabel.set_index(["plate"], inplace=True)
self.platelabel = self.platelabel.to_dict()["label"]
self.modulelabel = pd.read_csv(self.modulelabel_path, usecols=["Module", "unit_price"])
self.modulelabel.set_index(["Module"], inplace=True)
def make_coordinates(self, data):
coors = []
# for i in tqdm(data):
for i in data:
            if isinstance(i, str) and i not in ('公寓', '商业', '其它'):  # skip category labels: apartment / commercial / other
coors.append(i.split(','))
else:
coors.append([None, None])
coors = pd.DataFrame(coors, columns=['loc_x', 'loc_y'])
# coors=pd.DataFrame([coor.split(',') for coor in all_df.Coordinates],columns=['loc_x','loc_y'],index=all_df.index)
coors = coors.astype(float)
return coors
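    # Illustrative behaviour of make_coordinates (values are made up): an input such as
    # ['121.47,31.23', '公寓', '121.50,31.20'] yields a float DataFrame with columns
    # loc_x/loc_y, where category strings map to (NaN, NaN).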
def find_DiskID(self, address):
        # UX note: ideally the web page should check whether the estate exists while the address is typed, instead of returning an error only at prediction time.
# address = address_filter(address)
address_df = pd.read_csv(self.address_path, usecols=['RoadLaneNo', 'NewDiskID'])
# address_df = tools.read_basic_table("AD_NewDiskAddress")
address_df.rename(columns={'RoadLaneNo': 'address'}, inplace=True)
address_all = pd.merge(self.meta_df[["NewDiskID", "name"]], address_df, how='left', on='NewDiskID').dropna(axis=0,
how='any')
address_fit = address_all[address_all.address.str.contains(address)]
        address_fit = address_fit.head(1)  # keep the first match
if address_fit.empty:
            print("No matching residential estate found!")  # remember to return this error message to the front end
return (None, None, None)
else:
print(address_fit)
NewDiskID = address_fit.iat[0, 0]
return (NewDiskID, address_fit.iat[0, 1], address_fit.iat[0, 2])
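    # Usage sketch (the query string is illustrative): find_DiskID("<road + lane number>")
    # returns (NewDiskID, estate name, matched address), or (None, None, None) when no
    # listing address contains the query.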
###############################################
    ## TODO: rewrite so this lookup runs in the database instead of scanning locally. ##
def find_DiskID_ByName(self, diskname_input):
address_df = pd.read_csv(self.address_path, usecols=['RoadLaneNo', 'NewDiskID'])
address_df.rename(columns={'RoadLaneNo': 'address'}, inplace=True)
name_all = | pd.merge(self.meta_df[['NewDiskID', 'name']], address_df, how='left', on='NewDiskID') | pandas.merge |
from django.shortcuts import render
import pandas as pd
from dashboard.models import Utilization, Samples, Revenue, monthlystats
from dashboard.serializers import UtilizationSerializer, SamplesSerializer, RevenueSerializer, monthlystatsSerializer
from dashboard.viewfuncs import index_context, sample_context, util_context, revenue_context
# Create your views here.
def indexpage(request):
samples_obj = Samples.objects.all()
revenue_obj = Revenue.objects.all()
samples_serializer = SamplesSerializer(samples_obj, many=True)
revenue_serializer = RevenueSerializer(revenue_obj, many=True)
samples_df = pd.DataFrame(samples_serializer.data)
revenue_df = | pd.DataFrame(revenue_serializer.data) | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim:set ts=4 sts=4 sw=4 expandtab fenc=utf-8 ff=unix :
#
# Schedule work shift
#
# Copyright (C) 2018 <NAME>
import logging
import argparse
import numpy as np
import pandas as pd
import pulp
from pulp import lpSum, lpDot, LpBinary, LpVariable
from pulp import LpProblem, LpMaximize, LpStatus
logger = logging.getLogger(__name__)
| pd.set_option("display.max_columns", 200) | pandas.set_option |
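# Minimal sketch of the kind of shift-assignment model the imports above support; the
# worker/day dimensions and constraints here are assumptions, not the original formulation.
#   prob = LpProblem("shift_schedule", LpMaximize)
#   x = LpVariable.dicts("assign", [(w, d) for w in range(3) for d in range(7)], cat=LpBinary)
#   prob += lpSum(x.values())                                # toy objective: fill as many slots as possible
#   for d in range(7):
#       prob += lpSum(x[(w, d)] for w in range(3)) >= 1      # at least one worker per day
#   prob.solve()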
"""Tests suite for Period handling.
Parts derived from scikits.timeseries code, original authors:
- <NAME> & <NAME>
- pierregm_at_uga_dot_edu - mattknow_ca_at_hotmail_dot_com
"""
from unittest import TestCase
from datetime import datetime, timedelta
from numpy.ma.testutils import assert_equal
from pandas.tseries.period import Period, PeriodIndex
from pandas.tseries.index import DatetimeIndex, date_range
from pandas.tseries.tools import to_datetime
import pandas.core.datetools as datetools
import numpy as np
from pandas import Series, TimeSeries
from pandas.util.testing import assert_series_equal
class TestPeriodProperties(TestCase):
"Test properties such as year, month, weekday, etc...."
#
def __init__(self, *args, **kwds):
TestCase.__init__(self, *args, **kwds)
def test_interval_constructor(self):
i1 = Period('1/1/2005', freq='M')
i2 = Period('Jan 2005')
self.assertEquals(i1, i2)
i1 = Period('2005', freq='A')
i2 = Period('2005')
i3 = Period('2005', freq='a')
self.assertEquals(i1, i2)
self.assertEquals(i1, i3)
i4 = Period('2005', freq='M')
i5 = Period('2005', freq='m')
self.assert_(i1 != i4)
self.assertEquals(i4, i5)
i1 = Period.now('Q')
i2 = Period(datetime.now(), freq='Q')
i3 = Period.now('q')
self.assertEquals(i1, i2)
self.assertEquals(i1, i3)
# Biz day construction, roll forward if non-weekday
i1 = Period('3/10/12', freq='B')
i2 = Period('3/12/12', freq='D')
self.assertEquals(i1, i2.asfreq('B'))
i3 = Period('3/10/12', freq='b')
self.assertEquals(i1, i3)
i1 = Period(year=2005, quarter=1, freq='Q')
i2 = Period('1/1/2005', freq='Q')
self.assertEquals(i1, i2)
i1 = Period(year=2005, quarter=3, freq='Q')
i2 = Period('9/1/2005', freq='Q')
self.assertEquals(i1, i2)
i1 = Period(year=2005, month=3, day=1, freq='D')
i2 = Period('3/1/2005', freq='D')
self.assertEquals(i1, i2)
i3 = Period(year=2005, month=3, day=1, freq='d')
self.assertEquals(i1, i3)
i1 = Period(year=2012, month=3, day=10, freq='B')
i2 = Period('3/12/12', freq='B')
self.assertEquals(i1, i2)
i1 = Period('2005Q1')
i2 = Period(year=2005, quarter=1, freq='Q')
i3 = Period('2005q1')
self.assertEquals(i1, i2)
self.assertEquals(i1, i3)
i1 = Period('05Q1')
self.assertEquals(i1, i2)
lower = Period('05q1')
self.assertEquals(i1, lower)
i1 = Period('1Q2005')
self.assertEquals(i1, i2)
lower = Period('1q2005')
self.assertEquals(i1, lower)
i1 = Period('1Q05')
self.assertEquals(i1, i2)
lower = Period('1q05')
self.assertEquals(i1, lower)
i1 = Period('4Q1984')
self.assertEquals(i1.year, 1984)
lower = Period('4q1984')
self.assertEquals(i1, lower)
i1 = Period('1982', freq='min')
i2 = Period('1982', freq='MIN')
self.assertEquals(i1, i2)
i2 = Period('1982', freq=('Min', 1))
self.assertEquals(i1, i2)
def test_freq_str(self):
i1 = Period('1982', freq='Min')
self.assert_(i1.freq[0] != '1')
i2 = Period('11/30/2005', freq='2Q')
self.assertEquals(i2.freq[0], '2')
def test_to_timestamp(self):
intv = Period('1982', freq='A')
start_ts = intv.to_timestamp(which_end='S')
aliases = ['s', 'StarT', 'BEGIn']
for a in aliases:
self.assertEquals(start_ts, intv.to_timestamp(which_end=a))
end_ts = intv.to_timestamp(which_end='E')
aliases = ['e', 'end', 'FINIsH']
for a in aliases:
self.assertEquals(end_ts, intv.to_timestamp(which_end=a))
from_lst = ['A', 'Q', 'M', 'W', 'B',
'D', 'H', 'Min', 'S']
for i, fcode in enumerate(from_lst):
intv = Period('1982', freq=fcode)
result = intv.to_timestamp().to_period(fcode)
self.assertEquals(result, intv)
self.assertEquals(intv.start_time(), intv.to_timestamp('S'))
self.assertEquals(intv.end_time(), intv.to_timestamp('E'))
def test_properties_annually(self):
# Test properties on Periods with annually frequency.
a_date = Period(freq='A', year=2007)
assert_equal(a_date.year, 2007)
def test_properties_quarterly(self):
# Test properties on Periods with daily frequency.
qedec_date = Period(freq="Q-DEC", year=2007, quarter=1)
qejan_date = Period(freq="Q-JAN", year=2007, quarter=1)
qejun_date = Period(freq="Q-JUN", year=2007, quarter=1)
#
for x in range(3):
for qd in (qedec_date, qejan_date, qejun_date):
assert_equal((qd + x).qyear, 2007)
assert_equal((qd + x).quarter, x + 1)
def test_properties_monthly(self):
# Test properties on Periods with daily frequency.
m_date = Period(freq='M', year=2007, month=1)
for x in range(11):
m_ival_x = m_date + x
assert_equal(m_ival_x.year, 2007)
if 1 <= x + 1 <= 3:
assert_equal(m_ival_x.quarter, 1)
elif 4 <= x + 1 <= 6:
assert_equal(m_ival_x.quarter, 2)
elif 7 <= x + 1 <= 9:
assert_equal(m_ival_x.quarter, 3)
elif 10 <= x + 1 <= 12:
assert_equal(m_ival_x.quarter, 4)
assert_equal(m_ival_x.month, x + 1)
def test_properties_weekly(self):
# Test properties on Periods with daily frequency.
w_date = Period(freq='WK', year=2007, month=1, day=7)
#
assert_equal(w_date.year, 2007)
assert_equal(w_date.quarter, 1)
assert_equal(w_date.month, 1)
assert_equal(w_date.week, 1)
assert_equal((w_date - 1).week, 52)
def test_properties_daily(self):
# Test properties on Periods with daily frequency.
b_date = Period(freq='B', year=2007, month=1, day=1)
#
assert_equal(b_date.year, 2007)
assert_equal(b_date.quarter, 1)
assert_equal(b_date.month, 1)
assert_equal(b_date.day, 1)
assert_equal(b_date.weekday, 0)
assert_equal(b_date.day_of_year, 1)
#
d_date = Period(freq='D', year=2007, month=1, day=1)
#
assert_equal(d_date.year, 2007)
assert_equal(d_date.quarter, 1)
assert_equal(d_date.month, 1)
assert_equal(d_date.day, 1)
assert_equal(d_date.weekday, 0)
assert_equal(d_date.day_of_year, 1)
def test_properties_hourly(self):
# Test properties on Periods with hourly frequency.
h_date = Period(freq='H', year=2007, month=1, day=1, hour=0)
#
assert_equal(h_date.year, 2007)
assert_equal(h_date.quarter, 1)
assert_equal(h_date.month, 1)
assert_equal(h_date.day, 1)
assert_equal(h_date.weekday, 0)
assert_equal(h_date.day_of_year, 1)
assert_equal(h_date.hour, 0)
#
def test_properties_minutely(self):
# Test properties on Periods with minutely frequency.
t_date = Period(freq='Min', year=2007, month=1, day=1, hour=0,
minute=0)
#
assert_equal(t_date.quarter, 1)
assert_equal(t_date.month, 1)
assert_equal(t_date.day, 1)
assert_equal(t_date.weekday, 0)
assert_equal(t_date.day_of_year, 1)
assert_equal(t_date.hour, 0)
assert_equal(t_date.minute, 0)
def test_properties_secondly(self):
# Test properties on Periods with secondly frequency.
s_date = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
#
assert_equal(s_date.year, 2007)
assert_equal(s_date.quarter, 1)
assert_equal(s_date.month, 1)
assert_equal(s_date.day, 1)
assert_equal(s_date.weekday, 0)
assert_equal(s_date.day_of_year, 1)
assert_equal(s_date.hour, 0)
assert_equal(s_date.minute, 0)
assert_equal(s_date.second, 0)
def noWrap(item):
return item
class TestFreqConversion(TestCase):
"Test frequency conversion of date objects"
def __init__(self, *args, **kwds):
TestCase.__init__(self, *args, **kwds)
def test_conv_annual(self):
# frequency conversion tests: from Annual Frequency
ival_A = Period(freq='A', year=2007)
ival_AJAN = Period(freq="A-JAN", year=2007)
ival_AJUN = Period(freq="A-JUN", year=2007)
ival_ANOV = Period(freq="A-NOV", year=2007)
ival_A_to_Q_start = Period(freq='Q', year=2007, quarter=1)
ival_A_to_Q_end = Period(freq='Q', year=2007, quarter=4)
ival_A_to_M_start = Period(freq='M', year=2007, month=1)
ival_A_to_M_end = Period(freq='M', year=2007, month=12)
ival_A_to_W_start = Period(freq='WK', year=2007, month=1, day=1)
ival_A_to_W_end = Period(freq='WK', year=2007, month=12, day=31)
ival_A_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_A_to_B_end = Period(freq='B', year=2007, month=12, day=31)
ival_A_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_A_to_D_end = Period(freq='D', year=2007, month=12, day=31)
ival_A_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_A_to_H_end = Period(freq='H', year=2007, month=12, day=31,
hour=23)
ival_A_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_A_to_T_end = Period(freq='Min', year=2007, month=12, day=31,
hour=23, minute=59)
ival_A_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_A_to_S_end = Period(freq='S', year=2007, month=12, day=31,
hour=23, minute=59, second=59)
ival_AJAN_to_D_end = Period(freq='D', year=2007, month=1, day=31)
ival_AJAN_to_D_start = Period(freq='D', year=2006, month=2, day=1)
ival_AJUN_to_D_end = Period(freq='D', year=2007, month=6, day=30)
ival_AJUN_to_D_start = Period(freq='D', year=2006, month=7, day=1)
ival_ANOV_to_D_end = Period(freq='D', year=2007, month=11, day=30)
ival_ANOV_to_D_start = Period(freq='D', year=2006, month=12, day=1)
assert_equal(ival_A.asfreq('Q', 'S'), ival_A_to_Q_start)
assert_equal(ival_A.asfreq('Q', 'e'), ival_A_to_Q_end)
assert_equal(ival_A.asfreq('M', 's'), ival_A_to_M_start)
assert_equal(ival_A.asfreq('M', 'E'), ival_A_to_M_end)
assert_equal(ival_A.asfreq('WK', 'S'), ival_A_to_W_start)
assert_equal(ival_A.asfreq('WK', 'E'), ival_A_to_W_end)
assert_equal(ival_A.asfreq('B', 'S'), ival_A_to_B_start)
assert_equal(ival_A.asfreq('B', 'E'), ival_A_to_B_end)
assert_equal(ival_A.asfreq('D', 'S'), ival_A_to_D_start)
assert_equal(ival_A.asfreq('D', 'E'), ival_A_to_D_end)
assert_equal(ival_A.asfreq('H', 'S'), ival_A_to_H_start)
assert_equal(ival_A.asfreq('H', 'E'), ival_A_to_H_end)
assert_equal(ival_A.asfreq('min', 'S'), ival_A_to_T_start)
assert_equal(ival_A.asfreq('min', 'E'), ival_A_to_T_end)
assert_equal(ival_A.asfreq('T', 'S'), ival_A_to_T_start)
assert_equal(ival_A.asfreq('T', 'E'), ival_A_to_T_end)
assert_equal(ival_A.asfreq('S', 'S'), ival_A_to_S_start)
assert_equal(ival_A.asfreq('S', 'E'), ival_A_to_S_end)
assert_equal(ival_AJAN.asfreq('D', 'S'), ival_AJAN_to_D_start)
assert_equal(ival_AJAN.asfreq('D', 'E'), ival_AJAN_to_D_end)
assert_equal(ival_AJUN.asfreq('D', 'S'), ival_AJUN_to_D_start)
assert_equal(ival_AJUN.asfreq('D', 'E'), ival_AJUN_to_D_end)
assert_equal(ival_ANOV.asfreq('D', 'S'), ival_ANOV_to_D_start)
assert_equal(ival_ANOV.asfreq('D', 'E'), ival_ANOV_to_D_end)
assert_equal(ival_A.asfreq('A'), ival_A)
def test_conv_quarterly(self):
# frequency conversion tests: from Quarterly Frequency
ival_Q = Period(freq='Q', year=2007, quarter=1)
ival_Q_end_of_year = Period(freq='Q', year=2007, quarter=4)
ival_QEJAN = Period(freq="Q-JAN", year=2007, quarter=1)
ival_QEJUN = Period(freq="Q-JUN", year=2007, quarter=1)
ival_Q_to_A = Period(freq='A', year=2007)
ival_Q_to_M_start = Period(freq='M', year=2007, month=1)
ival_Q_to_M_end = Period(freq='M', year=2007, month=3)
ival_Q_to_W_start = Period(freq='WK', year=2007, month=1, day=1)
ival_Q_to_W_end = Period(freq='WK', year=2007, month=3, day=31)
ival_Q_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_Q_to_B_end = Period(freq='B', year=2007, month=3, day=30)
ival_Q_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_Q_to_D_end = Period(freq='D', year=2007, month=3, day=31)
ival_Q_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_Q_to_H_end = Period(freq='H', year=2007, month=3, day=31,
hour=23)
ival_Q_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_Q_to_T_end = Period(freq='Min', year=2007, month=3, day=31,
hour=23, minute=59)
ival_Q_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_Q_to_S_end = Period(freq='S', year=2007, month=3, day=31,
hour=23, minute=59, second=59)
ival_QEJAN_to_D_start = Period(freq='D', year=2006, month=2, day=1)
ival_QEJAN_to_D_end = Period(freq='D', year=2006, month=4, day=30)
ival_QEJUN_to_D_start = Period(freq='D', year=2006, month=7, day=1)
ival_QEJUN_to_D_end = Period(freq='D', year=2006, month=9, day=30)
assert_equal(ival_Q.asfreq('A'), ival_Q_to_A)
assert_equal(ival_Q_end_of_year.asfreq('A'), ival_Q_to_A)
assert_equal(ival_Q.asfreq('M', 'S'), ival_Q_to_M_start)
assert_equal(ival_Q.asfreq('M', 'E'), ival_Q_to_M_end)
assert_equal(ival_Q.asfreq('WK', 'S'), ival_Q_to_W_start)
assert_equal(ival_Q.asfreq('WK', 'E'), ival_Q_to_W_end)
assert_equal(ival_Q.asfreq('B', 'S'), ival_Q_to_B_start)
assert_equal(ival_Q.asfreq('B', 'E'), ival_Q_to_B_end)
assert_equal(ival_Q.asfreq('D', 'S'), ival_Q_to_D_start)
assert_equal(ival_Q.asfreq('D', 'E'), ival_Q_to_D_end)
assert_equal(ival_Q.asfreq('H', 'S'), ival_Q_to_H_start)
assert_equal(ival_Q.asfreq('H', 'E'), ival_Q_to_H_end)
assert_equal(ival_Q.asfreq('Min', 'S'), ival_Q_to_T_start)
assert_equal(ival_Q.asfreq('Min', 'E'), ival_Q_to_T_end)
assert_equal(ival_Q.asfreq('S', 'S'), ival_Q_to_S_start)
assert_equal(ival_Q.asfreq('S', 'E'), ival_Q_to_S_end)
assert_equal(ival_QEJAN.asfreq('D', 'S'), ival_QEJAN_to_D_start)
assert_equal(ival_QEJAN.asfreq('D', 'E'), ival_QEJAN_to_D_end)
assert_equal(ival_QEJUN.asfreq('D', 'S'), ival_QEJUN_to_D_start)
assert_equal(ival_QEJUN.asfreq('D', 'E'), ival_QEJUN_to_D_end)
assert_equal(ival_Q.asfreq('Q'), ival_Q)
def test_conv_monthly(self):
# frequency conversion tests: from Monthly Frequency
ival_M = Period(freq='M', year=2007, month=1)
ival_M_end_of_year = Period(freq='M', year=2007, month=12)
ival_M_end_of_quarter = Period(freq='M', year=2007, month=3)
ival_M_to_A = Period(freq='A', year=2007)
ival_M_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_M_to_W_start = Period(freq='WK', year=2007, month=1, day=1)
ival_M_to_W_end = Period(freq='WK', year=2007, month=1, day=31)
ival_M_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_M_to_B_end = Period(freq='B', year=2007, month=1, day=31)
ival_M_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_M_to_D_end = Period(freq='D', year=2007, month=1, day=31)
ival_M_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_M_to_H_end = Period(freq='H', year=2007, month=1, day=31,
hour=23)
ival_M_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_M_to_T_end = Period(freq='Min', year=2007, month=1, day=31,
hour=23, minute=59)
ival_M_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_M_to_S_end = Period(freq='S', year=2007, month=1, day=31,
hour=23, minute=59, second=59)
assert_equal(ival_M.asfreq('A'), ival_M_to_A)
assert_equal(ival_M_end_of_year.asfreq('A'), ival_M_to_A)
assert_equal(ival_M.asfreq('Q'), ival_M_to_Q)
assert_equal(ival_M_end_of_quarter.asfreq('Q'), ival_M_to_Q)
assert_equal(ival_M.asfreq('WK', 'S'), ival_M_to_W_start)
assert_equal(ival_M.asfreq('WK', 'E'), ival_M_to_W_end)
assert_equal(ival_M.asfreq('B', 'S'), ival_M_to_B_start)
assert_equal(ival_M.asfreq('B', 'E'), ival_M_to_B_end)
assert_equal(ival_M.asfreq('D', 'S'), ival_M_to_D_start)
assert_equal(ival_M.asfreq('D', 'E'), ival_M_to_D_end)
assert_equal(ival_M.asfreq('H', 'S'), ival_M_to_H_start)
assert_equal(ival_M.asfreq('H', 'E'), ival_M_to_H_end)
assert_equal(ival_M.asfreq('Min', 'S'), ival_M_to_T_start)
assert_equal(ival_M.asfreq('Min', 'E'), ival_M_to_T_end)
assert_equal(ival_M.asfreq('S', 'S'), ival_M_to_S_start)
assert_equal(ival_M.asfreq('S', 'E'), ival_M_to_S_end)
assert_equal(ival_M.asfreq('M'), ival_M)
def test_conv_weekly(self):
# frequency conversion tests: from Weekly Frequency
ival_W = Period(freq='WK', year=2007, month=1, day=1)
ival_WSUN = Period(freq='WK', year=2007, month=1, day=7)
ival_WSAT = Period(freq='WK-SAT', year=2007, month=1, day=6)
ival_WFRI = Period(freq='WK-FRI', year=2007, month=1, day=5)
ival_WTHU = Period(freq='WK-THU', year=2007, month=1, day=4)
ival_WWED = Period(freq='WK-WED', year=2007, month=1, day=3)
ival_WTUE = Period(freq='WK-TUE', year=2007, month=1, day=2)
ival_WMON = Period(freq='WK-MON', year=2007, month=1, day=1)
ival_WSUN_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_WSUN_to_D_end = Period(freq='D', year=2007, month=1, day=7)
ival_WSAT_to_D_start = Period(freq='D', year=2006, month=12, day=31)
ival_WSAT_to_D_end = Period(freq='D', year=2007, month=1, day=6)
ival_WFRI_to_D_start = Period(freq='D', year=2006, month=12, day=30)
ival_WFRI_to_D_end = Period(freq='D', year=2007, month=1, day=5)
ival_WTHU_to_D_start = Period(freq='D', year=2006, month=12, day=29)
ival_WTHU_to_D_end = Period(freq='D', year=2007, month=1, day=4)
ival_WWED_to_D_start = Period(freq='D', year=2006, month=12, day=28)
ival_WWED_to_D_end = Period(freq='D', year=2007, month=1, day=3)
ival_WTUE_to_D_start = Period(freq='D', year=2006, month=12, day=27)
ival_WTUE_to_D_end = Period(freq='D', year=2007, month=1, day=2)
ival_WMON_to_D_start = Period(freq='D', year=2006, month=12, day=26)
ival_WMON_to_D_end = Period(freq='D', year=2007, month=1, day=1)
ival_W_end_of_year = Period(freq='WK', year=2007, month=12, day=31)
ival_W_end_of_quarter = Period(freq='WK', year=2007, month=3, day=31)
ival_W_end_of_month = Period(freq='WK', year=2007, month=1, day=31)
ival_W_to_A = Period(freq='A', year=2007)
ival_W_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_W_to_M = Period(freq='M', year=2007, month=1)
if Period(freq='D', year=2007, month=12, day=31).weekday == 6:
ival_W_to_A_end_of_year = Period(freq='A', year=2007)
else:
ival_W_to_A_end_of_year = Period(freq='A', year=2008)
if Period(freq='D', year=2007, month=3, day=31).weekday == 6:
ival_W_to_Q_end_of_quarter = Period(freq='Q', year=2007,
quarter=1)
else:
ival_W_to_Q_end_of_quarter = Period(freq='Q', year=2007,
quarter=2)
if Period(freq='D', year=2007, month=1, day=31).weekday == 6:
ival_W_to_M_end_of_month = Period(freq='M', year=2007, month=1)
else:
ival_W_to_M_end_of_month = Period(freq='M', year=2007, month=2)
ival_W_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_W_to_B_end = Period(freq='B', year=2007, month=1, day=5)
ival_W_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_W_to_D_end = Period(freq='D', year=2007, month=1, day=7)
ival_W_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_W_to_H_end = Period(freq='H', year=2007, month=1, day=7,
hour=23)
ival_W_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_W_to_T_end = Period(freq='Min', year=2007, month=1, day=7,
hour=23, minute=59)
ival_W_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_W_to_S_end = Period(freq='S', year=2007, month=1, day=7,
hour=23, minute=59, second=59)
assert_equal(ival_W.asfreq('A'), ival_W_to_A)
assert_equal(ival_W_end_of_year.asfreq('A'),
ival_W_to_A_end_of_year)
assert_equal(ival_W.asfreq('Q'), ival_W_to_Q)
assert_equal(ival_W_end_of_quarter.asfreq('Q'),
ival_W_to_Q_end_of_quarter)
assert_equal(ival_W.asfreq('M'), ival_W_to_M)
assert_equal(ival_W_end_of_month.asfreq('M'),
ival_W_to_M_end_of_month)
assert_equal(ival_W.asfreq('B', 'S'), ival_W_to_B_start)
assert_equal(ival_W.asfreq('B', 'E'), ival_W_to_B_end)
assert_equal(ival_W.asfreq('D', 'S'), ival_W_to_D_start)
assert_equal(ival_W.asfreq('D', 'E'), ival_W_to_D_end)
assert_equal(ival_WSUN.asfreq('D', 'S'), ival_WSUN_to_D_start)
assert_equal(ival_WSUN.asfreq('D', 'E'), ival_WSUN_to_D_end)
assert_equal(ival_WSAT.asfreq('D', 'S'), ival_WSAT_to_D_start)
assert_equal(ival_WSAT.asfreq('D', 'E'), ival_WSAT_to_D_end)
assert_equal(ival_WFRI.asfreq('D', 'S'), ival_WFRI_to_D_start)
assert_equal(ival_WFRI.asfreq('D', 'E'), ival_WFRI_to_D_end)
assert_equal(ival_WTHU.asfreq('D', 'S'), ival_WTHU_to_D_start)
assert_equal(ival_WTHU.asfreq('D', 'E'), ival_WTHU_to_D_end)
assert_equal(ival_WWED.asfreq('D', 'S'), ival_WWED_to_D_start)
assert_equal(ival_WWED.asfreq('D', 'E'), ival_WWED_to_D_end)
assert_equal(ival_WTUE.asfreq('D', 'S'), ival_WTUE_to_D_start)
assert_equal(ival_WTUE.asfreq('D', 'E'), ival_WTUE_to_D_end)
assert_equal(ival_WMON.asfreq('D', 'S'), ival_WMON_to_D_start)
assert_equal(ival_WMON.asfreq('D', 'E'), ival_WMON_to_D_end)
assert_equal(ival_W.asfreq('H', 'S'), ival_W_to_H_start)
assert_equal(ival_W.asfreq('H', 'E'), ival_W_to_H_end)
assert_equal(ival_W.asfreq('Min', 'S'), ival_W_to_T_start)
assert_equal(ival_W.asfreq('Min', 'E'), ival_W_to_T_end)
assert_equal(ival_W.asfreq('S', 'S'), ival_W_to_S_start)
assert_equal(ival_W.asfreq('S', 'E'), ival_W_to_S_end)
assert_equal(ival_W.asfreq('WK'), ival_W)
def test_conv_business(self):
# frequency conversion tests: from Business Frequency"
ival_B = Period(freq='B', year=2007, month=1, day=1)
ival_B_end_of_year = Period(freq='B', year=2007, month=12, day=31)
ival_B_end_of_quarter = Period(freq='B', year=2007, month=3, day=30)
ival_B_end_of_month = Period(freq='B', year=2007, month=1, day=31)
ival_B_end_of_week = Period(freq='B', year=2007, month=1, day=5)
ival_B_to_A = Period(freq='A', year=2007)
ival_B_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_B_to_M = Period(freq='M', year=2007, month=1)
ival_B_to_W = Period(freq='WK', year=2007, month=1, day=7)
ival_B_to_D = Period(freq='D', year=2007, month=1, day=1)
ival_B_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_B_to_H_end = Period(freq='H', year=2007, month=1, day=1,
hour=23)
ival_B_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_B_to_T_end = Period(freq='Min', year=2007, month=1, day=1,
hour=23, minute=59)
ival_B_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_B_to_S_end = Period(freq='S', year=2007, month=1, day=1,
hour=23, minute=59, second=59)
assert_equal(ival_B.asfreq('A'), ival_B_to_A)
assert_equal(ival_B_end_of_year.asfreq('A'), ival_B_to_A)
assert_equal(ival_B.asfreq('Q'), ival_B_to_Q)
assert_equal(ival_B_end_of_quarter.asfreq('Q'), ival_B_to_Q)
assert_equal(ival_B.asfreq('M'), ival_B_to_M)
assert_equal(ival_B_end_of_month.asfreq('M'), ival_B_to_M)
assert_equal(ival_B.asfreq('WK'), ival_B_to_W)
assert_equal(ival_B_end_of_week.asfreq('WK'), ival_B_to_W)
assert_equal(ival_B.asfreq('D'), ival_B_to_D)
assert_equal(ival_B.asfreq('H', 'S'), ival_B_to_H_start)
assert_equal(ival_B.asfreq('H', 'E'), ival_B_to_H_end)
assert_equal(ival_B.asfreq('Min', 'S'), ival_B_to_T_start)
assert_equal(ival_B.asfreq('Min', 'E'), ival_B_to_T_end)
assert_equal(ival_B.asfreq('S', 'S'), ival_B_to_S_start)
assert_equal(ival_B.asfreq('S', 'E'), ival_B_to_S_end)
assert_equal(ival_B.asfreq('B'), ival_B)
def test_conv_daily(self):
# frequency conversion tests: from Business Frequency"
ival_D = Period(freq='D', year=2007, month=1, day=1)
ival_D_end_of_year = Period(freq='D', year=2007, month=12, day=31)
ival_D_end_of_quarter = Period(freq='D', year=2007, month=3, day=31)
ival_D_end_of_month = Period(freq='D', year=2007, month=1, day=31)
ival_D_end_of_week = Period(freq='D', year=2007, month=1, day=7)
ival_D_friday = Period(freq='D', year=2007, month=1, day=5)
ival_D_saturday = Period(freq='D', year=2007, month=1, day=6)
ival_D_sunday = Period(freq='D', year=2007, month=1, day=7)
ival_D_monday = Period(freq='D', year=2007, month=1, day=8)
ival_B_friday = Period(freq='B', year=2007, month=1, day=5)
ival_B_monday = Period(freq='B', year=2007, month=1, day=8)
ival_D_to_A = Period(freq='A', year=2007)
ival_Deoq_to_AJAN = Period(freq='A-JAN', year=2008)
ival_Deoq_to_AJUN = Period(freq='A-JUN', year=2007)
ival_Deoq_to_ADEC = Period(freq='A-DEC', year=2007)
ival_D_to_QEJAN = Period(freq="Q-JAN", year=2007, quarter=4)
ival_D_to_QEJUN = Period(freq="Q-JUN", year=2007, quarter=3)
ival_D_to_QEDEC = Period(freq="Q-DEC", year=2007, quarter=1)
ival_D_to_M = Period(freq='M', year=2007, month=1)
ival_D_to_W = Period(freq='WK', year=2007, month=1, day=7)
ival_D_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_D_to_H_end = Period(freq='H', year=2007, month=1, day=1,
hour=23)
ival_D_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_D_to_T_end = Period(freq='Min', year=2007, month=1, day=1,
hour=23, minute=59)
ival_D_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_D_to_S_end = Period(freq='S', year=2007, month=1, day=1,
hour=23, minute=59, second=59)
assert_equal(ival_D.asfreq('A'), ival_D_to_A)
assert_equal(ival_D_end_of_quarter.asfreq('A-JAN'),
ival_Deoq_to_AJAN)
assert_equal(ival_D_end_of_quarter.asfreq('A-JUN'),
ival_Deoq_to_AJUN)
assert_equal(ival_D_end_of_quarter.asfreq('A-DEC'),
ival_Deoq_to_ADEC)
assert_equal(ival_D_end_of_year.asfreq('A'), ival_D_to_A)
assert_equal(ival_D_end_of_quarter.asfreq('Q'), ival_D_to_QEDEC)
assert_equal(ival_D.asfreq("Q-JAN"), ival_D_to_QEJAN)
assert_equal(ival_D.asfreq("Q-JUN"), ival_D_to_QEJUN)
assert_equal(ival_D.asfreq("Q-DEC"), ival_D_to_QEDEC)
assert_equal(ival_D.asfreq('M'), ival_D_to_M)
assert_equal(ival_D_end_of_month.asfreq('M'), ival_D_to_M)
assert_equal(ival_D.asfreq('WK'), ival_D_to_W)
assert_equal(ival_D_end_of_week.asfreq('WK'), ival_D_to_W)
assert_equal(ival_D_friday.asfreq('B'), ival_B_friday)
assert_equal(ival_D_saturday.asfreq('B', 'S'), ival_B_friday)
assert_equal(ival_D_saturday.asfreq('B', 'E'), ival_B_monday)
assert_equal(ival_D_sunday.asfreq('B', 'S'), ival_B_friday)
assert_equal(ival_D_sunday.asfreq('B', 'E'), ival_B_monday)
assert_equal(ival_D.asfreq('H', 'S'), ival_D_to_H_start)
assert_equal(ival_D.asfreq('H', 'E'), ival_D_to_H_end)
assert_equal(ival_D.asfreq('Min', 'S'), ival_D_to_T_start)
assert_equal(ival_D.asfreq('Min', 'E'), ival_D_to_T_end)
assert_equal(ival_D.asfreq('S', 'S'), ival_D_to_S_start)
assert_equal(ival_D.asfreq('S', 'E'), ival_D_to_S_end)
assert_equal(ival_D.asfreq('D'), ival_D)
def test_conv_hourly(self):
# frequency conversion tests: from Hourly Frequency"
ival_H = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_H_end_of_year = Period(freq='H', year=2007, month=12, day=31,
hour=23)
ival_H_end_of_quarter = Period(freq='H', year=2007, month=3, day=31,
hour=23)
ival_H_end_of_month = Period(freq='H', year=2007, month=1, day=31,
hour=23)
ival_H_end_of_week = Period(freq='H', year=2007, month=1, day=7,
hour=23)
ival_H_end_of_day = Period(freq='H', year=2007, month=1, day=1,
hour=23)
ival_H_end_of_bus = Period(freq='H', year=2007, month=1, day=1,
hour=23)
ival_H_to_A = Period(freq='A', year=2007)
ival_H_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_H_to_M = Period(freq='M', year=2007, month=1)
ival_H_to_W = Period(freq='WK', year=2007, month=1, day=7)
ival_H_to_D = Period(freq='D', year=2007, month=1, day=1)
ival_H_to_B = Period(freq='B', year=2007, month=1, day=1)
ival_H_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_H_to_T_end = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=59)
ival_H_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_H_to_S_end = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=59, second=59)
assert_equal(ival_H.asfreq('A'), ival_H_to_A)
assert_equal(ival_H_end_of_year.asfreq('A'), ival_H_to_A)
assert_equal(ival_H.asfreq('Q'), ival_H_to_Q)
assert_equal(ival_H_end_of_quarter.asfreq('Q'), ival_H_to_Q)
assert_equal(ival_H.asfreq('M'), ival_H_to_M)
assert_equal(ival_H_end_of_month.asfreq('M'), ival_H_to_M)
assert_equal(ival_H.asfreq('WK'), ival_H_to_W)
assert_equal(ival_H_end_of_week.asfreq('WK'), ival_H_to_W)
assert_equal(ival_H.asfreq('D'), ival_H_to_D)
assert_equal(ival_H_end_of_day.asfreq('D'), ival_H_to_D)
assert_equal(ival_H.asfreq('B'), ival_H_to_B)
assert_equal(ival_H_end_of_bus.asfreq('B'), ival_H_to_B)
assert_equal(ival_H.asfreq('Min', 'S'), ival_H_to_T_start)
assert_equal(ival_H.asfreq('Min', 'E'), ival_H_to_T_end)
assert_equal(ival_H.asfreq('S', 'S'), ival_H_to_S_start)
assert_equal(ival_H.asfreq('S', 'E'), ival_H_to_S_end)
assert_equal(ival_H.asfreq('H'), ival_H)
def test_conv_minutely(self):
# frequency conversion tests: from Minutely Frequency"
ival_T = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_T_end_of_year = Period(freq='Min', year=2007, month=12, day=31,
hour=23, minute=59)
ival_T_end_of_quarter = Period(freq='Min', year=2007, month=3, day=31,
hour=23, minute=59)
ival_T_end_of_month = Period(freq='Min', year=2007, month=1, day=31,
hour=23, minute=59)
ival_T_end_of_week = Period(freq='Min', year=2007, month=1, day=7,
hour=23, minute=59)
ival_T_end_of_day = Period(freq='Min', year=2007, month=1, day=1,
hour=23, minute=59)
ival_T_end_of_bus = Period(freq='Min', year=2007, month=1, day=1,
hour=23, minute=59)
ival_T_end_of_hour = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=59)
ival_T_to_A = Period(freq='A', year=2007)
ival_T_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_T_to_M = Period(freq='M', year=2007, month=1)
ival_T_to_W = Period(freq='WK', year=2007, month=1, day=7)
ival_T_to_D = Period(freq='D', year=2007, month=1, day=1)
ival_T_to_B = Period(freq='B', year=2007, month=1, day=1)
ival_T_to_H = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_T_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_T_to_S_end = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=59)
assert_equal(ival_T.asfreq('A'), ival_T_to_A)
assert_equal(ival_T_end_of_year.asfreq('A'), ival_T_to_A)
assert_equal(ival_T.asfreq('Q'), ival_T_to_Q)
assert_equal(ival_T_end_of_quarter.asfreq('Q'), ival_T_to_Q)
assert_equal(ival_T.asfreq('M'), ival_T_to_M)
assert_equal(ival_T_end_of_month.asfreq('M'), ival_T_to_M)
assert_equal(ival_T.asfreq('WK'), ival_T_to_W)
assert_equal(ival_T_end_of_week.asfreq('WK'), ival_T_to_W)
assert_equal(ival_T.asfreq('D'), ival_T_to_D)
assert_equal(ival_T_end_of_day.asfreq('D'), ival_T_to_D)
assert_equal(ival_T.asfreq('B'), ival_T_to_B)
assert_equal(ival_T_end_of_bus.asfreq('B'), ival_T_to_B)
assert_equal(ival_T.asfreq('H'), ival_T_to_H)
assert_equal(ival_T_end_of_hour.asfreq('H'), ival_T_to_H)
assert_equal(ival_T.asfreq('S', 'S'), ival_T_to_S_start)
assert_equal(ival_T.asfreq('S', 'E'), ival_T_to_S_end)
assert_equal(ival_T.asfreq('Min'), ival_T)
def test_conv_secondly(self):
# frequency conversion tests: from Secondly Frequency"
ival_S = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_S_end_of_year = Period(freq='S', year=2007, month=12, day=31,
hour=23, minute=59, second=59)
ival_S_end_of_quarter = Period(freq='S', year=2007, month=3, day=31,
hour=23, minute=59, second=59)
ival_S_end_of_month = Period(freq='S', year=2007, month=1, day=31,
hour=23, minute=59, second=59)
ival_S_end_of_week = Period(freq='S', year=2007, month=1, day=7,
hour=23, minute=59, second=59)
ival_S_end_of_day = Period(freq='S', year=2007, month=1, day=1,
hour=23, minute=59, second=59)
ival_S_end_of_bus = Period(freq='S', year=2007, month=1, day=1,
hour=23, minute=59, second=59)
ival_S_end_of_hour = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=59, second=59)
ival_S_end_of_minute = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=59)
ival_S_to_A = Period(freq='A', year=2007)
ival_S_to_Q = | Period(freq='Q', year=2007, quarter=1) | pandas.tseries.period.Period |
import os
import pandas as pd
import torch
from detectron2.data import MetadataCatalog
from detectron2.evaluation.evaluator import DatasetEvaluator
from pycocotools.coco import COCO
from dataset_utils import register_polyp_datasets, dataset_annots
class GianaEvaulator(DatasetEvaluator):
def __init__(self, dataset_name, output_dir, thresholds=None, old_metric=False):
self.iou_thresh = 0.0
self.eval_mode = 'new'
self.dataset_name = dataset_name
self.dataset_folder = os.path.join("datasets", self.dataset_name)
coco_annot_file = os.path.join(self.dataset_folder, "annotations", dataset_annots[dataset_name])
self._coco_api = COCO(coco_annot_file)
self.output_folder = os.path.join(output_dir, "giana")
self.detection_folder = os.path.join(output_dir, "detection")
self.localization_folder = os.path.join(output_dir, "localization")
self.classification_folder = os.path.join(output_dir, "classification")
self.old_metric = old_metric
self.debug = False
if thresholds is None:
self.thresholds = [x / 10 for x in range(10)]
else:
self.thresholds = thresholds
self._partial_results = []
self.make_dirs()
self.classes_id = MetadataCatalog.get(dataset_name).get("thing_dataset_id_to_contiguous_id")
self.class_id_name = {v: k for k, v in
zip(MetadataCatalog.get(dataset_name).get("thing_classes"), self.classes_id.values())}
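    # Construction sketch (the dataset name is an assumption; any dataset registered in
    # dataset_annots works):
    #   evaluator = GianaEvaluator("cvc_clinic_test", output_dir="./output")
    #   evaluator.reset()   # accumulate per-image rows in _partial_results, then call evaluate()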
def make_dirs(self):
if not os.path.exists(self.output_folder):
os.makedirs(self.output_folder)
if not os.path.exists(self.detection_folder):
os.makedirs(self.detection_folder)
if not os.path.exists(self.localization_folder):
os.makedirs(self.localization_folder)
if not os.path.exists(self.classification_folder):
os.makedirs(self.classification_folder)
def reset(self):
self.results = pd.DataFrame(columns=["image", "detected", "localized", "classified", "score", "pred_box"])
self._partial_results = []
def evaluate(self):
if not self.debug:
self.results = pd.DataFrame(self._partial_results,
columns=["image", "detected", "localized", "classified", "score", "pred_box"])
print(len(self._partial_results))
print(self.results.groupby("image"))
print(self.results.groupby("image").count())
self.results[['sequence', 'frame']] = self.results.image.str.split("-", expand=True)
sequences = pd.unique(self.results.sequence)
dets = []
locs = []
classifs = []
avg_df_detection = pd.DataFrame(columns=["threshold", "TP", "FP", "TN", "FN"])
avg_df_localization = pd.DataFrame(columns=["threshold", "TP", "FP", "TN", "FN"])
avg_df_classification = pd.DataFrame(columns=["threshold", "TP", "FP", "TN", "FN"])
for sequence in sequences:
df_detection = pd.DataFrame(columns=["threshold", "TP", "FP", "TN", "FN", "RT"])
df_localization = pd.DataFrame(columns=["threshold", "TP", "FP", "TN", "FN", "RT"])
df_classification = pd.DataFrame(columns=["threshold", "TP", "FP", "TN", "FN"])
filtered = self.results[self.results.sequence == sequence]
filtered_det = self.results[self.results.sequence == sequence].drop_duplicates(subset="image")
for threshold in self.thresholds:
th_cond = (filtered.score >= threshold) | (filtered.score == -1)
over_threshold = filtered[th_cond]
under_threshold = filtered[~th_cond]
over_threshold_det = filtered_det[th_cond].drop_duplicates(subset="image")
under_threshold_det = filtered_det[~th_cond].drop_duplicates(subset="image")
det = over_threshold_det.detected.value_counts()
under_det = under_threshold_det.detected.value_counts()
det_tp = det.TP if "TP" in det.keys() else 0
det_fp = det.FP if "FP" in det.keys() else 0
det_tn = (det.TN if "TN" in det.keys() else 0) + (under_det.FP if "FP" in under_det.keys() else 0)
det_fn = (det.FN if "FN" in det.keys() else 0) + (under_det.TP if "TP" in under_det.keys() else 0)
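                # Detection reaction time (RT): frames between the first frame in which a polyp
                # appears and the first frame (at or after it) in which a polyp is detected.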
first_polyp = over_threshold_det[over_threshold_det.detected == "FN"].frame.apply(
lambda x: int(x.split(".")[0])).min()
first_det_polyp = over_threshold_det[over_threshold_det.detected == "TP"].frame.apply(
lambda x: int(x.split(".")[0]))
first_det_polyp = first_det_polyp[first_det_polyp >= first_polyp].min()
det_rt = first_det_polyp - first_polyp
self._add_row(df_detection, [threshold, det_tp, det_fp, det_tn, det_fn, det_rt])
loc = over_threshold.localized.value_counts()
under_loc = under_threshold.localized.value_counts()
loc_tp = loc.TP if "TP" in loc.keys() else 0
loc_fp = loc.FP if "FP" in loc.keys() else 0
loc_tn = (loc.TN if "TN" in loc.keys() else 0) + (under_loc.FP if "FP" in under_loc.keys() else 0)
loc_fn = (loc.FN if "FN" in loc.keys() else 0) + (under_loc.TP if "TP" in under_loc.keys() else 0)
first_polyp = over_threshold[over_threshold.localized == "FN"].frame.apply(
lambda x: int(x.split(".")[0])).min()
first_loc_polyp = over_threshold[over_threshold.localized == "TP"].frame.apply(
lambda x: int(x.split(".")[0]))
first_loc_polyp = first_loc_polyp[first_loc_polyp >= first_polyp].min()
loc_rt = first_loc_polyp - first_polyp
self._add_row(df_localization, [threshold, loc_tp, loc_fp, loc_tn, loc_fn, loc_rt])
clasif = over_threshold[over_threshold.localized == "TP"].classified.value_counts()
class_tp = clasif.TP if "TP" in clasif.keys() else 0
class_fp = clasif.FP if "FP" in clasif.keys() else 0
class_tn = clasif.TN if "TN" in clasif.keys() else 0
class_fn = clasif.FN if "FN" in clasif.keys() else 0
self._add_row(df_classification, [threshold, class_tp, class_fp, class_tn, class_fn])
df_detection.to_csv(
os.path.join(self.detection_folder, "d{}{}.csv".format(sequence, "_old" if self.old_metric else "")),
index=False)
df_localization.to_csv(
os.path.join(self.localization_folder, "l{}{}.csv".format(sequence, "_old" if self.old_metric else "")),
index=False)
df_classification.to_csv(os.path.join(self.classification_folder,
"c{}{}.csv".format(sequence, "_old" if self.old_metric else "")),
index=False)
dets.append(df_detection)
locs.append(df_localization)
classifs.append(df_classification)
print("computing Averages and aggregation metrics")
for det, loc, classif in zip(dets, locs, classifs):
avg_df_detection = pd.concat([avg_df_detection, det], ignore_index=True, sort=False)
avg_df_localization = pd.concat([avg_df_localization, loc], ignore_index=True, sort=False)
avg_df_classification = pd.concat([avg_df_classification, classif], ignore_index=True, sort=False)
self.compute_average_metrics(avg_df_detection, len(sequences), self.detection_folder)
self.compute_average_metrics(avg_df_localization, len(sequences), self.localization_folder)
self.compute_average_metrics(avg_df_classification, len(sequences), self.classification_folder)
self.results.to_csv(os.path.join(self.output_folder, "results{}.csv".format("_old" if self.old_metric else "")),
index=False)
def compute_average_metrics(self, df, sequences, save_folder):
df = df.groupby("threshold")
if "RT" in df.sum().columns:
stdRT = df.std().RT
df = df.sum()
df['mRT'] = df.RT.apply(lambda x: round(x / sequences, 2))
df['stdRT'] = stdRT.round(2)
else:
df = df.sum()
df = self._compute_aggregation_metrics(df)
df.to_csv(os.path.join(save_folder, "avg{}.csv".format("_old" if self.old_metric else "")), index=False)
def _compute_aggregation_metrics(self, df):
tp = df.TP
fp = df.FP
tn = df.TN
fn = df.FN
acc = (tp + tn) / (tp + fp + tn + fn)
pre = tp / (tp + fp)
rec = tp / (tp + fn)
f1core = 2 * pre * rec / (pre + rec)
df['accuracy'] = acc.round(4)
df["precision"] = pre.round(4)
df["recall"] = rec.round(4)
df["f1score"] = f1core.round(4)
return df
def _add_row(self, df, row):
df.loc[len(df)] = row
df.index += 1
df.reset_index(inplace=True, drop=True)
def process(self, input, output):
previous_len = len(self._partial_results)
for instance, output in zip(input, output):
input_image_id = instance['image_id']
instance_gt_annots = self._coco_api.loadAnns(self._coco_api.getAnnIds(imgIds=input_image_id))
im_name = os.path.basename(instance['file_name'])
fields = output["instances"].get_fields()
pred_boxes = fields['pred_boxes'] # xyxy
scores = fields['scores'].cpu().numpy()
pred_class = fields['pred_classes']
if instance_gt_annots:
# GT but not preds --> FN
if len(pred_boxes) == 0:
for annot_dict in instance_gt_annots:
row = [im_name, "FN", "FN", "non-eval", -1, "NA"]
self._partial_results += [row]
# GT and preds --> TP or FP
else:
det_out = "TP"
from detectron2.structures import Boxes, pairwise_iou, BoxMode
gt_boxes = torch.tensor([annot_dict['bbox'] for annot_dict in instance_gt_annots])
gt_boxes = BoxMode.convert(gt_boxes, BoxMode.XYWH_ABS, BoxMode.XYXY_ABS)
gt_boxes = Boxes(gt_boxes.to(pred_boxes.device))
ious = pairwise_iou(gt_boxes, pred_boxes)
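                    # `ious` has one row per ground-truth box and one column per predicted box;
                    # each GT row is matched against the predictions, and a prediction can be
                    # paired with at most one GT (tracked in `paired_preds`).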
paired_preds = []
for gt_idx, matches in enumerate(ious):
if matches.sum() == 0:
row = [im_name, "FN", "FN", "non-eval", -1, "NA"]
self._partial_results += [row]
else:
if self.eval_mode == "iou":
pred_idx = matches.argmax()
if pred_idx not in paired_preds:
paired_preds.append(pred_idx)
class_out = self._is_polyp_classified(pred_class[pred_idx], instance_gt_annots[gt_idx]['category_id'])
row = [im_name, det_out, "TP", class_out, scores[pred_idx], pred_boxes[pred_idx]]
self._partial_results += [row]
else:
row = [im_name, det_out, "FP", "non-eval", scores[pred_idx], pred_boxes[pred_idx]]
self._partial_results += [row]
else:
for posible_match in matches.nonzero():
gt_box = gt_boxes.tensor[gt_idx]
gt_x1, gt_y1, gt_x2, gt_y2 = gt_box
pred_box = pred_boxes.tensor[posible_match]
pred_x1, pred_y1, pred_x2, pred_y2 = pred_box.squeeze()
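                                    # 'old' criterion: the prediction centroid must fall inside the
                                    # GT box; otherwise the GT centroid must fall inside the
                                    # predicted box.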
if self.eval_mode == 'old':
pred_cx, pred_cy = (pred_x1 + (pred_x2 - pred_x1) / 2), (pred_y1 + (pred_y2 - pred_y1) / 2)
eval_condition = (gt_x1 < pred_cx < gt_x2) and (gt_y1 < pred_cy < gt_y2)
else:
gt_cx, gt_cy = (gt_x1 + (gt_x2 - gt_x1) / 2), (gt_y1 + (gt_y2 - gt_y1) / 2)
eval_condition = (pred_x1 < gt_cx < pred_x2) and (pred_y1 < gt_cy < pred_y2)
if eval_condition:
if posible_match not in paired_preds:
paired_preds.append(posible_match)
class_out = self._is_polyp_classified(pred_class[posible_match],instance_gt_annots[gt_idx]['category_id'])
row = [im_name, det_out, "TP", class_out, scores[posible_match], pred_boxes[posible_match]]
self._partial_results += [row]
else:
row = [im_name, det_out, "FP", "non-eval", scores[posible_match], pred_boxes[posible_match]]
self._partial_results += [row]
# for pred_box, pred_score, pred_classif in zip(pred_boxes, scores, pred_class):
# pred_x1, pred_y1, pred_x2, pred_y2 = pred_box
# if instance_gt_annots:
# for annot_dict in instance_gt_annots:
# gt_bbox = annot_dict['bbox'] # xywh
# gt_bbox[2] += gt_bbox[0]
# gt_bbox[3] += gt_bbox[1] # xyxy
#
# gt_x1, gt_y1, gt_x2, gt_y2 = gt_bbox
#
# eval_condition = self._is_localized(gt_bbox, gt_x1, gt_x2, gt_y1, gt_y2, pred_box,
# pred_x1, pred_x2, pred_y1, pred_y2)
#
# if eval_condition:
# class_out = self._is_polyp_classified(pred_classif, annot_dict['category_id'])
#
# row = [im_name, det_out, "TP", class_out, pred_score, pred_box]
# self._partial_results += [row]
# instance_gt_annots.remove(annot_dict)
# break
#
# else:
# row = [im_name, "FP", "FP", "non-eval", pred_score, pred_box]
# self._partial_results += [row]
else:
# No GT but Preds --> FP
if len(pred_boxes) > 0:
for pred_box, pred_score, pred_classif in zip(pred_boxes, scores, pred_class):
row = [im_name, "FP", "FP", "non-eval", pred_score, pred_box]
self._partial_results += [row]
# No GT and no Preds --> TN
else:
row = [im_name, "TN", "TN", "non-eval", -1, "NA"]
self._partial_results += [row]
def _is_localized(self, gt_bbox, gt_x1, gt_x2, gt_y1, gt_y2, pred_box, pred_x1, pred_x2, pred_y1, pred_y2):
if self.eval_mode == 'iou':
eval_condition = bb_intersection_over_union(gt_bbox,
pred_box.tensor.numpy()) > self.iou_thresh
elif self.eval_mode == 'old':
pred_cx, pred_cy = (pred_x1 + (pred_x2 - pred_x1) / 2), (
pred_y1 + (pred_y2 - pred_y1) / 2)
eval_condition = (gt_x1 < pred_cx < gt_x2) and (gt_y1 < pred_cy < gt_y2)
else:
gt_cx, gt_cy = (gt_x1 + (gt_x2 - gt_x1) / 2), (gt_y1 + (gt_y2 - gt_y1) / 2)
eval_condition = (pred_x1 < gt_cx < pred_x2) and (pred_y1 < gt_cy < pred_y2)
return eval_condition
@staticmethod
def _is_polyp_classified(pred, gt):
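        # pred and gt are binary class ids: both 1 -> TP, both 0 -> TN,
        # pred 1 / gt 0 -> FP, pred 0 / gt 1 -> FN.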
if pred + gt == 2:
return "TP"
if pred + gt == 0:
return "TN"
if pred == 1:
return "FP"
else:
return "FN"
def offline_evaluation(dataset_name, output_dir, results_file):
evaluator = GianaEvaulator(dataset_name, output_dir)
    evaluator.results = pd.read_csv(results_file)
__all__ = ['ANTsImage',
'LabelImage',
'copy_image_info',
'set_origin',
'get_origin',
'set_direction',
'get_direction',
'set_spacing',
'get_spacing',
'image_physical_space_consistency',
'image_type_cast',
'allclose']
import os
import numpy as np
import pandas as pd
try:
from functools import partialmethod
HAS_PY3 = True
except:
HAS_PY3 = False
import inspect
from .. import registration, segmentation, utils, viz
from . import ants_image_io as iio2
_supported_ptypes = {'unsigned char', 'unsigned int', 'float', 'double'}
_supported_dtypes = {'uint8', 'uint32', 'float32', 'float64'}
_itk_to_npy_map = {
'unsigned char': 'uint8',
'unsigned int': 'uint32',
'float': 'float32',
'double': 'float64'}
_npy_to_itk_map = {
'uint8': 'unsigned char',
'uint32':'unsigned int',
'float32': 'float',
'float64': 'double'}
class ANTsImage(object):
def __init__(self, pixeltype='float', dimension=3, components=1, pointer=None, is_rgb=False, label_image=None):
"""
Initialize an ANTsImage
Arguments
---------
pixeltype : string
ITK pixeltype of image
dimension : integer
            number of image dimensions. Does NOT include the components dimension
components : integer
number of pixel components in the image
pointer : py::capsule (optional)
pybind11 capsule holding the pointer to the underlying ITK image object
label_image : LabelImage
a discrete label image for mapping locations to atlas regions
"""
## Attributes which cant change without creating a new ANTsImage object
self.pointer = pointer
self.pixeltype = pixeltype
self.dimension = dimension
self.components = components
self.has_components = self.components > 1
self.dtype = _itk_to_npy_map[self.pixeltype]
self.is_rgb = is_rgb
self._pixelclass = 'vector' if self.has_components else 'scalar'
self._shortpclass = 'V' if self._pixelclass == 'vector' else ''
if is_rgb:
self._pixelclass = 'rgb'
self._shortpclass = 'RGB'
self._libsuffix = '%s%s%i' % (self._shortpclass, utils.short_ptype(self.pixeltype), self.dimension)
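        # _libsuffix encodes the pixel class ('V'/'RGB'/''), a short pixel-type code and the image
        # dimension; it is used below to look up the matching compiled library functions via
        # utils.get_lib_fn('...%s' % self._libsuffix).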
self.shape = utils.get_lib_fn('getShape%s'%self._libsuffix)(self.pointer)
self.physical_shape = tuple([round(sh*sp,3) for sh,sp in zip(self.shape, self.spacing)])
if label_image is not None:
if not isinstance(label_image, LabelImage):
raise ValueError('label_image argument must be a LabelImage type')
self.label_image = label_image
self._array = None
@property
def spacing(self):
"""
Get image spacing
Returns
-------
tuple
"""
libfn = utils.get_lib_fn('getSpacing%s'%self._libsuffix)
return libfn(self.pointer)
def set_spacing(self, new_spacing):
"""
Set image spacing
Arguments
---------
new_spacing : tuple or list
updated spacing for the image.
should have one value for each dimension
Returns
-------
None
"""
if not isinstance(new_spacing, (tuple, list)):
raise ValueError('arg must be tuple or list')
if len(new_spacing) != self.dimension:
raise ValueError('must give a spacing value for each dimension (%i)' % self.dimension)
libfn = utils.get_lib_fn('setSpacing%s'%self._libsuffix)
libfn(self.pointer, new_spacing)
@property
def origin(self):
"""
Get image origin
Returns
-------
tuple
"""
libfn = utils.get_lib_fn('getOrigin%s'%self._libsuffix)
return libfn(self.pointer)
def set_origin(self, new_origin):
"""
Set image origin
Arguments
---------
new_origin : tuple or list
updated origin for the image.
should have one value for each dimension
Returns
-------
None
"""
if not isinstance(new_origin, (tuple, list)):
raise ValueError('arg must be tuple or list')
if len(new_origin) != self.dimension:
raise ValueError('must give a origin value for each dimension (%i)' % self.dimension)
libfn = utils.get_lib_fn('setOrigin%s'%self._libsuffix)
libfn(self.pointer, new_origin)
@property
def direction(self):
"""
Get image direction
Returns
-------
tuple
"""
libfn = utils.get_lib_fn('getDirection%s'%self._libsuffix)
return libfn(self.pointer)
def set_direction(self, new_direction):
"""
Set image direction
Arguments
---------
new_direction : numpy.ndarray or tuple or list
updated direction for the image.
should have one value for each dimension
Returns
-------
None
"""
if isinstance(new_direction, (tuple,list)):
new_direction = np.asarray(new_direction)
if not isinstance(new_direction, np.ndarray):
raise ValueError('arg must be np.ndarray or tuple or list')
if len(new_direction) != self.dimension:
raise ValueError('must give a origin value for each dimension (%i)' % self.dimension)
libfn = utils.get_lib_fn('setDirection%s'%self._libsuffix)
libfn(self.pointer, new_direction)
@property
def orientation(self):
if self.dimension == 3:
return self.get_orientation()
else:
return None
def view(self, single_components=False):
"""
        Get a numpy array providing direct, shared access to the image data.
IMPORTANT: If you alter the view, then the underlying image data
will also be altered.
Arguments
---------
single_components : boolean (default is False)
if True, keep the extra component dimension in returned array even
if image only has one component (i.e. self.has_components == False)
Returns
-------
ndarray
"""
if self.is_rgb:
img = self.rgb_to_vector()
else:
img = self
dtype = img.dtype
shape = img.shape[::-1]
if img.has_components or (single_components == True):
shape = list(shape) + [img.components]
libfn = utils.get_lib_fn('toNumpy%s'%img._libsuffix)
memview = libfn(img.pointer)
return np.asarray(memview).view(dtype = dtype).reshape(shape).view(np.ndarray).T
def numpy(self, single_components=False):
"""
Get a numpy array copy representing the underlying image data. Altering
this ndarray will have NO effect on the underlying image data.
Arguments
---------
single_components : boolean (default is False)
if True, keep the extra component dimension in returned array even
if image only has one component (i.e. self.has_components == False)
Returns
-------
ndarray
"""
array = np.array(self.view(single_components=single_components), copy=True, dtype=self.dtype)
if self.has_components or (single_components == True):
array = np.rollaxis(array, 0, self.dimension+1)
return array
def clone(self, pixeltype=None):
"""
Create a copy of the given ANTsImage with the same data and info, possibly with
a different data type for the image data. Only supports casting to
uint8 (unsigned char), uint32 (unsigned int), float32 (float), and float64 (double)
Arguments
---------
dtype: string (optional)
if None, the dtype will be the same as the cloned ANTsImage. Otherwise,
the data will be cast to this type. This can be a numpy type or an ITK
type.
Options:
'unsigned char' or 'uint8',
'unsigned int' or 'uint32',
'float' or 'float32',
'double' or 'float64'
Returns
-------
ANTsImage
"""
if pixeltype is None:
pixeltype = self.pixeltype
if pixeltype not in _supported_ptypes:
raise ValueError('Pixeltype %s not supported. Supported types are %s' % (pixeltype, _supported_ptypes))
if self.has_components and (not self.is_rgb):
comp_imgs = utils.split_channels(self)
comp_imgs_cloned = [comp_img.clone(pixeltype) for comp_img in comp_imgs]
return utils.merge_channels(comp_imgs_cloned)
else:
p1_short = utils.short_ptype(self.pixeltype)
p2_short = utils.short_ptype(pixeltype)
ndim = self.dimension
fn_suffix = '%s%i%s%i' % (p1_short,ndim,p2_short,ndim)
libfn = utils.get_lib_fn('antsImageClone%s'%fn_suffix)
pointer_cloned = libfn(self.pointer)
return ANTsImage(pixeltype=pixeltype,
dimension=self.dimension,
components=self.components,
is_rgb=self.is_rgb,
pointer=pointer_cloned)
# pythonic alias for `clone` is `copy`
copy = clone
def astype(self, dtype):
"""
Cast & clone an ANTsImage to a given numpy datatype.
Map:
uint8 : unsigned char
uint32 : unsigned int
float32 : float
float64 : double
"""
if dtype not in _supported_dtypes:
raise ValueError('Datatype %s not supported. Supported types are %s' % (dtype, _supported_dtypes))
pixeltype = _npy_to_itk_map[dtype]
return self.clone(pixeltype)
def new_image_like(self, data):
"""
Create a new ANTsImage with the same header information, but with
a new image array.
Arguments
---------
data : ndarray or py::capsule
New array or pointer for the image.
It must have the same shape as the current
image data.
Returns
-------
ANTsImage
"""
if not isinstance(data, np.ndarray):
raise ValueError('data must be a numpy array')
if not self.has_components:
if data.shape != self.shape:
raise ValueError('given array shape (%s) and image array shape (%s) do not match' % (data.shape, self.shape))
else:
if (data.shape[-1] != self.components) or (data.shape[:-1] != self.shape):
raise ValueError('given array shape (%s) and image array shape (%s) do not match' % (data.shape[1:], self.shape))
return iio2.from_numpy(data, origin=self.origin,
spacing=self.spacing, direction=self.direction,
has_components=self.has_components)
def to_file(self, filename):
"""
Write the ANTsImage to file
Args
----
filename : string
filepath to which the image will be written
"""
filename = os.path.expanduser(filename)
libfn = utils.get_lib_fn('toFile%s'%self._libsuffix)
libfn(self.pointer, filename)
to_filename = to_file
def apply(self, fn):
"""
Apply an arbitrary function to ANTsImage.
Args
----
fn : python function or lambda
function to apply to ENTIRE image at once
Returns
-------
ANTsImage
image with function applied to it
"""
this_array = self.numpy()
new_array = fn(this_array)
return self.new_image_like(new_array)
def as_label_image(self, label_info=None):
return LabelImage(image=self, label_info=label_info)
## NUMPY FUNCTIONS ##
def abs(self, axis=None):
""" Return absolute value of image """
return np.abs(self.numpy())
def mean(self, axis=None):
""" Return mean along specified axis """
return self.numpy().mean(axis=axis)
def median(self, axis=None):
""" Return median along specified axis """
return np.median(self.numpy(), axis=axis)
def std(self, axis=None):
""" Return std along specified axis """
return self.numpy().std(axis=axis)
def sum(self, axis=None, keepdims=False):
""" Return sum along specified axis """
return self.numpy().sum(axis=axis, keepdims=keepdims)
def min(self, axis=None):
""" Return min along specified axis """
return self.numpy().min(axis=axis)
def max(self, axis=None):
""" Return max along specified axis """
return self.numpy().max(axis=axis)
def range(self, axis=None):
""" Return range tuple along specified axis """
return (self.min(axis=axis), self.max(axis=axis))
def argmin(self, axis=None):
""" Return argmin along specified axis """
return self.numpy().argmin(axis=axis)
def argmax(self, axis=None):
""" Return argmax along specified axis """
return self.numpy().argmax(axis=axis)
def argrange(self, axis=None):
""" Return argrange along specified axis """
amin = self.argmin(axis=axis)
amax = self.argmax(axis=axis)
if axis is None:
return (amin, amax)
else:
return np.stack([amin, amax]).T
def flatten(self):
""" Flatten image data """
return self.numpy().flatten()
def nonzero(self):
""" Return non-zero indices of image """
return self.numpy().nonzero()
def unique(self, sort=False):
""" Return unique set of values in image """
unique_vals = np.unique(self.numpy())
if sort:
unique_vals = np.sort(unique_vals)
return unique_vals
## OVERLOADED OPERATORS ##
def __add__(self, other):
this_array = self.numpy()
if isinstance(other, ANTsImage):
if not image_physical_space_consistency(self, other):
raise ValueError('images do not occupy same physical space')
other = other.numpy()
new_array = this_array + other
return self.new_image_like(new_array)
def __sub__(self, other):
this_array = self.numpy()
if isinstance(other, ANTsImage):
if not image_physical_space_consistency(self, other):
raise ValueError('images do not occupy same physical space')
other = other.numpy()
new_array = this_array - other
return self.new_image_like(new_array)
def __mul__(self, other):
this_array = self.numpy()
if isinstance(other, ANTsImage):
if not image_physical_space_consistency(self, other):
raise ValueError('images do not occupy same physical space')
other = other.numpy()
new_array = this_array * other
return self.new_image_like(new_array)
def __truediv__(self, other):
this_array = self.numpy()
if isinstance(other, ANTsImage):
if not image_physical_space_consistency(self, other):
raise ValueError('images do not occupy same physical space')
other = other.numpy()
new_array = this_array / other
return self.new_image_like(new_array)
def __pow__(self, other):
this_array = self.numpy()
if isinstance(other, ANTsImage):
if not image_physical_space_consistency(self, other):
raise ValueError('images do not occupy same physical space')
other = other.numpy()
new_array = this_array ** other
return self.new_image_like(new_array)
def __gt__(self, other):
this_array = self.numpy()
if isinstance(other, ANTsImage):
if not image_physical_space_consistency(self, other):
raise ValueError('images do not occupy same physical space')
other = other.numpy()
new_array = this_array > other
return self.new_image_like(new_array.astype('uint8'))
def __ge__(self, other):
this_array = self.numpy()
if isinstance(other, ANTsImage):
if not image_physical_space_consistency(self, other):
raise ValueError('images do not occupy same physical space')
other = other.numpy()
new_array = this_array >= other
return self.new_image_like(new_array.astype('uint8'))
def __lt__(self, other):
this_array = self.numpy()
if isinstance(other, ANTsImage):
if not image_physical_space_consistency(self, other):
raise ValueError('images do not occupy same physical space')
other = other.numpy()
new_array = this_array < other
return self.new_image_like(new_array.astype('uint8'))
def __le__(self, other):
this_array = self.numpy()
if isinstance(other, ANTsImage):
if not image_physical_space_consistency(self, other):
raise ValueError('images do not occupy same physical space')
other = other.numpy()
new_array = this_array <= other
return self.new_image_like(new_array.astype('uint8'))
def __eq__(self, other):
this_array = self.numpy()
if isinstance(other, ANTsImage):
if not image_physical_space_consistency(self, other):
raise ValueError('images do not occupy same physical space')
other = other.numpy()
new_array = this_array == other
return self.new_image_like(new_array.astype('uint8'))
def __ne__(self, other):
this_array = self.numpy()
if isinstance(other, ANTsImage):
if not image_physical_space_consistency(self, other):
raise ValueError('images do not occupy same physical space')
other = other.numpy()
new_array = this_array != other
return self.new_image_like(new_array.astype('uint8'))
def __getitem__(self, idx):
if self._array is None:
self._array = self.numpy()
if isinstance(idx, ANTsImage):
if not image_physical_space_consistency(self, idx):
raise ValueError('images do not occupy same physical space')
return self._array.__getitem__(idx.numpy().astype('bool'))
else:
return self._array.__getitem__(idx)
def __setitem__(self, idx, value):
arr = self.view()
if isinstance(idx, ANTsImage):
if not image_physical_space_consistency(self, idx):
raise ValueError('images do not occupy same physical space')
arr.__setitem__(idx.numpy().astype('bool'), value)
else:
arr.__setitem__(idx, value)
def __repr__(self):
if self.dimension == 3:
s = 'ANTsImage ({})\n'.format(self.orientation)
else:
s = 'ANTsImage\n'
s = s +\
'\t {:<10} : {} ({})\n'.format('Pixel Type', self.pixeltype, self.dtype)+\
'\t {:<10} : {}{}\n'.format('Components', self.components, ' (RGB)' if 'RGB' in self._libsuffix else '')+\
'\t {:<10} : {}\n'.format('Dimensions', self.shape)+\
'\t {:<10} : {}\n'.format('Spacing', tuple([round(s,4) for s in self.spacing]))+\
'\t {:<10} : {}\n'.format('Origin', tuple([round(o,4) for o in self.origin]))+\
'\t {:<10} : {}\n'.format('Direction', np.round(self.direction.flatten(),4))
return s
if HAS_PY3:
# Set partial class methods for any functions which take an ANTsImage as the first argument
for k, v in utils.__dict__.items():
if callable(v):
args = inspect.getargspec(getattr(utils,k)).args
if (len(args) > 0) and (args[0] in {'img','image'}):
setattr(ANTsImage, k, partialmethod(v))
for k, v in registration.__dict__.items():
if callable(v):
args = inspect.getargspec(getattr(registration,k)).args
if (len(args) > 0) and (args[0] in {'img','image'}):
setattr(ANTsImage, k, partialmethod(v))
for k, v in segmentation.__dict__.items():
if callable(v):
args = inspect.getargspec(getattr(segmentation,k)).args
if (len(args) > 0) and (args[0] in {'img','image'}):
setattr(ANTsImage, k, partialmethod(v))
for k, v in viz.__dict__.items():
if callable(v):
args = inspect.getargspec(getattr(viz,k)).args
if (len(args) > 0) and (args[0] in {'img','image'}):
setattr(ANTsImage, k, partialmethod(v))
class Dictlist(dict):
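    """dict subclass that accumulates every value assigned to a key into a list.

    >>> d = Dictlist()
    >>> d['a'] = 1
    >>> d['a'] = 2
    >>> d
    {'a': [1, 2]}
    """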
def __setitem__(self, key, value):
try:
self[key]
except KeyError:
super(Dictlist, self).__setitem__(key, [])
self[key].append(value)
class LabelImage(ANTsImage):
"""
A LabelImage is a special class of ANTsImage which has discrete values
and string labels or other metadata (e.g. another string label such as the
"lobe" of the region) associated with each of the discrete values.
A canonical example of a LabelImage is a brain label_image or parcellation.
This class provides convenient functionality for manipulating and visualizing
images where you have real values associated with aggregated image regions (e.g.
if you have cortical thickness values associated with brain regions)
Commonly-used functionality for LabelImage types:
- create publication-quality figures of an label_image
Nomenclature
------------
- key : a string representing the name of the associated index in the atlas image
        - e.g. if the index is 1001, the key may be 'InferiorTemporalGyrus'
- value : an integer value in the atlas image
    - metakey : a string representing one of the possible sets of label keys
- e.g. 'Lobes' or 'Regions'
Notes
-----
    - indexing works by creating a separate dict for each metakey
"""
def __init__(self, label_image, label_info=None, template=None):
"""
Initialize a LabelImage
ANTsR function: N/A
Arguments
---------
label_image : ANTsImage
discrete (integer) image as label_image
label_info : dict or pandas.DataFrame
mapping between discrete values in `image` and string names
- if dict, the keys should be the discrete integer label values
and the values should be the label names or another dict with
any metadata
- if pd.DataFrame, the index (df.index) should be the discrete integer
label values and the other column(s) should be the label names and
any metadata
template : ANTsImage
default real-valued image to use for plotting or creating new images.
This image should be in the same space as the `label_image` image and the
two should be aligned.
Example
-------
>>> import ants
>>> square = np.zeros((20,20))
>>> square[:10,:10] = 0
>>> square[:10,10:] = 1
>>> square[10:,:10] = 2
>>> square[10:,10:] = 3
>>> img = ants.from_numpy(square).astype('uint8')
        >>> label_dict = {0: 'TopLeft', 1: 'TopRight', 2: 'BottomLeft', 3: 'BottomRight'}
        >>> label_image = ants.LabelImage(label_image=img, label_info=label_dict)
"""
if label_image.pixeltype not in {'unsigned char', 'unsigned int'}:
raise ValueError('Label images must have discrete pixeltype - got %s' % label_image.pixeltype)
if label_image.components > 1:
raise ValueError('Label images must have only one component - got %i' % label_image.components)
if label_info is None:
label_info = {k:'Label%i'%k for k in range(len(label_image.unique()))}
if isinstance(label_info, pd.DataFrame):
pass
elif isinstance(label_info, dict):
if isinstance(label_info[list(label_info.keys())[0]], dict):
                label_info = pd.DataFrame(label_info)
import sys
import re
import os
import pandas as pd
pd.set_option('display.max_columns', 500)
from matplotlib import pyplot as plt
import seaborn as sns
import calendar
from collections import Counter
def start_bot():
path = 'chat.txt'
loop = os.path.isfile(path)
while loop == False:
path = input('Please type the file to analyze. Ex: chat.txt >>>')
if path == 'exit':
sys.exit()
loop = os.path.isfile(path)
if loop == False:
print('There is no file named like that. Please try again or type exit to close.')
try:
os.makedirs('whatsapp')
except:
        print('Overwriting folder...')
return path
def opener(p):
f = open(p, encoding='utf-8')
text = f.read()
return text
def extract_time(text):
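    # WhatsApp export lines start with a prefix like '31/12/20 23:59 - User: message';
    # the pattern below matches that prefix, with the separators matched loosely by '.'.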
date_time = re.findall('\d{1,2}.\d{1,2}.\d{2}.\d{1,2}.\d{1,2}.-.+:', text)
date = [i.split(' ')[0] for i in date_time]
time = [i.split(' ')[1] for i in date_time]
return date, time
def extract_messages(text):
chat = re.split('\d{1,2}.\d{1,2}.\d{2}.\d{1,2}.\d{1,2}.-.+:', text)[1:]
users = re.findall('\d{1,2}.\d{1,2}.\d{2}.\d{1,2}.\d{1,2}.-.+:', text)
users = [i.split(':')[1] for i in users]
users = [i.split('-')[-1] for i in users]
users = [i.strip() for i in users]
return chat, users
def create_table(path):
whatsapp = opener(path)
date, time = extract_time(whatsapp)
messages, users = extract_messages(whatsapp)
whatsapp_df = pd.DataFrame({'date': date, 'time': time, 'user': users, 'message': messages})
return(whatsapp_df)
def erase_users(users):
for us in users:
df.drop(df[df.user == us].index, axis=0, inplace=True)
def text2words(text):
def no_accents(txt):
txt = re.sub('[àáä]','a', txt)
txt = re.sub('[éëè]','e', txt)
txt = re.sub('[íïì]','i', txt)
txt = re.sub('[óöò]','o', txt)
txt = re.sub('[úüù]','u', txt)
return txt
text = re.sub('<Multimedia omitido>', '', text)
text = re.sub('[^\w]', ' ', text)
text = re.sub('[ \t\n]', ' ', text)
text = re.sub('[\d]', ' ', text)
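    # drop laughter strings such as 'jaja'/'jajaja'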
text = re.sub('[ja]{2,10}', '', text)
text = text.lower()
text = no_accents(text)
words = text.split()
words = [i for i in words if len(i)>2]
return words
def words_rank(df):
#all words counted
all_messages = df.groupby('user').message.sum().reset_index()
#create dataframe
dfs = []
for i in list(all_messages.user):
txt = all_messages[all_messages.user == i].message.reset_index(drop=True)[0]
user_words = text2words(txt)
counted = Counter(user_words)
df_words = pd.DataFrame({'user': [i]*len(list(counted.keys())),'word': list(counted.keys()), 'times_repeated': list(counted.values())})
dfs.append(df_words)
    df_words_users = pd.concat(dfs)
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 22 12:05:22 2017
@author: rgryan
"""
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import glob
import datetime
sh = False # Plotting the scale height info?
zc = False # Plotting the zero height concentration info?
re = True # Plotting the a.p. relative error info?
op = False # Plotting a.p. aerosol optical properties info?
path = 'E:\\Sciatran2\\AEROSOL_RETRIEVAL_v-1-5\\Campaign\\'
date = '20170307'
time = '130130'
startdate = datetime.datetime(2017, 3, 7, 6)
enddate = datetime.datetime(2017, 3, 7, 20)
tests = ['t103', 't104', 't105', 't106','t102', 't107', 't108', 't109', 't110']
dates = ['20170307','20170308', '20170309']
scale_height = [0.2, 0.4, 0.6, 0.8, 1.0,1.2]
zconc = [0.02, 0.06, 0.08, 0.1, 0.12]
relerror = [0.1, 0.25, 0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0]
opprop = [0.3, 0.8, 1.29, 1.8]
values = [0.1, 0.25, 0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0]
colours = ['red', 'orange', 'yellow', 'green', 'lightseagreen',
'skyblue', 'mediumblue', 'midnightblue',
'darkviolet', 'darkmagenta', 'magenta', 'pink']
mm_rms = []
aod_ave = []
aod_vals = pd.DataFrame()
aod_err_vals = pd.DataFrame()
prof_vals = pd.DataFrame()
prof_err_vals = pd.DataFrame()
aodErr_ave = []
aodPErr_ave = []
csq_ave = []
dofs_ave = []
for test in tests:
mmfilelist = []
aodfilelist = []
for date in dates:
fullpath = path+'aer_retr_7-9March2017_'+test+'\\'+date+'\\'
# Read in the measured and retrieved values
mmfile_ = pd.read_csv(fullpath+'general/meas_'+date+'.dat', delim_whitespace=True,
parse_dates=[['date', 'time']], dayfirst=True)
#mmfile_ = mmfile_.sort_values('date_time')
#mmfile_ = mmfile_.drop_duplicates(subset='date_time')
mmfile_ = mmfile_[mmfile_['elev']<80]
mmfile_ = mmfile_[mmfile_['O4retr']>0] # remove negative retrievals (not physical)
mmfilelist.append(mmfile_)
# Read in the retrieved AOD and errors
aodfile_ = pd.read_csv(fullpath+'general/retrieval_'+date+'.dat', delim_whitespace=True,
parse_dates=[['Date', 'Time']], dayfirst=True)
#aodfile_ = aodfile_.sort_values('Date_Time')
#aodfile_ = aodfile_.drop_duplicates(subset='Date_Time')
aodfile_ = aodfile_[aodfile_['AOT361']<0.225] # remove errors where AOD too high to be realistic
aodfile_ = aodfile_[aodfile_['AOT361']>0] # and remove negative AOD values
aodfilelist.append(aodfile_)
# Section to deal with AOD values and errors
aodfile = pd.concat(aodfilelist)
aodfile.reset_index(inplace=True)
aod_ave.append(aodfile['AOT361'].mean())
aodErr_ave.append(aodfile['err_AOT361'].mean())
aodfile['percentError'] = 100*(aodfile['err_AOT361']/aodfile['AOT361'])
aodPErr_ave.append(aodfile['percentError'].mean())
# The chi squared
csq_ave.append(aodfile['chisq'].mean())
# Section to calculate RMS value for (O4meas - O4retr)
mmfile = pd.concat(mmfilelist)
mmfile.reset_index(inplace=True)
x = np.array((mmfile['elev']))
y = np.array((mmfile['O4meas']))
y1 = np.array((mmfile['O4retr']))
err = np.array((mmfile['err_O4meas']))
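    # RMS of the (measured - retrieved) O4 differences for this test configuration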
ss = [(((y[i])-(y1[i]))**2) for i in np.arange(len(y))]
rms = np.sqrt(sum(ss)/len(ss))
mm_rms.append(rms)
# Save the daily aod diurnal profile, and associated errors
aod_vals[test] = aodfile['AOT361']
aod_err_vals[test] = aodfile['err_AOT361']
# Averaging kernels
akfiles = glob.glob(fullpath+'av_kernels/*.dat')
dofs_ = []
for file in akfiles:
akf = pd.read_csv(file, delim_whitespace=True)
ak_formatrix = akf.iloc[0:20,5:25]
ak_matrix = ak_formatrix.as_matrix()
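        # degrees of freedom for signal (DOFS) = trace of the averaging-kernel matrix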
dofs_.append(np.trace(ak_matrix))
dofs_ave.append((sum(dofs_)/float(len(dofs_))))
# Also plot specific averaging kernels from one measurement time
akfile = pd.read_csv(fullpath+'av_kernels/avk_ext_'+date+'_'+time+'.dat',
delim_whitespace=True)
ak_formatrix = akfile.iloc[0:20,5:25]
ak_matrix = ak_formatrix.as_matrix()
dofs = np.trace(ak_matrix)
xlim=[-0.1,1.]
fontsize=16
figsize=(3.5,3.5)
akp = akfile.plot(x='AVK_0.1km', y='z', color='red', xlim=xlim,
figsize=figsize, fontsize=fontsize)
akfile.plot(x='AVK_0.3km', y='z', ax=akp, color='orangered', xlim=xlim,
figsize=figsize, fontsize=fontsize)
akfile.plot(x='AVK_0.5km', y='z', ax=akp, color='orange', xlim=xlim,
figsize=figsize, fontsize=fontsize)
akfile.plot(x='AVK_0.7km', y='z', ax=akp, color='yellow', xlim=xlim,
figsize=figsize, fontsize=fontsize)
akfile.plot(x='AVK_0.9km', y='z', ax=akp, color='greenyellow', xlim=xlim,
figsize=figsize, fontsize=fontsize)
akfile.plot(x='AVK_1.1km', y='z', ax=akp, color='limegreen', xlim=xlim,
figsize=figsize, fontsize=fontsize)
akfile.plot(x='AVK_1.3km', y='z', ax=akp, color='green', xlim=xlim,
figsize=figsize, fontsize=fontsize)
akfile.plot(x='AVK_1.5km', y='z', ax=akp, color='lightseagreen', xlim=xlim,
figsize=figsize, fontsize=fontsize)
akfile.plot(x='AVK_1.7km', y='z', ax=akp, color='aqua', xlim=xlim,
figsize=figsize, fontsize=fontsize)
akfile.plot(x='AVK_1.9km', y='z', ax=akp, color='mediumaquamarine', xlim=xlim,
figsize=figsize, fontsize=fontsize)
akfile.plot(x='AVK_2.1km', y='z', ax=akp, color='mediumturquoise', xlim=xlim,
figsize=figsize, fontsize=fontsize)
akfile.plot(x='AVK_2.3km', y='z', ax=akp, color='powderblue', xlim=xlim,
figsize=figsize, fontsize=fontsize)
akfile.plot(x='AVK_2.5km', y='z', ax=akp, color='skyblue', xlim=xlim,
figsize=figsize, fontsize=fontsize)
akfile.plot(x='AVK_2.7km', y='z', ax=akp, color='mediumblue', xlim=xlim,
figsize=figsize, fontsize=fontsize)
akfile.plot(x='AVK_2.9km', y='z', ax=akp, color='royalblue', xlim=xlim,
figsize=figsize, fontsize=fontsize)
akfile.plot(x='AVK_3.1km', y='z', ax=akp, color='midnightblue', xlim=xlim,
figsize=figsize, fontsize=fontsize)
akfile.plot(x='AVK_3.3km', y='z', ax=akp, color='darkviolet', xlim=xlim,
figsize=figsize, fontsize=fontsize)
akfile.plot(x='AVK_3.5km', y='z', ax=akp, color='darkmagenta', xlim=xlim,
figsize=figsize, fontsize=fontsize)
akfile.plot(x='AVK_3.7km', y='z', ax=akp, color='magenta',xlim=xlim,
figsize=figsize, fontsize=fontsize)
akfile.plot(x='AVK_3.9km', y='z', ax=akp, color='pink',xlim=xlim,
figsize=figsize, fontsize=fontsize)
entries = ['tot','0.1', '0.3', '0.5', '0.7', '0.9', '1.1', '1.3', '1.5', '1.7',
'1.9', '2.1', '2.3', '2.5', '2.7', '2.9', '3.1', '3.3',
'3.5', '3.7', '3.9']
akp.legend(entries, fontsize=7, loc='center left', bbox_to_anchor=(1, 0.5))
akp.text(0.25, 3.2, 'DOFS = '+str(round(dofs,3)), size=15)
#akp.set_title('NO2_AVK_'+date+'_'+time, fontsize=fontsize)
akp.set_xlabel('Aer. Ext. AK', fontsize=fontsize)
akp.set_ylabel('Altitude (km)', fontsize=fontsize)
fig = akp.get_figure()
#fig.savefig(path+'ap_EA_avkplots/'+test+'_avk_'+date+'_'+time+'.png',
# bbox_inches='tight')
profile = pd.read_csv(fullpath+'/profiles/prof361nm_'+date+'_'+time+'.dat',
delim_whitespace=True)
prof_vals[test] = profile['retrieved'][:3]
prof_err_vals[test] = profile['err_retrieved'][:3]
xlim=(0,0.30)
ylim=(0,4)
#figsize=(3,3)
prop = profile.plot(x='apriori', y='z', style='k-', figsize=figsize,
xlim=xlim, ylim=ylim, fontsize=fontsize)
profile.plot(x='retrieved', y='z', xerr='err_retrieved', figsize=figsize,
color='darkorange', xlim=xlim, ylim=ylim, ax=prop, fontsize=fontsize)
prop.legend(['a-priori', 'retrieved'], loc='upper right', fontsize=fontsize)
# Set the axes ticks and labels
xticks = [0, 5, 10, 15, 20, 25, 30]
    xlab = '(km$^{-1}$) (x10$^{-2}$)'
yticks = [0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0]
ylab = 'Altitude (km)'
prop.set_xticklabels(xticks, fontsize=fontsize)
prop.set_yticklabels(yticks, fontsize=fontsize)
prop.set_xlabel(xlab, fontsize=fontsize)
prop.set_ylabel(ylab, fontsize=fontsize)
fig = prop.get_figure()
#fig.savefig(path+'ap_EA_profplots/'+test+'_prof_'+date+'_'+time+'.png',
# bbox_inches='tight')
prep = profile.plot(x='err_smooth', y='z', style='m-', figsize=figsize,
xlim=xlim, ylim=ylim, fontsize=fontsize)
profile.plot(x='err_noise', y='z', style='b-',figsize=figsize,
xlim=xlim, ylim=ylim, ax=prep, fontsize=fontsize)
prep.set_xticklabels(xticks, fontsize=fontsize)
prep.set_yticklabels(yticks, fontsize=fontsize)
prep.set_xlabel(xlab, fontsize=fontsize)
prep.set_ylabel(ylab, fontsize=fontsize)
prep.legend(['Smooth.', 'Noise'], loc='upper right', fontsize=fontsize)
fig2 = prep.get_figure()
#fig2.savefig(path+'ap_EA_profplots/'+test+'_profErrors_'+date+'_'+time+'.png',
# bbox_inches='tight')
#%%
statsummary = pd.DataFrame()
statsummary['test'] = pd.Series(tests)
statsummary['mm_rms'] = pd.Series(mm_rms)
statsummary['aod_ave'] = pd.Series(aod_ave)
statsummary['aodErr_ave'] = pd.Series(aodErr_ave)
statsummary['aodPErr_ave'] = pd.Series(aodPErr_ave)
statsummary['csq_ave'] = pd.Series(csq_ave)
"""
Generate ablated modality images. One-time-use code.
Modality ablation experiment: generate and save the ablated BraTS images.
Save in the directory: Path(brats_path).parent / "ablated_brats", and can be loaded with the script:
T1 = os.path.join(image_path_list[0], bratsID, bratsID+'_t1.nii.gz') # (240, 240, 155)
T1c = os.path.join(image_path_list[1], bratsID, bratsID+'_t1ce.nii.gz')
T2 = os.path.join(image_path_list[2], bratsID, bratsID+'_t2.nii.gz')
FLAIR = os.path.join(image_path_list[3], bratsID, bratsID+'_flair.nii.gz')
For the original brats image, ablate it by filling in the non-zero value with random values ~ N(image_mean, image_std)
"""
import nibabel
import os
from pathlib import Path
import numpy as np
import pandas as pd
import itertools, math
from monai.data import write_nifti
from monai.transforms import LoadNifti
import monai
# from .heatmap_utils import get_heatmaps
from .heatmap_utlis import *
from scipy import stats
from scipy.stats import spearmanr as spr
from scipy.stats import kendalltau
from sklearn.metrics import accuracy_score, f1_score, roc_auc_score, recall_score, precision_score, confusion_matrix
import csv
import itertools, math
import copy
from validate import test
from datetime import datetime
from skimage.morphology import binary_dilation  # needed by ablate_tumor_only below
# print(monai.__version__)
from sklearn.metrics import auc, roc_curve
def generate_ablated_dataset(modalities = ["t1", "t1ce", "t2", "flair"], ablation_mode = 'allzero'):
"""
    One-time function to generate and save the ablated modalities.
    :param modalities: list of modality names to ablate
    :param ablation_mode: 'allzero' replaces the whole modality with zeros, 'allnoise' replaces it
        with noise sampled from the non-tumor brain region, 'lesionzero' zeros out only the (dilated) lesion region
:return:
"""
data_root = "/local-scratch/authorid/dld_data/brats2020/MICCAI_BraTS2020_TrainingData/all_tmp"
data_root = Path(data_root)
if ablation_mode == 'allzero': # ablate the whole modality, and replace with 0s
saved_path = data_root.parent / "zero_ablated_brats"
elif ablation_mode == 'allnoise': # ablate the whole modality, and replace with nontumor signal noises
saved_path = data_root.parent / "ablated_brats"
elif ablation_mode == 'lesionzero': # ablate the lesion only on the modality, and replace with 0s
saved_path = data_root.parent / "lesionzero"
# read brain MRI
ids = [f for f in os.listdir(data_root) if os.path.isdir(os.path.join(data_root, f))]
for id in ids:
seg_path = data_root / id / "{}_seg.nii.gz".format(id)
seg = nibabel.load(seg_path).get_fdata()
for m in modalities:
path = data_root/id / "{}_{}.nii.gz".format(id, m)
# mri = nibabel.load(path)
# img_data = mri.get_fdata()
loader = LoadNifti(image_only = False)
img_data, header = loader(path)
if ablation_mode == "allzero":
ablate_array = np.zeros(img_data.shape)
elif ablation_mode == 'allnoise':
ablate_array = ablate_signal(img_data, seg)
elif ablation_mode == 'lesionzero':
ablate_array = ablate_tumor_only(img_data, seg)
# nibabel.save(ablate_array, "{}_{}.nii.gz".format(id, m))
output_root = saved_path/id
output_root.mkdir(exist_ok=True, parents=True)
print(header['affine'], header['original_affine'])
write_nifti(ablate_array,
affine= header['affine'],
target_affine = header['original_affine'],
file_name = output_root/"{}_{}.nii.gz".format(id, m))
# saver = NiftiSaver(data_root_dir = output_root, output_postfix = None, output_ext='.nii.gz')
# saver.save(ablate_array, {'filename_or_obj': "{}_{}".format(id, m)})
def ablate_tumor_only(array, seg):
edge = 10
dilated_seg = []
for s in range(array.shape[-1]):
dilated= binary_dilation(seg[:,:,s], selem = np.ones([edge for i in range(seg[:,:,s].ndim)]))
dilated_seg.append(dilated)
dilated_seg = np.stack(dilated_seg, axis=-1)
ablated_array = np.copy(array)
ablated_array[dilated_seg > 0] = 0
return ablated_array
def ablate_signal(array, seg):
    """Helper: given an image array, replace its non-zero values by sampling (with replacement)
    from the non-tumor brain voxels, so that the replacement keeps the same intensity distribution.
"""
non_tumor = array[(array != 0) & (seg != 1) & (seg != 2) & (seg != 4)].flatten() # get brain region with non-tumor part [0. 1. 2. 4.]
print(np.unique(seg))
print(non_tumor.shape)
# mean = np.mean(array)
# std = np.std(array)
ablated_array = np.random.choice(non_tumor, size=array.shape, replace=True)
ablated_array[array == 0] = 0
print('ablated_array', ablated_array.shape)
return ablated_array
### Utilities to get the gt modality Shapley values, and compare heatmap (hm) values with this gt ###
def modality_shapley(config, ablated_image_folder, csv_save_dir = "/local-scratch/authorid/BRATS_IDH/log/mod_shapley"):
"""
    Modality ablation experiment: run the trained model once for every combination of
    available/ablated modalities (itertools.product over {0, 1} for each modality), using the
    pre-computed ablated images in `ablated_image_folder`, and save one test-result csv per
    combination under `csv_save_dir`.
"""
modalities = config['xai']['modality']
print(modalities)
# generate modality combinations
N_sets = list(itertools.product([0, 1], repeat=len(modalities)) ) # set of all_combinations
for modality_selection in N_sets:
test(config, timestamp = False, ablated_image_folder = ablated_image_folder, csv_save_dir = csv_save_dir, modality_selection= modality_selection)
def shapley_result_csv(fold = 1, root = '/local-scratch/authorid/BRATS_IDH/log/mod_shapley/test/', modalities= ["t1", "t1ce", "t2", "flair"], metric = 'acc'):
"""
From the individual test records, get the summarized csv of modality: accuracy pair.
:param fold:
:param path:
:return:
"""
# get all csvs in the folder
save_path = Path(root)/"shapley"
save_path.mkdir(parents = True, exist_ok= True)
csv_filename = save_path / 'aggregated_performance_fold_{}.csv'.format(fold)
file_exists = os.path.isfile(csv_filename)
fnames = modalities+["accuracy"]
with open(csv_filename, 'w', newline='') as csv_file:
csv_writer = csv.DictWriter(csv_file, fieldnames=fnames)
# if not file_exists:
csv_writer.writeheader()
for f in Path(root).rglob('*cv_result_fold*.csv'):
fn = f.name.split(".")[0].split("-")
if len(fn) == len(modalities)+1:
fold_num = fn[0].split("_")[-1]
else:
fold_num = fn[1].split("_")[-1]
fold_num = int(fold_num)
if fold_num == fold:
if len(fn) == len(modalities) + 1:
modelity_selection = [int(i) for i in fn[1:]]
else:
modelity_selection = [int(i) for i in fn[2:]]
# print( fold_num, modelity_selection)
results = pd.read_csv(f)
gt = results['gt']
pred = results['pred']
if metric == 'auc':
fpr, tpr, threshold = roc_curve(results['gt'].to_list(), results['pred'].to_list())
accuracy = auc(fpr, tpr)
else:
accuracy = accuracy_score(gt, pred)
csv_record = {'accuracy': accuracy}
for i, m in enumerate(modalities):
csv_record[m]= modelity_selection[i]
csv_writer.writerow(csv_record)
print("Fold {}: modality: {}, accuracy: {}".format(fold, modelity_selection, accuracy))
print("Saved at {}".format(csv_filename))
return csv_filename
def get_shapley(csv_filename, modalities = ["t1", "t1ce", "t2", "flair"]):
"""
    Calculate the modality Shapley values from the aggregated performance csv, which has one
    binary column per modality (e.g. t1, t1ce, t2, flair; 1 = modality used) and an 'accuracy'
    column with the corresponding performance value.
    :param csv_filename: path to the aggregated performance csv (one row per modality combination)
:return:
"""
# convert csv to dict: {(0, 0, 1, 0): 10} {tuple: performance}
df = pd.read_csv(csv_filename)
fold = Path(csv_filename).name.split('.')[0].split('_')[-1]
# print(fold)
df_dict = df.to_dict(orient='records')
# print(df_dict)
v_dict = {} #
for row in df_dict:
mod_lst = []
for m in modalities:
mod_lst.append(row[m])
v_dict[tuple(mod_lst)] = row['accuracy']
# print(v_dict)
n = len(modalities)
# sanity check if all mod combinations are exists
N_sets = list(itertools.product([0,1],repeat = len(modalities))) # set of all_combinations
for s in N_sets:
if tuple(s) not in v_dict:
print("ERROR in get_shapley! {} missing".format(s))
N_sets_array = np.array(N_sets) # array([[0, 0, 0, 0], [0, 0, 0, 1],
mod_shapley = {}
# for each mod, calculate its shapley value:
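    # Shapley value of modality i:
    #   phi_i = sum over subsets S of N\{i} of |S|! * (n - |S| - 1)! / n! * (v(S u {i}) - v(S))
    # where v(S) is the test accuracy obtained when only the modalities in S are available.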
for i, mod in enumerate(modalities):
        # all subsets S that do not contain modality i
        n_not_i = N_sets_array[N_sets_array[:, i] == 0]
# print(n_not_i, i)
phi_i= 0
for s in n_not_i:
# print('s', s)
v_s = v_dict[tuple(s)]
sANDi = copy.deepcopy(s)
sANDi[i] =1
v_sANDi = v_dict[tuple(sANDi)]
# print(s , s.sum(), i, mod)
phi_i += (v_sANDi - v_s) * math.factorial(s.sum()) * (math.factorial(n - s.sum() - 1)) / math.factorial(n)
mod_shapley[mod] = phi_i
mod_shapley['fold'] = fold
print(mod_shapley)
# save gt shapley to csv
with open(Path(csv_filename).parent/'fold_{}_modality_shapley.csv'.format(fold), 'w') as f:
csv_writer = csv.DictWriter(f, fieldnames=list(mod_shapley.keys()))
csv_writer.writeheader()
csv_writer.writerow(mod_shapley)
# for key in mod_shapley.keys():
# f.write("%s,%s\n" % (key, mod_shapley[key]))
return mod_shapley
def get_shapley_gt_multiple_runs_pipeline(config, run_num, ablated_image_folder, csv_save_dir):
    """Since the ground-truth Shapley values are not deterministic, repeat the ablation experiment
    run_num times to obtain a distribution of ground-truth modality Shapley values."""
modalities = config['xai']['modality']
fold = config['data_loader']['args']['fold']
# support multiple runtime, check if file exists
existing_runs = [f for f in os.listdir(csv_save_dir) if os.path.isdir(os.path.join(csv_save_dir, f))]
existing_runs.sort()
starting_run = -1
for i in existing_runs:
i = int(i)
shapley_csv = os.path.join(csv_save_dir, "{}".format(i), 'shapley', 'fold_{}_modality_shapley.csv'.format(fold))
file_exists = os.path.isfile(shapley_csv)
if file_exists:
starting_run = i
else:
break
if starting_run >= run_num:
return
for run_i in range(starting_run+1, run_num):
run_dir = os.path.join(csv_save_dir, "{}".format(run_i))
modality_shapley(config, ablated_image_folder = ablated_image_folder, csv_save_dir= run_dir)
csv_filename = shapley_result_csv(fold = fold, modalities=modalities, root = run_dir)
print(csv_filename)
get_shapley(csv_filename, modalities=modalities)
def aggregate_shapley_gt_mean_std(fold, csv_save_dir, modalities):
# calculate the mean and std of the multiple run shapley
result_list = []
runs = [f for f in os.listdir(csv_save_dir) if os.path.isdir(os.path.join(csv_save_dir, f))]
for run_i in runs:
shapley_csv = os.path.join(csv_save_dir, "{}".format(run_i), 'shapley', 'fold_{}_modality_shapley.csv'.format(fold))
file_exists = os.path.isfile(shapley_csv)
if file_exists:
            df = pd.read_csv(shapley_csv)
"""
Tests for the generic MLEModel
Author: <NAME>
License: Simplified-BSD
"""
from __future__ import division, absolute_import, print_function
import numpy as np
import pandas as pd
import os
import re
import warnings
from statsmodels.tsa.statespace import (sarimax, varmax, kalman_filter,
kalman_smoother)
from statsmodels.tsa.statespace.mlemodel import MLEModel, MLEResultsWrapper
from statsmodels.tsa.statespace.tools import compatibility_mode
from statsmodels.datasets import nile
from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_raises
from nose.exc import SkipTest
from statsmodels.tsa.statespace.tests.results import results_sarimax, results_var_misc
current_path = os.path.dirname(os.path.abspath(__file__))
try:
import matplotlib.pyplot as plt
have_matplotlib = True
except ImportError:
have_matplotlib = False
# Basic kwargs
kwargs = {
'k_states': 1, 'design': [[1]], 'transition': [[1]],
'selection': [[1]], 'state_cov': [[1]],
'initialization': 'approximate_diffuse'
}
def get_dummy_mod(fit=True, pandas=False):
# This tests time-varying parameters regression when in fact the parameters
# are not time-varying, and in fact the regression fit is perfect
endog = np.arange(100)*1.0
exog = 2*endog
if pandas:
index = pd.date_range('1960-01-01', periods=100, freq='MS')
endog = pd.Series(endog, index=index)
exog = pd.Series(exog, index=index)
mod = sarimax.SARIMAX(endog, exog=exog, order=(0,0,0), time_varying_regression=True, mle_regression=False)
if fit:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
res = mod.fit(disp=-1)
else:
res = None
return mod, res
def test_wrapping():
# Test the wrapping of various Representation / KalmanFilter /
# KalmanSmoother methods / attributes
mod, _ = get_dummy_mod(fit=False)
# Test that we can get the design matrix
assert_equal(mod['design', 0, 0], 2.0 * np.arange(100))
# Test that we can set individual elements of the design matrix
mod['design', 0, 0, :] = 2
assert_equal(mod.ssm['design', 0, 0, :], 2)
assert_equal(mod.ssm['design'].shape, (1, 1, 100))
# Test that we can set the entire design matrix
mod['design'] = [[3.]]
assert_equal(mod.ssm['design', 0, 0], 3.)
# (Now it's no longer time-varying, so only 2-dim)
assert_equal(mod.ssm['design'].shape, (1, 1))
# Test that we can change the following properties: loglikelihood_burn,
# initial_variance, tolerance
assert_equal(mod.loglikelihood_burn, 1)
mod.loglikelihood_burn = 0
assert_equal(mod.ssm.loglikelihood_burn, 0)
assert_equal(mod.tolerance, mod.ssm.tolerance)
mod.tolerance = 0.123
assert_equal(mod.ssm.tolerance, 0.123)
assert_equal(mod.initial_variance, 1e10)
mod.initial_variance = 1e12
assert_equal(mod.ssm.initial_variance, 1e12)
# Test that we can use the following wrappers: initialization,
# initialize_known, initialize_stationary, initialize_approximate_diffuse
# Initialization starts off as none
assert_equal(mod.initialization, None)
# Since the SARIMAX model may be fully stationary or may have diffuse
# elements, it uses a custom initialization by default, but it can be
# overridden by users
mod.initialize_state()
# (The default initialization in this case is known because there is a non-
# stationary state corresponding to the time-varying regression parameter)
assert_equal(mod.initialization, 'known')
mod.initialize_approximate_diffuse(1e5)
assert_equal(mod.initialization, 'approximate_diffuse')
assert_equal(mod.ssm._initial_variance, 1e5)
mod.initialize_known([5.], [[40]])
assert_equal(mod.initialization, 'known')
assert_equal(mod.ssm._initial_state, [5.])
assert_equal(mod.ssm._initial_state_cov, [[40]])
mod.initialize_stationary()
assert_equal(mod.initialization, 'stationary')
# Test that we can use the following wrapper methods: set_filter_method,
# set_stability_method, set_conserve_memory, set_smoother_output
# The defaults are as follows:
assert_equal(mod.ssm.filter_method, kalman_filter.FILTER_CONVENTIONAL)
assert_equal(mod.ssm.stability_method, kalman_filter.STABILITY_FORCE_SYMMETRY)
assert_equal(mod.ssm.conserve_memory, kalman_filter.MEMORY_STORE_ALL)
assert_equal(mod.ssm.smoother_output, kalman_smoother.SMOOTHER_ALL)
# Now, create the Cython filter object and assert that they have
# transferred correctly
mod.ssm._initialize_filter()
kf = mod.ssm._kalman_filter
assert_equal(kf.filter_method, kalman_filter.FILTER_CONVENTIONAL)
assert_equal(kf.stability_method, kalman_filter.STABILITY_FORCE_SYMMETRY)
assert_equal(kf.conserve_memory, kalman_filter.MEMORY_STORE_ALL)
# (the smoother object is so far not in Cython, so there is no
# transferring)
# Change the attributes in the model class
if compatibility_mode:
assert_raises(NotImplementedError, mod.set_filter_method, 100)
else:
mod.set_filter_method(100)
mod.set_stability_method(101)
mod.set_conserve_memory(102)
mod.set_smoother_output(103)
# Assert that the changes have occurred in the ssm class
if not compatibility_mode:
assert_equal(mod.ssm.filter_method, 100)
assert_equal(mod.ssm.stability_method, 101)
assert_equal(mod.ssm.conserve_memory, 102)
assert_equal(mod.ssm.smoother_output, 103)
# Assert that the changes have *not yet* occurred in the filter object
assert_equal(kf.filter_method, kalman_filter.FILTER_CONVENTIONAL)
assert_equal(kf.stability_method, kalman_filter.STABILITY_FORCE_SYMMETRY)
assert_equal(kf.conserve_memory, kalman_filter.MEMORY_STORE_ALL)
# Re-initialize the filter object (this would happen automatically anytime
# loglike, filter, etc. were called)
# In this case, an error will be raised since filter_method=100 is not
# valid
# Note: this error is only raised in the compatibility case, since the
# newer filter logic checks for a valid filter mode at a different point
if compatibility_mode:
assert_raises(NotImplementedError, mod.ssm._initialize_filter)
# Now, test the setting of the other two methods by resetting the
# filter method to a valid value
mod.set_filter_method(1)
mod.ssm._initialize_filter()
# Retrieve the new kalman filter object (a new object had to be created
# due to the changing filter method)
kf = mod.ssm._kalman_filter
assert_equal(kf.filter_method, 1)
assert_equal(kf.stability_method, 101)
assert_equal(kf.conserve_memory, 102)
def test_fit_misc():
true = results_sarimax.wpi1_stationary
endog = np.diff(true['data'])[1:]
mod = sarimax.SARIMAX(endog, order=(1,0,1), trend='c')
# Test optim_hessian={'opg','oim','approx'}
with warnings.catch_warnings():
warnings.simplefilter("ignore")
res1 = mod.fit(method='ncg', disp=0, optim_hessian='opg', optim_complex_step=False)
res2 = mod.fit(method='ncg', disp=0, optim_hessian='oim', optim_complex_step=False)
# Check that the Hessians broadly result in the same optimum
assert_allclose(res1.llf, res2.llf, rtol=1e-2)
# Test return_params=True
mod, _ = get_dummy_mod(fit=False)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
res_params = mod.fit(disp=-1, return_params=True)
# 5 digits necessary to accommodate 32-bit numpy / scipy with OpenBLAS 0.2.18
assert_almost_equal(res_params, [0, 0], 5)
def test_score_misc():
mod, res = get_dummy_mod()
# Test that the score function works
mod.score(res.params)
def test_from_formula():
assert_raises(NotImplementedError, lambda: MLEModel.from_formula(1,2,3))
def test_score_analytic_ar1():
# Test the score against the analytic score for an AR(1) model with 2
# observations
# Let endog = [1, 0.5], params=[0, 1]
mod = sarimax.SARIMAX([1, 0.5], order=(1,0,0))
def partial_phi(phi, sigma2):
return -0.5 * (phi**2 + 2*phi*sigma2 - 1) / (sigma2 * (1 - phi**2))
def partial_sigma2(phi, sigma2):
return -0.5 * (2*sigma2 + phi - 1.25) / (sigma2**2)
params = np.r_[0., 2]
# Compute the analytic score
analytic_score = np.r_[
partial_phi(params[0], params[1]),
partial_sigma2(params[0], params[1])]
# Check each of the approximations, transformed parameters
approx_cs = mod.score(params, transformed=True, approx_complex_step=True)
assert_allclose(approx_cs, analytic_score)
approx_fd = mod.score(params, transformed=True, approx_complex_step=False)
assert_allclose(approx_fd, analytic_score, atol=1e-5)
approx_fd_centered = (
mod.score(params, transformed=True, approx_complex_step=False,
approx_centered=True))
    assert_allclose(approx_fd_centered, analytic_score, atol=1e-5)
harvey_cs = mod.score(params, transformed=True, method='harvey',
approx_complex_step=True)
assert_allclose(harvey_cs, analytic_score)
harvey_fd = mod.score(params, transformed=True, method='harvey',
approx_complex_step=False)
assert_allclose(harvey_fd, analytic_score, atol=1e-5)
harvey_fd_centered = mod.score(params, transformed=True, method='harvey',
approx_complex_step=False,
approx_centered=True)
assert_allclose(harvey_fd_centered, analytic_score, atol=1e-5)
# Check the approximations for untransformed parameters. The analytic
# check now comes from chain rule with the analytic derivative of the
# transformation
# if L* is the likelihood evaluated at untransformed parameters and
# L is the likelihood evaluated at transformed parameters, then we have:
# L*(u) = L(t(u))
# and then
# L'*(u) = L'(t(u)) * t'(u)
def partial_transform_phi(phi):
return -1. / (1 + phi**2)**(3./2)
def partial_transform_sigma2(sigma2):
return 2. * sigma2
uparams = mod.untransform_params(params)
analytic_score = np.dot(
np.diag(np.r_[partial_transform_phi(uparams[0]),
partial_transform_sigma2(uparams[1])]),
np.r_[partial_phi(params[0], params[1]),
partial_sigma2(params[0], params[1])])
approx_cs = mod.score(uparams, transformed=False, approx_complex_step=True)
assert_allclose(approx_cs, analytic_score)
approx_fd = mod.score(uparams, transformed=False,
approx_complex_step=False)
assert_allclose(approx_fd, analytic_score, atol=1e-5)
approx_fd_centered = (
mod.score(uparams, transformed=False, approx_complex_step=False,
approx_centered=True))
    assert_allclose(approx_fd_centered, analytic_score, atol=1e-5)
harvey_cs = mod.score(uparams, transformed=False, method='harvey',
approx_complex_step=True)
assert_allclose(harvey_cs, analytic_score)
harvey_fd = mod.score(uparams, transformed=False, method='harvey',
approx_complex_step=False)
assert_allclose(harvey_fd, analytic_score, atol=1e-5)
harvey_fd_centered = mod.score(uparams, transformed=False, method='harvey',
approx_complex_step=False,
approx_centered=True)
assert_allclose(harvey_fd_centered, analytic_score, atol=1e-5)
# Check the Hessian: these approximations are not very good, particularly
# when phi is close to 0
params = np.r_[0.5, 1.]
def hessian(phi, sigma2):
hessian = np.zeros((2,2))
hessian[0,0] = (-phi**2 - 1) / (phi**2 - 1)**2
hessian[1,0] = hessian[0,1] = -1 / (2 * sigma2**2)
hessian[1,1] = (sigma2 + phi - 1.25) / sigma2**3
return hessian
analytic_hessian = hessian(params[0], params[1])
with warnings.catch_warnings():
warnings.simplefilter("ignore")
assert_allclose(mod._hessian_complex_step(params) * 2,
analytic_hessian, atol=1e-1)
assert_allclose(mod._hessian_finite_difference(params) * 2,
analytic_hessian, atol=1e-1)
def test_cov_params():
mod, res = get_dummy_mod()
# Smoke test for each of the covariance types
with warnings.catch_warnings():
warnings.simplefilter("ignore")
res = mod.fit(res.params, disp=-1, cov_type='none')
assert_equal(res.cov_kwds['description'], 'Covariance matrix not calculated.')
res = mod.fit(res.params, disp=-1, cov_type='approx')
assert_equal(res.cov_type, 'approx')
assert_equal(res.cov_kwds['description'], 'Covariance matrix calculated using numerical (complex-step) differentiation.')
res = mod.fit(res.params, disp=-1, cov_type='oim')
assert_equal(res.cov_type, 'oim')
assert_equal(res.cov_kwds['description'], 'Covariance matrix calculated using the observed information matrix (complex-step) described in Harvey (1989).')
res = mod.fit(res.params, disp=-1, cov_type='opg')
assert_equal(res.cov_type, 'opg')
assert_equal(res.cov_kwds['description'], 'Covariance matrix calculated using the outer product of gradients (complex-step).')
res = mod.fit(res.params, disp=-1, cov_type='robust')
assert_equal(res.cov_type, 'robust')
assert_equal(res.cov_kwds['description'], 'Quasi-maximum likelihood covariance matrix used for robustness to some misspecifications; calculated using the observed information matrix (complex-step) described in Harvey (1989).')
res = mod.fit(res.params, disp=-1, cov_type='robust_oim')
assert_equal(res.cov_type, 'robust_oim')
assert_equal(res.cov_kwds['description'], 'Quasi-maximum likelihood covariance matrix used for robustness to some misspecifications; calculated using the observed information matrix (complex-step) described in Harvey (1989).')
res = mod.fit(res.params, disp=-1, cov_type='robust_approx')
assert_equal(res.cov_type, 'robust_approx')
assert_equal(res.cov_kwds['description'], 'Quasi-maximum likelihood covariance matrix used for robustness to some misspecifications; calculated using numerical (complex-step) differentiation.')
assert_raises(NotImplementedError, mod.fit, res.params, disp=-1, cov_type='invalid_cov_type')
def test_transform():
# The transforms in MLEModel are noops
mod = MLEModel([1,2], **kwargs)
# Test direct transform, untransform
assert_allclose(mod.transform_params([2, 3]), [2, 3])
assert_allclose(mod.untransform_params([2, 3]), [2, 3])
# Smoke test for transformation in `filter`, `update`, `loglike`,
# `loglikeobs`
mod.filter([], transformed=False)
mod.update([], transformed=False)
mod.loglike([], transformed=False)
mod.loglikeobs([], transformed=False)
# Note that mod is an SARIMAX instance, and the two parameters are
# variances
mod, _ = get_dummy_mod(fit=False)
# Test direct transform, untransform
assert_allclose(mod.transform_params([2, 3]), [4, 9])
assert_allclose(mod.untransform_params([4, 9]), [2, 3])
# Test transformation in `filter`
res = mod.filter([2, 3], transformed=True)
assert_allclose(res.params, [2, 3])
res = mod.filter([2, 3], transformed=False)
assert_allclose(res.params, [4, 9])
def test_filter():
endog = np.array([1., 2.])
mod = MLEModel(endog, **kwargs)
# Test return of ssm object
res = mod.filter([], return_ssm=True)
assert_equal(isinstance(res, kalman_filter.FilterResults), True)
# Test return of full results object
res = mod.filter([])
assert_equal(isinstance(res, MLEResultsWrapper), True)
assert_equal(res.cov_type, 'opg')
# Test return of full results object, specific covariance type
res = mod.filter([], cov_type='oim')
assert_equal(isinstance(res, MLEResultsWrapper), True)
assert_equal(res.cov_type, 'oim')
def test_params():
mod = MLEModel([1,2], **kwargs)
# By default start_params raises NotImplementedError
assert_raises(NotImplementedError, lambda: mod.start_params)
# But param names are by default an empty array
assert_equal(mod.param_names, [])
# We can set them in the object if we want
mod._start_params = [1]
mod._param_names = ['a']
assert_equal(mod.start_params, [1])
assert_equal(mod.param_names, ['a'])
def check_results(pandas):
mod, res = get_dummy_mod(pandas=pandas)
# Test fitted values
assert_almost_equal(res.fittedvalues[2:], mod.endog[2:].squeeze())
# Test residuals
assert_almost_equal(res.resid[2:], np.zeros(mod.nobs-2))
# Test loglikelihood_burn
assert_equal(res.loglikelihood_burn, 1)
def test_results():
check_results(pandas=False)
check_results(pandas=True)
def test_predict():
dates = pd.date_range(start='1980-01-01', end='1981-01-01', freq='AS')
endog = pd.Series([1,2], index=dates)
mod = MLEModel(endog, **kwargs)
res = mod.filter([])
# Test that predict with start=None, end=None does prediction with full
# dataset
predict = res.predict()
assert_equal(predict.shape, (mod.nobs,))
assert_allclose(res.get_prediction().predicted_mean, predict)
# Test a string value to the dynamic option
assert_allclose(res.predict(dynamic='1981-01-01'), res.predict())
# Test an invalid date string value to the dynamic option
# assert_raises(ValueError, res.predict, dynamic='1982-01-01')
# Test for passing a string to predict when dates are not set
mod = MLEModel([1,2], **kwargs)
res = mod.filter([])
assert_raises(KeyError, res.predict, dynamic='string')
def test_forecast():
# Numpy
mod = MLEModel([1,2], **kwargs)
res = mod.filter([])
forecast = res.forecast(steps=10)
assert_allclose(forecast, np.ones((10,)) * 2)
assert_allclose(res.get_forecast(steps=10).predicted_mean, forecast)
# Pandas
index = pd.date_range('1960-01-01', periods=2, freq='MS')
mod = MLEModel(pd.Series([1,2], index=index), **kwargs)
res = mod.filter([])
assert_allclose(res.forecast(steps=10), np.ones((10,)) * 2)
assert_allclose(res.forecast(steps='1960-12-01'), np.ones((10,)) * 2)
assert_allclose(res.get_forecast(steps=10).predicted_mean, np.ones((10,)) * 2)
def test_summary():
dates = pd.date_range(start='1980-01-01', end='1984-01-01', freq='AS')
endog = | pd.Series([1,2,3,4,5], index=dates) | pandas.Series |
#!/usr/bin/env python
# coding: utf-8
# In[2]:
import sys
sys.path.append('..')
# In[3]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from datetime import timedelta, datetime, date
import os
from utils import data_paths, load_config
from pathlib import Path
from nltk.metrics import edit_distance #(Levenshtein)
import pycountry
import math
# # Estimating The Infected Population From Deaths
# > Estimating the number of infected people by country based on the number of deaths and case fatality rate.
#
# - comments: true
# - author: <NAME>
# - categories: [growth, compare, interactive, estimation]
# - hide: false
# - image: images/covid-estimate-infections.png
# - permalink: /covid-infected/
# - toc: true
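# The estimate below works as follows (sketch only; the variable names here are
# illustrative, not part of the original notebook): the true number of infected
# people on a given day is approximated by dividing the deaths observed 8 days
# later by the case fatality rate (CFR = total_deaths / total_cases) observed on
# that later day.
def estimate_true_infected(deaths_in_8_days, cfr_in_8_days):
    """Illustrative helper: true_infected_t ~= deaths_{t+8} / CFR_{t+8}."""
    return deaths_in_8_days / cfr_in_8_days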
# In[4]:
LOCAL_FILES=True
#jupyter or script
IS_SCRIPT = False
# In[5]:
os.getcwd()
# In[6]:
if IS_SCRIPT:
RUN_PATH = Path(os.path.realpath(__file__))
DATA_PARENT = RUN_PATH.parent.parent
else:
#for jupyter
cw = get_ipython().getoutput('pwd')
RUN_PATH = Path(cw[0])
DATA_PARENT = RUN_PATH.parent
# In[7]:
if IS_SCRIPT:
csse_data = data_paths('tools/csse_data_paths.yml')
else:
csse_data = data_paths('csse_data_paths.yml')
# In[8]:
if LOCAL_FILES:
confirmed_url=csse_data.get("csse_ts_local", {}).get('confirmed', {})
deaths_url=csse_data.get("csse_ts_local", {}).get('deaths', {})
recovered_url=csse_data.get("csse_ts_local", {}).get('recovered', {})
confirmed_url = str(DATA_PARENT/confirmed_url)
deaths_url = str(DATA_PARENT/deaths_url)
recovered_url = str(DATA_PARENT/recovered_url)
else:
confirmed_url=csse_data.get("csse_ts_global", {}).get('confirmed', {})
deaths_url=csse_data.get("csse_ts_global", {}).get('deaths', {})
recovered_url=csse_data.get("csse_ts_global", {}).get('recovered', {})
# In[9]:
### UN stats
# In[10]:
df_un_pop_density_info=pd.read_csv(DATA_PARENT/'data/un/df_un_pop_density_info.csv')
df_un_urban_growth_info=pd.read_csv(DATA_PARENT/'data/un/urban_growth_info.csv')
df_un_health_info=pd.read_csv(DATA_PARENT/'data/un/df_un_health_info.csv')
df_un_tourism_info=pd.read_csv(DATA_PARENT/'data/un/df_un_tourism_info.csv')
df_un_gdp_info=pd.read_csv(DATA_PARENT/'data/un/df_un_gdp_info.csv')
df_un_edu_info=pd.read_csv(DATA_PARENT/'data/un/df_un_edu_info.csv')
df_un_pop_growth_info=pd.read_csv(DATA_PARENT/'data/un/df_un_pop_growth_info.csv')
df_un_gdrp_rnd_info=pd.read_csv(DATA_PARENT/'data/un/df_un_gdrp_rnd_info.csv')
df_un_education_info=pd.read_csv(DATA_PARENT/'data/un/df_un_education_info.csv')
df_un_sanitation_info=pd.read_csv(DATA_PARENT/'data/un/df_un_sanitation_info.csv')
df_un_health_expenditure_info=pd.read_csv(DATA_PARENT/'data/un/df_un_health_expenditure_info.csv')
df_un_immigration_info=pd.read_csv(DATA_PARENT/'data/un/df_un_immigration_info.csv')
df_un_trading_info=pd.read_csv(DATA_PARENT/'data/un/df_un_trading_info.csv')
df_un_land_info=pd.read_csv(DATA_PARENT/'data/un/df_un_land_info.csv')
# In[11]:
df_un_health_info.head()
#Health personnel: Pharmacists (per 1000 population)
# In[12]:
df_un_trading_info.tail(n=20)
#column Major trading partner 1 (% of exports)
#Major trading partner 1 (% of exports)
#Major trading partner 2 (% of exports)
#Major trading partner 3 (% of exports)
# In[13]:
df_population_density=df_un_pop_density_info.loc[df_un_pop_density_info['Series'] == 'Population density']
# In[14]:
df_population_density.tail(n=50)
#Population aged 60+ years old (percentage)
#Population density
#Population mid-year estimates (millions)
# In[15]:
df_population_density.loc[df_population_density.groupby('Country')['Year'].idxmax()]
# In[16]:
df_population_density
# In[17]:
### Freedom House stats
# In[18]:
#Freedon House stats
def country_freedom():
global_freedom = str(DATA_PARENT/'data/freedom_house/Global_Freedom.csv')
df_global_free = pd.read_csv(global_freedom)
internet_freedom = str(DATA_PARENT/'data/freedom_house/Internet_Freedom.csv')
df_internet_free = pd.read_csv(internet_freedom)
return df_global_free, df_internet_free
df_global_freedom, df_internet_freedom = country_freedom()
# In[19]:
#csse countries
df_deaths = pd.read_csv(deaths_url, error_bad_lines=False)
df_confirmed = pd.read_csv(confirmed_url, error_bad_lines=False)
df_recovered = pd.read_csv(recovered_url, error_bad_lines=False)
csse_countries = []
for df in [df_deaths, df_confirmed, df_recovered]:
c = set(df["Country/Region"].unique())
csse_countries.append(c)
csse_countries = [item for sublist in csse_countries for item in sublist]
csse_countries = list(set(csse_countries))
# ## CSSE
# In[20]:
# Get data on deaths D_t
df_deaths = pd.read_csv(deaths_url, error_bad_lines=False)
df_deaths = df_deaths.drop(columns=["Lat", "Long"])
df_deaths = df_deaths.melt(id_vars= ["Province/State", "Country/Region"])
df_deaths = pd.DataFrame(df_deaths.groupby(['Country/Region', "variable"]).sum())
df_deaths.reset_index(inplace=True)
df_deaths = df_deaths.rename(columns={"Country/Region": "location", "variable": "date", "value": "total_deaths"})
df_deaths['date'] =pd.to_datetime(df_deaths.date)
df_deaths = df_deaths.sort_values(by = "date")
df_deaths.loc[df_deaths.location == "US","location"] = "United States"
df_deaths.loc[df_deaths.location == "Korea, South","location"] = "South Korea"
# In[21]:
#confirmed
# In[22]:
df_confirmed = pd.read_csv(confirmed_url, error_bad_lines=False)
df_confirmed = df_confirmed.drop(columns=["Lat", "Long"])
df_confirmed = df_confirmed.melt(id_vars= ["Province/State", "Country/Region"])
df_confirmed = pd.DataFrame(df_confirmed.groupby(['Country/Region', "variable"]).sum())
df_confirmed.reset_index(inplace=True)
df_confirmed = df_confirmed.rename(columns={"Country/Region": "location", "variable": "date", "value": "total_cases"})
df_confirmed['date'] =pd.to_datetime(df_confirmed.date)
df_confirmed = df_confirmed.sort_values(by = "date")
df_confirmed.loc[df_confirmed.location == "US","location"] = "United States"
df_confirmed.loc[df_confirmed.location == "Korea, South","location"] = "South Korea"
# In[23]:
df_confirmed.head()
# In[24]:
df_final = pd.merge(df_deaths,
df_confirmed)
# In[25]:
df_final.head()
# In[26]:
df_final["CFR"] = df_final["total_deaths"]/df_final["total_cases"]
df_final["total_infected"] = np.NaN
df_final = df_final.sort_values(by = ['location', 'date'])
df_final = df_final.reset_index(drop = True)
# In[27]:
df_un_pop_per_country=pd.read_csv(DATA_PARENT/'data/un/df_un_pop_per_country_info.csv')
# In[28]:
def get_country_list(pop_cutoff=5.0):
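    # 'Value' appears to hold the UN mid-year population estimate in millions,
    # so the default pop_cutoff=5.0 keeps countries with roughly 5 million or
    # more inhabitants (hence the "n_plus" naming used below).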
pop_nmill=df_un_pop_per_country.loc[df_un_pop_per_country['Value'] >= pop_cutoff]
countries_n_plus=pop_nmill.Country.tolist()
return countries_n_plus
# In[29]:
csse_countries.sort()
csse_countries
# In[30]:
csse_countries=list(map(lambda x: x if x != 'Korea, South' else "South Korea", csse_countries))
# In[31]:
countries_n_plus = get_country_list(pop_cutoff=5.0)
# In[32]:
for j in countries_n_plus:
    if j not in csse_countries:
print(j)
# In[33]:
for j in countries_n_plus:
for i in df_final["date"].unique()[0:-8]:
numer = df_final.loc[(df_final.date == i + np.timedelta64(8, 'D')) & (df_final.location == j), "total_deaths"].iloc[0]
denom = df_final.loc[(df_final.date == i + np.timedelta64(8, 'D')) & (df_final.location == j), "CFR"].iloc[0]
df_final.loc[(df_final.date == i) & (df_final.location == j), "total_infected"] = numer/denom
# In[34]:
df_final.head()
# In[35]:
# Estimate growth rate of infected, g
df_final['infected_g'] = np.log(df_final['total_infected'])
df_final['infected_g'] = df_final['infected_g'].diff()
# In[36]:
# Estimate number of infected given g
today = df_final.date.iloc[-1]
for j in countries_n_plus:
for i in range(7,-1,-1):
df_final.loc[(df_final.location == j) & (df_final.date == today - timedelta(i)), "total_infected"] = df_final.loc[df_final.location == j, "total_infected"].iloc[-i-2]*(1+df_final.loc[df_final.location == j, "infected_g"].aggregate(func = "mean"))
# In[37]:
data_pc = df_final[['location', 'date', 'total_infected']].copy()
# In[38]:
data_countries = []
data_countries_pc = []
# In[39]:
for i in countries_n_plus:
data_pc.loc[data_pc.location == i,"total_infected"] = data_pc.loc[data_pc.location == i,"total_infected"]
# In[40]:
# Get each country time series
filter1 = data_pc["total_infected"] > 1
for i in countries_n_plus:
filter_country = data_pc["location"]== i
data_countries_pc.append(data_pc[filter_country & filter1])
# In[41]:
len(data_countries_pc)
# In[42]:
data_countries_pc[0]
# ## Estimated Infected Population By Country
#
# by days since outbreak
# In[43]:
# Lastest Country Estimates
label = 'Total_Infected'
temp = pd.concat([x.copy() for x in data_countries_pc]).loc[lambda x: x.date >= '3/1/2020']
# In[44]:
metric_name = f'{label}'
temp.columns = ['Country', 'Date', metric_name]
# temp.loc[:, 'month'] = temp.date.dt.strftime('%Y-%m')
temp.loc[:, "Total_Infected"] = temp.loc[:, "Total_Infected"].round(0)
temp.groupby('Country').last()
# In[ ]:
# ## Infected vs. number of confirmed cases
# > Allows you to compare how countries have been tracking the true number of infected people.
# The smaller deviation from the dashed line (45 degree line) the better job at tracking the true number of infected people.
# In[45]:
data_pc = df_final.copy()
# In[46]:
data_countries = []
data_countries_pc = []
# In[47]:
for i in countries_n_plus:
data_pc.loc[data_pc.location == i,"total_infected"] = data_pc.loc[data_pc.location == i,"total_infected"]
data_pc.loc[data_pc.location == i,"total_cases"] = data_pc.loc[data_pc.location == i,"total_cases"]
# get each country time series
filter1 = data_pc["total_infected"] > 1
for i in countries_n_plus:
filter_country = data_pc["location"]== i
data_countries_pc.append(data_pc[filter_country & filter1])
# In[48]:
type(data_countries_pc[0])
# In[49]:
data_countries_pc[0]
# In[ ]:
# In[50]:
def get_df_country(country):
for i, df in enumerate(data_countries_pc):
if len(df.loc[df['location'] == country]):
print(f'country: {country}, index: {i}')
# In[51]:
get_df_country('Italy')
# In[52]:
data_countries_pc[47]
# In[79]:
df_all_data_countries_pc=pd.concat(data_countries_pc)
# In[81]:
df_all_data_countries_pc.tail()
# In[ ]:
#### save all pred as one df
# In[82]:
df_all_data_countries_pc.to_csv(DATA_PARENT/'data/processed/csse/df_all_data_countries_pc.csv')
# In[ ]:
# In[ ]:
### Combine last day only pred with un and freedom house data
# In[53]:
df_country_un_stats = pd.read_csv(DATA_PARENT/'data/un/df_un_merged_stats.csv')
# In[60]:
df_country_un_stats.rename(columns={'Country': 'location'}, inplace=True)
# In[61]:
idx = data_countries_pc[0].groupby(['location'])['date'].transform(max) == data_countries_pc[0]['date']
sub_df=data_countries_pc[0][idx]
sub_df
# In[62]:
sub_df.iloc[0]['location']
# In[63]:
df_country_un_stats.head()
# In[ ]:
# In[ ]:
### freedom house
# In[72]:
df_freedomhouse_merged = pd.read_csv(DATA_PARENT/'data/freedom_house/df_freedomhouse_merged.csv')
# In[73]:
df_freedomhouse_merged.head()
# In[74]:
df_freedomhouse_merged.rename(columns={'Country': 'location'}, inplace=True)
# In[76]:
frames=[]
for df in data_countries_pc:
idx = df.groupby(['location'])['date'].transform(max) == df['date']
sub_df=df[idx]
if len(sub_df)>0:
#print(f'sub_df: {sub_df}')
country=sub_df.iloc[0]['location']
un_df=df_country_un_stats.loc[df_country_un_stats['location'] == country]
#print(f'un_df: {un_df}')
df_merged=pd.merge(sub_df, un_df)
#freedom house data
fh_df=df_freedomhouse_merged.loc[df_freedomhouse_merged['location'] == country]
df_merged=pd.merge(df_merged, fh_df)
frames.append(df_merged)
df_all_un_fh= | pd.concat(frames) | pandas.concat |
import pathlib as pl
import pandas as pd
import pytz
import requests
import zipfile
from datetime import datetime
import io
from . import utils
#from . import DataQuality
class NYISOData:
"""A class used to download and construct a local database from the NYISO.
Attributes
----------
df: Dataframe
        DataFrame containing NYISO data, post-processed and ready for use
dataset: str
Name of a supported dataset found in 'nyisodata/dataset_url_map.yml'
year: str
Dataset year in Eastern Standard Time
redownload: bool, optional
A flag used to redownload CSV files (default is False)
reconstruct: bool, optional
A flag used to reconstruct database from downloaded CSV files (default is False)
create_csv: bool
A flag used to save the database in a CSV file (default is False).
        Pickled databases are the primary format because they preserve the frequency and timezone information.
curr_date: Datetime Object
Datetime object of current time
storage_dir: Pathlib Object
Path to directory which will contain directories for finalized databases and raw CSV files
download_dir: Pathlib Object
Path to directory within the storage_dir that will store the raw CSV files downloaded from the NYISO
output_dir: Pathlib Object
Path to directory within the storage_dir that will store the finalized databases
dataset_details: Namedtuple
Namedtuple containing dataset details from 'dataset_url_map.yml'
Methods
-------
config
Creates the download_dir and output_dir directories if they don't exist
main
Handles logic for downloading data and constructing or reading finalized database
get_raw_data
        Downloads and unzips raw CSVs from the NYISO website month by month
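    Examples
    --------
    Minimal usage sketch (illustrative; 'fuel_mix_5m' stands in for any
    dataset name defined in 'dataset_url_map.yml')::

        db = NYISOData(dataset='fuel_mix_5m', year='2019')
        df = db.df  # post-processed DataFrame with its index stored in UTC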
"""
def __init__(self, dataset, year,
redownload=False, reconstruct=False, create_csv=False):
"""Creates a local database based on dataset name and year stored in UTC.
Parameters
----------
dataset: str
Name of a supported dataset found in 'dataset_url_map.yml'
year: str
Dataset year in Eastern Standard Time
redownload: bool, optional
A flag used to redownload CSV files (default is False)
reconstruct: bool, optional
A flag used to reconstruct database from downloaded CSV files (default is False)
create_csv: bool, optional
A flag used to save the database in a CSV file (default is False)
            Pickled databases are the primary format because they preserve the frequency and timezone information
"""
#Attributes
self.df = None
self.dataset = dataset
self.year = str(year)
self.redownload = redownload
self.reconstruct = reconstruct
self.create_csv = create_csv
self.curr_date = datetime.now(tz=pytz.timezone('US/Eastern'))
self.storage_dir = pl.Path(pl.Path(__file__).resolve().parent, 'storage')
self.download_dir = pl.Path(self.storage_dir, 'raw_datafiles', self.dataset, self.year)
self.output_dir = pl.Path(self.storage_dir, 'databases')
self.dataset_details = utils.fetch_dataset_url_map(self.dataset)
#Methods
self.config()
self.main()
def config(self):
"""Creates the download_dir and output_dir directories if they don't exist"""
for dir_ in [self.download_dir, self.output_dir]:
dir_.mkdir(parents=True, exist_ok=True)
def main(self):
"""Handles logic for downloading data and constructing or reading finalized database"""
file_ = pl.Path(self.output_dir, f'{self.year}_{self.dataset}.pkl')
if not file_.exists() or self.redownload or self.reconstruct:
if not file_.exists() or self.redownload:
self.get_raw_data()
#TODO: DataQuality(dataset=self.dataset, year=self.year).fix_issues()
self.construct_database()
else:
self.df = pd.read_pickle(file_)
def get_raw_data(self):
"""Downloads and unzips raw CSV's from NYISO Website month by month"""
month_range = utils.fetch_months_to_download(self.curr_date, self.year)
print(f'Downloading {self.year} {self.dataset}...', end='')
for month in month_range: # Download and extract all csv files month by month
r = requests.get(self.dataset_details.url.format(month))
if r.ok:
z = zipfile.ZipFile(io.BytesIO(r.content))
z.extractall(self.download_dir)
else:
print(f'Warning: Request failed for {month} with status: {r.status_code}') #TODO: log this
print('Completed!')
def construct_database(self):
"""Constructs database from raw datafiles and saves it in UTC"""
# Determine expected timestamps for dataset
self.curr_date = datetime.now(tz=pytz.timezone('US/Eastern')) # update current time after download
start, end = utils.fetch_ts_start_end(self.curr_date, self.year, self.dataset_details.f)
timestamps = pd.date_range(start, end, freq=self.dataset_details.f, tz='US/Eastern')
# Construct Database
files = sorted(pl.Path(self.download_dir).glob('*.csv'))
if not files:
print('Warning: No raw datafiles found!')
return # skip the rest
else:
frames = [pd.read_csv(file, index_col=0) for file in files] # Concatenate all CSVs into a DataFrame
df = pd.concat(frames, sort=False)
df.index = pd.to_datetime(df.index)
if ('Time Zone' in df.columns) or (self.dataset_details.col is None):
if 'Time Zone' in df.columns: # Make index timezone aware (US/Eastern)
df = df.tz_localize('US/Eastern', ambiguous=df['Time Zone'] == 'EST')
elif self.dataset_details.col is None: # there is no need to pivot
df = df.tz_localize('US/Eastern', ambiguous='infer')
df = df.sort_index(axis='index').tz_convert('UTC') # Convert to UTC so that pivot can work without throwing error for duplicate indices
if 'Time Zone' in df.columns: # make stacked columns
df = df.pivot(columns=self.dataset_details.col, values=self.dataset_details.val_col)
df = df.resample(self.dataset_details.f).mean()
df = utils.check_and_interpolate_nans(df)
else: # When there is no timezone column and there is 'stacked' data
frames = []
for ctype, subdf in df.groupby(by=self.dataset_details.col):
subdf = subdf.tz_localize('US/Eastern', ambiguous='infer').tz_convert('UTC')
subdf = subdf.resample(self.dataset_details.f).mean()
subdf = utils.check_and_interpolate_nans(subdf)
subdf.loc[:, self.dataset_details.col] = ctype
frames.append(subdf)
df = | pd.concat(frames) | pandas.concat |
# Globals #
import re
import time
import numpy as np
import pandas as pd
import dateutil.parser as dp
from nltk import word_tokenize
from nltk.corpus import stopwords
from nltk.stem.porter import *
from itertools import islice
from scipy.stats import boxcox
from scipy.integrate import simps
from realtime_talib import Indicator
from sklearn.model_selection import train_test_split
from sklearn.utils import resample
from pprint import pprint
from selenium import webdriver
RANDOM_STATE = 42
# Sentiment Preprocessing
def remove_special_chars(headline_list):
"""
Returns list of headlines with all non-alphabetical characters removed.
"""
rm_spec_chars = [re.sub('[^ A-Za-z]+', "", headline) for headline in headline_list]
return rm_spec_chars
def tokenize(headline_list):
"""
Takes list of headlines as input and returns a list of lists of tokens.
"""
tokenized = []
for headline in headline_list:
tokens = word_tokenize(headline)
tokenized.append(tokens)
# print("tokenize")
# pprint(tokenized)
return tokenized
def remove_stop_words(tokenized_headline_list):
"""
Takes list of lists of tokens as input and removes all stop words.
"""
filtered_tokens = []
for token_list in tokenized_headline_list:
filtered_tokens.append([token for token in token_list if token not in set(stopwords.words('english'))])
# print("stop words")
# pprint(filtered_tokens)
return filtered_tokens
def stem(token_list_of_lists):
"""
Takes list of lists of tokens as input and stems every token.
Returns a list of lists of stems.
"""
stemmer = PorterStemmer()
stemmed = []
for token_list in token_list_of_lists:
# print(token_list)
stemmed.append([stemmer.stem(token) for token in token_list])
# print("stem")
# pprint(stemmed)
return stemmed
def make_bag_of_words(df, stemmed):
"""
Create bag of words model.
"""
print("\tCreating Bag of Words Model...")
very_pos = set()
slightly_pos = set()
neutral = set()
slightly_neg = set()
very_neg = set()
# Create sets that hold words in headlines categorized as "slightly_neg" or "slightly_pos" or etc
for stems, sentiment in zip(stemmed, df["Sentiment"].tolist()):
if sentiment == -2:
very_neg.update(stems)
elif sentiment == -1:
slightly_neg.update(stems)
elif sentiment == 0:
neutral.update(stems)
elif sentiment == 1:
slightly_pos.update(stems)
elif sentiment == 2:
very_pos.update(stems)
# Count number of words in each headline in each of the sets and encode it as a list of counts for each headline.
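    # For example, a headline whose stems match 0, 1, 2, 1 and 0 words of the
    # very_neg, slightly_neg, neutral, slightly_pos and very_pos vocabularies
    # (respectively) is encoded as [0, 1, 2, 1, 0].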
bag_count = []
for x in stemmed:
x = set(x)
bag_count.append(list((len(x & very_neg), len(x & slightly_neg), len(x & neutral), len(x & slightly_pos), len(x & very_pos))))
df["sentiment_class_count"] = bag_count
return df
def sentiment_preprocessing(df):
"""
Takes a dataframe, removes special characters, tokenizes
the headlines, removes stop-tokens, and stems the remaining tokens.
"""
specials_removed = remove_special_chars(df["Headline"].tolist())
tokenized = tokenize(specials_removed)
tokenized_filtered = remove_stop_words(tokenized)
stemmed = stem(tokenized_filtered)
return df, stemmed
def headlines_balanced_split(dataset, test_size):
"""
Randomly splits dataset into balanced training and test sets.
"""
print("\nSplitting headlines into *balanced* training and test sets...")
# pprint(list(dataset.values))
# pprint(dataset)
# Use sklearn.train_test_split to split all features into x_train and x_test,
# and all expected values into y_train and y_test numpy arrays
x_train, x_test, y_train, y_test = train_test_split(dataset.drop(["Sentiment", "Headline"], axis=1).values,
dataset["Sentiment"].values, test_size=test_size,
random_state=RANDOM_STATE)
x_train = [x[0] for x in x_train]
x_test = [x[0] for x in x_test]
# Combine x_train and y_train (numpy arrays) into a single dataframe, with column labels
train = pd.DataFrame(data=x_train, columns=["very_neg", "slightly_neg", "neutral", "slightly_pos", "very_pos"])
train["Sentiment"] = pd.Series(y_train)
# Do the same for x_test and y_test
test = pd.DataFrame(data=x_test, columns=["very_neg", "slightly_neg", "neutral", "slightly_pos", "very_pos"])
test["Sentiment"] = pd.Series(y_test)
train_prediction = train["Sentiment"].values
test_prediction = test["Sentiment"].values
train_trimmed = train.drop(["Sentiment"], axis=1).values
test_trimmed = test.drop(["Sentiment"], axis=1).values
return train_trimmed, test_trimmed, train_prediction, test_prediction
def split(dataset, test_size, balanced=True):
if balanced:
return headlines_balanced_split(dataset, test_size)
else:
# TODO: write imbalanced split function
return None
# Helpers #
def sliding_window(seq, n=2):
"""
Returns a sliding window (of width n) over data from the iterable. https://stackoverflow.com/a/6822773/8740440
"""
"s -> (s0,s1,...s[n-1]), (s1,s2,...,sn), ..."
it = iter(seq)
result = tuple(islice(it, n))
if len(result) == n:
yield result
for elem in it:
result = result[1:] + (elem,)
yield result
def integrate(avg_daily_sentiment, interval):
"""
Takes a list of average daily sentiment scores and returns a list of definite integral estimations calculated
with Simpson's method. Each integral interval is determined by the `interval` variable. Shows accumulated sentiment.
"""
# Split into sliding window list of lists
sentiment_windows = sliding_window(avg_daily_sentiment, interval)
integral_simpson_est = []
# https://stackoverflow.com/a/13323861/8740440
for x in sentiment_windows:
# Estimate area using composite Simpson's rule. dx indicates the spacing of the data on the x-axis.
integral_simpson_est.append(simps(x, dx=1))
dead_values = list([None] * interval)
dead_values.extend(integral_simpson_est)
dead_values.reverse()
return dead_values
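# Illustrative use (arbitrary sentiment values): each full 3-day window gets a
# Simpson's-rule area estimate, and `interval` None placeholders pad the result
# for days without a complete window, e.g.:
#
#   accumulated_sentiment = integrate([0.1, 0.3, 0.2, 0.4, 0.5], 3)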
def random_undersampling(dataset):
"""
Randomly deleting rows that contain the majority class until the number
in the majority class is equal with the number in the minority class.
"""
minority_set = dataset[dataset.Trend == -1.0]
majority_set = dataset[dataset.Trend == 1.0]
# print(dataset.Trend.value_counts())
# If minority set larger than majority set, swap
if len(minority_set) > len(majority_set):
minority_set, majority_set = majority_set, minority_set
# Downsample majority class
majority_downsampled = resample(majority_set,
replace=False, # sample without replacement
n_samples=len(minority_set), # to match minority class
random_state=123) # reproducible results
# Combine minority class with downsampled majority class
return pd.concat([majority_downsampled, minority_set])
def get_popularity(headlines):
# TODO: Randomize user-agents OR figure out how to handle popups
if "Tweets" not in headlines.columns:
counts = []
driver = webdriver.Chrome()
for index, row in headlines.iterrows():
try:
driver.get(row["URL"])
time.sleep(3)
twitter_containers = driver.find_elements_by_xpath("//li[@class='twitter']")
count = twitter_containers[0].find_elements_by_xpath("//span[@class='count']")
if count[0].text == "":
counts.append(1)
else:
counts.append(int(count[0].text))
except:
counts.append(1) # QUESTION: Should it be None?
headlines["Tweets"] = (pd.Series(counts)).values
print(counts)
return headlines
def balanced_split(dataset, test_size):
"""
Randomly splits dataset into balanced training and test sets.
"""
print("\tSplitting data into *balanced* training and test sets")
# Use sklearn.train_test_split to split original dataset into x_train, y_train, x_test, y_test numpy arrays
x_train, x_test, y_train, y_test = train_test_split(dataset.drop(["Date", "Trend"], axis=1).values, dataset["Trend"].values, test_size=test_size, random_state=RANDOM_STATE)
# Combine x_train and y_train (numpy arrays) into a single dataframe, with column labels
train = pd.DataFrame(data=x_train, columns=dataset.columns[1:-1])
train["Trend"] = pd.Series(y_train)
# Do the same for x_test and y__test
test = pd.DataFrame(data=x_test, columns=dataset.columns[1:-1])
test["Trend"] = pd.Series(y_test)
# Apply random undersampling to both data frames
train_downsampled = random_undersampling(train)
test_downsampled = random_undersampling(test)
train_trend = train_downsampled["Trend"].values
test_trend = test_downsampled["Trend"].values
train_trimmed = train_downsampled.drop(["Trend"], axis=1).values
test_trimmed = test_downsampled.drop(["Trend"], axis=1).values
return train_trimmed, test_trimmed, train_trend, test_trend
def unbalanced_split(dataset, test_size):
"""
Randomly splits dataset into unbalanced training and test sets.
"""
print("\tSplitting data into *unbalanced* training and test sets")
dataset = dataset.drop("Date", axis=1)
output = train_test_split(dataset.drop("Trend", axis=1).values, dataset["Trend"].values, test_size=test_size, random_state=RANDOM_STATE)
return output
# Main #
def calculate_indicators(ohlcv):
"""
Extracts technical indicators from OHLCV data.
"""
print("\tCalculating technical indicators")
ohlcv = ohlcv.drop(["Volume (BTC)", "Weighted Price"], axis=1)
ohlcv.columns = ["Date", "Open", "High", "Low", "Close", "Volume"]
temp_ohlcv = ohlcv.copy()
# Converts ISO 8601 timestamps to UNIX
unix_times = [int((dp.parse(temp_ohlcv.iloc[index]["Date"])).strftime("%s")) for index in range(temp_ohlcv.shape[0])]
temp_ohlcv["Date"] = ( | pd.Series(unix_times) | pandas.Series |
#!/usr/bin/env python
from configuration.settings import Conf
from database.sql_connect import SQLDatabase
from KPIForecaster.forecaster import KPIForecaster
from datetime import datetime
import pandas as pd
import numpy as np
import time
import sys
import os.path
path = sys.argv[0].rsplit("/", 1)[0]
# Create configuration and Database connection and our KPI Forecaster Object
try:
conf = Conf(os.path.join(path,"config.json"))
except:
conf = Conf("config.json")
sql = SQLDatabase(conf)
# Creating out KPI Forecaster Object
KPIForecaster = KPIForecaster(conf)
#df_train = pd.read_csv('FT_CELL_NOV.csv')
# Starting Timer for benchmarking
T_START = time.time()
df_train = sql.getHourlyKPIReportXDays(160)
t0 = time.time()
completion_time = t0-T_START
print(f'[INFO] Total Time to Download Report: {completion_time}')
print("[INFO] Report Loaded")
# Replace UTC string from time
df_train['START_TIME'] = df_train['START_TIME'].str.replace('\(UTC-04:00\)', '')
# Set KPI here
KPI = 'DL_USER_THROUGHPUT_MBPS'
cell_names = df_train.CELL_NAME.unique()
df_train['START_TIME'] = pd.to_datetime(df_train['START_TIME'])
df_train['DATE'] = df_train['START_TIME'].dt.date
df = pd.DataFrame()
appended_data = []
number_of_cells = len(cell_names)
for (i,cell_name) in enumerate(cell_names):
df = df_train[df_train["CELL_NAME"] == cell_name]
df2 = df.groupby(['CELL_NAME','DATE']).mean().pct_change().reset_index()
df2['KEY'] = df2['CELL_NAME'] + df2['DATE'].astype(str)
df3 = df.groupby(['CELL_NAME','DATE']).mean().reset_index()
df3['KEY'] = df3['CELL_NAME'] + df3['DATE'].astype(str)
df3 = df3[['DL_USER_THROUGHPUT_MBPS', 'KEY']].copy()
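    # KEY (cell name + date) identifies one cell-day, so the day-over-day
    # percentage changes in df2 can be joined back to the raw daily means in df3.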
df4 = | pd.merge(df2, df3, on='KEY') | pandas.merge |
import numpy as np
import pandas as pd
from nilearn import image
import json
import pytest
from neuroquery_image_search import _searching, _datasets
def test_image_search(tmp_path, fake_img):
img_path = str(tmp_path / "img.nii.gz")
fake_img.to_filename(img_path)
results_path = tmp_path / "results.json"
_searching.image_search(
f"{img_path} -o {results_path} --n_studies 7 --n_terms 3".split()
)
results = json.loads(results_path.read_text())
study_results = | pd.DataFrame(results["studies"]) | pandas.DataFrame |
import plotly.graph_objects as go
import pandas as pd
import plotly.express as px
from datetime import datetime, timedelta
import requests
import json
import time
def read():
df1 = pd.read_csv("CSV/ETH_BTC_USD_2015-08-09_2020-04-04-CoinDesk.csv")
df1.columns = ['date', 'ETH', 'BTC']
df1.date = pd.to_datetime(df1.date, dayfirst=True)
df1.set_index('date', inplace=True)
EOS = pd.read_csv("ICO_coins/EOS_USD_2018-06-06_2020-04-02-CoinDesk.csv")
IOTA = pd.read_csv("ICO_coins/IOTA_USD_2018-06-06_2020-04-02-CoinDesk.csv")
LSK = pd.read_csv("ICO_coins/LSK_USD_2018-06-06_2020-04-02-CoinDesk.csv")
NEO = | pd.read_csv("ICO_coins/NEO_USD_2018-06-06_2020-04-02-CoinDesk.csv") | pandas.read_csv |
import numpy as np
import pandas as pd
import pandas as pd
'''
@test($$;type(pd))
@alt(表データ|データフレーム)
@alt(カラム|列)
@alt(インデックス|行)
@alt(どの程度|どれだけ)
@alt(欠損値|NaN|未入力値)
@alt(変更する|増やす|減らす)
@alt(保存する|保存する|書き込む)
@alt(抽出する|取り出す)
@alt(読み込む|読む)
@alt(読み込んで|読んで)
@alt(全ての|すべての|全)
@alt(の名前|名)
@alt(丸める|四捨五入する)
@alt(丸めて|四捨五入して)
@prefix(df;データフレーム)
@prefix(ds;[データ列|列])
@prefix(col;[列|カラム])
@prefix(value;[文字列|日付|値])
データ列を使う
データ列をインポートする
'''
df = pd.DataFrame(data={'A': [1, 2, 3], 'B': [2, 1, 0]})
ds = df['A']
col = 'A'
__X__ = ds2 = pd.Series([1, 2, 3, 4])
# Operations on ds
__X__.value_counts()
'''
@X(ds;df[col])
@Y(ds;[dfの|]col)
__Y__の各[データ|]値の出現[|回]数[|を求める]
__Y__の各[データ|値][が|は]何回出現するか見る
'''
__X__.unique()
'''
__Y__の[ユニーク|一意]な[値|要素][|を見る]
'''
__X__.nunique()
'''
@test(df=ds=missing;aList=['A','B'];$$)
__Y__の[ユニーク|一意]な[値の個数|要素数][|を見る]
'''
__X__.astype(object)
'''
@X(df[col]|ds)
@Y(dfのcol|ds)
__Y__をカテゴリデータに変換する
'''
__X__.str.len()
'''
@test(df=ds=missing;$$)
__Y__の文字列長を列として得る
'''
__X__.unique().tolist()
'''
__Y__からユニークな[要素|値]を抽出し、リスト化する
'''
set(ds.unique().tolist()+ds2.unique().tolist())
'''
dsとds2から重複を取り除く
'''
ty = int
df[col] = df[col].astype(ty)
'''
@alt(に代入する|[と|に]する)
dfのcolをtyに変換する
'''
# Binning
n = 2
names = ['A', 'B']
aList = [0, 4, 6]
__X__ = [1, 2, 3, 4, 5, 6, 7, 8]
| pd.cut(__X__, n) | pandas.cut |
# coding: utf-8
from argparse import Namespace, ArgumentParser
from typing import Tuple
import pandas as pd
import skimage.io
import torch
from torch.utils.data import Dataset
import numpy as np
def onehot(size, target):
vec = torch.zeros(size, dtype=torch.float32)
vec[target] = 1.
return vec
class MelanomaDataset(Dataset):
def __init__(self, mode: str, config: Namespace, transform=None, use_external=False, use_pseudolabeled=False):
super().__init__()
self.mode = mode
if mode not in ['train', 'val']:
raise NotImplementedError("Not implemented dataset configuration")
self.image_folder = config.image_folder
self.fold = config.fold
self.df = pd.read_csv(f"{config.data_path}/{mode}_{config.fold}.csv")
print('N samples from original data: {}'.format(self.df.shape[0]))
self.df.loc[:, 'data_t'] = 'competition'
if use_external:
print(f'Will use external data for: {mode}')
self.external_df = pd.read_csv(f"{config.data_path}/external_train_cleaned.csv")
self.external_df.loc[:, 'data_t'] = 'external'
self.df = pd.concat([self.df, self.external_df])
self.external_image_folder = config.external_image_folder
print('N samples from external data: {}'.format(self.external_df.shape[0]))
if use_pseudolabeled:
print(f'Will use pseudolabeled data for {mode}')
self.pseudolabeled_df = pd.read_csv(f"{config.data_path}/labeled_test.csv")
self.pseudolabeled_df.loc[:, 'data_t'] = 'test'
self.df = pd.concat([self.df, self.pseudolabeled_df])
self.test_image_folder = config.test_image_folder
print('N samples from pseudolabeled test data: {}'.format(self.pseudolabeled_df.shape[0]))
print('Total N samples: {}'.format(self.df.shape[0]))
self.transform = transform
self.df.loc[:, 'bin_target'] = (self.df.target >= 0.5).astype(int)
self.targets = self.df.bin_target.values
self.target_counts = self.df.bin_target.value_counts().values
def __len__(self) -> int:
return self.df.shape[0]
def __getitem__(self, index) -> Tuple[torch.Tensor, torch.Tensor]:
row = self.df.iloc[index]
img_id = row.image_name
img_type = row.data_t
if img_type == 'competition':
img_path = f"{self.image_folder}/{img_id}.jpg"
elif img_type == 'external':
img_path = f"{self.external_image_folder}/{img_id}.jpg"
elif img_type == 'test':
img_path = f"{self.test_image_folder}/{img_id}.jpg"
image = skimage.io.imread(img_path)
if self.transform is not None:
image = self.transform(image=image)['image']
image = image.transpose(2, 0, 1)
image = torch.from_numpy(image)
label = row.target
# target = onehot(2, label)
target = torch.tensor(np.expand_dims(label, 0)).float()
return{'features': image, 'target': target}
class MelanomaDatasetTest(Dataset):
def __init__(self, config: Namespace, transform=None):
super().__init__()
self.image_folder = config.test_image_folder
self.df = pd.read_csv(f"{config.data_path}/test.csv")
self.transform = transform
def __len__(self) -> int:
return self.df.shape[0]
def __getitem__(self, index) -> Tuple[torch.Tensor, torch.Tensor]:
row = self.df.iloc[index]
img_id = row.image_name
img_path = f"{self.image_folder}/{img_id}.jpg"
image = skimage.io.imread(img_path)
if self.transform is not None:
image = self.transform(image=image)['image']
image = image.transpose(2, 0, 1)
image = torch.from_numpy(image)
return{'features': image, 'img_id': img_id}
class MelanomaDatasetGeneratedData(Dataset):
def __init__(self, mode: str, config: Namespace, transform=None, use_external=False):
super().__init__()
self.mode = mode
if mode not in ['train', 'val']:
raise NotImplementedError("Not implemented dataset configuration")
self.fold = config.fold
self.df = pd.read_csv(f"{config.data_path}/{config.generated_data_csv}.csv")
self.image_folder = config.generated_data_image_folder
self.df.loc[:, 'data_t'] = 'competition'
if use_external:
print(f'Will use external data for {mode}')
self.external_df = | pd.read_csv(f"{config.data_path}/external_{mode}_{config.fold}.csv") | pandas.read_csv |
from typing import List
from pathlib import Path
import pandas as pd
import matplotlib.pyplot as plt
from settings import SCORING_OUTPUT_FOLDER, CRAWLING_OUTPUT_FOLDER
import logging
logger = logging.getLogger()
def get_fields():
fields_fn = Path(__file__).parent.absolute().joinpath("../data/faculties_to_fields.csv")
return pd.read_csv(fields_fn)['field'].unique()
def convert_faculty_to_fields(programs_df, school: str):
fields_fn = Path(__file__).parent.absolute().joinpath("../data/faculties_to_fields.csv")
faculties_to_fields_df = pd.read_csv(fields_fn)
faculties_to_fields_df = faculties_to_fields_df[faculties_to_fields_df.school == school]
faculties_to_fields_ds = faculties_to_fields_df[["faculty", "field"]].set_index("faculty")
def faculty_to_field(faculty):
if faculty in faculties_to_fields_ds.index:
return faculties_to_fields_ds.loc[faculty][0]
else:
logger.warning(f"Warning: {faculty} was not found in faculty_to_fields")
return 'other'
programs_df["field"] = programs_df["faculty"].apply(lambda x: faculty_to_field(x))
return programs_df
def analyse_courses(schools: List[str], year: int, themes: List[str]):
number_of_courses_per_school = | pd.Series(index=schools, dtype=float) | pandas.Series |
import pandas as pd
import pycountry
import xlrd
pca = pd.read_csv('PCA.csv')
kMeans = pd.read_csv('kmeans.csv')
factor = pd.read_csv('factors.csv')
# Merge dfs
result = pd.merge(pca, kMeans, how='outer', on=['ISO', 'year', 'countries', 'hf_score', 'hf_quartile'])
final = | pd.merge(result, factor, how='outer', on=['ISO', 'year', 'countries', 'hf_score', 'hf_quartile']) | pandas.merge |
#!/usr/bin/env python2
import warnings
warnings.filterwarnings("ignore")
import sys
import glob
import pyproj
import pandas as pd
import numpy as np
import h5py
import argparse
import statsmodels.api as sm
import matplotlib.pyplot as plt
from scipy.spatial import cKDTree
from gdalconst import *
from osgeo import gdal, osr
from scipy.ndimage import map_coordinates
from scipy.stats import binned_statistic_2d
from scipy.spatial import cKDTree
"""
Program for computing statistics between two altimetry data sets
"""
def interp2d(xd, yd, data, xq, yq, **kwargs):
""" Interpolator from raster to point """
xd = np.flipud(xd)
yd = np.flipud(yd)
data = np.flipud(data)
xd = xd[0, :]
yd = yd[:, 0]
nx, ny = xd.size, yd.size
(x_step, y_step) = (xd[1] - xd[0]), (yd[1] - yd[0])
assert (ny, nx) == data.shape
assert (xd[-1] > xd[0]) and (yd[-1] > yd[0])
if np.size(xq) == 1 and np.size(yq) > 1:
        xq = xq * np.ones(yq.size)
    elif np.size(yq) == 1 and np.size(xq) > 1:
        yq = yq * np.ones(xq.size)
xp = (xq - xd[0]) * (nx - 1) / (xd[-1] - xd[0])
yp = (yq - yd[0]) * (ny - 1) / (yd[-1] - yd[0])
coord = np.vstack([yp, xp])
zq = map_coordinates(data, coord, **kwargs)
return zq
def mad_std(x, axis=None):
""" Robust standard deviation (using MAD). """
return 1.4826 * np.nanmedian(np.abs(x - np.nanmedian(x, axis)), axis)
def sigma_filter(x, xmin=-9999, xmax=9999, tol=5, alpha=5):
""" Iterative outlier filter """
# Set default value
tau = 100.0
# Remove data outside selected range
x[x < xmin] = np.nan
x[x > xmax] = np.nan
# Initiate counter
k = 0
# Outlier rejection loop
while tau > tol:
# Compute initial rms
rmse_b = mad_std(x)
# Compute residuals
dh_abs = np.abs(x - np.nanmedian(x))
# Index of outliers
io = dh_abs > alpha * rmse_b
# Compute edited rms
rmse_a = mad_std(x[~io])
# Determine rms reduction
tau = 100.0 * (rmse_b - rmse_a) / rmse_a
# Remove data if true
if tau > tol or k == 0:
# Set outliers to NaN
x[io] = np.nan
# Update counter
k += 1
return x
def geotiffread(ifile):
""" Read Geotiff file """
file = gdal.Open(ifile, GA_ReadOnly)
metaData = file.GetMetadata()
projection = file.GetProjection()
src = osr.SpatialReference()
src.ImportFromWkt(projection)
proj = src.ExportToWkt()
Nx = file.RasterXSize
Ny = file.RasterYSize
trans = file.GetGeoTransform()
dx = trans[1]
dy = trans[5]
Xp = np.arange(Nx)
Yp = np.arange(Ny)
(Xp, Yp) = np.meshgrid(Xp, Yp)
X = trans[0] + (Xp + 0.5) * trans[1] + (Yp + 0.5) * trans[2]
Y = trans[3] + (Xp + 0.5) * trans[4] + (Yp + 0.5) * trans[5]
band = file.GetRasterBand(1)
Z = band.ReadAsArray()
dx = np.abs(dx)
dy = np.abs(dy)
return X, Y, Z, dx, dy, proj
def transform_coord(proj1, proj2, x, y):
"""Transform coordinates from proj1 to proj2 (EPSG num)."""
# Set full EPSG projection strings
proj1 = pyproj.Proj("+init=EPSG:"+proj1)
proj2 = pyproj.Proj("+init=EPSG:"+proj2)
# Convert coordinates
return pyproj.transform(proj1, proj2, x, y)
def wrapTo360(lon):
""" Wrap longitude to 360 deg """
positiveInput = (lon > 0.0)
lon = np.mod(lon, 360.0)
lon[(lon == 0) & positiveInput] = 360.0
return lon
# Wrap longitude to 180 deg
def wrapTo180(lon):
"""Wrap longitude to 180 deg """
q = (lon < -180.0) | (180.0 < lon)
lon[q] = wrapTo360(lon[q] + 180.0) - 180.0
return lon
# Output description of solution
description = ('Program for computing statistics between two altimetry datasets.')
# Define command-line arguments
parser = argparse.ArgumentParser(description=description)
parser.add_argument(
'-r', metavar='fref', dest='fref', type=str, nargs='+',
help='reference file(s)',
required=True)
parser.add_argument(
'-f', metavar='fcomp', dest='fcomp', type=str, nargs='+',
    help='comparison file(s)',
required=True)
parser.add_argument(
'-o', metavar='ofile', dest='ofile', type=str, nargs=1,
help='name of output statistics file',)
parser.add_argument(
'-d', metavar='dxy', dest='dxy', type=float, nargs=1,
help=('spatial resolution of comparison grid (m)'),
default=[50],)
parser.add_argument(
'-p', metavar=('epsg_num'), dest='proj', type=str, nargs=1,
help=('EPSG proj number (AnIS=3031, GrIS=3413)'),
default=['3031'],)
parser.add_argument(
'-v', metavar=('x','y','t','h'), dest='vnames_ref', type=str, nargs=4,
    help=('name of variables in reference file'),
default=['lon','lat','t_year','h_cor'],)
parser.add_argument(
'-u', metavar=('x','y','t','h'), dest='vnames_com', type=str, nargs=4,
    help=('name of variables in comparison file'),
default=['lon','lat','t_year','h_cor'],)
parser.add_argument(
'-s', metavar=('s_min','s_max'), dest='slope', type=float, nargs=2,
help=('min and max slope interval (deg)'),
default=[0.0,1.0],)
parser.add_argument(
'-t', metavar='dt', dest='tspan', type=float, nargs=1,
help=('only compare data with time-span < dt'),
default=[3./12],)
parser.add_argument(
'-n', metavar=('slope_file'), dest='fslope', type=str, nargs=1,
help='name of slope raster file',
default=[None],)
parser.add_argument(
'-i', metavar=('n_ref','n_com'), dest='ncomp', type=int, nargs=2,
help=('sub-sample data using every n:th point'),
default=[1,1],)
# Create parser argument container
args = parser.parse_args()
# Pass arguments
fref = args.fref
fcom = args.fcomp
ofile = args.ofile[0]
dxy = args.dxy[0]
proj = args.proj[0]
vref = args.vnames_ref[:]
cref = args.vnames_com[:]
s_min = args.slope[0]
s_max = args.slope[1]
tspan = args.tspan[0]
fslp = args.fslope[0]
nref = args.ncomp[0]
ncom = args.ncomp[1]
# Initiate statistics
Fref = []
Fcom = []
mean = []
stdv = []
rmse = []
vmin = []
vmax = []
nobs = []
# Check for slope file
if fslp is not None:
# Read slope file
(X, Y, Z) = geotiffread(fslp)[0:3]
# Loop trough reference list
for f_ref in fref:
# Load file
with h5py.File(f_ref, 'r') as fr:
# Load ref. variables
xr = fr[vref[0]][::nref]
yr = fr[vref[1]][::nref]
tr = fr[vref[2]][::nref]
zr = fr[vref[3]][::nref]
# Copy locations
lon_r, lat_r = xr[:], yr[:]
# Transform to wanted coordinate system
(xr, yr) = transform_coord('4326', proj, xr, yr)
# Compute bounding box
xmin, xmax, ymin, ymax = np.min(xr), np.max(xr), np.min(yr), np.max(yr)
# Loop through comparison list
for f_com in fcom:
# Load file
with h5py.File(f_com, 'r') as fr:
# Load com. variables
xc = fr[cref[0]][::ncom]
yc = fr[cref[1]][::ncom]
tc = fr[cref[2]][::ncom]
zc = fr[cref[3]][::ncom]
# Check mean time difference
# if np.abs(tr.mean() - tc.mean()) > 3 * tspan: continue
# Transform to wanted coordinate system
(xc, yc) = transform_coord('4326', proj, xc, yc)
# Index of data
idx = (xc > xmin) & (xc < xmax) & (yc > ymin) & (yc < ymax)
# Cut to same area as reference
xc, yc, zc, tc = xc[idx], yc[idx], zc[idx], tc[idx]
# Construct KD-Tree
tree = cKDTree(list(zip(xc, yc)))
# Output vector
dz = np.ones(len(zr)) * np.nan
xo = np.ones(len(zr)) * np.nan
yo = np.ones(len(zr)) * np.nan
z1 = np.ones(len(zr)) * np.nan
z2 = np.ones(len(zr)) * np.nan
t1 = np.ones(len(zr)) * np.nan
t2 = np.ones(len(zr)) * np.nan
# Loop trough reference points
for kx in range(len(xr)):
# Query KD-Tree
dr, ky = tree.query((xr[kx], yr[kx]), k=1)
# Check if we should compute
if dr > dxy: continue
if np.abs(tr[kx]-tc[ky]) > tspan: continue
# Compute difference
dz[kx] = zr[kx] - zc[ky]
# Save location where we have difference
z1[kx] = zr[kx]
z2[kx] = zc[ky]
xo[kx] = lon_r[kx]
yo[kx] = lat_r[kx]
t1[kx] = tr[kx]
t2[kx] = tc[ky]
# If no data skip
if np.all(np.isnan(dz)):
continue
# Light filtering of outliers
dz = sigma_filter(dz)
# Check if we are binning by slope
if fslp:
# Interpolate slope to data
slp = interp2d(X, Y, Z, xr, yr, order=1)
# Cull using surface slope
            dz[(slp < s_min) | (slp > s_max)] = np.nan
else:
# No slope provided
slp = np.ones(len(zr)) * 9999
# Find NaN-values
inan = ~np.isnan(dz)
# Save to csv file
data = {'lat' : np.around(yo[inan],4),
'lon' : np.around(xo[inan],4),
't_ref' : np.around(t1[inan],3),
't_com' : np.around(t2[inan],3),
'v_ref' : np.around(z1[inan],3),
'v_com' : np.around(z2[inan],3),
'v_diff': np.around(dz[inan],3),
'dt' : np.around(t1[inan]-t2[inan],3),
'slope' : np.around(slp[inan],3),}
# Get name only and not path to files
f_ref_i = f_ref[f_ref.rfind('/') + 1:]
f_com_i = f_com[f_com.rfind('/') + 1:]
# Create data frame
df = pd.DataFrame(data, columns=['lat', 'lon', 't_ref', 't_com', 'v_ref', 'v_com', 'v_diff', 'dt', 'slope'])
# Save to csv
df.to_csv(f_ref_i+'_'+f_com_i+'.csv', sep=',', index=False)
# Compute statistics
avg = np.around(np.nanmean(dz),3)
std = np.around(np.nanstd(dz),3)
rms = np.around(np.sqrt(avg**2 + std**2),3)
min = np.around(np.nanmin(dz),3)
max = np.around(np.nanmax(dz),3)
nob = len(dz[~np.isnan(dz)])
# Save all the stats
Fref.append(f_ref_i)
Fcom.append(f_com_i)
mean.append(avg)
stdv.append(std)
rmse.append(rms)
vmin.append(min)
vmax.append(max)
nobs.append(nob)
# Print statistics to screen
print(('Ref:' ,f_ref_i, 'Comp:', f_com_i, 'Mean:', avg, 'Std:', std, 'RMS:', rms, 'Nobs:', nob))
# Plot data if wanted
if 0:
plt.figure()
plt.hist(dz[~np.isnan(dz)], 50)
plt.show()
# Compute weighted averages
m = np.asarray(mean)
n = np.asarray(nobs)
s = np.asarray(stdv)
# Compute weights
w = n / (s * s * np.sum(n))
# Weighted average and standard deviation
aw = np.sum(w * m)/np.sum(w)
sw = np.sqrt(1 / np.sum(w))
# Print weighted statistics
print('#############################################################')
print(('| Weighted Statistics |', 'Wmean:', np.around(aw, 2), 'Wstd:', np.around(sw, 2), \
'WRMSE:', np.around(np.sqrt(aw**2 + sw**2), 2), '|'))
print('#############################################################')
# Create data container
raw_data = {'Reference' : Fref,
'Comparison' : Fcom,
'Mean' : mean,
'Std.dev' : stdv,
'RMSE' : rmse,
'Min' : vmin,
'Max' : vmax,
'Nobs' : nobs,}
# Create data frame
df = | pd.DataFrame(raw_data, columns = ['Reference','Comparison','Mean','Std.dev','RMSE','Min','Max','Nobs']) | pandas.DataFrame |
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.3'
# jupytext_version: 0.8.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# language_info:
# codemirror_mode:
# name: ipython
# version: 3
# file_extension: .py
# mimetype: text/x-python
# name: python
# nbconvert_exporter: python
# pygments_lexer: ipython3
# version: 3.6.5
# ---
# +
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.externals import joblib
# -
df = pd.read_csv('ChanDarren_RaiTaran_Lab2a.csv')
df.head()
male = pd.get_dummies(df['Sex'],drop_first=True)
merged = pd.concat([df,male],axis='columns')
merged = merged.drop('Sex',axis='columns')
classes = pd.get_dummies(df['Pclass'],drop_first=True)
classes.columns = ['Class 2', 'Class 3']
merged2 = | pd.concat([merged,classes],axis='columns') | pandas.concat |
#!/usr/bin/python
# Setup of libraries
import os
import random
import pandas as pd
from datetime import datetime
from dotenv import load_dotenv
# Own library
import database
def convertPriceToFloat(x):
"""
This is a utility function to clean prices
from style characters.
"""
# Removing non-numeric characters
x = str(x) \
.replace('£', '') \
.replace('na', '0') \
.replace(',', '')
# Sometimes there is more than 1 item,
# so it takes only the first one
spc = x[1:].find(' ')
if spc != -1:
    x = x[0:spc + 1]
return float(x)
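# Quick sanity checks (illustrative price strings, not rows from the Amazon sample):
assert convertPriceToFloat('£1,234.56') == 1234.56
assert convertPriceToFloat('£3.42 £3.42') == 3.42  # only the first listed price is kept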
class Inventory:
"""
This class is built to populate the Inventory table
with simulated data got from public datasets.
"""
def __init__(self):
random.seed(5)
load_dotenv()
# Setting maximun number of items by company
self.max_inventory_item_num = int(os.getenv('MAX_INVENTORY_ITEM_NUM'))
# Loading and utility datasets
self.companies = pd.read_csv('csv/company.csv')
self.e_commerce_df = pd.read_csv('third_parties/amazon-e-commerce-sample/data.csv')
# Converting prices from string to float and
# ignoring records that have price equal to zero
self.e_commerce_df['price'] = self.e_commerce_df.price.apply(convertPriceToFloat).fillna(0)
self.e_commerce_df = self.e_commerce_df[self.e_commerce_df.price != 0]
def generate(self):
inventories = [] # Set for items for each company
counter = 0 # To control unique id
for idx in self.companies.index:
dt = datetime.isoformat(datetime.now())
# Getting a random sample of inventory items
sample = self.e_commerce_df.sample(random.randint(1, self.max_inventory_item_num))[['product_name', 'price']]
sample.rename(columns={'product_name': 'name'}, inplace=True)
sample['id'] = [counter + x for x in range(len(sample))]
sample['company'] = self.companies.loc[idx].id
sample['inventoryModel'] = 'default'
sample['uom'] = 'unit'
sample['isArchived'] = False
sample['isDeleted'] = False
sample['updatedAt'] = dt
sample['createdAt'] = dt
counter += len(sample)
inventories.append(sample)
# Joining in a unique dataset
inventory = | pd.concat(inventories) | pandas.concat |
#! /usr/bin/python
'''
This script downloads the list of votes held during 2018, 2019 and 2020. The list is saved in CSV format.
'''
import pandas as pd
import requests as re
import xmltodict
url_base = 'http://opendata.camara.cl/camaradiputados/WServices/WSLegislativo.asmx/retornarVotacionesXAnno?prmAnno='
res18 = re.get(url_base+'2018')
res19 = re.get(url_base+'2019')
res20 = re.get(url_base+'2020')
lista_v = [r.text for r in [res18,res19,res20]]
def parse_votacion(xml):
xmlDict = xmltodict.parse(xml)
data = []
for i, votacion in enumerate(xmlDict['VotacionesColeccion']['Votacion']):
# Column names (taken from the first record)
if i == 0:
cols = [key for key in votacion]
data.append([votacion[d] for d in votacion])
df = | pd.DataFrame(data) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 9 10:54:32 2019
@author: nmei
"""
import os
from glob import glob
import pandas as pd
import numpy as np
import seaborn as sns
sns.set_style('whitegrid')
sns.set_context('poster')
import statsmodels.api as sm
from statsmodels.formula.api import ols
from shutil import copyfile
copyfile('../../../utils.py','utils.py')
import utils
from matplotlib.ticker import FormatStrFormatter
experiment = 'metasema'
here = 'encoding_model_15_ROIs'
working_dir = '../../../../results/{}/RP/{}'.format(experiment,here)
figure_dir = '../../../../figures/{}/RP/{}'.format(experiment,here)
if not os.path.exists(figure_dir):
os.mkdir(figure_dir)
working_data = glob(os.path.join(working_dir,'*.csv'))
df = pd.concat([pd.read_csv(f) for f in working_data]).groupby(
['sub_name',
'roi_name',
'model_name',
'condition',
]).mean().reset_index()
N = len(pd.unique(df['sub_name']))
alpha = 100
feature_dict = {'vgg19':'image2vec',
'densenet121':'image2vec',
'mobilenetv2_1':'image2vec',
'Fast_Text':'Word2vec',
'Glove':'Word2vec',
'Word2Vec':'Word2vec',}
df['feature_type'] = df['model_name'].map(feature_dict)
hue_order = ['vgg19', 'densenet121', 'mobilenetv2_1',
'Fast_Text', 'Glove', 'Word2Vec',
]
df = pd.concat([df[df['model_name'] == model_name] for model_name in hue_order])
temp = dict(
F = [],
df_numerator = [],
df_denominator = [],
p = [],
feature_type = [],
condition = [],
roi_name = [],
)
for (feat,condition,roi),df_sub in df.groupby(['feature_type','condition','roi_name']):
anova = ols('mean_variance ~ model_name',data = df_sub).fit()
aov_table = sm.stats.anova_lm(anova,typ=2)
print(aov_table)
temp['F'].append(aov_table['F']['model_name'])
temp['df_numerator'].append(aov_table['df']['model_name'])
temp['df_denominator'].append(aov_table['df']['Residual'])
temp['p'].append(aov_table['PR(>F)']['model_name'])
temp['feature_type'].append(feat)
temp['condition'].append(condition)
temp['roi_name'].append(roi)
anova_results = pd.DataFrame(temp)
temp = []
for condition,df_sub in anova_results.groupby('condition'):
df_sub = df_sub.sort_values('p')
converter = utils.MCPConverter(pvals = df_sub['p'].values)
d = converter.adjust_many()
df_sub['p_corrected'] = d['bonferroni'].values
temp.append(df_sub)
anova_results = pd.concat(temp)
anova_results['stars'] = anova_results['p_corrected'].apply(utils.stars)
anova_results = anova_results.sort_values(['roi_name','condition','feature_type'])
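# Cross-check of the correction above (assumes statsmodels.stats.multitest is
# available in this statsmodels install). Note the loop above corrects within each
# condition separately, while this call adjusts the whole set of p-values at once.
from statsmodels.stats.multitest import multipletests
_, p_bonf_all, _, _ = multipletests(anova_results['p'].values, method='bonferroni')
print('global Bonferroni-corrected p-values:', np.round(p_bonf_all, 4))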
g = sns.catplot(x = 'roi_name',
y = 'mean_variance',
hue = 'model_name',
row = 'condition',
data = df,
kind = 'bar',
aspect = 4,
)
g._legend.set_title('Encoding Models')
(g.set_axis_labels("","Mean Variance Explained")
.set_titles("{row_name}")
.set(ylim=(0,0.05)))
g.axes[-1][0].set_xticklabels(g.axes[-1][0].xaxis.get_majorticklabels(),
rotation = 45,
horizontalalignment = 'center')
g.axes[0][0].set(title = 'Read')
g.axes[1][0].set(title = 'Think')
k = {'image2vec':-0.25,
'Word2vec':0.175}
j = 0.15
l = 0.0005
height = 0.045
for ax,condition in zip(g.axes.flatten(),['read','reenact']):
ax.yaxis.set_major_formatter(FormatStrFormatter('%.3f'))
df_sub = anova_results[anova_results['condition'] == condition]
for ii,((roi),df_star_sub) in enumerate(df_sub.groupby(['roi_name',])):
for model,df_star_sub_model in df_star_sub.groupby(['feature_type']):
ax.hlines(height,ii+k[model]-j,ii+k[model]+j)
ax.vlines(ii+k[model]-j,height+l,height-l)
ax.vlines(ii+k[model]+j,height+l,height-l)
ax.annotate(df_star_sub_model['stars'].values[0],xy=(ii+k[model]-0.25,height + 0.002))
g.fig.suptitle("Comparison between Computer Vision and Word Embedding Models\nAverage Variance Explained\nN = {}, {} (alpha = {})\nBonforroni corrected for multiple one-way ANOVAs".format(
N,"Ridge Regression",alpha),
y = 1.15)
g.savefig(os.path.join(figure_dir,
'mean variance explained.png'),
dpi = 400,
bbox_inches = 'tight')
results = []
for image in ['densenet121','mobilenetv2_1','vgg19',]:
for word in ['Fast_Text', 'Glove', 'Word2Vec',]:
df_image = df[df['model_name'] == image]
df_word = df[df['model_name'] == word]
df_image = df_image.sort_values(['sub_name','roi_name','condition'])
df_word = df_word.sort_values(['sub_name','roi_name','condition'])
MV_diff = df_image['mean_variance'].values - df_word['mean_variance'].values
BV_diff = df_image['best_variance'].values - df_word['best_variance'].values
df_diff = df_image.copy()
df_diff['mean_variance'] = MV_diff
df_diff['best_variance'] = BV_diff
df_diff['model_name'] = f"{image} - {word}"
results.append(df_diff)
df_diff = pd.concat(results)
g = sns.catplot(x = 'roi_name',
y = 'mean_variance',
hue = 'model_name',
row = 'condition',
data = df_diff,
kind = 'bar',
aspect = 4,
)
(g.set_axis_labels("","$\Delta$ Mean Variance Explained")
.set_titles("{row_name}"))
g.axes[-1][0].set_xticklabels(g.axes[-1][0].xaxis.get_majorticklabels(),
rotation = 45,
horizontalalignment = 'center')
g.axes[0][0].set(title = 'Read')
g.axes[1][0].set(title = 'Think')
g.fig.suptitle('Difference of Computer Vision Models and Word Embedding Models\nBonferroni Corrected for multiple t-tests',
y=1.05)
g.savefig(os.path.join(figure_dir,
'difference of variance explained by image2vec and word2vec.png'),
dpi = 400,
bbox_inches = 'tight')
df_condition = dict(
roi = [],
condition = [],
ps_mean = [],
ps_std = [],
diff_mean = [],
diff_std = [],)
for (roi,condition),df_sub in df.groupby(['roi_name','condition']):
df_sub_img = df_sub[df_sub['feature_type'] == 'image2vec'].groupby(['sub_name']).mean().reset_index()
df_sub_word = df_sub[df_sub['feature_type'] == 'Word2vec'].groupby(['sub_name']).mean().reset_index()
a = df_sub_img['mean_variance'].values
b = df_sub_word['mean_variance'].values
ps = utils.resample_ttest_2sample(a,b,
n_ps = 100,
n_permutation = int(1e4),
one_tail = False,
match_sample_size = True)
df_condition['roi'].append(roi)
df_condition['condition'].append(condition)
df_condition['ps_mean'].append(ps.mean())
df_condition['ps_std'].append(ps.std())
df_condition['diff_mean'].append(np.mean(a - b))
df_condition['diff_std'].append(np.std(a - b))
df_condition = pd.DataFrame(df_condition)
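# Minimal sketch of the resampling idea behind utils.resample_ttest_2sample (this
# helper is hypothetical and far simpler than the project utility; it is not called
# anywhere in the pipeline).
def _perm_test_two_sample(a, b, n_permutation=10000, seed=0):
    rng = np.random.RandomState(seed)
    observed = np.mean(a) - np.mean(b)
    pooled = np.concatenate([a, b])
    count = 0
    for _ in range(n_permutation):
        rng.shuffle(pooled)
        diff = np.mean(pooled[:len(a)]) - np.mean(pooled[len(a):])
        if abs(diff) >= abs(observed):
            count += 1
    return count / n_permutation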
temp = []
for condition, df_sub in df_condition.groupby(['condition']):
df_sub = df_sub.sort_values(['ps_mean'])
converter = utils.MCPConverter(pvals = df_sub['ps_mean'].values)
d = converter.adjust_many()
df_sub['ps_corrected'] = d['bonferroni'].values
temp.append(df_sub)
df_condition = pd.concat(temp)
df_diff_diff = dict(
roi = [],
ps_mean = [],
ps_std = [],
diff_mean = [],
diff_std = [],)
for roi,df_sub in df_diff.groupby(['roi_name']):
df_read = df_sub[df_sub['condition'] == 'read']#.sort_values(['sub'])
df_reenact = df_sub[df_sub['condition'] == 'reenact']#.sort_values(['sub'])
df_read = df_read.sort_values(['sub_name','roi_name','feature_type']).reset_index()
df_reenact = df_reenact.sort_values(['sub_name','roi_name','feature_type']).reset_index()
a = df_read['mean_variance'].values
b = df_reenact['mean_variance'].values
ps = utils.resample_ttest_2sample(a,b,
n_ps = 100,
n_permutation = int(1e4),
one_tail = False,
match_sample_size = True)
df_diff_diff['roi'].append(roi)
df_diff_diff['ps_mean'].append(ps.mean())
df_diff_diff['ps_std'].append(ps.std())
df_diff_diff['diff_mean'].append(np.mean(np.abs(a - b)))
df_diff_diff['diff_std'].append(np.std(np.abs(a - b)))
df_diff_diff = | pd.DataFrame(df_diff_diff) | pandas.DataFrame |
import numpy as np
import pandas as pd
import unittest
from featurewiz._timeseries import _percent_rank, pctrank, apply
class TimeSeriesTestCase(unittest.TestCase):
def test__percent_rank(self):
self.assertEqual(100, _percent_rank([1, 2, 3, 4, 5, 6, 7, 8]))
self.assertEqual(0, _percent_rank([1, 2, 3, 4, 5, 6, 7, 0]))
self.assertEqual(4/7*100, _percent_rank([1, 2, 3, 4, 5, 6, 7, 4]))
self.assertTrue(np.isclose(np.nan, _percent_rank([1, 2, 3, 4, 5, 6, 7, np.nan]), equal_nan=True))
def test_pctrank_category(self):
a = pd.Series([1, 2, 3, 4, 5, 6, 7, 2, 5])
cat = pd.Series([1, 0, 1, 0, 1, 0, 1, 0, 1])
res = pctrank(a, 4, category=cat)
self.assertTrue(np.allclose(np.array([np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, 100, 1/3*100, 2/3*100]),
res.values, equal_nan=True))
def test_pctrank(self):
res = pctrank(pd.Series([1, 2, 3, 4, 5, 6, 7, 0]), 3)
self.assertTrue(np.allclose(np.array([np.nan, np.nan, np.nan, 100, 100, 100, 100, 0]), res.values, equal_nan=True))
def test_pctrank_categorize(self):
self.assertTrue(np.allclose(np.array([np.nan, np.nan, np.nan, 2]),
pctrank(pd.Series([1, 2, 3, 4]), 3, categorize_by=3),
equal_nan=True))
self.assertTrue(np.allclose(np.array([np.nan, np.nan, np.nan, 1]),
pctrank(pd.Series([1, 2, 3, 2]), 3, categorize_by=3),
equal_nan=True))
self.assertTrue(np.allclose(np.array([np.nan, np.nan, np.nan, 0]),
pctrank(pd.Series([1, 2, 3, 0]), 3, categorize_by=3),
equal_nan=True))
self.assertTrue(np.allclose(np.array([np.nan, np.nan, np.nan, 0]),
pctrank(pd.Series([1, 2, 3, 0]), 3, categorize_by=[0, 33, 100]),
equal_nan=True))
self.assertTrue(np.allclose(np.array([np.nan, np.nan, np.nan, 1]),
pctrank(pd.Series([1, 2, 3, 2.5]), 3, categorize_by=[0, 33, 100]),
equal_nan=True))
def test_apply(self):
a = | pd.Series([1, 2, 3, 4, 5, 6, 7]) | pandas.Series |
# -*- coding: utf-8 -*-
"""Functions for downloading and analysing data on MPs."""
# Imports ---------------------------------------------------------------------
import numpy as np
import pandas as pd
from . import combine
from . import constants
from . import core
from . import elections
from . import filter
from . import members
from . import utils
# Raw MPs queries -------------------------------------------------------------
def fetch_mps_raw():
"""Fetch key details for all MPs."""
return members.fetch_members_raw(
house=constants.PDP_ID_HOUSE_OF_COMMONS)
def fetch_commons_memberships_raw():
"""Fetch Commons memberships for all MPs."""
commons_memberships_query = """
PREFIX : <https://id.parliament.uk/schema/>
PREFIX d: <https://id.parliament.uk/>
SELECT DISTINCT
?person_id
?mnis_id
?given_name
?family_name
?display_name
?constituency_id
?constituency_name
?constituency_ons_id
?seat_incumbency_id
?seat_incumbency_start_date
?seat_incumbency_end_date
WHERE {{
# House constraint for the House of Commons
BIND(d:{0} AS ?house)
?person_id :memberMnisId ?mnis_id;
:personGivenName ?given_name ;
:personFamilyName ?family_name ;
<http://example.com/F31CBD81AD8343898B49DC65743F0BDF> ?display_name ;
:memberHasParliamentaryIncumbency ?seat_incumbency_id .
?seat_incumbency_id a :SeatIncumbency ;
:seatIncumbencyHasHouseSeat ?seat ;
:parliamentaryIncumbencyStartDate ?seat_incumbency_start_date .
OPTIONAL {{ ?seat_incumbency_id :parliamentaryIncumbencyEndDate ?seat_incumbency_end_date . }}
?seat :houseSeatHasHouse ?house ;
:houseSeatHasConstituencyGroup ?constituency_id .
?constituency_id :constituencyGroupName ?constituency_name ;
:constituencyGroupStartDate ?constituencyStartDate .
OPTIONAL {{ ?constituency_id :constituencyGroupOnsCode ?constituency_ons_id . }}
}}
""".format(constants.PDP_ID_HOUSE_OF_COMMONS)
return core.sparql_select(commons_memberships_query)
def fetch_mps_party_memberships_raw():
"""Fetch party memberships for all MPs."""
return members.fetch_party_memberships_raw(
house=constants.PDP_ID_HOUSE_OF_COMMONS)
def fetch_mps_government_roles_raw():
"""Fetch government roles for all MPs."""
return members.fetch_government_roles_raw(
house=constants.PDP_ID_HOUSE_OF_COMMONS)
def fetch_mps_opposition_roles_raw():
"""Fetch opposition roles for all MPs."""
return members.fetch_opposition_roles_raw(
house=constants.PDP_ID_HOUSE_OF_COMMONS)
def fetch_mps_committee_memberships_raw():
"""Fetch committee memberships for all MPs."""
return members.fetch_committee_memberships_raw(
house=constants.PDP_ID_HOUSE_OF_COMMONS)
# Main MPs API ----------------------------------------------------------------
def fetch_mps(from_date=np.NaN,
to_date=np.NaN,
on_date=np.NaN):
"""Fetch key details for all MPs.
fetch_mps fetches data from the data platform showing key details about
each MP, with one row per MP.
The from_date and to_date arguments can be used to filter the MPs returned
based on the dates of their Commons memberships. The on_date argument is a
convenience that sets the from_date and to_date to the same given date. The
on_date has priority: if the on_date is set, the from_date and to_date are
ignored.
The filtering is inclusive: an MP is returned if any part of one of their
Commons memberships falls within the period specified with the from and to
dates.
Parameters
----------
from_date : str or date or NaN, optional
A string or datetime.date representing a date. If a string is used it
should specify the date in ISO 8601 date format e.g. '2000-12-31'. The
default value is numpy.NaN, which means no records are excluded on the
basis of the from_date.
to_date : str or date or NaN, optional
A string or datetime.date representing a date. If a string is used it
should specify the date in ISO 8601 date format e.g. '2000-12-31'. The
default value is np.NaN, which means no records are excluded on the
basis of the to_date.
on_date : str or date or NaN, optional
A string or datetime.date representing a date. If a string is used it
should specify the date in ISO 8601 date format e.g. '2000-12-31'. The
default value is np.NaN, which means no records are excluded on the
basis of the on_date.
Returns
-------
out : DataFrame
A pandas dataframe of key details for each MP, with one row per MP.
"""
# Set from_date and to_date to on_date if set
if not pd.isna(on_date):
from_date = on_date
to_date = on_date
# Fetch key details
mps = fetch_mps_raw()
# Filter based on membership dates if requested
if not pd.isna(from_date) or not pd.isna(to_date):
commons_memberships = fetch_commons_memberships()
matching_memberships = filter.filter_dates(
commons_memberships,
start_col='seat_incumbency_start_date',
end_col='seat_incumbency_end_date',
from_date=from_date,
to_date=to_date)
mps = mps[mps['person_id'].isin(matching_memberships['person_id'])]
# Tidy up and return
mps.sort_values(
by=['family_name'],
inplace=True)
mps.reset_index(drop=True, inplace=True)
return mps
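def _example_fetch_mps():
    """Illustrative usage only (not part of the module API): fetch the MPs serving
    on polling day 2019. The call hits the live Parliament data platform, so it is
    wrapped in a function rather than executed at import time."""
    mps_on_polling_day = fetch_mps(on_date='2019-12-12')
    return mps_on_polling_day.head()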
def fetch_commons_memberships(from_date=np.NaN,
to_date=np.NaN,
on_date=np.NaN):
"""Fetch Commons memberships for all MPs.
fetch_commons_memberships fetches data from the data platform showing
Commons memberships for each MP. The memberships are processed to impose
consistent rules on the start and end dates for memberships.
The from_date and to_date arguments can be used to filter the memberships
returned. The on_date argument is a convenience that sets the from_date and
to_date to the same given date. The on_date has priority: if the on_date is
set, the from_date and to_date are ignored.
The filtering is inclusive: a membership is returned if any part
of it falls within the period specified with the from and to dates.
Note that a membership with a NaN end date is still open.
Parameters
----------
from_date : str or date or NaN, optional
A string or datetime.date representing a date. If a string is used it
should specify the date in ISO 8601 date format e.g. '2000-12-31'. The
default value is numpy.NaN, which means no records are excluded on the
basis of the from_date.
to_date : str or date or NaN, optional
A string or datetime.date representing a date. If a string is used it
should specify the date in ISO 8601 date format e.g. '2000-12-31'. The
default value is np.NaN, which means no records are excluded on the
basis of the to_date.
on_date : str or date or NaN, optional
A string or datetime.date representing a date. If a string is used it
should specify the date in ISO 8601 date format e.g. '2000-12-31'. The
default value is np.NaN, which means no records are excluded on the
basis of the on_date.
Returns
-------
out : DataFrame
A pandas dataframe of Commons memberships for each MP, with one row
per Commons membership.
"""
# Set from_date and to_date to on_date if set
if not pd.isna(on_date):
from_date = on_date
to_date = on_date
# Fetch the Commons memberships
commons_memberships = fetch_commons_memberships_raw()
# Get elections and fix the end dates of memberships
end_dates = commons_memberships['seat_incumbency_end_date'].values
general_elections = elections.get_general_elections().values
general_elections_count = len(general_elections)
# If the end date for a membership falls after dissolution adjust it
for i in range(len(end_dates)):
date = end_dates[i]
if pd.isna(date): continue
for j in range(general_elections_count):
dissolution = general_elections[j, 1]
election = general_elections[j, 2]
if date > dissolution and date <= election:
end_dates[i] = dissolution
continue
commons_memberships['seat_incumbency_end_date'] = end_dates
# Filter on dates if requested
if not pd.isna(from_date) or not pd.isna(to_date):
commons_memberships = filter.filter_dates(
commons_memberships,
start_col='seat_incumbency_start_date',
end_col='seat_incumbency_end_date',
from_date=from_date,
to_date=to_date)
# Tidy up and return
commons_memberships.sort_values(
by=['family_name',
'seat_incumbency_start_date'],
inplace=True)
commons_memberships.reset_index(drop=True, inplace=True)
return commons_memberships
def fetch_mps_party_memberships(from_date=np.NaN,
to_date=np.NaN,
on_date=np.NaN,
while_mp=True,
collapse=False):
"""Fetch party memberships for all MPs.
fetch_mps_party_memberships fetches data from the data platform showing
party memberships for each MP.
The from_date and to_date arguments can be used to filter the memberships
returned. The on_date argument is a convenience that sets the from_date and
to_date to the same given date. The on_date has priority: if the on_date is
set, the from_date and to_date are ignored.
The while_mp argument can be used to filter the memberships to include only
those that occurred during the period when each individual was an MP.
The filtering is inclusive: a membership is returned if any part
of it falls within the period specified with the from and to dates.
The collapse argument controls whether memberships are combined so that
there is only one row for each period of continuous membership within the
same party. Combining the memberships in this way means that party
membership ids from the data platform are not included in the dataframe
returned.
Note that a membership with a NaN end date is still open.
Parameters
----------
from_date : str or date or NaN, optional
A string or datetime.date representing a date. If a string is used it
should specify the date in ISO 8601 date format e.g. '2000-12-31'. The
default value is numpy.NaN, which means no records are excluded on the
basis of the from_date.
to_date : str or date or NaN, optional
A string or datetime.date representing a date. If a string is used it
should specify the date in ISO 8601 date format e.g. '2000-12-31'. The
default value is np.NaN, which means no records are excluded on the
basis of the to_date.
on_date : str or date or NaN, optional
A string or datetime.date representing a date. If a string is used it
should specify the date in ISO 8601 date format e.g. '2000-12-31'. The
default value is np.NaN, which means no records are excluded on the
basis of the on_date.
while_mp : bool, optional
A boolean indicating whether to filter the party memberships to include
only those memberships that were held while each individual was serving
as an MP. The default value is True.
collapse: bool, optional
Determines whether to collapse consecutive memberships within the same
party into a single period of continuous party membership. Setting this
to True means that party membership ids are not returned in the
dataframe. The default value is False.
Returns
-------
out : DataFrame
A pandas dataframe of party memberships for each MP, with one row per
party membership. The memberships are processed and merged so that
there is only one party membership for a period of continuous
membership within the same party. A membership with a NaN end date is
still open.
"""
# Set from_date and to_date to on_date if set
if not pd.isna(on_date):
from_date = on_date
to_date = on_date
# Fetch the party memberships
party_memberships = fetch_mps_party_memberships_raw()
# Filter on dates if requested
if not pd.isna(from_date) or not pd.isna(to_date):
party_memberships = filter.filter_dates(
party_memberships,
start_col='party_membership_start_date',
end_col='party_membership_end_date',
from_date=from_date,
to_date=to_date)
# Filter on Commons memberships if requested
if while_mp:
commons_memberships = fetch_commons_memberships()
party_memberships = filter.filter_memberships(
tm=party_memberships,
fm=commons_memberships,
tm_id_col='party_membership_id',
tm_start_col='party_membership_start_date',
tm_end_col='party_membership_end_date',
fm_start_col='seat_incumbency_start_date',
fm_end_col='seat_incumbency_end_date',
join_col='person_id')
# Collapse consecutive memberships and return if requested
if collapse:
return combine.combine_party_memberships(party_memberships)
# Otherwise tidy up and return
party_memberships.sort_values(
by=['family_name',
'party_membership_start_date'],
inplace=True)
party_memberships.reset_index(drop=True, inplace=True)
return party_memberships
def fetch_mps_government_roles(from_date=np.NaN,
to_date=np.NaN,
on_date=np.NaN,
while_mp=True):
"""Fetch government roles for all MPs.
fetch_mps_government_roles fetches data from the data platform showing
government roles for each MP.
The from_date and to_date arguments can be used to filter the roles
returned. The on_date argument is a convenience that sets the from_date and
to_date to the same given date. The on_date has priority: if the on_date is
set, the from_date and to_date are ignored.
The while_mp argument can be used to filter the roles to include only those
that occurred during the period when each individual was an MP.
The filtering is inclusive: a role is returned if any part of it falls
within the period specified with the from and to dates.
Note that a role with a NaN end date is still open.
Parameters
----------
from_date : str or date or NaN, optional
A string or datetime.date representing a date. If a string is used it
should specify the date in ISO 8601 date format e.g. '2000-12-31'. The
default value is numpy.NaN, which means no records are excluded on the
basis of the from_date.
to_date : str or date or NaN, optional
A string or datetime.date representing a date. If a string is used it
should specify the date in ISO 8601 date format e.g. '2000-12-31'. The
default value is np.NaN, which means no records are excluded on the
basis of the to_date.
on_date : str or date or NaN, optional
A string or datetime.date representing a date. If a string is used it
should specify the date in ISO 8601 date format e.g. '2000-12-31'. The
default value is np.NaN, which means no records are excluded on the
basis of the on_date.
while_mp : bool, optional
A boolean indicating whether to filter the government roles to include
only those roles that were held while each individual was serving as an
MP. The default value is True.
Returns
-------
out : DataFrame
A dataframe of government roles for each MP, with one row per role.
"""
# Set from_date and to_date to on_date if set
if not pd.isna(on_date):
from_date = on_date
to_date = on_date
# Fetch the government roles
government_roles = fetch_mps_government_roles_raw()
# Filter on dates if requested
if not pd.isna(from_date) or not | pd.isna(to_date) | pandas.isna |
"""
Mar 2021
@author: Natalia
"""
import networkx as nx
from networkx.classes import graph
from .utils import _normalize_corr,_build_network, create_normalize_graph
from networkx.algorithms import community as nx_comm
import pandas as pd
import numpy as np
import community
from random import sample
from typing import Tuple, Union,Dict,Any,List
def get_weight(graph:nx.Graph):
weights=np.asarray(list(nx.get_edge_attributes(graph, 'weight').values()))
return weights
def small_world_index(G:nx.Graph,
n:int=0,
p:float=0.0,
cc:float=0.0,
l:float=0.0,
n_iter:int = 20)->float:
'''
Returns small-world index calculated as proposed by Humphries & Gurney, 2008.
Parameters
----------
G : netowrk graph (obtained from normalized data)
Returns
-------
small_world_index : small-world index.
'''
# Basic metrics from G
if n==0:
n = G.number_of_nodes()
if p==0.0:
p = nx.density(G)
if cc==0.0:
cc = nx.average_clustering(G)
if l==0.0:
l = nx.average_shortest_path_length(G)
small_world = []
for i in range(0,int(n_iter)):
# Build Erdos-Renyi network with the same number of nodes and connection probability
G_rand = nx.erdos_renyi_graph(n = n, p = p, seed=None, directed=False)
#Obtain clustering coefficients
cc_rand = nx.average_clustering(G_rand)
#Obtain shortest path
l_rand = nx.average_shortest_path_length(G_rand)
#Index numerator and denominator
num = cc/cc_rand
den = l/l_rand
# small world index
small_world.append(num/den)
return np.mean(small_world)
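def _demo_small_world_index(seed=42):
    # Minimal sketch (assumption: the Erdos-Renyi reference graphs generated inside
    # small_world_index stay connected, which the routine requires). A Watts-Strogatz
    # graph keeps high clustering with short paths, so the index should exceed 1.
    G = nx.connected_watts_strogatz_graph(n=100, k=6, p=0.1, seed=seed)
    return small_world_index(G, n_iter=5)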
class NetWork_MicNet:
def __init__(self,p:float = 0.05,
prem:float = 0.1,
per_type:str = 'random',
groups:List[Any] = []) -> None:
self.p=p
self.prem=prem
self.per_type=per_type
self.groups=groups
def basic_description(self,corr:Union[np.ndarray,pd.DataFrame])->None:
self.description={}
#TODO Mnomr
if type(corr)==pd.DataFrame:
corr=corr.values.copy()
graph=_build_network(corr)
wgh=get_weight(graph)
del graph
graph = create_normalize_graph(corr)
#Calculate modularity
try:
mod = nx_comm.modularity(graph, nx_comm.greedy_modularity_communities(graph))
except ZeroDivisionError:
mod = 'nan'
#Description
self.description['nodes']=graph.number_of_nodes()
self.description['total_interactions']=sum(wgh>0)+sum(wgh<0)
self.description['posInt'] = sum(wgh>0)
self.description['negInt'] = sum(wgh<0)
self.description['pos_neg_ratio']=self.description['posInt']/self.description['negInt'] if \
self.description['negInt']!=0 else 0.0
self.description['density']=nx.density(graph)
self.description['average_degree']=np.mean([graph.degree(n) for n in graph.nodes()])
self.description['degree_std']=np.std([graph.degree(n) for n in graph.nodes()])
self.description['diameter']=nx.diameter(graph)
self.description['average_clustering']=nx.average_clustering(graph)
self.description['average_shortest_path_length']=nx.average_shortest_path_length(graph)
self.description['modularity']=mod
self.description['small_world_index']=small_world_index(graph,
n=self.description['nodes'],
p=self.description['density'],
cc=self.description['average_clustering'],
l=self.description['average_shortest_path_length']
)
def get_description(self)->Dict[str,Union[int,float]]:
if not hasattr(self,'description'):
raise AttributeError("There is no attribute: description")
data_dict = {
'Nodes':self.description['nodes'],
'Total interactions':self.description['total_interactions'],
'Positive interactions':self.description['posInt'],
'Negative interactions':self.description['negInt'],
'Pos-Neg ratio': self.description['pos_neg_ratio'],
'Density':self.description['density'],
'Average degree':self.description['average_degree'],
'Degree std': self.description['degree_std'],
'Diameter':self.description['diameter'],
'Clustering coefficient':self.description['average_clustering'],
'Shortest average path length': self.description['average_shortest_path_length'],
'Modularity': self.description['modularity'],
'Small-world index': self.description['small_world_index']
}
return data_dict
def percolation_sim(self,graph:nx.Graph)->pd.DataFrame:
'''
Function that performs percolation on the correlation matrix
corrm, by removing the proportion of nodes specified in prem.
The removal can be done : randomly, or by degree, closness or betweeness
centrality.
Parameters
----------
corr : correlation obtained from SparCC, read as a pandas DataFrame or numpy matrix
prem : proportion of nodes to remove in each iteration. Only fractional values.
per_type : can take the values 'random', 'deg_centrality', 'clos_centrality', 'bet_centrality'
specifying how to remove the nodes from the interaction network.
Returns
-------
df : pandas dataframe with each percolation iteration, displaying the percentage of removal
and the change in the following network metrics: 'Network density','Average degree',
'Number of components', 'Size of giant component','Fraction of giant component',
'Number of communities' and 'Modularity'.
'''
# Nodes to remove each iteration
n = graph.number_of_nodes()
nr = int(self.prem*n)
nr_cum = 0
if self.per_type == 'random':
#Initial node list
nod_list = list(graph.nodes())
elif self.per_type == 'deg_centrality':
cent = nx.degree_centrality(graph)
cent = dict(sorted(cent.items(), key=lambda item: item[1], reverse=True))
nod_list =list( cent.keys())
elif self.per_type == 'bet_centrality':
cent = nx.betweenness_centrality(graph)
cent = dict(sorted(cent.items(), key=lambda item: item[1], reverse=True))
nod_list =list( cent.keys())
elif self.per_type == 'clos_centrality':
cent = nx.closeness_centrality(graph)
cent = dict(sorted(cent.items(), key=lambda item: item[1], reverse=True))
nod_list =list( cent.keys())
#Counter
j = 0
#Data to store results
df = pd.DataFrame(columns = ['Fraction of removal',
'Network density','Average degree',
'Number of components', 'Size of giant component',
'Fraction of giant component', 'Number of communities',
'Modularity'])
#Loop
while len(nod_list)>nr:
# Choose nodes
if self.per_type == 'random':
nod_rem = sample(nod_list,nr)
elif self.per_type == 'deg_centrality' or self.per_type == 'bet_centrality' or self.per_type == 'clos_centrality':
nod_rem = nod_list[0:nr]
#Nodes removed so far
nr_cum = nr_cum + nr
#Remove nodes from network
graph.remove_nodes_from(nod_rem)
#Fraction of current removal
fr_rem = nr_cum/n
#Calculate metrics and store them
x = []
# 1- Fraction of removal
x.append(fr_rem)
# 2- netowrk density
x.append(nx.density(graph))
# 3- average degree
x.append(np.mean([graph.degree(n) for n in graph.nodes()]))
# Number of components
x.append(nx.number_connected_components(graph))
# Calculate size of largest component
components = nx.connected_components(graph)
x.append(len(max(components, key=len)))
#Fractions of nodes belonging to the giant component
x.append(x[4]/nx.number_of_nodes(graph))
# Calculate comminuties
com = community.best_partition(graph)
x.append(len(set(com.values())))
#Calculate modularity
try:
x.append(nx_comm.modularity(graph, nx_comm.greedy_modularity_communities(graph)))
except ZeroDivisionError:
x.append('nan')
#append current iteration to data
df.loc[j] = x
#Node list from current network
if self.per_type == 'random':
nod_list = list(graph.nodes())
elif self.per_type == 'deg_centrality' or self.per_type == 'bet_centrality' or self.per_type == 'clos_centrality':
for elem in nod_rem:
nod_list.remove(elem)
#Update counter
j = j + 1
return df
@staticmethod
def structural_balance(graph:nx.Graph)->Dict[str,float]:
'''
Takes the raw correaltions obtained from sparCC (without normalization)
Returns the percentage of balanced and unbalanced relationships
And the percentage of each type of triangle
'''
#Build netowrk with relationships as 1 or -1
edges = nx.get_edge_attributes(graph, 'weight')
Gn = nx.Graph()
for kv in edges.items():
if kv[1] >= 0:
r = 1
elif kv[1]<0:
r = -1
else:
print(f'Problem {kv[1]}')
r=1
Gn.add_edges_from([kv[0]],relationship = r)
#Find all triangles in network
triangles = [c for c in nx.cycle_basis(Gn) if len(c)==3]
#Classify triangles
balanced1 = 0
balanced2 = 0
unbalanced1 =0
unbalanced2 = 0
for triangle in triangles:
#Get subgraph of triangle
tri=nx.subgraph(Gn,triangle)
data = nx.get_edge_attributes(tri, 'relationship')
rel = list(data.values())
#Sum the three signed relationships (3, 1, -1, -3 identify the four sign patterns)
prod = rel[0]+rel[1]+rel[2]
if prod == 3:
balanced1+=1
elif prod == -1:
balanced2+=1
elif prod == 1:
unbalanced1+=1
elif prod == -3:
unbalanced2+=1
D=len(triangles)
baltotal = (balanced1 + balanced2)/D
unbaltotal = (unbalanced1 + unbalanced2)/D
bal_1 = balanced1/D
bal_2 = balanced2/D
unbal_1 = unbalanced1/D
unbal_2 = unbalanced2/D
data_dict = {
'Percentage balanced': baltotal,
'Percentage unbalanced': unbaltotal,
'Triangles +++': bal_1,
'Triangles --+': bal_2,
'Triangles ++-': unbal_1,
'Triangles ---':unbal_2}
return data_dict
@staticmethod
def key_otus(graph:nx.Graph,taxa:Union[pd.DataFrame,pd.Series]=None)->Dict[str,List[Any]]:
'''
Parameters
----------
G : networkx graph
taxa : dataframe with ASV and/or taxa
Returns
-------
key_otus: dictionary with the ASV ids, the taxa (when provided) and the
degree, betweenness, closeness and PageRank centrality of every node
'''
#Calculating centralities
dcent = nx.degree_centrality(graph)
bcent = nx.betweenness_centrality(graph)
ccent = nx.closeness_centrality(graph)
try:
pRank = nx.pagerank(graph)
except:
pRank={k:0 for k in dcent.keys()}
data_dict = {}
if taxa is not None:
data_dict['NUM_OTUS']=list(dcent.keys())
data_dict['TAXA']=list(taxa.values)
data_dict['Degree centrality']=list(dcent.values())
data_dict['Betweeness centrality']=list(bcent.values())
data_dict['Closeness centrality']=list(ccent.values())
data_dict['PageRank']=list(pRank.values())
return data_dict
else:
data_dict['NUM_OTUS']=list(dcent.keys())
data_dict['Degree centrality']=list(dcent.values())
data_dict['Betweeness centrality']=list(bcent.values())
data_dict['Closeness centrality']=list(ccent.values())
data_dict['PageRank']=list(pRank.values())
return data_dict
@staticmethod
def community_analysis(graph:nx.Graph,
taxa:Union[pd.Series,pd.DataFrame]=None):
'''
Function that performs community analysis and returns a description of each
community subnetwork.
Parameters
----------
corr : interaction matrix as a pandas dataframe or numpy matrix of
dimension m x m.
taxa: dataframe with ASV and/or taxa of dimension m x n.
Returns
-------
num_com = number of different communities found
df = Community with taxa id
com_dict = dictionary with a dataframe for each community found.
Each dataframe contains the 'Nodes', 'Diameter',
'Clustering coefficient', and 'Average shortest path length'.
'''
com = community.best_partition(graph)
## Check taxa and corr match
if type(taxa)==pd.DataFrame:
if len(graph.nodes) != taxa.shape[0]:
raise ValueError('''The correlation and the taxa dataframes do not match. \
If correlation matrix is of size m x m, then taxa dataframe should be of size m x n''')
else:
taxa['Community_id'] = com.values()
else:
taxa=pd.DataFrame()
taxa['Community_id'] = com.values()
n_com = len(set(com.values()))
data = []
#Subnetwork analysis
for com_id in range(0,n_com):
subnet = [key for (key, value) in com.items() if value == com_id]
Gc=nx.subgraph(graph,subnet)
data.append([Gc.number_of_nodes(),
Gc.number_of_edges(),
nx.density(Gc),
np.mean([Gc.degree(n) for n in Gc.nodes()]),
np.std([Gc.degree(n) for n in Gc.nodes()]),
nx.average_clustering(Gc)])
#transpose data
datat =[list(i) for i in zip(*data)]
com_df = pd.DataFrame(
datat,
index = ['Nodes', 'Edges','Density', 'Average degree','degree std', 'Clustering coefficient'],
columns = [f'Community_{i}' for i in range(0,n_com)]
)
data_dict = {
'Number of communities':n_com,
'Community_data': taxa,
'Communities_topology': com_df,
}
return data_dict
@staticmethod
def HDBSCAN_subnetwork(corr:Union[np.ndarray,pd.DataFrame],
HDBSCAN:pd.DataFrame)->pd.DataFrame:
'''
Function that performs an analysis of the clusters found by HDBSCAN and
returns a description of each cluster subnetwork characteristics.
Parameters
----------
corr : interaction matrix as a pandas dataframe or numpy matrix of
dimension m x m.
HDBSCAN: pandas dataframe of dimensions m x 1 containing the cluster id for each cluster
found, where outliers are identified with value -1.
Returns
-------
hdbscan_df = dataframe in which each column represents a different HDBSCAN cluster.
For each cluster we obtain : 'Nodes', 'Edges','Density', 'Average degree',
'degree std' and 'Clustering coefficient'.
'''
## check that correlation matrix is square
if corr.shape[0] != corr.shape[1]:
raise ValueError('''The correlation matrix or data frame input is not square. Dimensions should be m x m.''')
## Check taxa and corr match
if corr.shape[0] != HDBSCAN.shape[0]:
raise ValueError('''The correlation and the taxa dataframes do not match. If correlation matrix is of size m x m, then taxa dataframe should be of size m x n''')
groups = list(HDBSCAN)
groups_id = set(groups)
#Dont include ouliers as a group
if -1 in groups_id:
groups_id.remove(-1)
ng = len(groups_id)
#Graph with all nodes
corrn = _normalize_corr(corr)
G = _build_network(corrn)
group_data =[]
#create subnetwork and get their metrics
for gid in groups_id:
subnet = [i for i, x in enumerate(groups) if x == gid]
Gc=nx.subgraph(G,subnet)
group_data.append([Gc.number_of_nodes(),
Gc.number_of_edges(),
nx.density(Gc),
np.mean([Gc.degree(n) for n in Gc.nodes()]),
np.std([Gc.degree(n) for n in Gc.nodes()]),
nx.average_clustering(Gc)])
#Transpose data
datat =[list(i) for i in zip(*group_data)]
#Save all in one dataFrame
hdbscan_df = pd.DataFrame(
datat,
index = ['Nodes', 'Edges','Density', 'Average degree','degree std', 'Clustering coefficient'],
columns = [f'Cluster_{i}' for i in range(0,ng)]
)
data_dict = {
'Number of clusters':ng,
'Clusters_topology': hdbscan_df,
}
return data_dict
def __repr__(self) -> str:
return f"{self.name}"
class SyntheticNetwork:
"""
NETWORK TOPOLOGY
@author: <NAME>
TODO:Description
"""
def __init__(self,n:int,m:int,p:float,k:int,pr:float,seed:int=None) -> None:
self.n=n
self.m=m
self.p=p
self.k=k
self.pr=pr
self.seed=seed
def random_interaction(self)->pd.DataFrame:
'''
Function that creates a symmetric random interaction matrix with weights
from -1 to 1.
Parameters
----------
n : number of nodes.
p : density of net.
Returns
-------
B : symmetric random network interaction matrix with weights drawn from a uniform
distribution ranging form [-1,1]
'''
#Create network
G = nx.erdos_renyi_graph(n=self.n, p=self.p, seed=self.seed, directed=False)
A = nx.to_numpy_matrix(G)
#Assing interaction magnitude from a uniform distribution [-1,1]
it = np.nditer(A, flags=['multi_index'])
for ind in it:
if int(ind) == 1:
A[it.multi_index[0],it.multi_index[1]] = round(np.random.uniform(-1, 1),2)
#Make symmetric matrix
B = np.triu(A)
B = B + B.T - np.diag(np.diag(A))
return pd.DataFrame(B)
def scalef_interaction(self)->pd.DataFrame:
'''
Function that generates a scale-free interaction matrix with weights
ranging for -1 to 1 using Barabasi-Albert algorithm.
Parameters
----------
n : number of nodes of the network.
m : average number of edges per node (average degree/2)
Returns
-------
B : symmetric scale free interaction matrix with weights drawn from a
uniform distribution [-1,1].
'''
#Create network
G = nx.barabasi_albert_graph(n=self.n, m=self.m, seed=self.seed)
A = nx.to_numpy_matrix(G)
#Adding interaction magnitude from a uniform distribution [-1,1]
it = np.nditer(A, flags=['multi_index'])
for ind in it:
if int(ind) == 1:
A[it.multi_index[0],it.multi_index[1]] = round(np.random.uniform(-1, 1),2)
#Make symmetric matrix (the diagonal stays at zero)
B = np.triu(A)
B = B + B.T - np.diag(np.diag(A))
return pd.DataFrame(B)
def smallw_interaction(self)->pd.DataFrame:
'''
Function that generates a small-world interaction matrix with weights
ranging for -1 to 1 using Watts Strogatz algorithm.
Parameters
----------
n : number of nodes.
k : average degree.
p : reconection probability
p = 0 -- lattice network
p = 1 -- random netowrk.
Returns
-------
B : symmetric small world interaction matrix with weights drawn from a
uniform distribution [-1,1].
'''
#Create network
G = nx.watts_strogatz_graph(n=self.n, k=self.k, p=self.pr, seed=self.seed)
A = nx.to_numpy_matrix(G)
#Assign interaction magnitude from a uniform distribution [-1,1]
it = np.nditer(A, flags=['multi_index'])
for ind in it:
if int(ind) == 1:
A[it.multi_index[0],it.multi_index[1]] = round(np.random.uniform(-1, 1),2)
#Make symmetric matrix (the diagonal stays at zero)
B = np.triu(A)
B = B + B.T - np.diag(np.diag(A))
return | pd.DataFrame(B) | pandas.DataFrame |
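# Illustrative usage (sketch; the parameter values are arbitrary): build the three
# synthetic interaction matrices exposed by the class above.
def _demo_synthetic_networks():
    gen = SyntheticNetwork(n=50, m=3, p=0.1, k=4, pr=0.2, seed=1)
    return gen.random_interaction(), gen.scalef_interaction(), gen.smallw_interaction()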
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 28 22:50:43 2018
@author: kennedy
"""
"""
Credit:
https://www.quantopian.com/posts/technical-analysis-indicators-without-talib-code
Bug Fix by Kennedy:
Works fine for library import.
returns only column of the indicator result.
Can be used as a predictor for for forecasting
stock returns using predictive modeling
in Machine Learning.
I configured it to meet my demand for multiple predictive modelling.
"""
import pandas as pd
import numpy as np
class TechnicalIndicators:
def moving_average(df, n):
"""Calculate the moving average for the given data.
:param df: pandas.DataFrame
:param n: window
:return: pandas.DataFrame
"""
MA = pd.Series(df['Close'].rolling(n, min_periods=n).mean(), name='MA_{}'.format(n))
return MA
def exponential_moving_average(df, n):
"""
:param df: pandas.DataFrame
:param n: window of data to take moving exponent mean
:return: pandas.DataFrame
"""
EMA = pd.Series(df['Close'].ewm(span=n, min_periods=n).mean(), name='EMA_' + str(n))
return EMA
def momentum(df, n):
"""
:param df: pandas.DataFrame
:param n: data window
:return: pandas.DataFrame
"""
M = pd.Series(df['Close'].diff(n), name='Momentum_' + str(n))
return M
def rate_of_change(df, n):
"""
:param df: pandas.DataFrame
:param n: data window
:return: pandas.DataFrame
"""
M = df['Close'].diff(n - 1)
N = df['Close'].shift(n - 1)
ROC = pd.Series(M / N, name='ROC_' + str(n))
return ROC
def average_true_range(df, n):
"""
:param df: pandas.DataFrame
:param n: data window
:return: pandas.DataFrame
"""
i = 0
TR_l = [0]
while i < df.index[-1]:
TR = max(df.loc[i + 1, 'High'], df.loc[i, 'Close']) - min(df.loc[i + 1, 'Low'], df.loc[i, 'Close'])
TR_l.append(TR)
i = i + 1
TR_s = pd.Series(TR_l)
ATR = pd.Series(TR_s.ewm(span=n, min_periods=n).mean(), name='ATR_' + str(n))
return ATR
def bollinger_bands(df, n):
"""
:param df: pandas.DataFrame
:param n: data window
:return: pandas.DataFrame
"""
MA = pd.Series(df['Close'].rolling(n, min_periods=n).mean())
MSD = pd.Series(df['Close'].rolling(n, min_periods=n).std())
b1 = 4 * MSD / MA
B1 = pd.Series(b1, name='BollingerB_' + str(n))
b2 = (df['Close'] - MA + 2 * MSD) / (4 * MSD)
B2 = pd.Series(b2, name='Bollinger%b_' + str(n))
return pd.concat([B1, B2], axis = 1)
def ppsr(df):
"""Calculate Pivot Points, Supports and Resistances for given data
:param df: pandas.DataFrame
:return: pandas.DataFrame
"""
PP = pd.Series((df['High'] + df['Low'] + df['Close']) / 3)
R1 = pd.Series(2 * PP - df['Low'])
S1 = | pd.Series(2 * PP - df['High']) | pandas.Series |
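# Illustrative usage (sketch; the OHLC values are synthetic): the indicator methods
# expect a dataframe with 'High', 'Low' and 'Close' columns plus a window size n.
def _demo_indicators():
    demo = pd.DataFrame({'High': np.linspace(11, 20, 30),
                         'Low': np.linspace(9, 18, 30),
                         'Close': np.linspace(10, 19, 30)})
    ma = TechnicalIndicators.moving_average(demo, 5)
    bb = TechnicalIndicators.bollinger_bands(demo, 5)
    return pd.concat([demo, ma, bb], axis=1)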
import pandas as pd
xls = | pd.ExcelFile("https://www.eia.gov/dnav/ng/hist_xls/RNGWHHDd.xls") | pandas.ExcelFile |
import os
import pandas as pd
path = os.getcwd()
files = os.listdir('./files')
df_total = | pd.DataFrame() | pandas.DataFrame |
from collections import defaultdict, OrderedDict
import numpy as np
from scipy import signal
from scipy.sparse import csr_matrix, hstack
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.cluster import MiniBatchKMeans
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import Imputer, OneHotEncoder, StandardScaler
from sklearn.utils.validation import check_is_fitted
from base import AbstractFeatureExtractor
class DenseMixedStrategyImputer(BaseEstimator, TransformerMixin):
def __init__(self, missing_values='NaN', strategies=None, add_missing_indicator=True, verbose=False):
self.missing_values = missing_values
if strategies is None:
raise ValueError('Must provide strategy.')
allowed_strategies = ['mean', 'median', 'most_frequent']
if any(s not in allowed_strategies for s in strategies):
raise ValueError('Invalid strategy in list.')
self.strategies = strategies
self.add_missing_indicator = add_missing_indicator
self.verbose = verbose
def fit(self, X, y=None):
n_samples, n_features = X.shape
print('n_features',n_features)
if len(self.strategies) != n_features:
raise ValueError('Number of strategies must equal number of features.')
self.impute_strategies = list(set(self.strategies))
self.impute_indices = [np.array([i for i, x in enumerate(self.strategies) if x == s]) for s in self.impute_strategies]
self.impute_valid_indices = []
self.imputers = [Imputer(missing_values=self.missing_values, strategy=s, verbose=self.verbose) for s in
self.impute_strategies]
for indices, imputer in zip(self.impute_indices, self.imputers):
imputer.fit(X[:, indices])
valid_mask = np.logical_not(np.isnan(imputer.statistics_))
self.impute_valid_indices.append(indices[valid_mask])
return self
def transform(self, X):
n_samples, n_features = X.shape
if len(self.strategies) != n_features:
raise ValueError('Number of strategies must equal number of features.')
check_is_fitted(self, 'imputers')
if self.add_missing_indicator:
output_scale = 2
else:
output_scale = 1
X_out = np.zeros((n_samples, output_scale*n_features))
for input_indices, output_indices, imputer in zip(self.impute_indices, self.impute_valid_indices, self.imputers):
X_out[:, output_scale*output_indices] = imputer.transform(X[:, input_indices])
if self.add_missing_indicator:
X_out[:, np.arange(1, 2*n_features, 2)] = np.isnan(X).astype('float', copy=False)
return X_out
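def _demo_mixed_strategy_imputer():
    # Minimal sketch (made-up values): the first column is imputed with its mean, the
    # second with its median, and indicator columns flag where values were missing.
    # It relies on the deprecated sklearn.preprocessing.Imputer imported above.
    X = np.array([[1.0, 10.0],
                  [np.nan, 30.0],
                  [3.0, np.nan]])
    imputer = DenseMixedStrategyImputer(strategies=['mean', 'median'],
                                        add_missing_indicator=True)
    return imputer.fit_transform(X)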
class DataFrameCategoricalEncoder(BaseEstimator, TransformerMixin):
def fit(self, X, y=None):
self.code_maps = {}
for k in X.columns:
self.code_maps[k] = defaultdict(lambda: np.nan)
self.code_maps[k].update({v: k for k, v in enumerate(X[k].astype('category').cat.categories)})
return self
def transform(self, X):
if set(X.columns) != set(self.code_maps):
raise ValueError('Columns do not match fit model.')
return X.apply(lambda x: x.apply(lambda y: self.code_maps[x.name][y])).as_matrix()
class AnnotatedTabularExtractor(AbstractFeatureExtractor):
param_distributions = {
'normalize_text': [True, False],
'categorize': [True, False],
'numeric_strategy': ['mean', 'median'],
'add_missing_indicator': [True, False]
}
def __init__(self, normalize_text=False, categorize=False, numeric_strategy='mean', add_missing_indicator=True):
self.normalize_text = normalize_text
self.categorize = categorize
self.numeric_strategy = numeric_strategy
self.add_missing_indicator = add_missing_indicator
def set_cols_info(self, cols_info):
self.cols_info = cols_info
def determine_colType(self, column):
variables = self.cols_info
for var in variables:
var_colName = var['colName']
if str(var_colName) != str(column):
continue
var_colType = var['colType']
if var_colType in {'categorical', 'boolean'}:
return 'categorical'
elif var_colType in {'integer', 'real'}:
return 'numeric'
elif var_colType == 'string':
return 'text'
elif var_colType == 'dateTime':
raise RuntimeError('dateTime not implemented in this feature extractor yet!')
def fit_transform(self, df, variables):
df = self.copy_normalize_text(df)
self.column_types = OrderedDict()
for column in df:
itype = self.determine_colType(column)
# print('itype',itype)
self.column_types[column] = itype
self.numeric_columns = [column for column, type in self.column_types.items() if type == 'numeric']
self.categorical_columns = [column for column, type in self.column_types.items() if type == 'categorical']
self.text_columns = [column for column, type in self.column_types.items() if type == 'text']
output_arrays = []
if len(self.numeric_columns) > 0:
X = df[self.numeric_columns].apply(lambda x: pd.to_numeric(x, errors='coerce')).as_matrix()
self.numeric_imputer = DenseMixedStrategyImputer(
strategies=[self.numeric_strategy]*len(self.numeric_columns),
add_missing_indicator=self.add_missing_indicator
)
X = self.numeric_imputer.fit_transform(X)
self.numeric_scaler = StandardScaler()
output_arrays.append(self.numeric_scaler.fit_transform(X))
if len(self.categorical_columns) > 0:
self.categorical_encoder = DataFrameCategoricalEncoder()
X = self.categorical_encoder.fit_transform(df[self.categorical_columns])
self.categorical_imputer = DenseMixedStrategyImputer(
strategies=['most_frequent']*len(self.categorical_columns),
add_missing_indicator=self.add_missing_indicator
)
X = self.categorical_imputer.fit_transform(X)
self.one_hot_encoder = OneHotEncoder(
categorical_features=np.arange(len(self.categorical_columns)) * (2 if self.add_missing_indicator else 1)
)
output_arrays.append(self.one_hot_encoder.fit_transform(X))
return hstack([csr_matrix(X) for X in output_arrays], format='csr')
def transform(self, df):
check_is_fitted(self, 'column_types')
if list(df) != list(self.column_types):
raise ValueError('Data to be transformed does not match fitting data.')
df = self.copy_normalize_text(df)
output_arrays = []
if len(self.numeric_columns) > 0:
X = df[self.numeric_columns].apply(lambda x: | pd.to_numeric(x, errors='coerce') | pandas.to_numeric |
# Package imports
import pandas as pd
import requests
import datetime
from unidecode import unidecode as UnicodeFormatter
import os
import bcolors
# Local imports
import path_configuration
import url_configuration
import progress_calculator
class GrandPrix(object):
Url = None
Path = None
Requests = None
def __init__(self):
self.Url = url_configuration.Url_builder()
self.Path = path_configuration.Path()
self.Requests = requests
def import_grand_prix(self):
content = os.listdir(self.Path.get_season_path())
content.sort()
"""for year in content:
DataFrame = pd.read_csv(Path.get_season_path()+year)
print(DataFrame)"""
DataFrame = pd.read_csv(self.Path.get_season_path()+'2019.csv')
Date = list(DataFrame['Date'])
GrandPrix = list(DataFrame['Grand Prix'])
Round = list(DataFrame['Round'])
Date_obj = []
# DATE OBJ
for date in Date:
Date_obj.append(datetime.datetime.strptime(date, '%Y-%m-%d'))
Progress = progress_calculator.ProgressBar(Round)
# WHILE - BY GPS OF THE YEAR
i = 0
while i < Round.__len__():
# ONLY PROCESS GRANDS PRIX THAT HAVE ALREADY TAKEN PLACE
if Date_obj[i] < datetime.datetime.now():
# METHOD CALLS
print(bcolors.PASS + 'STARTING EXTRACTOR, GETTING FROM', GrandPrix[i], 'DATE:', Date[i] + bcolors.END)
self.drivers_csv(Round[i], Date_obj[i].year, GrandPrix[i])
self.contructors_csv(Round[i], Date_obj[i].year, GrandPrix[i])
self.pitstops_times_csv(Round[i], Date_obj[i].year, GrandPrix[i])
self.result_csv(Round[i], Date_obj[i].year, GrandPrix[i])
self.by_lap_csv(Round[i], Date_obj[i].year, GrandPrix[i])
self.current_driver_standings(Round[i], Date_obj[i].year, GrandPrix[i])
self.status(Round[i], Date_obj[i].year, GrandPrix[i])
if Date_obj[i].year > 2017:
url = self.Url.f1_url(Date_obj[i].year, Date_obj[i].date(), GrandPrix[i])
self.load_data_from_f1(url, Date_obj[i].year, GrandPrix[i])
Progress.get_progress_bar()
i = i + 1
def drivers_csv(self, round, year, gp_name):
print(bcolors.ITALIC + 'GETTING DRIVERS BY RACE...', gp_name + bcolors.END)
url = self.Url.url_driver(round, year)
page = self.Requests.get(url)
json = page.json()
json = json['MRData']
json = json['DriverTable']
Drivers = json['Drivers']
DriversID = []
DriversInitials = []
DriversName = []
YearsOld = []
for driver in Drivers:
DriversID.append(driver['driverId'])
DriversInitials.append(driver['code'])
DriversName.append(UnicodeFormatter(driver['givenName']+' '+driver['familyName']))
YearsOld.append(
datetime.datetime.now().year - datetime.datetime.strptime(driver['dateOfBirth'], '%Y-%m-%d').year
)
Drivers_Dict = {'Driver ID': DriversID, 'Driver Initials': DriversInitials,
'Driver Name': DriversName, 'Years Old': YearsOld}
Drivers_Data = pd.DataFrame(data=Drivers_Dict)
Path = self.Path.grandprix_path(year, gp_name, 'Drivers')
Drivers_Data.to_csv(Path)
def contructors_csv(self, round, year, gp_name):
print(bcolors.ITALIC + 'GETTING CONSTRUCTORS BY RACE...', gp_name + bcolors.END)
url = self.Url.url_constructor(round, year)
page = self.Requests.get(url)
json = page.json()
json = json['MRData']
json = json['ConstructorTable']
Constructors = json['Constructors']
ConstructorID = []
ConstructorName = []
for constructor in Constructors:
ConstructorID.append(constructor['constructorId'])
ConstructorName.append(constructor['name'])
Constructors_Dict = {"Constructor ID": ConstructorID, "Constructor Name": ConstructorName}
Constructor_Data = | pd.DataFrame(data=Constructors_Dict) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 13 15:21:55 2019
@author: raryapratama
"""
#%%
#Step (1): Import Python libraries, set land conversion scenarios general parameters
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import quad
import seaborn as sns
import pandas as pd
#PF_FP_S1 Scenario
##Set parameters
#Parameters for primary forest
initAGB = 233 #source: van Beijma et al. (2018)
initAGB_min = 233-72
initAGB_max = 233 + 72
#parameters for timber plantation. Source: Khasanah et al. (2015)
tf = 201
a = 0.082
b = 2.53
#%%
#Step (2_1): C loss from the harvesting/clear cut
df1 = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_EC.xlsx', 'RIL_S1')
#df2 = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL.xlsx', 'RIL_S2')
dfE = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_EC.xlsx', 'RIL_E')
t = range(0,tf,1)
c_firewood_energy_S1 = df1['Firewood_other_energy_use'].values
#c_loss_S2 = df2['C_loss'].values
c_firewood_energy_E = dfE['Firewood_other_energy_use'].values
#%%
#Step (2_2): C loss from the harvesting/clear cut as wood pellets
dfE = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_EC.xlsx', 'RIL_E')
c_pellets_E = dfE['Wood_pellets'].values
#%%
#Step (3): Aboveground biomass (AGB) decomposition
#S1
df = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_EC.xlsx', 'RIL_S1')
tf = 201
t = np.arange(tf)
def decomp_S1(t,remainAGB_S1):
return (1-(1-np.exp(-a*t))**b)*remainAGB_S1
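# Quick illustration of the decay curve (not part of the scenario calculations):
# at t = 0 the full stock remains, and the remaining fraction tends to 0 for large t.
print('decomp_S1 at t = 0, 1, 10, 100 for a 100 tC stock:',
      decomp_S1(np.array([0, 1, 10, 100]), 100.0))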
#set zero matrix
output_decomp_S1 = np.zeros((len(t),len(df['C_remainAGB'].values)))
for i,remain_part_S1 in enumerate(df['C_remainAGB'].values):
#print(i,remain_part)
output_decomp_S1[i:,i] = decomp_S1(t[:len(t)-i],remain_part_S1)
print(output_decomp_S1[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_S1 = np.zeros((len(t)-1,len(df['C_remainAGB'].values-1)))
i = 0
while i < tf:
subs_matrix_S1[:,i] = np.diff(output_decomp_S1[:,i])
i = i + 1
print(subs_matrix_S1[:,:4])
print(len(subs_matrix_S1))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_S1 = subs_matrix_S1.clip(max=0)
print(subs_matrix_S1[:,:4])
#make the results as absolute values
subs_matrix_S1 = abs(subs_matrix_S1)
print(subs_matrix_S1[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_S1 = np.zeros((len(t)-200,len(df['C_remainAGB'].values)))
print(zero_matrix_S1)
subs_matrix_S1 = np.vstack((zero_matrix_S1, subs_matrix_S1))
print(subs_matrix_S1[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_S1 = (tf,1)
decomp_tot_S1 = np.zeros(matrix_tot_S1)
i = 0
while i < tf:
decomp_tot_S1[:,0] = decomp_tot_S1[:,0] + subs_matrix_S1[:,i]
i = i + 1
print(decomp_tot_S1[:,0])
#S1_C
df = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_EC.xlsx', 'RIL_C_S1')
tf = 201
t = np.arange(tf)
def decomp_S1_C(t,remainAGB_S1_C):
return (1-(1-np.exp(-a*t))**b)*remainAGB_S1_C
#set zero matrix
output_decomp_S1_C = np.zeros((len(t),len(df['C_remainAGB'].values)))
for i,remain_part_S1_C in enumerate(df['C_remainAGB'].values):
#print(i,remain_part)
output_decomp_S1_C[i:,i] = decomp_S1_C(t[:len(t)-i],remain_part_S1_C)
print(output_decomp_S1_C[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_S1_C = np.zeros((len(t)-1,len(df['C_remainAGB'].values-1)))
i = 0
while i < tf:
subs_matrix_S1_C[:,i] = np.diff(output_decomp_S1_C[:,i])
i = i + 1
print(subs_matrix_S1_C[:,:4])
print(len(subs_matrix_S1_C))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_S1_C = subs_matrix_S1_C.clip(max=0)
print(subs_matrix_S1_C[:,:4])
#make the results as absolute values
subs_matrix_S1_C = abs(subs_matrix_S1_C)
print(subs_matrix_S1_C[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_S1_C = np.zeros((len(t)-200,len(df['C_remainAGB'].values)))
print(zero_matrix_S1_C)
subs_matrix_S1_C = np.vstack((zero_matrix_S1_C, subs_matrix_S1_C))
print(subs_matrix_S1_C[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_S1_C = (tf,1)
decomp_tot_S1_C = np.zeros(matrix_tot_S1_C)
i = 0
while i < tf:
decomp_tot_S1_C[:,0] = decomp_tot_S1_C[:,0] + subs_matrix_S1_C[:,i]
i = i + 1
print(decomp_tot_S1_C[:,0])
#E
df = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_EC.xlsx', 'RIL_E')
tf = 201
t = np.arange(tf)
def decomp_E(t,remainAGB_E):
return (1-(1-np.exp(-a*t))**b)*remainAGB_E
#set zero matrix
output_decomp_E = np.zeros((len(t),len(df['C_remainAGB'].values)))
for i,remain_part_E in enumerate(df['C_remainAGB'].values):
#print(i,remain_part)
output_decomp_E[i:,i] = decomp_E(t[:len(t)-i],remain_part_E)
print(output_decomp_E[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_E = np.zeros((len(t)-1,len(df['C_remainAGB'].values-1)))
i = 0
while i < tf:
subs_matrix_E[:,i] = np.diff(output_decomp_E[:,i])
i = i + 1
print(subs_matrix_E[:,:4])
print(len(subs_matrix_E))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_E = subs_matrix_E.clip(max=0)
print(subs_matrix_E[:,:4])
#make the results as absolute values
subs_matrix_E = abs(subs_matrix_E)
print(subs_matrix_E[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_E = np.zeros((len(t)-200,len(df['C_remainAGB'].values)))
print(zero_matrix_E)
subs_matrix_E = np.vstack((zero_matrix_E, subs_matrix_E))
print(subs_matrix_E[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_E = (tf,1)
decomp_tot_E = np.zeros(matrix_tot_E)
i = 0
while i < tf:
decomp_tot_E[:,0] = decomp_tot_E[:,0] + subs_matrix_E[:,i]
i = i + 1
print(decomp_tot_E[:,0])
#E
df = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_EC.xlsx', 'RIL_C_E')
tf = 201
t = np.arange(tf)
def decomp_E_C(t,remainAGB_E_C):
return (1-(1-np.exp(-a*t))**b)*remainAGB_E_C
#set zero matrix
output_decomp_E_C = np.zeros((len(t),len(df['C_remainAGB'].values)))
for i,remain_part_E_C in enumerate(df['C_remainAGB'].values):
#print(i,remain_part)
output_decomp_E_C[i:,i] = decomp_E_C(t[:len(t)-i],remain_part_E_C)
print(output_decomp_E_C[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_E_C = np.zeros((len(t)-1,len(df['C_remainAGB'].values-1)))
i = 0
while i < tf:
subs_matrix_E_C[:,i] = np.diff(output_decomp_E_C[:,i])
i = i + 1
print(subs_matrix_E_C[:,:4])
print(len(subs_matrix_E_C))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_E_C = subs_matrix_E_C.clip(max=0)
print(subs_matrix_E_C[:,:4])
#make the results as absolute values
subs_matrix_E_C = abs(subs_matrix_E_C)
print(subs_matrix_E_C[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_E_C = np.zeros((len(t)-200,len(df['C_remainAGB'].values)))
print(zero_matrix_E_C)
subs_matrix_E_C = np.vstack((zero_matrix_E_C, subs_matrix_E_C))
print(subs_matrix_E_C[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_E_C = (tf,1)
decomp_tot_E_C = np.zeros(matrix_tot_E_C)
i = 0
while i < tf:
decomp_tot_E_C[:,0] = decomp_tot_E_C[:,0] + subs_matrix_E_C[:,i]
i = i + 1
print(decomp_tot_E_C[:,0])
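#Note: the four decomposition blocks above (S1, S1_C, E, E_C) repeat the same pattern. As a
#possible refactor only (a sketch, not used anywhere in this script), the whole computation
#could be wrapped in one helper; it assumes the same decay curve and the a, b, tf set above.
def yearly_decomp_emissions(remain_values, a, b, tf):
    t = np.arange(tf)
    #cumulative remaining carbon for each annual cohort
    output = np.zeros((tf, len(remain_values)))
    for i, remain in enumerate(remain_values):
        output[i:, i] = (1 - (1 - np.exp(-a * t[:tf - i]))**b) * remain
    #year-on-year differences, keep emissions only (negative diffs), first year set to zero
    subs = np.abs(np.diff(output, axis=0).clip(max=0))
    subs = np.vstack((np.zeros((1, len(remain_values))), subs))
    #sum over cohorts to get one emission value per year
    return subs.sum(axis=1)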
#plotting
t = np.arange(0,tf)
plt.plot(t,decomp_tot_S1,label='S1')
#plt.plot(t,decomp_tot_S2,label='S2')
plt.plot(t,decomp_tot_E,label='E')
plt.xlim(0,200)
plt.legend(loc='best', frameon=False)
plt.show()
#%%
#Step (4): Dynamic stock model of in-use wood materials
from dynamic_stock_model import DynamicStockModel
df1 = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_EC.xlsx', 'RIL_S1')
dfE = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_EC.xlsx', 'RIL_E')
#product lifetime
#building materials
B = 35
TestDSM1 = DynamicStockModel(t = df1['Year'].values, i = df1['Input_PF'].values, lt = {'Type': 'Normal', 'Mean': np.array([B]), 'StdDev': np.array([0.3*B])})
TestDSME = DynamicStockModel(t = dfE['Year'].values, i = dfE['Input_PF'].values, lt = {'Type': 'Normal', 'Mean': np.array([B]), 'StdDev': np.array([0.3*B])})
CheckStr1, ExitFlag1 = TestDSM1.dimension_check()
CheckStrE, ExitFlagE = TestDSME.dimension_check()
Stock_by_cohort1, ExitFlag1 = TestDSM1.compute_s_c_inflow_driven()
Stock_by_cohortE, ExitFlagE = TestDSME.compute_s_c_inflow_driven()
S1, ExitFlag1 = TestDSM1.compute_stock_total()
SE, ExitFlagE = TestDSME.compute_stock_total()
O_C1, ExitFlag1 = TestDSM1.compute_o_c_from_s_c()
O_CE, ExitFlagE = TestDSME.compute_o_c_from_s_c()
O1, ExitFlag1 = TestDSM1.compute_outflow_total()
OE, ExitFlagE = TestDSME.compute_outflow_total()
DS1, ExitFlag1 = TestDSM1.compute_stock_change()
DSE, ExitFlagE = TestDSME.compute_stock_change()
Bal1, ExitFlag1 = TestDSM1.check_stock_balance()
BalE, ExitFlagE = TestDSME.check_stock_balance()
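#Optional sanity check (assumption about the dynamic_stock_model API: check_stock_balance()
#returns the yearly stock-balance residual, which should be close to zero when inflows,
#outflows and stock changes are consistent):
#print(np.abs(Bal1).max(), np.abs(BalE).max())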
#print output flow
print(TestDSM1.o)
#print(TestDSM2.o)
print(TestDSME.o)
plt.plot(t, TestDSM1.o)
plt.xlim(0,100)
plt.show()
#%%
#Step (5): Biomass growth
# RIL_Scenario biomass growth, following RIL disturbance
#recovery time, follow the one by Alice-guier
#H = [M, E, C_M, C_E]
#LD0 = [M, E, C_M, C_E]
H = [2.89, 4.34, 2.89, 4.34]
LD0 = [53.46-2.89, 53.46-4.34, 29.29-2.89, 29.29-4.34]
s = 1.106
#RIL
RT = ((H[0] + LD0[0])*100/initAGB)**s
print(RT)
#growth per year (Mg C/ha.yr)
gpy = (H[0] + LD0[0])/RT
print(gpy)
tf_RIL_S1 = 36
A1 = range(0,tf_RIL_S1,1)
#calculate the disturbed natural forest recovery carbon regrowth over time following RIL
def Y_RIL_S1(A1):
return 44/12*1000*gpy*A1
seq_RIL = np.array([Y_RIL_S1(A1i) for A1i in A1])
print(len(seq_RIL))
print(seq_RIL)
##3 times 35-year cycle of new AGB following logging (RIL)
counter_35y = range(0,6,1)
y_RIL = []
for i in counter_35y:
y_RIL.append(seq_RIL)
flat_list_RIL = []
for sublist in y_RIL:
for item in sublist:
flat_list_RIL.append(item)
#the length of the list is now 216, so we remove the last 15 elements of the list to make the len=tf
flat_list_RIL = flat_list_RIL[:len(flat_list_RIL)-15]
print(flat_list_RIL)
#plotting
t = np.arange(0,tf,1)
plt.xlim([0, 200])
plt.plot(t, flat_list_RIL, color='darkviolet')
#yearly sequestration
## RIL (35-year cycle)
#find the yearly sequestration by calculating the differences between elements in list 'flat_list_RIL (https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
flat_list_RIL = [p - q for q, p in zip(flat_list_RIL, flat_list_RIL[1:])]
#since there is no sequestration between the replanting year (e.g., year 35 to 36), we have to replace negative numbers in 'flat_list_RIL' with 0 values
flat_list_RIL = [0 if i < 0 else i for i in flat_list_RIL]
#insert 0 value to the list as the first element, because there is no sequestration in year 0
var = 0
flat_list_RIL.insert(0,var)
#make 'flat_list_RIL' elements negative numbers to denote sequestration
flat_list_RIL = [ -x for x in flat_list_RIL]
print(flat_list_RIL)
#RIL_C
RT_C = ((H[2] + LD0[2])*100/initAGB)**s
print(RT_C)
#growth per year (Mg C/ha.yr)
gpy_C = (H[2] + LD0[2])/RT_C
print(gpy_C)
tf_RIL_C = 36
A1 = range(0,tf_RIL_C,1)
#calculate the disturbed natural forest recovery carbon regrowth over time following RIL
def Y_RIL_C(A1):
return 44/12*1000*gpy_C*A1
seq_RIL_C = np.array([Y_RIL_C(A1i) for A1i in A1])
print(len(seq_RIL_C))
print(seq_RIL_C)
##3 times 35-year cycle of new AGB following logging (RIL)
counter_35y = range(0,6,1)
y_RIL_C = []
for i in counter_35y:
y_RIL_C.append(seq_RIL_C)
flat_list_RIL_C = []
for sublist_C in y_RIL_C:
for item in sublist_C:
flat_list_RIL_C.append(item)
#the length of the list is now 216, so we remove the last 15 elements of the list to make the len=tf
flat_list_RIL_C = flat_list_RIL_C[:len(flat_list_RIL_C)-15]
#plotting
t = np.arange(0,tf,1)
plt.xlim([0, 200])
plt.plot(t, flat_list_RIL_C, color='darkviolet')
#yearly sequestration
## RIL (35-year cycle)
#find the yearly sequestration by calculating the differences between elements in list 'flat_list_RIL (https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
flat_list_RIL_C = [p - q for q, p in zip(flat_list_RIL_C, flat_list_RIL_C[1:])]
#since there is no sequestration between the replanting year (e.g., year 35 to 36), we have to replace negative numbers in 'flat_list_RIL' with 0 values
flat_list_RIL_C = [0 if i < 0 else i for i in flat_list_RIL_C]
#insert 0 value to the list as the first element, because there is no sequestration in year 0
var = 0
flat_list_RIL_C.insert(0,var)
#make 'flat_list_RIL' elements negative numbers to denote sequestration
flat_list_RIL_C = [ -x for x in flat_list_RIL_C]
print(flat_list_RIL_C)
#%%
#Step (5_1): Biomass C sequestration of the remaining unharvested block
df1 = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_EC.xlsx', 'RIL_S1')
df1_C = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_EC.xlsx', 'RIL_C_S1')
dfE = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_EC.xlsx', 'RIL_E')
dfE_C = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_EC.xlsx', 'RIL_C_E')
t = range(0,tf,1)
RIL_seq_S1= df1['RIL_seq'].values
RIL_seq_C_S1 = df1_C['RIL_seq'].values
RIL_seq_E = dfE['RIL_seq'].values
RIL_seq_C_E = dfE_C['RIL_seq'].values
#%%
#Step (6): post-harvest processing of wood
#post-harvest wood processing
df1 = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_EC.xlsx', 'RIL_S1')
dfE = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_EC.xlsx', 'RIL_E')
t = range(0,tf,1)
PH_Emissions_HWP1 = df1['PH_Emissions_HWP'].values
PH_Emissions_HWPE = dfE ['PH_Emissions_HWP'].values
#%%
#Step (7_1): landfill gas decomposition (CH4)
#CH4 decomposition
hl = 20 #half-life
k = (np.log(2))/hl
#S1
df1_CH4 = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_EC.xlsx', 'RIL_S1')
tf = 201
t = np.arange(tf)
def decomp_CH4_S1(t,Landfill_decomp_CH4_S1):
return (1-(1-np.exp(-k*t)))*Landfill_decomp_CH4_S1
#set zero matrix
output_decomp_CH4_S1 = np.zeros((len(t),len(df1_CH4['Landfill_decomp_CH4'].values)))
for i,remain_part_CH4_S1 in enumerate(df1_CH4['Landfill_decomp_CH4'].values):
#print(i,remain_part)
output_decomp_CH4_S1[i:,i] = decomp_CH4_S1(t[:len(t)-i],remain_part_CH4_S1)
print(output_decomp_CH4_S1[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_CH4_S1 = np.zeros((len(t)-1,len(df1_CH4['Landfill_decomp_CH4'].values-1)))
i = 0
while i < tf:
subs_matrix_CH4_S1[:,i] = np.diff(output_decomp_CH4_S1[:,i])
i = i + 1
print(subs_matrix_CH4_S1[:,:4])
print(len(subs_matrix_CH4_S1))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_CH4_S1 = subs_matrix_CH4_S1.clip(max=0)
print(subs_matrix_CH4_S1[:,:4])
#make the results as absolute values
subs_matrix_CH4_S1 = abs(subs_matrix_CH4_S1)
print(subs_matrix_CH4_S1[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_CH4_S1 = np.zeros((len(t)-200,len(df1_CH4['Landfill_decomp_CH4'].values)))
print(zero_matrix_CH4_S1)
subs_matrix_CH4_S1 = np.vstack((zero_matrix_CH4_S1, subs_matrix_CH4_S1))
print(subs_matrix_CH4_S1[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_CH4_S1 = (tf,1)
decomp_tot_CH4_S1 = np.zeros(matrix_tot_CH4_S1)
i = 0
while i < tf:
decomp_tot_CH4_S1[:,0] = decomp_tot_CH4_S1[:,0] + subs_matrix_CH4_S1[:,i]
i = i + 1
print(decomp_tot_CH4_S1[:,0])
#E
dfE_CH4 = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_EC.xlsx', 'RIL_E')
tf = 201
t = np.arange(tf)
def decomp_CH4_E(t,Landfill_decomp_CH4_E):
return (1-(1-np.exp(-k*t)))*Landfill_decomp_CH4_E
#set zero matrix
output_decomp_CH4_E = np.zeros((len(t),len(dfE_CH4['Landfill_decomp_CH4'].values)))
for i,remain_part_CH4_E in enumerate(dfE_CH4['Landfill_decomp_CH4'].values):
#print(i,remain_part)
output_decomp_CH4_E[i:,i] = decomp_CH4_E(t[:len(t)-i],remain_part_CH4_E)
print(output_decomp_CH4_E[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_CH4_E = np.zeros((len(t)-1,len(dfE_CH4['Landfill_decomp_CH4'].values-1)))
i = 0
while i < tf:
subs_matrix_CH4_E[:,i] = np.diff(output_decomp_CH4_E[:,i])
i = i + 1
print(subs_matrix_CH4_E[:,:4])
print(len(subs_matrix_CH4_E))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_CH4_E = subs_matrix_CH4_E.clip(max=0)
print(subs_matrix_CH4_E[:,:4])
#make the results as absolute values
subs_matrix_CH4_E = abs(subs_matrix_CH4_E)
print(subs_matrix_CH4_E[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_CH4_E = np.zeros((len(t)-200,len(dfE_CH4['Landfill_decomp_CH4'].values)))
print(zero_matrix_CH4_E)
subs_matrix_CH4_E = np.vstack((zero_matrix_CH4_E, subs_matrix_CH4_E))
print(subs_matrix_CH4_E[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_CH4_E = (tf,1)
decomp_tot_CH4_E = np.zeros(matrix_tot_CH4_E)
i = 0
while i < tf:
decomp_tot_CH4_E[:,0] = decomp_tot_CH4_E[:,0] + subs_matrix_CH4_E[:,i]
i = i + 1
print(decomp_tot_CH4_E[:,0])
#plotting
t = np.arange(0,tf)
plt.plot(t,decomp_tot_CH4_S1,label='S1')
#plt.plot(t,decomp_tot_CH4_S2,label='S2')
plt.plot(t,decomp_tot_CH4_E,label='E')
plt.xlim(0,200)
plt.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
plt.show()
type(decomp_tot_CH4_S1[:,0])
#%%
#Step (7_2): landfill gas decomposition (CO2)
#CO2 decomposition
hl = 20 #half-life
k = (np.log(2))/hl
#S1
df1_CO2 = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_EC.xlsx', 'RIL_S1')
tf = 201
t = np.arange(tf)
def decomp_CO2_S1(t,Landfill_decomp_CO2_S1):
return (1-(1-np.exp(-k*t)))*Landfill_decomp_CO2_S1
#set zero matrix
output_decomp_CO2_S1 = np.zeros((len(t),len(df1_CO2['Landfill_decomp_CO2'].values)))
for i,remain_part_CO2_S1 in enumerate(df1_CO2['Landfill_decomp_CO2'].values):
#print(i,remain_part)
output_decomp_CO2_S1[i:,i] = decomp_CO2_S1(t[:len(t)-i],remain_part_CO2_S1)
print(output_decomp_CO2_S1[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_CO2_S1 = np.zeros((len(t)-1,len(df1_CO2['Landfill_decomp_CO2'].values-1)))
i = 0
while i < tf:
subs_matrix_CO2_S1[:,i] = np.diff(output_decomp_CO2_S1[:,i])
i = i + 1
print(subs_matrix_CO2_S1[:,:4])
print(len(subs_matrix_CO2_S1))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_CO2_S1 = subs_matrix_CO2_S1.clip(max=0)
print(subs_matrix_CO2_S1[:,:4])
#make the results as absolute values
subs_matrix_CO2_S1 = abs(subs_matrix_CO2_S1)
print(subs_matrix_CO2_S1[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_CO2_S1 = np.zeros((len(t)-200,len(df1_CO2['Landfill_decomp_CO2'].values)))
print(zero_matrix_CO2_S1)
subs_matrix_CO2_S1 = np.vstack((zero_matrix_CO2_S1, subs_matrix_CO2_S1))
print(subs_matrix_CO2_S1[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_CO2_S1 = (tf,1)
decomp_tot_CO2_S1 = np.zeros(matrix_tot_CO2_S1)
i = 0
while i < tf:
decomp_tot_CO2_S1[:,0] = decomp_tot_CO2_S1[:,0] + subs_matrix_CO2_S1[:,i]
i = i + 1
print(decomp_tot_CO2_S1[:,0])
#E
dfE_CO2 = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_EC.xlsx', 'RIL_E')
tf = 201
t = np.arange(tf)
def decomp_CO2_E(t,Landfill_decomp_CO2_E):
return (1-(1-np.exp(-k*t)))*Landfill_decomp_CO2_E
#set zero matrix
output_decomp_CO2_E = np.zeros((len(t),len(dfE_CO2['Landfill_decomp_CO2'].values)))
for i,remain_part_CO2_E in enumerate(dfE_CO2['Landfill_decomp_CO2'].values):
#print(i,remain_part)
output_decomp_CO2_E[i:,i] = decomp_CO2_E(t[:len(t)-i],remain_part_CO2_E)
print(output_decomp_CO2_E[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_CO2_E = np.zeros((len(t)-1,len(dfE_CO2['Landfill_decomp_CO2'].values-1)))
i = 0
while i < tf:
subs_matrix_CO2_E[:,i] = np.diff(output_decomp_CO2_E[:,i])
i = i + 1
print(subs_matrix_CO2_E[:,:4])
print(len(subs_matrix_CO2_E))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_CO2_E = subs_matrix_CO2_E.clip(max=0)
print(subs_matrix_CO2_E[:,:4])
#make the results as absolute values
subs_matrix_CO2_E = abs(subs_matrix_CO2_E)
print(subs_matrix_CO2_E[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_CO2_E = np.zeros((len(t)-200,len(dfE_CO2['Landfill_decomp_CO2'].values)))
print(zero_matrix_CO2_E)
subs_matrix_CO2_E = np.vstack((zero_matrix_CO2_E, subs_matrix_CO2_E))
print(subs_matrix_CO2_E[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_CO2_E = (tf,1)
decomp_tot_CO2_E = np.zeros(matrix_tot_CO2_E)
i = 0
while i < tf:
decomp_tot_CO2_E[:,0] = decomp_tot_CO2_E[:,0] + subs_matrix_CO2_E[:,i]
i = i + 1
print(decomp_tot_CO2_E[:,0])
#plotting
t = np.arange(0,tf)
plt.plot(t,decomp_tot_CO2_S1,label='S1')
#plt.plot(t,decomp_tot_CO2_S2,label='S2')
plt.plot(t,decomp_tot_CO2_E,label='E')
plt.xlim(0,200)
plt.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
plt.show()
type(decomp_tot_CO2_S1[:,0])
#%%
#Step (8): Sum the emissions and sequestration (net carbon balance), CO2 and CH4 are separated
#https://stackoverflow.com/questions/52703442/python-sum-values-from-multiple-lists-more-than-two
#C_loss + C_remainAGB + C_remainHWP + PH_Emissions_PO
Emissions_S1 = [c_firewood_energy_S1, decomp_tot_S1[:,0], TestDSM1.o, PH_Emissions_HWP1, decomp_tot_CO2_S1[:,0]]
Emissions_E = [c_firewood_energy_E, c_pellets_E, decomp_tot_E[:,0], TestDSME.o, PH_Emissions_HWPE, decomp_tot_CO2_E[:,0]]
Emissions_S1_C = [c_firewood_energy_S1, decomp_tot_S1_C[:,0], TestDSM1.o, PH_Emissions_HWP1, decomp_tot_CO2_S1[:,0]]
Emissions_E_C = [c_firewood_energy_E, c_pellets_E, decomp_tot_E_C[:,0], TestDSME.o, PH_Emissions_HWPE, decomp_tot_CO2_E[:,0]]
Emissions_RIL_S1 = [sum(x) for x in zip(*Emissions_S1)]
Emissions_RIL_E = [sum(x) for x in zip(*Emissions_E)]
Emissions_RIL_S1_C = [sum(x) for x in zip(*Emissions_S1_C)]
Emissions_RIL_E_C = [sum(x) for x in zip(*Emissions_E_C)]
#CH4_S1
Emissions_CH4_RIL_S1 = decomp_tot_CH4_S1[:,0]
#CH4_E
Emissions_CH4_RIL_E = decomp_tot_CH4_E[:,0]
#%%
#Step (9): Generate the excel file (emissions_seq_scenarios.xlsx) from Step (8) calculation
#print year column
year = []
for x in range (0, tf):
year.append(x)
print (year)
#print CH4 emission column
import itertools
lst = [0]
Emissions_CH4 = list(itertools.chain.from_iterable(itertools.repeat(x, tf) for x in lst))
print(Emissions_CH4)
#print emission ref
lst1 = [0]
Emission_ref = list(itertools.chain.from_iterable(itertools.repeat(x, tf) for x in lst1))
print(Emission_ref)
#replace the first element with 1 to denote the emission reference as year 0 (for dynGWP calculation)
Emission_ref[0] = 1
print(Emission_ref)
Col1 = year
Col2_S1 = Emissions_RIL_S1
#Col2_S2 = Emissions_RIL_S2
Col2_E = Emissions_RIL_E
Col2_S1_C = Emissions_RIL_S1_C
Col2_E_C = Emissions_RIL_E_C
Col3_1 = Emissions_CH4_RIL_S1
#Col3_2 = Emissions_CH4_RIL_S2
Col3_E = Emissions_CH4_RIL_E
Col4 = Emission_ref
Col5_1 = [x + y for x, y in zip(flat_list_RIL, RIL_seq_S1)]
Col5_E = [x + y for x, y in zip(flat_list_RIL, RIL_seq_E)]
Col5_C_1 = [x + y for x, y in zip(flat_list_RIL_C, RIL_seq_C_S1)]
Col5_C_E = [x + y for x, y in zip(flat_list_RIL_C, RIL_seq_C_E)]
df1 = pd.DataFrame.from_dict({'Year':Col1,'kg_CO2':Col2_S1,'kg_CH4':Col3_1,'kg_CO2_seq':Col5_1,'emission_ref':Col4})
#df2 = pd.DataFrame.from_dict({'Year':Col1,'kg_CO2':Col2_S2,'kg_CH4':Col3_2,'kg_CO2_seq':Col5,'emission_ref':Col4})
dfE = pd.DataFrame.from_dict({'Year':Col1,'kg_CO2':Col2_E,'kg_CH4':Col3_E,'kg_CO2_seq':Col5_E,'emission_ref':Col4})
df1_C = pd.DataFrame.from_dict({'Year':Col1,'kg_CO2':Col2_S1_C,'kg_CH4':Col3_1,'kg_CO2_seq':Col5_C_1,'emission_ref':Col4})
dfE_C = pd.DataFrame.from_dict({'Year':Col1,'kg_CO2':Col2_E_C,'kg_CH4':Col3_E,'kg_CO2_seq':Col5_C_E,'emission_ref':Col4})
writer = pd.ExcelWriter('emissions_seq_RIL_EC.xlsx', engine = 'xlsxwriter')
df1.to_excel(writer, sheet_name = 'RIL_S1', header=True, index=False )
#df2.to_excel(writer, sheet_name = 'RIL_S2', header=True, index=False)
dfE.to_excel(writer, sheet_name = 'RIL_E', header=True, index=False)
df1_C.to_excel(writer, sheet_name = 'RIL_C_S1', header=True, index=False )
dfE_C.to_excel(writer, sheet_name = 'RIL_C_E', header=True, index=False)
writer.save()
writer.close()
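# Note: on newer pandas versions (>=1.5) ExcelWriter.save() is deprecated and close() already
# saves the workbook, so writer.close() alone is enough there; both calls are kept here for
# compatibility with the pandas version this script was written against.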
#%%
## DYNAMIC LCA (wood-based scenarios)
# Step (10): Set General Parameters for Dynamic LCA calculation
# General Parameters
aCH4 = 0.129957e-12; # methane - instantaneous radiative forcing per unit mass [W/m2 /kgCH4]
TauCH4 = 12; # methane - lifetime (years)
aCO2 = 0.0018088e-12; # CO2 - instantaneous radiative forcing per unit mass [W/m2 /kgCO2]
TauCO2 = [172.9, 18.51, 1.186]; # CO2 parameters according to Bern carbon cycle-climate model
aBern = [0.259, 0.338, 0.186]; # CO2 parameters according to Bern carbon cycle-climate model
a0Bern = 0.217; # CO2 parameters according to Bern carbon cycle-climate model
tf = 202 #until 202 because we want to get the DCF(t-i) until DCF(201) to determine the impact from the emission from the year 200 (There is no DCF(0))
#%%
#Step (11): Bern 2.5 CC Model, determine atmospheric load (C(t)) for GHG (CO2 and CH4)
t = range(0,tf,1)
## CO2 calculation formula
# time-dependent atmospheric load for CO2, Bern model
def C_CO2(t):
return a0Bern + aBern[0]*np.exp(-t/TauCO2[0]) + aBern[1]*np.exp(-t/TauCO2[1]) + aBern[2]*np.exp(-t/TauCO2[2])
output_CO2 = np.array([C_CO2(ti) for ti in t])
print(output_CO2)
## CH4 calculation formula
# time-dependent atmospheric load for non-CO2 GHGs (Methane)
def C_CH4(t):
return np.exp(-t/TauCH4)
output_CH4 = np.array([C_CH4(ti) for ti in t])
plt.xlim([0, 200])
plt.ylim([0,1.1])
plt.plot(t, output_CO2, output_CH4)
plt.xlabel('Time (year)')
plt.ylabel('Fraction of CO$_2$')
plt.show()
output_CH4.size
#%%
#determine the C(t) for CO2
s = []
t = np.arange(0,tf,1)
for i in t:
s.append(quad(C_CO2,i-1,i))
res_list_CO2 = [x[0] for x in s]
len(res_list_CO2)
#%%
#determine the C(t) for CH4
s = []
for i in t:
s.append(quad(C_CH4,i-1,i))
res_list_CH4 = [p[0] for p in s]
#plot
plt.xlim([0, 200])
plt.ylim([0,1.5])
plt.plot(t, res_list_CO2, res_list_CH4)
plt.show()
#%%
#Step (12): Determine dynamic characterization factors (DCF) for CO2 and CH4
DCF_inst_CO2 = aCO2 * np.array(res_list_CO2)
print(DCF_inst_CO2)
DCF_inst_CH4 = aCH4 * np.array(res_list_CH4)
plt.xlim([0, 200])
plt.ylim([0,4e-15])
plt.plot(t, DCF_inst_CO2, DCF_inst_CH4)
plt.xlabel('Time (year)')
plt.ylabel('DCF_inst (10$^{-15}$ W/m$^2$.kg CO$_2$)')
plt.show()
len(DCF_inst_CO2)
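# In equation form, the instantaneous dynamic characterization factor computed above is
#   DCF_inst(t) = a_GHG * integral from (t-1) to t of C_GHG(t') dt'
# where a_GHG is the radiative efficiency (aCO2 or aCH4, in W/m2 per kg) and C_GHG is the
# atmospheric decay curve from Step (11); res_list_CO2 / res_list_CH4 hold those yearly integrals.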
#%%
#Step (13): import emission data from emissions_seq_scenarios.xlsx (Step (9))
##wood-based
#read S1
df = | pd.read_excel('emissions_seq_RIL_EC.xlsx', 'RIL_S1') | pandas.read_excel |
import nate
from graph_tool.all import *
import matplotlib
import pandas as pd
import numpy as np
from collections import defaultdict
from numpy.random import *
import sys, os, os.path
import time
import datetime
from gi.repository import Gtk, Gdk, GdkPixbuf, GObject, GLib
from PIL import Image, ImageDraw, ImageFont
from xvfbwrapper import Xvfb
"""
This module provides functions for animating the presence of bursting SVO terms over time
"""
def prepare_df(burst_dict, offset_dict):
"""
Takes a dictionary of SVO bursts and SVO time offsets to return a temporary dataframe used
in subsequent functions
"""
df = | pd.DataFrame() | pandas.DataFrame |
# coding=utf-8
"""
Convert the data into the tfrecords format consumed by the deep models,
providing inputs for deep_model_1 and deep_model_2 respectively.
@author: yuhaitao
"""
import pandas as pd
import os
import tqdm
import numpy as np
import seaborn as sns
import json
import gc
import sys
import datetime
import multiprocessing
import tensorflow as tf
from random import random
from sklearn.model_selection import KFold
from tqdm import tqdm
from sklearn.model_selection import KFold
from data_loader import myDataLoader, var_norm, min_max_norm
from utils import get_emb_id, cross_feature, norm_and_smape, SMAPE
def make_tfrecords_train_eval(data, out_dir, prefix='standard'):
"""将训练集5折分别写入tfrecords"""
# 划分k_fold
all_index = range(len(data))
k_fold = KFold(n_splits=5, shuffle=True, random_state=1)
    # iterate over the k folds
for fold_idx, (train_idx, val_idx) in enumerate(k_fold.split(all_index)):
train_fold = data.iloc[train_idx]
val_fold = data.iloc[val_idx]
        # extract the feature columns to be used
use_cols = [col for col in data.columns if col !=
'id' and 'p' not in col]
print(f'Number of common used features: {len(use_cols)}')
print(f'Making tfrecords for fold_{fold_idx}')
        # write the training fold to tfrecords
train_x, train_labels, train_id = train_fold[use_cols], train_fold[[f'p{i+1}' for i in range(6)]], train_fold[['id']]
with tf.io.TFRecordWriter(os.path.join(out_dir, f'{prefix}_train_{len(use_cols)}_fold_{fold_idx}.tfrecords')) as writer:
for inputs, labels, id_ in zip(train_x.values, train_labels.values, train_id.values):
feature = {
'inputs': tf.train.Feature(float_list=tf.train.FloatList(value=inputs)),
'labels': tf.train.Feature(float_list=tf.train.FloatList(value=labels)),
'id_': tf.train.Feature(bytes_list=tf.train.BytesList(value=[str(id_[0]).encode()]))
}
example = tf.train.Example(features=tf.train.Features(feature=feature))
writer.write(example.SerializeToString())
        # write the validation fold to tfrecords
val_x, val_labels, val_id = val_fold[use_cols], val_fold[[f'p{i+1}' for i in range(6)]], val_fold[['id']]
with tf.io.TFRecordWriter(os.path.join(out_dir, f'{prefix}_val_{len(use_cols)}_fold_{fold_idx}.tfrecords')) as writer:
for inputs, labels, id_ in zip(val_x.values, val_labels.values, val_id.values):
feature = {
'inputs': tf.train.Feature(float_list=tf.train.FloatList(value=inputs)),
'labels': tf.train.Feature(float_list=tf.train.FloatList(value=labels)),
'id_': tf.train.Feature(bytes_list=tf.train.BytesList(value=[str(id_[0]).encode()]))
}
example = tf.train.Example(features=tf.train.Features(feature=feature))
writer.write(example.SerializeToString())
def make_tfrecords_test(data, out_dir, prefix=''):
"""将测试集写入tfrecords"""
use_cols = [col for col in data.columns if col != 'id' and 'p' not in col]
print(f'Number of common used features: {len(use_cols)}')
print(f'Making tfrecords for test data')
test_x, test_labels, test_id = data[use_cols], data[[f'p{i+1}' for i in range(6)]], data[['id']]
with tf.io.TFRecordWriter(os.path.join(out_dir, f'{prefix}_test_{len(use_cols)}.tfrecords')) as writer:
for inputs, labels, id_ in zip(test_x.values, test_labels.values, test_id.values):
feature = {
'inputs': tf.train.Feature(float_list=tf.train.FloatList(value=inputs)),
'labels': tf.train.Feature(float_list=tf.train.FloatList(value=labels)),
'id_': tf.train.Feature(bytes_list=tf.train.BytesList(value=[str(id_[0]).encode()]))
}
example = tf.train.Example(features=tf.train.Features(feature=feature))
writer.write(example.SerializeToString())
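# A minimal sketch (not part of the original pipeline) of how these records could be read back
# with tf.data; n_features must match the number of feature columns used when the file was
# written (it is encoded in the tfrecords file name), and the label vector always has 6 entries.
def load_tfrecords(path, n_features, batch_size=256):
    feature_spec = {
        'inputs': tf.io.FixedLenFeature([n_features], tf.float32),
        'labels': tf.io.FixedLenFeature([6], tf.float32),
        'id_': tf.io.FixedLenFeature([], tf.string),
    }
    def _parse(example_proto):
        return tf.io.parse_single_example(example_proto, feature_spec)
    return tf.data.TFRecordDataset(path).map(_parse).batch(batch_size)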
def standard_main(data, prefix, mode='train'):
with open('./feature_info.json', 'r') as f:
feature_infos = json.load(f)
use_cols = [col for col in data.columns if col !='id' and 'p' not in col]
# 划分wide 与 deep 不同部分的特征
deep_cols, wide_cols = [], []
for col in use_cols:
if 'w' not in col:
deep_cols.append(col)
else:
wide_cols.append(col)
wide_df = data[wide_cols]
deep_df = data[deep_cols]
    # print some summary info
print(f'Number of common used features: {len(use_cols)}')
print(f'wide part dimension: {wide_df.shape}')
print(f'deep part dimension: {deep_df.shape}')
imp_feat = ['34', '10', '15', '3161', '14', '28', '753', '3160', '435', '3174', '928', '729', '1459', '2505', '457', '25', '8', '2801', '2483', '23', '869', '2976', '20', '11', '36']
cross_df = cross_feature(wide_df, imp_feat, feature_infos)
print(f'cross features size for {imp_feat} is: {cross_df.shape}')
    # standardize the labels as well
label_df = None
if mode == 'test':
label_df = pd.DataFrame(np.zeros((len(wide_df),6)), columns=[f'p{i+1}' for i in range(6)])
data = pd.concat([wide_df, deep_df, cross_df, label_df, data[['id']]], axis=1)
make_tfrecords_test(data=data, out_dir='./data/tfrecords/standard', prefix=prefix)
else:
label_df = data[[f'p{i+1}' for i in range(6)]]
label_df = label_df.apply(var_norm, args=(feature_infos,))
data = pd.concat([wide_df, deep_df, cross_df, label_df, data[['id']]], axis=1)
make_tfrecords_train_eval(data=data, out_dir='./data/tfrecords/standard', prefix=prefix)
def feature_norm_all(data, mode):
"""
    Standardize all of the data: 0/1 features become +1/-1 style values, and cross features
    are standardized by mean and variance as well.
    Note: discrete features do not use one globally computed mean/variance, but the
    mean/variance of their set of discrete values.
"""
def w_col_norm(x, feature_infos):
"""w_cols标准化"""
x_list = feature_infos[x.name]['list']
x_mean, x_std = np.arange(len(x_list)).mean(), np.arange(len(x_list)).std()
out = []
for a in x:
if a in x_list:
out.append(float((x_list.index(a)-x_mean)/x_std))
            else:  # for feature values not seen before, assign a position just past the end of the list plus a small random offset
out.append(float((len(x_list)-1+random()-x_mean)/x_std))
return out
def c_col_norm(x, feature_infos):
"""c_cols标准化"""
i_name, j_name = x.name.split('_')[1], x.name.split('_')[2]
i_list, j_list = feature_infos[i_name]['list'], feature_infos[j_name]['list']
x_len = len(i_list) * len(j_list) + 1
x_mean, x_std = np.arange(x_len).mean(), np.arange(x_len).std()
out = []
        for a in x:  # no need to check membership here: after feature engineering, all cross-feature values are guaranteed to be present
out.append(float((a - x_mean) / x_std))
return out
def bu_col_norm(x, feature_infos):
"""bu_cols标准化"""
        x_mean, x_std = np.arange(12).mean(), np.arange(12).std()  # note: every bucketization uses 12 bins, including the extra bins for out-of-range low/high values
out = []
        for a in x:  # no need to check membership here: after feature engineering, all bucket values are guaranteed to be present
out.append(float((a - x_mean) / x_std))
return out
def bw_col_norm(x, feature_infos):
"""c_cols标准化"""
b_name, w_name = x.name.split('_')[1], x.name.split('_')[2]
w_list = feature_infos[w_name]['list']
x_len = 12 * len(w_list) + 1
x_mean, x_std = np.arange(x_len).mean(), np.arange(x_len).std()
out = []
        for a in x:  # no need to check membership here: after feature engineering, all cross-feature values are guaranteed to be present
out.append(float((a - x_mean) / x_std))
return out
def bb_col_norm(x, feature_infos):
"""c_cols标准化"""
        x_len = 12 * 12  # note: each bucketized feature has 12 bins
x_mean, x_std = np.arange(x_len).mean(), np.arange(x_len).std()
out = []
        for a in x:  # no need to check membership here: after feature engineering, all cross-feature values are guaranteed to be present
out.append(float((a - x_mean) / x_std))
return out
with open('./feature_info.json', 'r') as f:
feature_infos = json.load(f)
use_cols, not_use_cols = [], []
for col in data.columns:
if col != 'id' and 'p' not in col:
use_cols.append(col)
else:
not_use_cols.append(col)
deep_cols, wide_cols = [], []
for col in use_cols:
if data[col].dtype == float:
deep_cols.append(col)
else:
wide_cols.append(col)
w_cols, c_cols, bu_cols, bw_cols, bb_cols = [], [], [], [], []
for col in wide_cols:
if 'c_' in col:
c_cols.append(col)
elif 'bu_' in col:
bu_cols.append(col)
elif 'bw_' in col:
bw_cols.append(col)
elif 'bb_' in col:
bb_cols.append(col)
else:
w_cols.append(col)
new_data = None
new_data = data[w_cols].apply(w_col_norm, args=(feature_infos, ), axis=0)
mm = data[c_cols].apply(c_col_norm, args=(feature_infos, ), axis=0)
new_data = pd.concat([new_data, mm], axis=1)
mm_bu = data[bu_cols].apply(bu_col_norm, args=(feature_infos, ), axis=0)
new_data = pd.concat([new_data, mm_bu], axis=1)
mm_bw = data[bw_cols].apply(bw_col_norm, args=(feature_infos, ), axis=0)
new_data = pd.concat([new_data, mm_bw], axis=1)
mm_bb = data[bb_cols].apply(bb_col_norm, args=(feature_infos, ), axis=0)
new_data = pd.concat([new_data, mm_bb], axis=1)
deep_df = data[deep_cols].apply(var_norm, args=(feature_infos, ))
new_data = | pd.concat([new_data, deep_df], axis=1) | pandas.concat |
from snownlp import SnowNLP
import pandas as pd
import utils
import os
import numpy as np
import re
class Generate_CSV():
"""docstring for Generate_CSV"""
def __init__(self):
super(Generate_CSV, self).__init__()
self.dirlist = os.listdir()
if '关键词分类.xlsx' in self.dirlist:
self.keywords_df = pd.read_excel('关键词分类.xlsx')
else:
            raise Exception('Please create 关键词分类.xlsx first')
self.date_list = ['2015/7', '2015/8', '2015/9', '2015/10',
'2015/11', '2015/12', '2016/1', '2016/2',
'2016/3', '2016/4', '2016/5', '2016/6',
'2016/7', '2016/8', '2016/9', '2016/10',
'2016/11', '2016/12', '2017/1', '2017/2',
'2017/3', '2017/4', '2017/5', '2017/6',
'2017/7', '2017/8', '2017/9', '2017/10',
'2017/11', '2017/12', '1月', '2月', '3月', '4月',
'5月', '6月', '7月', '8月', '9月',
'10月', '11月', '12月']
def mark_commonds(self):
        # This method does two jobs at once:
        # 1. filter out useless comments  2. score the useful comments
self.dirlist = os.listdir()
if 'raw_data.csv' in self.dirlist:
raw_df = pd.read_csv('raw_data.csv')
raw_df.fillna('nan')
else:
            raise Exception('Please create raw_data.csv first')
word_lists = utils.word_frequency_statistics(raw_df)
df = pd.DataFrame(columns=['keywords', 'commands', 'date', 'mark'])
for i, row in raw_df.iterrows():
comm = str(row.评论内容)
if comm == 'nan' or comm == ''or len(comm) < 2:
continue
else:
comm = utils.Extract_Commands(comm).extract_command()
if comm == '':
                    # the comment may consist entirely of quoted text, leaving no valid content
continue
for w in word_lists:
if w in comm:
mark = round(SnowNLP(comm).sentiments, 3)
df.loc[df.shape[0]] = [row.关键词, comm, row.评论时间, mark]
if df.shape[0] % 100 == 0:
                            print('Processed %d comments' % df.shape[0])
break
        print('All comments scored; generating sentiment_analysis.csv')
df.to_csv('sentiment_analysis.csv', index=False, encoding='utf_8_sig')
def commands_attribute(self):
self.dirlist = os.listdir()
if 'sentiment_analysis.csv' in self.dirlist:
sen_df = pd.read_csv('sentiment_analysis.csv')
else:
            raise Exception('Please create sentiment_analysis.csv first or call the mark_commonds method')
t_list = self.keywords_df.columns.tolist()
t_list.insert(0, '日期')
df_avg = pd.DataFrame(columns=t_list)
df_std = pd.DataFrame(columns=t_list)
df_sum = pd.DataFrame(columns=t_list)
for t in self.date_list:
df_t = sen_df[sen_df.date.str.contains(t)]
if df_t.empty:
continue
list_avg = []
list_std = []
list_sum = []
for c in self.keywords_df.columns:
kwd_list = self.keywords_df[c].dropna().tolist()
kwd_s = ''
for i, s in enumerate(kwd_list):
if i == 0:
kwd_s += s
else:
kwd_s += '|'
kwd_s += s
df_attr = df_t[df_t.keywords.str.contains(kwd_s)]
list_avg.append(np.mean(df_attr.mark))
list_std.append(np.std(df_attr.mark, ddof=1))
list_sum.append(len(df_attr))
time_list = re.findall(r'\d+\.?\d*', t)
if '月' in t:
t_str = '2018-' + str(time_list[0])
else:
t_str = str(time_list[0]) + '-' + str(time_list[1])
list_avg.insert(0, t_str)
list_std.insert(0, t_str)
list_sum.insert(0, t_str)
            print('Processing data for %s' % t_str)
df_avg.loc[df_avg.shape[0]] = list_avg
df_std.loc[df_std.shape[0]] = list_std
df_sum.loc[df_sum.shape[0]] = list_sum
df_avg.to_csv('avg_of_mark.csv', index=False, encoding='utf_8_sig')
df_std.to_csv('std_of_mark.csv', index=False, encoding='utf_8_sig')
df_sum.to_csv('sum_of_commands.csv', index=False, encoding='utf_8_sig')
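        # The three CSVs written above hold, per time period and keyword category: the mean
        # sentiment score (avg_of_mark.csv), its standard deviation (std_of_mark.csv) and
        # the number of matched comments (sum_of_commands.csv).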
def factor_weights(self):
self.dirlist = os.listdir()
df = pd.DataFrame(columns=['分类', '平均分权重',
'标准差权重', '评论总数权重',
'归一化的总权重'])
if 'avg_of_mark.csv' in self.dirlist and \
'std_of_mark.csv' in self.dirlist and\
'sum_of_commands.csv' in self.dirlist:
df_avg = pd.read_csv('avg_of_mark.csv')
df_avg = df_avg.dropna().reset_index(drop=True)
df_std = pd.read_csv('std_of_mark.csv')
            df_std = df_std.dropna().reset_index(drop=True)
df_sum = pd.read_csv('sum_of_commands.csv')
            df_sum = df_sum.dropna().reset_index(drop=True)
else:
            raise Exception('Please create avg_of_mark.csv, std_of_mark.csv and sum_of_commands.csv first, or call the commands_attribute method')
        print('Computing average-score weights...')
df_avg_indexs = df_avg.columns[1:].tolist()
df_avg_positive = df_avg_indexs
df_avg_negative = []
df_avg_date = df_avg['日期']
df_avg_index = df_avg[df_avg_indexs]
df_avg_en = utils.EntropyMethod(df_avg_index, df_avg_negative,
df_avg_positive, df_avg_date)
avg_series = df_avg_en.calc_Weight()
df.平均分权重 = avg_series
        print('Computing standard-deviation weights...')
std_wl = []
for c in df_std.columns[1:]:
std_wl.append(np.mean(df_std[c]))
std_ws = pd.Series(std_wl)
std_ws = std_ws / np.sum(std_ws)
df.标准差权重 = std_ws.tolist()
        print('Computing comment-count weights...')
sum_wl = []
for c in df_sum.columns[1:]:
sum_wl.append(np.mean(df_sum[c]))
sum_ws = pd.Series(sum_wl)
        sum_ws = sum_ws / np.sum(sum_ws)
df.评论总数权重 = sum_ws.tolist()
        print('Computing the normalized total weights...')
df.分类 = df_avg.columns[1:]
ws = df.平均分权重 * df.标准差权重 * df.评论总数权重
ws = ws / np.sum(ws)
df.归一化的总权重 = ws
df.to_csv('factor_weights.csv', index=False, encoding='utf_8_sig')
def total_grade(self):
self.dirlist = os.listdir()
if 'avg_of_mark.csv' in self.dirlist and \
'factor_weights.csv' in self.dirlist:
df_score = pd.read_csv('avg_of_mark.csv')
df_weight = pd.read_csv('factor_weights.csv')
else:
            raise Exception('Please create avg_of_mark.csv and factor_weights.csv first, or call the factor_weights method')
t_list = df_score.columns.tolist()
t_list.append('满意度综合指数')
df = pd.DataFrame(columns=t_list)
years = ['2015', '2016', '2017', '2018']
        print('Computing the 4-year composite satisfaction index...')
for y in years:
df_year = df_score[df_score.日期.str.contains(y)]
score_array = np.array(np.mean(df_year))
weight_array = np.array(df_weight.归一化的总权重)
col_list = np.multiply(score_array, weight_array).tolist()
col_list.append(np.sum(col_list))
col_list.insert(0, y)
df.loc[df.shape[0]] = col_list
df.to_csv('total_grade.csv', index=False, encoding='utf_8_sig')
def gary_relational_analysis(self):
self.dirlist = os.listdir()
if 'total_grade.csv' in self.dirlist and\
'annual.csv' in self.dirlist:
df_satisfy = pd.read_csv('total_grade.csv')
df_annual = | pd.read_csv('annual.csv') | pandas.read_csv |
import sys, os, threading, h5py, json, warnings, tables, logging, config, Queue, time
import multiprocessing as mp
import numpy as np
import pandas as pd
from util import now,now2
from routines import add_to_saver_buffer
class Saver(mp.Process):
# To save, call saver.write() with either a dict or a numpy array
# To end, just use the saver.end method. It will raise a kill flag, then perform a final flush.
def __init__(self, subj, sesh_name, session_obj, data_file=config.datafile, sync_flag=None, field_buffer_size=30, forced_flush_fieldnames=dict(analogreader=5)):
super(Saver, self).__init__()
# Sync
self.sync_flag = sync_flag
self.sync_val = mp.Value('d', 0)
# Static instance properties
self.subj = subj
self.sesh_name = sesh_name
self.session_obj = session_obj
self.data_file = data_file
self.sesh_path = ['sessions', self.sesh_name.strftime('%Y%m%d%H%M%S')]
self.past_trials = self.get_past_trials()
self.field_buffer_size = field_buffer_size
self.forced_flush_fieldnames = forced_flush_fieldnames # used to indicate special fields that should be flushed when n items have been supplied. format: {fieldname:n}
# Externally accessible flags and variables
self.buf = mp.Queue()
self.notes_q = mp.Queue()
self.kill_flag = mp.Value('b', False)
self.start()
def write(self, *args, **kwargs):
if self.kill_flag.value:
return
add_to_saver_buffer(self.buf, *args, **kwargs)
def run(self):
while not self.sync_flag.value:
self.sync_val.value = now()
# Externally inaccessible instance-specific structures
self.f = | pd.HDFStore(self.data_file, mode='a') | pandas.HDFStore |
import pandas as pd
import gc
import xgboost as xgb
from xgboost import XGBClassifier, plot_importance
from sklearn import metrics
from lib.utils import *
from lib.feature_lib import *
class Solution:
train_df = None
label_df = None
test_df = None
f_ppl = []
train_f_set = None
test_f_set = None
model = None
train_result = None
test_result = None
log = None
method = 'xgb'
input_path = '../input/'
output_path = '../output/'
data_set = None
transductive =False
para_tune_fcg = None
def __init__(self):
pass
def load_dataset(self):
tic = report("Load Dataset Start")
if self.transductive:
self.data_set.load_train(self)
self.data_set.load_test(self)
else:
self.data_set.load_test(self)
report("Load Dataset Done", tic)
def build_features(self):
tic = report("Build Features Start")
if self.transductive:
merge = | pd.concat([self.train_df, self.test_df]) | pandas.concat |
import glob
import hashlib
import os
from multiprocessing.pool import Pool
import numpy as np
import pandas as pd
from recsys.metric import mrr_fast
from recsys.mrr import mrr_fast_v3
from recsys.submission import group_clickouts
from recsys.utils import group_lengths
from scipy.optimize import fmin
def str_to_hash(s):
return int(hashlib.sha1(s.encode("utf-8")).hexdigest(), 16) % (10 ** 8)
# we have 2 predictions sets
def get_preds_1():
"""
This set of predictions is identified by hash and a suffix with a model type
:return:
"""
pred1_vals = glob.glob("predictions/model_val_*.csv")
pred1_vals_hashes = [fn.split("/")[-1].replace(".csv", "").replace("model_val_", "") for fn in pred1_vals]
pred1_subs = glob.glob("predictions/model_submit_*.csv")
pred1_subs_hashes = [fn.split("/")[-1].replace(".csv", "").replace("model_submit_", "") for fn in pred1_subs]
common_hashes = set(pred1_subs_hashes) & set(pred1_vals_hashes)
pred1_vals_c = sorted([(hsh, fn) for hsh, fn in zip(pred1_vals_hashes, pred1_vals) if hsh in common_hashes])
pred1_subs_c = sorted([(hsh, fn) for hsh, fn in zip(pred1_subs_hashes, pred1_subs) if hsh in common_hashes])
return pred1_vals_c, pred1_subs_c
def get_preds_2():
"""
This set of predictions is identified by hash and a suffix with a model type
:return:
"""
pred1_vals = glob.glob("predictions/runs/*_val/config.json")
pred1_vals_hashes = [fn.split("/")[-2].split("_")[0] for fn in pred1_vals]
pred1_subs = glob.glob("predictions/runs/*_sub/config.json")
pred1_subs_hashes = [fn.split("/")[-2].split("_")[0] for fn in pred1_subs]
common_hashes = set(pred1_subs_hashes) & set(pred1_vals_hashes)
pred1_vals_c = sorted(
[
(hsh, fn.replace("config.json", "predictions.csv"))
for hsh, fn in zip(pred1_vals_hashes, pred1_vals)
if hsh in common_hashes
]
)
pred1_subs_c = sorted(
[
(hsh, fn.replace("config.json", "predictions.csv"))
for hsh, fn in zip(pred1_subs_hashes, pred1_subs)
if hsh in common_hashes
]
)
return pred1_vals_c, pred1_subs_c
def read_prediction_val(fn):
p = pd.read_csv(fn)
p.sort_values(["user_id", "session_id", "step"], inplace=True)
p.reset_index(inplace=True, drop=True)
mrr = mrr_fast(p, "click_proba")
config_file = fn.replace("predictions.csv", "config.json")
if os.path.exists(config_file) and config_file.endswith("config.json"):
config = open(config_file).read()
else:
config = fn
return mrr, p, config
def read_prediction(fn):
p = pd.read_csv(fn)
p.sort_values(["user_id", "session_id", "step"], inplace=True)
p.reset_index(inplace=True, drop=True)
return p
if __name__ == "__main__":
preds1_vals, preds1_subs = get_preds_1()
preds2_vals, preds2_subs = get_preds_2()
preds_vals_all = preds1_vals + preds2_vals
preds_subs_all = preds1_subs + preds2_subs
# read validation models
with Pool(32) as pool:
val_predictions_dfs = pool.map(read_prediction_val, [fn for _, fn in preds_vals_all])
val_predictions = [
(mrr, hsh, df, config)
for ((hsh, fn), (mrr, df, config)) in zip(preds_vals_all, val_predictions_dfs)
if (df.shape[0] == 3_077_674) and (mrr > 0.68) and ("160357" not in fn) and ("59629" not in fn)
]
val_hashes = [p[1] for p in val_predictions]
print("Debuging click probas")
for mrr, hsh, df, _ in val_predictions:
print(mrr, hsh, df["click_proba"].min(), df["click_proba"].max())
final = val_predictions[-1][2].copy()
lengths = group_lengths(final["clickout_id"])
preds_stack = np.vstack([df["click_proba"] for _, _, df, _ in val_predictions]).T
def opt(v):
preds_ens = preds_stack.dot(v)
mrr = mrr_fast_v3(final["was_clicked"].values, preds_ens, lengths)
print(f"MRR {mrr}")
return -mrr
coefs = fmin(opt, [0] * preds_stack.shape[1])
coefs = fmin(opt, coefs, ftol=0.000_001)
final["click_proba"] = preds_stack.dot(coefs)
mrr = mrr_fast(final, "click_proba")
mrr_str = f"{mrr:.4f}"[2:]
print(mrr)
mrrs, _, _, configs = list(zip(*val_predictions))
summary_df = | pd.DataFrame({"config": configs, "mrr": mrrs, "coef": coefs}) | pandas.DataFrame |
import numpy as np
import pandas as pd
import scipy.stats as scs
import matplotlib.pyplot as plt
from PIL import Image
import requests
from bs4 import BeautifulSoup
import json
from Static import SP500
import os
def VAR(select):
ticker = SP500.get(select)
try:
url_prefix = 'https://sandbox.iexapis.com/stable/stock/'
url_suffix = '/chart/max?token=T<PASSWORD>'
full_url = '{}{}{}'.format(url_prefix, ticker.lower(), url_suffix)
source = requests.get(full_url, timeout=20)
soup = BeautifulSoup(source.text, 'html.parser')
data = json.loads(str(soup))
except (requests.exceptions.Timeout, requests.exceptions.ConnectionError):
return ['error', 'Error: Internet connection failure.']
except (ValueError, requests.exceptions.HTTPError):
return ['error', 'Error: {}{}'.format(full_url, '\ndoes not exist.')]
except:
return ['error', 'Error parsing IEX API. Please contact the creator for its resolution.']
if not data:
error_msg = 'Error: IEX API cannot load price data for {}.'.format(select)
return ['error', error_msg]
data = | pd.DataFrame(data, columns=['date', 'close']) | pandas.DataFrame |
#encoding=utf-8
import pandas as pd
import re
import scipy.stats as stats
import numpy as np
from Data import load_file
import time
import datetime
dir='D:/kesci'
train_UserUpdate_path='/data/train/train_UserUpdateInfo.csv'
test_UserUpdate_path='/PPD-Second-Round-Data/复赛测试集/Userupdate_Info_9w_1.csv'
#类别特征处理
def getOneHot(category_data,category):
temp_category_data=category_data[category]
temp=pd.DataFrame()
for i in category:
temp_Series=temp_category_data[i].astype('category')
temp=pd.concat([temp,temp_Series],axis=1)
temp=pd.get_dummies(temp,dummy_na=True)
return temp
#date feature processing
def get_day(date_col_1,date_col_2):
date1=[time.strptime(i,'%Y/%m/%d') for i in date_col_1]
date2=[time.strptime(i,'%Y/%m/%d') for i in date_col_2]
date1=[datetime.datetime(date1[i][0],date1[i][1],date1[i][2]) for i in range(len(date1))]
date2=[datetime.datetime(date2[i][0],date2[i][1],date2[i][2]) for i in range(len(date2))]
d=[(date1[i]-date2[i]).days for i in range(len(date1))]
return d
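# Quick illustration of the two helpers above (hypothetical toy inputs):
#   getOneHot(data, ['UserupdateInfo1']) returns dummy-coded columns (including a NaN indicator column)
#   get_day(['2014/3/5'], ['2014/2/5']) -> [28]  (days from the second date to the first)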
#build the UserUpdate features for the training set
def get_train_UserUpdate():
train_UserUpdate=load_file(dir,train_UserUpdate_path)
train_update_Idx=train_UserUpdate['Idx']
    #one-hot encode UserupdateInfo1
category=['UserupdateInfo1']
train_UserUpdateInfo1=getOneHot(train_UserUpdate,category)
    #attach the Idx identifier
train_UserUpdateInfo_1=pd.concat([train_update_Idx,train_UserUpdateInfo1],axis=1)
print(train_UserUpdateInfo_1.shape)
train_UserUpdateInfo_GroupBy=train_UserUpdateInfo_1.groupby(train_UserUpdateInfo_1['Idx'])
train_UserInfo_1=train_UserUpdateInfo_GroupBy.aggregate(np.sum)
    print('UserUpdateInfo1 feature processing done')
print(train_UserInfo_1.shape)
    #process the date features
Update_day=get_day(train_UserUpdate['ListingInfo1'],train_UserUpdate['UserupdateInfo2'])
Update_day=pd.Series(Update_day,name='Update_day')
train_UserUpdate_day= | pd.concat([train_update_Idx,Update_day],axis=1) | pandas.concat |
"""
Helper functions specifically for the
profile assimilation coupled with Obs study
"""
import os, sys
import numpy as np
import pandas as pd
import xarray
from scipy.interpolate import interp1d
# manually add a2e-mmc repos to PYTHONPATH if needed
module_path = os.path.join(os.environ['HOME'],'tools','a2e-mmc')
if module_path not in sys.path:
sys.path.append(module_path)
from mmctools.helper_functions import calc_wind, covariance, power_spectral_density, theta
# manually add NWTC/datatools repo to PYTHONPATH
module_path = os.path.join(os.environ['HOME'],'tools')
if module_path not in sys.path:
sys.path.append(module_path)
from datatools.SOWFA6.postProcessing.averaging import PlanarAverages
from datatools.SOWFA6.postProcessing.probes import Probe
from datatools.SOWFA6.postProcessing.sourceHistory import SourceHistory
from datatools import openfoam_util
# ----------------------
# Loading reference data
# ----------------------
def load_wrf_reference_data(fpath):
"""
Load WRF reference data
"""
# Load data with xarray
xa = xarray.open_dataset(fpath)
# Convert to pandas dataframe
wrf = xa.to_dataframe()
# Convert to standard names
wrf.rename({'U':'u','V':'v','W':'w','UST':'u*'},
axis='columns',inplace=True)
# Compute wind speed and wind direction
wrf['wspd'], wrf['wdir'] = calc_wind(wrf)
return wrf
def load_radar_reference_data(fpath):
"""
Load TTU radar reference data
"""
radar = | pd.read_csv(fpath,parse_dates=True,index_col=['datetime','height']) | pandas.read_csv |
# Copyright WillianFuks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests for module main.py. Fixtures comes from file conftest.py located at the same dir
of this file.
"""
import mock
import numpy as np
import pandas as pd
import pytest
import tensorflow as tf
import tensorflow_probability as tfp
from numpy.testing import assert_array_equal
from pandas.util.testing import assert_frame_equal
from causalimpact import CausalImpact
from causalimpact.misc import standardize
@pytest.mark.slow
def test_default_causal_cto(rand_data, pre_int_period, post_int_period):
ci = CausalImpact(rand_data, pre_int_period, post_int_period)
assert_frame_equal(ci.data, rand_data)
assert ci.pre_period == pre_int_period
assert ci.post_period == post_int_period
pre_data = rand_data.loc[pre_int_period[0]: pre_int_period[1], :]
assert_frame_equal(ci.pre_data, pre_data)
post_data = rand_data.loc[post_int_period[0]: post_int_period[1], :]
assert_frame_equal(ci.post_data, post_data)
assert ci.alpha == 0.05
normed_pre_data, (mu, sig) = standardize(pre_data)
assert_frame_equal(ci.normed_pre_data, normed_pre_data)
normed_post_data = (post_data - mu) / sig
assert_frame_equal(ci.normed_post_data, normed_post_data)
assert ci.mu_sig == (mu[0], sig[0])
assert ci.model_args == {'fit_method': 'hmc', 'niter': 1000, 'prior_level_sd': 0.01,
'season_duration': 1, 'nseasons': 1, 'standardize': True}
assert isinstance(ci.model, tfp.sts.Sum)
design_matrix = ci.model.components[1].design_matrix.to_dense()
assert_array_equal(
design_matrix,
pd.concat([normed_pre_data, normed_post_data]).astype(np.float32).iloc[:, 1:]
)
assert ci.inferences is not None
assert ci.inferences.index.dtype == rand_data.index.dtype
assert ci.summary_data is not None
assert ci.p_value > 0 and ci.p_value < 1
assert ci.model_args['niter'] == 1000
assert ci.model_samples is not None
@pytest.mark.slow
def test_default_causal_cto_with_date_index(date_rand_data, pre_str_period,
post_str_period):
ci = CausalImpact(date_rand_data, pre_str_period, post_str_period)
assert_frame_equal(ci.data, date_rand_data)
assert ci.pre_period == pre_str_period
assert ci.post_period == post_str_period
pre_data = date_rand_data.loc[pre_str_period[0]: pre_str_period[1], :]
assert_frame_equal(ci.pre_data, pre_data)
post_data = date_rand_data.loc[post_str_period[0]: post_str_period[1], :]
assert_frame_equal(ci.post_data, post_data)
assert ci.alpha == 0.05
normed_pre_data, (mu, sig) = standardize(pre_data)
assert_frame_equal(ci.normed_pre_data, normed_pre_data)
normed_post_data = (post_data - mu) / sig
assert_frame_equal(ci.normed_post_data, normed_post_data)
assert ci.mu_sig == (mu[0], sig[0])
assert ci.model_args == {'fit_method': 'hmc', 'niter': 1000, 'prior_level_sd': 0.01,
'season_duration': 1, 'nseasons': 1, 'standardize': True}
assert isinstance(ci.model, tfp.sts.Sum)
design_matrix = ci.model.components[1].design_matrix.to_dense()
assert_array_equal(
design_matrix,
pd.concat([normed_pre_data, normed_post_data]).astype(np.float32).iloc[:, 1:]
)
assert ci.inferences is not None
assert ci.inferences.index.dtype == date_rand_data.index.dtype
assert ci.summary_data is not None
assert ci.p_value > 0 and ci.p_value < 1
assert ci.model_args['niter'] == 1000
assert ci.model_samples is not None
@pytest.mark.slow
def test_default_causal_cto_no_covariates(rand_data, pre_int_period, post_int_period):
rand_data = pd.DataFrame(rand_data.iloc[:, 0])
ci = CausalImpact(rand_data, pre_int_period, post_int_period)
assert_frame_equal(ci.data, rand_data)
assert ci.pre_period == pre_int_period
assert ci.post_period == post_int_period
pre_data = rand_data.loc[pre_int_period[0]: pre_int_period[1], :]
assert_frame_equal(ci.pre_data, pre_data)
post_data = rand_data.loc[post_int_period[0]: post_int_period[1], :]
assert_frame_equal(ci.post_data, post_data)
assert ci.alpha == 0.05
normed_pre_data, (mu, sig) = standardize(pre_data)
assert_frame_equal(ci.normed_pre_data, normed_pre_data)
normed_post_data = (post_data - mu) / sig
assert_frame_equal(ci.normed_post_data, normed_post_data)
assert ci.mu_sig == (mu[0], sig[0])
assert ci.model_args == {'fit_method': 'hmc', 'niter': 1000, 'prior_level_sd': 0.01,
'season_duration': 1, 'nseasons': 1, 'standardize': True}
assert isinstance(ci.model, tfp.sts.LocalLevel)
assert ci.inferences is not None
assert ci.inferences.index.dtype == rand_data.index.dtype
assert ci.summary_data is not None
assert ci.p_value > 0 and ci.p_value < 1
assert ci.model_args['niter'] == 1000
assert ci.model_samples is not None
@pytest.mark.slow
def test_default_causal_cto_with_np_array(rand_data, pre_int_period, post_int_period):
data = rand_data.values
ci = CausalImpact(data, pre_int_period, post_int_period)
assert_array_equal(ci.data, data)
assert ci.pre_period == pre_int_period
assert ci.post_period == post_int_period
data = pd.DataFrame(data)
pre_data = data.loc[pre_int_period[0]: pre_int_period[1], :]
assert_frame_equal(ci.pre_data, pre_data)
post_data = data.loc[post_int_period[0]: post_int_period[1], :]
assert_frame_equal(ci.post_data, post_data)
assert ci.alpha == 0.05
normed_pre_data, (mu, sig) = standardize(pre_data)
assert_frame_equal(ci.normed_pre_data, normed_pre_data)
normed_post_data = (post_data - mu) / sig
assert_frame_equal(ci.normed_post_data, normed_post_data)
assert ci.mu_sig == (mu[0], sig[0])
assert ci.model_args == {'fit_method': 'hmc', 'niter': 1000, 'prior_level_sd': 0.01,
'season_duration': 1, 'nseasons': 1, 'standardize': True}
assert isinstance(ci.model, tfp.sts.Sum)
design_matrix = ci.model.components[1].design_matrix.to_dense()
assert_array_equal(
design_matrix,
pd.concat([normed_pre_data, normed_post_data]).astype(np.float32).iloc[:, 1:]
)
assert ci.inferences is not None
assert ci.inferences.index.dtype == data.index.dtype
assert ci.summary_data is not None
assert ci.p_value > 0 and ci.p_value < 1
assert ci.model_args['niter'] == 1000
assert ci.model_samples is not None
@pytest.mark.slow
def test_causal_cto_with_no_standardization(rand_data, pre_int_period, post_int_period):
ci = CausalImpact(rand_data, pre_int_period, post_int_period, model_args=dict(
standardize=False, fit_method='vi'))
assert ci.normed_pre_data is None
assert ci.normed_post_data is None
assert ci.mu_sig is None
assert ci.p_value > 0 and ci.p_value < 1
@pytest.mark.slow
def test_causal_cto_with_seasons(date_rand_data, pre_str_period, post_str_period):
ci = CausalImpact(date_rand_data, pre_str_period, post_str_period,
model_args={'nseasons': 7, 'season_duration': 2,
'fit_method': 'vi'})
assert len(ci.model.components) == 3
seasonal_component = ci.model.components[2]
assert seasonal_component.num_seasons == 7
assert seasonal_component.num_steps_per_season == 2
def test_plotter(monkeypatch, rand_data, pre_int_period, post_int_period):
plotter_mock = mock.Mock()
fit_mock = mock.Mock()
process_mock = mock.Mock()
summarize_mock = mock.Mock()
monkeypatch.setattr('causalimpact.main.CausalImpact._fit_model', fit_mock)
monkeypatch.setattr('causalimpact.main.CausalImpact._summarize_inferences',
summarize_mock)
monkeypatch.setattr('causalimpact.main.CausalImpact._process_posterior_inferences',
process_mock)
monkeypatch.setattr('causalimpact.main.plotter', plotter_mock)
ci = CausalImpact(rand_data, pre_int_period, post_int_period,
model_args={'fit_method': 'vi'})
ci.inferences = 'inferences'
ci.pre_data = 'pre_data'
ci.post_data = 'post_data'
ci.plot()
plotter_mock.plot.assert_called_with('inferences', 'pre_data', 'post_data',
panels=['original', 'pointwise', 'cumulative'],
figsize=(10, 7), show=True)
def test_summarizer(monkeypatch, rand_data, pre_int_period, post_int_period):
summarizer_mock = mock.Mock()
fit_mock = mock.Mock()
process_mock = mock.Mock()
summarize_mock = mock.Mock()
monkeypatch.setattr('causalimpact.main.CausalImpact._fit_model', fit_mock)
monkeypatch.setattr('causalimpact.main.CausalImpact._summarize_inferences',
summarize_mock)
monkeypatch.setattr('causalimpact.main.CausalImpact._process_posterior_inferences',
process_mock)
monkeypatch.setattr('causalimpact.main.summarizer', summarizer_mock)
ci = CausalImpact(rand_data, pre_int_period, post_int_period,
model_args={'fit_method': 'vi'})
ci.summary_data = 'summary_data'
ci.p_value = 0.5
ci.alpha = 0.05
ci.summary()
summarizer_mock.summary.assert_called_with('summary_data', 0.5, 0.05, 'summary', 2)
with pytest.raises(ValueError) as excinfo:
ci.summary(digits='1')
assert str(excinfo.value) == ('Input value for digits must be integer. Received '
'"<class \'str\'>" instead.')
def test_causal_cto_with_custom_model_and_seasons(rand_data, pre_int_period,
post_int_period):
pre_data = rand_data.loc[pre_int_period[0]: pre_int_period[1], :]
observed_time_series = pre_data.iloc[:, 0].astype(np.float32)
level = tfp.sts.LocalLevel(observed_time_series=observed_time_series)
seasonal = tfp.sts.Seasonal(num_seasons=7, num_steps_per_season=2,
observed_time_series=observed_time_series)
model = tfp.sts.Sum([level, seasonal], observed_time_series=observed_time_series)
ci = CausalImpact(rand_data, pre_int_period, post_int_period, model=model,
model_args={'fit_method': 'vi'})
assert len(ci.model.components) == 2
assert isinstance(ci.model.components[0], tfp.sts.LocalLevel)
assert isinstance(ci.model.components[1], tfp.sts.Seasonal)
seasonal_component = ci.model.components[-1]
assert seasonal_component.num_seasons == 7
assert seasonal_component.num_steps_per_season == 2
assert ci.inferences.index.dtype == rand_data.index.dtype
def test_default_causal_cto_vi_method(rand_data, pre_int_period, post_int_period):
freq_rand_data = rand_data.set_index(
pd.date_range(start='2020-01-01', periods=len(rand_data))
).astype(np.float32).asfreq(pd.offsets.DateOffset(days=1))
ci = CausalImpact(rand_data, pre_int_period, post_int_period, model_args=dict(
fit_method='vi'))
assert_frame_equal(ci.data, rand_data)
assert ci.pre_period == pre_int_period
assert ci.post_period == post_int_period
pre_data = freq_rand_data.iloc[pre_int_period[0]: pre_int_period[1] + 1, :]
assert_frame_equal(ci.pre_data, pre_data)
post_data = freq_rand_data.iloc[post_int_period[0]: post_int_period[1] + 1, :]
assert_frame_equal(ci.post_data, post_data)
assert ci.alpha == 0.05
normed_pre_data, (mu, sig) = standardize(pre_data)
assert_frame_equal(ci.normed_pre_data, normed_pre_data)
normed_post_data = (post_data - mu) / sig
assert_frame_equal(ci.normed_post_data, normed_post_data)
assert ci.mu_sig == (mu[0], sig[0])
assert ci.model_args == {'fit_method': 'vi', 'niter': 1000, 'prior_level_sd': 0.01,
'season_duration': 1, 'nseasons': 1, 'standardize': True}
assert isinstance(ci.model, tfp.sts.Sum)
design_matrix = ci.model.components[1].design_matrix.to_dense()
assert_array_equal(
design_matrix,
| pd.concat([normed_pre_data, normed_post_data]) | pandas.concat |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'roster.ui'
#
# Created by: PyQt5 UI code generator 5.15.1
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
import sys
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
import xlrd
import pandas as pd
import numpy as np
class Ui_MainWindow(QMainWindow):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(1097, 558)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.tableWidget = QtWidgets.QTableWidget(self.centralwidget)
self.tableWidget.setGeometry(QtCore.QRect(20, 50, 491, 391))
self.tableWidget.setObjectName("tableWidget")
self.tableWidget.setColumnCount(0)
self.tableWidget.setRowCount(0)
self.tableWidget_2 = QtWidgets.QTableWidget(self.centralwidget)
self.tableWidget_2.setGeometry(QtCore.QRect(580, 50, 491, 391))
self.tableWidget_2.setObjectName("tableWidget_2")
self.tableWidget_2.setColumnCount(0)
self.tableWidget_2.setRowCount(0)
self.pushButton = QtWidgets.QPushButton(self.centralwidget)
self.pushButton.setGeometry(QtCore.QRect(20, 10, 93, 28))
self.pushButton.setObjectName("pushButton")
self.textEdit = QtWidgets.QTextEdit(self.centralwidget)
self.textEdit.setGeometry(QtCore.QRect(140, 460, 151, 31))
self.textEdit.setObjectName("textEdit")
self.label = QtWidgets.QLabel(self.centralwidget)
self.label.setGeometry(QtCore.QRect(20, 460, 111, 41))
self.label.setObjectName("label")
#self.pushButton_2 = QtWidgets.QPushButton(self.centralwidget)
#self.pushButton_2.setGeometry(QtCore.QRect(980, 460, 93, 28))
#self.pushButton_2.setObjectName("pushButton_2")
self.pushButton_3 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_3.setGeometry(QtCore.QRect(530, 190, 41, 111))
self.pushButton_3.setObjectName("pushButton_3")
self.label_2 = QtWidgets.QLabel(self.centralwidget)
self.label_2.setGeometry(QtCore.QRect(490, 470, 171, 16))
self.label_2.setObjectName("label_2")
self.label_3 = QtWidgets.QLabel(self.centralwidget)
self.label_3.setGeometry(QtCore.QRect(580, 20, 72, 15))
self.label_3.setObjectName("label_3")
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 1097, 26))
self.menubar.setObjectName("menubar")
self.menu = QtWidgets.QMenu(self.menubar)
self.menu.setObjectName("menu")
self.menu_2 = QtWidgets.QMenu(self.menubar)
self.menu_2.setObjectName("menu_2")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.action = QtWidgets.QAction(MainWindow)
self.action.setObjectName("action")
self.action_2 = QtWidgets.QAction(MainWindow)
self.action_2.setObjectName("action_2")
self.action_3 = QtWidgets.QAction(MainWindow)
self.action_3.setObjectName("action_3")
self.menu.addAction(self.action)
self.menu_2.addAction(self.action_2)
self.menu_2.addAction(self.action_3)
self.menubar.addAction(self.menu.menuAction())
self.menubar.addAction(self.menu_2.menuAction())
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
        # Import data (button on the page)
self.pushButton.clicked.connect(self.openfile)
self.pushButton.clicked.connect(self.create_table_show)
        # Import data (menu bar)
self.action.triggered.connect(self.openfile)
self.action.triggered.connect(self.create_table_show)
        # Convert
self.pushButton_3.clicked.connect(self.convertRoster)
#self.pushButton_3.clicked.connect(self.create_convert_table_show)
        # Save output
#self.pushButton_2.clicked.connect(self.slot_btn_saveFile)
        # Help
self.action_2.triggered.connect(self.Help)
        # About
self.action_3.triggered.connect(self.AboutUS)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MyRoster名单转换程序"))
self.pushButton.setText(_translate("MainWindow", "导入数据"))
self.label.setText(_translate("MainWindow", "请输入转换列数:"))
#self.pushButton_2.setText(_translate("MainWindow", "保存数据"))
self.pushButton_3.setText(_translate("MainWindow", "转\n\n换"))
self.label_2.setText(_translate("MainWindow", "MyRoster V1.0"))
self.label_3.setText(_translate("MainWindow", "效果预览"))
self.menu.setTitle(_translate("MainWindow", "新建"))
self.menu_2.setTitle(_translate("MainWindow", "帮助"))
self.action.setText(_translate("MainWindow", "导入数据"))
self.action_2.setText(_translate("MainWindow", "使用说明"))
self.action_3.setText(_translate("MainWindow", "关于"))
def openfile(self):
print("111")
        ### Get the file path ===================================================================
try:
openfile_name = QFileDialog.getOpenFileName(self, '选择文件', '', 'Excel files(*.xlsx , *.xls);;CSV(*.csv)')
# print(openfile_name)
global path_openfile_name
            ### Get the file path ====================================================================
path_openfile_name = openfile_name[0]
except Exception as e:
print('err', e)
def create_table_show(self):
print("222")
try:
            ### =========== Read the table and convert it ===========================================
if len(path_openfile_name) > 0:
input_table = pd.read_excel(path_openfile_name)
# print(input_table)
input_table_rows = input_table.shape[0]
input_table_colunms = input_table.shape[1]
# print(input_table_rows)
# print(input_table_colunms)
input_table_header = input_table.columns.values.tolist()
# print(input_table_header)
                ### =========== Read the table and convert it ============================================
                ### ====================== Set the row/column headers of the tableWidget ============================
self.tableWidget.setColumnCount(input_table_colunms)
self.tableWidget.setRowCount(input_table_rows)
self.tableWidget.setHorizontalHeaderLabels(input_table_header)
                ### ====================== Set the row/column headers of the tableWidget ============================
                ### ================ Iterate over each table element and add it to the tableWidget ========================
for i in range(input_table_rows):
input_table_rows_values = input_table.iloc[[i]]
# print(input_table_rows_values)
input_table_rows_values_array = np.array(input_table_rows_values)
input_table_rows_values_list = input_table_rows_values_array.tolist()[0]
#print(input_table_rows_values_list)
for j in range(input_table_colunms):
input_table_items_list = input_table_rows_values_list[j]
#print(input_table_items_list)
#print(type(input_table_items_list))
# print(input_table_items_list)
# print(type(input_table_items_list))
                        ### ============== Add the visited element to the tableWidget and display it =======================
input_table_items = str(input_table_items_list)
#print(input_table_items)
#print(type(input_table_items))
newItem = QTableWidgetItem(input_table_items)
newItem.setTextAlignment(Qt.AlignHCenter | Qt.AlignVCenter)
self.tableWidget.setItem(i, j, newItem)
                ### ================ Iterate over each table element and add it to the tableWidget ========================
else:
                self.centralwidget.show()
except Exception as e:
print("err2 ", e)
def convertRoster(self):
col_nums = self.textEdit.toPlainText()
global path_convertedfile_name
try:
if len(path_openfile_name) > 0:
if len(col_nums) > 0:
                    data = pd.read_excel(path_openfile_name, header=0) # open the xls file
out_data = []
#print(len(data))
for i in range(len(data)):
#print(data.values[i][0])
#print(data.values[i][1])
out_data.append(str(data.values[i][0]).replace("_", ".") + '\n' + str(data.values[i][1]))
#print(out_data)
#print(len(out_data))
data_dic = {}
for i in range(int(col_nums)):
data_dic[str(i)]=[]
print(data_dic)
count = 0
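                    # Distribute the entries round-robin across the requested number of columns.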
for i in out_data:
data_dic[str(count % int(col_nums))].append(i)
count += 1
max_len = len(data_dic["0"])
for i in data_dic:
if len(data_dic[i]) < max_len:
data_dic[i].append("")
'''
if int(col_nums) > 1:
for i in range
if len(data_dic["0"]) > len(data_dic[str(int(col_nums) - 1)]):
for i in range(len(data_dic["0"]) - len(data_dic[str(int(col_nums) - 1)])):
data_dic[str(int(col_nums) - 1)].append("")
'''
#styles = [dict(selector="th", props=[("text-align", "center")])]
df = pd.DataFrame(data_dic)
#df.style.set_table_styles(styles)
df.to_excel('output.xlsx', header=None, index=False)
print("Writing Done!")
print(path_openfile_name)
path_convertedfile_name = "./output.xlsx"
self.create_convert_table_show()
QMessageBox.warning(self,
"转换成功",
"转换成功,文件保存在output.xlsx!",
QMessageBox.Yes)
else:
QMessageBox.warning(self,
"转换错误",
"请设置转换后的列数!",
QMessageBox.Yes)
else:
QMessageBox.warning(self,
"转换错误",
"请先导入原始数据!",
QMessageBox.Yes)
except Exception as e:
print("err1", e)
QMessageBox.warning(self,
"转换错误",
str(e),
QMessageBox.Yes)
def create_convert_table_show(self):
try:
if len(path_convertedfile_name) > 0:
input_table = | pd.read_excel(path_convertedfile_name) | pandas.read_excel |
import pandas as pd
from sklearn import linear_model
import pickle
data = | pd.read_csv('data_new.csv') | pandas.read_csv |
import pandas as pd
import numpy as np
import requests
from termcolor import colored as cl
from math import floor
import matplotlib.pyplot as plt
plt.rcParams['figure.figsize'] = (20, 10)
plt.style.use('fivethirtyeight')
# EXTRACTING STOCK DATA
def get_historical_data(symbol, start_date):
api_key = 'YOUR API KEY'
api_url = f'https://api.twelvedata.com/time_series?symbol={symbol}&interval=1day&outputsize=5000&apikey={api_key}'
raw_df = requests.get(api_url).json()
df = pd.DataFrame(raw_df['values']).iloc[::-1].set_index('datetime').astype(float)
df = df[df.index >= start_date]
df.index = pd.to_datetime(df.index)
return df
aapl = get_historical_data('AAPL', '2020-01-01')
aapl
def get_stoch(symbol, k_period, d_period, start_date):
api_key = open(r'api_key.txt')
url = f'https://www.alphavantage.co/query?function=STOCH&symbol={symbol}&interval=daily&fastkperiod={k_period}&slowdperiod={d_period}&apikey={api_key}'
raw = requests.get(url).json()
df = pd.DataFrame(raw['Technical Analysis: STOCH']).T.iloc[::-1]
df = df[df.index >= start_date]
df.index = | pd.to_datetime(df.index) | pandas.to_datetime |
# -*- coding:utf-8 -*-
# !/usr/bin/env python
"""
Date: 2021/12/27 15:34
Desc: Msweet Technology Data Center - China sugar index
http://www.msweet.com.cn/mtkj/sjzx13/index.html
"""
import requests
import pandas as pd
def index_sugar_msweet() -> pd.DataFrame:
"""
    Msweet Technology Data Center - China sugar index
    http://www.msweet.com.cn/mtkj/sjzx13/index.html
    :return: China sugar index
:rtype: pandas.DataFrame
"""
url = "http://www.msweet.com.cn/eportal/ui"
params = {
"struts.portlet.action": "/portlet/price!getSTZSJson.action",
"moduleId": "cb752447cfe24b44b18c7a7e9abab048",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.concat(
[ | pd.DataFrame(data_json["category"]) | pandas.DataFrame |
# Clean the raw data, save the cleaned data, and compute the correlation for each pair of symbols at every time offset
# Import libraries
import pandas as pd
import numpy as np
from os import listdir
from os.path import isfile, join
from datetime import datetime, timedelta
# Set parameters
last_day_to_consider = '2021-04-19'
symbols_per_file = 4
# Define function to compute Pearson correlation between a Pandas Series col1 and another Pandas series col2
# shifted by offset (offset must be >= 0)
def corr_offset(col1, col2, offset=0):
if offset > 0:
corr = np.corrcoef(col1[:-offset], col2[offset:])[0, 1]
else: # it means offset =0
corr = np.corrcoef(col1, col2)[0, 1]
return corr
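# Quick illustrative check (not part of the original script, safe to remove): if col2 is col1
# shifted forward by two rows, corr_offset should report a perfect correlation at offset=2,
# because only the overlapping slices col1[:-2] and col2[2:] enter np.corrcoef.
_example_col1 = pd.Series([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
_example_col2 = _example_col1.shift(2).fillna(0.0)
_example_corr = corr_offset(_example_col1, _example_col2, offset=2)  # ~1.0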
# Load DataFrames of raw data
files = [f for f in listdir('raw_data') if isfile(join('raw_data', f))]
now = datetime.now()
all_days = [(now - timedelta(i)).strftime("%Y-%m-%d") for i in range(0, 1000) if
(now - timedelta(i)).weekday() < 5] # array of all days
full_data = | pd.DataFrame(all_days, columns=['time']) | pandas.DataFrame |
import requests
import json
import pandas as pd
from autumn.settings import INPUT_DATA_PATH
from pathlib import Path
INPUT_DATA_PATH = Path(INPUT_DATA_PATH)
COVID_SURVEY_PATH = INPUT_DATA_PATH / "covid_survey"
TODAY = pd.to_datetime("today").date().strftime("%Y%m%d")
FROM_DATE = "20200101"
countries = ["Australia", "Malaysia", "Myanmar", "Philippines", "Sri Lanka"]
indicators = ["mask", "avoid_contact"]
def fetch_covid_survey_data():
for country in countries:
for indicator in indicators:
if country == "Australia":
region = "Victoria"
API_URL = f"https://covidmap.umd.edu/api/resources?indicator={indicator}&type=daily&country={country}®ion={region}&daterange={FROM_DATE}-{TODAY}"
elif country == "Philippines":
region = "National Capital Region"
API_URL = f"https://covidmap.umd.edu/api/resources?indicator={indicator}&type=daily&country={country}®ion={region}&daterange={FROM_DATE}-{TODAY}"
else:
API_URL = f"https://covidmap.umd.edu/api/resources?indicator={indicator}&type=daily&country={country}&daterange={FROM_DATE}-{TODAY}"
# request data from api
response = requests.get(API_URL).text
            # convert json data to dict data for use!
jsonData = json.loads(response)
# convert to pandas dataframe
df = | pd.DataFrame.from_dict(jsonData["data"]) | pandas.DataFrame.from_dict |
import pandas as pd
from engarde import decorators as ed
import pathlib
import logging
import datetime
from concurrent import futures
import toolz
import tqdm
import numpy as np
import time
THIS_DIR = pathlib.Path(__file__).parent
logger = logging.getLogger(__name__)
def extract_table(date: datetime.date, url=None) -> pd.DataFrame:
if url is None:
url = (r"https://www.powersmartpricing.org/psp/servlet?"
r"type=pricingtabledatesingle&date={}")
time.sleep(np.abs(np.random.random()))
table_list = pd.read_html(url.format(date.strftime(r"%Y%m%d")))
df = table_list[0]
df["date"] = date
return df
def scrape_tables(
start: datetime.date=datetime.date(2014, 5, 1),
end: datetime.date=None) -> pd.DataFrame:
"""
Parameters
----------
start:
        Date to start scraping from. No data is available prior to
        May 1, 2014
    end:
        Date to stop scraping. The next day's data is available
        after 4:30 PM. There's logic to select the current day, or the
next day depending on when this is requested.
Returns
-------
pd.DataFrame:
A pandas DataFrame of cleaned data.
"""
if end is None:
now = datetime.datetime.now()
end = now.date() if now.time() <= datetime.time(16, 30) else now.date() + datetime.timedelta(days=1)
dates = pd.date_range(start=start, end=end, freq="D")
# raw_data = (extract_table(date) for date in dates)
# clean_data = pd.concat(raw_data)
with futures.ThreadPoolExecutor(4) as executor:
raw_data = tqdm.tqdm(executor.map(extract_table, dates), total=len(dates))
data_pipe_line = toolz.pipe(
raw_data,
)
clean_data = pd.concat(data_pipe_line)
return clean_data
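# Usage sketch (an illustrative helper, not part of the original module): it is deliberately
# never called here because scrape_tables issues live HTTP requests for every day in the range.
def _example_scrape_one_week() -> pd.DataFrame:
    return scrape_tables(start=datetime.date(2018, 1, 1),
                         end=datetime.date(2018, 1, 7))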
def create_raw_hourly_energy_rate_data(
start: datetime.date=datetime.date(2014, 5, 1),
end: datetime.date=datetime.date(2018, 10, 2)) -> pd.DataFrame:
if end is None:
now = datetime.datetime.now()
end = now.date() if now.time() <= datetime.time(16, 30) else now.date() + datetime.timedelta(days=1)
dates = pd.date_range(start=start, end=end, freq="D")
# raw_data = (extract_table(date) for date in dates)
# clean_data = pd.concat(raw_data)
with futures.ThreadPoolExecutor(4) as executor:
raw_data = tqdm.tqdm(executor.map(extract_table, dates), total=len(dates))
data_pipe_line = toolz.pipe(
raw_data,
)
raw_data = pd.concat(data_pipe_line)
return raw_data
@toolz.curry
def extract_price_per_kwh(df: pd.DataFrame, col_name) -> pd.DataFrame:
"""
Raw price comes in as Cents per kWh and it's a string.
Convert to $/kWh and cast to a float
"""
pat = r"(\d+\.\d+)"
df[col_name] = (
df["Actual Price (Cents per kWh)"]
.str
.extract(pat, expand=False)
.astype(float)
) / 100
return df
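# Minimal sketch (an assumed helper for illustration only): shows the cents-to-dollars
# conversion on a synthetic frame shaped like the scraped table, without hitting the website.
def _example_extract_price() -> pd.DataFrame:
    demo = pd.DataFrame({"Actual Price (Cents per kWh)": ["2.5 cents", "10.0 cents"]})
    # The regex captures "2.5" and "10.0"; dividing by 100 yields 0.025 and 0.10 $/kWh.
    return extract_price_per_kwh(demo, col_name="price_dollar_per_kwh")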
@toolz.curry
def add_price_start_time(df: pd.DataFrame, col_name) -> pd.DataFrame:
"""
Add a start datetime for price such that:
current_price = df["price_dollar_per_kwh"][
df["price_start_datetime"] >= pd.Timestamp.now()
& df["price_end_datetime"] <= pd.Timestamp.now()
]
The "date" column comes in and a datetime64[ns] datatype.
The "Time of Day (CT)" column is a string with the following format:
HH:MM [A|P]M - HH + 1:MM [A|P]M
ie
"12:00 AM - 1:00 AM"
Logic extracts the time string then combines it with the date to create
a datetime column
"""
start_dates = df["date"]
start_times = (
df["Time of Day (CT)"]
.str
.extract(r"(\d{1,2}:\d{2} [A|P]M) - \d{1,2}:\d{2} [A|P]M", expand=False)
)
start_dt = start_dates.combine(
other=start_times,
func=lambda date, time: datetime.datetime.combine(date, pd.Timestamp(time).time())
)
df[col_name] = start_dt
return df
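# Worked example (an assumed helper for illustration only): for the row below the regex pulls
# "12:00 AM" out of the interval string, and combining it with the date gives 2018-01-01 00:00:00.
# add_price_end_time behaves analogously using the right-hand side of the interval.
def _example_price_start_time() -> pd.DataFrame:
    demo = pd.DataFrame({
        "date": [pd.Timestamp("2018-01-01")],
        "Time of Day (CT)": ["12:00 AM - 1:00 AM"],
    })
    return add_price_start_time(demo, col_name="price_start_datetime")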
@toolz.curry
def add_price_end_time(df: pd.DataFrame, col_name: str) -> pd.DataFrame:
"""
Add an end datetime for price such that:
current_price = df["price_dollar_per_kwh"][
df["price_start_datetime"] >= pd.Timestamp.now()
& df["price_end_datetime"] <= pd.Timestamp.now()
]
The "date" column comes in and a datetime64[ns] datatype.
The "Time of Day (CT)" column is a string with the following format:
HH:MM [A|P]M - HH + 1:MM [A|P]M
ie
12:00 AM - 1:00 AM
Logic extracts the time string then combines it with the date to create
a datetime column
"""
stop_dates = df["date"]
stop_times = (
df["Time of Day (CT)"]
.str
.extract(r"\d{1,2}:\d{2} [A|P]M - (\d{1,2}:\d{2} [A|P]M)", expand=False)
)
stop_dt = stop_dates.combine(
other=stop_times,
func=lambda date, time: datetime.datetime.combine(date, pd.Timestamp(time).time())
)
df[col_name] = stop_dt - | pd.Timedelta(seconds=1) | pandas.Timedelta |
from preprocessing.data_utils import *
import os
from root import *
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import scipy
| pd.set_option('display.max_columns', 100) | pandas.set_option |
import tensorflow as tf
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_boston
import pandas as pd
import numpy as np
import pdb
#--------------------------------------------------------------------------------
# Load the data
def load_data():
    # Load the Boston dataset from scikit-learn
    boston = load_boston()
    # Convert the explanatory variables of the Boston dataset into a pandas DataFrame
boston_df = | pd.DataFrame(boston.data, columns=boston.feature_names) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# # Testing cosmogan
# Sep 4, 2020
# Author: <NAME>. <EMAIL>
#
# Borrowing pieces of code from :
#
# - https://github.com/pytorch/tutorials/blob/11569e0db3599ac214b03e01956c2971b02c64ce/beginner_source/dcgan_faces_tutorial.py
# - https://github.com/exalearn/epiCorvid/tree/master/cGAN
import os
import random
import logging
import sys
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
#from torchsummary import summary
from torch.utils.data import DataLoader, TensorDataset
# import torch.fft
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.animation as animation
#from IPython.display import HTML
import argparse
import time
from datetime import datetime
import glob
import pickle
import yaml
import collections
import shutil
# Import modules from other files
from utils import *
from spec_loss import *
def f_manual_add_argparse():
''' use only in jpt notebook'''
args=argparse.Namespace()
args.config='config_2dgan.yaml'
args.mode='fresh'
args.ip_fldr=''
# args.mode='continue'
# args.ip_fldr='/global/cfs/cdirs/m3363/vayyar/cosmogan_data/results_from_other_code/pytorch/results/128sq/20201211_093818_nb_test/'
return args
def f_parse_args():
"""Parse command line arguments.Only for .py file"""
parser = argparse.ArgumentParser(description="Run script to train GAN using pytorch", formatter_class=argparse.ArgumentDefaultsHelpFormatter)
add_arg = parser.add_argument
    add_arg('--config','-cfile', type=str, default='/global/u1/v/vpa/project/jpt_notebooks/Cosmology/Cosmo_GAN/repositories/cosmogan_pytorch/cosmogan/main_code/config_128.yaml', help='Location of the yaml config file')
add_arg('--mode','-m', type=str, choices=['fresh','continue'],default='fresh', help='Whether to start fresh run or continue previous run')
add_arg('--ip_fldr','-ip', type=str, default='', help='The input folder for resuming a checkpointed run')
return parser.parse_args()
def f_init_gdict(args,gdict):
''' Create global dictionary gdict from args and config file'''
## read config file
config_file=args.config
with open(config_file) as f:
config_dict= yaml.load(f, Loader=yaml.SafeLoader)
gdict=config_dict['parameters']
## Add args variables to gdict
for key in ['mode','config','ip_fldr']:
gdict[key]=vars(args)[key]
return gdict
def f_load_data_precompute(gdict):
#################################
####### Read data and precompute ######
img=np.load(gdict['ip_fname'],mmap_mode='r')[:gdict['num_imgs']].transpose(0,1,2,3).copy()
t_img=torch.from_numpy(img)
print("%s, %s"%(img.shape,t_img.shape))
dataset=TensorDataset(t_img)
data_loader=DataLoader(dataset,batch_size=gdict['batch_size'],shuffle=True,num_workers=0,drop_last=True)
# Precompute metrics with validation data for computing losses
with torch.no_grad():
val_img=np.load(gdict['ip_fname'])[-3000:].transpose(0,1,2,3).copy()
t_val_img=torch.from_numpy(val_img).to(gdict['device'])
# Precompute radial coordinates
r,ind=f_get_rad(img)
r=r.to(gdict['device']); ind=ind.to(gdict['device'])
# Stored mean and std of spectrum for full input data once
mean_spec_val,sdev_spec_val=f_torch_image_spectrum(f_invtransform(t_val_img),1,r,ind)
hist_val=f_compute_hist(t_val_img,bins=gdict['bns'])
del val_img; del t_val_img; del img; del t_img
return data_loader,mean_spec_val,sdev_spec_val,hist_val,r,ind
def f_init_GAN(gdict,print_model=False):
# Define Models
print("Building GAN networks")
# Create Generator
netG = Generator(gdict).to(gdict['device'])
netG.apply(weights_init)
# Create Discriminator
netD = Discriminator(gdict).to(gdict['device'])
netD.apply(weights_init)
if print_model:
print(netG)
# summary(netG,(1,1,64))
print(netD)
# summary(netD,(1,128,128))
print("Number of GPUs used %s"%(gdict['ngpu']))
if (gdict['multi-gpu']):
netG = nn.DataParallel(netG, list(range(gdict['ngpu'])))
netD = nn.DataParallel(netD, list(range(gdict['ngpu'])))
#### Initialize networks ####
# criterion = nn.BCELoss()
criterion = nn.BCEWithLogitsLoss()
if gdict['mode']=='fresh':
optimizerD = optim.Adam(netD.parameters(), lr=gdict['learn_rate'], betas=(gdict['beta1'], 0.999),eps=1e-7)
optimizerG = optim.Adam(netG.parameters(), lr=gdict['learn_rate'], betas=(gdict['beta1'], 0.999),eps=1e-7)
### Initialize variables
iters,start_epoch,best_chi1,best_chi2=0,0,1e10,1e10
### Load network weights for continuing run
elif gdict['mode']=='continue':
iters,start_epoch,best_chi1,best_chi2=f_load_checkpoint(gdict['save_dir']+'/models/checkpoint_last.tar',netG,netD,optimizerG,optimizerD,gdict)
logging.info("Continuing existing run. Loading checkpoint with epoch {0} and step {1}".format(start_epoch,iters))
start_epoch+=1 ## Start with the next epoch
## Add to gdict
for key,val in zip(['best_chi1','best_chi2','iters','start_epoch'],[best_chi1,best_chi2,iters,start_epoch]): gdict[key]=val
return netG,netD,criterion,optimizerD,optimizerG
def f_setup(gdict,log):
'''
Set up directories, Initialize random seeds, add GPU info, add logging info.
'''
torch.backends.cudnn.benchmark=True
# torch.autograd.set_detect_anomaly(True)
###### Set up directories #######
if gdict['mode']=='fresh':
# Create prefix for foldername
fldr_name=datetime.now().strftime('%Y%m%d_%H%M%S') ## time format
gdict['save_dir']=gdict['op_loc']+fldr_name+'_'+gdict['run_suffix']
if not os.path.exists(gdict['save_dir']):
os.makedirs(gdict['save_dir']+'/models')
os.makedirs(gdict['save_dir']+'/images')
shutil.copy(gdict['config'],gdict['save_dir'])
elif gdict['mode']=='continue': ## For checkpointed runs
gdict['save_dir']=args.ip_fldr
### Read loss data
with open (gdict['save_dir']+'df_metrics.pkle','rb') as f:
metrics_dict=pickle.load(f)
### Initialize random seed
manualSeed = np.random.randint(1, 10000) if gdict['seed']=='random' else int(gdict['seed'])
random.seed(manualSeed)
np.random.seed(manualSeed)
torch.manual_seed(manualSeed)
torch.cuda.manual_seed_all(manualSeed)
if gdict['deterministic']:
logging.info("Running with deterministic sequence. Performance will be slower")
torch.backends.cudnn.deterministic=True
# torch.backends.cudnn.enabled = False
torch.backends.cudnn.benchmark = False
## Special declarations
gdict['ngpu']=torch.cuda.device_count()
gdict['device']=torch.device("cuda" if (torch.cuda.is_available()) else "cpu")
gdict['multi-gpu']=True if (gdict['device'].type == 'cuda') and (gdict['ngpu'] > 1) else False
if log:
### Write all logging.info statements to stdout and log file (different for jpt notebooks)
logfile=gdict['save_dir']+'/log.log'
logging.basicConfig(level=logging.DEBUG, filename=logfile, filemode="a+", format="%(asctime)-15s %(levelname)-8s %(message)s")
Lg = logging.getLogger()
Lg.setLevel(logging.DEBUG)
lg_handler_file = logging.FileHandler(logfile)
lg_handler_stdout = logging.StreamHandler(sys.stdout)
Lg.addHandler(lg_handler_file)
Lg.addHandler(lg_handler_stdout)
logging.info('Args: {0}'.format(args))
logging.info('Start: %s'%(datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
logging.info('Device:{0}'.format(gdict['device']))
def f_train_loop(dataloader,metrics_df,gdict,fixed_noise,mean_spec_val,sdev_spec_val,hist_val,r,ind):
''' Train single epoch '''
## Define new variables from dict
keys=['image_size','start_epoch','epochs','iters','best_chi1','best_chi2','save_dir','device','flip_prob','nz','batch_size','bns']
image_size,start_epoch,epochs,iters,best_chi1,best_chi2,save_dir,device,flip_prob,nz,batchsize,bns=list(collections.OrderedDict({key:gdict[key] for key in keys}).values())
for epoch in range(start_epoch,epochs):
t_epoch_start=time.time()
for count, data in enumerate(dataloader, 0):
# print(count,len(data),data[0].shape)
####### Train GAN ########
netG.train(); netD.train(); ### Need to add these after inference and before training
tme1=time.time()
### Update D network: maximize log(D(x)) + log(1 - D(G(z)))
netD.zero_grad()
real_cpu = data[0].to(device)
real_cpu.requires_grad=True
b_size = real_cpu.size(0)
real_label = torch.full((b_size,), 1, device=device)
fake_label = torch.full((b_size,), 0, device=device)
g_label = torch.full((b_size,), 1, device=device) ## No flipping for Generator labels
# Flip labels with probability flip_prob
for idx in np.random.choice(np.arange(b_size),size=int(np.ceil(b_size*flip_prob))):
real_label[idx]=0; fake_label[idx]=1
# Generate fake image batch with G
noise = torch.randn(b_size, 1, 1, nz, device=device)
fake = netG(noise)
# Forward pass real batch through D
real_output = netD(real_cpu)
# print("Real output",torch.max(real_output[-1]))
errD_real = criterion(real_output[-1].view(-1), real_label.float())
# print(errD_real.item())
errD_real.backward(retain_graph=True)
D_x = real_output[-1].mean().item()
# Forward pass fake batch through D
fake_output = netD(fake.detach()) # The detach is important
# print("output for Dfake",torch.max(fake_output[-1]))
errD_fake = criterion(fake_output[-1].view(-1), fake_label.float())
# print(errD_fake.item())
errD_fake.backward(retain_graph=True)
D_G_z1 = fake_output[-1].mean().item()
errD = errD_real + errD_fake
if gdict['lambda_gp']: ## Add gradient - penalty loss
grads=torch.autograd.grad(outputs=real_output[-1],inputs=real_cpu,grad_outputs=torch.ones_like(real_output[-1]),allow_unused=False,create_graph=True)[0]
gp_loss=f_gp_loss(grads,gdict['lambda_gp'])
errD = errD + gp_loss
else:
gp_loss=torch.Tensor([np.nan])
optimizerD.step()
###Update G network: maximize log(D(G(z)))
netG.zero_grad()
output = netD(fake)
# print("op for G",torch.max(output[-1]))
errG_adv = criterion(output[-1].view(-1), g_label.float())
# print(errG_adv.item())
# Histogram pixel intensity loss
hist_gen=f_compute_hist(fake,bins=bns)
hist_loss=loss_hist(hist_gen,hist_val.to(device))
# Add spectral loss
mean,sdev=f_torch_image_spectrum(f_invtransform(fake),1,r.to(device),ind.to(device))
spec_loss=loss_spectrum(mean,mean_spec_val.to(device),sdev,sdev_spec_val.to(device),image_size,gdict['lambda_spec_mean'],gdict['lambda_spec_var'])
errG=errG_adv
if gdict['lambda_spec_mean']: errG=errG + spec_loss
if gdict['lambda_fm']:## Add feature matching loss
fm_loss=f_FM_loss(real_output,fake_output,gdict['lambda_fm'],gdict)
errG= errG + fm_loss
else:
fm_loss=torch.Tensor([np.nan])
if torch.isnan(errG).any():
logging.info(errG)
raise SystemError
# Calculate gradients for G
errG.backward(retain_graph=True)
D_G_z2 = output[-1].mean().item()
### Implement Gradient clipping
if gdict['grad_clip']:
nn.utils.clip_grad_norm_(netG.parameters(),gdict['grad_clip'])
nn.utils.clip_grad_norm_(netD.parameters(),gdict['grad_clip'])
optimizerG.step()
# optimizerD.step()
tme2=time.time()
####### Store metrics ########
# Output training stats
if count % gdict['checkpoint_size'] == 0:
logging.info('[%d/%d][%d/%d]\tLoss_D: %.4f\tLoss_adv: %.4f\tLoss_G: %.4f\tD(x): %.4f\tD(G(z)): %.4f / %.4f'
% (epoch, epochs, count, len(dataloader), errD.item(), errG_adv.item(),errG.item(), D_x, D_G_z1, D_G_z2)),
logging.info("Spec loss: %s,\t hist loss: %s"%(spec_loss.item(),hist_loss.item())),
logging.info("Training time for step %s : %s"%(iters, tme2-tme1))
# Save metrics
cols=['step','epoch','Dreal','Dfake','Dfull','G_adv','G_full','spec_loss','hist_loss','fm_loss','gp_loss','D(x)','D_G_z1','D_G_z2','time']
vals=[iters,epoch,errD_real.item(),errD_fake.item(),errD.item(),errG_adv.item(),errG.item(),spec_loss.item(),hist_loss.item(),fm_loss.item(),gp_loss.item(),D_x,D_G_z1,D_G_z2,tme2-tme1]
for col,val in zip(cols,vals): metrics_df.loc[iters,col]=val
### Checkpoint the best model
checkpoint=True
iters += 1 ### Model has been updated, so update iters before saving metrics and model.
### Compute validation metrics for updated model
netG.eval()
with torch.no_grad():
#fake = netG(fixed_noise).detach().cpu()
fake = netG(fixed_noise)
hist_gen=f_compute_hist(fake,bins=bns)
hist_chi=loss_hist(hist_gen,hist_val.to(device))
mean,sdev=f_torch_image_spectrum(f_invtransform(fake),1,r.to(device),ind.to(device))
spec_chi=loss_spectrum(mean,mean_spec_val.to(device),sdev,sdev_spec_val.to(device),image_size,gdict['lambda_spec_mean'],gdict['lambda_spec_var'])
# Storing chi for next step
for col,val in zip(['spec_chi','hist_chi'],[spec_chi.item(),hist_chi.item()]): metrics_df.loc[iters,col]=val
# Checkpoint model for continuing run
if count == len(dataloader)-1: ## Check point at last step of epoch
f_save_checkpoint(gdict,epoch,iters,best_chi1,best_chi2,netG,netD,optimizerG,optimizerD,save_loc=save_dir+'/models/checkpoint_last.tar')
if (checkpoint and (epoch > 1)): # Choose best models by metric
if hist_chi< best_chi1:
f_save_checkpoint(gdict,epoch,iters,best_chi1,best_chi2,netG,netD,optimizerG,optimizerD,save_loc=save_dir+'/models/checkpoint_best_hist.tar')
best_chi1=hist_chi.item()
logging.info("Saving best hist model at epoch %s, step %s."%(epoch,iters))
if spec_chi< best_chi2:
f_save_checkpoint(gdict,epoch,iters,best_chi1,best_chi2,netG,netD,optimizerG,optimizerD,save_loc=save_dir+'/models/checkpoint_best_spec.tar')
best_chi2=spec_chi.item()
logging.info("Saving best spec model at epoch %s, step %s"%(epoch,iters))
if iters in gdict['save_steps_list']:
f_save_checkpoint(gdict,epoch,iters,best_chi1,best_chi2,netG,netD,optimizerG,optimizerD,save_loc=save_dir+'/models/checkpoint_{0}.tar'.format(iters))
logging.info("Saving given-step at epoch %s, step %s."%(epoch,iters))
# Save G's output on fixed_noise
if ((iters % gdict['checkpoint_size'] == 0) or ((epoch == epochs-1) and (count == len(dataloader)-1))):
netG.eval()
with torch.no_grad():
fake = netG(fixed_noise).detach().cpu()
img_arr=np.array(fake[:,:,:,:])
fname='gen_img_epoch-%s_step-%s'%(epoch,iters)
np.save(save_dir+'/images/'+fname,img_arr)
t_epoch_end=time.time()
logging.info("Time taken for epoch %s: %s"%(epoch,t_epoch_end-t_epoch_start))
# Save Metrics to file after each epoch
metrics_df.to_pickle(save_dir+'/df_metrics.pkle')
logging.info("best chis: {0}, {1}".format(best_chi1,best_chi2))
if __name__=="__main__":
jpt=False
# jpt=True ##(different for jupyter notebook)
t0=time.time()
args=f_parse_args() if not jpt else f_manual_add_argparse()
#################################
### Set up global dictionary###
gdict={}
gdict=f_init_gdict(args,gdict)
if jpt: ## override for jpt nbks
gdict['num_imgs']=40000
gdict['run_suffix']='nb_test'
f_setup(gdict,log=(not jpt))
## Build GAN
netG,netD,criterion,optimizerD,optimizerG=f_init_GAN(gdict,print_model=True)
fixed_noise = torch.randn(gdict['batch_size'], 1, 1, gdict['nz'], device=gdict['device']) #Latent vectors to view G progress
## Load data and precompute
dataloader,mean_spec_val,sdev_spec_val,hist_val,r,ind=f_load_data_precompute(gdict)
#################################
########## Train loop and save metrics and images ######
### Set up metrics dataframe
cols=['step','epoch','Dreal','Dfake','Dfull','G_adv','G_full','spec_loss','hist_loss','spec_chi','hist_chi','gp_loss','fm_loss','D(x)','D_G_z1','D_G_z2','time']
metrics_df= | pd.DataFrame(columns=cols) | pandas.DataFrame |
import itertools
import string
import numpy as np
from numpy import random
import pytest
import pandas.util._test_decorators as td
from pandas import DataFrame, MultiIndex, Series, date_range, timedelta_range
import pandas._testing as tm
from pandas.tests.plotting.common import TestPlotBase, _check_plot_works
import pandas.plotting as plotting
""" Test cases for .boxplot method """
@td.skip_if_no_mpl
class TestDataFramePlots(TestPlotBase):
@pytest.mark.slow
def test_boxplot_legacy1(self):
df = DataFrame(
np.random.randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=["one", "two", "three", "four"],
)
df["indic"] = ["foo", "bar"] * 3
df["indic2"] = ["foo", "bar", "foo"] * 2
_check_plot_works(df.boxplot, return_type="dict")
_check_plot_works(df.boxplot, column=["one", "two"], return_type="dict")
# _check_plot_works adds an ax so catch warning. see GH #13188
with tm.assert_produces_warning(UserWarning):
_check_plot_works(df.boxplot, column=["one", "two"], by="indic")
_check_plot_works(df.boxplot, column="one", by=["indic", "indic2"])
with tm.assert_produces_warning(UserWarning):
_check_plot_works(df.boxplot, by="indic")
with tm.assert_produces_warning(UserWarning):
_check_plot_works(df.boxplot, by=["indic", "indic2"])
_check_plot_works(plotting._core.boxplot, data=df["one"], return_type="dict")
_check_plot_works(df.boxplot, notch=1, return_type="dict")
with | tm.assert_produces_warning(UserWarning) | pandas._testing.assert_produces_warning |
# -*- coding: utf-8 -*-
"""
SVM and GBDT algorithm module
@author 谢金豆
"""
import random
import numpy as np
import cv2
import matplotlib.pyplot as plt
import seaborn as sns
import cv2
import numpy as np
import pandas as pd
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import StandardScaler #标准差标准化
from sklearn.svm import SVC #svm包中SVC用于分类
from sklearn.ensemble import GradientBoostingClassifier
from sklearn import cross_validation, metrics
from sklearn.metrics import roc_curve, auc
from sklearn import metrics
L=["cr","in","pa","ps","rs","sc"]
L1=["cr","in","pa","ps","rs","sc","gg","rp","sp"]
def get_data(x,y):
    file_path='D:/NLS_data/' # set the file path
    file_path1='D:/train/'
    train_set = np.zeros(shape=[1,64*64]) # train_set holds the collected dataset
    train_set = pd.DataFrame(train_set) # convert train_set to a DataFrame
    target=[] # list of labels
for i in L:
for j in range(x,y):
target.append(i)
img = cv2.imread(file_path+i+'/'+str(j)+'.jpg',\
                             cv2.IMREAD_GRAYSCALE) # read the image; the second argument loads it as grayscale
img=img.reshape(1,img.shape[0]*img.shape[1])
img=pd.DataFrame(img)
train_set=pd.concat([train_set,img],axis=0)
train_set.index=list(range(0,train_set.shape[0]))
train_set.drop(labels=0,axis=0,inplace=True)
target=pd.DataFrame(target)
    return train_set,target # return the dataset and labels
def get_data1(x,y):
    file_path='D:/NLS_data/' # set the file path
    file_path1='D:/train/'
    train_set = np.zeros(shape=[1,64*64]) # train_set holds the collected dataset
    train_set = pd.DataFrame(train_set) # convert train_set to a DataFrame
    target=[] # list of labels
for i in range(len(L1)):
for j in range(x,y):
target.append(i)
img = cv2.imread(file_path1+L1[i]+'/'+str(j)+'.jpg',\
                             cv2.IMREAD_GRAYSCALE) # read the image; the second argument loads it as grayscale
img=img.reshape(1,img.shape[0]*img.shape[1])
img= | pd.DataFrame(img) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Iris Dataset - multi-class classification problem
=================================================
The `Iris DataSet <http://scikit-learn.org/stable/auto_examples/datasets/plot_iris_dataset.html>`_
became quite popular in machine learning. It is a short and simple classification
problem with three classes. We first see how to handle it with
:epkg:`scikit-learn` and then with :epkg:`MicrosoftML`.
.. contents::
:local:
.. index:: classification, multi-class, iris
"""
########################################
#
# scikit-learn
# ------------
#
###########################################
# We retrieves the data with scikit-learn.
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
import sphinx_gallery
iris = datasets.load_iris()
X = iris.data[:, :2]
Y = iris.target
###########################################
# We train a logistic regression model
# with :epkg:`scikit-learn`.
# Training.
from sklearn.linear_model import LogisticRegression
logreg = LogisticRegression()
logreg.fit(X, Y)
###########################################
# We compute the predictions on a grid.
h = .02
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
gridX = np.c_[xx.ravel(), yy.ravel()]
###########################
# We run the predictions on this grid.
grid = logreg.predict(gridX)
######################
# We plot the predictions.
zgrid = grid.reshape(xx.shape)
plt.figure(figsize=(4, 3))
plt.pcolormesh(xx, yy, zgrid, cmap=plt.cm.Paired)
plt.scatter(X[:, 0], X[:, 1], c=Y, edgecolors='k', cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.title("scikit-learn")
########################################
#
# microsoftml
# -----------
#
# We use :epkg:`microsoftml` to do the same.
# The main difference is the features and the label
# must appear in the same dataframe. A formula specifies
# which role each column plays during training.
# We also modify the label type which must be
# a boolean, a float or a category for this kind of problem.
import pandas
df = | pandas.DataFrame(data=X, columns=["X1", "X2"]) | pandas.DataFrame |
import pandas as pd
from sklearn.preprocessing import PowerTransformer
def preprocess_columns(df):
"""
Assumptions:
- Remove variables with more than 50% missing values
    - Replace missing values of numerical variables with the per-column mean
    - Replace missing values of categorical variables with -1
    - Remove categorical variables with more than 25 unique values
:return: df
"""
mv_cols = df.columns[df.isnull().sum() / len(df) > 0.5]
df.drop(mv_cols, axis=1, inplace=True)
cols = df.columns
num_cols = df._get_numeric_data().columns
cat_cols = list(set(cols) - set(num_cols))
if len(cat_cols) > 0:
for cat_col in cat_cols:
if len(df[cat_col].unique()) > 25:
df.drop(cat_col, axis=1, inplace=True)
cols = df.columns
num_cols = df._get_numeric_data().columns
cat_cols = list(set(cols) - set(num_cols))
if len(cat_cols) > 0:
for cat_col in cat_cols:
df[cat_col] = df[cat_col].fillna(-1)
if len(num_cols) > 0:
for num_col in num_cols:
df[num_col] = df[num_col].fillna(df[num_col].mean())
return df
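# Minimal sketch (an assumed helper, not used by the loaders below) of how the assumptions
# above play out on a toy frame: the mostly-missing column is dropped, the categorical column
# stays because it has few levels, and the numeric NaN is replaced by the column mean (3.0).
def _example_preprocess_columns() -> pd.DataFrame:
    demo = pd.DataFrame({
        'mostly_missing': [None, None, None, 1.0],
        'city': ['a', 'b', 'c', 'a'],
        'income': [1.0, None, 3.0, 5.0],
    })
    return preprocess_columns(demo)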
def load_water_quality_data():
# https://www.kaggle.com/adityakadiwal/water-potability
df = pd.read_csv('../data/water_potability.csv', sep=',')
y_df = df['Potability']
X_df = df.drop('Potability', axis=1)
X_df = preprocess_columns(X_df)
y_df = y_df.astype(int)
y_word_dict = {1: 'Potable_yes', 0: 'Potable_no'}
dataset = {
'problem': 'classification',
'full': {
'X': X_df,
'y': y_df,
},
}
return dataset, y_word_dict
def load_stroke_data():
# https://www.kaggle.com/fedesoriano/stroke-prediction-dataset
df = pd.read_csv('../data/healthcare-dataset-stroke-data.csv', sep=',')
y_df = df['stroke']
X_df = df.drop('stroke', axis=1)
X_df['hypertension'] = X_df['hypertension'].replace({1: "Yes", 0: "No"})
X_df['heart_disease'] = X_df['heart_disease'].replace({1: "Yes", 0: "No"})
cols = X_df.columns
num_cols = X_df._get_numeric_data().columns
cat_cols = list(set(cols) - set(num_cols))
num_cols = [num_col for num_col in num_cols if num_col in ['age', 'avg_glucose_level', 'bmi']]
X_df = X_df[cat_cols+num_cols]
X_df = preprocess_columns(X_df)
y_df = y_df.astype(int)
y_word_dict = {1: 'Stroke_yes', 0: 'Stroke_no'}
dataset = {
'problem': 'classification',
'full': {
'X': X_df,
'y': y_df,
},
}
return dataset, y_word_dict
def load_telco_churn_data():
# https://www.kaggle.com/blastchar/telco-customer-churn/downloads/WA_Fn-UseC_-Telco-Customer-Churn.csv/1
df = pd.read_csv('../data/WA_Fn-UseC_-Telco-Customer-Churn.csv')
y_df = df['Churn']
X_df = df.drop(['Churn', 'customerID'], axis=1)
X_df['SeniorCitizen'] = X_df['SeniorCitizen'].replace({1: "Yes", 0: "No"})
X_df['TotalCharges'] = pd.to_numeric(X_df['TotalCharges'].replace(" ", ""))
cols = X_df.columns
num_cols = X_df._get_numeric_data().columns
cat_cols = list(set(cols) - set(num_cols))
num_cols = [num_col for num_col in num_cols if num_col in ['tenure', 'MonthlyCharges', 'TotalCharges']]
X_df = X_df[cat_cols + num_cols]
X_df = preprocess_columns(X_df)
y_df = y_df.replace({'Yes': 1, 'No': 0})
y_df = y_df.astype(int)
y_word_dict = {1: 'Churn_Yes', 0: 'Churn_No'}
dataset = {
'problem': 'classification',
'full': {
'X': X_df,
'y': y_df,
},
}
return dataset, y_word_dict
def load_fico_data():
# https://community.fico.com/s/explainable-machine-learning-challenge?tabset-158d9=3
df = pd.read_csv('../data/fico_heloc_dataset_v1.csv')
X_df = df.drop(['RiskPerformance'], axis=1)
X_df['MaxDelq2PublicRecLast12M'] = X_df['MaxDelq2PublicRecLast12M'].astype(str)
X_df['MaxDelqEver'] = X_df['MaxDelqEver'].astype(str)
cols = X_df.columns
num_cols = X_df._get_numeric_data().columns
cat_cols = list(set(cols) - set(num_cols))
cat_cols = [cat_col for cat_col in cat_cols if cat_col in ['MaxDelq2PublicRecLast12M', 'MaxDelqEver']]
X_df = X_df[cat_cols+num_cols.tolist()]
X_df = preprocess_columns(X_df)
y_df = df['RiskPerformance']
y_df = y_df.replace({'Good': 1, 'Bad': 0})
y_df = y_df.astype(int)
y_word_dict = {1: 'Good', 0: 'Bad'}
dataset = {
'problem': 'classification',
'full': {
'X': X_df,
'y': y_df,
},
}
return dataset, y_word_dict
def load_bank_marketing_data():
# https://archive.ics.uci.edu/ml/datasets/Bank+Marketing
df = pd.read_csv('../data/bank-full.csv', sep=';')
y_df = df['y']
X_df = df.drop('y', axis=1)
cols = X_df.columns
num_cols = X_df._get_numeric_data().columns
cat_cols = list(set(cols) - set(num_cols))
num_cols = [num_col for num_col in num_cols if num_col in ['age', 'duration', 'campaign', 'pdays', 'previous']]
X_df = X_df[cat_cols + num_cols]
X_df = preprocess_columns(X_df)
y_df = y_df.replace({'yes': 1, 'no': 0})
y_df = y_df.astype(int)
y_word_dict = {1: 'Deposit_subscribed_yes', 0: 'Deposit_subscribed_no'}
dataset = {
'problem': 'classification',
'full': {
'X': X_df,
'y': y_df,
},
}
return dataset, y_word_dict
def load_adult_data():
df = pd.read_csv('../data/adult_census_income.csv')
X_df = df.drop(['income'], axis=1)
cols = X_df.columns
num_cols = X_df._get_numeric_data().columns
cat_cols = list(set(cols) - set(num_cols))
num_cols = [num_col for num_col in num_cols if num_col in ['age', 'fnlwgt', 'education.num',
'capital.gain', 'capital.loss', 'hours.per.week']]
X_df = X_df[cat_cols + num_cols]
X_df = preprocess_columns(X_df)
y_df = df["income"]
y_df = y_df.replace({' <=50K': 0, ' >50K': 1})
y_df = y_df.astype(int)
y_word_dict = {0: 'Income<=50K', 1: 'Income>50K'}
dataset = {
'problem': 'classification',
'full': {
'X': X_df,
'y': y_df,
},
}
return dataset, y_word_dict
def load_airline_passenger_data():
# https://www.kaggle.com/teejmahal20/airline-passenger-satisfaction
df = pd.read_csv('../data/airline_train.csv', sep=',')
y_df = df['satisfaction']
X_df = df.drop(['Unnamed: 0', 'id', 'satisfaction'], axis=1)
cols = X_df.columns
num_cols = X_df._get_numeric_data().columns
cat_cols = list(set(cols) - set(num_cols))
cat_cols = [cat_col for cat_col in cat_cols if cat_col in ['Gender', 'Customer Type', 'Type of Travel', 'Class']]
X_df = X_df[cat_cols + num_cols.tolist()]
X_df = preprocess_columns(X_df)
y_df = y_df.replace({'satisfied': 1, 'neutral or dissatisfied': 0})
y_df = y_df.astype(int)
dataset = {
'problem': 'classification',
'full': {
'X': X_df,
'y': y_df,
},
}
return dataset
def load_car_data():
    # https://archive.ics.uci.edu/ml/datasets/automobile
df = pd.read_csv('../data/car.data', sep=',')
X_df = df.drop(['price'], axis=1)
X_df = X_df.replace("?", "")
X_df['peak-rpm'] = pd.to_numeric(X_df['peak-rpm'])
X_df['horsepower'] = pd.to_numeric(X_df['horsepower'])
X_df['stroke'] = pd.to_numeric(X_df['stroke'])
X_df['bore'] = pd.to_numeric(X_df['bore'])
X_df['normalized-losses'] = pd.to_numeric(X_df['normalized-losses'])
cols = X_df.columns
num_cols = X_df._get_numeric_data().columns
cat_cols = list(set(cols) - set(num_cols))
num_cols = [num_col for num_col in num_cols if num_col in ['wheel-base', 'length', 'width', 'height', 'curb-weight',
'engine-size', 'bore', 'stroke', 'compression-ratio',
'horsepower', 'peak-rpm', 'city-mpg', 'highway-mpg']]
X_df = X_df[cat_cols + num_cols]
X_df = preprocess_columns(X_df)
y_df = df['price']
pt = PowerTransformer(method="box-cox")
y_np = pt.fit_transform(y_df.to_numpy().reshape(-1, 1))
y_df = pd.DataFrame(data=y_np, columns=["y"])
dataset = {
'problem': 'regression',
'full': {
'X': X_df,
'y': y_df,
},
}
return dataset
def load_student_grade_data():
# https://archive.ics.uci.edu/ml/datasets/Student+Performance
df = pd.read_csv('../data/student-por.csv', sep=';')
X_df = df.drop(['G1', 'G2', 'G3'], axis=1)
cols = X_df.columns
num_cols = X_df._get_numeric_data().columns
cat_cols = list(set(cols) - set(num_cols))
num_cols = [num_col for num_col in num_cols if num_col in ['age', 'medu', 'fedu', 'traveltime', 'studytime',
'failures', 'famrel', 'freetime', 'goout',
'Dalc', 'Walc', 'health', 'absences']]
X_df = X_df[cat_cols + num_cols]
X_df = preprocess_columns(X_df)
y_df = df['G3']
pt = PowerTransformer()
y_np = pt.fit_transform(y_df.to_numpy().reshape(-1, 1))
y_df = pd.DataFrame(data=y_np, columns=["y"])
dataset = {
'problem': 'regression',
'full': {
'X': X_df,
'y': y_df,
},
}
return dataset
def load_crimes_data():
# https://archive.ics.uci.edu/ml/datasets/Communities+and+Crime
df = pd.read_csv('../data/communities.data', sep=',')
X_df = df.drop(['ViolentCrimesPerPop', 'state', 'county', 'community', 'communityname string', 'fold'], axis=1)
X_df = X_df.replace("?", "")
X_df = preprocess_columns(X_df)
X_df = X_df.drop(['LemasGangUnitDeploy', 'NumKindsDrugsSeiz'], axis=1)
y_df = df['ViolentCrimesPerPop']
pt = PowerTransformer()
y_np = pt.fit_transform(y_df.to_numpy().reshape(-1, 1))
y_df = pd.DataFrame(data=y_np, columns=["y"])
dataset = {
'problem': 'regression',
'full': {
'X': X_df,
'y': y_df,
},
}
return dataset
def load_bike_sharing_data():
# https://archive.ics.uci.edu/ml/datasets/Bike+Sharing+Dataset
df = pd.read_csv('../data/bike.csv', sep=',')
X_df = df.drop(['instant', 'dteday', 'cnt', 'casual', 'registered'], axis=1)
X_df['season'] = X_df['season'].astype(str)
X_df['yr'] = X_df['yr'].astype(str)
X_df['holiday'] = X_df['holiday'].astype(str)
X_df['weekday'] = X_df['weekday'].astype(str)
X_df['workingday'] = X_df['workingday'].astype(str)
X_df['weathersit'] = X_df['weathersit'].astype(str)
X_df = preprocess_columns(X_df)
y_df = df['cnt']
pt = PowerTransformer()
y_np = pt.fit_transform(y_df.to_numpy().reshape(-1, 1))
y_df = pd.DataFrame(data=y_np, columns=["y"])
dataset = {
'problem': 'regression',
'full': {
'X': X_df,
'y': y_df,
},
}
return dataset
def load_california_housing_data():
# https://www.dcc.fc.up.pt/~ltorgo/Regression/cal_housing.html
df = pd.read_csv('../data/cal_housing.data', sep=',')
X_df = df.drop(['medianHouseValue'], axis=1)
X_df = preprocess_columns(X_df)
y_df = df['medianHouseValue']
pt = PowerTransformer()
y_np = pt.fit_transform(y_df.to_numpy().reshape(-1, 1))
y_df = | pd.DataFrame(data=y_np, columns=["y"]) | pandas.DataFrame |
import unittest
from abc import ABC
import numpy as np
import pandas as pd
from toolbox.ml.ml_factor_calculation import ModelWrapper, calc_ml_factor, generate_indexes
from toolbox.utils.slice_holder import SliceHolder
class MyTestCase(unittest.TestCase):
def examples(self):
# index includes non trading days
# exactly 60 occurrences of each ticker
first = pd.Timestamp(year=2010, month=1, day=1)
self.date_index = pd.MultiIndex.from_product(
[pd.date_range(start=first, end=pd.Timestamp(year=2010, month=3, day=1)),
['BOB', 'JEFF', 'CARL']], names=['date', 'symbol'])
self.expected_index_e5_10_30 = [
(SliceHolder(first, first + pd.Timedelta(days=29)),
SliceHolder(first + pd.Timedelta(days=40), first + pd.Timedelta(days=44))),
(SliceHolder(first, first + pd.Timedelta(days=34)),
SliceHolder(first + pd.Timedelta(days=45), first + pd.Timedelta(days=49))),
(SliceHolder(first, first + pd.Timedelta(days=39)),
SliceHolder(first + pd.Timedelta(days=50), first + pd.Timedelta(days=54))),
(SliceHolder(first, first + pd.Timedelta(days=44)),
SliceHolder(first + pd.Timedelta(days=55), first + pd.Timedelta(days=59)))
]
self.expected_index_e7_8_30 = [
(SliceHolder(first, first + pd.Timedelta(days=29)),
SliceHolder(first + pd.Timedelta(days=37), first + pd.Timedelta(days=44))),
(SliceHolder(first, first + pd.Timedelta(days=37)),
SliceHolder(first + pd.Timedelta(days=45), first + pd.Timedelta(days=52))),
(SliceHolder(first, first + pd.Timedelta(days=45)),
SliceHolder(first + pd.Timedelta(days=53), first + pd.Timedelta(days=59))),
]
self.expected_index_e5_10_30 = self.turn_to_datetime64(self.expected_index_e5_10_30)
self.expected_index_e7_8_30 = self.turn_to_datetime64(self.expected_index_e7_8_30)
self.expected_index_r5_10_30 = [
(SliceHolder(first, first + pd.Timedelta(days=29)),
SliceHolder(first + pd.Timedelta(days=40), first + pd.Timedelta(days=44))),
(SliceHolder(first + pd.Timedelta(days=5), first + pd.Timedelta(days=34)),
SliceHolder(first + pd.Timedelta(days=45), first + pd.Timedelta(days=49))),
(SliceHolder(first + pd.Timedelta(days=10), first + pd.Timedelta(days=39)),
SliceHolder(first + pd.Timedelta(days=50), first + pd.Timedelta(days=54))),
(SliceHolder(first + pd.Timedelta(days=15), first + pd.Timedelta(days=44)),
SliceHolder(first + pd.Timedelta(days=55), first + pd.Timedelta(days=59)))
]
self.expected_index_r7_8_30 = [
(SliceHolder(first, first + pd.Timedelta(days=29)),
SliceHolder(first + | pd.Timedelta(days=37) | pandas.Timedelta |
"""
This module contains functions to save and load data.
"""
import logging
import os.path
import json
import pickle
import yaml
import pandas as pd
logging.basicConfig(format='%(levelname)s %(asctime)s %(message)s', level=logging.DEBUG)
def construct_path(*path):
path = os.path.join(*path)
path = os.path.join(os.path.dirname(__file__), path) if not os.path.isabs(path) else path
return path
def append_csv(dataframe, *path):
path = construct_path(*path)
if not os.path.exists(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
dataframe.to_csv(path, index=False, mode='a', header=False)
def read_csv(*path, encoding='utf-8', **kwargs):
"""
Loads a dataframe from csv file, correctly attributing dtypes based on column names
and setting a consistent hierarchy of indices.
Args:
path: location of the csv file
Returns:
pandas.DataFrame
"""
path = construct_path(*path)
try:
df = pd.read_csv(path, sep=',', encoding=encoding, na_values=["#"], **kwargs)
except pd.errors.EmptyDataError:
df = pd.DataFrame()
# log(f'Empty dataframe found in {path}', os.path.basename(__file__))
        logging.info('Empty dataframe found in %s', path)
return df
def write_csv(dataframe: pd.DataFrame, *path, **kwargs):
"""
Writes a DataFrame to disk as csv file, maintaining project standards.
Args:
        dataframe (pandas.DataFrame): the DataFrame to save
path: file location
"""
path = construct_path(*path)
os.makedirs(os.path.dirname(path), exist_ok=True)
dataframe.to_csv(path, index=False, **kwargs)
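# A minimal round-trip sketch for the two CSV helpers above (folder and file
# names here are illustrative, not part of the project):
#
#   frame = pd.DataFrame({'id': [1, 2], 'value': [0.5, 0.7]})
#   write_csv(frame, 'output', 'example.csv')     # path resolved relative to this module
#   restored = read_csv('output', 'example.csv')  # returns an empty DataFrame for a blank file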
def read_yaml(*path):
"""
    Creates a dictionary from a YAML file
"""
# Read YAML file
path = construct_path(*path)
with open(path, 'r') as stream:
        data_loaded = yaml.safe_load(stream)
return data_loaded
def write_yaml(dictionary, *path):
"""
Write dictionary as a YAML file
"""
path = construct_path(*path)
if not os.path.exists(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
with open(path, 'w', encoding='utf8') as outfile:
yaml.dump(dictionary, outfile, default_flow_style=False, allow_unicode=True)
return path
def write_json(dictionary, *path):
    """
    Writes a dictionary to disk as json file, maintaining project standards.
    Args:
        dictionary (dict): the dictionary to save
        path: file location
    """
    path = construct_path(*path)
    if not os.path.exists(os.path.dirname(path)):
        os.makedirs(os.path.dirname(path))
    with open(path, 'w') as file_pointer:
        file_pointer.write(json.dumps(dictionary, indent=4))
def read_hdf(*path, **kwargs):
"""
Loads a DataFrame from hdf5 file, correctly attributing dtypes based on column names
and setting a consistent hierarchy of indices.
Args:
path: location of the hdf5 file
Returns:
pandas.DataFrame
"""
path = construct_path(*path)
df = pd.read_hdf(path, **kwargs)
return df
def get_hdf_len(*path):
"""
Returns the number of rows in an hdf file as an int.
"""
path = construct_path(*path)
with | pd.HDFStore(path) | pandas.HDFStore |
from scipy.spatial.distance import pdist
import numpy as np
import pandas as pd
def us(query, p):
"""
Scales a query to a given length, p
:param query: Time Series to be scaled
:param p: Length to scale to
:return: QP, a numpy array containing the scaled query
"""
n = query.size
QP = np.empty(shape=(n, p))
# p / n = scaling factor
for i in range(n):
curQ = query.iloc[i][0]
for j in range(p):
try:
QP[i][j] = (curQ[int(j * (len(curQ) / p))])
except Exception as e:
print(e)
return QP
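# Usage sketch for us() (assumption: `query` is a one-column DataFrame whose
# cells each hold a pandas Series, which is the shape every function in this
# module expects):
#
#   q = pd.DataFrame([[pd.Series([1.0, 2.0, 3.0, 4.0])],
#                     [pd.Series([2.0, 4.0, 6.0])]])
#   us(q, 3)   # -> numpy array of shape (2, 3); each series resampled to length 3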
def euclidian_distances(q):
ED = sum(pdist(np.array(q), 'sqeuclidean'))
return ED
def compare_scaling(query, min = None, max = None):
"""
Compares the euclidean distances of multiple scale lengths for an array of time series, and returns the scaled
query with the lowest euclidean distance
:param query: An array of time series to be scaled
:param min: Minimum length to scale to
:param max: Maximum length to scale to
:return: The query scaled to the optimal length between the min and max
"""
best_match_value = float('inf')
best_match = None
if max == None:
max = 0
for i in range(query.size):
if query.iloc[i][0].size > max:
max = query.iloc[i][0].size
if min == None:
min = 0
for i in range(query.size):
if query.iloc[i][0].size < min:
min = query.iloc[i][0].size
n = min
m = max
#Parallel probs best
for p in range(n, m):
QP = us(query, p)
dist = euclidian_distances(QP) # Compare like sizes
if dist < best_match_value:
best_match_value = dist
best_match = QP
#Reshuffle so it fits the required structure
ret = []
for i in range(query.size):
ret.append([best_match[i]])
return pd.DataFrame(ret)
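# Example call (hypothetical bounds): compare_scaling(q, min=3, max=6) rescales
# the set to every target length in [3, 6) and keeps the length with the
# smallest total pairwise squared euclidean distance.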
def pad_zero(query, direction, scale_size = None):
"""
Pads either the prefix or suffix of time series data with zeros, up to a length defined by scale_size
:param query: An array of time series to be scaled
:param direction: Either prefix or suffix, determines what part to pad
:param scale_size: Size to scale up to
:return: A scaled array of time series
"""
#Set size if needed
if scale_size == None:
max = 0
for i in range(query.size):
if query.iloc[i][0].size > max:
max = query.iloc[i][0].size
scale_size = max
else:
for i in range(query.size):
if query.iloc[i][0].size > scale_size:
#This can't scale down
raise ValueError("Scale size must be greater than the longest series")
#Scale needed values
scaled = []
for i in range(query.size):
curQ = query.iloc[i][0].tolist()
length = query.iloc[i][0].size
for j in range(scale_size - length):
try:
if direction == 'prefix':
# Insert 0 at pos 0
curQ.insert(0,0)
elif direction == 'suffix':
curQ.append(0)
except Exception as e:
print(e)
scaled.append(pd.Series(curQ))
#Reshuffle so it fits the required structure
ret = []
for i in range(query.size):
ret.append([scaled[i]])
return pd.DataFrame(ret)
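# Padding sketch (illustrative values): for a cell holding pd.Series([5, 6]) and
# scale_size=4, pad_zero(q, 'prefix', 4) gives [0, 0, 5, 6] and
# pad_zero(q, 'suffix', 4) gives [5, 6, 0, 0]; pad_noise() below fills the same
# positions with |N(mean, std)| noise drawn from that series instead of zeros.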
def pad_noise(query, direction, scale_size = None):
"""
Pads either the prefix or suffix of time series data with random noise, up to a length defined by scale_size
:param query: An array of time series to be scaled
:param direction: Either prefix or suffix, determines what part to pad
:param scale_size: Size to scale up to
:return: A scaled array of time series
"""
#Set size if needed
if scale_size == None:
max = 0
for i in range(query.size):
if query.iloc[i][0].size > max:
max = query.iloc[i][0].size
scale_size = max
else:
for i in range(query.size):
if query.iloc[i][0].size > scale_size:
#This can't scale down
raise ValueError("Scale size must be greater than the longest series")
#Scale needed values
scaled = []
for i in range(query.size):
curQ = query.iloc[i][0].tolist()
length = query.iloc[i][0].size
# get np mean, np std
mean = np.mean(curQ)
std = np.std(curQ)
noise = np.random.normal(mean, std, scale_size - length)
noise = noise.tolist()
noise = list(map(abs, noise))
for j in range(scale_size - length):
try:
if direction == 'prefix':
# Insert 0 at pos 0
curQ.insert(0, noise[j])
elif direction == 'suffix':
curQ.append(noise[j])
except Exception as e:
print(e)
scaled.append( | pd.Series(curQ) | pandas.Series |
from collections import OrderedDict
from datetime import timedelta
import numpy as np
import pytest
from pandas.core.dtypes.dtypes import CategoricalDtype, DatetimeTZDtype
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Series,
Timedelta,
Timestamp,
_np_version_under1p14,
concat,
date_range,
option_context,
)
from pandas.core.arrays import integer_array
import pandas.util.testing as tm
def _check_cast(df, v):
"""
Check if all dtypes of df are equal to v
"""
assert all(s.dtype.name == v for _, s in df.items())
class TestDataFrameDataTypes:
def test_concat_empty_dataframe_dtypes(self):
df = DataFrame(columns=list("abc"))
df["a"] = df["a"].astype(np.bool_)
df["b"] = df["b"].astype(np.int32)
df["c"] = df["c"].astype(np.float64)
result = pd.concat([df, df])
assert result["a"].dtype == np.bool_
assert result["b"].dtype == np.int32
assert result["c"].dtype == np.float64
result = pd.concat([df, df.astype(np.float64)])
assert result["a"].dtype == np.object_
assert result["b"].dtype == np.float64
assert result["c"].dtype == np.float64
def test_empty_frame_dtypes_ftypes(self):
empty_df = pd.DataFrame()
tm.assert_series_equal(empty_df.dtypes, pd.Series(dtype=np.object))
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(empty_df.ftypes, pd.Series(dtype=np.object))
nocols_df = pd.DataFrame(index=[1, 2, 3])
tm.assert_series_equal(nocols_df.dtypes, pd.Series(dtype=np.object))
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(nocols_df.ftypes, pd.Series(dtype=np.object))
norows_df = pd.DataFrame(columns=list("abc"))
tm.assert_series_equal(
norows_df.dtypes, pd.Series(np.object, index=list("abc"))
)
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(
norows_df.ftypes, pd.Series("object:dense", index=list("abc"))
)
norows_int_df = pd.DataFrame(columns=list("abc")).astype(np.int32)
tm.assert_series_equal(
norows_int_df.dtypes, pd.Series(np.dtype("int32"), index=list("abc"))
)
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(
norows_int_df.ftypes, pd.Series("int32:dense", index=list("abc"))
)
odict = OrderedDict
df = pd.DataFrame(odict([("a", 1), ("b", True), ("c", 1.0)]), index=[1, 2, 3])
ex_dtypes = pd.Series(
odict([("a", np.int64), ("b", np.bool), ("c", np.float64)])
)
ex_ftypes = pd.Series(
odict([("a", "int64:dense"), ("b", "bool:dense"), ("c", "float64:dense")])
)
tm.assert_series_equal(df.dtypes, ex_dtypes)
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
| tm.assert_series_equal(df.ftypes, ex_ftypes) | pandas.util.testing.assert_series_equal |
"""
* Copyright 2020, Maestria de Humanidades Digitales,
* Universidad de Los Andes
*
* Developed for the Msc graduation project in Digital Humanities
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
# ===============================
# native python libraries
# ===============================
import os
import copy
import csv
import re
import unicodedata
import urllib
# ===============================
# extension python libraries
# ===============================
import pandas as pd
import cv2
# ===============================
# developed python libraries
# ===============================
import Conf
from Lib.Utils import Err
from Lib.Recovery.Content import Page
from Lib.Recovery.Cleaner import Topic
assert Topic
assert Page
assert Err
assert Conf
# global config variables
CFG_FOLDER = "Config"
CFG_SCHEMA = "df-schema.ini"
# loading config schema into the program
DATA_SCHEMA = Conf.configGlobal(CFG_FOLDER, CFG_SCHEMA)
# default template for the element/paint dict in gallery
DEFAULT_FRAME_SCHEMA = eval(DATA_SCHEMA.get("DEFAULT", "columns"))
# ================================================
# API for the scrapping the gallery of paintings
# ================================================
class Gallery():
"""
    this class implements the gallery of the model, containing all its
    elements (i.e.: paintings). It keeps all gallery data in memory and
    helps create the data_frame for it.
"""
# =========================================
# class variables
# =========================================
webg_path = str()
localg_path = str()
imgd_path = str()
schema = copy.deepcopy(DEFAULT_FRAME_SCHEMA)
data_frame = pd.DataFrame(columns=DEFAULT_FRAME_SCHEMA)
wpage = Page()
# =========================================
# functions to create a new gallery
# =========================================
def __init__(self, *args, **kwargs):
"""
creator of the class gallery()
Args:
webg_path (str): URL for the gallery to scrap data
localg_path (str): local dirpath for the gallery data
schema (list): array with the column names for the model
            data_frame (DataFrame, optional): pandas df with data (i.e.: paintings)
            in the gallery; you can pass an existing df. Default is empty
wpage (Page): the current webpage the controller is scrapping
Raises:
exp: raise a generic exception if something goes wrong
Returns:
            Gallery (Gallery): return a new Gallery() object
"""
try:
# default creator attributes
self.webg_path = str()
self.localg_path = str()
self.imgd_path = str()
self.schema = copy.deepcopy(DEFAULT_FRAME_SCHEMA)
self.data_frame = pd.DataFrame(columns=DEFAULT_FRAME_SCHEMA)
self.wpage = Page()
# when arguments are pass as parameters
if len(args) > 0:
for arg in args:
# URL of the remote gallery to scrap
if args.index(arg) == 0:
self.webg_path = arg
# local dirpath to save the gallery CSV
if args.index(arg) == 1:
self.localg_path = arg
# local dirpath to save the images
if args.index(arg) == 2:
self.imgd_path = arg
# dataframes containing the data of the gallery
if args.index(arg) == 3:
self.data_frame = arg
# if there are dict decrators in the creator
if len(kwargs) > 0:
for key in list(kwargs.keys()):
# updating schema in the model
if key == "schema":
self.schema = copy.deepcopy(kwargs[key])
self.data_frame = | pd.DataFrame(columns=self.schema) | pandas.DataFrame |
import datetime
from datetime import timedelta
from distutils.version import LooseVersion
from io import BytesIO
import os
import re
from warnings import catch_warnings, simplefilter
import numpy as np
import pytest
from pandas.compat import is_platform_little_endian, is_platform_windows
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_categorical_dtype
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
Int64Index,
MultiIndex,
RangeIndex,
Series,
Timestamp,
bdate_range,
concat,
date_range,
isna,
timedelta_range,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
create_tempfile,
ensure_clean_path,
ensure_clean_store,
safe_close,
safe_remove,
tables,
)
import pandas.util.testing as tm
from pandas.io.pytables import (
ClosedFileError,
HDFStore,
PossibleDataLossError,
Term,
read_hdf,
)
from pandas.io import pytables as pytables # noqa: E402 isort:skip
from pandas.io.pytables import TableIterator # noqa: E402 isort:skip
_default_compressor = "blosc"
ignore_natural_naming_warning = pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
@pytest.mark.single
class TestHDFStore:
def test_format_kwarg_in_constructor(self, setup_path):
# GH 13291
with ensure_clean_path(setup_path) as path:
with pytest.raises(ValueError):
HDFStore(path, format="table")
def test_context(self, setup_path):
path = create_tempfile(setup_path)
try:
with HDFStore(path) as tbl:
raise ValueError("blah")
except ValueError:
pass
finally:
safe_remove(path)
try:
with HDFStore(path) as tbl:
tbl["a"] = tm.makeDataFrame()
with HDFStore(path) as tbl:
assert len(tbl) == 1
assert type(tbl["a"]) == DataFrame
finally:
safe_remove(path)
def test_conv_read_write(self, setup_path):
path = create_tempfile(setup_path)
try:
def roundtrip(key, obj, **kwargs):
obj.to_hdf(path, key, **kwargs)
return read_hdf(path, key)
o = tm.makeTimeSeries()
tm.assert_series_equal(o, roundtrip("series", o))
o = tm.makeStringSeries()
tm.assert_series_equal(o, roundtrip("string_series", o))
o = tm.makeDataFrame()
tm.assert_frame_equal(o, roundtrip("frame", o))
# table
df = DataFrame(dict(A=range(5), B=range(5)))
df.to_hdf(path, "table", append=True)
result = read_hdf(path, "table", where=["index>2"])
tm.assert_frame_equal(df[df.index > 2], result)
finally:
safe_remove(path)
def test_long_strings(self, setup_path):
# GH6166
df = DataFrame(
{"a": tm.rands_array(100, size=10)}, index=tm.rands_array(100, size=10)
)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=["a"])
result = store.select("df")
tm.assert_frame_equal(df, result)
def test_api(self, setup_path):
# GH4584
# API issue when to_hdf doesn't accept append AND format args
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True)
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True)
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, "df", append=False, format="fixed")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False, format="f")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False)
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_store(setup_path) as store:
path = store._path
df = tm.makeDataFrame()
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=True, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# append to False
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# formats
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format=None)
tm.assert_frame_equal(store.select("df"), df)
with ensure_clean_path(setup_path) as path:
# Invalid.
df = tm.makeDataFrame()
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="f")
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="fixed")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=True, format="foo")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=False, format="bar")
# File path doesn't exist
path = ""
with pytest.raises(FileNotFoundError):
read_hdf(path, "df")
def test_api_default_format(self, setup_path):
# default_format option
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
_maybe_remove(store, "df")
store.put("df", df)
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
store.append("df2", df)
pd.set_option("io.hdf.default_format", "table")
_maybe_remove(store, "df")
store.put("df", df)
assert store.get_storer("df").is_table
_maybe_remove(store, "df2")
store.append("df2", df)
assert store.get_storer("df").is_table
pd.set_option("io.hdf.default_format", None)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
df.to_hdf(path, "df")
with HDFStore(path) as store:
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
df.to_hdf(path, "df2", append=True)
pd.set_option("io.hdf.default_format", "table")
df.to_hdf(path, "df3")
with HDFStore(path) as store:
assert store.get_storer("df3").is_table
df.to_hdf(path, "df4", append=True)
with HDFStore(path) as store:
assert store.get_storer("df4").is_table
pd.set_option("io.hdf.default_format", None)
def test_keys(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
assert len(store) == 3
expected = {"/a", "/b", "/c"}
assert set(store.keys()) == expected
assert set(store) == expected
def test_keys_ignore_hdf_softlink(self, setup_path):
# GH 20523
# Puts a softlink into HDF file and rereads
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A=range(5), B=range(5)))
store.put("df", df)
assert store.keys() == ["/df"]
store._handle.create_soft_link(store._handle.root, "symlink", "df")
# Should ignore the softlink
assert store.keys() == ["/df"]
def test_iter_empty(self, setup_path):
with ensure_clean_store(setup_path) as store:
# GH 12221
assert list(store) == []
def test_repr(self, setup_path):
with ensure_clean_store(setup_path) as store:
repr(store)
store.info()
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store["df"] = df
# make a random group in hdf space
store._handle.create_group(store._handle.root, "bah")
assert store.filename in repr(store)
assert store.filename in str(store)
store.info()
# storers
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df)
s = store.get_storer("df")
repr(s)
str(s)
@ignore_natural_naming_warning
def test_contains(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
store["foo/bar"] = tm.makeDataFrame()
assert "a" in store
assert "b" in store
assert "c" not in store
assert "foo/bar" in store
assert "/foo/bar" in store
assert "/foo/b" not in store
assert "bar" not in store
# gh-2694: tables.NaturalNameWarning
with catch_warnings(record=True):
store["node())"] = tm.makeDataFrame()
assert "node())" in store
def test_versioning(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
assert store.root.a._v_attrs.pandas_version == "0.15.2"
assert store.root.b._v_attrs.pandas_version == "0.15.2"
assert store.root.df1._v_attrs.pandas_version == "0.15.2"
# write a file and wipe its versioning
_maybe_remove(store, "df2")
store.append("df2", df)
# this is an error because its table_type is appendable, but no
# version info
store.get_node("df2")._v_attrs.pandas_version = None
with pytest.raises(Exception):
store.select("df2")
def test_mode(self, setup_path):
df = tm.makeTimeDataFrame()
def check(mode):
with ensure_clean_path(setup_path) as path:
# constructor
if mode in ["r", "r+"]:
with pytest.raises(IOError):
HDFStore(path, mode=mode)
else:
store = HDFStore(path, mode=mode)
assert store._handle.mode == mode
store.close()
with ensure_clean_path(setup_path) as path:
# context
if mode in ["r", "r+"]:
with pytest.raises(IOError):
with HDFStore(path, mode=mode) as store: # noqa
pass
else:
with HDFStore(path, mode=mode) as store:
assert store._handle.mode == mode
with ensure_clean_path(setup_path) as path:
# conv write
if mode in ["r", "r+"]:
with pytest.raises(IOError):
df.to_hdf(path, "df", mode=mode)
df.to_hdf(path, "df", mode="w")
else:
df.to_hdf(path, "df", mode=mode)
# conv read
if mode in ["w"]:
with pytest.raises(ValueError):
read_hdf(path, "df", mode=mode)
else:
result = read_hdf(path, "df", mode=mode)
tm.assert_frame_equal(result, df)
def check_default_mode():
# read_hdf uses default mode
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", mode="w")
result = read_hdf(path, "df")
tm.assert_frame_equal(result, df)
check("r")
check("r+")
check("a")
check("w")
check_default_mode()
def test_reopen_handle(self, setup_path):
with ensure_clean_path(setup_path) as path:
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# invalid mode change
with pytest.raises(PossibleDataLossError):
store.open("w")
store.close()
assert not store.is_open
# truncation ok here
store.open("w")
assert store.is_open
assert len(store) == 0
store.close()
assert not store.is_open
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# reopen as read
store.open("r")
assert store.is_open
assert len(store) == 1
assert store._mode == "r"
store.close()
assert not store.is_open
# reopen as append
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
# reopen as append (again)
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
def test_open_args(self, setup_path):
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
# create an in memory store
store = HDFStore(
path, mode="a", driver="H5FD_CORE", driver_core_backing_store=0
)
store["df"] = df
store.append("df2", df)
tm.assert_frame_equal(store["df"], df)
tm.assert_frame_equal(store["df2"], df)
store.close()
# the file should not have actually been written
assert not os.path.exists(path)
def test_flush(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store.flush()
store.flush(fsync=True)
def test_get(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
left = store.get("a")
right = store["a"]
tm.assert_series_equal(left, right)
left = store.get("/a")
right = store["/a"]
tm.assert_series_equal(left, right)
with pytest.raises(KeyError, match="'No object named b in the file'"):
store.get("b")
@pytest.mark.parametrize(
"where, expected",
[
(
"/",
{
"": ({"first_group", "second_group"}, set()),
"/first_group": (set(), {"df1", "df2"}),
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
(
"/second_group",
{
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
],
)
def test_walk(self, where, expected, setup_path):
# GH10143
objs = {
"df1": pd.DataFrame([1, 2, 3]),
"df2": pd.DataFrame([4, 5, 6]),
"df3": pd.DataFrame([6, 7, 8]),
"df4": pd.DataFrame([9, 10, 11]),
"s1": pd.Series([10, 9, 8]),
# Next 3 items aren't pandas objects and should be ignored
"a1": np.array([[1, 2, 3], [4, 5, 6]]),
"tb1": np.array([(1, 2, 3), (4, 5, 6)], dtype="i,i,i"),
"tb2": np.array([(7, 8, 9), (10, 11, 12)], dtype="i,i,i"),
}
with ensure_clean_store("walk_groups.hdf", mode="w") as store:
store.put("/first_group/df1", objs["df1"])
store.put("/first_group/df2", objs["df2"])
store.put("/second_group/df3", objs["df3"])
store.put("/second_group/s1", objs["s1"])
store.put("/second_group/third_group/df4", objs["df4"])
# Create non-pandas objects
store._handle.create_array("/first_group", "a1", objs["a1"])
store._handle.create_table("/first_group", "tb1", obj=objs["tb1"])
store._handle.create_table("/second_group", "tb2", obj=objs["tb2"])
assert len(list(store.walk(where=where))) == len(expected)
for path, groups, leaves in store.walk(where=where):
assert path in expected
expected_groups, expected_frames = expected[path]
assert expected_groups == set(groups)
assert expected_frames == set(leaves)
for leaf in leaves:
frame_path = "/".join([path, leaf])
obj = store.get(frame_path)
if "df" in leaf:
tm.assert_frame_equal(obj, objs[leaf])
else:
tm.assert_series_equal(obj, objs[leaf])
def test_getattr(self, setup_path):
with ensure_clean_store(setup_path) as store:
s = tm.makeTimeSeries()
store["a"] = s
# test attribute access
result = store.a
tm.assert_series_equal(result, s)
result = getattr(store, "a")
tm.assert_series_equal(result, s)
df = tm.makeTimeDataFrame()
store["df"] = df
result = store.df
tm.assert_frame_equal(result, df)
# errors
for x in ["d", "mode", "path", "handle", "complib"]:
with pytest.raises(AttributeError):
getattr(store, x)
# not stores
for x in ["mode", "path", "handle", "complib"]:
getattr(store, "_{x}".format(x=x))
def test_put(self, setup_path):
with ensure_clean_store(setup_path) as store:
ts = tm.makeTimeSeries()
df = tm.makeTimeDataFrame()
store["a"] = ts
store["b"] = df[:10]
store["foo/bar/bah"] = df[:10]
store["foo"] = df[:10]
store["/foo"] = df[:10]
store.put("c", df[:10], format="table")
# not OK, not a table
with pytest.raises(ValueError):
store.put("b", df[10:], append=True)
# node does not currently exist, test _is_table_type returns False
# in this case
_maybe_remove(store, "f")
with pytest.raises(ValueError):
store.put("f", df[10:], append=True)
# can't put to a table (use append instead)
with pytest.raises(ValueError):
store.put("c", df[10:], append=True)
# overwrite table
store.put("c", df[:10], format="table", append=False)
tm.assert_frame_equal(df[:10], store["c"])
def test_put_string_index(self, setup_path):
with ensure_clean_store(setup_path) as store:
index = Index(
["I am a very long string index: {i}".format(i=i) for i in range(20)]
)
s = Series(np.arange(20), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
# mixed length
index = Index(
["abcdefghijklmnopqrstuvwxyz1234567890"]
+ ["I am a very long string index: {i}".format(i=i) for i in range(20)]
)
s = Series(np.arange(21), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
def test_put_compression(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
store.put("c", df, format="table", complib="zlib")
tm.assert_frame_equal(store["c"], df)
# can't compress if format='fixed'
with pytest.raises(ValueError):
store.put("b", df, format="fixed", complib="zlib")
@td.skip_if_windows_python_3
def test_put_compression_blosc(self, setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
# can't compress if format='fixed'
with pytest.raises(ValueError):
store.put("b", df, format="fixed", complib="blosc")
store.put("c", df, format="table", complib="blosc")
tm.assert_frame_equal(store["c"], df)
def test_complibs_default_settings(self, setup_path):
# GH15943
df = tm.makeDataFrame()
# Set complevel and check if complib is automatically set to
# default value
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complevel=9)
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 9
assert node.filters.complib == "zlib"
# Set complib and check to see if compression is disabled
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complib="zlib")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if not setting complib or complevel results in no compression
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if file-defaults can be overridden on a per table basis
with ensure_clean_path(setup_path) as tmpfile:
store = pd.HDFStore(tmpfile)
store.append("dfc", df, complevel=9, complib="blosc")
store.append("df", df)
store.close()
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
for node in h5file.walk_nodes(where="/dfc", classname="Leaf"):
assert node.filters.complevel == 9
assert node.filters.complib == "blosc"
def test_complibs(self, setup_path):
# GH14478
df = tm.makeDataFrame()
# Building list of all complibs and complevels tuples
all_complibs = tables.filters.all_complibs
# Remove lzo if its not available on this platform
if not tables.which_lib_version("lzo"):
all_complibs.remove("lzo")
# Remove bzip2 if its not available on this platform
if not tables.which_lib_version("bzip2"):
all_complibs.remove("bzip2")
all_levels = range(0, 10)
all_tests = [(lib, lvl) for lib in all_complibs for lvl in all_levels]
for (lib, lvl) in all_tests:
with ensure_clean_path(setup_path) as tmpfile:
gname = "foo"
# Write and read file to see if data is consistent
df.to_hdf(tmpfile, gname, complib=lib, complevel=lvl)
result = pd.read_hdf(tmpfile, gname)
tm.assert_frame_equal(result, df)
# Open file and check metadata
# for correct amount of compression
h5table = tables.open_file(tmpfile, mode="r")
for node in h5table.walk_nodes(where="/" + gname, classname="Leaf"):
assert node.filters.complevel == lvl
if lvl == 0:
assert node.filters.complib is None
else:
assert node.filters.complib == lib
h5table.close()
def test_put_integer(self, setup_path):
# non-date, non-string index
df = DataFrame(np.random.randn(50, 100))
self._check_roundtrip(df, tm.assert_frame_equal, setup_path)
@td.xfail_non_writeable
def test_put_mixed_type(self, setup_path):
df = tm.makeTimeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
# PerformanceWarning
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store.put("df", df)
expected = store.get("df")
tm.assert_frame_equal(expected, df)
@pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
def test_append(self, setup_path):
with ensure_clean_store(setup_path) as store:
            # this is allowed but you almost always don't want to do it
# tables.NaturalNameWarning):
with catch_warnings(record=True):
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
tm.assert_frame_equal(store["df1"], df)
_maybe_remove(store, "df2")
store.put("df2", df[:10], format="table")
store.append("df2", df[10:])
tm.assert_frame_equal(store["df2"], df)
_maybe_remove(store, "df3")
store.append("/df3", df[:10])
store.append("/df3", df[10:])
tm.assert_frame_equal(store["df3"], df)
            # this is allowed but you almost always don't want to do it
# tables.NaturalNameWarning
_maybe_remove(store, "/df3 foo")
store.append("/df3 foo", df[:10])
store.append("/df3 foo", df[10:])
tm.assert_frame_equal(store["df3 foo"], df)
            # dtype issues - mixed type in a single object column
df = DataFrame(data=[[1, 2], [0, 1], [1, 2], [0, 0]])
df["mixed_column"] = "testing"
df.loc[2, "mixed_column"] = np.nan
_maybe_remove(store, "df")
store.append("df", df)
tm.assert_frame_equal(store["df"], df)
# uints - test storage of uints
uint_data = DataFrame(
{
"u08": Series(
np.random.randint(0, high=255, size=5), dtype=np.uint8
),
"u16": Series(
np.random.randint(0, high=65535, size=5), dtype=np.uint16
),
"u32": Series(
np.random.randint(0, high=2 ** 30, size=5), dtype=np.uint32
),
"u64": Series(
[2 ** 58, 2 ** 59, 2 ** 60, 2 ** 61, 2 ** 62],
dtype=np.uint64,
),
},
index=np.arange(5),
)
_maybe_remove(store, "uints")
store.append("uints", uint_data)
tm.assert_frame_equal(store["uints"], uint_data)
# uints - test storage of uints in indexable columns
_maybe_remove(store, "uints")
# 64-bit indices not yet supported
store.append("uints", uint_data, data_columns=["u08", "u16", "u32"])
tm.assert_frame_equal(store["uints"], uint_data)
def test_append_series(self, setup_path):
with ensure_clean_store(setup_path) as store:
# basic
ss = tm.makeStringSeries()
ts = tm.makeTimeSeries()
ns = Series(np.arange(100))
store.append("ss", ss)
result = store["ss"]
tm.assert_series_equal(result, ss)
assert result.name is None
store.append("ts", ts)
result = store["ts"]
tm.assert_series_equal(result, ts)
assert result.name is None
ns.name = "foo"
store.append("ns", ns)
result = store["ns"]
tm.assert_series_equal(result, ns)
assert result.name == ns.name
# select on the values
expected = ns[ns > 60]
result = store.select("ns", "foo>60")
tm.assert_series_equal(result, expected)
# select on the index and values
expected = ns[(ns > 70) & (ns.index < 90)]
result = store.select("ns", "foo>70 and index<90")
tm.assert_series_equal(result, expected)
# multi-index
mi = DataFrame(np.random.randn(5, 1), columns=["A"])
mi["B"] = np.arange(len(mi))
mi["C"] = "foo"
mi.loc[3:5, "C"] = "bar"
mi.set_index(["C", "B"], inplace=True)
s = mi.stack()
s.index = s.index.droplevel(2)
store.append("mi", s)
tm.assert_series_equal(store["mi"], s)
def test_store_index_types(self, setup_path):
# GH5386
# test storing various index types
with ensure_clean_store(setup_path) as store:
def check(format, index):
df = DataFrame(np.random.randn(10, 2), columns=list("AB"))
df.index = index(len(df))
_maybe_remove(store, "df")
store.put("df", df, format=format)
tm.assert_frame_equal(df, store["df"])
for index in [
tm.makeFloatIndex,
tm.makeStringIndex,
tm.makeIntIndex,
tm.makeDateIndex,
]:
check("table", index)
check("fixed", index)
# period index currently broken for table
            # see GH7796 FIXME
check("fixed", tm.makePeriodIndex)
# check('table',tm.makePeriodIndex)
# unicode
index = tm.makeUnicodeIndex
check("table", index)
check("fixed", index)
@pytest.mark.skipif(
not is_platform_little_endian(), reason="reason platform is not little endian"
)
def test_encoding(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A="foo", B="bar"), index=range(5))
df.loc[2, "A"] = np.nan
df.loc[3, "B"] = np.nan
_maybe_remove(store, "df")
store.append("df", df, encoding="ascii")
tm.assert_frame_equal(store["df"], df)
expected = df.reindex(columns=["A"])
result = store.select("df", Term("columns=A", encoding="ascii"))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"val",
[
[b"E\xc9, 17", b"", b"a", b"b", b"c"],
[b"E\xc9, 17", b"a", b"b", b"c"],
[b"EE, 17", b"", b"a", b"b", b"c"],
[b"E\xc9, 17", b"\xf8\xfc", b"a", b"b", b"c"],
[b"", b"a", b"b", b"c"],
[b"\xf8\xfc", b"a", b"b", b"c"],
[b"A\xf8\xfc", b"", b"a", b"b", b"c"],
[np.nan, b"", b"b", b"c"],
[b"A\xf8\xfc", np.nan, b"", b"b", b"c"],
],
)
@pytest.mark.parametrize("dtype", ["category", object])
def test_latin_encoding(self, setup_path, dtype, val):
enc = "latin-1"
nan_rep = ""
key = "data"
val = [x.decode(enc) if isinstance(x, bytes) else x for x in val]
ser = pd.Series(val, dtype=dtype)
with ensure_clean_path(setup_path) as store:
ser.to_hdf(store, key, format="table", encoding=enc, nan_rep=nan_rep)
retr = read_hdf(store, key)
s_nan = ser.replace(nan_rep, np.nan)
if is_categorical_dtype(s_nan):
assert is_categorical_dtype(retr)
tm.assert_series_equal(
s_nan, retr, check_dtype=False, check_categorical=False
)
else:
tm.assert_series_equal(s_nan, retr)
# FIXME: don't leave commented-out
# fails:
# for x in examples:
# roundtrip(s, nan_rep=b'\xf8\xfc')
def test_append_some_nans(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
{
"A": Series(np.random.randn(20)).astype("int32"),
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
"D": Timestamp("20010101"),
"E": datetime.datetime(2001, 1, 2, 0, 0),
},
index=np.arange(20),
)
# some nans
_maybe_remove(store, "df1")
df.loc[0:15, ["A1", "B", "D", "E"]] = np.nan
store.append("df1", df[:10])
store.append("df1", df[10:])
tm.assert_frame_equal(store["df1"], df)
# first column
df1 = df.copy()
df1.loc[:, "A1"] = np.nan
_maybe_remove(store, "df1")
store.append("df1", df1[:10])
store.append("df1", df1[10:])
tm.assert_frame_equal(store["df1"], df1)
# 2nd column
df2 = df.copy()
df2.loc[:, "A2"] = np.nan
_maybe_remove(store, "df2")
store.append("df2", df2[:10])
store.append("df2", df2[10:])
tm.assert_frame_equal(store["df2"], df2)
# datetimes
df3 = df.copy()
df3.loc[:, "E"] = np.nan
_maybe_remove(store, "df3")
store.append("df3", df3[:10])
store.append("df3", df3[10:])
tm.assert_frame_equal(store["df3"], df3)
def test_append_all_nans(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
{"A1": np.random.randn(20), "A2": np.random.randn(20)},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
# nan some entire rows (dropna=True)
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df[-4:])
# nan some entire rows (dropna=False)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# tests the option io.hdf.dropna_table
pd.set_option("io.hdf.dropna_table", False)
_maybe_remove(store, "df3")
store.append("df3", df[:10])
store.append("df3", df[10:])
tm.assert_frame_equal(store["df3"], df)
pd.set_option("io.hdf.dropna_table", True)
_maybe_remove(store, "df4")
store.append("df4", df[:10])
store.append("df4", df[10:])
tm.assert_frame_equal(store["df4"], df[-4:])
# nan some entire rows (string are still written!)
df = DataFrame(
{
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# nan some entire rows (but since we have dates they are still
# written!)
df = DataFrame(
{
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
"D": Timestamp("20010101"),
"E": datetime.datetime(2001, 1, 2, 0, 0),
},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# Test to make sure defaults are to not drop.
# Corresponding to Issue 9382
df_with_missing = DataFrame(
{"col1": [0, np.nan, 2], "col2": [1, np.nan, np.nan]}
)
with ensure_clean_path(setup_path) as path:
df_with_missing.to_hdf(path, "df_with_missing", format="table")
reloaded = read_hdf(path, "df_with_missing")
| tm.assert_frame_equal(df_with_missing, reloaded) | pandas.util.testing.assert_frame_equal |
import datetime
import pandas as pd
import numpy as np
import string
import os
from nltk import word_tokenize
from nltk.corpus import stopwords
from gensim.corpora import Dictionary
from gensim.matutils import corpus2csc
from sklearn.cluster import SpectralClustering
class CandidateStore:
def __init__(self, n_clusters):
self.docs = | pd.DataFrame(columns=['text', 'cluster', 'topic', 'edit', 'tokenized', 'bow']) | pandas.DataFrame |
import numpy as np
"""
This Monte Carlo algorithm approximates the "true" value of the parameter(s)
of interest using a random walk of normally distributed steps whose mean is
either 0 or the last accepted step in the walk for that parameter.
"""
truth=5
tss = []
for j in range(50):
ts = []
stepsizes = [.01,.05,.1,.5,1,5,10]
index=0
while len(ts) < len(stepsizes):
w0 = 0
score1 = abs(truth-w0)
score=score1
delta = 0
t = 0
u = 0
stepsize=stepsizes[index]
while (score1 > .5)&(t<1000):
w1 = w0+np.random.normal(delta,stepsize)
score2 = abs(truth-w1)
if -score2>-score1:
delta = w1-w0
w0 = w1
score1=score2
u+=1
t+=1
print(t,score1,u)
if score1 <=.5:
ts.append(t)
index+=1
tss.append(ts)
tss=np.array(tss)
stepsize = stepsizes[np.argmin(np.mean(tss,axis=0))]
truth = 5
w0 = 0
score1 = abs(truth-w0)
score=score1
delta = 0
t = 0
u = 0
while (score1 > .5)&(t<1000):
w1 = w0+np.random.normal(delta,stepsize)
score2 = abs(truth-w1)
if -score2>-score1:
delta = w1-w0
w0 = w1
score1=score2
u+=1
t+=1
print(t,score1,u)
import pandas as pd
import numpy as np
import random
import matplotlib.pyplot as plt
dat = pd.read_csv("https://archive.ics.uci.edu/ml/machine-learning-databases/00519/heart_failure_clinical_records_dataset.csv")
pd.set_option("display.max_columns",500)
dat.tail()
covars = ['age','anaemia','creatinine_phosphokinase',
'diabetes','ejection_fraction','high_blood_pressure',
'platelets','serum_creatinine','serum_sodium',
'sex','smoking','time']
X=dat[covars].copy()
Y=dat['DEATH_EVENT']
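# The next three lines turn the 0/1 target into clipped log-odds so a plain
# least-squares fit can mimic logistic regression: odds of 0 and inf (the only
# two values Y/(1-Y) can take here) are replaced by 1e-16 and 1e16 before the
# log is taken. This is an approximation, compared against sklearn further down.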
Yodds = Y/(1-Y)
Yodds = np.where(Yodds==np.inf,1e16,1e-16)
Ylogodds = np.log(Yodds)
X=(X-np.mean(X,axis=0))/np.std(X,axis=0)
X['int']=1
random.seed(42)
index = np.array(random.choices([1,2,3,4,5],k=len(X)))
xv = X[index==5].copy()
yv = Ylogodds[index==5].copy()
xt = X[index!=5].copy()
yt = Ylogodds[index!=5].copy()
coefs = np.linalg.pinv(xt.T@xt)@(xt.T@yt)
predtlogodds = xt@coefs
predvlogodds = xv@coefs
predt=np.exp(predtlogodds)/(1+np.exp(predtlogodds))
predt=np.where(predt>.5,1,0)
predv=np.exp(predvlogodds)/(1+np.exp(predvlogodds))
predv=np.where(predv>.5,1,0)
act_t = np.exp(yt)/(1+np.exp(yt))
act_t=np.where(act_t>.5,1,0)
act_v = np.exp(yv)/(1+np.exp(yv))
act_v=np.where(act_v>.5,1,0)
logregt_acc=sum(np.where(predt==act_t,1,0))/len(predt)
logregv_acc = sum(np.where(predv==act_v,1,0))/len(predv)
print("logreg training acc:",logregt_acc,"val acc:",logregv_acc)
from sklearn.linear_model import LogisticRegression
xv = X[index==5].copy()
yv = Y[index==5].copy()
xt = X[index!=5].copy()
yt = Y[index!=5].copy()
lr = LogisticRegression(fit_intercept=False,solver = 'newton-cg',penalty='l2')
lr.fit(xt,yt)
sum(np.where(lr.predict(xt)==yt,1,0))/len(yt)
sum(np.where(lr.predict(xv)==yv,1,0))/len(yv)
#BASE KNN Maximizing Recall
from sklearn.neighbors import KNeighborsClassifier
X=dat[covars].copy()
Y=dat['DEATH_EVENT']
X=(X-np.mean(X,axis=0))/np.std(X,axis=0)
random.seed(42)
index = np.array(random.choices([1,2,3,4,5,6],k=len(X)))
acc = []
for i in list(range(2,30)):
avgscore=[]
for t in [1,2,3,4,5]:
xv = X[index==t].copy()
yv = Y[index==t].copy()
xt = X[~pd.Series(index).isin([t,6])].copy()
yt = Y[~pd.Series(index).isin([t,6])].copy()
knn = KNeighborsClassifier(n_neighbors=i,
weights='distance',
algorithm='auto', leaf_size=30, p=2,
metric='euclidean', metric_params=None,
n_jobs=None)
knn.fit(xt,yt)
tp=sum(np.where((knn.predict(xv)==1)&(yv==1),1,0))
fp=sum(np.where((knn.predict(xv)==1)&(yv==0),1,0))
tn=sum(np.where((knn.predict(xv)==0)&(yv==0),1,0))
fn=sum(np.where((knn.predict(xv)==0)&(yv==1),1,0))
precision=tp/(tp+fp)
recall=tp/(tp+fn)
#score = (sum(np.where(knn.predict(xv*w0)==yv,1,0)))/(len(yv))
score = precision
avgscore.append(score)
acc.append(np.mean(avgscore))
plt.plot(acc)
plt.xticks(list(range(28)),list(range(2,30)))
plt.show()
#k=18
k=4
k=16
def model_precision(X,Y,w,k):
random.seed(42)
index = np.array(random.choices([1,2,3,4,5,6],k=len(X)))
initscores=[]
for val in [1,2,3,4,5]:
xv = X[pd.Series(index).isin([val])].copy()
yv = Y[pd.Series(index).isin([val])].copy()
xt = X[~pd.Series(index).isin([val,6])].copy()
yt = Y[~pd.Series(index).isin([val,6])].copy()
knn = KNeighborsClassifier(n_neighbors=k,
weights='distance',
algorithm='auto', leaf_size=30, p=2,
metric='euclidean', metric_params=None,
n_jobs=None)
knn.fit(xt*w,yt)
tp=sum(np.where((knn.predict(xv*w)==1)&(yv==1),1,0))
fp=sum(np.where((knn.predict(xv*w)==1)&(yv==0),1,0))
tn=sum(np.where((knn.predict(xv*w)==0)&(yv==0),1,0))
fn=sum(np.where((knn.predict(xv*w)==0)&(yv==1),1,0))
precision=tp/(tp+fp)
recall=tp/(tp+fn)
#score = (sum(np.where(knn.predict(xv*w0)==yv,1,0)))/(len(yv))
score = precision
initscores.append(score)
score=np.mean(initscores)
return score
def model_recall(X,Y,w,k):
random.seed(42)
index = np.array(random.choices([1,2,3,4,5,6],k=len(X)))
initscores=[]
for val in [1,2,3,4,5]:
xv = X[pd.Series(index).isin([val])].copy()
yv = Y[pd.Series(index).isin([val])].copy()
xt = X[~pd.Series(index).isin([val,6])].copy()
yt = Y[~pd.Series(index).isin([val,6])].copy()
knn = KNeighborsClassifier(n_neighbors=k,
weights='distance',
algorithm='auto', leaf_size=30, p=2,
metric='euclidean', metric_params=None,
n_jobs=None)
knn.fit(xt*w,yt)
tp=sum(np.where((knn.predict(xv*w)==1)&(yv==1),1,0))
fp=sum(np.where((knn.predict(xv*w)==1)&(yv==0),1,0))
tn=sum(np.where((knn.predict(xv*w)==0)&(yv==0),1,0))
fn=sum(np.where((knn.predict(xv*w)==0)&(yv==1),1,0))
precision=tp/(tp+fp)
recall=tp/(tp+fn)
#score = (sum(np.where(knn.predict(xv*w0)==yv,1,0)))/(len(yv))
score = recall
initscores.append(score)
score=np.mean(initscores)
return score
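# model_precision and model_recall differ only in which metric they return; a
# single parameterised scorer would remove the duplication, e.g. (sketch only,
# not used below):
#
#   def model_metric(X, Y, w, k, metric="precision"):
#       ...same cross-validation loop as above...
#       return precision if metric == "precision" else recall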
def sequential_MCMC(X,Y,model_fn,draws=30,no_update_limit=120,
stepsize=.1,step_shrinkage=.9,
delta_reset=20,):
#INITIAL SCORE
w0 = np.ones(len(X.columns.values))
score = model_fn(X,Y,w0,k)
scoreinit=score
wfin = []
scores = []
while len(wfin)<draws:
noupdate=0
deltachosen=False
stepsize=stepsize
score=scoreinit
delta=np.random.normal(0,stepsize/2,len(covars))
w0=np.ones(len(X.columns.values))
while noupdate<no_update_limit:
w1 = w0+np.random.normal(delta,stepsize,len(X.columns.values))
score2 = model_fn(X,Y,w1,k)
if score2>score:
print(score2,score,"accepted",noupdate)
deltachosen==True
score=score2
delta = w1-w0
w0=w1
noupdate=0
else:
#print(score2,score)
noupdate+=1
if deltachosen==False:
delta=np.random.normal(0,stepsize/2,len(X.columns.values))
            if noupdate > 0 and noupdate % delta_reset == 0:
deltachosen=False
stepsize=stepsize*step_shrinkage
delta=np.random.normal(0,stepsize/2,len(X.columns.values))
if score>scoreinit:
wfin.append(w0)
scores.append(score)
wfin_arr=np.vstack(wfin)
return(wfin_arr,scores)
wfin_arr,scores=sequential_MCMC(X,Y,model_fn=model_precision,draws=30,no_update_limit=120,
stepsize=.1,step_shrinkage=.9,
delta_reset=20)
print(np.mean(wfin_arr,axis=0))
print(np.std(wfin_arr,axis=0))
for i in range(12):
plt.hist(wfin_arr.T[i],bins=10)
plt.title(covars[i])
plt.show()
method=np.median
xv = X[pd.Series(index).isin([6])].copy()
yv = Y[pd.Series(index).isin([6])].copy()
xt = X[pd.Series(index).isin([1,2,3,4,5])].copy()
yt = Y[pd.Series(index).isin([1,2,3,4,5])].copy()
wf=method(wfin_arr,axis=0)
knn = KNeighborsClassifier(n_neighbors=k,
weights='distance',
algorithm='auto', leaf_size=30, p=2,
metric='euclidean', metric_params=None,
n_jobs=None)
knn.fit(xt*wf,yt)
tp=sum(np.where((knn.predict(xv*wf)==1)&(yv==1),1,0))
fp=sum(np.where((knn.predict(xv*wf)==1)&(yv==0),1,0))
tn=sum(np.where((knn.predict(xv*wf)==0)&(yv==0),1,0))
fn=sum(np.where((knn.predict(xv*wf)==0)&(yv==1),1,0))
print('precision: ',tp/(tp+fp))
print('recall: ',tp/(tp+fn))
print('accuracy: ',(tp+tn)/(tp+fn+fp+tn))
knn = KNeighborsClassifier(n_neighbors=k,
weights='distance',
algorithm='auto', leaf_size=30, p=2,
metric='euclidean', metric_params=None,
n_jobs=None)
knn.fit(xt,yt)
tp=sum(np.where((knn.predict(xv)==1)&(yv==1),1,0))
fp=sum(np.where((knn.predict(xv)==1)&(yv==0),1,0))
tn=sum(np.where((knn.predict(xv)==0)&(yv==0),1,0))
fn=sum(np.where((knn.predict(xv)==0)&(yv==1),1,0))
print('precision: ',tp/(tp+fp))
print('recall: ',tp/(tp+fn))
print('accuracy: ',(tp+tn)/(tp+fn+fp+tn))
wfin_arr,scores=sequential_MCMC(X,Y,model_fn=model_recall,draws=30,no_update_limit=120,
stepsize=.1,step_shrinkage=.9,
delta_reset=20)
knn = KNeighborsClassifier(n_neighbors=k,
weights='distance',
algorithm='auto', leaf_size=30, p=2,
metric='euclidean', metric_params=None,
n_jobs=None)
knn.fit(xt*wf,yt)
tp=sum(np.where((knn.predict(xv*wf)==1)&(yv==1),1,0))
fp=sum(np.where((knn.predict(xv*wf)==1)&(yv==0),1,0))
tn=sum(np.where((knn.predict(xv*wf)==0)&(yv==0),1,0))
fn=sum(np.where((knn.predict(xv*wf)==0)&(yv==1),1,0))
print('precision: ',tp/(tp+fp))
print('recall: ',tp/(tp+fn))
print('accuracy: ',(tp+tn)/(tp+fn+fp+tn))
knn = KNeighborsClassifier(n_neighbors=k,
weights='distance',
algorithm='auto', leaf_size=30, p=2,
metric='euclidean', metric_params=None,
n_jobs=None)
knn.fit(xt,yt)
tp=sum(np.where((knn.predict(xv)==1)&(yv==1),1,0))
fp=sum(np.where((knn.predict(xv)==1)&(yv==0),1,0))
tn=sum(np.where((knn.predict(xv)==0)&(yv==0),1,0))
fn=sum(np.where((knn.predict(xv)==0)&(yv==1),1,0))
print('precision: ',tp/(tp+fp))
print('recall: ',tp/(tp+fn))
print('accuracy: ',(tp+tn)/(tp+fn+fp+tn))
initscores=[]
for val in [1,2,3,4,5]:
xv = X[pd.Series(index).isin([val])].copy()
yv = Y[pd.Series(index).isin([val])].copy()
xt = X[~pd.Series(index).isin([val,6])].copy()
yt = Y[~pd.Series(index).isin([val,6])].copy()
knn = KNeighborsClassifier(n_neighbors=k,
weights='distance',
algorithm='auto', leaf_size=30, p=2,
metric='euclidean', metric_params=None,
n_jobs=None)
stepsize=.1
w0=np.ones(len(covars))
delta=np.random.normal(0,stepsize/2,len(covars))
knn.fit(xt*w0,yt)
tp=sum(np.where((knn.predict(xv*w0)==1)&(yv==1),1,0))
fp=sum(np.where((knn.predict(xv*w0)==1)&(yv==0),1,0))
tn=sum(np.where((knn.predict(xv*w0)==0)&(yv==0),1,0))
fn=sum(np.where((knn.predict(xv*w0)==0)&(yv==1),1,0))
precision=tp/(tp+fp)
recall=tp/(tp+fn)
#score = (sum(np.where(knn.predict(xv*w0)==yv,1,0)))/(len(yv))
score = recall
initscores.append(score)
score=np.mean(initscores)
scoreinit=score
#sum(np.where(knn.predict(xv*w0)==yv,1,0))/len(yv)
#sum(np.where(knn.predict(xt*w0)==yt,1,0))/len(yt)
wfin=[]
scores = []
while len(wfin)<30:
noupdate=0
deltachosen=False
score=scoreinit
stepsize=.1
delta=np.random.normal(0,stepsize/2,len(covars))
w0=np.ones(len(covars))
#iteration=0
while noupdate<120:
#iteration+=1
#val = iteration%4+1
score2list=[]
for val in [1,2,3,4,5]:
xv = X[pd.Series(index).isin([val])].copy()
yv = Y[pd.Series(index).isin([val])].copy()
xt = X[~pd.Series(index).isin([val,6])].copy()
yt = Y[~pd.Series(index).isin([val,6])].copy()
w1 = w0+np.random.normal(delta,stepsize,len(covars))
knn = KNeighborsClassifier(n_neighbors=k,
weights='distance',
algorithm='auto', leaf_size=30, p=2,
metric='euclidean', metric_params=None,
n_jobs=None)
knn.fit(xt*w1,yt)
tp=sum(np.where((knn.predict(xv*w1)==1)&(yv==1),1,0))
fp=sum(np.where((knn.predict(xv*w1)==1)&(yv==0),1,0))
tn=sum(np.where((knn.predict(xv*w1)==0)&(yv==0),1,0))
fn=sum(np.where((knn.predict(xv*w1)==0)&(yv==1),1,0))
precision=tp/(tp+fp)
recall=tp/(tp+fn)
#score2 = sum(np.where(knn.predict(xv*w1)==yv,1,0))/len(yv)
score2 = recall
score2list.append(score2)
score2=np.mean(score2list)
if score2>score:
print(score2,score,"accepted",noupdate)
deltachosen==True
score=score2
delta = w1-w0
w0=w1
noupdate=0
else:
#print(score2,score)
noupdate+=1
if deltachosen==False:
delta=np.random.normal(0,stepsize/2,len(covars))
        if noupdate > 0 and noupdate % 20 == 0:
deltachosen=False
stepsize=stepsize*.9
delta=np.random.normal(0,stepsize/2,len(covars))
if score>scoreinit:
wfin.append(w0)
scores.append(score)
wfin_arr=np.vstack(wfin)
print(np.mean(wfin_arr,axis=0))
print(np.std(wfin_arr,axis=0))
for i in range(12):
plt.hist(wfin_arr.T[i],bins=10)
plt.title(covars[i])
plt.show()
method=np.mean
xv = X[pd.Series(index).isin([6])].copy()
yv = Y[pd.Series(index).isin([6])].copy()
xt = X[pd.Series(index).isin([1,2,3,4,5])].copy()
yt = Y[pd.Series(index).isin([1,2,3,4,5])].copy()
wf=method(wfin_arr,axis=0)
knn = KNeighborsClassifier(n_neighbors=k,
weights='distance',
algorithm='auto', leaf_size=30, p=2,
metric='euclidean', metric_params=None,
n_jobs=None)
knn.fit(xt*wf,yt)
tp=sum(np.where((knn.predict(xv*wf)==1)&(yv==1),1,0))
fp=sum(np.where((knn.predict(xv*wf)==1)&(yv==0),1,0))
tn=sum(np.where((knn.predict(xv*wf)==0)&(yv==0),1,0))
fn=sum(np.where((knn.predict(xv*wf)==0)&(yv==1),1,0))
print('precision: ',tp/(tp+fp))
print('recall: ',tp/(tp+fn))
print('accuracy: ',(tp+tn)/(tp+fn+fp+tn))
knn = KNeighborsClassifier(n_neighbors=k,
weights='distance',
algorithm='auto', leaf_size=30, p=2,
metric='euclidean', metric_params=None,
n_jobs=None)
knn.fit(xt,yt)
tp=sum(np.where((knn.predict(xv)==1)&(yv==1),1,0))
fp=sum(np.where((knn.predict(xv)==1)&(yv==0),1,0))
tn=sum(np.where((knn.predict(xv)==0)&(yv==0),1,0))
fn=sum(np.where((knn.predict(xv)==0)&(yv==1),1,0))
print('precision: ',tp/(tp+fp))
print('recall: ',tp/(tp+fn))
print('accuracy: ',(tp+tn)/(tp+fn+fp+tn))
scores_ordered = sorted(range(len(scores)), key=lambda k: scores[k])
wfin_sorted = wfin_arr[scores_ordered]
wfin_selected = wfin_sorted[15:]
wf_sort=method(wfin_selected,axis=0)
knn = KNeighborsClassifier(n_neighbors=k,
weights='distance',
algorithm='auto', leaf_size=30, p=2,
metric='euclidean', metric_params=None,
n_jobs=None)
knn.fit(xt*wf_sort,yt)
tp=sum(np.where((knn.predict(xv*wf_sort)==1)&(yv==1),1,0))
fp=sum(np.where((knn.predict(xv*wf_sort)==1)&(yv==0),1,0))
tn=sum(np.where((knn.predict(xv*wf_sort)==0)&(yv==0),1,0))
fn=sum(np.where((knn.predict(xv*wf_sort)==0)&(yv==1),1,0))
print('precision: ',tp/(tp+fp))
print('recall: ',tp/(tp+fn))
print('accuracy: ',(tp+tn)/(tp+fn+fp+tn))
#BASE KNN Maximizing Precision
from sklearn.neighbors import KNeighborsClassifier
import warnings
warnings.filterwarnings("ignore")
X=dat[covars].copy()
Y=dat['DEATH_EVENT']
X=(X-np.mean(X,axis=0))/np.std(X,axis=0)
random.seed(42)
index = np.array(random.choices([1,2,3,4,5,6],k=len(X)))
acc = []
for i in list(range(2,30)):
avgscore=[]
for t in [1,2,3,4,5]:
xv = X[index==t].copy()
yv = Y[index==t].copy()
xt = X[~pd.Series(index).isin([t,6])].copy()
yt = Y[~pd.Series(index).isin([t,6])].copy()
knn = KNeighborsClassifier(n_neighbors=i,
weights='distance',
algorithm='auto', leaf_size=30, p=2,
metric='euclidean', metric_params=None,
n_jobs=None)
knn.fit(xt,yt)
tp=sum(np.where((knn.predict(xv)==1)&(yv==1),1,0))
fp=sum(np.where((knn.predict(xv)==1)&(yv==0),1,0))
tn=sum(np.where((knn.predict(xv)==0)&(yv==0),1,0))
fn=sum(np.where((knn.predict(xv)==0)&(yv==1),1,0))
precision=tp/(tp+fp)
recall=tp/(tp+fn)
#score = (sum(np.where(knn.predict(xv*w0)==yv,1,0)))/(len(yv))
score = precision
avgscore.append(score)
acc.append(np.mean(avgscore))
plt.plot(acc)
plt.xticks(list(range(28)),list(range(2,30)))
plt.show()
#k=18
k=17
initscores=[]
for val in [1,2,3,4,5]:
xv = X[pd.Series(index).isin([val])].copy()
yv = Y[pd.Series(index).isin([val])].copy()
xt = X[~pd.Series(index).isin([val,6])].copy()
yt = Y[~ | pd.Series(index) | pandas.Series |
#!/usr/bin/env python
# coding: utf-8
import streamlit as st
import streamlit.components.v1 as components
import matplotlib.pyplot as plt
import kayak
from PIL import Image
import numpy as np
filename_airport = './assets/airports.csv'
filename_aircraft = './assets/aircraft.csv'
output = './assets/output.xlsx'
blank = Image.open('./assets/blank.jpeg')
greenest = Image.open('./assets/planet-earth.png')
cheapest = Image.open('./assets/decrease.png')
shortest = Image.open('./assets/chronometer.png')
bg = Image.open('./assets/background.jpg')
plane = Image.open('./assets/plane.png')
import pandas as pd
df_airport = pd.read_csv(filename_airport)
df_airport.head()
df_aircraft = pd.read_csv(filename_aircraft)
df_aircraft.head()
departure_airport_code = "LAX"
arrival_airport_code = "SFO"
aircraft='Airbus A320'
num_of_pax = 1
st.title('ZeroCarbonFly')
st.subheader('ZeroCarbonFly is a support tool for sustainable travel. Our website guides you to greener flights and visualizes your effort toward Zero Carbon action.')
st.info('Climate change has become a crucial issue in contemporary society. The US has pledged to achieve carbon neutrality by 2050, with a 2030 emissions target to be announced shortly. To meet the 2015 Paris Agreement, global greenhouse gas emissions need to be cut by 25-50% over the next decade. According to the U.S. Greenhouse Gas Emissions and Sinks report by the EPA, the primary source of greenhouse gas emissions in the United States is transportation, which accounted for 29 percent of 2019 greenhouse gas emissions. Among all travel modes, air travel is the fastest-growing source of carbon emissions and emits the most greenhouse gases. ')
st.image(bg)
airport_code = df_airport['iata_code'].tolist()
airport_code = [x for x in airport_code if not pd.isnull(x)]
class_list = ['Economy', 'Business', 'Premium', 'First']
st.sidebar.title('Find flights:')
departure_airport_code = st.sidebar.selectbox('Departure Airport', airport_code)
arrival_airport_code = st.sidebar.selectbox('Arrival Airport', airport_code)
date = st.sidebar.date_input('Flight Date')
class_type = st.sidebar.selectbox('Class Type', class_list)
num_of_pax = st.sidebar.slider('Number of Passengers', min_value=1, max_value=10)
carry_on_bag_number = st.sidebar.selectbox('Carry-on Bags', [0,1])
checked_bag_number = st.sidebar.selectbox('Checked Bags', [0,1,2])
date = pd.to_datetime(date)
day = str(date.day)
month= str(date.month)
year= str(date.year)
df = pd.read_excel(output)
df = df.sort_values(by=['price'])
least_cost = df.iloc[0].to_numpy()
df = df.sort_values(by=['carbon'])
least_carbon = df.iloc[0].to_numpy()
df = df.sort_values(by=['duration'])
least_duration = df.iloc[0].to_numpy()
def metrics(array):
col1.caption('Book Now')
col2.metric("Carbon (kg)", str(array[16]))
col3.metric("Price (USD)", str(array[14]))
col4.metric("Duration (hr, min)", str(array[13]))
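# Note (added): col1-col4 are not defined in this excerpt; presumably they are created
# elsewhere in the app with something like col1, col2, col3, col4 = st.columns(4)
# before metrics() is called on one of the least_* rows above.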
import streamlit as st
import pandas as pd
import numpy as np
import altair as alt
import pydeck as pdk
# LOADING DATA
DATE_TIME = "date/time"
DATA_URL = (
"http://s3-us-west-2.amazonaws.com/streamlit-demo-data/uber-raw-data-sep14.csv.gz"
)
@st.cache(persist=True)
def load_data(nrows):
data = pd.read_csv(DATA_URL, nrows=nrows)
lowercase = lambda x: str(x).lower()
data.rename(lowercase, axis="columns", inplace=True)
data[DATE_TIME] = | pd.to_datetime(data[DATE_TIME]) | pandas.to_datetime |
import pandas as pd
import sys
from datetime import datetime
from pytz import timezone, utc
def str_list(s_cd):
cds = []
if type(s_cd) == str:
cds = []
cds.append(s_cd)
else:
cds = s_cd
return cds
def today_yymmdd():
d = pd.Timestamp.today().date().strftime('%y%m%d')
return d
def present_date():
d = pd.Timestamp.today().strftime('%Y-%m-%d')
return d
def a_week_ago():
tmp = pd.Timestamp.today().date() - pd.DateOffset(weeks=1)
d = tmp.strftime('%Y-%m-%d')
return d
def a_month_ago():
tmp = | pd.Timestamp.today() | pandas.Timestamp.today |
# coding=utf-8
# /usr/bin/env python
'''
Author: wenqiangw
Email: <EMAIL>
Date: 2020-04-30 14:42
Desc:
    XGBoost score card model
    XGBoost parameters: https://blog.csdn.net/zc02051126/article/details/46711047
score = self.score_offset + self.score_factor * np.log(odds)
'''
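# Worked example of the score formula above (illustrative sketch only: score_offset and
# score_factor actually come from ScoreCardModel; 600 and -20/log(2) are assumed here):
#   odds  = p_bad / (1 - p_bad) = 0.05 / 0.95 ≈ 0.0526
#   score = score_offset + score_factor * np.log(odds)
#         = 600 + (-20 / np.log(2)) * np.log(0.0526) ≈ 685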
from pynovice.score_card.src.score_card import ScoreCardModel
import xgboost as xgb
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
class XGBModel(ScoreCardModel):
'''
        Predict the score of a single preprocessed feature vector.
    :return:
        score_label: the final predicted score
        score_feature: a dict with the score of each individual feature
'''
missing_value: float
def __init__(self,missing_value=-999.0,**kwargs):
super().__init__()
self.missing_value = missing_value
self.param = self.load_xgb_param(kwargs)
self.feature_columns = None
self.model_ = None
def load_xgb_param(self,kwargs):
param={}
        # 1. General parameters: these control the overall behaviour of XGBoost.
        general_param={
            'booster': 'gbtree', # [default=gbtree] two boosters are available: gbtree boosts tree models, gblinear boosts linear models.
            'silent': 1, # [default=0] silent mode; 0 prints runtime messages, 1 suppresses them.
            'nthread': 1, # [default: maximum available threads] number of parallel threads to use.
        }
        # 2. Booster parameters: depend on which booster is used; these are the main tuning targets.
        booster_tree_param = {
            'eta': 0.1, # [default=0.3] learning rate, i.e. the shrinkage described in the theory: leaf weights are multiplied by eta at each update so no single tree dominates, which keeps steps small enough to converge. A smaller eta makes later rounds learn more carefully. Typical values 0.01-0.2.
            'gamma': 0, # [default=0] minimum loss reduction required to make a further partition on a leaf node; the larger it is, the more conservative the algorithm. Range [0, inf). In plain terms: before splitting a node, check whether the loss actually drops enough. Tune with CV.
            'max_depth': 3, # [default=6] maximum depth of each tree, used to avoid overfitting; larger values let the model learn more local patterns. Typical values 3-10; tune with CV.
            'min_child_weight': 1, # [default=1] minimum sum of instance Hessians (second derivatives of the loss) required in a child. For squared loss (regression) each instance contributes 1, so this is effectively a sample count; for log loss (classification) each contributes a*(1-a) with a the sigmoid output, which goes to 0 for extreme predictions, hence the threshold. It controls overfitting by keeping leaves from fitting very local samples; e.g. for imbalanced 0-1 classification with h around 0.01, a value of 1 means roughly 100 samples per leaf. Smaller values overfit more easily; tune with CV.
            # max_leaf_nodes: maximum number of leaf nodes per tree; if set it overrides max_depth, so we tune max_depth instead.
            'max_delta_step': 0, # [default=0] maximum delta step allowed for each tree's weight estimate; 0 means no constraint, a positive value makes the algorithm more conservative. Usually not needed, but it helps logistic regression when the classes are very imbalanced.
            'subsample': 1, # [default=1] fraction of the training instances sampled to grow each tree; smaller values are more conservative and reduce overfitting, but too small a value underfits. Typical values 0.5-1; tune with CV.
            'colsample_bytree': 1, # [default=1] column (feature) subsampling ratio when constructing each tree; like shrinkage, it is meant to prevent overfitting. Typical values 0.5-1; tune with CV.
            'tree_method': 'auto', # [default='auto'] tree construction method, one of {'auto', 'exact', 'approx'}: exact greedy (small data) or approximate (large data).
            #'scale_pos_weight': # balances positive and negative classes and helps convergence when the samples are imbalanced (it effectively raises the learning rate of the minority class). Use it freely if you only care about ranking or AUC; do not rely on it if you need calibrated predicted probabilities.
        }
        booster_linear_param = {
            'lambda': 1, # [default=1] L2 regularisation coefficient, i.e. the weight on the leaf scores in the objective; use it as needed.
            'alpha': 0, # [default=0] L1 regularisation coefficient; use it as needed.
            'lambda_bias': 0, # [default=0] L2 regularisation on the bias (there is no L1 term on the bias because the bias is not important there).
        }
        # 3. Learning-task parameters: tied to the objective function and control the learning scenario, e.g. regression and ranking problems use different settings.
        object_param = {
            'objective': 'binary:logistic', # [default=reg:linear] the learning task and corresponding objective; several objectives are available and the default is linear regression. 'binary:logistic' is logistic regression for binary classification and returns predicted probabilities (not class labels).
            'eval_metric':['error','auc'], # [default depends on objective] how the objective is evaluated: rmse by default for regression, error by default for classification.
            'base_score': 0.5, # [ default=0.5 ]
            'seed':666, # [ default=0 ] random number seed; default is 0.
        }
param.update(general_param)
param.update(booster_tree_param)
param.update(object_param)
param.update(kwargs)
return param
def train(self,train_feature, train_label, test_feature=pd.DataFrame(), test_label= | pd.DataFrame() | pandas.DataFrame |
from math import ceil
import numpy as np
import pandas as pd
from scipy.stats import ttest_ind, pearsonr
import matplotlib.pyplot as plt
import mpld3
from mpld3 import plugins
import matplotlib
import matplotlib.patches as patches
#from mpld3.plugins import PluginBase
#import jinja2
#import json
from genda.formats import grab_gene_location
from genda.plotting import (make_rectangle, should_not_plot)
def dosage_round(geno, threshold = 0.5):
""" Rounds dosage to threshold
"""
geno[ geno < 1 - threshold] = 0
geno[np.logical_and(geno >= 1-threshold, geno <= 1 + threshold)] = 1
geno[ geno > 1 + threshold] = 2
return geno
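# Example (sketch): with the default threshold of 0.5,
# dosage_round(np.array([0.1, 0.8, 1.4, 1.9])) returns array([0., 1., 1., 2.])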
def multiple_snp_aei_test(geno, outliers, allelic_ration, num_threshold=5):
"""
"""
raise NotImplementedError
def single_snp_aei_test(geno, outliers, allelic_ratio, num_threshold=5):
"""
"""
geno = geno[np.logical_not(outliers)]
het_combined = allelic_ratio[np.array(geno == 1)]
homo_combined = allelic_ratio[np.array(np.logical_or(geno==0, geno==2))]
if len(het_combined) < num_threshold or len(homo_combined) < num_threshold:
return(1)
else:
return(ttest_ind(het_combined, homo_combined, equal_var=False)[1])
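# Usage note (added): single_snp_aei_test drops genotypes flagged in `outliers`, splits the
# allelic ratios into heterozygous (geno == 1) and homozygous (geno == 0 or 2) groups, and
# returns the Welch t-test p-value, or 1 when either group has fewer than num_threshold values.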
class AEI_object(object):
""" A class storing the aei data for a particular gene.
"""
def __init__(self, aei_pvalues, gene_name, annot_table,
sufficient_hets, matrix_eQTL):
self.aei_pvalues = aei_pvalues
self.gene_name = gene_name
self.annot_table = annot_table.ix[aei_pvalues.index, :]
self.sufficient_hets = sufficient_hets
self.meQTL = matrix_eQTL
self.gene_names = 'Nothing'
def aei_bar_plot(self, dosage, cis_snp, tag_snp, gene_name=None):
""" AEI barplot
"""
nplots = 1
color = dosage_round(dosage.ix[cis_snp, self.hets_dict[tag_snp]])
fig, ax = plt.subplots(nrows=nplots, ncols=1, figsize=(12, 4*nplots),
sharey=False, sharex=True,
subplot_kw=dict(axisbg='#FFFFFF'))
if gene_name:
title = ('AEI at tag %s for %s and\n'
                     'colored by genotype at %s') % (tag_snp, gene_name, cis_snp)  # arguments ordered to match the format string
else:
title = "AEI at tag %s and\ncolored by genotype at %s" % (tag_snp,
cis_snp)
ax.set_title(title, fontsize=20)
ax.set_xlabel('Samples', fontsize=15)
ax.set_ylabel('Allelic Fraction ($log_{2}$)', fontsize=15)
width = 0.5
allelic_ratio = self.ratios.ix[self.hets_dict[tag_snp],
tag_snp]
allelic_ratio_i = np.argsort(allelic_ratio.values)
allelic_ratio = np.log2(allelic_ratio.iloc[allelic_ratio_i])
outliers = np.logical_not(np.logical_or(
allelic_ratio < -3.0 ,
allelic_ratio > 3.0
))
color_geno = []
color = color[allelic_ratio_i][outliers]
for i in color:
if i == 0 or i == 2:
color_geno.append('green')
else:
color_geno.append('orange')
allelic_ratio = allelic_ratio[outliers]
ind = np.arange(len(allelic_ratio))
rects1 = ax.bar(ind, allelic_ratio, width, color = color_geno)
ax.set_xlim((-1, len(allelic_ratio+1)))
return(fig)
def aei_plot(self, snp_plot=None, n_sufficient_hets=50, common_only=False):
""" AEI plots in mpld3
"""
x_scale=1e6
size_maf =((200 * self.maf) + 20)
cm = plt.cm.get_cmap('winter')
if type(snp_plot) == pd.Series or type(snp_plot) == list:
suff_hets = pd.Series(snp_plot, index = | pd.Index(snp_plot) | pandas.Index |
from __future__ import print_function
# this is a class to deal with aqs data
from builtins import zip
from builtins import range
from builtins import object
import os
from datetime import datetime
from zipfile import ZipFile
import pandas as pd
from numpy import array, arange
import inspect
import requests
class AQS(object):
def __init__(self):
# self.baseurl = 'https://aqs.epa.gov/aqsweb/airdata/'
self.objtype = 'AQS'
self.daily = False
self.baseurl = 'https://aqsdr1.epa.gov/aqsweb/aqstmp/airdata/'
self.dates = [datetime.strptime('2014-06-06 12:00:00', '%Y-%m-%d %H:%M:%S'),
datetime.strptime('2014-06-06 13:00:00', '%Y-%m-%d %H:%M:%S')]
self.renamedhcols = ['datetime_local', 'datetime', 'State_Code', 'County_Code',
'Site_Num', 'Parameter_Code', 'POC', 'Latitude', 'Longitude',
'Datum', 'Parameter_Name', 'Obs', 'Units',
'MDL', 'Uncertainty', 'Qualifier', 'Method_type', 'Method_Code',
'Method_Name', 'State_Name', 'County_Name', 'Date_of_Last_Change']
self.renameddcols = ['datetime_local', 'State_Code', 'County_Code', 'Site_Num',
'Parameter_Code', 'POC', 'Latitude', 'Longitude', 'Datum',
'Parameter_Name', 'Sample_Duration', 'Pollutant_Standard',
'Units', 'Event_Type', 'Observation_Count',
'Observation_Percent', 'Obs', '1st_Max_Value',
'1st_Max Hour', 'AQI', 'Method_Code', 'Method_Name',
'Local_Site_Name', 'Address', 'State_Name', 'County_Name',
'City_Name', 'MSA_Name', 'Date_of_Last_Change']
self.savecols = ['datetime_local', 'datetime', 'SCS',
'Latitude', 'Longitude', 'Obs', 'Units', 'Species']
self.se_states = array(
['Alabama', 'Florida', 'Georgia', 'Mississippi', 'North Carolina', 'South Carolina', 'Tennessee',
'Virginia', 'West Virginia'], dtype='|S14')
self.se_states_abv = array(
['AL', 'FL', 'GA', 'MS', 'NC', 'SC', 'TN',
'VA', 'WV'], dtype='|S14')
self.ne_states = array(['Connecticut', 'Delaware', 'District Of Columbia', 'Maine', 'Maryland', 'Massachusetts',
'New Hampshire', 'New Jersey', 'New York', 'Pennsylvania', 'Rhode Island', 'Vermont'],
dtype='|S20')
self.ne_states_abv = array(['CT', 'DE', 'DC', 'ME', 'MD', 'MA', 'NH', 'NJ', 'NY', 'PA', 'RI', 'VT'],
dtype='|S20')
self.nc_states = array(
['Illinois', 'Indiana', 'Iowa', 'Kentucky', 'Michigan',
'Minnesota', 'Missouri', 'Ohio', 'Wisconsin'],
dtype='|S9')
self.nc_states_abv = array(['IL', 'IN', 'IA', 'KY', 'MI', 'MN', 'MO', 'OH', 'WI'],
dtype='|S9')
self.sc_states = array(
['Arkansas', 'Louisiana', 'Oklahoma', 'Texas'], dtype='|S9')
self.sc_states_abv = array(['AR', 'LA', 'OK', 'TX'], dtype='|S9')
self.r_states = array(['Arizona', 'Colorado', 'Idaho', 'Kansas', 'Montana', 'Nebraska', 'Nevada', 'New Mexico',
'North Dakota', 'South Dakota', 'Utah', 'Wyoming'], dtype='|S12')
self.r_states_abv = array(['AZ', 'CO', 'ID', 'KS', 'MT', 'NE', 'NV', 'NM', 'ND', 'SD', 'UT', 'WY'],
dtype='|S12')
self.p_states = array(
['California', 'Oregon', 'Washington'], dtype='|S10')
self.p_states_abv = array(['CA', 'OR', 'WA'], dtype='|S10')
self.datadir = '.'
self.cwd = os.getcwd()
self.df = None # hourly dataframe
self.monitor_file = inspect.getfile(
self.__class__)[:-13] + '/data/monitoring_site_locations.dat'
self.monitor_df = None
self.d_df = None # daily dataframe
def check_file_size(self, url):
test = requests.head(url).headers
if int(test['Content-Length']) > 1000:
return True
else:
return False
def retrieve_aqs_hourly_pm25_data(self, dates):
import wget
i = dates[0]
year = i.strftime('%Y')
url1 = self.baseurl + 'hourly_88101_' + year + '.zip'
if self.check_file_size(url1):
print('Downloading Hourly PM25 FRM: ' + url1)
filename = wget.download(url1)
print('')
print('Unpacking: ' + url1)
dffrm = pd.read_csv(filename, parse_dates={'datetime': ['Date GMT', 'Time GMT'],
'datetime_local': ["Date Local", "Time Local"]},
infer_datetime_format=True)
dffrm.columns = self.renamedhcols
dffrm['SCS'] = array(
dffrm['State_Code'].values * 1.E7 +
dffrm['County_Code'].values * 1.E4 + dffrm['Site_Num'].values,
dtype='int32')
else:
dffrm = pd.DataFrame(columns=self.renamedhcols)
url2 = self.baseurl + 'hourly_88502_' + year + '.zip'
if self.check_file_size(url2):
print('Downloading Hourly PM25 NON-FRM: ' + url2)
filename = wget.download(url2)
print('')
print('Unpacking: ' + url2)
dfnfrm = pd.read_csv(filename, parse_dates={'datetime': ['Date GMT', 'Time GMT'],
'datetime_local': ["Date Local", "Time Local"]},
infer_datetime_format=True)
dfnfrm.columns = self.renamedhcols
dfnfrm['SCS'] = array(
dfnfrm['State_Code'].values * 1.E7 +
dfnfrm['County_Code'].values *
1.E4 + dfnfrm['Site_Num'].values,
dtype='int32')
else:
dfnfrm = pd.DataFrame(columns=self.renamedhcols)
if self.check_file_size(url1) | self.check_file_size(url2):
df = pd.concat([dfnfrm, dffrm], ignore_index=True)
df.loc[:, 'State_Code'] = pd.to_numeric(
df.State_Code, errors='coerce')
df.loc[:, 'Site_Num'] = pd.to_numeric(df.Site_Num, errors='coerce')
df.loc[:, 'County_Code'] = pd.to_numeric(
df.County_Code, errors='coerce')
df['SCS'] = array(df['State_Code'].values * 1.E7 + df['County_Code'].values * 1.E4 + df['Site_Num'].values,
dtype='int32')
# df.drop('Qualifier', axis=1, inplace=True)
df = self.get_species(df)
# df = self.get_region(df)
df = df.copy()[self.savecols]
df = self.add_metro_metadata2(df)
df['Species'] = 'PM2.5'
print('Saving file to: ' + self.datadir + '/' + \
'AQS_HOURLY_PM_25_88101_' + year + '.hdf')
df.to_hdf('AQS_HOURLY_PM_25_88101_' +
year + '.hdf', 'df', format='table')
else:
df = pd.DataFrame(columns=self.renamedhcols)
return df
def retrieve_aqs_hourly_ozone_data(self, dates):
import wget
i = dates[0]
year = i.strftime('%Y')
url = self.baseurl + 'hourly_44201_' + year + '.zip'
print('Downloading Hourly Ozone: ' + url)
filename = wget.download(url)
print('')
print('Unpacking: ' + url)
df = pd.read_csv(filename, parse_dates={'datetime': ['Date GMT', 'Time GMT'],
'datetime_local': ["Date Local", "Time Local"]},
infer_datetime_format=True)
df.columns = self.renamedhcols
df.loc[:, 'State_Code'] = pd.to_numeric(df.State_Code, errors='coerce')
df.loc[:, 'Site_Num'] = pd.to_numeric(df.Site_Num, errors='coerce')
df.loc[:, 'County_Code'] = pd.to_numeric(
df.County_Code, errors='coerce')
df['SCS'] = array(df['State_Code'].values * 1.E7 + df['County_Code'].values * 1.E4 + df['Site_Num'].values,
dtype='int32')
df.drop('Qualifier', axis=1, inplace=True)
df = self.get_species(df)
df = df.copy()[self.savecols]
df = self.add_metro_metadata2(df)
print('Saving file to: ' + self.datadir + '/' + \
'AQS_HOURLY_OZONE_44201_' + year + '.hdf')
df.to_hdf('AQS_HOURLY_OZONE_44201_' +
year + '.hdf', 'df', format='table')
return df
def retrieve_aqs_hourly_pm10_data(self, dates):
import wget
i = dates[0]
year = i.strftime('%Y')
url = self.baseurl + 'hourly_81102_' + year + '.zip'
print('Downloading Hourly PM10: ' + url)
filename = wget.download(url)
print('')
print('Unpacking: ' + url)
df = pd.read_csv(filename, parse_dates={'datetime': ['Date GMT', 'Time GMT'],
'datetime_local': ["Date Local", "Time Local"]},
infer_datetime_format=True)
df.columns = self.renamedhcols
df.loc[:, 'State_Code'] = pd.to_numeric(df.State_Code, errors='coerce')
df.loc[:, 'Site_Num'] = pd.to_numeric(df.Site_Num, errors='coerce')
df.loc[:, 'County_Code'] = pd.to_numeric(
df.County_Code, errors='coerce')
df['SCS'] = array(df['State_Code'].values * 1.E7 + df['County_Code'].values * 1.E4 + df['Site_Num'].values,
dtype='int32')
df.drop('Qualifier', axis=1, inplace=True)
df = self.get_species(df)
# df = self.get_region(df)
df = df.copy()[self.savecols]
df = self.add_metro_metadata2(df)
print('Saving file to: ' + self.datadir + '/' + \
'AQS_HOURLY_PM_10_81102_' + year + '.hdf')
df.to_hdf('AQS_HOURLY_PM_10_81102_' +
year + '.hdf', 'df', format='table')
return df
def retrieve_aqs_hourly_so2_data(self, dates):
import wget
i = dates[0]
year = i.strftime('%Y')
url = self.baseurl + 'hourly_42401_' + year + '.zip'
print('Downloading Hourly SO2: ' + url)
filename = wget.download(url)
print('')
print('Unpacking: ' + url)
df = pd.read_csv(filename, parse_dates={'datetime': ['Date GMT', 'Time GMT'],
'datetime_local': ["Date Local", "Time Local"]},
infer_datetime_format=True)
df.columns = self.renamedhcols
df.loc[:, 'State_Code'] = pd.to_numeric(df.State_Code, errors='coerce')
df.loc[:, 'Site_Num'] = pd.to_numeric(df.Site_Num, errors='coerce')
df.loc[:, 'County_Code'] = pd.to_numeric(
df.County_Code, errors='coerce')
df['SCS'] = array(df['State_Code'].values * 1.E7 + df['County_Code'].values * 1.E4 + df['Site_Num'].values,
dtype='int32')
df.drop('Qualifier', axis=1, inplace=True)
df = self.get_species(df)
# df = self.get_region(df)
df = df.copy()[self.savecols]
df = self.add_metro_metadata2(df)
print('Saving file to: ' + self.datadir + '/' + \
'AQS_HOURLY_SO2_42401_' + year + '.hdf')
df.to_hdf('AQS_HOURLY_SO2_42401_' + year +
'.hdf', 'df', format='table')
return df
def retrieve_aqs_hourly_no2_data(self, dates):
import wget
i = dates[0]
year = i.strftime('%Y')
url = self.baseurl + 'hourly_42602_' + year + '.zip'
print('Downloading Hourly NO2: ' + url)
filename = wget.download(url)
print('')
print('Unpacking: ' + url)
df = pd.read_csv(filename, parse_dates={'datetime': ['Date GMT', 'Time GMT'],
'datetime_local': ["Date Local", "Time Local"]},
infer_datetime_format=True)
df.columns = self.renamedhcols
df.loc[:, 'State_Code'] = pd.to_numeric(df.State_Code, errors='coerce')
df.loc[:, 'Site_Num'] = pd.to_numeric(df.Site_Num, errors='coerce')
df.loc[:, 'County_Code'] = pd.to_numeric(
df.County_Code, errors='coerce')
df['SCS'] = array(df['State_Code'].values * 1.E7 + df['County_Code'].values * 1.E4 + df['Site_Num'].values,
dtype='int32')
df.drop('Qualifier', axis=1, inplace=True)
df = self.get_species(df)
# df = self.get_region(df)
df = df.copy()[self.savecols]
df = self.add_metro_metadata2(df)
print('Saving file to: ' + self.datadir + '/' + \
'AQS_HOURLY_NO2_42602_' + year + '.hdf')
df.to_hdf('AQS_HOURLY_NO2_42602_' + year +
'.hdf', 'df', format='table')
return df
def retrieve_aqs_hourly_co_data(self, dates):
import wget
i = dates[0]
year = i.strftime('%Y')
url = self.baseurl + 'hourly_42101_' + year + '.zip'
print('Downloading Hourly CO: ' + url)
filename = wget.download(url)
print('')
print('Unpacking: ' + url)
df = pd.read_csv(filename, parse_dates={'datetime': ['Date GMT', 'Time GMT'],
'datetime_local': ["Date Local", "Time Local"]},
infer_datetime_format=True)
df.columns = self.renamedhcols
df.loc[:, 'State_Code'] = pd.to_numeric(df.State_Code, errors='coerce')
df.loc[:, 'Site_Num'] = pd.to_numeric(df.Site_Num, errors='coerce')
df.loc[:, 'County_Code'] = pd.to_numeric(
df.County_Code, errors='coerce')
df['SCS'] = array(df['State_Code'].values * 1.E7 + df['County_Code'].values * 1.E4 + df['Site_Num'].values,
dtype='int32')
df.drop('Qualifier', axis=1, inplace=True)
df = self.get_species(df)
# df = self.get_region(df)
df = df.copy()[self.savecols]
df = self.add_metro_metadata2(df)
print('Saving file to: ' + self.datadir + \
'/' + 'AQS_HOURLY_CO_42101_' + year + '.hdf')
df.to_hdf('AQS_HOURLY_CO_42101_' + year + '.hdf', 'df', format='table')
return df
def retrieve_aqs_hourly_nonoxnoy_data(self, dates):
import wget
i = dates[0]
year = i.strftime('%Y')
url = self.baseurl + 'hourly_NONOxNOy_' + year + '.zip'
print('Downloading Hourly NO NOx NOy: ' + url)
filename = wget.download(url)
print('')
print('Unpacking: ' + url)
df = pd.read_csv(filename, parse_dates={'datetime': ['Date GMT', 'Time GMT'],
'datetime_local': ["Date Local", "Time Local"]},
infer_datetime_format=True)
df.columns = self.renamedhcols
df.loc[:, 'State_Code'] = pd.to_numeric(df.State_Code, errors='coerce')
df.loc[:, 'Site_Num'] = pd.to_numeric(df.Site_Num, errors='coerce')
df.loc[:, 'County_Code'] = pd.to_numeric(
df.County_Code, errors='coerce')
df['SCS'] = array(df['State_Code'].values * 1.E7 + df['County_Code'].values * 1.E4 + df['Site_Num'].values,
dtype='int32')
df.drop('Qualifier', axis=1, inplace=True)
df = self.get_species(df)
# df = self.get_region(df)
df = df.copy()[self.savecols]
df = self.add_metro_metadata2(df)
print('Saving file to: ' + self.datadir + \
'/' + 'AQS_HOURLY_NONOXNOY_' + year + '.hdf')
df.to_hdf('AQS_HOURLY_NONOXNOY_' + year + '.hdf', 'df', format='table')
return df
def retrieve_aqs_hourly_voc_data(self, dates):
import wget
i = dates[0]
year = i.strftime('%Y')
url = self.baseurl + 'hourly_VOCS_' + year + '.zip'
print('Downloading Hourly VOCs: ' + url)
filename = wget.download(url)
print('')
print('Unpacking: ' + url)
df = pd.read_csv(filename, parse_dates={'datetime': ['Date GMT', 'Time GMT'],
'datetime_local': ["Date Local", "Time Local"]},
infer_datetime_format=True)
df.columns = self.renamedhcols
df.loc[:, 'State_Code'] = pd.to_numeric(df.State_Code, errors='coerce')
df.loc[:, 'Site_Num'] = pd.to_numeric(df.Site_Num, errors='coerce')
df.loc[:, 'County_Code'] = pd.to_numeric(
df.County_Code, errors='coerce')
df['SCS'] = array(df['State_Code'].values * 1.E7 + df['County_Code'].values * 1.E4 + df['Site_Num'].values,
dtype='int32')
df.drop('Qualifier', axis=1, inplace=True)
df = self.get_species(df, voc=True)
# df = self.get_region(df)
df = df.copy()[self.savecols]
df = self.add_metro_metadata2(df)
print('Saving file to: ' + self.datadir + \
'/' + 'AQS_HOURLY_VOC_' + year + '.hdf')
df.to_hdf('AQS_HOURLY_VOC_' + year + '.hdf', 'df', format='table')
return df
def retrieve_aqs_hourly_spec_data(self, dates):
import wget
i = dates[0]
year = i.strftime('%Y')
url = self.baseurl + 'hourly_SPEC_' + year + '.zip'
if self.check_file_size(url):
print('Downloading PM Speciation: ' + url)
filename = wget.download(url)
print('')
print('Unpacking: ' + url)
df = pd.read_csv(filename, parse_dates={'datetime': ['Date GMT', 'Time GMT'],
'datetime_local': ["Date Local", "Time Local"]},
infer_datetime_format=True)
df.columns = self.renamedhcols
df.loc[:, 'State_Code'] = pd.to_numeric(
df.State_Code, errors='coerce')
df.loc[:, 'Site_Num'] = pd.to_numeric(df.Site_Num, errors='coerce')
df.loc[:, 'County_Code'] = pd.to_numeric(
df.County_Code, errors='coerce')
df['SCS'] = array(df['State_Code'].values * 1.E7 + df['County_Code'].values * 1.E4 + df['Site_Num'].values,
dtype='int32')
df.drop('Qualifier', axis=1, inplace=True)
df = self.get_species(df)
df = df.copy()[self.savecols]
df = self.add_metro_metadata2(df)
print('Saving file to: ' + self.datadir + \
'/' + 'AQS_HOURLY_SPEC_' + year + '.hdf')
df.to_hdf('AQS_HOURLY_SPEC_' + year + '.hdf', 'df', format='table')
return df
else:
return pd.DataFrame(columns=self.renamedhcols)
def retrieve_aqs_hourly_wind_data(self, dates):
import wget
i = dates[0]
year = i.strftime('%Y')
url = self.baseurl + 'hourly_WIND_' + year + '.zip'
print('Downloading AQS WIND: ' + url)
filename = wget.download(url)
print('')
print('Unpacking: ' + url)
df = pd.read_csv(filename, parse_dates={'datetime': ['Date GMT', 'Time GMT'],
'datetime_local': ["Date Local", "Time Local"]},
infer_datetime_format=True)
df.columns = self.renamedhcols
df.loc[:, 'State_Code'] = pd.to_numeric(df.State_Code, errors='coerce')
df.loc[:, 'Site_Num'] = pd.to_numeric(df.Site_Num, errors='coerce')
df.loc[:, 'County_Code'] = pd.to_numeric(
df.County_Code, errors='coerce')
df['SCS'] = array(df['State_Code'].values * 1.E7 + df['County_Code'].values * 1.E4 + df['Site_Num'].values,
dtype='int32')
df.drop('Qualifier', axis=1, inplace=True)
df = self.get_species(df)
# df = self.get_region(df)
df = df.copy()[self.savecols]
df = self.add_metro_metadata2(df)
print('Saving file to: ' + self.datadir + \
'/' + 'AQS_HOURLY_WIND_' + year + '.hdf')
df.to_hdf('AQS_HOURLY_WIND_' + year + '.hdf', 'df', format='table')
return df
def retrieve_aqs_hourly_temp_data(self, dates):
import wget
i = dates[0]
year = i.strftime('%Y')
url = self.baseurl + 'hourly_TEMP_' + year + '.zip'
print('Downloading AQS TEMP: ' + url)
filename = wget.download(url)
print('')
print('Unpacking: ' + url)
df = pd.read_csv(filename, parse_dates={'datetime': ['Date GMT', 'Time GMT'],
'datetime_local': ["Date Local", "Time Local"]},
infer_datetime_format=True)
df.columns = self.renamedhcols
df.loc[:, 'State_Code'] = pd.to_numeric(df.State_Code, errors='coerce')
df.loc[:, 'Site_Num'] = pd.to_numeric(df.Site_Num, errors='coerce')
df.loc[:, 'County_Code'] = pd.to_numeric(
df.County_Code, errors='coerce')
df['SCS'] = array(df['State_Code'].values * 1.E7 + df['County_Code'].values * 1.E4 + df['Site_Num'].values,
dtype='int32')
df.drop('Qualifier', axis=1, inplace=True)
df = self.get_species(df)
# df = self.get_region(df)
df = df.copy()[self.savecols]
df = self.add_metro_metadata2(df)
print('Saving file to: ' + self.datadir + \
'/' + 'AQS_HOURLY_TEMP_' + year + '.hdf')
df.to_hdf('AQS_HOURLY_TEMP_' + year + '.hdf', 'df', format='table')
return df
def retrieve_aqs_hourly_rhdp_data(self, dates):
import wget
i = dates[0]
year = i.strftime('%Y')
url = self.baseurl + 'hourly_RH_DP_' + year + '.zip'
print('Downloading AQS RH and DP: ' + url)
filename = wget.download(url)
print('')
print('Unpacking: ' + url)
df = pd.read_csv(filename, parse_dates={'datetime': ['Date GMT', 'Time GMT'],
'datetime_local': ["Date Local", "Time Local"]},
infer_datetime_format=True)
df.columns = self.renamedhcols
df.loc[:, 'State_Code'] = pd.to_numeric(df.State_Code, errors='coerce')
df.loc[:, 'Site_Num'] = pd.to_numeric(df.Site_Num, errors='coerce')
df.loc[:, 'County_Code'] = pd.to_numeric(
df.County_Code, errors='coerce')
df['SCS'] = array(df['State_Code'].values * 1.E7 + df['County_Code'].values * 1.E4 + df['Site_Num'].values,
dtype='int32')
df.drop('Qualifier', axis=1, inplace=True)
df = self.get_species(df)
df = df.copy()[self.savecols]
df = self.add_metro_metadata2(df)
print('Saving file to: ' + self.datadir + \
'/' + 'AQS_HOURLY_RHDP_' + year + '.hdf')
df.to_hdf('AQS_HOURLY_RHDP_' + year + '.hdf', 'df', format='table')
return df
def load_aqs_pm25_data(self, dates):
year = dates[0].strftime('%Y')
fname = 'AQS_HOURLY_PM_25_88101_' + year + '.hdf'
if os.path.isfile(fname):
print("File Found, Loading: " + fname)
aqs = pd.read_hdf(fname)
else:
aqs = self.retrieve_aqs_hourly_pm25_data(dates)
if aqs.empty:
return aqs
else:
con = (aqs.datetime >= dates[0]) & (aqs.datetime <= dates[-1])
aqs = aqs[con]
aqs.index = arange(aqs.index.shape[0])
return aqs
def load_aqs_voc_data(self, dates):
year = dates[0].strftime('%Y')
fname = 'AQS_HOURLY_VOC_' + year + '.hdf'
if os.path.isfile(fname):
print("File Found, Loading: " + fname)
aqs = pd.read_hdf(fname)
else:
aqs = self.retrieve_aqs_hourly_voc_data(dates)
con = (aqs.datetime >= dates[0]) & (aqs.datetime <= dates[-1])
aqs = aqs[con]
aqs.index = arange(aqs.index.shape[0])
return aqs
def load_aqs_ozone_data(self, dates):
year = dates[0].strftime('%Y')
fname = 'AQS_HOURLY_OZONE_44201_' + year + '.hdf'
if os.path.isfile(fname):
print("File Found, Loading: " + fname)
aqs = pd.read_hdf(fname)
else:
aqs = self.retrieve_aqs_hourly_ozone_data(dates)
con = (aqs.datetime >= dates[0]) & (aqs.datetime <= dates[-1])
aqs = aqs[con]
aqs.Units = 'ppb'
aqs.Obs = aqs.Obs.values * 1000.
aqs.index = arange(aqs.index.shape[0])
return aqs
def load_aqs_pm10_data(self, dates):
year = dates[0].strftime('%Y')
fname = 'AQS_HOURLY_PM_10_81102_' + year + '.hdf'
if os.path.isfile(fname):
print("File Found, Loading: " + fname)
aqs = pd.read_hdf(fname)
else:
aqs = self.retrieve_aqs_hourly_pm10_data(dates)
con = (aqs.datetime >= dates[0]) & (aqs.datetime <= dates[-1])
aqs = aqs[con]
aqs.index = arange(aqs.index.shape[0])
return aqs
def load_aqs_so2_data(self, dates):
year = dates[0].strftime('%Y')
fname = 'AQS_HOURLY_SO2_42401_' + year + '.hdf'
if os.path.isfile(fname):
print("File Found, Loading: " + fname)
aqs = pd.read_hdf(fname)
else:
aqs = self.retrieve_aqs_hourly_so2_data(dates)
con = (aqs.datetime >= dates[0]) & (aqs.datetime <= dates[-1])
aqs = aqs[con]
aqs.index = arange(aqs.index.shape[0])
return aqs
def load_aqs_no2_data(self, dates):
year = dates[0].strftime('%Y')
fname = 'AQS_HOURLY_NO2_42602_' + year + '.hdf'
if os.path.isfile(fname):
print("File Found, Loading: " + fname)
aqs = pd.read_hdf(fname)
else:
aqs = self.retrieve_aqs_hourly_no2_data(dates)
con = (aqs.datetime >= dates[0]) & (aqs.datetime <= dates[-1])
aqs = aqs[con]
aqs.index = arange(aqs.index.shape[0])
return aqs
def load_aqs_co_data(self, dates):
year = dates[0].strftime('%Y')
fname = 'AQS_HOURLY_CO_42101_' + year + '.hdf'
if os.path.isfile(fname):
print("File Found, Loading: " + fname)
aqs = pd.read_hdf(fname)
else:
aqs = self.retrieve_aqs_hourly_co_data(dates)
con = (aqs.datetime >= dates[0]) & (aqs.datetime <= dates[-1])
aqs = aqs[con]
aqs.index = arange(aqs.index.shape[0])
return aqs
def load_aqs_spec_data(self, dates):
year = dates[0].strftime('%Y')
fname = 'AQS_HOURLY_SPEC_' + year + '.hdf'
if os.path.isfile(fname):
print("File Found, Loading: " + fname)
aqs = pd.read_hdf(fname)
else:
print('Retrieving Data')
aqs = self.retrieve_aqs_hourly_spec_data(dates)
if aqs.empty:
return pd.DataFrame(columns=self.renamedhcols)
else:
con = (aqs.datetime >= dates[0]) & (aqs.datetime <= dates[-1])
aqs = aqs[con]
aqs.index = arange(aqs.index.shape[0])
return aqs
def load_aqs_wind_data(self, dates):
year = dates[0].strftime('%Y')
fname = 'AQS_HOURLY_WIND_' + year + '.hdf'
if os.path.isfile(fname):
print("File Found, Loading: " + fname)
aqs = pd.read_hdf(fname)
else:
aqs = self.retrieve_aqs_hourly_wind_data(dates)
con = (aqs.datetime >= dates[0]) & (aqs.datetime <= dates[-1])
aqs = aqs[con]
aqs.index = arange(aqs.index.shape[0])
return aqs
def load_aqs_temp_data(self, dates):
year = dates[0].strftime('%Y')
fname = 'AQS_HOURLY_TEMP_' + year + '.hdf'
if os.path.isfile(fname):
print("File Found, Loading: " + fname)
aqs = pd.read_hdf(fname)
else:
aqs = self.retrieve_aqs_hourly_temp_data(dates)
con = (aqs.datetime >= dates[0]) & (aqs.datetime <= dates[-1])
aqs = aqs[con]
aqs.index = arange(aqs.index.shape[0])
return aqs
def load_aqs_rhdp_data(self, dates):
year = dates[0].strftime('%Y')
fname = 'AQS_HOURLY_RHDP_' + year + '.hdf'
if os.path.isfile(fname):
print("File Found, Loading: " + fname)
aqs = pd.read_hdf(fname)
else:
aqs = self.retrieve_aqs_hourly_rhdp_data(dates)
con = (aqs.datetime >= dates[0]) & (aqs.datetime <= dates[-1])
aqs = aqs[con]
aqs.index = arange(aqs.index.shape[0])
return aqs
def load_aqs_nonoxnoy_data(self, dates):
year = dates[0].strftime('%Y')
fname = 'AQS_HOURLY_NONOXNOY_' + year + '.hdf'
if os.path.isfile(fname):
print("File Found, Loading: " + fname)
aqs = pd.read_hdf(fname)
else:
aqs = self.retrieve_aqs_hourly_nonoxnoy_data(dates)
con = (aqs.datetime >= dates[0]) & (aqs.datetime <= dates[-1])
aqs = aqs[con]
aqs.index = arange(aqs.index.shape[0])
return aqs
def load_data(self, param, dates):
if param == 'PM2.5':
df = self.load_aqs_pm25_data(dates)
elif param == 'PM10':
df = self.load_aqs_pm10_data(dates)
elif param == 'SPEC':
df = self.load_aqs_spec_data(dates)
elif param == 'CO':
df = self.load_aqs_co_data(dates)
elif param == 'OZONE':
df = self.load_aqs_ozone_data(dates)
elif param == 'SO2':
df = self.load_aqs_so2_data(dates)
elif param == 'VOC':
df = self.load_aqs_voc_data(dates)
elif param == 'NONOXNOY':
df = self.load_aqs_nonoxnoy_data(dates)
elif param == 'WIND':
df = self.load_aqs_wind_data(dates)
elif param == 'TEMP':
df = self.load_aqs_temp_data(dates)
elif param == 'RHDP':
df = self.load_aqs_rhdp_data(dates)
return df
def load_daily_data(self, param, dates):
if param == 'PM2.5':
df = self.load_aqs_daily_pm25_data(dates)
elif param == 'PM10':
df = self.load_aqs_daily_pm10_data(dates)
elif param == 'SPEC':
df = self.load_aqs_daily_spec_data(dates)
elif param == 'CO':
df = self.load_aqs_daily_co_data(dates)
elif param == 'OZONE':
df = self.load_aqs_daily_no2_data(dates)
elif param == 'SO2':
df = self.load_aqs_daily_so2_data(dates)
elif param == 'VOC':
df = self.load_aqs_daily_voc_data(dates)
elif param == 'NONOXNOY':
df = self.load_aqs_daily_nonoxnoy_data(dates)
elif param == 'WIND':
df = self.load_aqs_daily_wind_data(dates)
elif param == 'TEMP':
df = self.load_aqs_daily_temp_data(dates)
elif param == 'RHDP':
df = self.load_aqs_daily_rhdp_data(dates)
return df
def load_all_hourly_data2(self, dates, datasets='all'):
import dask
import dask.dataframe as dd
os.chdir(self.datadir)
params = ['SPEC', 'PM10', 'PM2.5', 'CO', 'OZONE',
'SO2', 'VOC', 'NONOXNOY', 'WIND', 'TEMP', 'RHDP']
dfs = [dask.delayed(self.load_data)(i, dates) for i in params]
dff = dd.from_delayed(dfs)
# dff = dff.drop_duplicates()
self.df = dff.compute()
self.df = self.change_units(self.df)
# self.df = pd.concat(dfs, ignore_index=True)
# self.df = self.change_units(self.df).drop_duplicates(subset=['datetime','SCS','Species','Obs']).dropna(subset=['Obs'])
os.chdir(self.cwd)
def load_all_daily_data(self, dates, datasets='all'):
import dask
import dask.dataframe as dd
from dask.diagnostics import ProgressBar
os.chdir(self.datadir)
pbar = ProgressBar()
pbar.register()
params = ['SPEC', 'PM10', 'PM2.5', 'CO', 'OZONE',
'SO2', 'VOC', 'NONOXNOY', 'WIND', 'TEMP', 'RHDP']
# dfs = [dask.delayed(self.load_daily_data)(i,dates) for i in params]
# print dfs
# dff = dd.from_delayed(dfs)
# self.d_df = dff.compute()
dfs = [self.load_daily_data(i, dates) for i in params]
self.d_df = pd.concat(dfs, ignore_index=True)
self.d_df = self.change_units(self.d_df)
os.chdir(self.cwd)
def get_all_hourly_data(self, dates):
os.chdir(self.datadir)
dfs = [self.load_aqs_co_data(dates), self.load_aqs_pm10_data(dates), self.load_aqs_ozone_data(dates),
self.load_aqs_pm25_data(dates), self.load_aqs_spec_data(
dates), self.load_aqs_no2_data(dates),
self.load_aqs_so2_data(dates), self.load_aqs_voc_data(
dates), self.load_aqs_nonoxnoy_data(dates),
self.load_aqs_wind_data(dates), self.load_aqs_temp_data(dates), self.load_aqs_rhdp_data(dates)]
os.chdir(self.cwd)
def load_all_hourly_data(self, dates, datasets='all'):
os.chdir(self.datadir)
if datasets.upper() == 'PM':
dfs = [self.load_aqs_pm10_data(dates), self.load_aqs_pm25_data(
dates), self.load_aqs_spec_data(dates)]
else:
dfs = [self.load_aqs_co_data(dates), self.load_aqs_pm10_data(dates), self.load_aqs_ozone_data(dates),
self.load_aqs_pm25_data(dates), self.load_aqs_spec_data(
dates), self.load_aqs_no2_data(dates),
self.load_aqs_so2_data(dates), self.load_aqs_voc_data(
dates), self.load_aqs_nonoxnoy_data(dates),
self.load_aqs_wind_data(
dates), self.load_aqs_temp_data(dates),
self.load_aqs_rhdp_data(dates)] # ,self.load_aqs_daily_spec_data(dates)]
self.df = pd.concat(dfs, ignore_index=True)
self.df = self.change_units(self.df).drop_duplicates()
os.chdir(self.cwd)
def load_aqs_daily_pm25_data(self, dates):
from datetime import timedelta
year = dates[0].strftime('%Y')
fname = self.datadir + '/' + 'AQS_DAILY_PM25_' + year + '.hdf'
if os.path.isfile(fname):
aqs = pd.read_hdf(fname)
else:
aqs = self.retrieve_aqs_daily_pm25_data(dates)
# aqs = pd.read_hdf(fname)
con = (aqs.datetime >=
dates[0] - timedelta(days=1)) & (aqs.datetime <= dates[-1])
aqs = aqs[con]
aqs.index = arange(aqs.index.shape[0])
return aqs
def load_aqs_daily_rhdp_data(self, dates):
from datetime import timedelta
year = dates[0].strftime('%Y')
fname = self.datadir + '/' + 'AQS_DAILY_RHDP_' + year + '.hdf'
if os.path.isfile(fname):
aqs = pd.read_hdf(fname)
else:
aqs = self.retrieve_aqs_daily_rhdp_data(dates)
# aqs = pd.read_hdf(fname)
con = (aqs.datetime >=
dates[0] - timedelta(days=1)) & (aqs.datetime <= dates[-1])
aqs = aqs[con]
aqs.index = arange(aqs.index.shape[0])
return aqs
def load_aqs_daily_ozone_data(self, dates):
year = dates[0].strftime('%Y')
fname = self.datadir + '/' + 'AQS_DAILY_OZONE_' + year + '.hdf'
if os.path.isfile(fname):
aqs = pd.read_hdf(fname)
else:
aqs = self.retrieve_aqs_daily_ozone_data(dates)
con = (aqs.datetime >= dates[0]) & (aqs.datetime <= dates[-1])
aqs = aqs[con]
aqs.index = arange(aqs.index.shape[0])
return aqs
def load_aqs_daily_nonoxnoy_data(self, dates):
year = dates[0].strftime('%Y')
fname = self.datadir + '/' + 'AQS_DAILY_NONOXNOY_' + year + '.hdf'
if os.path.isfile(fname):
aqs = pd.read_hdf(fname)
else:
aqs = self.retrieve_aqs_daily_nonoxnoy_data(dates)
con = (aqs.datetime >= dates[0]) & (aqs.datetime <= dates[-1])
aqs = aqs[con]
aqs.index = arange(aqs.index.shape[0])
return aqs
def load_aqs_daily_pm10_data(self, dates):
year = dates[0].strftime('%Y')
fname = self.datadir + '/' + 'AQS_DAILY_PM10_' + year + '.hdf'
if os.path.isfile(fname):
aqs = pd.read_hdf(fname)
else:
aqs = self.retrieve_aqs_daily_pm10_data(dates)
con = (aqs.datetime >= dates[0]) & (aqs.datetime <= dates[-1])
aqs = aqs[con]
aqs.index = arange(aqs.index.shape[0])
return aqs
def load_aqs_daily_so2_data(self, dates):
year = dates[0].strftime('%Y')
fname = self.datadir + '/' + 'AQS_DAILY_SO2_' + year + '.hdf'
if os.path.isfile(fname):
aqs = pd.read_hdf(fname)
else:
aqs = self.retrieve_aqs_daily_so2_data(dates)
con = (aqs.datetime >= dates[0]) & (aqs.datetime <= dates[-1])
aqs = aqs[con]
aqs.index = arange(aqs.index.shape[0])
return aqs
def load_aqs_daily_no2_data(self, dates):
year = dates[0].strftime('%Y')
fname = self.datadir + '/' + 'AQS_DAILY_NO2_' + year + '.hdf'
if os.path.isfile(fname):
aqs = pd.read_hdf(fname)
else:
aqs = self.retrieve_aqs_daily_no2_data(dates)
con = (aqs.datetime >= dates[0]) & (aqs.datetime <= dates[-1])
aqs = aqs[con]
aqs.index = arange(aqs.index.shape[0])
self.aqsdf = aqs
return aqs
def load_aqs_daily_co_data(self, dates):
year = dates[0].strftime('%Y')
fname = self.datadir + '/' + 'AQS_DAILY_CO_' + year + '.hdf'
if os.path.isfile(fname):
aqs = pd.read_hdf(fname)
else:
aqs = self.retrieve_aqs_daily_co_data(dates)
con = (aqs.datetime >= dates[0]) & (aqs.datetime <= dates[-1])
aqs = aqs[con]
aqs.index = arange(aqs.index.shape[0])
return aqs
def load_aqs_daily_temp_data(self, dates):
year = dates[0].strftime('%Y')
fname = self.datadir + '/' + 'AQS_DAILY_TEMP_' + year + '.hdf'
if os.path.isfile(fname):
aqs = pd.read_hdf(fname)
else:
aqs = self.retrieve_aqs_daily_temp_data(dates)
con = (aqs.datetime >= dates[0]) & (aqs.datetime <= dates[-1])
aqs = aqs[con]
aqs.index = arange(aqs.index.shape[0])
return aqs
def load_aqs_daily_rh_dp_data(self, dates):
year = dates[0].strftime('%Y')
fname = self.datadir + '/' + 'AQS_DAILY_RH_DP_' + year + '.hdf'
if os.path.isfile(fname):
aqs = pd.read_hdf(fname)
else:
aqs = self.retrieve_aqs_daily_rh_dp_data(dates)
con = (aqs.datetime >= dates[0]) & (aqs.datetime <= dates[-1])
aqs = aqs[con]
aqs.index = arange(aqs.index.shape[0])
return aqs
def load_aqs_daily_spec_data(self, dates):
year = dates[0].strftime('%Y')
fname = self.datadir + '/' + 'AQS_DAILY_SPEC_' + year + '.hdf'
if os.path.isfile(fname):
aqs = pd.read_hdf(fname)
else:
aqs = self.retrieve_aqs_daily_spec_data(dates)
con = (aqs.datetime >= dates[0]) & (aqs.datetime <= dates[-1])
aqs = aqs[con]
aqs.index = arange(aqs.index.shape[0])
return aqs
def load_aqs_daily_voc_data(self, dates):
year = dates[0].strftime('%Y')
fname = self.datadir + '/' + 'AQS_DAILY_VOC_' + year + '.hdf'
if os.path.isfile(fname):
aqs = pd.read_hdf(fname)
else:
aqs = self.retrieve_aqs_daily_voc_data(dates)
con = (aqs.datetime >= dates[0]) & (aqs.datetime <= dates[-1])
aqs = aqs[con]
aqs.index = arange(aqs.index.shape[0])
return aqs
def load_aqs_daily_wind_data(self, dates):
year = dates[0].strftime('%Y')
fname = self.datadir + '/' + 'AQS_DAILY_WIND_' + year + '.hdf'
if os.path.isfile(fname):
aqs = pd.read_hdf(fname)
else:
aqs = self.retrieve_aqs_daily_wind_data(dates)
con = (aqs.datetime >= dates[0]) & (aqs.datetime <= dates[-1])
aqs = aqs[con]
aqs.index = arange(aqs.index.shape[0])
return aqs
def tzutc(self, lon, lat, dates):
from tzwhere import tzwhere
import pytz
tz = tzwhere.tzwhere(forceTZ=True, shapely=True)
a = dates.astype('M8[s]').astype('O')
local, offset = [], []
for i, j, d in zip(lon, lat, a):
l = tz.tzNameAt(j, i, forceTZ=True)
timezone = pytz.timezone(l)
n = d.replace(tzinfo=pytz.UTC)
r = d.replace(tzinfo=timezone)
rdst = timezone.normalize(r)
local.append(n - n.astimezone(timezone).utcoffset())
offset.append((rdst.utcoffset() + rdst.dst()
).total_seconds() // 3600)
return array(local), array(offset)
def get_species(self, df, voc=False):
pc = df.Parameter_Code.unique()
df['Species'] = ''
if voc:
df['Species'] = df.Parameter_Name.str.upper()
return df
for i in pc:
con = df.Parameter_Code == i
if (i == 88101) | (i == 88502):
df.loc[con, 'Species'] = 'PM2.5'
if i == 44201:
df.loc[con, 'Species'] = 'OZONE'
if i == 81102:
df.loc[con, 'Species'] = 'PM10'
if i == 42401:
df.loc[con, 'Species'] = 'SO2'
if i == 42602:
df.loc[con, 'Species'] = 'NO2'
if i == 42101:
df.loc[con, 'Species'] = 'CO'
if i == 62101:
df.loc[con, 'Species'] = 'TEMP'
if i == 88305:
df.loc[con, 'Species'] = 'OC'
if i == 88306:
df.loc[con, 'Species'] = 'NO3f'
if (i == 88307):
df.loc[con, 'Species'] = 'ECf'
if i == 88316:
df.loc[con, 'Species'] = 'ECf_optical'
if i == 88403:
df.loc[con, 'Species'] = 'SO4f'
if i == 88312:
df.loc[con, 'Species'] = 'TCf'
if i == 88104:
df.loc[con, 'Species'] = 'Alf'
if i == 88107:
df.loc[con, 'Species'] = 'Baf'
if i == 88313:
df.loc[con, 'Species'] = 'BCf'
if i == 88109:
df.loc[con, 'Species'] = 'Brf'
if i == 88110:
df.loc[con, 'Species'] = 'Cdf'
if i == 88111:
df.loc[con, 'Species'] = 'Caf'
if i == 88117:
df.loc[con, 'Species'] = 'Cef'
if i == 88118:
df.loc[con, 'Species'] = 'Csf'
if i == 88203:
df.loc[con, 'Species'] = 'Cl-f'
if i == 88115:
df.loc[con, 'Species'] = 'Clf'
if i == 88112:
df.loc[con, 'Species'] = 'Crf'
if i == 88113:
df.loc[con, 'Species'] = 'Cof'
if i == 88114:
df.loc[con, 'Species'] = 'Cuf'
if i == 88121:
df.loc[con, 'Species'] = 'Euf'
if i == 88143:
df.loc[con, 'Species'] = 'Auf'
if i == 88127:
df.loc[con, 'Species'] = 'Hff'
if i == 88131:
df.loc[con, 'Species'] = 'Inf'
if i == 88126:
df.loc[con, 'Species'] = 'Fef'
if i == 88146:
df.loc[con, 'Species'] = 'Laf'
if i == 88128:
df.loc[con, 'Species'] = 'Pbf'
if i == 88140:
df.loc[con, 'Species'] = 'Mgf'
if i == 88132:
df.loc[con, 'Species'] = 'Mnf'
if i == 88142:
df.loc[con, 'Species'] = 'Hgf'
if i == 88134:
df.loc[con, 'Species'] = 'Mof'
if i == 88136:
df.loc[con, 'Species'] = 'Nif'
if i == 88147:
df.loc[con, 'Species'] = 'Nbf'
if i == 88310:
df.loc[con, 'Species'] = 'NO3f'
if i == 88152:
df.loc[con, 'Species'] = 'Pf'
if i == 88303:
df.loc[con, 'Species'] = 'K+f'
if i == 88176:
df.loc[con, 'Species'] = 'Rbf'
if i == 88162:
df.loc[con, 'Species'] = 'Smf'
if i == 88163:
df.loc[con, 'Species'] = 'Scf'
if i == 88154:
df.loc[con, 'Species'] = 'Sef'
if i == 88165:
df.loc[con, 'Species'] = 'Sif'
if i == 88166:
df.loc[con, 'Species'] = 'Agf'
if i == 88302:
df.loc[con, 'Species'] = 'Na+f'
if i == 88184:
df.loc[con, 'Species'] = 'Naf'
if i == 88168:
df.loc[con, 'Species'] = 'Srf'
if i == 88403:
df.loc[con, 'Species'] = 'SO4f'
if i == 88169:
df.loc[con, 'Species'] = 'Sf'
if i == 88170:
df.loc[con, 'Species'] = 'Taf'
if i == 88172:
df.loc[con, 'Species'] = 'Tbf'
if i == 88160:
df.loc[con, 'Species'] = 'Snf'
if i == 88161:
df.loc[con, 'Species'] = 'Tif'
if i == 88312:
df.loc[con, 'Species'] = 'TOT_Cf'
if i == 88310:
df.loc[con, 'Species'] = 'NON-VOLITILE_NO3f'
if i == 88309:
df.loc[con, 'Species'] = 'VOLITILE_NO3f'
if i == 88186:
df.loc[con, 'Species'] = 'Wf'
if i == 88314:
df.loc[con, 'Species'] = 'C_370nmf'
if i == 88179:
df.loc[con, 'Species'] = 'Uf'
if i == 88164:
df.loc[con, 'Species'] = 'Vf'
if i == 88183:
df.loc[con, 'Species'] = 'Yf'
if i == 88167:
df.loc[con, 'Species'] = 'Znf'
if i == 88185:
df.loc[con, 'Species'] = 'Zrf'
if i == 88102:
df.loc[con, 'Species'] = 'Sbf'
if i == 88103:
df.loc[con, 'Species'] = 'Asf'
if i == 88105:
df.loc[con, 'Species'] = 'Bef'
if i == 88124:
df.loc[con, 'Species'] = 'Gaf'
if i == 88185:
df.loc[con, 'Species'] = 'Irf'
if i == 88180:
df.loc[con, 'Species'] = 'Kf'
if i == 88301:
df.loc[con, 'Species'] = 'NH4+f'
if (i == 88320) | (i == 88355):
df.loc[con, 'Species'] = 'OCf'
if (i == 88357) | (i == 88321):
df.loc[con, 'Species'] = 'ECf'
if i == 42600:
df.loc[con, 'Species'] = 'NOY'
if i == 42601:
df.loc[con, 'Species'] = 'NO'
if i == 42603:
df.loc[con, 'Species'] = 'NOX'
if (i == 61103) | (i == 61101):
df.loc[con, 'Species'] = 'WS'
if (i == 61104) | (i == 61102):
df.loc[con, 'Species'] = 'WD'
if i == 62201:
df.loc[con, 'Species'] = 'RH'
if i == 62103:
df.loc[con, 'Species'] = 'DP'
return df
@staticmethod
def change_units(df):
units = df.Units.unique()
for i in units:
con = df.Units == i
if i.upper() == 'Parts per billion Carbon'.upper():
df.loc[con, 'Units'] = 'ppbC'
if i == 'Parts per billion':
df.loc[con, 'Units'] = 'ppb'
if i == 'Parts per million':
df.loc[con, 'Units'] = 'ppm'
if i == 'Micrograms/cubic meter (25 C)':
df.loc[con, 'Units'] = 'UG/M3'.lower()
if i == 'Degrees Centigrade':
df.loc[con, 'Units'] = 'C'
if i == 'Micrograms/cubic meter (LC)':
df.loc[con, 'Units'] = 'UG/M3'.lower()
if i == 'Knots':
df.loc[con, 'Obs'] *= 0.51444
df.loc[con, 'Units'] = 'M/S'.lower()
if i == 'Degrees Fahrenheit':
df.loc[con, 'Obs'] = (df.loc[con, 'Obs'] + 459.67) * 5. / 9.
df.loc[con, 'Units'] = 'K'
if i == 'Percent relative humidity':
df.loc[con, 'Units'] = '%'
return df
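    # Example (sketch) of the conversions above: 10 knots -> 10 * 0.51444 = 5.1444 m/s,
    # and 68 degrees Fahrenheit -> (68 + 459.67) * 5 / 9 = 293.15 K.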
def read_monitor_file(self):
        if self.monitor_df is None:
if os.path.isfile(self.monitor_file):
fname = self.monitor_file
colsinuse = [0, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21]
f = pd.read_csv(fname, delimiter='|',
header=None, usecols=colsinuse)
f.columns = ['SCS', 'Site_Code', 'Site_Name', 'Status', 'Agency', 'Agency_Name', 'EPA_region',
'Latitude',
'Longitude', 'Elevation', 'GMT_Offset', 'Country_Code', 'CMSA_Code', 'CMSA_Name',
'MSA_Code',
'MSA_Name', 'State_Code', 'State_Name', 'County_Code', 'County_Name', 'City_Code']
self.monitor_df = f.drop_duplicates().dropna(
subset=['SCS']).copy()
self.monitor_df.SCS = self.monitor_df.SCS.values.astype(
'int32')
else:
print(' Monitor File not found. Meta-Data city names not added')
f = None
def add_metro_metadata2(self, df):
from numpy import NaN
        if self.monitor_df is not None:
dfs = self.monitor_df[['SCS', 'MSA_Name', 'State_Name', 'County_Name', 'EPA_region', 'MSA_Code',
'GMT_Offset']].drop_duplicates()
dfs.SCS = dfs.SCS.values.astype('int32')
df = pd.merge(df, dfs, on=['SCS'], how='left')
elif os.path.isfile(self.monitor_file):
print(' Monitor Station Meta-Data Found: Compiling Dataset')
self.read_monitor_file()
dfs = self.monitor_df[
['SCS', 'MSA_Name', 'State_Name', 'County_Name', 'EPA_region', 'GMT_Offset']].drop_duplicates()
dfs.SCS = dfs.SCS.values.astype('int32')
df = pd.merge(df, dfs, on=['SCS'], how='left')
return df
def retrieve_aqs_daily_voc_data(self, dates):
import wget
from numpy import NaN, int64
i = dates[0]
year = i.strftime('%Y')
url = self.baseurl + 'daily_VOCS_' + year + '.zip'
print('Downloading: ' + url)
filename = wget.download(url)
print('')
print('Unpacking: ' + url)
def dateparse(x): return pd.datetime.strptime(x, '%Y-%m-%d')
# ZipFile(filename).extractall()
df = pd.read_csv(filename, parse_dates={'datetime_local': ["Date Local"]},
date_parser=dateparse)
df.columns = self.renameddcols
df['SCS'] = array(df['State_Code'].values * 1.E7 + df['County_Code'].values * 1.E4 + df['Site_Num'].values,
dtype='int32')
df.loc[df.Parameter_Code == 68101, 'Parameter_Code'] = NaN
df.loc[df.Parameter_Code == 68102, 'Parameter_Code'] = NaN
df.loc[df.Parameter_Code == 68108, 'Parameter_Code'] = NaN
df.loc[df.Parameter_Code == 68103, 'Parameter_Code'] = NaN
df.loc[df.Parameter_Code == 68104, 'Parameter_Code'] = NaN
df.loc[df.Parameter_Code == 68105, 'Parameter_Code'] = NaN
df.loc[df.Parameter_Code == 68104, 'Parameter_Code'] = NaN
df.loc[df.Parameter_Code == 68106, 'Parameter_Code'] = NaN
df.loc[df.Parameter_Code == 68107, 'Parameter_Code'] = NaN
df.dropna(subset=['Parameter_Code'], inplace=True)
df.loc[:, 'Parameter_Code'] = df.Parameter_Code.astype(int64)
df = self.read_monitor_and_site(df)
df['SCS'] = df.SCS.astype(str).str.zfill(9)
df['datetime'] = df.datetime_local - \
pd.to_timedelta(df.GMT_Offset, unit='h')
df = self.get_species(df)
print('Saving file to: ' + self.datadir + \
'/' + 'AQS_DAILY_VOC_' + year + '.hdf')
df.to_hdf('AQS_DAILY_VOC_' + year + '.hdf', 'df', format='table')
return df
def retrieve_aqs_daily_temp_data(self, dates):
import wget
from numpy import NaN, int64
i = dates[0]
year = i.strftime('%Y')
url = self.baseurl + 'daily_TEMP_' + year + '.zip'
print('Downloading: ' + url)
filename = wget.download(url)
print('')
print('Unpacking: ' + url)
def dateparse(x): return pd.datetime.strptime(x, '%Y-%m-%d')
df = pd.read_csv(filename, parse_dates={'datetime_local': ["Date Local"]},
date_parser=dateparse)
df.columns = self.renameddcols
df['SCS'] = array(df['State_Code'].values * 1.E7 + df['County_Code'].values * 1.E4 + df['Site_Num'].values,
dtype='int32')
df.loc[df.Parameter_Code == 68101, 'Parameter_Code'] = NaN
df.loc[df.Parameter_Code == 68102, 'Parameter_Code'] = NaN
df.loc[df.Parameter_Code == 68108, 'Parameter_Code'] = NaN
df.loc[df.Parameter_Code == 68103, 'Parameter_Code'] = NaN
df.loc[df.Parameter_Code == 68104, 'Parameter_Code'] = NaN
df.loc[df.Parameter_Code == 68105, 'Parameter_Code'] = NaN
df.loc[df.Parameter_Code == 68104, 'Parameter_Code'] = NaN
df.loc[df.Parameter_Code == 68106, 'Parameter_Code'] = NaN
df.loc[df.Parameter_Code == 68107, 'Parameter_Code'] = NaN
df.dropna(subset=['Parameter_Code'], inplace=True)
df.loc[:, 'Parameter_Code'] = df.Parameter_Code.astype(int64)
df = self.read_monitor_and_site(df)
df['SCS'] = df.SCS.astype(str).str.zfill(9)
df['datetime'] = df.datetime_local - \
pd.to_timedelta(df.GMT_Offset, unit='h')
df = self.get_species(df)
print('Saving file to: ' + self.datadir + \
'/' + 'AQS_DAILY_TEMP_' + year + '.hdf')
df.to_hdf('AQS_DAILY_TEMP_' + year + '.hdf', 'df', format='table')
return df
def retrieve_aqs_daily_rhdp_data(self, dates):
import wget
from numpy import NaN, int64
i = dates[0]
year = i.strftime('%Y')
url = self.baseurl + 'daily_RH_DP_' + year + '.zip'
print('Downloading: ' + url)
filename = wget.download(url)
print('')
print('Unpacking: ' + url)
def dateparse(x): return pd.datetime.strptime(x, '%Y-%m-%d')
df = pd.read_csv(filename, parse_dates={'datetime_local': ["Date Local"]},
date_parser=dateparse)
df.columns = self.renameddcols
df['SCS'] = array(df['State_Code'].values * 1.E7 + df['County_Code'].values * 1.E4 + df['Site_Num'].values,
dtype='int32')
df.loc[df.Parameter_Code == 68101, 'Parameter_Code'] = NaN
df.loc[df.Parameter_Code == 68102, 'Parameter_Code'] = NaN
df.loc[df.Parameter_Code == 68108, 'Parameter_Code'] = NaN
df.loc[df.Parameter_Code == 68103, 'Parameter_Code'] = NaN
df.loc[df.Parameter_Code == 68104, 'Parameter_Code'] = NaN
df.loc[df.Parameter_Code == 68105, 'Parameter_Code'] = NaN
df.loc[df.Parameter_Code == 68104, 'Parameter_Code'] = NaN
df.loc[df.Parameter_Code == 68106, 'Parameter_Code'] = NaN
df.loc[df.Parameter_Code == 68107, 'Parameter_Code'] = NaN
df.dropna(subset=['Parameter_Code'], inplace=True)
df.loc[:, 'Parameter_Code'] = df.Parameter_Code.astype(int64)
df = self.read_monitor_and_site(df)
df['SCS'] = df.SCS.astype(str).str.zfill(9)
df['datetime'] = df.datetime_local - \
| pd.to_timedelta(df.GMT_Offset, unit='h') | pandas.to_timedelta |