| prompt | completion | api |
|---|---|---|
| stringlengths 19 to 1.03M | stringlengths 4 to 2.12k | stringlengths 8 to 90 |
# -*- coding: utf-8 -*-
import sqlalchemy as sql
import pandas as pd
from datetime import timedelta
import numpy as np
def load_nvx(nvx,tickers,start_date,end_date):
##############################################
#
# Loads Data for Fundamental Value Ratios:
#
# nvx
# 'nvt': Network Value (Marketcap) to Transaction Volume
# 'nvv': Network Value (Marketcap) to Trading Volume
# 'nva': Network Value (Marketcap) to Active Addresses
# 'metcalfe': Network Value (Marketcap) to (Active Addresses)^2
#
# tickers
# List of Tickers to get Ratios for.
# ['ticker1','ticker2']
# Complete List of possible tickers:
#
# start_date, end_date
# 'YYYY-MM-DD'
# Example: '2018-01-01'
#
# Returns a Pandas Dataframe.
# Missing Values = 'NaN'
#
##############################################
## Load Data:
tickers = ['Date'] + tickers
ticker_str = ', '.join("`{}`".format(ticker) for ticker in tickers)
engine = sql.create_engine('mysql+pymysql://protos-public:protos-public@google-<EMAIL>-west-<EMAIL>.com:3306/public')
data = pd.read_sql("Select " + str(ticker_str) + " From " + str(nvx), con=engine)
## Clean Data:
data.set_index('Date', inplace=True)
data.index = pd.to_datetime(data.index)
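# A minimal usage sketch (not part of the original source). The tickers and dates are
# hypothetical, and this assumes load_nvx goes on to return the cleaned DataFrame
# described in its docstring.
if __name__ == '__main__':
    nvt_ratios = load_nvx('nvt', ['BTC', 'ETH'], '2018-01-01', '2018-12-31')
    print(nvt_ratios.head())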
# -*- coding: utf-8 -*-
import os
import sys
from queue import Queue
from typing import Dict
from dateutil import tz
import datetime as dt
from system.database.fre_database import FREDatabase
import numpy as np
import pandas as pd
import csv
from system.market_data.fre_market_data import EODMarketData
from dateutil.relativedelta import relativedelta
import pandas_market_calendars as mcal
sys.path.append('../')
database = FREDatabase()
eod_market_data = EODMarketData(os.environ.get("EOD_API_KEY"), database)
# Stock Info class based on Bollinger Bands Trading Strategy
class BollingerBandsStocksInfo:
def __init__(self, ticker, h=20, k1=2, notional=10000,
price_queue=None):
self.Ticker = ticker
self.H = h
self.K1 = k1
self.Notional = notional
# Create a fresh queue per instance unless one is supplied, avoiding a shared mutable default argument.
self.price_queue = price_queue if price_queue is not None else Queue(int(20 / 5))
self.Std = "null"
self.MA = "null"
self.position = 0
self.Qty = 0
self.current_price_buy = 0
self.current_price_sell = 1e6
self.Tradelist = []
self.PnLlist = []
self.PnL = 0
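# A minimal sketch (not part of the original strategy code) of how the H and K1
# parameters above are conventionally used for Bollinger Bands: H is the rolling
# window length and K1 scales the rolling standard deviation around the moving
# average to form the bands. Any prices passed in here would be hypothetical.
def _bollinger_bands_sketch(prices, h=20, k1=2.0):
    ma = prices.rolling(h).mean()
    std = prices.rolling(h).std()
    return ma - k1 * std, ma, ma + k1 * std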
class BBDmodelStockSelector:
# Initialize A Stock Info Dictionary
@staticmethod
def bollingerbands_stkinfo_init(stock_list) -> Dict[str, BollingerBandsStocksInfo]:
stock_info_dict = {stk: BollingerBandsStocksInfo(stk) for stk in stock_list}
return stock_info_dict
# @staticmethod
# def EDTtoUnixTime(EDTdatetime):
# utcTime = EDTdatetime.replace(tzinfo = tz.gettz('EDT')).astimezone(tz=datetime.timezone.utc)
# unixTime = utcTime.timestamp()
# return str(int(unixTime))
# @staticmethod
# def get_sp500_component(number_of_stocks=16):
# select_st = "SELECT symbol FROM sp500;"
# result_df = database.execute_sql_statement(select_st)
# print(result_df.symbol.values)
# randomIndex = np.random.randint(0, len(result_df.symbol.values), (number_of_stocks,)).tolist()
# print(randomIndex)
# with open('system/csv/server_symbols.csv', 'w') as f:
# write = csv.writer(f)
# write.writerow(result_df.symbol.values[randomIndex])
# return result_df.symbol.values[randomIndex]
# @staticmethod
# def get_selected_stock_list():
# sp500_symbol_list = BBDmodelStockSelector.get_sp500_component()
# selected_stk, stk_df = BBDmodelStockSelector.select_highvol_stock(sp500_symbol_list)
# return selected_stk, stk_df
@staticmethod
def select_highvol_stock(end_date=None, stock_list=None, interval='1m', number_of_stocks=2, lookback_window=14):
std_resultdf = pd.DataFrame(index=stock_list)
std_resultdf['std'] = 0.0
for stk in stock_list:
try:
start_date = end_date + dt.timedelta(-lookback_window)
print(start_date, end_date)
start_time = int(start_date.replace(tzinfo=dt.timezone.utc).timestamp())
end_time = int(end_date.replace(tzinfo=dt.timezone.utc).timestamp())
print('good1')
stk_data = pd.DataFrame(eod_market_data.get_intraday_data(stk, start_time, end_time))
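# Volatility proxy: standard deviation of close-to-close percentage returns over the lookback window.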
std = stk_data.close.pct_change().shift(-1).std()
std_resultdf.loc[stk,'std'] = std
print('Volatility of return over stock: ' + stk + ' is: ' + str(std))
except Exception:
print('Cannot get data for stock: ' + stk)
stock_selected = list(std_resultdf['std'].sort_values().index[-number_of_stocks:])
print('selected stock list:', stock_selected)
selected_df = std_resultdf.loc[stock_selected]
return stock_selected, selected_df
class BBDmodelTrainer:
stockdf = None
@classmethod
def build_trading_model(cls, stk_list=None, start_date=None):
if not stk_list:
print('stk_list_empty')
last_bday = dt.datetime.today()
nyse = mcal.get_calendar('NYSE')
start_bday = last_bday + dt.timedelta(-29)
train_end_date = (nyse.schedule(start_date=start_bday, end_date=last_bday).index[0] - dt.timedelta(1)).date()
symbols = pd.read_csv('system/csv/server_symbols.csv')
tickers = pd.concat([symbols["Ticker1"], symbols["Ticker2"]], ignore_index=True)
server_stock = tickers.drop_duplicates(keep='first').tolist()
stk_list, _ = BBDmodelStockSelector.select_highvol_stock(train_end_date, server_stock)
H_list = [40,50,60,70,80,90]
K1_list = [1.5,1.8,2.0,2.2,2.5]
cls.stockdf = cls.train_params_DBBD(stk_list, H_list, K1_list, start_bday, period='14')
return cls.stockdf
@classmethod
def train_params_DBBD(cls, stk_list, H_list, K1_list, train_end_date, period='14'):
train_start_date = train_end_date - dt.timedelta(days=int(period))
train_start = dt.datetime(train_start_date.year,train_start_date.month,train_start_date.day,9,30)
#!TODO: NEED TO CORRECTLY SET THE TRAIN START & END TIME IN ORDER TO DOWNLOAD INTRADAY DATA
train_start_time = int(train_start_date.replace(tzinfo=dt.timezone.utc).timestamp())
train_end_time = int(train_end_date.replace(tzinfo=dt.timezone.utc).timestamp())
#TEMPORARY TRAIN TIME
mkt_opentime = dt.datetime.strptime('09:30','%H:%M').time()
mkt_closetime = dt.datetime.strptime('16:00','%H:%M').time()
print(mkt_closetime)
stocks = pd.DataFrame(stk_list,columns=['Ticker'])
stocks["H"] = 0
stocks["K1"] = 0.0
stocks['Notional'] = 1000000.00 / 10
stocks["Profit_Loss_in_Training"] = 0.0
stocks['Return_in_Training'] = 0.0
stocks["Profit_Loss"] = 0.0
stocks['Return'] = 0.0
for stk in stk_list:
print("Training params for: " + stk +' ...')
train_data = pd.DataFrame(eod_market_data.get_intraday_data(stk, train_start_time, train_end_time))
### Convert UTC to EST
train_data.datetime = pd.to_datetime(train_data.datetime) - dt.timedelta(hours=5)
### Select during Trading Hour and within selected period
# print(train_data)
# print(train_data.datetime.dt.date)
# print('train end date', train_end_date.date())
# print('train start date', train_start_date.date())
train_data = train_data[(train_data.datetime.dt.time>=mkt_opentime) & (train_data.datetime.dt.time<=mkt_closetime)]
train_data = train_data[(train_data.datetime.dt.date>=train_start_date.date()) & (train_data.datetime.dt.date<train_end_date.date())]
IR_df = pd.DataFrame(index=H_list,columns=K1_list)
CumPnLdf = pd.DataFrame(index=H_list,columns=K1_list)
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
from collections import OrderedDict
import numpy as np
import pandas as pd
import pytest
try:
import pyarrow as pa
except ImportError: # pragma: no cover
pa = None
from ....config import options, option_context
from ....dataframe import DataFrame
from ....tensor import arange, tensor
from ....tensor.random import rand
from ....tests.core import require_cudf
from ....utils import lazy_import
from ... import eval as mars_eval, cut, qcut
from ...datasource.dataframe import from_pandas as from_pandas_df
from ...datasource.series import from_pandas as from_pandas_series
from ...datasource.index import from_pandas as from_pandas_index
from .. import to_gpu, to_cpu
from ..to_numeric import to_numeric
from ..rebalance import DataFrameRebalance
cudf = lazy_import('cudf', globals=globals())
@require_cudf
def test_to_gpu_execution(setup_gpu):
pdf = pd.DataFrame(np.random.rand(20, 30), index=np.arange(20, 0, -1))
df = from_pandas_df(pdf, chunk_size=(13, 21))
cdf = to_gpu(df)
res = cdf.execute().fetch()
assert isinstance(res, cudf.DataFrame)
pd.testing.assert_frame_equal(res.to_pandas(), pdf)
pseries = pdf.iloc[:, 0]
series = from_pandas_series(pseries)
cseries = series.to_gpu()
res = cseries.execute().fetch()
assert isinstance(res, cudf.Series)
pd.testing.assert_series_equal(res.to_pandas(), pseries)
@require_cudf
def test_to_cpu_execution(setup_gpu):
pdf = pd.DataFrame(np.random.rand(20, 30), index=np.arange(20, 0, -1))
df = from_pandas_df(pdf, chunk_size=(13, 21))
cdf = to_gpu(df)
df2 = to_cpu(cdf)
res = df2.execute().fetch()
assert isinstance(res, pd.DataFrame)
pd.testing.assert_frame_equal(res, pdf)
pseries = pdf.iloc[:, 0]
series = from_pandas_series(pseries, chunk_size=(13, 21))
cseries = to_gpu(series)
series2 = to_cpu(cseries)
res = series2.execute().fetch()
assert isinstance(res, pd.Series)
pd.testing.assert_series_equal(res, pseries)
def test_rechunk_execution(setup):
data = pd.DataFrame(np.random.rand(8, 10))
df = from_pandas_df(pd.DataFrame(data), chunk_size=3)
df2 = df.rechunk((3, 4))
res = df2.execute().fetch()
pd.testing.assert_frame_equal(data, res)
data = pd.DataFrame(np.random.rand(10, 10), index=np.random.randint(-100, 100, size=(10,)),
columns=[np.random.bytes(10) for _ in range(10)])
df = from_pandas_df(data)
df2 = df.rechunk(5)
res = df2.execute().fetch()
pd.testing.assert_frame_equal(data, res)
# test Series rechunk execution.
data = pd.Series(np.random.rand(10,))
series = from_pandas_series(data)
series2 = series.rechunk(3)
res = series2.execute().fetch()
pd.testing.assert_series_equal(data, res)
series2 = series.rechunk(1)
res = series2.execute().fetch()
pd.testing.assert_series_equal(data, res)
# test index rechunk execution
data = pd.Index(np.random.rand(10,))
index = from_pandas_index(data)
index2 = index.rechunk(3)
res = index2.execute().fetch()
pd.testing.assert_index_equal(data, res)
index2 = index.rechunk(1)
res = index2.execute().fetch()
pd.testing.assert_index_equal(data, res)
# test rechunk on mixed typed columns
data = pd.DataFrame({0: [1, 2], 1: [3, 4], 'a': [5, 6]})
df = from_pandas_df(data)
df = df.rechunk((2, 2)).rechunk({1: 3})
res = df.execute().fetch()
pd.testing.assert_frame_equal(data, res)
def test_series_map_execution(setup):
raw = pd.Series(np.arange(10))
s = from_pandas_series(raw, chunk_size=7)
with pytest.raises(ValueError):
# cannot infer dtype, the inferred is int,
# but actually it is float
# just due to nan
s.map({5: 10})
r = s.map({5: 10}, dtype=float)
result = r.execute().fetch()
expected = raw.map({5: 10})
pd.testing.assert_series_equal(result, expected)
r = s.map({i: 10 + i for i in range(7)}, dtype=float)
result = r.execute().fetch()
expected = raw.map({i: 10 + i for i in range(7)})
pd.testing.assert_series_equal(result, expected)
r = s.map({5: 10}, dtype=float, na_action='ignore')
result = r.execute().fetch()
expected = raw.map({5: 10}, na_action='ignore')
pd.testing.assert_series_equal(result, expected)
# dtype can be inferred
r = s.map({5: 10.})
result = r.execute().fetch()
expected = raw.map({5: 10.})
pd.testing.assert_series_equal(result, expected)
r = s.map(lambda x: x + 1, dtype=int)
result = r.execute().fetch()
expected = raw.map(lambda x: x + 1)
pd.testing.assert_series_equal(result, expected)
def f(x: int) -> float:
return x + 1.
# dtype can be inferred for function
r = s.map(f)
result = r.execute().fetch()
expected = raw.map(lambda x: x + 1.)
pd.testing.assert_series_equal(result, expected)
def f(x: int):
return x + 1.
# dtype can be inferred for function
r = s.map(f)
result = r.execute().fetch()
expected = raw.map(lambda x: x + 1.)
pd.testing.assert_series_equal(result, expected)
# test arg is a md.Series
raw2 = pd.Series([10], index=[5])
s2 = from_pandas_series(raw2)
r = s.map(s2, dtype=float)
result = r.execute().fetch()
expected = raw.map(raw2)
pd.testing.assert_series_equal(result, expected)
# test arg is a md.Series, and dtype can be inferred
raw2 = pd.Series([10.], index=[5])
s2 = from_pandas_series(raw2)
r = s.map(s2)
result = r.execute().fetch()
expected = raw.map(raw2)
pd.testing.assert_series_equal(result, expected)
# test str
raw = pd.Series(['a', 'b', 'c', 'd'])
s = from_pandas_series(raw, chunk_size=2)
r = s.map({'c': 'e'})
result = r.execute().fetch()
expected = raw.map({'c': 'e'})
pd.testing.assert_series_equal(result, expected)
# test map index
raw = pd.Index(np.random.rand(7))
idx = from_pandas_index(pd.Index(raw), chunk_size=2)
r = idx.map(f)
result = r.execute().fetch()
expected = raw.map(lambda x: x + 1.)
pd.testing.assert_index_equal(result, expected)
def test_describe_execution(setup):
s_raw = pd.Series(np.random.rand(10))
# test one chunk
series = from_pandas_series(s_raw, chunk_size=10)
r = series.describe()
result = r.execute().fetch()
expected = s_raw.describe()
pd.testing.assert_series_equal(result, expected)
r = series.describe(percentiles=[])
result = r.execute().fetch()
expected = s_raw.describe(percentiles=[])
pd.testing.assert_series_equal(result, expected)
# test multi chunks
series = from_pandas_series(s_raw, chunk_size=3)
r = series.describe()
result = r.execute().fetch()
expected = s_raw.describe()
pd.testing.assert_series_equal(result, expected)
r = series.describe(percentiles=[])
result = r.execute().fetch()
expected = s_raw.describe(percentiles=[])
pd.testing.assert_series_equal(result, expected)
rs = np.random.RandomState(5)
df_raw = pd.DataFrame(rs.rand(10, 4), columns=list('abcd'))
df_raw['e'] = rs.randint(100, size=10)
# test one chunk
df = from_pandas_df(df_raw, chunk_size=10)
r = df.describe()
result = r.execute().fetch()
expected = df_raw.describe()
pd.testing.assert_frame_equal(result, expected)
r = series.describe(percentiles=[], include=np.float64)
result = r.execute().fetch()
expected = s_raw.describe(percentiles=[], include=np.float64)
pd.testing.assert_series_equal(result, expected)
# test multi chunks
df = from_pandas_df(df_raw, chunk_size=3)
r = df.describe()
result = r.execute().fetch()
expected = df_raw.describe()
pd.testing.assert_frame_equal(result, expected)
r = df.describe(percentiles=[], include=np.float64)
result = r.execute().fetch()
expected = df_raw.describe(percentiles=[], include=np.float64)
pd.testing.assert_frame_equal(result, expected)
# test skip percentiles
r = df.describe(percentiles=False, include=np.float64)
result = r.execute().fetch()
expected = df_raw.describe(percentiles=[], include=np.float64)
expected.drop(['50%'], axis=0, inplace=True)
pd.testing.assert_frame_equal(result, expected)
with pytest.raises(ValueError):
df.describe(percentiles=[1.1])
with pytest.raises(ValueError):
# duplicated values
df.describe(percentiles=[0.3, 0.5, 0.3])
# test input dataframe which has unknown shape
df = from_pandas_df(df_raw, chunk_size=3)
df2 = df[df['a'] < 0.5]
r = df2.describe()
result = r.execute().fetch()
expected = df_raw[df_raw['a'] < 0.5].describe()
pd.testing.assert_frame_equal(result, expected)
def test_data_frame_apply_execute(setup):
cols = [chr(ord('A') + i) for i in range(10)]
df_raw = pd.DataFrame(dict((c, [i ** 2 for i in range(20)]) for c in cols))
old_chunk_store_limit = options.chunk_store_limit
try:
options.chunk_store_limit = 20
df = from_pandas_df(df_raw, chunk_size=5)
r = df.apply('ffill')
result = r.execute().fetch()
expected = df_raw.apply('ffill')
pd.testing.assert_frame_equal(result, expected)
r = df.apply(['sum', 'max'])
result = r.execute().fetch()
expected = df_raw.apply(['sum', 'max'])
pd.testing.assert_frame_equal(result, expected)
r = df.apply(np.sqrt)
result = r.execute().fetch()
expected = df_raw.apply(np.sqrt)
pd.testing.assert_frame_equal(result, expected)
r = df.apply(lambda x: pd.Series([1, 2]))
result = r.execute().fetch()
expected = df_raw.apply(lambda x: pd.Series([1, 2]))
pd.testing.assert_frame_equal(result, expected)
r = df.apply(np.sum, axis='index')
result = r.execute().fetch()
expected = df_raw.apply(np.sum, axis='index')
pd.testing.assert_series_equal(result, expected)
r = df.apply(np.sum, axis='columns')
result = r.execute().fetch()
expected = df_raw.apply(np.sum, axis='columns')
pd.testing.assert_series_equal(result, expected)
r = df.apply(lambda x: [1, 2], axis=1)
result = r.execute().fetch()
expected = df_raw.apply(lambda x: [1, 2], axis=1)
pd.testing.assert_series_equal(result, expected)
r = df.apply(lambda x: pd.Series([1, 2], index=['foo', 'bar']), axis=1)
result = r.execute().fetch()
expected = df_raw.apply(lambda x: pd.Series([1, 2], index=['foo', 'bar']), axis=1)
pd.testing.assert_frame_equal(result, expected)
r = df.apply(lambda x: [1, 2], axis=1, result_type='expand')
result = r.execute().fetch()
expected = df_raw.apply(lambda x: [1, 2], axis=1, result_type='expand')
pd.testing.assert_frame_equal(result, expected)
r = df.apply(lambda x: list(range(10)), axis=1, result_type='reduce')
result = r.execute().fetch()
expected = df_raw.apply(lambda x: list(range(10)), axis=1, result_type='reduce')
pd.testing.assert_series_equal(result, expected)
r = df.apply(lambda x: list(range(10)), axis=1, result_type='broadcast')
result = r.execute().fetch()
expected = df_raw.apply(lambda x: list(range(10)), axis=1, result_type='broadcast')
pd.testing.assert_frame_equal(result, expected)
finally:
options.chunk_store_limit = old_chunk_store_limit
def test_series_apply_execute(setup):
idxes = [chr(ord('A') + i) for i in range(20)]
s_raw = pd.Series([i ** 2 for i in range(20)], index=idxes)
series = from_pandas_series(s_raw, chunk_size=5)
r = series.apply('add', args=(1,))
result = r.execute().fetch()
expected = s_raw.apply('add', args=(1,))
pd.testing.assert_series_equal(result, expected)
r = series.apply(['sum', 'max'])
result = r.execute().fetch()
expected = s_raw.apply(['sum', 'max'])
pd.testing.assert_series_equal(result, expected)
r = series.apply(np.sqrt)
result = r.execute().fetch()
expected = s_raw.apply(np.sqrt)
pd.testing.assert_series_equal(result, expected)
r = series.apply('sqrt')
result = r.execute().fetch()
expected = s_raw.apply('sqrt')
pd.testing.assert_series_equal(result, expected)
r = series.apply(lambda x: [x, x + 1], convert_dtype=False)
result = r.execute().fetch()
expected = s_raw.apply(lambda x: [x, x + 1], convert_dtype=False)
pd.testing.assert_series_equal(result, expected)
s_raw2 = pd.Series([np.array([1, 2, 3]), np.array([4, 5, 6])])
series = from_pandas_series(s_raw2)
dtypes = pd.Series([np.dtype(float)] * 3)
r = series.apply(pd.Series, output_type='dataframe',
dtypes=dtypes)
result = r.execute().fetch()
expected = s_raw2.apply(pd.Series)
pd.testing.assert_frame_equal(result, expected)
@pytest.mark.skipif(pa is None, reason='pyarrow not installed')
def test_apply_with_arrow_dtype_execution(setup):
df1 = pd.DataFrame({'a': [1, 2, 1],
'b': ['a', 'b', 'a']})
df = from_pandas_df(df1)
df['b'] = df['b'].astype('Arrow[string]')
r = df.apply(lambda row: str(row[0]) + row[1], axis=1)
result = r.execute().fetch()
expected = df1.apply(lambda row: str(row[0]) + row[1], axis=1)
pd.testing.assert_series_equal(result, expected)
s1 = df1['b']
s = from_pandas_series(s1)
s = s.astype('arrow_string')
r = s.apply(lambda x: x + '_suffix')
result = r.execute().fetch()
expected = s1.apply(lambda x: x + '_suffix')
pd.testing.assert_series_equal(result, expected)
def test_transform_execute(setup):
cols = [chr(ord('A') + i) for i in range(10)]
df_raw = pd.DataFrame(dict((c, [i ** 2 for i in range(20)]) for c in cols))
idx_vals = [chr(ord('A') + i) for i in range(20)]
s_raw = pd.Series([i ** 2 for i in range(20)], index=idx_vals)
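# pandas uses a function's __name__ as the output label when aggregating with a list of
# functions, so rename_fn below gives each lambda a distinct name to keep labels unique.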
def rename_fn(f, new_name):
f.__name__ = new_name
return f
old_chunk_store_limit = options.chunk_store_limit
try:
options.chunk_store_limit = 20
# DATAFRAME CASES
df = from_pandas_df(df_raw, chunk_size=5)
# test transform scenarios on data frames
r = df.transform(lambda x: list(range(len(x))))
result = r.execute().fetch()
expected = df_raw.transform(lambda x: list(range(len(x))))
pd.testing.assert_frame_equal(result, expected)
r = df.transform(lambda x: list(range(len(x))), axis=1)
result = r.execute().fetch()
expected = df_raw.transform(lambda x: list(range(len(x))), axis=1)
pd.testing.assert_frame_equal(result, expected)
r = df.transform(['cumsum', 'cummax', lambda x: x + 1])
result = r.execute().fetch()
expected = df_raw.transform(['cumsum', 'cummax', lambda x: x + 1])
pd.testing.assert_frame_equal(result, expected)
fn_dict = OrderedDict([
('A', 'cumsum'),
('D', ['cumsum', 'cummax']),
('F', lambda x: x + 1),
])
r = df.transform(fn_dict)
result = r.execute().fetch()
expected = df_raw.transform(fn_dict)
pd.testing.assert_frame_equal(result, expected)
r = df.transform(lambda x: x.iloc[:-1], _call_agg=True)
result = r.execute().fetch()
expected = df_raw.agg(lambda x: x.iloc[:-1])
pd.testing.assert_frame_equal(result, expected)
r = df.transform(lambda x: x.iloc[:-1], axis=1, _call_agg=True)
result = r.execute().fetch()
expected = df_raw.agg(lambda x: x.iloc[:-1], axis=1)
pd.testing.assert_frame_equal(result, expected)
fn_list = [rename_fn(lambda x: x.iloc[1:].reset_index(drop=True), 'f1'),
lambda x: x.iloc[:-1].reset_index(drop=True)]
r = df.transform(fn_list, _call_agg=True)
result = r.execute().fetch()
expected = df_raw.agg(fn_list)
pd.testing.assert_frame_equal(result, expected)
r = df.transform(lambda x: x.sum(), _call_agg=True)
result = r.execute().fetch()
expected = df_raw.agg(lambda x: x.sum())
pd.testing.assert_series_equal(result, expected)
fn_dict = OrderedDict([
('A', rename_fn(lambda x: x.iloc[1:].reset_index(drop=True), 'f1')),
('D', [rename_fn(lambda x: x.iloc[1:].reset_index(drop=True), 'f1'),
lambda x: x.iloc[:-1].reset_index(drop=True)]),
('F', lambda x: x.iloc[:-1].reset_index(drop=True)),
])
r = df.transform(fn_dict, _call_agg=True)
result = r.execute().fetch()
expected = df_raw.agg(fn_dict)
pd.testing.assert_frame_equal(result, expected)
# SERIES CASES
series = from_pandas_series(s_raw, chunk_size=5)
# test transform scenarios on series
r = series.transform(lambda x: x + 1)
result = r.execute().fetch()
expected = s_raw.transform(lambda x: x + 1)
pd.testing.assert_series_equal(result, expected)
r = series.transform(['cumsum', lambda x: x + 1])
result = r.execute().fetch()
expected = s_raw.transform(['cumsum', lambda x: x + 1])
pd.testing.assert_frame_equal(result, expected)
# test transform on string dtype
df_raw = pd.DataFrame({'col1': ['str'] * 10, 'col2': ['string'] * 10})
df = from_pandas_df(df_raw, chunk_size=3)
r = df['col1'].transform(lambda x: x + '_suffix')
result = r.execute().fetch()
expected = df_raw['col1'].transform(lambda x: x + '_suffix')
pd.testing.assert_series_equal(result, expected)
r = df.transform(lambda x: x + '_suffix')
result = r.execute().fetch()
expected = df_raw.transform(lambda x: x + '_suffix')
pd.testing.assert_frame_equal(result, expected)
r = df['col2'].transform(lambda x: x + '_suffix', dtype=np.dtype('str'))
result = r.execute().fetch()
expected = df_raw['col2'].transform(lambda x: x + '_suffix')
pd.testing.assert_series_equal(result, expected)
finally:
options.chunk_store_limit = old_chunk_store_limit
@pytest.mark.skipif(pa is None, reason='pyarrow not installed')
def test_transform_with_arrow_dtype_execution(setup):
df1 = pd.DataFrame({'a': [1, 2, 1],
'b': ['a', 'b', 'a']})
df = from_pandas_df(df1)
df['b'] = df['b'].astype('Arrow[string]')
r = df.transform({'b': lambda x: x + '_suffix'})
result = r.execute().fetch()
expected = df1.transform({'b': lambda x: x + '_suffix'})
pd.testing.assert_frame_equal(result, expected)
s1 = df1['b']
s = from_pandas_series(s1)
s = s.astype('arrow_string')
r = s.transform(lambda x: x + '_suffix')
result = r.execute().fetch()
expected = s1.transform(lambda x: x + '_suffix')
pd.testing.assert_series_equal(result, expected)
def test_string_method_execution(setup):
s = pd.Series(['s1,s2', 'ef,', 'dd', np.nan])
s2 = pd.concat([s, s, s])
series = from_pandas_series(s, chunk_size=2)
series2 = from_pandas_series(s2, chunk_size=2)
# test getitem
r = series.str[:3]
result = r.execute().fetch()
expected = s.str[:3]
pd.testing.assert_series_equal(result, expected)
# test split, expand=False
r = series.str.split(',', n=2)
result = r.execute().fetch()
expected = s.str.split(',', n=2)
pd.testing.assert_series_equal(result, expected)
# test split, expand=True
r = series.str.split(',', expand=True, n=1)
result = r.execute().fetch()
expected = s.str.split(',', expand=True, n=1)
pd.testing.assert_frame_equal(result, expected)
# test rsplit
r = series.str.rsplit(',', expand=True, n=1)
result = r.execute().fetch()
expected = s.str.rsplit(',', expand=True, n=1)
pd.testing.assert_frame_equal(result, expected)
# test cat all data
r = series2.str.cat(sep='/', na_rep='e')
result = r.execute().fetch()
expected = s2.str.cat(sep='/', na_rep='e')
assert result == expected
# test cat list
r = series.str.cat(['a', 'b', np.nan, 'c'])
result = r.execute().fetch()
expected = s.str.cat(['a', 'b', np.nan, 'c'])
pd.testing.assert_series_equal(result, expected)
# test cat series
r = series.str.cat(series.str.capitalize(), join='outer')
result = r.execute().fetch()
expected = s.str.cat(s.str.capitalize(), join='outer')
pd.testing.assert_series_equal(result, expected)
# test extractall
r = series.str.extractall(r"(?P<letter>[ab])(?P<digit>\d)")
result = r.execute().fetch()
expected = s.str.extractall(r"(?P<letter>[ab])(?P<digit>\d)")
pd.testing.assert_frame_equal(result, expected)
# test extract, expand=False
r = series.str.extract(r'[ab](\d)', expand=False)
result = r.execute().fetch()
expected = s.str.extract(r'[ab](\d)', expand=False)
pd.testing.assert_series_equal(result, expected)
# test extract, expand=True
r = series.str.extract(r'[ab](\d)', expand=True)
result = r.execute().fetch()
expected = s.str.extract(r'[ab](\d)', expand=True)
pd.testing.assert_frame_equal(result, expected)
def test_datetime_method_execution(setup):
# test datetime
s = pd.Series([pd.Timestamp('2020-1-1'),
pd.Timestamp('2020-2-1'),
np.nan])
series = from_pandas_series(s, chunk_size=2)
r = series.dt.year
result = r.execute().fetch()
expected = s.dt.year
pd.testing.assert_series_equal(result, expected)
r = series.dt.strftime('%m-%d-%Y')
result = r.execute().fetch()
expected = s.dt.strftime('%m-%d-%Y')
pd.testing.assert_series_equal(result, expected)
# test timedelta
s = pd.Series([pd.Timedelta('1 days'),
pd.Timedelta('3 days'),
np.nan])
series = from_pandas_series(s, chunk_size=2)
r = series.dt.days
result = r.execute().fetch()
expected = s.dt.days
pd.testing.assert_series_equal(result, expected)
def test_isin_execution(setup):
# one chunk in multiple chunks
a = pd.Series([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
b = pd.Series([2, 1, 9, 3])
sa = from_pandas_series(a, chunk_size=10)
sb = from_pandas_series(b, chunk_size=2)
result = sa.isin(sb).execute().fetch()
expected = a.isin(b)
pd.testing.assert_series_equal(result, expected)
# multiple chunk in one chunks
a = pd.Series([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
b = pd.Series([2, 1, 9, 3])
sa = from_pandas_series(a, chunk_size=2)
sb = from_pandas_series(b, chunk_size=4)
result = sa.isin(sb).execute().fetch()
expected = a.isin(b)
pd.testing.assert_series_equal(result, expected)
# multiple chunk in multiple chunks
a = pd.Series([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
b = pd.Series([2, 1, 9, 3])
sa = from_pandas_series(a, chunk_size=2)
sb = from_pandas_series(b, chunk_size=2)
result = sa.isin(sb).execute().fetch()
expected = a.isin(b)
pd.testing.assert_series_equal(result, expected)
a = pd.Series([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
b = pd.Series([2, 1, 9, 3])
sa = from_pandas_series(a, chunk_size=2)
result = sa.isin(sb).execute().fetch()
expected = a.isin(b)
pd.testing.assert_series_equal(result, expected)
a = pd.Series([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
b = np.array([2, 1, 9, 3])
sa = from_pandas_series(a, chunk_size=2)
sb = tensor(b, chunk_size=3)
result = sa.isin(sb).execute().fetch()
expected = a.isin(b)
pd.testing.assert_series_equal(result, expected)
a = pd.Series([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
b = {2, 1, 9, 3} # set
sa = from_pandas_series(a, chunk_size=2)
result = sa.isin(sb).execute().fetch()
expected = a.isin(b)
pd.testing.assert_series_equal(result, expected)
rs = np.random.RandomState(0)
raw = pd.DataFrame(rs.randint(1000, size=(10, 3)))
df = from_pandas_df(raw, chunk_size=(5, 2))
# set
b = {2, 1, raw[1][0]}
r = df.isin(b)
result = r.execute().fetch()
expected = raw.isin(b)
pd.testing.assert_frame_equal(result, expected)
# mars object
b = tensor([2, 1, raw[1][0]], chunk_size=2)
r = df.isin(b)
result = r.execute().fetch()
expected = raw.isin([2, 1, raw[1][0]])
pd.testing.assert_frame_equal(result, expected)
# dict
b = {1: tensor([2, 1, raw[1][0]], chunk_size=2),
2: [3, 10]}
r = df.isin(b)
result = r.execute().fetch()
expected = raw.isin({1: [2, 1, raw[1][0]], 2: [3, 10]})
pd.testing.assert_frame_equal(result, expected)
def test_cut_execution(setup):
session = setup
rs = np.random.RandomState(0)
raw = rs.random(15) * 1000
s = pd.Series(raw, index=[f'i{i}' for i in range(15)])
bins = [10, 100, 500]
ii = pd.interval_range(10, 500, 3)
labels = ['a', 'b']
t = tensor(raw, chunk_size=4)
series = from_pandas_series(s, chunk_size=4)
iii = from_pandas_index(ii, chunk_size=2)
# cut on Series
r = cut(series, bins)
result = r.execute().fetch()
pd.testing.assert_series_equal(result, pd.cut(s, bins))
r, b = cut(series, bins, retbins=True)
r_result = r.execute().fetch()
b_result = b.execute().fetch()
r_expected, b_expected = pd.cut(s, bins, retbins=True)
pd.testing.assert_series_equal(r_result, r_expected)
np.testing.assert_array_equal(b_result, b_expected)
# cut on tensor
r = cut(t, bins)
# result and expected is array whose dtype is CategoricalDtype
result = r.execute().fetch()
expected = pd.cut(raw, bins)
assert len(result) == len(expected)
for r, e in zip(result, expected):
np.testing.assert_equal(r, e)
# one chunk
r = cut(s, tensor(bins, chunk_size=2), right=False, include_lowest=True)
result = r.execute().fetch()
pd.testing.assert_series_equal(result, pd.cut(s, bins, right=False, include_lowest=True))
# test labels
r = cut(t, bins, labels=labels)
# result and expected is array whose dtype is CategoricalDtype
result = r.execute().fetch()
expected = pd.cut(raw, bins, labels=labels)
assert len(result) == len(expected)
for r, e in zip(result, expected):
np.testing.assert_equal(r, e)
r = cut(t, bins, labels=False)
# result and expected is array whose dtype is CategoricalDtype
result = r.execute().fetch()
expected = pd.cut(raw, bins, labels=False)
np.testing.assert_array_equal(result, expected)
# test labels which is tensor
labels_t = tensor(['a', 'b'], chunk_size=1)
r = cut(raw, bins, labels=labels_t, include_lowest=True)
# result and expected is array whose dtype is CategoricalDtype
result = r.execute().fetch()
expected = pd.cut(raw, bins, labels=labels, include_lowest=True)
assert len(result) == len(expected)
for r, e in zip(result, expected):
np.testing.assert_equal(r, e)
# test labels=False
r, b = cut(raw, ii, labels=False, retbins=True)
# result and expected is array whose dtype is CategoricalDtype
r_result, b_result = session.fetch(*session.execute(r, b))
r_expected, b_expected = pd.cut(raw, ii, labels=False, retbins=True)
for r, e in zip(r_result, r_expected):
np.testing.assert_equal(r, e)
pd.testing.assert_index_equal(b_result, b_expected)
# test bins which is md.IntervalIndex
r, b = cut(series, iii, labels=tensor(labels, chunk_size=1), retbins=True)
r_result = r.execute().fetch()
b_result = b.execute().fetch()
r_expected, b_expected = pd.cut(s, ii, labels=labels, retbins=True)
pd.testing.assert_series_equal(r_result, r_expected)
pd.testing.assert_index_equal(b_result, b_expected)
# test duplicates
bins2 = [0, 2, 4, 6, 10, 10]
r, b = cut(s, bins2, labels=False, retbins=True,
right=False, duplicates='drop')
r_result = r.execute().fetch()
b_result = b.execute().fetch()
r_expected, b_expected = pd.cut(s, bins2, labels=False, retbins=True,
right=False, duplicates='drop')
pd.testing.assert_series_equal(r_result, r_expected)
np.testing.assert_array_equal(b_result, b_expected)
# test integer bins
r = cut(series, 3)
result = r.execute().fetch()
pd.testing.assert_series_equal(result, pd.cut(s, 3))
r, b = cut(series, 3, right=False, retbins=True)
r_result, b_result = session.fetch(*session.execute(r, b))
r_expected, b_expected = pd.cut(s, 3, right=False, retbins=True)
pd.testing.assert_series_equal(r_result, r_expected)
np.testing.assert_array_equal(b_result, b_expected)
# test min max same
s2 = pd.Series([1.1] * 15)
r = cut(s2, 3)
result = r.execute().fetch()
pd.testing.assert_series_equal(result, pd.cut(s2, 3))
# test inf exist
s3 = s2.copy()
s3[-1] = np.inf
with pytest.raises(ValueError):
cut(s3, 3).execute()
def test_transpose_execution(setup):
raw = pd.DataFrame({"a": ['1', '2', '3'], "b": ['5', '-6', '7'], "c": ['1', '2', '3']})
# test 1 chunk
df = from_pandas_df(raw)
result = df.transpose().execute().fetch()
pd.testing.assert_frame_equal(result, raw.transpose())
# test multi chunks
df = from_pandas_df(raw, chunk_size=2)
result = df.transpose().execute().fetch()
pd.testing.assert_frame_equal(result, raw.transpose())
df = from_pandas_df(raw, chunk_size=2)
result = df.T.execute().fetch()
pd.testing.assert_frame_equal(result, raw.transpose())
# dtypes are varied
raw = pd.DataFrame({"a": [1.1, 2.2, 3.3], "b": [5, -6, 7], "c": [1, 2, 3]})
df = from_pandas_df(raw, chunk_size=2)
result = df.transpose().execute().fetch()
pd.testing.assert_frame_equal(result, raw.transpose())
raw = pd.DataFrame({"a": [1.1, 2.2, 3.3], "b": ['5', '-6', '7']})
df = from_pandas_df(raw, chunk_size=2)
result = df.transpose().execute().fetch()
pd.testing.assert_frame_equal(result, raw.transpose())
# Transposing from results of other operands
raw = pd.DataFrame(np.arange(0, 100).reshape(10, 10))
df = DataFrame(arange(0, 100, chunk_size=5).reshape(10, 10))
result = df.transpose().execute().fetch()
pd.testing.assert_frame_equal(result, raw.transpose())
df = DataFrame(rand(100, 100, chunk_size=10))
raw = df.to_pandas()
result = df.transpose().execute().fetch()
pd.testing.assert_frame_equal(result, raw.transpose())
def test_to_numeric_execution(setup):
rs = np.random.RandomState(0)
s = pd.Series(rs.randint(5, size=100))
s[rs.randint(100)] = np.nan
# test 1 chunk
series = from_pandas_series(s)
r = to_numeric(series)
pd.testing.assert_series_equal(r.execute().fetch(),
pd.to_numeric(s))
# test multi chunks
series = from_pandas_series(s, chunk_size=20)
r = to_numeric(series)
pd.testing.assert_series_equal(r.execute().fetch(),
pd.to_numeric(s))
# test object dtype
s = pd.Series(['1.0', 2, -3, '2.0'])
series = from_pandas_series(s)
r = to_numeric(series)
pd.testing.assert_series_equal(r.execute().fetch(),
pd.to_numeric(s))
# test errors and downcast
s = pd.Series(['appple', 2, -3, '2.0'])
series = from_pandas_series(s)
r = to_numeric(series, errors='ignore', downcast='signed')
pd.testing.assert_series_equal(r.execute().fetch(),
pd.to_numeric(s, errors='ignore', downcast='signed'))
# test list data
l = ['1.0', 2, -3, '2.0']
r = to_numeric(l)
np.testing.assert_array_equal(r.execute().fetch(),
pd.to_numeric(l))
def test_q_cut_execution(setup):
rs = np.random.RandomState(0)
raw = rs.random(15) * 1000
s = pd.Series(raw, index=[f'i{i}' for i in range(15)])
series = from_pandas_series(s)
r = qcut(series, 3)
result = r.execute().fetch()
expected = pd.qcut(s, 3)
pd.testing.assert_series_equal(result, expected)
r = qcut(s, 3)
result = r.execute().fetch()
expected = pd.qcut(s, 3)
pd.testing.assert_series_equal(result, expected)
series = from_pandas_series(s)
r = qcut(series, [0.3, 0.5, 0.7])
result = r.execute().fetch()
expected = pd.qcut(s, [0.3, 0.5, 0.7])
pd.testing.assert_series_equal(result, expected)
r = qcut(range(5), 3)
result = r.execute().fetch()
expected = pd.qcut(range(5), 3)
assert isinstance(result, type(expected))
pd.testing.assert_series_equal(pd.Series(result),
pd.Series(expected))
r = qcut(range(5), [0.2, 0.5])
result = r.execute().fetch()
expected = pd.qcut(range(5), [0.2, 0.5])
assert isinstance(result, type(expected))
pd.testing.assert_series_equal(pd.Series(result),
pd.Series(expected))
r = qcut(range(5), tensor([0.2, 0.5]))
result = r.execute().fetch()
expected = pd.qcut(range(5), [0.2, 0.5])
assert isinstance(result, type(expected))
pd.testing.assert_series_equal(pd.Series(result),
pd.Series(expected))
def test_shift_execution(setup):
# test dataframe
rs = np.random.RandomState(0)
raw = pd.DataFrame(rs.randint(1000, size=(10, 8)),
columns=['col' + str(i + 1) for i in range(8)])
df = from_pandas_df(raw, chunk_size=5)
for periods in (2, -2, 6, -6):
for axis in (0, 1):
for fill_value in (None, 0, 1.):
r = df.shift(periods=periods, axis=axis,
fill_value=fill_value)
try:
result = r.execute().fetch()
expected = raw.shift(periods=periods, axis=axis,
fill_value=fill_value)
pd.testing.assert_frame_equal(result, expected, check_dtype=False)
except AssertionError as e: # pragma: no cover
raise AssertionError(
f'Failed when periods: {periods}, axis: {axis}, fill_value: {fill_value}'
) from e
raw2 = raw.copy()
raw2.index = pd.date_range('2020-1-1', periods=10)
raw2.columns = pd.date_range('2020-3-1', periods=8)
df2 = from_pandas_df(raw2, chunk_size=5)
# test freq not None
for periods in (2, -2):
for axis in (0, 1):
for fill_value in (None, 0, 1.):
r = df2.shift(periods=periods, freq='D', axis=axis,
fill_value=fill_value)
try:
result = r.execute().fetch()
expected = raw2.shift(periods=periods, freq='D', axis=axis,
fill_value=fill_value)
pd.testing.assert_frame_equal(result, expected)
except AssertionError as e: # pragma: no cover
raise AssertionError(
f'Failed when periods: {periods}, axis: {axis}, fill_value: {fill_value}') from e
# test tshift
r = df2.tshift(periods=1)
result = r.execute().fetch()
expected = raw2.tshift(periods=1)
pd.testing.assert_frame_equal(result, expected)
with pytest.raises(ValueError):
_ = df.tshift(periods=1)
# test series
s = raw.iloc[:, 0]
series = from_pandas_series(s, chunk_size=5)
for periods in (0, 2, -2, 6, -6):
for fill_value in (None, 0, 1.):
r = series.shift(periods=periods, fill_value=fill_value)
try:
result = r.execute().fetch()
expected = s.shift(periods=periods, fill_value=fill_value)
pd.testing.assert_series_equal(result, expected)
except AssertionError as e: # pragma: no cover
raise AssertionError(
f'Failed when periods: {periods}, fill_value: {fill_value}') from e
s2 = raw2.iloc[:, 0]
# test freq not None
series2 = from_pandas_series(s2, chunk_size=5)
for periods in (2, -2):
for fill_value in (None, 0, 1.):
r = series2.shift(periods=periods, freq='D', fill_value=fill_value)
try:
result = r.execute().fetch()
expected = s2.shift(periods=periods, freq='D', fill_value=fill_value)
pd.testing.assert_series_equal(result, expected)
except AssertionError as e: # pragma: no cover
raise AssertionError(
f'Failed when periods: {periods}, fill_value: {fill_value}') from e
def test_diff_execution(setup):
rs = np.random.RandomState(0)
raw = pd.DataFrame(rs.randint(1000, size=(10, 8)),
columns=['col' + str(i + 1) for i in range(8)])
raw1 = raw.copy()
raw1['col4'] = raw1['col4'] < 400
r = from_pandas_df(raw1, chunk_size=(10, 5)).diff(-1)
pd.testing.assert_frame_equal(r.execute().fetch(),
raw1.diff(-1))
r = from_pandas_df(raw1, chunk_size=5).diff(-1)
pd.testing.assert_frame_equal(r.execute().fetch(),
raw1.diff(-1))
r = from_pandas_df(raw, chunk_size=(5, 8)).diff(1, axis=1)
pd.testing.assert_frame_equal(r.execute().fetch(),
raw.diff(1, axis=1))
r = from_pandas_df(raw, chunk_size=5).diff(1, axis=1)
pd.testing.assert_frame_equal(r.execute().fetch(),
raw.diff(1, axis=1), check_dtype=False)
# test series
s = raw.iloc[:, 0]
s1 = s.copy() < 400
r = from_pandas_series(s, chunk_size=10).diff(-1)
pd.testing.assert_series_equal(r.execute().fetch(),
s.diff(-1))
r = from_pandas_series(s, chunk_size=5).diff(-1)
pd.testing.assert_series_equal(r.execute().fetch(),
s.diff(-1))
r = from_pandas_series(s1, chunk_size=5).diff(1)
pd.testing.assert_series_equal(r.execute().fetch(),
s1.diff(1))
def test_value_counts_execution(setup):
rs = np.random.RandomState(0)
s = pd.Series(rs.randint(5, size=100), name='s')
s[rs.randint(100)] = np.nan
# test 1 chunk
series = from_pandas_series(s, chunk_size=100)
r = series.value_counts()
pd.testing.assert_series_equal(r.execute().fetch(),
s.value_counts())
r = series.value_counts(bins=5, normalize=True)
pd.testing.assert_series_equal(r.execute().fetch(),
s.value_counts(bins=5, normalize=True))
# test multi chunks
series = from_pandas_series(s, chunk_size=30)
r = series.value_counts(method='tree')
pd.testing.assert_series_equal(r.execute().fetch(),
s.value_counts())
r = series.value_counts(method='tree', normalize=True)
pd.testing.assert_series_equal(r.execute().fetch(),
s.value_counts(normalize=True))
# test bins and normalize
r = series.value_counts(method='tree', bins=5, normalize=True)
pd.testing.assert_series_equal(r.execute().fetch(),
s.value_counts(bins=5, normalize=True))
def test_astype(setup):
rs = np.random.RandomState(0)
raw = pd.DataFrame(rs.randint(1000, size=(20, 8)),
columns=['c' + str(i + 1) for i in range(8)])
# single chunk
df = from_pandas_df(raw)
r = df.astype('int32')
result = r.execute().fetch()
expected = raw.astype('int32')
pd.testing.assert_frame_equal(expected, result)
# multiply chunks
df = from_pandas_df(raw, chunk_size=6)
r = df.astype('int32')
result = r.execute().fetch()
expected = raw.astype('int32')
pd.testing.assert_frame_equal(expected, result)
# dict type
df = from_pandas_df(raw, chunk_size=5)
r = df.astype({'c1': 'int32', 'c2': 'float', 'c8': 'str'})
result = r.execute().fetch()
expected = raw.astype({'c1': 'int32', 'c2': 'float', 'c8': 'str'})
pd.testing.assert_frame_equal(expected, result)
# test arrow_string dtype
df = from_pandas_df(raw, chunk_size=8)
r = df.astype({'c1': 'arrow_string'})
result = r.execute().fetch()
expected = raw.astype({'c1': 'arrow_string'})
pd.testing.assert_frame_equal(expected, result)
# test series
s = pd.Series(rs.randint(5, size=20))
series = from_pandas_series(s)
r = series.astype('int32')
result = r.execute().fetch()
expected = s.astype('int32')
pd.testing.assert_series_equal(result, expected)
series = from_pandas_series(s, chunk_size=6)
r = series.astype('arrow_string')
result = r.execute().fetch()
expected = s.astype('arrow_string')
pd.testing.assert_series_equal(result, expected)
# test index
raw = pd.Index(rs.randint(5, size=20))
mix = from_pandas_index(raw)
r = mix.astype('int32')
result = r.execute().fetch()
expected = raw.astype('int32')
pd.testing.assert_index_equal(result, expected)
# multiply chunks
series = from_pandas_series(s, chunk_size=6)
r = series.astype('str')
result = r.execute().fetch()
expected = s.astype('str')
pd.testing.assert_series_equal(result, expected)
# test category
raw = pd.DataFrame(rs.randint(3, size=(20, 8)),
columns=['c' + str(i + 1) for i in range(8)])
df = from_pandas_df(raw)
r = df.astype('category')
result = r.execute().fetch()
expected = raw.astype('category')
pd.testing.assert_frame_equal(expected, result)
df = from_pandas_df(raw)
r = df.astype({'c1': 'category', 'c8': 'int32', 'c4': 'str'})
result = r.execute().fetch()
expected = raw.astype({'c1': 'category', 'c8': 'int32', 'c4': 'str'})
pd.testing.assert_frame_equal(expected, result)
df = from_pandas_df(raw, chunk_size=5)
r = df.astype('category')
result = r.execute().fetch()
expected = raw.astype('category')
pd.testing.assert_frame_equal(expected, result)
df = from_pandas_df(raw, chunk_size=3)
r = df.astype({'c1': 'category', 'c8': 'int32', 'c4': 'str'})
result = r.execute().fetch()
expected = raw.astype({'c1': 'category', 'c8': 'int32', 'c4': 'str'})
pd.testing.assert_frame_equal(expected, result)
import numpy as np
import pandas as pd
def mae(y_true_counts, y_pred_counts):
return np.mean(np.abs(y_true_counts - y_pred_counts))
def rmse(y_true_counts, y_pred_counts):
return np.sqrt(np.mean(np.square(y_true_counts - y_pred_counts)))
def underestimate(y_true_counts, y_pred_counts):
return 100. * np.sum((y_true_counts - y_pred_counts) * (y_pred_counts < y_true_counts)) / y_true_counts.sum()
def overestimate(y_true_counts, y_pred_counts):
return 100. * np.sum((y_pred_counts - y_true_counts) * (y_pred_counts > y_true_counts)) / y_true_counts.sum()
def difference(y_true_counts, y_pred_counts):
return underestimate(y_true_counts, y_pred_counts) + overestimate(y_true_counts, y_pred_counts)
def evaluation_results_as_dict(counts_true, counts_pred, split_name, decimals=3):
mae_v = mae(counts_true, counts_pred).round(decimals=decimals)
rmse_v = rmse(counts_true, counts_pred).round(decimals=decimals)
underestimate_v = f'{underestimate(counts_true, counts_pred):.{decimals}f}%'
overestimate_v = f'{overestimate(counts_true, counts_pred):.{decimals}f}%'
difference_v = f'{difference(counts_true, counts_pred):.{decimals}f}%'
results = {
split_name:{
'MAE': mae_v,
'RMSE': rmse_v,
'Underestimate': underestimate_v,
'Overestimate': overestimate_v,
'Difference': difference_v
}
}
return results
def evaluation_results_as_df(train_results, val_results, test_results,
architecture_name='',
sub_experiment_name='',
dataset_name=''):
rows = ['train', 'val', 'test']
data = {**train_results, **val_results, **test_results}
df = pd.DataFrame.from_dict(data=data, orient='index')
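# A minimal usage sketch (not part of the original source); the count arrays below are
# hypothetical and simply exercise evaluation_results_as_dict as defined above.
if __name__ == '__main__':
    counts_true = np.array([10.0, 12.0, 8.0, 15.0])
    counts_pred = np.array([9.0, 13.0, 8.0, 14.0])
    print(evaluation_results_as_dict(counts_true, counts_pred, split_name='val'))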
# brightwind is a library that provides wind analysts with easy to use tools for working with meteorological data.
# Copyright (C) 2021 <NAME>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
from brightwind.load.load import _is_file
import numpy as np
import pandas as pd
import requests
import json
import copy
__all__ = ['MeasurementStation']
def _replace_none_date(list_or_dict):
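# Recursively replace None 'date_from'/'date_to' values with the DATE_INSTEAD_OF_NONE
# placeholder defined further down in this module.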
if isinstance(list_or_dict, list):
renamed = []
for item in list_or_dict:
renamed.append(_replace_none_date(item))
return renamed
elif isinstance(list_or_dict, dict):
for date_str in ['date_from', 'date_to']:
if list_or_dict.get(date_str) is None:
list_or_dict[date_str] = DATE_INSTEAD_OF_NONE
return list_or_dict
def _get_title(property_name, schema, property_section=None):
"""
Get the title for the property name from the WRA Data Model Schema. Optionally, you can send the section of the
schema where the property should be found. This avoids finding the wrong property name when the name
is not unique.
If the property name is not found, the property name itself is returned.
:param property_name: The property name to find.
:type property_name: str
:param schema: The WRA Data Model Schema.
:type schema: dict
:param property_section: The section in the schema where the property can be found. This avoids the case where the
property_name is not unique in the schema.
:type property_section: str or None
:return: The title as stated in the schema.
:rtype: str
"""
# search through definitions first
if schema.get('definitions') is not None:
if property_name in schema.get('definitions').keys():
return schema.get('definitions').get(property_name).get('title')
# search through properties
if schema.get('properties') is not None:
# is property_name in the main properties
if property_name in schema.get('properties').keys() and property_section is None:
return schema.get('properties').get(property_name).get('title')
# is property_section part of the main properties
if property_section in schema.get('properties').keys():
property_type = schema.get('properties').get(property_section).get('type')
if property_type is not None and 'array' in property_type:
# move down into an array
result = _get_title(property_name, schema.get('properties').get(property_section)['items'])
if result != property_name:
return result
elif property_type is not None and 'object' in property_type:
# move down into an object
result = _get_title(property_name, schema.get('properties').get(property_section))
if result != property_name:
return result
# don't recognise either property_name or property_section.
# loop through each property to find an array or object to move down to
for k, v in schema.get('properties').items():
if v.get('type') is not None and 'array' in v['type']:
# move down into an array
result = _get_title(property_name, v['items'], property_section)
if result != property_name:
return result
elif v.get('type') is not None and 'object' in v['type']:
# move down into an object
result = _get_title(property_name, v, property_section)
if result != property_name:
return result
# can't find the property_name in the schema, return itself
return property_name
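# Illustration with a hypothetical schema fragment: given
# schema = {'properties': {'name': {'title': 'Name', 'type': 'string'}}},
# _get_title('name', schema) returns 'Name', while an unknown property name is returned unchanged.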
def _rename_to_title(list_or_dict, schema):
"""
Rename the names in a list, or the keys in a dictionary, to their equivalent titles in the schema. If a name
carries a prefix from raising a child property up to a parent level, this will find the normal schema title and
prepend the prefix title to it.
:param list_or_dict: List of names or dictionary with keys to rename.
:type list_or_dict: list or dict
:param schema: The WRA Data Model Schema.
:type schema: dict
:return: A renamed list or keys in dictionary.
:rtype: list or dict
"""
prefixed_names = {}
# find all possible prefixed names and build a dict to contain it and the separator and title.
for key in PREFIX_DICT.keys():
for col in PREFIX_DICT[key]['keys_to_prefix']:
prefixed_name = key + PREFIX_DICT[key]['prefix_separator'] + col
prefixed_names[prefixed_name] = {'prefix_separator': PREFIX_DICT[key]['prefix_separator'],
'title_prefix': PREFIX_DICT[key]['title_prefix']}
if isinstance(list_or_dict, dict):
renamed_dict = {}
for k, v in list_or_dict.items():
if k in list(prefixed_names.keys()):
# break out the property name and the name, get the title and then add title_prefix to it.
property_section = k[0:k.find(prefixed_names[k]['prefix_separator'])]
property_name = k[k.find(prefixed_names[k]['prefix_separator']) + 1:]
if k in ['sensor_config.slope', 'sensor_config.offset', 'sensor_config.sensitivity',
'calibration.slope', 'calibration.offset', 'calibration.sensitivity']:
# Special cases don't add a title prefix as there is already one in the schema title
renamed_dict[_get_title(property_name, schema, property_section)] = v
else:
renamed_dict[prefixed_names[k]['title_prefix'] + _get_title(property_name, schema,
property_section)] = v
else:
# if not in the list of prefixed_names then just find the title as normal.
renamed_dict[_get_title(k, schema)] = v
return renamed_dict
elif isinstance(list_or_dict, list):
renamed_list = []
for name in list_or_dict:
if name in list(prefixed_names.keys()):
# break out the property name and the name, get the title and then add title_prefix to it.
property_section = name[0:name.find(prefixed_names[name]['prefix_separator'])]
property_name = name[name.find(prefixed_names[name]['prefix_separator']) + 1:]
if name in ['sensor_config.slope', 'sensor_config.offset', 'sensor_config.sensitivity',
'calibration.slope', 'calibration.offset', 'calibration.sensitivity']:
# Special cases don't add a title prefix as there is already one in the schema title
renamed_list.append(_get_title(property_name, schema, property_section))
else:
renamed_list.append(prefixed_names[name]['title_prefix'] + _get_title(property_name, schema,
property_section))
else:
# if not in the list of prefixed_names then just find the title as normal.
renamed_list.append(_get_title(name, schema))
return renamed_list
def _extract_keys_to_unique_list(lists_of_dictionaries):
"""
Extract the keys from a list of dictionaries and merge them into a single list of unique keys.
:param lists_of_dictionaries: List of dictionaries to pull unique keys from.
:type lists_of_dictionaries: list(dict)
:return: Merged list of keys into a unique list.
:rtype: list
"""
merged_list = list(lists_of_dictionaries[0].keys())
for idx, d in enumerate(lists_of_dictionaries):
if idx != 0:
merged_list = merged_list + list(set(list(d.keys())) - set(merged_list))
return merged_list
def _add_prefix(dictionary, property_section):
"""
Add a prefix to certain keys in the dictionary.
:param dictionary: The dictionary containing the keys to rename.
:type dictionary: dict
:param property_section: The section name used to look up the keys to prefix and the prefix separator in PREFIX_DICT.
:type property_section: str
:return: The dictionary with the keys prefixed.
:rtype: dict
"""
prefixed_dict = {}
for k, v in dictionary.items():
if k in PREFIX_DICT[property_section]['keys_to_prefix']:
prefixed_dict[property_section + PREFIX_DICT[property_section]['prefix_separator'] + k] = v
else:
prefixed_dict[k] = v
return prefixed_dict
def _merge_two_dicts(x, y):
"""
Given two dictionaries, merge them into a new dict as a shallow copy.
"""
z = x.copy()
z.update(y)
return z
def _filter_parent_level(dictionary):
"""
Pull only the parent level keys and values i.e. do not return any child lists or dictionaries or nulls/Nones.
:param dictionary: The dictionary to filter.
:type dictionary: dict
:return: A dictionary containing only the scalar, non-None values from the parent level.
:rtype: dict
"""
parent = {}
for key, value in dictionary.items():
if (type(value) != list) and (type(value) != dict) and (value is not None):
parent.update({key: value})
return parent
def _flatten_dict(dictionary, property_to_bring_up):
"""
Bring a child level in a dictionary up to the parent level.
This is usually done when there is an array of child items, in which case the parent level is repeated for each item.
:param dictionary: Dictionary with keys to prefix.
:type dictionary: dict
:param property_to_bring_up: The child property name to raise up to the parent level.
:type property_to_bring_up: str
:return: A list of merged dictionaries
:rtype: list(dict)
"""
result = []
parent = _filter_parent_level(dictionary)
for key, value in dictionary.items():
if (type(value) == list) and (key == property_to_bring_up):
for item in value:
child = _filter_parent_level(item)
child = _add_prefix(child, property_section=property_to_bring_up)
result.append(_merge_two_dicts(parent, child))
if (type(value) == dict) and (key == property_to_bring_up):
child = _filter_parent_level(value)
child = _add_prefix(child, property_section=property_to_bring_up)
# return a dictionary and not a list
result = _merge_two_dicts(parent, child)
# result.append(_merge_two_dicts(parent, child))
if not result:
result.append(parent)
return result
def _raise_child(dictionary, child_to_raise):
"""
:param dictionary: The dictionary to search through.
:param child_to_raise: The child property name to raise up to its parent level.
:return: A copy of the dictionary with the child property flattened into its parent level, or None if not found.
"""
# FUTURE DEV: ACCOUNT FOR 'DATE_OF_CALIBRATION' WHEN RAISING UP MULTIPLE CALIBRATIONS
if dictionary is None:
return None
new_dict = dictionary.copy()
for key, value in dictionary.items():
if (key == child_to_raise) and (value is not None):
# Found the key to raise. Flattening dictionary.
return _flatten_dict(dictionary, child_to_raise)
# didn't find the child to raise. search down through each nested dict or list
for key, value in dictionary.items():
if (type(value) == dict) and (value is not None):
# 'key' is a dict, looping through it's own keys.
flattened_dicts = _raise_child(value, child_to_raise)
if flattened_dicts:
new_dict[key] = flattened_dicts
return new_dict
elif (type(value) == list) and (value is not None):
# 'key' is a list, looping through it's items.
temp_list = []
for idx, item in enumerate(value):
flattened_dicts = _raise_child(item, child_to_raise)
if flattened_dicts:
if isinstance(flattened_dicts, list):
for flat_dict in flattened_dicts:
temp_list.append(flat_dict)
else:
# it is a dictionary so just append it
temp_list.append(flattened_dicts)
if temp_list:
# Temp_list is not empty. Replacing 'key' with this.
new_dict[key] = temp_list
return new_dict
return None
PREFIX_DICT = {
'mast_properties': {
'prefix_separator': '.',
'title_prefix': 'Mast ',
'keys_to_prefix': ['notes', 'update_at']
},
'vertical_profiler_properties': {
'prefix_separator': '.',
'title_prefix': 'Vert. Prof. Prop. ',
'keys_to_prefix': ['notes', 'update_at']
},
'lidar_config': {
'prefix_separator': '.',
'title_prefix': 'Lidar Specific Configs ',
'keys_to_prefix': ['date_from', 'date_to', 'notes', 'update_at']
},
'sensor_config': {
'prefix_separator': '.',
'title_prefix': 'Logger ',
'keys_to_prefix': ['height_m', 'height_reference_id', 'serial_number',
'slope', 'offset', 'sensitivity',
'notes', 'update_at']
},
'column_name': {
'prefix_separator': '.',
'title_prefix': 'Column Name ',
'keys_to_prefix': ['notes', 'update_at']
},
'sensor': {
'prefix_separator': '.',
'title_prefix': 'Sensor ',
'keys_to_prefix': ['serial_number', 'notes', 'update_at']
},
'calibration': {
'prefix_separator': '.',
'title_prefix': 'Calibration ',
'keys_to_prefix': ['slope', 'offset', 'sensitivity', 'report_file_name', 'report_link',
'uncertainty_k_factor', 'date_from', 'date_to', 'notes', 'update_at']
},
'calibration_uncertainty': {
'prefix_separator': '.',
'title_prefix': 'Calibration Uncertainty ',
'keys_to_prefix': []
},
'mounting_arrangement': {
'prefix_separator': '.',
'title_prefix': 'Mounting Arrangement ',
'keys_to_prefix': ['notes', 'update_at']
},
'interference_structures': {
'prefix_separator': '.',
'title_prefix': 'Interference Structure ',
'keys_to_prefix': ['structure_type_id', 'orientation_from_mast_centre_deg', 'orientation_reference_id',
'distance_from_mast_centre_mm',
'date_from', 'date_to', 'notes', 'update_at']
}
}
DATE_INSTEAD_OF_NONE = '2100-12-31'
SENSOR_TYPE_ORDER = ['anemometer', '2d_ultrasonic', '3d_ultrasonic', 'propeller_anemometer', 'gill_propeller',
'wind_vane', 'pyranometer', 'pyrheliometer', 'thermometer', 'hygrometer', 'barometer',
'rain_gauge', 'voltmeter', 'ammeter',
'ice_detection_sensor', 'fog_sensor', 'illuminance_sensor', 'gps', 'compass', 'other']
MEAS_TYPE_ORDER = ['wind_speed', 'wind_direction', 'vertical_wind_speed',
'global_horizontal_irradiance', 'direct_normal_irradiance', 'diffuse_horizontal_irradiance',
'global_tilted_irradiance', 'global_normal_irradiance', 'soiling_loss_index', 'illuminance',
'wind_speed_turbulence',
'air_temperature', 'temperature', 'relative_humidity', 'air_pressure', 'precipitation',
'ice_detection', 'voltage', 'current',
'fog', 'carrier_to_noise_ratio', 'doppler_spectral_broadening',
'gps_coordinates', 'orientation', 'compass_direction', 'true_north_offset',
'elevation', 'altitude', 'azimuth', 'status', 'counter', 'availability', 'quality',
'tilt_x', 'tilt_y', 'tilt_z', 'timestamp', 'other']
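# SENSOR_TYPE_ORDER and MEAS_TYPE_ORDER are only used for presentation: they are zipped into a
# rank lookup so that rows in the tables returned by get_table() are sorted in a sensible order
# (e.g. anemometers before wind vanes, wind_speed before wind_direction).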
class MeasurementStation:
"""
Create a Measurement Station object by loading in an IEA Wind Resource Assessment Data Model.
The IEA Wind: Task 43 Work Package 4 WRA Data Model was first released in January 2021. Versions of the
Data Model Schema can be found at https://github.com/IEA-Task-43/digital_wra_data_standard
The Schema associated with this data model file will be downloaded from GitHub and used to parse the data model.
:param wra_data_model: The filepath to an implementation of the WRA Data Model as a .json file or
a json formatted string or
a dictionary format of the data model.
:type wra_data_model: str or dict
:return: A simplified object to represent the data model
:rtype: MeasurementStation
"""
def __init__(self, wra_data_model):
self.__data_model = self._load_wra_data_model(wra_data_model)
version = self.__data_model.get('version')
self.__schema = self._get_schema(version=version)
self.__header = _Header(dm=self.__data_model, schema=self.__schema)
self.__meas_loc_data_model = self._get_meas_loc_data_model(dm=self.__data_model)
self.__meas_loc_properties = self.__get_properties()
self.__logger_configs = _LoggerConfigs(meas_loc_dm=self.__meas_loc_data_model,
schema=self.__schema, station_type=self.type)
self.__measurements = _Measurements(meas_loc_dm=self.__meas_loc_data_model, schema=self.__schema)
# self.__mast_section_geometry = _MastSectionGeometry()
def __getitem__(self, item):
return self.__meas_loc_properties[item]
def __iter__(self):
return iter(self.__meas_loc_properties)
def __repr__(self):
return repr(self.__meas_loc_properties)
@staticmethod
def _load_wra_data_model(wra_data_model):
"""
Load a IEA Wind Resource Assessment Data Model.
The IEA Wind: Task 43 Work Package 4 WRA Data Model was first released in January 2021. Versions of the
Data Model Schema can be found at https://github.com/IEA-Task-43/digital_wra_data_standard
*** SHOULD INCLUDE CHECKING AGAINST THE JSON SCHEMA (WHICH WOULD MEAN GETTING THE CORRECT VERSION FROM GITHUB)
AND MAKE SURE PROPER JSON
:param wra_data_model: The filepath to an implementation of the WRA Data Model as a .json file or
a json formatted string or
a dictionary format of the data model.
:return: Python dictionary of the data model.
:rtype: dict
"""
# Assess whether filepath or json str sent.
dm = dict()
if isinstance(wra_data_model, str) and '.json' == wra_data_model[-5:]:
if _is_file(wra_data_model):
with open(wra_data_model) as json_file:
dm = json.load(json_file)
elif isinstance(wra_data_model, str):
dm = json.loads(wra_data_model)
else:
# it is most likely already a dict so return itself
dm = wra_data_model
return dm
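# The three accepted input forms, for illustration (hypothetical filenames/content):
#   MeasurementStation('mast_1_data_model.json')    # filepath ending in '.json'
#   MeasurementStation(json_formatted_string)       # a str holding the json document itself
#   MeasurementStation(already_parsed_dict)         # a dict, e.g. from json.load()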
@staticmethod
def _get_schema(version):
"""
Get the JSON Schema from GitHub based on the version number in the data model.
:param version: The version from the header information from the data model json file.
:type version: str
:return: The IEA Wind Task 43 WRA Data Model Schema.
:rtype: dict
"""
schema_link = 'https://github.com/IEA-Task-43/digital_wra_data_standard/releases/download/v{}' \
'/iea43_wra_data_model.schema.json'
response = requests.get(schema_link.format(version))
if response.status_code == 404:
raise ValueError('Schema could not be downloaded from GitHub. Please check the version number in the '
'data model json file.')
schema = json.loads(response.content)
return schema
@staticmethod
def _get_meas_loc_data_model(dm):
if len(dm.get('measurement_location')) > 1:
raise Exception('More than one measurement location found in the data model. Only processing '
'the first one found. Please remove extra measurement locations.')
return dm.get('measurement_location')[0]
@property
def data_model(self):
"""
The data model from the measurement_location onwards i.e. excluding the header.
:return:
"""
return self.__meas_loc_data_model
@property
def schema(self):
return self.__schema
@property
def name(self):
return self.__meas_loc_data_model.get('name')
@property
def lat(self):
return self.__meas_loc_data_model.get('latitude_ddeg')
@property
def long(self):
return self.__meas_loc_data_model.get('longitude_ddeg')
@property
def type(self):
return self.__meas_loc_data_model.get('measurement_station_type_id')
def __get_properties(self):
meas_loc_prop = []
if self.type == 'mast':
meas_loc_prop = _flatten_dict(self.__meas_loc_data_model, property_to_bring_up='mast_properties')
elif self.type in ['lidar', 'sodar', 'flidar']:
meas_loc_prop = _flatten_dict(self.__meas_loc_data_model,
property_to_bring_up='vertical_profiler_properties')
return meas_loc_prop
def get_table(self, horizontal_table_orientation=False):
"""
Get a table representation of the attributes for the measurement station and its mast or vertical profiler
properties.
:param horizontal_table_orientation: horizontal or vertical table orientation.
:type horizontal_table_orientation: bool
:return: A table showing all the information for the measurement station. If a
horizontal table then a pd.DataFrame is returned. If a vertical table
then a styled pd.DataFrame is returned which does not have the same
properties as a standard DataFrame.
:rtype: pd.DataFrame or pd.io.formats.style.Styler
"""
list_for_df = self.__meas_loc_properties
df = pd.DataFrame()
if horizontal_table_orientation:
list_for_df_with_titles = []
if isinstance(list_for_df, dict):
list_for_df_with_titles = [_rename_to_title(list_or_dict=list_for_df, schema=self.__schema)]
elif isinstance(list_for_df, list):
for row in list_for_df:
list_for_df_with_titles.append(_rename_to_title(list_or_dict=row, schema=self.__schema))
df = pd.DataFrame(list_for_df_with_titles, columns=_extract_keys_to_unique_list(list_for_df_with_titles))
df.set_index('Name', inplace=True)
elif horizontal_table_orientation is False:
if isinstance(list_for_df, dict):
# if a dictionary, it only has 1 row of data
titles = list(_rename_to_title(list_or_dict=list_for_df, schema=self.__schema).keys())
df = pd.DataFrame({1: list(list_for_df.values())}, index=titles)
elif isinstance(list_for_df, list):
for idx, row in enumerate(list_for_df):
titles = list(_rename_to_title(list_or_dict=row, schema=self.__schema).keys())
df_temp = pd.DataFrame({idx + 1: list(row.values())}, index=titles)
df = pd.concat([df, df_temp], axis=1, sort=False)
df = df.style.set_properties(**{'text-align': 'left'})
df = df.set_table_styles([dict(selector='th', props=[('text-align', 'left')])])
return df
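# Example usage (assuming 'mm1' is a MeasurementStation instance): mm1.get_table() returns a
# vertical, styled table, while mm1.get_table(horizontal_table_orientation=True) returns a plain
# DataFrame indexed by 'Name'.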
@property
def properties(self):
return self.__meas_loc_properties
@property
def header(self):
# return the header info
return self.__header
@property
def logger_configs(self):
return self.__logger_configs
@property
def measurements(self):
return self.__measurements
@property
def mast_section_geometry(self):
return 'Not yet implemented.'
# return self.__mast_section_geometry
class _Header:
def __init__(self, dm, schema):
"""
Extract the header info from the data model and return either a dict or table
"""
self._schema = schema
keys = []
values = []
header_dict = {}
for key, value in dm.items():
if key != 'measurement_location':
keys.append(key)
values.append(value)
header_dict[key] = value
self._header_properties = header_dict
self._keys = keys
self._values = values
def __getitem__(self, item):
return self._header_properties[item]
def __iter__(self):
return iter(self._header_properties)
def __repr__(self):
return repr(self._header_properties)
@property
def properties(self):
return self._header_properties
def get_table(self):
# get titles for each property
titles = []
for key in self._keys:
titles.append(_get_title(key, self._schema))
df = pd.DataFrame({'': self._values}, index=titles)
df_styled = df.style.set_properties(**{'text-align': 'left'})
df_styled = df_styled.set_table_styles([dict(selector='th', props=[('text-align', 'left')])])
return df_styled
class _LoggerConfigs:
def __init__(self, meas_loc_dm, schema, station_type):
self._log_cfg_data_model = meas_loc_dm.get('logger_main_config')
self._schema = schema
self._type = station_type
self.__log_cfg_properties = self.__get_properties()
def __getitem__(self, item):
return self.__log_cfg_properties[item]
def __iter__(self):
return iter(self.__log_cfg_properties)
def __repr__(self):
return repr(self.__log_cfg_properties)
@property
def data_model(self):
"""
This is the original data model unchanged from this level down.
:return: The data model from this level down.
:rtype: Dict or List
"""
return self._log_cfg_data_model
def __get_properties(self):
log_cfg_props = []
if self._type == 'mast':
# if mast, there are no child dictionaries
log_cfg_props = self._log_cfg_data_model # logger config data model is already a list
elif self._type in ['lidar', 'flidar']:
for log_config in self._log_cfg_data_model:
log_configs_flat = _flatten_dict(log_config, property_to_bring_up='lidar_config')
for log_config_flat in log_configs_flat:
log_cfg_props.append(log_config_flat)
return log_cfg_props
def get_table(self, horizontal_table_orientation=False):
"""
Get a table representation of the attributes for the logger configurations.
If a LiDAR then the lidar specific configurations are also presented.
:param horizontal_table_orientation: horizontal or vertical table orientation.
:type horizontal_table_orientation: bool
:return: A table showing all the information for the measurement station. If a
horizontal table then a pd.DataFrame is returned. If a vertical table
then a styled pd.DataFrame is returned which does not have the same
properties as a standard DataFrame.
:rtype: pd.DataFrame or pd.io.formats.style.Styler
"""
list_for_df = self.__log_cfg_properties
df = pd.DataFrame()
if horizontal_table_orientation:
list_for_df_with_titles = []
for row in list_for_df:
list_for_df_with_titles.append(_rename_to_title(list_or_dict=row, schema=self._schema))
df = pd.DataFrame(list_for_df_with_titles, columns=_extract_keys_to_unique_list(list_for_df_with_titles))
df.set_index('Logger Name', inplace=True)
elif horizontal_table_orientation is False:
for idx, row in enumerate(list_for_df):
titles = list(_rename_to_title(list_or_dict=row, schema=self._schema).keys())
df_temp = pd.DataFrame({idx + 1: list(row.values())}, index=titles)
df = pd.concat([df, df_temp], axis=1, sort=False)
df = df.style.set_properties(**{'text-align': 'left'})
df = df.set_table_styles([dict(selector='th', props=[('text-align', 'left')])])
return df
@property
def properties(self):
return self.__log_cfg_properties
class _Measurements:
def __init__(self, meas_loc_dm, schema):
# for meas_loc in dm['measurement_location']:
self._meas_data_model = meas_loc_dm.get('measurement_point')
self._schema = schema
self.__meas_properties = self.__get_properties()
self.__meas_dict = self.__get_properties_as_dict()
# Making _Measurements emulate a dictionary.
# Not using super(_Measurements, self).__init__(*arg, **kw) as I do not want the user to __setitem__,
# __delitem__, clear, update or pop. Therefore, writing out the specific behaviour I want for the dictionary.
def __getitem__(self, key):
return self.__meas_dict[key]
def __iter__(self):
return iter(self.__meas_dict)
def __repr__(self):
return repr(self.__meas_dict)
def __len__(self):
return len(self.__meas_dict)
def __contains__(self, key):
return key in self.__meas_dict
# Don't allow copy as user needs to use copy.deepcopy to copy the dictionary, might also confuse with the object.
# def copy(self):
# return self.__meas_dict.copy()
def keys(self):
return self.__meas_dict.keys()
def values(self):
return self.__meas_dict.values()
def items(self):
return self.__meas_dict.items()
@property
def data_model(self):
return self._meas_data_model
def __get_parent_properties(self):
meas_props = []
for meas_point in self._meas_data_model:
meas_props.append(_filter_parent_level(meas_point))
return meas_props
@property
def properties(self):
return self.__meas_properties
@property
def names(self):
"""
The names of all the measurements.
:return: The list of names.
:rtype: list(str)
"""
return self.__get_names()
@property
def wspds(self):
return self.__get_properties_as_dict(measurement_type_id='wind_speed')
@property
def wspd_names(self):
return self.__get_names(measurement_type_id='wind_speed')
@property
def wspd_heights(self):
return self.get_heights(measurement_type_id='wind_speed')
@property
def wdirs(self):
return self.__get_properties_as_dict(measurement_type_id='wind_direction')
@property
def wdir_names(self):
return self.__get_names(measurement_type_id='wind_direction')
@property
def wdir_heights(self):
return self.get_heights(measurement_type_id='wind_direction')
@staticmethod
def __meas_point_merge(sensor_cfgs, sensors=None, mount_arrgmts=None):
"""
Merge the properties from sensor_cfgs, sensors and mounting_arrangements. This will account for when
each property was changed over time.
:param sensor_cfgs: Sensor cfgs properties
:type sensor_cfgs: list
:param sensors: Sensor properties
:type sensors: list
:param mount_arrgmts: Mounting arrangement properties
:type mount_arrgmts: list
:return: The properties merged together.
:rtype: list(dict)
"""
sensor_cfgs = _replace_none_date(sensor_cfgs)
sensors = _replace_none_date(sensors)
mount_arrgmts = _replace_none_date(mount_arrgmts)
date_from = [sen_config.get('date_from') for sen_config in sensor_cfgs]
date_to = [sen_config.get('date_to') for sen_config in sensor_cfgs]
if sensors is not None:
for sensor in sensors:
date_from.append(sensor.get('date_from'))
date_to.append(sensor.get('date_to'))
if mount_arrgmts is not None:
for mount_arrgmt in mount_arrgmts:
date_from.append(mount_arrgmt['date_from'])
date_to.append(mount_arrgmt['date_to'])
date_from.extend(date_to)
dates = list(set(date_from))
dates.sort()
meas_points_merged = []
for i in range(len(dates) - 1):
good_sen_config = {}
for sen_config in sensor_cfgs:
if (sen_config['date_from'] <= dates[i]) & (sen_config.get('date_to') > dates[i]):
good_sen_config = sen_config.copy()
if good_sen_config != {}:
if sensors is not None:
for sensor in sensors:
if (sensor['date_from'] <= dates[i]) & (sensor['date_to'] > dates[i]):
good_sen_config.update(sensor)
if mount_arrgmts is not None:
for mount_arrgmt in mount_arrgmts:
if (mount_arrgmt['date_from'] <= dates[i]) & (mount_arrgmt['date_to'] > dates[i]):
good_sen_config.update(mount_arrgmt)
good_sen_config['date_to'] = dates[i + 1]
good_sen_config['date_from'] = dates[i]
meas_points_merged.append(good_sen_config)
# replace 'date_to' if equals to 'DATE_INSTEAD_OF_NONE'
for meas_point in meas_points_merged:
if meas_point.get('date_to') is not None and meas_point.get('date_to') == DATE_INSTEAD_OF_NONE:
meas_point['date_to'] = None
return meas_points_merged
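# Worked example (hypothetical dates, assuming _replace_none_date substitutes DATE_INSTEAD_OF_NONE
# for open-ended 'date_to' values, as the clean-up above suggests):
#   sensor_cfgs: one config from '2019-01-01' with no end date
#   sensors:     'SN001' from '2019-01-01' to '2020-06-01', then 'SN002' from '2020-06-01' onwards
# The sorted unique dates are ['2019-01-01', '2020-06-01', DATE_INSTEAD_OF_NONE], giving two merged
# periods: 2019-01-01 to 2020-06-01 (with SN001) and 2020-06-01 to None (with SN002), each carrying
# the same sensor config properties.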
def __get_properties(self):
meas_props = []
for meas_point in self._meas_data_model:
# col_names_raised = _raise_child(meas_point, child_to_raise='column_name')
# sen_cfgs = _raise_child(col_names_raised, child_to_raise='sensor_config')
sen_cfgs = _raise_child(meas_point, child_to_raise='sensor_config')
calib_raised = _raise_child(meas_point, child_to_raise='calibration')
if calib_raised is None:
sensors = _raise_child(meas_point, child_to_raise='sensor')
else:
sensors = _raise_child(calib_raised, child_to_raise='sensor')
mounting_arrangements = _raise_child(meas_point, child_to_raise='mounting_arrangement')
if mounting_arrangements is None:
meas_point_merged = self.__meas_point_merge(sensor_cfgs=sen_cfgs, sensors=sensors)
else:
meas_point_merged = self.__meas_point_merge(sensor_cfgs=sen_cfgs, sensors=sensors,
mount_arrgmts=mounting_arrangements)
for merged_meas_point in meas_point_merged:
meas_props.append(merged_meas_point)
return meas_props
def __get_properties_by_type(self, measurement_type_id):
merged_properties = copy.deepcopy(self.__meas_properties)
meas_list = []
for meas_point in merged_properties:
meas_type = meas_point.get('measurement_type_id')
if meas_type is not None and meas_type == measurement_type_id:
meas_list.append(meas_point)
return meas_list
def __get_properties_as_dict(self, measurement_type_id=None):
"""
Get the flattened properties as a dictionary with name as the key. This is for easy use for accessing a
measurement point.
e.g. mm1.measurements['Spd1']
:return: Flattened properties as a dictionary
:rtype: dict
"""
meas_dict = {}
merged_properties = copy.deepcopy(self.__meas_properties)
for meas_point in merged_properties:
meas_point_name = meas_point['name']
if meas_point['measurement_type_id'] == measurement_type_id or measurement_type_id is None:
if meas_point_name in meas_dict.keys():
meas_dict[meas_point_name].append(meas_point)
else:
meas_dict[meas_point_name] = [meas_point]
return meas_dict
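# Note: each value in the returned dict is a list, because a single measurement name (e.g. 'Spd1')
# can map to several merged periods when its sensor, calibration or mounting changed over time.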
def __get_table_for_cols(self, columns_to_show):
"""
Get table of measurements for specific columns.
:param columns_to_show: Columns required to show in table.
:type columns_to_show: list(str)
:return: Table as a pandas DataFrame
:rtype: pd.DataFrame
"""
temp_df = pd.DataFrame(self.__meas_properties)
# select the common columns that are available
avail_cols = [col for col in columns_to_show if col in temp_df.columns]
if not avail_cols:
raise KeyError('No data to show from the list of columns provided')
# Drop all rows that have no data for the avail_cols
temp_df.dropna(axis=0, subset=avail_cols, how='all', inplace=True)
if temp_df.empty:
raise KeyError('No data to show from the list of columns provided')
# Name needs to be included in the grouping but 'date_from' and 'date_to' should not be
# as we filter for them later
required_in_avail_cols = {'include': ['name'], 'remove': ['date_from', 'date_to']}
for include_col in required_in_avail_cols['include']:
if include_col not in avail_cols:
avail_cols.insert(0, include_col)
for remove_col in required_in_avail_cols['remove']:
if remove_col in avail_cols:
avail_cols.remove(remove_col)
# Remove duplicates resulting from other info being dropped.
temp_df.sort_values(['name', 'date_from'], ascending=[True, True], inplace=True)
temp_df.fillna('-', inplace=True) # groupby drops nan so need to fill them in
# group duplicate data for the columns available
grouped_by_avail_cols = temp_df.groupby(avail_cols)
# get date_to from the last row in each group to assign to the first row.
new_date_to = grouped_by_avail_cols.last()['date_to']
df = grouped_by_avail_cols.first()[['date_from', 'date_to']]
df['date_to'] = new_date_to
df.reset_index(level=avail_cols, inplace=True)
df.sort_values(['name', 'date_from'], ascending=[True, True], inplace=True)
# get titles
title_cols = _rename_to_title(list_or_dict=list(df.columns), schema=self._schema)
df.columns = title_cols
df.set_index('Name', inplace=True)
df.replace(DATE_INSTEAD_OF_NONE, '-', inplace=True)
return df
def get_table(self, detailed=False, wind_speeds=False, wind_directions=False, calibrations=False,
mounting_arrangements=False, columns_to_show=None):
"""
Get tables to show information about the measurements made.
:param detailed: For a more detailed table that includes how the sensor is programmed into the logger,
information about the sensor itself and how it is mounted on the mast if it was.
:type detailed: bool
:param wind_speeds: Wind speed specific details.
:type wind_speeds: bool
:param wind_directions: Wind direction specific details.
:type wind_directions: bool
:param calibrations: Calibration specific details.
:type calibrations: bool
:param mounting_arrangements: Mounting arrangement specific details.
:type mounting_arrangements: bool
:param columns_to_show: Optionally provide a list of column names you want to see in a table. This list
should be pulled from the list of keys available in the measurements.properties.
'name', 'date_from' and 'date_to' are always inserted so no need to include them
in your list.
:type columns_to_show: list(str) or None
:return: A table showing information about the measurements made by this measurement station.
:rtype: pd.DataFrame
**Example usage**
::
import brightwind as bw
mm1 = bw.MeasurementStation(bw.demo_datasets.demo_wra_data_model)
mm1.measurements.get_table()
To get a more detailed table::
mm1.measurements.get_table(detailed=True)
To get wind speed specific details::
mm1.measurements.get_table(wind_speeds=True)
To get wind direction specific details::
mm1.measurements.get_table(wind_directions=True)
To get calibration specific details::
mm1.measurements.get_table(calibrations=True)
To get mounting specific details::
mm1.measurements.get_table(mounting_arrangements=True)
To make your own table::
columns = ['calibration.slope', 'calibration.offset', 'calibration.report_file_name', 'date_of_calibration']
mm1.measurements.get_table(columns_to_show=columns)
"""
df = pd.DataFrame()
if detailed is False and wind_speeds is False and wind_directions is False \
and calibrations is False and mounting_arrangements is False and columns_to_show is None:
# default summary table
list_for_df = self.__get_parent_properties()
list_for_df_with_titles = []
for row in list_for_df:
list_for_df_with_titles.append(_rename_to_title(list_or_dict=row, schema=self._schema))
df = pd.DataFrame(list_for_df_with_titles, columns=_extract_keys_to_unique_list(list_for_df_with_titles))
# order rows
order_index = dict(zip(MEAS_TYPE_ORDER, range(len(MEAS_TYPE_ORDER))))
df['meas_type_rank'] = df['Measurement Type'].map(order_index)
df.sort_values(['meas_type_rank', 'Height [m]'], ascending=[True, False], inplace=True)
df.drop(columns='meas_type_rank', inplace=True)
df.set_index('Name', inplace=True)
df.fillna('-', inplace=True)
elif detailed is True:
cols_required = ['name', 'oem', 'model', 'sensor_type_id', 'sensor.serial_number',
'height_m', 'boom_orientation_deg',
'date_from', 'date_to', 'connection_channel', 'measurement_units_id',
'sensor_config.slope', 'sensor_config.offset', 'calibration.slope', 'calibration.offset',
'sensor_config.notes', 'sensor.notes']
df = pd.DataFrame(self.__meas_properties)
# get what is common from both lists and use this to filter df
cols_required = [col for col in cols_required if col in df.columns]
df = df[cols_required]
# order rows
if 'sensor_type_id' in df.columns:
order_index = dict(zip(SENSOR_TYPE_ORDER, range(len(SENSOR_TYPE_ORDER))))
df['sensor_rank'] = df['sensor_type_id'].map(order_index)
df.sort_values(['sensor_rank', 'height_m'], ascending=[True, False], inplace=True)
df.drop(columns='sensor_rank', inplace=True)
else:
df.sort_values(['name', 'height_m'], ascending=[True, False], inplace=True)
# get titles
title_cols = _rename_to_title(list_or_dict=list(df.columns), schema=self._schema)
df.columns = title_cols
# tidy up
df.set_index('Name', inplace=True)
df.fillna('-', inplace=True)
df.replace(DATE_INSTEAD_OF_NONE, '-', inplace=True)
elif wind_speeds is True:
cols_required = ['name', 'measurement_type_id', 'oem', 'model', 'sensor.serial_number', 'is_heated',
'height_m', 'boom_orientation_deg', 'mounting_type_id',
'date_from', 'date_to', 'connection_channel',
'sensor_config.slope', 'sensor_config.offset', 'calibration.slope', 'calibration.offset',
'sensor_config.notes', 'sensor.notes']
df = pd.DataFrame(self.__meas_properties)
# get what is common from both lists and use this to filter df
cols_required = [col for col in cols_required if col in df.columns]
df = df[cols_required]
df = df[df['measurement_type_id'] == 'wind_speed']
df.drop(columns='measurement_type_id', inplace=True)
# order rows
df.sort_values(['height_m', 'name'], ascending=[False, True], inplace=True)
# get titles
title_cols = _rename_to_title(list_or_dict=list(df.columns), schema=self._schema)
df.columns = title_cols
# tidy up
df.set_index('Name', inplace=True)
df.fillna('-', inplace=True)
df.replace(DATE_INSTEAD_OF_NONE, '-', inplace=True)
elif wind_directions is True:
cols_required = ['name', 'measurement_type_id', 'oem', 'model', 'sensor.serial_number', 'is_heated',
'height_m', 'boom_orientation_deg', 'vane_dead_band_orientation_deg',
'orientation_reference_id',
'date_from', 'date_to', 'connection_channel',
'sensor_config.slope', 'sensor_config.offset',
'sensor_config.notes', 'sensor.notes']
df = pd.DataFrame(self.__meas_properties)
#!/usr/bin/env python
# coding: utf-8
# In[1]:
# import warnings
# warnings.filterwarnings('ignore')
# In[2]:
# import libraries
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from scipy import sparse
get_ipython().run_line_magic('matplotlib', 'inline')
from sklearn.model_selection import RandomizedSearchCV
from scipy.stats import uniform
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import LinearSVC
from sklearn.calibration import CalibratedClassifierCV
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from xgboost import XGBClassifier
from catboost import CatBoostClassifier
import pickle
# # Amazon Employee Access Challenge
# In[3]:
train = pd.read_csv('data/train.csv')
test = pd.read_csv('data/test.csv')
# In[4]:
train.shape
# In[5]:
test.shape
# In[6]:
y_train = train['ACTION']
# In[7]:
y_train.shape
# In[8]:
train_data = train.drop('ACTION', axis=1)
train_data.shape
# In[9]:
test_data = test.drop('id', axis=1)
test_data.shape
# ## Common Variables
# In[10]:
# define variables
random_state = 42
cv = 5
scoring = 'roc_auc'
verbose=2
# ## Common functions
# In[11]:
def save_submission(predictions, filename):
'''
Save predictions into csv file
'''
global test
submission = pd.DataFrame()
submission["Id"] = test["id"]
submission["ACTION"] = predictions
filepath = "result/sampleSubmission_"+filename
submission.to_csv(filepath, index = False)
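# For example, save_submission(model.predict_proba(test_data)[:, 1], 'knn_raw.csv') writes the
# predicted probabilities to result/sampleSubmission_knn_raw.csv in the two-column format (Id, ACTION).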
# In[12]:
def print_graph(results, param1, param2, xlabel, ylabel, title='Plot showing the ROC_AUC score for various hyper parameter values'):
'''
Plot the graph
'''
plt.plot(results[param1],results[param2]);
plt.grid();
plt.xlabel(xlabel);
plt.ylabel(ylabel);
plt.title(title);
# In[13]:
def get_rf_params():
'''
Return dictionary of parameters for random forest
'''
params = {
'n_estimators':[10,20,50,100,200,500,700,1000],
'max_depth':[1,2,5,10,12,15,20,25],
'max_features':[1,2,3,4,5],
'min_samples_split':[2,5,7,10,20]
}
return params
# In[14]:
def get_xgb_params():
'''
Return dictionary of parameters for xgboost
'''
params = {
'n_estimators': [10,20,50,100,200,500,750,1000],
'learning_rate': uniform(0.01, 0.6),
'subsample': uniform(),
'max_depth': [3, 4, 5, 6, 7, 8, 9],
'colsample_bytree': uniform(),
'min_child_weight': [1, 2, 3, 4]
}
return params
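# Note: the scipy.stats.uniform entries above are frozen distributions, so RandomizedSearchCV
# samples them continuously (uniform(0.01, 0.6) covers 0.01 to 0.61), while plain lists such as
# 'max_depth' are sampled uniformly at random from their values.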
# ### We will try the following models
#
# 1. KNN
# 2. SVM
# 3. Logistic Regression
# 4. Random Forest
# 5. Xgboost
# ## Build Models on the raw data
# ## 1.1 KNN with raw features
# In[15]:
parameters={'n_neighbors':np.arange(1,100, 5)}
clf = RandomizedSearchCV(KNeighborsClassifier(n_jobs=-1),parameters,random_state=random_state,cv=cv,verbose=verbose,scoring=scoring,n_jobs=-1)
best_model = clf.fit(train_data,y_train)
# In[16]:
results = pd.DataFrame.from_dict(best_model.cv_results_)
results=results.sort_values('param_n_neighbors')
results
# In[17]:
print_graph(results, 'param_n_neighbors', 'mean_test_score', 'Hyperparameter - No. of neighbors', 'Test score')
# In[18]:
best_c=best_model.best_params_['n_neighbors']
best_c
# In[19]:
model = KNeighborsClassifier(n_neighbors=best_c,n_jobs=-1)
model.fit(train_data,y_train)
# In[20]:
predictions = model.predict_proba(test_data)[:,1]
save_submission(predictions, "knn_raw.csv")
# ## 1.2 SVM with raw feature
# In[21]:
C_val = uniform(loc=0, scale=4)
model= LinearSVC(verbose=verbose,random_state=random_state,class_weight='balanced',max_iter=2000)
parameters={'C':C_val}
clf = RandomizedSearchCV(model,parameters,random_state=random_state,cv=cv,verbose=verbose,scoring=scoring,n_jobs=-1)
best_model = clf.fit(train_data,y_train)
# In[22]:
best_c=best_model.best_params_['C']
best_c
# In[23]:
results = pd.DataFrame.from_dict(best_model.cv_results_)
results=results.sort_values('param_C')
results
# In[24]:
print_graph(results, 'param_C', 'mean_test_score', 'Hyperparameter - C', 'Test score')
# In[25]:
#https://stackoverflow.com/questions/26478000/converting-linearsvcs-decision-function-to-probabilities-scikit-learn-python
model = LinearSVC(C=best_c,verbose=verbose,random_state=random_state,class_weight='balanced',max_iter=2000)
model = CalibratedClassifierCV(model)
model.fit(train_data,y_train)
# In[26]:
predictions = model.predict_proba(test_data)[:,1]
save_submission(predictions, 'svm_raw.csv')
# ## 1.3 Logistic Regression with Raw Feature
# In[27]:
C_val = uniform(loc=0, scale=4)
lr= LogisticRegression(verbose=verbose,random_state=random_state,class_weight='balanced',solver='lbfgs',max_iter=500,n_jobs=-1)
parameters={'C':C_val}
clf = RandomizedSearchCV(lr,parameters,random_state=random_state,cv=cv,verbose=verbose,n_iter=100,scoring=scoring,n_jobs=-1)
best_model = clf.fit(train_data,y_train)
# In[28]:
best_c=best_model.best_params_['C']
best_c
# In[29]:
results = pd.DataFrame.from_dict(best_model.cv_results_)
results=results.sort_values('param_C')
results
# In[30]:
print_graph(results, 'param_C', 'mean_test_score', 'Hyperparameter - C', 'Test score')
# In[31]:
model = LogisticRegression(C=best_c,verbose=verbose,n_jobs=-1,random_state=random_state,class_weight='balanced',solver='lbfgs')
model.fit(train_data,y_train)
# In[32]:
predictions = model.predict_proba(test_data)[:,1]
save_submission(predictions, 'lr_raw.csv')
# ## 1.4 Random Forest with Raw Feature
# In[33]:
rfc = RandomForestClassifier(random_state=random_state,class_weight='balanced',n_jobs=-1)
clf = RandomizedSearchCV(rfc,get_rf_params(),random_state=random_state,cv=cv,verbose=verbose,n_iter=100,scoring=scoring,n_jobs=-1)
best_model = clf.fit(train_data,y_train)
# In[34]:
results = pd.DataFrame(best_model.cv_results_)
results.sort_values('mean_test_score',ascending=False,inplace=True)
param_keys=['param_'+str(each) for each in get_rf_params().keys()]
param_keys.append('mean_test_score')
results[param_keys].head(10)
# In[35]:
n_estimators=clf.best_params_['n_estimators']
max_features=clf.best_params_['max_features']
max_depth=clf.best_params_['max_depth']
min_samples_split=clf.best_params_['min_samples_split']
n_estimators,max_features,max_depth,min_samples_split
# In[36]:
model=RandomForestClassifier(n_estimators=n_estimators,max_depth=max_depth,max_features=max_features,
min_samples_split=min_samples_split,
random_state=random_state,class_weight='balanced',n_jobs=-1)
model.fit(train_data,y_train)
# In[37]:
features=train_data.columns
importance=model.feature_importances_
features=pd.DataFrame({'features':features,'value':importance})
features=features.sort_values('value',ascending=False)
sns.barplot(x='value', y='features', data=features);
plt.title('Feature Importance');
# ## Features Observations:
#
# 1. MGR_ID is the most important feature followed by RESOURCE and ROLE_DEPTNAME
# In[38]:
predictions = model.predict_proba(test_data)[:,1]
save_submission(predictions, 'rf_raw.csv')
# ## 1.5 Xgboost with Raw Feature
# In[39]:
xgb = XGBClassifier()
clf = RandomizedSearchCV(xgb,get_xgb_params(),random_state=random_state,cv=cv,verbose=verbose,n_iter=100,scoring=scoring,n_jobs=-1)
best_model=clf.fit(train_data,y_train)
# In[40]:
results = pd.DataFrame(best_model.cv_results_)
results.sort_values('mean_test_score',ascending=False,inplace=True)
param_keys=['param_'+str(each) for each in get_xgb_params().keys()]
param_keys.append('mean_test_score')
results[param_keys].head(10)
# In[41]:
colsample_bytree = clf.best_params_['colsample_bytree']
learning_rate=clf.best_params_['learning_rate']
max_depth=clf.best_params_['max_depth']
min_child_weight=clf.best_params_['min_child_weight']
n_estimators=clf.best_params_['n_estimators']
subsample=clf.best_params_['subsample']
colsample_bytree,learning_rate,max_depth,min_child_weight,n_estimators,subsample
# In[42]:
model = XGBClassifier(colsample_bytree=colsample_bytree,learning_rate=learning_rate,max_depth=max_depth,
min_child_weight=min_child_weight,n_estimators=n_estimators,subsample=subsample,n_jobs=-1)
model.fit(train_data,y_train)
# In[43]:
features=train_data.columns
importance=model.feature_importances_
features=pd.DataFrame({'features':features,'value':importance})
features=features.sort_values('value',ascending=False)
sns.barplot(x='value', y='features', data=features);
plt.title('Feature Importance');
# In[44]:
predictions = model.predict_proba(test_data)[:,1]
save_submission(predictions, 'xgb_raw.csv')
# In[45]:
from prettytable import PrettyTable
x = PrettyTable(['Model', 'Feature', 'Private Score', 'Public Score'])
x.add_row(['KNN','Raw', 0.67224, 0.68148])
x.add_row(['SVM', 'Raw', 0.50286, 0.51390])
x.add_row(['Logistic Regression', 'Raw', 0.53857, 0.53034])
x.add_row(['Random Forest', 'Raw', 0.87269, 0.87567])
x.add_row(['Xgboost', 'Raw', 0.86988, 0.87909])
print(x)
# # Observations:
#
# 1. Xgboost performs best on the raw features
# 2. Random forest also performs well on raw features
# 3. Tree based models perform better than linear models for raw features
# ## Build model on one hot encoded features
# ### 2.1 KNN with one hot encoded features
# In[46]:
train_ohe = sparse.load_npz('data/train_ohe.npz')
test_ohe = sparse.load_npz('data/test_ohe.npz')
train_ohe.shape, test_ohe.shape, y_train.shape
# In[47]:
parameters={'n_neighbors':np.arange(1,100, 5)}
clf = RandomizedSearchCV(KNeighborsClassifier(n_jobs=-1),parameters,random_state=random_state,cv=cv,verbose=verbose,scoring=scoring,n_jobs=4)
best_model = clf.fit(train_ohe,y_train)
# In[48]:
results = pd.DataFrame.from_dict(best_model.cv_results_)
results=results.sort_values('param_n_neighbors')
results
# In[49]:
print_graph(results, 'param_n_neighbors', 'mean_test_score', 'Hyperparameter - No. of neighbors', 'Test score')
# In[50]:
best_c=best_model.best_params_['n_neighbors']
best_c
# In[51]:
model = KNeighborsClassifier(n_neighbors=best_c,n_jobs=-1)
model.fit(train_ohe,y_train)
# In[52]:
predictions = model.predict_proba(test_ohe)[:,1]
save_submission(predictions, "knn_ohe.csv")
# ## 2.2 SVM with one hot encoded features
# In[53]:
C_val = uniform(loc=0, scale=4)
model= LinearSVC(verbose=verbose,random_state=random_state,class_weight='balanced',max_iter=2000)
parameters={'C':C_val}
clf = RandomizedSearchCV(model,parameters,random_state=random_state,cv=cv,verbose=verbose,scoring=scoring,n_jobs=-1)
best_model = clf.fit(train_ohe,y_train)
# In[54]:
best_c=best_model.best_params_['C']
best_c
# In[55]:
results = pd.DataFrame.from_dict(best_model.cv_results_)
results=results.sort_values('param_C')
results
# In[56]:
print_graph(results, 'param_C', 'mean_test_score', 'Hyperparameter - C', 'Test score')
# In[57]:
#https://stackoverflow.com/questions/26478000/converting-linearsvcs-decision-function-to-probabilities-scikit-learn-python
model = LinearSVC(C=best_c,verbose=verbose,random_state=random_state,class_weight='balanced',max_iter=2000)
model = CalibratedClassifierCV(model)
model.fit(train_ohe,y_train)
# In[58]:
predictions = model.predict_proba(test_ohe)[:,1]
save_submission(predictions, 'svm_ohe.csv')
# ## 2.3 Logistic Regression with one hot encoded features
# In[59]:
C_val = uniform(loc=0, scale=4)
lr= LogisticRegression(verbose=verbose,random_state=random_state,class_weight='balanced',solver='lbfgs',max_iter=500,n_jobs=-1)
parameters={'C':C_val}
clf = RandomizedSearchCV(lr,parameters,random_state=random_state,cv=cv,verbose=verbose,n_iter=100,scoring=scoring,n_jobs=-1)
best_model = clf.fit(train_ohe,y_train)
# In[60]:
best_c=best_model.best_params_['C']
best_c
# In[61]:
results = pd.DataFrame.from_dict(best_model.cv_results_)
results=results.sort_values('param_C')
results
# In[62]:
print_graph(results, 'param_C', 'mean_test_score', 'Hyperparameter - C', 'Test score')
# In[63]:
model = LogisticRegression(C=best_c,verbose=verbose,n_jobs=-1,random_state=random_state,class_weight='balanced',solver='lbfgs')
model.fit(train_ohe,y_train)
# In[64]:
predictions = model.predict_proba(test_ohe)[:,1]
save_submission(predictions, 'lr_ohe.csv')
# ## 2.4 Random Forest with one hot encoded features
# In[65]:
rfc = RandomForestClassifier(random_state=random_state,class_weight='balanced',n_jobs=-1)
clf = RandomizedSearchCV(rfc,get_rf_params(),random_state=random_state,cv=cv,verbose=verbose,n_iter=100,scoring=scoring,n_jobs=-1)
best_model = clf.fit(train_ohe,y_train)
# In[66]:
results = pd.DataFrame(best_model.cv_results_)
results.sort_values('mean_test_score',ascending=False,inplace=True)
param_keys=['param_'+str(each) for each in get_rf_params().keys()]
param_keys.append('mean_test_score')
results[param_keys].head(10)
# In[67]:
n_estimators=clf.best_params_['n_estimators']
max_features=clf.best_params_['max_features']
max_depth=clf.best_params_['max_depth']
min_samples_split=clf.best_params_['min_samples_split']
n_estimators,max_features,max_depth,min_samples_split
# In[68]:
model=RandomForestClassifier(n_estimators=n_estimators,max_depth=max_depth,max_features=max_features,
min_samples_split=min_samples_split,
random_state=random_state,class_weight='balanced',n_jobs=-1)
model.fit(train_ohe,y_train)
# In[69]:
# features=train_ohe.columns
# importance=model.feature_importances_
# features=pd.DataFrame({'features':features,'value':importance})
# features=features.sort_values('value',ascending=False)
# sns.barplot('value','features',data=features);
# plt.title('Feature Importance');
# In[70]:
predictions = model.predict_proba(test_ohe)[:,1]
save_submission(predictions, 'rf_ohe.csv')
# ## 2.5 Xgboost with one hot encoded features
# In[71]:
xgb = XGBClassifier()
clf = RandomizedSearchCV(xgb,get_xgb_params(),random_state=random_state,cv=cv,verbose=verbose,n_iter=100,scoring=scoring,n_jobs=-1)
best_model=clf.fit(train_ohe,y_train)
# In[72]:
results = pd.DataFrame(best_model.cv_results_)
results.sort_values('mean_test_score',ascending=False,inplace=True)
param_keys=['param_'+str(each) for each in get_xgb_params().keys()]
param_keys.append('mean_test_score')
results[param_keys].head(10)
# In[73]:
colsample_bytree = clf.best_params_['colsample_bytree']
learning_rate=clf.best_params_['learning_rate']
max_depth=clf.best_params_['max_depth']
min_child_weight=clf.best_params_['min_child_weight']
n_estimators=clf.best_params_['n_estimators']
subsample=clf.best_params_['subsample']
colsample_bytree,learning_rate,max_depth,min_child_weight,n_estimators,subsample
# In[74]:
model = XGBClassifier(colsample_bytree=colsample_bytree,learning_rate=learning_rate,max_depth=max_depth,
min_child_weight=min_child_weight,n_estimators=n_estimators,subsample=subsample,n_jobs=-1)
model.fit(train_ohe,y_train)
# In[75]:
# features=train_ohe.columns
# importance=model.feature_importances_
# features=pd.DataFrame({'features':features,'value':importance})
# features=features.sort_values('value',ascending=False)
# sns.barplot('value','features',data=features);
# plt.title('Feature Importance');
# In[76]:
predictions = model.predict_proba(test_ohe)[:,1]
save_submission(predictions, 'xgb_ohe.csv')
# In[77]:
from prettytable import PrettyTable
x = PrettyTable(['Model', 'Feature', 'Private Score', 'Public Score'])
x.add_row(['KNN','ohe', 0.81657, 0.81723])
x.add_row(['SVM', 'ohe', 0.87249, 0.87955])
x.add_row(['Logistic Regression', 'ohe', 0.87436, 0.88167])
x.add_row(['Random Forest', 'ohe', 0.84541, 0.84997])
x.add_row(['Xgboost', 'ohe', 0.84717, 0.85102])
print(x)
# # Observations:
#
# 1. One hot encoded features perform better than the other encoding techniques
# 2. Linear models (Logistic Regression and SVM) perform better in higher dimensions
# # 3 Build Model on frequency encoding feature
# ## 3.1 KNN with frequency encoding
# In[78]:
train_df_fc = pd.read_csv('data/train_df_fc.csv')
test_df_fc = pd.read_csv('data/test_df_fc.csv')
# In[79]:
train_df_fc.shape, test_df_fc.shape, y_train.shape
# In[80]:
parameters={'n_neighbors':np.arange(1,100, 5)}
clf = RandomizedSearchCV(KNeighborsClassifier(n_jobs=-1),parameters,random_state=random_state,cv=cv,verbose=verbose,scoring=scoring,n_jobs=-1)
best_model = clf.fit(train_df_fc,y_train)
# In[81]:
results = pd.DataFrame.from_dict(best_model.cv_results_)
results=results.sort_values('param_n_neighbors')
results
# In[82]:
print_graph(results, 'param_n_neighbors', 'mean_test_score', 'Hyperparameter - No. of neighbors', 'Test score')
# In[83]:
best_c=best_model.best_params_['n_neighbors']
best_c
# In[84]:
model = KNeighborsClassifier(n_neighbors=best_c,n_jobs=-1)
model.fit(train_df_fc,y_train)
# In[85]:
predictions = model.predict_proba(test_df_fc)[:,1]
save_submission(predictions, "knn_fc.csv")
# ## 3.2 SVM with frequency encoding
# In[86]:
C_val = uniform(loc=0, scale=4)
model= LinearSVC(verbose=verbose,random_state=random_state,class_weight='balanced',max_iter=2000)
parameters={'C':C_val}
clf = RandomizedSearchCV(model,parameters,random_state=random_state,cv=cv,verbose=verbose,scoring=scoring,n_jobs=-1)
best_model = clf.fit(train_df_fc,y_train)
# In[87]:
best_c=best_model.best_params_['C']
best_c
# In[88]:
results = pd.DataFrame.from_dict(best_model.cv_results_)
results=results.sort_values('param_C')
results
# In[89]:
print_graph(results, 'param_C', 'mean_test_score', 'Hyperparameter - C', 'Test score')
# In[90]:
#https://stackoverflow.com/questions/26478000/converting-linearsvcs-decision-function-to-probabilities-scikit-learn-python
model = LinearSVC(C=best_c,verbose=verbose,random_state=random_state,class_weight='balanced',max_iter=2000)
model = CalibratedClassifierCV(model)
model.fit(train_df_fc,y_train)
# In[91]:
predictions = model.predict_proba(test_df_fc)[:,1]
save_submission(predictions, 'svm_fc.csv')
# ## 3.3 Logistic Regression with frequency encoding
# In[92]:
C_val = uniform(loc=0, scale=4)
lr= LogisticRegression(verbose=verbose,random_state=random_state,class_weight='balanced',solver='lbfgs',max_iter=500,n_jobs=-1)
parameters={'C':C_val}
clf = RandomizedSearchCV(lr,parameters,random_state=random_state,cv=cv,verbose=verbose,n_iter=100,scoring=scoring,n_jobs=-1)
best_model = clf.fit(train_df_fc,y_train)
# In[93]:
best_c=best_model.best_params_['C']
best_c
# In[94]:
results = pd.DataFrame.from_dict(best_model.cv_results_)
results=results.sort_values('param_C')
results
# In[95]:
print_graph(results, 'param_C', 'mean_test_score', 'Hyperparameter - C', 'Test score')
# In[96]:
model = LogisticRegression(C=best_c,verbose=verbose,n_jobs=-1,random_state=random_state,class_weight='balanced',solver='lbfgs')
model.fit(train_df_fc,y_train)
# In[97]:
predictions = model.predict_proba(test_df_fc)[:,1]
save_submission(predictions, 'lr_fc.csv')
# ## 3.4 Random Forest with frequency encoding
# In[98]:
rfc = RandomForestClassifier(random_state=random_state,class_weight='balanced',n_jobs=-1)
clf = RandomizedSearchCV(rfc,get_rf_params(),random_state=random_state,cv=cv,verbose=verbose,n_iter=100,scoring=scoring,n_jobs=-1)
best_model = clf.fit(train_df_fc,y_train)
# In[99]:
results = pd.DataFrame(best_model.cv_results_)
results.sort_values('mean_test_score',ascending=False,inplace=True)
param_keys=['param_'+str(each) for each in get_rf_params().keys()]
param_keys.append('mean_test_score')
results[param_keys].head(10)
# In[100]:
n_estimators=clf.best_params_['n_estimators']
max_features=clf.best_params_['max_features']
max_depth=clf.best_params_['max_depth']
min_samples_split=clf.best_params_['min_samples_split']
n_estimators,max_features,max_depth,min_samples_split
# In[101]:
model=RandomForestClassifier(n_estimators=n_estimators,max_depth=max_depth,max_features=max_features,
min_samples_split=min_samples_split,
random_state=random_state,class_weight='balanced',n_jobs=-1)
model.fit(train_df_fc,y_train)
# In[103]:
features=train_df_fc.columns
importance=model.feature_importances_
features=pd.DataFrame({'features':features,'value':importance})
features=features.sort_values('value',ascending=False)
sns.barplot(x='value', y='features', data=features);
plt.title('Feature Importance');
# In[106]:
predictions = model.predict_proba(test_df_fc)[:,1]
save_submission(predictions, 'rf_fc.csv')
# ## 3.5 Xgboost with frequency encoding
# In[107]:
xgb = XGBClassifier()
clf = RandomizedSearchCV(xgb,get_xgb_params(),random_state=random_state,cv=cv,verbose=verbose,n_iter=100,scoring=scoring,n_jobs=-1)
best_model=clf.fit(train_df_fc,y_train)
# In[108]:
results = pd.DataFrame(best_model.cv_results_)
results.sort_values('mean_test_score',ascending=False,inplace=True)
param_keys=['param_'+str(each) for each in get_xgb_params().keys()]
param_keys.append('mean_test_score')
results[param_keys].head(10)
# In[109]:
colsample_bytree = clf.best_params_['colsample_bytree']
learning_rate=clf.best_params_['learning_rate']
max_depth=clf.best_params_['max_depth']
min_child_weight=clf.best_params_['min_child_weight']
n_estimators=clf.best_params_['n_estimators']
subsample=clf.best_params_['subsample']
colsample_bytree,learning_rate,max_depth,min_child_weight,n_estimators,subsample
# In[110]:
model = XGBClassifier(colsample_bytree=colsample_bytree,learning_rate=learning_rate,max_depth=max_depth,
min_child_weight=min_child_weight,n_estimators=n_estimators,subsample=subsample,n_jobs=-1)
model.fit(train_df_fc,y_train)
# In[111]:
features=train_df_fc.columns
importance=model.feature_importances_
features=pd.DataFrame({'features':features,'value':importance})
features=features.sort_values('value',ascending=False)
sns.barplot(x='value', y='features', data=features);
plt.title('Feature Importance');
# In[112]:
predictions = model.predict_proba(test_df_fc)[:,1]
save_submission(predictions, 'xgb_fc.csv')
# In[113]:
from prettytable import PrettyTable
x = PrettyTable(['Model', 'Feature', 'Private Score', 'Public Score'])
x.add_row(['KNN','fc', 0.79715, 0.79125])
x.add_row(['SVM', 'fc', 0.60085, 0.59550])
x.add_row(['Logistic Regression', 'fc', 0.59896, 0.59778])
x.add_row(['Random Forest', 'fc', 0.87299, 0.87616])
x.add_row(['Xgboost', 'fc', 0.86987, 0.86944])
print(x)
# # Observations:
#
# 1. Tree based models perform better than linear models for this feature
# 2. KNN does well for every feature
# # 4 Build Model using response encoding feature
# In[114]:
train_df_rc = pd.read_csv('data/train_df_rc.csv')
test_df_rc = pd.read_csv('data/test_df_rc.csv')
# In[115]:
train_df_rc.shape, test_df_rc.shape, y_train.shape
# ## 4.1 KNN with response encoding
# In[116]:
parameters={'n_neighbors':np.arange(1,100, 5)}
clf = RandomizedSearchCV(KNeighborsClassifier(n_jobs=-1),parameters,random_state=random_state,cv=cv,verbose=verbose,scoring=scoring,n_jobs=-1)
best_model = clf.fit(train_df_rc,y_train)
# In[117]:
results = pd.DataFrame.from_dict(best_model.cv_results_)
results=results.sort_values('param_n_neighbors')
results
# In[118]:
print_graph(results, 'param_n_neighbors', 'mean_test_score', 'Hyperparameter - No. of neighbors', 'Test score')
# In[119]:
best_c=best_model.best_params_['n_neighbors']
best_c
# In[120]:
model = KNeighborsClassifier(n_neighbors=best_c,n_jobs=-1)
model.fit(train_df_rc,y_train)
# In[121]:
predictions = model.predict_proba(test_df_rc)[:,1]
save_submission(predictions, "knn_rc.csv")
# ## 4.2 SVM with response encoding
# In[122]:
C_val = uniform(loc=0, scale=4)
model= LinearSVC(verbose=verbose,random_state=random_state,class_weight='balanced',max_iter=2000)
parameters={'C':C_val}
clf = RandomizedSearchCV(model,parameters,random_state=random_state,cv=cv,verbose=verbose,scoring=scoring,n_jobs=-1)
best_model = clf.fit(train_df_rc,y_train)
# In[123]:
best_c=best_model.best_params_['C']
best_c
# In[124]:
results = pd.DataFrame.from_dict(best_model.cv_results_)
results=results.sort_values('param_C')
results
# In[125]:
print_graph(results, 'param_C', 'mean_test_score', 'Hyperparameter - C', 'Test score')
# In[126]:
#https://stackoverflow.com/questions/26478000/converting-linearsvcs-decision-function-to-probabilities-scikit-learn-python
model = LinearSVC(C=best_c,verbose=verbose,random_state=random_state,class_weight='balanced',max_iter=2000)
model = CalibratedClassifierCV(model)
model.fit(train_df_rc,y_train)
# In[127]:
predictions = model.predict_proba(test_df_rc)[:,1]
save_submission(predictions, 'svm_rc.csv')
# ## 4.3 Logistic Regression with response encoding
# In[128]:
C_val = uniform(loc=0, scale=4)
lr= LogisticRegression(verbose=verbose,random_state=random_state,class_weight='balanced',solver='lbfgs',max_iter=500,n_jobs=-1)
parameters={'C':C_val}
clf = RandomizedSearchCV(lr,parameters,random_state=random_state,cv=cv,verbose=verbose,n_iter=100,scoring=scoring,n_jobs=-1)
best_model = clf.fit(train_df_rc,y_train)
# In[129]:
best_c=best_model.best_params_['C']
best_c
# In[130]:
results = pd.DataFrame.from_dict(best_model.cv_results_)
results=results.sort_values('param_C')
results
# In[131]:
print_graph(results, 'param_C', 'mean_test_score', 'Hyperparameter - C', 'Test score')
# In[132]:
model = LogisticRegression(C=best_c,verbose=verbose,n_jobs=-1,random_state=random_state,class_weight='balanced',solver='lbfgs')
model.fit(train_df_rc,y_train)
# In[133]:
predictions = model.predict_proba(test_df_rc)[:,1]
save_submission(predictions, 'lr_rc.csv')
# ## 4.4 Random Forest with response encoding
# In[134]:
rfc = RandomForestClassifier(random_state=random_state,class_weight='balanced',n_jobs=-1)
clf = RandomizedSearchCV(rfc,get_rf_params(),random_state=random_state,cv=cv,verbose=verbose,n_iter=100,scoring=scoring,n_jobs=-1)
best_model = clf.fit(train_df_rc,y_train)
# In[135]:
results = pd.DataFrame(best_model.cv_results_)
results.sort_values('mean_test_score',ascending=False,inplace=True)
param_keys=['param_'+str(each) for each in get_rf_params().keys()]
param_keys.append('mean_test_score')
results[param_keys].head(10)
# In[136]:
n_estimators=clf.best_params_['n_estimators']
max_features=clf.best_params_['max_features']
max_depth=clf.best_params_['max_depth']
min_samples_split=clf.best_params_['min_samples_split']
n_estimators,max_features,max_depth,min_samples_split
# In[137]:
model=RandomForestClassifier(n_estimators=n_estimators,max_depth=max_depth,max_features=max_features,
min_samples_split=min_samples_split,
random_state=random_state,class_weight='balanced',n_jobs=-1)
model.fit(train_df_rc,y_train)
# In[138]:
features=train_df_rc.columns
importance=model.feature_importances_
features=pd.DataFrame({'features':features,'value':importance})
features=features.sort_values('value',ascending=False)
sns.barplot(x='value', y='features', data=features);
plt.title('Feature Importance');
# In[139]:
predictions = model.predict_proba(test_df_rc)[:,1]
save_submission(predictions, 'rf_rc.csv')
# ## 4.5 Xgboost with response encoding
# In[140]:
xgb = XGBClassifier()
clf = RandomizedSearchCV(xgb,get_xgb_params(),random_state=random_state,cv=cv,verbose=verbose,n_iter=100,scoring=scoring,n_jobs=-1)
best_model=clf.fit(train_df_rc,y_train)
# In[141]:
results = pd.DataFrame(best_model.cv_results_)
results.sort_values('mean_test_score',ascending=False,inplace=True)
param_keys=['param_'+str(each) for each in get_xgb_params().keys()]
param_keys.append('mean_test_score')
results[param_keys].head(10)
# In[142]:
colsample_bytree = clf.best_params_['colsample_bytree']
learning_rate=clf.best_params_['learning_rate']
max_depth=clf.best_params_['max_depth']
min_child_weight=clf.best_params_['min_child_weight']
n_estimators=clf.best_params_['n_estimators']
subsample=clf.best_params_['subsample']
colsample_bytree,learning_rate,max_depth,min_child_weight,n_estimators,subsample
# In[143]:
model = XGBClassifier(colsample_bytree=colsample_bytree,learning_rate=learning_rate,max_depth=max_depth,
min_child_weight=min_child_weight,n_estimators=n_estimators,subsample=subsample,n_jobs=-1)
model.fit(train_df_rc,y_train)
# In[144]:
features=train_df_rc.columns
importance=model.feature_importances_
features=pd.DataFrame({'features':features,'value':importance})
features=features.sort_values('value',ascending=False)
sns.barplot(x='value', y='features', data=features);
plt.title('Feature Importance');
# In[145]:
predictions = model.predict_proba(test_df_rc)[:,1]
save_submission(predictions, 'xgb_rc.csv')
# In[146]:
from prettytable import PrettyTable
x = PrettyTable(['Model', 'Feature', 'Private Score', 'Public Score'])
x.add_row(['KNN','rc', 0.84352, 0.85351])
x.add_row(['SVM', 'rc', 0.85160, 0.86031])
x.add_row(['Logistic Regression', 'rc', 0.85322, 0.86180])
x.add_row(['Random Forest', 'rc', 0.83136, 0.83892])
x.add_row(['Xgboost', 'rc', 0.84135, 0.84190])
print(x)
# # Observations:
#
# 1. Every model performs well for this feature
# 2. Linear models perform better than tree based models
# # 5 Build model on SVD feature
# In[147]:
train_svd = pd.read_csv('data/train_svd.csv')
test_svd = pd.read_csv('data/test_svd.csv')
# In[148]:
train_svd.shape, test_svd.shape, y_train.shape
# ## 5.1 KNN with SVD
# In[149]:
parameters={'n_neighbors':np.arange(1,100, 5)}
clf = RandomizedSearchCV(KNeighborsClassifier(n_jobs=-1),parameters,random_state=random_state,cv=cv,verbose=verbose,scoring=scoring,n_jobs=-1)
best_model = clf.fit(train_svd,y_train)
# In[150]:
results = pd.DataFrame.from_dict(best_model.cv_results_)
results=results.sort_values('param_n_neighbors')
results
# In[151]:
print_graph(results, 'param_n_neighbors', 'mean_test_score', 'Hyperparameter - No. of neighbors', 'Test score')
# In[152]:
best_c=best_model.best_params_['n_neighbors']
best_c
# In[153]:
model = KNeighborsClassifier(n_neighbors=best_c,n_jobs=-1)
model.fit(train_svd,y_train)
# In[154]:
predictions = model.predict_proba(test_svd)[:,1]
save_submission(predictions, "knn_svd.csv")
# ## 5.2 SVM with SVD
# In[155]:
C_val = uniform(loc=0, scale=4)
model= LinearSVC(verbose=verbose,random_state=random_state,class_weight='balanced',max_iter=2000)
parameters={'C':C_val}
clf = RandomizedSearchCV(model,parameters,random_state=random_state,cv=cv,verbose=verbose,scoring=scoring,n_jobs=-1)
best_model = clf.fit(train_svd,y_train)
# In[156]:
best_c=best_model.best_params_['C']
best_c
# In[157]:
results = pd.DataFrame.from_dict(best_model.cv_results_)
results=results.sort_values('param_C')
results
# In[158]:
print_graph(results, 'param_C', 'mean_test_score', 'Hyperparameter - C', 'Test score')
# In[159]:
#https://stackoverflow.com/questions/26478000/converting-linearsvcs-decision-function-to-probabilities-scikit-learn-python
model = LinearSVC(C=best_c,verbose=verbose,random_state=random_state,class_weight='balanced',max_iter=2000)
model = CalibratedClassifierCV(model)
model.fit(train_svd,y_train)
# In[160]:
predictions = model.predict_proba(test_svd)[:,1]
save_submission(predictions, 'svm_svd.csv')
# ## 5.3 Logistic Regression with SVD
# In[161]:
C_val = uniform(loc=0, scale=4)
lr= LogisticRegression(verbose=verbose,random_state=random_state,class_weight='balanced',solver='lbfgs',max_iter=500,n_jobs=-1)
parameters={'C':C_val}
clf = RandomizedSearchCV(lr,parameters,random_state=random_state,cv=cv,verbose=verbose,n_iter=100,scoring=scoring,n_jobs=-1)
best_model = clf.fit(train_svd,y_train)
# In[162]:
best_c=best_model.best_params_['C']
best_c
# In[163]:
results = pd.DataFrame.from_dict(best_model.cv_results_)
results=results.sort_values('param_C')
results
# In[164]:
print_graph(results, 'param_C', 'mean_test_score', 'Hyperparameter - C', 'Test score')
# In[165]:
model = LogisticRegression(C=best_c,verbose=verbose,n_jobs=-1,random_state=random_state,class_weight='balanced',solver='lbfgs')
model.fit(train_svd,y_train)
# In[166]:
predictions = model.predict_proba(test_svd)[:,1]
save_submission(predictions, 'lr_svd.csv')
# ## 5.4 Random Forest with SVD
# In[167]:
rfc = RandomForestClassifier(random_state=random_state,class_weight='balanced',n_jobs=-1)
clf = RandomizedSearchCV(rfc,get_rf_params(),random_state=random_state,cv=cv,verbose=verbose,n_iter=100,scoring=scoring,n_jobs=-1)
best_model = clf.fit(train_svd,y_train)
# In[168]:
results = | pd.DataFrame(best_model.cv_results_) | pandas.DataFrame |
import sqlalchemy as sa
from sqlalchemy import or_
from flask_sqlalchemy import SQLAlchemy
import math
from anyway.utilities import init_flask
from anyway.models import AccidentMarker, Involved, School
from anyway.constants import CONST
import pandas as pd
import os
SUBTYPE_ACCIDENT_WITH_PEDESTRIAN = 1
LOCATION_ACCURACY_PRECISE = True
LOCATION_ACCURACY_PRECISE_INT = 1
INJURED_TYPE_PEDESTRIAN = 1
YISHUV_SYMBOL_NOT_EXIST = -1
CONTENT_ENCODING = 'utf-8'
HEBREW_ENCODING = 'cp1255'
ANYWAY_UI_FORMAT_MAP_ONLY = "https://www.anyway.co.il/?zoom=17&start_date={start_date}&end_date={end_date}&lat={latitude}&lon={longitude}&show_fatal=1&show_severe=1&show_light=1&approx={location_approx}&accurate={location_accurate}&show_markers=1&show_discussions=0&show_urban=3&show_intersection=3&show_lane=3&show_day=7&show_holiday=0&show_time=24&start_time=25&end_time=25&weather=0&road=0&separation=0&surface=0&acctype={acc_type}&controlmeasure=0&district=0&case_type=0&show_rsa=0&age_groups=1,2,3,4&map_only=true"
ANYWAY_UI_FORMAT_WITH_FILTERS = "https://www.anyway.co.il/?zoom=17&start_date={start_date}&end_date={end_date}&lat={latitude}&lon={longitude}&show_fatal=1&show_severe=1&show_light=1&approx={location_approx}&accurate={location_accurate}&show_markers=1&show_discussions=0&show_urban=3&show_intersection=3&show_lane=3&show_day=7&show_holiday=0&show_time=24&start_time=25&end_time=25&weather=0&road=0&separation=0&surface=0&acctype={acc_type}&controlmeasure=0&district=0&case_type=0&show_rsa=0&age_groups=1,2,3,4"
DATE_INPUT_FORMAT = '%d-%m-%Y'
DATE_URL_FORMAT = '%Y-%m-%d'
app = init_flask()
db = SQLAlchemy(app)
def get_bounding_box(latitude, longitude, distance_in_km):
latitude = math.radians(latitude)
longitude = math.radians(longitude)
radius = 6371
# Radius of the parallel at given latitude
parallel_radius = radius*math.cos(latitude)
lat_min = latitude - distance_in_km/radius
lat_max = latitude + distance_in_km/radius
lon_min = longitude - distance_in_km/parallel_radius
lon_max = longitude + distance_in_km/parallel_radius
rad2deg = math.degrees
return rad2deg(lat_min), rad2deg(lon_min), rad2deg(lat_max), rad2deg(lon_max)
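# Usage sketch (illustrative coordinates, not taken from this module): a 0.5 km box
# around lat/lon (32.08, 34.78). The latitude half-width is 0.5/6371 rad ~= 0.0045 deg,
# and the longitude half-width is slightly larger at this latitude, e.g.:
#     lat_min, lon_min, lat_max, lon_max = get_bounding_box(32.08, 34.78, 0.5)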
def acc_inv_query(longitude, latitude, distance, start_date, end_date, school):
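    # Pull every pedestrian-involved record for ages 0-19 inside a rectangular bounding
    # box around the given point and date range, then annotate each row with the school's
    # details and pre-built anyway.co.il map URLs for that location.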
lat_min, lon_min, lat_max, lon_max = get_bounding_box(latitude, longitude, distance)
    baseX = lon_min
    baseY = lat_min
    distanceX = lon_max
    distanceY = lat_max
pol_str = 'POLYGON(({0} {1},{0} {3},{2} {3},{2} {1},{0} {1}))'.format(baseX,
baseY,
distanceX,
distanceY)
query_obj = db.session.query(Involved, AccidentMarker) \
.join(AccidentMarker, AccidentMarker.provider_and_id == Involved.provider_and_id) \
.filter(AccidentMarker.geom.intersects(pol_str)) \
.filter(Involved.injured_type == INJURED_TYPE_PEDESTRIAN) \
.filter(AccidentMarker.provider_and_id == Involved.provider_and_id) \
.filter(or_((AccidentMarker.provider_code == CONST.CBS_ACCIDENT_TYPE_1_CODE), (AccidentMarker.provider_code == CONST.CBS_ACCIDENT_TYPE_3_CODE))) \
.filter(AccidentMarker.created >= start_date) \
.filter(AccidentMarker.created < end_date) \
.filter(AccidentMarker.location_accuracy == LOCATION_ACCURACY_PRECISE_INT) \
.filter(AccidentMarker.yishuv_symbol != YISHUV_SYMBOL_NOT_EXIST) \
.filter(Involved.age_group.in_([1,2,3,4])) #ages 0-19
df = pd.read_sql_query(query_obj.with_labels().statement, query_obj.session.bind)
if LOCATION_ACCURACY_PRECISE:
location_accurate = 1
location_approx = ''
else:
location_accurate = 1
location_approx = 1
ui_url_map_only = ANYWAY_UI_FORMAT_MAP_ONLY.format(latitude=school['latitude'],
longitude=school['longitude'],
start_date=start_date.strftime(DATE_URL_FORMAT),
end_date=end_date.strftime(DATE_URL_FORMAT),
acc_type=SUBTYPE_ACCIDENT_WITH_PEDESTRIAN,
location_accurate=location_accurate,
location_approx=location_approx)
ui_url_with_filters = ANYWAY_UI_FORMAT_WITH_FILTERS.format(latitude=school['latitude'],
longitude=school['longitude'],
start_date=start_date.strftime(DATE_URL_FORMAT),
end_date=end_date.strftime(DATE_URL_FORMAT),
acc_type=SUBTYPE_ACCIDENT_WITH_PEDESTRIAN,
location_accurate=location_accurate,
location_approx=location_approx)
df['anyway_link'] = ui_url_map_only
df['anyway_link_with_filters'] = ui_url_with_filters
df['school_id'] = school['id']
df['school_name'] = school['school_name']
df['school_yishuv_symbol'] = school['yishuv_symbol']
df['school_yishuv_name'] = school['yishuv_name']
df['school_longitude'] = school['longitude']
df['school_latitude'] = school['latitude']
return df
def main(start_date, end_date, distance, output_path):
schools_query = sa.select([School])
df_schools = | pd.read_sql_query(schools_query, db.session.bind) | pandas.read_sql_query |
from IMLearn.utils import split_train_test
from IMLearn.learners.regressors import LinearRegression
from typing import NoReturn
import numpy as np
import pandas as pd
import plotly.graph_objects as go
import plotly.express as px
import plotly.io as pio
pio.templates.default = "simple_white"
def load_data(filename: str):
"""
Load house prices dataset and preprocess data.
Parameters
----------
filename: str
Path to house prices dataset
Returns
-------
Design matrix and response vector (prices) - either as a single
DataFrame or a Tuple[DataFrame, Series]
"""
# read csv
dff = | pd.read_csv(filename) | pandas.read_csv |
from datetime import datetime, timedelta
from importlib import reload
import string
import sys
import numpy as np
import pytest
from pandas._libs.tslibs import iNaT
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Index,
Series,
Timedelta,
Timestamp,
date_range,
)
import pandas._testing as tm
class TestSeriesDtypes:
def test_dt64_series_astype_object(self):
dt64ser = Series(date_range("20130101", periods=3))
result = dt64ser.astype(object)
assert isinstance(result.iloc[0], datetime)
assert result.dtype == np.object_
def test_td64_series_astype_object(self):
tdser = Series(["59 Days", "59 Days", "NaT"], dtype="timedelta64[ns]")
result = tdser.astype(object)
assert isinstance(result.iloc[0], timedelta)
assert result.dtype == np.object_
@pytest.mark.parametrize("dtype", ["float32", "float64", "int64", "int32"])
def test_astype(self, dtype):
s = Series(np.random.randn(5), name="foo")
as_typed = s.astype(dtype)
assert as_typed.dtype == dtype
assert as_typed.name == s.name
def test_dtype(self, datetime_series):
assert datetime_series.dtype == np.dtype("float64")
assert datetime_series.dtypes == np.dtype("float64")
@pytest.mark.parametrize("value", [np.nan, np.inf])
@pytest.mark.parametrize("dtype", [np.int32, np.int64])
def test_astype_cast_nan_inf_int(self, dtype, value):
# gh-14265: check NaN and inf raise error when converting to int
msg = "Cannot convert non-finite values \\(NA or inf\\) to integer"
s = Series([value])
with pytest.raises(ValueError, match=msg):
s.astype(dtype)
@pytest.mark.parametrize("dtype", [int, np.int8, np.int64])
def test_astype_cast_object_int_fail(self, dtype):
arr = Series(["car", "house", "tree", "1"])
msg = r"invalid literal for int\(\) with base 10: 'car'"
with pytest.raises(ValueError, match=msg):
arr.astype(dtype)
def test_astype_cast_object_int(self):
arr = Series(["1", "2", "3", "4"], dtype=object)
result = arr.astype(int)
tm.assert_series_equal(result, Series(np.arange(1, 5)))
def test_astype_datetime(self):
s = Series(iNaT, dtype="M8[ns]", index=range(5))
s = s.astype("O")
assert s.dtype == np.object_
s = Series([datetime(2001, 1, 2, 0, 0)])
s = s.astype("O")
assert s.dtype == np.object_
s = Series([datetime(2001, 1, 2, 0, 0) for i in range(3)])
s[1] = np.nan
assert s.dtype == "M8[ns]"
s = s.astype("O")
assert s.dtype == np.object_
def test_astype_datetime64tz(self):
s = Series(date_range("20130101", periods=3, tz="US/Eastern"))
# astype
result = s.astype(object)
expected = Series(s.astype(object), dtype=object)
tm.assert_series_equal(result, expected)
result = Series(s.values).dt.tz_localize("UTC").dt.tz_convert(s.dt.tz)
tm.assert_series_equal(result, s)
# astype - object, preserves on construction
result = Series(s.astype(object))
expected = s.astype(object)
tm.assert_series_equal(result, expected)
# astype - datetime64[ns, tz]
result = Series(s.values).astype("datetime64[ns, US/Eastern]")
tm.assert_series_equal(result, s)
result = Series(s.values).astype(s.dtype)
tm.assert_series_equal(result, s)
result = s.astype("datetime64[ns, CET]")
expected = Series(date_range("20130101 06:00:00", periods=3, tz="CET"))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dtype", [str, np.str_])
@pytest.mark.parametrize(
"series",
[
Series([string.digits * 10, tm.rands(63), tm.rands(64), tm.rands(1000)]),
Series([string.digits * 10, tm.rands(63), tm.rands(64), np.nan, 1.0]),
],
)
def test_astype_str_map(self, dtype, series):
# see gh-4405
result = series.astype(dtype)
expected = series.map(str)
tm.assert_series_equal(result, expected)
def test_astype_str_cast_dt64(self):
# see gh-9757
ts = Series([Timestamp("2010-01-04 00:00:00")])
s = ts.astype(str)
expected = Series([str("2010-01-04")])
tm.assert_series_equal(s, expected)
ts = Series([Timestamp("2010-01-04 00:00:00", tz="US/Eastern")])
s = ts.astype(str)
expected = Series([str("2010-01-04 00:00:00-05:00")])
tm.assert_series_equal(s, expected)
def test_astype_str_cast_td64(self):
# see gh-9757
td = Series([Timedelta(1, unit="d")])
ser = td.astype(str)
expected = Series([str("1 days")])
tm.assert_series_equal(ser, expected)
def test_astype_unicode(self):
# see gh-7758: A bit of magic is required to set
# default encoding to utf-8
digits = string.digits
test_series = [
Series([digits * 10, tm.rands(63), tm.rands(64), tm.rands(1000)]),
Series(["データーサイエンス、お前はもう死んでいる"]),
]
former_encoding = None
if sys.getdefaultencoding() == "utf-8":
test_series.append(Series(["野菜食べないとやばい".encode("utf-8")]))
for s in test_series:
res = s.astype("unicode")
expec = s.map(str)
tm.assert_series_equal(res, expec)
# Restore the former encoding
if former_encoding is not None and former_encoding != "utf-8":
reload(sys)
sys.setdefaultencoding(former_encoding)
@pytest.mark.parametrize("dtype_class", [dict, Series])
def test_astype_dict_like(self, dtype_class):
# see gh-7271
s = Series(range(0, 10, 2), name="abc")
dt1 = dtype_class({"abc": str})
result = s.astype(dt1)
expected = Series(["0", "2", "4", "6", "8"], name="abc")
tm.assert_series_equal(result, expected)
dt2 = dtype_class({"abc": "float64"})
result = s.astype(dt2)
expected = Series([0.0, 2.0, 4.0, 6.0, 8.0], dtype="float64", name="abc")
tm.assert_series_equal(result, expected)
dt3 = dtype_class({"abc": str, "def": str})
msg = (
"Only the Series name can be used for the key in Series dtype "
r"mappings\."
)
with pytest.raises(KeyError, match=msg):
s.astype(dt3)
dt4 = dtype_class({0: str})
with pytest.raises(KeyError, match=msg):
s.astype(dt4)
# GH16717
# if dtypes provided is empty, it should error
if dtype_class is Series:
dt5 = dtype_class({}, dtype=object)
else:
dt5 = dtype_class({})
with pytest.raises(KeyError, match=msg):
s.astype(dt5)
def test_astype_categories_raises(self):
# deprecated 17636, removed in GH-27141
s = Series(["a", "b", "a"])
with pytest.raises(TypeError, match="got an unexpected"):
s.astype("category", categories=["a", "b"], ordered=True)
def test_astype_from_categorical(self):
items = ["a", "b", "c", "a"]
s = Series(items)
exp = Series(Categorical(items))
res = s.astype("category")
tm.assert_series_equal(res, exp)
items = [1, 2, 3, 1]
s = Series(items)
exp = Series(Categorical(items))
res = s.astype("category")
tm.assert_series_equal(res, exp)
df = DataFrame({"cats": [1, 2, 3, 4, 5, 6], "vals": [1, 2, 3, 4, 5, 6]})
cats = Categorical([1, 2, 3, 4, 5, 6])
exp_df = DataFrame({"cats": cats, "vals": [1, 2, 3, 4, 5, 6]})
df["cats"] = df["cats"].astype("category")
tm.assert_frame_equal(exp_df, df)
df = DataFrame(
{"cats": ["a", "b", "b", "a", "a", "d"], "vals": [1, 2, 3, 4, 5, 6]}
)
cats = Categorical(["a", "b", "b", "a", "a", "d"])
exp_df = DataFrame({"cats": cats, "vals": [1, 2, 3, 4, 5, 6]})
df["cats"] = df["cats"].astype("category")
tm.assert_frame_equal(exp_df, df)
# with keywords
lst = ["a", "b", "c", "a"]
s = Series(lst)
exp = Series(Categorical(lst, ordered=True))
res = s.astype(CategoricalDtype(None, ordered=True))
tm.assert_series_equal(res, exp)
exp = Series(Categorical(lst, categories=list("abcdef"), ordered=True))
res = s.astype(CategoricalDtype(list("abcdef"), ordered=True))
tm.assert_series_equal(res, exp)
def test_astype_categorical_to_other(self):
value = np.random.RandomState(0).randint(0, 10000, 100)
df = DataFrame({"value": value})
labels = [f"{i} - {i + 499}" for i in range(0, 10000, 500)]
cat_labels = Categorical(labels, labels)
df = df.sort_values(by=["value"], ascending=True)
df["value_group"] = pd.cut(
df.value, range(0, 10500, 500), right=False, labels=cat_labels
)
s = df["value_group"]
expected = s
tm.assert_series_equal(s.astype("category"), expected)
tm.assert_series_equal(s.astype(CategoricalDtype()), expected)
msg = r"could not convert string to float|invalid literal for float\(\)"
with pytest.raises(ValueError, match=msg):
s.astype("float64")
cat = Series(Categorical(["a", "b", "b", "a", "a", "c", "c", "c"]))
exp = | Series(["a", "b", "b", "a", "a", "c", "c", "c"]) | pandas.Series |
import numpy as np
import pandas as pd
import plotly.graph_objects as go
import dash
from dash import dcc
from dash import html
from dash.dependencies import Input, Output, State
import dash_table
from dash_table.Format import Format, Scheme
# SolCalc
from helicalc import helicalc_dir, helicalc_data
from helicalc.solcalc import SolCalcIntegrator
from helicalc.geometry import read_solenoid_geom_combined
from helicalc.cylinders import get_thick_cylinders_padded
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
# load nominal PS geom
# paramdir = '/home/ckampa/coding/helicalc/dev/params/'
paramdir = helicalc_dir + 'dev/params/'
paramfile = 'Mu2e_V13'
df_PS_nom = read_solenoid_geom_combined(paramdir, paramfile).iloc[:3]
# calculate layer thickness
# FIXME!
# integration params
drz = np.array([5e-3, 1e-2])
# editable vs. dependent columns
cols_edit = ['Ri', 'x', 'y', 'z', 'rot0', 'rot1', 'rot2', 'N_layers',
'N_turns', 'I_turn']
cols_stat = ['Coil_Num', 'Ro', 'L', 'I_tot', 'N_turns_tot', 'helicity', 'h_cable',
'w_cable', 'h_sc', 'w_sc', 't_gi', 't_ci', 't_il', 'phi0_deg', 'phi1_deg',
'pitch']
# load TS+DS contribution to PS
#PSoff_file = '/home/shared_data/Bmaps/SolCalc_complete/Mau13.SolCalc.PS_region.standard.PSoff.pkl'
PSoff_file = helicalc_data+'Bmaps/aux/Mau13.SolCalc.PS_region.standard.PSoff.pkl'
df_PSoff = pd.read_pickle(PSoff_file)
df_PSoff = df_PSoff.astype(float)
# m = (df_PSoff.Y == 0.) & (np.isin(df_PSoff.X - 3.904, [0., 0.4, 0.7]))
m = (df_PSoff.Y == 0.) & (np.isin(df_PSoff.X, [3.904, 4.304, 4.604]))
df_PSoff_lines = df_PSoff[m].copy().reset_index(drop=True, inplace=False)
# print(df_PSoff_lines)
# formatting/style
green = 'rgb(159, 210, 128)'
plot_bg = 'rgb(240, 240, 240)'
button_style = {'fontSize': 'large',
'backgroundColor': green,
}
# plot globals
marker_size = 10
fsize_plot = 20
fsize_ticks = 14
# instantiate app
app = dash.Dash(name='solcalc', external_stylesheets=external_stylesheets)
app.layout = html.Div([
html.H1('SolCalc Magnet Builder (Production Solenoid)'),
# html.H2('Coils Plot'),
dcc.Graph(id='coils-plot'),
html.H2('Coil Geometries'),
# tables
html.H3('Editable Parameters'),
dash_table.DataTable(id='editable-table',
columns=[{'name':i, 'id': i, 'hideable':True, 'type':'numeric',
'format': Format(scheme=Scheme.fixed, precision=4),} for i in cols_edit],
data=df_PS_nom[cols_edit].to_dict('records'),
editable=True),
html.Br(),
html.Button('Recalculate Field', id='calc-button', style=button_style),
# field plot
html.H2('Field Plot'),
html.Label('Plotting Options:'),
html.Label('Field Component:'),
dcc.Dropdown(
id='yaxis-column-field',
options=['Bx', 'By', 'Bz'],
value='Bz',
multi=False,
#style=desc_style,
),
html.Label('Field value or gradient?'),
dcc.RadioItems(
id='yaxis-type-field',
options=[{'label': i, 'value': i} for i in ['B_i', 'grad_z(B_i)']],
value='B_i',
labelStyle={'display': 'inline-block'},
#style=desc_style,
),
html.Label('Include TS/DS Contribution?'),
dcc.RadioItems(
id='include-TS-field',
options=[{'label': i, 'value': i} for i in ['yes', 'no']],
value='yes',
labelStyle={'display': 'inline-block'},
#style=desc_style,
),
html.Label('Individual coil contributions or combined field?'),
dcc.RadioItems(
id='indiv-contrib',
        options=[{'label': i, 'value': i} for i in ['combined', 'individual']],
value='combined',
labelStyle={'display': 'inline-block'},
#style=desc_style,
),
html.Label('Field unit:'),
dcc.RadioItems(
id='field-unit',
options=[{'label': i, 'value': i} for i in ['Gauss', 'Tesla']],
value='Gauss',
labelStyle={'display': 'inline-block'},
#style=desc_style,
),
dcc.Graph(id='field-plot'),
# FIXME!
# not positive best placement for these
html.H3('Static/Dependent Parameters'),
dash_table.DataTable(id='static-table',
columns=[{'name':i, 'id': i, 'hideable':True, 'type':'numeric',
'format': Format(scheme=Scheme.fixed, precision=4),} for i in cols_stat],
data=df_PS_nom[cols_stat].to_dict('records'),
editable=False),
html.H3('Notes on Dependent Parameters'),
# dcc.Markdown('''
    # $R_o = R_i + h_{cable}*N_{layers} + 2*t_{gi} + 2*t_{ci}*N_{layers} + 2*t_{il}*(N_{layers}-1)$
# '''),
#html.Div(html.P(['Notes on depdendent parameters:', html.Br(),
html.Div(html.P([
'Ro = Ri + h_cable*N_layers + 2*t_gi + 2*t_ci*N_layers + 2*t_il*(N_layers-1)', html.Br(),
'pitch = h_cable + 2*t_ci', html.Br(),
'L = pitch*N_turns + 2*t_gi [note nominal seems to use (N_turns-1)]', html.Br(),
'N_turns_tot = N_turns * N_layers', html.Br(),
'I_tot = I_turn * N_turns_tot',])),
# hidden divs for data
html.Div(children=df_PS_nom[cols_edit+cols_stat].to_json(),
id='geom-data', style={'display': 'none'}),
html.Div(id='field-data', style={'display': 'none'}),
])
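# Standalone sketch of the dependent-parameter formulas listed in the notes above.
# This helper is an illustration only and is not wired into the app; the callback
# below recomputes the same quantities column-wise on the DataFrames.
def compute_dependent_params(Ri, N_layers, N_turns, I_turn, h_cable, t_gi, t_ci, t_il, pitch):
    """Return (Ro, L, N_turns_tot, I_tot) for a single coil from its parameters."""
    Ro = Ri + h_cable * N_layers + 2 * t_gi + 2 * t_ci * N_layers + 2 * t_il * (N_layers - 1)
    L = pitch * N_turns + 2 * t_gi
    N_turns_tot = N_turns * N_layers
    I_tot = I_turn * N_turns_tot
    return Ro, L, N_turns_tot, I_tot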
# update geom div when button is clicked
@app.callback(
[Output('geom-data', 'children'),
Output('static-table', 'data'),],
[Input('calc-button', 'n_clicks'),],
[State('static-table', 'data'),
State('static-table', 'columns'),
State('editable-table', 'data'),
State('editable-table', 'columns')],
)
def update_geom_data(n_clicks, rows_stat, cols_stat, rows_edit, cols_edit):
# load data
df_edit = pd.DataFrame(rows_edit, columns=[c['name'] for c in cols_edit], dtype=float)
print(df_edit)
print(df_edit.info())
df_stat = pd.DataFrame(rows_stat, columns=[c['name'] for c in cols_stat], dtype=float)
# calculations
df_stat.loc[:, 'Ro'] = df_edit.Ri + df_stat.h_cable * df_edit.N_layers + \
2 * df_stat.t_gi + 2*df_stat.t_ci*df_edit.N_layers +\
2*df_stat.t_il*(df_edit.N_layers - 1)
df_stat.loc[:, 'L'] = df_stat.pitch * df_edit.N_turns + 2 * df_stat.t_gi
df_stat.loc[:, 'N_turns_tot'] = df_edit.N_turns * df_edit.N_layers
    df_stat.loc[:, 'I_tot'] = df_edit.I_turn * df_stat.N_turns_tot
# combine results
df = pd.concat([df_stat, df_edit], axis=1)
return df.to_json(), df_stat.to_dict('records')
# update coils plot
@app.callback(
Output('coils-plot', 'figure'),
[Input('geom-data', 'children'),],
)
def plot_coils(df):
df = pd.read_json(df)
# get cylinders PS
xs, ys, zs, cs = get_thick_cylinders_padded(df, [1, 2, 3])
# get cylinders nominal PS
xs_n, ys_n, zs_n, cs_n = get_thick_cylinders_padded(df_PS_nom, [1, 2, 3])
# FIXME! Add some of the TS coils
# return surface plot
# layout
# camera
# y up
# camera = dict(
# up=dict(x=0, y=1, z=0),
# #center=dict(x=-3.904, y=0, z=9.),
# eye=dict(x=-2, y=0., z=0.)
# )
# z up
camera = dict(
up=dict(x=0, y=0, z=1),
#center=dict(x=-3.904, y=0, z=9.),
eye=dict(x=0., y=-2., z=0.)
)
layout = go.Layout(
title='Coil Layout',
height=700,
font=dict(family="Courier New", size=fsize_plot,),
margin={'l': 60, 'b': 60, 't': 60, 'r': 60},
scene=dict(aspectmode='data', camera=camera,
xaxis={'title': 'Z [m]', 'tickfont':{'size': fsize_ticks}},
yaxis={'title': 'X [m]', 'tickfont':{'size': fsize_ticks}},
zaxis={'title': 'Y [m]', 'tickfont':{'size': fsize_ticks}},),
plot_bgcolor=plot_bg,
# autosize=True,
# width=1600,
# height=800,
)
return {'data':
#[go.Surface(x=xs, y=ys, z=zs, surfacecolor=cs,
[go.Surface(x=zs_n, y=xs_n, z=ys_n, surfacecolor=cs_n,
colorscale=[[0,'rgba(0,0,0,0)'],[1,'rgba(220, 50, 103, 0.8)']],
showscale=False,
showlegend=True,
opacity=1.0,
name='PS Coils (nominal)',),
go.Surface(x=zs, y=xs, z=ys, surfacecolor=cs,
colorscale=[[0,'rgba(0,0,0,0)'],[1,'rgba(138, 207, 103, 0.8)']],
showscale=False,
showlegend=True,
opacity=1.0,
name='PS Coils (current)',),
],
'layout': layout,
}
# recalculate field
@app.callback(
Output('field-data', 'children'),
[Input('geom-data', 'children'),],
)
def calculate_field(df):
df = | pd.read_json(df) | pandas.read_json |
import numpy as np
import pandas as pd
from sklearn.preprocessing import scale, MinMaxScaler
import pickle
def gen_norm_dict(l):
newd = {}
for i in range(len(l)):
newd[l[i]] = int(np.ceil((i+1)/10)) + 1
return newd
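# Illustrative check (assumed sample names, not used elsewhere): items are grouped ten
# at a time in list order, and pool ids start at 2, so indices 0-9 map to pool 2,
# indices 10-19 map to pool 3, and so on.
_demo_pool_map = gen_norm_dict(['g%d' % i for i in range(12)])
assert _demo_pool_map['g0'] == 2 and _demo_pool_map['g9'] == 2 and _demo_pool_map['g10'] == 3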
def pool_normalize(df,dmap):
newdf = | pd.DataFrame(index=df.index) | pandas.DataFrame |
import pandas as pd
from .utility_functions import *
def load_file(filepath):
"""
Loads a single data file into a dataframe and
appends any necessary identifier columns
This strips the sequences of moves out from the data,
along with any mousetracking data. A different file loading function
will be necessary to use any of those sequential items
NOTE: this function will not work if the files are not contained in
appropriately named directories
Arguments:
----------
:filepath is a complete relative or absolute filepath pointing to a csv file
Outputs:
----------
:DF[keep] is a pandas DataFrame containing the relevant fields
"""
assert filepath[-3:] == 'csv' # throw an error if not a csv
# pretty names for data fields
col_names = [
'Index', 'Subject ID', 'Player Color',
'Game Index', 'Move Index', 'Status',
'Black Position', 'White Position', 'Action',
'Response Time', 'Time Stamp',
'Mouse Timestamps', 'Mouse Position'
]
# final data fields
keep = [
'Subject ID', 'Condition', 'Game Index', 'Status',
'Black Position', 'White Position', 'Response Time'
]
DF = pd.read_csv(filepath, names=col_names) # load the file with pandas
# Recompute response times from timestamps
reconi = DF['Status'] == 'reconi'
reconf = DF['Status'] == 'reconf'
trial_starts = DF.loc[reconi, 'Time Stamp'].values
trial_ends = DF.loc[reconf, 'Time Stamp'].values
DF.loc[reconi, 'Response Time'] = trial_ends - trial_starts
DF = DF.loc[DF['Status'].isin(['reconi', 'reconf'])].reset_index(drop=True) # only keep initial and final board states
DF['Game Index'] = DF.index // 2 # fix game indexes
DF['Condition'] = 'Trained' if 'Trained' in filepath else 'Naive' # get condition from filepath
return DF[keep]
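# Usage sketch (hypothetical path; the 'Trained'/'Naive' condition is inferred from the
# directory name, which is why files must live in appropriately named directories):
#     df_single = load_file('data/Trained/subject_01.csv')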
def load_data(filepaths):
"""
Loads all data into a single dataframe
and does some additional preprocessing
TODO: add real/fake identifiers for each position!
Arguments:
----------
:filepaths is a list of complete relative or absolute filepaths pointing to
csv files
Outputs:
----------
:DFi[keep] is a pandas DataFrame with the relevant columns
"""
# get all data into a single dataframe
loaded = [load_file(path) for path in filepaths] # load all files in filepaths into individual dataframes
DF = | pd.concat(loaded) | pandas.concat |
import pytest
import numpy as np
import pandas
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
import matplotlib
import modin.pandas as pd
from modin.pandas.utils import to_pandas
from numpy.testing import assert_array_equal
from .utils import (
random_state,
RAND_LOW,
RAND_HIGH,
df_equals,
df_is_empty,
arg_keys,
name_contains,
test_data_values,
test_data_keys,
test_data_with_duplicates_values,
test_data_with_duplicates_keys,
numeric_dfs,
no_numeric_dfs,
test_func_keys,
test_func_values,
query_func_keys,
query_func_values,
agg_func_keys,
agg_func_values,
numeric_agg_funcs,
quantiles_keys,
quantiles_values,
indices_keys,
indices_values,
axis_keys,
axis_values,
bool_arg_keys,
bool_arg_values,
int_arg_keys,
int_arg_values,
)
# TODO remove once modin-project/modin#469 is resolved
agg_func_keys.remove("str")
agg_func_values.remove(str)
pd.DEFAULT_NPARTITIONS = 4
# Force matplotlib to not use any Xwindows backend.
matplotlib.use("Agg")
class TestDFPartOne:
# Test inter df math functions
def inter_df_math_helper(self, modin_df, pandas_df, op):
        # Test dataframe to dataframe
try:
pandas_result = getattr(pandas_df, op)(pandas_df)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df)
else:
modin_result = getattr(modin_df, op)(modin_df)
df_equals(modin_result, pandas_result)
# Test dataframe to int
try:
pandas_result = getattr(pandas_df, op)(4)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4)
else:
modin_result = getattr(modin_df, op)(4)
df_equals(modin_result, pandas_result)
# Test dataframe to float
try:
pandas_result = getattr(pandas_df, op)(4.0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4.0)
else:
modin_result = getattr(modin_df, op)(4.0)
df_equals(modin_result, pandas_result)
# Test transposed dataframes to float
try:
pandas_result = getattr(pandas_df.T, op)(4.0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df.T, op)(4.0)
else:
modin_result = getattr(modin_df.T, op)(4.0)
df_equals(modin_result, pandas_result)
frame_data = {
"{}_other".format(modin_df.columns[0]): [0, 2],
modin_df.columns[0]: [0, 19],
modin_df.columns[1]: [1, 1],
}
modin_df2 = pd.DataFrame(frame_data)
pandas_df2 = pandas.DataFrame(frame_data)
# Test dataframe to different dataframe shape
try:
pandas_result = getattr(pandas_df, op)(pandas_df2)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df2)
else:
modin_result = getattr(modin_df, op)(modin_df2)
df_equals(modin_result, pandas_result)
# Test dataframe to list
list_test = random_state.randint(RAND_LOW, RAND_HIGH, size=(modin_df.shape[1]))
try:
pandas_result = getattr(pandas_df, op)(list_test, axis=1)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(list_test, axis=1)
else:
modin_result = getattr(modin_df, op)(list_test, axis=1)
df_equals(modin_result, pandas_result)
# Test dataframe to series
series_test_modin = modin_df[modin_df.columns[0]]
series_test_pandas = pandas_df[pandas_df.columns[0]]
try:
pandas_result = getattr(pandas_df, op)(series_test_pandas, axis=0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(series_test_modin, axis=0)
else:
modin_result = getattr(modin_df, op)(series_test_modin, axis=0)
df_equals(modin_result, pandas_result)
# Test dataframe to series with different index
series_test_modin = modin_df[modin_df.columns[0]].reset_index(drop=True)
series_test_pandas = pandas_df[pandas_df.columns[0]].reset_index(drop=True)
try:
pandas_result = getattr(pandas_df, op)(series_test_pandas, axis=0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(series_test_modin, axis=0)
else:
modin_result = getattr(modin_df, op)(series_test_modin, axis=0)
df_equals(modin_result, pandas_result)
# Level test
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in modin_df.index]
)
modin_df_multi_level = modin_df.copy()
modin_df_multi_level.index = new_idx
# Defaults to pandas
with pytest.warns(UserWarning):
# Operation against self for sanity check
getattr(modin_df_multi_level, op)(modin_df_multi_level, axis=0, level=1)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_add(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "add")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_div(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "div")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_divide(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "divide")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_floordiv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "floordiv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_mod(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "mod")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_mul(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "mul")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_multiply(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "multiply")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_pow(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# TODO: Revert to others once we have an efficient way of preprocessing for positive
# values
try:
pandas_df = pandas_df.abs()
except Exception:
pass
else:
modin_df = modin_df.abs()
self.inter_df_math_helper(modin_df, pandas_df, "pow")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_sub(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "sub")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_subtract(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "subtract")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_truediv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "truediv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___div__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__div__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___add__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__add__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___radd__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__radd__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___mul__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__mul__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rmul__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rmul__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___pow__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__pow__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rpow__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rpow__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___sub__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__sub__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___floordiv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__floordiv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rfloordiv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rfloordiv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___truediv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__truediv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rtruediv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rtruediv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___mod__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__mod__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rmod__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rmod__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rdiv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rdiv__")
# END test inter df math functions
# Test comparison of inter operation functions
def comparison_inter_ops_helper(self, modin_df, pandas_df, op):
try:
pandas_result = getattr(pandas_df, op)(pandas_df)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df)
else:
modin_result = getattr(modin_df, op)(modin_df)
df_equals(modin_result, pandas_result)
try:
pandas_result = getattr(pandas_df, op)(4)
except TypeError:
with pytest.raises(TypeError):
getattr(modin_df, op)(4)
else:
modin_result = getattr(modin_df, op)(4)
df_equals(modin_result, pandas_result)
try:
pandas_result = getattr(pandas_df, op)(4.0)
except TypeError:
with pytest.raises(TypeError):
getattr(modin_df, op)(4.0)
else:
modin_result = getattr(modin_df, op)(4.0)
df_equals(modin_result, pandas_result)
try:
pandas_result = getattr(pandas_df, op)("a")
except TypeError:
with pytest.raises(TypeError):
repr(getattr(modin_df, op)("a"))
else:
modin_result = getattr(modin_df, op)("a")
df_equals(modin_result, pandas_result)
frame_data = {
"{}_other".format(modin_df.columns[0]): [0, 2],
modin_df.columns[0]: [0, 19],
modin_df.columns[1]: [1, 1],
}
modin_df2 = pd.DataFrame(frame_data)
pandas_df2 = pandas.DataFrame(frame_data)
try:
pandas_result = getattr(pandas_df, op)(pandas_df2)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df2)
else:
modin_result = getattr(modin_df, op)(modin_df2)
df_equals(modin_result, pandas_result)
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in modin_df.index]
)
modin_df_multi_level = modin_df.copy()
modin_df_multi_level.index = new_idx
# Defaults to pandas
with pytest.warns(UserWarning):
# Operation against self for sanity check
getattr(modin_df_multi_level, op)(modin_df_multi_level, axis=0, level=1)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_eq(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "eq")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ge(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "ge")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_gt(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "gt")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_le(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "le")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_lt(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "lt")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ne(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "ne")
# END test comparison of inter operation functions
# Test dataframe right operations
def inter_df_math_right_ops_helper(self, modin_df, pandas_df, op):
try:
pandas_result = getattr(pandas_df, op)(4)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4)
else:
modin_result = getattr(modin_df, op)(4)
df_equals(modin_result, pandas_result)
try:
pandas_result = getattr(pandas_df, op)(4.0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4.0)
else:
modin_result = getattr(modin_df, op)(4.0)
df_equals(modin_result, pandas_result)
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in modin_df.index]
)
modin_df_multi_level = modin_df.copy()
modin_df_multi_level.index = new_idx
# Defaults to pandas
with pytest.warns(UserWarning):
# Operation against self for sanity check
getattr(modin_df_multi_level, op)(modin_df_multi_level, axis=0, level=1)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_radd(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "radd")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rdiv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rdiv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rfloordiv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rfloordiv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rmod(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rmod")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rmul(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rmul")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rpow(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# TODO: Revert to others once we have an efficient way of preprocessing for positive values
# We need to check that negative integers are not used efficiently
if "100x100" not in request.node.name:
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rpow")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rsub(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rsub")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rtruediv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rtruediv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rsub__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "__rsub__")
# END test dataframe right operations
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_abs(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.abs()
except Exception as e:
with pytest.raises(type(e)):
modin_df.abs()
else:
modin_result = modin_df.abs()
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_add_prefix(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
test_prefix = "TEST"
new_modin_df = modin_df.add_prefix(test_prefix)
new_pandas_df = pandas_df.add_prefix(test_prefix)
df_equals(new_modin_df.columns, new_pandas_df.columns)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("testfunc", test_func_values, ids=test_func_keys)
def test_applymap(self, request, data, testfunc):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
with pytest.raises(ValueError):
x = 2
modin_df.applymap(x)
try:
pandas_result = pandas_df.applymap(testfunc)
except Exception as e:
with pytest.raises(type(e)):
modin_df.applymap(testfunc)
else:
modin_result = modin_df.applymap(testfunc)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("testfunc", test_func_values, ids=test_func_keys)
def test_applymap_numeric(self, request, data, testfunc):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
try:
pandas_result = pandas_df.applymap(testfunc)
except Exception as e:
with pytest.raises(type(e)):
modin_df.applymap(testfunc)
else:
modin_result = modin_df.applymap(testfunc)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_add_suffix(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
test_suffix = "TEST"
new_modin_df = modin_df.add_suffix(test_suffix)
new_pandas_df = pandas_df.add_suffix(test_suffix)
df_equals(new_modin_df.columns, new_pandas_df.columns)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_at(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# We skip nan datasets because nan != nan
if "nan" not in request.node.name:
key1 = modin_df.columns[0]
            # Scalar
assert modin_df.at[0, key1] == pandas_df.at[0, key1]
# Series
df_equals(modin_df.loc[0].at[key1], pandas_df.loc[0].at[key1])
# Write Item
modin_df_copy = modin_df.copy()
pandas_df_copy = pandas_df.copy()
modin_df_copy.at[1, key1] = modin_df.at[0, key1]
pandas_df_copy.at[1, key1] = pandas_df.at[0, key1]
df_equals(modin_df_copy, pandas_df_copy)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_axes(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
for modin_axis, pd_axis in zip(modin_df.axes, pandas_df.axes):
assert np.array_equal(modin_axis, pd_axis)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_copy(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
# pandas_df is unused but there so there won't be confusing list comprehension
# stuff in the pytest.mark.parametrize
new_modin_df = modin_df.copy()
assert new_modin_df is not modin_df
assert np.array_equal(
new_modin_df._query_compiler._modin_frame._partitions,
modin_df._query_compiler._modin_frame._partitions,
)
assert new_modin_df is not modin_df
df_equals(new_modin_df, modin_df)
# Shallow copy tests
modin_df = pd.DataFrame(data)
modin_df_cp = modin_df.copy(False)
modin_df[modin_df.columns[0]] = 0
df_equals(modin_df, modin_df_cp)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dtypes(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.dtypes, pandas_df.dtypes)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ftypes(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.ftypes, pandas_df.ftypes)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("key", indices_values, ids=indices_keys)
def test_get(self, data, key):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.get(key), pandas_df.get(key))
df_equals(
modin_df.get(key, default="default"), pandas_df.get(key, default="default")
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_get_dtype_counts(self, data):
modin_result = pd.DataFrame(data).get_dtype_counts().sort_index()
pandas_result = pandas.DataFrame(data).get_dtype_counts().sort_index()
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
"dummy_na", bool_arg_values, ids=arg_keys("dummy_na", bool_arg_keys)
)
@pytest.mark.parametrize(
"drop_first", bool_arg_values, ids=arg_keys("drop_first", bool_arg_keys)
)
def test_get_dummies(self, request, data, dummy_na, drop_first):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas.get_dummies(
pandas_df, dummy_na=dummy_na, drop_first=drop_first
)
except Exception as e:
with pytest.raises(type(e)):
pd.get_dummies(modin_df, dummy_na=dummy_na, drop_first=drop_first)
else:
modin_result = pd.get_dummies(
modin_df, dummy_na=dummy_na, drop_first=drop_first
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_get_ftype_counts(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.get_ftype_counts(), pandas_df.get_ftype_counts())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_agg(self, data, axis, func):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.agg(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.agg(func, axis)
else:
modin_result = modin_df.agg(func, axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_agg_numeric(self, request, data, axis, func):
if name_contains(request.node.name, numeric_agg_funcs) and name_contains(
request.node.name, numeric_dfs
):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.agg(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.agg(func, axis)
else:
modin_result = modin_df.agg(func, axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_aggregate(self, request, data, func, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.aggregate(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.aggregate(func, axis)
else:
modin_result = modin_df.aggregate(func, axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_aggregate_numeric(self, request, data, axis, func):
if name_contains(request.node.name, numeric_agg_funcs) and name_contains(
request.node.name, numeric_dfs
):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.agg(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.agg(func, axis)
else:
modin_result = modin_df.agg(func, axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_aggregate_error_checking(self, data):
modin_df = pd.DataFrame(data)
assert modin_df.aggregate("ndim") == 2
with pytest.warns(UserWarning):
modin_df.aggregate(
{modin_df.columns[0]: "sum", modin_df.columns[1]: "mean"}
)
with pytest.warns(UserWarning):
modin_df.aggregate("cumproduct")
with pytest.raises(ValueError):
modin_df.aggregate("NOT_EXISTS")
def test_align(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).align(pd.DataFrame(data))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"bool_only", bool_arg_values, ids=arg_keys("bool_only", bool_arg_keys)
)
def test_all(self, data, axis, skipna, bool_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.all(axis=axis, skipna=skipna, bool_only=bool_only)
except Exception as e:
with pytest.raises(type(e)):
modin_df.all(axis=axis, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.all(axis=axis, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
# Test when axis is None. This will get repeated but easier than using list in parameterize decorator
try:
pandas_result = pandas_df.all(axis=None, skipna=skipna, bool_only=bool_only)
except Exception as e:
with pytest.raises(type(e)):
modin_df.all(axis=None, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.all(axis=None, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.all(
axis=axis, skipna=skipna, bool_only=bool_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.all(axis=axis, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.T.all(axis=axis, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
# Test when axis is None. This will get repeated but easier than using list in parameterize decorator
try:
pandas_result = pandas_df.T.all(
axis=None, skipna=skipna, bool_only=bool_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.all(axis=None, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.T.all(axis=None, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
# test level
modin_df_multi_level = modin_df.copy()
pandas_df_multi_level = pandas_df.copy()
axis = modin_df._get_axis_number(axis) if axis is not None else 0
levels = 3
axis_names_list = [["a", "b", "c"], None]
for axis_names in axis_names_list:
if axis == 0:
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.index))],
names=axis_names,
)
modin_df_multi_level.index = new_idx
pandas_df_multi_level.index = new_idx
else:
new_col = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.columns))],
names=axis_names,
)
modin_df_multi_level.columns = new_col
pandas_df_multi_level.columns = new_col
for level in list(range(levels)) + (axis_names if axis_names else []):
try:
pandas_multi_level_result = pandas_df_multi_level.all(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
except Exception as e:
with pytest.raises(type(e)):
modin_df_multi_level.all(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
else:
modin_multi_level_result = modin_df_multi_level.all(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
df_equals(modin_multi_level_result, pandas_multi_level_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"bool_only", bool_arg_values, ids=arg_keys("bool_only", bool_arg_keys)
)
def test_any(self, data, axis, skipna, bool_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.any(axis=axis, skipna=skipna, bool_only=bool_only)
except Exception as e:
with pytest.raises(type(e)):
modin_df.any(axis=axis, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.any(axis=axis, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.any(axis=None, skipna=skipna, bool_only=bool_only)
except Exception as e:
with pytest.raises(type(e)):
modin_df.any(axis=None, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.any(axis=None, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.any(
axis=axis, skipna=skipna, bool_only=bool_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.any(axis=axis, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.T.any(axis=axis, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.any(
axis=None, skipna=skipna, bool_only=bool_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.any(axis=None, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.T.any(axis=None, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
# test level
modin_df_multi_level = modin_df.copy()
pandas_df_multi_level = pandas_df.copy()
axis = modin_df._get_axis_number(axis) if axis is not None else 0
levels = 3
axis_names_list = [["a", "b", "c"], None]
for axis_names in axis_names_list:
if axis == 0:
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.index))],
names=axis_names,
)
modin_df_multi_level.index = new_idx
pandas_df_multi_level.index = new_idx
else:
new_col = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.columns))],
names=axis_names,
)
modin_df_multi_level.columns = new_col
pandas_df_multi_level.columns = new_col
for level in list(range(levels)) + (axis_names if axis_names else []):
try:
pandas_multi_level_result = pandas_df_multi_level.any(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
except Exception as e:
with pytest.raises(type(e)):
modin_df_multi_level.any(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
else:
modin_multi_level_result = modin_df_multi_level.any(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
df_equals(modin_multi_level_result, pandas_multi_level_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_append(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
data_to_append = {"append_a": 2, "append_b": 1000}
ignore_idx_values = [True, False]
for ignore in ignore_idx_values:
try:
pandas_result = pandas_df.append(data_to_append, ignore_index=ignore)
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(data_to_append, ignore_index=ignore)
else:
modin_result = modin_df.append(data_to_append, ignore_index=ignore)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.append(pandas_df.iloc[-1])
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(modin_df.iloc[-1])
else:
modin_result = modin_df.append(modin_df.iloc[-1])
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.append(list(pandas_df.iloc[-1]))
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(list(modin_df.iloc[-1]))
else:
modin_result = modin_df.append(list(modin_df.iloc[-1]))
df_equals(modin_result, pandas_result)
verify_integrity_values = [True, False]
for verify_integrity in verify_integrity_values:
try:
pandas_result = pandas_df.append(
[pandas_df, pandas_df], verify_integrity=verify_integrity
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(
[modin_df, modin_df], verify_integrity=verify_integrity
)
else:
modin_result = modin_df.append(
[modin_df, modin_df], verify_integrity=verify_integrity
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.append(
pandas_df, verify_integrity=verify_integrity
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(modin_df, verify_integrity=verify_integrity)
else:
modin_result = modin_df.append(
modin_df, verify_integrity=verify_integrity
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_apply(self, request, data, func, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
with pytest.raises(TypeError):
modin_df.apply({"row": func}, axis=1)
try:
pandas_result = pandas_df.apply(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.apply(func, axis)
else:
modin_result = modin_df.apply(func, axis)
df_equals(modin_result, pandas_result)
def test_apply_metadata(self):
def add(a, b, c):
return a + b + c
data = {"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]}
modin_df = pd.DataFrame(data)
modin_df["add"] = modin_df.apply(
lambda row: add(row["A"], row["B"], row["C"]), axis=1
)
pandas_df = pandas.DataFrame(data)
pandas_df["add"] = pandas_df.apply(
lambda row: add(row["A"], row["B"], row["C"]), axis=1
)
df_equals(modin_df, pandas_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_apply_numeric(self, request, data, func, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
try:
pandas_result = pandas_df.apply(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.apply(func, axis)
else:
modin_result = modin_df.apply(func, axis)
df_equals(modin_result, pandas_result)
if "empty_data" not in request.node.name:
key = modin_df.columns[0]
modin_result = modin_df.apply(lambda df: df.drop(key), axis=1)
pandas_result = pandas_df.apply(lambda df: df.drop(key), axis=1)
df_equals(modin_result, pandas_result)
def test_as_blocks(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).as_blocks()
def test_as_matrix(self):
test_data = TestData()
frame = pd.DataFrame(test_data.frame)
mat = frame.as_matrix()
frame_columns = frame.columns
for i, row in enumerate(mat):
for j, value in enumerate(row):
col = frame_columns[j]
if np.isnan(value):
assert np.isnan(frame[col][i])
else:
assert value == frame[col][i]
# mixed type
mat = pd.DataFrame(test_data.mixed_frame).as_matrix(["foo", "A"])
assert mat[0, 0] == "bar"
df = pd.DataFrame({"real": [1, 2, 3], "complex": [1j, 2j, 3j]})
mat = df.as_matrix()
assert mat[0, 1] == 1j
# single block corner case
mat = pd.DataFrame(test_data.frame).as_matrix(["A", "B"])
expected = test_data.frame.reindex(columns=["A", "B"]).values
tm.assert_almost_equal(mat, expected)
def test_to_numpy(self):
test_data = TestData()
frame = pd.DataFrame(test_data.frame)
assert_array_equal(frame.values, test_data.frame.values)
def test_partition_to_numpy(self):
test_data = TestData()
frame = pd.DataFrame(test_data.frame)
for (
partition
) in frame._query_compiler._modin_frame._partitions.flatten().tolist():
assert_array_equal(partition.to_pandas().values, partition.to_numpy())
def test_asfreq(self):
index = pd.date_range("1/1/2000", periods=4, freq="T")
series = pd.Series([0.0, None, 2.0, 3.0], index=index)
df = pd.DataFrame({"s": series})
with pytest.warns(UserWarning):
# We are only testing that this defaults to pandas, so we will just check for
# the warning
df.asfreq(freq="30S")
def test_asof(self):
df = pd.DataFrame(
{"a": [10, 20, 30, 40, 50], "b": [None, None, None, None, 500]},
index=pd.DatetimeIndex(
[
"2018-02-27 09:01:00",
"2018-02-27 09:02:00",
"2018-02-27 09:03:00",
"2018-02-27 09:04:00",
"2018-02-27 09:05:00",
]
),
)
with pytest.warns(UserWarning):
df.asof(pd.DatetimeIndex(["2018-02-27 09:03:30", "2018-02-27 09:04:30"]))
def test_assign(self):
data = test_data_values[0]
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
with pytest.warns(UserWarning):
modin_result = modin_df.assign(new_column=pd.Series(modin_df.iloc[:, 0]))
pandas_result = pandas_df.assign(new_column=pd.Series(pandas_df.iloc[:, 0]))
df_equals(modin_result, pandas_result)
def test_astype(self):
td = TestData()
modin_df = pd.DataFrame(
td.frame.values, index=td.frame.index, columns=td.frame.columns
)
expected_df = pandas.DataFrame(
td.frame.values, index=td.frame.index, columns=td.frame.columns
)
modin_df_casted = modin_df.astype(np.int32)
expected_df_casted = expected_df.astype(np.int32)
df_equals(modin_df_casted, expected_df_casted)
modin_df_casted = modin_df.astype(np.float64)
expected_df_casted = expected_df.astype(np.float64)
df_equals(modin_df_casted, expected_df_casted)
modin_df_casted = modin_df.astype(str)
expected_df_casted = expected_df.astype(str)
df_equals(modin_df_casted, expected_df_casted)
modin_df_casted = modin_df.astype("category")
expected_df_casted = expected_df.astype("category")
df_equals(modin_df_casted, expected_df_casted)
dtype_dict = {"A": np.int32, "B": np.int64, "C": str}
modin_df_casted = modin_df.astype(dtype_dict)
expected_df_casted = expected_df.astype(dtype_dict)
df_equals(modin_df_casted, expected_df_casted)
# Ignore lint because this is testing bad input
bad_dtype_dict = {"B": np.int32, "B": np.int64, "B": str} # noqa F601
modin_df_casted = modin_df.astype(bad_dtype_dict)
expected_df_casted = expected_df.astype(bad_dtype_dict)
df_equals(modin_df_casted, expected_df_casted)
with pytest.raises(KeyError):
modin_df.astype({"not_exists": np.uint8})
def test_astype_category(self):
modin_df = pd.DataFrame(
{"col1": ["A", "A", "B", "B", "A"], "col2": [1, 2, 3, 4, 5]}
)
pandas_df = pandas.DataFrame(
{"col1": ["A", "A", "B", "B", "A"], "col2": [1, 2, 3, 4, 5]}
)
modin_result = modin_df.astype({"col1": "category"})
pandas_result = pandas_df.astype({"col1": "category"})
df_equals(modin_result, pandas_result)
assert modin_result.dtypes.equals(pandas_result.dtypes)
modin_result = modin_df.astype("category")
pandas_result = pandas_df.astype("category")
df_equals(modin_result, pandas_result)
assert modin_result.dtypes.equals(pandas_result.dtypes)
def test_at_time(self):
i = pd.date_range("2018-04-09", periods=4, freq="12H")
ts = pd.DataFrame({"A": [1, 2, 3, 4]}, index=i)
with pytest.warns(UserWarning):
ts.at_time("12:00")
def test_between_time(self):
i = pd.date_range("2018-04-09", periods=4, freq="12H")
ts = pd.DataFrame({"A": [1, 2, 3, 4]}, index=i)
with pytest.warns(UserWarning):
ts.between_time("0:15", "0:45")
def test_bfill(self):
test_data = TestData()
test_data.tsframe["A"][:5] = np.nan
test_data.tsframe["A"][-5:] = np.nan
modin_df = pd.DataFrame(test_data.tsframe)
df_equals(modin_df.bfill(), test_data.tsframe.bfill())
def test_blocks(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).blocks
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_bool(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(ValueError):
modin_df.bool()
modin_df.__bool__()
single_bool_pandas_df = pandas.DataFrame([True])
single_bool_modin_df = pd.DataFrame([True])
assert single_bool_pandas_df.bool() == single_bool_modin_df.bool()
with pytest.raises(ValueError):
# __bool__ always raises this error for DataFrames
single_bool_modin_df.__bool__()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_boxplot(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
assert modin_df.boxplot() == to_pandas(modin_df).boxplot()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_clip(self, request, data, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
ind_len = (
len(modin_df.index)
if not pandas.DataFrame()._get_axis_number(axis)
else len(modin_df.columns)
)
# set bounds
lower, upper = np.sort(random_state.random_integers(RAND_LOW, RAND_HIGH, 2))
lower_list = random_state.random_integers(RAND_LOW, RAND_HIGH, ind_len)
upper_list = random_state.random_integers(RAND_LOW, RAND_HIGH, ind_len)
# test only upper scalar bound
modin_result = modin_df.clip(None, upper, axis=axis)
pandas_result = pandas_df.clip(None, upper, axis=axis)
df_equals(modin_result, pandas_result)
# test lower and upper scalar bound
modin_result = modin_df.clip(lower, upper, axis=axis)
pandas_result = pandas_df.clip(lower, upper, axis=axis)
df_equals(modin_result, pandas_result)
# test lower and upper list bound on each column
modin_result = modin_df.clip(lower_list, upper_list, axis=axis)
pandas_result = pandas_df.clip(lower_list, upper_list, axis=axis)
df_equals(modin_result, pandas_result)
# test only upper list bound on each column
modin_result = modin_df.clip(np.nan, upper_list, axis=axis)
pandas_result = pandas_df.clip(np.nan, upper_list, axis=axis)
df_equals(modin_result, pandas_result)
with pytest.raises(ValueError):
modin_df.clip(lower=[1, 2, 3], axis=None)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_clip_lower(self, request, data, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
ind_len = (
len(modin_df.index)
if not pandas.DataFrame()._get_axis_number(axis)
else len(modin_df.columns)
)
# set bounds
lower = random_state.random_integers(RAND_LOW, RAND_HIGH, 1)[0]
lower_list = random_state.random_integers(RAND_LOW, RAND_HIGH, ind_len)
# test lower scalar bound
pandas_result = pandas_df.clip_lower(lower, axis=axis)
modin_result = modin_df.clip_lower(lower, axis=axis)
df_equals(modin_result, pandas_result)
# test lower list bound on each column
pandas_result = pandas_df.clip_lower(lower_list, axis=axis)
modin_result = modin_df.clip_lower(lower_list, axis=axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_clip_upper(self, request, data, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
ind_len = (
len(modin_df.index)
if not pandas.DataFrame()._get_axis_number(axis)
else len(modin_df.columns)
)
# set bounds
upper = random_state.random_integers(RAND_LOW, RAND_HIGH, 1)[0]
upper_list = random_state.random_integers(RAND_LOW, RAND_HIGH, ind_len)
# test upper scalar bound
modin_result = modin_df.clip_upper(upper, axis=axis)
pandas_result = pandas_df.clip_upper(upper, axis=axis)
df_equals(modin_result, pandas_result)
# test upper list bound on each column
modin_result = modin_df.clip_upper(upper_list, axis=axis)
pandas_result = pandas_df.clip_upper(upper_list, axis=axis)
df_equals(modin_result, pandas_result)
def test_combine(self):
df1 = pd.DataFrame({"A": [0, 0], "B": [4, 4]})
df2 = pd.DataFrame({"A": [1, 1], "B": [3, 3]})
with pytest.warns(UserWarning):
df1.combine(df2, lambda s1, s2: s1 if s1.sum() < s2.sum() else s2)
def test_combine_first(self):
df1 = pd.DataFrame({"A": [None, 0], "B": [None, 4]})
df2 = pd.DataFrame({"A": [1, 1], "B": [3, 3]})
with pytest.warns(UserWarning):
df1.combine_first(df2)
def test_compound(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).compound()
def test_corr(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).corr()
def test_corrwith(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).corrwith(pd.DataFrame(data))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
def test_count(self, request, data, axis, numeric_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.count(axis=axis, numeric_only=numeric_only)
pandas_result = pandas_df.count(axis=axis, numeric_only=numeric_only)
df_equals(modin_result, pandas_result)
modin_result = modin_df.T.count(axis=axis, numeric_only=numeric_only)
pandas_result = pandas_df.T.count(axis=axis, numeric_only=numeric_only)
df_equals(modin_result, pandas_result)
# test level
modin_df_multi_level = modin_df.copy()
pandas_df_multi_level = pandas_df.copy()
axis = modin_df._get_axis_number(axis) if axis is not None else 0
levels = 3
axis_names_list = [["a", "b", "c"], None]
for axis_names in axis_names_list:
if axis == 0:
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.index))],
names=axis_names,
)
modin_df_multi_level.index = new_idx
pandas_df_multi_level.index = new_idx
try: # test error
pandas_df_multi_level.count(
axis=1, numeric_only=numeric_only, level=0
)
except Exception as e:
with pytest.raises(type(e)):
modin_df_multi_level.count(
axis=1, numeric_only=numeric_only, level=0
)
else:
new_col = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.columns))],
names=axis_names,
)
modin_df_multi_level.columns = new_col
pandas_df_multi_level.columns = new_col
try: # test error
pandas_df_multi_level.count(
axis=0, numeric_only=numeric_only, level=0
)
except Exception as e:
with pytest.raises(type(e)):
modin_df_multi_level.count(
axis=0, numeric_only=numeric_only, level=0
)
for level in list(range(levels)) + (axis_names if axis_names else []):
modin_multi_level_result = modin_df_multi_level.count(
axis=axis, numeric_only=numeric_only, level=level
)
pandas_multi_level_result = pandas_df_multi_level.count(
axis=axis, numeric_only=numeric_only, level=level
)
df_equals(modin_multi_level_result, pandas_multi_level_result)
def test_cov(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).cov()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_cummax(self, request, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.cummax(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.cummax(axis=axis, skipna=skipna)
else:
modin_result = modin_df.cummax(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.cummax(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.cummax(axis=axis, skipna=skipna)
else:
modin_result = modin_df.T.cummax(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_cummin(self, request, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.cummin(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.cummin(axis=axis, skipna=skipna)
else:
modin_result = modin_df.cummin(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.cummin(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.cummin(axis=axis, skipna=skipna)
else:
modin_result = modin_df.T.cummin(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_cumprod(self, request, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.cumprod(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.cumprod(axis=axis, skipna=skipna)
else:
modin_result = modin_df.cumprod(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.cumprod(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.cumprod(axis=axis, skipna=skipna)
else:
modin_result = modin_df.T.cumprod(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_cumsum(self, request, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# pandas exhibits weird behavior for this case
# Remove this case when we can pull the error messages from backend
if name_contains(request.node.name, ["datetime_timedelta_data"]) and (
axis == 0 or axis == "rows"
):
with pytest.raises(TypeError):
modin_df.cumsum(axis=axis, skipna=skipna)
else:
try:
pandas_result = pandas_df.cumsum(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.cumsum(axis=axis, skipna=skipna)
else:
modin_result = modin_df.cumsum(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
if name_contains(request.node.name, ["datetime_timedelta_data"]) and (
axis == 0 or axis == "rows"
):
with pytest.raises(TypeError):
modin_df.T.cumsum(axis=axis, skipna=skipna)
else:
try:
pandas_result = pandas_df.T.cumsum(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.cumsum(axis=axis, skipna=skipna)
else:
modin_result = modin_df.T.cumsum(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_describe(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.describe(), pandas_df.describe())
percentiles = [0.10, 0.11, 0.44, 0.78, 0.99]
df_equals(
modin_df.describe(percentiles=percentiles),
pandas_df.describe(percentiles=percentiles),
)
try:
pandas_result = pandas_df.describe(exclude=[np.float64])
except Exception as e:
with pytest.raises(type(e)):
modin_df.describe(exclude=[np.float64])
else:
modin_result = modin_df.describe(exclude=[np.float64])
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.describe(exclude=np.float64)
except Exception as e:
with pytest.raises(type(e)):
modin_df.describe(exclude=np.float64)
else:
modin_result = modin_df.describe(exclude=np.float64)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.describe(
include=[np.timedelta64, np.datetime64, np.object, np.bool]
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.describe(
include=[np.timedelta64, np.datetime64, np.object, np.bool]
)
else:
modin_result = modin_df.describe(
include=[np.timedelta64, np.datetime64, np.object, np.bool]
)
df_equals(modin_result, pandas_result)
modin_result = modin_df.describe(include=str(modin_df.dtypes.values[0]))
pandas_result = pandas_df.describe(include=str(pandas_df.dtypes.values[0]))
df_equals(modin_result, pandas_result)
modin_result = modin_df.describe(include=[np.number])
pandas_result = pandas_df.describe(include=[np.number])
df_equals(modin_result, pandas_result)
df_equals(modin_df.describe(include="all"), pandas_df.describe(include="all"))
modin_df = pd.DataFrame(data).applymap(str)
pandas_df = pandas.DataFrame(data).applymap(str)
try:
df_equals(modin_df.describe(), pandas_df.describe())
except AssertionError:
# We have to do this because we choose the highest count slightly differently
# than pandas. Because there is no true guarantee which one will be first,
# if they don't match, make sure that the `freq` is the same at least.
df_equals(
modin_df.describe().loc[["count", "unique", "freq"]],
pandas_df.describe().loc[["count", "unique", "freq"]],
)
def test_describe_dtypes(self):
modin_df = pd.DataFrame(
{
"col1": list("abc"),
"col2": list("abc"),
"col3": list("abc"),
"col4": [1, 2, 3],
}
)
pandas_df = pandas.DataFrame(
{
"col1": list("abc"),
"col2": list("abc"),
"col3": list("abc"),
"col4": [1, 2, 3],
}
)
modin_result = modin_df.describe()
pandas_result = pandas_df.describe()
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"periods", int_arg_values, ids=arg_keys("periods", int_arg_keys)
)
def test_diff(self, request, data, axis, periods):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.diff(axis=axis, periods=periods)
except Exception as e:
with pytest.raises(type(e)):
modin_df.diff(axis=axis, periods=periods)
else:
modin_result = modin_df.diff(axis=axis, periods=periods)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.diff(axis=axis, periods=periods)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.diff(axis=axis, periods=periods)
else:
modin_result = modin_df.T.diff(axis=axis, periods=periods)
df_equals(modin_result, pandas_result)
def test_drop(self):
frame_data = {"A": [1, 2, 3, 4], "B": [0, 1, 2, 3]}
simple = pandas.DataFrame(frame_data)
modin_simple = pd.DataFrame(frame_data)
df_equals(modin_simple.drop("A", axis=1), simple[["B"]])
df_equals(modin_simple.drop(["A", "B"], axis="columns"), simple[[]])
df_equals(modin_simple.drop([0, 1, 3], axis=0), simple.loc[[2], :])
df_equals(modin_simple.drop([0, 3], axis="index"), simple.loc[[1, 2], :])
pytest.raises(ValueError, modin_simple.drop, 5)
pytest.raises(ValueError, modin_simple.drop, "C", 1)
pytest.raises(ValueError, modin_simple.drop, [1, 5])
pytest.raises(ValueError, modin_simple.drop, ["A", "C"], 1)
# errors = 'ignore'
df_equals(modin_simple.drop(5, errors="ignore"), simple)
df_equals(modin_simple.drop([0, 5], errors="ignore"), simple.loc[[1, 2, 3], :])
df_equals(modin_simple.drop("C", axis=1, errors="ignore"), simple)
df_equals(modin_simple.drop(["A", "C"], axis=1, errors="ignore"), simple[["B"]])
# non-unique
nu_df = pandas.DataFrame(
zip(range(3), range(-3, 1), list("abc")), columns=["a", "a", "b"]
)
modin_nu_df = pd.DataFrame(nu_df)
df_equals(modin_nu_df.drop("a", axis=1), nu_df[["b"]])
df_equals(modin_nu_df.drop("b", axis="columns"), nu_df["a"])
df_equals(modin_nu_df.drop([]), nu_df)
nu_df = nu_df.set_index(pandas.Index(["X", "Y", "X"]))
nu_df.columns = list("abc")
modin_nu_df = pd.DataFrame(nu_df)
df_equals(modin_nu_df.drop("X", axis="rows"), nu_df.loc[["Y"], :])
df_equals(modin_nu_df.drop(["X", "Y"], axis=0), nu_df.loc[[], :])
# inplace cache issue
frame_data = random_state.randn(10, 3)
df = pandas.DataFrame(frame_data, columns=list("abc"))
modin_df = pd.DataFrame(frame_data, columns=list("abc"))
expected = df[~(df.b > 0)]
modin_df.drop(labels=df[df.b > 0].index, inplace=True)
df_equals(modin_df, expected)
midx = pd.MultiIndex(
levels=[["lama", "cow", "falcon"], ["speed", "weight", "length"]],
codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
)
df = pd.DataFrame(
index=midx,
columns=["big", "small"],
data=[
[45, 30],
[200, 100],
[1.5, 1],
[30, 20],
[250, 150],
[1.5, 0.8],
[320, 250],
[1, 0.8],
[0.3, 0.2],
],
)
with pytest.warns(UserWarning):
df.drop(index="length", level=1)
def test_drop_api_equivalence(self):
# equivalence of the labels/axis and index/columns APIs
frame_data = [[1, 2, 3], [3, 4, 5], [5, 6, 7]]
modin_df = pd.DataFrame(
frame_data, index=["a", "b", "c"], columns=["d", "e", "f"]
)
modin_df1 = modin_df.drop("a")
modin_df2 = modin_df.drop(index="a")
df_equals(modin_df1, modin_df2)
modin_df1 = modin_df.drop("d", 1)
modin_df2 = modin_df.drop(columns="d")
df_equals(modin_df1, modin_df2)
modin_df1 = modin_df.drop(labels="e", axis=1)
modin_df2 = modin_df.drop(columns="e")
df_equals(modin_df1, modin_df2)
modin_df1 = modin_df.drop(["a"], axis=0)
modin_df2 = modin_df.drop(index=["a"])
df_equals(modin_df1, modin_df2)
modin_df1 = modin_df.drop(["a"], axis=0).drop(["d"], axis=1)
modin_df2 = modin_df.drop(index=["a"], columns=["d"])
df_equals(modin_df1, modin_df2)
with pytest.raises(ValueError):
modin_df.drop(labels="a", index="b")
with pytest.raises(ValueError):
modin_df.drop(labels="a", columns="b")
with pytest.raises(ValueError):
modin_df.drop(axis=1)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_drop_transpose(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.T.drop(columns=[0, 1, 2])
pandas_result = pandas_df.T.drop(columns=[0, 1, 2])
df_equals(modin_result, pandas_result)
modin_result = modin_df.T.drop(index=["col3", "col1"])
pandas_result = pandas_df.T.drop(index=["col3", "col1"])
df_equals(modin_result, pandas_result)
modin_result = modin_df.T.drop(columns=[0, 1, 2], index=["col3", "col1"])
pandas_result = pandas_df.T.drop(columns=[0, 1, 2], index=["col3", "col1"])
df_equals(modin_result, pandas_result)
def test_droplevel(self):
df = (
pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
.set_index([0, 1])
.rename_axis(["a", "b"])
)
df.columns = pd.MultiIndex.from_tuples(
[("c", "e"), ("d", "f")], names=["level_1", "level_2"]
)
with pytest.warns(UserWarning):
df.droplevel("a")
with pytest.warns(UserWarning):
df.droplevel("level_2", axis=1)
@pytest.mark.parametrize(
"data", test_data_with_duplicates_values, ids=test_data_with_duplicates_keys
)
@pytest.mark.parametrize(
"keep", ["last", "first", False], ids=["last", "first", "False"]
)
@pytest.mark.parametrize(
"subset", [None, ["col1", "col3", "col7"]], ids=["None", "subset"]
)
def test_drop_duplicates(self, data, keep, subset):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(
modin_df.drop_duplicates(keep=keep, inplace=False, subset=subset),
pandas_df.drop_duplicates(keep=keep, inplace=False, subset=subset),
)
modin_results = modin_df.drop_duplicates(keep=keep, inplace=True, subset=subset)
pandas_results = pandas_df.drop_duplicates(
keep=keep, inplace=True, subset=subset
)
df_equals(modin_results, pandas_results)
def test_drop_duplicates_with_missing_index_values(self):
data = {
"columns": ["value", "time", "id"],
"index": [
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
20,
21,
22,
23,
24,
25,
26,
27,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
],
"data": [
["3", 1279213398000.0, 88.0],
["3", 1279204682000.0, 88.0],
["0", 1245772835000.0, 448.0],
["0", 1270564258000.0, 32.0],
["0", 1267106669000.0, 118.0],
["7", 1300621123000.0, 5.0],
["0", 1251130752000.0, 957.0],
["0", 1311683506000.0, 62.0],
["9", 1283692698000.0, 89.0],
["9", 1270234253000.0, 64.0],
["0", 1285088818000.0, 50.0],
["0", 1218212725000.0, 695.0],
["2", 1383933968000.0, 348.0],
["0", 1368227625000.0, 257.0],
["1", 1454514093000.0, 446.0],
["1", 1428497427000.0, 134.0],
["1", 1459184936000.0, 568.0],
["1", 1502293302000.0, 599.0],
["1", 1491833358000.0, 829.0],
["1", 1485431534000.0, 806.0],
["8", 1351800505000.0, 101.0],
["0", 1357247721000.0, 916.0],
["0", 1335804423000.0, 370.0],
["24", 1327547726000.0, 720.0],
["0", 1332334140000.0, 415.0],
["0", 1309543100000.0, 30.0],
["18", 1309541141000.0, 30.0],
["0", 1298979435000.0, 48.0],
["14", 1276098160000.0, 59.0],
["0", 1233936302000.0, 109.0],
],
}
pandas_df = pandas.DataFrame(
data["data"], index=data["index"], columns=data["columns"]
)
modin_df = pd.DataFrame(
data["data"], index=data["index"], columns=data["columns"]
)
modin_result = modin_df.sort_values(["id", "time"]).drop_duplicates(["id"])
pandas_result = pandas_df.sort_values(["id", "time"]).drop_duplicates(["id"])
df_equals(modin_result, pandas_result)
def test_drop_duplicates_after_sort(self):
data = [
{"value": 1, "time": 2},
{"value": 1, "time": 1},
{"value": 2, "time": 1},
{"value": 2, "time": 2},
]
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.sort_values(["value", "time"]).drop_duplicates(
["value"]
)
pandas_result = pandas_df.sort_values(["value", "time"]).drop_duplicates(
["value"]
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("how", ["any", "all"], ids=["any", "all"])
def test_dropna(self, data, axis, how):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
with pytest.raises(ValueError):
modin_df.dropna(axis=axis, how="invalid")
with pytest.raises(TypeError):
modin_df.dropna(axis=axis, how=None, thresh=None)
with pytest.raises(KeyError):
modin_df.dropna(axis=axis, subset=["NotExists"], how=how)
modin_result = modin_df.dropna(axis=axis, how=how)
pandas_result = pandas_df.dropna(axis=axis, how=how)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_inplace(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
pandas_result = pandas_df.dropna()
modin_df.dropna(inplace=True)
df_equals(modin_df, pandas_result)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
pandas_df.dropna(thresh=2, inplace=True)
modin_df.dropna(thresh=2, inplace=True)
df_equals(modin_df, pandas_df)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
pandas_df.dropna(axis=1, how="any", inplace=True)
modin_df.dropna(axis=1, how="any", inplace=True)
df_equals(modin_df, pandas_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_multiple_axes(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(
modin_df.dropna(how="all", axis=[0, 1]),
pandas_df.dropna(how="all", axis=[0, 1]),
)
df_equals(
modin_df.dropna(how="all", axis=(0, 1)),
pandas_df.dropna(how="all", axis=(0, 1)),
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_multiple_axes_inplace(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_df_copy = modin_df.copy()
pandas_df_copy = pandas_df.copy()
modin_df_copy.dropna(how="all", axis=[0, 1], inplace=True)
pandas_df_copy.dropna(how="all", axis=[0, 1], inplace=True)
df_equals(modin_df_copy, pandas_df_copy)
modin_df_copy = modin_df.copy()
pandas_df_copy = pandas_df.copy()
modin_df_copy.dropna(how="all", axis=(0, 1), inplace=True)
pandas_df_copy.dropna(how="all", axis=(0, 1), inplace=True)
df_equals(modin_df_copy, pandas_df_copy)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_subset(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if "empty_data" not in request.node.name:
column_subset = modin_df.columns[0:2]
df_equals(
modin_df.dropna(how="all", subset=column_subset),
pandas_df.dropna(how="all", subset=column_subset),
)
df_equals(
modin_df.dropna(how="any", subset=column_subset),
pandas_df.dropna(how="any", subset=column_subset),
)
row_subset = modin_df.index[0:2]
df_equals(
modin_df.dropna(how="all", axis=1, subset=row_subset),
pandas_df.dropna(how="all", axis=1, subset=row_subset),
)
df_equals(
modin_df.dropna(how="any", axis=1, subset=row_subset),
pandas_df.dropna(how="any", axis=1, subset=row_subset),
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_subset_error(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
# pandas_df is unused so there won't be confusing list comprehension
# stuff in the pytest.mark.parametrize
with pytest.raises(KeyError):
modin_df.dropna(subset=list("EF"))
if len(modin_df.columns) < 5:
with pytest.raises(KeyError):
modin_df.dropna(axis=1, subset=[4, 5])
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dot(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
col_len = len(modin_df.columns)
# Test list input
arr = np.arange(col_len)
modin_result = modin_df.dot(arr)
pandas_result = pandas_df.dot(arr)
df_equals(modin_result, pandas_result)
# Test bad dimensions
with pytest.raises(ValueError):
modin_result = modin_df.dot(np.arange(col_len + 10))
# Test series input
modin_series = pd.Series(np.arange(col_len), index=modin_df.columns)
pandas_series = pandas.Series(np.arange(col_len), index=modin_df.columns)
modin_result = modin_df.dot(modin_series)
pandas_result = pandas_df.dot(pandas_series)
df_equals(modin_result, pandas_result)
# Test when input series index doesn't line up with columns
with pytest.raises(ValueError):
modin_result = modin_df.dot(pd.Series(np.arange(col_len)))
with pytest.warns(UserWarning):
modin_df.dot(modin_df.T)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
"keep", ["last", "first", False], ids=["last", "first", "False"]
)
def test_duplicated(self, data, keep):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
pandas_result = pandas_df.duplicated(keep=keep)
modin_result = modin_df.duplicated(keep=keep)
df_equals(modin_result, pandas_result)
import random
subset = random.sample(
list(pandas_df.columns), random.randint(1, len(pandas_df.columns))
)
pandas_result = pandas_df.duplicated(keep=keep, subset=subset)
modin_result = modin_df.duplicated(keep=keep, subset=subset)
df_equals(modin_result, pandas_result)
def test_empty_df(self):
df = pd.DataFrame(index=["a", "b"])
df_is_empty(df)
tm.assert_index_equal(df.index, pd.Index(["a", "b"]))
assert len(df.columns) == 0
df = pd.DataFrame(columns=["a", "b"])
df_is_empty(df)
assert len(df.index) == 0
tm.assert_index_equal(df.columns, pd.Index(["a", "b"]))
df = pd.DataFrame()
df_is_empty(df)
assert len(df.index) == 0
assert len(df.columns) == 0
df = pd.DataFrame(index=["a", "b"])
df_is_empty(df)
tm.assert_index_equal(df.index, pd.Index(["a", "b"]))
assert len(df.columns) == 0
df = pd.DataFrame(columns=["a", "b"])
df_is_empty(df)
assert len(df.index) == 0
tm.assert_index_equal(df.columns, pd.Index(["a", "b"]))
df = pd.DataFrame()
df_is_empty(df)
assert len(df.index) == 0
assert len(df.columns) == 0
def test_equals(self):
frame_data = {"col1": [2.9, 3, 3, 3], "col2": [2, 3, 4, 1]}
modin_df1 = pd.DataFrame(frame_data)
modin_df2 = pd.DataFrame(frame_data)
assert modin_df1.equals(modin_df2)
df_equals(modin_df1, modin_df2)
df_equals(modin_df1, pd.DataFrame(modin_df1))
frame_data = {"col1": [2.9, 3, 3, 3], "col2": [2, 3, 5, 1]}
modin_df3 = pd.DataFrame(frame_data, index=list("abcd"))
assert not modin_df1.equals(modin_df3)
with pytest.raises(AssertionError):
df_equals(modin_df3, modin_df1)
with pytest.raises(AssertionError):
df_equals(modin_df3, modin_df2)
assert modin_df1.equals(modin_df2._query_compiler.to_pandas())
def test_eval_df_use_case(self):
frame_data = {"a": random_state.randn(10), "b": random_state.randn(10)}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
# test eval for series results
tmp_pandas = df.eval("arctan2(sin(a), b)", engine="python", parser="pandas")
tmp_modin = modin_df.eval(
"arctan2(sin(a), b)", engine="python", parser="pandas"
)
assert isinstance(tmp_modin, pd.Series)
df_equals(tmp_modin, tmp_pandas)
# Test not inplace assignments
tmp_pandas = df.eval("e = arctan2(sin(a), b)", engine="python", parser="pandas")
tmp_modin = modin_df.eval(
"e = arctan2(sin(a), b)", engine="python", parser="pandas"
)
df_equals(tmp_modin, tmp_pandas)
# Test inplace assignments
df.eval(
"e = arctan2(sin(a), b)", engine="python", parser="pandas", inplace=True
)
modin_df.eval(
"e = arctan2(sin(a), b)", engine="python", parser="pandas", inplace=True
)
# TODO: Use a series equality validator.
df_equals(modin_df, df)
def test_eval_df_arithmetic_subexpression(self):
frame_data = {"a": random_state.randn(10), "b": random_state.randn(10)}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
df.eval("not_e = sin(a + b)", engine="python", parser="pandas", inplace=True)
modin_df.eval(
"not_e = sin(a + b)", engine="python", parser="pandas", inplace=True
)
# TODO: Use a series equality validator.
df_equals(modin_df, df)
def test_ewm(self):
df = pd.DataFrame({"B": [0, 1, 2, np.nan, 4]})
with pytest.warns(UserWarning):
df.ewm(com=0.5).mean()
def test_expanding(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).expanding()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_explode(self, data):
modin_df = pd.DataFrame(data)
with pytest.warns(UserWarning):
modin_df.explode(modin_df.columns[0])
def test_ffill(self):
test_data = TestData()
test_data.tsframe["A"][:5] = np.nan
test_data.tsframe["A"][-5:] = np.nan
modin_df = pd.DataFrame(test_data.tsframe)
df_equals(modin_df.ffill(), test_data.tsframe.ffill())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
"method",
["backfill", "bfill", "pad", "ffill", None],
ids=["backfill", "bfill", "pad", "ffill", "None"],
)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("limit", int_arg_values, ids=int_arg_keys)
def test_fillna(self, data, method, axis, limit):
# We are not testing when limit is not positive until pandas-27042 gets fixed.
# We are not testing when axis is over rows until pandas-17399 gets fixed.
if limit > 0 and axis != 1 and axis != "columns":
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.fillna(
0, method=method, axis=axis, limit=limit
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.fillna(0, method=method, axis=axis, limit=limit)
else:
modin_result = modin_df.fillna(0, method=method, axis=axis, limit=limit)
df_equals(modin_result, pandas_result)
def test_fillna_sanity(self):
test_data = TestData()
tf = test_data.tsframe
tf.loc[tf.index[:5], "A"] = np.nan
tf.loc[tf.index[-5:], "A"] = np.nan
zero_filled = test_data.tsframe.fillna(0)
modin_df = pd.DataFrame(test_data.tsframe).fillna(0)
df_equals(modin_df, zero_filled)
padded = test_data.tsframe.fillna(method="pad")
modin_df = pd.DataFrame(test_data.tsframe).fillna(method="pad")
df_equals(modin_df, padded)
# mixed type
mf = test_data.mixed_frame
mf.loc[mf.index[5:20], "foo"] = np.nan
mf.loc[mf.index[-10:], "A"] = np.nan
result = test_data.mixed_frame.fillna(value=0)
modin_df = pd.DataFrame(test_data.mixed_frame).fillna(value=0)
df_equals(modin_df, result)
result = test_data.mixed_frame.fillna(method="pad")
modin_df = pd.DataFrame(test_data.mixed_frame).fillna(method="pad")
df_equals(modin_df, result)
pytest.raises(ValueError, test_data.tsframe.fillna)
pytest.raises(ValueError, pd.DataFrame(test_data.tsframe).fillna)
with pytest.raises(ValueError):
pd.DataFrame(test_data.tsframe).fillna(5, method="ffill")
# mixed numeric (but no float16)
mf = test_data.mixed_float.reindex(columns=["A", "B", "D"])
mf.loc[mf.index[-10:], "A"] = np.nan
result = mf.fillna(value=0)
modin_df = pd.DataFrame(mf).fillna(value=0)
df_equals(modin_df, result)
result = mf.fillna(method="pad")
modin_df = pd.DataFrame(mf).fillna(method="pad")
df_equals(modin_df, result)
# TODO: Use this when Arrow issue resolves:
# (https://issues.apache.org/jira/browse/ARROW-2122)
# empty frame
# df = DataFrame(columns=['x'])
# for m in ['pad', 'backfill']:
# df.x.fillna(method=m, inplace=True)
# df.x.fillna(method=m)
# with different dtype
frame_data = [
["a", "a", np.nan, "a"],
["b", "b", np.nan, "b"],
["c", "c", np.nan, "c"],
]
df = pandas.DataFrame(frame_data)
result = df.fillna({2: "foo"})
modin_df = pd.DataFrame(frame_data).fillna({2: "foo"})
df_equals(modin_df, result)
modin_df = pd.DataFrame(df)
df.fillna({2: "foo"}, inplace=True)
modin_df.fillna({2: "foo"}, inplace=True)
df_equals(modin_df, result)
frame_data = {
"Date": [pandas.NaT, pandas.Timestamp("2014-1-1")],
"Date2": [pandas.Timestamp("2013-1-1"), pandas.NaT],
}
df = pandas.DataFrame(frame_data)
result = df.fillna(value={"Date": df["Date2"]})
modin_df = pd.DataFrame(frame_data).fillna(value={"Date": df["Date2"]})
df_equals(modin_df, result)
# TODO: Use this when Arrow issue resolves:
# (https://issues.apache.org/jira/browse/ARROW-2122)
# with timezone
"""
frame_data = {'A': [pandas.Timestamp('2012-11-11 00:00:00+01:00'),
pandas.NaT]}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
df_equals(modin_df.fillna(method='pad'), df.fillna(method='pad'))
frame_data = {'A': [pandas.NaT,
pandas.Timestamp('2012-11-11 00:00:00+01:00')]}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data).fillna(method='bfill')
df_equals(modin_df, df.fillna(method='bfill'))
"""
def test_fillna_downcast(self):
# infer int64 from float64
frame_data = {"a": [1.0, np.nan]}
df = pandas.DataFrame(frame_data)
result = df.fillna(0, downcast="infer")
modin_df = pd.DataFrame(frame_data).fillna(0, downcast="infer")
df_equals(modin_df, result)
# infer int64 from float64 when fillna value is a dict
df = pandas.DataFrame(frame_data)
result = df.fillna({"a": 0}, downcast="infer")
modin_df = pd.DataFrame(frame_data).fillna({"a": 0}, downcast="infer")
df_equals(modin_df, result)
def test_ffill2(self):
test_data = TestData()
test_data.tsframe["A"][:5] = np.nan
test_data.tsframe["A"][-5:] = np.nan
modin_df = pd.DataFrame(test_data.tsframe)
df_equals(
modin_df.fillna(method="ffill"), test_data.tsframe.fillna(method="ffill")
)
def test_bfill2(self):
test_data = TestData()
test_data.tsframe["A"][:5] = np.nan
test_data.tsframe["A"][-5:] = np.nan
modin_df = pd.DataFrame(test_data.tsframe)
df_equals(
modin_df.fillna(method="bfill"), test_data.tsframe.fillna(method="bfill")
)
def test_fillna_inplace(self):
frame_data = random_state.randn(10, 4)
df = pandas.DataFrame(frame_data)
df[1][:4] = np.nan
df[3][-4:] = np.nan
modin_df = pd.DataFrame(df)
df.fillna(value=0, inplace=True)
try:
df_equals(modin_df, df)
except AssertionError:
pass
else:
assert False
modin_df.fillna(value=0, inplace=True)
df_equals(modin_df, df)
modin_df = pd.DataFrame(df).fillna(value={0: 0}, inplace=True)
assert modin_df is None
df[1][:4] = np.nan
df[3][-4:] = np.nan
modin_df = pd.DataFrame(df)
df.fillna(method="ffill", inplace=True)
try:
df_equals(modin_df, df)
except AssertionError:
pass
else:
assert False
modin_df.fillna(method="ffill", inplace=True)
df_equals(modin_df, df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_frame_fillna_limit(self, data):
pandas_df = pandas.DataFrame(data)
index = pandas_df.index
result = pandas_df[:2].reindex(index)
modin_df = pd.DataFrame(result)
df_equals(
modin_df.fillna(method="pad", limit=2), result.fillna(method="pad", limit=2)
)
result = pandas_df[-2:].reindex(index)
modin_df = pd.DataFrame(result)
df_equals(
modin_df.fillna(method="backfill", limit=2),
result.fillna(method="backfill", limit=2),
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_frame_pad_backfill_limit(self, data):
pandas_df = pandas.DataFrame(data)
index = pandas_df.index
result = pandas_df[:2].reindex(index)
modin_df = pd.DataFrame(result)
df_equals(
modin_df.fillna(method="pad", limit=2), result.fillna(method="pad", limit=2)
)
result = pandas_df[-2:].reindex(index)
modin_df = pd.DataFrame(result)
df_equals(
modin_df.fillna(method="backfill", limit=2),
result.fillna(method="backfill", limit=2),
)
def test_fillna_dtype_conversion(self):
# make sure that fillna on an empty frame works
df = pandas.DataFrame(index=range(3), columns=["A", "B"], dtype="float64")
modin_df = pd.DataFrame(index=range(3), columns=["A", "B"], dtype="float64")
df_equals(modin_df.fillna("nan"), df.fillna("nan"))
frame_data = {"A": [1, np.nan], "B": [1.0, 2.0]}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
for v in ["", 1, np.nan, 1.0]:
df_equals(modin_df.fillna(v), df.fillna(v))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_fillna_skip_certain_blocks(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# don't try to fill boolean, int blocks
df_equals(modin_df.fillna(np.nan), pandas_df.fillna(np.nan))
def test_fillna_dict_series(self):
frame_data = {
"a": [np.nan, 1, 2, np.nan, np.nan],
"b": [1, 2, 3, np.nan, np.nan],
"c": [np.nan, 1, 2, 3, 4],
}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
df_equals(modin_df.fillna({"a": 0, "b": 5}), df.fillna({"a": 0, "b": 5}))
df_equals(
modin_df.fillna({"a": 0, "b": 5, "d": 7}),
df.fillna({"a": 0, "b": 5, "d": 7}),
)
# Series treated same as dict
df_equals(modin_df.fillna(modin_df.max()), df.fillna(df.max()))
def test_fillna_dataframe(self):
frame_data = {
"a": [np.nan, 1, 2, np.nan, np.nan],
"b": [1, 2, 3, np.nan, np.nan],
"c": [np.nan, 1, 2, 3, 4],
}
df = pandas.DataFrame(frame_data, index=list("VWXYZ"))
modin_df = pd.DataFrame(frame_data, index=list("VWXYZ"))
# df2 may have different index and columns
df2 = pandas.DataFrame(
{
"a": [np.nan, 10, 20, 30, 40],
"b": [50, 60, 70, 80, 90],
"foo": ["bar"] * 5,
},
index=list("VWXuZ"),
)
modin_df2 = pd.DataFrame(df2)
# only those columns and indices which are shared get filled
df_equals(modin_df.fillna(modin_df2), df.fillna(df2))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_fillna_columns(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(
modin_df.fillna(method="ffill", axis=1),
pandas_df.fillna(method="ffill", axis=1),
)
df_equals(
modin_df.fillna(method="ffill", axis=1),
pandas_df.fillna(method="ffill", axis=1),
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_fillna_invalid_method(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
with tm.assert_raises_regex(ValueError, "ffil"):
modin_df.fillna(method="ffil")
def test_fillna_invalid_value(self):
test_data = TestData()
modin_df = pd.DataFrame(test_data.frame)
# list
pytest.raises(TypeError, modin_df.fillna, [1, 2])
# tuple
pytest.raises(TypeError, modin_df.fillna, (1, 2))
# frame with series
pytest.raises(TypeError, modin_df.iloc[:, 0].fillna, modin_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_fillna_col_reordering(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.fillna(method="ffill"), pandas_df.fillna(method="ffill"))
"""
TODO: Use this when Arrow issue resolves:
(https://issues.apache.org/jira/browse/ARROW-2122)
def test_fillna_datetime_columns(self):
frame_data = {'A': [-1, -2, np.nan],
'B': date_range('20130101', periods=3),
'C': ['foo', 'bar', None],
'D': ['foo2', 'bar2', None]}
df = pandas.DataFrame(frame_data, index=date_range('20130110', periods=3))
modin_df = pd.DataFrame(frame_data, index=date_range('20130110', periods=3))
df_equals(modin_df.fillna('?'), df.fillna('?'))
frame_data = {'A': [-1, -2, np.nan],
'B': [pandas.Timestamp('2013-01-01'),
pandas.Timestamp('2013-01-02'), pandas.NaT],
'C': ['foo', 'bar', None],
'D': ['foo2', 'bar2', None]}
df = pandas.DataFrame(frame_data, index=date_range('20130110', periods=3))
modin_df = pd.DataFrame(frame_data, index=date_range('20130110', periods=3))
df_equals(modin_df.fillna('?'), df.fillna('?'))
"""
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_filter(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
by = {"items": ["col1", "col5"], "regex": "4$|3$", "like": "col"}
df_equals(
modin_df.filter(items=by["items"]), pandas_df.filter(items=by["items"])
)
df_equals(
modin_df.filter(regex=by["regex"], axis=0),
pandas_df.filter(regex=by["regex"], axis=0),
)
df_equals(
modin_df.filter(regex=by["regex"], axis=1),
pandas_df.filter(regex=by["regex"], axis=1),
)
df_equals(modin_df.filter(like=by["like"]), pandas_df.filter(like=by["like"]))
with pytest.raises(TypeError):
modin_df.filter(items=by["items"], regex=by["regex"])
with pytest.raises(TypeError):
modin_df.filter()
def test_first(self):
i = pd.date_range("2018-04-09", periods=4, freq="2D")
ts = pd.DataFrame({"A": [1, 2, 3, 4]}, index=i)
with pytest.warns(UserWarning):
ts.first("3D")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_first_valid_index(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
assert modin_df.first_valid_index() == (pandas_df.first_valid_index())
@pytest.mark.skip(reason="Defaulting to Pandas")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_from_dict(self, data):
modin_df = pd.DataFrame(data) # noqa F841
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(NotImplementedError):
pd.DataFrame.from_dict(None)
@pytest.mark.skip(reason="Defaulting to Pandas")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_from_items(self, data):
modin_df = pd.DataFrame(data) # noqa F841
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(NotImplementedError):
pd.DataFrame.from_items(None)
@pytest.mark.skip(reason="Defaulting to Pandas")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_from_records(self, data):
modin_df = pd.DataFrame(data) # noqa F841
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(NotImplementedError):
pd.DataFrame.from_records(None)
def test_get_value(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).get_value(0, "col1")
def test_get_values(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).get_values()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("n", int_arg_values, ids=arg_keys("n", int_arg_keys))
def test_head(self, data, n):
# Test normal dataframe head
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.head(n), pandas_df.head(n))
df_equals(modin_df.head(len(modin_df) + 1), pandas_df.head(len(pandas_df) + 1))
# Test head when we call it from a QueryCompilerView
modin_result = modin_df.loc[:, ["col1", "col3", "col3"]].head(n)
pandas_result = pandas_df.loc[:, ["col1", "col3", "col3"]].head(n)
df_equals(modin_result, pandas_result)
def test_hist(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).hist(None)
@pytest.mark.skip(reason="Defaulting to Pandas")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_iat(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(NotImplementedError):
modin_df.iat()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_idxmax(self, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
pandas_result = pandas_df.idxmax(axis=axis, skipna=skipna)
modin_result = modin_df.idxmax(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
pandas_result = pandas_df.T.idxmax(axis=axis, skipna=skipna)
modin_result = modin_df.T.idxmax(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_idxmin(self, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.idxmin(axis=axis, skipna=skipna)
pandas_result = pandas_df.idxmin(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
modin_result = modin_df.T.idxmin(axis=axis, skipna=skipna)
pandas_result = pandas_df.T.idxmin(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
def test_infer_objects(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).infer_objects()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_iloc(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if not name_contains(request.node.name, ["empty_data"]):
# Scalar
np.testing.assert_equal(modin_df.iloc[0, 1], pandas_df.iloc[0, 1])
# Series
df_equals(modin_df.iloc[0], pandas_df.iloc[0])
df_equals(modin_df.iloc[1:, 0], pandas_df.iloc[1:, 0])
df_equals(modin_df.iloc[1:2, 0], pandas_df.iloc[1:2, 0])
# DataFrame
df_equals(modin_df.iloc[[1, 2]], pandas_df.iloc[[1, 2]])
# See issue #80
# df_equals(modin_df.iloc[[1, 2], [1, 0]], pandas_df.iloc[[1, 2], [1, 0]])
df_equals(modin_df.iloc[1:2, 0:2], pandas_df.iloc[1:2, 0:2])
# Issue #43
modin_df.iloc[0:3, :]
# Write Item
modin_df.iloc[[1, 2]] = 42
pandas_df.iloc[[1, 2]] = 42
df_equals(modin_df, pandas_df)
else:
with pytest.raises(IndexError):
modin_df.iloc[0, 1]
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_index(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.index, pandas_df.index)
modin_df_cp = modin_df.copy()
pandas_df_cp = pandas_df.copy()
modin_df_cp.index = [str(i) for i in modin_df_cp.index]
pandas_df_cp.index = [str(i) for i in pandas_df_cp.index]
df_equals(modin_df_cp.index, pandas_df_cp.index)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_indexing_duplicate_axis(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_df.index = pandas_df.index = [i // 3 for i in range(len(modin_df))]
assert any(modin_df.index.duplicated())
assert any(pandas_df.index.duplicated())
df_equals(modin_df.iloc[0], pandas_df.iloc[0])
df_equals(modin_df.loc[0], pandas_df.loc[0])
df_equals(modin_df.iloc[0, 0:4], pandas_df.iloc[0, 0:4])
df_equals(
modin_df.loc[0, modin_df.columns[0:4]],
pandas_df.loc[0, pandas_df.columns[0:4]],
)
def test_info(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).info(memory_usage="deep")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("loc", int_arg_values, ids=arg_keys("loc", int_arg_keys))
def test_insert(self, data, loc):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_df = modin_df.copy()
pandas_df = pandas_df.copy()
column = "New Column"
value = modin_df.iloc[:, 0]
try:
pandas_df.insert(loc, column, value)
except Exception as e:
with pytest.raises(type(e)):
modin_df.insert(loc, column, value)
else:
modin_df.insert(loc, column, value)
df_equals(modin_df, pandas_df)
with pytest.raises(ValueError):
modin_df.insert(0, "Bad Column", modin_df)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# hackathon T - Hacks 3.0
# flask backend of data-cleaning website
import matplotlib.pyplot as plt
#import tensorflow as tf
#from tensorflow.keras import layers
import pandas as pd
import numpy as np
from flask import *
import os
from datetime import *
from subprocess import Popen, PIPE
from math import floor
import converter as con
from flask_ngrok import run_with_ngrok
from meanShift import Mean_Shift
from matplotlib import style
#import seaborn as sns
style.use('ggplot')
from sklearn.model_selection import train_test_split
from datetime import datetime
pd.options.display.max_rows = 10
pd.options.display.float_format = "{:.1f}".format
colors = 10*['g', 'r', 'b', 'c', 'k']
from pyparsing import (
Literal,
Word,
Group,
Forward,
alphas,
alphanums,
Regex,
ParseException,
CaselessKeyword,
Suppress,
delimitedList,
)
import math
import operator
exprStack = []
def push_first(toks):
exprStack.append(toks[0])
def push_unary_minus(toks):
for t in toks:
if t == "-":
exprStack.append("unary -")
else:
break
bnf = None
def BNF():
"""
expop :: '^'
multop :: '*' | '/'
addop :: '+' | '-'
integer :: ['+' | '-'] '0'..'9'+
atom :: PI | E | real | fn '(' expr ')' | '(' expr ')'
factor :: atom [ expop factor ]*
term :: factor [ multop factor ]*
expr :: term [ addop term ]*
"""
global bnf
if not bnf:
# use CaselessKeyword for e and pi, to avoid accidentally matching
# functions that start with 'e' or 'pi' (such as 'exp'); Keyword
# and CaselessKeyword only match whole words
e = CaselessKeyword("E")
pi = CaselessKeyword("PI")
# fnumber = Combine(Word("+-"+nums, nums) +
# Optional("." + Optional(Word(nums))) +
# Optional(e + Word("+-"+nums, nums)))
# or use provided pyparsing_common.number, but convert back to str:
# fnumber = ppc.number().addParseAction(lambda t: str(t[0]))
fnumber = Regex(r"[+-]?\d+(?:\.\d*)?(?:[eE][+-]?\d+)?")
ident = Word(alphas, alphanums + "_$")
plus, minus, mult, div = map(Literal, "+-*/")
lpar, rpar = map(Suppress, "()")
addop = plus | minus
multop = mult | div
expop = Literal("^")
expr = Forward()
expr_list = delimitedList(Group(expr))
# add parse action that replaces the function identifier with a (name, number of args) tuple
def insert_fn_argcount_tuple(t):
fn = t.pop(0)
num_args = len(t[0])
t.insert(0, (fn, num_args))
fn_call = (ident + lpar - Group(expr_list) + rpar).setParseAction(
insert_fn_argcount_tuple
)
atom = (
addop[...]
+ (
(fn_call | pi | e | fnumber | ident).setParseAction(push_first)
| Group(lpar + expr + rpar)
)
).setParseAction(push_unary_minus)
# by defining exponentiation as "atom [ ^ factor ]..." instead of "atom [ ^ atom ]...", we get right-to-left
# exponents, instead of left-to-right that is, 2^3^2 = 2^(3^2), not (2^3)^2.
factor = Forward()
factor <<= atom + (expop + factor).setParseAction(push_first)[...]
term = factor + (multop + factor).setParseAction(push_first)[...]
expr <<= term + (addop + term).setParseAction(push_first)[...]
bnf = expr
return bnf
# map operator symbols to corresponding arithmetic operations
epsilon = 1e-12
opn = {
"+": operator.add,
"-": operator.sub,
"*": operator.mul,
"/": operator.truediv,
"^": operator.pow,
}
fn = {
"sin": math.sin,
"cos": math.cos,
"tan": math.tan,
"exp": math.exp,
"abs": abs,
"trunc": int,
"round": round,
"sgn": lambda a: -1 if a < -epsilon else 1 if a > epsilon else 0,
# functionsl with multiple arguments
"multiply": lambda a, b: a * b,
"hypot": math.hypot,
# functions with a variable number of arguments
"all": lambda *a: all(a),
}
def evaluate_stack(s):
op, num_args = s.pop(), 0
if isinstance(op, tuple):
op, num_args = op
if op == "unary -":
return -evaluate_stack(s)
if op in "+-*/^":
# note: operands are pushed onto the stack in reverse order
op2 = evaluate_stack(s)
op1 = evaluate_stack(s)
return opn[op](op1, op2)
elif op == "PI":
return math.pi # 3.1415926535
elif op == "E":
return math.e # 2.718281828
elif op in fn:
# note: args are pushed onto the stack in reverse order
args = reversed([evaluate_stack(s) for _ in range(num_args)])
return fn[op](*args)
elif op[0].isalpha():
raise Exception("invalid identifier '%s'" % op)
else:
# try to evaluate as int first, then as float if int fails
try:
return int(op)
except ValueError:
return float(op)
def test(s):
val = "NA"
exprStack[:] = []
try:
results = BNF().parseString(s, parseAll=True)
val = evaluate_stack(exprStack[:])
except ParseException as pe:
print(s, "failed parse:", str(pe))
except Exception as e:
print(s, "failed eval:", str(e), exprStack)
return val
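# Illustrative usage of the parser above (values are what the grammar is meant to
# produce; call only after BNF()/evaluate_stack() are defined in this module):
#   test("9 + 3 * 4")        # -> 21
#   test("sin(PI/2) + 2^3")  # -> 9.0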
def feature_pie(filename, feature1, feature2, class_size = 10):
df = pd.read_csv(filename)
sums = df.groupby(df[feature1])[feature2].sum()
plt.axis('equal')
plt.pie(sums, labels=sums.index, autopct='%1.1f%%', shadow=True, startangle=140)
plt.title("Pie chart on basis of "+feature2)
name = filename.split('.')
plt.savefig(name[0]+".png")
plt.close()
def feature_scatter(filename, feature1, feature2):
df = pd.read_csv(filename)
plt.scatter(df[feature1], df[feature2])
plt.title("Scatter plot between "+feature1+" and "+feature2)
name = filename.split('.')
plt.savefig(name[0]+".png")
plt.close()
def new_feature(filename, com, name):
df = pd.read_csv(filename)
com = com.split(',')
formula = "_"
temp = "_"
for i, c in enumerate(com):
if c == "formula":
formula = com[i+1]
temp = formula
vals = []
i = 0
print(name)
if name != " ":
i = 1
n = len(df)
for j in range(n):
for k, c in enumerate(com):
if k%2 == 0:
if c == "formula":
break
formula = formula.replace(c, str(df.at[j, com[k+1]]))
vals.append(test(formula))
formula = temp
col = len(df.axes[1])
print(vals)
df[name] = vals
"""
if name != " ":
df.insert(col, vals, True)
else:
df.insert(col, vals, True)
"""
del df['Unnamed: 0']
os.remove(filename)
df.to_csv(filename)
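# Illustrative call (the `com` format is inferred from the parsing above: alternating
# placeholder/column-name pairs followed by the pair "formula,<expression>"):
#   new_feature("static/data.csv", "a,height,b,weight,formula,b/(a*a)", "bmi")
# substitutes each row's height/weight values for "a"/"b" in the expression, evaluates
# it with test(), and appends the result as a new "bmi" column.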
def disp(filename):
df = pd.read_csv(filename)
n_row = str(len(df))
n_col = str(len(df.axes[1]))
col = []
for c in df.columns:
col.append(c)
types = df.dtypes.tolist()
f = open(filename, "r+")
line0 = f.readline()
line1 = f.readline()
line2 = f.readline()
line3 = f.readline()
line4 = f.readline()
line5 = f.readline()
f.close()
return n_row, n_col, col, types, line0, line1, line2, line3, line4, line5
def stat(filename, feature, func):
df = pd.read_csv(filename)
ans = 0
print(filename,feature,func)
print(df)
if func == "mean":
ans = df[feature].mean()
if func == "max":
ans = df[feature].max()
if func == "min":
ans = df[feature].min()
if func == "sum":
ans = df[feature].sum()
return ans
def freq(filename, feature, condition):
df = pd.read_csv(filename)
condition = condition.split(' ')
if condition[0] == "=":
print(condition[1])
counts = df[feature].value_counts().to_dict()
if condition[1] == 'N/A':
try:
return str(counts['N/A'])
except:
return '0'
try:
return str(counts[int(condition[1])])
except:
return '0'
elif condition[0] == ">":
count = 0
df = pd.read_csv(filename)
n = df.columns.get_loc(feature)
for i in range(len(df)):
if int(df.iloc[i, n]) > int(condition[1]):
count = count + 1
return str(count)
elif condition[0] == "<":
count = 0
df = pd.read_csv(filename)
n = df.columns.get_loc(feature)
for i in range(len(df)):
if df.iloc[i, n] < int(condition[1]):
count = count + 1
return str(count)
def drop(filename, feature, condition):
df = pd.read_csv(filename)
condition = condition.split(' ')
if condition[0] == "=":
df.drop(df[df[feature] == int(condition[1])].index, inplace = True)
elif condition[0] == ">":
df.drop(df[df[feature] > int(condition[1])].index, inplace = True)
elif condition[0] == "<":
df.drop(df[df[feature] < int(condition[1])].index, inplace = True)
df.to_csv(filename, index=False)
def ms(filename, feature1, feature2):
name = filename.split('.')
df = pd.read_csv(filename)
n = df.columns.get_loc(feature1)
mat1 = df.iloc[:, n].values
m = df.columns.get_loc(feature2)
mat2 = df.iloc[:, m].values
combined = np.vstack((mat1, mat2)).T
combined = combined.tolist()
clf = Mean_Shift()
clf.fit(combined)
centroids = clf.centroids
for classification in clf.classifications:
color = colors[classification]
for featureset in clf.classifications[classification]:
plt.scatter(featureset[0], featureset[1], marker='x', color=color, s=150, linewidths=5)
for c in centroids:
plt.scatter(centroids[c][0], centroids[c][1], color='k', marker='*', s=150, linewidths=5)
plt.savefig("static/ms_"+name[0].split('/')[-1]+".png")
plt.close()
def dataDivide(df, percent):
train_df=df.sample(frac=percent,random_state=200) #random state is a seed value
test_df=df.drop(train_df.index)
return train_df, test_df
def scale(train_df, test_df, scale_factor = 1):
train_df["median_house_value"] /= scale_factor
test_df["median_house_value"] /= scale_factor
return train_df, test_df
def build_model(my_learning_rate):
"""Create and compile a simple linear regression model."""
# Most simple tf.keras models are sequential.
model = tf.keras.models.Sequential()
# Add one linear layer to the model to yield a simple linear regressor.
model.add(tf.keras.layers.Dense(units=1, input_shape=(1,)))
# Compile the model topography into code that TensorFlow can efficiently
# execute. Configure training to minimize the model's mean squared error.
model.compile(optimizer=tf.keras.optimizers.RMSprop(lr=my_learning_rate),
loss="mean_squared_error",
metrics=[tf.keras.metrics.RootMeanSquaredError()])
return model
def train_model(model, df, feature, label, my_epochs,
my_batch_size=None, my_validation_split=0.1):
"""Feed a dataset into the model in order to train it."""
history = model.fit(x=df[feature],
y=df[label],
batch_size=my_batch_size,
epochs=my_epochs,
validation_split=my_validation_split)
# Gather the model's trained weight and bias.
trained_weight = model.get_weights()[0]
trained_bias = model.get_weights()[1]
# The list of epochs is stored separately from the
# rest of history.
epochs = history.epoch
# Isolate the root mean squared error for each epoch.
hist = pd.DataFrame(history.history)
rmse = hist["root_mean_squared_error"]
return epochs, rmse, history.history
def plot_the_loss_curve(epochs, mae_training, mae_validation, filename):
"""Plot a curve of loss vs. epoch."""
name = filename.split('.')
plt.figure()
plt.xlabel("Epoch")
plt.ylabel("Root Mean Squared Error")
plt.plot(epochs[1:], mae_training[1:], label="Training Loss")
plt.plot(epochs[1:], mae_validation[1:], label="Validation Loss")
plt.legend()
# We're not going to plot the first epoch, since the loss on the first epoch
# is often substantially greater than the loss for other epochs.
merged_mae_lists = mae_training[1:] + mae_validation[1:]
highest_loss = max(merged_mae_lists)
lowest_loss = min(merged_mae_lists)
delta = highest_loss - lowest_loss
print(delta)
top_of_y_axis = highest_loss + (delta * 0.05)
bottom_of_y_axis = lowest_loss - (delta * 0.05)
plt.ylim([bottom_of_y_axis, top_of_y_axis])
plt.savefig("static/nn_"+name[0]+".png")
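# Illustrative wiring of the helpers above (assumes TensorFlow is importable in this
# deployment and the dataframe has the named feature/label columns; Keras adds the
# "val_" metric key when a validation split is used):
#   model = build_model(my_learning_rate=0.01)
#   epochs, rmse, history = train_model(model, train_df, "total_rooms",
#                                       "median_house_value", my_epochs=30,
#                                       my_batch_size=100)
#   plot_the_loss_curve(epochs, history["root_mean_squared_error"],
#                       history["val_root_mean_squared_error"], filename)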
app = Flask(__name__)
#app.secret_key = 'maidoublequotesmelikhrhahu'
#run_with_ngrok(app)
@app.route('/', methods=['GET', 'POST'])
def basic():
if request.method == 'POST':
if request.files['file'].filename != '':
f = request.files.get('file')
varrr = "static/"+f.filename
err=f.save(varrr)
name = f.filename.split('.')
ext = name[-1]
name = name[0]
if ext == "csv":
con.csvtojson("static/"+f.filename, "static/"+name+".json")
os.remove("static/"+f.filename)
con.jsontocsv("static/"+name+".json", "static/"+f.filename)
if ext == "json":
con.jsontocsv("static/"+f.filename, "static/"+name+".csv")
elif ext == "xml":
con.xmltocsv("static/"+f.filename, "static/"+name+".csv")
elif ext == "nc":
con.netCDFtocsv("static/"+f.filename, "static/"+name+".csv")
n_row, n_col, col, types, line0, line1, line2, line3, line4, line5 = disp("static/"+name+".csv")
res = make_response(render_template("filedata.html", filename = f.filename, n_row = n_row, n_col = n_col, col = col, types = types, lists = "../static/"+name+".csv?"+str(datetime.now()), convertable=["json", "xml", "nc"]))
res.set_cookie("filename", value=f.filename)
return res
return render_template("upload.html")
@app.route('/Info', methods=['GET', 'POST'])
def info():
filename = request.cookies.get('filename')
name = filename.split('.')
n_row, n_col, col, types, line0, line1, line2, line3, line4, line5 = disp("static/"+name[0]+".csv")
return render_template("filedata.html", filename = filename, n_row = n_row, n_col = n_col, col = col, types = types, lists = "../static/"+name[0]+".csv?"+str(datetime.now()), convertable=["json", "xml", "nc"])
@app.route('/stat', methods=['GET', 'POST'])
def stats():
if request.method == 'GET':
filename = request.args.get('filename').split('/')[-1]
name = filename.split('.')
ext = name[-1]
name = name[0]
if ext == "json":
con.jsontocsv("static/"+filename, "static/"+name+".csv")
elif ext == "nc":
con.netCDFtocsv("static/"+filename, "static/"+name+".csv")
elif ext == "xml":
con.xmltocsv("static/"+filename, "static/"+name+".csv")
feature = request.args.get('feature')
func = request.args.get('func')
ans = stat("static/"+name+".csv", feature, func)
print(ans,type(ans))
return str(ans)
return render_template("upload.html")
@app.route('/con', methods = ['GET', 'POST'])
def conv():
if request.method == 'GET':
filename = request.args.get('filename')
name = filename.split('.')
ext = name[-1]
name = name[0]
to = request.args.get('to')
if ext == "csv":
if to == "json":
con.csvtojson("static/"+filename, "static/"+name+"."+to)
elif to == "xml":
con.csvtoxml("static/"+filename, "static/"+name+"."+to)
elif to == "nc":
con.csvtonetCDF("static/"+filename, "static/"+name+"."+to)
elif ext == "json":
if to == "csv":
con.jsontocsv("static/"+filename, "static/"+name+"."+to)
elif to == "xml":
con.jsontoxml("static/"+filename, "static/"+name+"."+to)
elif to == "nc":
con.jsontonetCDF("static/"+filename, "static/"+name+"."+to)
elif ext == "xml":
if to == "json":
con.xmltojson("static/"+filename, "static/"+name+"."+to)
elif to == "csv":
con.xmltocsv("static/"+filename, "static/"+name+"."+to)
elif to == "nc":
con.xmltonetCDF("static/"+filename, "static/"+name+"."+to)
elif ext == "nc":
if to == "json":
con.netCDFtojson("static/"+filename, "static/"+name+"."+to)
elif to == "csv":
con.netCDFtocsv("static/"+filename, "static/"+name+"."+to)
elif to == "xml":
con.netCDFtoxml("static/"+filename, "static/"+name+"."+to)
return "../static/"+name+"."+to
return render_template("upload.html")
@app.route('/analyse', methods = ['GET', 'POST'])
def analyse():
filename = request.cookies.get('filename')
name = filename.split('.')
name = name[0]
df = pd.read_csv("static/"+name+".csv")
col = []
for c in df.columns:
col.append(c)
if request.method == 'GET':
feature1 = request.args.get('feature1')
feature2 = request.args.get('feature2')
if feature1 == None:
return render_template("analysis.html", col = col)
feature_pie("static/"+name+".csv", feature1, feature2)
return str("../static/"+name+".png")
return render_template("analysis.html", col = col)
@app.route('/anAdd', methods = ['GET', 'POST'])
def anAdd():
filename = request.cookies.get('filename')
name = filename.split('.')
name = name[0]
df = pd.read_csv("static/"+name+".csv")
col = []
for c in df.columns:
col.append(c)
if request.method == 'GET':
kname = request.args.get('name')
print(kname)
com = request.args.get('formula')
new_feature("static/"+filename, com, kname)
feature1 = request.args.get('feature1')
feature_pie("static/"+name+".csv", feature1, kname)
return "../static/"+name+".png"
@app.route('/clean', methods = ['GET', 'POST'])
def clean():
filename = request.cookies.get('filename')
name = filename.split('.')
name = name[0]
df = pd.read_csv("static/"+name+".csv")
col = []
for c in df.columns:
col.append(c)
if request.method == 'POST':
feature1 = request.form['feature1']
feature2 = request.form['feature2']
feature_scatter("static/"+name+".csv", feature1, feature2)
return render_template("clean.html", col = col, img = "static/"+name+".png")
return render_template("clean.html", col = col)
@app.route('/clAdd', methods = ['GET', 'POST'])
def clAdd():
filename = request.cookies.get('filename')
name = filename.split('.')
name = name[0]
df = pd.read_csv("static/"+name+".csv")
col = []
for c in df.columns:
col.append(c)
if request.method == 'GET':
kname = request.args.get('name')
com = request.args.get('formula')
feature1 = request.args.get('feature1')
new_feature("static/"+name+".csv", com, kname)
feature_scatter("static/"+name+".csv", feature1, kname)
return "../static/"+name+".png"
@app.route('/freq', methods = ['GET', 'POST'])
def fre():
filename = request.cookies.get('filename')
name = filename.split('.')
name = name[0]
df = pd.read_csv("static/"+name+".csv")
col = []
for c in df.columns:
col.append(c)
if request.method == 'GET':
feature = request.args.get('feature')
cond = request.args.get('cond')
freqq = freq('static/'+name+".csv", feature, cond)
return freqq
return render_template("clean.html", col = col)
@app.route('/drop', methods = ['GET', 'POST'])
def dro():
filename = request.cookies.get('filename')
name = filename.split('.')
name = name[0]
df = pd.read_csv("static/"+name+".csv")
col = []
for c in df.columns:
col.append(c)
if request.method == 'GET':
feature = request.args.get('feature')
cond = request.args.get('cond')
drop("static/"+name+".csv", feature, cond)
return ""
return render_template("clean.html", col = col)
@app.route('/ms', methods = ['GET', 'POST'])
def mShift():
filename = request.cookies.get('filename')
name = filename.split('.')
name = name[0]
df = | pd.read_csv("static/"+name+".csv") | pandas.read_csv |
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
import numpy as np
import pandas as pd
import streamlit as st
from cadCAD_tools.execution import easy_run
from cadCAD_tools.preparation import sweep_cartesian_product
from baseline_model.params import INITIAL_STATE, TIMESTEPS, SAMPLES, DAYS_PER_TIMESTEP
from baseline_model.structure import BLOCKS
from baseline_model.types import BaselineMinting, BaselineModelSweepParams, GrowthScenario, SimpleMinting
from utils import load_constants
C = CONSTANTS = load_constants()
@st.cache
def run_cadcad_model(
fall_after_beginning,
growth_fall,
stable_after_fall,
growth_stable,
take_off_after_stable,
growth_take_off,
steady_after_take_off,
growth_steady,
):
NPO = C["network_power"]["optimistic"]
RESULTS = []
# Run pessimistic, optimistic scenarios
SCENARIOS = [
GrowthScenario(
label="optimistic",
fall_after_beginning=NPO["fall_after_beginning"] + C["days_after_launch"],
stable_after_fall=NPO["stable_after_fall"],
take_off_after_stable=NPO["take_off_after_stable"],
steady_after_take_off=NPO["steady_after_take_off"],
growth_fall=NPO["growth_fall"],
growth_stable=NPO["growth_stable"],
growth_take_off=NPO["growth_take_off"],
growth_steady=NPO["growth_steady"],
),
GrowthScenario(
label="pessimistic",
fall_after_beginning=fall_after_beginning + C["days_after_launch"],
stable_after_fall=stable_after_fall,
take_off_after_stable=take_off_after_stable,
steady_after_take_off=steady_after_take_off,
growth_fall=growth_fall,
growth_stable=growth_stable,
growth_take_off=growth_take_off,
growth_steady=growth_steady,
),
]
RAW_PARAMS = BaselineModelSweepParams(
timestep_in_days=[DAYS_PER_TIMESTEP],
baseline_activated=[True, False],
network_power_scenario=SCENARIOS,
simple_mechanism=[SimpleMinting()],
baseline_mechanism=[BaselineMinting()],
)
PARAMS = sweep_cartesian_product(RAW_PARAMS)
RUN_ARGS = (INITIAL_STATE, PARAMS, BLOCKS, TIMESTEPS, SAMPLES)
RESULTS.append(easy_run(*RUN_ARGS))
# Run baseline scenario
RUN_ARGS = (
{**INITIAL_STATE, "network_power": INITIAL_STATE["baseline"]},
{**RAW_PARAMS, "baseline_activated": [True], "network_power_scenario": [GrowthScenario("baseline")]},
BLOCKS,
TIMESTEPS,
SAMPLES,
)
RESULTS.append(easy_run(*RUN_ARGS))
# Post-process results
df = post_process_results( | pd.concat(RESULTS) | pandas.concat |
import psycopg2
import psycopg2
import sqlalchemy as salc
import numpy as np
import warnings
import datetime
import pandas as pd
import json
from math import pi
from flask import request, send_file, Response
# import visualization libraries
from bokeh.io import export_png
from bokeh.embed import json_item
from bokeh.plotting import figure
from bokeh.models import Label, LabelSet, ColumnDataSource, Legend
from bokeh.palettes import Colorblind
from bokeh.layouts import gridplot
from bokeh.transform import cumsum
warnings.filterwarnings('ignore')
def create_routes(server):
def quarters(month, year):
if 1 <= month <= 3:
return '01' + '/' + year
elif 4 <= month <= 6:
return '04' + '/' + year
elif 5 <= month <= 9:
return '07' + '/' + year
elif 10 <= month <= 12:
return '10' + '/' + year
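# e.g. quarters(2, '2020') -> '01/2020', quarters(8, '2020') -> '07/2020'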
def new_contributor_data_collection(repo_id, required_contributions):
rank_list = []
for num in range(1, required_contributions + 1):
rank_list.append(num)
rank_tuple = tuple(rank_list)
contributor_query = salc.sql.text(f"""
SELECT * FROM (
SELECT ID AS
cntrb_id,
A.created_at AS created_at,
date_part('month', A.created_at::DATE) AS month,
date_part('year', A.created_at::DATE) AS year,
A.repo_id,
repo_name,
full_name,
login,
ACTION,
rank() OVER (
PARTITION BY id
ORDER BY A.created_at ASC
)
FROM
(
(
SELECT
canonical_id AS ID,
created_at AS created_at,
repo_id,
'issue_opened' AS ACTION,
contributors.cntrb_full_name AS full_name,
contributors.cntrb_login AS login
FROM
augur_data.issues
LEFT OUTER JOIN augur_data.contributors ON contributors.cntrb_id = issues.reporter_id
LEFT OUTER JOIN (
SELECT DISTINCT ON ( cntrb_canonical ) cntrb_full_name,
cntrb_canonical AS canonical_email,
data_collection_date,
cntrb_id AS canonical_id
FROM augur_data.contributors
WHERE cntrb_canonical = cntrb_email ORDER BY cntrb_canonical
) canonical_full_names ON canonical_full_names.canonical_email =contributors.cntrb_canonical
WHERE
repo_id = {repo_id}
AND pull_request IS NULL
GROUP BY
canonical_id,
repo_id,
issues.created_at,
contributors.cntrb_full_name,
contributors.cntrb_login
) UNION ALL
(
SELECT
canonical_id AS ID,
TO_TIMESTAMP( cmt_author_date, 'YYYY-MM-DD' ) AS created_at,
repo_id,
'commit' AS ACTION,
contributors.cntrb_full_name AS full_name,
contributors.cntrb_login AS login
FROM
augur_data.commits
LEFT OUTER JOIN augur_data.contributors ON cntrb_email = cmt_author_email
LEFT OUTER JOIN (
SELECT DISTINCT ON ( cntrb_canonical ) cntrb_full_name,
cntrb_canonical AS canonical_email,
data_collection_date, cntrb_id AS canonical_id
FROM augur_data.contributors
WHERE cntrb_canonical = cntrb_email ORDER BY cntrb_canonical
) canonical_full_names ON canonical_full_names.canonical_email =contributors.cntrb_canonical
WHERE
repo_id = {repo_id}
GROUP BY
repo_id,
canonical_email,
canonical_id,
commits.cmt_author_date,
contributors.cntrb_full_name,
contributors.cntrb_login
) UNION ALL
(
SELECT
message.cntrb_id AS ID,
created_at AS created_at,
commits.repo_id,
'commit_comment' AS ACTION,
contributors.cntrb_full_name AS full_name,
contributors.cntrb_login AS login
FROM
augur_data.commit_comment_ref,
augur_data.commits,
augur_data.message
LEFT OUTER JOIN augur_data.contributors ON contributors.cntrb_id = message.cntrb_id
LEFT OUTER JOIN (
SELECT DISTINCT ON ( cntrb_canonical ) cntrb_full_name,
cntrb_canonical AS canonical_email,
data_collection_date, cntrb_id AS canonical_id
FROM augur_data.contributors
WHERE cntrb_canonical = cntrb_email ORDER BY cntrb_canonical
) canonical_full_names ON canonical_full_names.canonical_email =contributors.cntrb_canonical
WHERE
commits.cmt_id = commit_comment_ref.cmt_id
AND commits.repo_id = {repo_id}
AND commit_comment_ref.msg_id = message.msg_id
GROUP BY
ID,
commits.repo_id,
commit_comment_ref.created_at,
contributors.cntrb_full_name,
contributors.cntrb_login
) UNION ALL
(
SELECT
issue_events.cntrb_id AS ID,
issue_events.created_at AS created_at,
issues.repo_id,
'issue_closed' AS ACTION,
contributors.cntrb_full_name AS full_name,
contributors.cntrb_login AS login
FROM
augur_data.issues,
augur_data.issue_events
LEFT OUTER JOIN augur_data.contributors ON contributors.cntrb_id = issue_events.cntrb_id
LEFT OUTER JOIN (
SELECT DISTINCT ON ( cntrb_canonical ) cntrb_full_name,
cntrb_canonical AS canonical_email,
data_collection_date,
cntrb_id AS canonical_id
FROM augur_data.contributors
WHERE cntrb_canonical = cntrb_email ORDER BY cntrb_canonical
) canonical_full_names ON canonical_full_names.canonical_email =contributors.cntrb_canonical
WHERE
issues.repo_id = {repo_id}
AND issues.issue_id = issue_events.issue_id
AND issues.pull_request IS NULL
AND issue_events.cntrb_id IS NOT NULL
AND ACTION = 'closed'
GROUP BY
issue_events.cntrb_id,
issues.repo_id,
issue_events.created_at,
contributors.cntrb_full_name,
contributors.cntrb_login
) UNION ALL
(
SELECT
pr_augur_contributor_id AS ID,
pr_created_at AS created_at,
pull_requests.repo_id,
'open_pull_request' AS ACTION,
contributors.cntrb_full_name AS full_name,
contributors.cntrb_login AS login
FROM
augur_data.pull_requests
LEFT OUTER JOIN augur_data.contributors ON pull_requests.pr_augur_contributor_id = contributors.cntrb_id
LEFT OUTER JOIN (
SELECT DISTINCT ON ( cntrb_canonical ) cntrb_full_name,
cntrb_canonical AS canonical_email,
data_collection_date,
cntrb_id AS canonical_id
FROM augur_data.contributors
WHERE cntrb_canonical = cntrb_email ORDER BY cntrb_canonical
) canonical_full_names ON canonical_full_names.canonical_email =contributors.cntrb_canonical
WHERE
pull_requests.repo_id = {repo_id}
GROUP BY
pull_requests.pr_augur_contributor_id,
pull_requests.repo_id,
pull_requests.pr_created_at,
contributors.cntrb_full_name,
contributors.cntrb_login
) UNION ALL
(
SELECT
message.cntrb_id AS ID,
msg_timestamp AS created_at,
pull_requests.repo_id as repo_id,
'pull_request_comment' AS ACTION,
contributors.cntrb_full_name AS full_name,
contributors.cntrb_login AS login
FROM
augur_data.pull_requests,
augur_data.pull_request_message_ref,
augur_data.message
LEFT OUTER JOIN augur_data.contributors ON contributors.cntrb_id = message.cntrb_id
LEFT OUTER JOIN (
SELECT DISTINCT ON ( cntrb_canonical ) cntrb_full_name,
cntrb_canonical AS canonical_email,
data_collection_date,
cntrb_id AS canonical_id
FROM augur_data.contributors
WHERE cntrb_canonical = cntrb_email ORDER BY cntrb_canonical
) canonical_full_names ON canonical_full_names.canonical_email =contributors.cntrb_canonical
WHERE
pull_requests.repo_id = {repo_id}
AND pull_request_message_ref.pull_request_id = pull_requests.pull_request_id
AND pull_request_message_ref.msg_id = message.msg_id
GROUP BY
message.cntrb_id,
pull_requests.repo_id,
message.msg_timestamp,
contributors.cntrb_full_name,
contributors.cntrb_login
) UNION ALL
(
SELECT
issues.reporter_id AS ID,
msg_timestamp AS created_at,
issues.repo_id as repo_id,
'issue_comment' AS ACTION,
contributors.cntrb_full_name AS full_name,
contributors.cntrb_login AS login
FROM
issues,
issue_message_ref,
message
LEFT OUTER JOIN augur_data.contributors ON contributors.cntrb_id = message.cntrb_id
LEFT OUTER JOIN (
SELECT DISTINCT ON ( cntrb_canonical ) cntrb_full_name,
cntrb_canonical AS canonical_email,
data_collection_date,
cntrb_id AS canonical_id
FROM augur_data.contributors
WHERE cntrb_canonical = cntrb_email ORDER BY cntrb_canonical
) canonical_full_names ON canonical_full_names.canonical_email =contributors.cntrb_canonical
WHERE
issues.repo_id = {repo_id}
AND issue_message_ref.msg_id = message.msg_id
AND issues.issue_id = issue_message_ref.issue_id
AND issues.pull_request_id IS NULL
GROUP BY
issues.reporter_id,
issues.repo_id,
message.msg_timestamp,
contributors.cntrb_full_name,
contributors.cntrb_login
)
) A,
repo
WHERE
ID IS NOT NULL
AND A.repo_id = repo.repo_id
GROUP BY
A.ID,
A.repo_id,
A.ACTION,
A.created_at,
repo.repo_name,
A.full_name,
A.login
ORDER BY
cntrb_id
) b
WHERE RANK IN {rank_tuple}
""")
df = pd.read_sql(contributor_query, server.augur_app.database)
df = df.loc[~df['full_name'].str.contains('bot', na=False)]
df = df.loc[~df['login'].str.contains('bot', na=False)]
df = df.loc[~df['cntrb_id'].isin(df[df.duplicated(['cntrb_id', 'created_at', 'repo_id', 'rank'])]['cntrb_id'])]
# add yearmonths to contributor
df[['month', 'year']] = df[['month', 'year']].astype(int).astype(str)
df['yearmonth'] = df['month'] + '/' + df['year']
df['yearmonth'] = pd.to_datetime(df['yearmonth'])
# add column with every value being one, so when the contributor df is concatenated
# with the months df, the filler months won't be counted in the sums
df['new_contributors'] = 1
# add quarters to contributor dataframe
df['month'] = df['month'].astype(int)
df['quarter'] = df.apply(lambda x: quarters(x['month'], x['year']), axis=1, result_type='reduce')
df['quarter'] = pd.to_datetime(df['quarter'])
return df
def months_data_collection(start_date, end_date):
# months_query makes a df of years and months, this is used to fill
# the months with no data in the visualizations
months_query = salc.sql.text(f"""
SELECT *
FROM
(
SELECT
date_part( 'year', created_month :: DATE ) AS year,
date_part( 'month', created_month :: DATE ) AS MONTH
FROM
(SELECT *
FROM (
SELECT created_month :: DATE
FROM generate_series (TIMESTAMP '{start_date}', TIMESTAMP '{end_date}', INTERVAL '1 month' ) created_month ) d ) x
) y
""")
months_df = pd.read_sql(months_query, server.augur_app.database)
# add yearmonths to months_df
months_df[['year', 'month']] = months_df[['year', 'month']].astype(float).astype(int).astype(str)
months_df['yearmonth'] = months_df['month'] + '/' + months_df['year']
months_df['yearmonth'] = pd.to_datetime(months_df['yearmonth'])
# filter months_df with start_date and end_date, the contributor df is filtered in the visualizations
months_df = months_df.set_index(months_df['yearmonth'])
months_df = months_df.loc[start_date: end_date].reset_index(drop=True)
# add quarters to months dataframe
months_df['month'] = months_df['month'].astype(int)
months_df['quarter'] = months_df.apply(lambda x: quarters(x['month'], x['year']), axis=1)
months_df['quarter'] = pd.to_datetime(months_df['quarter'])
return months_df
def get_repo_id_start_date_and_end_date():
now = datetime.datetime.now()
repo_id = int(request.args.get('repo_id'))
start_date = str(request.args.get('start_date', "{}-01-01".format(now.year - 1)))
end_date = str(request.args.get('end_date', "{}-{}-{}".format(now.year, now.month, now.day)))
return repo_id, start_date, end_date
def filter_out_repeats_without_required_contributions_in_required_time(repeat_list, repeats_df, required_time,
first_list):
differences = []
for i in range(0, len(repeat_list)):
time_difference = repeat_list[i] - first_list[i]
total = time_difference.days * 86400 + time_difference.seconds
differences.append(total)
repeats_df['differences'] = differences
# remove contributions who made enough contributions, but not in a short enough time
repeats_df = repeats_df.loc[repeats_df['differences'] <= required_time * 86400]
return repeats_df
def compute_fly_by_and_returning_contributors_dfs(input_df, required_contributions, required_time, start_date):
# create a copy of contributor dataframe
driver_df = input_df.copy()
# remove first time contributors before begin date, along with their second contribution
mask = (driver_df['yearmonth'] < start_date)
driver_df = driver_df[~driver_df['cntrb_id'].isin(driver_df.loc[mask]['cntrb_id'])]
# determine if contributor is a drive by by finding all the cntrb_id's that do not have a second contribution
repeats_df = driver_df.copy()
repeats_df = repeats_df.loc[repeats_df['rank'].isin([1, required_contributions])]
# removes all the contributors that only have a first contirbution
repeats_df = repeats_df[
repeats_df['cntrb_id'].isin(repeats_df.loc[driver_df['rank'] == required_contributions]['cntrb_id'])]
repeat_list = repeats_df.loc[driver_df['rank'] == required_contributions]['created_at'].tolist()
first_list = repeats_df.loc[driver_df['rank'] == 1]['created_at'].tolist()
repeats_df = repeats_df.loc[driver_df['rank'] == 1]
repeats_df['type'] = 'repeat'
repeats_df = filter_out_repeats_without_required_contributions_in_required_time(
repeat_list, repeats_df, required_time, first_list)
repeats_df = repeats_df.loc[repeats_df['differences'] <= required_time * 86400]
repeat_cntrb_ids = repeats_df['cntrb_id'].to_list()
drive_by_df = driver_df.loc[~driver_df['cntrb_id'].isin(repeat_cntrb_ids)]
drive_by_df = drive_by_df.loc[driver_df['rank'] == 1]
drive_by_df['type'] = 'drive_by'
return drive_by_df, repeats_df
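# Net effect of the split above: contributors whose first contribution falls inside the
# window and who reach required_contributions within required_time days land in
# repeats_df; the remaining first-time contributors land in drive_by_df.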
def add_caption_to_visualizations(caption, required_contributions, required_time, plot_width):
caption_plot = figure(width=plot_width, height=200, margin=(0, 0, 0, 0))
caption_plot.add_layout(Label(
x=0,
y=160,
x_units='screen',
y_units='screen',
text='{}'.format(caption.format(required_contributions, required_time)),
text_font='times',
text_font_size='15pt',
render_mode='css'
))
caption_plot.outline_line_color = None
return caption_plot
def format_new_cntrb_bar_charts(plot, rank, group_by_format_string):
plot.xgrid.grid_line_color = None
plot.y_range.start = 0
plot.axis.minor_tick_line_color = None
plot.outline_line_color = None
plot.title.align = "center"
plot.title.text_font_size = "18px"
plot.yaxis.axis_label = 'Second Time Contributors' if rank == 2 else 'New Contributors'
plot.xaxis.axis_label = group_by_format_string
plot.xaxis.axis_label_text_font_size = "18px"
plot.yaxis.axis_label_text_font_size = "16px"
plot.xaxis.major_label_text_font_size = "16px"
plot.xaxis.major_label_orientation = 45.0
plot.yaxis.major_label_text_font_size = "16px"
return plot
def add_charts_and_captions_to_correct_positions(chart_plot, caption_plot, rank, contributor_type,
row_1, row_2, row_3, row_4):
if rank == 1 and (contributor_type == 'All' or contributor_type == 'repeat'):
row_1.append(chart_plot)
row_2.append(caption_plot)
elif rank == 2 or contributor_type == 'drive_by':
row_3.append(chart_plot)
row_4.append(caption_plot)
def get_new_cntrb_bar_chart_query_params():
group_by = str(request.args.get('group_by', "quarter"))
required_contributions = int(request.args.get('required_contributions', 4))
required_time = int(request.args.get('required_time', 365))
return group_by, required_contributions, required_time
def remove_rows_before_start_date(df, start_date):
mask = (df['yearmonth'] < start_date)
result_df = df[~df['cntrb_id'].isin(df.loc[mask]['cntrb_id'])]
return result_df
def remove_rows_with_null_values(df, not_null_columns=[]):
"""Remove null data from pandas df
Parameters
-- df
description: the dataframe that will be modified
type: Pandas Dataframe
-- list_of_columns
description: columns that are searched for NULL values
type: list
default: [] (means all columns will be checked for NULL values)
IMPORTANT: if an empty list is passed or nothing is passed it will check all columns for NULL values
Return Value
-- Modified Pandas Dataframe
"""
if len(not_null_columns) == 0:
not_null_columns = df.columns.to_list()
total_rows_removed = 0
for col in not_null_columns:
rows_removed = len(df.loc[df[col].isnull() == True])
if rows_removed > 0:
print(f"{rows_removed} rows have been removed because of null values in column {col}")
total_rows_removed += rows_removed
df = df.loc[df[col].isnull() == False]
if total_rows_removed > 0:
print(f"\nTotal rows removed because of null data: {total_rows_removed}");
else:
print("No null data found")
return df
def get_needed_columns(df, list_of_columns):
"""Get only a specific list of columns from a Pandas Dataframe
Parameters
-- df
description: the dataframe that will be modified
type: Pandas Dataframe
-- list_of_columns
description: columns that will be kept in dataframe
type: list
Return Value
-- Modified Pandas Dataframe
"""
return df[list_of_columns]
def filter_data(df, needed_columns, not_null_columns=[]):
"""Filters out the unneeded rows in the df, and removed NULL data from df
Parameters
-- df
description: the dataframe that will be modified
type: Pandas Dataframe
-- needed_columns
description: the columns to keep in the dataframe
-- not_null_columns
description: columns that will be searched for NULL data,
if NULL values are found those rows will be removed
default: [] (means all columns in needed_columns list will be checked for NULL values)
IMPORTANT: if an empty list is passed or nothing is passed it will check
all columns in needed_columns list for NULL values
Return Value
-- Modified Pandas Dataframe
"""
if all(x in needed_columns for x in not_null_columns):
df = get_needed_columns(df, needed_columns)
df = remove_rows_with_null_values(df, not_null_columns)
return df
else:
print("Developer error, not null columns should be a subset of needed columns")
return df
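# Illustrative use of the helper above: keep only three columns and drop rows whose
# cntrb_id is NULL, e.g.
#   df = filter_data(df, needed_columns=['cntrb_id', 'created_at', 'rank'],
#                    not_null_columns=['cntrb_id'])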
@server.app.route('/{}/contributor_reports/new_contributors_bar/'.format(server.api_version), methods=["GET"])
def new_contributors_bar():
repo_id, start_date, end_date = get_repo_id_start_date_and_end_date()
group_by, required_contributions, required_time = get_new_cntrb_bar_chart_query_params()
input_df = new_contributor_data_collection(repo_id=repo_id, required_contributions=required_contributions)
months_df = months_data_collection(start_date=start_date, end_date=end_date)
# TODO remove full_name from data for all charts since it is not needed in vis generation
not_null_columns = ['cntrb_id', 'created_at', 'month', 'year', 'repo_id', 'repo_name', 'login', 'action',
'rank', 'yearmonth', 'new_contributors', 'quarter']
input_df = remove_rows_with_null_values(input_df, not_null_columns)
if len(input_df) == 0:
return Response(response="There is no data for this repo, in the database you are accessing",
mimetype='application/json',
status=200)
repo_dict = {repo_id: input_df.loc[input_df['repo_id'] == repo_id].iloc[0]['repo_name']}
contributor_types = ['All', 'repeat', 'drive_by']
ranks = [1, 2]
row_1, row_2, row_3, row_4 = [], [], [], []
all_df = remove_rows_before_start_date(input_df, start_date)
drive_by_df, repeats_df = compute_fly_by_and_returning_contributors_dfs(input_df, required_contributions,
required_time, start_date)
for rank in ranks:
for contributor_type in contributor_types:
# do not display these visualizations since drive-by's do not have second contributions, and the
# second contribution of a repeat contributor is the same thing as the all the second time contributors
if (rank == 2 and contributor_type == 'drive_by') or (rank == 2 and contributor_type == 'repeat'):
continue
if contributor_type == 'repeat':
driver_df = repeats_df
caption = """This graph shows repeat contributors in the specified time period. Repeat contributors
are contributors who have made {} or more contributions in {} days and their first contribution is
in the specified time period. New contributors are individuals who make their first contribution
in the specified time period."""
elif contributor_type == 'drive_by':
driver_df = drive_by_df
caption = """This graph shows fly by contributors in the specified time period. Fly by contributors
are contributors who make less than the required {} contributions in {} days. New contributors are
individuals who make their first contribution in the specified time period. Of course, then, “All
fly-by’s are by definition first time contributors”. However, not all first time contributors are
fly-by’s."""
elif contributor_type == 'All':
if rank == 1:
driver_df = all_df
# makes df with all first time contributors
driver_df = driver_df.loc[driver_df['rank'] == 1]
caption = """This graph shows all the first time contributors, whether they contribute once, or
contribute multiple times. New contributors are individuals who make their first contribution
in the specified time period."""
if rank == 2:
driver_df = all_df
# creates df with all second time contributors
driver_df = driver_df.loc[driver_df['rank'] == 2]
caption = """This graph shows the second contribution of all
first time contributors in the specified time period."""
# y_axis_label = 'Second Time Contributors'
# filter by end_date, this is not done with the begin date filtering because a repeat contributor
# will look like drive-by if the second contribution is removed by end_date filtering
mask = (driver_df['yearmonth'] < end_date)
driver_df = driver_df.loc[mask]
# adds all months to driver_df so the lists of dates will include all months and years
driver_df = pd.concat([driver_df, months_df])
data = pd.DataFrame()
if group_by == 'year':
data['dates'] = driver_df[group_by].unique()
# new contributor counts for y-axis
data['new_contributor_counts'] = driver_df.groupby([group_by]).sum().reset_index()[
'new_contributors']
# used to format x-axis and title
group_by_format_string = "Year"
elif group_by == 'quarter' or group_by == 'month':
# set variables to group the data by quarter or month
if group_by == 'quarter':
date_column = 'quarter'
group_by_format_string = "Quarter"
elif group_by == 'month':
date_column = 'yearmonth'
group_by_format_string = "Month"
# modifies the driver_df[date_column] to be a string with year and month,
# then finds all the unique values
data['dates'] = np.unique(np.datetime_as_string(driver_df[date_column], unit='M'))
# new contributor counts for y-axis
data['new_contributor_counts'] = driver_df.groupby([date_column]).sum().reset_index()[
'new_contributors']
# if the data set is large enough it will dynamically assign the width, if the data set is
# too small it will by default set to 870 pixel so the title fits
if len(data['new_contributor_counts']) >= 15:
plot_width = 46 * len(data['new_contributor_counts'])
else:
plot_width = 870
# create a dict convert an integer number into a word
# used to turn the rank into a word, so it is nicely displayed in the title
numbers = ['Zero', 'First', 'Second']
num_conversion_dict = {}
for i in range(1, len(numbers)):
num_conversion_dict[i] = numbers[i]
number = '{}'.format(num_conversion_dict[rank])
# define pot for bar chart
p = figure(x_range=data['dates'], plot_height=400, plot_width=plot_width,
title="{}: {} {} Time Contributors Per {}".format(repo_dict[repo_id],
contributor_type.capitalize(), number,
group_by_format_string),
y_range=(0, max(data['new_contributor_counts']) * 1.15), margin=(0, 0, 10, 0))
p.vbar(x=data['dates'], top=data['new_contributor_counts'], width=0.8)
source = ColumnDataSource(
data=dict(dates=data['dates'], new_contributor_counts=data['new_contributor_counts']))
# add contributor_count labels to chart
p.add_layout(LabelSet(x='dates', y='new_contributor_counts', text='new_contributor_counts', y_offset=4,
text_font_size="13pt", text_color="black",
source=source, text_align='center'))
plot = format_new_cntrb_bar_charts(p, rank, group_by_format_string)
caption_plot = add_caption_to_visualizations(caption, required_contributions, required_time, plot_width)
add_charts_and_captions_to_correct_positions(plot, caption_plot, rank, contributor_type, row_1,
row_2, row_3, row_4)
# puts plots together into a grid
grid = gridplot([row_1, row_2, row_3, row_4])
filename = export_png(grid)
return send_file(filename)
@server.app.route('/{}/contributor_reports/new_contributors_stacked_bar/'.format(server.api_version),
methods=["GET"])
def new_contributors_stacked_bar():
repo_id, start_date, end_date = get_repo_id_start_date_and_end_date()
group_by, required_contributions, required_time = get_new_cntrb_bar_chart_query_params()
input_df = new_contributor_data_collection(repo_id=repo_id, required_contributions=required_contributions)
months_df = months_data_collection(start_date=start_date, end_date=end_date)
needed_columns = ['cntrb_id', 'created_at', 'month', 'year', 'repo_id', 'repo_name', 'login', 'action',
'rank', 'yearmonth', 'new_contributors', 'quarter']
input_df = filter_data(input_df, needed_columns)
if len(input_df) == 0:
return Response(response="There is no data for this repo, in the database you are accessing",
mimetype='application/json',
status=200)
repo_dict = {repo_id: input_df.loc[input_df['repo_id'] == repo_id].iloc[0]['repo_name']}
contributor_types = ['All', 'repeat', 'drive_by']
ranks = [1, 2]
row_1, row_2, row_3, row_4 = [], [], [], []
all_df = remove_rows_before_start_date(input_df, start_date)
drive_by_df, repeats_df = compute_fly_by_and_returning_contributors_dfs(input_df, required_contributions,
required_time, start_date)
for rank in ranks:
for contributor_type in contributor_types:
# do not display these visualizations since drive-by's do not have second contributions,
# and the second contribution of a repeat contributor is the same thing as the all the
# second time contributors
if (rank == 2 and contributor_type == 'drive_by') or (rank == 2 and contributor_type == 'repeat'):
continue
if contributor_type == 'repeat':
driver_df = repeats_df
caption = """This graph shows repeat contributors in the specified time period. Repeat contributors
are contributors who have made {} or more contributions in {} days and their first contribution is
in the specified time period. New contributors are individuals who make their first contribution in
the specified time period."""
elif contributor_type == 'drive_by':
driver_df = drive_by_df
caption = """This graph shows fly by contributors in the specified time period. Fly by contributors
are contributors who make less than the required {} contributions in {} days. New contributors are
individuals who make their first contribution in the specified time period. Of course, then, “All
fly-by’s are by definition first time contributors”. However, not all first time contributors are
fly-by’s."""
elif contributor_type == 'All':
if rank == 1:
driver_df = all_df
# makes df with all first time contributors
driver_df = driver_df.loc[driver_df['rank'] == 1]
caption = """This graph shows all the first time contributors, whether they contribute once, or
contribute multiple times. New contributors are individuals who make their first contribution in
the specified time period."""
if rank == 2:
driver_df = all_df
# creates df with all second time contributor
driver_df = driver_df.loc[driver_df['rank'] == 2]
caption = """This graph shows the second contribution of all first time
contributors in the specified time period."""
# y_axis_label = 'Second Time Contributors'
# filter by end_date, this is not done with the begin date filtering because a repeat contributor will
# look like drive-by if the second contribution is removed by end_date filtering
mask = (driver_df['yearmonth'] < end_date)
driver_df = driver_df.loc[mask]
# adds all months to driver_df so the lists of dates will include all months and years
driver_df = pd.concat([driver_df, months_df])
actions = ['open_pull_request', 'pull_request_comment', 'commit', 'issue_closed', 'issue_opened',
'issue_comment']
data = pd.DataFrame()
if group_by == 'year':
# x-axis dates
data['dates'] = driver_df[group_by].unique()
for contribution_type in actions:
data[contribution_type] = \
pd.concat([driver_df.loc[driver_df['action'] == contribution_type], months_df]).groupby(
group_by).sum().reset_index()['new_contributors']
# new contributor counts for all actions
data['new_contributor_counts'] = driver_df.groupby([group_by]).sum().reset_index()[
'new_contributors']
# used to format x-axis and graph title
group_by_format_string = "Year"
elif group_by == 'quarter' or group_by == 'month':
# set variables to group the data by quarter or month
if group_by == 'quarter':
date_column = 'quarter'
group_by_format_string = "Quarter"
elif group_by == 'month':
date_column = 'yearmonth'
group_by_format_string = "Month"
# modifies the driver_df[date_column] to be a string with year and month,
# then finds all the unique values
data['dates'] = np.unique(np.datetime_as_string(driver_df[date_column], unit='M'))
# new_contributor counts for each type of action
for contribution_type in actions:
data[contribution_type] = \
| pd.concat([driver_df.loc[driver_df['action'] == contribution_type], months_df]) | pandas.concat |
import pandas as pd
import numpy as np
# Expand a list within a column to a single item in the list per row
# (duplicate all other items in the row)
def unlistify_pandas_column(df, column):
col_idx = check_column_name(df, column)
# Helper function to expand and repeat the column col_idx
def expand_and_repeat_column(d):
row = list(d.values[0])
bef = row[:col_idx]
aft = row[col_idx+1:]
col = row[col_idx]
if col:
z = [bef + [c] + aft for c in col]
else:
return pd.DataFrame([bef + [np.nan] + aft])
if len(col) == 0:
z = [bef + [np.nan] + aft]
return | pd.DataFrame(z) | pandas.DataFrame |
import numpy as np
import pandas as pd
import multiprocessing as mp
import statsmodels.api as sm
from ..multiprocessing import mp_pandas_obj
def t_val_linreg(close):
x = np.ones((close.shape[0], 2))
x[:, 1] = np.arange(close.shape[0])
ols = sm.OLS(close, x).fit()
return ols.tvalues[1]
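# Interpretation note: the returned value is the t-statistic of the fitted time-trend
# slope, so large positive (negative) values indicate a strong up (down) trend in the
# price window passed in.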
def _get_bins_from_trend(molecule, close, min_step, max_step, step):
out = pd.DataFrame(index=molecule, columns=['t1', 't_val','bin'])
hrzns = list(range(min_step, max_step + 1, step))
for dt0 in molecule:
iloc0 = close.index.get_loc(dt0)
if iloc0 + max(hrzns) > close.shape[0]:
continue
df0 = | pd.Series() | pandas.Series |
import covasim as cv
import covasim.defaults as cvd
import covasim.utils as cvu
import numba as nb
import numpy as np
import pandas as pd
from collections import defaultdict
def generate_people(n_people: int, mixing: pd.DataFrame, reference_ages: pd.Series, households: pd.Series) -> cv.People:
'''
From demographic data (cencus) households are generated, in this way we generate people and assign
them to a household in the same action. Base for generating the multi-layered network - NOT for the
simple random network.
Requires: Household mixing matrix (See https://github.com/mobs-lab/mixing-patterns)
Householder age distribution (Cencus data)
Household size distribution (Cencus data)
Number of individuals to generate.
Creates a cv.People object.
'''
# Number of households to generate
total_people = sum(households.index * households.values)
household_percent = households / total_people
n_households = (n_people * household_percent).round().astype(int)
# Adjust one-person households to match the
n_households[1] += n_people - sum(n_households * n_households.index)
# Select householder, based on householder age distribution
household_heads = np.random.choice(reference_ages.index, size=sum(n_households), p=reference_ages.values / sum(reference_ages))
# Create households, based on the formerly created householders and household mixing matrices
h_clusters, ages = _make_households(n_households, n_people, household_heads, mixing)
# Parse into a cv.People object
contacts = cv.Contacts()
contacts['H'] = clusters_to_layer(h_clusters)
people = cv.People(pars={'pop_size': n_people}, age=ages)
people.contacts = contacts
return people
def add_school_contacts(people: cv.People, mean_contacts: float):
'''
Add school contact layer, from mean classroom size and already generated people, to cv.People instance.
Actual classroom size is drawn from poisson distribution.
Everyone under 18 are assigned to a classroom cluster.
'''
classrooms = []
# Create classrooms of children of same age, assign a teacher from the adult (>21) population
for age in range(0, 18):
children_thisage = cvu.true(people.age == age)
classrooms.extend(create_clusters(children_thisage, mean_contacts))
teachers = np.random.choice(cvu.true(people.age > 21), len(classrooms), replace=False)
for i in range(len(classrooms)):
classrooms[i].append(teachers[i])
# Add to cv.People instance
people.contacts['S'] = clusters_to_layer(classrooms)
def add_work_contacts(people: cv.People, mean_contacts: float):
'''
Add work contact layer, from mean number of coworkers and already generated people, to a cv.People instance.
Actual size of workplace cluster drawn from poisson distribution.
Everyone in the age interval [18, 65] are assigned to a workplace cluster.
'''
work_inds = cvu.true((people.age > 18) & (people.age <= 65))
work_cl = create_clusters(work_inds, mean_contacts)
# Add to cv.People instance
people.contacts['W'] = clusters_to_layer(work_cl)
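# Illustrative end-to-end use of the generators above (the input names are placeholders
# for census-style data: a household-size Series indexed by size, a householder-age
# Series indexed by age, and an age-mixing DataFrame):
#   people = generate_people(n_people=10000, mixing=household_mixing_df,
#                            reference_ages=householder_age_series,
#                            households=household_size_series)
#   add_school_contacts(people, mean_contacts=20)
#   add_work_contacts(people, mean_contacts=8)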
def add_other_contacts(people: cv.People, layers: pd.DataFrame, legacy=True):
"""
Add layers according to a layer file
Args:
people: A cv.People instance to add new layers to
layer_members: Dict containing {layer_name:[indexes]} specifying who is able to have interactions within each layer
layerfile: Dataframe from `layers.csv` where the index is the layer name
"""
for layer_name, layer in layers.iterrows():
if layer['cluster_type'] in {'home', 'school', 'work'}:
# Ignore these cluster types, as they should be instantiated with
# - home: make_people()
# - school: add_school_contacts()
# - work: add_work_contacts()
continue
age_min = 0 if pd.isna(layer['age_lb']) else layer['age_lb']
age_max = np.inf if pd.isna(layer['age_ub']) else layer['age_ub']
age_eligible = cvu.true((people.age >= age_min) & (people.age <= age_max))
n_people = int(layer['proportion'] * len(age_eligible))
inds = np.random.choice(age_eligible, n_people, replace=False)
if layer['cluster_type'] == 'cluster':
# Create a clustered layer based on the mean cluster size
assert pd.isna(layer['dynamic']), 'Dynamic clusters not supported yet'
clusters = create_clusters(inds, layer['contacts'])
people.contacts[layer_name] = clusters_to_layer(clusters)
elif layer['cluster_type'] == 'complete':
# For a 'complete' layer, treat the layer members as a single cluster
assert pd.isna(layer['dynamic']), 'Dynamic complete clusters not supported yet'
people.contacts[layer_name] = clusters_to_layer([inds])
elif layer['cluster_type'] == 'random':
people.contacts[layer_name] = RandomLayer(inds, layer['contacts'], layer['dispersion'], dynamic=(not | pd.isna(layer['dynamic']) | pandas.isna |
import abc
from abc import ABC
import logging
import numpy as np
import pandas as pd
import plotly.graph_objects as go
import plotly.express as px
class ChartIndicatorException(Exception):
pass
class PlottingExeception(ChartIndicatorException):
pass
class TraceCandlesException(ChartIndicatorException):
pass
class ErrorImplementingIndicator(ChartIndicatorException):
pass
log = logging.getLogger("candlestick-chart-indicator")
class CandlestickChartIndicator(ABC):
"""
Base class responsible for the implementation of candlestick graphics, and their data.
detail:
This class implements a "Chain of Responsibility" design pattern.
https://en.wikipedia.org/wiki/Chain-of-responsibility_pattern.
"""
@abc.abstractmethod
def indicate(self):
pass
class MA(CandlestickChartIndicator):
"""
Class responsible for implementing a simple Moving Average that stops
filter out price fluctuations helping to identify trends.
"""
def indicate(self, data_frame, data=[], **kwargs):
try:
ma = data_frame['close'].rolling(window=kwargs.get("days", 21)).mean()
trace_avg = go.Scatter(x=ma.index, y=ma, name='MA', line=dict(color='#BEBECF'), opacity=0.8)
data.append(trace_avg)
except (ErrorImplementingIndicator, TypeError) as e:
log.warning(f"Error implementing 'ma' indicator: {e}")
finally:
return data
class EMA(CandlestickChartIndicator):
"""
Class responsible for implementing an exponential moving average
EMA = Price today * K + EMA yesterday x (1-k) where K = 2 /(N+1)
"""
def indicate(self, data_frame, data=[], **kwargs):
try:
k = (2 / (kwargs.get("days", 21) + 1))
ma = data_frame['close'].rolling(window=kwargs.get("days", 21)).mean()
ema_data = pd.DataFrame(index=ma.index)
ema_data['PRICE'] = data_frame['close']
ema_data['MA'] = ma
ema_data['EMA'] = np.NaN
ema_data['EMA'][0] = ema_data['MA'][1]
for i in range(1, len(ema_data)):
ema_data['EMA'][i] = (ema_data['PRICE'][i] * k) + ((1-k) * ema_data['EMA'][i-1])
trace_ema = go.Scatter(
x=ema_data.index, y=ema_data['EMA'], name='EMA', line=dict(color='#17BECF'), opacity=0.8)
data.append(trace_ema)
except (ErrorImplementingIndicator, TypeError) as e:
log.warning(f"Error implementing 'ema' indicator: {e}")
finally:
return data
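# Worked example of the smoothing constant used above: for the default 21-day window,
# k = 2 / (21 + 1) ≈ 0.0909, so each EMA point weights today's close at ~9.1% and the
# previous EMA at ~90.9%.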
class CrossingMovingAvarege(CandlestickChartIndicator):
"""
Class responsible for implementing the crossing of moving averages that consists of indicating
buying and selling an asset whenever the averages cross.
detail:
This indicator consists of 2 sets of simple moving averages. an acquaintance
as short average or short and another known as long average or long whenever short crosses
the long down we make a sale, whenever the long crosses the short up we buy.
"""
def indicate(self, data_frame, data=[], **kwargs):
try:
short_rolling = data_frame['close'].rolling(window=kwargs.get("short_rolling", 9)).mean()
long_rolling = data_frame['close'].rolling(window=kwargs.get("long_rolling", 21)).mean()
trace_short_rolling = go.Scatter(
x=short_rolling.index, y=short_rolling, name='SHORT', line=dict(color='#17BECF'), opacity=0.5)
trace_long_rolling = go.Scatter(
x=long_rolling.index, y=long_rolling, name='LONG', line=dict(color='#17becf'), opacity=0.5)
data.append(trace_short_rolling)
data.append(trace_long_rolling)
except (ErrorImplementingIndicator, TypeError) as e:
log.warning(f"Error implementing 'crossing moving avarege' indicator: {e}")
finally:
return data
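# Sketch of how the buy/sell signal described in the docstring could be derived from the
# two rolling means plotted above (this class itself only draws the lines):
#   signal = (short_rolling > long_rolling).astype(int).diff()
#   # +1 -> short crossed above long (buy), -1 -> short crossed below long (sell)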
class MACD(CandlestickChartIndicator):
"""
Class responsible for implementing a MACD -> Convergence - Divergence
of the moving average, which uses 3 exponential moving averages.
"""
def indicate(self, data_frame, data=[], **kwargs):
try:
high_average = data_frame['max'].rolling(window=kwargs.get("high", 8)).mean()
low_average = data_frame['min'].rolling(window=kwargs.get("low", 8)).mean()
hilo_high = | pd.DataFrame(index=data_frame.index) | pandas.DataFrame |
import numpy as np
import pandas as pd
data=pd.read_csv('iris.csv')
data=np.array(data)
data=np.mat(data[:,0:4])
# data length (number of samples)
length=len(data)
# compute the kernel matrix in input space via the kernel function (homogeneous quadratic kernel: K(x, y) = (x . y)^2)
k=np.mat(np.zeros((length,length)))
for i in range(0,length):
for j in range(i,length):
k[i,j]=(np.dot(data[i],data[j].T))**2
k[j,i]=k[i,j]
name=range(length)
test= | pd.DataFrame(columns=name,data=k) | pandas.DataFrame |
import pandas as pd
from neuralprophet import NeuralProphet, set_random_seed
from src.demand_prediction.events_models import save_events_model, load_events_model
from src.config import SEED
def NeuralProphetEvents(future_events, past_events, events_name, train, test, leaf_name, model_name,
start_pred_time, events_dates, use_cache=False):
test_name = leaf_name
test_df = test.pd_dataframe()
train_df = train.pd_dataframe()
train_df['ds'] = train_df.index
train_df = train_df.rename(columns={'Quantity': 'y'})
name_path_model = leaf_name + "_" + model_name + "_" + start_pred_time
model = load_events_model(name_path_model)
if model is None or not use_cache:
print("Training Event Neural Prophet")
set_random_seed(SEED)
model = NeuralProphet()
model = model.add_country_holidays("US", mode="additive", lower_window=-1, upper_window=1)
model.add_events(events_name)
history_df = model.create_df_with_events(train_df, past_events)
print("Event Neural Prophet Fitting")
metrics = model.fit(history_df, freq='D')
save_events_model(model, name_path_model)
save_events_model(history_df, name_path_model + "_history_df")
else:
print("Loaded Event Neural Prophet")
history_df = load_events_model(name_path_model + "_history_df")
if history_df is None:
print("Creating History df Neural Prophet")
history_df = model.create_df_with_events(train_df, past_events)
save_events_model(history_df, name_path_model + "_history_df")
print("Start Predicting:")
future = model.make_future_dataframe(df=history_df, events_df=future_events, periods=len(test))
forecast = model.predict(future)
preds = forecast[['ds', 'yhat1']]
predictions = | pd.DataFrame(preds) | pandas.DataFrame |
import os
from PIL import Image
import importlib
from datetime import datetime
import logging
import pandas as pd
import core.util as Util
class InfoLogger():
"""
use logging to record log, only work on GPU 0 by judging global_rank
"""
def __init__(self, opt):
self.opt = opt
self.rank = opt['global_rank']
self.phase = opt['phase']
self.setup_logger(None, opt['path']['experiments_root'], opt['phase'], level=logging.INFO, screen=False)
self.logger = logging.getLogger(opt['phase'])
self.infologger_ftns = {'info', 'warning', 'debug'}
def __getattr__(self, name):
if self.rank != 0: # info only print on GPU 0.
def wrapper(info, *args, **kwargs):
pass
return wrapper
if name in self.infologger_ftns:
print_info = getattr(self.logger, name, None)
def wrapper(info, *args, **kwargs):
print_info(info, *args, **kwargs)
return wrapper
@staticmethod
def setup_logger(logger_name, root, phase, level=logging.INFO, screen=False):
""" set up logger """
l = logging.getLogger(logger_name)
formatter = logging.Formatter(
'%(asctime)s.%(msecs)03d - %(levelname)s: %(message)s', datefmt='%y-%m-%d %H:%M:%S')
log_file = os.path.join(root, '{}.log'.format(phase))
fh = logging.FileHandler(log_file, mode='a+')
fh.setFormatter(formatter)
l.setLevel(level)
l.addHandler(fh)
if screen:
sh = logging.StreamHandler()
sh.setFormatter(formatter)
l.addHandler(sh)
class VisualWriter():
"""
use tensorboard to record visuals; supports the 'add_scalar', 'add_scalars', 'add_image', 'add_images', etc. functions.
Also integrates a save-results function.
"""
def __init__(self, opt, logger):
log_dir = opt['path']['tb_logger']
self.result_dir = opt['path']['results']
enabled = opt['train']['tensorboard']
self.rank = opt['global_rank']
self.writer = None
self.selected_module = ""
if enabled and self.rank==0:
log_dir = str(log_dir)
# Retrieve vizualization writer.
succeeded = False
for module in ["tensorboardX", "torch.utils.tensorboard"]:
try:
self.writer = importlib.import_module(module).SummaryWriter(log_dir)
succeeded = True
break
except ImportError:
succeeded = False
self.selected_module = module
if not succeeded:
message = "Warning: visualization (Tensorboard) is configured to use, but currently not installed on " \
"this machine. Please install TensorboardX with 'pip install tensorboardx', upgrade PyTorch to " \
"version >= 1.1 to use 'torch.utils.tensorboard' or turn off the option in the 'config.json' file."
logger.warning(message)
self.epoch = 0
self.iter = 0
self.phase = ''
self.tb_writer_ftns = {
'add_scalar', 'add_scalars', 'add_image', 'add_images', 'add_audio',
'add_text', 'add_histogram', 'add_pr_curve', 'add_embedding'
}
self.tag_mode_exceptions = {'add_histogram', 'add_embedding'}
self.custom_ftns = {'close'}
self.timer = datetime.now()
def set_iter(self, epoch, iter, phase='train'):
self.phase = phase
self.epoch = epoch
self.iter = iter
def save_images(self, results):
result_path = os.path.join(self.result_dir, self.phase)
os.makedirs(result_path, exist_ok=True)
result_path = os.path.join(result_path, str(self.epoch))
os.makedirs(result_path, exist_ok=True)
''' get names and corresponding images from results[OrderedDict] '''
try:
names = results['name']
outputs = Util.postprocess(results['result'])
for i in range(len(names)):
Image.fromarray(outputs[i]).save(os.path.join(result_path, names[i]))
except:
raise NotImplementedError('You must specify the context of name and result in save_current_results functions of model.')
def close(self):
self.writer.close()
print('Close the Tensorboard SummaryWriter.')
def __getattr__(self, name):
"""
If visualization is configured to use:
return add_data() methods of tensorboard with additional information (step, tag) added.
Otherwise:
return a blank function handle that does nothing
"""
if name in self.tb_writer_ftns:
add_data = getattr(self.writer, name, None)
def wrapper(tag, data, *args, **kwargs):
if add_data is not None:
# add phase(train/valid) tag
if name not in self.tag_mode_exceptions:
tag = '{}/{}'.format(self.phase, tag)
add_data(tag, data, self.iter, *args, **kwargs)
return wrapper
elif name in self.custom_ftns:
customfunc = getattr(self.writer, name, None)
def wrapper(*args, **kwargs):
if customfunc is not None:
customfunc(*args, **kwargs)
return wrapper
else:
# default action for returning methods defined in this class, set_iter() for instance.
try:
attr = object.__getattr__(name)
except AttributeError:
raise AttributeError("type object '{}' has no attribute '{}'".format(self.selected_module, name))
return attr
class LogTracker:
"""
record training numerical indicators.
"""
def __init__(self, *keys, phase='train'):
self.phase = phase
self._data = | pd.DataFrame(index=keys, columns=['total', 'counts', 'average']) | pandas.DataFrame |
import warnings
import pydot
import graphviz
# Take a look at the raw data :
import pandas as pd
from pandas import Series
from pandas import DataFrame
from pandas import read_csv
from sklearn import preprocessing
from sklearn.metrics import mean_squared_error
import matplotlib
# be able to save images on server
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from math import sqrt
import numpy as np
import tensorflow as tf
import random as rn
# The below is necessary in Python 3.2.3 onwards to
# have reproducible behavior for certain hash-based operations.
# See these references for further details:
# https://docs.python.org/3.4/using/cmdline.html#envvar-PYTHONHASHSEED
# https://github.com/fchollet/keras/issues/2280#issuecomment-306959926
import os
import sys
import errno
os.environ['PYTHONHASHSEED'] = '0'
# The below is necessary for starting Numpy generated random numbers
# in a well-defined initial state.
np.random.seed(42)
# The below is necessary for starting core Python generated random numbers
# in a well-defined state.
rn.seed(12345)
# Force TensorFlow to use single thread.
# Multiple threads are a potential source of
# non-reproducible results.
# For further details, see: https://stackoverflow.com/questions/42022950/which-seeds-have-to-be-set-where-to-realize-100-reproducibility-of-training-res
session_conf = tf.ConfigProto(
intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
from keras import backend as K
# The below tf.set_random_seed() will make random number generation
# in the TensorFlow backend have a well-defined initial state.
# For further details, see: https://www.tensorflow.org/api_docs/python/tf/set_random_seed
import keras
from keras.layers import Input, Convolution1D, Dense, MaxPooling1D, Flatten, Conv2D
from keras.layers import LSTM
from keras.callbacks import Callback
from keras.callbacks import ModelCheckpoint
from keras.utils import plot_model
# be able to save images on server
# matplotlib.use('Agg')
import time
import datetime
from keras.models import load_model
import multiprocessing
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # Hide messy TensorFlow warnings
warnings.filterwarnings("ignore") # Hide messy Numpy warnings
tf.set_random_seed(1234)
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)
class EarlyStoppingByLossVal(Callback):
def __init__(self, monitor='val_loss', value=0.00001, verbose=0):
super(Callback, self).__init__()
self.monitor = monitor
self.value = value
self.verbose = verbose
def on_epoch_end(self, epoch, logs={}):
current = logs.get(self.monitor)
if current is None:
warnings.warn("Early stopping requires %s available!" %
self.monitor, RuntimeWarning)
if current < self.value:
if self.verbose > 0:
print("Epoch %05d: early stopping THR" % epoch)
self.model.stop_training = True
class RData:
def __init__(self, path, n_weeks=26):
self.path = path
self.data = {}
# load dataset
self.data['raw'] = self.load_data()
# config
self.n_weeks = n_weeks
self.n_features = int(len(self.data['raw'][0].columns))
print("number of features: {}".format(self.n_features))
# scale data
self.scaler = preprocessing.MinMaxScaler()
self.scale()
# reframe data
self.reframe()
# self.state_list_name = self.data.state.unique()
self.split_data()
# print(self.n_features)
# Return specific data
def __getitem__(self, index):
return self.data[index]
# load dataset
def load_data(self):
raw = read_csv(self.path)
raw = raw.fillna(0)
# print(raw['0'].head())
# raw = raw.drop(["0"], axis = 1)
# print(raw.head())
# transform column names
raw.columns = map(str.lower, raw.columns)
# raw.rename(columns={'weekend': 'date'}, inplace=True)
latitudeList = raw.latitude.unique()
longitudeList = raw.longitude.unique()
data_list = list()
cell_label = list()
for la in latitudeList:
for lo in longitudeList:
data = raw[(raw.latitude == la) & (raw.longitude == lo)]
if(len(data) == 260):
select = [
#'date',
#'year',
#'month',
#'week',
#'week_temp',
#'week_prcp',
#'latitude',
#'longitude',
'mean_ili',
#'ili_activity_label',
#'ili_activity_group'
]
# One Hot Encoding
data = pd.get_dummies(data[select])
# print(data.head(1))
data_list.append(data)
cell_label.append('lat {} - long {}'.format(la, lo))
#print("The data for latitude {} and longitude {} contains {} rows".format(
# la, lo, len(data)))
self.data['cell_labels'] = cell_label
print("The are {} cell in the data".format(len(data_list)))
return data_list
# convert series to supervised learning
@staticmethod
def series_to_supervised(df, n_in=26, n_out=26, dropnan=True):
from pandas import concat
data = DataFrame(df)
n_vars = 1 if type(data) is list else data.shape[1]
df = pd.DataFrame(data)
input_list, target_list = list(), list()
input_names, target_names = list(), list()
# input sequence (t-n, ... t-1)
for i in range(n_in, 0, -1):
input_list.append(df.shift(i))
input_names += [('var%d(t-%d)' % (j + 1, i))
for j in range(n_vars)]
# forecast sequence (t, t+1, ... t+n)
for i in range(0, n_out):
target_list.append(df.shift(-i))
if i == 0:
target_names += [('var%d(t)' % (j + 1)) for j in range(n_vars)]
else:
target_names += [('var%d(t+%d)' % (j + 1, i))
for j in range(n_vars)]
# put it all together
samples = concat(input_list, axis=1)
samples.columns = input_names
targets = concat(target_list, axis=1)
targets.columns = target_names
# drop rows with NaN values
if dropnan:
targets.fillna(-1, inplace=True)
samples.fillna(-1, inplace=True)
supervised = [samples, targets]
return supervised
# convert series to supervised learning
@staticmethod
def series_to_reframed(data, n_in=26, n_out=26, dropnan=True):
n_vars = 1 if type(data) is list else data.shape[1]
df = | pd.DataFrame(data) | pandas.DataFrame |
#############################################################################################
# This script is to read netCDF files, and save the data fields into a single Excel spreadsheet or multiple csv files.
# The netCDF files are normally suggested, but sometimes the collaborators have no experience of handling netCDF files.
# Here I use the MEIC emission inventory netcdf files as the example.
import os
import pandas as pd
import xarray as xr
import numpy as np  # needed below for np.arange and np.meshgrid
#############################################################################################
#############################################################################################
# simple example: MEIC emissions at 05x0666 (emissions from all sectors are merged: files already prepared for direct use in GEOS-Chem)
os.chdir('/rds/projects/2018/maraisea-glu-01/RDS/RDS_Data/BTH_project/Inventory/MEIC_05x0666/')
# open netcdf files
MEIC_OC = xr.open_dataset("MEIC_OC.05x0666.nc")
MEIC_BC = xr.open_dataset("MEIC_BC.05x0666.nc")
print(MEIC_OC,MEIC_BC,sep="\n###################")
# convert xarray data array to pandas dataframe
def xarray_to_pandas(data):
data = data.to_dataframe()
data.reset_index(inplace=True)
return data
# why the for loop doesn't work here?
MEIC_OC_df = xarray_to_pandas(MEIC_OC)
MEIC_BC_df = xarray_to_pandas(MEIC_BC)
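# Note on the question above: a loop such as `for df in (MEIC_OC, MEIC_BC): df = xarray_to_pandas(df)`
# does not work because the loop variable is rebound inside the loop while the original names
# stay untouched. One working pattern (illustrative sketch; the helper name is an assumption)
# keeps the datasets in a dict and converts them in one pass:
def _convert_all_example(datasets):
    # datasets: dict mapping a label to an xarray.Dataset; returns a dict of DataFrames
    return {name: xarray_to_pandas(ds) for name, ds in datasets.items()}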
# extract values in 2017 and group by month
MEIC_OC_2017 = []
MEIC_BC_2017 = []
for i in range(12):
MEIC_OC_2017.append(MEIC_OC_df[(pd.DatetimeIndex(MEIC_OC_df['time']).year == 2017) &
(pd.DatetimeIndex(MEIC_OC_df['time']).month == i +1)])
MEIC_BC_2017.append(MEIC_BC_df[(pd.DatetimeIndex(MEIC_BC_df['time']).year == 2017) &
(pd.DatetimeIndex(MEIC_BC_df['time']).month == i +1)])
# think about a better/safer way to drop rows where all data fields are "NaN"
for i in range(len(MEIC_OC_2017)):
MEIC_OC_2017[i] = MEIC_OC_2017[i][MEIC_OC_2017[i]['OC_agriculture'] >= 0]
MEIC_BC_2017[i] = MEIC_BC_2017[i][MEIC_BC_2017[i]['BC_agriculture'] >= 0]
# reset index
MEIC_OC_2017 = [x.reset_index(drop=True) for x in MEIC_OC_2017]
MEIC_BC_2017 = [x.reset_index(drop=True) for x in MEIC_BC_2017]
# save results to a single xlsx file
os.chdir('/rds/projects/2018/maraisea-glu-01/Study/Research_Data/BTH/geoschem')
yymm=list(range(201701,201713))
writer=pd.ExcelWriter(r"MEIC_OC_2017_05x0666.xlsx")
for i,data in enumerate(MEIC_OC_2017):
data.to_excel(writer,sheet_name="{0}".format(yymm[i]))
writer.save()
writer=pd.ExcelWriter(r"MEIC_BC_2017_05x0666.xlsx")
for i,data in enumerate(MEIC_BC_2017):
data.to_excel(writer,sheet_name="{0}".format(yymm[i]))
writer.save()
# save results to multiple csv files
for i in range(12):
MEIC_OC_2017[i].to_csv("MEIC_OC_05x0666"+str(yymm[i])+".csv",index=False,sep=',')
MEIC_BC_2017[i].to_csv("MEIC_BC_05x0666"+str(yymm[i])+".csv",index=False,sep=',')
#############################################################################################
#############################################################################################
# complicated example: MEIC emissions at 025x025 (emissions from all sectors are seperated: raw files)
os.chdir('/rds/projects/2018/maraisea-glu-01/RDS/RDS_Data/BTH_project/Inventory/MEIC_025x025')
# first, how to remove all the items defined previously in this job?
background = [file for file in globals().keys()]
del background
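# Note on the question above: `del background` only removes the list of names, not the
# variables themselves. An explicit pattern (illustrative sketch; clearing globals is rarely
# necessary) deletes user-defined names matching a prefix:
def _drop_globals_example(prefix):
    for name in [n for n in list(globals()) if n.startswith(prefix)]:
        del globals()[name]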
# import multiple files
import glob
import re
MEIC_OC_IND = glob.glob("*industry-OC.nc")
MEIC_OC_POW = glob.glob("*power-OC.nc")
MEIC_OC_TRA = glob.glob("*transportation-OC.nc")
MEIC_OC_RES = glob.glob("*residential-OC.nc")
MEIC_OC_AGR = glob.glob("*agriculture-OC.nc")
# group the items, so you can perform the same functions for all
# if I can clean the working space, I think I will be able to group the items which name start with "MEIC_"?
# or are there other ways to combine the files with similar names more efficiently? As there will be more data fields and items to be defined.
all_MEIC_OC = [MEIC_OC_IND,MEIC_OC_POW,MEIC_OC_TRA,MEIC_OC_RES,MEIC_OC_AGR]
# sort all files numerically
for file in all_MEIC_OC:
file.sort(key=lambda var:[int(x) if x.isdigit() else x for x in re.findall(r'[^0-9]|[0-9]+', var)])
# check the sorted files
print('number of files:',len(MEIC_OC_IND),MEIC_OC_IND[0],MEIC_OC_IND[-1],sep=" ")
# To read all files together "all_MEIC_OC = [xr.open_dataset(file) for x in all_MEIC_OC for file in x]"
# but this is not a good option here, because the emission rate fields are named "z" in the raw files for all the species
# and there is no info/attribute within the file to distinguish each species
# so for now, I extract emission from each sector seperately
MEIC_OC_IND = [xr.open_dataset(file) for file in MEIC_OC_IND]
MEIC_OC_POW = [xr.open_dataset(file) for file in MEIC_OC_POW]
MEIC_OC_TRA = [xr.open_dataset(file) for file in MEIC_OC_TRA]
MEIC_OC_RES = [xr.open_dataset(file) for file in MEIC_OC_RES]
MEIC_OC_AGR = [xr.open_dataset(file) for file in MEIC_OC_AGR]
# convert xarray data array to pandas dataframe
MEIC_OC_IND_df = [xarray_to_pandas(data) for data in MEIC_OC_IND]
MEIC_OC_POW_df = [xarray_to_pandas(data) for data in MEIC_OC_POW]
MEIC_OC_TRA_df = [xarray_to_pandas(data) for data in MEIC_OC_TRA]
MEIC_OC_RES_df = [xarray_to_pandas(data) for data in MEIC_OC_RES]
MEIC_OC_AGR_df = [xarray_to_pandas(data) for data in MEIC_OC_AGR]
# check one example
print(MEIC_OC_IND_df[0].head())
# but why the loop below does not work?
# for x in all_MEIC:
# x = [xr.open_dataset(file) for file in x]
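# Note on the commented-out loop above: it fails for the same rebinding reason -- assigning to
# the loop variable `x` never updates MEIC_OC_IND and friends. Keeping the file lists in a dict,
# e.g. groups = {"IND": MEIC_OC_IND, "POW": MEIC_OC_POW, ...}, and opening them with
# {k: [xr.open_dataset(f) for f in v] for k, v in groups.items()} avoids repeating the five
# near-identical assignments (illustrative pattern; the dict keys are assumptions).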
# so the lat and lon are not provided in the raw file, we have to generate those on our own
lon = np.arange(70+0.25/2,150,0.25)
lat = np.arange(60-0.25/2,10,-.25)
print(len(lon)*len(lat))
def expand_grid(lon, lat):
xG, yG = np.meshgrid(lon, lat) # create the actual grid
xG = xG.flatten() # make the grid 1d
yG = yG.flatten() # same
return | pd.DataFrame({'lon':xG, 'lat':yG}) | pandas.DataFrame |
import os
from argparse import ArgumentParser
from collections import defaultdict
import numpy as np
import pandas as pd
from tqdm import tqdm
from load import implicit_load
MIN_RATINGS = 20
USER_COLUMN = 'user_id'
ITEM_COLUMN = 'item_id'
TRAIN_RATINGS_FILENAME = 'train-ratings.csv'
TEST_RATINGS_FILENAME = 'test-ratings.csv'
TEST_NEG_FILENAME = 'test-negative.csv'
def parse_args():
parser = ArgumentParser()
parser.add_argument('path', type=str,
help='Path to reviews CSV file from MovieLens')
parser.add_argument('output', type=str,
help='Output directory for train and test CSV files')
parser.add_argument('-n', '--negatives', type=int, default=999,
help='Number of negative samples for each positive'
'test example')
parser.add_argument('-s', '--seed', type=int, default=0,
help='Random seed to reproduce same negative samples')
return parser.parse_args()
def main():
args = parse_args()
np.random.seed(args.seed)
print("Loading raw data from {}".format(args.path))
df = implicit_load(args.path, sort=False)
print("Filtering out users with less than {} ratings".format(MIN_RATINGS))
grouped = df.groupby(USER_COLUMN)
df = grouped.filter(lambda x: len(x) >= MIN_RATINGS)
print("Mapping original user and item IDs to new sequential IDs")
original_users = df[USER_COLUMN].unique()
original_items = df[ITEM_COLUMN].unique()
user_map = {user: index for index, user in enumerate(original_users)}
item_map = {item: index for index, item in enumerate(original_items)}
df[USER_COLUMN] = df[USER_COLUMN].apply(lambda user: user_map[user])
df[ITEM_COLUMN] = df[ITEM_COLUMN].apply(lambda item: item_map[item])
assert df[USER_COLUMN].max() == len(original_users) - 1
assert df[ITEM_COLUMN].max() == len(original_items) - 1
print("Creating list of items for each user")
# Need to sort before popping to get last item
df.sort_values(by='timestamp', inplace=True)
all_ratings = set(zip(df[USER_COLUMN], df[ITEM_COLUMN]))
user_to_items = defaultdict(list)
for row in tqdm(df.itertuples(), desc='Ratings', total=len(df)):
user_to_items[getattr(row, USER_COLUMN)].append(getattr(row, ITEM_COLUMN)) # noqa: E501
test_ratings = []
test_negs = []
all_items = set(range(len(original_items)))
print("Generating {} negative samples for each user"
.format(args.negatives))
for user in tqdm(range(len(original_users)), desc='Users', total=len(original_users)): # noqa: E501
test_item = user_to_items[user].pop()
all_ratings.remove((user, test_item))
all_negs = all_items - set(user_to_items[user])
all_negs = sorted(list(all_negs)) # determinism
test_ratings.append((user, test_item))
test_negs.append(list(np.random.choice(all_negs, args.negatives)))
print("Saving train and test CSV files to {}".format(args.output))
df_train_ratings = pd.DataFrame(list(all_ratings))
df_train_ratings['fake_rating'] = 1
df_train_ratings.to_csv(os.path.join(args.output, TRAIN_RATINGS_FILENAME),
index=False, header=False, sep='\t')
df_test_ratings = pd.DataFrame(test_ratings)
df_test_ratings['fake_rating'] = 1
df_test_ratings.to_csv(os.path.join(args.output, TEST_RATINGS_FILENAME),
index=False, header=False, sep='\t')
df_test_negs = | pd.DataFrame(test_negs) | pandas.DataFrame |
"""
Metrics for assessing imputation quality
Het/Hom ratio
Improving imputation quality in BEAGLE for crop and
livestock data
- switch-error rate for imputation quality
- idem ?
Comparison and assessment of family-
and population-based genotype imputation
methods in large pedigrees
- Mean squared correlation (R^2) = Pearson’s squared correlation: [0, 1].
- concordance rate (CR): overestimates the imputation accuracy for rare variants
- imputation quality score (IQS): agreement ratio, (-Inf, 1],
based on the Kappa statistic
A New Statistic to Evaluate Imputation Reliability
IQS:
The computation of IQS requires the posterior probabilities of AA, AB and BB as output by the imputation program.
--> with Beagle 4.1: gprobs, in dic['corr'] files as GT:DS:GP ex. 0|0:0.51:0.55,0.38,0.07
gprobs=[true/false]specifies whether a GP (genotype probability)
format field will be included in the output VCF file (default: gprobs=true)
MaCH: Using Sequence and Genotype Data to Estimate Haplotypes and Unobserved Genotypes
1 SNP with alleles A and B. Let n_A/A , n_A/B , n_B/B = number of times
each possible genotype was sampled after I = n_A/A + n_A/B + n_B/B iterations
Most likely genotype = genotype that was sampled most frequently
Expected number of counts of allele A: g = (2*n_A/A + n_A/B)/I
1) Genotype Quality Score: GQS = n_IG /I,
n_IG = number of iterations where the given genotype was selected as the most likely one
This quantity can be averaged over all genotypes for a
particular marker to quantify the average accuracy of imputation for that marker
2) Accuracy: alpha = sum(GQS_i, i=1:N)/N,
N number of individuals
3) R²: E(r² with true genotypes) = Var(g)/((4*n_A/A + n_A/B)/I - [(2*n_A/A + n_A/B)/I]²),
Estimated r² with true genotypes, Var(g) be the variance of estimated genotype:
a better measure of imputation quality for a marker is the estimated r² between
true allele counts and estimated allele counts. This quantity can be estimated by
comparing the variance of the estimated genotype scores with what would be expected if
genotype scores were observed without error.
https://en.wikipedia.org/wiki/Cohen's_kappa
Cohen's kappa coefficient K is a statistic which measures inter-rater agreement for qualitative (categorical) items.
Generally thought to be more robust than simple percent agreement calculation, as that coeff takes into account
the possibility of agreement occuring by chance. But: difficult to interpret indices of agreement?
If no agreement between the raters other than the one that would be expected by chance: K = 0,
If K < 0: there is no effective agreement between the raters or the agreement is worse than random.
Weighted kappa K exists.
κ's tendency to take the observed categories' frequencies as givens, which can make it unreliable for measuring
agreement in situations such as the diagnosis of rare diseases. In these situations, κ tends to underestimate
the agreement on the rare category.
https://scikit-learn.org/stable/modules/generated/sklearn.metrics.cohen_kappa_score.html#sklearn.metrics.cohen_kappa_score
Implementation of Cohen's kappa:
sklearn.metrics.cohen_kappa_score(y1, y2, labels=None, weights=None, sample_weight=None)
y1, y2: arrays of same length (n_samples)
http://courses.washington.edu/cmling/lab7.html
Using the python interpreter and the nltk metrics package, calculate inter-annotator agreement (both kappa and alpha).
Note that AnnotationTask is a type of object, with methods kappa() and alpha().
When you call nltk.metrics.AnnotationTask() it returns an object of that type, which in the example below is stored
in the variable task. See: http://www.nltk.org/api/nltk.metrics.html
import nltk
toy_data = [
['1', 5723, 'ORG'],
['2', 5723, 'ORG'],
['1', 55829, 'LOC'],
['2', 55829, 'LOC'],
['1', 259742, 'PER'],
['2', 259742, 'LOC'],
['1', 269340, 'PER'],
['2', 269340, 'LOC']
]
task = nltk.metrics.agreement.AnnotationTask(data=toy_data)
task.kappa()
task.alpha()
The nltk metrics package also provides for calculating and printing confusion matrices, a way of displaying which labels
were 'mistaken' for which other ones. Unfortunately, this functionality requires a different format for the input.
In particular, it wants two lists of labels (in the same order).
"""
import os, sys
import numpy as np
import pandas as pd
import numba
from scipy.stats import pearsonr, zscore
from scipy.special import softmax
from sklearn import metrics, preprocessing
from typing import *
rootdir = os.path.dirname(os.path.dirname(os.path.dirname(os.getcwd())))
sys.path.insert(0, rootdir)
from genotypooler.poolSNPs import dataframe as vcfdf
from genotypooler.poolSNPs.metrics.misc import normalize, min_max_scale
from genotypooler.persotools.files import *
ArrayLike = NewType('ArrayLike', Union[Sequence, List, Set, Tuple, Iterable, np.ndarray, int, float, str])
#TODO: evaluate phase/switch rate
class QualityGT(object):
"""
Implement different methods for assessing imputation performance:
* accuracy and recall per variant per genotype (cross-table)
* correlation per variant and/or per sample between imputed and true genotypes
* difference per variant and/or per sample between imputed and true genotypes
* allele dosage
"""
def __init__(self, truefile: FilePath, imputedfile: FilePath, ax: object, idx: str = 'id'):
self.trueobj = vcfdf.PandasMixedVCF(truefile, format='GT', indextype=idx)
self.imputedobj = vcfdf.PandasMixedVCF(imputedfile, format='GT', indextype=idx)
self._axis = ax
@property
def axis(self):
return self._axis
@axis.setter
def set_axis(self, ax):
if ax == 0 or ax == 'variants':
self._axis = 0
elif ax == 1 or ax == 'samples':
self._axis = 1
else:
self._axis = None
@staticmethod
def square(x):
return x ** 2
def pearsoncorrelation(self) -> pd.Series:
"""
Compute Pearson's correlation coefficient between true and imputed genotypes.
Correlation between variants (ax=1 i.e. mean genotypes along samples axis),
or correlation between samples (ax=0 i.e. mean genotypes along variant axis),
or global correlation (ax=None i.e. mean of flattened array)
:return: correlation coefficients and p-value for each
"""
#TODO: replace by Allele Frequency correlation as described in Beagle09?
true = self.trueobj.trinary_encoding().values
imputed = self.imputedobj.trinary_encoding().values
scorer = lambda t: pearsonr(t[0], t[1])[0] # keeps only correlation, not p-value
score = list(map(scorer, zip(true, imputed)))
# astype(str) casts Series type to discrete classes
rsqr = pd.Series(score, index=self.trueobj.variants, name='r_squared').apply(self.square)
# squared correlation
return rsqr
def diff(self) -> pd.DataFrame:
"""
Compute absolute genotype difference element-wise, i.e. per variant per sample
:return: absolute difference true vs. imputed genotypes
"""
truedf = self.trueobj.trinary_encoding()
imputeddf = self.imputedobj.trinary_encoding()
absdiffdf = truedf.sub(imputeddf).abs()
return absdiffdf
def concordance(self) -> pd.Series:
"""
Compute concordance between true and imputed genotypes
i.e. 1 - the Z-norm of the absolute difference of true vs. imputed genotypes?
:return:
"""
# absdiff = self.diff() # equals 0 when true = imputed, else can be 1 or 2 (very unlikely 2?)
absdiff = self.diff() / 2 # restricts values to 0.0, 0.5, 1.0
# absdiffnorm = absdiff.apply(min_max_scale, axis=1, raw=True) # homemade minmax scaler
absdiffnorm = absdiff.apply(preprocessing.minmax_scale, axis=1, raw=True) # sklearn minmax scaler
discord_score = absdiffnorm.mean(axis=1) # discordance
concord_score = 1 - discord_score # concordance = 1 - discordance
concord = pd.Series(concord_score, index=self.trueobj.variants, name='concordance')
return concord
@staticmethod
def expectation(a: np.ndarray, freq: np.ndarray):
"""
sum(Pr(G=x)*x)
:param a:
:param freq:
:return:
"""
return np.multiply(a, freq).sum()
def alleledosage(self) -> Tuple[pd.Series]:
# TODO: add by Standardized Allele Frequency Error as described in Beagle09?
"""
Compute alternate allele dosage.
Makes sense only accross a population i.e. mean values along samples axis.
Allele dosage = 2 * AAF, for a diploid organism
:return:
"""
truedos = self.trueobj.trinary_encoding().values.mean(axis=1)
imputeddos = self.imputedobj.trinary_encoding().values.mean(axis=1)
strue = pd.Series(truedos, index=self.trueobj.variants, name='truedos')
simputed = pd.Series(imputeddos, index=self.imputedobj.variants, name='imputeddos')
return strue, simputed
@property
def precision(self, avg: str = 'weighted') -> pd.Series:
"""
Compute precision score for the imputed genotypes.
The precision is the ratio tp / (tp + fp) where tp is the number of true positives and
fp the number of false positives. The precision is intuitively the ability of the classifier
not to label as positive a sample that is negative. The best value is 1 and the worst value is 0.
:param avg: 'weighted' needed for multiclass classification
:return:
"""
true = self.trueobj.trinary_encoding().values
imputed = self.imputedobj.trinary_encoding().values
scorer = lambda t: metrics.precision_score(t[0].astype(str),
t[1].astype(str),
average=avg)
score = list(map(scorer, zip(true, imputed)))
# astype(str) casts Series type to discrete classes
return pd.Series(score, index=self.trueobj.variants, name='precision_score')
@property
def accuracy(self) -> pd.Series:
"""
Compute accuracy score for the imputed genotypes.
In multilabel classification, this function computes subset accuracy i.e. the number of exact true matches.
The accuracy is the ratio tp / (tp + fp + tn + fn) for each class.
Equal to Jaccard index in the case of multilabel classification tasks.
Jaccard similarity coefficient is defined as the size of the intersection
divided by the size of the union of two label sets.
:return:
"""
true = self.trueobj.trinary_encoding().values
imputed = self.imputedobj.trinary_encoding().values
scorer = lambda t: metrics.accuracy_score(t[0].astype(str),
t[1].astype(str))
score = list(map(scorer, zip(true, imputed)))
# astype(str) casts Series type to discrete classes
return pd.Series(score, index=self.trueobj.variants, name='accuracy_score')
@property
def recall(self, avg: str = 'weighted') -> pd.Series:
"""
Compute recall score for the imputed genotypes.
The recall is the ratio tp / (tp + fn) where tp is the number of true positives and
fn the number of false negatives. The recall is intuitively the ability of the classifier
to find all the positive samples.
The best value is 1 and the worst value is 0.
:return:
"""
true = self.trueobj.trinary_encoding().values
imputed = self.imputedobj.trinary_encoding().values
scorer = lambda t: metrics.recall_score(t[0].astype(str),
t[1].astype(str),
average=avg)
score = list(map(scorer, zip(true, imputed)))
# astype(str) casts Series type to discrete classes
return pd.Series(score, index=self.trueobj.variants, name='recall_score')
@property
def f1_score(self, avg: str = 'weighted') -> pd.Series:
"""
F1-score for the genotypes
:return:
"""
true = self.trueobj.trinary_encoding().values
imputed = self.imputedobj.trinary_encoding().values
scorer = lambda t: metrics.f1_score(t[0].astype(str),
t[1].astype(str),
average=avg)
score = list(map(scorer, zip(true, imputed)))
# astype(str) casts Series type to discrete classes
return pd.Series(score, index=self.trueobj.variants, name='f1_score')
@numba.vectorize
def mylog_numba(x):
"""
Numba-enhanced computation of logarithm with 1e-05 cutoff
"""
return np.log(x) if x > 1e-05 else np.log(1e-05)
@numba.vectorize
def myprodlog_numba(x, y):
"""
Numba-enhanced computation of element-wise entropy
"""
return -np.multiply(x, mylog_numba(y))
# Numba-enhanced functions do not accept ANY Pandas object as an argument
@numba.jit # numba.vectorize yields same perf
def entro_plain_numba(nptrue: np.ndarray, nppred: np.ndarray):
"""
Numba-enhanced computation of cross entropy i.e. sum of entropies for the 3 dimensions (genotypes)
"""
return myprodlog_numba(nptrue, nppred).sum(axis=-1)
@numba.jit # NOT numba.vectorize
def logfill(x):
"""
Adjust values for cross-entropy calculation
Ex. rs1836444 -0.0 inf NaN -> rs1836444 0.0 5.0 0.0
"""
if x == -0.0 or np.isnan(x):  # x == np.nan is always False; NaN must be tested with np.isnan
return 0.0
elif x == np.inf:
return 5.0
else:
return x
def compute_entro_numba(dftrue: pd.DataFrame, dfpred: pd.DataFrame) -> pd.DataFrame:
"""
This function acts as wrapper that bridges Numba calculation and Pandas objects
"""
result = myprodlog_numba(dftrue.values, dfpred.values)
return pd.DataFrame(result).applymap(logfill).fillna(0.0)
class QualityGL(object):
"""
Implement cross-entropy method for assessing imputation performance from GL.
Numba-enhanced calculations.
"""
def __init__(self, truefile: FilePath, imputedfile: FilePath, ax: object, fmt: str = 'GP', idx: str = 'id'):
self.trueobj = vcfdf.PandasMixedVCF(truefile, format='GL', indextype=idx)
self.imputedobj = vcfdf.PandasMixedVCF(imputedfile, format=fmt, indextype=idx)
self._axis = ax
@property
def axis(self):
return self._axis
@axis.setter
def set_axis(self, ax):
if ax == 0 or ax == 'variants':
self._axis = 0
elif ax == 1 or ax == 'samples':
self._axis = 1
else:
self._axis = None
def intergl_entropy(self, g_true: pd.Series, g_pred: pd.Series) -> np.ndarray:
"""
Compute entropy from two GL series for a sample as
E = -sum(p_true * log(p_imputed), sum over the 3 GL values at every mmarker
p_imputed set to 10^-12 if equal to 0.0
Usual logarithm log, NOT log10 for entropy calculation
"""
g_true = pd.Series(g_true)
g_pred = pd.Series(g_pred) # comes as tuples of str
# pandas.DataFrame.combine: both data frames must have the SAME column names
dftrue = pd.DataFrame.from_records(g_true.values,
index=g_true.index,
columns=['RR', 'RA', 'AA']).astype(float)
dftrue = pd.DataFrame(np.power(10.0, dftrue.values),
index=g_true.index,
columns=['RR', 'RA', 'AA'])
# GL are logged!
dfpred = pd.DataFrame.from_records(g_pred.values,
index=g_pred.index,
columns=['RR', 'RA', 'AA']).astype(float)
g_entro = compute_entro_numba(dftrue, dfpred) # using Numba speeds up execution by a factor of 5-6
return g_entro.sum(axis=1).rename(
g_true.name).to_numpy() # return type has to be np.ndarray for proper use with combine then
@property
def cross_entropy(self) -> pd.Series:
"""
For genotypes likelihoods
Entropy for the genotypes, ACROSS two populations.
Not to be confused with intra-population entropy.
entropy = alpha * sum(p_true * log(p_imputed) for every GL for every sample) at 1 marker
:return:
"""
true = self.trueobj.genotypes()
imputed = self.imputedobj.genotypes()
# these come as arrays of tuples
entro = true.combine(imputed, self.intergl_entropy)
score = entro.mean(axis=1)
return | pd.Series(score, index=self.trueobj.variants, name='cross_entropy') | pandas.Series |
"""
Notes
-----
This test and docs/source/usage/iss/iss_cli.sh test the same code paths and should be updated
together
"""
import os
import unittest
import numpy as np
import pandas as pd
import pytest
from starfish.test.full_pipelines.cli._base_cli_test import CLITest
from starfish.types import Features
EXPERIMENT_JSON_URL = "https://d2nhj9g34unfro.cloudfront.net/20181005/ISS-TEST/experiment.json"
@pytest.mark.slow
class TestWithIssData(CLITest, unittest.TestCase):
@property
def spots_file(self):
return "decoded-spots.nc"
@property
def subdirs(self):
return (
"max_projected",
"transforms",
"registered",
"filtered",
"results",
)
@property
def stages(self):
return (
[
"starfish", "validate", "experiment", EXPERIMENT_JSON_URL,
],
[
"starfish", "filter",
"--input",
f"@{EXPERIMENT_JSON_URL}[fov_001][primary]",
"--output", lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "max_projected", "primary_images.json"),
"MaxProj",
"--dims", "c",
"--dims", "z"
],
[
"starfish", "learn_transform",
"--input", lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "max_projected", "primary_images.json"),
"--output", lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "transforms", "transforms.json"),
"Translation",
"--reference-stack",
f"@{EXPERIMENT_JSON_URL}[fov_001][dots]",
"--upsampling", "1000",
"--axes", "r"
],
[
"starfish", "apply_transform",
"--input",
f"@{EXPERIMENT_JSON_URL}[fov_001][primary]",
"--output", lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "registered", "primary_images.json"),
"--transformation-list", lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "transforms", "transforms.json"),
"Warp",
],
[
"starfish", "filter",
"--input", lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "registered", "primary_images.json"),
"--output", lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "filtered", "primary_images.json"),
"WhiteTophat",
"--masking-radius", "15",
],
[
"starfish", "filter",
"--input",
f"@{EXPERIMENT_JSON_URL}[fov_001][nuclei]",
"--output", lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "filtered", "nuclei.json"),
"WhiteTophat",
"--masking-radius", "15",
],
[
"starfish", "filter",
"--input",
f"@{EXPERIMENT_JSON_URL}[fov_001][dots]",
"--output", lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "filtered", "dots.json"),
"WhiteTophat",
"--masking-radius", "15",
],
[
"starfish", "detect_spots",
"--input", lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "filtered", "primary_images.json"),
"--output", lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "results", "spots.nc"),
"--blobs-stack", lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "filtered", "dots.json"),
"--blobs-axis", "r", "--blobs-axis", "c",
"BlobDetector",
"--min-sigma", "4",
"--max-sigma", "6",
"--num-sigma", "20",
"--threshold", "0.01",
],
[
"starfish", "segment",
"--primary-images", lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "filtered", "primary_images.json"),
"--nuclei", lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "filtered", "nuclei.json"),
"-o", lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "results", "label_image.png"),
"Watershed",
"--nuclei-threshold", ".16",
"--input-threshold", ".22",
"--min-distance", "57",
],
[
"starfish", "target_assignment",
"--label-image",
lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "results", "label_image.png"),
"--intensities", lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "results", "spots.nc"),
"--output", lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "results", "targeted-spots.nc"),
"Label",
],
[
"starfish", "decode",
"-i", lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "results", "targeted-spots.nc"),
"--codebook",
f"@{EXPERIMENT_JSON_URL}",
"-o", lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "results", "decoded-spots.nc"),
"PerRoundMaxChannelDecoder",
],
# Validate results/{spots,targeted-spots,decoded-spots}.nc
[
"starfish", "validate", "xarray",
lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "results", "spots.nc")
],
[
"starfish", "validate", "xarray",
lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "results", "targeted-spots.nc")
],
[
"starfish", "validate", "xarray",
lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "results", "decoded-spots.nc")
],
)
def verify_results(self, intensities):
# TODO make this test stronger
genes, counts = np.unique(
intensities.coords[Features.TARGET], return_counts=True)
gene_counts = | pd.Series(counts, genes) | pandas.Series |
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 7 09:40:49 2018
@author: yuwei
"""
import pandas as pd
import numpy as np
import math
import random
import time
import scipy as sp
import xgboost as xgb
def loadData():
"下载数据"
trainSet = pd.read_table('round1_ijcai_18_train_20180301.txt',sep=' ')
testSet = pd.read_table('round1_ijcai_18_test_a_20180301.txt',sep=' ')
return trainSet,testSet
def splitData(trainSet,testSet):
"按时间划分验证集"
#convert test-set timestamps to standard datetime
time_local = testSet.context_timestamp.map(lambda x :time.localtime(x))
time_local = time_local.map(lambda x :time.strftime("%Y-%m-%d %H:%M:%S",x))
testSet['context_timestamp'] = time_local
#convert training-set timestamps to standard datetime
time_local = trainSet.context_timestamp.map(lambda x :time.localtime(x))
time_local = time_local.map(lambda x :time.strftime("%Y-%m-%d %H:%M:%S",x))
trainSet['context_timestamp'] = time_local
del time_local
#process the item_category_list attribute of the training set
trainSet['item_category_list'] = trainSet.item_category_list.map(lambda x :x.split(';'))
trainSet['item_category_list_2'] = trainSet.item_category_list.map(lambda x :x[1])
trainSet['item_category_list_3'] = trainSet.item_category_list.map(lambda x :x[2] if len(x) >2 else -1)
trainSet['item_category_list_2'] = list(map(lambda x,y : x if (y == -1) else y,trainSet['item_category_list_2'],trainSet['item_category_list_3']))
#process the item_category_list attribute of the test set
testSet['item_category_list'] = testSet.item_category_list.map(lambda x :x.split(';'))
testSet['item_category_list_2'] = testSet.item_category_list.map(lambda x :x[1])
testSet['item_category_list_3'] = testSet.item_category_list.map(lambda x :x[2] if len(x) >2 else -1)
testSet['item_category_list_2'] = list(map(lambda x,y : x if (y == -1) else y,testSet['item_category_list_2'],testSet['item_category_list_3']))
del trainSet['item_category_list_3'];del testSet['item_category_list_3'];
#compute the rank of the item category within predict_category_property
trainSet['predict_category'] = trainSet['predict_category_property'].map(lambda x :[y.split(':')[0] for y in x.split(';')])
trainSet['predict_category_property_rank'] = list(map(lambda x,y:y.index(x) if x in y else -1,trainSet['item_category_list_2'],trainSet['predict_category']))
testSet['predict_category'] = testSet['predict_category_property'].map(lambda x :[y.split(':')[0] for y in x.split(';')])
testSet['predict_category_property_rank'] = list(map(lambda x,y:y.index(x) if x in y else -1,testSet['item_category_list_2'],testSet['predict_category']))
#count the categories shared between item_category_list and predict_category
trainSet['item_category_count'] = list(map(lambda x,y:len(set(x)&set(y)),trainSet.item_category_list,trainSet.predict_category))
testSet['item_category_count'] = list(map(lambda x,y:len(set(x)&set(y)),testSet.item_category_list,testSet.predict_category))
#count the categories that differ
trainSet['item_category_count'] = list(map(lambda x,y:len(set(x)) - len(set(x)&set(y)),trainSet.item_category_list,trainSet.predict_category))
testSet['item_category_count'] = list(map(lambda x,y:len(set(x)) - len(set(x)&set(y)),testSet.item_category_list,testSet.predict_category))
del trainSet['predict_category']; del testSet['predict_category']
"划分数据集"
#test set: features extracted from the 23rd-24th, labels on the 25th
test = testSet
testFeat = trainSet[trainSet['context_timestamp']>'2018-09-23']
#validation set: features extracted from the 22nd-23rd, labels on the 24th
validate = trainSet[trainSet['context_timestamp']>'2018-09-24']
validateFeat = trainSet[(trainSet['context_timestamp']>'2018-09-22') & (trainSet['context_timestamp']<'2018-09-24')]
#training set: features from the 21st-22nd, labels on the 23rd; 20th-21st, labels on the 22nd; 19th-20th, labels on the 21st; 18th-19th, labels on the 20th
#label windows
train1 = trainSet[(trainSet['context_timestamp']>'2018-09-23') & (trainSet['context_timestamp']<'2018-09-24')]
train2 = trainSet[(trainSet['context_timestamp']>'2018-09-22') & (trainSet['context_timestamp']<'2018-09-23')]
train3 = trainSet[(trainSet['context_timestamp']>'2018-09-21') & (trainSet['context_timestamp']<'2018-09-22')]
train4 = trainSet[(trainSet['context_timestamp']>'2018-09-20') & (trainSet['context_timestamp']<'2018-09-21')]
#feature windows
trainFeat1 = trainSet[(trainSet['context_timestamp']>'2018-09-21') & (trainSet['context_timestamp']<'2018-09-23')]
trainFeat2 = trainSet[(trainSet['context_timestamp']>'2018-09-20') & (trainSet['context_timestamp']<'2018-09-22')]
trainFeat3 = trainSet[(trainSet['context_timestamp']>'2018-09-19') & (trainSet['context_timestamp']<'2018-09-21')]
trainFeat4 = trainSet[(trainSet['context_timestamp']>'2018-09-18') & (trainSet['context_timestamp']<'2018-09-20')]
return test,testFeat,validate,validateFeat,train1,trainFeat1,train2,trainFeat2,train3,trainFeat3,train4,trainFeat4
def modelXgb(train,test):
"xgb模型"
train_y = train['is_trade'].values
# train_x = train.drop(['item_brand_id','item_city_id','user_id','shop_id','context_id','instance_id', 'item_id','item_category_list','item_property_list', 'context_timestamp',
# 'predict_category_property','is_trade'
# ],axis=1).values
# test_x = test.drop(['item_brand_id','item_city_id','user_id','shop_id','context_id','instance_id', 'item_id','item_category_list','item_property_list', 'context_timestamp',
# 'predict_category_property','is_trade'
# ],axis=1).values
# test_x = test.drop(['item_brand_id','item_city_id','user_id','shop_id','context_id','instance_id', 'item_id','item_category_list','item_property_list', 'context_timestamp',
# 'predict_category_property'
# ],axis=1).values
#based on the Pearson correlation coefficient, drop attributes whose correlation is below -0.2
train_x = train.drop(['item_brand_id',
'item_city_id','user_id','shop_id','context_id',
'instance_id', 'item_id','item_category_list',
'item_property_list', 'context_timestamp',
'predict_category_property','is_trade',
'item_price_level','user_rank_down',
'item_category_list_2_not_buy_count',
'item_category_list_2_count',
'user_first'
# 'user_count_label',
# 'item_city_not_buy_count',
# 'item_city_count',
# 'user_shop_rank_down',
# 'item_city_buy_count',
# 'user_item_rank_down',
# 'shop_score_description',
# 'shop_review_positive_rate',
# 'shop_score_delivery',
# 'shop_score_service',
],axis=1).values
# test_x = test.drop(['item_brand_id',
# 'item_city_id','user_id','shop_id','context_id',
# 'instance_id', 'item_id','item_category_list',
# 'item_property_list', 'context_timestamp',
# 'predict_category_property','is_trade',
# 'item_price_level','user_rank_down',
# 'item_category_list_2_not_buy_count',
# 'item_category_list_2_count',
# 'user_first',
# 'user_count_label',
# 'item_city_not_buy_count',
# 'item_city_count',
# 'user_shop_rank_down',
# 'item_city_buy_count',
# 'user_item_rank_down',
# 'shop_score_description',
# 'shop_review_positive_rate',
# 'shop_score_delivery',
# 'shop_score_service'
# ],axis=1).values
test_x = test.drop(['item_brand_id',
'item_city_id','user_id','shop_id','context_id',
'instance_id', 'item_id','item_category_list',
'item_property_list', 'context_timestamp',
'predict_category_property',
'item_price_level','user_rank_down',
'item_category_list_2_not_buy_count',
'item_category_list_2_count',
'user_first',
# 'user_count_label',
# 'item_city_not_buy_count',
# 'item_city_count',
# 'user_shop_rank_down',
# 'item_city_buy_count',
# 'user_item_rank_down',
# 'shop_score_description',
# 'shop_review_positive_rate',
# 'shop_score_delivery',
# 'shop_score_service'
],axis=1).values
dtrain = xgb.DMatrix(train_x, label=train_y)
dtest = xgb.DMatrix(test_x)
# model parameters
params = {'booster': 'gbtree',
'objective':'binary:logistic',
'eval_metric':'logloss',
'eta': 0.03,
'max_depth': 5, # 6
'colsample_bytree': 0.8,#0.8
'subsample': 0.8,
'scale_pos_weight': 1,
'min_child_weight': 18 # 2
}
# train
watchlist = [(dtrain,'train')]
bst = xgb.train(params, dtrain, num_boost_round=700,evals=watchlist)
# predict
predict = bst.predict(dtest)
# test_xy = test[['instance_id','is_trade']]
test_xy = test[['instance_id']]
test_xy['predicted_score'] = predict
return test_xy
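# Illustrative sketch of the Pearson-correlation screen mentioned in modelXgb above
# (attributes whose correlation with the label falls below -0.2); `train` is assumed to be
# the labelled training DataFrame and the helper name is an assumption.
def _low_corr_columns_example(train, threshold=-0.2):
    corr = train.select_dtypes('number').corr()['is_trade']
    return corr[corr < threshold].index.tolist()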
def get_item_feat(data,dataFeat):
"item的特征提取"
result = pd.DataFrame(dataFeat['item_id'])
result = result.drop_duplicates(['item_id'],keep='first')
"1.统计item出现次数"
dataFeat['item_count'] = dataFeat['item_id']
feat = pd.pivot_table(dataFeat,index=['item_id'],values='item_count',aggfunc='count').reset_index()
del dataFeat['item_count']
result = pd.merge(result,feat,on=['item_id'],how='left')
"2.统计item历史被购买的次数"
dataFeat['item_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['item_id'],values='item_buy_count',aggfunc='sum').reset_index()
del dataFeat['item_buy_count']
result = pd.merge(result,feat,on=['item_id'],how='left')
"3.统计item转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.item_buy_count,result.item_count))
result['item_buy_ratio'] = buy_ratio
"4.统计item历史未被够买的次数"
result['item_not_buy_count'] = result['item_count'] - result['item_buy_count']
return result
def get_user_feat(data,dataFeat):
"user的特征提取"
result = pd.DataFrame(dataFeat['user_id'])
result = result.drop_duplicates(['user_id'],keep='first')
"1.统计user出现次数"
dataFeat['user_count'] = dataFeat['user_id']
feat = pd.pivot_table(dataFeat,index=['user_id'],values='user_count',aggfunc='count').reset_index()
del dataFeat['user_count']
result = pd.merge(result,feat,on=['user_id'],how='left')
"2.统计user历史被购买的次数"
dataFeat['user_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['user_id'],values='user_buy_count',aggfunc='sum').reset_index()
del dataFeat['user_buy_count']
result = pd.merge(result,feat,on=['user_id'],how='left')
"3.统计user转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.user_buy_count,result.user_count))
result['user_buy_ratio'] = buy_ratio
"4.统计user历史未被够买的次数"
result['user_not_buy_count'] = result['user_count'] - result['user_buy_count']
return result
def get_context_feat(data,dataFeat):
"context的特征提取"
result = pd.DataFrame(dataFeat['context_id'])
result = result.drop_duplicates(['context_id'],keep='first')
"1.统计context出现次数"
dataFeat['context_count'] = dataFeat['context_id']
feat = pd.pivot_table(dataFeat,index=['context_id'],values='context_count',aggfunc='count').reset_index()
del dataFeat['context_count']
result = pd.merge(result,feat,on=['context_id'],how='left')
"2.统计context历史被购买的次数"
dataFeat['context_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['context_id'],values='context_buy_count',aggfunc='sum').reset_index()
del dataFeat['context_buy_count']
result = pd.merge(result,feat,on=['context_id'],how='left')
"3.统计context转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.context_buy_count,result.context_count))
result['context_buy_ratio'] = buy_ratio
"4.统计context历史未被够买的次数"
result['context_not_buy_count'] = result['context_count'] - result['context_buy_count']
return result
def get_shop_feat(data,dataFeat):
"shop的特征提取"
result = pd.DataFrame(dataFeat['shop_id'])
result = result.drop_duplicates(['shop_id'],keep='first')
"1.统计shop出现次数"
dataFeat['shop_count'] = dataFeat['shop_id']
feat = pd.pivot_table(dataFeat,index=['shop_id'],values='shop_count',aggfunc='count').reset_index()
del dataFeat['shop_count']
result = pd.merge(result,feat,on=['shop_id'],how='left')
"2.统计shop历史被购买的次数"
dataFeat['shop_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['shop_id'],values='shop_buy_count',aggfunc='sum').reset_index()
del dataFeat['shop_buy_count']
result = pd.merge(result,feat,on=['shop_id'],how='left')
"3.统计shop转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.shop_buy_count,result.shop_count))
result['shop_buy_ratio'] = buy_ratio
"4.统计shop历史未被够买的次数"
result['shop_not_buy_count'] = result['shop_count'] - result['shop_buy_count']
return result
def get_timestamp_feat(data,dataFeat):
"context_timestamp的特征提取"
result = pd.DataFrame(dataFeat['context_timestamp'])
result = result.drop_duplicates(['context_timestamp'],keep='first')
"1.统计context_timestamp出现次数"
dataFeat['context_timestamp_count'] = dataFeat['context_timestamp']
feat = pd.pivot_table(dataFeat,index=['context_timestamp'],values='context_timestamp_count',aggfunc='count').reset_index()
del dataFeat['context_timestamp_count']
result = pd.merge(result,feat,on=['context_timestamp'],how='left')
"2.统计context_timestamp历史被购买的次数"
dataFeat['context_timestamp_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['context_timestamp'],values='context_timestamp_buy_count',aggfunc='sum').reset_index()
del dataFeat['context_timestamp_buy_count']
result = pd.merge(result,feat,on=['context_timestamp'],how='left')
"3.统计context_timestamp转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.context_timestamp_buy_count,result.context_timestamp_count))
result['context_timestamp_buy_ratio'] = buy_ratio
"4.统计context_timestamp历史未被够买的次数"
result['context_timestamp_not_buy_count'] = result['context_timestamp_count'] - result['context_timestamp_buy_count']
return result
def get_item_brand_feat(data,dataFeat):
"item_brand的特征提取"
result = pd.DataFrame(dataFeat['item_brand_id'])
result = result.drop_duplicates(['item_brand_id'],keep='first')
"1.统计item_brand出现次数"
dataFeat['item_brand_count'] = dataFeat['item_brand_id']
feat = pd.pivot_table(dataFeat,index=['item_brand_id'],values='item_brand_count',aggfunc='count').reset_index()
del dataFeat['item_brand_count']
result = pd.merge(result,feat,on=['item_brand_id'],how='left')
"2.统计item_brand历史被购买的次数"
dataFeat['item_brand_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['item_brand_id'],values='item_brand_buy_count',aggfunc='sum').reset_index()
del dataFeat['item_brand_buy_count']
result = pd.merge(result,feat,on=['item_brand_id'],how='left')
"3.统计item_brand转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.item_brand_buy_count,result.item_brand_count))
result['item_brand_buy_ratio'] = buy_ratio
"4.统计item_brand历史未被够买的次数"
result['item_brand_not_buy_count'] = result['item_brand_count'] - result['item_brand_buy_count']
return result
def get_item_city_feat(data,dataFeat):
"item_city的特征提取"
result = pd.DataFrame(dataFeat['item_city_id'])
result = result.drop_duplicates(['item_city_id'],keep='first')
"1.统计item_city出现次数"
dataFeat['item_city_count'] = dataFeat['item_city_id']
feat = pd.pivot_table(dataFeat,index=['item_city_id'],values='item_city_count',aggfunc='count').reset_index()
del dataFeat['item_city_count']
result = pd.merge(result,feat,on=['item_city_id'],how='left')
"2.统计item_city历史被购买的次数"
dataFeat['item_city_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['item_city_id'],values='item_city_buy_count',aggfunc='sum').reset_index()
del dataFeat['item_city_buy_count']
result = pd.merge(result,feat,on=['item_city_id'],how='left')
"3.统计item_city转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.item_city_buy_count,result.item_city_count))
result['item_city_buy_ratio'] = buy_ratio
"4.统计item_city历史未被够买的次数"
result['item_city_not_buy_count'] = result['item_city_count'] - result['item_city_buy_count']
return result
def get_user_gender_feat(data,dataFeat):
"user_gender的特征提取"
result = pd.DataFrame(dataFeat['user_gender_id'])
result = result.drop_duplicates(['user_gender_id'],keep='first')
"1.统计user_gender出现次数"
dataFeat['user_gender_count'] = dataFeat['user_gender_id']
feat = pd.pivot_table(dataFeat,index=['user_gender_id'],values='user_gender_count',aggfunc='count').reset_index()
del dataFeat['user_gender_count']
result = pd.merge(result,feat,on=['user_gender_id'],how='left')
"2.统计user_gender历史被购买的次数"
dataFeat['user_gender_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['user_gender_id'],values='user_gender_buy_count',aggfunc='sum').reset_index()
del dataFeat['user_gender_buy_count']
result = pd.merge(result,feat,on=['user_gender_id'],how='left')
"3.统计user_gender转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.user_gender_buy_count,result.user_gender_count))
result['user_gender_buy_ratio'] = buy_ratio
"4.统计user_gender历史未被够买的次数"
result['user_gender_not_buy_count'] = result['user_gender_count'] - result['user_gender_buy_count']
return result
def get_user_occupation_feat(data,dataFeat):
"user_occupation的特征提取"
result = | pd.DataFrame(dataFeat['user_occupation_id']) | pandas.DataFrame |
import time
import os
import numpy as np
import pandas as pd
from tqdm import tqdm
from datetime import datetime as dt
from py4jps.resources import JpsBaseLib
# Local KG location (fallback)
FALLBACK_KG = "http://localhost:9999/blazegraph/"
# Output location
OUTPUT_FOLDER = "/var/www/html/gas-grid"
# Maximum batch size for results
BATCH_SIZE = 50_000
# SPARQL query string
QUERY = """PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX ns1: <http://www.theworldavatar.com/ontology/ontocape/upper_level/system.owl#>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX gasgrid: <http://www.theworldavatar.com/ontology/ontogasgrid/gas_network_system.owl#>
PREFIX loc: <http://www.bigdata.com/rdf/geospatial/literals/v1#>
PREFIX geo: <http://www.bigdata.com/rdf/geospatial#>
PREFIX comp: <http://www.theworldavatar.com/ontology/ontogasgrid/gas_network_components.owl#>
SELECT ?location ?order ?label
WHERE
{
?pipe rdf:type gasgrid:GridPipeline.
?pipe rdfs:label ?label.
?pipe ns1:hasSubsystem ?segment.
?segment gasgrid:hasEndPart ?end.
?end gasgrid:entersPipeConnection ?connection.
?connection loc:lat-lon ?location.
?connection gasgrid:hasOrder ?order.
}"""
def initialiseGateway():
"""
Initialise the JPS Base Library
"""
jpsBaseLibGW = JpsBaseLib()
jpsBaseLibGW.launchGateway()
jpsBaseLibView = jpsBaseLibGW.createModuleView()
jpsBaseLibGW.importPackages(jpsBaseLibView, "uk.ac.cam.cares.jps.base.query.*")
return jpsBaseLibView.RemoteStoreClient(getKGLocation("ontogasgrid"))
def getKGLocation(namespace):
"""
Determines the correct URL for the KG's SPARQL endpoint.
Arguments:
namespace - KG namespace.
Returns:
Full URL for the KG.
"""
# Check for the KG_LOCATION environment variable, using local fallback
kgRoot = os.getenv('KG_LOCATION', FALLBACK_KG)
if kgRoot.endswith("/"):
return kgRoot + "namespace/" + namespace + "/sparql"
else:
return kgRoot + "/namespace/" + namespace + "/sparql"
def outputPipes():
"""
Queries the KG for data on pipes then outputs it
to a GeoJSON file.
"""
kgClient = initialiseGateway()
print("Using KG endpoint:", getKGLocation("ontogasgrid"))
gotAllResults = False
offset = 1
iteration = 1
totalResults = 0
result = []
# Run query in batches
while not gotAllResults:
print("INFO: Submitting request #" + str(iteration) + " at", dt.now())
print("INFO: Limit is " + str(BATCH_SIZE) + ", offset is " + str(offset))
finalQuery = QUERY + " LIMIT " + str(BATCH_SIZE) + " OFFSET " + str(offset)
batchResult = kgClient.executeQuery(finalQuery)
batchResult = batchResult.toList()
for singleResult in batchResult:
result.append(singleResult)
# Check if we have all results
if len(batchResult) < BATCH_SIZE:
gotAllResults = True
else:
if totalResults == 0:
offset += (BATCH_SIZE - 1)
else:
offset += BATCH_SIZE
iteration += 1
totalResults += len(batchResult)
num_ret = len(result)
ret_array = np.zeros((num_ret,4),dtype='object')
header = ['lat','lon','order','name']
for i in tqdm(range(num_ret)):
lat,lon = result[i]['location'].split('#')
ret_array[i,:] = [lat, lon, float(result[i]['order']), result[i]['label']]
result = | pd.DataFrame(ret_array, columns=header) | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019-07-04 15:34
# @Author : minp
# @contact : <EMAIL>
# @Site :
# @File : modeldata.py
# @Software: PyCharm
import datetime
import pandas as pd
from stockMongoDB.mmdb.base_model import BaseModel
# from Calf.exception import MongoIOError, FileError, ExceptionInfo, \
# WarningMessage, SuccessMessage  # custom exception-handling helpers; ignore them for now and comment out any lines that error because of them
class ModelData(object):
"""
All database IO for the public models is implemented through this class.
General-purpose IO methods.
"""
def __init__(self, location=None, dbname=None):
self.location = location
self.dbname = dbname
pass
# @classmethod
def field(self, table_name, field_name, filter=None):
"""
Query the value of a field in the database
:param filter:
:param table_name: the database's table name
:param field_name: the table's field name
:return: all values in database
"""
try:
return BaseModel(table_name, self.location,
self.dbname).distinct(field_name, filter)
except Exception as e:
raise e
# raise MongoIOError('query the field raise a error')
# @classmethod
def max(self, table_name, field='_id', **kw):
"""
Find the maximum value of the field column among documents matching the kw conditions
:param table_name:
:param field:
:param kw:
:return:
"""
try:
if not isinstance(field, str):
raise TypeError('field must be an instance of str')
cursor = BaseModel(table_name, self.location,
self.dbname).query(sql=kw, field={field: True})
if cursor.count():
d = pd.DataFrame(list(cursor))
m = d.loc[:, [field]].max()[field]
else:
m = None
cursor.close()
return m
except Exception as e:
raise e
# @classmethod
def min(self, table_name, field='_id', **kw):
"""
Find the minimum value of the field column among documents matching the kw conditions
:param table_name:
:param field:
:param kw:
:return:
"""
try:
if not isinstance(field, str):
raise TypeError('field must be an instance of str')
cursor = BaseModel(table_name, self.location,
self.dbname).query(sql=kw, field={field: True})
if cursor.count():
d = pd.DataFrame(list(cursor))
m = d.loc[:, [field]].min()[field]
else:
m = None
cursor.close()
return m
except Exception as e:
raise e
# @classmethod
def insert_data(self, table_name, data):
"""
A simple data-insertion interface
:param table_name:
:param data:
:return:
"""
try:
if len(data):
data['datetime'] = data.index
data['insertdate'] = datetime.datetime.today()
d = data.to_dict(orient='records')
BaseModel(table_name, self.location,
self.dbname).insert_batch(d)
except Exception as e:
raise e
# raise MongoIOError('Failed with insert data by MongoDB')
def insert_one(self, table_name, data):
"""
insert one record
:param table_name:
:param data: a dict
:return:
"""
try:
BaseModel(table_name, self.location,
self.dbname).insert(data)
except Exception as e:
raise e
# raise MongoIOError('Failed with insert data by MongoDB')
def read_one(self, table_name, field=None, **kw):
"""
Sometimes only a single record is needed, so there is no need to use read_data.
:param table_name:
:param field:
:param kw:
:return: a dict or None
"""
try:
cursor = BaseModel(table_name, self.location,
self.dbname).query_one(kw, field)
return cursor
except Exception as e:
# ExceptionInfo(e)
raise e
# @classmethod
def read_data(self, table_name, field=None, **kw):
"""
一个简易的数据读取接口
:param table_name:
:param field:
:param kw:
:return:
"""
try:
cursor = BaseModel(table_name, self.location,
self.dbname).query(kw, field)
data = | pd.DataFrame() | pandas.DataFrame |
from datetime import datetime, timedelta
import operator
from typing import Any, Sequence, Type, Union, cast
import warnings
import numpy as np
from pandas._libs import NaT, NaTType, Timestamp, algos, iNaT, lib
from pandas._libs.tslibs.c_timestamp import integer_op_not_supported
from pandas._libs.tslibs.period import DIFFERENT_FREQ, IncompatibleFrequency, Period
from pandas._libs.tslibs.timedeltas import Timedelta, delta_to_nanoseconds
from pandas._libs.tslibs.timestamps import RoundTo, round_nsint64
from pandas._typing import DatetimeLikeScalar
from pandas.compat import set_function_name
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError, NullFrequencyError, PerformanceWarning
from pandas.util._decorators import Appender, Substitution
from pandas.util._validators import validate_fillna_kwargs
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_datetime64_any_dtype,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_datetime_or_timedelta_dtype,
is_dtype_equal,
is_float_dtype,
is_integer_dtype,
is_list_like,
is_object_dtype,
is_period_dtype,
is_string_dtype,
is_timedelta64_dtype,
is_unsigned_integer_dtype,
pandas_dtype,
)
from pandas.core.dtypes.generic import ABCSeries
from pandas.core.dtypes.inference import is_array_like
from pandas.core.dtypes.missing import is_valid_nat_for_dtype, isna
from pandas.core import missing, nanops, ops
from pandas.core.algorithms import checked_add_with_arr, take, unique1d, value_counts
from pandas.core.arrays.base import ExtensionArray, ExtensionOpsMixin
import pandas.core.common as com
from pandas.core.indexers import check_bool_array_indexer
from pandas.core.ops.common import unpack_zerodim_and_defer
from pandas.core.ops.invalid import invalid_comparison, make_invalid_op
from pandas.tseries import frequencies
from pandas.tseries.offsets import DateOffset, Tick
def _datetimelike_array_cmp(cls, op):
"""
Wrap comparison operations to convert Timestamp/Timedelta/Period-like to
boxed scalars/arrays.
"""
opname = f"__{op.__name__}__"
nat_result = opname == "__ne__"
@unpack_zerodim_and_defer(opname)
def wrapper(self, other):
if isinstance(other, str):
try:
# GH#18435 strings get a pass from tzawareness compat
other = self._scalar_from_string(other)
except ValueError:
# failed to parse as Timestamp/Timedelta/Period
return invalid_comparison(self, other, op)
if isinstance(other, self._recognized_scalars) or other is NaT:
other = self._scalar_type(other)
self._check_compatible_with(other)
other_i8 = self._unbox_scalar(other)
result = op(self.view("i8"), other_i8)
if isna(other):
result.fill(nat_result)
elif not is_list_like(other):
return invalid_comparison(self, other, op)
elif len(other) != len(self):
raise ValueError("Lengths must match")
else:
if isinstance(other, list):
# TODO: could use pd.Index to do inference?
other = np.array(other)
if not isinstance(other, (np.ndarray, type(self))):
return invalid_comparison(self, other, op)
if is_object_dtype(other):
# We have to use comp_method_OBJECT_ARRAY instead of numpy
# comparison otherwise it would fail to raise when
# comparing tz-aware and tz-naive
with np.errstate(all="ignore"):
result = ops.comp_method_OBJECT_ARRAY(
op, self.astype(object), other
)
o_mask = isna(other)
elif not type(self)._is_recognized_dtype(other.dtype):
return invalid_comparison(self, other, op)
else:
# For PeriodDType this casting is unnecessary
other = type(self)._from_sequence(other)
self._check_compatible_with(other)
result = op(self.view("i8"), other.view("i8"))
o_mask = other._isnan
if o_mask.any():
result[o_mask] = nat_result
if self._hasnans:
result[self._isnan] = nat_result
return result
return set_function_name(wrapper, opname, cls)
class AttributesMixin:
_data: np.ndarray
@classmethod
def _simple_new(cls, values, **kwargs):
raise AbstractMethodError(cls)
@property
def _scalar_type(self) -> Type[DatetimeLikeScalar]:
"""The scalar associated with this datelike
* PeriodArray : Period
* DatetimeArray : Timestamp
* TimedeltaArray : Timedelta
"""
raise AbstractMethodError(self)
def _scalar_from_string(
self, value: str
) -> Union[Period, Timestamp, Timedelta, NaTType]:
"""
Construct a scalar type from a string.
Parameters
----------
value : str
Returns
-------
Period, Timestamp, or Timedelta, or NaT
Whatever the type of ``self._scalar_type`` is.
Notes
-----
This should call ``self._check_compatible_with`` before
unboxing the result.
"""
raise AbstractMethodError(self)
def _unbox_scalar(self, value: Union[Period, Timestamp, Timedelta, NaTType]) -> int:
"""
Unbox the integer value of a scalar `value`.
Parameters
----------
value : Union[Period, Timestamp, Timedelta]
Returns
-------
int
Examples
--------
>>> self._unbox_scalar(Timedelta('10s')) # DOCTEST: +SKIP
10000000000
"""
raise AbstractMethodError(self)
def _check_compatible_with(
self, other: Union[Period, Timestamp, Timedelta, NaTType], setitem: bool = False
) -> None:
"""
Verify that `self` and `other` are compatible.
* DatetimeArray verifies that the timezones (if any) match
* PeriodArray verifies that the freq matches
* Timedelta has no verification
In each case, NaT is considered compatible.
Parameters
----------
other
setitem : bool, default False
For __setitem__ we may have stricter compatibility restrictions than
for comparisons.
Raises
------
Exception
"""
raise AbstractMethodError(self)
class DatelikeOps:
"""
Common ops for DatetimeIndex/PeriodIndex, but not TimedeltaIndex.
"""
@Substitution(
URL="https://docs.python.org/3/library/datetime.html"
"#strftime-and-strptime-behavior"
)
def strftime(self, date_format):
"""
Convert to Index using specified date_format.
Return an Index of formatted strings specified by date_format, which
supports the same string format as the python standard library. Details
of the string format can be found in `python string format
doc <%(URL)s>`__.
Parameters
----------
date_format : str
Date format string (e.g. "%%Y-%%m-%%d").
Returns
-------
ndarray
NumPy ndarray of formatted strings.
See Also
--------
to_datetime : Convert the given argument to datetime.
DatetimeIndex.normalize : Return DatetimeIndex with times to midnight.
DatetimeIndex.round : Round the DatetimeIndex to the specified freq.
DatetimeIndex.floor : Floor the DatetimeIndex to the specified freq.
Examples
--------
>>> rng = pd.date_range(pd.Timestamp("2018-03-10 09:00"),
... periods=3, freq='s')
>>> rng.strftime('%%B %%d, %%Y, %%r')
Index(['March 10, 2018, 09:00:00 AM', 'March 10, 2018, 09:00:01 AM',
'March 10, 2018, 09:00:02 AM'],
dtype='object')
"""
result = self._format_native_types(date_format=date_format, na_rep=np.nan)
return result.astype(object)
class TimelikeOps:
"""
Common ops for TimedeltaIndex/DatetimeIndex, but not PeriodIndex.
"""
_round_doc = """
Perform {op} operation on the data to the specified `freq`.
Parameters
----------
freq : str or Offset
The frequency level to {op} the index to. Must be a fixed
frequency like 'S' (second) not 'ME' (month end). See
:ref:`frequency aliases <timeseries.offset_aliases>` for
a list of possible `freq` values.
ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'
Only relevant for DatetimeIndex:
- 'infer' will attempt to infer fall dst-transition hours based on
order
- bool-ndarray where True signifies a DST time, False designates
a non-DST time (note that this flag is only applicable for
ambiguous times)
- 'NaT' will return NaT where there are ambiguous times
- 'raise' will raise an AmbiguousTimeError if there are ambiguous
times.
.. versionadded:: 0.24.0
nonexistent : 'shift_forward', 'shift_backward', 'NaT', timedelta, \
default 'raise'
A nonexistent time does not exist in a particular timezone
where clocks moved forward due to DST.
- 'shift_forward' will shift the nonexistent time forward to the
closest existing time
- 'shift_backward' will shift the nonexistent time backward to the
closest existing time
- 'NaT' will return NaT where there are nonexistent times
- timedelta objects will shift nonexistent times by the timedelta
- 'raise' will raise a NonExistentTimeError if there are
nonexistent times.
.. versionadded:: 0.24.0
Returns
-------
DatetimeIndex, TimedeltaIndex, or Series
Index of the same type for a DatetimeIndex or TimedeltaIndex,
or a Series with the same index for a Series.
Raises
------
ValueError if the `freq` cannot be converted.
Examples
--------
**DatetimeIndex**
>>> rng = pd.date_range('1/1/2018 11:59:00', periods=3, freq='min')
>>> rng
DatetimeIndex(['2018-01-01 11:59:00', '2018-01-01 12:00:00',
'2018-01-01 12:01:00'],
dtype='datetime64[ns]', freq='T')
"""
_round_example = """>>> rng.round('H')
DatetimeIndex(['2018-01-01 12:00:00', '2018-01-01 12:00:00',
'2018-01-01 12:00:00'],
dtype='datetime64[ns]', freq=None)
**Series**
>>> pd.Series(rng).dt.round("H")
0 2018-01-01 12:00:00
1 2018-01-01 12:00:00
2 2018-01-01 12:00:00
dtype: datetime64[ns]
"""
_floor_example = """>>> rng.floor('H')
DatetimeIndex(['2018-01-01 11:00:00', '2018-01-01 12:00:00',
'2018-01-01 12:00:00'],
dtype='datetime64[ns]', freq=None)
**Series**
>>> pd.Series(rng).dt.floor("H")
0 2018-01-01 11:00:00
1 2018-01-01 12:00:00
2 2018-01-01 12:00:00
dtype: datetime64[ns]
"""
_ceil_example = """>>> rng.ceil('H')
DatetimeIndex(['2018-01-01 12:00:00', '2018-01-01 12:00:00',
'2018-01-01 13:00:00'],
dtype='datetime64[ns]', freq=None)
**Series**
>>> pd.Series(rng).dt.ceil("H")
0 2018-01-01 12:00:00
1 2018-01-01 12:00:00
2 2018-01-01 13:00:00
dtype: datetime64[ns]
"""
def _round(self, freq, mode, ambiguous, nonexistent):
# round the local times
if is_datetime64tz_dtype(self):
# operate on naive timestamps, then convert back to aware
naive = self.tz_localize(None)
result = naive._round(freq, mode, ambiguous, nonexistent)
aware = result.tz_localize(
self.tz, ambiguous=ambiguous, nonexistent=nonexistent
)
return aware
values = self.view("i8")
result = round_nsint64(values, mode, freq)
result = self._maybe_mask_results(result, fill_value=NaT)
return self._simple_new(result, dtype=self.dtype)
@Appender((_round_doc + _round_example).format(op="round"))
def round(self, freq, ambiguous="raise", nonexistent="raise"):
return self._round(freq, RoundTo.NEAREST_HALF_EVEN, ambiguous, nonexistent)
@Appender((_round_doc + _floor_example).format(op="floor"))
def floor(self, freq, ambiguous="raise", nonexistent="raise"):
return self._round(freq, RoundTo.MINUS_INFTY, ambiguous, nonexistent)
@Appender((_round_doc + _ceil_example).format(op="ceil"))
def ceil(self, freq, ambiguous="raise", nonexistent="raise"):
return self._round(freq, RoundTo.PLUS_INFTY, ambiguous, nonexistent)
class DatetimeLikeArrayMixin(ExtensionOpsMixin, AttributesMixin, ExtensionArray):
"""
Shared Base/Mixin class for DatetimeArray, TimedeltaArray, PeriodArray
Assumes that __new__/__init__ defines:
_data
_freq
and that the inheriting class has methods:
_generate_range
"""
@property
def ndim(self) -> int:
return self._data.ndim
@property
def shape(self):
return self._data.shape
def reshape(self, *args, **kwargs):
# Note: we drop any freq
data = self._data.reshape(*args, **kwargs)
return type(self)(data, dtype=self.dtype)
def ravel(self, *args, **kwargs):
# Note: we drop any freq
data = self._data.ravel(*args, **kwargs)
return type(self)(data, dtype=self.dtype)
@property
def _box_func(self):
"""
box function to get object from internal representation
"""
raise AbstractMethodError(self)
def _box_values(self, values):
"""
apply box func to passed values
"""
return lib.map_infer(values, self._box_func)
def __iter__(self):
return (self._box_func(v) for v in self.asi8)
@property
def asi8(self) -> np.ndarray:
"""
Integer representation of the values.
Returns
-------
ndarray
An ndarray with int64 dtype.
"""
# do not cache or you'll create a memory leak
return self._data.view("i8")
@property
def _ndarray_values(self):
return self._data
# ----------------------------------------------------------------
# Rendering Methods
def _format_native_types(self, na_rep="NaT", date_format=None):
"""
Helper method for astype when converting to strings.
Returns
-------
ndarray[str]
"""
raise AbstractMethodError(self)
def _formatter(self, boxed=False):
# TODO: Remove Datetime & DatetimeTZ formatters.
return "'{}'".format
# ----------------------------------------------------------------
# Array-Like / EA-Interface Methods
@property
def nbytes(self):
return self._data.nbytes
def __array__(self, dtype=None) -> np.ndarray:
# used for Timedelta/DatetimeArray, overwritten by PeriodArray
if is_object_dtype(dtype):
return np.array(list(self), dtype=object)
return self._data
@property
def size(self) -> int:
"""The number of elements in this array."""
return np.prod(self.shape)
def __len__(self) -> int:
return len(self._data)
def __getitem__(self, key):
"""
This getitem defers to the underlying array, which by-definition can
only handle list-likes, slices, and integer scalars
"""
is_int = lib.is_integer(key)
if lib.is_scalar(key) and not is_int:
raise IndexError(
"only integers, slices (`:`), ellipsis (`...`), "
"numpy.newaxis (`None`) and integer or boolean "
"arrays are valid indices"
)
getitem = self._data.__getitem__
if is_int:
val = getitem(key)
if | lib.is_scalar(val) | pandas._libs.lib.is_scalar |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright 2021 Recurve Analytics, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import pandas as pd
import pytest
import os
import random
import sqlite3
from tempfile import mkdtemp
from flexvalue.calculations import FlexValueRun
from flexvalue.settings import ACC_COMPONENTS_ELECTRICITY, ACC_COMPONENTS_GAS
@pytest.fixture
def metered_ids():
return [f"id_{i}" for i in range(5)]
@pytest.fixture
def deer_ids():
return ["DEER_LS_1", "DEER_LS_2"]
@pytest.fixture
def metered_load_shape(metered_ids):
random.seed(0)
output = []
for _id in metered_ids:
for hour in range(8760):
savings = random.random() * 0.1
output.append(
{"identifier": _id, "hour_of_year": hour, "hourly_mwh_savings": savings}
)
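# Pivot the long records into one column per meter ID, indexed by hour_of_year (8760 rows)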
df = (
pd.DataFrame(output)
.pivot(index="hour_of_year", columns="identifier", values="hourly_mwh_savings")
.reset_index()
.set_index("hour_of_year")
)
df.columns.name = None
return df
@pytest.fixture
def user_inputs(metered_ids, deer_ids):
return pd.DataFrame(
[
{
"ID": id_,
"load_shape": id_,
"start_year": 2021,
"start_quarter": 1,
"utility": "PGE",
"climate_zone": "CZ1",
"units": 1,
"eul": 5,
"ntg": 1.0,
"discount_rate": 0.0766,
"admin": 100,
"measure": 2000,
"incentive": 1000,
"therms_profile": "winter",
"therms_savings": 400,
"mwh_savings": 1,
}
for id_ in metered_ids + deer_ids
]
).set_index("ID")
@pytest.fixture
def database_year(pytestconfig):
database_year = pytestconfig.getoption("database_year")
if not database_year:
database_year = "1111"
db_path = mkdtemp()
os.environ["DATABASE_LOCATION"] = db_path
con = sqlite3.connect(f"{db_path}/{database_year}.db")
random.seed(1)
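# One random value per ACC electricity component (plus marginal GHG), generated once for the synthetic avoided-cost table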
acc_elec_cols = {
col: random.random()
for col in ACC_COMPONENTS_ELECTRICITY + ["marginal_ghg"]
}
df_acc_elec = pd.DataFrame(
[
{
"climate_zone": "CZ1",
"utility": "PGE",
"hour_of_year": hour,
"hour_of_day": hour % 24,
"year": year,
"month": (
pd.Timestamp("2020-01-01") + | pd.Timedelta(hour, unit="H") | pandas.Timedelta |
"""
Introduction
--------------
This python file contains the source code used to carry the data preparation
process
Code
------
"""
# -*- coding: utf-8 -*-
import logging
import pandas as pd
from pathlib import Path
from datetime import datetime
import sqlite3
BASE_RAW_DATA_DIR = 'data/raw'
"""
str: Base raw data directory
"""
BASE_PROCESSED_DATA_DIR = 'data/processed'
"""
str: Base processed data directory
"""
GPU_CSV_FILE = BASE_RAW_DATA_DIR + '/gpu.csv'
"""
str: gpu.csv file location
"""
CHECK_CSV_FILE = BASE_RAW_DATA_DIR + '/application-checkpoints.csv'
"""
str: application-checkpoints.csv filename file location
"""
TASK_CSV_FILE = BASE_RAW_DATA_DIR + '/task-x-y.csv'
"""
str: task-x-y.csv file location
"""
PROCESSED_CSV_FILE = BASE_PROCESSED_DATA_DIR + '/processed.csv'
"""
str: processed.csv final dataset file location
"""
TIMESTAMP_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'
"""
str: string used to format timestamp for datetime conversion
"""
def timestamp_conv(df):
""" Converts a timestamp to datetime
Parameters
----------
df
dataframe to convert to datetime
-------
float
converted timestamp
"""
df = df.apply(lambda x: (datetime.strptime(x, TIMESTAMP_FORMAT)))
return(df)
def clean_gpu(gpu_df):
"""Clean gpu dataframe by dropping uneeded serial number and
fixes timestamp format to datetime
Parameters
----------
gpu_df
gpu dataframe to clean
Returns
-------
pandas.core.frame.DataFrame
Cleaned GPU dataframe
"""
# Drop unneeded serial column
gpu_df.drop(columns='gpuSerial', inplace=True)
gpu_df['timestamp'] = timestamp_conv(gpu_df['timestamp'])
return(gpu_df)
def merge_check_task(checkpoints_df, tasks_df):
"""merge (left join) checkpoints with task df through job and task id
Parameters
----------
checkpoints_df
application checkpoints dataframe to merge
tasks_df
tasks dataframe to merge
Returns
-------
pandas.core.frame.DataFrame
Merged application checkpoints and tasks dataframe
"""
# Use left join on taskId and jobId
check_task_df = checkpoints_df.merge(tasks_df,
on=['taskId', 'jobId'], how='left')
return (check_task_df)
def clean_check_task(check_task_df):
"""Removes uneeded ids and fixes timestamp format to datetime
for merged application checkpoints and tasks df
Parameters
----------
check_task_df
merged application checkpoints and tasks df to clean
Returns
-------
pandas.core.frame.DataFrame
Cleaned merged application checkpoints and tasks dataframe
"""
# Drop unneeded ids
check_task_df.drop(columns= ['jobId', 'taskId'], inplace=True)
# Fix date format
check_task_df['timestamp'] = timestamp_conv(check_task_df['timestamp'])
return(check_task_df)
def merge_check_task_gpu(gpu_df, check_task_df):
"""merge (left join) gpu df with first merged df through host and timestamp
Parameters
----------
check_task_df
application checkpoints and tasks merged dataframe to merge with gpu df
gpu_df
gpu dataframe to merge
Returns
-------
pandas.core.frame.DataFrame
GPU metrics merged with checkpoint/task events, averaged per task
"""
# Record start and stop times for events and drop old timestamps
check_task_df_start = check_task_df[
check_task_df['eventType'] == 'START']
check_task_df_stop = check_task_df[
check_task_df['eventType'] == 'STOP']
check_task_df_start.rename(
index=str, columns={"timestamp": "start_time"}, inplace = True)
check_task_df_stop.rename(
index=str, columns={"timestamp": "stop_time"}, inplace = True)
check_task_df_stop.drop('eventType', axis = 1, inplace = True)
check_task_df_start.drop('eventType', axis = 1, inplace = True)
# Make each field record start and stop combined
check_task_df = pd.merge( check_task_df_start, check_task_df_stop,
on=['hostname', 'eventName', 'x', 'y', 'level'])
# Remove any timestamps that occur out of the gpu dataset
check_task_df = check_task_df[
(check_task_df['start_time'] >= gpu_df['timestamp'][0]) &
(check_task_df['stop_time']
<= gpu_df['timestamp'][len(gpu_df)-1])]
# Use sqlite to combine with gpu only where the gpu timestamp falls between the start and stop times
# connection to sql
conn = sqlite3.connect(':memory:')
# move dataframes to sql
check_task_df.to_sql('CheckTask', conn, index=False)
gpu_df.to_sql('Gpu', conn, index=False)
# SQL query
query = '''
SELECT *
FROM Gpu
LEFT JOIN CheckTask ON gpu.hostname = CheckTask.hostname
WHERE gpu.timestamp >= CheckTask.start_time
AND gpu.timestamp <= CheckTask.stop_time
'''
# get new df
merged_df = pd.read_sql_query(query, conn)
# drop duplicate hostname row (index 8)
merged_df = merged_df.loc[:,~merged_df.columns.duplicated()]
# group for averages (average stats for every task)
functions = {
'powerDrawWatt': 'mean', 'gpuTempC': 'mean',
'gpuUtilPerc': 'mean', 'gpuMemUtilPerc': 'mean',
'start_time': 'first', 'stop_time': 'first',
'gpuUUID' : 'first'}
merged_df = merged_df.groupby(
['hostname', 'eventName', 'x', 'y', 'level'],
as_index=False, sort=False
).agg(functions)
return(merged_df)
def main():
""" Runs data processing scripts to turn raw data from (../raw) into
cleaned data ready to be analyzed (saved in ../processed).
"""
logger = logging.getLogger(__name__)
logger.info('making final data set from raw data')
# Read datasets in
gpu_df = | pd.read_csv(GPU_CSV_FILE) | pandas.read_csv |
import pandas as pd
from pathlib import Path
from loguru import logger
from random import choices
import privacy
from simulate_row import simulate_row
ROOT_DIRECTORY = Path(__file__).absolute().parent.parent.parent
DATA_DIRECTORY = ROOT_DIRECTORY / "data"
ground_truth_file = DATA_DIRECTORY / "ground_truth_detroit.csv"
output_file = DATA_DIRECTORY / "submission.csv"
number_histos = 4 # Create 4 histograms
population_queries = 1 # Use one population query for number of incidents
sample = 0 # Do not use sampling
sample_size = 1 # Sample size is 1
epsilons = [1.0] # Use and epsilon value of 1.0
# Define the combined columns for the 4 histograms
combo_dict = {'type': ['engine_area_c', 'exposure_c', 'incident_type_c', 'property_use_c', 'detector_c', 'structure_stat_c'],
'injury': ['cinjury_c', 'cfatal_c', 'finjury_c', 'ffatal_c'],
'call': ['call_month_c', 'call_day_c', 'call_hour_c'],
'result': ['dispatch_n', 'arrival_n', 'clear_n']
}
# Define the number dictionary for each numeric column
num_dict = {'dispatch_n': [1000, 50, 5000],
'arrival_n': [1000, 50, 5000],
'clear_n': [5000, 50, 10000]
}
# The main program
def main():
# Load the ground truth and check for proper formatting
logger.info("begin pre-processing")
ground_truth = pd.read_csv(ground_truth_file)
valid = privacy.check_input(ground_truth, combo_dict, num_dict)
if valid != 1:
return
# Preprocess the ground truth
df, num_decodes, col_decodes = privacy.preprocess(ground_truth, combo_dict, num_dict)
privacy.histo_test(df, combo_dict)
logger.info("end pre-processing")
# main for loop
for epsilon in epsilons:
# Create dataframe for final results
header = list(ground_truth.columns)
final_df = pd.DataFrame(columns=header)
final_list = []
# sensitivity = (histograms x sample size) + population queries
sensitivity = (number_histos * sample_size) + population_queries
# Create the incidents - population count
num_incidents = len(df)
num_incidents_noise = int(privacy.laplaceMechanism(num_incidents, sensitivity, epsilon))
# Create the four histograms
logger.info(f"begin histogram creation {epsilon}")
type_pop, type_w = privacy.create_private_histo(df, 'type', sample, sample_size, sensitivity, epsilon)
injury_pop, injury_w = privacy.create_private_histo(df, 'injury', sample, sample_size, sensitivity, epsilon)
call_pop, call_w = privacy.create_private_histo(df, 'call', sample, sample_size, sensitivity, epsilon)
result_pop, result_w = privacy.create_private_histo(df, 'result', sample, sample_size, sensitivity, epsilon)
# Create the individual incidents
for i in range(num_incidents_noise):
type_value = choices(type_pop, type_w, k=1)
injury_value = choices(injury_pop, injury_w, k=1)
call_value = choices(call_pop, call_w, k=1)
result_value = choices(result_pop, result_w, k=1)
row = simulate_row(i,
type_value[0],
injury_value[0],
call_value[0],
result_value[0],
num_dict,
num_decodes,
col_decodes
)
final_list.append(row)
# Output the dataset
logger.info('writing data to output file')
final_df = | pd.DataFrame.from_dict(final_list) | pandas.DataFrame.from_dict |
#%%
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import datetime
import copy
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
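# Display every bare expression in a cell, not just the last one; the quick data-quality checks below rely on this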
#%%
raw_path = os.path.join("data", "raw")
clean_path = os.path.join("data", "clean")
# Loading Electronidex data
data_orders = pd.read_csv(
os.path.join(raw_path, "orders_translated.csv"), sep=";", decimal=","
)
data_items = pd.read_csv(os.path.join(raw_path, "lineitems.csv"), sep=";", decimal=",")
data_categories = pd.read_csv(os.path.join(clean_path, "product_categories.csv"))
# loading Blackwell data
data_blackwell = pd.read_csv(
os.path.join(raw_path, "existingproductattributes2017.csv")
)
#%%
# Cleaning Electronidex data
# Keeping only the orders with state "Completed"
data_orders.query("state == 'Completed'", inplace=True)
# removing whitespace from sku for a clean join with categories
data_items["sku"] = data_items.sku.str.strip()
# Keeping only interesting columns
data_orders.drop(columns=["state", "created_date", "total_paid"], inplace=True)
data_items.drop(columns=["id", "product_id", "date"], inplace=True)
# recoding the categories in Electronidex data to match those of Blackwell
new_categories = {
"accessories": "Accessories",
"smartphone": "Smartphone",
"tablet": "Tablet",
"display": "Display",
"laptop": "Laptop",
"other": "Other",
"extended warranty": "ExtendedWarranty",
"pc": "PC",
"smartwatch": "Smartwatch",
"service": "Service",
"camera": "Camera",
"software": "Software",
"printer": "Printer",
}
data_categories.columns = ["sku", "category"]
data_categories.replace(dict(category=new_categories), inplace=True)
# Lets combine items to completed orders with an inner join to keep only items
# from completed orders
data_orders_items = data_orders.join(
data_items.set_index("id_order"), how="inner", on="id_order"
)
# Adding the product categories
data_electronidex = data_orders_items.join(
data_categories.set_index("sku"), how="left", on="sku"
)
# replacing missing categories with "unknown"
data_electronidex.category.fillna("Unknown", inplace=True)
# Dropping the Extended warranties as the information from these is
# in general not that interesting
data_electronidex.query("category != 'ExtendedWarranty'", inplace=True)
# dropping the now unnecesary id_order column
# data_electronidex.drop(columns=["id_order"], inplace=True)
#%%
# No missing values
data_electronidex.isnull().sum()
# Checking data quality of product quantity and unit price
# There are no suprising values in product quantity
data_electronidex.product_quantity.min()
data_electronidex.product_quantity.max()
data_electronidex.product_quantity.mean()
data_electronidex.product_quantity.median()
# There are no suprising values in product quantity
data_electronidex.unit_price.min()
data_electronidex.unit_price.max()
data_electronidex.unit_price.mean()
data_electronidex.unit_price.median()
sns.boxplot(data_electronidex.unit_price)
# the maximum price seems weird
data_electronidex[data_electronidex.unit_price == data_electronidex.unit_price.max()]
# But there is only one observation with this product and the category is unknown. We let this stand
data_electronidex[
data_electronidex.sku
== max(
data_electronidex[
data_electronidex.unit_price == data_electronidex.unit_price.max()
]["sku"]
)
]
#%%
# calculating total price of items taking into account the amount of items
data_electronidex["price"] = (
data_electronidex["product_quantity"] * data_electronidex["unit_price"]
)
# creating a separate dataset for electronidex product prices
data_electronidex_products_medprice = data_electronidex.groupby(
["sku", "category"], as_index=False
)["unit_price"].median()
# dropping the now unnecessary sku and unit price columns
data_electronidex.drop(columns=["sku", "unit_price"], inplace=True)
#%%
# Cleaning Blackwell data
# keeping only interesting columns
data_blackwell = data_blackwell[["ProductType", "Price", "Volume", "ProfitMargin"]]
# Adding combined price and profit from all purchases
data_blackwell["Price_total"] = data_blackwell["Price"] * data_blackwell["Volume"]
data_blackwell["Profit_total"] = (
data_blackwell["Price"] * data_blackwell["Volume"] * data_blackwell["ProfitMargin"]
)
data_blackwell["Profit_per_unit"] = (
data_blackwell["Price"] * data_blackwell["ProfitMargin"]
)
data_blackwell["Profit_perc_share"] = (
data_blackwell["Profit_total"] * 100 / data_blackwell["Profit_total"].sum()
).round(1)
# Dropping the Extended warranties as the information from these seems false and is also in general not that interesting
data_blackwell.query("ProductType != 'ExtendedWarranty'", inplace=True)
# Dropping the original profit margin
data_blackwell.drop(columns=["ProfitMargin"], inplace=True)
#%%
# Aggregating
data_electronidex_sales = data_electronidex.groupby(["category"], as_index=False)[
["product_quantity", "price"]
].sum()
data_blackwell_sales = data_blackwell.groupby(["ProductType"], as_index=False)[
"Volume", "Price_total", "Profit_total"
].sum()
#%%
data_electronidex_sales["product_quantity"] = (
data_electronidex_sales["product_quantity"]
.divide(data_electronidex_sales.product_quantity.sum())
.multiply(100)
)
data_electronidex_sales["price"] = (
data_electronidex_sales["price"]
.divide(data_electronidex_sales.price.sum())
.multiply(100)
)
data_blackwell_sales["Volume"] = (
data_blackwell_sales["Volume"]
.divide(data_blackwell_sales.Volume.sum())
.multiply(100)
)
data_blackwell_sales["Price_total"] = (
data_blackwell_sales["Price_total"]
.divide(data_blackwell_sales.Price_total.sum())
.multiply(100)
)
data_blackwell_sales["Profit_total"] = (
data_blackwell_sales["Profit_total"]
.divide(data_blackwell_sales.Profit_total.sum())
.multiply(100)
)
#%%
# unifying labels for convenience
data_electronidex_sales.columns = ["category", "volume_perc", "price_perc"]
data_blackwell_sales.columns = ["category", "price_perc", "volume_perc", "profit_perc"]
#%%
# combine dataframes for plotting
data_electronidex_sales["Company"] = "Electronindex"
data_blackwell_sales["Company"] = "Blackwell"
data_sales = | pd.concat([data_electronidex_sales, data_blackwell_sales], sort=False) | pandas.concat |
import logging
import shutil
import sys
import datetime
import os
import netCDF4
import numpy
import pandas as pd
import rasterio
import rasterstats
import requests
import xarray
from rasterio.enums import Resampling
FFGS_REGIONS = [('Hispaniola', 'hispaniola'), ('Central America', 'centralamerica')]
def setenvironment(threddspath, wrksppath):
"""
Dependencies: os, shutil, datetime, urllib.request, app_settings (options)
"""
logging.info('\nSetting the Environment for the GFS Workflow')
# determine the date and cycle hour of the most recent GFS forecast to use as the timestamp
now = datetime.datetime.utcnow()
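# GFS cycles are published at 00/06/12/18 UTC; the hour thresholds below leave a few hours for each cycle to become available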
if now.hour > 21:
timestamp = now.strftime("%Y%m%d") + '18'
elif now.hour > 15:
timestamp = now.strftime("%Y%m%d") + '12'
elif now.hour > 9:
timestamp = now.strftime("%Y%m%d") + '06'
elif now.hour > 3:
timestamp = now.strftime("%Y%m%d") + '00'
else:
now = now - datetime.timedelta(days=1)
timestamp = now.strftime("%Y%m%d") + '18'
logging.info('determined the timestamp to download: ' + timestamp)
# perform a redundancy check, if the last timestamp is the same as current, abort the workflow
timefile = os.path.join(threddspath, 'gfs_timestamp.txt')
if not os.path.exists(timefile):
redundant = False
with open(timefile, 'w') as tf:
tf.write(timestamp)
os.chmod(timefile, 0o777)
else:
with open(timefile, 'r') as file:
lasttime = file.readline()
if lasttime == timestamp:
# use the redundancy check to exit the function because it has already been run
redundant = True
logging.info('The last recorded timestamp is the timestamp we determined, aborting workflow')
return timestamp, redundant
elif lasttime == 'clobbered':
# if you marked clobber is true, dont check for old folders from partially completed workflows
redundant = False
else:
# check to see if there are remnants of partially completed runs and dont destroy old folders
redundant = False
chk_hisp = os.path.join(wrksppath, 'hispaniola', 'gfs_GeoTIFFs_resampled')
chk_centr = os.path.join(wrksppath, 'centralamerica', 'gfs_GeoTIFFs_resampled')
if os.path.exists(chk_hisp) and os.path.exists(chk_centr):
logging.info('There are data for this timestep but the workflow wasn\'t finished. Analyzing...')
return timestamp, redundant
# create the file structure and their permissions for the new data
for region in FFGS_REGIONS:
logging.info('Creating APP WORKSPACE (GeoTIFF) file structure for ' + region[1])
new_dir = os.path.join(wrksppath, region[1], 'gfs_GeoTIFFs')
if os.path.exists(new_dir):
shutil.rmtree(new_dir)
os.mkdir(new_dir)
os.chmod(new_dir, 0o777)
new_dir = os.path.join(wrksppath, region[1], 'gfs_GeoTIFFs_resampled')
if os.path.exists(new_dir):
shutil.rmtree(new_dir)
os.mkdir(new_dir)
os.chmod(new_dir, 0o777)
logging.info('Creating THREDDS file structure for ' + region[1])
new_dir = os.path.join(threddspath, region[1], 'gfs')
if os.path.exists(new_dir):
shutil.rmtree(new_dir)
os.mkdir(new_dir)
os.chmod(new_dir, 0o777)
new_dir = os.path.join(threddspath, region[1], 'gfs', timestamp)
if os.path.exists(new_dir):
shutil.rmtree(new_dir)
os.mkdir(new_dir)
os.chmod(new_dir, 0o777)
for filetype in ('gribs', 'netcdfs', 'processed'):
new_dir = os.path.join(threddspath, region[1], 'gfs', timestamp, filetype)
if os.path.exists(new_dir):
shutil.rmtree(new_dir)
os.mkdir(new_dir)
os.chmod(new_dir, 0o777)
logging.info('All done setting up folders, on to do work')
return timestamp, redundant
def download_gfs(threddspath, timestamp, region, model):
logging.info('\nStarting GFS grib Downloads for ' + region)
# set filepaths
gribsdir = os.path.join(threddspath, region, model, timestamp, 'gribs')
# if you already have a folder with data for this timestep, quit this function (you dont need to download it)
if not os.path.exists(gribsdir):
logging.info('There is no download folder, you must have already processed them. Skipping download stage.')
return True
elif len(os.listdir(gribsdir)) >= 28:
logging.info('There are already 28 forecast steps in here. No need to download them')
return True
# otherwise, remove anything in the folder before starting (in case there was a partial download)
else:
shutil.rmtree(gribsdir)
os.mkdir(gribsdir)
os.chmod(gribsdir, 0o777)
# get the parts of the timestamp to put into the url
time = datetime.datetime.strptime(timestamp, "%Y%m%d%H").strftime("%H")
fc_date = datetime.datetime.strptime(timestamp, "%Y%m%d%H").strftime("%Y%m%d")
# This is the List of forecast timesteps for 5 days (6-hr increments). download them all
fc_steps = ['006', '012', '018', '024', '030', '036', '042', '048', '054', '060', '066', '072', '078', '084',
'090', '096', '102', '108', '114', '120', '126', '132', '138', '144', '150', '156', '162', '168']
# this is where the actual downloads happen. set the url, filepath, then download
subregions = {
'hispaniola': 'subregion=&leftlon=-75&rightlon=-68&toplat=20.5&bottomlat=17',
'centralamerica': 'subregion=&leftlon=-94.25&rightlon=-75.5&toplat=19.5&bottomlat=5.5',
}
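# NOMADS filter URL: surface-level APCP (total precipitation) only, clipped to the regional bounding box, one forecast step per request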
for step in fc_steps:
url = 'https://nomads.ncep.noaa.gov/cgi-bin/filter_gfs_0p25.pl?file=gfs.t' + time + 'z.pgrb2.0p25.f' + step + \
'&lev_surface=on&var_APCP=on&' + subregions[region] + '&dir=%2Fgfs.' + fc_date + '%2F' + time
fc_timestamp = datetime.datetime.strptime(timestamp, "%Y%m%d%H")
file_timestep = fc_timestamp + datetime.timedelta(hours=int(step))
filename_timestep = datetime.datetime.strftime(file_timestep, "%Y%m%d%H")
filename = filename_timestep + '.grb'
logging.info('downloading the file ' + filename)
filepath = os.path.join(gribsdir, filename)
try:
with requests.get(url, stream=True) as r:
r.raise_for_status()
with open(filepath, 'wb') as f:
for chunk in r.iter_content(chunk_size=8192):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
except requests.HTTPError as e:
errorcode = e.response.status_code
logging.info('\nHTTPError ' + str(errorcode) + ' downloading ' + filename + ' from\n' + url)
if errorcode == 404:
logging.info('The file was not found on the server, trying an older forecast time')
elif errorcode == 500:
logging.info('Probably a problem with the URL. Check the log and try the link')
return False
logging.info('Finished Downloads')
return True
def gfs_tiffs(threddspath, wrksppath, timestamp, region, model):
"""
Script to convert the 6-hr accumulation grib files into netCDFs and GeoTIFFs.
Dependencies: datetime, os, numpy, rasterio
"""
logging.info('\nStarting to process the ' + model + ' gribs into GeoTIFFs')
# declare the environment
tiffs = os.path.join(wrksppath, region, model + '_GeoTIFFs')
gribs = os.path.join(threddspath, region, model, timestamp, 'gribs')
netcdfs = os.path.join(threddspath, region, model, timestamp, 'netcdfs')
# if you already have gfs netcdfs in the netcdfs folder, quit the function
if not os.path.exists(gribs):
logging.info('There is no gribs folder, you must have already run this step. Skipping conversions')
return
# otherwise, remove anything in the folder before starting (in case there was a partial conversion)
else:
shutil.rmtree(netcdfs)
os.mkdir(netcdfs)
os.chmod(netcdfs, 0o777)
shutil.rmtree(tiffs)
os.mkdir(tiffs)
os.chmod(tiffs, 0o777)
# create a list of all the files of type grib and convert to a list of their file paths
files = os.listdir(gribs)
files = [grib for grib in files if grib.endswith('.grb')]
files.sort()
# Read raster dimensions only once to apply to all rasters
path = os.path.join(gribs, files[0])
raster_dim = rasterio.open(path)
width = raster_dim.width
height = raster_dim.height
lon_min = raster_dim.bounds.left
lon_max = raster_dim.bounds.right
lat_min = raster_dim.bounds.bottom
lat_max = raster_dim.bounds.top
# Geotransform shared by all rasters (west, south, east, north, width, height)
geotransform = rasterio.transform.from_bounds(lon_min, lat_min, lon_max, lat_max, width, height)
# Convert each 6-hr accumulation grib to a netCDF and a GeoTIFF
for i in files:
logging.info('working on file ' + i)
path = os.path.join(gribs, i)
src = rasterio.open(path)
file_array = src.read(1)
# using the last grib file for the day (path) convert it to a netcdf and set the variable to file_array
logging.info('opening grib file ' + path)
obj = xarray.open_dataset(path, engine='cfgrib', backend_kwargs={'filter_by_keys': {'typeOfLevel': 'surface'}})
logging.info('converting it to a netcdf')
ncname = i.replace('.grb', '.nc')
logging.info('saving it to the path ' + path)
ncpath = os.path.join(netcdfs, ncname)
obj.to_netcdf(ncpath, mode='w')
logging.info('converted')
logging.info('writing the correct values to the tp array')
nc = netCDF4.Dataset(ncpath, 'a')
nc['tp'][:] = file_array
nc.close()
logging.info('created a netcdf')
# Specify the GeoTIFF filepath
tif_filename = i.replace('grb', 'tif')
tif_filepath = os.path.join(tiffs, tif_filename)
# Save the raster as a GeoTIFF
with rasterio.open(
tif_filepath,
'w',
driver='GTiff',
height=file_array.shape[0],
width=file_array.shape[1],
count=1,
dtype=file_array.dtype,
nodata=numpy.nan,
crs='+proj=latlong',
transform=geotransform,
) as dst:
dst.write(file_array, 1)
logging.info('wrote it to a GeoTIFF\n')
# clear the gribs folder now that we're done with this
shutil.rmtree(gribs)
return
def resample(wrksppath, region, model):
"""
Script to resample rasters from .25 to .0025 degree in order for rasterstats to work
Dependencies: datetime, os, numpy, rasterio
"""
logging.info('\nResampling the rasters for ' + region)
# Define app workspace and sub-paths
tiffs = os.path.join(wrksppath, region, model + '_GeoTIFFs')
resampleds = os.path.join(wrksppath, region, model + '_GeoTIFFs_resampled')
# Create directory for the resampled GeoTIFFs
if not os.path.exists(tiffs):
logging.info('There is no tiffs folder. You must have already resampled them. Skipping resampling')
return
# List all Resampled GeoTIFFs
files = os.listdir(tiffs)
files = [tif for tif in files if tif.endswith('.tif')]
files.sort()
# Read raster dimensions
path = os.path.join(tiffs, files[0])
raster_dim = rasterio.open(path)
width = raster_dim.width
height = raster_dim.height
lon_min = raster_dim.bounds.left
lon_max = raster_dim.bounds.right
lat_min = raster_dim.bounds.bottom
lat_max = raster_dim.bounds.top
# Geotransform for each resampled raster (west, south, east, north, width, height)
geotransform_res = rasterio.transform.from_bounds(lon_min, lat_min, lon_max, lat_max, width * 100, height * 100)
# Resample each GeoTIFF
for file in files:
path = os.path.join(tiffs, file)
logging.info(path)
with rasterio.open(path) as dataset:
data = dataset.read(
out_shape=(int(dataset.height * 100), int(dataset.width * 100)),
# Reduce 100 to 10 if using the whole globe
resampling=Resampling.nearest
)
# Convert new resampled array from 3D to 2D
data = numpy.squeeze(data, axis=0)
# Specify the filepath of the resampled raster
resample_filename = file.replace('.tif', '_resampled.tif')
resample_filepath = os.path.join(resampleds, resample_filename)
# Save the GeoTIFF
with rasterio.open(
resample_filepath,
'w',
driver='GTiff',
height=data.shape[0],
width=data.shape[1],
count=1,
dtype=data.dtype,
nodata=numpy.nan,
crs='+proj=latlong',
transform=geotransform_res,
) as dst:
dst.write(data, 1)
# delete the non-resampled tiffs now that we dont need them
shutil.rmtree(tiffs)
return
def zonal_statistics(wrksppath, timestamp, region, model):
"""
Script to calculate average precip over FFGS polygon shapefile
Dependencies: datetime, os, pandas, rasterstats
"""
logging.info('\nDoing Zonal Statistics on ' + region)
# Define app workspace and sub-paths
resampleds = os.path.join(wrksppath, region, model + '_GeoTIFFs_resampled')
shp_path = os.path.join(wrksppath, region, 'shapefiles', 'ffgs_' + region + '.shp')
stat_file = os.path.join(wrksppath, region, model + 'results.csv')
# check that there are resampled tiffs to do zonal statistics on
if not os.path.exists(resampleds):
logging.info('There are no resampled tiffs to do zonal statistics on. Skipping Zonal Statistics')
return
# List all Resampled GeoTIFFs
files = os.listdir(resampleds)
files = [tif for tif in files if tif.endswith('.tif')]
files.sort()
# do zonal statistics for each resampled tiff file and put it in the stats dataframe
stats_df = pd.DataFrame()
for i in range(len(files)):
logging.info('starting zonal statistics for ' + files[i])
ras_path = os.path.join(resampleds, files[i])
stats = rasterstats.zonal_stats(
shp_path,
ras_path,
stats=['count', 'max', 'mean'],
geojson_out=True
)
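# The first 10 characters of the resampled filename are the YYYYMMDDHH timestep for this raster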
timestep = files[i][:10]
# for each stat that you get out, write it to the dataframe
logging.info('writing the statistics for this file to the dataframe')
for j in range(len(stats)):
temp_data = stats[j]['properties']
temp_data.update({'Forecast Timestamp': timestamp})
temp_data.update({'Timestep': timestep})
temp_df = pd.DataFrame([temp_data])
stats_df = stats_df.append(temp_df, ignore_index=True)
# write the resulting dataframe to a csv
logging.info('\ndone with zonal statistics, rounding values, writing to a csv file')
stats_df = stats_df.round({'max': 1, 'mean': 1})
stats_df.to_csv(stat_file, index=False)
# delete the resampled tiffs now that we dont need them
logging.info('deleting the resampled tiffs directory')
shutil.rmtree(resampleds)
return
def nc_georeference(threddspath, timestamp, region, model):
"""
Description: Intended to make a THREDDS data server compatible netcdf file out of an incorrectly structured
netcdf file.
Author: <NAME>, 2019
Dependencies: netCDF4, os, datetime
see github/rileyhales/datatools for more details
"""
logging.info('\nProcessing the netCDF files')
# setting the environment file paths
netcdfs = os.path.join(threddspath, region, model, timestamp, 'netcdfs')
processed = os.path.join(threddspath, region, model, timestamp, 'processed')
# if you already have processed netcdfs files, skip this and quit the function
if not os.path.exists(netcdfs):
logging.info('There are no netcdfs to be converted. Skipping netcdf processing.')
return
# otherwise, remove anything in the folder before starting (in case there was a partial processing)
else:
shutil.rmtree(processed)
os.mkdir(processed)
os.chmod(processed, 0o777)
# list the files that need to be converted
net_files = os.listdir(netcdfs)
files = [file for file in net_files if file.endswith('.nc')]
logging.info('There are ' + str(len(files)) + ' compatible files.')
# read the first file that we'll copy data from in the next blocks of code
logging.info('Preparing the reference file')
path = os.path.join(netcdfs, net_files[0])
netcdf_obj = netCDF4.Dataset(path, 'r', clobber=False, diskless=True)
# get a dictionary of the dimensions and their size and rename the north/south and east/west ones
dimensions = {}
for dimension in netcdf_obj.dimensions.keys():
dimensions[dimension] = netcdf_obj.dimensions[dimension].size
dimensions['lat'] = dimensions['latitude']
dimensions['lon'] = dimensions['longitude']
dimensions['time'] = 1
del dimensions['latitude'], dimensions['longitude']
# get a list of the variables and remove the one's i'm going to 'manually' correct
variables = netcdf_obj.variables
del variables['valid_time'], variables['step'], variables['latitude'], variables['longitude'], variables['surface']
variables = variables.keys()
# min lat and lon and the interval between values (these are static values
netcdf_obj.close()
# this is where the files start getting copied
for file in files:
logging.info('Working on file ' + str(file))
openpath = os.path.join(netcdfs, file)
savepath = os.path.join(processed, 'processed_' + file)
# open the file to be copied
original = netCDF4.Dataset(openpath, 'r', clobber=False, diskless=True)
duplicate = netCDF4.Dataset(savepath, 'w', clobber=True, format='NETCDF4', diskless=False)
# set the global netcdf attributes - important for georeferencing
duplicate.setncatts(original.__dict__)
# specify dimensions from what we copied before
for dimension in dimensions:
duplicate.createDimension(dimension, dimensions[dimension])
# 'Manually' create the dimensions that need to be set carefully
duplicate.createVariable(varname='lat', datatype='f4', dimensions='lat')
duplicate.createVariable(varname='lon', datatype='f4', dimensions='lon')
# create the lat and lon values as a 1D array
duplicate['lat'][:] = original['latitude'][:]
duplicate['lon'][:] = original['longitude'][:]
# set the attributes for lat and lon (except fill value, you just can't copy it)
for attr in original['latitude'].__dict__:
if attr != "_FillValue":
duplicate['lat'].setncattr(attr, original['latitude'].__dict__[attr])
for attr in original['longitude'].__dict__:
if attr != "_FillValue":
duplicate['lon'].setncattr(attr, original['longitude'].__dict__[attr])
# copy the rest of the variables
hour = 6
for variable in variables:
# check to use the lat/lon dimension names
dimension = original[variable].dimensions
if 'latitude' in dimension:
dimension = list(dimension)
dimension.remove('latitude')
dimension.append('lat')
dimension = tuple(dimension)
if 'longitude' in dimension:
dimension = list(dimension)
dimension.remove('longitude')
dimension.append('lon')
dimension = tuple(dimension)
if len(dimension) == 2:
dimension = ('time', 'lat', 'lon')
if variable == 'time':
dimension = ('time',)
# create the variable
duplicate.createVariable(varname=variable, datatype='f4', dimensions=dimension)
# copy the arrays of data and set the timestamp/properties
date = datetime.datetime.strptime(timestamp, "%Y%m%d%H")
date = datetime.datetime.strftime(date, "%Y-%m-%d %H:00:00")
if variable == 'time':
duplicate[variable][:] = [hour]
hour = hour + 6
duplicate[variable].long_name = original[variable].long_name
duplicate[variable].units = "hours since " + date
duplicate[variable].axis = "T"
# also set the begin date of this data
duplicate[variable].begin_date = timestamp
if variable == 'lat':
duplicate[variable][:] = original[variable][:]
duplicate[variable].axis = "Y"
if variable == 'lon':
duplicate[variable][:] = original[variable][:]
duplicate[variable].axis = "X"
else:
duplicate[variable][:] = original[variable][:]
duplicate[variable].axis = "lat lon"
duplicate[variable].long_name = original[variable].long_name
duplicate[variable].begin_date = timestamp
duplicate[variable].units = original[variable].units
# close the files, delete the one you just did, start again
original.close()
duplicate.sync()
duplicate.close()
# delete the netcdfs now that we're done with them triggering future runs to skip this step
shutil.rmtree(netcdfs)
logging.info('Finished File Conversions')
return
def new_ncml(threddspath, timestamp, region, model):
logging.info('\nWriting a new ncml file for this date')
# create a new ncml file by filling in the template with the right dates and writing to a file
ncml = os.path.join(threddspath, region, model, 'wms.ncml')
date = datetime.datetime.strptime(timestamp, "%Y%m%d%H")
date = datetime.datetime.strftime(date, "%Y-%m-%d %H:00:00")
with open(ncml, 'w') as file:
file.write(
'<netcdf xmlns="http://www.unidata.ucar.edu/namespaces/netcdf/ncml-2.2">\n'
' <variable name="time" type="int" shape="time">\n'
' <attribute name="units" value="hours since ' + date + '"/>\n'
' <attribute name="_CoordinateAxisType" value="Time" />\n'
' <values start="6" increment="6" />\n'
' </variable>\n'
' <aggregation dimName="time" type="joinExisting" recheckEvery="1 hour">\n'
' <scan location="' + timestamp + '/processed/"/>\n'
' </aggregation>\n'
'</netcdf>'
)
logging.info('Wrote New .ncml')
return
def new_colorscales(wrksppath, region, model):
# set the environment
logging.info('\nGenerating a new color scale csv for the ' + model + ' results')
colorscales = os.path.join(wrksppath, region, model + 'colorscales.csv')
results = os.path.join(wrksppath, region, model + 'results.csv')
logging.info(results)
answers = pd.DataFrame(columns=['cat_id', 'cum_mean', 'mean', 'max'])
res_df = | pd.read_csv(results, index_col=False) | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 4 2021, last edited 27 Oct 2021
Fiber flow emissions calculations module - class version
Inputs:
Excel file with old PPI market & emissions data ('FiberModelAll_Python_v3-yields.xlsx')
Outputs:
Dict of keys 'old','new','forest','trade' with emissions calcs
(*testing inputs*
x = 'FiberModelAll_Python_v2.xlsx'
f2pVolOld = pd.read_excel(x, 'OldData', usecols="A:I", skiprows=1, nrows=21, index_col=0)
pbpVolOld = pd.read_excel(x, 'OldData', usecols="K:R", skiprows=1, nrows=14, index_col=0)
pbpVolOld.columns = [x[:-2] for x in pbpVolOld.columns]
consCollOld = pd.read_excel(x, 'OldData', usecols="K:Q", skiprows=34, nrows=3, index_col=0)
rLevel = pd.read_excel(x, 'Demand', usecols="F:K", skiprows=16, nrows=5)
rLevel = {t: list(rLevel[t][np.isfinite(rLevel[t])].values) for t in fProd}
fProd = [t for t in f2pVolOld.iloc[:,:6].columns]
fProdM = [t for t in f2pVolOld.iloc[:,:7].columns]
rFiber = f2pVolOld.index[:16]
vFiber = f2pVolOld.index[16:]
rPulp = [p for p in pbpVolOld.index if 'Rec' in p]
vPulp = [q for q in pbpVolOld.index if 'Vir' in q]
fPulp = [f for f in pbpVolOld.index]
import numpy as np
f2pYld = pd.read_excel(x, 'Fiber', usecols="I:O", skiprows=1, nrows=21)
f2pYld.index = np.concatenate([rFiber.values, vFiber.values], axis=0)
pulpYld = pd.read_excel(x, 'Pulp', usecols="D", skiprows=1, nrows=14)
pulpYld.index = rPulp + vPulp
transPct = pd.read_excel(x, 'EmTables', usecols="L:P", skiprows=32, nrows=11, index_col=0)
transKM = pd.read_excel(x, 'EmTables', usecols="L:P", skiprows=46, nrows=11, index_col=0)
transUMI = pd.read_excel(x, 'EmTables', usecols="L:P", skiprows=59, nrows=1, index_col=0)
rsdlModes = pd.read_excel(x, 'EmTables', usecols="A:G", skiprows=32, nrows=6, index_col=0)
rsdlbio = pd.read_excel(x, 'EmTables', usecols="A:H", skiprows=41, nrows=4, index_col=0)
rsdlbio = rsdlbio.fillna(0)
rsdlfos = pd.read_excel(x, 'EmTables', usecols="A:H", skiprows=48, nrows=4, index_col=0)
rsdlfos = rsdlfos.fillna(0)
woodint = pd.read_excel(x, 'EmTables', usecols="A:H", skiprows=58, nrows=1, index_col=0)
wtotalGHGb0 = pd.read_excel(x, 'EmTables', usecols="A:K", skiprows=62, nrows=6, index_col=0)
wtotalGHGb1 = pd.read_excel(x, 'EmTables', usecols="A:K", skiprows=71, nrows=6, index_col=0)
wbioGHGb0 = pd.read_excel(x, 'EmTables', usecols="A:K", skiprows=80, nrows=6, index_col=0)
wbioGHGb1 = pd.read_excel(x, 'EmTables', usecols="A:K", skiprows=89, nrows=6, index_col=0)
wfosGHGb0 = pd.read_excel(x, 'EmTables', usecols="A:K", skiprows=98, nrows=6, index_col=0)
wfosGHGb1 = pd.read_excel(x, 'EmTables', usecols="A:K", skiprows=107, nrows=6, index_col=0)
exportOld = pd.read_excel(x, 'OldData', usecols="E:G", skiprows=31, nrows=16, index_col=0)
exportOld.iloc[:,:-1] = exportOld.iloc[:,:-1]
exportNew = exportOld.iloc[:,:-1] * 1.5
exportNew.columns = ['exportNew']
exportNew = exportNew.assign(TransCode=exportOld['TransCode'].values)
fiberType = pd.read_excel(x, 'OldData', usecols="A:B", skiprows=31, nrows=20, index_col=0)
chinaVals = pd.read_excel(x, 'EmTables', usecols="L:M", skiprows=66, nrows=3, index_col=0)
chinaCons = pd.read_excel(x, 'EmTables', usecols="L:M", skiprows=72, nrows=6, index_col=0)
fYield = pd.read_excel(x, 'EmTables', usecols="L:N", skiprows=81, nrows=5, index_col=0)
)
@author: <NAME>
"""
import pandas as pd
import numpy as np
class en_emissions(): # energy & emissions
def __init__(cls,xls,fProd,rLevel,f2pYld,pulpYld,f2pVolNew,pbpVolNew,consCollNew,exportNew,demandNew):
# xls (str) - name of Excel spreadsheet to pull data from
# fProd (list) - list of products in current scenario
# rLevel (df) - recycled content level by product
# f2pYld (df) - fiber to pulp yield by pulp product; indexed by fiber
# pulpYld (df) - pulp to product yield; pulp as index
# f2pVolNew (df) - fiber to pulp volume (in short tons); indexed by pulp name
# pbpVolNew (df) - pulp by product volume; indexed by pulp name
# consCollNew (df) - domestic consumption, collection, and recovery by product
# demandNew (df) - new demand by product; indexed by rec level
uC = 0.907185 # unit conversion of MM US ton to Mg/metric ton
cls.fProd = fProd
cls.fProdM = fProd + ['Market']
cls.rLevel = rLevel
cls.f2pYld = f2pYld
cls.pulpYld = pulpYld
cls.f2pVolNew = f2pVolNew * uC
cls.pbpVolNew = pbpVolNew * uC
cls.consCollNew = consCollNew * uC
cls.exportNew = exportNew * uC
cls.demandNew = {t: demandNew[t] * uC for t in demandNew.keys()}
with pd.ExcelFile(xls) as x:
# Old data
cls.f2pVolOld = pd.read_excel(x, 'OldData', usecols="A:I", skiprows=1, nrows=21, index_col=0)
cls.f2pVolOld.iloc[:,:-1] = cls.f2pVolOld.iloc[:,:-1] * uC * 1000
cls.f2pVolNew = cls.f2pVolNew.assign(TransCode=cls.f2pVolOld['TransCode'].values)
cls.pbpVolOld = pd.read_excel(x, 'OldData', usecols="K:R", skiprows=1, nrows=14, index_col=0)
cls.pbpVolOld.columns = [x[:-2] for x in cls.pbpVolOld.columns] # has .1 after column names for pandas duplicate
cls.pbpVolOld.iloc[:,:-1] = cls.pbpVolOld.iloc[:,:-1] * uC * 1000
cls.pbpVolNew = cls.pbpVolNew.assign(TransCode=cls.pbpVolOld['TransCode'].values)
cls.prodLD = pd.read_excel(x, 'OldData', usecols="K:Q", skiprows=19, nrows=5, index_col=0) * uC * 1000
cls.prodDemand = pd.read_excel(x, 'OldData', usecols="A:G", skiprows=26, nrows=1, index_col=0) * uC * 1000
cls.consCollOld = pd.read_excel(x, 'OldData', usecols="K:Q", skiprows=29, nrows=3, index_col=0) * uC * 1000
cls.exportOld = pd.read_excel(x, 'OldData', usecols="E:G", skiprows=31, nrows=16, index_col=0)
cls.exportOld.iloc[:,:-1] = cls.exportOld.iloc[:,:-1] * uC * 1000
cls.exportNew = cls.exportNew.assign(TransCode=cls.exportOld['TransCode'].values)
cls.fiberType = pd.read_excel(x, 'OldData', usecols="A:B", skiprows=31, nrows=20, index_col=0)
cls.rFiber = cls.f2pVolOld.index[:16]
cls.vFiber = cls.f2pVolOld.index[16:]
cls.rPulp = [p for p in cls.pbpVolOld.index if 'Rec' in p]
cls.vPulp = [q for q in cls.pbpVolOld.index if 'Vir' in q]
cls.fPulp = [f for f in cls.pbpVolOld.index]
# Emissions Info
cls.chemicals = pd.read_excel(x, 'nonFiber', usecols="A:B,E:L", skiprows=2, nrows=42, index_col=0)
cls.eolEmissions = pd.read_excel(x, 'EmTables', usecols="A:G", skiprows=2, nrows=3, index_col=0)
cls.bfEI = pd.read_excel(x, 'EmTables', usecols="J:P", skiprows=2, nrows=3, index_col=0)
cls.bfEI.columns = [x[:-2] for x in cls.bfEI.columns] # has .1 after column names for some reason
cls.bioPct = pd.read_excel(x, 'EmTables', usecols="J:P", skiprows=8, nrows=2, index_col=0)
cls.pwpEI = pd.read_excel(x, 'EmTables', usecols="O:P", skiprows=14, nrows=5, index_col=0)
cls.bfCO2 = pd.read_excel(x, 'EmTables', usecols="A:G", skiprows=9, nrows=2, index_col=0)
cls.fuelTable = pd.read_excel(x, 'EmTables', usecols="A:M", skiprows=15, nrows=13, index_col=0)
cls.fuelTable = cls.fuelTable.fillna(0)
cls.rsdlModes = pd.read_excel(x, 'EmTables', usecols="A:G", skiprows=32, nrows=6, index_col=0)
cls.rsdlbio = pd.read_excel(x, 'EmTables', usecols="A:H", skiprows=41, nrows=4, index_col=0)
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 14 20:18:24 2019
@author: verascity
This is a little housekeeping script while I work out the kinks on this
classifier; it will eventually go away!
"""
import pandas as pd
df1 = pd.read_csv('vaccine_df_01312019.csv')
import pandas as pd
import numpy as np
print(pd.Series([11, 20, 30, 20, 30, 30, 20]))  # Creates a Series (a column) from the data in this list
print()
print(pd.Series([10, 20, 30, 33], index=["a", "b", "c", "d"]))
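# Added illustration (hedged; not part of the original snippet): with a custom label
# index, elements can be fetched either by label or by position.
s = pd.Series([10, 20, 30, 33], index=["a", "b", "c", "d"])
print(s["c"])     # 30 - label-based lookup
print(s.iloc[2])  # 30 - position-based lookup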
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 15 13:51:54 2020
@author: SE
"""
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 30 16:57:13 2020
@author: SE
"""
import re
import pandas as pd
from matplotlib import pyplot as plt
from datetime import datetime
from collections import Counter
import numpy as np
import random
import os
#Please specify your dataset directory.
os.chdir("your dataset directory")
df_PM=pd.read_csv("1_RQ2_LDA_topics_mapped_by_major_minor_detail_27_6_20.csv", low_memory=False)
gk = df_PM.groupby('Semi_major')
#"""
RUBY_MRI=[]
JVM=[]
Perl=[]
Multi=[]
Python=[]
Gc=[]
R=[]
NET=[]
Node=[]
Julia=[]
Dart_VM=[]
Zend=[]
Elm=[]
Topic=[]
#no of posts after manual categorization
n_RUBY_MRI=13189
n_JVM=74557
n_Perl=1127
n_Multi=10856
n_Python=999
n_Gc=42025
n_R=550
n_NET=9976
n_Node=60475
n_Julia=6098
n_Dart_VM=463
n_Zend=242
n_Elm=1574
for i in range(0, 10):
Topic.append(i)
data=gk.get_group(i)
df4=data.reset_index()
RUBY_MRI1=[]
JVM1=[]
Perl1=[]
Multi1=[]
Python1=[]
Gc1=[]
R1=[]
NET1=[]
Node1=[]
Julia1=[]
Dart_VM1=[]
Zend1=[]
Elm1=[]
for j in range(0, len(df4)):
if df4['post_Enviroment'][j]=='Ruby MRI':
RUBY_MRI1.append('Ruby MRI')
if df4['post_Enviroment'][j]=='JVM':
JVM1.append('JVM')
if df4['post_Enviroment'][j]=='Perl':
Perl1.append('Perl')
if df4['post_Enviroment'][j]=='Multi':
Multi1.append('Multi')
if df4['post_Enviroment'][j]=='Python':
Python1.append('Python')
if df4['post_Enviroment'][j]=='Gc':
Gc1.append('Gc')
if df4['post_Enviroment'][j]=='R':
R1.append('R')
if df4['post_Enviroment'][j]=='.NET':
NET1.append('.NET')
if df4['post_Enviroment'][j]=='Node.js':
Node1.append('Node.js')
if df4['post_Enviroment'][j]=='Julia':
Julia1.append('Julia')
if df4['post_Enviroment'][j]=='Dart VM':
Dart_VM1.append('Dart VM')
if df4['post_Enviroment'][j]=='Zend Engine':
Zend1.append('Zend Engine')
if df4['post_Enviroment'][j]=='Elm':
Elm1.append('Elm')
RUBY_MRI.append((len(RUBY_MRI1)/n_RUBY_MRI)*100)
JVM.append((len(JVM1)/n_JVM)*100)
Perl.append((len(Perl1)/n_Perl)*100)
Multi.append((len(Multi1)/n_Multi)*100)
Python.append((len(Python1)/n_Python)*100)
Gc.append((len(Gc1)/n_Gc)*100)
R.append((len(R1)/n_R)*100)
NET.append((len(NET1)/n_NET)*100)
Node.append((len(Node1)/n_Node)*100)
Julia.append((len(Julia1)/n_Julia)*100)
Dart_VM.append((len(Dart_VM1)/n_Dart_VM)*100)
Zend.append((len(Zend1)/n_Zend)*100)
Elm.append((len(Elm1)/n_Elm)*100)
dict={'Topic':Topic,'Ruby MRI':RUBY_MRI, 'JVM':JVM, 'Perl':Perl, 'Multi':Multi, 'Python':Python,'Gc':Gc,'R':R,'.NET':NET, 'Node.js':Node, 'Julia':Julia, 'Dart VM':Dart_VM, 'Zend Engine':Zend, 'Elm':Elm}
LDA_data = pd.DataFrame(dict)
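# Hedged cross-check (added for illustration; not part of the original script): the
# per-environment percentages built in the loop above can also be derived with a
# crosstab over the same 'Semi_major' and 'post_Enviroment' columns.
totals_by_env = pd.Series({'Ruby MRI': n_RUBY_MRI, 'JVM': n_JVM, 'Perl': n_Perl,
                           'Multi': n_Multi, 'Python': n_Python, 'Gc': n_Gc, 'R': n_R,
                           '.NET': n_NET, 'Node.js': n_Node, 'Julia': n_Julia,
                           'Dart VM': n_Dart_VM, 'Zend Engine': n_Zend, 'Elm': n_Elm})
env_counts = pd.crosstab(df_PM['Semi_major'], df_PM['post_Enviroment'])
pct_check = env_counts.div(totals_by_env, axis=1) * 100  # should mirror LDA_data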
from sklearn.decomposition import PCA
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
# Orders joined to products (prior purchases)
prior = pd.read_csv('D://A//data//instacart//order_products__prior.csv')
# Product information
product = pd.read_csv('D://A//data//instacart//products.csv')
# Users and their order information
order = pd.read_csv('D://A//data//instacart//orders.csv')
# Products and their aisle (category) labels
aisles = pd.read_csv('D://A//data//instacart//aisles.csv')
# Merge the data
pp = pd.merge(prior, product, on=['product_id', 'product_id'])
po = pd.merge(pp, order, on=['order_id', 'order_id'])
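# Hedged sketch of one possible continuation (added for illustration only; the rest
# of the original script is not shown here, and the 'aisle_id'/'user_id'/'aisle'
# column names come from the public Instacart CSVs):
data = pd.merge(po, aisles, on=['aisle_id', 'aisle_id'])      # attach aisle (category) labels
table = pd.crosstab(data['user_id'], data['aisle'])           # user x aisle purchase counts
reduced = PCA(n_components=0.9).fit_transform(table)          # keep ~90% of the variance
km = KMeans(n_clusters=4, random_state=0).fit(reduced)
print(silhouette_score(reduced, km.labels_))                  # rough quality check of the clustering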
import json
import glob
import re
import os
from io import StringIO
from pathlib import Path
import numpy as np
import click
import pandas as pd
import requests
from lxml import etree as ET
from ocrd_models.ocrd_page import parse
from ocrd_utils import bbox_from_points
from .ned import ned
from .ner import ner
from .tsv import read_tsv, write_tsv, extract_doc_links
from .ocr import get_conf_color
@click.command()
@click.argument('tsv-file', type=click.Path(exists=True), required=True, nargs=1)
@click.argument('url-file', type=click.Path(exists=False), required=True, nargs=1)
def extract_document_links(tsv_file, url_file):
parts = extract_doc_links(tsv_file)
urls = [part['url'] for part in parts]
urls = pd.DataFrame(urls, columns=['url'])
urls.to_csv(url_file, sep="\t", quoting=3, index=False)
@click.command()
@click.argument('tsv-file', type=click.Path(exists=True), required=True, nargs=1)
@click.argument('annotated-tsv-file', type=click.Path(exists=False), required=True, nargs=1)
def annotate_tsv(tsv_file, annotated_tsv_file):
parts = extract_doc_links(tsv_file)
annotated_parts = []
for part in parts:
part_data = StringIO(part['header'] + part['text'])
df = pd.read_csv(part_data, sep="\t", comment='#', quoting=3)
df['url_id'] = len(annotated_parts)
annotated_parts.append(df)
df = pd.concat(annotated_parts)
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime
import itertools
import numpy as np
import pytest
from pandas.compat import u
import pandas as pd
from pandas import (
DataFrame, Index, MultiIndex, Period, Series, Timedelta, date_range)
from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
from pandas.util.testing import assert_frame_equal, assert_series_equal
class TestDataFrameReshape(TestData):
def test_pivot(self):
data = {
'index': ['A', 'B', 'C', 'C', 'B', 'A'],
'columns': ['One', 'One', 'One', 'Two', 'Two', 'Two'],
'values': [1., 2., 3., 3., 2., 1.]
}
frame = DataFrame(data)
pivoted = frame.pivot(
index='index', columns='columns', values='values')
expected = DataFrame({
'One': {'A': 1., 'B': 2., 'C': 3.},
'Two': {'A': 1., 'B': 2., 'C': 3.}
})
expected.index.name, expected.columns.name = 'index', 'columns'
tm.assert_frame_equal(pivoted, expected)
# name tracking
assert pivoted.index.name == 'index'
assert pivoted.columns.name == 'columns'
# don't specify values
pivoted = frame.pivot(index='index', columns='columns')
assert pivoted.index.name == 'index'
assert pivoted.columns.names == (None, 'columns')
def test_pivot_duplicates(self):
data = DataFrame({'a': ['bar', 'bar', 'foo', 'foo', 'foo'],
'b': ['one', 'two', 'one', 'one', 'two'],
'c': [1., 2., 3., 3., 4.]})
with pytest.raises(ValueError, match='duplicate entries'):
data.pivot('a', 'b', 'c')
def test_pivot_empty(self):
df = DataFrame({}, columns=['a', 'b', 'c'])
result = df.pivot('a', 'b', 'c')
expected = DataFrame()
tm.assert_frame_equal(result, expected, check_names=False)
def test_pivot_integer_bug(self):
df = DataFrame(data=[("A", "1", "A1"), ("B", "2", "B2")])
result = df.pivot(index=1, columns=0, values=2)
repr(result)
tm.assert_index_equal(result.columns, Index(['A', 'B'], name=0))
def test_pivot_index_none(self):
# gh-3962
data = {
'index': ['A', 'B', 'C', 'C', 'B', 'A'],
'columns': ['One', 'One', 'One', 'Two', 'Two', 'Two'],
'values': [1., 2., 3., 3., 2., 1.]
}
frame = DataFrame(data).set_index('index')
result = frame.pivot(columns='columns', values='values')
expected = DataFrame({
'One': {'A': 1., 'B': 2., 'C': 3.},
'Two': {'A': 1., 'B': 2., 'C': 3.}
})
expected.index.name, expected.columns.name = 'index', 'columns'
assert_frame_equal(result, expected)
# omit values
result = frame.pivot(columns='columns')
expected.columns = pd.MultiIndex.from_tuples([('values', 'One'),
('values', 'Two')],
names=[None, 'columns'])
expected.index.name = 'index'
tm.assert_frame_equal(result, expected, check_names=False)
assert result.index.name == 'index'
assert result.columns.names == (None, 'columns')
expected.columns = expected.columns.droplevel(0)
result = frame.pivot(columns='columns', values='values')
expected.columns.name = 'columns'
tm.assert_frame_equal(result, expected)
def test_stack_unstack(self):
df = self.frame.copy()
df[:] = np.arange(np.prod(df.shape)).reshape(df.shape)
stacked = df.stack()
stacked_df = DataFrame({'foo': stacked, 'bar': stacked})
unstacked = stacked.unstack()
unstacked_df = stacked_df.unstack()
assert_frame_equal(unstacked, df)
assert_frame_equal(unstacked_df['bar'], df)
unstacked_cols = stacked.unstack(0)
unstacked_cols_df = stacked_df.unstack(0)
assert_frame_equal(unstacked_cols.T, df)
assert_frame_equal(unstacked_cols_df['bar'].T, df)
def test_stack_mixed_level(self):
# GH 18310
levels = [range(3), [3, 'a', 'b'], [1, 2]]
# flat columns:
df = DataFrame(1, index=levels[0], columns=levels[1])
result = df.stack()
expected = Series(1, index=MultiIndex.from_product(levels[:2]))
assert_series_equal(result, expected)
# MultiIndex columns:
df = DataFrame(1, index=levels[0],
columns=MultiIndex.from_product(levels[1:]))
result = df.stack(1)
expected = DataFrame(1, index=MultiIndex.from_product([levels[0],
levels[2]]),
columns=levels[1])
assert_frame_equal(result, expected)
# as above, but used labels in level are actually of homogeneous type
result = df[['a', 'b']].stack(1)
expected = expected[['a', 'b']]
assert_frame_equal(result, expected)
def test_unstack_fill(self):
# GH #9746: fill_value keyword argument for Series
# and DataFrame unstack
# From a series
data = Series([1, 2, 4, 5], dtype=np.int16)
data.index = MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
result = data.unstack(fill_value=-1)
expected = DataFrame({'a': [1, -1, 5], 'b': [2, 4, -1]},
index=['x', 'y', 'z'], dtype=np.int16)
assert_frame_equal(result, expected)
# From a series with incorrect data type for fill_value
result = data.unstack(fill_value=0.5)
expected = DataFrame({'a': [1, 0.5, 5], 'b': [2, 4, 0.5]},
index=['x', 'y', 'z'], dtype=np.float)
assert_frame_equal(result, expected)
# GH #13971: fill_value when unstacking multiple levels:
df = DataFrame({'x': ['a', 'a', 'b'],
'y': ['j', 'k', 'j'],
'z': [0, 1, 2],
'w': [0, 1, 2]}).set_index(['x', 'y', 'z'])
unstacked = df.unstack(['x', 'y'], fill_value=0)
key = ('<KEY>')
expected = unstacked[key]
result = pd.Series([0, 0, 2], index=unstacked.index, name=key)
assert_series_equal(result, expected)
stacked = unstacked.stack(['x', 'y'])
stacked.index = stacked.index.reorder_levels(df.index.names)
# Workaround for GH #17886 (unnecessarily casts to float):
stacked = stacked.astype(np.int64)
result = stacked.loc[df.index]
assert_frame_equal(result, df)
# From a series
s = df['w']
result = s.unstack(['x', 'y'], fill_value=0)
expected = unstacked['w']
assert_frame_equal(result, expected)
def test_unstack_fill_frame(self):
# From a dataframe
rows = [[1, 2], [3, 4], [5, 6], [7, 8]]
df = DataFrame(rows, columns=list('AB'), dtype=np.int32)
df.index = MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
result = df.unstack(fill_value=-1)
rows = [[1, 3, 2, 4], [-1, 5, -1, 6], [7, -1, 8, -1]]
expected = DataFrame(rows, index=list('xyz'), dtype=np.int32)
expected.columns = MultiIndex.from_tuples(
[('A', 'a'), ('A', 'b'), ('B', 'a'), ('B', 'b')])
assert_frame_equal(result, expected)
# From a mixed type dataframe
df['A'] = df['A'].astype(np.int16)
df['B'] = df['B'].astype(np.float64)
result = df.unstack(fill_value=-1)
expected['A'] = expected['A'].astype(np.int16)
expected['B'] = expected['B'].astype(np.float64)
assert_frame_equal(result, expected)
# From a dataframe with incorrect data type for fill_value
result = df.unstack(fill_value=0.5)
rows = [[1, 3, 2, 4], [0.5, 5, 0.5, 6], [7, 0.5, 8, 0.5]]
expected = DataFrame(rows, index=list('xyz'), dtype=np.float)
expected.columns = MultiIndex.from_tuples(
[('A', 'a'), ('A', 'b'), ('B', 'a'), ('B', 'b')])
assert_frame_equal(result, expected)
def test_unstack_fill_frame_datetime(self):
# Test unstacking with date times
dv = pd.date_range('2012-01-01', periods=4).values
data = Series(dv)
data.index = MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
result = data.unstack()
expected = DataFrame({'a': [dv[0], pd.NaT, dv[3]],
'b': [dv[1], dv[2], pd.NaT]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
result = data.unstack(fill_value=dv[0])
expected = DataFrame({'a': [dv[0], dv[0], dv[3]],
'b': [dv[1], dv[2], dv[0]]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
def test_unstack_fill_frame_timedelta(self):
# Test unstacking with time deltas
td = [Timedelta(days=i) for i in range(4)]
data = Series(td)
data.index = MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
result = data.unstack()
expected = DataFrame({'a': [td[0], pd.NaT, td[3]],
'b': [td[1], td[2], pd.NaT]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
result = data.unstack(fill_value=td[1])
expected = DataFrame({'a': [td[0], td[1], td[3]],
'b': [td[1], td[2], td[1]]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
def test_unstack_fill_frame_period(self):
# Test unstacking with period
periods = [Period('2012-01'), Period('2012-02'), Period('2012-03'),
Period('2012-04')]
data = Series(periods)
data.index = MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
result = data.unstack()
expected = DataFrame({'a': [periods[0], None, periods[3]],
'b': [periods[1], periods[2], None]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
result = data.unstack(fill_value=periods[1])
expected = DataFrame({'a': [periods[0], periods[1], periods[3]],
'b': [periods[1], periods[2], periods[1]]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
def test_unstack_fill_frame_categorical(self):
# Test unstacking with categorical
data = pd.Series(['a', 'b', 'c', 'a'], dtype='category')
data.index = pd.MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')],
)
# By default missing values will be NaN
result = data.unstack()
expected = DataFrame({'a': pd.Categorical(list('axa'),
categories=list('abc')),
'b': pd.Categorical(list('bcx'),
categories=list('abc'))},
index=list('xyz'))
assert_frame_equal(result, expected)
# Fill with non-category results in a TypeError
msg = r"'fill_value' \('d'\) is not in"
with pytest.raises(TypeError, match=msg):
data.unstack(fill_value='d')
# Fill with category value replaces missing values as expected
result = data.unstack(fill_value='c')
expected = DataFrame({'a': pd.Categorical(list('aca'),
categories=list('abc')),
'b': pd.Categorical(list('bcc'),
categories=list('abc'))},
index=list('xyz'))
assert_frame_equal(result, expected)
def test_unstack_preserve_dtypes(self):
# Checks fix for #11847
df = pd.DataFrame(dict(state=['IL', 'MI', 'NC'],
index=['a', 'b', 'c'],
some_categories=pd.Series(['a', 'b', 'c']
).astype('category'),
A=np.random.rand(3),
B=1,
C='foo',
D=pd.Timestamp('20010102'),
E=pd.Series([1.0, 50.0, 100.0]
).astype('float32'),
F=pd.Series([3.0, 4.0, 5.0]).astype('float64'),
G=False,
H=pd.Series([1, 200, 923442], dtype='int8')))
def unstack_and_compare(df, column_name):
unstacked1 = df.unstack([column_name])
unstacked2 = df.unstack(column_name)
assert_frame_equal(unstacked1, unstacked2)
df1 = df.set_index(['state', 'index'])
unstack_and_compare(df1, 'index')
df1 = df.set_index(['state', 'some_categories'])
unstack_and_compare(df1, 'some_categories')
df1 = df.set_index(['F', 'C'])
unstack_and_compare(df1, 'F')
df1 = df.set_index(['G', 'B', 'state'])
unstack_and_compare(df1, 'B')
df1 = df.set_index(['E', 'A'])
unstack_and_compare(df1, 'E')
df1 = df.set_index(['state', 'index'])
s = df1['A']
unstack_and_compare(s, 'index')
def test_stack_ints(self):
columns = MultiIndex.from_tuples(list(itertools.product(range(3),
repeat=3)))
df = DataFrame(np.random.randn(30, 27), columns=columns)
assert_frame_equal(df.stack(level=[1, 2]),
df.stack(level=1).stack(level=1))
assert_frame_equal(df.stack(level=[-2, -1]),
df.stack(level=1).stack(level=1))
df_named = df.copy()
df_named.columns.set_names(range(3), inplace=True)
assert_frame_equal(df_named.stack(level=[1, 2]),
df_named.stack(level=1).stack(level=1))
def test_stack_mixed_levels(self):
columns = MultiIndex.from_tuples(
[('A', 'cat', 'long'), ('B', 'cat', 'long'),
('A', 'dog', 'short'), ('B', 'dog', 'short')],
names=['exp', 'animal', 'hair_length']
)
df = DataFrame(np.random.randn(4, 4), columns=columns)
animal_hair_stacked = df.stack(level=['animal', 'hair_length'])
exp_hair_stacked = df.stack(level=['exp', 'hair_length'])
# GH #8584: Need to check that stacking works when a number
# is passed that is both a level name and in the range of
# the level numbers
df2 = df.copy()
df2.columns.names = ['exp', 'animal', 1]
assert_frame_equal(df2.stack(level=['animal', 1]),
animal_hair_stacked, check_names=False)
assert_frame_equal(df2.stack(level=['exp', 1]),
exp_hair_stacked, check_names=False)
# When mixed types are passed and the ints are not level
# names, raise
msg = ("level should contain all level names or all level numbers, not"
" a mixture of the two")
with pytest.raises(ValueError, match=msg):
df2.stack(level=['animal', 0])
# GH #8584: Having 0 in the level names could raise a
# strange error about lexsort depth
df3 = df.copy()
df3.columns.names = ['exp', 'animal', 0]
assert_frame_equal(df3.stack(level=['animal', 0]),
animal_hair_stacked, check_names=False)
def test_stack_int_level_names(self):
columns = MultiIndex.from_tuples(
[('A', 'cat', 'long'), ('B', 'cat', 'long'),
('A', 'dog', 'short'), ('B', 'dog', 'short')],
names=['exp', 'animal', 'hair_length']
)
df = DataFrame(np.random.randn(4, 4), columns=columns)
exp_animal_stacked = df.stack(level=['exp', 'animal'])
animal_hair_stacked = df.stack(level=['animal', 'hair_length'])
exp_hair_stacked = df.stack(level=['exp', 'hair_length'])
df2 = df.copy()
df2.columns.names = [0, 1, 2]
assert_frame_equal(df2.stack(level=[1, 2]), animal_hair_stacked,
check_names=False)
assert_frame_equal(df2.stack(level=[0, 1]), exp_animal_stacked,
check_names=False)
assert_frame_equal(df2.stack(level=[0, 2]), exp_hair_stacked,
check_names=False)
# Out-of-order int column names
df3 = df.copy()
df3.columns.names = [2, 0, 1]
assert_frame_equal(df3.stack(level=[0, 1]), animal_hair_stacked,
check_names=False)
assert_frame_equal(df3.stack(level=[2, 0]), exp_animal_stacked,
check_names=False)
assert_frame_equal(df3.stack(level=[2, 1]), exp_hair_stacked,
check_names=False)
def test_unstack_bool(self):
df = DataFrame([False, False],
index=MultiIndex.from_arrays([['a', 'b'], ['c', 'l']]),
columns=['col'])
rs = df.unstack()
xp = DataFrame(np.array([[False, np.nan], [np.nan, False]],
dtype=object),
index=['a', 'b'],
columns=MultiIndex.from_arrays([['col', 'col'],
['c', 'l']]))
assert_frame_equal(rs, xp)
def test_unstack_level_binding(self):
# GH9856
mi = pd.MultiIndex(
levels=[[u('foo'), u('bar')], [u('one'), u('two')],
[u('a'), u('b')]],
codes=[[0, 0, 1, 1], [0, 1, 0, 1], [1, 0, 1, 0]],
names=[u('first'), u('second'), u('third')])
s = pd.Series(0, index=mi)
result = s.unstack([1, 2]).stack(0)
expected_mi = pd.MultiIndex(
levels=[['foo', 'bar'], ['one', 'two']],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=['first', 'second'])
expected = pd.DataFrame(np.array([[np.nan, 0],
[0, np.nan],
[np.nan, 0],
[0, np.nan]],
dtype=np.float64),
index=expected_mi,
columns=pd.Index(['a', 'b'], name='third'))
assert_frame_equal(result, expected)
# -*- coding: utf-8 -*-
"""Supports Kp index values. Downloads data from ftp.gfz-potsdam.de or SWPC.
Parameters
----------
platform
'sw'
name
'kp'
tag
- '' : Standard Kp data
- 'forecast' : Grab forecast data from SWPC (next 3 days)
- 'recent' : Grab last 30 days of Kp data from SWPC
Note
----
Standard Kp files are stored by the first day of each month. When downloading
use kp.download(start, stop, freq='MS') to only download days that could
possibly have data. 'MS' gives a monthly start frequency.
The forecast data is stored by generation date, where each file contains the
forecast for the next three days. Forecast data downloads are only supported
for the current day. When loading forecast data, the date specified with the
load command is the date the forecast was generated. The data loaded will span
three days. To always ensure you are loading the most recent data, load
the data with tomorrow's date.
::
kp = pysat.Instrument('sw', 'kp', tag='recent')
kp.download()
kp.load(date=kp.tomorrow())
Recent data is also stored by the generation date from the SWPC. Each file
contains 30 days of Kp measurements. The load date issued to pysat corresponds
to the generation date.
The recent and forecast data should not be used with the data padding option
available from pysat.Instrument objects.
Warnings
--------
The 'forecast' Kp data loads three days at a time. The data padding feature
and multi_file_day feature available from the pysat.Instrument object
is not appropriate for Kp 'forecast' data.
This material is based upon work supported by the
National Science Foundation under Grant Number 1259508.
Any opinions, findings, and conclusions or recommendations expressed in this
material are those of the author(s) and do not necessarily reflect the views
of the National Science Foundation.
Custom Functions
----------------
filter_geoquiet
Filters pysat.Instrument data for given time after Kp drops below gate.
"""
import functools
import numpy as np
import os
import pandas as pds
import pysat
import logging
logger = logging.getLogger(__name__)
platform = 'sw'
name = 'kp'
tags = {'': '',
'forecast': 'SWPC Forecast data next (3 days)',
'recent': 'SWPC provided Kp for past 30 days'}
sat_ids = {'': ['', 'forecast', 'recent']}
# generate todays date to support loading forecast data
now = pysat.datetime.now()
today = pysat.datetime(now.year, now.month, now.day)
# set test dates
_test_dates = {'': {'': pysat.datetime(2009, 1, 1),
'forecast': today + pds.DateOffset(days=1)}}
def load(fnames, tag=None, sat_id=None):
"""Load Kp index files
Parameters
------------
fnames : pandas.Series
Series of filenames
tag : str or NoneType
tag or None (default=None)
sat_id : str or NoneType
satellite id or None (default=None)
Returns
---------
data : pandas.DataFrame
Object containing satellite data
meta : pysat.Meta
Object containing metadata such as column names and units
Notes
-----
Called by pysat. Not intended for direct use by user.
"""
from pysat.utils.time import parse_date
meta = pysat.Meta()
if tag == '':
# Kp data stored monthly, need to return data daily
# the daily date is attached to filename
# parse off the last date, load month of data, downselect to desired
# day
data = pds.DataFrame()
# set up fixed width format for these files
colspec = [(0, 2), (2, 4), (4, 6), (7, 10), (10, 13), (13, 16),
(16, 19), (19, 23), (23, 26), (26, 29), (29, 32), (32, 50)]
for filename in fnames:
# the daily date is attached to filename
# parse off the last date, load month of data, downselect to the
# desired day
fname = filename[0:-11]
date = pysat.datetime.strptime(filename[-10:], '%Y-%m-%d')
temp = pds.read_fwf(fname, colspecs=colspec, skipfooter=4,
header=None, parse_dates=[[0, 1, 2]],
date_parser=parse_date, index_col='0_1_2')
idx, = np.where((temp.index >= date) &
(temp.index < date + pds.DateOffset(days=1)))
import pandas as pd
import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt
import seaborn as sns
import shap
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import r2_score
# from .utils import Boba_Utils as u
class Boba_Model_Diagnostics():
def __init__(self):
pass
def run_model_diagnostics(self, model, X_train, X_test, y_train, y_test, target):
self.get_model_stats(model, X_train, X_test, y_train, y_test, target)
self.plot_shap_imp(model,X_train)
self.plot_shap_bar(model,X_train)
self.residual_plot(model,X_test,y_test,target)
self.residual_density_plot(model,X_test,y_test,target)
self.identify_outliers(model, X_test, y_test,target)
self.residual_mean_plot(model,X_test,y_test,target)
self.residual_variance_plot(model,X_test,y_test,target)
self.PVA_plot(model,X_test,y_test,target)
self.inverse_PVA_plot(model,X_train,y_train,target)
self.estimates_by_var(model,X_train,y_train,target,'Age')
self.error_by_var(model,X_train,y_train,target,'Age')
self.volatility_by_var(model,X_train,y_train,target,'Age')
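# Hedged usage sketch (added for clarity; `model`, the train/test splits and the
# target column name are assumed to be produced by the surrounding Boba pipeline):
#   diag = Boba_Model_Diagnostics()
#   diag.run_model_diagnostics(model, X_train, X_test, y_train, y_test, target)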
def get_model_stats(self, model, X_train, X_test, y_train, y_test, target):
train_pred = model.predict(X_train)
test_pred = model.predict(X_test)
test_RMSE = np.sqrt(mean_squared_error(y_test, test_pred)),
test_R2 = model.score(X_test,y_test),
test_MAE = mean_absolute_error(y_test, test_pred),
train_RMSE = np.sqrt(mean_squared_error(y_train, train_pred)),
train_R2 = model.score(X_train,y_train),
train_MAE = mean_absolute_error(y_train, train_pred),
df = pd.DataFrame(data = {'RMSE': np.round(train_RMSE,4),
'R^2': np.round(train_R2,4),
'MAE': np.round(train_MAE,4)}, index = ['train'])
df2 = pd.DataFrame(data = {'RMSE': np.round(test_RMSE,4),
'R^2': np.round(test_R2,4),
'MAE': np.round(test_MAE,4)}, index = ['test'])
print("Model Statistics for {}".format(target))
print('-'*40)
print(df)
print('-'*40)
print(df2)
print('-'*40)
def plot_shap_imp(self,model,X_train):
shap_values = shap.TreeExplainer(model).shap_values(X_train)
shap.summary_plot(shap_values, X_train)
plt.show()
def plot_shap_bar(self,model,X_train):
shap_values = shap.TreeExplainer(model).shap_values(X_train)
shap.summary_plot(shap_values, X_train, plot_type='bar')
plt.show()
def feature_imp(self,model,X_train,target):
sns.set_style('darkgrid')
names = X_train.columns
coef_df = pd.DataFrame({"Feature": names, "Importance": model.feature_importances_},
columns=["Feature", "Importance"])
coef_df = coef_df.sort_values('Importance',ascending=False)
coef_df
fig, ax = plt.subplots()
sns.barplot(x="Importance", y="Feature", data=coef_df.head(20),
label="Importance", color="b",orient='h')
plt.title("XGB Feature Importances for {}".format(target))
plt.show()
def residual_plot(self,model, X_test, y_test,target):
pred = model.predict(X_test)
residuals = pd.Series(pred,index=X_test.index) - pd.Series(y_test[target])
fig, ax = plt.subplots()
ax.scatter(pred, residuals)
ax.plot([pred.min(), pred.max()], [0, 0], 'k--', lw=4)
ax.set_xlabel('Predicted')
ax.set_ylabel('Residuals')
plt.title("Residual Plot for {}".format(target))
plt.show()
def residual_density_plot(self,model, X_test, y_test,target):
sns.set_style('darkgrid')
pred = model.predict(X_test)
residuals = pd.Series(pred,index=X_test.index) - pd.Series(y_test[target])
sns.distplot(residuals)
plt.title("Residual Density Plot for {}".format(target))
plt.show()
def residual_variance_plot(self, model, X_test, y_test,target):
try:
pred = model.predict(X_test)
residuals = pd.Series(pred,index=X_test.index) - pd.Series(y_test[target])
y_temp = y_test.copy()
y_temp['pred'] = pred
y_temp['residuals'] = residuals
res_var = y_temp.groupby(pd.qcut(y_temp[target], 10))['residuals'].std()
res_var.index = [1,2,3,4,5,6,7,8,9,10]
res_var = res_var.reset_index()
ax = sns.lineplot(x="index", y="residuals", data=res_var)
plt.title("Residual Variance plot for {}".format(target))
plt.xlabel("Prediction Decile")
plt.ylabel("Residual Variance")
plt.show()
except:
pass
def residual_mean_plot(self, model, X_test, y_test,target):
sns.set_style('darkgrid')
try:
pred = model.predict(X_test)
residuals = pd.Series(pred,index=X_test.index) - pd.Series(y_test[target])
y_temp = y_test.copy()
y_temp['pred'] = pred
y_temp['residuals'] = residuals
res_var = y_temp.groupby(pd.qcut(y_temp['pred'], 10))['residuals'].mean()
res_var.index = [1,2,3,4,5,6,7,8,9,10]
res_var = res_var.reset_index()
ax = sns.lineplot(x="index", y="residuals", data=res_var)
plt.title("Residual Mean plot for {}".format(target))
plt.xlabel("Prediction Decile")
plt.ylabel("Residual Mean")
plt.show()
except:
pass
def PVA_plot(self,model, X_test, y_test, target):
sns.set_style('darkgrid')
try:
pred = model.predict(X_test)
residuals = pd.Series(pred,index=X_test.index) - pd.Series(y_test[target])
y_temp = y_test.copy()
y_temp['predicted'] = pred
y_temp['residuals'] = residuals
pva = y_temp.groupby(pd.qcut(y_temp['predicted'], 10))[target,'predicted'].mean()
pva.index = [1,2,3,4,5,6,7,8,9,10]
pva = pva.reset_index()
pva = pva.rename(columns={target: "actual"})
df = pva.melt('index', var_name='cols', value_name='vals')
sns.factorplot(x="index", y="vals", hue='cols', data=df,legend_out=False)
plt.title("Predicted v Actual Chart by Deciles for {}".format(target))
plt.xlabel("Prediction Decile")
plt.ylabel("{}".format(target))
plt.legend(loc='upper left')
plt.show()
except:
pass
def inverse_PVA_plot(self, model,X_test, y_test,target):
sns.set_style('darkgrid')
try:
pred = model.predict(X_test)
residuals = pd.Series(pred,index=X_test.index) - pd.Series(y_test[target])
y_temp = y_test.copy()
y_temp['predicted'] = pred
y_temp['residuals'] = residuals
pva = y_temp.groupby(pd.qcut(y_temp[target], 10))[target,'predicted'].mean()
pva.index = [1,2,3,4,5,6,7,8,9,10]
pva = pva.reset_index()
pva = pva.rename(columns={target: "actual"})
df = pva.melt('index', var_name='cols', value_name='vals')
sns.factorplot(x="index", y="vals", hue='cols', data=df,legend_out=False)
plt.title("Actual v Predicted Chart by Deciles for {}".format(target))
plt.xlabel("Actual Decile")
plt.ylabel("{}".format(target))
plt.legend(loc='upper left')
plt.show()
except:
pass
def identify_outliers(self, model, X_test, y_test,target):
master_df = pd.read_csv('data/processed/'+self.position_group+'/master_df.csv',index_col=0)
index_list = list(X_test.index)
master_df = master_df.iloc[index_list,:]
pred_df = pd.DataFrame(data = {'pred':model.predict(X_test),
'residuals':pd.Series(model.predict(X_test),index=X_test.index) - pd.Series(y_test[target])},index=X_test.index)
master_df = pd.merge(master_df, pred_df, left_index=True, right_index=True)
"""
This network uses the last n_lags (116) observations of gwl, tide, and rain to predict
the next n_ahead (18) values of gwl for well MMPS-175
"""
import pandas as pd
from pandas import DataFrame
from pandas import concat
from pandas import read_csv
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import MinMaxScaler
import tensorflow as tf
import keras
import keras.backend as K
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Dropout
from keras.layers import Activation
from math import sqrt
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
import random as rn
import os
matplotlib.rcParams.update({'font.size': 8})
# convert time series into supervised learning problem
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
n_vars = 1 if type(data) is list else data.shape[1]
df = DataFrame(data)
cols, names = list(), list()
# input sequence (t-n, ... t-1)
for i in range(n_in, 0, -1):
cols.append(df.shift(i))
names += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)]
# forecast sequence (t, t+1, ... t+n)
for i in range(0, n_out):
cols.append(df.shift(-i))
if i == 0:
names += [('var%d(t)' % (j+1)) for j in range(n_vars)]
else:
names += [('var%d(t+%d)' % (j+1, i)) for j in range(n_vars)]
# put it all together
agg = concat(cols, axis=1)
agg.columns = names
# drop rows with NaN values
if dropnan:
agg.dropna(inplace=True)
return agg
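# Hedged illustration (added for clarity; toy numbers only): with 5 timesteps of
# 2 features, n_in=2 and n_out=1 give 3 usable rows and 2*(2+1) = 6 columns
# (var1(t-2), var2(t-2), var1(t-1), var2(t-1), var1(t), var2(t)).
_toy_framed = series_to_supervised(np.arange(10, dtype=float).reshape(5, 2), n_in=2, n_out=1)
assert _toy_framed.shape == (3, 6)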
# def create_weights(train_labels):
# obs_mean = np.mean(train_labels, axis=-1)
# obs_mean = np.reshape(obs_mean, (n_batch, 1))
# obs_mean = np.repeat(obs_mean, n_ahead, axis=1)
# weights = (train_labels + obs_mean) / (2 * obs_mean)
# return weights
#
#
# def sq_err(y_true, y_pred):
# return K.square(y_pred - y_true)
#
#
def mse(y_true, y_pred):
return K.mean(K.square(y_pred - y_true), axis=-1)
def rmse(y_true, y_pred):
return K.sqrt(K.mean(K.square(y_pred - y_true), axis=-1))
def pw_rmse(y_true, y_pred):
# num_rows, num_cols = K.int_shape(y_true)[0], K.int_shape(y_true)[1]
# print(num_rows, num_cols)
act_mean = K.mean(y_true, axis=-1)
# print("act_mean 1 is:", act_mean)
act_mean = K.reshape(act_mean, (n_batch, 1))
# print("act_mean is: ", act_mean)
mean_repeat = K.repeat_elements(act_mean, n_ahead, axis=1)
# print("mean_repeat is:", mean_repeat)
weights = (y_true+mean_repeat)/(2*mean_repeat)
return K.sqrt(K.mean((K.square(y_pred - y_true)*weights), axis=-1))
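# Worked example of the peak weighting above (illustrative numbers, added for
# clarity): if the batch-mean observed GWL is 2.0 ft, an observation of 4.0 ft is
# weighted (4.0 + 2.0) / (2 * 2.0) = 1.5 while an observation of 1.0 ft is weighted
# (1.0 + 2.0) / (2 * 2.0) = 0.75, so errors around peaks count for more in the loss.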
# configure network
n_lags = 116
n_ahead = 18
n_features = 3
n_train = 52551
n_test = 8359
n_epochs = 500
n_neurons = 10
n_batch = 52551
# load dataset
dataset_raw = read_csv("C:/Users/<NAME>/Documents/HRSD GIS/Site Data/MMPS_175_no_blanks.csv",
index_col=None, parse_dates=True, infer_datetime_format=True)
# dataset_raw = dataset_raw[0:len(dataset_raw)-1]
# split datetime column into train and test for plots
train_dates = dataset_raw[['Datetime', 'GWL', 'Tide', 'Precip.']].iloc[:n_train]
test_dates = dataset_raw[['Datetime', 'GWL', 'Tide', 'Precip.']].iloc[n_train:]
test_dates = test_dates.reset_index(drop=True)
test_dates['Datetime'] = pd.to_datetime(test_dates['Datetime'])
# drop columns we don't want to predict
dataset = dataset_raw.drop(dataset_raw.columns[[0]], axis=1)
values = dataset.values
values = values.astype('float32')
gwl = values[:, 0]
gwl = gwl.reshape(gwl.shape[0], 1)
tide = values[:, 1]
tide = tide.reshape(tide.shape[0], 1)
rain = values[:, 2]
rain = rain.reshape(rain.shape[0], 1)
# normalize features with individual scalers
gwl_scaler, tide_scaler, rain_scaler = MinMaxScaler(), MinMaxScaler(), MinMaxScaler()
gwl_scaled = gwl_scaler.fit_transform(gwl)
tide_scaled = tide_scaler.fit_transform(tide)
rain_scaled = rain_scaler.fit_transform(rain)
scaled = np.concatenate((gwl_scaled, tide_scaled, rain_scaled), axis=1)
# frame as supervised learning
reframed = series_to_supervised(scaled, n_lags, n_ahead)
values = reframed.values
# split into train and test sets
train, test = values[:n_train, :], values[n_train:, :]
# split into input and outputs
input_cols, label_cols = [], []
for i in range(values.shape[1]):
if i <= n_lags*n_features-1:
input_cols.append(i)
elif i % 3 != 0:
input_cols.append(i)
elif i % 3 == 0:
label_cols.append(i)
train_X, train_y = train[:, input_cols], train[:, label_cols] # [start:stop:increment, (cols to include)]
test_X, test_y = test[:, input_cols], test[:, label_cols]
# reshape input to be 3D [samples, timesteps, features]
train_X = train_X.reshape((train_X.shape[0], 1, train_X.shape[1]))
test_X = test_X.reshape((test_X.shape[0], 1, test_X.shape[1]))
print(train_X.shape, train_y.shape, test_X.shape, test_y.shape)
#create weights for peak weighted rmse loss function
# weights = create_weights(train_y)
# load model here if needed
# model = keras.models.load_model("C:/Users/<NAME>/PycharmProjects/Tensorflow/keras_models/mmps175.h5",
# custom_objects={'pw_rmse':pw_rmse})
# set random seeds for model reproducibility as suggested in:
# https://keras.io/getting-started/faq/#how-can-i-obtain-reproducible-results-using-keras-during-development
os.environ['PYTHONHASHSEED'] = '0'
np.random.seed(42)
rn.seed(12345)
session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
tf.set_random_seed(1234)
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)
# define model
model = Sequential()
model.add(LSTM(units=n_neurons, input_shape=(None, train_X.shape[2])))
# model.add(LSTM(units=n_neurons, return_sequences=True, input_shape=(None, train_X.shape[2])))
# model.add(LSTM(units=n_neurons, return_sequences=True))
# model.add(LSTM(units=n_neurons))
model.add(Dropout(.1))
model.add(Dense(input_dim=n_neurons, activation='linear', units=n_ahead))
# model.add(Activation('linear'))
model.compile(loss=pw_rmse, optimizer='adam')
tbCallBack = keras.callbacks.TensorBoard(log_dir='C:/tmp/tensorflow/keras/logs', histogram_freq=0, write_graph=True,
write_images=False)
earlystop = keras.callbacks.EarlyStopping(monitor='loss', min_delta=0.0001, patience=5, verbose=1, mode='auto')
history = model.fit(train_X, train_y, batch_size=n_batch, epochs=n_epochs, verbose=2, shuffle=False,
callbacks=[earlystop, tbCallBack])
# save model
# model.save("C:/Users/<NAME>/PycharmProjects/Tensorflow/keras_models/mmps175.h5")
# plot model history
# plt.plot(history.history['loss'], label='train')
# # plt.plot(history.history['val_loss'], label='validate')
# # plt.legend()
# # ticks = np.arange(0, n_epochs, 1) # (start,stop,increment)
# # plt.xticks(ticks)
# plt.xlabel("Epochs")
# plt.ylabel("Loss")
# plt.tight_layout()
# plt.show()
# make predictions
trainPredict = model.predict(train_X)
yhat = model.predict(test_X)
inv_trainPredict = gwl_scaler.inverse_transform(trainPredict)
inv_yhat = gwl_scaler.inverse_transform(yhat)
inv_y = gwl_scaler.inverse_transform(test_y)
inv_train_y = gwl_scaler.inverse_transform(train_y)
# save test predictions and observed
inv_yhat_df = DataFrame(inv_yhat)
inv_yhat_df.to_csv("C:/Users/<NAME>/PycharmProjects/Tensorflow/mmps175_results/predicted.csv")
inv_y_df = DataFrame(inv_y)
inv_y_df.to_csv("C:/Users/<NAME>/PycharmProjects/Tensorflow/mmps175_results/observed.csv")
# calculate RMSE for whole test series (each forecast step)
RMSE_forecast = []
for i in np.arange(0, n_ahead, 1):
rmse = sqrt(mean_squared_error(inv_y[:, i], inv_yhat[:, i]))
RMSE_forecast.append(rmse)
RMSE_forecast = DataFrame(RMSE_forecast)
rmse_avg = sqrt(mean_squared_error(inv_y, inv_yhat))
print('Average Test RMSE: %.3f' % rmse_avg)
RMSE_forecast.to_csv("C:/Users/<NAME>/PycharmProjects/Tensorflow/mmps175_results/RMSE.csv")
# calculate RMSE for each individual time step
RMSE_timestep = []
for i in np.arange(0, inv_yhat.shape[0], 1):
rmse = sqrt(mean_squared_error(inv_y[i, :], inv_yhat[i, :]))
RMSE_timestep.append(rmse)
RMSE_timestep = DataFrame(RMSE_timestep)
# plot rmse vs forecast steps
plt.plot(RMSE_forecast, 'ko')
ticks = np.arange(0, n_ahead, 1) # (start,stop,increment)
plt.xticks(ticks)
plt.ylabel("RMSE (ft)")
plt.xlabel("Forecast Step")
plt.tight_layout()
plt.show()
# plot training predictions
plt.plot(inv_train_y[:, 0], label='actual')
plt.plot(inv_trainPredict[:, 0], label='predicted')
plt.xlabel("Timestep")
plt.ylabel("GWL (ft)")
plt.title("Training Predictions")
# ticks = np.arange(0, n_ahead, 1)
# plt.xticks(ticks)
plt.legend()
plt.tight_layout()
plt.show()
# plot test predictions for Hermine, Julia, and Matthew
dates = DataFrame(test_dates[["Datetime"]][n_lags:-n_ahead+1])
"""Module used for backtesting and trading via OANDA v20 REST API."""
__all__ = [
"find_instruments",
"get_price_data",
"Oanda",
"MAJORS",
"EXOTICS",
"FOREX",
"INDICES",
"COMMODITIES",
"METALS",
"BONDS",
"ALL_SYMBOLS",
"G10_USD",
"EM_USD",
"ALL_USD",
]
import configparser
from datetime import datetime
from functools import lru_cache
from inspect import getmembers, isfunction
import json
import logging
import os
import pickle
import time
from typing import Callable, Dict, List, Optional, Sequence, Tuple, Union
from urllib.parse import urlencode
import urllib.request as ur
import pandas as pd
try:
from pandas import json_normalize
except ImportError:
from pandas.io.json import json_normalize
from .helpers import swap_sign
from .utils import get_factor_data, combine_factors, get_performance, print_progress
Factor = Callable[..., Tuple[pd.DataFrame, Optional[Union[int, Sequence[float]]]]]
def _get_hostname_headers() -> Tuple[str, Dict[str, str]]:
"""Return the V20 REST server hostname and the header fields for HTTP requests."""
try:
hostname = os.environ["OANDA_HOSTNAME"]
token = os.environ["OANDA_TOKEN"]
except KeyError:
config = configparser.ConfigParser()
config_filepath = os.path.join(os.path.dirname(__file__), "config.ini")
try:
with open(config_filepath, "r") as config_file:
config.read_file(config_file)
hostname = config.get("oanda", "hostname")
token = config.get("oanda", "token")
except FileNotFoundError:
logger = logging.getLogger(__name__)
logger.error(
f"OANDA v20 REST API config file is not found. "
f"Please answer to generate it:"
)
account_type = input("- What is your account type? `Live` or `Practice`?\n")
if account_type.lower() in ["live", "l"]:
hostname = "https://api-fxtrade.oanda.com"
elif account_type.lower() in ["practice", "p"]:
hostname = "https://api-fxpractice.oanda.com"
else:
raise ValueError(f"Type `{account_type}` not available.")
token = input("- Provide your personal access token:\n")
config["oanda"] = {"hostname": hostname, "token": token}
with open(config_filepath, "w") as config_file:
config.write(config_file)
headers = {
"Authorization": f"Bearer {token}",
"Content-Type": "application/json",
"Connection": "Keep-Alive",
"AcceptDatetimeFormat": "RFC3339",
}
return hostname, headers
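# Hedged example of the generated config.ini (values are placeholders, not real
# credentials):
#   [oanda]
#   hostname = https://api-fxpractice.oanda.com
#   token = <your-personal-access-token>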
def find_instruments(symbol: str, universe: List[str]) -> List[str]:
"""Return the universe of instruments containing the given symbol."""
instruments = []
for instrument in universe:
base, quote = instrument.split("_")
if symbol in (base, quote):
instruments.append(instrument)
return instruments
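# Hedged example (illustrative universe; the MAJORS/FOREX lists themselves are
# defined further down in this module):
#   find_instruments("JPY", ["EUR_USD", "USD_JPY", "GBP_JPY"])  # -> ['USD_JPY', 'GBP_JPY']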
def get_price_data(
instruments: Sequence[str],
symbol: Optional[str] = None, # Run _arrange_price_data
save: bool = False, # Serialize price_data for faster retrieval
granularity: str = "D",
count: int = 500,
end: Union[str, float] = datetime.utcnow().timestamp(),
**kwargs, # See https://developer.oanda.com/rest-live-v20/instrument-ep/
) -> pd.DataFrame:
"""Return historical OHLCV candles."""
freq = {
"S5": "5S", # 5 second candlesticks, minute alignment
"S10": "10S", # 10 second candlesticks, minute alignment
"S15": "15S", # 15 second candlesticks, minute alignment
"S30": "30S", # 30 second candlesticks, minute alignment
"M1": "T", # 1 minute candlesticks, minute alignment
"M2": "2T", # 2 minute candlesticks, hour alignment
"M4": "4T", # 4 minute candlesticks, hour alignment
"M5": "5T", # 5 minute candlesticks, hour alignment
"M10": "10T", # 10 minute candlesticks, hour alignment
"M15": "15T", # 15 minute candlesticks, hour alignment
"M30": "30T", # 30 minute candlesticks, hour alignment
"H1": "H", # 1 hour candlesticks, hour alignment
"H2": "2H", # 2 hour candlesticks, day alignment
"H3": "3H", # 3 hour candlesticks, day alignment
"H4": "4H", # 4 hour candlesticks, day alignment
"H6": "6H", # 6 hour candlesticks, day alignment
"H8": "8H", # 8 hour candlesticks, day alignment
"H12": "12H", # 12 hour candlesticks, day alignment
"D": "B", # 1 day candlesticks, day alignment
"W": "W-MON", # 1 week candlesticks, aligned to start of week
}
granularity = granularity.upper()
if granularity not in freq:
raise ValueError(
f"Granularity `{granularity}` not available - "
f"choose from {list(freq.keys())}."
)
h = str(hash(f"{instruments} {symbol} {granularity} {count} {kwargs}"))
try:
with open(h + ".pickle", "rb") as f:
price_data = pickle.load(f)
except FileNotFoundError:
count_list = [5000] * (count // 5000)
if count % 5000 != 0:
count_list.append(count % 5000)
objs = []
prefix = "Collecting price data:"
start_time = time.time()
for i, instrument in enumerate(instruments):
print_progress(i, len(instruments), prefix, f"`{instrument}`")
if instrument not in ALL_SYMBOLS:
raise ValueError(f"Instrument `{instrument}` not available.")
to_time = end
responses = []
for c in count_list:
hostname, headers = _get_hostname_headers()
endpoint = f"/v3/instruments/{instrument}/candles"
params = {
"granularity": granularity,
"count": c,
"to": to_time,
**kwargs,
}
url = hostname + endpoint + "?" + urlencode(params)
req = ur.Request(url, headers=headers)
with ur.urlopen(req) as r:
df = json_normalize(json.loads(r.read()), "candles").set_index(
"time"
)
to_time = df.index[0]
df.index = pd.to_datetime(df.index, utc=True)
df.drop("complete", axis=1, inplace=True)
columns = {
**{c: "open" for c in df.columns if c.endswith(".o")},
**{c: "high" for c in df.columns if c.endswith(".h")},
**{c: "low" for c in df.columns if c.endswith(".l")},
**{c: "close" for c in df.columns if c.endswith(".c")},
}
df.rename(columns=columns, inplace=True)
df = df.resample(freq[granularity]).agg(
{
"open": "first",
"high": "max",
"low": "min",
"close": "last",
"volume": "sum",
}
)
df = df.astype(float)
responses.append(df)
time.sleep(0.1)
objs.append(pd.concat(responses).sort_index())
suffix = f"in {time.time() - start_time:.1f} s"
print_progress(len(instruments), len(instruments), prefix, suffix)
price_data = pd.concat(objs, axis=1, keys=instruments)
price_data = _arrange_price_data(price_data, symbol)
price_data = price_data.ffill().dropna()
price_data.index.freq = price_data.index.inferred_freq
if save:
with open(h + ".pickle", "wb") as f:
pickle.dump(price_data, f, protocol=pickle.HIGHEST_PROTOCOL)
return price_data
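# Hedged usage sketch (parameters are illustrative; a valid OANDA hostname/token is
# required for the underlying HTTP requests):
#   prices = get_price_data(["EUR_USD", "USD_JPY"], granularity="H1", count=1000)
#   prices["EUR_USD"]["close"].tail()  # columns are keyed by instrument, then OHLCV field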
def _arrange_price_data(price_data: pd.DataFrame, symbol: str) -> pd.DataFrame:
"""Arrange the instruments to be quoted in the given symbol."""
arranged = pd.DataFrame()
# ActivitySim
# See full license in LICENSE.txt.
import logging
import pandas as pd
import numpy as np
from activitysim.core import simulate
from activitysim.core import tracing
from activitysim.core import pipeline
from activitysim.core import config
from activitysim.core import inject
from activitysim.core import expressions
from .util import estimation
from .util.tour_frequency import process_atwork_subtours
logger = logging.getLogger(__name__)
def add_null_results(trace_label, tours):
logger.info("Skipping %s: add_null_results", trace_label)
tours['atwork_subtour_frequency'] = np.nan
pipeline.replace_table("tours", tours)
@inject.step()
def atwork_subtour_frequency(tours,
persons_merged,
chunk_size,
trace_hh_id):
"""
This model predicts the frequency of making at-work subtour tours
(alternatives for this model come from a separate csv file which is
configured by the user).
"""
trace_label = 'atwork_subtour_frequency'
model_settings_file_name = 'atwork_subtour_frequency.yaml'
tours = tours.to_frame()
work_tours = tours[tours.tour_type == 'work']
# - if no work_tours
if len(work_tours) == 0:
add_null_results(trace_label, tours)
return
model_settings = config.read_model_settings(model_settings_file_name)
estimator = estimation.manager.begin_estimation('atwork_subtour_frequency')
model_spec = simulate.read_model_spec(file_name=model_settings['SPEC'])
coefficients_df = simulate.read_model_coefficients(model_settings)
model_spec = simulate.eval_coefficients(model_spec, coefficients_df, estimator)
alternatives = simulate.read_model_alts('atwork_subtour_frequency_alternatives.csv', set_index='alt')
# merge persons into work_tours
persons_merged = persons_merged.to_frame()
work_tours = pd.merge(work_tours, persons_merged, left_on='person_id', right_index=True)
import numpy as np
import pytest
from pandas.core.dtypes.common import is_datetime64_dtype, is_timedelta64_dtype
from pandas.core.dtypes.dtypes import DatetimeTZDtype
import pandas as pd
from pandas import CategoricalIndex, Series, Timedelta, Timestamp
import pandas._testing as tm
from pandas.core.arrays import (
DatetimeArray,
IntervalArray,
PandasArray,
PeriodArray,
SparseArray,
TimedeltaArray,
)
class TestToIterable:
# test that we convert an iterable to python types
dtypes = [
("int8", int),
("int16", int),
("int32", int),
("int64", int),
("uint8", int),
("uint16", int),
("uint32", int),
("uint64", int),
("float16", float),
("float32", float),
("float64", float),
("datetime64[ns]", Timestamp),
("datetime64[ns, US/Eastern]", Timestamp),
("timedelta64[ns]", Timedelta),
]
@pytest.mark.parametrize("dtype, rdtype", dtypes)
@pytest.mark.parametrize(
"method",
[
lambda x: x.tolist(),
lambda x: x.to_list(),
lambda x: list(x),
lambda x: list(x.__iter__()),
],
ids=["tolist", "to_list", "list", "iter"],
)
@pytest.mark.filterwarnings("ignore:\\n Passing:FutureWarning")
# TODO(GH-24559): Remove the filterwarnings
def test_iterable(self, index_or_series, method, dtype, rdtype):
# gh-10904
# gh-13258
# coerce iteration to underlying python / pandas types
typ = index_or_series
s = typ([1], dtype=dtype)
result = method(s)[0]
assert isinstance(result, rdtype)
@pytest.mark.parametrize(
"dtype, rdtype, obj",
[
("object", object, "a"),
("object", int, 1),
("category", object, "a"),
("category", int, 1),
],
)
@pytest.mark.parametrize(
"method",
[
lambda x: x.tolist(),
lambda x: x.to_list(),
lambda x: list(x),
lambda x: list(x.__iter__()),
],
ids=["tolist", "to_list", "list", "iter"],
)
def test_iterable_object_and_category(
self, index_or_series, method, dtype, rdtype, obj
):
# gh-10904
# gh-13258
# coerce iteration to underlying python / pandas types
typ = index_or_series
s = typ([obj], dtype=dtype)
result = method(s)[0]
assert isinstance(result, rdtype)
@pytest.mark.parametrize("dtype, rdtype", dtypes)
def test_iterable_items(self, dtype, rdtype):
# gh-13258
# test if items yields the correct boxed scalars
# this only applies to series
s = Series([1], dtype=dtype)
_, result = list(s.items())[0]
assert isinstance(result, rdtype)
_, result = list(s.items())[0]
assert isinstance(result, rdtype)
@pytest.mark.parametrize(
"dtype, rdtype", dtypes + [("object", int), ("category", int)]
)
@pytest.mark.filterwarnings("ignore:\\n Passing:FutureWarning")
# TODO(GH-24559): Remove the filterwarnings
def test_iterable_map(self, index_or_series, dtype, rdtype):
# gh-13236
# coerce iteration to underlying python / pandas types
typ = index_or_series
s = typ([1], dtype=dtype)
result = s.map(type)[0]
if not isinstance(rdtype, tuple):
rdtype = tuple([rdtype])
assert result in rdtype
@pytest.mark.parametrize(
"method",
[
lambda x: x.tolist(),
lambda x: x.to_list(),
lambda x: list(x),
lambda x: list(x.__iter__()),
],
ids=["tolist", "to_list", "list", "iter"],
)
def test_categorial_datetimelike(self, method):
i = CategoricalIndex([Timestamp("1999-12-31"), Timestamp("2000-12-31")])
result = method(i)[0]
assert isinstance(result, Timestamp)
def test_iter_box(self):
vals = [Timestamp("2011-01-01"), Timestamp("2011-01-02")]
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2022/6/16 15:28
Desc: Eastmoney - Data Center - Featured Data - Qiangu Qianping (per-stock comments)
http://data.eastmoney.com/stockcomment/
"""
from datetime import datetime
import pandas as pd
import requests
from tqdm import tqdm
def stock_comment_em() -> pd.DataFrame:
"""
Eastmoney - Data Center - Featured Data - Qiangu Qianping (per-stock comments)
http://data.eastmoney.com/stockcomment/
:return: per-stock comment data
:rtype: pandas.DataFrame
"""
url = "https://datacenter-web.eastmoney.com/api/data/v1/get"
params = {
"sortColumns": "SECURITY_CODE",
"sortTypes": "1",
"pageSize": "500",
"pageNumber": "1",
"reportName": "RPT_DMSK_TS_STOCKNEW",
"quoteColumns": "f2~01~SECURITY_CODE~CLOSE_PRICE,f8~01~SECURITY_CODE~TURNOVERRATE,f3~01~SECURITY_CODE~CHANGE_RATE,f9~01~SECURITY_CODE~PE_DYNAMIC",
"columns": "ALL",
"filter": "",
"token": "<KEY>",
}
r = requests.get(url, params=params)
data_json = r.json()
total_page = data_json["result"]["pages"]
big_df = pd.DataFrame()
for page in tqdm(range(1, total_page + 1), leave=False):
params.update({"pageNumber": page})
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["result"]["data"])
big_df = pd.concat([big_df, temp_df], ignore_index=True)
big_df.reset_index(inplace=True)
big_df["index"] = big_df.index + 1
big_df.columns = [
"序号",
"-",
"代码",
"-",
"交易日",
"名称",
"-",
"-",
"-",
"最新价",
"涨跌幅",
"-",
"换手率",
"主力成本",
"市盈率",
"-",
"-",
"机构参与度",
"-",
"-",
"-",
"-",
"-",
"-",
"-",
"-",
"综合得分",
"上升",
"目前排名",
"关注指数",
"-",
]
big_df = big_df[
[
"序号",
"代码",
"名称",
"最新价",
"涨跌幅",
"换手率",
"市盈率",
"主力成本",
"机构参与度",
"综合得分",
"上升",
"目前排名",
"关注指数",
"交易日",
]
]
big_df["最新价"] = pd.to_numeric(big_df["最新价"], errors="coerce")
big_df["涨跌幅"] = pd.to_numeric(big_df["涨跌幅"], errors="coerce")
big_df["换手率"] = pd.to_numeric(big_df["换手率"], errors="coerce")
big_df["市盈率"] = pd.to_numeric(big_df["市盈率"], errors="coerce")
big_df["主力成本"] = pd.to_numeric(big_df["主力成本"], errors="coerce")
big_df["机构参与度"] = pd.to_numeric(big_df["机构参与度"], errors="coerce")
big_df["综合得分"] = pd.to_numeric(big_df["综合得分"], errors="coerce")
big_df["上升"] = pd.to_numeric(big_df["上升"], errors="coerce")
big_df["目前排名"] = pd.to_numeric(big_df["目前排名"], errors="coerce")
big_df["关注指数"] = pd.to_numeric(big_df["关注指数"], errors="coerce")
big_df["交易日"] = pd.to_datetime(big_df["交易日"]).dt.date
return big_df
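# Hedged usage sketch (network access to the Eastmoney endpoint is required):
#   stock_comment_em_df = stock_comment_em()
#   print(stock_comment_em_df.head())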
def stock_comment_detail_zlkp_jgcyd_em(symbol: str = "600000") -> pd.DataFrame:
"""
Eastmoney - Data Center - Featured Data - Qiangu Qianping - Main-force control - institutional participation
https://data.eastmoney.com/stockcomment/stock/600000.html
:param symbol: stock code
:type symbol: str
:return: institutional participation (main-force control)
:rtype: pandas.DataFrame
"""
url = f"https://datacenter-web.eastmoney.com/api/data/v1/get"
params = {
"reportName": "RPT_DMSK_TS_STOCKEVALUATE",
"filter": f'(SECURITY_CODE="{symbol}")',
"columns": "ALL",
"source": "WEB",
"client": "WEB",
"sortColumns": "TRADE_DATE",
"sortTypes": "-1",
"_": "1655387358195",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["result"]["data"])
temp_df = temp_df[["TRADE_DATE", "ORG_PARTICIPATE"]]
temp_df.columns = ["date", "value"]
temp_df["date"] = pd.to_datetime(temp_df["date"]).dt.date
temp_df.sort_values(["date"], inplace=True)
temp_df.reset_index(inplace=True, drop=True)
temp_df["value"] = pd.to_numeric(temp_df["value"]) * 100
return temp_df
def stock_comment_detail_zhpj_lspf_em(symbol: str = "600000") -> pd.DataFrame:
"""
Eastmoney - Data Center - Featured Data - Qiangu Qianping - Comprehensive evaluation - historical score
https://data.eastmoney.com/stockcomment/stock/600000.html
:param symbol: stock code
:type symbol: str
:return: comprehensive evaluation - historical score
:rtype: pandas.DataFrame
"""
url = f"https://data.eastmoney.com/stockcomment/api/{symbol}.json"
r = requests.get(url)
data_json = r.json()
temp_df = pd.DataFrame(
[
data_json["ApiResults"]["zhpj"]["HistoryScore"]["XData"],
data_json["ApiResults"]["zhpj"]["HistoryScore"]["Ydata"]["Score"],
data_json["ApiResults"]["zhpj"]["HistoryScore"]["Ydata"]["Price"],
]
).T
temp_df.columns = ["日期", "评分", "股价"]
temp_df["日期"] = str(datetime.now().year) + "-" + temp_df["日期"]
temp_df["日期"] = pd.to_datetime(temp_df["日期"]).dt.date
temp_df.sort_values(["日期"], inplace=True)
temp_df.reset_index(inplace=True, drop=True)
temp_df["评分"] = pd.t | o_numeric(temp_df["评分"]) | pandas.to_numeric |
# AUTOGENERATED! DO NOT EDIT! File to edit: notebooks/04_Create_Acs_Indicators_Original.ipynb (unless otherwise specified).
__all__ = ['racdiv', 'pasi', 'elheat', 'empl', 'fam', 'female', 'femhhs', 'heatgas', 'hh40inc', 'hh60inc', 'hh75inc',
'hhchpov', 'hhm75', 'hhpov', 'hhs', 'hsdipl', 'lesshs', 'male', 'nilf', 'othrcom', 'p2more', 'pubtran',
'age5', 'age24', 'age64', 'age18', 'age65', 'affordm', 'affordr', 'bahigher', 'carpool', 'drvalone',
'hh25inc', 'mhhi', 'nohhint', 'novhcl', 'paa', 'ppac', 'phisp', 'pwhite', 'sclemp', 'tpop', 'trav14',
'trav29', 'trav45', 'trav44', 'unempl', 'unempr', 'walked']
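# Hedged driver sketch (not part of the generated module): build a single
# CSA-level frame for one year from a few of the indicator functions exported
# above. Names are resolved lazily via globals() at call time, so this can sit
# ahead of the definitions below; it assumes the AcsDataClean/*.csv inputs
# referenced by each indicator exist.
def build_indicator_frame(year, indicators=('racdiv', 'pasi', 'elheat', 'hhpov')):
    import pandas as pd
    frame = pd.DataFrame()
    for name in indicators:
        fn = globals()[name]      # resolved only when this function is called
        frame[name] = fn(year)
    return frame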
# Cell
#File: racdiv.py
#Author: <NAME>
#Date: 4/16/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B02001 - Race
# Universe: Total Population
# Uses ACS Table B03002 - HISPANIC OR LATINO ORIGIN BY RACE
# Universe: Total Population
# Table Creates: racdiv, paa, pwhite, pasi, phisp, p2more, ppac
#purpose:
#input: Year
#output:
import pandas as pd
import glob
def racdiv( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
    # Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B02001*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
fileName = ''
for name in glob.glob('AcsDataClean/B03002*5y'+str(year)+'_est.csv'):
fileName = name
df_hisp = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
    # Group By CSA so that they may be operated on
df = df.groupby('CSA')
df_hisp = df_hisp.groupby('CSA')
# Aggregate Numeric Values by Sum
df = df.sum(numeric_only=True)
df_hisp = df_hisp.sum(numeric_only=True)
# Append the one column from the other ACS Table
df['B03002_012E_Total_Hispanic_or_Latino'] = df_hisp['B03002_012E_Total_Hispanic_or_Latino']
df1 = pd.DataFrame()
df1['CSA'] = df.index
df1.set_index('CSA', drop = True, inplace = True)
df1['African-American%'] = df[ 'B02001_003E_Total_Black_or_African_American_alone' ] / df[ 'B02001_001E_Total' ] * 100
df1['White%'] = df[ 'B02001_002E_Total_White_alone' ] / df[ 'B02001_001E_Total' ] * 100
df1['American Indian%'] = df[ 'B02001_004E_Total_American_Indian_and_Alaska_Native_alone' ]/ df[ 'B02001_001E_Total' ] * 100
df1['Asian%'] = df[ 'B02001_005E_Total_Asian_alone' ] / df[ 'B02001_001E_Total' ] * 100
df1['Native Hawaii/Pac Islander%'] = df[ 'B02001_006E_Total_Native_Hawaiian_and_Other_Pacific_Islander_alone'] / df[ 'B02001_001E_Total' ] * 100
df1['Hisp %'] = df['B03002_012E_Total_Hispanic_or_Latino'] / df[ 'B02001_001E_Total' ] * 100
    # =1-(POWER(%AA/100,2)+POWER(%White/100,2)+POWER(%AmerInd/100,2)+POWER(%Asian/100,2) + POWER(%NativeAm/100,2))*(POWER(%Hispanic/100,2) + POWER(1-(%Hispanic/100),2))
df1['Diversity_index'] = ( 1- (
( df1['African-American%'] /100 )**2
+( df1['White%'] /100 )**2
+( df1['American Indian%'] /100 )**2
+( df1['Asian%'] /100 )**2
+( df1['Native Hawaii/Pac Islander%'] /100 )**2
)*(
( df1['Hisp %'] /100 )**2
+(1-( df1['Hisp %'] /100) )**2
) ) * 100
return df1['Diversity_index']
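# Worked sanity check for the diversity index above, using made-up shares
# (not Baltimore data): 60% African-American, 30% White, 5% American Indian,
# 5% Asian, 0% NH/PI, and 10% Hispanic.
def _diversity_index_example():
    race_shares = [0.60, 0.30, 0.05, 0.05, 0.0]   # AA, White, AmInd, Asian, NH/PI
    hisp = 0.10
    index = (1 - sum(s ** 2 for s in race_shares)
             * (hisp ** 2 + (1 - hisp) ** 2)) * 100
    return index   # ~62.7 for these shares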
# Cell
#File: pasi.py
#Author: <NAME>
#Date: 4/16/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B03002 - HISPANIC OR LATINO ORIGIN BY RACE
# Universe: Total Population
# Table Creates: racdiv, paa, pwhite, pasi, phisp, p2more, ppac
#purpose:
#input: Year
#output:
import pandas as pd
import glob
def pasi( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
    # Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B03002*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
    # Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = df.sum(numeric_only=True)
df1 = pd.DataFrame()
df1['CSA'] = df.index
df1.set_index('CSA', drop = True, inplace = True)
tot = df[ 'B03002_001E_Total' ]
df1['Asian%NH'] = df[ 'B03002_006E_Total_Not_Hispanic_or_Latino_Asian_alone' ]/ tot * 100
return df1['Asian%NH']
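# Small illustration (toy data, not ACS output) of the fuzzy column lookup the
# generated functions share: getColName matches a column by the ACS variable
# code embedded in its long exported name.
def _column_helper_demo():
    import pandas as pd
    toy = pd.DataFrame({
        'B25040_001E_Total': [100, 200],
        'B25040_004E_Total_Electricity': [40, 120],
    })
    def getColName(df, col): return df.columns[df.columns.str.contains(pat=col)][0]
    assert getColName(toy, '004') == 'B25040_004E_Total_Electricity'
    return toy[getColName(toy, '004')] / toy[getColName(toy, '001')] * 100   # 40.0, 60.0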
# Cell
#File: elheat.py
#Author: <NAME>
#Date: 1/17/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B25040 - HOUSE HEATING FUEL
# Universe - Occupied housing units
# Table Creates: elheat, heatgas
#purpose: Produce Sustainability - Percent of Residences Heated by Electricity Indicator
#input: Year
#output:
import pandas as pd
import glob
def elheat( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
    # Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B25040*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
    # Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = sumInts(df)
# Add 'BALTIMORE' which is the SUM of all the CSAs
#~~~~~~~~~~~~~~~
# Step 2)
# Prepare the columns
#~~~~~~~~~~~~~~~
# Final Dataframe
fi = pd.DataFrame()
columns = ['B25040_004E','B25040_001E']
for col in columns:
fi = addKey(df, fi, col)
# Numerators
numerators = pd.DataFrame()
columns = ['B25040_004E']
for col in columns:
numerators = addKey(df, numerators, col)
# Denominators
denominators = pd.DataFrame()
columns = ['B25040_001E']
for col in columns:
denominators = addKey(df, denominators, col)
# construct the denominator, returns 0 iff the other two rows are equal.
#~~~~~~~~~~~~~~~
# Step 3)
# Run the Calculation + final mods
# ( value[1] / nullif(value[2],0) )*100
#~~~~~~~~~~~~~~~
fi['numerator'] = numerators.sum(axis=1)
fi['denominator'] = denominators.sum(axis=1)
fi = fi[fi['denominator'] != 0] # Delete Rows where the 'denominator' column is 0
fi['final'] = (fi['numerator'] / fi['denominator'] ) * 100
return fi['final']
"""
/* <elheat_14> */ --
WITH tbl AS (
select csa,
( value[1] / nullif(value[2],0) )*100::numeric as result
from vital_signs.get_acs_vars_csa_and_bc('2014',ARRAY['B25040_004E','B25040_001E'])
)
update vital_signs.data
set elheat = result from tbl where data2.csa = tbl.csa and update_data_year = '2014' and data_year = '2014';
"""
# Cell
#File: empl.py
#Author: <NAME>
#Date: 1/17/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B23001 - SEX BY AGE BY EMPLOYMENT STATUS FOR THE POPULATION 16 YEARS AND OVER
# Universe - Population 16 years and over
# Table Creates: empl, unempl, unempr, nilf
#purpose: Produce Workforce and Economic Development - Percent Population 16-64 Employed Indicator
#input: Year
#output:
import pandas as pd
import glob
def empl( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
    # Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B23001*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
    # Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = sumInts(df)
# Add 'BALTIMORE' which is the SUM of all the CSAs
#~~~~~~~~~~~~~~~
# Step 2)
# Prepare the columns
#~~~~~~~~~~~~~~~
# Final Dataframe
fi = pd.DataFrame()
columns = ['B23001_003E', 'B23001_010E', 'B23001_017E', 'B23001_024E', 'B23001_031E', 'B23001_038E', 'B23001_045E', 'B23001_052E', 'B23001_059E', 'B23001_066E', 'B23001_089E', 'B23001_096E', 'B23001_103E', 'B23001_110E', 'B23001_117E', 'B23001_124E', 'B23001_131E', 'B23001_138E', 'B23001_145E', 'B23001_152E', 'B23001_007E', 'B23001_014E', 'B23001_021E', 'B23001_028E', 'B23001_035E', 'B23001_042E', 'B23001_049E', 'B23001_056E', 'B23001_063E', 'B23001_070E', 'B23001_093E', 'B23001_100E', 'B23001_107E', 'B23001_114E', 'B23001_121E', 'B23001_128E', 'B23001_135E', 'B23001_142E', 'B23001_149E', 'B23001_156E']
for col in columns:
fi = addKey(df, fi, col)
# Numerators
numerators = pd.DataFrame()
columns = ['B23001_007E', 'B23001_014E', 'B23001_021E', 'B23001_028E', 'B23001_035E', 'B23001_042E', 'B23001_049E', 'B23001_056E', 'B23001_063E', 'B23001_070E', 'B23001_093E', 'B23001_100E', 'B23001_107E', 'B23001_114E', 'B23001_121E', 'B23001_128E', 'B23001_135E', 'B23001_142E', 'B23001_149E', 'B23001_156E']
for col in columns:
numerators = addKey(df, numerators, col)
# Denominators
denominators = pd.DataFrame()
columns = ['B23001_003E', 'B23001_010E', 'B23001_017E', 'B23001_024E', 'B23001_031E', 'B23001_038E', 'B23001_045E', 'B23001_052E', 'B23001_059E', 'B23001_066E', 'B23001_089E', 'B23001_096E', 'B23001_103E', 'B23001_110E', 'B23001_117E', 'B23001_124E', 'B23001_131E', 'B23001_138E', 'B23001_145E', 'B23001_152E']
for col in columns:
denominators = addKey(df, denominators, col)
# construct the denominator, returns 0 iff the other two rows are equal.
#~~~~~~~~~~~~~~~
# Step 3)
# Run the Calculation
# (value[21]+value[22]+value[23]+value[24]+value[25]+value[26]+value[27]+value[28]+value[29]+value[30]+value[31]+value[32]+value[33]+value[34]+value[35]+value[36]+value[37]+value[38]+value[39]+value[40]) --civil labor force empl 16-64
#/
#nullif( (value[1]+value[2]+value[3]+value[4]+value[5]+value[6]+value[7]+value[8]+value[9]+value[10]+value[11]+value[12]+value[13]+value[14]+value[15]+value[16]+value[17]+value[18]+value[19]+value[20]) -- population 16 to 64 ,0) )*100
#~~~~~~~~~~~~~~~
fi['numerator'] = numerators.sum(axis=1)
fi['denominator'] = denominators.sum(axis=1)
fi = fi[fi['denominator'] != 0] # Delete Rows where the 'denominator' column is 0
fi['final'] = (fi['numerator'] / fi['denominator'] ) * 100
return fi['final']
"""
/* <empl_14> */ --
WITH tbl AS (
select csa,
( ( value[21]+value[22]+value[23]+value[24]+value[25]+value[26]+value[27]+value[28]+value[29]+value[30]+value[31]+value[32]+value[33]+value[34]+value[35]+value[36]+value[37]+value[38]+value[39]+value[40]) --civil labor force empl 16-64 / nullif( (value[1]+value[2]+value[3]+value[4]+value[5]+value[6]+value[7]+value[8]+value[9]+value[10]+value[11]+value[12]+value[13]+value[14]+value[15]+value[16]+value[17]+value[18]+value[19]+value[20]) -- population 16 to 64 ,0) )*100::numeric
as result
from vital_signs.get_acs_vars_csa_and_bc('2014',ARRAY[ 'B23001_003E','B23001_010E','B23001_017E','B23001_024E','B23001_031E','B23001_038E','B23001_045E','B23001_052E','B23001_059E','B23001_066E','B23001_089E','B23001_096E','B23001_103E','B23001_110E','B23001_117E','B23001_124E','B23001_131E','B23001_138E','B23001_145E','B23001_152E','B23001_007E','B23001_014E','B23001_021E','B23001_028E','B23001_035E','B23001_042E','B23001_049E','B23001_056E','B23001_063E','B23001_070E','B23001_093E','B23001_100E','B23001_107E','B23001_114E','B23001_121E','B23001_128E','B23001_135E','B23001_142E','B23001_149E','B23001_156E'])
)
update vital_signs.data
set empl = result from tbl where data2.csa = tbl.csa and update_data_year = '2014' and data_year = '2014';
"""
# Cell
#File: fam.py
#Author: <NAME>
#Date: 4/16/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B11005 - HOUSEHOLDS BY PRESENCE OF PEOPLE UNDER 18 YEARS BY HOUSEHOLD TYPE
# Universe: Households
# Table Creates: hhs, fam, femhhs
#purpose:
#input: Year
#output:
import pandas as pd
import glob
def fam( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
    # Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B11005*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
    # Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = df.sum(numeric_only=True)
# Delete Unassigned--Jail
df = df[df.index != 'Unassigned--Jail']
# Move Baltimore to Bottom
bc = df.loc[ 'Baltimore City' ]
df = df.drop( df.index[1] )
df.loc[ 'Baltimore City' ] = bc
df1 = pd.DataFrame()
df1['CSA'] = df.index
df1.set_index('CSA', drop = True, inplace = True)
# Actually produce the data
df1['total'] = df[ 'B11005_001E_Total' ]
df1['18Under'] = df[ 'B11005_002E_Total_Households_with_one_or_more_people_under_18_years' ] / df1['total'] * 100
return df1['18Under']
# Cell
#File: female.py
#Author: <NAME>
#Date: 4/16/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B01001 - SEX BY AGE
# Universe: Total population
# Table Creates: tpop, female, male, age5 age18 age24 age64 age65
#purpose:
#input: Year
#output:
import pandas as pd
import glob
def female( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
    # Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B01001*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
    # Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = df.sum(numeric_only=True)
# df.columns
total = df['B01001_001E_Total']
df1 = pd.DataFrame()
df1['CSA'] = df.index
df1.set_index('CSA', drop = True, inplace = True)
df1['onlyTheLadies'] = df[ 'B01001_026E_Total_Female' ]
return df1['onlyTheLadies']
# Cell
#File: femhhs.py
#Author: <NAME>
#Date: 4/16/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B11005 - HOUSEHOLDS BY PRESENCE OF PEOPLE UNDER 18 YEARS BY HOUSEHOLD TYPE
# Universe: Households
# Table Creates: male, hhs, fam, femhhs
#purpose:
#input: Year
#output:
import pandas as pd
import glob
def femhhs( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
    # Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B11005*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
    # Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = df.sum(numeric_only=True)
df1 = pd.DataFrame()
df1['CSA'] = df.index
df1.set_index('CSA', drop = True, inplace = True)
# DIFFERENCES IN TABLE NAMES EXIST BETWEEN 16 and 17. 17 has no comma.
rootStr = 'B11005_007E_Total_Households_with_one_or_more_people_under_18_years_Family_households_Other_family_Female_householder'
str16 = rootStr + ',_no_husband_present'
str17 = rootStr + '_no_husband_present'
str19 = rootStr + ',_no_spouse_present'
femhh = str17 if year == '17' else str19 if year == '19' else str16
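    # NOTE: the comparisons above expect `year` as a two-digit string ('16', '17',
    # '19', ...), matching the suffix in the glob pattern; an integer year would
    # silently fall through to the '16'-style column name.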
# Actually produce the data
df1['total'] = df[ 'B11005_001E_Total' ]
df1['18Under'] = df[ 'B11005_002E_Total_Households_with_one_or_more_people_under_18_years' ] / df1['total'] * 100
df1['FemaleHH'] = df[ femhh ] / df['B11005_002E_Total_Households_with_one_or_more_people_under_18_years'] * 100
df1['FamHHChildrenUnder18'] = df['B11005_003E_Total_Households_with_one_or_more_people_under_18_years_Family_households']
df1['FamHHChildrenOver18'] = df['B11005_012E_Total_Households_with_no_people_under_18_years_Family_households']
df1['FamHH'] = df1['FamHHChildrenOver18'] + df1['FamHHChildrenUnder18']
return df1['FemaleHH']
# Cell
#File: heatgas.py
#Author: <NAME>
#Date: 1/17/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B25040 - HOUSE HEATING FUEL
# Universe - Occupied housing units
# Table Creates: elheat, heatgas
#purpose: Produce Sustainability - Percent of Residences Heated by Electricity Indicator
#input: Year
#output:
import pandas as pd
import glob
def heatgas( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
    # Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B25040*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
    # Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = sumInts(df)
# Add 'BALTIMORE' which is the SUM of all the CSAs
#~~~~~~~~~~~~~~~
# Step 2)
# Prepare the columns
#~~~~~~~~~~~~~~~
# Final Dataframe
fi = pd.DataFrame()
columns = ['B25040_002E','B25040_001E']
for col in columns:
fi = addKey(df, fi, col)
# Numerators
numerators = pd.DataFrame()
columns = ['B25040_002E']
for col in columns:
numerators = addKey(df, numerators, col)
# Denominators
denominators = pd.DataFrame()
columns = ['B25040_001E']
for col in columns:
denominators = addKey(df, denominators, col)
# construct the denominator, returns 0 iff the other two rows are equal.
#~~~~~~~~~~~~~~~
# Step 3)
# Run the Calculation
# ( value[1] / nullif(value[2],0) )*100
#~~~~~~~~~~~~~~~
fi['numerator'] = numerators.sum(axis=1)
fi['denominator'] = denominators.sum(axis=1)
fi = fi[fi['denominator'] != 0] # Delete Rows where the 'denominator' column is 0
fi['final'] = (fi['numerator'] / fi['denominator'] ) * 100
return fi['final']
"""
/* <heatgas_14> */ --
WITH tbl AS (
select csa,
( value[1] / nullif(value[2],0) )*100::numeric as result
from vital_signs.get_acs_vars_csa_and_bc('2014',ARRAY['B25040_002E','B25040_001E'])
)
update vital_signs.data
set heatgas = result from tbl where data2.csa = tbl.csa and update_data_year = '2014' and data_year = '2014';
"""
# Cell
#File: hh40inc.py
#Author: <NAME>
#Date: 1/17/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B19001 - HOUSEHOLD INCOME
# HOUSEHOLD INCOME IN THE PAST 12 MONTHS (IN 2017 INFLATION-ADJUSTED DOLLARS)
# Table Creates: hh25 hh40 hh60 hh75 hhm75, mhhi
#purpose: Produce Household Income 25K-40K Indicator
#input: Year
#output:
import pandas as pd
import glob
def hh40inc( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
    # Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B19001*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
    # Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = sumInts(df)
# Add 'BALTIMORE' which is the SUM of all the CSAs
#~~~~~~~~~~~~~~~
# Step 2)
# Prepare the columns
#~~~~~~~~~~~~~~~
# val1.__class__.__name__
#
# create a new dataframe for giggles
fi = pd.DataFrame()
# append into that dataframe col 001
key = getColName(df, '001')
val = getColByName(df, '001')
fi[key] = val
# append into that dataframe col 006
key = getColName(df, '006')
val = getColByName(df, '006')
fi[key] = val
# append into that dataframe col 007
key = getColName(df, '007')
val = getColByName(df, '007')
fi[key] = val
# append into that dataframe col 008
key = getColName(df, '008')
val = getColByName(df, '008')
fi[key] = val
# Delete Rows where the 'denominator' column is 0 -> like the Jail
fi = fi[fi[fi.columns[0]] != 0]
#~~~~~~~~~~~~~~~
# Step 3)
# Run the Calculation
#~~~~~~~~~~~~~~~
return fi.apply(lambda x: ( ( x[fi.columns[1] ]+ x[fi.columns[2] ]+ x[fi.columns[3] ] ) / x[fi.columns[0]])*100, axis=1)
"""
/* hh40inc */ --
WITH tbl AS (
select csa,
( (value[1] + value[2] + value[3]) / value[4] )*100 as result
from vital_signs.get_acs_vars_csa_and_bc('2013',ARRAY['B19001_006E','B19001_007E','B19001_008E','B19001_001E'])
)
UPDATE vital_signs.data
set hh40inc = result from tbl where data.csa = tbl.csa and data_year = '2013';
"""
# Cell
#File: hh60inc.py
#Author: <NAME>
#Date: 1/17/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B19001 - HOUSEHOLD INCOME
# HOUSEHOLD INCOME IN THE PAST 12 MONTHS (IN 2017 INFLATION-ADJUSTED DOLLARS)
# Table Creates: hh25 hh40 hh60 hh75 hhm75, mhhi
#purpose: Produce Household Income 40-60K Indicator
#input: Year
#output:
import pandas as pd
import glob
def hh60inc( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
    # Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B19001*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
    # Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = sumInts(df)
# Add 'BALTIMORE' which is the SUM of all the CSAs
#~~~~~~~~~~~~~~~
# Step 2)
# Prepare the columns
#~~~~~~~~~~~~~~~
# val1.__class__.__name__
#
# create a new dataframe for giggles
fi = pd.DataFrame()
# append into that dataframe col 001
key = getColName(df, '001')
val = getColByName(df, '001')
fi[key] = val
# append into that dataframe col 009
key = getColName(df, '009')
val = getColByName(df, '009')
fi[key] = val
# append into that dataframe col 010
key = getColName(df, '010')
val = getColByName(df, '010')
fi[key] = val
# append into that dataframe col 011
key = getColName(df, '011')
val = getColByName(df, '011')
fi[key] = val
# Delete Rows where the 'denominator' column is 0 -> like the Jail
fi = fi[fi[fi.columns[0]] != 0]
#~~~~~~~~~~~~~~~
# Step 3)
# Run the Calculation
#~~~~~~~~~~~~~~~
return fi.apply(lambda x: ( ( x[fi.columns[1] ]+ x[fi.columns[2] ]+ x[fi.columns[3] ] ) / x[fi.columns[0]])*100, axis=1)
"""
/* hh60inc */ --
WITH tbl AS (
select csa,
( (value[1] + value[2] + value[3]) / value[4] )*100 as result
from vital_signs.get_acs_vars_csa_and_bc('2013',ARRAY['B19001_009E','B19001_010E','B19001_011E','B19001_001E'])
)
UPDATE vital_signs.data
set hh60inc = result from tbl where data.csa = tbl.csa and data_year = '2013';
"""
# Cell
#File: hh75inc.py
#Author: <NAME>
#Date: 1/17/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B19001 - HOUSEHOLD INCOME
# HOUSEHOLD INCOME IN THE PAST 12 MONTHS (IN 2017 INFLATION-ADJUSTED DOLLARS)
# Table Creates: hh25 hh40 hh60 hh75 hhm75, mhhi
#purpose: Produce Household Income 60-75K Indicator
#input: Year
#output:
import pandas as pd
import glob
def hh75inc( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
    # Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B19001*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
    # Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = sumInts(df)
# Add 'BALTIMORE' which is the SUM of all the CSAs
#~~~~~~~~~~~~~~~
# Step 2)
# Prepare the columns
#~~~~~~~~~~~~~~~
# val1.__class__.__name__
#
# create a new dataframe for giggles
fi = pd.DataFrame()
# append into that dataframe col 001
key = getColName(df, '001')
val = getColByName(df, '001')
fi[key] = val
# append into that dataframe col 012
key = getColName(df, '012')
val = getColByName(df, '012')
fi[key] = val
# Delete Rows where the 'denominator' column is 0 -> like the Jail
fi = fi[fi[fi.columns[0]] != 0]
#~~~~~~~~~~~~~~~
# Step 3)
# Run the Calculation
#~~~~~~~~~~~~~~~
#12/1
return fi.apply(lambda x: ( x[fi.columns[1] ] / x[fi.columns[0]])*100, axis=1)
"""
/* hh75inc */ --
WITH tbl AS (
select csa,
( value[1] / value[2] )*100 as result
from vital_signs.get_acs_vars_csa_and_bc('2013',ARRAY['B19001_012E','B19001_001E'])
)
UPDATE vital_signs.data
set hh75inc = result from tbl where data.csa = tbl.csa and data_year = '2013';
"""
# Cell
#File: hhchpov.py
#Author: <NAME>
#Date: 1/17/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B17001 - POVERTY STATUS IN THE PAST 12 MONTHS BY SEX BY AGE
# Universe: Population for whom poverty status is determined more information
#purpose: Produce Household Poverty Indicator
#input: Year
#output:
import pandas as pd
import glob
def hhchpov( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
    # Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B17001*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
    # Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = sumInts(df)
# Add 'BALTIMORE' which is the SUM of all the CSAs
#~~~~~~~~~~~~~~~
# Step 2)
# Prepare the columns
#~~~~~~~~~~~~~~~
# Final Dataframe
fi = pd.DataFrame()
columns = ['B17001_004E', 'B17001_005E', 'B17001_006E', 'B17001_007E', 'B17001_008E', 'B17001_009E', 'B17001_018E', 'B17001_019E', 'B17001_020E', 'B17001_021E', 'B17001_022E', 'B17001_023E', 'B17001_033E', 'B17001_034E', 'B17001_035E', 'B17001_036E', 'B17001_037E', 'B17001_038E', 'B17001_047E', 'B17001_048E', 'B17001_049E', 'B17001_050E', 'B17001_051E', 'B17001_052E']
for col in columns:
fi = addKey(df, fi, col)
# Numerators
numerators = pd.DataFrame()
columns = ['B17001_004E', 'B17001_005E', 'B17001_006E', 'B17001_007E', 'B17001_008E', 'B17001_009E', 'B17001_018E', 'B17001_019E', 'B17001_020E', 'B17001_021E', 'B17001_022E', 'B17001_023E']
for col in columns:
numerators = addKey(df, numerators, col)
# Denominators
denominators = pd.DataFrame()
columns = ['B17001_004E', 'B17001_005E', 'B17001_006E', 'B17001_007E', 'B17001_008E', 'B17001_009E', 'B17001_018E', 'B17001_019E', 'B17001_020E', 'B17001_021E', 'B17001_022E', 'B17001_023E', 'B17001_033E', 'B17001_034E', 'B17001_035E', 'B17001_036E', 'B17001_037E', 'B17001_038E', 'B17001_047E', 'B17001_048E', 'B17001_049E', 'B17001_050E', 'B17001_051E', 'B17001_052E']
for col in columns:
denominators = addKey(df, denominators, col)
#~~~~~~~~~~~~~~~
# Step 3)
# Run the Calculation
#~~~~~~~~~~~~~~~
fi['numerator'] = numerators.sum(axis=1)
fi['denominator'] = denominators.sum(axis=1)
fi = fi[fi['denominator'] != 0] #Delete Rows where the 'denominator' column is 0
fi['final'] = (fi['numerator'] / fi['denominator'] ) * 100
#~~~~~~~~~~~~~~~
# Step 4)
# Add Special Baltimore City Data
#~~~~~~~~~~~~~~~
url = 'https://api.census.gov/data/20'+str(year)+'/acs/acs5/subject?get=NAME,S1701_C03_002E&for=county%3A510&in=state%3A24&key=<KEY>'
table = pd.read_json(url, orient='records')
fi['final']['Baltimore City'] = float(table.loc[1, table.columns[1]])
return fi['final']
"""
/* <hhchpov_14> */
WITH tbl AS (
select csa,
( (value[1] + value[2] + value[3] + value[4] + value[5] + value[6] + value[7] + value[8] + value[9] + value[10] + value[11] + value[12])
/ nullif(
(value[1] + value[2] + value[3] + value[4] + value[5] + value[6] + value[7] + value[8] + value[9] + value[10] + value[11] + value[12] + value[13] + value[14] + value[15] + value[16] + value[17] + value[18] + value[19] + value[20] + value[21] + value[22] + value[23] + value[24] ),
0)
) * 100::numeric as result
from vital_signs.get_acs_vars_csa_and_bc('2014',ARRAY['B17001_004E','B17001_005E','B17001_006E','B17001_007E','B17001_008E','B17001_009E','B17001_018E','B17001_019E','B17001_020E','B17001_021E','B17001_022E','B17001_023E','B17001_033E','B17001_034E','B17001_035E','B17001_036E','B17001_037E','B17001_038E','B17001_047E','B17001_048E','B17001_049E','B17001_050E','B17001_051E','B17001_052E'])
)
update vital_signs.data
set hhchpov = result from tbl where data2.csa = tbl.csa and update_data_year = '2014' and data_year = '2014';
"""
# Cell
#File: hhm75.py
#Author: <NAME>
#Date: 1/17/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B19001 - HOUSEHOLD INCOME
# HOUSEHOLD INCOME IN THE PAST 12 MONTHS (IN 2017 INFLATION-ADJUSTED DOLLARS)
# Table Creates: hh25 hh40 hh60 hh75 hhm75, mhhi
#purpose: Produce Household Income Over 75K Indicator
#input: Year
#output:
import pandas as pd
import glob
def hhm75( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
    # Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B19001*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
    # Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = sumInts(df)
# Add 'BALTIMORE' which is the SUM of all the CSAs
#~~~~~~~~~~~~~~~
# Step 2)
# Prepare the columns
#~~~~~~~~~~~~~~~
# val1.__class__.__name__
#
# create a new dataframe for giggles
fi = pd.DataFrame()
# append into that dataframe col 001
key = getColName(df, '001')
val = getColByName(df, '001')
fi[key] = val
# append into that dataframe col 002
key = getColName(df, '002')
val = getColByName(df, '002')
fi[key] = val
# append into that dataframe col 003
key = getColName(df, '003')
val = getColByName(df, '003')
fi[key] = val
# append into that dataframe col 004
key = getColName(df, '004')
val = getColByName(df, '004')
fi[key] = val
# append into that dataframe col 005
key = getColName(df, '005')
val = getColByName(df, '005')
fi[key] = val
# append into that dataframe col 006
key = getColName(df, '006')
val = getColByName(df, '006')
fi[key] = val
# append into that dataframe col 007
key = getColName(df, '007')
val = getColByName(df, '007')
fi[key] = val
# append into that dataframe col 008
key = getColName(df, '008')
val = getColByName(df, '008')
fi[key] = val
# append into that dataframe col 009
key = getColName(df, '009')
val = getColByName(df, '009')
fi[key] = val
# append into that dataframe col 010
key = getColName(df, '010')
val = getColByName(df, '010')
fi[key] = val
# append into that dataframe col 011
key = getColName(df, '011')
val = getColByName(df, '011')
fi[key] = val
# append into that dataframe col 012
key = getColName(df, '012')
val = getColByName(df, '012')
fi[key] = val
# Delete Rows where the 'denominator' column is 0 -> like the Jail
fi = fi[fi[fi.columns[0]] != 0]
#~~~~~~~~~~~~~~~
# Step 3)
# Run the Calculation
#~~~~~~~~~~~~~~~
return fi.apply(lambda x: ( ( x[fi.columns[0]]-( x[fi.columns[1] ]+ x[fi.columns[2] ]+ x[fi.columns[3] ]+ x[fi.columns[4] ]+ x[fi.columns[5] ]+ x[fi.columns[6] ]+ x[fi.columns[7] ]+ x[fi.columns[8] ]+ x[fi.columns[9] ]+ x[fi.columns[10] ]+ x[fi.columns[11] ] ) ) / x[fi.columns[0]])*100, axis=1)
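# Quick numeric check of the "total minus every bracket under $75k" formula in
# hhm75 above (toy numbers, not ACS data): 1,000 households with 800 falling in
# columns 002-012 leaves 200 households at $75,000 or more, i.e. 20%.
def _hhm75_toy_check():
    total, under_75k = 1000, 800
    return (total - under_75k) / total * 100        # -> 20.0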
# Cell
#File: hhpov.py
#Author: <NAME>
#Date: 1/17/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B17017 - Household Poverty
# Poverty Status in the Past 12 Months by Household Type by Age of Householder (Universe = households)
#purpose: Produce Household Poverty Indicator
#input: Year
#output:
import pandas as pd
import glob
def hhpov( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
    # Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B17017*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
    # Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = sumInts(df)
# Add 'BALTIMORE' which is the SUM of all the CSAs
#~~~~~~~~~~~~~~~
# Step 2)
# Prepare the columns
#~~~~~~~~~~~~~~~
# create a new dataframe for giggles
fi = pd.DataFrame()
# append into that dataframe col 003
key = getColName(df, '003')
val = getColByName(df, '003')
fi[key] = val
# append into that dataframe col 032
key = getColName(df, '032')
val = getColByName(df, '032')
fi[key] = val
    # construct the denominator: households below poverty (003) plus at or above poverty (032)
fi['denominator'] = nullIfEqual( df, '003', '032')
# Delete Rows where the 'denominator' column is 0
fi = fi[fi['denominator'] != 0]
#~~~~~~~~~~~~~~~
# Step 3)
# Run the Calculation
#~~~~~~~~~~~~~~~
return fi.apply(lambda x: (x[fi.columns[0]] / x['denominator'])*100, axis=1)
# Cell
#File: hhs.py
#Author: <NAME>
#Date: 4/16/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B11005 - HOUSEHOLDS BY PRESENCE OF PEOPLE UNDER 18 YEARS BY HOUSEHOLD TYPE
# Universe: Households
# Table Creates: hhs, fam, femhhs
#purpose:
#input: Year
#output:
import pandas as pd
import glob
def hhs( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
    # Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B11005*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
    # Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = df.sum(numeric_only=True)
df1 = pd.DataFrame()
df1['CSA'] = df.index
df1.set_index('CSA', drop = True, inplace = True)
df1['tot'] = df[ 'B11005_001E_Total' ]
return df1['tot']
# Cell
#File: hsdipl.py
#Author: <NAME>
#Date: 1/17/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B06009 - PLACE OF BIRTH BY EDUCATIONAL ATTAINMENT IN THE UNITED STATES
#purpose: Produce Workforce and Economic Development - Percent Population (25 Years and over) With High School Diploma and Some College or Associates Degree
#Table Uses: B06009 - lesshs, hsdipl, bahigher
#input: Year
#output:
import pandas as pd
import glob
def hsdipl( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
    # Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B06009*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
    # Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = sumInts(df)
# Add 'BALTIMORE' which is the SUM of all the CSAs
#~~~~~~~~~~~~~~~
# Step 2)
# Prepare the columns
#~~~~~~~~~~~~~~~
# Final Dataframe
fi = pd.DataFrame()
columns = ['B06009_003E','B06009_004E','B06009_001E']
for col in columns:
fi = addKey(df, fi, col)
# Numerators
numerators = pd.DataFrame()
columns = ['B06009_003E','B06009_004E']
for col in columns:
numerators = addKey(df, numerators, col)
# Denominators
denominators = pd.DataFrame()
columns = ['B06009_001E']
for col in columns:
denominators = addKey(df, denominators, col)
# construct the denominator, returns 0 iff the other two rows are equal.
#~~~~~~~~~~~~~~~
# Step 3)
# Run the Calculation + final mods
# ( ( value[1] + value[2] ) / nullif(value[3],0) )*100
#~~~~~~~~~~~~~~~
fi['numerator'] = numerators.sum(axis=1)
fi['denominator'] = denominators.sum(axis=1)
fi = fi[fi['denominator'] != 0] # Delete Rows where the 'denominator' column is 0
fi['final'] = (fi['numerator'] / fi['denominator'] ) * 100
return fi['final']
"""
/* <hsdipl_14> */ --
WITH tbl AS (
select csa,
( ( value[1] + value[2] ) / nullif(value[3],0) )*100::numeric as result
from vital_signs.get_acs_vars_csa_and_bc('2014',ARRAY['B06009_003E','B06009_004E','B06009_001E'])
)
update vital_signs.data
set hsdipl = result from tbl where data2.csa = tbl.csa and update_data_year = '2014' and data_year = '2014';
"""
# Cell
#File: lesshs.py
#Author: <NAME>
#Date: 1/17/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B06009 - PLACE OF BIRTH BY EDUCATIONAL ATTAINMENT IN THE UNITED STATES
#purpose: Produce Workforce and Economic Development - Percent Population (25 Years and over) With Less Than a High School Diploma or GED Indicator
#Table Uses: B06009 - lesshs, hsdipl, bahigher
#input: Year
#output:
import pandas as pd
import glob
def lesshs( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
    # Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B06009*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
    # Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = sumInts(df)
# Add 'BALTIMORE' which is the SUM of all the CSAs
#~~~~~~~~~~~~~~~
# Step 2)
# Prepare the columns
#~~~~~~~~~~~~~~~
# Final Dataframe
fi = pd.DataFrame()
columns = ['B06009_002E','B06009_001E']
for col in columns:
fi = addKey(df, fi, col)
# Numerators
numerators = pd.DataFrame()
columns = ['B06009_002E']
for col in columns:
numerators = addKey(df, numerators, col)
# Denominators
denominators = pd.DataFrame()
columns = ['B06009_001E']
for col in columns:
denominators = addKey(df, denominators, col)
# construct the denominator, returns 0 iff the other two rows are equal.
#~~~~~~~~~~~~~~~
# Step 3)
# Run the Calculation + final mods
# ( value[1] / nullif(value[2],0) )*100
#~~~~~~~~~~~~~~~
fi['numerator'] = numerators.sum(axis=1)
fi['denominator'] = denominators.sum(axis=1)
fi = fi[fi['denominator'] != 0] # Delete Rows where the 'denominator' column is 0
fi['final'] = (fi['numerator'] / fi['denominator'] ) * 100
return fi['final']
"""
/* <lesshs_14> */ --
WITH tbl AS (
select csa,
( value[1] / nullif(value[2],0) )*100::numeric as result
from vital_signs.get_acs_vars_csa_and_bc('2014',ARRAY['B06009_002E','B06009_001E'])
)
update vital_signs.data
set lesshs = result from tbl where data2.csa = tbl.csa and update_data_year = '2014' and data_year = '2014';
"""
# Cell
#File: male.py
#Author: <NAME>
#Date: 4/16/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B01001 - SEX BY AGE
# Universe: Total population
# Table Creates: tpop, female, male, age5 age18 age24 age64 age65
#purpose:
#input: Year
#output:
import pandas as pd
import glob
def male( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
    # Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B01001*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
    # Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = df.sum(numeric_only=True)
# df.columns
total = df['B01001_001E_Total']
df1 = pd.DataFrame()
df1['CSA'] = df.index
df1.set_index('CSA', drop = True, inplace = True)
df1['onlyTheFellas'] = df[ 'B01001_002E_Total_Male' ]
return df1['onlyTheFellas']
# Cell
#File: nilf.py
#Author: <NAME>
#Date: 1/17/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B23001 - SEX BY AGE BY EMPLOYMENT STATUS FOR THE POPULATION 16 YEARS AND OVER
# Universe - Population 16 years and over
# Table Creates: empl, unempl, unempr, nilf
#purpose: Produce Workforce and Economic Development - Percent Population 16-64 Not in Labor Force Indicator
#input: Year
#output:
import pandas as pd
import glob
def nilf( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
    # Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B23001*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
    # Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = sumInts(df)
# Add 'BALTIMORE' which is the SUM of all the CSAs
#~~~~~~~~~~~~~~~
# Step 2)
# Prepare the columns
#~~~~~~~~~~~~~~~
# Final Dataframe
fi = pd.DataFrame()
columns = ['B23001_003E', 'B23001_010E', 'B23001_017E', 'B23001_024E', 'B23001_031E', 'B23001_038E', 'B23001_045E', 'B23001_052E', 'B23001_059E', 'B23001_066E', 'B23001_089E', 'B23001_096E', 'B23001_103E', 'B23001_110E', 'B23001_117E', 'B23001_124E', 'B23001_131E', 'B23001_138E', 'B23001_145E', 'B23001_152E', 'B23001_009E', 'B23001_016E', 'B23001_023E', 'B23001_030E', 'B23001_037E', 'B23001_044E', 'B23001_051E', 'B23001_058E', 'B23001_065E', 'B23001_072E', 'B23001_095E', 'B23001_102E', 'B23001_109E', 'B23001_116E', 'B23001_123E', 'B23001_130E', 'B23001_137E', 'B23001_144E', 'B23001_151E', 'B23001_158E']
for col in columns:
fi = addKey(df, fi, col)
# Numerators
numerators = pd.DataFrame()
columns = ['B23001_009E', 'B23001_016E', 'B23001_023E', 'B23001_030E', 'B23001_037E', 'B23001_044E', 'B23001_051E', 'B23001_058E', 'B23001_065E', 'B23001_072E', 'B23001_095E', 'B23001_102E', 'B23001_109E', 'B23001_116E', 'B23001_123E', 'B23001_130E', 'B23001_137E', 'B23001_144E', 'B23001_151E', 'B23001_158E']
for col in columns:
numerators = addKey(df, numerators, col)
# Denominators
denominators = pd.DataFrame()
columns = ['B23001_003E', 'B23001_010E', 'B23001_017E', 'B23001_024E', 'B23001_031E', 'B23001_038E', 'B23001_045E', 'B23001_052E', 'B23001_059E', 'B23001_066E', 'B23001_089E', 'B23001_096E', 'B23001_103E', 'B23001_110E', 'B23001_117E', 'B23001_124E', 'B23001_131E', 'B23001_138E', 'B23001_145E', 'B23001_152E']
for col in columns:
denominators = addKey(df, denominators, col)
# construct the denominator, returns 0 iff the other two rows are equal.
#~~~~~~~~~~~~~~~
# Step 3)
# Run the Calculation
# ( ( value[21]+value[22]+value[23]+value[24]+value[25]+value[26]+value[27]+value[28]+value[29]+value[30]+value[31]+value[32]+value[33]+value[34]+value[35]+value[36]+value[37]+value[38]+value[39]+value[40]) --not in labor force 16-64
# /
# nullif( (value[1]+value[2]+value[3]+value[4]+value[5]+value[6]+value[7]+value[8]+value[9]+value[10]+value[11]+value[12]+value[13]+value[14]+value[15]+value[16]+value[17]+value[18]+value[19]+value[20]) -- population 16 to 64 ,0) )*100::numeric
#~~~~~~~~~~~~~~~
fi['numerator'] = numerators.sum(axis=1)
fi['denominator'] = denominators.sum(axis=1)
fi = fi[fi['denominator'] != 0] # Delete Rows where the 'denominator' column is 0
fi['final'] = (fi['numerator'] / fi['denominator'] ) * 100
return fi['final']
"""
/* <nilf_14> */ --
WITH tbl AS (
select csa,
( (value[21]+value[22]+value[23]+value[24]+value[25]+value[26]+value[27]+value[28]+value[29]+value[30]+value[31]+value[32]+value[33]+value[34]+value[35]+value[36]+value[37]+value[38]+value[39]+value[40]) --not in labor force 16-64 / nullif( (value[1]+value[2]+value[3]+value[4]+value[5]+value[6]+value[7]+value[8]+value[9]+value[10]+value[11]+value[12]+value[13]+value[14]+value[15]+value[16]+value[17]+value[18]+value[19]+value[20]) -- population 16 to 64 ,0) )*100::numeric
as result
from vital_signs.get_acs_vars_csa_and_bc('2014', ARRAY['B23001_003E','B23001_010E','B23001_017E','B23001_024E','B23001_031E','B23001_038E','B23001_045E','B23001_052E','B23001_059E','B23001_066E','B23001_089E','B23001_096E','B23001_103E','B23001_110E','B23001_117E','B23001_124E','B23001_131E','B23001_138E','B23001_145E','B23001_152E','B23001_009E','B23001_016E','B23001_023E','B23001_030E','B23001_037E','B23001_044E','B23001_051E','B23001_058E','B23001_065E','B23001_072E','B23001_095E','B23001_102E','B23001_109E','B23001_116E','B23001_123E','B23001_130E','B23001_137E','B23001_144E','B23001_151E','B23001_158E'])
)
update vital_signs.data
set nilf = result from tbl where data2.csa = tbl.csa and update_data_year = '2014' and data_year = '2014';
"""
# Cell
#File: othrcom.py
#Author: <NAME>
#Date: 1/24/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B08101 - MEANS OF TRANSPORTATION TO WORK BY AGE
# Universe: Workers 16 years and over
# Table Creates: othrcom, drvalone, carpool, pubtran, walked
#purpose: Produce Sustainability - Percent of Population Using Other Means to Commute to Work (Taxi, Motorcycle, Bicycle, Other) Indicator
#input: Year
#output:
import pandas as pd
import glob
def othrcom( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
    # Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B08101*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
    # Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = sumInts(df)
# Add 'BALTIMORE' which is the SUM of all the CSAs
#~~~~~~~~~~~~~~~
# Step 2)
# Prepare the columns
#~~~~~~~~~~~~~~~
# Final Dataframe
fi = pd.DataFrame()
columns = ['B08101_001E','B08101_049E','B08101_041E']
for col in columns:
fi = addKey(df, fi, col)
# Numerators
numerators = pd.DataFrame()
columns = ['B08101_041E']
for col in columns:
numerators = addKey(df, numerators, col)
# Denominators
denominators = pd.DataFrame()
columns = ['B08101_001E','B08101_049E']
for col in columns:
denominators = addKey(df, denominators, col)
# construct the denominator, returns 0 iff the other two rows are equal.
#~~~~~~~~~~~~~~~
# Step 3)
# Run the Calculation
# ( value[3] / nullif((value[1]-value[2]),0) )*100
#~~~~~~~~~~~~~~~
fi['numerator'] = numerators.sum(axis=1)
fi['denominator'] = denominators.iloc[: ,0] - denominators.iloc[: ,1]
fi = fi[fi['denominator'] != 0] # Delete Rows where the 'denominator' column is 0
fi['final'] = (fi['numerator'] / fi['denominator'] ) * 100
#~~~~~~~~~~~~~~~
# Step 4)
# Add Special Baltimore City Data
# 100- "6.7", "59.8", "9.2", "18.4", "3.7", = 2.2
# 100- (walked + drvalone + carpool + pubtran + workfromhome(13e))
#~~~~~~~~~~~~~~~
url = 'https://api.census.gov/data/20'+str(year)+'/acs/acs5/subject?get=NAME,S0801_C01_010E,S0801_C01_003E,S0801_C01_004E,S0801_C01_009E,S0801_C01_013E&for=county%3A510&in=state%3A24&key=<KEY>'
table = pd.read_json(url, orient='records')
walked = float(table.loc[1, table.columns[1]] )
drvalone = float(table.loc[1, table.columns[2]] )
carpool = float(table.loc[1, table.columns[3]] )
pubtran = float(table.loc[1, table.columns[4]] )
workfromhome = float(table.loc[1, table.columns[5]] )
fi['final']['Baltimore City'] = 100 - ( walked + drvalone + carpool + pubtran + workfromhome )
return fi['final']
"""
/* <othrcom_14> */ --
WITH tbl AS (
select csa,
( value[3] / nullif((value[1]-value[2]),0) )*100::numeric as result
from vital_signs.get_acs_vars_csa_and_bc('2014',ARRAY['B08101_001E','B08101_049E','B08101_041E'])
)
update vital_signs.data
set othrcom = result from tbl where data2.csa = tbl.csa and update_data_year = '2014' and data_year = '2014';
"""
# Cell
#File: p2more.py
#Author: <NAME>
#Date: 4/16/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B03002 - HISPANIC OR LATINO ORIGIN BY RACE
# Universe: Total Population
# Table Creates: racdiv, paa, pwhite, pasi, phisp, p2more, ppac
#purpose:
#input: Year
#output:
import pandas as pd
import glob
def p2more( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
# Fetch Tract Files w/CSA Lables by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B03002*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
# Group By CSA so that they may be opperated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = df.sum(numeric_only=True)
df1 = pd.DataFrame()
df1['CSA'] = df.index
df1.set_index('CSA', drop = True, inplace = True)
tot = df[ 'B03002_001E_Total' ]
df1['TwoOrMore%NH'] = df['B03002_009E_Total_Not_Hispanic_or_Latino_Two_or_more_races'] / tot * 100
return df1['TwoOrMore%NH']
# Cell
#File: pubtran.py
#Author: <NAME>
#Date: 1/17/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B08101 - MEANS OF TRANSPORTATION TO WORK BY AGE
# Universe: Workers 16 Years and Over
# Table Creates: othrcom, drvalone, carpool, pubtran, walked
#purpose: Produce Sustainability - Percent of Population that Uses Public Transportation to Get to Work Indicator
#input: Year
#output:
import pandas as pd
import glob
def pubtran( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
    # Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B08101*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
    # Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = sumInts(df)
# Add 'BALTIMORE' which is the SUM of all the CSAs
#~~~~~~~~~~~~~~~
# Step 2)
# Prepare the columns
#~~~~~~~~~~~~~~~
# Final Dataframe
fi = pd.DataFrame()
columns = ['B08101_001E','B08101_049E','B08101_025E']
for col in columns:
fi = addKey(df, fi, col)
# Numerators
numerators = pd.DataFrame()
columns = ['B08101_025E']
for col in columns:
numerators = addKey(df, numerators, col)
# Denominators
denominators = pd.DataFrame()
columns = ['B08101_001E','B08101_049E']
for col in columns:
denominators = addKey(df, denominators, col)
    # The denominator is constructed below; rows where it equals 0 are dropped.
#~~~~~~~~~~~~~~~
# Step 3)
# Run the Calculation
# ( value[3] / nullif((value[1]-value[2]),0) )*100
#~~~~~~~~~~~~~~~
fi['numerator'] = numerators.sum(axis=1)
fi['denominator'] = denominators.iloc[: ,0] - denominators.iloc[: ,1]
fi = fi[fi['denominator'] != 0] # Delete Rows where the 'denominator' column is 0
fi['final'] = (fi['numerator'] / fi['denominator'] ) * 100
#~~~~~~~~~~~~~~~
# Step 4)
# Add Special Baltimore City Data
#~~~~~~~~~~~~~~~
url = 'https://api.census.gov/data/20'+str(year)+'/acs/acs5/subject?get=NAME,S0801_C01_009E&for=county%3A510&in=state%3A24&key=<KEY>'
table = pd.read_json(url, orient='records')
fi['final']['Baltimore City'] = float(table.loc[1, table.columns[1]])
return fi['final']
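

def _sketch_guarded_share(df, numerator_col, total_col, exclude_col):
    """Minimal generic sketch (an assumption, not part of the original module) of the
    guarded-share calculation pubtran() performs above and the SQL below expresses with
    nullif: numerator / (total - excluded) * 100, with zero denominators dropped."""
    denominator = df[total_col] - df[exclude_col]
    share = (df[numerator_col] / denominator.where(denominator != 0)) * 100
    return share.dropna()
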
""" /* <pubtran_14> */ --
WITH tbl AS (
select csa,
( value[3] / nullif((value[1]-value[2]),0) )*100::numeric as result
from vital_signs.get_acs_vars_csa_and_bc('2014',ARRAY['B08101_001E','B08101_049E','B08101_025E'])
)
update vital_signs.data
set pubtran = result from tbl where data2.csa = tbl.csa and update_data_year = '2014' and data_year = '2014';
"""
# Cell
#File: age5.py
#Author: <NAME>
#Date: 4/16/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B01001 - SEX BY AGE
# Universe: Total population
# Table Creates: tpop, female, male, age5 age18 age24 age64 age65
#purpose:
#input: Year
#output:
import pandas as pd
import glob
def age5( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
    # Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B01001*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
    # Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = df.sum(numeric_only=True)
# df.columns
total = df['B01001_001E_Total']
df1 = pd.DataFrame()
df1['CSA'] = df.index
df1.set_index('CSA', drop = True, inplace = True)
# Under 5
df1['under_5'] = ( df[ 'B01001_003E_Total_Male_Under_5_years' ]
+ df[ 'B01001_027E_Total_Female_Under_5_years' ]
) / total * 100
return df1['under_5']
# Cell
#File: age24.py
#Author: <NAME>
#Date: 4/16/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B01001 - SEX BY AGE
# Universe: Total population
# Table Creates: tpop, female, male, age5 age18 age24 age64 age65
#purpose:
#input: Year
#output:
import pandas as pd
import glob
def age24( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
    # Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B01001*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
    # Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = df.sum(numeric_only=True)
# df.columns
total = df['B01001_001E_Total']
df1 = pd.DataFrame()
df1['CSA'] = df.index
df1.set_index('CSA', drop = True, inplace = True)
df1['eighteen_to_24'] = ( df[ 'B01001_007E_Total_Male_18_and_19_years' ]
+ df[ 'B01001_008E_Total_Male_20_years' ]
+ df[ 'B01001_009E_Total_Male_21_years' ]
+ df[ 'B01001_010E_Total_Male_22_to_24_years' ]
+ df[ 'B01001_031E_Total_Female_18_and_19_years' ]
+ df[ 'B01001_032E_Total_Female_20_years' ]
+ df[ 'B01001_033E_Total_Female_21_years' ]
+ df[ 'B01001_034E_Total_Female_22_to_24_years' ]
) / total * 100
return df1['eighteen_to_24']
# Cell
#File: age64.py
#Author: <NAME>
#Date: 4/16/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B01001 - SEX BY AGE
# Universe: Total population
# Table Creates: tpop, female, male, age5 age18 age24 age64 age65
#purpose:
#input: Year
#output:
import pandas as pd
import glob
def age64( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
    # Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B01001*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
    # Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = df.sum(numeric_only=True)
# df.columns
total = df['B01001_001E_Total']
df1 = pd.DataFrame()
df1['CSA'] = df.index
df1.set_index('CSA', drop = True, inplace = True)
df1['twentyfive_to_64'] = ( df[ 'B01001_011E_Total_Male_25_to_29_years' ]
+ df[ 'B01001_012E_Total_Male_30_to_34_years' ]
+ df[ 'B01001_013E_Total_Male_35_to_39_years' ]
+ df[ 'B01001_014E_Total_Male_40_to_44_years' ]
+ df[ 'B01001_015E_Total_Male_45_to_49_years' ]
+ df[ 'B01001_016E_Total_Male_50_to_54_years' ]
+ df[ 'B01001_017E_Total_Male_55_to_59_years' ]
+ df[ 'B01001_018E_Total_Male_60_and_61_years' ]
+ df[ 'B01001_019E_Total_Male_62_to_64_years' ]
+ df[ 'B01001_035E_Total_Female_25_to_29_years' ]
+ df[ 'B01001_036E_Total_Female_30_to_34_years' ]
+ df[ 'B01001_037E_Total_Female_35_to_39_years' ]
+ df[ 'B01001_038E_Total_Female_40_to_44_years' ]
+ df[ 'B01001_039E_Total_Female_45_to_49_years' ]
+ df[ 'B01001_040E_Total_Female_50_to_54_years' ]
+ df[ 'B01001_041E_Total_Female_55_to_59_years' ]
+ df[ 'B01001_042E_Total_Female_60_and_61_years' ]
+ df[ 'B01001_043E_Total_Female_62_to_64_years' ]
) / total * 100
return df1['twentyfive_to_64']
# Cell
#File: age18.py
#Author: <NAME>
#Date: 4/16/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B01001 - SEX BY AGE
# Universe: Total population
# Table Creates: tpop, female, male, age5 age18 age24 age64 age65
#purpose:
#input: Year
#output:
import pandas as pd
import glob
def age18( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
    # Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B01001*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
    # Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = df.sum(numeric_only=True)
# df.columns
total = df['B01001_001E_Total']
df1 = pd.DataFrame()
df1['CSA'] = df.index
df1.set_index('CSA', drop = True, inplace = True)
df1['five_to_17'] = ( df[ 'B01001_004E_Total_Male_5_to_9_years' ]
+ df[ 'B01001_005E_Total_Male_10_to_14_years' ]
+ df[ 'B01001_006E_Total_Male_15_to_17_years' ]
+ df[ 'B01001_028E_Total_Female_5_to_9_years' ]
+ df[ 'B01001_029E_Total_Female_10_to_14_years' ]
+ df[ 'B01001_030E_Total_Female_15_to_17_years' ]
) / total * 100
return df1['five_to_17']
# Cell
#File: age65.py
#Author: <NAME>
#Date: 4/16/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B01001 - SEX BY AGE
# Universe: Total population
# Table Creates: tpop, female, male, age5 age18 age24 age64 age65
#purpose:
#input: Year
#output:
import pandas as pd
import glob
def age65( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
    # Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B01001*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
    # Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = df.sum(numeric_only=True)
# df.columns
total = df['B01001_001E_Total']
df1 = pd.DataFrame()
df1['CSA'] = df.index
df1.set_index('CSA', drop = True, inplace = True)
df1['sixtyfive_and_up'] = ( df[ 'B01001_020E_Total_Male_65_and_66_years' ]
+ df[ 'B01001_021E_Total_Male_67_to_69_years' ]
+ df[ 'B01001_022E_Total_Male_70_to_74_years' ]
+ df[ 'B01001_023E_Total_Male_75_to_79_years' ]
+ df[ 'B01001_024E_Total_Male_80_to_84_years' ]
+ df[ 'B01001_025E_Total_Male_85_years_and_over' ]
+ df[ 'B01001_044E_Total_Female_65_and_66_years' ]
+ df[ 'B01001_045E_Total_Female_67_to_69_years' ]
+ df[ 'B01001_046E_Total_Female_70_to_74_years' ]
+ df[ 'B01001_047E_Total_Female_75_to_79_years' ]
+ df[ 'B01001_048E_Total_Female_80_to_84_years' ]
+ df[ 'B01001_049E_Total_Female_85_years_and_over' ]
) / total * 100
return df1['sixtyfive_and_up']
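

def _sketch_age_profile(year):
    """Illustrative helper (an assumption, not part of the original module): gather the
    five age-bracket indicators defined above into a single CSA-indexed DataFrame."""
    return pd.concat(
        {
            'age5': age5(year),
            'age18': age18(year),
            'age24': age24(year),
            'age64': age64(year),
            'age65': age65(year),
        },
        axis=1,
    )
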
# Cell
#File: affordm.py
#Author: <NAME>
#Date: 1/25/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B25091 - MORTGAGE STATUS BY SELECTED MONTHLY OWNER COSTS AS A PERCENTAGE OF HOUSEHOLD INCOME IN THE PAST 12 MONTHS
# Universe: Owner-occupied housing units
# Table Creates:
#purpose: Produce Housing and Community Development - Affordability Index - Mortgage Indicator
#input: Year
#output:
import pandas as pd
import glob
def affordm( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
    # Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B25091*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
    # Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = sumInts(df)
# Add 'BALTIMORE' which is the SUM of all the CSAs
#~~~~~~~~~~~~~~~
# Step 2)
# Prepare the columns
#~~~~~~~~~~~~~~~
# Final Dataframe
fi = pd.DataFrame()
columns = ['B25091_008E','B25091_009E','B25091_010E','B25091_011E','B25091_002E']
for col in columns:
fi = addKey(df, fi, col)
# Numerators
numerators = pd.DataFrame()
columns = ['B25091_008E','B25091_009E','B25091_010E','B25091_011E']
for col in columns:
numerators = addKey(df, numerators, col)
# Denominators
denominators = pd.DataFrame()
columns = ['B25091_002E']
for col in columns:
denominators = addKey(df, denominators, col)
    # The denominator is constructed below; rows where it equals 0 are dropped.
#~~~~~~~~~~~~~~~
# Step 3)
# Run the Calculation
# ( (value[1]+value[2]+value[3]+value[4]) / nullif(value[5],0) )*100
#~~~~~~~~~~~~~~~
fi['numerator'] = numerators.sum(axis=1)
fi['denominator'] = denominators.sum(axis=1)
fi = fi[fi['denominator'] != 0] # Delete Rows where the 'denominator' column is 0
fi['final'] = (fi['numerator'] / fi['denominator'] ) * 100
return fi['final']
"""
WITH tbl AS (
select csa,
( (value[1]+value[2]+value[3]+value[4]) / nullif(value[5],0) )*100::numeric as result
from vital_signs.get_acs_vars_csa_and_bc('2014',ARRAY['B25091_008E','B25091_009E','B25091_010E','B25091_011E','B25091_002E'])
)
update vital_signs.data
set affordm = result from tbl where data2.csa = tbl.csa and update_data_year = '2014' and data_year = '2014';
"""
# Cell
#File: affordr.py
#Author: <NAME>
#Date: 1/17/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B25070 - GROSS RENT AS A PERCENTAGE OF HOUSEHOLD INCOME IN THE PAST 12 MONTHS
# Universe: Renter-occupied housing units
#purpose: Produce Housing and Community Development - Affordability Index - Rent Indicator
#input: Year
#output:
import pandas as pd
import glob
def affordr( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
    # Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B25070*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
    # Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = sumInts(df)
# Add 'BALTIMORE' which is the SUM of all the CSAs
#~~~~~~~~~~~~~~~
# Step 2)
# Prepare the columns
#~~~~~~~~~~~~~~~
# Final Dataframe
fi = pd.DataFrame()
columns = ['B25070_007E','B25070_008E','B25070_009E','B25070_010E','B25070_001E']
for col in columns:
fi = addKey(df, fi, col)
# Numerators
numerators = pd.DataFrame()
columns = ['B25070_007E','B25070_008E','B25070_009E','B25070_010E']
for col in columns:
numerators = addKey(df, numerators, col)
# Denominators
denominators = pd.DataFrame()
columns = ['B25070_001E']
for col in columns:
denominators = addKey(df, denominators, col)
    # The denominator is constructed below; rows where it equals 0 are dropped.
#~~~~~~~~~~~~~~~
# Step 3)
# Run the Calculation
# ( (value[1]+value[2]+value[3]+value[4]) / nullif(value[5],0) )*100
#~~~~~~~~~~~~~~~
fi['numerator'] = numerators.sum(axis=1)
fi['denominator'] = denominators.sum(axis=1)
fi = fi[fi['denominator'] != 0] # Delete Rows where the 'denominator' column is 0
fi['final'] = (fi['numerator'] / fi['denominator'] ) * 100
return fi['final']
"""
WITH tbl AS (
select csa,
( (value[1]+value[2]+value[3]+value[4]) / nullif(value[5],0) )*100::numeric as result
from vital_signs.get_acs_vars_csa_and_bc('2014',ARRAY['B25070_007E','B25070_008E','B25070_009E','B25070_010E','B25070_001E'])
)
update vital_signs.data
set affordr = result from tbl where data2.csa = tbl.csa and update_data_year = '2014' and data_year = '2014';
"""
# Cell
#File: bahigher.py
#Author: <NAME>
#Date: 1/17/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B06009 - PLACE OF BIRTH BY EDUCATIONAL ATTAINMENT IN THE UNITED STATES
#purpose: Produce Workforce and Economic Development - Percent Population (25 Years and over) with a Bachelor's Degree or Above
#Table Uses: B06009 - lesshs, hsdipl, bahigher
#input: Year
#output:
import pandas as pd
import glob
def bahigher( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
    # Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B06009*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
    # Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = sumInts(df)
# Add 'BALTIMORE' which is the SUM of all the CSAs
#~~~~~~~~~~~~~~~
# Step 2)
# Prepare the columns
#~~~~~~~~~~~~~~~
# Final Dataframe
fi = pd.DataFrame()
columns = ['B06009_005E','B06009_006E','B06009_001E']
for col in columns:
fi = addKey(df, fi, col)
# Numerators
numerators = pd.DataFrame()
columns = ['B06009_005E','B06009_006E']
for col in columns:
numerators = addKey(df, numerators, col)
# Denominators
denominators = pd.DataFrame()
columns = ['B06009_001E']
for col in columns:
denominators = addKey(df, denominators, col)
    # The denominator is constructed below; rows where it equals 0 are dropped.
#~~~~~~~~~~~~~~~
# Step 3)
# Run the Calculation + final mods
# ( ( value[1] + value[2] ) / nullif(value[3],0) )*100
#~~~~~~~~~~~~~~~
fi['numerator'] = numerators.sum(axis=1)
fi['denominator'] = denominators.sum(axis=1)
fi = fi[fi['denominator'] != 0] # Delete Rows where the 'denominator' column is 0
fi['final'] = (fi['numerator'] / fi['denominator'] ) * 100
return fi['final']
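

# Illustrative mapping (an assumption, not part of the original module) of how the
# B06009 variables relate to the three education indicators built from this table;
# each entry is (numerator variable codes, denominator variable code).
B06009_EDUCATION_INDICATORS = {
    'lesshs': (['B06009_002E'], 'B06009_001E'),
    'hsdipl': (['B06009_003E', 'B06009_004E'], 'B06009_001E'),
    'bahigher': (['B06009_005E', 'B06009_006E'], 'B06009_001E'),
}
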
"""
/* <hsdipl_14> */ --
WITH tbl AS (
select csa,
( ( value[1] + value[2] ) / nullif(value[3],0) )*100::numeric as result
from vital_signs.get_acs_vars_csa_and_bc('2014',ARRAY['B06009_003E','B06009_004E','B06009_001E'])
)
update vital_signs.data
set hsdipl = result from tbl where data2.csa = tbl.csa and update_data_year = '2014' and data_year = '2014';
B06009_004E
label "Estimate!!Total!!Some college or associate's degree"
B06009_003E
label "Estimate!!Total!!High school graduate (includes equivalency)"
B06009_002E
label "Estimate!!Total!!Less than high school graduate"
B06009_001E
label "Estimate!!Total"
B06009_005E
label "Estimate!!Total!!Bachelor's degree"
B06009_006E
label "Estimate!!Total!!Graduate or professional degree"
"""
# Cell
#File: carpool.py
#Author: <NAME>
#Date: 1/17/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B08101 - MEANS OF TRANSPORTATION TO WORK BY AGE
# Universe: Workers 16 Years and Over
# Table Creates: othrcom, drvalone, carpool, pubtran, walked
#purpose: Produce Sustainability - Percent of Population that Carpool to Work Indicator
#input: Year
#output:
import pandas as pd
import glob
def carpool( year ):
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
def sumInts(df): return df.sum(numeric_only=True)
#~~~~~~~~~~~~~~~
# Step 1)
    # Fetch Tract Files w/CSA Labels by Name from the 2_cleaned folder.
#~~~~~~~~~~~~~~~
fileName = ''
for name in glob.glob('AcsDataClean/B08101*5y'+str(year)+'_est.csv'):
fileName = name
df = pd.read_csv( fileName, index_col=0 )
# Aggregate by CSA
    # Group By CSA so that they may be operated on
df = df.groupby('CSA')
# Aggregate Numeric Values by Sum
df = sumInts(df)
# Add 'BALTIMORE' which is the SUM of all the CSAs
#~~~~~~~~~~~~~~~
# Step 2)
# Prepare the columns
#~~~~~~~~~~~~~~~
# Final Dataframe
fi = pd.DataFrame()
columns = ['B08101_001E','B08101_049E','B08101_017E']
for col in columns:
fi = addKey(df, fi, col)
# Numerators
numerators = pd.DataFrame()
columns = ['B08101_017E']
for col in columns:
numerators = addKey(df, numerators, col)
# Denominators
denominators = pd.DataFrame()
columns = ['B08101_001E','B08101_049E']
for col in columns:
denominators = addKey(df, denominators, col)
#~~~~~~~~~~~~~~~
# Step 3)
# Run the Calculation + final mods
# ( value[3] / (value[1]-value[2]) )*100
#~~~~~~~~~~~~~~~
fi['numerator'] = numerators.sum(axis=1)
fi['denominator'] = denominators.iloc[: ,0] - denominators.iloc[: ,1]
fi = fi[fi['denominator'] != 0] # Delete Rows where the 'denominator' column is 0
fi['final'] = (fi['numerator'] / fi['denominator'] ) * 100
#~~~~~~~~~~~~~~~
# Step 4)
# Add Special Baltimore City Data
#~~~~~~~~~~~~~~~
url = 'https://api.census.gov/data/20'+str(year)+'/acs/acs5/subject?get=NAME,S0801_C01_004E&for=county%3A510&in=state%3A24&key=<KEY>'
table = | pd.read_json(url, orient='records') | pandas.read_json |
"""Get the log of the simulation objects in a pandas dataframe."""
import pandas as pd
from openclsim.model import get_subprocesses
def get_log_dataframe(simulation_object, id_map=None):
"""Get the log of the simulation objects in a pandas dataframe.
Parameters
----------
simulation_object
object from which the log is returned as a dataframe sorted by "Timestamp"
id_map
by default uuids are not resolved. id_map solves this at request:
* a list of top-activities of which also all sub-activities
will be resolved, e.g.: [while_activity]
* a manual id_map to resolve uuids to labels, e.g. {'uuid1':'name1'}
"""
if id_map is None:
id_map = []
if isinstance(id_map, list):
id_map = {act.id: act.name for act in get_subprocesses(id_map)}
else:
id_map = id_map if id_map else {}
df = (
pd.DataFrame(simulation_object.log)
        .sort_values(by=["Timestamp"])
)
return pd.concat(
[
(
df.filter(items=["ActivityID"])
.rename(columns={"ActivityID": "Activity"})
.replace(id_map)
),
pd.DataFrame(simulation_object.log).filter(["Timestamp", "ActivityState"]),
pd.DataFrame(simulation_object.log["ObjectState"]),
            pd.DataFrame(simulation_object.log["ActivityLabel"]),
        ],
        axis=1,
    )
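

def _example_log_dataframe(simulation_object, top_activity):
    """Hypothetical usage sketch (both argument names are assumed): extract a log twice,
    once with raw activity uuids and once with the top-level activity and all of its
    sub-activities resolved to readable names via id_map."""
    raw = get_log_dataframe(simulation_object)  # "Activity" column holds uuids
    readable = get_log_dataframe(simulation_object, id_map=[top_activity])
    return raw, readable
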
import numpy as np
from numpy.random import randn
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DataFrame, Series, isna, notna
import pandas._testing as tm
import pandas.tseries.offsets as offsets
def _check_moment_func(
static_comp,
name,
raw,
has_min_periods=True,
has_center=True,
has_time_rule=True,
fill_value=None,
zero_min_periods_equal=True,
series=None,
frame=None,
**kwargs,
):
def get_result(obj, window, min_periods=None, center=False):
r = obj.rolling(window=window, min_periods=min_periods, center=center)
return getattr(r, name)(**kwargs)
series_result = get_result(series, window=50)
assert isinstance(series_result, Series)
tm.assert_almost_equal(series_result.iloc[-1], static_comp(series[-50:]))
frame_result = get_result(frame, window=50)
assert isinstance(frame_result, DataFrame)
tm.assert_series_equal(
frame_result.iloc[-1, :],
frame.iloc[-50:, :].apply(static_comp, axis=0, raw=raw),
check_names=False,
)
# check time_rule works
if has_time_rule:
win = 25
minp = 10
ser = series[::2].resample("B").mean()
frm = frame[::2].resample("B").mean()
if has_min_periods:
series_result = get_result(ser, window=win, min_periods=minp)
frame_result = get_result(frm, window=win, min_periods=minp)
else:
series_result = get_result(ser, window=win, min_periods=0)
frame_result = get_result(frm, window=win, min_periods=0)
last_date = series_result.index[-1]
prev_date = last_date - 24 * offsets.BDay()
trunc_series = series[::2].truncate(prev_date, last_date)
trunc_frame = frame[::2].truncate(prev_date, last_date)
tm.assert_almost_equal(series_result[-1], static_comp(trunc_series))
tm.assert_series_equal(
frame_result.xs(last_date),
trunc_frame.apply(static_comp, raw=raw),
check_names=False,
)
# excluding NaNs correctly
obj = Series(randn(50))
obj[:10] = np.NaN
obj[-10:] = np.NaN
if has_min_periods:
result = get_result(obj, 50, min_periods=30)
tm.assert_almost_equal(result.iloc[-1], static_comp(obj[10:-10]))
# min_periods is working correctly
result = get_result(obj, 20, min_periods=15)
assert isna(result.iloc[23])
assert not isna(result.iloc[24])
assert not isna(result.iloc[-6])
assert isna(result.iloc[-5])
obj2 = Series(randn(20))
result = get_result(obj2, 10, min_periods=5)
assert isna(result.iloc[3])
assert notna(result.iloc[4])
if zero_min_periods_equal:
# min_periods=0 may be equivalent to min_periods=1
result0 = get_result(obj, 20, min_periods=0)
result1 = get_result(obj, 20, min_periods=1)
tm.assert_almost_equal(result0, result1)
else:
result = get_result(obj, 50)
tm.assert_almost_equal(result.iloc[-1], static_comp(obj[10:-10]))
# window larger than series length (#7297)
if has_min_periods:
for minp in (0, len(series) - 1, len(series)):
result = get_result(series, len(series) + 1, min_periods=minp)
expected = get_result(series, len(series), min_periods=minp)
nan_mask = isna(result)
tm.assert_series_equal(nan_mask, isna(expected))
nan_mask = ~nan_mask
tm.assert_almost_equal(result[nan_mask], expected[nan_mask])
else:
result = get_result(series, len(series) + 1, min_periods=0)
expected = get_result(series, len(series), min_periods=0)
nan_mask = isna(result)
tm.assert_series_equal(nan_mask, isna(expected))
nan_mask = ~nan_mask
tm.assert_almost_equal(result[nan_mask], expected[nan_mask])
# check center=True
if has_center:
if has_min_periods:
result = get_result(obj, 20, min_periods=15, center=True)
expected = get_result(
pd.concat([obj, Series([np.NaN] * 9)]), 20, min_periods=15
)[9:].reset_index(drop=True)
else:
result = get_result(obj, 20, min_periods=0, center=True)
print(result)
expected = get_result(
pd.concat([obj, Series([np.NaN] * 9)]), 20, min_periods=0
)[9:].reset_index(drop=True)
tm.assert_series_equal(result, expected)
# shifter index
s = [f"x{x:d}" for x in range(12)]
if has_min_periods:
minp = 10
series_xp = (
get_result(
series.reindex(list(series.index) + s), window=25, min_periods=minp
)
.shift(-12)
.reindex(series.index)
)
frame_xp = (
get_result(
frame.reindex(list(frame.index) + s), window=25, min_periods=minp
)
.shift(-12)
.reindex(frame.index)
)
series_rs = get_result(series, window=25, min_periods=minp, center=True)
frame_rs = get_result(frame, window=25, min_periods=minp, center=True)
else:
series_xp = (
get_result(
series.reindex(list(series.index) + s), window=25, min_periods=0
)
.shift(-12)
.reindex(series.index)
)
frame_xp = (
get_result(
frame.reindex(list(frame.index) + s), window=25, min_periods=0
)
.shift(-12)
.reindex(frame.index)
)
series_rs = get_result(series, window=25, min_periods=0, center=True)
frame_rs = get_result(frame, window=25, min_periods=0, center=True)
if fill_value is not None:
series_xp = series_xp.fillna(fill_value)
frame_xp = frame_xp.fillna(fill_value)
    tm.assert_series_equal(series_xp, series_rs)
    tm.assert_frame_equal(frame_xp, frame_rs)
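

def _example_check_rolling_mean():
    """Minimal illustration (assumed, not part of the original test module) of how the
    helper above is driven: compare a rolling mean against numpy's mean on a small
    business-day indexed Series/DataFrame pair."""
    idx = pd.date_range("2000-01-01", periods=60, freq="B")
    ser = Series(randn(60), index=idx)
    frm = DataFrame(randn(60, 3), index=idx)
    _check_moment_func(np.mean, name="mean", raw=True, series=ser, frame=frm)
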
import os, sys
import time, logging, random
import pickle, random
from collections import Counter
import pandas as pd
import numpy as np
import xgboost as xgb
import matplotlib.pyplot as plt
from sklearn.model_selection import RandomizedSearchCV, GridSearchCV, cross_val_predict, StratifiedShuffleSplit, \
cross_val_score
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
# from sklearn.model_selection import cross_val_score,
from sklearn.metrics import accuracy_score, auc, confusion_matrix, classification_report, matthews_corrcoef
from sklearn.metrics import roc_auc_score, \
roc_curve # .roc_auc_score(y_true, y_score, average='macro', sample_weight=None, max_fpr=None)
# from ProteinGraphML.MLTools.Data import Data
# this model system will hopefully make a simple API for dealing with large data
# iterating on our platform across domains
class Result:
data = None
predictions = None
space = None
predLabel = None
def __init__(self, dataOut, predictions, space="", modelDIR=None):
self.data = dataOut
self.predictions = predictions
# self.modelName = modelName
self.space = space
# print("HERE IS THE MODEL")
self.resultDIR = modelDIR
# we put the functions here which actually convert the data to a binary score
self.predLabel = [round(p) for p in self.predictions] # generate label using probability
# print ('PRINT ALL VALUES....>>>')
# print (self.predictions, len(self.predictions))
# print (self.predLabel, len(self.predLabel))
# print (self.data.labels, len(self.data.labels))
def acc(self):
return Output("ACC", accuracy_score(self.data.labels, self.predLabel))
def mcc(self): # Add MCC since data is imbalanced
return Output("MCC", matthews_corrcoef(self.data.labels, self.predLabel))
def roc(self):
roc = Output("AUCROC", roc_auc_score(self.data.labels, self.predictions))
# roc.fileOutput(self.modelName)
return roc
def ConfusionMatrix(self):
return ConfusionMatrix(self.data.labels, self.predLabel)
def rocCurve(self):
# fpr, tpr, threshold = metrics.roc_curve(y_test, preds)
fpr, tpr, threshold = roc_curve(self.data.labels, self.predictions)
rocCurve = RocCurve("rocCurve", fpr, tpr)
logging.info("RESULT DIR: {0}".format(self.resultDIR))
# rocCurve.fileOutput(self.resultDIR)
return rocCurve
def report(self):
return Report(self.data.labels, self.predLabel)
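

def _demo_result_metrics(testData, predictions, result_dir="results/demo"):
    """Hypothetical usage sketch (argument names assumed): wrap held-out labels and
    predicted class-1 probabilities in a Result and print the standard metrics."""
    res = Result(testData, predictions, modelDIR=result_dir)
    for metric in ("acc", "mcc", "roc"):
        getattr(res, metric)().printOutput()
    res.ConfusionMatrix().printOutput()
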
class Output: # base output...
data = None
stringType = None
def __init__(self, type, modelOutput):
self.data = modelOutput
self.stringType = type
    def fileOutput(self, modelName):  # now what if it's a table? or a graph?
rootName = self.stringType
base = modelName + "/" + rootName
# this is ...
# if os.path.isdir("../results"):
# if os.path.isdir(base):
# if not os.path.isdir("results"):
# os.mkdir("results")
# if not os.path.isdir(base):
# os.mkdir(base)
# os.mkdir(path)
logging.info("results/" + modelName)
f = open(base, "w")
f.write(str(self.textOutput()[1])) # this needs to be some kind of representation
f.close()
def textOutput(self):
return (self.stringType, self.data)
def printOutput(self, file=None):
if file is not None:
print(self.data, file=file)
# print(self.textOutput(),file=file)
else:
print(self.data)
# print(self.textOutput())
# FEATURE VISUALIZER
# class FeatureVisualizer(Output): # this requires the model....
# def __init__(self,labels,predictions):
class LabelOutput(Output):
def __init__(self, labels, predictions):
self.labels = labels
self.predictions = predictions
self.data = self.setData()
def setData(self):
pass
class ConfusionMatrix(LabelOutput):
def setData(self):
return confusion_matrix(self.labels, self.predictions)
class Report(LabelOutput):
def setData(self):
return classification_report(self.labels, self.predictions)
class RocCurve(Output):
fpr = None
tpr = None
def __init__(self, type, fpr, tpr):
# self.data = modelOutput
self.stringType = type
self.fpr = fpr
self.tpr = tpr
def fileOutput(self, file=None, fileString=None):
rootName = self.stringType
# base = modelName+"/"+rootName
logging.info("ROOT: {0}".format(rootName))
# root is the type...
# print('HERE IS THE BASE',fileString)
roc_auc = auc(self.fpr, self.tpr)
plt.title('Receiver Operating Characteristic')
plt.plot(self.fpr, self.tpr, 'b', label='AUC = %0.2f' % roc_auc)
plt.legend(loc='lower right')
plt.plot([0, 1], [0, 1], 'r--')
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
if fileString is not None:
pltfile = fileString + '.png'
logging.info("INFO: AUC-ROC curve will be saved as {0}".format(pltfile))
plt.savefig(pltfile)
# plt ROC curves for n folds
def fileOutputForAverage(self, savedData, fileString=None, folds=5):
rootName = self.stringType
logging.info("ROOT: {0}".format(rootName))
rocValues = []
for n in range(folds):
labels, predictions = zip(*list(savedData[n])) # unzip the data
# predictions = list(zip(*savedData[n])[1]) #unzip the data
fpr, tpr, threshold = roc_curve(labels, predictions)
roc_auc = auc(fpr, tpr)
rocValues.append(roc_auc)
plt.plot(fpr, tpr, color='gainsboro')
plt.plot(fpr, tpr, color='darkblue', label='Mean AUC = %0.3f' % np.mean(rocValues))
plt.plot(fpr, tpr, color='darkred', label='Median AUC = %0.3f' % np.median(rocValues))
plt.legend(loc='lower right')
plt.plot([0, 1], [0, 1], 'r--')
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.title('Receiver Operating Characteristic,' + 'Range: ' + str('%.3f' % np.min(rocValues)) + ' - ' + str(
'%.3f' % np.max(rocValues)))
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
# logging.info("RESULT DIR: {0}".format(self.resultDIR))
if fileString is not None:
pltfile = fileString + '.png'
logging.info("INFO: AUC-ROC curve will be saved as {0}".format(pltfile))
plt.savefig(pltfile)
def printOutput(self, file=None):
if file is not None: # if we've got a file, we wont print it
return
# fpr, tpr, threshold = metrics.roc_curve(y_test, preds)
roc_auc = auc(self.fpr, self.tpr)
# method I: plt
plt.title('Receiver Operating Characteristic')
plt.plot(self.fpr, self.tpr, 'b', label='AUC = %0.2f' % roc_auc)
plt.legend(loc='lower right')
plt.plot([0, 1], [0, 1], 'r--')
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.show()
class BaseModel:
MODEL_PROCEDURE = ""
def __init__(self, MODEL_PROCEDURE, RESULT_DIR=None):
self.MODEL_PROCEDURE = MODEL_PROCEDURE
if RESULT_DIR is None: # control will NEVER come here as RESULT_DIR is mandatory now
self.MODEL_RUN_NAME = "{0}-{1}".format(self.MODEL_PROCEDURE, str(int(time.time())))
self.MODEL_DIR = "results/{0}".format(self.MODEL_RUN_NAME)
else:
self.MODEL_RUN_NAME = "{0}".format(self.MODEL_PROCEDURE)
self.MODEL_DIR = RESULT_DIR
def getFile(self):
self.createDirectoryIfNeed(self.MODEL_DIR)
WRITEFILE = self.MODEL_DIR + '/metrics_' + self.MODEL_PROCEDURE + '.txt'
# open(WRITEDIR, 'a').close()
fileName = WRITEFILE
writeSpace = open(fileName, 'w')
return writeSpace
def createDirectoryIfNeed(self, dir):
logging.info("AYYEE: {0}".format(dir))
if not os.path.isdir(dir):
os.mkdir(dir)
def setClassifier(self, classifier):
self.m = classifier
def createResultObjects(self, testData, outputTypes, predictions, saveData=True):
self.createDirectoryIfNeed("results")
if saveData: # we can turn off saving of data...
writeSpace = self.getFile()
print(self.m, file=writeSpace)
print("", file=writeSpace)
resultList = []
# resultObject = Result(testData,predictions,self.MODEL_RUN_NAME,modelDIR=self.MODEL_RUN_NAME)
resultObject = Result(testData, predictions, modelDIR=self.MODEL_DIR)
for resultType in outputTypes:
print(resultType, file=writeSpace)
logging.info("HERES MODEL NAME: {0}".format(self.MODEL_RUN_NAME))
newResultObject = getattr(resultObject, resultType)() # self.MODEL_RUN_NAME
# print(type(newResultObject))
resultList.append(newResultObject)
            # print(resultObject); print("MODEL DIR", self.MODEL_PROCEDURE)  # self.MODEL_RUN_NAME
            # if resultType == "rocCurve" and self.MODEL_PROCEDURE == "XGBCrossVal":
            #     # if it's XGB cross val we will write output (hack)
if resultType == "rocCurve":
aucFileName = self.MODEL_DIR + '/auc_' + self.MODEL_PROCEDURE
# newResultObject.fileOutput(fileString=self.MODEL_RUN_NAME)
newResultObject.fileOutput(fileString=aucFileName)
else:
newResultObject.printOutput(file=writeSpace)
# resultObject.printOutput(file=writeSpace)
print("", file=writeSpace)
else:
for resultType in outputTypes:
newResultObject = getattr(resultObject, resultType)(self.MODEL_RUN_NAME)
resultList.append(newResultObject)
# for each of the items in the result list, write them to the shared space
# print ('resultList...........', resultList)
if (len(resultList) == 1):
return resultList[0]
else:
return iter(resultList)
class SkModel(BaseModel):
m = None
def train(self, trainData, param=None):
        # default to a multinomial logistic regression classifier
        clf = LogisticRegression(random_state=0, solver='lbfgs', multi_class='multinomial')
        self.m = clf.fit(trainData.features, trainData.labels)
def predict(self, testData, outputTypes):
# inputData = xgb.DMatrix(testData.features)
predictions = self.m.predict(testData.features)
return self.createResultObjects(testData, outputTypes, predictions)
def cross_val_predict(self, testData, outputTypes):
# clf = LogisticRegression(random_state=0, solver='lbfgs',multi_class='multinomial')#.fit(X, y)
# self.m = clf.fit(testData.features,testData.labels)
predictions = cross_val_predict(self.m, testData.features, y=testData.labels, cv=10)
return self.createResultObjects(testData, outputTypes, predictions)
class XGBoostModel(BaseModel):
m = None
param = None
    def setParam(self, param):
self.param = param
def train(self, trainData, param):
# print (param)
# np.random.seed(1234)
# random.seed(1234)
# dtrain = xgb.DMatrix(trainData.features,label=trainData.labels)
# bst = xgb.train(param, dtrain, num_boost_round=47) #use the default values of parameters
# self.m = bst
# modelName = self.MODEL_DIR + '/' + self.MODEL_PROCEDURE + '.model'
# bst.save_model(modelName)
###FOR SKLEARN WRAPPER###
bst = xgb.XGBClassifier(**param).fit(trainData.features, trainData.labels)
# self.m = bst
modelName = self.MODEL_DIR + '/' + self.MODEL_PROCEDURE + '.model'
pickle.dump(bst, open(modelName, 'wb'))
logging.info('Trained ML Model was saved as {0}'.format(modelName))
def predict(self, testData, outputTypes):
inputData = xgb.DMatrix(testData.features)
predictions = self.m.predict(inputData) #
# print ('predictions.................', predictions)
        # ypred_bst = np.array(bst.predict(dtest, ntree_limit=bst.best_iteration))
# ypred_bst = ypred_bst > 0.5
# ypred_bst = ypred_bst.astype(int)
# if "report" in outputTypes: # small hack for the report feature, we can use this to make sure
return self.createResultObjects(testData, outputTypes, predictions)
def predict_using_saved_model(self, testData, idDescription, idNameSymbol, modelName, infoFile):
# bst = xgb.Booster({'nthread':8})
# bst.load_model(modelName)
# inputData = xgb.DMatrix(testData.features)
# predictions = bst.predict(inputData)
###FOR SKLEARN WRAPPER###
bst = pickle.load(open(modelName, 'rb'))
print(bst.get_xgb_params())
inputData = testData.features # for wrapper
class01Probs = bst.predict_proba(inputData) # for wrapper
predictions = [i[1] for i in class01Probs] # select class1 probability - wrapper
proteinInfo = self.fetchProteinInformation(infoFile)
self.savePredictedProbability(testData, predictions, idDescription, idNameSymbol, proteinInfo, "TEST")
# def cross_val_predict(self,testData,outputTypes):
def cross_val_predict(self, testData, idDescription, idNameSymbol, idSource, outputTypes, params={}, cv=5):
logging.info("Running XGboost 5-fold cross-validation on the train set")
metrics = {"roc": 0., "mcc": 0., "acc": 0.}
clf = xgb.XGBClassifier(**params)
self.m = clf
class01Probs = cross_val_predict(self.m, testData.features, y=testData.labels, cv=cv,
method='predict_proba') # calls sklearn's cross_val_predict
predictions = [i[1] for i in class01Probs] # select class1 probability
roc, rc, acc, mcc, CM, report = self.createResultObjects(testData, outputTypes, predictions)
metrics["roc"] = roc.data
metrics["mcc"] = mcc.data
metrics["acc"] = acc.data
# find important features and save them in a text file
importance = Counter(
clf.fit(testData.features, testData.labels).get_booster().get_score(importance_type='gain'))
self.saveImportantFeatures(importance, idDescription, idNameSymbol, idSource=idSource)
self.saveImportantFeaturesAsPickle(importance)
# save predicted class 1 probability in a text file
# proteinInfo = self.fetchProteinInformation(infoFile)
self.savePredictedProbability(testData, predictions, idDescription, idNameSymbol, "", "TRAIN")
# train the model using all train data and save it
self.train(testData, param=params)
# return roc,acc,mcc, CM,report,importance
logging.info("METRICS: {0}".format(str(metrics)))
def average_cross_val(self, allData, idDescription, idNameSymbol, idSource, outputTypes, iterations, testSize=0.2,
params={}):
# This function divides the data into train and test sets 'n' (number of folds) times.
# Model trained on the train data is tested on the test data. Average MCC, Accuracy and ROC
# is reported.
logging.info("Running ML models to compute average MCC/ROC/ACC")
importance = None
metrics = {"average-roc": 0., "average-mcc": 0., "average-acc": 0.} # add mcc and accuracy too
logging.info("=== RUNNING {0} FOLDS".format(iterations))
# Initialize variable to store predicted probs of test data
predictedProb_ROC = []
predictedProbs = {} # will be used for o/p file
        seedAUC = {}  # to store seed value and corresponding classification results
for r in range(iterations):
predictedProb_ROC.append([])
# print (predictedProb)
for k in range(0, iterations):
logging.info("DOING {0} FOLD".format(k + 1))
clf = xgb.XGBClassifier(**params)
self.m = clf
randomState = 1000 + k
trainData, testData = allData.splitSet(testSize, randomState)
# Train the model
bst = clf.fit(trainData.features, trainData.labels)
# test the model
class01Probs = bst.predict_proba(testData.features)
predictions = [i[1] for i in class01Probs] # select class1 probability
roc, acc, mcc = self.createResultObjects(testData, outputTypes, predictions)
# append predicted probability and true class for ROC curve
predictedProb_ROC[k] = zip(testData.labels.tolist(), predictions)
proteinIds = list(testData.features.index.values)
# print ('Selected ids are: ', proteinIds)
for p in range(len(proteinIds)):
try:
predictedProbs[proteinIds[p]].append(predictions[p])
except:
predictedProbs[proteinIds[p]] = [predictions[p]]
# print (predictedProb)
metrics["average-roc"] += roc.data
metrics["average-mcc"] += mcc.data
metrics["average-acc"] += acc.data
seedAUC[randomState] = [roc.data, acc.data, mcc.data]
# model.predict ...
if importance:
importance = importance + Counter(bst.get_booster().get_score(importance_type='gain'))
else:
importance = Counter(bst.get_booster().get_score(importance_type='gain'))
# compute average values
for key in importance:
importance[key] = importance[key] / iterations
for key in metrics:
metrics[key] = metrics[key] / iterations
avgPredictedProbs = {}
for k, v in predictedProbs.items():
avgPredictedProbs[k] = np.mean(v)
logging.info("METRICS: {0}".format(str(metrics))) # write this metrics to a file...
self.saveImportantFeatures(importance, idDescription, idNameSymbol,
idSource=idSource) # save important features
self.saveImportantFeaturesAsPickle(importance)
self.saveSeedPerformance(seedAUC)
# print (avgPredictedProb)
self.savePredictedProbability(allData, avgPredictedProbs, idDescription, idNameSymbol, "",
"AVERAGE") # save predicted probabilities
# plot ROC curves
rc = RocCurve("rocCurve", None, None)
aucFileName = self.MODEL_DIR + '/auc_' + self.MODEL_PROCEDURE
rc.fileOutputForAverage(predictedProb_ROC, fileString=aucFileName, folds=iterations)
# FEATURE SEARCH, will create the dataset with different sets of features, and search over them to get resutls
def gridSearch(self, allData, idDescription, idNameSymbol, outputTypes, paramGrid, rseed, nthreads):
# split test and train data
# testSize = 0.20
# trainData, testData = allData.splitSet(testSize, rseed)
# print (trainData.features.shape)
# print (testData.features.shape)
# print (trainData.labels)
# print (testData.labels)
logging.info("XGBoost parameters search started")
clf = xgb.XGBClassifier(random_state=rseed)
random_search = GridSearchCV(clf, n_jobs=nthreads,
param_grid=paramGrid,
scoring='roc_auc', cv=5, verbose=7)
# save the output of each iteration of gridsearch to a file
tempFileName = self.MODEL_DIR + '/temp.tsv'
sys.stdout = open(tempFileName, 'w')
random_search.fit(allData.features, allData.labels)
# model trained with best parameters
bst = random_search.best_estimator_
# self.m = bst
sys.stdout.close()
self.saveBestEstimator(str(bst))
# predict the test data using the best estimator
# metrics = {"roc":0., "mcc":0., "acc":0.}
# class01Probs = bst.predict_proba(testData.features)
# predictions = [i[1] for i in class01Probs] #select class1 probability
# roc,acc,mcc = self.createResultObjects(testData,outputTypes,predictions)
# metrics["roc"] = roc.data
# metrics["mcc"] = mcc.data
# metrics["acc"] = acc.data
        # find important features and save them in a text file
# importance = Counter(bst.get_booster().get_score(importance_type='gain'))
# self.saveImportantFeatures(importance, idDescription)
# self.saveImportantFeaturesAsPickle(importance)
        # save predicted class 1 probability in a text file
# self.savePredictedProbability(testData, predictions, idDescription, idNameSymbol, "TRAIN")
# train the model using all train data and save it
# self.train(allData, param=random_search.best_params_)
# save the XGBoost parameters for the best estimator
# return roc,acc,mcc, CM,report,importance
# logging.info("METRICS: {0}".format(str(metrics)))
# save the xgboost parameters selected using GirdSearchCV
def saveBestEstimator(self, estimator):
xgbParamFile = self.MODEL_DIR + '/XGBParameters.txt'
logging.info("XGBoost parameters for the best estimator written to: {0}".format(xgbParamFile))
# save the optimized parameters for XGboost
paramVals = estimator.strip().split('(')[1].split(',')
with open(xgbParamFile, 'w') as fo:
fo.write('{')
for vals in paramVals:
keyVal = vals.strip(' ').split('=')
if ('scale_pos_weight' in keyVal[0] or
'n_jobs' in keyVal[0] or
'nthread' in keyVal[0] or
'None' in keyVal[1]):
continue
elif (')' in keyVal[1]): # last parameter
line = "'" + keyVal[0].strip().strip(' ') + "': " + keyVal[1].strip().strip(' ').strip(')') + '\n'
else:
line = "'" + keyVal[0].strip().strip(' ') + "': " + keyVal[1].strip().strip(' ').strip(')') + ',\n'
fo.write(line)
fo.write('}')
# save parameters used in each iteration
tuneFileName = self.MODEL_DIR + '/tune.tsv'
logging.info("Parameter values in each iteration of GridSearchCV written to: {0}".format(tuneFileName))
ft = open(tuneFileName, 'w')
headerWritten = 'N'
tempFileName = self.MODEL_DIR + '/temp.tsv'
with open(tempFileName, 'r') as fin:
for line in fin:
header = ''
rec = ''
if ('score' in line):
if (headerWritten == 'N'):
vals = line.strip().strip('[CV]').split(',')
for val in vals:
k, v = val.strip(' ').split('=')
header = header + k + '\t'
rec = rec + v + '\t'
ft.write(header + '\n')
ft.write(rec + '\n')
headerWritten = 'Y'
else:
vals = line.strip().strip('[CV]').split(',')
for val in vals:
k, v = val.strip(' ').split('=')
rec = rec + v + '\t'
ft.write(rec + '\n')
ft.close()
os.remove(tempFileName) # delete temp file
# Save important features as pickle file. It will be used by visualization code
def saveImportantFeaturesAsPickle(self, importance):
'''
Save important features in a pickle dictionary
'''
featureFile = self.MODEL_DIR + '/featImportance_' + self.MODEL_PROCEDURE + '.pkl'
logging.info("IMPORTANT FEATURES WRITTEN TO PICKLE FILE {0}".format(featureFile))
with open(featureFile, 'wb') as ff:
pickle.dump(importance, ff, pickle.HIGHEST_PROTOCOL)
# Save seed number and corresponding AUC, ACC and MCC
def saveSeedPerformance(self, seedAUC):
'''
Save important features in a pickle dictionary
'''
seedFile = self.MODEL_DIR + '/seed_val_auc.tsv'
logging.info("SEED VALUES AND THEIR CORRESPONDING AUC/ACC/MCC WRITTEN TO {0}".format(seedFile))
with open(seedFile, 'w') as ff:
hdr = 'Seed' + '\t' + 'AUC' + '\t' + 'Accuracy' + '\t' + 'MCC' + '\n'
ff.write(hdr)
for k, v in seedAUC.items():
rec = str(k) + '\t' + str(v[0]) + '\t' + str(v[1]) + '\t' + str(v[2]) + '\n'
ff.write(rec)
# Save the important features in a text file.
def saveImportantFeatures(self, importance, idDescription, idNameSymbol, idSource=None):
"""
This function saves the important features in a text file.
"""
dataForDataframe = {'Feature': [], 'Symbol': [], 'Cell_id': [], 'Drug_name': [],
'Tissue': [], 'Source': [], 'Name': [], 'Gain Value': []}
for feature, gain in importance.items():
dataForDataframe['Feature'].append(feature)
dataForDataframe['Gain Value'].append(gain)
if feature.lower().islower(): # alphanumeric feature
# source
if idSource is not None and feature in idSource:
dataForDataframe['Source'].append(idSource[feature])
else:
dataForDataframe['Source'].append('')
# Name
if feature in idDescription:
dataForDataframe['Name'].append(idDescription[feature])
else:
dataForDataframe['Name'].append('')
logging.debug('INFO: saveImportantFeatures - Unknown feature = {0}'.format(feature))
# Symbol
if feature in idNameSymbol:
dataForDataframe['Symbol'].append(idNameSymbol[feature])
else:
dataForDataframe['Symbol'].append('')
else: # numeric feature
# Source
if idSource is not None:
dataForDataframe['Source'].append(idSource[int(feature)])
else:
dataForDataframe['Source'].append('')
# Name
if int(feature) in idDescription:
dataForDataframe['Name'].append(idDescription[int(feature)])
else:
dataForDataframe['Name'].append('')
logging.debug('INFO: saveImportantFeatures - Unknown feature = {0}'.format(feature))
# Symbol
if int(feature) in idNameSymbol:
dataForDataframe['Symbol'].append(idNameSymbol[int(feature)])
else:
dataForDataframe['Symbol'].append('')
# for CCLE only
if feature in idSource and idSource[feature] == "ccle":
cid = feature[:feature.index('_')]
tissue = feature[feature.index('_') + 1:]
dataForDataframe['Cell_id'].append(cid)
dataForDataframe['Tissue'].append(tissue)
dataForDataframe['Drug_name'].append('')
# for LINCS only.
# LINCS features contain pert_id and cell_id, separated by :. The drug_id in “olegdb” is the DrugCentral
# ID, which is DrugCentral chemical structure (active ingredient) ID. The pert_id from LINCS features is
# used as drug_id to fetch the drug name from the dictionary.
elif feature in idSource and idSource[feature] == "lincs":
drugid = feature[:feature.index(':')]
try:
drugname = idSource['drug_' + drugid]
except:
drugname = ''
cid = feature[feature.index(':') + 1:]
dataForDataframe['Cell_id'].append(cid)
dataForDataframe['Drug_name'].append(drugname)
dataForDataframe['Tissue'].append('')
else:
dataForDataframe['Cell_id'].append('')
dataForDataframe['Drug_name'].append('')
dataForDataframe['Tissue'].append('')
# for k,v in dataForDataframe.items():
# print(k, len(v))
df = | pd.DataFrame(dataForDataframe) | pandas.DataFrame |
import copy
from datetime import datetime
import warnings
import numpy as np
from numpy.random import randn
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DataFrame, DatetimeIndex, Index, Series, isna, notna
import pandas._testing as tm
from pandas.core.window.common import _flex_binary_moment
from pandas.tests.window.common import (
Base,
check_pairwise_moment,
moments_consistency_cov_data,
moments_consistency_is_constant,
moments_consistency_mock_mean,
moments_consistency_series_data,
moments_consistency_std_data,
moments_consistency_var_data,
moments_consistency_var_debiasing_factors,
)
import pandas.tseries.offsets as offsets
@pytest.mark.filterwarnings("ignore:can't resolve package:ImportWarning")
class TestMoments(Base):
def setup_method(self, method):
self._create_data()
def test_centered_axis_validation(self):
# ok
Series(np.ones(10)).rolling(window=3, center=True, axis=0).mean()
# bad axis
with pytest.raises(ValueError):
Series(np.ones(10)).rolling(window=3, center=True, axis=1).mean()
# ok ok
DataFrame(np.ones((10, 10))).rolling(window=3, center=True, axis=0).mean()
DataFrame(np.ones((10, 10))).rolling(window=3, center=True, axis=1).mean()
# bad axis
with pytest.raises(ValueError):
(DataFrame(np.ones((10, 10))).rolling(window=3, center=True, axis=2).mean())
def test_rolling_sum(self, raw):
self._check_moment_func(
np.nansum, name="sum", zero_min_periods_equal=False, raw=raw
)
def test_rolling_count(self, raw):
counter = lambda x: np.isfinite(x).astype(float).sum()
self._check_moment_func(
counter, name="count", has_min_periods=False, fill_value=0, raw=raw
)
def test_rolling_mean(self, raw):
self._check_moment_func(np.mean, name="mean", raw=raw)
@td.skip_if_no_scipy
def test_cmov_mean(self):
# GH 8238
vals = np.array(
[6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48]
)
result = Series(vals).rolling(5, center=True).mean()
expected = Series(
[
np.nan,
np.nan,
9.962,
11.27,
11.564,
12.516,
12.818,
12.952,
np.nan,
np.nan,
]
)
tm.assert_series_equal(expected, result)
@td.skip_if_no_scipy
def test_cmov_window(self):
# GH 8238
vals = np.array(
[6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48]
)
result = Series(vals).rolling(5, win_type="boxcar", center=True).mean()
expected = Series(
[
np.nan,
np.nan,
9.962,
11.27,
11.564,
12.516,
12.818,
12.952,
np.nan,
np.nan,
]
)
tm.assert_series_equal(expected, result)
@td.skip_if_no_scipy
def test_cmov_window_corner(self):
# GH 8238
# all nan
vals = pd.Series([np.nan] * 10)
result = vals.rolling(5, center=True, win_type="boxcar").mean()
assert np.isnan(result).all()
# empty
vals = pd.Series([], dtype=object)
result = vals.rolling(5, center=True, win_type="boxcar").mean()
assert len(result) == 0
# shorter than window
vals = pd.Series(np.random.randn(5))
result = vals.rolling(10, win_type="boxcar").mean()
assert np.isnan(result).all()
assert len(result) == 5
@td.skip_if_no_scipy
@pytest.mark.parametrize(
"f,xp",
[
(
"mean",
[
[np.nan, np.nan],
[np.nan, np.nan],
[9.252, 9.392],
[8.644, 9.906],
[8.87, 10.208],
[6.81, 8.588],
[7.792, 8.644],
[9.05, 7.824],
[np.nan, np.nan],
[np.nan, np.nan],
],
),
(
"std",
[
[np.nan, np.nan],
[np.nan, np.nan],
[3.789706, 4.068313],
[3.429232, 3.237411],
[3.589269, 3.220810],
[3.405195, 2.380655],
[3.281839, 2.369869],
[3.676846, 1.801799],
[np.nan, np.nan],
[np.nan, np.nan],
],
),
(
"var",
[
[np.nan, np.nan],
[np.nan, np.nan],
[14.36187, 16.55117],
[11.75963, 10.48083],
[12.88285, 10.37362],
[11.59535, 5.66752],
[10.77047, 5.61628],
[13.51920, 3.24648],
[np.nan, np.nan],
[np.nan, np.nan],
],
),
(
"sum",
[
[np.nan, np.nan],
[np.nan, np.nan],
[46.26, 46.96],
[43.22, 49.53],
[44.35, 51.04],
[34.05, 42.94],
[38.96, 43.22],
[45.25, 39.12],
[np.nan, np.nan],
[np.nan, np.nan],
],
),
],
)
def test_cmov_window_frame(self, f, xp):
        # GH 8238
df = DataFrame(
np.array(
[
[12.18, 3.64],
[10.18, 9.16],
[13.24, 14.61],
[4.51, 8.11],
[6.15, 11.44],
[9.14, 6.21],
[11.31, 10.67],
[2.94, 6.51],
[9.42, 8.39],
[12.44, 7.34],
]
)
)
xp = DataFrame(np.array(xp))
roll = df.rolling(5, win_type="boxcar", center=True)
rs = getattr(roll, f)()
tm.assert_frame_equal(xp, rs)
@td.skip_if_no_scipy
def test_cmov_window_na_min_periods(self):
# min_periods
vals = Series(np.random.randn(10))
vals[4] = np.nan
vals[8] = np.nan
xp = vals.rolling(5, min_periods=4, center=True).mean()
rs = vals.rolling(5, win_type="boxcar", min_periods=4, center=True).mean()
tm.assert_series_equal(xp, rs)
@td.skip_if_no_scipy
def test_cmov_window_regular(self, win_types):
# GH 8238
vals = np.array(
[6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48]
)
xps = {
"hamming": [
np.nan,
np.nan,
8.71384,
9.56348,
12.38009,
14.03687,
13.8567,
11.81473,
np.nan,
np.nan,
],
"triang": [
np.nan,
np.nan,
9.28667,
10.34667,
12.00556,
13.33889,
13.38,
12.33667,
np.nan,
np.nan,
],
"barthann": [
np.nan,
np.nan,
8.4425,
9.1925,
12.5575,
14.3675,
14.0825,
11.5675,
np.nan,
np.nan,
],
"bohman": [
np.nan,
np.nan,
7.61599,
9.1764,
12.83559,
14.17267,
14.65923,
11.10401,
np.nan,
np.nan,
],
"blackmanharris": [
np.nan,
np.nan,
6.97691,
9.16438,
13.05052,
14.02156,
15.10512,
10.74574,
np.nan,
np.nan,
],
"nuttall": [
np.nan,
np.nan,
7.04618,
9.16786,
13.02671,
14.03559,
15.05657,
10.78514,
np.nan,
np.nan,
],
"blackman": [
np.nan,
np.nan,
7.73345,
9.17869,
12.79607,
14.20036,
14.57726,
11.16988,
np.nan,
np.nan,
],
"bartlett": [
np.nan,
np.nan,
8.4425,
9.1925,
12.5575,
14.3675,
14.0825,
11.5675,
np.nan,
np.nan,
],
}
xp = Series(xps[win_types])
rs = Series(vals).rolling(5, win_type=win_types, center=True).mean()
tm.assert_series_equal(xp, rs)
@td.skip_if_no_scipy
def test_cmov_window_regular_linear_range(self, win_types):
# GH 8238
vals = np.array(range(10), dtype=np.float)
xp = vals.copy()
xp[:2] = np.nan
xp[-2:] = np.nan
xp = Series(xp)
rs = Series(vals).rolling(5, win_type=win_types, center=True).mean()
tm.assert_series_equal(xp, rs)
@td.skip_if_no_scipy
def test_cmov_window_regular_missing_data(self, win_types):
# GH 8238
vals = np.array(
[6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, np.nan, 10.63, 14.48]
)
xps = {
"bartlett": [
np.nan,
np.nan,
9.70333,
10.5225,
8.4425,
9.1925,
12.5575,
14.3675,
15.61667,
13.655,
],
"blackman": [
np.nan,
np.nan,
9.04582,
11.41536,
7.73345,
9.17869,
12.79607,
14.20036,
15.8706,
13.655,
],
"barthann": [
np.nan,
np.nan,
9.70333,
10.5225,
8.4425,
9.1925,
12.5575,
14.3675,
15.61667,
13.655,
],
"bohman": [
np.nan,
np.nan,
8.9444,
11.56327,
7.61599,
9.1764,
12.83559,
14.17267,
15.90976,
13.655,
],
"hamming": [
np.nan,
np.nan,
9.59321,
10.29694,
8.71384,
9.56348,
12.38009,
14.20565,
15.24694,
13.69758,
],
"nuttall": [
np.nan,
np.nan,
8.47693,
12.2821,
7.04618,
9.16786,
13.02671,
14.03673,
16.08759,
13.65553,
],
"triang": [
np.nan,
np.nan,
9.33167,
9.76125,
9.28667,
10.34667,
12.00556,
13.82125,
14.49429,
13.765,
],
"blackmanharris": [
np.nan,
np.nan,
8.42526,
12.36824,
6.97691,
9.16438,
13.05052,
14.02175,
16.1098,
13.65509,
],
}
xp = Series(xps[win_types])
rs = Series(vals).rolling(5, win_type=win_types, min_periods=3).mean()
tm.assert_series_equal(xp, rs)
@td.skip_if_no_scipy
def test_cmov_window_special(self, win_types_special):
# GH 8238
kwds = {
"kaiser": {"beta": 1.0},
"gaussian": {"std": 1.0},
"general_gaussian": {"power": 2.0, "width": 2.0},
"exponential": {"tau": 10},
}
vals = np.array(
[6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48]
)
xps = {
"gaussian": [
np.nan,
np.nan,
8.97297,
9.76077,
12.24763,
13.89053,
13.65671,
12.01002,
np.nan,
np.nan,
],
"general_gaussian": [
np.nan,
np.nan,
9.85011,
10.71589,
11.73161,
13.08516,
12.95111,
12.74577,
np.nan,
np.nan,
],
"kaiser": [
np.nan,
np.nan,
9.86851,
11.02969,
11.65161,
12.75129,
12.90702,
12.83757,
np.nan,
np.nan,
],
"exponential": [
np.nan,
np.nan,
9.83364,
11.10472,
11.64551,
12.66138,
12.92379,
12.83770,
np.nan,
np.nan,
],
}
xp = Series(xps[win_types_special])
rs = (
Series(vals)
.rolling(5, win_type=win_types_special, center=True)
.mean(**kwds[win_types_special])
)
tm.assert_series_equal(xp, rs)
@td.skip_if_no_scipy
def test_cmov_window_special_linear_range(self, win_types_special):
# GH 8238
kwds = {
"kaiser": {"beta": 1.0},
"gaussian": {"std": 1.0},
"general_gaussian": {"power": 2.0, "width": 2.0},
"slepian": {"width": 0.5},
"exponential": {"tau": 10},
}
        vals = np.array(range(10), dtype=float)
xp = vals.copy()
xp[:2] = np.nan
xp[-2:] = np.nan
xp = Series(xp)
rs = (
Series(vals)
.rolling(5, win_type=win_types_special, center=True)
.mean(**kwds[win_types_special])
)
tm.assert_series_equal(xp, rs)
def test_rolling_median(self, raw):
self._check_moment_func(np.median, name="median", raw=raw)
def test_rolling_min(self, raw):
self._check_moment_func(np.min, name="min", raw=raw)
a = pd.Series([1, 2, 3, 4, 5])
result = a.rolling(window=100, min_periods=1).min()
expected = pd.Series(np.ones(len(a)))
tm.assert_series_equal(result, expected)
with pytest.raises(ValueError):
pd.Series([1, 2, 3]).rolling(window=3, min_periods=5).min()
def test_rolling_max(self, raw):
self._check_moment_func(np.max, name="max", raw=raw)
a = pd.Series([1, 2, 3, 4, 5], dtype=np.float64)
b = a.rolling(window=100, min_periods=1).max()
tm.assert_almost_equal(a, b)
with pytest.raises(ValueError):
pd.Series([1, 2, 3]).rolling(window=3, min_periods=5).max()
@pytest.mark.parametrize("q", [0.0, 0.1, 0.5, 0.9, 1.0])
def test_rolling_quantile(self, q, raw):
def scoreatpercentile(a, per):
values = np.sort(a, axis=0)
idx = int(per / 1.0 * (values.shape[0] - 1))
if idx == values.shape[0] - 1:
retval = values[-1]
else:
qlow = float(idx) / float(values.shape[0] - 1)
qhig = float(idx + 1) / float(values.shape[0] - 1)
vlow = values[idx]
vhig = values[idx + 1]
retval = vlow + (vhig - vlow) * (per - qlow) / (qhig - qlow)
return retval
def quantile_func(x):
return scoreatpercentile(x, q)
self._check_moment_func(quantile_func, name="quantile", quantile=q, raw=raw)
def test_rolling_quantile_np_percentile(self):
# #9413: Tests that rolling window's quantile default behavior
# is analogous to Numpy's percentile
row = 10
col = 5
idx = pd.date_range("20100101", periods=row, freq="B")
df = DataFrame(np.random.rand(row * col).reshape((row, -1)), index=idx)
df_quantile = df.quantile([0.25, 0.5, 0.75], axis=0)
np_percentile = np.percentile(df, [25, 50, 75], axis=0)
tm.assert_almost_equal(df_quantile.values, np.array(np_percentile))
@pytest.mark.parametrize("quantile", [0.0, 0.1, 0.45, 0.5, 1])
@pytest.mark.parametrize(
"interpolation", ["linear", "lower", "higher", "nearest", "midpoint"]
)
@pytest.mark.parametrize(
"data",
[
[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0],
[8.0, 1.0, 3.0, 4.0, 5.0, 2.0, 6.0, 7.0],
[0.0, np.nan, 0.2, np.nan, 0.4],
[np.nan, np.nan, np.nan, np.nan],
[np.nan, 0.1, np.nan, 0.3, 0.4, 0.5],
[0.5],
[np.nan, 0.7, 0.6],
],
)
def test_rolling_quantile_interpolation_options(
self, quantile, interpolation, data
):
# Tests that rolling window's quantile behavior is analogous to
# Series' quantile for each interpolation option
s = Series(data)
q1 = s.quantile(quantile, interpolation)
q2 = s.expanding(min_periods=1).quantile(quantile, interpolation).iloc[-1]
if np.isnan(q1):
assert np.isnan(q2)
else:
assert q1 == q2
def test_invalid_quantile_value(self):
data = np.arange(5)
s = Series(data)
msg = "Interpolation 'invalid' is not supported"
with pytest.raises(ValueError, match=msg):
s.rolling(len(data), min_periods=1).quantile(0.5, interpolation="invalid")
def test_rolling_quantile_param(self):
ser = Series([0.0, 0.1, 0.5, 0.9, 1.0])
with pytest.raises(ValueError):
ser.rolling(3).quantile(-0.1)
with pytest.raises(ValueError):
ser.rolling(3).quantile(10.0)
with pytest.raises(TypeError):
ser.rolling(3).quantile("foo")
def test_rolling_apply(self, raw):
# suppress warnings about empty slices, as we are deliberately testing
# with a 0-length Series
def f(x):
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
message=".*(empty slice|0 for slice).*",
category=RuntimeWarning,
)
return x[np.isfinite(x)].mean()
self._check_moment_func(np.mean, name="apply", func=f, raw=raw)
def test_rolling_std(self, raw):
self._check_moment_func(lambda x: np.std(x, ddof=1), name="std", raw=raw)
self._check_moment_func(
lambda x: np.std(x, ddof=0), name="std", ddof=0, raw=raw
)
def test_rolling_std_1obs(self):
vals = pd.Series([1.0, 2.0, 3.0, 4.0, 5.0])
result = vals.rolling(1, min_periods=1).std()
expected = pd.Series([np.nan] * 5)
tm.assert_series_equal(result, expected)
result = vals.rolling(1, min_periods=1).std(ddof=0)
expected = pd.Series([0.0] * 5)
tm.assert_series_equal(result, expected)
result = pd.Series([np.nan, np.nan, 3, 4, 5]).rolling(3, min_periods=2).std()
assert np.isnan(result[2])
def test_rolling_std_neg_sqrt(self):
# unit test from Bottleneck
# Test move_nanstd for neg sqrt.
a = pd.Series(
[
0.0011448196318903589,
0.00028718669878572767,
0.00028718669878572767,
0.00028718669878572767,
0.00028718669878572767,
]
)
b = a.rolling(window=3).std()
assert np.isfinite(b[2:]).all()
b = a.ewm(span=3).std()
assert np.isfinite(b[2:]).all()
def test_rolling_var(self, raw):
self._check_moment_func(lambda x: np.var(x, ddof=1), name="var", raw=raw)
self._check_moment_func(
lambda x: np.var(x, ddof=0), name="var", ddof=0, raw=raw
)
@td.skip_if_no_scipy
def test_rolling_skew(self, raw):
from scipy.stats import skew
self._check_moment_func(lambda x: skew(x, bias=False), name="skew", raw=raw)
@td.skip_if_no_scipy
def test_rolling_kurt(self, raw):
from scipy.stats import kurtosis
self._check_moment_func(lambda x: kurtosis(x, bias=False), name="kurt", raw=raw)
def _check_moment_func(
self,
static_comp,
name,
raw,
has_min_periods=True,
has_center=True,
has_time_rule=True,
fill_value=None,
zero_min_periods_equal=True,
**kwargs,
):
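        # Shared checker for rolling reducers: it compares the result of the
        # rolling method `name` against `static_comp` applied to the trailing
        # window, then exercises time-rule resampling, NaN exclusion,
        # min_periods handling, windows larger than the series and center=True.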
# inject raw
if name == "apply":
kwargs = copy.copy(kwargs)
kwargs["raw"] = raw
def get_result(obj, window, min_periods=None, center=False):
r = obj.rolling(window=window, min_periods=min_periods, center=center)
return getattr(r, name)(**kwargs)
series_result = get_result(self.series, window=50)
assert isinstance(series_result, Series)
tm.assert_almost_equal(series_result.iloc[-1], static_comp(self.series[-50:]))
frame_result = get_result(self.frame, window=50)
assert isinstance(frame_result, DataFrame)
tm.assert_series_equal(
frame_result.iloc[-1, :],
self.frame.iloc[-50:, :].apply(static_comp, axis=0, raw=raw),
check_names=False,
)
# check time_rule works
if has_time_rule:
win = 25
minp = 10
series = self.series[::2].resample("B").mean()
frame = self.frame[::2].resample("B").mean()
if has_min_periods:
series_result = get_result(series, window=win, min_periods=minp)
frame_result = get_result(frame, window=win, min_periods=minp)
else:
series_result = get_result(series, window=win, min_periods=0)
frame_result = get_result(frame, window=win, min_periods=0)
last_date = series_result.index[-1]
prev_date = last_date - 24 * offsets.BDay()
trunc_series = self.series[::2].truncate(prev_date, last_date)
trunc_frame = self.frame[::2].truncate(prev_date, last_date)
tm.assert_almost_equal(series_result[-1], static_comp(trunc_series))
tm.assert_series_equal(
frame_result.xs(last_date),
trunc_frame.apply(static_comp, raw=raw),
check_names=False,
)
# excluding NaNs correctly
obj = Series(randn(50))
obj[:10] = np.NaN
obj[-10:] = np.NaN
if has_min_periods:
result = get_result(obj, 50, min_periods=30)
tm.assert_almost_equal(result.iloc[-1], static_comp(obj[10:-10]))
# min_periods is working correctly
result = get_result(obj, 20, min_periods=15)
assert isna(result.iloc[23])
assert not isna(result.iloc[24])
assert not isna(result.iloc[-6])
assert isna(result.iloc[-5])
obj2 = Series(randn(20))
result = get_result(obj2, 10, min_periods=5)
assert isna(result.iloc[3])
assert notna(result.iloc[4])
if zero_min_periods_equal:
# min_periods=0 may be equivalent to min_periods=1
result0 = get_result(obj, 20, min_periods=0)
result1 = get_result(obj, 20, min_periods=1)
tm.assert_almost_equal(result0, result1)
else:
result = get_result(obj, 50)
tm.assert_almost_equal(result.iloc[-1], static_comp(obj[10:-10]))
# window larger than series length (#7297)
if has_min_periods:
for minp in (0, len(self.series) - 1, len(self.series)):
result = get_result(self.series, len(self.series) + 1, min_periods=minp)
expected = get_result(self.series, len(self.series), min_periods=minp)
nan_mask = isna(result)
tm.assert_series_equal(nan_mask, isna(expected))
nan_mask = ~nan_mask
tm.assert_almost_equal(result[nan_mask], expected[nan_mask])
else:
result = get_result(self.series, len(self.series) + 1, min_periods=0)
expected = get_result(self.series, len(self.series), min_periods=0)
nan_mask = isna(result)
tm.assert_series_equal(nan_mask, isna(expected))
nan_mask = ~nan_mask
tm.assert_almost_equal(result[nan_mask], expected[nan_mask])
# check center=True
if has_center:
if has_min_periods:
result = get_result(obj, 20, min_periods=15, center=True)
expected = get_result(
pd.concat([obj, Series([np.NaN] * 9)]), 20, min_periods=15
)[9:].reset_index(drop=True)
else:
result = get_result(obj, 20, min_periods=0, center=True)
expected = get_result(
pd.concat([obj, Series([np.NaN] * 9)]), 20, min_periods=0
)[9:].reset_index(drop=True)
tm.assert_series_equal(result, expected)
# shifter index
s = [f"x{x:d}" for x in range(12)]
if has_min_periods:
minp = 10
series_xp = (
get_result(
self.series.reindex(list(self.series.index) + s),
window=25,
min_periods=minp,
)
.shift(-12)
.reindex(self.series.index)
)
frame_xp = (
get_result(
self.frame.reindex(list(self.frame.index) + s),
window=25,
min_periods=minp,
)
.shift(-12)
.reindex(self.frame.index)
)
series_rs = get_result(
self.series, window=25, min_periods=minp, center=True
)
frame_rs = get_result(
self.frame, window=25, min_periods=minp, center=True
)
else:
series_xp = (
get_result(
self.series.reindex(list(self.series.index) + s),
window=25,
min_periods=0,
)
.shift(-12)
.reindex(self.series.index)
)
frame_xp = (
get_result(
self.frame.reindex(list(self.frame.index) + s),
window=25,
min_periods=0,
)
.shift(-12)
.reindex(self.frame.index)
)
series_rs = get_result(
self.series, window=25, min_periods=0, center=True
)
frame_rs = get_result(self.frame, window=25, min_periods=0, center=True)
if fill_value is not None:
series_xp = series_xp.fillna(fill_value)
frame_xp = frame_xp.fillna(fill_value)
tm.assert_series_equal(series_xp, series_rs)
tm.assert_frame_equal(frame_xp, frame_rs)
def _rolling_consistency_cases():
for window in [1, 2, 3, 10, 20]:
for min_periods in {0, 1, 2, 3, 4, window}:
if min_periods and (min_periods > window):
continue
for center in [False, True]:
yield window, min_periods, center
class TestRollingMomentsConsistency(Base):
def setup_method(self, method):
self._create_data()
# binary moments
def test_rolling_cov(self):
A = self.series
B = A + randn(len(A))
result = A.rolling(window=50, min_periods=25).cov(B)
tm.assert_almost_equal(result[-1], np.cov(A[-50:], B[-50:])[0, 1])
def test_rolling_corr(self):
A = self.series
B = A + randn(len(A))
result = A.rolling(window=50, min_periods=25).corr(B)
tm.assert_almost_equal(result[-1], np.corrcoef(A[-50:], B[-50:])[0, 1])
# test for correct bias correction
a = tm.makeTimeSeries()
b = tm.makeTimeSeries()
a[:5] = np.nan
b[:10] = np.nan
result = a.rolling(window=len(a), min_periods=1).corr(b)
tm.assert_almost_equal(result[-1], a.corr(b))
@pytest.mark.parametrize("func", ["cov", "corr"])
def test_rolling_pairwise_cov_corr(self, func):
check_pairwise_moment(self.frame, "rolling", func, window=10, min_periods=5)
@pytest.mark.parametrize("method", ["corr", "cov"])
def test_flex_binary_frame(self, method):
series = self.frame[1]
res = getattr(series.rolling(window=10), method)(self.frame)
res2 = getattr(self.frame.rolling(window=10), method)(series)
exp = self.frame.apply(lambda x: getattr(series.rolling(window=10), method)(x))
tm.assert_frame_equal(res, exp)
tm.assert_frame_equal(res2, exp)
frame2 = self.frame.copy()
frame2.values[:] = np.random.randn(*frame2.shape)
res3 = getattr(self.frame.rolling(window=10), method)(frame2)
exp = DataFrame(
{
k: getattr(self.frame[k].rolling(window=10), method)(frame2[k])
for k in self.frame
}
)
tm.assert_frame_equal(res3, exp)
@pytest.mark.slow
@pytest.mark.parametrize(
"window,min_periods,center", list(_rolling_consistency_cases())
)
def test_rolling_apply_consistency(
consistency_data, base_functions, no_nan_functions, window, min_periods, center
):
x, is_constant, no_nans = consistency_data
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore", message=".*(empty slice|0 for slice).*", category=RuntimeWarning,
)
# test consistency between rolling_xyz() and either (a)
# rolling_apply of Series.xyz(), or (b) rolling_apply of
# np.nanxyz()
functions = base_functions
# GH 8269
if no_nans:
functions = no_nan_functions + base_functions
for (f, require_min_periods, name) in functions:
rolling_f = getattr(
x.rolling(window=window, center=center, min_periods=min_periods), name,
)
if (
require_min_periods
and (min_periods is not None)
and (min_periods < require_min_periods)
):
continue
if name == "count":
rolling_f_result = rolling_f()
rolling_apply_f_result = x.rolling(
window=window, min_periods=min_periods, center=center
).apply(func=f, raw=True)
else:
if name in ["cov", "corr"]:
rolling_f_result = rolling_f(pairwise=False)
else:
rolling_f_result = rolling_f()
rolling_apply_f_result = x.rolling(
window=window, min_periods=min_periods, center=center
).apply(func=f, raw=True)
# GH 9422
if name in ["sum", "prod"]:
tm.assert_equal(rolling_f_result, rolling_apply_f_result)
@pytest.mark.parametrize("window", range(7))
def test_rolling_corr_with_zero_variance(window):
# GH 18430
s = pd.Series(np.zeros(20))
other = pd.Series(np.arange(20))
assert s.rolling(window=window).corr(other=other).isna().all()
def test_flex_binary_moment():
# GH3155
# don't blow the stack
msg = "arguments to moment function must be of type np.ndarray/Series/DataFrame"
with pytest.raises(TypeError, match=msg):
_flex_binary_moment(5, 6, None)
def test_corr_sanity():
# GH 3155
df = DataFrame(
np.array(
[
[0.87024726, 0.18505595],
[0.64355431, 0.3091617],
[0.92372966, 0.50552513],
[0.00203756, 0.04520709],
[0.84780328, 0.33394331],
[0.78369152, 0.63919667],
]
)
)
res = df[0].rolling(5, center=True).corr(df[1])
assert all(np.abs(np.nan_to_num(x)) <= 1 for x in res)
# and some fuzzing
for _ in range(10):
df = DataFrame(np.random.rand(30, 2))
res = df[0].rolling(5, center=True).corr(df[1])
try:
assert all(np.abs(np.nan_to_num(x)) <= 1 for x in res)
except AssertionError:
print(res)
def test_rolling_cov_diff_length():
# GH 7512
s1 = Series([1, 2, 3], index=[0, 1, 2])
s2 = Series([1, 3], index=[0, 2])
result = s1.rolling(window=3, min_periods=2).cov(s2)
expected = Series([None, None, 2.0])
tm.assert_series_equal(result, expected)
s2a = Series([1, None, 3], index=[0, 1, 2])
result = s1.rolling(window=3, min_periods=2).cov(s2a)
tm.assert_series_equal(result, expected)
def test_rolling_corr_diff_length():
# GH 7512
s1 = Series([1, 2, 3], index=[0, 1, 2])
s2 = Series([1, 3], index=[0, 2])
result = s1.rolling(window=3, min_periods=2).corr(s2)
expected = Series([None, None, 1.0])
tm.assert_series_equal(result, expected)
s2a = Series([1, None, 3], index=[0, 1, 2])
result = s1.rolling(window=3, min_periods=2).corr(s2a)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"f",
[
lambda x: x.rolling(window=10, min_periods=5).cov(x, pairwise=False),
lambda x: x.rolling(window=10, min_periods=5).corr(x, pairwise=False),
lambda x: x.rolling(window=10, min_periods=5).max(),
lambda x: x.rolling(window=10, min_periods=5).min(),
lambda x: x.rolling(window=10, min_periods=5).sum(),
lambda x: x.rolling(window=10, min_periods=5).mean(),
lambda x: x.rolling(window=10, min_periods=5).std(),
lambda x: x.rolling(window=10, min_periods=5).var(),
lambda x: x.rolling(window=10, min_periods=5).skew(),
lambda x: x.rolling(window=10, min_periods=5).kurt(),
lambda x: x.rolling(window=10, min_periods=5).quantile(quantile=0.5),
lambda x: x.rolling(window=10, min_periods=5).median(),
lambda x: x.rolling(window=10, min_periods=5).apply(sum, raw=False),
lambda x: x.rolling(window=10, min_periods=5).apply(sum, raw=True),
lambda x: x.rolling(win_type="boxcar", window=10, min_periods=5).mean(),
],
)
@td.skip_if_no_scipy
def test_rolling_functions_window_non_shrinkage(f):
# GH 7764
s = Series(range(4))
s_expected = Series(np.nan, index=s.index)
df = DataFrame([[1, 5], [3, 2], [3, 9], [-1, 0]], columns=["A", "B"])
df_expected = DataFrame(np.nan, index=df.index, columns=df.columns)
s_result = f(s)
| tm.assert_series_equal(s_result, s_expected) | pandas._testing.assert_series_equal |
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
def heart_dataset(dir='heart.csv'):
df = pd.read_csv(dir)
    # Drop rows where the number of major (calcified) vessels ('ca') is 4:
    # the valid range is 0-3, and 4 is used in this dataset as a placeholder
    # for missing values.
df = df[df['ca'] != 4].copy()
chest_pain = | pd.get_dummies(df['cp'], prefix='cp') | pandas.get_dummies |
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 12 13:48:34 2019
@author: vrrodovalho
This module contains scripts for plotting graphs of Qualitative Proteomics
- Donuts for COG categories, with 2 hierarchical levels
- Donuts for subcellular localizations with just 1 level
- Simple Venn diagrams with 2 categories and 1 intersection
- Heatmaps that shows frequencies distribution in 2 categories
"""
import os
import sys
import pathlib
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib_venn
def calculate_cogs(cogs_annotation, cogs_hierarchy, annot_col='COG',
hierarchy_col='l2'):
'''
Returns the COG hierarchy with the respective COG category frequencies.
Parameters
----------
cogs_annotation : DataFrame
A dataframe containing a column with COG categories.
cogs_hierarchy : DataFrame
A dataframe containing a column with COG categories distribuition.
annot_col : str, optional
The name of the column in which there is the annotation for COG
categories. The default is 'COG'.
hierarchy_col : str, optional
The name of the column in which there is the desired level for COG
hierarchy representation. The default is 'l2'.
Returns
-------
cogs : DataFrame
        A DataFrame containing COG category frequencies in a given dataset.
'''
annot = cogs_annotation.copy()
cogs = cogs_hierarchy.copy()
# calculate freqs
annot[annot_col].fillna('?', inplace=True)
COGs = list(''.join(annot[annot_col].tolist()))
COGs_freqs = {i:COGs.count(i) for i in set(COGs)}
# complete cogs with freqs and create lists for graphs
cogs['freqs'] = cogs[hierarchy_col].map(COGs_freqs)
cogs.dropna(subset=['freqs'], inplace=True)
# relative freqs
cogs['rel'] = round(cogs['freqs'] / cogs['freqs'].sum() * 100,1)
return cogs
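# Illustrative usage of calculate_cogs (toy data; column names follow the
# defaults above and are not taken from the original pipeline):
#   annot = pd.DataFrame({'COG': ['J', 'KJ', None]})
#   hierarchy = pd.DataFrame({'l1': ['INFORMATION STORAGE AND PROCESSING'] * 2,
#                             'l2': ['J', 'K']})
#   calculate_cogs(annot, hierarchy)
#   # -> hierarchy rows for 'J' and 'K' with 'freqs' (2 and 1) and 'rel' columns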
def construct_list_of_colors(cogs_freqs, higher_hierarchy_col='l1'):
'''
Produces the external and internal colors for a COG nested donut plot.
Parameters
----------
cogs_freqs : DataFrame
A DataFrame containing COG categories frequencies and the COG
2-level hierarchy system.
higher_hierarchy_col : str
The name of the column in cogs_freqs where there is the higer COG
category. Example: metabolism, information processing...
The default is 'l1'.
Returns
-------
A tuple of 2 lists of RGB colors. Each RGB color is a tuple of 3 values.
These are the colors to be used internally and externally in the nested
donut plot.
external_colors : list
Lists of darker RGB colors that will be used externally.
internal_colors : list
Lists of gradually lighter RGB colors that will be used internally.
'''
palette = [plt.cm.Blues, plt.cm.Oranges, plt.cm.Purples, plt.cm.Greys]
external_colors = [palette[0](0.7), palette[1](0.7),
palette[2](0.7), palette[3](0.7)]
internal_colors = []
subgroups_sizes = cogs_freqs.groupby(higher_hierarchy_col).size().to_dict()
group = 0
higher_hierarchy_group = list(
cogs_freqs.loc[:,higher_hierarchy_col].unique())
for subgroup in higher_hierarchy_group:
size = subgroups_sizes[subgroup]
color_group = palette[group]
group += 1
decimals = np.arange(0.60, 0.05, -0.05)
colors = [color_group(i) for i in decimals[:size]]
internal_colors.extend(colors)
return external_colors, internal_colors
def plot_nested_cog(cogs_annotation, annot_col, cogs_hierarchy_map,
hierarchy_levels=['l1','l2'], output_dir='', filename='',
save_fig=True, file_format='tif', dpi=600):
'''
Plot a nested donut graph, with a 2-level hierarchy system for COG
categories.
Parameters
----------
cogs_annotation : DataFrame
DataFrame containing one column with COG categories annotation.
annot_col: str
The name of the column in cogs_annotation where the annotation is.
cogs_hierarchy_map : DataFrame
A DataFrame contaning at least 2 columns with the 2-level hierarchy
system.
hierarchy_levels : list
The list of 2 strings representing the names of the columns in
cogs_hierarchy_map that contains hierarchy level data.
The default is ['l1','l2'].
output_dir : str
The path where the file will be saved.
filename : str
The name of the file that will be saved.
save_fig : bool
        Whether or not to save the image to a file. The default is True.
Returns
-------
cogs_freqs : DataFrame
        A DataFrame containing COG category frequencies in a given dataset.
'''
lvl = hierarchy_levels
cogs_freqs = calculate_cogs(cogs_annotation=cogs_annotation,
annot_col=annot_col,
cogs_hierarchy=cogs_hierarchy_map,
hierarchy_col=lvl[1])
l1 = cogs_freqs.groupby(lvl[0]).sum()['freqs'].reset_index()
l2 = cogs_freqs[[lvl[1],'freqs']].reset_index()
l1_names, l1_values = l1[lvl[0]].to_list(), l1['freqs'].to_list()
l2_names, l2_values = l2[lvl[1]].to_list(), l2['freqs'].to_list()
l1_names_lines = ['\nAND '.join(i.split('AND')) for i in l1_names]
# Create colors
external_colors, internal_colors = construct_list_of_colors(cogs_freqs,
higher_hierarchy_col='l1')
# initialize figure
fig, ax = plt.subplots()
ax.axis('equal')
# First Ring (outside)
mypie, _ = ax.pie(l1_values, radius=1.3, labels=l1_names_lines,
labeldistance=1.1,
colors=external_colors )
plt.setp( mypie, width=0.3, edgecolor='white')
# Second Ring (Inside)
mypie2, _ = ax.pie(l2_values, radius=1.3-0.3,
labels=l2_names, labeldistance=0.8,
colors=internal_colors)
plt.setp( mypie2, width=0.4, edgecolor='white')
plt.margins(0,0)
if save_fig:
output_file = output_dir / filename
plt.savefig(output_file,format=file_format, dpi=dpi,
bbox_inches="tight")
# show it
plt.show()
return cogs_freqs
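# Illustrative call (assumes the COG annotation table and the 2-level COG map
# are loaded as dataframes, e.g. from the files defined at the bottom of this
# module; 'cogs_map' is a placeholder variable name):
#   cogs_map = pd.read_excel(cogs_file)
#   plot_nested_cog(cogs_annotation=proteomics_UC, annot_col='COG',
#                   cogs_hierarchy_map=cogs_map, hierarchy_levels=['l1', 'l2'],
#                   output_dir=output_dir, filename='cog_nested_donut.tif')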
def plot_donut(df, data_column='', title='', relative=True, absolute=True,
palette=sns.color_palette("colorblind", 30), save_fig=True,
output_dir='', file_name='', file_format='tiff', dpi=600):
'''
Plots a DONUT graph, representing proportions of categories.
Parameters
----------
df : DataFrame
A dataframe containing a column in which is the data whose frequencies
will be plotted.
data_column : str
The name of the column in df where the data is placed.
title : str, optional
        A title that will be plotted above the graph. The default is ''.
relative : bool, optional
        Whether to include relative frequencies of the categories (%).
The default is True.
absolute : bool, optional
        Whether to include absolute frequencies of the categories.
The default is True.
palette : Seaborn palette object, optional
Seaborn color palette object. The default is
sns.color_palette("colorblind", 30).
save_fig : bool, optional
        Whether to save the image to a file. The default is True.
output_dir : str, optional
Directory where the image will be saved.
file_name : str, optional
File name to save image.
file_format : str, optional
File format to save image. The default is 'tiff'.
dpi : int, optional
Resolution to save image. The default is 600.
Returns
-------
data : DataFrame
DataFrame containing the calculated frequencies.
'''
df = df.copy()
# renames
rename_dict = {"Cytoplasmic Membrane":"Membrane"}
df[data_column].replace(rename_dict, inplace=True)
# get data, count them and put the results in X and Y lists
df[data_column].fillna('?', inplace=True)
df = df.reset_index()
items = df[data_column].tolist()
freqs = {i:items.count(i) for i in set(items)}
data = pd.DataFrame.from_dict(freqs, orient='index', columns=['abs'])
freqs_rel = {i: round(items.count(i)/len(items)*100,1) \
for i in set(items)}
data['rel'] = data.index.to_series().map(freqs_rel)
data = data.sort_values(by=['abs'], ascending=False)
y = data['abs']
# choose representation in absolute values and/or percentage
if relative and absolute:
data['formatted'] = data.index.astype(str) + ' ' + \
data['abs'].astype(str) + ' (' + \
data['rel'].astype(str) + '%)'
elif relative and not absolute:
data['formatted'] = data.index.astype(str) + ' ' + \
' (' + \
data['rel'].astype(str) + '%)'
elif not relative and absolute:
data['formatted'] = data.index.astype(str) + ' (' + \
data['abs'].astype(str) + ')'
x = data['formatted']
# start plotting
fig, ax = plt.subplots()
ax.axis('equal')
my_circle=plt.Circle( (0,0), 0.6, color='white')
plt.pie(y, labels=x, colors=palette, startangle=0,counterclock=False)
p = plt.gcf()
p.gca().add_artist(my_circle)
# draw!
plt.draw()
plt.title(title)
# save figure
if save_fig:
plt.savefig(output_dir/file_name, format=file_format, dpi=dpi,
bbox_inches="tight")
return data
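# Illustrative call for a one-level donut (the column name 'Localization' is
# an assumption; any categorical column works):
#   plot_donut(proteomics_UC, data_column='Localization',
#              title='Subcellular localization', output_dir=output_dir,
#              file_name='localization_donut.tif')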
def plot_venn(df, data_column="", weighted=False, intersection_cat='UF/YEL',
title='', palette=sns.color_palette("colorblind", 30),
save_fig=True, output_dir='', file_name='', file_format='tiff',
dpi=600):
'''
    This is a function for plotting a simple Venn diagram with 2 categories.
Parameters
----------
df : DataFrame
A dataframe containing a column in which there is the data to be
        plotted in a Venn diagram.
data_column : str
The name of the column in df where the data is placed.
weighted : bool
If False, venn circles have the same sizes. If True, the sizes are
proportional to the numeric value.
intersection_cat : str
        One of the 3 categories in data_column that will be considered the
        intersection category in the Venn diagram.
title : str, optional
        A title that will be plotted above the graph. The default is ''.
palette : Seaborn palette object, optional
Seaborn color palette object. The default is
sns.color_palette("colorblind", 30).
save_fig : bool, optional
        Whether to save the image to a file. The default is True.
output_dir : str, optional
Directory where the image will be saved.
file_name : str, optional
File name to save image.
file_format : str, optional
File format to save image. The default is 'tiff'.
dpi : int, optional
Resolution to save image. The default is 600.
Returns
-------
data : DataFrame
A DataFrame containing the values used to plot the venn.
'''
df = df.copy()
# get data, count them and put the results in X and Y lists
df[data_column].fillna('?', inplace=True)
df = df.reset_index()
items = df[data_column].tolist()
freqs = {i:items.count(i) for i in set(items)}
data = pd.DataFrame.from_dict(freqs, orient='index', columns=['freq'])
data = data.sort_values(by=['freq'], ascending=False)
# prepare subsets and set_labels for venn
intersection_value = data.loc[ intersection_cat, 'freq']
data = data.drop([intersection_cat])
x = data.index.to_list()
y = data['freq']
subsets = (y[0], y[1], intersection_value)
set_labels = (x[0], x[1])
# start plotting
fig, ax = plt.subplots()
if weighted:
v = matplotlib_venn.venn2(subsets=subsets, set_labels=set_labels)
else:
v = matplotlib_venn.venn2_unweighted(subsets=subsets,
set_labels=set_labels)
# set colors and alphas
v.get_patch_by_id('10').set_color(palette[0])
v.get_patch_by_id('10').set_alpha(0.8)
v.get_patch_by_id('01').set_color(palette[1])
v.get_patch_by_id('01').set_alpha(0.8)
v.get_patch_by_id('11').set_color(palette[2])
v.get_patch_by_id('11').set_edgecolor('none')
v.get_patch_by_id('11').set_alpha(0.6)
# set font sizes
for text in v.set_labels:
text.set_fontsize(14)
for text in v.subset_labels:
text.set_fontsize(16)
# set label positions
lbl_a = v.get_label_by_id("A")
xa, ya = lbl_a.get_position()
lbl_a.set_position((xa-0.2, ya+0.05))
lbl_b = v.get_label_by_id("B")
xb, yb = lbl_b.get_position()
lbl_b.set_position((xb+0.25, yb+0.1))
# draw!
plt.draw()
plt.title(title)
# save figure
if save_fig:
plt.savefig(output_dir/file_name, format=file_format, dpi=dpi,
bbox_inches="tight")
return data
def helper_frequencies(df, freq_column, split_char=False,
forbidden_prefix='map'):
'''
This is a helper function that extracts qualitative data from a column
of a DataFrame, counts them and return a dictionary of frequencies.
If there is more than 1 value in a row, it is possible to split this
row and account for each value separately. It is also possible to
exclude values based on a prefix.
Parameters
----------
df : DataFrame
A dataframe containing the data to be analyzed.
freq_column : str
The column in df which contains the data whose frequencies will be
calculated.
split_char : str, bool or NoneType, optional
A character that will be used to split each row if there are multiple
values in each row. If this is set to False, each row will be
considered a single value. If this is set to None, each row will be
        considered a sequence of single-character values with no delimiter.
If this is set to a string, this string will be considered the split
character that separates the values in each row. The default is False.
forbidden_prefix : str, optional
Values starting with the string set in this parameter will be excluded
from analysis. The default is 'map'.
Returns
-------
freqs : dict
A dictionary of frequencies.
'''
values = df[freq_column].tolist()
if split_char == False:
new_values = values
elif split_char == None:
string = ''.join(values)
new_values = list(string)
else:
string = split_char.join(values)
new_values = string.split(split_char)
if forbidden_prefix:
filtered_list = [x for x in new_values \
if not x.startswith(forbidden_prefix)]
else:
filtered_list = new_values
freqs = {i:filtered_list.count(i) for i in set(filtered_list)}
return freqs
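# Worked example of the split_char semantics (toy data):
#   helper_frequencies(pd.DataFrame({'x': ['J,K', 'K', 'map00010']}), 'x',
#                      split_char=',')
#   # -> {'J': 1, 'K': 2}   ('map00010' dropped by forbidden_prefix='map')
#   helper_frequencies(pd.DataFrame({'x': ['JK', 'K']}), 'x', split_char=None)
#   # -> {'J': 1, 'K': 2}   (each character counted separately)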
def compare_n_frequencies(df, freq_column='COG', category_column='medium',
category_map={}, split_char=',', drop_empties=False):
'''
    This function compares the frequencies of values in 2 or more categories.
Example: the frequencies of COG categories (the values) are compared in
2 conditions (defined as categories), such as 2 culture media.
    It returns a dataframe with multiple frequency columns, one column
    for each category that was specified.
Parameters
----------
df : DataFrame
        A dataframe containing the values to be counted and the category column.
freq_column : str, optional
The name of the column where are the values whose frequency will be
calculated. The default is 'COG'.
category_column : str, optional
The name of the column where the categories are specified. Each one
of this categories will be represented as a column of frequencies in
the final dataframe. The default is 'medium'.
category_map : dict, optional
A dictionary that maps multiple values for each category, grouping
values in categories or changing their name. If each individual value
should be accounted as its own category, without change, an empty
dictionary should be passed. Otherwise, categories should be organized
in the format {'':[]}. The default is {}.
split_char : str, bool or NoneType, optional
A character that will be used to split each row if there are multiple
values in each row. If this is set to False, each row will be
considered a single value. If this is set to None, each row will be
        considered a sequence of single-character values with no delimiter.
If this is set to a string, this string will be considered the split
character that separates the values in each row. The default is False.
drop_empties : bool, optional
        This allows choosing whether empty annotations (nan) are dropped
        (True) or kept (False) and attributed to unknown function (S/?).
        The default is False.
Returns
-------
    new_df : DataFrame
        A dataframe with one frequency column per category and a 'Total' column.
'''
df = df.copy()
# choose if empty annotations will not be considered or considered as
# unknown function (S/?)
if drop_empties:
df.dropna(subset=[freq_column], inplace=True)
else:
if freq_column == "COG":
df[freq_column].fillna('S', inplace=True)
else:
df[freq_column].fillna('?', inplace=True)
# if category mapping is supplied, get them.
# otherwise, consider each unique value in category column
if category_map:
categories = list(category_map.keys())
else:
categories = list(df[category_column].unique())
category_map = { i:[i] for i in categories }
# and a list of all COG categories found
all_COGs = []
# generate a dict of frequencies for each category
frequencies = {}
for cat in categories:
sub_df = df.loc[ df[category_column].isin( category_map[cat] ),:]
freqs = helper_frequencies(sub_df, freq_column=freq_column,
split_char=split_char)
all_COGs.extend(list(freqs.keys()))
frequencies[cat] = freqs
# generate a dataframe with all COG categories found and a column
# with the frequencies for each category
new_df = pd.DataFrame(list(set(all_COGs)), columns=[freq_column])
for cat in categories:
freqs = frequencies[cat]
new_df[cat] = new_df[freq_column].map(freqs)
# if nan was assigned to a category frequency, replace it by 0
new_df.fillna(0, inplace=True)
# add a column with the total sum to the dataframe
new_df['Total'] = new_df.loc[:,categories].sum(axis=1)
return new_df
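# Illustrative call: compare COG frequencies between two conditions, grouping
# the raw 'medium' labels with category_map (the labels are assumptions):
#   compare_n_frequencies(proteomics_UC, freq_column='COG',
#                         category_column='medium',
#                         category_map={'UF': ['UF'], 'YEL': ['YEL']},
#                         split_char=None)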
def plot_heatmap(df, cat1_col='COG', cat1_split_char=False, cat2_col='',
cat2_map='', sort_heatmap_by='Total', extra_map={},
replace_names={}, save_fig=True, output_dir='', filename='',
file_format='tif', dpi=600, colors="Blues"):
'''
Plots a heatmap to show frequencies distribution towards 2 categories:
a main category, such as COG category, and a secondary category,
such as culture medium.
Parameters
----------
df : DataFrame
        A DataFrame containing at least 2 columns, representing the 2
variables that should be considered in the representation.
cat1_col : str
The name of the column of the main category. The default is 'COG'.
cat1_split_char : str, bool or NoneType, optional
A character that will be used to split each row if there are multiple
values in each row. If this is set to False, each row will be
considered a single value. If this is set to None, each row will be
        considered a sequence of single-character values with no delimiter.
If this is set to a string, this string will be considered the split
character that separates the values in each row. The default is False.
cat2_col : str
The name of the column of the secondary category.
The default is 'medium'.
cat2_map : dict, optional
A dictionary that maps multiple values for each category, grouping
values in categories or changing their name. If each individual value
should be accounted as its own category, without change, an empty
dictionary should be passed. Otherwise, categories should be organized
in the format {'':[]}. The default is {}.
sort_heatmap_by : str, optional
        The name of the column in the final heatmap that will be used to sort it.
The default is 'Total', the column produced with the sum.
extra_map : dict, optional
A dictionary to replace some texts in heatmap, for a more complete
annotation, if needed. The keys should be the the categories in
cat1_col. The default is {}.
replace_names : dict, optional
A dictionary to replace column names in the heatmap for a better
        representation, if needed. The default is {}.
save_fig : bool, optional
Wether to save the image to a file. The default is True.
output_dir : str, optional
Directory where the image will be saved.
file_name : str, optional
File name to save image.
file_format : str, optional
File format to save image. The default is 'tiff'.
dpi : int, optional
Resolution to save image. The default is 600.
colors : str, optional
Colors for the heatmap. The default is "Blues".
Returns
-------
    df : DataFrame
        A dataframe representing the final heatmap.
'''
# calculate frequencies for main category
column = cat1_col
freqs = compare_n_frequencies(df, freq_column=column, drop_empties=False,
category_column=cat2_col,
category_map=cat2_map,
split_char=cat1_split_char)
# replace columns name in order to show in heatmap
df = freqs.copy()
if column in replace_names:
column = replace_names[column]
df.columns = df.columns.to_series().replace(replace_names)
df = df.set_index(column)
df = df.astype(int)
df = df.reset_index()
# if we have to complete the text of the main category that will be showed
# in the heatmap
if extra_map:
df[column] = df[column].map(extra_map)
# if we need to sort the heatmap, for better presentation of the colors
if len(sort_heatmap_by) > 0:
df = df.sort_values(by=[sort_heatmap_by],ascending=False)
# start plotting
# choose sizes for heatmap depending if its a COG heatmap or smaller ones
sns.set_style("dark")
len_y, len_x = df.shape[0], df.shape[1]
if column == 'COG category':
sns.set(font_scale=0.8)
cog_size=(19,5)
len_x = cog_size[1]
len_y = cog_size[0]
fig2 = plt.figure(figsize=[4, 4], constrained_layout=False)
else:
sns.set(font_scale=1.3)
fig2 = plt.figure(constrained_layout=False)
# add subplots
gs = fig2.add_gridspec(len_y, len_x)
ax1 = fig2.add_subplot(gs[:,:-1])
ax2 = fig2.add_subplot(gs[:,-1:])
df = df.set_index(column)
df1 = df.iloc[:,:-1]
df2 = df.iloc[:,-1:]
# plot main heatmap
sns.heatmap(df1, cmap=colors, center=1, annot=True, # annot_kws=squares_font,
fmt="d", ax=ax1, cbar = False, robust=True)
df = df.reset_index()
# y and x labels
ax1.set_yticklabels(labels=df[column], va='center', rotation=0,
position=(0,0.28))
ax1.set_xticklabels(labels=df.columns[1:-1])
# plot totals heatmap
df2.index.name = None
sns.heatmap(df2, cmap=colors, center=1, annot=True, #annot_kws=squares_font,
fmt="d", ax=ax2, cbar=True, xticklabels=True, yticklabels=False,
robust=True)
# save figure
if save_fig:
fig2.savefig(output_dir/filename, format=file_format, dpi=dpi,
bbox_inches="tight")
return df
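# Illustrative call (the condition labels in cat2_map are assumptions based on
# the 'UF'/'YEL' media referenced elsewhere in this module):
#   plot_heatmap(proteomics_UC, cat1_col='COG', cat1_split_char=None,
#                cat2_col='medium',
#                cat2_map={'UF-only': ['UF'], 'UF-YEL': ['UF/YEL'],
#                          'YEL-only': ['YEL']},
#                output_dir=output_dir, filename='cog_heatmap.tif')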
def hierarchical_grouped_barplots(df, cat1_col='COG', cat1_split_char=False,
cat2_col='', cat2_map='',
sort_heatmap_by='Total', extra_map={},
replace_names={}, save_fig=True,
output_dir='', filename='',
file_format='tif', dpi=600, colors="Blues"):
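    '''
    Builds the same per-condition frequency table as plot_heatmap(), but draws
    it as a grouped (side-by-side) barplot: one bar group per COG category and
    one bar per condition. The signature mirrors plot_heatmap(); the melted
    frequency table used for plotting is returned.
    '''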
# calculate frequencies for main category
column = cat1_col
freqs = compare_n_frequencies(df, freq_column=column, drop_empties=False,
category_column=cat2_col,
category_map=cat2_map,
split_char=cat1_split_char)
# replace columns name in order to show in heatmap
df = freqs.copy()
if column in replace_names:
column = replace_names[column]
df.columns = df.columns.to_series().replace(replace_names)
df = df.set_index(column)
df = df.astype(int)
df = df.reset_index()
# if we have to complete the text of the main category that will be showed
# in the heatmap
if extra_map:
df[column] = df[column].map(extra_map)
# if we need to sort the heatmap, for better presentation of the colors
if len(sort_heatmap_by) > 0:
df = df.sort_values(by=[sort_heatmap_by],ascending=False)
df = pd.melt(df, id_vars=['COG category'], var_name='Condition',
value_name='Absolute frequency',
value_vars=['UF-only','UF-YEL', 'YEL-only'])
    # Draw a grouped barplot: one bar group per COG category, one bar per condition
g = sns.catplot(data=df, kind="bar", x="COG category", y='Absolute frequency',
hue="Condition", ci="sd", palette="colorblind", alpha=.9,
height=6, aspect=2.0)
# g.despine(left=True)
# g.legend.set_title("")
return df
##############################################################################
# DIRECTORY SYSTEM
src_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
main_dir = os.path.dirname(src_dir)
root_dir = os.path.dirname(main_dir)
data_dir = pathlib.Path(main_dir) / 'data'
input_dir = pathlib.Path(data_dir) / 'input'
output_dir = pathlib.Path(data_dir) / 'output'
sys.path.insert(0, root_dir)
# FILE PATHS
proteomics_SEC_and_UC_file = input_dir / 'proteomics_SEC_and_UC_curated.xlsx'
proteomics_UC_file = input_dir / 'proteomics_UC.xlsx'
proteomics_core_file = input_dir / 'proteome_core.xlsx'
proteomics_accessory_file = input_dir / 'proteome_accessory.xlsx'
proteomics_single_file = input_dir / 'proteome_single.xlsx'
proteomics_not_EVs_file = input_dir / 'proteome_not_EVs.xlsx'
proteomics_SEC_only_file = input_dir / 'proteome_SEC_only.xlsx'
proteomics_UC_only_file = input_dir / 'proteome_UC_only.xlsx'
cogs_file = input_dir / 'COGs.xlsx'
# READ FILES
proteomics_SEC_and_UC = pd.read_excel(proteomics_SEC_and_UC_file)
proteomics_UC = pd.read_excel(proteomics_UC_file)
proteomics_core = pd.read_excel(proteomics_core_file)
proteomics_accessory = pd.read_excel(proteomics_accessory_file)
proteomics_single = | pd.read_excel(proteomics_single_file) | pandas.read_excel |
# pylint: disable-msg=E1101,W0612
from datetime import datetime, time, timedelta, date
import sys
import os
import operator
from distutils.version import LooseVersion
import nose
import numpy as np
randn = np.random.randn
from pandas import (Index, Series, TimeSeries, DataFrame,
isnull, date_range, Timestamp, Period, DatetimeIndex,
Int64Index, to_datetime, bdate_range, Float64Index)
import pandas.core.datetools as datetools
import pandas.tseries.offsets as offsets
import pandas.tseries.tools as tools
import pandas.tseries.frequencies as fmod
import pandas as pd
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from pandas.tslib import NaT, iNaT
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.index as _index
from pandas.compat import range, long, StringIO, lrange, lmap, zip, product
import pandas.core.datetools as dt
from numpy.random import rand
from numpy.testing import assert_array_equal
from pandas.util.testing import assert_frame_equal
import pandas.compat as compat
import pandas.core.common as com
from pandas import concat
from pandas import _np_version_under1p7
from numpy.testing.decorators import slow
def _skip_if_no_pytz():
try:
import pytz
except ImportError:
raise nose.SkipTest("pytz not installed")
def _skip_if_has_locale():
import locale
lang, _ = locale.getlocale()
if lang is not None:
raise nose.SkipTest("Specific locale is set {0}".format(lang))
class TestTimeSeriesDuplicates(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
dates = [datetime(2000, 1, 2), datetime(2000, 1, 2),
datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 3), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 4),
datetime(2000, 1, 4), datetime(2000, 1, 5)]
self.dups = Series(np.random.randn(len(dates)), index=dates)
def test_constructor(self):
tm.assert_isinstance(self.dups, TimeSeries)
tm.assert_isinstance(self.dups.index, DatetimeIndex)
def test_is_unique_monotonic(self):
self.assertFalse(self.dups.index.is_unique)
def test_index_unique(self):
uniques = self.dups.index.unique()
expected = DatetimeIndex([datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 5)])
self.assertEqual(uniques.dtype, 'M8[ns]') # sanity
self.assertTrue(uniques.equals(expected))
self.assertEqual(self.dups.index.nunique(), 4)
# #2563
self.assertTrue(isinstance(uniques, DatetimeIndex))
dups_local = self.dups.index.tz_localize('US/Eastern')
dups_local.name = 'foo'
result = dups_local.unique()
expected = DatetimeIndex(expected, tz='US/Eastern')
self.assertTrue(result.tz is not None)
self.assertEqual(result.name, 'foo')
self.assertTrue(result.equals(expected))
# NaT
arr = [ 1370745748 + t for t in range(20) ] + [iNaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
arr = [ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
def test_index_dupes_contains(self):
d = datetime(2011, 12, 5, 20, 30)
ix = DatetimeIndex([d, d])
self.assertTrue(d in ix)
def test_duplicate_dates_indexing(self):
ts = self.dups
uniques = ts.index.unique()
for date in uniques:
result = ts[date]
mask = ts.index == date
total = (ts.index == date).sum()
expected = ts[mask]
if total > 1:
assert_series_equal(result, expected)
else:
assert_almost_equal(result, expected[0])
cp = ts.copy()
cp[date] = 0
expected = Series(np.where(mask, 0, ts), index=ts.index)
assert_series_equal(cp, expected)
self.assertRaises(KeyError, ts.__getitem__, datetime(2000, 1, 6))
# new index
ts[datetime(2000,1,6)] = 0
self.assertEqual(ts[datetime(2000,1,6)], 0)
def test_range_slice(self):
idx = DatetimeIndex(['1/1/2000', '1/2/2000', '1/2/2000', '1/3/2000',
'1/4/2000'])
ts = Series(np.random.randn(len(idx)), index=idx)
result = ts['1/2/2000':]
expected = ts[1:]
assert_series_equal(result, expected)
result = ts['1/2/2000':'1/3/2000']
expected = ts[1:4]
assert_series_equal(result, expected)
def test_groupby_average_dup_values(self):
result = self.dups.groupby(level=0).mean()
expected = self.dups.groupby(self.dups.index).mean()
assert_series_equal(result, expected)
def test_indexing_over_size_cutoff(self):
import datetime
# #1821
old_cutoff = _index._SIZE_CUTOFF
try:
_index._SIZE_CUTOFF = 1000
# create large list of non periodic datetime
dates = []
sec = datetime.timedelta(seconds=1)
half_sec = datetime.timedelta(microseconds=500000)
d = datetime.datetime(2011, 12, 5, 20, 30)
n = 1100
for i in range(n):
dates.append(d)
dates.append(d + sec)
dates.append(d + sec + half_sec)
dates.append(d + sec + sec + half_sec)
d += 3 * sec
# duplicate some values in the list
duplicate_positions = np.random.randint(0, len(dates) - 1, 20)
for p in duplicate_positions:
dates[p + 1] = dates[p]
df = DataFrame(np.random.randn(len(dates), 4),
index=dates,
columns=list('ABCD'))
pos = n * 3
timestamp = df.index[pos]
self.assertIn(timestamp, df.index)
# it works!
df.ix[timestamp]
self.assertTrue(len(df.ix[[timestamp]]) > 0)
finally:
_index._SIZE_CUTOFF = old_cutoff
def test_indexing_unordered(self):
# GH 2437
rng = date_range(start='2011-01-01', end='2011-01-15')
ts = Series(randn(len(rng)), index=rng)
ts2 = concat([ts[0:4],ts[-4:],ts[4:-4]])
for t in ts.index:
s = str(t)
expected = ts[t]
result = ts2[t]
self.assertTrue(expected == result)
# GH 3448 (ranges)
def compare(slobj):
result = ts2[slobj].copy()
result = result.sort_index()
expected = ts[slobj]
assert_series_equal(result,expected)
compare(slice('2011-01-01','2011-01-15'))
compare(slice('2010-12-30','2011-01-15'))
compare(slice('2011-01-01','2011-01-16'))
# partial ranges
compare(slice('2011-01-01','2011-01-6'))
compare(slice('2011-01-06','2011-01-8'))
compare(slice('2011-01-06','2011-01-12'))
# single values
result = ts2['2011'].sort_index()
expected = ts['2011']
assert_series_equal(result,expected)
# diff freq
rng = date_range(datetime(2005, 1, 1), periods=20, freq='M')
ts = Series(np.arange(len(rng)), index=rng)
ts = ts.take(np.random.permutation(20))
result = ts['2005']
for t in result.index:
self.assertTrue(t.year == 2005)
def test_indexing(self):
idx = date_range("2001-1-1", periods=20, freq='M')
ts = Series(np.random.rand(len(idx)),index=idx)
# getting
# GH 3070, make sure semantics work on Series/Frame
expected = ts['2001']
df = DataFrame(dict(A = ts))
result = df['2001']['A']
assert_series_equal(expected,result)
# setting
ts['2001'] = 1
expected = ts['2001']
df.loc['2001','A'] = 1
result = df['2001']['A']
assert_series_equal(expected,result)
# GH3546 (not including times on the last day)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:00', freq='H')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:59', freq='S')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = [ Timestamp('2013-05-31 00:00'), Timestamp(datetime(2013,5,31,23,59,59,999999))]
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013']
assert_series_equal(expected,ts)
# GH 3925, indexing with a seconds resolution string / datetime object
df = DataFrame(randn(5,5),columns=['open','high','low','close','volume'],index=date_range('2012-01-02 18:01:00',periods=5,tz='US/Central',freq='s'))
expected = df.loc[[df.index[2]]]
result = df['2012-01-02 18:01:02']
assert_frame_equal(result,expected)
# this is a single date, so will raise
self.assertRaises(KeyError, df.__getitem__, df.index[2],)
def test_recreate_from_data(self):
if _np_version_under1p7:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N', 'C']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, periods=1)
idx = DatetimeIndex(org, freq=f)
self.assertTrue(idx.equals(org))
        # unable to create tz-aware 'A' and 'C' freqs
if _np_version_under1p7:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, tz='US/Pacific', periods=1)
idx = DatetimeIndex(org, freq=f, tz='US/Pacific')
self.assertTrue(idx.equals(org))
def assert_range_equal(left, right):
assert(left.equals(right))
assert(left.freq == right.freq)
assert(left.tz == right.tz)
class TestTimeSeries(tm.TestCase):
_multiprocess_can_split_ = True
def test_is_(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
self.assertTrue(dti.is_(dti))
self.assertTrue(dti.is_(dti.view()))
self.assertFalse(dti.is_(dti.copy()))
def test_dti_slicing(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
dti2 = dti[[1, 3, 5]]
v1 = dti2[0]
v2 = dti2[1]
v3 = dti2[2]
self.assertEqual(v1, Timestamp('2/28/2005'))
self.assertEqual(v2, Timestamp('4/30/2005'))
self.assertEqual(v3, Timestamp('6/30/2005'))
# don't carry freq through irregular slicing
self.assertIsNone(dti2.freq)
def test_pass_datetimeindex_to_index(self):
# Bugs in #1396
rng = date_range('1/1/2000', '3/1/2000')
idx = Index(rng, dtype=object)
expected = Index(rng.to_pydatetime(), dtype=object)
self.assert_numpy_array_equal(idx.values, expected.values)
def test_contiguous_boolean_preserve_freq(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
mask = np.zeros(len(rng), dtype=bool)
mask[10:20] = True
masked = rng[mask]
expected = rng[10:20]
self.assertIsNotNone(expected.freq)
assert_range_equal(masked, expected)
mask[22] = True
masked = rng[mask]
self.assertIsNone(masked.freq)
def test_getitem_median_slice_bug(self):
index = date_range('20090415', '20090519', freq='2B')
s = Series(np.random.randn(13), index=index)
indexer = [slice(6, 7, None)]
result = s[indexer]
expected = s[indexer[0]]
assert_series_equal(result, expected)
def test_series_box_timestamp(self):
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng)
tm.assert_isinstance(s[5], Timestamp)
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng, index=rng)
tm.assert_isinstance(s[5], Timestamp)
tm.assert_isinstance(s.iget_value(5), Timestamp)
def test_date_range_ambiguous_arguments(self):
# #2538
start = datetime(2011, 1, 1, 5, 3, 40)
end = datetime(2011, 1, 1, 8, 9, 40)
self.assertRaises(ValueError, date_range, start, end,
freq='s', periods=10)
def test_timestamp_to_datetime(self):
_skip_if_no_pytz()
rng = date_range('20090415', '20090519',
tz='US/Eastern')
stamp = rng[0]
dtval = stamp.to_pydatetime()
self.assertEqual(stamp, dtval)
self.assertEqual(stamp.tzinfo, dtval.tzinfo)
def test_index_convert_to_datetime_array(self):
_skip_if_no_pytz()
def _check_rng(rng):
converted = rng.to_pydatetime()
tm.assert_isinstance(converted, np.ndarray)
for x, stamp in zip(converted, rng):
tm.assert_isinstance(x, datetime)
self.assertEqual(x, stamp.to_pydatetime())
self.assertEqual(x.tzinfo, stamp.tzinfo)
rng = date_range('20090415', '20090519')
rng_eastern = date_range('20090415', '20090519', tz='US/Eastern')
rng_utc = date_range('20090415', '20090519', tz='utc')
_check_rng(rng)
_check_rng(rng_eastern)
_check_rng(rng_utc)
def test_ctor_str_intraday(self):
rng = DatetimeIndex(['1-1-2000 00:00:01'])
self.assertEqual(rng[0].second, 1)
def test_series_ctor_plus_datetimeindex(self):
rng = date_range('20090415', '20090519', freq='B')
data = dict((k, 1) for k in rng)
result = Series(data, index=rng)
self.assertIs(result.index, rng)
def test_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index)
result = result.fillna(method='bfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index, method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index, method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_setitem_timestamp(self):
# 2155
columns = DatetimeIndex(start='1/1/2012', end='2/1/2012',
freq=datetools.bday)
index = lrange(10)
data = DataFrame(columns=columns, index=index)
t = datetime(2012, 11, 1)
ts = Timestamp(t)
data[ts] = np.nan # works
def test_sparse_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
ss = s[:2].reindex(index).to_sparse()
result = ss.fillna(method='pad', limit=5)
expected = ss.fillna(method='pad', limit=5)
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
ss = s[-2:].reindex(index).to_sparse()
result = ss.fillna(method='backfill', limit=5)
expected = ss.fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
s = s.to_sparse()
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index, method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index, method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_sparse_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_pad_require_monotonicity(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
rng2 = rng[::2][::-1]
self.assertRaises(ValueError, rng2.get_indexer, rng,
method='pad')
def test_frame_ctor_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
dates = np.asarray(rng)
df = DataFrame({'A': np.random.randn(len(rng)), 'B': dates})
self.assertTrue(np.issubdtype(df['B'].dtype, np.dtype('M8[ns]')))
def test_frame_add_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
df = DataFrame(index=np.arange(len(rng)))
df['A'] = rng
self.assertTrue(np.issubdtype(df['A'].dtype, np.dtype('M8[ns]')))
def test_frame_datetime64_pre1900_repr(self):
df = DataFrame({'year': date_range('1/1/1700', periods=50,
freq='A-DEC')})
# it works!
repr(df)
def test_frame_add_datetime64_col_other_units(self):
n = 100
units = ['h', 'm', 's', 'ms', 'D', 'M', 'Y']
ns_dtype = np.dtype('M8[ns]')
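# Assigning datetime64 data of any unit should coerce the new column to datetime64[ns].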
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df[unit] = vals
ex_vals = to_datetime(vals.astype('O'))
self.assertEqual(df[unit].dtype, ns_dtype)
self.assertTrue((df[unit].values == ex_vals).all())
# Test insertion into existing datetime64 column
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df['dates'] = np.arange(n, dtype=np.int64).view(ns_dtype)
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
tmp = df.copy()
tmp['dates'] = vals
ex_vals = to_datetime(vals.astype('O'))
self.assertTrue((tmp['dates'].values == ex_vals).all())
def test_to_datetime_unit(self):
epoch = 1370745748
s = Series([ epoch + t for t in range(20) ])
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ]).astype(float)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ] + [iNaT])
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ] + [iNaT]).astype(float)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
s = concat([Series([ epoch + t for t in range(20) ]).astype(float),Series([np.nan])],ignore_index=True)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
def test_series_ctor_datetime64(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
dates = np.asarray(rng)
series = Series(dates)
self.assertTrue(np.issubdtype(series.dtype, np.dtype('M8[ns]')))
def test_index_cast_datetime64_other_units(self):
arr = np.arange(0, 100, 10, dtype=np.int64).view('M8[D]')
idx = Index(arr)
self.assertTrue((idx.values == tslib.cast_to_nanoseconds(arr)).all())
def test_index_astype_datetime64(self):
idx = Index([datetime(2012, 1, 1)], dtype=object)
if not _np_version_under1p7:
raise nose.SkipTest("test only valid in numpy < 1.7")
casted = idx.astype(np.dtype('M8[D]'))
expected = DatetimeIndex(idx.values)
tm.assert_isinstance(casted, DatetimeIndex)
self.assertTrue(casted.equals(expected))
def test_reindex_series_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
series = Series(rng)
result = series.reindex(lrange(15))
self.assertTrue(np.issubdtype(result.dtype, np.dtype('M8[ns]')))
mask = result.isnull()
self.assertTrue(mask[-5:].all())
self.assertFalse(mask[:-5].any())
def test_reindex_frame_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
df = DataFrame({'A': np.random.randn(len(rng)), 'B': rng})
result = df.reindex(lrange(15))
self.assertTrue(np.issubdtype(result['B'].dtype, np.dtype('M8[ns]')))
mask = com.isnull(result)['B']
self.assertTrue(mask[-5:].all())
self.assertFalse(mask[:-5].any())
def test_series_repr_nat(self):
series = Series([0, 1000, 2000, iNaT], dtype='M8[ns]')
result = repr(series)
expected = ('0 1970-01-01 00:00:00\n'
'1 1970-01-01 00:00:00.000001\n'
'2 1970-01-01 00:00:00.000002\n'
'3 NaT\n'
'dtype: datetime64[ns]')
self.assertEqual(result, expected)
def test_fillna_nat(self):
series = Series([0, 1, 2, iNaT], dtype='M8[ns]')
filled = series.fillna(method='pad')
filled2 = series.fillna(value=series.values[2])
expected = series.copy()
expected.values[3] = expected.values[2]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='pad')
filled2 = df.fillna(value=series.values[2])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
series = Series([iNaT, 0, 1, 2], dtype='M8[ns]')
filled = series.fillna(method='bfill')
filled2 = series.fillna(value=series[1])
expected = series.copy()
expected[0] = expected[1]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='bfill')
filled2 = df.fillna(value=series[1])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
def test_string_na_nat_conversion(self):
# GH #999, #858
from pandas.compat import parse_date
strings = np.array(['1/1/2000', '1/2/2000', np.nan,
'1/4/2000, 12:34:56'], dtype=object)
expected = np.empty(4, dtype='M8[ns]')
for i, val in enumerate(strings):
if com.isnull(val):
expected[i] = iNaT
else:
expected[i] = parse_date(val)
result = tslib.array_to_datetime(strings)
assert_almost_equal(result, expected)
result2 = to_datetime(strings)
tm.assert_isinstance(result2, DatetimeIndex)
assert_almost_equal(result, result2)
malformed = np.array(['1/100/2000', np.nan], dtype=object)
result = to_datetime(malformed)
assert_almost_equal(result, malformed)
self.assertRaises(ValueError, to_datetime, malformed,
errors='raise')
idx = ['a', 'b', 'c', 'd', 'e']
series = Series(['1/1/2000', np.nan, '1/3/2000', np.nan,
'1/5/2000'], index=idx, name='foo')
dseries = Series([to_datetime('1/1/2000'), np.nan,
to_datetime('1/3/2000'), np.nan,
to_datetime('1/5/2000')], index=idx, name='foo')
result = to_datetime(series)
dresult = to_datetime(dseries)
expected = Series(np.empty(5, dtype='M8[ns]'), index=idx)
for i in range(5):
x = series[i]
if isnull(x):
expected[i] = iNaT
else:
expected[i] = to_datetime(x)
assert_series_equal(result, expected)
self.assertEqual(result.name, 'foo')
assert_series_equal(dresult, expected)
self.assertEqual(dresult.name, 'foo')
def test_to_datetime_iso8601(self):
result = to_datetime(["2012-01-01 00:00:00"])
exp = Timestamp("2012-01-01 00:00:00")
self.assertEqual(result[0], exp)
result = to_datetime(['20121001']) # bad iso 8601
exp = Timestamp('2012-10-01')
self.assertEqual(result[0], exp)
def test_to_datetime_default(self):
rs = to_datetime('2001')
xp = datetime(2001, 1, 1)
self.assertEqual(rs, xp)
#### dayfirst is essentially broken
#### to_datetime('01-13-2012', dayfirst=True)
#### self.assertRaises(ValueError, to_datetime('01-13-2012', dayfirst=True))
def test_to_datetime_on_datetime64_series(self):
# #2699
s = Series(date_range('1/1/2000', periods=10))
result = to_datetime(s)
self.assertEqual(result[0], s[0])
def test_to_datetime_with_apply(self):
# this is only locale tested with US/None locales
_skip_if_has_locale()
# GH 5195
# with a format and coerce a single item to_datetime fails
td = Series(['May 04', 'Jun 02', 'Dec 11'], index=[1,2,3])
expected = pd.to_datetime(td, format='%b %y')
result = td.apply(pd.to_datetime, format='%b %y')
assert_series_equal(result, expected)
td = pd.Series(['May 04', 'Jun 02', ''], index=[1,2,3])
self.assertRaises(ValueError, lambda : pd.to_datetime(td,format='%b %y'))
self.assertRaises(ValueError, lambda : td.apply(pd.to_datetime, format='%b %y'))
expected = pd.to_datetime(td, format='%b %y', coerce=True)
result = td.apply(lambda x: pd.to_datetime(x, format='%b %y', coerce=True))
assert_series_equal(result, expected)
def test_nat_vector_field_access(self):
idx = DatetimeIndex(['1/1/2000', None, None, '1/4/2000'])
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'microsecond', 'nanosecond',
'week', 'dayofyear']
for field in fields:
result = getattr(idx, field)
expected = [getattr(x, field) if x is not NaT else -1
for x in idx]
self.assert_numpy_array_equal(result, expected)
def test_nat_scalar_field_access(self):
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'microsecond', 'nanosecond',
'week', 'dayofyear']
for field in fields:
result = getattr(NaT, field)
self.assertEqual(result, -1)
self.assertEqual(NaT.weekday(), -1)
def test_to_datetime_types(self):
# empty string
result = to_datetime('')
self.assertIs(result, NaT)
result = to_datetime(['', ''])
self.assertTrue(isnull(result).all())
# ints
result = Timestamp(0)
expected = to_datetime(0)
self.assertEqual(result, expected)
# GH 3888 (strings)
expected = to_datetime(['2012'])[0]
result = to_datetime('2012')
self.assertEqual(result, expected)
### array = ['2012','20120101','20120101 12:01:01']
array = ['20120101','20120101 12:01:01']
expected = list(to_datetime(array))
result = lmap(Timestamp,array)
tm.assert_almost_equal(result,expected)
### currently fails ###
### result = Timestamp('2012')
### expected = to_datetime('2012')
### self.assertEqual(result, expected)
def test_to_datetime_unprocessable_input(self):
# GH 4928
self.assert_numpy_array_equal(
to_datetime([1, '1']),
np.array([1, '1'], dtype='O')
)
self.assertRaises(TypeError, to_datetime, [1, '1'], errors='raise')
def test_to_datetime_other_datetime64_units(self):
# 5/25/2012
scalar = np.int64(1337904000000000).view('M8[us]')
as_obj = scalar.astype('O')
index = DatetimeIndex([scalar])
self.assertEqual(index[0], scalar.astype('O'))
value = Timestamp(scalar)
self.assertEqual(value, as_obj)
def test_to_datetime_list_of_integers(self):
rng = date_range('1/1/2000', periods=20)
rng = DatetimeIndex(rng.values)
ints = list(rng.asi8)
result = DatetimeIndex(ints)
self.assertTrue(rng.equals(result))
def test_to_datetime_dt64s(self):
in_bound_dts = [
np.datetime64('2000-01-01'),
np.datetime64('2000-01-02'),
]
for dt in in_bound_dts:
self.assertEqual(
pd.to_datetime(dt),
Timestamp(dt)
)
oob_dts = [
np.datetime64('1000-01-01'),
np.datetime64('5000-01-02'),
]
for dt in oob_dts:
self.assertRaises(ValueError, pd.to_datetime, dt, errors='raise')
self.assertRaises(ValueError, tslib.Timestamp, dt)
self.assertIs(pd.to_datetime(dt, coerce=True), NaT)
def test_to_datetime_array_of_dt64s(self):
dts = [
np.datetime64('2000-01-01'),
np.datetime64('2000-01-02'),
]
# Assuming all datetimes are in bounds, to_datetime() returns
# an array that is equal to Timestamp() parsing
self.assert_numpy_array_equal(
pd.to_datetime(dts, box=False),
np.array([Timestamp(x).asm8 for x in dts])
)
# A list of datetimes where the last one is out of bounds
dts_with_oob = dts + [np.datetime64('9999-01-01')]
self.assertRaises(
ValueError,
pd.to_datetime,
dts_with_oob,
coerce=False,
errors='raise'
)
self.assert_numpy_array_equal(
pd.to_datetime(dts_with_oob, box=False, coerce=True),
np.array(
[
Timestamp(dts_with_oob[0]).asm8,
Timestamp(dts_with_oob[1]).asm8,
iNaT,
],
dtype='M8'
)
)
# With coerce=False and errors='ignore', out of bounds datetime64s
# are converted to their .item(), which depending on the version of
# numpy is either a python datetime.datetime or datetime.date
self.assert_numpy_array_equal(
pd.to_datetime(dts_with_oob, box=False, coerce=False),
np.array(
[dt.item() for dt in dts_with_oob],
dtype='O'
)
)
def test_index_to_datetime(self):
idx = Index(['1/1/2000', '1/2/2000', '1/3/2000'])
result = idx.to_datetime()
expected = DatetimeIndex(datetools.to_datetime(idx.values))
self.assertTrue(result.equals(expected))
today = datetime.today()
idx = Index([today], dtype=object)
result = idx.to_datetime()
expected = DatetimeIndex([today])
self.assertTrue(result.equals(expected))
def test_to_datetime_freq(self):
xp = bdate_range('2000-1-1', periods=10, tz='UTC')
rs = xp.to_datetime()
self.assertEqual(xp.freq, rs.freq)
self.assertEqual(xp.tzinfo, rs.tzinfo)
def test_range_misspecified(self):
# GH #1095
self.assertRaises(ValueError, date_range, '1/1/2000')
self.assertRaises(ValueError, date_range, end='1/1/2000')
self.assertRaises(ValueError, date_range, periods=10)
self.assertRaises(ValueError, date_range, '1/1/2000', freq='H')
self.assertRaises(ValueError, date_range, end='1/1/2000', freq='H')
self.assertRaises(ValueError, date_range, periods=10, freq='H')
def test_reasonable_keyerror(self):
# GH #1062
index = DatetimeIndex(['1/3/2000'])
try:
index.get_loc('1/1/2000')
except KeyError as e:
self.assertIn('2000', str(e))
def test_reindex_with_datetimes(self):
rng = date_range('1/1/2000', periods=20)
ts = Series(np.random.randn(20), index=rng)
result = ts.reindex(list(ts.index[5:10]))
expected = ts[5:10]
tm.assert_series_equal(result, expected)
result = ts[list(ts.index[5:10])]
tm.assert_series_equal(result, expected)
def test_promote_datetime_date(self):
rng = date_range('1/1/2000', periods=20)
ts = Series(np.random.randn(20), index=rng)
ts_slice = ts[5:]
ts2 = ts_slice.copy()
ts2.index = [x.date() for x in ts2.index]
result = ts + ts2
result2 = ts2 + ts
expected = ts + ts[5:]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# test asfreq
result = ts2.asfreq('4H', method='ffill')
expected = ts[5:].asfreq('4H', method='ffill')
assert_series_equal(result, expected)
result = rng.get_indexer(ts2.index)
expected = rng.get_indexer(ts_slice.index)
self.assert_numpy_array_equal(result, expected)
def test_asfreq_normalize(self):
rng = date_range('1/1/2000 09:30', periods=20)
norm = date_range('1/1/2000', periods=20)
vals = np.random.randn(20)
ts = Series(vals, index=rng)
result = ts.asfreq('D', normalize=True)
norm = date_range('1/1/2000', periods=20)
expected = Series(vals, index=norm)
assert_series_equal(result, expected)
vals = np.random.randn(20, 3)
ts = DataFrame(vals, index=rng)
result = ts.asfreq('D', normalize=True)
expected = DataFrame(vals, index=norm)
assert_frame_equal(result, expected)
def test_date_range_gen_error(self):
rng = date_range('1/1/2000 00:00', '1/1/2000 00:18', freq='5min')
self.assertEqual(len(rng), 4)
def test_first_subset(self):
ts = _simple_ts('1/1/2000', '1/1/2010', freq='12h')
result = ts.first('10d')
self.assertEqual(len(result), 20)
ts = _simple_ts('1/1/2000', '1/1/2010')
result = ts.first('10d')
self.assertEqual(len(result), 10)
result = ts.first('3M')
expected = ts[:'3/31/2000']
assert_series_equal(result, expected)
result = ts.first('21D')
expected = ts[:21]
assert_series_equal(result, expected)
result = ts[:0].first('3M')
assert_series_equal(result, ts[:0])
def test_last_subset(self):
ts = _simple_ts('1/1/2000', '1/1/2010', freq='12h')
result = ts.last('10d')
self.assertEqual(len(result), 20)
ts = _simple_ts('1/1/2000', '1/1/2010')
result = ts.last('10d')
self.assertEqual(len(result), 10)
result = ts.last('21D')
expected = ts['12/12/2009':]
assert_series_equal(result, expected)
result = ts.last('21D')
expected = ts[-21:]
assert_series_equal(result, expected)
result = ts[:0].last('3M')
assert_series_equal(result, ts[:0])
def test_add_offset(self):
rng = date_range('1/1/2000', '2/1/2000')
result = rng + offsets.Hour(2)
expected = date_range('1/1/2000 02:00', '2/1/2000 02:00')
self.assertTrue(result.equals(expected))
def test_format_pre_1900_dates(self):
rng = date_range('1/1/1850', '1/1/1950', freq='A-DEC')
rng.format()
ts = Series(1, index=rng)
repr(ts)
def test_repeat(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
self.assertIsNone(result.freq)
self.assertEqual(len(result), 5 * len(rng))
def test_at_time(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
rs = ts.at_time(rng[1])
self.assertTrue((rs.index.hour == rng[1].hour).all())
self.assertTrue((rs.index.minute == rng[1].minute).all())
self.assertTrue((rs.index.second == rng[1].second).all())
result = ts.at_time('9:30')
expected = ts.at_time(time(9, 30))
assert_series_equal(result, expected)
df = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts[time(9, 30)]
result_df = df.ix[time(9, 30)]
expected = ts[(rng.hour == 9) & (rng.minute == 30)]
exp_df = df[(rng.hour == 9) & (rng.minute == 30)]
# expected.index = date_range('1/1/2000', '1/4/2000')
assert_series_equal(result, expected)
tm.assert_frame_equal(result_df, exp_df)
chunk = df.ix['1/4/2000':]
result = chunk.ix[time(9, 30)]
expected = result_df[-1:]
tm.assert_frame_equal(result, expected)
# midnight, everything
rng = date_range('1/1/2000', '1/31/2000')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.at_time(time(0, 0))
assert_series_equal(result, ts)
# time doesn't exist
rng = date_range('1/1/2012', freq='23Min', periods=384)
ts = Series(np.random.randn(len(rng)), rng)
rs = ts.at_time('16:00')
self.assertEqual(len(rs), 0)
def test_at_time_frame(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
rs = ts.at_time(rng[1])
self.assertTrue((rs.index.hour == rng[1].hour).all())
self.assertTrue((rs.index.minute == rng[1].minute).all())
self.assertTrue((rs.index.second == rng[1].second).all())
result = ts.at_time('9:30')
expected = ts.at_time(time(9, 30))
assert_frame_equal(result, expected)
result = ts.ix[time(9, 30)]
expected = ts.ix[(rng.hour == 9) & (rng.minute == 30)]
assert_frame_equal(result, expected)
# midnight, everything
rng = date_range('1/1/2000', '1/31/2000')
ts = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts.at_time(time(0, 0))
assert_frame_equal(result, ts)
# time doesn't exist
rng = date_range('1/1/2012', freq='23Min', periods=384)
ts = DataFrame(np.random.randn(len(rng), 2), rng)
rs = ts.at_time('16:00')
self.assertEqual(len(rs), 0)
def test_between_time(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
stime = time(0, 0)
etime = time(1, 0)
close_open = product([True, False], [True, False])
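# Cover all four inclusive/exclusive combinations of the window endpoints.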
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = 13 * 4 + 1
if not inc_start:
exp_len -= 5
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue(t >= stime)
else:
self.assertTrue(t > stime)
if inc_end:
self.assertTrue(t <= etime)
else:
self.assertTrue(t < etime)
result = ts.between_time('00:00', '01:00')
expected = ts.between_time(stime, etime)
assert_series_equal(result, expected)
# across midnight
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
stime = time(22, 0)
etime = time(9, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = (12 * 11 + 1) * 4 + 1
if not inc_start:
exp_len -= 4
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue((t >= stime) or (t <= etime))
else:
self.assertTrue((t > stime) or (t <= etime))
if inc_end:
self.assertTrue((t <= etime) or (t >= stime))
else:
self.assertTrue((t < etime) or (t >= stime))
def test_between_time_frame(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(0, 0)
etime = time(1, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = 13 * 4 + 1
if not inc_start:
exp_len -= 5
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue(t >= stime)
else:
self.assertTrue(t > stime)
if inc_end:
self.assertTrue(t <= etime)
else:
self.assertTrue(t < etime)
result = ts.between_time('00:00', '01:00')
expected = ts.between_time(stime, etime)
assert_frame_equal(result, expected)
# across midnight
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(22, 0)
etime = time(9, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = (12 * 11 + 1) * 4 + 1
if not inc_start:
exp_len -= 4
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue((t >= stime) or (t <= etime))
else:
self.assertTrue((t > stime) or (t <= etime))
if inc_end:
self.assertTrue((t <= etime) or (t >= stime))
else:
self.assertTrue((t < etime) or (t >= stime))
def test_dti_constructor_preserve_dti_freq(self):
rng = date_range('1/1/2000', '1/2/2000', freq='5min')
rng2 = DatetimeIndex(rng)
self.assertEqual(rng.freq, rng2.freq)
def test_normalize(self):
rng = date_range('1/1/2000 9:30', periods=10, freq='D')
result = rng.normalize()
expected = date_range('1/1/2000', periods=10, freq='D')
self.assertTrue(result.equals(expected))
rng_ns = pd.DatetimeIndex(np.array([1380585623454345752, 1380585612343234312]).astype("datetime64[ns]"))
rng_ns_normalized = rng_ns.normalize()
expected = pd.DatetimeIndex(np.array([1380585600000000000, 1380585600000000000]).astype("datetime64[ns]"))
self.assertTrue(rng_ns_normalized.equals(expected))
self.assertTrue(result.is_normalized)
self.assertFalse(rng.is_normalized)
def test_to_period(self):
from pandas.tseries.period import period_range
ts = _simple_ts('1/1/2000', '1/1/2001')
pts = ts.to_period()
exp = ts.copy()
exp.index = period_range('1/1/2000', '1/1/2001')
assert_series_equal(pts, exp)
pts = ts.to_period('M')
self.assertTrue(pts.index.equals(exp.index.asfreq('M')))
def create_dt64_based_index(self):
data = [Timestamp('2007-01-01 10:11:12.123456Z'),
Timestamp('2007-01-01 10:11:13.789123Z')]
index = DatetimeIndex(data)
return index
def test_to_period_millisecond(self):
index = self.create_dt64_based_index()
period = index.to_period(freq='L')
self.assertEqual(2, len(period))
self.assertEqual(period[0], Period('2007-01-01 10:11:12.123Z', 'L'))
self.assertEqual(period[1], Period('2007-01-01 10:11:13.789Z', 'L'))
def test_to_period_microsecond(self):
index = self.create_dt64_based_index()
period = index.to_period(freq='U')
self.assertEqual(2, len(period))
self.assertEqual(period[0], Period('2007-01-01 10:11:12.123456Z', 'U'))
self.assertEqual(period[1], Period('2007-01-01 10:11:13.789123Z', 'U'))
def test_to_period_tz(self):
_skip_if_no_pytz()
from dateutil.tz import tzlocal
from pytz import utc as UTC
xp = date_range('1/1/2000', '4/1/2000').to_period()
ts = date_range('1/1/2000', '4/1/2000', tz='US/Eastern')
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assertEqual(result, expected)
self.assertTrue(ts.to_period().equals(xp))
ts = date_range('1/1/2000', '4/1/2000', tz=UTC)
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assertEqual(result, expected)
self.assertTrue(ts.to_period().equals(xp))
ts = date_range('1/1/2000', '4/1/2000', tz=tzlocal())
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assertEqual(result, expected)
self.assertTrue(ts.to_period().equals(xp))
def test_frame_to_period(self):
K = 5
from pandas.tseries.period import period_range
dr = date_range('1/1/2000', '1/1/2001')
pr = period_range('1/1/2000', '1/1/2001')
df = DataFrame(randn(len(dr), K), index=dr)
df['mix'] = 'a'
pts = df.to_period()
exp = df.copy()
exp.index = pr
assert_frame_equal(pts, exp)
pts = df.to_period('M')
self.assertTrue(pts.index.equals(exp.index.asfreq('M')))
df = df.T
pts = df.to_period(axis=1)
exp = df.copy()
exp.columns = pr
assert_frame_equal(pts, exp)
pts = df.to_period('M', axis=1)
self.assertTrue(pts.columns.equals(exp.columns.asfreq('M')))
self.assertRaises(ValueError, df.to_period, axis=2)
def test_timestamp_fields(self):
# extra fields from DatetimeIndex like quarter and week
idx = tm.makeDateIndex(100)
fields = ['dayofweek', 'dayofyear', 'week', 'weekofyear', 'quarter', 'is_month_start', 'is_month_end', 'is_quarter_start', 'is_quarter_end', 'is_year_start', 'is_year_end']
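# Each DatetimeIndex field accessor should agree with the matching Timestamp property.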
for f in fields:
expected = getattr(idx, f)[-1]
result = getattr(Timestamp(idx[-1]), f)
self.assertEqual(result, expected)
self.assertEqual(idx.freq, Timestamp(idx[-1], idx.freq).freq)
self.assertEqual(idx.freqstr, Timestamp(idx[-1], idx.freq).freqstr)
def test_woy_boundary(self):
# make sure weeks at year boundaries are correct
d = datetime(2013,12,31)
result = Timestamp(d).week
expected = 1 # ISO standard
self.assertEqual(result, expected)
d = datetime(2008,12,28)
result = Timestamp(d).week
expected = 52 # ISO standard
self.assertEqual(result, expected)
d = datetime(2009,12,31)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
d = datetime(2010,1,1)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
d = datetime(2010,1,3)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
result = np.array([Timestamp(datetime(*args)).week for args in
[(2000,1,1),(2000,1,2),(2005,1,1),(2005,1,2)]])
self.assertTrue((result == [52, 52, 53, 53]).all())
def test_timestamp_date_out_of_range(self):
self.assertRaises(ValueError, Timestamp, '1676-01-01')
self.assertRaises(ValueError, Timestamp, '2263-01-01')
# 1475
self.assertRaises(ValueError, DatetimeIndex, ['1400-01-01'])
self.assertRaises(ValueError, DatetimeIndex, [datetime(1400, 1, 1)])
def test_timestamp_repr(self):
# pre-1900
stamp = Timestamp('1850-01-01', tz='US/Eastern')
repr(stamp)
iso8601 = '1850-01-01 01:23:45.012345'
stamp = Timestamp(iso8601, tz='US/Eastern')
result = repr(stamp)
self.assertIn(iso8601, result)
def test_timestamp_from_ordinal(self):
# GH 3042
dt = datetime(2011, 4, 16, 0, 0)
ts = Timestamp.fromordinal(dt.toordinal())
self.assertEqual(ts.to_pydatetime(), dt)
# with a tzinfo
stamp = Timestamp('2011-4-16', tz='US/Eastern')
dt_tz = stamp.to_pydatetime()
ts = Timestamp.fromordinal(dt_tz.toordinal(),tz='US/Eastern')
self.assertEqual(ts.to_pydatetime(), dt_tz)
def test_datetimeindex_integers_shift(self):
rng = date_range('1/1/2000', periods=20)
result = rng + 5
expected = rng.shift(5)
self.assertTrue(result.equals(expected))
result = rng - 5
expected = rng.shift(-5)
self.assertTrue(result.equals(expected))
def test_astype_object(self):
# NumPy 1.6.1 weak ns support
rng = date_range('1/1/2000', periods=20)
casted = rng.astype('O')
exp_values = list(rng)
self.assert_numpy_array_equal(casted, exp_values)
def test_catch_infinite_loop(self):
offset = datetools.DateOffset(minute=5)
# blow up, don't loop forever
self.assertRaises(Exception, date_range, datetime(2011, 11, 11),
datetime(2011, 11, 12), freq=offset)
def test_append_concat(self):
rng = date_range('5/8/2012 1:45', periods=10, freq='5T')
ts = Series(np.random.randn(len(rng)), rng)
df = DataFrame(np.random.randn(len(rng), 4), index=rng)
result = ts.append(ts)
result_df = df.append(df)
ex_index = DatetimeIndex(np.tile(rng.values, 2))
self.assertTrue(result.index.equals(ex_index))
self.assertTrue(result_df.index.equals(ex_index))
appended = rng.append(rng)
self.assertTrue(appended.equals(ex_index))
appended = rng.append([rng, rng])
ex_index = DatetimeIndex(np.tile(rng.values, 3))
self.assertTrue(appended.equals(ex_index))
# different index names
rng1 = rng.copy()
rng2 = rng.copy()
rng1.name = 'foo'
rng2.name = 'bar'
self.assertEqual(rng1.append(rng1).name, 'foo')
self.assertIsNone(rng1.append(rng2).name)
def test_append_concat_tz(self):
#GH 2938
_skip_if_no_pytz()
rng = date_range('5/8/2012 1:45', periods=10, freq='5T',
tz='US/Eastern')
rng2 = date_range('5/8/2012 2:35', periods=10, freq='5T',
tz='US/Eastern')
rng3 = date_range('5/8/2012 1:45', periods=20, freq='5T',
tz='US/Eastern')
ts = Series(np.random.randn(len(rng)), rng)
df = DataFrame(np.random.randn(len(rng), 4), index=rng)
ts2 = Series(np.random.randn(len(rng2)), rng2)
df2 = DataFrame(np.random.randn(len(rng2), 4), index=rng2)
result = ts.append(ts2)
result_df = df.append(df2)
self.assertTrue(result.index.equals(rng3))
self.assertTrue(result_df.index.equals(rng3))
appended = rng.append(rng2)
self.assertTrue(appended.equals(rng3))
def test_set_dataframe_column_ns_dtype(self):
x = DataFrame([datetime.now(), datetime.now()])
self.assertEqual(x[0].dtype, np.dtype('M8[ns]'))
def test_groupby_count_dateparseerror(self):
dr = date_range(start='1/1/2012', freq='5min', periods=10)
# BAD Example, datetimes first
s = Series(np.arange(10), index=[dr, lrange(10)])
grouped = s.groupby(lambda x: x[1] % 2 == 0)
result = grouped.count()
s = Series(np.arange(10), index=[lrange(10), dr])
grouped = s.groupby(lambda x: x[0] % 2 == 0)
expected = grouped.count()
assert_series_equal(result, expected)
def test_datetimeindex_repr_short(self):
dr = date_range(start='1/1/2012', periods=1)
repr(dr)
dr = date_range(start='1/1/2012', periods=2)
repr(dr)
dr = date_range(start='1/1/2012', periods=3)
repr(dr)
def test_constructor_int64_nocopy(self):
# #1624
arr = np.arange(1000, dtype=np.int64)
index = DatetimeIndex(arr)
arr[50:100] = -1
self.assertTrue((index.asi8[50:100] == -1).all())
arr = np.arange(1000, dtype=np.int64)
index = DatetimeIndex(arr, copy=True)
arr[50:100] = -1
self.assertTrue((index.asi8[50:100] != -1).all())
def test_series_interpolate_method_values(self):
# #1646
ts = _simple_ts('1/1/2000', '1/20/2000')
ts[::2] = np.nan
result = ts.interpolate(method='values')
exp = ts.interpolate()
assert_series_equal(result, exp)
def test_frame_datetime64_handling_groupby(self):
# it works!
df = DataFrame([(3, np.datetime64('2012-07-03')),
(3, np.datetime64('2012-07-04'))],
columns=['a', 'date'])
result = df.groupby('a').first()
self.assertEqual(result['date'][3], Timestamp('2012-07-03'))
def test_series_interpolate_intraday(self):
# #1698
index = pd.date_range('1/1/2012', periods=4, freq='12D')
ts = pd.Series([0, 12, 24, 36], index)
new_index = index.append(index + pd.DateOffset(days=1)).order()
exp = ts.reindex(new_index).interpolate(method='time')
index = pd.date_range('1/1/2012', periods=4, freq='12H')
ts = pd.Series([0, 12, 24, 36], index)
new_index = index.append(index + pd.DateOffset(hours=1)).order()
result = ts.reindex(new_index).interpolate(method='time')
self.assert_numpy_array_equal(result.values, exp.values)
def test_frame_dict_constructor_datetime64_1680(self):
dr = date_range('1/1/2012', periods=10)
s = Series(dr, index=dr)
# it works!
DataFrame({'a': 'foo', 'b': s}, index=dr)
DataFrame({'a': 'foo', 'b': s.values}, index=dr)
def test_frame_datetime64_mixed_index_ctor_1681(self):
dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI')
ts = Series(dr)
# it works!
d = DataFrame({'A': 'foo', 'B': ts}, index=dr)
self.assertTrue(d['B'].isnull().all())
def test_frame_timeseries_to_records(self):
index = date_range('1/1/2000', periods=10)
df = DataFrame(np.random.randn(10, 3), index=index,
columns=['a', 'b', 'c'])
result = df.to_records()
self.assertEqual(result['index'].dtype, np.dtype('M8[ns]'))
result = df.to_records(index=False)
def test_frame_datetime64_duplicated(self):
dates = date_range('2010-07-01', end='2010-08-05')
tst = DataFrame({'symbol': 'AAA', 'date': dates})
result = tst.duplicated(['date', 'symbol'])
self.assertTrue((-result).all())
tst = DataFrame({'date': dates})
result = tst.duplicated()
self.assertTrue((-result).all())
def test_timestamp_compare_with_early_datetime(self):
# e.g. datetime.min
stamp = Timestamp('2012-01-01')
self.assertFalse(stamp == datetime.min)
self.assertFalse(stamp == datetime(1600, 1, 1))
self.assertFalse(stamp == datetime(2700, 1, 1))
self.assertNotEqual(stamp, datetime.min)
self.assertNotEqual(stamp, datetime(1600, 1, 1))
self.assertNotEqual(stamp, datetime(2700, 1, 1))
self.assertTrue(stamp > datetime(1600, 1, 1))
self.assertTrue(stamp >= datetime(1600, 1, 1))
self.assertTrue(stamp < datetime(2700, 1, 1))
self.assertTrue(stamp <= datetime(2700, 1, 1))
def test_to_html_timestamp(self):
rng = date_range('2000-01-01', periods=10)
df = DataFrame(np.random.randn(10, 4), index=rng)
result = df.to_html()
self.assertIn('2000-01-01', result)
def test_to_csv_numpy_16_bug(self):
frame = DataFrame({'a': date_range('1/1/2000', periods=10)})
buf = StringIO()
frame.to_csv(buf)
result = buf.getvalue()
self.assertIn('2000-01-01', result)
def test_series_map_box_timestamps(self):
# #2689, #2627
s = Series(date_range('1/1/2000', periods=10))
def f(x):
return (x.hour, x.day, x.month)
# it works!
s.map(f)
s.apply(f)
DataFrame(s).applymap(f)
def test_concat_datetime_datetime64_frame(self):
# #2624
rows = []
rows.append([datetime(2010, 1, 1), 1])
rows.append([datetime(2010, 1, 2), 'hi'])
df2_obj = DataFrame.from_records(rows, columns=['date', 'test'])
ind = date_range(start="2000/1/1", freq="D", periods=10)
df1 = DataFrame({'date': ind, 'test':lrange(10)})
# it works!
pd.concat([df1, df2_obj])
def test_period_resample(self):
# GH3609
s = Series(range(100),index=date_range('20130101', freq='s', periods=100), dtype='float')
s[10:30] = np.nan
expected = Series([34.5, 79.5], index=[Period('2013-01-01 00:00', 'T'), Period('2013-01-01 00:01', 'T')])
result = s.to_period().resample('T', kind='period')
assert_series_equal(result, expected)
result2 = s.resample('T', kind='period')
assert_series_equal(result2, expected)
def test_period_resample_with_local_timezone(self):
# GH5430
_skip_if_no_pytz()
import pytz
local_timezone = pytz.timezone('America/Los_Angeles')
start = datetime(year=2013, month=11, day=1, hour=0, minute=0, tzinfo=pytz.utc)
# 1 day later
end = datetime(year=2013, month=11, day=2, hour=0, minute=0, tzinfo=pytz.utc)
index = pd.date_range(start, end, freq='H')
series = pd.Series(1, index=index)
series = series.tz_convert(local_timezone)
result = series.resample('D', kind='period')
# Create the expected series
expected_index = (pd.period_range(start=start, end=end, freq='D') - 1) # Index is moved back a day with the timezone conversion from UTC to Pacific
expected = pd.Series(1, index=expected_index)
assert_series_equal(result, expected)
def test_pickle(self):
#GH4606
from pandas.compat import cPickle
import pickle
for pick in [pickle, cPickle]:
p = pick.loads(pick.dumps(NaT))
self.assertTrue(p is NaT)
idx = pd.to_datetime(['2013-01-01', NaT, '2014-01-06'])
idx_p = pick.loads(pick.dumps(idx))
self.assertTrue(idx_p[0] == idx[0])
self.assertTrue(idx_p[1] is NaT)
self.assertTrue(idx_p[2] == idx[2])
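# Module-level helper: a Series of random values indexed by a date_range,
# reused by several tests in this file.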
def _simple_ts(start, end, freq='D'):
rng = date_range(start, end, freq=freq)
return Series(np.random.randn(len(rng)), index=rng)
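# The tests below focus on DatetimeIndex construction, set operations,
# conversions and indexing behaviour.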
class TestDatetimeIndex(tm.TestCase):
_multiprocess_can_split_ = True
def test_hash_error(self):
index = date_range('20010101', periods=10)
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(index).__name__):
hash(index)
def test_stringified_slice_with_tz(self):
#GH2658
import datetime
start=datetime.datetime.now()
idx=DatetimeIndex(start=start,freq="1d",periods=10)
df=DataFrame(lrange(10),index=idx)
df["2013-01-14 23:44:34.437768-05:00":] # no exception here
def test_append_join_nondatetimeindex(self):
rng = date_range('1/1/2000', periods=10)
idx = Index(['a', 'b', 'c', 'd'])
result = rng.append(idx)
tm.assert_isinstance(result[0], Timestamp)
# it works
rng.join(idx, how='outer')
def test_astype(self):
rng = date_range('1/1/2000', periods=10)
result = rng.astype('i8')
self.assert_numpy_array_equal(result, rng.asi8)
def test_to_period_nofreq(self):
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-04'])
self.assertRaises(ValueError, idx.to_period)
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03'],
freq='infer')
idx.to_period()
def test_000constructor_resolution(self):
# 2252
t1 = Timestamp((1352934390 * 1000000000) + 1000000 + 1000 + 1)
idx = DatetimeIndex([t1])
self.assertEqual(idx.nanosecond[0], t1.nanosecond)
def test_constructor_coverage(self):
rng = date_range('1/1/2000', periods=10.5)
exp = date_range('1/1/2000', periods=10)
self.assertTrue(rng.equals(exp))
self.assertRaises(ValueError, DatetimeIndex, start='1/1/2000',
periods='foo', freq='D')
self.assertRaises(ValueError, DatetimeIndex, start='1/1/2000',
end='1/10/2000')
self.assertRaises(ValueError, DatetimeIndex, '1/1/2000')
# generator expression
gen = (datetime(2000, 1, 1) + timedelta(i) for i in range(10))
result = DatetimeIndex(gen)
expected = DatetimeIndex([datetime(2000, 1, 1) + timedelta(i)
for i in range(10)])
self.assertTrue(result.equals(expected))
# NumPy string array
strings = np.array(['2000-01-01', '2000-01-02', '2000-01-03'])
result = DatetimeIndex(strings)
expected = DatetimeIndex(strings.astype('O'))
self.assertTrue(result.equals(expected))
from_ints = DatetimeIndex(expected.asi8)
self.assertTrue(from_ints.equals(expected))
# non-conforming
self.assertRaises(ValueError, DatetimeIndex,
['2000-01-01', '2000-01-02', '2000-01-04'],
freq='D')
self.assertRaises(ValueError, DatetimeIndex,
start='2011-01-01', freq='b')
self.assertRaises(ValueError, DatetimeIndex,
end='2011-01-01', freq='B')
self.assertRaises(ValueError, DatetimeIndex, periods=10, freq='D')
def test_constructor_name(self):
idx = DatetimeIndex(start='2000-01-01', periods=1, freq='A',
name='TEST')
self.assertEqual(idx.name, 'TEST')
def test_comparisons_coverage(self):
rng = date_range('1/1/2000', periods=10)
# raise TypeError for now
self.assertRaises(TypeError, rng.__lt__, rng[3].value)
result = rng == list(rng)
exp = rng == rng
self.assert_numpy_array_equal(result, exp)
def test_map(self):
rng = date_range('1/1/2000', periods=10)
f = lambda x: x.strftime('%Y%m%d')
result = rng.map(f)
exp = [f(x) for x in rng]
self.assert_numpy_array_equal(result, exp)
def test_add_union(self):
rng = date_range('1/1/2000', periods=5)
rng2 = date_range('1/6/2000', periods=5)
result = rng + rng2
expected = rng.union(rng2)
self.assertTrue(result.equals(expected))
def test_misc_coverage(self):
rng = date_range('1/1/2000', periods=5)
result = rng.groupby(rng.day)
tm.assert_isinstance(list(result.values())[0][0], Timestamp)
idx = DatetimeIndex(['2000-01-03', '2000-01-01', '2000-01-02'])
self.assertTrue(idx.equals(list(idx)))
non_datetime = Index(list('abc'))
self.assertFalse(idx.equals(list(non_datetime)))
def test_union_coverage(self):
idx = DatetimeIndex(['2000-01-03', '2000-01-01', '2000-01-02'])
ordered = DatetimeIndex(idx.order(), freq='infer')
result = ordered.union(idx)
self.assertTrue(result.equals(ordered))
result = ordered[:0].union(ordered)
self.assertTrue(result.equals(ordered))
self.assertEqual(result.freq, ordered.freq)
def test_union_bug_1730(self):
rng_a = date_range('1/1/2012', periods=4, freq='3H')
rng_b = date_range('1/1/2012', periods=4, freq='4H')
result = rng_a.union(rng_b)
exp = DatetimeIndex(sorted(set(list(rng_a)) | set(list(rng_b))))
self.assertTrue(result.equals(exp))
def test_union_bug_1745(self):
left = DatetimeIndex(['2012-05-11 15:19:49.695000'])
right = DatetimeIndex(['2012-05-29 13:04:21.322000',
'2012-05-11 15:27:24.873000',
'2012-05-11 15:31:05.350000'])
result = left.union(right)
exp = DatetimeIndex(sorted(set(list(left)) | set(list(right))))
self.assertTrue(result.equals(exp))
def test_union_bug_4564(self):
from pandas import DateOffset
left = date_range("2013-01-01", "2013-02-01")
right = left + DateOffset(minutes=15)
result = left.union(right)
exp = DatetimeIndex(sorted(set(list(left)) | set(list(right))))
self.assertTrue(result.equals(exp))
def test_intersection_bug_1708(self):
from pandas import DateOffset
index_1 = date_range('1/1/2012', periods=4, freq='12H')
index_2 = index_1 + DateOffset(hours=1)
result = index_1 & index_2
self.assertEqual(len(result), 0)
# def test_add_timedelta64(self):
# rng = date_range('1/1/2000', periods=5)
# delta = rng.values[3] - rng.values[1]
# result = rng + delta
# expected = rng + timedelta(2)
# self.assertTrue(result.equals(expected))
def test_get_duplicates(self):
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-02',
'2000-01-03', '2000-01-03', '2000-01-04'])
result = idx.get_duplicates()
ex = DatetimeIndex(['2000-01-02', '2000-01-03'])
self.assertTrue(result.equals(ex))
def test_argmin_argmax(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
self.assertEqual(idx.argmin(), 1)
self.assertEqual(idx.argmax(), 0)
def test_order(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
ordered = idx.order()
self.assertTrue(ordered.is_monotonic)
ordered = idx.order(ascending=False)
self.assertTrue(ordered[::-1].is_monotonic)
ordered, dexer = idx.order(return_indexer=True)
self.assertTrue(ordered.is_monotonic)
self.assert_numpy_array_equal(dexer, [1, 2, 0])
ordered, dexer = idx.order(return_indexer=True, ascending=False)
self.assertTrue(ordered[::-1].is_monotonic)
self.assert_numpy_array_equal(dexer, [0, 2, 1])
def test_insert(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
result = idx.insert(2, datetime(2000, 1, 5))
exp = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-05',
'2000-01-02'])
self.assertTrue(result.equals(exp))
# insertion of non-datetime should coerce to object index
result = idx.insert(1, 'inserted')
expected = Index([datetime(2000, 1, 4), 'inserted', datetime(2000, 1, 1),
datetime(2000, 1, 2)])
self.assertNotIsInstance(result, DatetimeIndex)
tm.assert_index_equal(result, expected)
idx = date_range('1/1/2000', periods=3, freq='M')
result = idx.insert(3, datetime(2000, 4, 30))
self.assertEqual(result.freqstr, 'M')
def test_map_bug_1677(self):
index = DatetimeIndex(['2012-04-25 09:30:00.393000'])
f = index.asof
result = index.map(f)
expected = np.array([f(index[0])])
self.assert_numpy_array_equal(result, expected)
def test_groupby_function_tuple_1677(self):
df = DataFrame(np.random.rand(100),
index=date_range("1/1/2000", periods=100))
monthly_group = df.groupby(lambda x: (x.year, x.month))
result = monthly_group.mean()
tm.assert_isinstance(result.index[0], tuple)
def test_append_numpy_bug_1681(self):
# another datetime64 bug
dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI')
a = DataFrame()
c = DataFrame({'A': 'foo', 'B': dr}, index=dr)
result = a.append(c)
self.assertTrue((result['B'] == dr).all())
def test_isin(self):
index = tm.makeDateIndex(4)
result = index.isin(index)
self.assertTrue(result.all())
result = index.isin(list(index))
self.assertTrue(result.all())
assert_almost_equal(index.isin([index[2], 5]),
[False, False, True, False])
def test_union(self):
i1 = Int64Index(np.arange(0, 20, 2))
i2 = Int64Index(np.arange(10, 30, 2))
result = i1.union(i2)
expected = Int64Index(np.arange(0, 30, 2))
self.assert_numpy_array_equal(result, expected)
def test_union_with_DatetimeIndex(self):
i1 = Int64Index(np.arange(0, 20, 2))
i2 = DatetimeIndex(start='2012-01-03 00:00:00', periods=10, freq='D')
i1.union(i2) # Works
i2.union(i1) # Fails with "AttributeError: can't set attribute"
def test_time(self):
rng = pd.date_range('1/1/2000', freq='12min', periods=10)
result = pd.Index(rng).time
expected = [t.time() for t in rng]
self.assertTrue((result == expected).all())
def test_date(self):
rng = pd.date_range('1/1/2000', freq='12H', periods=10)
result = pd.Index(rng).date
expected = [t.date() for t in rng]
self.assertTrue((result == expected).all())
def test_does_not_convert_mixed_integer(self):
df = tm.makeCustomDataframe(10, 10, data_gen_f=lambda *args, **kwargs:
randn(), r_idx_type='i', c_idx_type='dt')
cols = df.columns.join(df.index, how='outer')
joined = cols.join(df.columns)
self.assertEqual(cols.dtype, np.dtype('O'))
self.assertEqual(cols.dtype, joined.dtype)
assert_array_equal(cols.values, joined.values)
def test_slice_keeps_name(self):
# GH4226
st = pd.Timestamp('2013-07-01 00:00:00', tz='America/Los_Angeles')
et = pd.Timestamp('2013-07-02 00:00:00', tz='America/Los_Angeles')
dr = pd.date_range(st, et, freq='H', name='timebucket')
self.assertEqual(dr[1:].name, dr.name)
def test_join_self(self):
index = date_range('1/1/2000', periods=10)
kinds = 'outer', 'inner', 'left', 'right'
for kind in kinds:
joined = index.join(index, how=kind)
self.assertIs(index, joined)
def assert_index_parameters(self, index):
assert index.freq == '40960N'
assert index.inferred_freq == '40960N'
def test_ns_index(self):
if _np_version_under1p7:
raise nose.SkipTest
nsamples = 400
ns = int(1e9 / 24414)
dtstart = np.datetime64('2012-09-20T00:00:00')
dt = dtstart + np.arange(nsamples) * np.timedelta64(ns, 'ns')
freq = ns * pd.datetools.Nano()
index = pd.DatetimeIndex(dt, freq=freq, name='time')
self.assert_index_parameters(index)
new_index = pd.DatetimeIndex(start=index[0], end=index[-1], freq=index.freq)
self.assert_index_parameters(new_index)
def test_join_with_period_index(self):
df = tm.makeCustomDataframe(10, 10, data_gen_f=lambda *args:
np.random.randint(2), c_idx_type='p',
r_idx_type='dt')
s = df.iloc[:5, 0]
joins = 'left', 'right', 'inner', 'outer'
for join in joins:
with tm.assertRaisesRegexp(ValueError, 'can only call with other '
'PeriodIndex-ed objects'):
df.columns.join(s.index, how=join)
def test_factorize(self):
idx1 = DatetimeIndex(['2014-01', '2014-01', '2014-02',
'2014-02', '2014-03', '2014-03'])
exp_arr = np.array([0, 0, 1, 1, 2, 2])
exp_idx = DatetimeIndex(['2014-01', '2014-02', '2014-03'])
arr, idx = idx1.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
arr, idx = idx1.factorize(sort=True)
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
# tz must be preserved
idx1 = idx1.tz_localize('Asia/Tokyo')
exp_idx = exp_idx.tz_localize('Asia/Tokyo')
arr, idx = idx1.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
idx2 = pd.DatetimeIndex(['2014-03', '2014-03', '2014-02', '2014-01',
'2014-03', '2014-01'])
exp_arr = np.array([2, 2, 1, 0, 2, 0])
exp_idx = DatetimeIndex(['2014-01', '2014-02', '2014-03'])
arr, idx = idx2.factorize(sort=True)
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
exp_arr = np.array([0, 0, 1, 2, 0, 2])
exp_idx = DatetimeIndex(['2014-03', '2014-02', '2014-01'])
arr, idx = idx2.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
# freq must be preserved
idx3 = date_range('2000-01', periods=4, freq='M', tz='Asia/Tokyo')
exp_arr = np.array([0, 1, 2, 3])
arr, idx = idx3.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(idx3))
class TestDatetime64(tm.TestCase):
"""
Also test support for datetime64[ns] in Series / DataFrame
"""
def setUp(self):
dti = DatetimeIndex(start=datetime(2005, 1, 1),
end=datetime(2005, 1, 10), freq='Min')
self.series = Series(rand(len(dti)), dti)
def test_datetimeindex_accessors(self):
dti = DatetimeIndex(
freq='D', start=datetime(1998, 1, 1), periods=365)
self.assertEqual(dti.year[0], 1998)
self.assertEqual(dti.month[0], 1)
self.assertEqual(dti.day[0], 1)
self.assertEqual(dti.hour[0], 0)
self.assertEqual(dti.minute[0], 0)
self.assertEqual(dti.second[0], 0)
self.assertEqual(dti.microsecond[0], 0)
self.assertEqual(dti.dayofweek[0], 3)
self.assertEqual(dti.dayofyear[0], 1)
self.assertEqual(dti.dayofyear[120], 121)
self.assertEqual(dti.weekofyear[0], 1)
self.assertEqual(dti.weekofyear[120], 18)
self.assertEqual(dti.quarter[0], 1)
self.assertEqual(dti.quarter[120], 2)
self.assertEqual(dti.is_month_start[0], True)
self.assertEqual(dti.is_month_start[1], False)
self.assertEqual(dti.is_month_start[31], True)
self.assertEqual(dti.is_quarter_start[0], True)
self.assertEqual(dti.is_quarter_start[90], True)
self.assertEqual(dti.is_year_start[0], True)
self.assertEqual(dti.is_year_start[364], False)
self.assertEqual(dti.is_month_end[0], False)
self.assertEqual(dti.is_month_end[30], True)
self.assertEqual(dti.is_month_end[31], False)
self.assertEqual(dti.is_month_end[364], True)
self.assertEqual(dti.is_quarter_end[0], False)
self.assertEqual(dti.is_quarter_end[30], False)
self.assertEqual(dti.is_quarter_end[89], True)
self.assertEqual(dti.is_quarter_end[364], True)
self.assertEqual(dti.is_year_end[0], False)
self.assertEqual(dti.is_year_end[364], True)
self.assertEqual(len(dti.year), 365)
self.assertEqual(len(dti.month), 365)
self.assertEqual(len(dti.day), 365)
self.assertEqual(len(dti.hour), 365)
self.assertEqual(len(dti.minute), 365)
self.assertEqual(len(dti.second), 365)
self.assertEqual(len(dti.microsecond), 365)
self.assertEqual(len(dti.dayofweek), 365)
self.assertEqual(len(dti.dayofyear), 365)
self.assertEqual(len(dti.weekofyear), 365)
self.assertEqual(len(dti.quarter), 365)
self.assertEqual(len(dti.is_month_start), 365)
self.assertEqual(len(dti.is_month_end), 365)
self.assertEqual(len(dti.is_quarter_start), 365)
self.assertEqual(len(dti.is_quarter_end), 365)
self.assertEqual(len(dti.is_year_start), 365)
self.assertEqual(len(dti.is_year_end), 365)
dti = DatetimeIndex(
freq='BQ-FEB', start=datetime(1998, 1, 1), periods=4)
self.assertEqual(sum(dti.is_quarter_start), 0)
self.assertEqual(sum(dti.is_quarter_end), 4)
self.assertEqual(sum(dti.is_year_start), 0)
self.assertEqual(sum(dti.is_year_end), 1)
# Ensure is_start/end accessors throw ValueError for CustomBusinessDay, CBD requires np >= 1.7
if not _np_version_under1p7:
bday_egypt = offsets.CustomBusinessDay(weekmask='Sun Mon Tue Wed Thu')
dti = date_range(datetime(2013, 4, 30), periods=5, freq=bday_egypt)
self.assertRaises(ValueError, lambda: dti.is_month_start)
dti = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03'])
self.assertEqual(dti.is_month_start[0], 1)
tests = [
(Timestamp('2013-06-01', offset='M').is_month_start, 1),
(Timestamp('2013-06-01', offset='BM').is_month_start, 0),
(Timestamp('2013-06-03', offset='M').is_month_start, 0),
(Timestamp('2013-06-03', offset='BM').is_month_start, 1),
(Timestamp('2013-02-28', offset='Q-FEB').is_month_end, 1),
(Timestamp('2013-02-28', offset='Q-FEB').is_quarter_end, 1),
(Timestamp('2013-02-28', offset='Q-FEB').is_year_end, 1),
(Timestamp('2013-03-01', offset='Q-FEB').is_month_start, 1),
(Timestamp('2013-03-01', offset='Q-FEB').is_quarter_start, 1),
(Timestamp('2013-03-01', offset='Q-FEB').is_year_start, 1),
(Timestamp('2013-03-31', offset='QS-FEB').is_month_end, 1),
(Timestamp('2013-03-31', offset='QS-FEB').is_quarter_end, 0),
(Timestamp('2013-03-31', offset='QS-FEB').is_year_end, 0),
(Timestamp('2013-02-01', offset='QS-FEB').is_month_start, 1),
(Timestamp('2013-02-01', offset='QS-FEB').is_quarter_start, 1),
(Timestamp('2013-02-01', offset='QS-FEB').is_year_start, 1),
(Timestamp('2013-06-30', offset='BQ').is_month_end, 0),
(Timestamp('2013-06-30', offset='BQ').is_quarter_end, 0),
(Timestamp('2013-06-30', offset='BQ').is_year_end, 0),
(Timestamp('2013-06-28', offset='BQ').is_month_end, 1),
import pandas
import numpy
from src.variants.variant import Variant
from src.structs import DistanceStruct
from src.variants.deep_search.feature_extractor import FeatureExtractor
from src.variants.deep_search.indexer import Indexer
class DeepSearchVariant(Variant):
name = "Deep Search with Annoy"
def __init__(self, fasta_file: str, sequence_type: str, image_folder: str):
super().__init__(fasta_file, sequence_type)
self._image_folder = image_folder
self._input_shape = (2000, 2000, 3)
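# Fixed input size used by the feature extractor: 2000x2000 RGB images.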
def build_matrix(self) -> DistanceStruct:
features = FeatureExtractor(self._input_shape)
indexer = Indexer(self._image_folder, self._names, features)
names = [".".join(name.split("/")[-1].split(".")[:-1]) for name in indexer.image_list]
diff = set(self._names).difference(set(names))
if diff:
raise IOError(f"Sequences without image created: {diff}")
data = indexer.build()
df = pandas.DataFrame()
from matplotlib.pyplot import title
import streamlit as st
import pandas as pd
import altair as alt
import pydeck as pdk
import os
import glob
from wordcloud import WordCloud
import streamlit_analytics
path = os.path.dirname(__file__)
streamlit_analytics.start_tracking()
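# Start collecting simple usage analytics for this session; presumably paired with
# a streamlit_analytics.stop_tracking() call further down the script.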
@st.cache
def load_gnd_top_daten(typ):
gnd_top_df = pd.DataFrame()
import re
import win32com.client as win32
import datetime
import os.path
import shutil
from ast import literal_eval
import unicodedata
import pandas as pd
import numpy as np
import tkinter as tk
import variableFile
class WorkbookEvents:
'''Main class to define the interesting evens of the excel file'''
def OnSheetSelectionChange(self, *args):
'''Saves the value of the selected cell.
The previous value of the cell is restored if the value read came from the scanner.
'''
variableFile.previousValue = args[1].Value
def OnSheetChange(self, *args):
'''Traces changes in excel sheet'''
variableFile.addressChanged = args[1].Address
variableFile.changedValue.set(str(args[1].Value))
def OnBeforeClose(self, *args):
'''Event before the workbook closes and before asking
to save changes.
NOTE: there will be a problem if the user does not close the workbook
'''
variableFile.excelOpen.set(tk.FALSE)
pass
class XlReadWrite:
'''Class that handles reading, cleaning, processing
of the open excel.
After the file has been opened/created/selected the user
can start scanning devices. Every QR code read is processed to
determine what parameters should be included in the excel.
The excel heads correspond to the AI included in the QR codes.
If the user manually modifies or inserts a value, it won't be
processed as a QR code (except if it has an AI in brackets (AI)).
Workflow:
User options:
- Open: Opens any excel the user selects and
reads its contents
- New: User indicates the date of delivery and
the program creates a new file based on
the template.
- Select: The user can choose between the excel files
opened.
A dataframe with all the pumps is created every time the excel
is updated. This is not the optimal way to do it, but it is necessary
to handle all possible changes not related with QR codes.
'''
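# Minimal usage sketch (illustrative only; parentFrame is the Tkinter frame that
# exposes readyVar/readLbl, and the date string follows checkDate's format):
#   handler = XlReadWrite(parentFrame)
#   handler.newWb('01/01/2022')   # copies the template to REQUEST FORM 01/01/2022.xlsx
#   handler.selectWbActive('REQUEST FORM 01/01/2022.xlsx')   # or attach to an open workbook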
def __init__(self,parentFrame):
self.xl = None
self.parent = parentFrame
self.xlWorkbook = None
self.dirPath = os.path.expanduser('~\\Desktop\\REQUEST FORMS')
def openXl(self):
'''Tries to open excel. Launches excel if not open.'''
self.restartObjects()
try:
self.xl = win32.GetActiveObject('Excel.Application')
except:
try:
self.xl = win32.Dispatch('Excel.Application')
except:
self.parent.readyVar.set('Excel not available. Please make sure excel is installed')
self.parent.readLbl.config(foreground = 'red')
def saveExcel(self):
'''Save excel file'''
self.xlWorkbook.Save()
def restartObjects(self):
'''Sets all win32 objects references to None
This is redundant, but was necessary to check possible problems
'''
self.xl = None
self.xlWorkbook = None
self.xlWorkbookEvents = None
def checkDate(self,date):
        '''Checks that the given date string is valid
'''
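        # Illustration (inferred from the pattern below, not exhaustive): day-first dates
        # such as '31/12/2021', '31-12-2021' or '31.12.2021' pass, while impossible dates
        # like '31/02/2021' find no match and a ValueError is raised further down.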
dateRegx = re.compile(r'(?:(?:31(\/|-|\.)(?:0?[13578]|1[02]))\1|(?:(?:29|30)(\/|-|\.)(?:0?[13-9]|1[0-2])\2))(?:(?:1[6-9]|[2-9]\d)?\d{2})$|^(?:29(\/|-|\.)0?2\3(?:(?:(?:1[6-9]|[2-9]\d)?(?:0[48]|[2468][048]|[13579][26])|(?:(?:16|[2468][048]|[3579][26])00))))$|^(?:0?[1-9]|1\d|2[0-8])(\/|-|\.)(?:(?:0?[1-9])|(?:1[0-2]))\4(?:(?:1[6-9]|[2-9]\d)?\d{2})')
corrDate = re.search(dateRegx, date)
if corrDate:
return date
else:
raise ValueError
def openWb(self,filePath):
'''Opens an excel file selected by the user
That file will be the one to work with
'''
self.openXl()
try:
self.xlWorkbook = self.xl.Workbooks.Open(filePath)
self.xlWorkbookEvents = win32.WithEvents(self.xlWorkbook, WorkbookEvents)
self.parent.readyVar.set('{}'.format(filePath.split('/')[-1])) # Gets name of file
self.parent.readLbl.config(foreground = 'green')
self.xl.Visible = True
self.readExcel()
variableFile.excelOpen.set(tk.TRUE)
except:
self.parent.readyVar.set('ERROR opening excel. Contact support')
def fillXlOpenList(self):
'''Creates a list with all opened excel files
        This list corresponds to the values of the combobox on the GUI
'''
try:
self.xl = win32.GetActiveObject('Excel.Application')
if self.xl.Workbooks.Count == 0:
return []
else:
xlList = []
for xl in range(1,self.xl.Workbooks.Count + 1):
xlList.append(self.xl.Workbooks(xl).Name)
return xlList
except:
return []
def selectWbActive(self,name):
'''Sets the selected workbook as working workbook
'''
self.openXl()
try:
self.xlWorkbook = self.xl.Workbooks(name)
self.xlWorkbookEvents = win32.WithEvents(self.xlWorkbook,WorkbookEvents)
self.parent.readyVar.set(name)
self.parent.readLbl.config(foreground = 'green')
self.xl.Visible = True
self.readExcel()
variableFile.excelOpen.set(tk.TRUE)
except:
self.parent.readyVar.set('Error with excel file. Please panic')
self.parent.readLbl.config(foreground = 'red')
def newWb(self,date=None):
        '''Creates a new request file in the REQUEST FORMS folder
        If the folder does not exist, it is created.
        The new file is a copy of the template included with the program.
        The file is named REQUEST FORM + DATE.
        The date is entered by the user through the GUI
'''
self.openXl()
try:
corrDate = self.checkDate(date)
name = 'REQUEST FORM {}.xlsx'.format(corrDate)
if not os.path.isdir(self.dirPath):
os.mkdir(os.path.expanduser(self.dirPath))
# NOTE: folder template required to work
source = os.path.join(os.path.dirname(__file__),'templates','REQUEST FORM TEMPLATE.xlsx')
destiny = os.path.join(self.dirPath,name)
if not os.path.isfile(os.path.join(self.dirPath,name)):
shutil.copy(source, destiny)
self.xlWorkbook = self.xl.Workbooks.Open(destiny)
self.xlWorkbookEvents = win32.WithEvents(self.xlWorkbook,WorkbookEvents)
self.xl.Visible = True
self.parent.readyVar.set(name)
self.parent.readLbl.config(foreground = 'green')
self.xlWorkbook.Worksheets('Sheet1').Range('$B$1').Value = corrDate
self.xlWorkbook.Save()
variableFile.excelOpen.set(tk.TRUE)
self.readExcel()
else:
self.parent.fileExists()
except ValueError:
self.parent.wrognDate()
def readExcel(self):
        '''Reads the current excel and builds a DataFrame with all the pumps included
        The DataFrame is used to check duplicates and do analytics
'''
try:
self.values = self.xlWorkbook.Worksheets('Sheet1').UsedRange.Value
except:
print('No sheet called Sheet1')
self.heads = self.values[1] # Request form heads are in row 2
vals = self.values[2:] # Values start at row 3
self.xlHeadsAI = []
# FIXME: if the excel has a column that is not in AI variableFile it will be a problem
for head in self.heads:
for key in variableFile.AI.keys():
if head in variableFile.AI[key]:
self.xlHeadsAI.append(key[1:-1]) # removed first and last item corresponding to ()
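        # Hypothetical illustration (the real mapping lives in variableFile.AI): if
        # variableFile.AI were {'(240)': 'Additional product id', '(21)': 'Serial number'}
        # and the sheet heads matched those entries, xlHeadsAI would collect the bare
        # AI codes ['240', '21'], i.e. the dictionary keys stripped of their brackets.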
tempDict = self.excelValToDict(vals)
self.dfValues = pd.DataFrame(tempDict)
if not self.dfValues.empty:
            self.dfValues = self.dfValues.convert_dtypes() # Converts column types to the corresponding dtypes
for col in self.dfValues.select_dtypes(include = 'string'):
self.dfValues[col] = self.dfValues[col].str.normalize('NFKD') # Normalises unicode to include whitespaces (instead of \xa0)
def excelValToDict(self,vals):
'''Converts tuples from excel into a dictionary
Columns correspond to excel head values
'''
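        # Small worked example (illustrative values only): with heads = ('Serial', 'Model')
        # and vals = ((1, 'a'), (2, 'b')), zip(*vals) transposes rows into columns and the
        # returned dictionary is {'Serial': [1, 2], 'Model': ['a', 'b']}.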
tempMap = map(list,zip(*vals)) # Transposes excel value tuples
tempDict = dict(zip(self.heads,list(tempMap)))
return tempDict
def processChanges(self,n,m,x):
        '''Processes changes made on the excel sheet
        Splits the last changed value by () to get AIs and their values
        Appends a dictionary with AIs as keys and scanned values as values to the existing df
'''
readQR = str(variableFile.changedValue.get())
valsAI = [tuple(i.split(')')) for i in readQR.split('(')]
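        # Worked example (hypothetical scanner read): readQR = "(01)12345(21)A7" splits into
        # valsAI = [('',), ('01', '12345'), ('21', 'A7')]; the leading ('',) entry is what
        # the check below uses to recognise input that came from the scanner.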
tempList = []
try: # If multiple cells selected consider deleting
isDelete = all(item is None for tup in literal_eval(readQR) for item in tup)
except:
isDelete = False
# NOTE: headsAI dependent on Excel column names. If not correct, wrong results.
if valsAI[0][0] == '' and not isDelete: # Input comes from QR
for vals in valsAI:
for head in self.xlHeadsAI:
if vals[0] == head:
tempList.append((self.heads[self.xlHeadsAI.index(head)],vals[1]))
break
if tempList: # Only append values if list not empty
if self.dfValues.empty:
self.dfValues = pd.DataFrame(columns = self.heads) #Make sure all columns are present in the df
self.dfValues = self.dfValues.append(dict(tempList),ignore_index = True)
self.dfValues.replace({np.nan: None}, inplace = True)
self.writeExcel()
elif isDelete:
self.deleteCell(literal_eval(readQR))
self.formatExcel()
else: # Update value introduced by user in dfValues
modCell = self.multipleCellChange()
try:
self.dfValues.iloc[modCell[0],modCell[1]] = readQR
except (IndexError, AttributeError) as _: # If user modifies cell after last row, read excel again
self.readExcel()
# If client request files has been loaded, update the table
if self.parent.myParent.existsTable():
self.parent.myParent.updateTable(self.returnCountDevices())
def deleteCell(self,read):
        '''Processes the deletion of a cell or a range of cells'''
modCell = self.multipleCellChange()
tempDict = self.excelValToDict(read)
tempDf = | pd.DataFrame(tempDict) | pandas.DataFrame |
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import operator
from itertools import product, starmap
from numpy import nan, inf
import numpy as np
import pandas as pd
from pandas import (Index, Series, DataFrame, isnull, bdate_range,
NaT, date_range, timedelta_range,
_np_version_under1p8)
from pandas.tseries.index import Timestamp
from pandas.tseries.tdi import Timedelta
import pandas.core.nanops as nanops
from pandas.compat import range, zip
from pandas import compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from .common import TestData
class TestSeriesOperators(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_comparisons(self):
left = np.random.randn(10)
right = np.random.randn(10)
left[:3] = np.nan
result = nanops.nangt(left, right)
with np.errstate(invalid='ignore'):
expected = (left > right).astype('O')
expected[:3] = np.nan
assert_almost_equal(result, expected)
s = Series(['a', 'b', 'c'])
s2 = Series([False, True, False])
# it works!
exp = Series([False, False, False])
tm.assert_series_equal(s == s2, exp)
tm.assert_series_equal(s2 == s, exp)
def test_op_method(self):
def check(series, other, check_reverse=False):
simple_ops = ['add', 'sub', 'mul', 'floordiv', 'truediv', 'pow']
if not compat.PY3:
simple_ops.append('div')
for opname in simple_ops:
op = getattr(Series, opname)
                if opname == 'div':
alt = operator.truediv
else:
alt = getattr(operator, opname)
result = op(series, other)
expected = alt(series, other)
tm.assert_almost_equal(result, expected)
if check_reverse:
rop = getattr(Series, "r" + opname)
result = rop(series, other)
expected = alt(other, series)
tm.assert_almost_equal(result, expected)
check(self.ts, self.ts * 2)
check(self.ts, self.ts[::2])
check(self.ts, 5, check_reverse=True)
check(tm.makeFloatSeries(), tm.makeFloatSeries(), check_reverse=True)
def test_neg(self):
assert_series_equal(-self.series, -1 * self.series)
def test_invert(self):
assert_series_equal(-(self.series < 0), ~(self.series < 0))
def test_div(self):
with np.errstate(all='ignore'):
# no longer do integer div for any ops, but deal with the 0's
p = DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
result = p['first'] / p['second']
expected = Series(
p['first'].values.astype(float) / p['second'].values,
dtype='float64')
expected.iloc[0:3] = np.inf
assert_series_equal(result, expected)
result = p['first'] / 0
expected = Series(np.inf, index=p.index, name='first')
assert_series_equal(result, expected)
p = p.astype('float64')
result = p['first'] / p['second']
expected = Series(p['first'].values / p['second'].values)
assert_series_equal(result, expected)
p = DataFrame({'first': [3, 4, 5, 8], 'second': [1, 1, 1, 1]})
result = p['first'] / p['second']
assert_series_equal(result, p['first'].astype('float64'),
check_names=False)
self.assertTrue(result.name is None)
self.assertFalse(np.array_equal(result, p['second'] / p['first']))
# inf signing
s = Series([np.nan, 1., -1.])
result = s / 0
expected = Series([np.nan, np.inf, -np.inf])
assert_series_equal(result, expected)
# float/integer issue
# GH 7785
p = DataFrame({'first': (1, 0), 'second': (-0.01, -0.02)})
expected = Series([-0.01, -np.inf])
result = p['second'].div(p['first'])
assert_series_equal(result, expected, check_names=False)
result = p['second'] / p['first']
assert_series_equal(result, expected)
# GH 9144
s = Series([-1, 0, 1])
result = 0 / s
expected = Series([0.0, nan, 0.0])
assert_series_equal(result, expected)
result = s / 0
expected = Series([-inf, nan, inf])
assert_series_equal(result, expected)
result = s // 0
expected = Series([-inf, nan, inf])
assert_series_equal(result, expected)
def test_operators(self):
def _check_op(series, other, op, pos_only=False,
check_dtype=True):
left = np.abs(series) if pos_only else series
right = np.abs(other) if pos_only else other
cython_or_numpy = op(left, right)
python = left.combine(right, op)
tm.assert_series_equal(cython_or_numpy, python,
check_dtype=check_dtype)
def check(series, other):
simple_ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'mod']
for opname in simple_ops:
_check_op(series, other, getattr(operator, opname))
_check_op(series, other, operator.pow, pos_only=True)
_check_op(series, other, lambda x, y: operator.add(y, x))
_check_op(series, other, lambda x, y: operator.sub(y, x))
_check_op(series, other, lambda x, y: operator.truediv(y, x))
_check_op(series, other, lambda x, y: operator.floordiv(y, x))
_check_op(series, other, lambda x, y: operator.mul(y, x))
_check_op(series, other, lambda x, y: operator.pow(y, x),
pos_only=True)
_check_op(series, other, lambda x, y: operator.mod(y, x))
check(self.ts, self.ts * 2)
check(self.ts, self.ts * 0)
check(self.ts, self.ts[::2])
check(self.ts, 5)
def check_comparators(series, other, check_dtype=True):
_check_op(series, other, operator.gt, check_dtype=check_dtype)
_check_op(series, other, operator.ge, check_dtype=check_dtype)
_check_op(series, other, operator.eq, check_dtype=check_dtype)
_check_op(series, other, operator.lt, check_dtype=check_dtype)
_check_op(series, other, operator.le, check_dtype=check_dtype)
check_comparators(self.ts, 5)
check_comparators(self.ts, self.ts + 1, check_dtype=False)
def test_operators_empty_int_corner(self):
s1 = Series([], [], dtype=np.int32)
s2 = Series({'x': 0.})
tm.assert_series_equal(s1 * s2, Series([np.nan], index=['x']))
def test_operators_timedelta64(self):
# invalid ops
self.assertRaises(Exception, self.objSeries.__add__, 1)
self.assertRaises(Exception, self.objSeries.__add__,
np.array(1, dtype=np.int64))
self.assertRaises(Exception, self.objSeries.__sub__, 1)
self.assertRaises(Exception, self.objSeries.__sub__,
np.array(1, dtype=np.int64))
        # series ops
v1 = date_range('2012-1-1', periods=3, freq='D')
v2 = date_range('2012-1-2', periods=3, freq='D')
rs = Series(v2) - Series(v1)
xp = Series(1e9 * 3600 * 24,
rs.index).astype('int64').astype('timedelta64[ns]')
assert_series_equal(rs, xp)
self.assertEqual(rs.dtype, 'timedelta64[ns]')
df = DataFrame(dict(A=v1))
td = Series([timedelta(days=i) for i in range(3)])
self.assertEqual(td.dtype, 'timedelta64[ns]')
# series on the rhs
result = df['A'] - df['A'].shift()
self.assertEqual(result.dtype, 'timedelta64[ns]')
result = df['A'] + td
self.assertEqual(result.dtype, 'M8[ns]')
# scalar Timestamp on rhs
maxa = df['A'].max()
tm.assertIsInstance(maxa, Timestamp)
resultb = df['A'] - df['A'].max()
self.assertEqual(resultb.dtype, 'timedelta64[ns]')
# timestamp on lhs
result = resultb + df['A']
values = [Timestamp('20111230'), Timestamp('20120101'),
Timestamp('20120103')]
expected = Series(values, name='A')
assert_series_equal(result, expected)
# datetimes on rhs
result = df['A'] - datetime(2001, 1, 1)
expected = Series(
[timedelta(days=4017 + i) for i in range(3)], name='A')
assert_series_equal(result, expected)
self.assertEqual(result.dtype, 'm8[ns]')
d = datetime(2001, 1, 1, 3, 4)
resulta = df['A'] - d
self.assertEqual(resulta.dtype, 'm8[ns]')
# roundtrip
resultb = resulta + d
assert_series_equal(df['A'], resultb)
# timedeltas on rhs
td = timedelta(days=1)
resulta = df['A'] + td
resultb = resulta - td
assert_series_equal(resultb, df['A'])
self.assertEqual(resultb.dtype, 'M8[ns]')
# roundtrip
td = timedelta(minutes=5, seconds=3)
resulta = df['A'] + td
resultb = resulta - td
assert_series_equal(df['A'], resultb)
self.assertEqual(resultb.dtype, 'M8[ns]')
# inplace
value = rs[2] + np.timedelta64(timedelta(minutes=5, seconds=1))
rs[2] += np.timedelta64(timedelta(minutes=5, seconds=1))
self.assertEqual(rs[2], value)
def test_operator_series_comparison_zerorank(self):
# GH 13006
result = np.float64(0) > pd.Series([1, 2, 3])
expected = 0.0 > pd.Series([1, 2, 3])
self.assert_series_equal(result, expected)
result = pd.Series([1, 2, 3]) < np.float64(0)
expected = pd.Series([1, 2, 3]) < 0.0
self.assert_series_equal(result, expected)
result = np.array([0, 1, 2])[0] > pd.Series([0, 1, 2])
        expected = 0.0 > pd.Series([0, 1, 2])
self.assert_series_equal(result, expected)
def test_timedeltas_with_DateOffset(self):
# GH 4532
# operate with pd.offsets
s = Series([Timestamp('20130101 9:01'), Timestamp('20130101 9:02')])
result = s + pd.offsets.Second(5)
result2 = pd.offsets.Second(5) + s
expected = Series([Timestamp('20130101 9:01:05'), Timestamp(
'20130101 9:02:05')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s - pd.offsets.Second(5)
result2 = -pd.offsets.Second(5) + s
expected = Series([Timestamp('20130101 9:00:55'), Timestamp(
'20130101 9:01:55')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + pd.offsets.Milli(5)
result2 = pd.offsets.Milli(5) + s
expected = Series([Timestamp('20130101 9:01:00.005'), Timestamp(
'20130101 9:02:00.005')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + pd.offsets.Minute(5) + pd.offsets.Milli(5)
expected = Series([Timestamp('20130101 9:06:00.005'), Timestamp(
'20130101 9:07:00.005')])
assert_series_equal(result, expected)
# operate with np.timedelta64 correctly
result = s + np.timedelta64(1, 's')
result2 = np.timedelta64(1, 's') + s
expected = Series([Timestamp('20130101 9:01:01'), Timestamp(
'20130101 9:02:01')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + np.timedelta64(5, 'ms')
result2 = np.timedelta64(5, 'ms') + s
expected = Series([Timestamp('20130101 9:01:00.005'), Timestamp(
'20130101 9:02:00.005')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# valid DateOffsets
for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli',
'Nano']:
op = getattr(pd.offsets, do)
s + op(5)
op(5) + s
def test_timedelta_series_ops(self):
# GH11925
s = Series(timedelta_range('1 day', periods=3))
ts = Timestamp('2012-01-01')
expected = Series(date_range('2012-01-02', periods=3))
assert_series_equal(ts + s, expected)
assert_series_equal(s + ts, expected)
expected2 = Series(date_range('2011-12-31', periods=3, freq='-1D'))
assert_series_equal(ts - s, expected2)
assert_series_equal(ts + (-s), expected2)
def test_timedelta64_operations_with_DateOffset(self):
# GH 10699
td = Series([timedelta(minutes=5, seconds=3)] * 3)
result = td + pd.offsets.Minute(1)
expected = Series([timedelta(minutes=6, seconds=3)] * 3)
assert_series_equal(result, expected)
result = td - pd.offsets.Minute(1)
expected = Series([timedelta(minutes=4, seconds=3)] * 3)
assert_series_equal(result, expected)
result = td + Series([pd.offsets.Minute(1), pd.offsets.Second(3),
pd.offsets.Hour(2)])
expected = Series([timedelta(minutes=6, seconds=3), timedelta(
minutes=5, seconds=6), timedelta(hours=2, minutes=5, seconds=3)])
assert_series_equal(result, expected)
result = td + pd.offsets.Minute(1) + pd.offsets.Second(12)
expected = Series([timedelta(minutes=6, seconds=15)] * 3)
assert_series_equal(result, expected)
# valid DateOffsets
for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli',
'Nano']:
op = getattr(pd.offsets, do)
td + op(5)
op(5) + td
td - op(5)
op(5) - td
def test_timedelta64_operations_with_timedeltas(self):
# td operate with td
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td2 = timedelta(minutes=5, seconds=4)
result = td1 - td2
expected = Series([timedelta(seconds=0)] * 3) - Series([timedelta(
seconds=1)] * 3)
self.assertEqual(result.dtype, 'm8[ns]')
assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) - Series([timedelta(
seconds=0)] * 3))
assert_series_equal(result2, expected)
# roundtrip
assert_series_equal(result + td2, td1)
# Now again, using pd.to_timedelta, which should build
# a Series or a scalar, depending on input.
td1 = Series(pd.to_timedelta(['00:05:03'] * 3))
td2 = pd.to_timedelta('00:05:04')
result = td1 - td2
expected = Series([timedelta(seconds=0)] * 3) - Series([timedelta(
seconds=1)] * 3)
self.assertEqual(result.dtype, 'm8[ns]')
assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) - Series([timedelta(
seconds=0)] * 3))
assert_series_equal(result2, expected)
# roundtrip
assert_series_equal(result + td2, td1)
def test_timedelta64_operations_with_integers(self):
# GH 4521
# divide/multiply by integers
startdate = Series(date_range('2013-01-01', '2013-01-03'))
enddate = Series(date_range('2013-03-01', '2013-03-03'))
s1 = enddate - startdate
s1[2] = np.nan
s2 = Series([2, 3, 4])
expected = Series(s1.values.astype(np.int64) / s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 / s2
assert_series_equal(result, expected)
s2 = Series([20, 30, 40])
expected = Series(s1.values.astype(np.int64) / s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 / s2
assert_series_equal(result, expected)
result = s1 / 2
expected = Series(s1.values.astype(np.int64) / 2, dtype='m8[ns]')
expected[2] = np.nan
assert_series_equal(result, expected)
s2 = Series([20, 30, 40])
expected = Series(s1.values.astype(np.int64) * s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 * s2
assert_series_equal(result, expected)
for dtype in ['int32', 'int16', 'uint32', 'uint64', 'uint32', 'uint16',
'uint8']:
s2 = Series([20, 30, 40], dtype=dtype)
expected = Series(
s1.values.astype(np.int64) * s2.astype(np.int64),
dtype='m8[ns]')
expected[2] = np.nan
result = s1 * s2
assert_series_equal(result, expected)
result = s1 * 2
expected = Series(s1.values.astype(np.int64) * 2, dtype='m8[ns]')
expected[2] = np.nan
assert_series_equal(result, expected)
result = s1 * -1
expected = Series(s1.values.astype(np.int64) * -1, dtype='m8[ns]')
expected[2] = np.nan
assert_series_equal(result, expected)
# invalid ops
assert_series_equal(s1 / s2.astype(float),
Series([Timedelta('2 days 22:48:00'), Timedelta(
'1 days 23:12:00'), Timedelta('NaT')]))
assert_series_equal(s1 / 2.0,
Series([Timedelta('29 days 12:00:00'), Timedelta(
'29 days 12:00:00'), Timedelta('NaT')]))
for op in ['__add__', '__sub__']:
sop = getattr(s1, op, None)
if sop is not None:
self.assertRaises(TypeError, sop, 1)
self.assertRaises(TypeError, sop, s2.values)
def test_timedelta64_conversions(self):
startdate = Series(date_range('2013-01-01', '2013-01-03'))
enddate = Series(date_range('2013-03-01', '2013-03-03'))
s1 = enddate - startdate
s1[2] = np.nan
for m in [1, 3, 10]:
for unit in ['D', 'h', 'm', 's', 'ms', 'us', 'ns']:
# op
expected = s1.apply(lambda x: x / np.timedelta64(m, unit))
result = s1 / np.timedelta64(m, unit)
assert_series_equal(result, expected)
if m == 1 and unit != 'ns':
# astype
result = s1.astype("timedelta64[{0}]".format(unit))
assert_series_equal(result, expected)
# reverse op
expected = s1.apply(
lambda x: Timedelta(np.timedelta64(m, unit)) / x)
result = np.timedelta64(m, unit) / s1
# astype
s = Series(date_range('20130101', periods=3))
result = s.astype(object)
self.assertIsInstance(result.iloc[0], datetime)
self.assertTrue(result.dtype == np.object_)
result = s1.astype(object)
self.assertIsInstance(result.iloc[0], timedelta)
self.assertTrue(result.dtype == np.object_)
def test_timedelta64_equal_timedelta_supported_ops(self):
ser = Series([Timestamp('20130301'), Timestamp('20130228 23:00:00'),
Timestamp('20130228 22:00:00'), Timestamp(
'20130228 21:00:00')])
intervals = 'D', 'h', 'm', 's', 'us'
# TODO: unused
# npy16_mappings = {'D': 24 * 60 * 60 * 1000000,
# 'h': 60 * 60 * 1000000,
# 'm': 60 * 1000000,
# 's': 1000000,
# 'us': 1}
def timedelta64(*args):
return sum(starmap(np.timedelta64, zip(args, intervals)))
for op, d, h, m, s, us in product([operator.add, operator.sub],
*([range(2)] * 5)):
nptd = timedelta64(d, h, m, s, us)
pytd = timedelta(days=d, hours=h, minutes=m, seconds=s,
microseconds=us)
lhs = op(ser, nptd)
rhs = op(ser, pytd)
try:
assert_series_equal(lhs, rhs)
except:
raise AssertionError(
"invalid comparsion [op->{0},d->{1},h->{2},m->{3},"
"s->{4},us->{5}]\n{6}\n{7}\n".format(op, d, h, m, s,
us, lhs, rhs))
def test_operators_datetimelike(self):
def run_ops(ops, get_ser, test_ser):
# check that we are getting a TypeError
# with 'operate' (from core/ops.py) for the ops that are not
# defined
for op_str in ops:
op = getattr(get_ser, op_str, None)
with tm.assertRaisesRegexp(TypeError, 'operate'):
op(test_ser)
# ## timedelta64 ###
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
td2 = timedelta(minutes=5, seconds=4)
ops = ['__mul__', '__floordiv__', '__pow__', '__rmul__',
'__rfloordiv__', '__rpow__']
run_ops(ops, td1, td2)
td1 + td2
td2 + td1
td1 - td2
td2 - td1
td1 / td2
td2 / td1
# ## datetime64 ###
dt1 = Series([Timestamp('20111230'), Timestamp('20120101'),
Timestamp('20120103')])
dt1.iloc[2] = np.nan
dt2 = Series([Timestamp('20111231'), Timestamp('20120102'),
Timestamp('20120104')])
ops = ['__add__', '__mul__', '__floordiv__', '__truediv__', '__div__',
'__pow__', '__radd__', '__rmul__', '__rfloordiv__',
'__rtruediv__', '__rdiv__', '__rpow__']
run_ops(ops, dt1, dt2)
dt1 - dt2
dt2 - dt1
        # ## datetime64 with timedelta ###
ops = ['__mul__', '__floordiv__', '__truediv__', '__div__', '__pow__',
'__rmul__', '__rfloordiv__', '__rtruediv__', '__rdiv__',
'__rpow__']
run_ops(ops, dt1, td1)
dt1 + td1
td1 + dt1
dt1 - td1
# TODO: Decide if this ought to work.
# td1 - dt1
        # ## timedelta with datetime64 ###
ops = ['__sub__', '__mul__', '__floordiv__', '__truediv__', '__div__',
'__pow__', '__rmul__', '__rfloordiv__', '__rtruediv__',
'__rdiv__', '__rpow__']
run_ops(ops, td1, dt1)
td1 + dt1
dt1 + td1
# 8260, 10763
# datetime64 with tz
ops = ['__mul__', '__floordiv__', '__truediv__', '__div__', '__pow__',
'__rmul__', '__rfloordiv__', '__rtruediv__', '__rdiv__',
'__rpow__']
tz = 'US/Eastern'
dt1 = Series(date_range('2000-01-01 09:00:00', periods=5,
tz=tz), name='foo')
dt2 = dt1.copy()
dt2.iloc[2] = np.nan
td1 = Series(timedelta_range('1 days 1 min', periods=5, freq='H'))
td2 = td1.copy()
td2.iloc[1] = np.nan
run_ops(ops, dt1, td1)
result = dt1 + td1[0]
exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt2 + td2[0]
exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
# odd numpy behavior with scalar timedeltas
if not _np_version_under1p8:
result = td1[0] + dt1
exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = td2[0] + dt2
exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt1 - td1[0]
exp = (dt1.dt.tz_localize(None) - td1[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
self.assertRaises(TypeError, lambda: td1[0] - dt1)
result = dt2 - td2[0]
exp = (dt2.dt.tz_localize(None) - td2[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
self.assertRaises(TypeError, lambda: td2[0] - dt2)
result = dt1 + td1
exp = (dt1.dt.tz_localize(None) + td1).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt2 + td2
exp = (dt2.dt.tz_localize(None) + td2).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt1 - td1
exp = (dt1.dt.tz_localize(None) - td1).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt2 - td2
exp = (dt2.dt.tz_localize(None) - td2).dt.tz_localize(tz)
assert_series_equal(result, exp)
self.assertRaises(TypeError, lambda: td1 - dt1)
self.assertRaises(TypeError, lambda: td2 - dt2)
def test_sub_single_tz(self):
# GH12290
s1 = Series([pd.Timestamp('2016-02-10', tz='America/Sao_Paulo')])
s2 = Series([pd.Timestamp('2016-02-08', tz='America/Sao_Paulo')])
result = s1 - s2
expected = Series([Timedelta('2days')])
assert_series_equal(result, expected)
result = s2 - s1
expected = Series([Timedelta('-2days')])
assert_series_equal(result, expected)
def test_ops_nat(self):
# GH 11349
timedelta_series = Series([NaT, Timedelta('1s')])
datetime_series = Series([NaT, Timestamp('19900315')])
nat_series_dtype_timedelta = Series(
[NaT, NaT], dtype='timedelta64[ns]')
nat_series_dtype_timestamp = Series([NaT, NaT], dtype='datetime64[ns]')
single_nat_dtype_datetime = Series([NaT], dtype='datetime64[ns]')
single_nat_dtype_timedelta = Series([NaT], dtype='timedelta64[ns]')
# subtraction
assert_series_equal(timedelta_series - NaT, nat_series_dtype_timedelta)
assert_series_equal(-NaT + timedelta_series,
nat_series_dtype_timedelta)
assert_series_equal(timedelta_series - single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(-single_nat_dtype_timedelta + timedelta_series,
nat_series_dtype_timedelta)
assert_series_equal(datetime_series - NaT, nat_series_dtype_timestamp)
assert_series_equal(-NaT + datetime_series, nat_series_dtype_timestamp)
assert_series_equal(datetime_series - single_nat_dtype_datetime,
nat_series_dtype_timedelta)
with tm.assertRaises(TypeError):
-single_nat_dtype_datetime + datetime_series
assert_series_equal(datetime_series - single_nat_dtype_timedelta,
nat_series_dtype_timestamp)
assert_series_equal(-single_nat_dtype_timedelta + datetime_series,
nat_series_dtype_timestamp)
# without a Series wrapping the NaT, it is ambiguous
# whether it is a datetime64 or timedelta64
# defaults to interpreting it as timedelta64
assert_series_equal(nat_series_dtype_timestamp - NaT,
nat_series_dtype_timestamp)
assert_series_equal(-NaT + nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timestamp -
single_nat_dtype_datetime,
nat_series_dtype_timedelta)
with tm.assertRaises(TypeError):
-single_nat_dtype_datetime + nat_series_dtype_timestamp
assert_series_equal(nat_series_dtype_timestamp -
single_nat_dtype_timedelta,
nat_series_dtype_timestamp)
assert_series_equal(-single_nat_dtype_timedelta +
nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
with tm.assertRaises(TypeError):
timedelta_series - single_nat_dtype_datetime
# addition
assert_series_equal(nat_series_dtype_timestamp + NaT,
nat_series_dtype_timestamp)
assert_series_equal(NaT + nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timestamp +
single_nat_dtype_timedelta,
nat_series_dtype_timestamp)
assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timedelta + NaT,
nat_series_dtype_timedelta)
assert_series_equal(NaT + nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(timedelta_series + NaT, nat_series_dtype_timedelta)
assert_series_equal(NaT + timedelta_series, nat_series_dtype_timedelta)
assert_series_equal(timedelta_series + single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(single_nat_dtype_timedelta + timedelta_series,
nat_series_dtype_timedelta)
assert_series_equal(nat_series_dtype_timestamp + NaT,
nat_series_dtype_timestamp)
assert_series_equal(NaT + nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timestamp +
single_nat_dtype_timedelta,
nat_series_dtype_timestamp)
assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timedelta + NaT,
nat_series_dtype_timedelta)
assert_series_equal(NaT + nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_datetime,
nat_series_dtype_timestamp)
assert_series_equal(single_nat_dtype_datetime +
nat_series_dtype_timedelta,
nat_series_dtype_timestamp)
# multiplication
assert_series_equal(nat_series_dtype_timedelta * 1.0,
nat_series_dtype_timedelta)
assert_series_equal(1.0 * nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(timedelta_series * 1, timedelta_series)
assert_series_equal(1 * timedelta_series, timedelta_series)
assert_series_equal(timedelta_series * 1.5,
Series([NaT, Timedelta('1.5s')]))
assert_series_equal(1.5 * timedelta_series,
Series([NaT, Timedelta('1.5s')]))
assert_series_equal(timedelta_series * nan, nat_series_dtype_timedelta)
assert_series_equal(nan * timedelta_series, nat_series_dtype_timedelta)
with tm.assertRaises(TypeError):
datetime_series * 1
with tm.assertRaises(TypeError):
nat_series_dtype_timestamp * 1
with tm.assertRaises(TypeError):
datetime_series * 1.0
with tm.assertRaises(TypeError):
nat_series_dtype_timestamp * 1.0
# division
assert_series_equal(timedelta_series / 2,
Series([NaT, Timedelta('0.5s')]))
assert_series_equal(timedelta_series / 2.0,
Series([NaT, Timedelta('0.5s')]))
assert_series_equal(timedelta_series / nan, nat_series_dtype_timedelta)
with tm.assertRaises(TypeError):
nat_series_dtype_timestamp / 1.0
with tm.assertRaises(TypeError):
nat_series_dtype_timestamp / 1
def test_ops_datetimelike_align(self):
# GH 7500
# datetimelike ops need to align
dt = Series(date_range('2012-1-1', periods=3, freq='D'))
dt.iloc[2] = np.nan
dt2 = dt[::-1]
expected = Series([timedelta(0), timedelta(0), pd.NaT])
# name is reset
result = dt2 - dt
assert_series_equal(result, expected)
expected = Series(expected, name=0)
result = (dt2.to_frame() - dt.to_frame())[0]
assert_series_equal(result, expected)
def test_object_comparisons(self):
s = Series(['a', 'b', np.nan, 'c', 'a'])
result = s == 'a'
expected = Series([True, False, False, False, True])
assert_series_equal(result, expected)
result = s < 'a'
expected = Series([False, False, False, False, False])
assert_series_equal(result, expected)
result = s != 'a'
expected = -(s == 'a')
assert_series_equal(result, expected)
def test_comparison_tuples(self):
# GH11339
# comparisons vs tuple
s = Series([(1, 1), (1, 2)])
result = s == (1, 2)
expected = Series([False, True])
assert_series_equal(result, expected)
result = s != (1, 2)
expected = Series([True, False])
assert_series_equal(result, expected)
result = s == (0, 0)
expected = Series([False, False])
assert_series_equal(result, expected)
result = s != (0, 0)
expected = Series([True, True])
assert_series_equal(result, expected)
s = Series([(1, 1), (1, 1)])
result = s == (1, 1)
expected = Series([True, True])
assert_series_equal(result, expected)
result = s != (1, 1)
expected = Series([False, False])
assert_series_equal(result, expected)
s = Series([frozenset([1]), frozenset([1, 2])])
result = s == frozenset([1])
expected = Series([True, False])
assert_series_equal(result, expected)
def test_comparison_operators_with_nas(self):
s = Series(bdate_range('1/1/2000', periods=10), dtype=object)
s[::2] = np.nan
# test that comparisons work
ops = ['lt', 'le', 'gt', 'ge', 'eq', 'ne']
for op in ops:
val = s[5]
f = getattr(operator, op)
result = f(s, val)
expected = f(s.dropna(), val).reindex(s.index)
if op == 'ne':
expected = expected.fillna(True).astype(bool)
else:
expected = expected.fillna(False).astype(bool)
assert_series_equal(result, expected)
            # NOTE: the reversed comparisons below misbehave, hence they stay commented out
# result = f(val, s)
# expected = f(val, s.dropna()).reindex(s.index)
# assert_series_equal(result, expected)
# boolean &, |, ^ should work with object arrays and propagate NAs
ops = ['and_', 'or_', 'xor']
mask = s.isnull()
for bool_op in ops:
f = getattr(operator, bool_op)
filled = s.fillna(s[0])
result = f(s < s[9], s > s[3])
expected = f(filled < filled[9], filled > filled[3])
expected[mask] = False
assert_series_equal(result, expected)
def test_comparison_object_numeric_nas(self):
s = Series(np.random.randn(10), dtype=object)
shifted = s.shift(2)
ops = ['lt', 'le', 'gt', 'ge', 'eq', 'ne']
for op in ops:
f = getattr(operator, op)
result = f(s, shifted)
expected = f(s.astype(float), shifted.astype(float))
assert_series_equal(result, expected)
def test_comparison_invalid(self):
# GH4968
# invalid date/int comparisons
s = Series(range(5))
s2 = Series(date_range('20010101', periods=5))
for (x, y) in [(s, s2), (s2, s)]:
self.assertRaises(TypeError, lambda: x == y)
self.assertRaises(TypeError, lambda: x != y)
self.assertRaises(TypeError, lambda: x >= y)
self.assertRaises(TypeError, lambda: x > y)
self.assertRaises(TypeError, lambda: x < y)
self.assertRaises(TypeError, lambda: x <= y)
def test_more_na_comparisons(self):
for dtype in [None, object]:
left = Series(['a', np.nan, 'c'], dtype=dtype)
right = Series(['a', np.nan, 'd'], dtype=dtype)
result = left == right
expected = Series([True, False, False])
assert_series_equal(result, expected)
result = left != right
expected = Series([False, True, True])
assert_series_equal(result, expected)
result = left == np.nan
expected = Series([False, False, False])
assert_series_equal(result, expected)
result = left != np.nan
expected = Series([True, True, True])
assert_series_equal(result, expected)
def test_nat_comparisons(self):
data = [([pd.Timestamp('2011-01-01'), pd.NaT,
pd.Timestamp('2011-01-03')],
[pd.NaT, pd.NaT, pd.Timestamp('2011-01-03')]),
([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')],
[pd.NaT, pd.NaT, pd.Timedelta('3 days')]),
([pd.Period('2011-01', freq='M'), pd.NaT,
pd.Period('2011-03', freq='M')],
[pd.NaT, pd.NaT, pd.Period('2011-03', freq='M')])]
# add lhs / rhs switched data
data = data + [(r, l) for l, r in data]
for l, r in data:
for dtype in [None, object]:
left = Series(l, dtype=dtype)
# Series, Index
for right in [Series(r, dtype=dtype), Index(r, dtype=dtype)]:
expected = Series([False, False, True])
assert_series_equal(left == right, expected)
expected = Series([True, True, False])
assert_series_equal(left != right, expected)
expected = Series([False, False, False])
assert_series_equal(left < right, expected)
expected = Series([False, False, False])
assert_series_equal(left > right, expected)
expected = Series([False, False, True])
assert_series_equal(left >= right, expected)
expected = Series([False, False, True])
assert_series_equal(left <= right, expected)
def test_nat_comparisons_scalar(self):
data = [[pd.Timestamp('2011-01-01'), pd.NaT,
pd.Timestamp('2011-01-03')],
[pd.Timedelta('1 days'), pd.NaT, pd.Timedelta('3 days')],
[pd.Period('2011-01', freq='M'), pd.NaT,
pd.Period('2011-03', freq='M')]]
for l in data:
for dtype in [None, object]:
left = Series(l, dtype=dtype)
expected = Series([False, False, False])
assert_series_equal(left == pd.NaT, expected)
assert_series_equal(pd.NaT == left, expected)
expected = Series([True, True, True])
assert_series_equal(left != pd.NaT, expected)
assert_series_equal(pd.NaT != left, expected)
expected = Series([False, False, False])
assert_series_equal(left < pd.NaT, expected)
assert_series_equal(pd.NaT > left, expected)
assert_series_equal(left <= pd.NaT, expected)
assert_series_equal(pd.NaT >= left, expected)
assert_series_equal(left > pd.NaT, expected)
assert_series_equal(pd.NaT < left, expected)
assert_series_equal(left >= pd.NaT, expected)
assert_series_equal(pd.NaT <= left, expected)
def test_comparison_different_length(self):
a = Series(['a', 'b', 'c'])
b = Series(['b', 'a'])
self.assertRaises(ValueError, a.__lt__, b)
a = Series([1, 2])
b = Series([2, 3, 4])
self.assertRaises(ValueError, a.__eq__, b)
def test_comparison_label_based(self):
# GH 4947
# comparisons should be label based
a = Series([True, False, True], list('bca'))
b = Series([False, True, False], list('abc'))
expected = Series([False, True, False], list('abc'))
result = a & b
assert_series_equal(result, expected)
expected = Series([True, True, False], list('abc'))
result = a | b
assert_series_equal(result, expected)
expected = Series([True, False, False], list('abc'))
result = a ^ b
assert_series_equal(result, expected)
# rhs is bigger
a = Series([True, False, True], list('bca'))
b = Series([False, True, False, True], list('abcd'))
expected = Series([False, True, False, False], list('abcd'))
result = a & b
assert_series_equal(result, expected)
expected = Series([True, True, False, False], list('abcd'))
result = a | b
assert_series_equal(result, expected)
# filling
# vs empty
result = a & Series([])
expected = Series([False, False, False], list('bca'))
assert_series_equal(result, expected)
result = a | Series([])
expected = Series([True, False, True], list('bca'))
assert_series_equal(result, expected)
# vs non-matching
result = a & Series([1], ['z'])
expected = Series([False, False, False, False], list('abcz'))
assert_series_equal(result, expected)
result = a | Series([1], ['z'])
expected = Series([True, True, False, False], list('abcz'))
assert_series_equal(result, expected)
# identity
# we would like s[s|e] == s to hold for any e, whether empty or not
for e in [Series([]), Series([1], ['z']),
Series(np.nan, b.index), Series(np.nan, a.index)]:
result = a[a | e]
assert_series_equal(result, a[a])
for e in [Series(['z'])]:
if compat.PY3:
with tm.assert_produces_warning(RuntimeWarning):
result = a[a | e]
else:
result = a[a | e]
assert_series_equal(result, a[a])
# vs scalars
index = list('bca')
t = Series([True, False, True])
for v in [True, 1, 2]:
result = Series([True, False, True], index=index) | v
expected = Series([True, True, True], index=index)
assert_series_equal(result, expected)
for v in [np.nan, 'foo']:
self.assertRaises(TypeError, lambda: t | v)
for v in [False, 0]:
result = Series([True, False, True], index=index) | v
expected = Series([True, False, True], index=index)
assert_series_equal(result, expected)
for v in [True, 1]:
result = Series([True, False, True], index=index) & v
expected = Series([True, False, True], index=index)
assert_series_equal(result, expected)
for v in [False, 0]:
result = Series([True, False, True], index=index) & v
expected = Series([False, False, False], index=index)
assert_series_equal(result, expected)
for v in [np.nan]:
self.assertRaises(TypeError, lambda: t & v)
def test_comparison_flex_basic(self):
left = pd.Series(np.random.randn(10))
right = pd.Series(np.random.randn(10))
tm.assert_series_equal(left.eq(right), left == right)
tm.assert_series_equal(left.ne(right), left != right)
        tm.assert_series_equal(left.le(right), left <= right)
        tm.assert_series_equal(left.lt(right), left < right)
tm.assert_series_equal(left.gt(right), left > right)
tm.assert_series_equal(left.ge(right), left >= right)
# axis
for axis in [0, None, 'index']:
tm.assert_series_equal(left.eq(right, axis=axis), left == right)
tm.assert_series_equal(left.ne(right, axis=axis), left != right)
            tm.assert_series_equal(left.le(right, axis=axis), left <= right)
            tm.assert_series_equal(left.lt(right, axis=axis), left < right)
tm.assert_series_equal(left.gt(right, axis=axis), left > right)
tm.assert_series_equal(left.ge(right, axis=axis), left >= right)
#
msg = 'No axis named 1 for object type'
        for op in ['eq', 'ne', 'le', 'lt', 'gt', 'ge']:
with tm.assertRaisesRegexp(ValueError, msg):
getattr(left, op)(right, axis=1)
def test_comparison_flex_alignment(self):
left = Series([1, 3, 2], index=list('abc'))
right = Series([2, 2, 2], index=list('bcd'))
exp = pd.Series([False, False, True, False], index=list('abcd'))
tm.assert_series_equal(left.eq(right), exp)
exp = pd.Series([True, True, False, True], index=list('abcd'))
tm.assert_series_equal(left.ne(right), exp)
exp = pd.Series([False, False, True, False], index=list('abcd'))
tm.assert_series_equal(left.le(right), exp)
exp = pd.Series([False, False, False, False], index=list('abcd'))
tm.assert_series_equal(left.lt(right), exp)
exp = pd.Series([False, True, True, False], index=list('abcd'))
tm.assert_series_equal(left.ge(right), exp)
exp = pd.Series([False, True, False, False], index=list('abcd'))
tm.assert_series_equal(left.gt(right), exp)
def test_comparison_flex_alignment_fill(self):
left = Series([1, 3, 2], index=list('abc'))
right = Series([2, 2, 2], index=list('bcd'))
exp = pd.Series([False, False, True, True], index=list('abcd'))
tm.assert_series_equal(left.eq(right, fill_value=2), exp)
exp = pd.Series([True, True, False, False], index=list('abcd'))
tm.assert_series_equal(left.ne(right, fill_value=2), exp)
exp = pd.Series([False, False, True, True], index=list('abcd'))
tm.assert_series_equal(left.le(right, fill_value=0), exp)
exp = pd.Series([False, False, False, True], index=list('abcd'))
tm.assert_series_equal(left.lt(right, fill_value=0), exp)
exp = pd.Series([True, True, True, False], index=list('abcd'))
tm.assert_series_equal(left.ge(right, fill_value=0), exp)
exp = pd.Series([True, True, False, False], index=list('abcd'))
tm.assert_series_equal(left.gt(right, fill_value=0), exp)
def test_operators_bitwise(self):
# GH 9016: support bitwise op for integer types
index = list('bca')
s_tft = Series([True, False, True], index=index)
s_fff = Series([False, False, False], index=index)
s_tff = Series([True, False, False], index=index)
s_empty = Series([])
# TODO: unused
# s_0101 = Series([0, 1, 0, 1])
s_0123 = Series(range(4), dtype='int64')
s_3333 = Series([3] * 4)
s_4444 = Series([4] * 4)
res = s_tft & s_empty
expected = s_fff
assert_series_equal(res, expected)
res = s_tft | s_empty
expected = s_tft
assert_series_equal(res, expected)
res = s_0123 & s_3333
expected = Series(range(4), dtype='int64')
assert_series_equal(res, expected)
res = s_0123 | s_4444
expected = Series(range(4, 8), dtype='int64')
assert_series_equal(res, expected)
s_a0b1c0 = Series([1], list('b'))
res = s_tft & s_a0b1c0
expected = s_tff.reindex(list('abc'))
assert_series_equal(res, expected)
res = s_tft | s_a0b1c0
expected = s_tft.reindex(list('abc'))
assert_series_equal(res, expected)
n0 = 0
res = s_tft & n0
expected = s_fff
assert_series_equal(res, expected)
res = s_0123 & n0
expected = Series([0] * 4)
assert_series_equal(res, expected)
n1 = 1
res = s_tft & n1
expected = s_tft
assert_series_equal(res, expected)
res = s_0123 & n1
expected = Series([0, 1, 0, 1])
assert_series_equal(res, expected)
s_1111 = Series([1] * 4, dtype='int8')
res = s_0123 & s_1111
expected = Series([0, 1, 0, 1], dtype='int64')
assert_series_equal(res, expected)
res = s_0123.astype(np.int16) | s_1111.astype(np.int32)
expected = Series([1, 1, 3, 3], dtype='int32')
assert_series_equal(res, expected)
self.assertRaises(TypeError, lambda: s_1111 & 'a')
self.assertRaises(TypeError, lambda: s_1111 & ['a', 'b', 'c', 'd'])
self.assertRaises(TypeError, lambda: s_0123 & np.NaN)
self.assertRaises(TypeError, lambda: s_0123 & 3.14)
self.assertRaises(TypeError, lambda: s_0123 & [0.1, 4, 3.14, 2])
# s_0123 will be all false now because of reindexing like s_tft
if compat.PY3:
# unable to sort incompatible object via .union.
exp = Series([False] * 7, index=['b', 'c', 'a', 0, 1, 2, 3])
with tm.assert_produces_warning(RuntimeWarning):
assert_series_equal(s_tft & s_0123, exp)
else:
exp = Series([False] * 7, index=[0, 1, 2, 3, 'a', 'b', 'c'])
assert_series_equal(s_tft & s_0123, exp)
# s_tft will be all false now because of reindexing like s_0123
if compat.PY3:
# unable to sort incompatible object via .union.
exp = Series([False] * 7, index=[0, 1, 2, 3, 'b', 'c', 'a'])
with tm.assert_produces_warning(RuntimeWarning):
assert_series_equal(s_0123 & s_tft, exp)
else:
exp = Series([False] * 7, index=[0, 1, 2, 3, 'a', 'b', 'c'])
assert_series_equal(s_0123 & s_tft, exp)
assert_series_equal(s_0123 & False, Series([False] * 4))
assert_series_equal(s_0123 ^ False, Series([False, True, True, True]))
assert_series_equal(s_0123 & [False], Series([False] * 4))
assert_series_equal(s_0123 & (False), Series([False] * 4))
assert_series_equal(s_0123 & Series([False, np.NaN, False, False]),
Series([False] * 4))
s_ftft = Series([False, True, False, True])
assert_series_equal(s_0123 & Series([0.1, 4, -3.14, 2]), s_ftft)
s_abNd = Series(['a', 'b', np.NaN, 'd'])
res = s_0123 & s_abNd
expected = s_ftft
assert_series_equal(res, expected)
def test_scalar_na_cmp_corners(self):
s = Series([2, 3, 4, 5, 6, 7, 8, 9, 10])
def tester(a, b):
return a & b
self.assertRaises(TypeError, tester, s, datetime(2005, 1, 1))
s = Series([2, 3, 4, 5, 6, 7, 8, 9, datetime(2005, 1, 1)])
s[::2] = np.nan
expected = Series(True, index=s.index)
expected[::2] = False
assert_series_equal(tester(s, list(s)), expected)
d = DataFrame({'A': s})
# TODO: Fix this exception - needs to be fixed! (see GH5035)
# (previously this was a TypeError because series returned
# NotImplemented
# this is an alignment issue; these are equivalent
# https://github.com/pydata/pandas/issues/5284
self.assertRaises(ValueError, lambda: d.__and__(s, axis='columns'))
self.assertRaises(ValueError, tester, s, d)
# this is wrong as its not a boolean result
# result = d.__and__(s,axis='index')
def test_operators_corner(self):
series = self.ts
empty = Series([], index=Index([]))
result = series + empty
self.assertTrue(np.isnan(result).all())
result = empty + Series([], index=Index([]))
self.assertEqual(len(result), 0)
# TODO: this returned NotImplemented earlier, what to do?
# deltas = Series([timedelta(1)] * 5, index=np.arange(5))
# sub_deltas = deltas[::2]
# deltas5 = deltas * 5
# deltas = deltas + sub_deltas
# float + int
int_ts = self.ts.astype(int)[:-5]
added = self.ts + int_ts
expected = Series(self.ts.values[:-5] + int_ts.values,
index=self.ts.index[:-5], name='ts')
self.assert_series_equal(added[:-5], expected)
def test_operators_reverse_object(self):
# GH 56
arr = Series(np.random.randn(10), index=np.arange(10), dtype=object)
def _check_op(arr, op):
result = op(1., arr)
expected = op(1., arr.astype(float))
assert_series_equal(result.astype(float), expected)
_check_op(arr, operator.add)
_check_op(arr, operator.sub)
_check_op(arr, operator.mul)
_check_op(arr, operator.truediv)
_check_op(arr, operator.floordiv)
def test_arith_ops_df_compat(self):
# GH 1134
s1 = pd.Series([1, 2, 3], index=list('ABC'), name='x')
s2 = pd.Series([2, 2, 2], index=list('ABD'), name='x')
exp = pd.Series([3.0, 4.0, np.nan, np.nan],
index=list('ABCD'), name='x')
tm.assert_series_equal(s1 + s2, exp)
tm.assert_series_equal(s2 + s1, exp)
exp = pd.DataFrame({'x': [3.0, 4.0, np.nan, np.nan]},
index=list('ABCD'))
tm.assert_frame_equal(s1.to_frame() + s2.to_frame(), exp)
tm.assert_frame_equal(s2.to_frame() + s1.to_frame(), exp)
# different length
s3 = pd.Series([1, 2, 3], index=list('ABC'), name='x')
s4 = pd.Series([2, 2, 2, 2], index=list('ABCD'), name='x')
exp = pd.Series([3, 4, 5, np.nan],
index=list('ABCD'), name='x')
tm.assert_series_equal(s3 + s4, exp)
tm.assert_series_equal(s4 + s3, exp)
exp = pd.DataFrame({'x': [3, 4, 5, np.nan]},
index=list('ABCD'))
tm.assert_frame_equal(s3.to_frame() + s4.to_frame(), exp)
tm.assert_frame_equal(s4.to_frame() + s3.to_frame(), exp)
def test_comp_ops_df_compat(self):
# GH 1134
s1 = pd.Series([1, 2, 3], index=list('ABC'), name='x')
s2 = pd.Series([2, 2, 2], index=list('ABD'), name='x')
s3 = pd.Series([1, 2, 3], index=list('ABC'), name='x')
s4 = pd.Series([2, 2, 2, 2], index=list('ABCD'), name='x')
for l, r in [(s1, s2), (s2, s1), (s3, s4), (s4, s3)]:
msg = "Can only compare identically-labeled Series objects"
with tm.assertRaisesRegexp(ValueError, msg):
l == r
with tm.assertRaisesRegexp(ValueError, msg):
l != r
with tm.assertRaisesRegexp(ValueError, msg):
l < r
msg = "Can only compare identically-labeled DataFrame objects"
with tm.assertRaisesRegexp(ValueError, msg):
l.to_frame() == r.to_frame()
with tm.assertRaisesRegexp(ValueError, msg):
l.to_frame() != r.to_frame()
with tm.assertRaisesRegexp(ValueError, msg):
l.to_frame() < r.to_frame()
def test_bool_ops_df_compat(self):
# GH 1134
s1 = pd.Series([True, False, True], index=list('ABC'), name='x')
s2 = pd.Series([True, True, False], index=list('ABD'), name='x')
exp = pd.Series([True, False, False, False],
index=list('ABCD'), name='x')
tm.assert_series_equal(s1 & s2, exp)
tm.assert_series_equal(s2 & s1, exp)
# True | np.nan => True
exp = pd.Series([True, True, True, False],
index=list('ABCD'), name='x')
tm.assert_series_equal(s1 | s2, exp)
# np.nan | True => np.nan, filled with False
exp = pd.Series([True, True, False, False],
index=list('ABCD'), name='x')
tm.assert_series_equal(s2 | s1, exp)
# DataFrame doesn't fill nan with False
exp = pd.DataFrame({'x': [True, False, np.nan, np.nan]},
index=list('ABCD'))
tm.assert_frame_equal(s1.to_frame() & s2.to_frame(), exp)
tm.assert_frame_equal(s2.to_frame() & s1.to_frame(), exp)
exp = pd.DataFrame({'x': [True, True, np.nan, np.nan]},
index=list('ABCD'))
tm.assert_frame_equal(s1.to_frame() | s2.to_frame(), exp)
tm.assert_frame_equal(s2.to_frame() | s1.to_frame(), exp)
# different length
s3 = pd.Series([True, False, True], index=list('ABC'), name='x')
s4 = pd.Series([True, True, True, True], index=list('ABCD'), name='x')
exp = pd.Series([True, False, True, False],
index=list('ABCD'), name='x')
tm.assert_series_equal(s3 & s4, exp)
tm.assert_series_equal(s4 & s3, exp)
# np.nan | True => np.nan, filled with False
exp = pd.Series([True, True, True, False],
index=list('ABCD'), name='x')
tm.assert_series_equal(s3 | s4, exp)
# True | np.nan => True
exp = pd.Series([True, True, True, True],
index=list('ABCD'), name='x')
tm.assert_series_equal(s4 | s3, exp)
exp = pd.DataFrame({'x': [True, False, True, np.nan]},
index=list('ABCD'))
tm.assert_frame_equal(s3.to_frame() & s4.to_frame(), exp)
tm.assert_frame_equal(s4.to_frame() & s3.to_frame(), exp)
exp = pd.DataFrame({'x': [True, True, True, np.nan]},
index=list('ABCD'))
tm.assert_frame_equal(s3.to_frame() | s4.to_frame(), exp)
tm.assert_frame_equal(s4.to_frame() | s3.to_frame(), exp)
def test_series_frame_radd_bug(self):
# GH 353
vals = Series(tm.rands_array(5, 10))
result = 'foo_' + vals
expected = vals.map(lambda x: 'foo_' + x)
assert_series_equal(result, expected)
frame = DataFrame({'vals': vals})
result = 'foo_' + frame
expected = DataFrame({'vals': vals.map(lambda x: 'foo_' + x)})
tm.assert_frame_equal(result, expected)
# really raise this time
with tm.assertRaises(TypeError):
datetime.now() + self.ts
with tm.assertRaises(TypeError):
self.ts + datetime.now()
def test_series_radd_more(self):
data = [[1, 2, 3],
[1.1, 2.2, 3.3],
[pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02'),
pd.NaT],
['x', 'y', 1]]
for d in data:
for dtype in [None, object]:
s = Series(d, dtype=dtype)
with tm.assertRaises(TypeError):
'foo_' + s
for dtype in [None, object]:
res = 1 + pd.Series([1, 2, 3], dtype=dtype)
exp = pd.Series([2, 3, 4], dtype=dtype)
tm.assert_series_equal(res, exp)
res = pd.Series([1, 2, 3], dtype=dtype) + 1
tm.assert_series_equal(res, exp)
res = np.nan + pd.Series([1, 2, 3], dtype=dtype)
exp = pd.Series([np.nan, np.nan, np.nan], dtype=dtype)
tm.assert_series_equal(res, exp)
res = pd.Series([1, 2, 3], dtype=dtype) + np.nan
tm.assert_series_equal(res, exp)
s = pd.Series([pd.Timedelta('1 days'), pd.Timedelta('2 days'),
pd.Timedelta('3 days')], dtype=dtype)
exp = pd.Series([pd.Timedelta('4 days'), pd.Timedelta('5 days'),
pd.Timedelta('6 days')])
tm.assert_series_equal(pd.Timedelta('3 days') + s, exp)
tm.assert_series_equal(s + pd.Timedelta('3 days'), exp)
s = pd.Series(['x', np.nan, 'x'])
tm.assert_series_equal('a' + s, | pd.Series(['ax', np.nan, 'ax']) | pandas.Series |
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
from mabwiser.mab import MAB, LearningPolicy, NeighborhoodPolicy
from tests.test_base import BaseTest
class MABTest(BaseTest):
#################################################
# Test context free predict() method
################################################
def test_arm_list_int(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3],
decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
seed=123456,
num_run=1,
is_predict=True)
def test_arm_list_str(self):
for lp in MABTest.lps:
self.predict(arms=["A", "B", "C"],
decisions=["A", "A", "A", "B", "B", "B", "C", "C", "C"],
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=["A", "B", "C"],
decisions=["A", "A", "A", "B", "B", "B", "C", "C", "C"],
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
seed=123456,
num_run=1,
is_predict=True)
def test_decision_series(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3],
decisions=pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
def test_reward_series(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],
rewards=pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3],
decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],
rewards=pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1]),
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
def test_decision_reward_series(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3],
decisions=pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1]),
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
def test_decision_array(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3],
decisions=np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
def test_reward_array(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],
rewards=np.array([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3],
decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],
rewards=np.array([0, 0, 0, 0, 0, 0, 1, 1, 1]),
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
def test_decision_reward_array(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=np.array([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3],
decisions=np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=np.array([0, 0, 0, 0, 0, 0, 1, 1, 1]),
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
def test_decision_series_reward_array(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=np.array([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3],
decisions=pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=np.array([0, 0, 0, 0, 0, 0, 1, 1, 1]),
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
def test_decision_array_reward_series(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3],
decisions=np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1]),
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
#################################################
# Test context free predict_expectation() method
################################################
def test_exp_arm_list_int(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=False)
def test_exp_arm_list_str(self):
for lp in MABTest.lps:
self.predict(arms=["A", "B", "C"],
decisions=["A", "A", "A", "B", "B", "B", "C", "C", "C"],
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=False)
def test_exp_decision_series(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=False)
def test_exp_reward_series(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],
rewards=pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=False)
def test_exp_decision_reward_series(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=False)
def test_exp_decision_array(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=False)
def test_exp_reward_array(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],
rewards=np.array([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=False)
def test_exp_decision_reward_array(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=np.array([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=False)
def test_exp_decision_series_reward_array(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=np.array([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=False)
def test_exp_decision_array_reward_series(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]),
                         rewards=pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1]),
                         learning_policy=lp,
                         seed=123456,
                         num_run=1,
                         is_predict=False)
import streamlit as st
import pandas as pd
import numpy as np
import pydeck as pdk
import plotly.graph_objects as go
import plotly.express as px
crime = pd.read_csv('data/crime_cleaned.csv')
#!/usr/bin/env python
import os,sys
import pandas as pd
import argparse
daismdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0,daismdir)
import daism.modules.simulation as simulation
import daism.modules.training as training
import daism.modules.prediction as prediction
#--------------------------------------
#--------------------------------------
# main()
parser = argparse.ArgumentParser(description='DAISM-XMBD deconvolution.')
subparsers = parser.add_subparsers(dest='subcommand', help='Select one of the following sub-commands')
# create the parser for the "one-stop DAISM-DNN" command
parser_a = subparsers.add_parser('DAISM', help='one-stop DAISM-XMBD',description="one-stop DAISM-XMBD")
parser_a.add_argument("-platform", type=str, help="Platform of calibration data, [R]: RNA-seq TPM, [S]: single cell RNA-seq", default="S")
parser_a.add_argument("-caliexp", type=str, help="Calibration samples expression file", default=None)
parser_a.add_argument("-califra", type=str, help="Calibration samples ground truth file", default=None)
parser_a.add_argument("-aug", type=str, help="Purified samples expression (h5ad)", default=None)
parser_a.add_argument("-N", type=int, help="Simulation samples number", default=16000)
parser_a.add_argument("-testexp", type=str, help="Test samples expression file", default=None)
parser_a.add_argument("-net", type=str, help="Network architecture used for training", default="coarse")
parser_a.add_argument("-outdir", type=str, help="Output result file directory", default="../output/")
# create the parser for the "DAISM simulation" command
parser_b = subparsers.add_parser('DAISM_simulation', help='training set simulation using DAISM strategy',description='training set simulation using DAISM strategy.')
parser_b.add_argument("-platform", type=str, help="Platform of calibration data, [R]: RNA-seq TPM, [S]: single cell RNA-seq", default="S")
parser_b.add_argument("-caliexp", type=str, help="Calibration samples expression file", default=None)
parser_b.add_argument("-califra", type=str, help="Calibration samples ground truth file", default=None)
parser_b.add_argument("-aug", type=str, help="Purified samples expression (h5ad)", default=None)
parser_b.add_argument("-testexp", type=str, help="Test samples expression file", default=None)
parser_b.add_argument("-N", type=int, help="Simulation samples number", default=16000)
parser_b.add_argument("-outdir", type=str, help="Output result file directory", default="../output/")
# create the parser for the "Generic simulation" command
parser_c = subparsers.add_parser('Generic_simulation', help='training set simulation using purified cells only',description='training set simulation using purified cells only.')
parser_c.add_argument("-platform", type=str, help="Platform of calibration data, [R]: RNA-seq TPM, [S]: single cell RNA-seq", default="S")
parser_c.add_argument("-aug", type=str, help="Purified samples expression (h5ad)", default=None)
parser_c.add_argument("-testexp", type=str, help="Test samples expression file", default=None)
parser_c.add_argument("-N", type=int, help="Simulation samples number", default=16000)
parser_c.add_argument("-outdir", type=str, help="Output result file directory", default="../output/")
# create the parser for the "training" command
parser_d = subparsers.add_parser('training', help='train DNN model',description='train DNN model.')
parser_d.add_argument("-trainexp", type=str, help="Simulated samples expression file", default=None)
parser_d.add_argument("-trainfra", type=str, help="Simulated samples ground truth file", default=None)
parser_d.add_argument("-net", type=str, help="Network architecture used for training", default="coarse")
parser_d.add_argument("-outdir", type=str, help="Output result file directory", default="../output/")
# create the parser for the "prediction" command
parser_e = subparsers.add_parser('prediction', help='predict using a trained model',description='predict using a trained model.')
parser_e.add_argument("-testexp", type=str, help="Test samples expression file", default=None)
parser_e.add_argument("-model", type=str, help="Deep-learing model file trained by DAISM", default="../output/DAISM_model.pkl")
parser_e.add_argument("-celltype", type=str, help="Model celltypes", default="../output/DAISM_model_celltypes.txt")
parser_e.add_argument("-feature", type=str, help="Model feature", default="../output/DAISM_model_feature.txt")
parser_e.add_argument("-net", type=str, help="Network architecture used for training", default="coarse")
parser_e.add_argument("-outdir", type=str, help="Output result file directory", default="../output/")
class Options:
random_seed = 777
min_f = 0.01
max_f = 0.99
lr = 1e-4
batchsize = 64
num_epoches = 500
ncuda = 0
def main():
# parse some argument lists
inputArgs = parser.parse_args()
if os.path.exists(inputArgs.outdir)==False:
os.mkdir(inputArgs.outdir)
#### DAISM modules ####
if (inputArgs.subcommand=='DAISM'):
# Load calibration data
caliexp = pd.read_csv(inputArgs.caliexp, sep="\t", index_col=0)
califra = pd.read_csv(inputArgs.califra, sep="\t", index_col=0)
# Load test data
test_sample = pd.read_csv(inputArgs.testexp, sep="\t", index_col=0)
# Preprocess purified data
mode = "daism"
commongenes,caliexp,C_all = simulation.preprocess_purified(inputArgs.aug,inputArgs.platform,mode,test_sample,caliexp,califra)
# Create training dataset
mixsam, mixfra, celltypes, feature = simulation.daism_simulation(caliexp,califra,C_all,Options.random_seed,inputArgs.N,inputArgs.platform,Options.min_f,Options.max_f)
# Save signature genes and celltype labels
if os.path.exists(inputArgs.outdir+"/output/")==False:
os.mkdir(inputArgs.outdir+"/output/")
pd.DataFrame(feature).to_csv(inputArgs.outdir+'/output/DAISM_feature.txt',sep='\t')
pd.DataFrame(celltypes).to_csv(inputArgs.outdir+'/output/DAISM_celltypes.txt',sep='\t')
print('Writing training data...')
# Save training data
mixsam.to_csv(inputArgs.outdir+'/output/DAISM_mixsam.txt',sep='\t')
mixfra.to_csv(inputArgs.outdir+'/output/DAISM_mixfra.txt',sep='\t')
# Training model
model = training.dnn_training(mixsam,mixfra,Options.random_seed,inputArgs.outdir+"/output/",Options.num_epoches,Options.lr,Options.batchsize,Options.ncuda,inputArgs.net)
# Save signature genes and celltype labels
pd.DataFrame(list(mixfra.index)).to_csv(inputArgs.outdir+'/output/DAISM_model_celltypes.txt',sep='\t')
pd.DataFrame(list(mixsam.index)).to_csv(inputArgs.outdir+'/output/DAISM_model_feature.txt',sep='\t')
# Prediction
result = prediction.dnn_prediction(model, test_sample, list(mixfra.index), list(mixsam.index),Options.ncuda)
# Save predicted result
result.to_csv(inputArgs.outdir+'/output/DAISM_result.txt',sep='\t')
############################
#### simulation modules ####
############################
#### DAISM simulation modules ####
if (inputArgs.subcommand=='DAISM_simulation'):
# Load calibration data
caliexp = pd.read_csv(inputArgs.caliexp, sep="\t", index_col=0)
califra = pd.read_csv(inputArgs.califra, sep="\t", index_col=0)
# Load test data
test_sample = pd.read_csv(inputArgs.testexp, sep="\t", index_col=0)
# Preprocess purified data
mode ="daism"
commongenes,caliexp,C_all = simulation.preprocess_purified(inputArgs.aug,inputArgs.platform,mode,test_sample,caliexp,califra)
# Create training dataset
mixsam, mixfra, celltypes, feature = simulation.daism_simulation(caliexp,califra,C_all,Options.random_seed,inputArgs.N,inputArgs.platform,Options.min_f,Options.max_f)
# Save signature genes and celltype labels
if os.path.exists(inputArgs.outdir+"/output/")==False:
os.mkdir(inputArgs.outdir+"/output/")
pd.DataFrame(feature).to_csv(inputArgs.outdir+'/output/DAISM_feature.txt',sep='\t')
pd.DataFrame(celltypes).to_csv(inputArgs.outdir+'/output/DAISM_celltypes.txt',sep='\t')
print('Writing training data...')
# Save training data
mixsam.to_csv(inputArgs.outdir+'/output/DAISM_mixsam.txt',sep='\t')
mixfra.to_csv(inputArgs.outdir+'/output/DAISM_mixfra.txt',sep='\t')
#### Generic simulation modules ####
if (inputArgs.subcommand=='Generic_simulation'):
# Load test data
test_sample = pd.read_csv(inputArgs.testexp, sep="\t", index_col=0)
# Preprocess purified data
mode = "generic"
commongenes,caliexp,C_all = simulation.preprocess_purified(inputArgs.aug,inputArgs.platform,mode,test_sample)
# Create training dataset
mixsam, mixfra, celltypes, feature = simulation.generic_simulation(C_all,Options.random_seed,inputArgs.N,inputArgs.platform,commongenes)
# Save signature genes and celltype labels
if os.path.exists(inputArgs.outdir+"/output/")==False:
os.mkdir(inputArgs.outdir+"/output/")
pd.DataFrame(feature).to_csv(inputArgs.outdir+'/output/Generic_feature.txt',sep='\t')
        pd.DataFrame(celltypes).to_csv(inputArgs.outdir+'/output/Generic_celltypes.txt',sep='\t')
#============================================================================================
# Name : main.py
# Author : <NAME>, <NAME>
# Version : 1.0
# Copyright : Copyright (C) Secure Systems Group, Aalto University {https://ssg.aalto.fi/}
# License : This code is released under Apache 2.0 license
#============================================================================================
from clustering import RecAgglo, SampleClust, AggloClust
import numpy as np
import pandas as pd
from parsing import Parser
def main():
parser = Parser()
args = parser.args
infile = args.infile
outfile = args.outfile
verbose = args.verbose
skip_index = args.skip_index
delta_a = args.delta_a
delta_fc = args.delta_fc
d_max = args.d_max
rho_mc = args.rho_mc
rho_s = args.rho_s
weights = list(map(float, args.weight.strip('[]').split(',')))
algorithm = args.algo
    df = pd.read_csv(infile, dtype='str')
import numpy as np
import pandas as pd
import time, gc
from GV_Catalogue_Gen import angularDistance
def genSigmaCatalogue(CATALOGUE, mag_limit = 6, FOV_limit = 20):
'''
Generates the mean of the sigma for each star in the catalogue.
Sigma between star A and star B is defined as (1/6) of the angular
distance between the two stars.
Such values of sigma are calculated for star A to every other star
in the catalogue that are its nearest neighbours, i.e., all those
stars within a circular FOV defined by FOV_limit.
This set of sigma values is defined as sigma_n.
The mean of all the elements of sigma_n gives us mu_n.
This mean value is paired with the corresponding star A.
This process repeats for every star in the catalogue, and the star IDs
the corresponding mu_n values are collated in a dataframe.
Parameters
----------
CATALOGUE : pd.Dataframe
The 'master' star catalogue on which the function works
mag_limit : floating-point number, default = 6
The upper magnitude limit of stars that are required in the reference catalogue
FOV_limit: floating-point number, default = 20
Defines the circular radius (in degrees) which demarcates which stars from the
catalogue are to be considered as nearest neighbours for a given star
Returns
-------
SIGMA_CATALOGUE : pd.Dataframe
The dataframe collated from the star IDs and their corresponding mu_n
'''
# Start clock-1
start1 = time.time()
# Generate restricted catalogue based on upper magnitude limit
temp0 = CATALOGUE[CATALOGUE.Mag <= mag_limit]
# Number of rows in the resticted catalogue
rows = temp0.shape[0]
# Resets the index of <temp0>
temp0.index = list(range(rows))
# Prints total number of stars in <temp0> and the (n)X(n-1)- unique combinations per star
print('Number of stars - ', rows)
print('Number of unique combinations per star= ', (rows-1)*rows)
# Initialize the number of iterations to take place
no_iter = (rows)
# Initialize SIGMA_CATALOGUE
SIGMA_CATALOGUE = pd.DataFrame(columns=['Star_ID', 'mu_n'])
for i in range(no_iter):
# Throws error if an iteration runs beyond number of available rows in <temp0>
assert i<(rows), 'IndexError: iterating beyond available number of rows'
# Generates <temp1> dataframe which has the (i - th) star of <temp0>
# repetated (rows-1) times
temp1 = pd.DataFrame(columns = ['Star_ID1','RA_1', 'Dec_1', 'Mag_1'])
s1, ra, dec, mag = temp0.iloc[i]
temp1.loc[0] = [s1] + [ra] + [dec] + [mag]
        temp1 = pd.concat([temp1]*(rows-1), ignore_index=True)
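# Illustrative sketch (not part of the original script): per the docstring above, sigma
# between two stars is one sixth of their angular distance, and mu_n for a star is the
# mean sigma over its neighbours inside the FOV limit. The helper and numbers below are
# hypothetical examples only.
def example_mu_n(neighbour_distances_deg):
    """Return mu_n given angular distances (in degrees) to a star's FOV neighbours."""
    sigmas = [d / 6.0 for d in neighbour_distances_deg]
    return sum(sigmas) / len(sigmas)
# e.g. example_mu_n([3.0, 6.0, 12.0]) -> (0.5 + 1.0 + 2.0) / 3 ≈ 1.17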
'''
'''
import spacy
import numpy as np
import pandas as pd
from pprint import pprint
import scipy.spatial.distance
from sqlalchemy.orm import sessionmaker
from sqlalchemy import create_engine
import json
import re
import os
def normal(token):
# Should the token be kept? (=is normal)
# Spacy treats 'To' (title case) as *not a stop word*, but
# gensim will not compute tf-idf for 'To'. To remove 'To' as a stop word here, I
# do an extra test to see if the lower case token is a stop word.
return not token.is_stop and not token.is_punct and not nlp.vocab[token.lower_].is_stop
def tokenizer(input_string):
doc = nlp(input_string)
tokens = [token for token in doc if normal(token)]
return tokens
def lemmatizer(tokens):
lemmas = [t.lemma_ for t in tokens]
return lemmas
def vectorizer(tokens):
vectors = [t.vector for t in tokens]
return vectors
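# Hypothetical usage sketch (not in the original script) of the helpers above, assuming
# the spaCy model loaded below:
# tokens = tokenizer("The committee passed the appropriations bill.")
# lemmas = lemmatizer(tokens)   # e.g. ['committee', 'pass', 'appropriation', 'bill']
# vectors = vectorizer(tokens)  # one dense spaCy word vector per kept token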
nlp = spacy.load('en_core_web_md', entity = False, parser = False)
# Connect to local PostgreSQL
user = 'ubuntu'
password = ''
dbname = 'congress'
host = 'localhost'
local_port = '5432'
es = "postgresql+psycopg2://"+user+":"+password+"@/"+dbname+"?host="+host+"&port="+local_port
engine = create_engine(es)
print(engine)
Session = sessionmaker(bind=engine)
session = Session()
print('Session created')
socialTagVectors = pd.read_csv('socialTagVectors.csv')
congressRareTags = session.execute("SELECT bill_id, social_tags FROM congress_tagging;")#pd.read_csv('congress_rare_tags.csv', header = 0)
congressRareTags = congressRareTags.fetchall()
congressbillid = [i[0] for i in congressRareTags]
congresstag = [i[1] for i in congressRareTags]
congressRareTags = pd.DataFrame({'bill_id': congressbillid, 'social_tags': congresstag})
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 19 21:18:30 2020
@author: rahikalantari
"""
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 4 05:29:48 2020
@author: rahikalantari
"""
import datetime
import pandas as pd
import numpy as np
from datetime import datetime as dt
task = 'future' #'historic''future'
event = 'death'#'cases''death'
if task == 'historic':
foldername = 'Historic'
else:
foldername = 'Future'
us_state_code = {
'Alabama': '01', 'Alaska': '02', 'Arizona': '04', 'Arkansas': '05', 'California': '06', 'Colorado': '08',
'Connecticut': '09', 'Delaware': '10', 'Florida': '12', 'Georgia': '13', 'Hawaii': '15', 'Idaho': '16',
'Illinois': '17', 'Indiana': '18', 'Iowa': '19', 'Kansas': '20', 'Kentucky': '21', 'Louisiana': '22',
'Maine': '23', 'Maryland': '24', 'Massachusetts': '25', 'Michigan': '26', 'Minnesota': '27', 'Mississippi': '28',
'Missouri': '29', 'Montana': '30', 'Nebraska': '31', 'Nevada': '32', 'New Hampshire': '33', 'New Jersey': '34',
'New Mexico': '35', 'New York': '36', 'North Carolina': '37', 'North Dakota': '38', 'Ohio': '39', 'Oklahoma': '40',
'Oregon': '41', 'Pennsylvania': '42', 'Rhode Island': '44', 'South Carolina': '45', 'South Dakota': '46',
'Tennessee': '47', 'Texas': '48', 'Utah': '49', 'Vermont': '50', 'Virginia': '51', 'Washington': '53',
'West Virginia': '54', 'Wisconsin': '55', 'Wyoming': '56', 'District of Columbia':'11'}
task = 'future' #'historic''future'
event = 'death'#'cases''death'
if task == 'historic':
foldername = 'Historic'
else:
foldername = 'Future'
if event == 'death':
death_mean = pd.read_csv('results/'+foldername+'_prediction/death_cum.csv')
death_weekly = pd.read_csv('results/'+foldername+'_prediction/death_weekly.csv')
daily_cases_weekly = pd.read_csv('results/'+foldername+'_prediction/daily_cases_weekly.csv')
death_mean.drop(death_mean.loc[death_mean['Var1']=='StateX'].index, inplace=True)
death_mean =death_mean.reset_index(drop=True)
death_weekly.drop(death_weekly.loc[death_weekly['Var1']=='StateX'].index, inplace=True)
death_weekly =death_weekly.reset_index(drop=True)
daily_cases_weekly.drop(daily_cases_weekly.loc[daily_cases_weekly['Var1']=='StateX'].index, inplace=True)
daily_cases_weekly =daily_cases_weekly.reset_index(drop=True)
death_mean = death_mean.rename(columns=death_mean.loc[0,:])
death_mean=(death_mean.drop(index=0))#.drop('01/22/0020',axis=1)
death_weekly = death_weekly.rename(columns=death_weekly.loc[0,:])
death_weekly=(death_weekly.drop(index=0))#.drop('01/22/0020',axis=1)
daily_cases_weekly = daily_cases_weekly.rename(columns= daily_cases_weekly.loc[0,:])
daily_cases_weekly = (daily_cases_weekly.drop(index=0))#.drop('01/22/0020',axis=1)
#daily_cases_weekly = daily_cases_weekly_col['type'].loc[daily_cases_weekly_col['type'] =='NA' | daily_cases_weekly_col['type']=='0.025' | daily_cases_weekly_col['type']=='0.1'| daily_cases_weekly_col['type']== '0.25'| daily_cases_weekly_col['type']=='0.500'| daily_cases_weekly_col['type']== '0.750'| daily_cases_weekly_col['type']=='0.900'| daily_cases_weekly_col['type']=='0.975']
daily_cases_weekly_col1 = []
#aily_cases_weekly = pd.read_csv('results/'+foldername+'_prediction/daily_cases_weekly.csv')
# death_lower = pd.read_csv('results/'+foldername+'_prediction/death_lowerBound_cum.csv')
# death_lower = death_lower.rename(columns=death_lower.loc[0,:])
# death_lower = (death_lower.drop(index=0))#.drop('01/22/0020',axis=1)
# death_higher = pd.read_csv('results/'+foldername+'_prediction/death_upperBound_cum.csv')
# death_higher = death_higher.rename(columns=death_higher.loc[0,:])
# death_higher = (death_higher.drop(index=0))#.drop('01/22/0020',axis=1)
realdata_death = realdata = pd.read_csv('data/new_death_cases.csv')
for i in range(1,53):
death_mean.loc[i,'3/15/20':] = pd.to_numeric(death_mean.loc[i,'3/15/20':],errors='coerce')
death_weekly.loc[i,'3/15/20':] = pd.to_numeric(death_weekly.loc[i,'3/15/20':],errors='coerce')
daily_cases_weekly.loc[i,'3/15/20':] = pd.to_numeric(daily_cases_weekly.loc[i,'3/15/20':],errors='coerce')
death_mean.to_csv ('results/'+foldername+'_prediction/death_cum2.csv', index = False, header=True)
death_weekly.to_csv ('results/'+foldername+'_prediction/death_weekly2.csv', index = False, header=True)
daily_cases_weekly.to_csv ('results/'+foldername+'_prediction/daily_cases_weekly2.csv', index = False, header=True)
realdata_death.loc[51] = realdata_death.sum(axis=0)
realdata_death["Province_State"].loc[51] = "US"
real_death_col = pd.melt(realdata_death, id_vars=['Province_State'], var_name='date', value_name='real_number_of_deaths')
# results_death.to_csv ('results/Future_prediction/furture_death_.csv', index = True, header=True)
death_mean = pd.read_csv('results/'+foldername+'_prediction/death_cum2.csv')
death_mean_col = pd.melt(death_mean, id_vars=['Province_State','type'], var_name='date', value_name='number_of_deaths')
results_death= death_mean_col#pd.merge(real_death_col, death_mean_col, how='outer', on=['Province_State', 'date'])
death_weekly = pd.read_csv('results/'+foldername+'_prediction/death_weekly2.csv')
    death_weekly_col = pd.melt(death_weekly, id_vars=['Province_State','type'], var_name='date', value_name='number_of_deaths')
# from ifm import Enum
import pandas as pd
class TsPd:
def __init__(self, doc):
self.doc = doc
def info(self):
"""
Returns a pandas.DataFrame with information on existing time series (formerly power functions).
"""
list_info = self.doc.c.ts.info()
        df = pd.DataFrame(list_info, columns=["tsid", "comment", "no_point", "is_cyclic_num", "interpolation_kind"])
        return df
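# Hypothetical usage sketch (not part of the original module): `doc` is assumed to be an
# active FEFLOW document handle (e.g. obtained via ifm.loadDocument on a model file).
# ts_info = TsPd(doc).info()
# print(ts_info[["tsid", "comment", "no_point"]])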
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 12 16:15:46 2020
@author: navarrenhn
"""
import pandas as pd
def feed_demand(groups, Lancet_diet):
Region_demands = {}
for name, group in groups:
d = Lancet_diet.copy()
d["GROUP"] = name
#print(d)
##create animal product demands:
d.loc[["beef and lamb"], ["BMI" , "EAT", "Org"]] *= (1 + group.loc[group["Foodtype"] == "Meat", "Total"].min())
d.loc[["pork"], ["BMI" , "EAT", "Org"]] *= (1 + group.loc[group["Foodtype"] == "Meat", "Total"].min())
d.loc[["chicken and other poultry"], ["BMI" , "EAT", "Org"]] *= (1 + group.loc[group["Foodtype"] == "Meat", "Total"].min())
d.loc[["fish"], ["BMI" , "EAT", "Org"]] *= (1 + group.loc[group["Foodtype"] == "Fishandseafood", "Total"].min())
#d = d.drop(["fish"])
#d.loc[["whole milk or derivative equivalents"], ["BMI" , "EAT", "Org"]] *= (1 + group.loc[group["Foodtype"] == "Milk", "Total"].min())
#d.loc[["eggs"], ["BMI" , "EAT", "Org"]] *= (1 + group.loc[group["Foodtype"] == "Milk", "Total"].min())
##create feed demands:
##need to determine oil production from soymeal production for feed
##feed required for 1 dairy cow per day
##similar feed is assumed for beef cattle being reared on mixed system (pasture/crop-fed)
##all concentrates except corn and soy assumed to be by-products
##soymeal in concentrate equals 0.8 times fresh soybeans in weight (soymeal.org)
##corn equals sum of fresh and corn in concentrate
##corn (maize grain) in concentrate equals 0.86 times fresh yield? Based on dry mass?
cow_dict = {"type": ["grass", "corn", "soybean meal"], ##"citrus pulp concentrate", "palm kernel meal concentrate", "rapeseed meal concentrate", "beet pulp concentrate", "wheat concentrate", "rest products"],
"gram": [41250, 13750 + (1250*0.86), 750*0.8 ]}##, 500, 500, 500, 250, 250, 1000]}
cow_Lancet_diet_per_day = pd.DataFrame(cow_dict)
cow_Lancet_diet_per_day = cow_Lancet_diet_per_day.set_index(["type"])
cow_feed_per_g_milk = cow_Lancet_diet_per_day["gram"]/25000
##beef production from dairy cows and dairy calf rearing
calf_per_g_milk = (1.5/(25000*365*6)) ##3 calves per cow divided by two as only males are used(?)
##Type A calf of Nguyen 2010 using 8438 kg of feed per 1000 kg carcass weight (= per 660kg edible meat)
##(significantly more soymeal --> look into, maybe change Lancet_diet)
g_calf_per_g_milk = calf_per_g_milk * 214880
cow_feed_per_g_calf = ((cow_Lancet_diet_per_day["gram"]/55000)*8438000)/660000
##One 680 kg Holstein dairy cow delivers 224.52 kg of meat (excluding offal and bones) ##what to do with offal?
g_dairycow_beef_per_g_milk = 224520.0 / 36500000.0 #36500000 g milk in her milk giving time of 4 years
g_beef_per_g_milk = g_calf_per_g_milk + g_dairycow_beef_per_g_milk
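        ## worked numbers (approximate, illustration only): calf_per_g_milk = 1.5/(25000*365*6) ≈ 2.7e-8,
        ## so g_calf_per_g_milk ≈ 2.7e-8 * 214880 ≈ 0.0059; g_dairycow_beef_per_g_milk = 224520/36500000 ≈ 0.0062;
        ## together g_beef_per_g_milk ≈ 0.012 g of beef per gram of milk produced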
##feed demand from classic suckler-cow rearing systems is 20863 kg of feed per 1000kg carcass weight (= per 660kg edible meat) (Nguyen 2010)
cow_feed_per_g_suckler_beef = ((cow_Lancet_diet_per_day["gram"]/55000)*20863000)/660000
##required extra beef production besides dairy cows and their calves to reach demand
required_extra_beef_production = max(d.loc[["beef and lamb"], ["BMI" , "EAT", "Org"]].values[0][0] - (d.loc[["whole milk or derivative equivalents"], ["BMI" , "EAT", "Org"]].values[0][0] * g_beef_per_g_milk), 0)
##this needs a lamb factor
total_feed_cows_for_Lancet_diet_per_day = (d.loc[["whole milk or derivative equivalents"], ["BMI" , "EAT", "Org"]].values[0][0] * g_calf_per_g_milk * cow_feed_per_g_calf) + (d.loc[["whole milk or derivative equivalents"], ["BMI" , "EAT", "Org"]].values[0][0] * cow_feed_per_g_milk) + (required_extra_beef_production * cow_feed_per_g_suckler_beef)
##one dutch cow delivers on average 25 liter milk per day and eats 55kg of feed a day
##assuming 3 calves per dairy cow of which half is male so used for slaughter
##one dutch dairy cow is culled after 6 years on average
##if not, how much feed does a meat cow need?
##how much manure do the cows produce? (for effect on N input ratio)
##soybean meal assumed to equal 0.8 times fresh soybean weight as in cow Lancet_diet
##whole grains assumed here
##one dutch egg-laying chicken lays 0.85232877 egg per day amounting to 19400/311.1 = 62.35937 gram egg per day
##one dutch chicken eats 121.3 gram feed per day (both broiler and egg)
##chicken feed based on Rezaei et al (high protein organic Lancet_diet) and ratios based on 1/3 of feeds used in first and 2/3 of last stages of life, byproducts and supplements (under 3%) placed in "other"
##one dutch broiler chicken lives 6 weeks, averages 2446g and delivers 166+547+243+520 = 1476 gram of meat
##is chicken manure used as fertilizer? How much manure does a chicken produce?
chicken_dict = {"type": ["wheat", "soybean meal", "rapeseed", "oats", "peas"], ##"other"],
"gram": [45.95, 21.62*0.8, 4.04, 23.15, 9.7]} ##, 16.84]}
chicken_Lancet_diet_per_day = pd.DataFrame(chicken_dict)
chicken_Lancet_diet_per_day = chicken_Lancet_diet_per_day.set_index(["type"])
chicken_feed_per_g_meat = (chicken_Lancet_diet_per_day["gram"]*42)/1476
chicken_feed_per_g_egg = chicken_Lancet_diet_per_day["gram"]/62.35937
total_feed_meat_chickens_for_Lancet_diet_per_day = chicken_feed_per_g_meat * d.loc[["chicken and other poultry"], ["BMI" , "EAT", "Org"]].values[0][0]
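        ## worked check (approximate, illustration only): the feed grams above sum to ~100 g/day,
        ## so one broiler eats about 100*42 ≈ 4200 g over 6 weeks for 1476 g of meat,
        ## i.e. roughly 2.8 g of feed per gram of chicken meat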
total_feed_egg_chickens_for_Lancet_diet_per_day = chicken_feed_per_g_egg * d.loc[["eggs"], ["BMI" , "EAT", "Org"]].values[0][0]
##feed required for 1 lamb per day
##all concentrates except corn and soy assumed to be by-products
##soymeal in concentrate equals 0.8 times fresh soybeans in weight (soymeal.org)
##corn (maize grain) in concentrate equals 0.86 times fresh yield? Based on dry mass?
##one lamb gives 35.24% of its original weight as meat. One slaughtered lamb weighs 40kg so 40* 0.3524 = 14.096 kg meat per lamb
##feed composition assumed to be similar to milk cow (both pasture raised and ruminants).Feed requirement about 1kg a day (Bello et al, 2016)
##manure production
lamb_dict = {"type": ["grass", "corn", "soybean meal"], ##"citrus pulp concentrate", "palm kernel meal concentrate", "rapeseed meal concentrate", "beet pulp concentrate", "wheat concentrate", "rest products"],
"gram": [687.5, 312.5 + (20.8*0.86), 12.5*0.8]} ##, 8.33, 8.33, 8.33, 4.15, 4.15, 16.66]}
lamb_Lancet_diet_per_day = pd.DataFrame(lamb_dict)
lamb_Lancet_diet_per_day = lamb_Lancet_diet_per_day.set_index(["type"])
lamb_feed_per_g_meat = (lamb_Lancet_diet_per_day["gram"]*365)/14096
total_feed_lamb_for_Lancet_diet_per_day = lamb_feed_per_g_meat * d.loc[["beef and lamb"], ["BMI" , "EAT", "Org"]].values[0][0]
##need to add beef/lamb ratio
##one slaughtered pig gives on average 57% of its live weight as meat, slaughtered weight is 95.2kg so 95.2*0.57 = 54.264kg meat per fattening pig
##one pig lives 88 days (based on BINternet growth per day) and uses 185,064kg of feed in its life (based on BINternet feed conversion) so eats 2,103kg of feed a day
##feed requirement based on byproducts scenario of Lassaletta et al 2016
##manure production
##swill and molasses assumed to be by-products
##are brans a by-product? Do they require extra production? Assumed to be about 10% of original crop (Feedipedia)
pig_dict = {"type": ["corn", "barley", "brans", "wheat"], ##"swill", "molasses"],
"gram": [378.54, 147.21, 525.75, 630.9]} ##, 210.3, 210.3]}
pig_Lancet_diet_per_day = pd.DataFrame(pig_dict)
pig_Lancet_diet_per_day = pig_Lancet_diet_per_day.set_index(["type"])
pig_feed_per_g_meat = (pig_Lancet_diet_per_day["gram"]*88)/54264
total_feed_pig_for_Lancet_diet_per_day = pig_feed_per_g_meat * d.loc[["pork"], ["BMI" , "EAT", "Org"]].values[0][0]
##create crop demands including demand for feed crops:
##assuming no waste in feedcrops
d.loc[["rice wheat corn and other"], ["BMI" , "EAT", "Org"]] *= (1 + group.loc[group["Foodtype"] == "Cereals", "Total"].min())
d.loc[["rice wheat corn and other"], ["BMI" , "EAT", "Org"]] += total_feed_cows_for_Lancet_diet_per_day.loc["corn"] + total_feed_meat_chickens_for_Lancet_diet_per_day.loc["wheat"] + total_feed_meat_chickens_for_Lancet_diet_per_day.loc["oats"] + total_feed_egg_chickens_for_Lancet_diet_per_day.loc["wheat"] + total_feed_egg_chickens_for_Lancet_diet_per_day.loc["oats"] + total_feed_lamb_for_Lancet_diet_per_day.loc["corn"] + total_feed_pig_for_Lancet_diet_per_day.loc["corn"] + total_feed_pig_for_Lancet_diet_per_day.loc["barley"] + total_feed_pig_for_Lancet_diet_per_day.loc["wheat"]
d.loc[["potatoes and cassava"], ["BMI" , "EAT", "Org"]] *= (1 + group.loc[group["Foodtype"] == "Rootsandtubers", "Total"].min())
d.loc[["dry beans lentils and peas"], ["BMI" , "EAT", "Org"]] *= (1 + group.loc[group["Foodtype"] == "Oilseedsandpulses", "Total"].min())
d.loc[["dry beans lentils and peas"], ["BMI" , "EAT", "Org"]] += total_feed_meat_chickens_for_Lancet_diet_per_day.loc["peas"] + total_feed_egg_chickens_for_Lancet_diet_per_day.loc["peas"]
d.loc[["soy foods"], ["BMI" , "EAT", "Org"]] *= (1 + group.loc[group["Foodtype"] == "Oilseedsandpulses", "Total"].min())
d.loc[["soy foods"], ["BMI" , "EAT", "Org"]] += total_feed_cows_for_Lancet_diet_per_day.loc["soybean meal"] + total_feed_lamb_for_Lancet_diet_per_day.loc["soybean meal"] + total_feed_meat_chickens_for_Lancet_diet_per_day.loc["soybean meal"] + total_feed_egg_chickens_for_Lancet_diet_per_day.loc["soybean meal"]
d.loc[["peanuts"], ["BMI" , "EAT", "Org"]] *= (1 + group.loc[group["Foodtype"] == "Oilseedsandpulses", "Total"].min())
d.loc[["tree nuts"], ["BMI" , "EAT", "Org"]] *= (1 + group.loc[group["Foodtype"] == "Oilseedsandpulses", "Total"].min())
#d.loc[["palm oil"], ["BMI" , "EAT", "Org"]] *= (1 + group.loc[group["Foodtype"] == "Oilseedsandpulses", "Total"].min())
d.loc[["unsaturated oils"], ["BMI" , "EAT", "Org"]] *= (1 + group.loc[group["Foodtype"] == "Oilseedsandpulses", "Total"].min())
d.loc[["unsaturated oils"], ["BMI" , "EAT", "Org"]] += total_feed_meat_chickens_for_Lancet_diet_per_day.loc["rapeseed"] + total_feed_egg_chickens_for_Lancet_diet_per_day.loc["rapeseed"]
d.loc[["all fruit"], ["BMI" , "EAT", "Org"]] *= (1 + group.loc[group["Foodtype"] == "Fruitsandvegetables", "Total"].min())
#d.loc[["all vegetables"], ["BMI" , "EAT", "Org"]] *= (1 + group.loc[group["Foodtype"] == "Fruitsandvegetables", "Total"].min())
d.loc[["dark green vegetables"], ["BMI" , "EAT", "Org"]] *= (1 + group.loc[group["Foodtype"] == "Fruitsandvegetables", "Total"].min())
d.loc[["red and orange vegetables"], ["BMI" , "EAT", "Org"]] *= (1 + group.loc[group["Foodtype"] == "Fruitsandvegetables", "Total"].min())
Region_demands[name] = d.loc[(Lancet_diet["GROUP"] == name)]
return Region_demands
def feed_remove(groups, Lancet_diet):
Region_demands = {}
for name, group in groups:
d = Lancet_diet.copy()
d["GROUP"] = name
#print(d)
##create animal product demands:
d.loc[["beef and lamb"], ["Org_nf"]] *= (1 - group.loc[group["Foodtype"] == "Meat", "Total"].min())
d.loc[["pork"], ["Org_nf"]] *= (1 - group.loc[group["Foodtype"] == "Meat", "Total"].min())
d.loc[["chicken and other poultry"], ["Org_nf"]] *= (1 - group.loc[group["Foodtype"] == "Meat", "Total"].min())
d.loc[["fish"], ["Org_nf"]] *= (1 - group.loc[group["Foodtype"] == "Fishandseafood", "Total"].min())
#d = d.drop(["fish"])
#d.loc[["whole milk or derivative equivalents"], ["BMI" , "EAT", "Org"]] *= (1 + group.loc[group["Foodtype"] == "Milk", "Total"].min())
#d.loc[["eggs"], ["BMI" , "EAT", "Org"]] *= (1 + group.loc[group["Foodtype"] == "Milk", "Total"].min())
##create feed demands:
##need to determine oil production from soymeal production for feed
##feed required for 1 dairy cow per day
##similar feed is assumed for beef cattle being reared on mixed system (pasture/crop-fed)
##all concentrates except corn and soy assumed to be by-products
##soymeal in concentrate equals 0.8 times fresh soybeans in weight (soymeal.org)
##corn equals sum of fresh and corn in concentrate
##corn (maize grain) in concentrate equals 0.86 times fresh yield? Based on dry mass?
cow_dict = {"type": ["grass", "corn", "soybean meal"], ##"citrus pulp concentrate", "palm kernel meal concentrate", "rapeseed meal concentrate", "beet pulp concentrate", "wheat concentrate", "rest products"],
"gram": [41250, 13750 + (1250/0.86), 750/0.8 ]}##, 500, 500, 500, 250, 250, 1000]}
cow_Lancet_diet_per_day = pd.DataFrame(cow_dict)
cow_Lancet_diet_per_day = cow_Lancet_diet_per_day.set_index(["type"])
cow_feed_per_g_milk = cow_Lancet_diet_per_day["gram"]/25000
##beef production from dairy cows and dairy calf rearing
calf_per_g_milk = (1.5/(25000*365*6)) ##3 calves per cow divided by two as only males are used(?)
##Type A calf of Nguyen 2010 using 8438 kg of feed per 1000 kg carcass weight (= per 660kg edible meat)
##(significantly more soymeal --> look into, maybe change Lancet_diet)
g_calf_per_g_milk = calf_per_g_milk * 214880
cow_feed_per_g_calf = ((cow_Lancet_diet_per_day["gram"]/55000)*8438000)/660000
##One 680 kg Holstein dairy cow delivers 224.52 kg of meat (excluding offal and bones) ##what to do with offal?
g_dairycow_beef_per_g_milk = 224520.0 / 36500000.0 #36500000 g milk in her milk giving time of 4 years
g_beef_per_g_milk = g_calf_per_g_milk + g_dairycow_beef_per_g_milk
##feed demand from classic suckler-cow rearing systems is 20863 kg of feed per 1000kg carcass weight (= per 660kg edible meat) (Nguyen 2010)
cow_feed_per_g_suckler_beef = ((cow_Lancet_diet_per_day["gram"]/55000)*20863000)/660000
##required extra beef production besides dairy cows and their calves to reach demand
required_extra_beef_production = max(d.loc[["beef and lamb"], ["Org_nf"]].values[0][0] - (d.loc[["whole milk or derivative equivalents"], ["Org_nf"]].values[0][0] * g_beef_per_g_milk), 0)
##this needs a lamb factor
total_feed_cows_for_Lancet_diet_per_day = (d.loc[["whole milk or derivative equivalents"], ["Org_nf"]].values[0][0] * g_calf_per_g_milk * cow_feed_per_g_calf) + (d.loc[["whole milk or derivative equivalents"], ["Org_nf"]].values[0][0] * cow_feed_per_g_milk) + (required_extra_beef_production * cow_feed_per_g_suckler_beef)
##one dutch cow delivers on average 25 liter milk per day and eats 55kg of feed a day
##assuming 3 calves per dairy cow of which half is male so used for slaughter
##one dutch dairy cow is culled after 6 years on average
##if not, how much feed does a meat cow need?
##how much manure do the cows produce? (for effect on N input ratio)
##soybean meal assumed to equal 0.8 times fresh soybean weight as in cow Lancet_diet
##whole grains assumed here
##one dutch egg-laying chicken lays 0.85232877 egg per day amounting to 19400/311.1 = 62.35937 gram egg per day
##one dutch chicken eats 121.3 gram feed per day (both broiler and egg)
##chicken feed based on Rezaei et al (high protein organic Lancet_diet) and ratios based on 1/3 of feeds used in first and 2/3 of last stages of life, byproducts and supplements (under 3%) placed in "other"
##one dutch broiler chicken lives 6 weeks, averages 2446g and delivers 166+547+243+520 = 1476 gram of meat
##is chicken manure used as fertilizer? How much manure does a chicken produce?
chicken_dict = {"type": ["wheat", "soybean meal", "rapeseed", "oats", "peas"], ##"other"],
"gram": [45.95, 21.62/0.8, 4.04, 23.15, 9.7]} ##, 16.84]}
        chicken_Lancet_diet_per_day = pd.DataFrame(chicken_dict)
#! /user/bin/python
""" Ensures all objects in the comprehend.rightcall s3 bucket
are added, along with their metadata to the local elasticsearch index.
Metadata is stored in local dynamodb database.
Flow:
If ref from s3 object exists in elasticsearch index with all its meta data:
Do Nothing
If exists without metadata add ref to list csv file of refs for which metadata
is needed.
If doesn't exist in index, download it and try to get metadata
"""
import dynamodb_tools
import elasticsearch_tools
import s3 as s3py
import pandas as pd
import boto3
import json
import logging
class Comp2Elas:
def __init__(self, region, db_endpoint, bucket, directory, es_endpoint, loglevel='INFO'):
self.region = region
self.db_endpoint = db_endpoint
self.bucket = bucket
self.directory = directory
self.es_endpoint = es_endpoint
self.LOGLEVEL = loglevel
self.setup()
def setup(self):
# Create the following directories if they don't already exist
self.csv_dir = self.directory + 'data/csvs/'
self.mp3_dir = self.directory + 'data/mp3s'
self.dynamodb = boto3.resource('dynamodb',
region_name=self.region,
endpoint_url=self.db_endpoint)
# Find the name of the table(s) that exist at this endpoint
self.TABLE_NAME = 'Rightcall'
self.table = self.dynamodb.Table(self.TABLE_NAME)
self.INDEX_NAME = 'rightcall'
self.TYPE_NAME = '_doc'
self.s3 = boto3.client('s3')
# Get host and port from endpoint string
self.es_host = self.es_endpoint.split(':')[1].replace('/', '')
self.es_port = int(self.es_endpoint.split(':')[2])
self.es = elasticsearch_tools.Elasticsearch([{'host': self.es_host,
'port': self.es_port}])
# Logging
levels = ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']
if self.LOGLEVEL not in levels:
raise ValueError(f"Invalid log level choice {self.LOGLEVEL}")
self.logger = logging.getLogger(self.__class__.__name__)
self.logger.setLevel(self.LOGLEVEL)
# create console handler and set level to LOGLEVEL
ch = logging.StreamHandler()
ch.setLevel(self.LOGLEVEL)
# create file handler and set level to DEBUG
fh = logging.FileHandler('rightcall_local.log')
fh.setLevel(logging.DEBUG)
# create formatter
formatter = logging.Formatter('%(asctime)s : %(levelname)s : %(name)s : %(message)s')
# add formatter to ch
ch.setFormatter(formatter)
fh.setFormatter(formatter)
# add ch to logger
self.logger.addHandler(ch)
self.logger.addHandler(fh)
def update_existing_items(self, source=None):
if source is None:
source = self.bucket
else:
source = str(source)
refs = self.get_all_refs_from_s3_objects(source)
# get_meta_data = []
# Forcing the function to update all documents in index with values in objects in bucket
for i, call_record in enumerate(refs):
s3_item = None
ref = call_record['Name']
s3_item = s3py.get_first_matching_item(ref, source)
s3_item = elasticsearch_tools.rename(s3_item)
try:
result = elasticsearch_tools.update_document(self.es, self.INDEX_NAME, s3_item['referenceNumber'], s3_item)
self.logger.debug(f"Result: {result}")
except Exception as err:
self.logger.error(str(err))
return
def get_reference_number_from_object_name(self, object_name_string):
""" Given s3 object name: 'e23413582523--QUIDP.json' or 'e23413582523P.json':
return just 'e23413582523'
"""
self.logger.debug(f"Received: {object_name_string}")
if '--' in object_name_string:
reference_number = object_name_string.split('--')[0]
elif '.json' in object_name_string:
reference_number = object_name_string.split('.')[0]
else:
reference_number = object_name_string
self.logger.debug(f"Ref Num: {reference_number}")
if '--' in reference_number or '.json' in reference_number:
raise ValueError(f"Invalid characters detected in reference number: {object_name_string}")
return reference_number
def get_all_refs_from_s3_objects(self, bucket_name):
"""Given an s3 bucket name, returns a list of the reference numbers
contained in the names of all objects in that bucket
Input: <string> 'comprehend.rightcall'
Output: <list> ['b310f08130r3', 'c210935j22239', ...]
"""
self.logger.info(f"Getting objects from {bucket_name}")
keys = self.s3.list_objects_v2(Bucket=bucket_name)
self.logger.debug(f"Received {len(keys['Contents'])} objects from {bucket_name}")
list_of_reference_numbers = []
for key in keys['Contents']:
ref = self.get_reference_number_from_object_name(key['Key'])
list_of_reference_numbers.append({'Name': ref})
return list_of_reference_numbers
def add_new_or_incomplete_items(self, source=None):
"""Ensures elasticsearch index has all the records that exist in comprehend.rightcall bucket
and that they are fully populated with as much information as possible.
Pulls objects down from comprehend.rightcall bucket.
For each object:
Checks if it exists in elasticsearch already.
Checks if it has all the required fields populated with data.
If so - moves on to next item
If not - Checks if that missing data can be found in dynamodb
if so - grabs it from dynamodb, combines it with s3 obeject data
and uploads to elasticsearch index
if not - adds the filename (refNumber) to csv file to be returned."""
print(source)
if source is None:
source = self.bucket
else:
source = str(source)
refs = self.get_all_refs_from_s3_objects(source)
get_meta_data = []
# For each reference number:
for i, call_record in enumerate(refs):
s3_item = None
db_item = None
self.logger.debug('---------------------------------------')
self.logger.debug(f"Working on {i} : {call_record['Name']}")
ref = call_record['Name']
if elasticsearch_tools.exists(self.es, self.INDEX_NAME, ref):
self.logger.debug(f"{ref} already in {self.INDEX_NAME} index")
else:
self.logger.debug(f"{ref} not in {self.INDEX_NAME} index")
self.logger.debug(f"Checking {source} bucket for {call_record['Name']}")
s3_item = s3py.get_first_matching_item(ref, source)
self.logger.debug(f"Preparing data")
s3_item = elasticsearch_tools.rename(s3_item)
if elasticsearch_tools.fully_populated_in_elasticsearch(ref, self.es, self.INDEX_NAME):
self.logger.debug(f"{ref} fully populated in {self.INDEX_NAME}")
continue
else:
self.logger.debug(f"{ref} missing metadata")
self.logger.debug(f"Checking {self.table} database for missing metadata")
db_item = dynamodb_tools.get_db_item(ref, self.table)
if not db_item:
self.logger.debug(f"Adding {ref} to 'get_meta_data'")
get_meta_data.append(ref)
continue
else:
self.logger.debug(f"Data present in {self.table} database: {db_item}")
# Upload to elasticsearch
if s3_item is None:
self.logger.debug(f"Ensuring object is downloaded from {source}")
s3_item = s3py.get_first_matching_item(ref, source)
# Prepare data for ES
self.logger.debug(f"cleaning data")
s3_item = elasticsearch_tools.rename(s3_item)
self.logger.debug(f"Combining data for {ref} from {self.table} and {source} and adding to {self.INDEX_NAME} index")
result = elasticsearch_tools.load_call_record(
db_item,
s3_item,
self.es,
self.INDEX_NAME)
if result:
self.logger.debug(f"{ref} successfully added to {self.INDEX_NAME} index")
else:
self.logger.error(f"Couldn't upload to elasticsearch: {result}")
self.logger.debug(f"Refs without metadata {get_meta_data}")
return get_meta_data
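# Hypothetical usage sketch (not part of the original module); the endpoints and bucket
# below are placeholder examples only.
def example_sync_run():
    """Illustrative driver: index new/incomplete call records, then save refs lacking metadata."""
    c2e = Comp2Elas(region='eu-west-1',
                    db_endpoint='http://localhost:8000',
                    bucket='comprehend.rightcall',
                    directory='./',
                    es_endpoint='http://localhost:9200',
                    loglevel='INFO')
    missing_refs = c2e.add_new_or_incomplete_items()
    write_to_csv(missing_refs, c2e.csv_dir + 'refs_without_metadata.csv')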
def parse_csv(path_to_file):
file = pd.read_csv(path_to_file, sep=';')
json_file = file.to_json(orient='records')
data = json.loads(json_file)
return data
def write_to_csv(ref_list, path):
logger = logging.getLogger()
logger.debug(ref_list)
    df = pd.DataFrame.from_dict({'col': ref_list})
    # write the reference numbers to the given csv path
    df.to_csv(path, index=False)
import pandas as pd
#import geopandas as gpd
import numpy as np
import os
#from sqlalchemy import create_engine
from scipy import stats
from sklearn.preprocessing import MinMaxScaler
import math
#from shapely import wkt
from datetime import datetime, timedelta, date
import time
from sklearn.ensemble import RandomForestRegressor
from sklearn import metrics
import requests
from pyspark.sql import SparkSession
from pyspark.sql.functions import substring, length, col, expr
from pyspark.sql.types import *
import matplotlib.pyplot as plt
#import contextily as cx --> gives error?
spark = SparkSession \
.builder \
.getOrCreate()
def get_minio_herkomst_2020():
bucket = "gvb-gvb"
data_key = "*/*/*/Datalab_Reis_Herkomst_Uur_*.csv"
data_location = bucket + "/" + data_key
schema_herkomst = StructType([StructField("Datum", StringType(), True),
StructField("UurgroepOmschrijving (van vertrek)", StringType(), True),
StructField("VertrekHalteCode", StringType(), True),
StructField("VertrekHalteNaam", StringType(), True),
StructField("HerkomstLat", StringType(), True),
StructField("HerkomstLon", StringType(), True),
StructField("AantalReizen", IntegerType(), True)
])
cols_herkomst = ["Datum","UurgroepOmschrijving (van vertrek)","VertrekHalteCode","VertrekHalteNaam","AantalReizen"]
gvb_herkomst_raw_csv = spark.read.format("csv").option("header", "true").load(data_location, header = 'True', schema = schema_herkomst, sep = ";").select(*cols_herkomst)
gvb_herkomst_raw_csv = gvb_herkomst_raw_csv.distinct()
gvb_herkomst_raw_csv = gvb_herkomst_raw_csv.toPandas()
return gvb_herkomst_raw_csv
def get_minio_bestemming_2020 ():
bucket = "gvb-gvb"
data_key = "topics/gvb/*/*/*/Datalab_Reis_Bestemming_Uur_*.csv"
data_location = f"s3a://{bucket}/{data_key}"
schema_bestemming = StructType(
[StructField("Datum", StringType(), True),
StructField("UurgroepOmschrijving (van aankomst)", StringType(), True),
StructField("AankomstHalteCode", StringType(), True),
StructField("AankomstHalteNaam", StringType(), True),
StructField("AankomstLat", StringType(), True),
StructField("AankomstLon", StringType(), True),
StructField("AantalReizen", IntegerType(), True)
])
cols_bestemming = ["Datum","UurgroepOmschrijving (van aankomst)","AankomstHalteCode","AankomstHalteNaam","AantalReizen"]
gvb_bestemming_raw_csv = spark.read.format("csv").option("header", "true").load(data_location, header = 'True', schema = schema_bestemming, sep = ";").select(*cols_bestemming)
gvb_bestemming_raw_csv = gvb_bestemming_raw_csv.distinct()
gvb_bestemming_raw_csv = gvb_bestemming_raw_csv.toPandas()
return gvb_bestemming_raw_csv
def get_minio_herkomst_2021 ():
bucket = "gvb-gvb"
data_key = "topics/gvb/2021/*/*/Datalab_Reis_Herkomst_Uur_2021*.csv"
data_location = f"s3a://{bucket}/{data_key}"
schema_herkomst = StructType([StructField("Datum", StringType(), True),
StructField("UurgroepOmschrijving (van vertrek)", StringType(), True),
StructField("VertrekHalteCode", StringType(), True),
StructField("VertrekHalteNaam", StringType(), True),
StructField("HerkomstLat", StringType(), True),
StructField("HerkomstLon", StringType(), True),
StructField("AantalReizen", IntegerType(), True)
])
cols_herkomst = ["Datum","UurgroepOmschrijving (van vertrek)","VertrekHalteCode","VertrekHalteNaam","AantalReizen"]
gvb_herkomst_raw_csv = spark.read.format("csv").option("header", "true").load(data_location, header = 'True', schema = schema_herkomst, sep =";").select(*cols_herkomst)
gvb_herkomst_raw_csv = gvb_herkomst_raw_csv.distinct()
gvb_herkomst_raw_csv = gvb_herkomst_raw_csv.toPandas()
return gvb_herkomst_raw_csv
def get_minio_bestemming_2021 ():
bucket = "gvb-gvb"
data_key = "topics/gvb/2021/*/*/Datalab_Reis_Bestemming_Uur_2021*.csv"
data_location = f"s3a://{bucket}/{data_key}"
schema_bestemming = StructType(
[StructField("Datum", StringType(), True),
StructField("UurgroepOmschrijving (van aankomst)", StringType(), True),
StructField("AankomstHalteCode", StringType(), True),
StructField("AankomstHalteNaam", StringType(), True),
StructField("AankomstLat", StringType(), True),
StructField("AankomstLon", StringType(), True),
StructField("AantalReizen", IntegerType(), True)
])
cols_bestemming = ["Datum","UurgroepOmschrijving (van aankomst)","AankomstHalteCode","AankomstHalteNaam","AantalReizen"]
gvb_bestemming_raw_csv = spark.read.format("csv").option("header", "true").load(data_location, header = 'True', schema = schema_bestemming, sep = ";").select(*cols_bestemming)
gvb_bestemming_raw_csv = gvb_bestemming_raw_csv.distinct()
gvb_bestemming_raw_csv = gvb_bestemming_raw_csv.toPandas()
return gvb_bestemming_raw_csv
def read_csv_dir(dir):
read_csv_beta = pd.read_csv(dir,sep=';')
return read_csv_beta
def get_knmi_obs():
knmi_obs_schema = StructType([StructField("DD", StringType(), True),
StructField("DR", StringType(), True),
StructField("FF", StringType(), True),
StructField("FH", StringType(), True),
StructField("FX", StringType(), True),
StructField("IX", StringType(), True),
StructField("M", IntegerType(), True),
StructField("N", IntegerType(), True),
StructField("O", IntegerType(), True),
StructField("P", IntegerType(), True),
StructField("Q", IntegerType(), True),
StructField("R", IntegerType(), True),
StructField("RH", IntegerType(), True),
StructField("S", IntegerType(), True),
StructField("SQ", IntegerType(), True),
StructField("T", IntegerType(), True),
StructField("T10N", IntegerType(), True),
StructField("TD", IntegerType(), True),
StructField("U", IntegerType(), True),
StructField("VV", IntegerType(), True),
StructField("WW", IntegerType(), True),
StructField("Y", IntegerType(), True),
StructField("date", StringType(), True),
StructField("hour", IntegerType(), True),
StructField("station_code", IntegerType(), True)
])
knmi_obs = spark.read.format("json").option("header", "true").load("s3a://knmi-knmi/topics/knmi-observations/2021/*/*/*", schema=knmi_obs_schema)
return knmi_obs
def get_knmi_preds():
knmi_pred_schema = StructType([StructField("cape", IntegerType(), True),
StructField("cond", StringType(), True),
StructField("gr", StringType(), True),
StructField("gr_w", StringType(), True),
StructField("gust", StringType(), True),
StructField("gustb", StringType(), True),
StructField("gustkmh", StringType(), True),
StructField("gustkt", StringType(), True),
StructField("hw", StringType(), True),
StructField("ico", StringType(), True),
StructField("icoon", StringType(), True),
StructField("loc", StringType(), True),
StructField("luchtd", StringType(), True),
StructField("luchtdinhg", StringType(), True),
StructField("luchtdmmhg", StringType(), True),
StructField("lw", StringType(), True),
StructField("mw", StringType(), True),
StructField("neersl", StringType(), True),
StructField("offset", StringType(), True),
StructField("rv", StringType(), True),
StructField("samenv", IntegerType(), True),
StructField("temp", StringType(), True),
StructField("tijd", StringType(), True),
StructField("tijd_nl", StringType(), True),
StructField("tw", StringType(), True),
StructField("vis", StringType(), True),
StructField("windb", StringType(), True),
StructField("windkmh", StringType(), True),
StructField("windknp", StringType(), True),
StructField("windr", StringType(), True),
StructField("windrltr", StringType(), True),
StructField("winds", StringType(), True)
])
knmi_pred_cols = ('cape', 'cond', 'gr', 'gr_w', 'gust', 'gustb', 'gustkmh', 'gustkt',
'hw', 'ico', 'icoon', 'loc', 'luchtd', 'luchtdinhg', 'luchtdmmhg', 'lw',
'mw', 'neersl', 'offset', 'rv', 'samenv', 'temp', 'tijd', 'tijd_nl',
'tw', 'vis', 'windb', 'windkmh', 'windknp', 'windr', 'windrltr',
'winds')
knmi_pred = spark.read.format("json").option("header", "true").load("s3a://knmi-knmi/topics/knmi/2021/*/*/*.json.gz", schema=knmi_pred_schema).select(*knmi_pred_cols)
return knmi_pred
def get_prediction_df():
"""
Return the prediction dataframe (date- and hours only)
"""
this_year = date.today().isocalendar()[0]
this_week = date.today().isocalendar()[1]
    firstdayofweek = datetime.strptime(f'{this_year}-W{int(this_week)}-1', "%Y-W%W-%w").date()
    prediction_date_range = pd.date_range(firstdayofweek, periods=8, freq='D')
prediction_date_range_hour = pd.date_range(prediction_date_range.min(), prediction_date_range.max(), freq='h').delete(-1)
return prediction_date_range_hour
def get_vacations():
"""
Retrieves vacations in the Netherlands from the Government of the Netherlands (Rijksoverheid) and returns
the list of dates that are vacation dates
"""
vacations_url = 'https://opendata.rijksoverheid.nl/v1/sources/rijksoverheid/infotypes/schoolholidays?output=json'
vacations_raw = requests.get(url = vacations_url).json()
df_vacations = pd.DataFrame(columns={'vacation', 'region', 'startdate', 'enddate'})
for x in range(0, len(vacations_raw)): # Iterate through all vacation years
for y in range(0, len(vacations_raw[0]['content'][0]['vacations'])): # number of vacations in a year
dates = pd.DataFrame(vacations_raw[x]['content'][0]['vacations'][y]['regions'])
dates['vacation'] = vacations_raw[x]['content'][0]['vacations'][y]['type'].strip() # vacation name
dates['school_year'] = vacations_raw[x]['content'][0]['schoolyear'].strip() # school year
df_vacations = df_vacations.append(dates)
filtered = df_vacations[(df_vacations['region']=='noord') | (df_vacations['region']=='heel Nederland')]
vacations_date_only = pd.DataFrame(columns={'date'})
for x in range(0, len(filtered)):
df_temporary = pd.DataFrame(data = {'date':pd.date_range(filtered.iloc[x]['startdate'], filtered.iloc[x]['enddate'], freq='D') + pd.Timedelta(days=1)})
vacations_date_only = vacations_date_only.append(df_temporary)
    vacations_date_only['date'] = vacations_date_only['date'].apply(lambda x: x.date())
vacations_date_only['date'] = vacations_date_only['date'].astype('datetime64[ns]')
# Since the data from Rijksoverheid starts from school year 2019-2020, add the rest of 2019 vacations manually!
kerst_18 = pd.DataFrame(data = {'date': pd.date_range(date(2019, 1, 1), periods = 6, freq='1d')})
voorjaar_19 = pd.DataFrame(data = {'date': pd.date_range(date(2019, 2, 16), periods = 9, freq='1d')})
mei_19 = pd.DataFrame(data = {'date': pd.date_range(date(2019, 4, 27), periods = 9, freq='1d')})
zomer_19 = pd.DataFrame(data = {'date': pd.date_range(date(2019, 7, 13), periods = 7*6 + 2, freq='1d')})
vacations_date_only = vacations_date_only.append([kerst_18, voorjaar_19, mei_19, zomer_19])
return vacations_date_only
def get_events():
"""
Event data from static file. We can store events in the database in the near future. When possible, we can get it from an API.
"""
events = pd.read_excel('events_zuidoost.xlsx', sheet_name='Resultaat', header=1)
# Clean
events.dropna(how='all', inplace=True)
events.drop(events.loc[events['Datum']=='Niet bijzonder evenementen zijn hierboven niet meegenomen.'].index, inplace=True)
events.drop(events.loc[events['Locatie'].isna()].index, inplace=True)
events.drop(events.loc[events['Locatie']=='Overig'].index, inplace=True)
events['Datum'] = events['Datum'].astype('datetime64[ns]')
# Fix location names
events['Locatie'] = events['Locatie'].apply(lambda x: x.strip()) # Remove spaces
events['Locatie'] = np.where(events['Locatie'] == 'Ziggo dome', 'Ziggo Dome', events['Locatie'])
events['Locatie'] = np.where(events['Locatie'] == 'Ziggo Dome (2x)', 'Ziggo Dome', events['Locatie'])
# Get events from 2019 from static file
events = events[events['Datum'].dt.year>=2019].copy()
events.reset_index(inplace=True)
events.drop(columns=['index'], inplace=True)
# Add 2020-present events manually
events = events.append({'Datum':datetime(2020, 1, 19)}, ignore_index=True) # Ajax - Sparta
events = events.append({'Datum':datetime(2020, 2, 2)}, ignore_index=True) # Ajax - PSV
events = events.append({'Datum':datetime(2020, 2, 16)}, ignore_index=True) # Ajax - RKC
events = events.append({'Datum':datetime(2020, 1, 3)}, ignore_index=True) # Ajax - AZ
# Euro 2021
events = events.append({'Datum':datetime(2021, 6, 13)}, ignore_index=True) # EURO 2020 Nederland- Oekraïne
events = events.append({'Datum':datetime(2021, 6, 17)}, ignore_index=True) # EURO 2020 Nederland- Oostenrijk
events = events.append({'Datum':datetime(2021, 6, 21)}, ignore_index=True) # EURO 2020 Noord-Macedonië - Nederland
events = events.append({'Datum':datetime(2021, 6, 26)}, ignore_index=True) # EURO 2020 Wales - Denemarken
return events
def merge_csv_json(bestemming_csv, herkomst_csv, bestemming_json, herkomst_json):
bestemming = pd.concat([bestemming_csv, bestemming_json]).copy()
herkomst = pd.concat([herkomst_csv, herkomst_json]).copy()
return [bestemming, herkomst]
def merge_bestemming_herkomst(bestemming, herkomst):
bestemming.rename(columns={'AantalReizen':'Uitchecks',
'UurgroepOmschrijving (van aankomst)':'UurgroepOmschrijving',
'AankomstHalteCode':'HalteCode',
'AankomstHalteNaam':'HalteNaam'}, inplace=True)
herkomst.rename(columns={'AantalReizen':'Inchecks',
'UurgroepOmschrijving (van vertrek)':'UurgroepOmschrijving',
'VertrekHalteCode':'HalteCode',
'VertrekHalteNaam':'HalteNaam'}, inplace=True)
merged = pd.merge(left=bestemming, right=herkomst,
left_on=['Datum', 'UurgroepOmschrijving', 'HalteNaam'],
right_on=['Datum', 'UurgroepOmschrijving', 'HalteNaam'],
how='outer')
return merged
def preprocess_gvb_data_for_modelling(gvb_df, station):
df = gvb_df[gvb_df['HalteNaam']==station].copy()
# create datetime column
df['datetime'] = df['Datum'].astype('datetime64[ns]')
df['UurgroepOmschrijving'] = df['UurgroepOmschrijving'].astype(str)
df['hour'] = df['UurgroepOmschrijving'].apply(lambda x: int(x[:2]))
# add time indications
df['week'] = df['datetime'].dt.isocalendar().week
df['month'] = df['datetime'].dt.month
df['year'] = df['datetime'].dt.year
df['weekday'] = df['datetime'].dt.weekday
hours = pd.get_dummies(df['hour'], prefix='hour')
days = pd.get_dummies(df['weekday'], prefix='weekday')
df = pd.concat([df, hours, days], axis=1)
# drop duplicates and sort
df_ok = df.drop_duplicates()
# sort values and reset index
df_ok = df_ok.sort_values(by = 'datetime')
df_ok = df_ok.reset_index(drop = True)
# drop unnecessary columns
df_ok.drop(columns=['Datum', 'UurgroepOmschrijving', 'HalteNaam'], inplace=True)
# rename columns
df_ok.rename(columns={'Inchecks':'check-ins', 'Uitchecks':'check-outs'}, inplace=True)
return df_ok
def preprocess_knmi_data_hour(df_raw):
"""
Prepare the raw knmi data for modelling.
We rename columns and resample from 60min to 15min data.
Also, we will create a proper timestamp.
Documentation: https://www.daggegevens.knmi.nl/klimatologie/uurgegevens
"""
# drop duplicates
df_raw = df_raw.drop_duplicates()
# rename columns
df = df_raw.rename(columns={"DD": "wind_direction", "FH": "wind_speed_h", "FF": "wind_speed", "FX": "wind_gust",
"T": "temperature", "T10N": "temperature_min", "TD": "dew_point_temperature",
"SQ": "radiation_duration", "Q": "global_radiation",
"DR": "precipitation_duration", "RH": "precipitation_h",
"P": "pressure", "VV": "sight", "N": "cloud_cover", "U": "relative_humidity",
"WW": "weather_code", "IX": "weather_index",
"M": "fog", "R": "rain", "S": "snow", "O": "thunder", "Y": "ice"
})
# get proper datetime column
df["datetime"] = pd.to_datetime(df['date'], format='%Y%m%dT%H:%M:%S.%f') + pd.to_timedelta(df["hour"] - 1, unit = 'hours')
df["datetime"] = df["datetime"].dt.tz_convert("Europe/Amsterdam")
df = df.sort_values(by = "datetime", ascending = True)
df = df.reset_index(drop = True)
df['date'] = df['datetime'].dt.date
df['date'] = df['date'].astype('datetime64[ns]')
df['hour'] -= 1
# drop unwanted columns
df = df.drop(['datetime', 'weather_code', 'station_code'], axis = 'columns')
df = df.astype({'wind_speed':'float64', 'wind_gust':'float64','temperature':'float64','temperature_min':'float64',
'dew_point_temperature':'float64','radiation_duration':'float64','precipitation_duration':'float64',
'precipitation_h':'float64','pressure':'float64'})
# divide some columns by ten (because using 0.1 degrees C etc. as units)
col10 = ["wind_speed", "wind_gust", "temperature", "temperature_min", "dew_point_temperature",
"radiation_duration", "precipitation_duration", "precipitation_h", "pressure"]
df[col10] = df[col10] / 10
return df
def preprocess_metpre_data(df_raw):
"""
To be filled
Documentation: https://www.meteoserver.nl/weersverwachting-API.php
"""
# rename columns
df = df_raw.rename(columns={"windr": "wind_direction", "rv": "relative_humidity", "luchtd": "pressure",
"temp": "temperature", "windb": "wind_force", "winds": "wind_speed",
"gust": "wind_gust", "vis": "sight_m", "neersl": "precipitation_h",
"gr": "global_radiation", "tw": "clouds"
})
# drop duplicates
df = df.drop_duplicates()
# get proper datetime column
df["datetime"] = pd.to_datetime(df['tijd'], unit='s', utc = True)
df["datetime"] = df["datetime"] + pd.to_timedelta(1, unit = 'hours') ## klopt dan beter, maar waarom?
df = df.sort_values(by = "datetime", ascending = True)
df = df.reset_index(drop = True)
df["datetime"] = df["datetime"].dt.tz_convert("Europe/Amsterdam")
# new column: forecast created on
df["offset_h"] = df["offset"].astype(float)
#df["datetime_predicted"] = df["datetime"] - pd.to_timedelta(df["offset_h"], unit = 'hours')
# select only data after starting datetime
#df = df[df['datetime'] >= start_ds] # @me: move this to query later
    # select latest prediction # makes sense for the prediction set, less so for the training set
df = df.sort_values(by = ['datetime', 'offset_h'])
df = df.drop_duplicates(subset = 'datetime', keep = 'first')
# drop unwanted columns
df = df.drop(['tijd', 'tijd_nl', 'loc',
'icoon', 'samenv', 'ico',
'cape', 'cond', 'luchtdmmhg', 'luchtdinhg',
'windkmh', 'windknp', 'windrltr', 'wind_force',
                  'gustb', 'gustkt', 'gustkmh', 'wind_gust', # these are missing before 14 June
'hw', 'mw', 'lw',
'offset', 'offset_h',
'gr_w'], axis = 'columns', errors = 'ignore')
# set datatypes of weather data to float
df = df.set_index('datetime')
df = df.astype('float64').reset_index()
# cloud cover similar to observations (0-9) & sight, but not really the same thing
df['cloud_cover'] = df['clouds'] / 12.5
df['sight'] = df['sight_m'] / 333
    df = df.drop(['clouds', 'sight_m'], axis = 'columns')
    # forward-fill the hourly forecast so every hour in the prediction range has a value
    df_hour = df.set_index('datetime').resample('1h').ffill(limit = 11)
    # maybe smooth later? does not seem to help the forecast for now
#df_smooth = df_15.apply(lambda x: savgol_filter(x,17,2))
#df_smooth = df_smooth.reset_index()
df_hour = df_hour.reset_index()
df_hour['date'] = df_hour['datetime'].dt.date
df_hour['date'] = df_hour['date'].astype('datetime64[ns]')
df_hour['hour'] = df_hour['datetime'].dt.hour
return df_hour # df_smooth
def preprocess_covid_data(df_raw):
# Put data to dataframe
df_raw_unpack = df_raw.T['NLD'].dropna()
df = pd.DataFrame.from_records(df_raw_unpack) # Add datetime column
df['datetime'] = pd.to_datetime(df['date_value']) # Select columns
df_sel = df[['datetime', 'stringency']] # extend dataframe to 14 days in future (based on latest value)
dates_future = pd.date_range(df['datetime'].iloc[-1], periods = 14, freq='1d')
df_future = pd.DataFrame(data = {'datetime': dates_future,
'stringency': df['stringency'].iloc[-1]}) # Add together and set index
df_final = df_sel.append(df_future.iloc[1:])
df_final = df_final.set_index('datetime')
return df_final
def preprocess_holiday_data(holidays):
df = pd.DataFrame(holidays, columns=['Date', 'Holiday'])
df['Date'] = df['Date'].astype('datetime64[ns]')
return df
def interpolate_missing_values(data_to_interpolate):
df = data_to_interpolate.copy()
    random_state_value = 1 # Ensure reproducibility
# Train check-ins interpolator
checkins_interpolator_cols = ['hour', 'year', 'weekday', 'month', 'stringency', 'holiday', 'check-outs']
checkins_interpolator_targets = ['check-ins']
X_train = df.dropna()[checkins_interpolator_cols]
y_train = df.dropna()[checkins_interpolator_targets]
checkins_interpolator = RandomForestRegressor(random_state=random_state_value)
checkins_interpolator.fit(X_train, y_train)
# Train check-outs interpolator
checkouts_interpolator_cols = ['hour', 'year', 'weekday', 'month', 'stringency', 'holiday', 'check-ins']
checkouts_interpolator_targets = ['check-outs']
X_train = df.dropna()[checkouts_interpolator_cols]
y_train = df.dropna()[checkouts_interpolator_targets]
checkouts_interpolator = RandomForestRegressor(random_state=random_state_value)
checkouts_interpolator.fit(X_train, y_train)
    # Keep only rows that can be interpolated (drop rows where both check-ins and check-outs are missing)
    df_to_interpolate = df.drop(df.loc[(df['check-ins'].isna()==True) & (df['check-outs'].isna()==True)].index)
# Interpolate check-ins
checkins_missing = df_to_interpolate[(df_to_interpolate['check-outs'].isna()==False) & (df_to_interpolate['check-ins'].isna()==True)].copy()
checkins_missing['stringency'] = checkins_missing['stringency'].replace(np.nan, 0)
checkins_missing['check-ins'] = checkins_interpolator.predict(checkins_missing[['hour', 'year', 'weekday', 'month', 'stringency', 'holiday', 'check-outs']])
# Interpolate check-outs
checkouts_missing = df_to_interpolate[(df_to_interpolate['check-ins'].isna()==False) & (df_to_interpolate['check-outs'].isna()==True)].copy()
checkouts_missing['stringency'] = checkouts_missing['stringency'].replace(np.nan, 0)
checkouts_missing['check-outs'] = checkouts_interpolator.predict(checkouts_missing[['hour', 'year', 'weekday', 'month', 'stringency', 'holiday', 'check-ins']])
# Insert interpolated values into main dataframe
for index, row in checkins_missing.iterrows():
df.loc[df.index==index, 'check-ins'] = row['check-ins']
for index, row in checkouts_missing.iterrows():
df.loc[df.index==index, 'check-outs'] = row['check-outs']
return df
def get_crowd_last_week(df, row):
week_ago = row['datetime'] - timedelta(weeks=1)
subset_with_hour = df[(df['datetime']==week_ago) & (df['hour']==row['hour'])]
# If crowd from last week is not available at exact date- and hour combination, then get average crowd of last week.
subset_week_ago = df[(df['year']==row['year']) & (df['week']==row['week']) & (df['hour']==row['hour'])]
checkins_week_ago = 0
checkouts_week_ago = 0
if len(subset_with_hour) > 0: # return crowd from week ago at the same day/time (hour)
checkins_week_ago = subset_with_hour['check-ins'].mean()
checkouts_week_ago = subset_with_hour['check-outs'].mean()
    elif len(subset_week_ago) > 0: # return the average crowd for the same hour group a week ago
checkins_week_ago = subset_week_ago['check-ins'].mean()
checkouts_week_ago = subset_week_ago['check-outs'].mean()
return [checkins_week_ago, checkouts_week_ago]
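# Illustrative usage sketch (an assumption, not part of the original pipeline):
# get_crowd_last_week is designed to be applied row-wise. The frames and column
# names below are placeholders, mirroring the apply(...) call used further down
# in this module.
def _example_crowd_last_week(history_df, future_df):
    return future_df.apply(lambda row: get_crowd_last_week(history_df, row),
                           axis=1, result_type="expand")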
def get_train_test_split(df):
"""
Create train and test split for 1-week ahead models. This means that the last week of the data will be used
as a test set and the rest will be the training set.
"""
    most_recent_date = df['datetime'].max()
    last_week = pd.date_range(most_recent_date-pd.Timedelta(7, unit='D')+pd.DateOffset(1), most_recent_date)
train = df[df['datetime']<last_week.min()]
test = df[(df['datetime']>=last_week.min()) & (df['datetime']<=last_week.max())]
return [train, test]
def get_train_val_test_split(df):
"""
Create train, validation, and test split for 1-week ahead models. This means that the last week of the data will be used
as a test set, the second-last will be the validation set, and the rest will be the training set.
"""
    most_recent_date = df['datetime'].max()
    last_week = pd.date_range(most_recent_date-pd.Timedelta(7, unit='D')+pd.DateOffset(1), most_recent_date)
two_weeks_before = pd.date_range(last_week.min()-pd.Timedelta(7, unit='D'), last_week.min()-pd.DateOffset(1))
train = df[df['datetime']<two_weeks_before.min()]
validation = df[(df['datetime']>=two_weeks_before.min()) & (df['datetime']<=two_weeks_before.max())]
test = df[(df['datetime']>=last_week.min()) & (df['datetime']<=last_week.max())]
return [train, validation, test]
def get_future_df(features, gvb_data, covid_stringency, holidays, vacations, weather, events):
"""
    Create an empty data frame for predictions of the target variable for the specified prediction period.
"""
this_year = date.today().isocalendar()[0]
this_week = date.today().isocalendar()[1]
firstdayofweek = datetime.strptime(f'{this_year}-W{int(this_week )}-1', "%Y-W%W-%w").date()
prediction_date_range = pd.date_range(firstdayofweek, periods=8, freq='D')
prediction_date_range_hour = pd.date_range(prediction_date_range.min(), prediction_date_range.max(), freq='h').delete(-1)
# Create variables
df = pd.DataFrame({'datetime':prediction_date_range_hour})
df['hour'] = df.apply(lambda x: x['datetime'].hour, axis=1)
df['week'] = df['datetime'].dt.isocalendar().week
df['month'] = df['datetime'].dt.month
df['year'] = df['datetime'].dt.year
df['weekday'] = df['datetime'].dt.weekday
df['stringency'] = covid_stringency
df['datetime'] = df.apply(lambda x: x['datetime'].date(), axis=1)
df['datetime'] = df['datetime'].astype('datetime64[ns]')
#adding sin and cosine features
df["hour_norm"] = 2 * math.pi * df["hour"] / df["hour"].max()
df["cos_hour"] = np.cos(df["hour_norm"])
df["sin_hour"] = np.sin(df["hour_norm"])
df["month_norm"] = 2 * math.pi * df["month"] / df["month"].max()
df["cos_month"] = np.cos(df["month_norm"])
df["sin_month"] = np.sin(df["month_norm"])
df["weekday_norm"] = 2 * math.pi * df["weekday"] / df["weekday"].max()
df["cos_weekday"] = np.cos(df["weekday_norm"])
df["sin_weekday"] = np.sin(df["weekday_norm"])
#adding dummy variable for peak hour
df['peak_period'] = 0
    df.loc[df['hour'].isin([7,8,17,18]), 'peak_period'] = 1
# Set holidays, vacations, and events
df['holiday'] = np.where((df['datetime'].isin(holidays['Date'].values)), 1, 0)
df['vacation'] = np.where((df['datetime'].isin(vacations['date'].values)), 1, 0)
# Get events from database in future!
df['planned_event'] = np.where((df['datetime'].isin(events['Datum'].values)), 1, 0)
# Set forecast for temperature, rain, and wind speed.
df = pd.merge(left=df, right=weather.drop(columns=['datetime']), left_on=['datetime', 'hour'], right_on=['date', 'hour'], how='left')
df.drop(columns=['date'], inplace=True)
# Set recent crowd
df[['check-ins_week_ago', 'check-outs_week_ago']] = df.apply(lambda x: get_crowd_last_week(gvb_data, x), axis=1, result_type="expand")
    if 'datetime' not in features:
features.append('datetime') # Add datetime to make storing in database easier
return df[features]
def train_random_forest_regressor(X_train, y_train, X_val, y_val, hyperparameters=None):
    if hyperparameters is None:
model = RandomForestRegressor(random_state=1).fit(X_train, y_train)
else:
model = RandomForestRegressor(**hyperparameters, random_state=1).fit(X_train, y_train)
y_pred = model.predict(X_val)
r_squared = metrics.r2_score(y_val, y_pred)
mae = metrics.mean_absolute_error(y_val, y_pred)
rmse = np.sqrt(metrics.mean_squared_error(y_val, y_pred))
return [model, r_squared, mae, rmse]
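# Illustrative sketch (an assumption, not original code) of how the split helper and
# train_random_forest_regressor are typically combined. The feature and target column
# names are hypothetical placeholders; the real feature list lives elsewhere in the project.
def _example_train_pipeline(df):
    features = ['hour', 'weekday', 'month', 'holiday', 'vacation', 'planned_event',
                'stringency', 'check-ins_week_ago', 'check-outs_week_ago']  # assumed
    targets = ['check-ins', 'check-outs']  # assumed
    train, validation, test = get_train_val_test_split(df.dropna())
    model, r_squared, mae, rmse = train_random_forest_regressor(
        train[features], train[targets], validation[features], validation[targets])
    return model, r_squared, mae, rmse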
def merge_gvb_with_datasources(gvb, weather, covid, holidays, vacations, events):
gvb_merged = pd.merge(left=gvb, right=weather, left_on=['datetime', 'hour'], right_on=['date', 'hour'], how='left')
gvb_merged.drop(columns=['date'], inplace=True)
gvb_merged = pd.merge(gvb_merged, covid['stringency'], left_on='datetime', right_index=True, how='left')
gvb_merged['holiday'] = np.where((gvb_merged['datetime'].isin(holidays['Date'].values)), 1, 0)
gvb_merged['vacation'] = np.where((gvb_merged['datetime'].isin(vacations['date'].values)), 1, 0)
gvb_merged['planned_event'] = np.where((gvb_merged['datetime'].isin(events['Datum'].values)), 1, 0)
return gvb_merged
def predict(model, X_predict):
y_predict = model.predict(X_predict.drop(columns=['datetime']))
predictions = X_predict.copy()
predictions['check-ins_predicted'] = y_predict[:,0]
predictions['check-outs_predicted'] = y_predict[:,1]
return predictions
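# Illustrative sketch (an assumption): producing the one-week-ahead predictions by
# combining get_future_df and predict. All arguments are placeholders for objects
# built earlier in the pipeline.
def _example_predict_next_week(model, features, gvb_data, stringency, holidays,
                               vacations, weather, events):
    X_future = get_future_df(features, gvb_data, stringency, holidays,
                             vacations, weather, events)
    return predict(model, X_future)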
def set_station_type(df, static_gvb):
stationtypes = static_gvb[['arrival_stop_code', 'type']]
return pd.merge(left=df, right=stationtypes, left_on='HalteCode', right_on='arrival_stop_code', how='inner')
def merge_bestemming_herkomst_stop_level(bestemming, herkomst):
bestemming.rename(columns={'AantalReizen':'Uitchecks',
'UurgroepOmschrijving (van aankomst)':'UurgroepOmschrijving',
'AankomstHalteCode':'HalteCode',
'AankomstHalteNaam':'HalteNaam'}, inplace=True)
herkomst.rename(columns={'AantalReizen':'Inchecks',
'UurgroepOmschrijving (van vertrek)':'UurgroepOmschrijving',
'VertrekHalteCode':'HalteCode',
'VertrekHalteNaam':'HalteNaam'}, inplace=True)
merged = pd.merge(left=bestemming, right=herkomst,
left_on=['Datum', 'UurgroepOmschrijving', 'HalteCode', 'HalteNaam'],
right_on=['Datum', 'UurgroepOmschrijving', 'HalteCode', 'HalteNaam'],
how='outer')
return merged
def get_crowd_last_week_stop_level(df, row):
week_ago = row['datetime'] - timedelta(weeks=1)
subset_with_hour = df[(df['type_metro']==row['type_metro']) & (df['type_tram/bus']==row['type_tram/bus']) &
(df['datetime']==week_ago) & (df['hour']==row['hour'])]
# If crowd from last week is not available at exact date- and hour combination, then get average crowd of last week.
subset_week_ago = df[(df['type_metro']==row['type_metro']) & (df['type_tram/bus']==row['type_tram/bus']) &
(df['year']==row['year']) & (df['week']==row['week']) & (df['hour']==row['hour'])]
checkins_week_ago = 0
checkouts_week_ago = 0
if len(subset_with_hour) > 0: # return crowd from week ago at the same day/time (hour)
checkins_week_ago = subset_with_hour['check-ins'].mean()
checkouts_week_ago = subset_with_hour['check-outs'].mean()
    elif len(subset_week_ago) > 0: # return the average crowd for the same hour group a week ago
checkins_week_ago = subset_week_ago['check-ins'].mean()
checkouts_week_ago = subset_week_ago['check-outs'].mean()
return [checkins_week_ago, checkouts_week_ago]
"""
Below are old functions which are not used for the prediction models.
"""
def preprocess_gvb_data(df):
# create datetime column
df['date'] = pd.to_datetime(df['Datum'])
df['time'] = df['UurgroepOmschrijving (van aankomst)'].astype(str).str[:5]
df['datetime'] = df['date'].astype(str) + " " + df['time']
df['datetime'] = | pd.to_datetime(df['datetime']) | pandas.to_datetime |
# Copyright (c) 2018-2022, NVIDIA CORPORATION.
import numpy as np
import pandas as pd
import pytest
from pandas.api import types as ptypes
import cudf
from cudf.api import types as types
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), False),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, False),
(int, False),
(float, False),
(complex, False),
(str, False),
(object, False),
# NumPy types.
(np.bool_, False),
(np.int_, False),
(np.float64, False),
(np.complex128, False),
(np.str_, False),
(np.unicode_, False),
(np.datetime64, False),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), False),
(np.int_(), False),
(np.float64(), False),
(np.complex128(), False),
(np.str_(), False),
(np.unicode_(), False),
(np.datetime64(), False),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), False),
(np.dtype("int"), False),
(np.dtype("float"), False),
(np.dtype("complex"), False),
(np.dtype("str"), False),
(np.dtype("unicode"), False),
(np.dtype("datetime64"), False),
(np.dtype("timedelta64"), False),
(np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), False),
(np.array([], dtype=np.int_), False),
(np.array([], dtype=np.float64), False),
(np.array([], dtype=np.complex128), False),
(np.array([], dtype=np.str_), False),
(np.array([], dtype=np.unicode_), False),
(np.array([], dtype=np.datetime64), False),
(np.array([], dtype=np.timedelta64), False),
(np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, True),
(pd.CategoricalDtype, True),
# Pandas objects.
(pd.Series(dtype="bool"), False),
(pd.Series(dtype="int"), False),
(pd.Series(dtype="float"), False),
(pd.Series(dtype="complex"), False),
(pd.Series(dtype="str"), False),
(pd.Series(dtype="unicode"), False),
(pd.Series(dtype="datetime64[s]"), False),
(pd.Series(dtype="timedelta64[s]"), False),
(pd.Series(dtype="category"), True),
(pd.Series(dtype="object"), False),
# cuDF dtypes.
(cudf.CategoricalDtype, True),
(cudf.ListDtype, False),
(cudf.StructDtype, False),
(cudf.Decimal128Dtype, False),
(cudf.Decimal64Dtype, False),
(cudf.Decimal32Dtype, False),
(cudf.IntervalDtype, False),
# cuDF dtype instances.
(cudf.CategoricalDtype("a"), True),
(cudf.ListDtype(int), False),
(cudf.StructDtype({"a": int}), False),
(cudf.Decimal128Dtype(5, 2), False),
(cudf.Decimal64Dtype(5, 2), False),
(cudf.Decimal32Dtype(5, 2), False),
(cudf.IntervalDtype(int), False),
# cuDF objects
(cudf.Series(dtype="bool"), False),
(cudf.Series(dtype="int"), False),
(cudf.Series(dtype="float"), False),
(cudf.Series(dtype="str"), False),
(cudf.Series(dtype="datetime64[s]"), False),
(cudf.Series(dtype="timedelta64[s]"), False),
(cudf.Series(dtype="category"), True),
(cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), False),
# TODO: Currently creating an empty Series of list type ignores the
# provided type and instead makes a float64 Series.
(cudf.Series([[1, 2], [3, 4, 5]]), False),
# TODO: Currently creating an empty Series of struct type fails because
# it uses a numpy utility that doesn't understand StructDtype.
(cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False),
(cudf.Series(dtype=cudf.IntervalDtype(int)), False),
),
)
def test_is_categorical_dtype(obj, expect):
assert types.is_categorical_dtype(obj) == expect
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), False),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, True),
(int, True),
(float, True),
(complex, True),
(str, False),
(object, False),
# NumPy types.
(np.bool_, True),
(np.int_, True),
(np.float64, True),
(np.complex128, True),
(np.str_, False),
(np.unicode_, False),
(np.datetime64, False),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), True),
(np.int_(), True),
(np.float64(), True),
(np.complex128(), True),
(np.str_(), False),
(np.unicode_(), False),
(np.datetime64(), False),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), True),
(np.dtype("int"), True),
(np.dtype("float"), True),
(np.dtype("complex"), True),
(np.dtype("str"), False),
(np.dtype("unicode"), False),
(np.dtype("datetime64"), False),
(np.dtype("timedelta64"), False),
(np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), True),
(np.array([], dtype=np.int_), True),
(np.array([], dtype=np.float64), True),
(np.array([], dtype=np.complex128), True),
(np.array([], dtype=np.str_), False),
(np.array([], dtype=np.unicode_), False),
(np.array([], dtype=np.datetime64), False),
(np.array([], dtype=np.timedelta64), False),
(np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, False),
(pd.CategoricalDtype, False),
# Pandas objects.
(pd.Series(dtype="bool"), True),
(pd.Series(dtype="int"), True),
(pd.Series(dtype="float"), True),
(pd.Series(dtype="complex"), True),
(pd.Series(dtype="str"), False),
(pd.Series(dtype="unicode"), False),
(pd.Series(dtype="datetime64[s]"), False),
(pd.Series(dtype="timedelta64[s]"), False),
(pd.Series(dtype="category"), False),
(pd.Series(dtype="object"), False),
# cuDF dtypes.
(cudf.CategoricalDtype, False),
(cudf.ListDtype, False),
(cudf.StructDtype, False),
(cudf.Decimal128Dtype, True),
(cudf.Decimal64Dtype, True),
(cudf.Decimal32Dtype, True),
(cudf.IntervalDtype, False),
# cuDF dtype instances.
(cudf.CategoricalDtype("a"), False),
(cudf.ListDtype(int), False),
(cudf.StructDtype({"a": int}), False),
(cudf.Decimal128Dtype(5, 2), True),
(cudf.Decimal64Dtype(5, 2), True),
(cudf.Decimal32Dtype(5, 2), True),
(cudf.IntervalDtype(int), False),
# cuDF objects
(cudf.Series(dtype="bool"), True),
(cudf.Series(dtype="int"), True),
(cudf.Series(dtype="float"), True),
(cudf.Series(dtype="str"), False),
(cudf.Series(dtype="datetime64[s]"), False),
(cudf.Series(dtype="timedelta64[s]"), False),
(cudf.Series(dtype="category"), False),
(cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), True),
(cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), True),
(cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), True),
(cudf.Series([[1, 2], [3, 4, 5]]), False),
(cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False),
(cudf.Series(dtype=cudf.IntervalDtype(int)), False),
),
)
def test_is_numeric_dtype(obj, expect):
assert types.is_numeric_dtype(obj) == expect
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), False),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, False),
(int, True),
(float, False),
(complex, False),
(str, False),
(object, False),
# NumPy types.
(np.bool_, False),
(np.int_, True),
(np.float64, False),
(np.complex128, False),
(np.str_, False),
(np.unicode_, False),
(np.datetime64, False),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), False),
(np.int_(), True),
(np.float64(), False),
(np.complex128(), False),
(np.str_(), False),
(np.unicode_(), False),
(np.datetime64(), False),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), False),
(np.dtype("int"), True),
(np.dtype("float"), False),
(np.dtype("complex"), False),
(np.dtype("str"), False),
(np.dtype("unicode"), False),
(np.dtype("datetime64"), False),
(np.dtype("timedelta64"), False),
(np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), False),
(np.array([], dtype=np.int_), True),
(np.array([], dtype=np.float64), False),
(np.array([], dtype=np.complex128), False),
(np.array([], dtype=np.str_), False),
(np.array([], dtype=np.unicode_), False),
(np.array([], dtype=np.datetime64), False),
(np.array([], dtype=np.timedelta64), False),
(np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, False),
(pd.CategoricalDtype, False),
# Pandas objects.
(pd.Series(dtype="bool"), False),
(pd.Series(dtype="int"), True),
(pd.Series(dtype="float"), False),
(pd.Series(dtype="complex"), False),
(pd.Series(dtype="str"), False),
(pd.Series(dtype="unicode"), False),
(pd.Series(dtype="datetime64[s]"), False),
(pd.Series(dtype="timedelta64[s]"), False),
(pd.Series(dtype="category"), False),
(pd.Series(dtype="object"), False),
# cuDF dtypes.
(cudf.CategoricalDtype, False),
(cudf.ListDtype, False),
(cudf.StructDtype, False),
(cudf.Decimal128Dtype, False),
(cudf.Decimal64Dtype, False),
(cudf.Decimal32Dtype, False),
(cudf.IntervalDtype, False),
# cuDF dtype instances.
(cudf.CategoricalDtype("a"), False),
(cudf.ListDtype(int), False),
(cudf.StructDtype({"a": int}), False),
(cudf.Decimal128Dtype(5, 2), False),
(cudf.Decimal64Dtype(5, 2), False),
(cudf.Decimal32Dtype(5, 2), False),
(cudf.IntervalDtype(int), False),
# cuDF objects
(cudf.Series(dtype="bool"), False),
(cudf.Series(dtype="int"), True),
(cudf.Series(dtype="float"), False),
(cudf.Series(dtype="str"), False),
(cudf.Series(dtype="datetime64[s]"), False),
(cudf.Series(dtype="timedelta64[s]"), False),
(cudf.Series(dtype="category"), False),
(cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), False),
(cudf.Series([[1, 2], [3, 4, 5]]), False),
(cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False),
(cudf.Series(dtype=cudf.IntervalDtype(int)), False),
),
)
def test_is_integer_dtype(obj, expect):
assert types.is_integer_dtype(obj) == expect
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), True),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, False),
(int, False),
(float, False),
(complex, False),
(str, False),
(object, False),
# NumPy types.
(np.bool_, False),
(np.int_, False),
(np.float64, False),
(np.complex128, False),
(np.str_, False),
(np.unicode_, False),
(np.datetime64, False),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), False),
(np.int_(), True),
(np.float64(), False),
(np.complex128(), False),
(np.str_(), False),
(np.unicode_(), False),
(np.datetime64(), False),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), False),
(np.dtype("int"), False),
(np.dtype("float"), False),
(np.dtype("complex"), False),
(np.dtype("str"), False),
(np.dtype("unicode"), False),
(np.dtype("datetime64"), False),
(np.dtype("timedelta64"), False),
(np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), False),
(np.array([], dtype=np.int_), False),
(np.array([], dtype=np.float64), False),
(np.array([], dtype=np.complex128), False),
(np.array([], dtype=np.str_), False),
(np.array([], dtype=np.unicode_), False),
(np.array([], dtype=np.datetime64), False),
(np.array([], dtype=np.timedelta64), False),
(np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, False),
(pd.CategoricalDtype, False),
# Pandas objects.
(pd.Series(dtype="bool"), False),
(pd.Series(dtype="int"), False),
(pd.Series(dtype="float"), False),
(pd.Series(dtype="complex"), False),
(pd.Series(dtype="str"), False),
(pd.Series(dtype="unicode"), False),
(pd.Series(dtype="datetime64[s]"), False),
(pd.Series(dtype="timedelta64[s]"), False),
(pd.Series(dtype="category"), False),
(pd.Series(dtype="object"), False),
# cuDF dtypes.
(cudf.CategoricalDtype, False),
(cudf.ListDtype, False),
(cudf.StructDtype, False),
(cudf.Decimal128Dtype, False),
(cudf.Decimal64Dtype, False),
(cudf.Decimal32Dtype, False),
(cudf.IntervalDtype, False),
# cuDF dtype instances.
(cudf.CategoricalDtype("a"), False),
(cudf.ListDtype(int), False),
(cudf.StructDtype({"a": int}), False),
(cudf.Decimal128Dtype(5, 2), False),
(cudf.Decimal64Dtype(5, 2), False),
(cudf.Decimal32Dtype(5, 2), False),
(cudf.IntervalDtype(int), False),
# cuDF objects
(cudf.Series(dtype="bool"), False),
(cudf.Series(dtype="int"), False),
(cudf.Series(dtype="float"), False),
(cudf.Series(dtype="str"), False),
(cudf.Series(dtype="datetime64[s]"), False),
(cudf.Series(dtype="timedelta64[s]"), False),
(cudf.Series(dtype="category"), False),
(cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), False),
(cudf.Series([[1, 2], [3, 4, 5]]), False),
(cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False),
(cudf.Series(dtype=cudf.IntervalDtype(int)), False),
),
)
def test_is_integer(obj, expect):
assert types.is_integer(obj) == expect
# TODO: Temporarily ignoring all cases of "object" until we decide what to do.
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), False),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, False),
(int, False),
(float, False),
(complex, False),
(str, True),
# (object, False),
# NumPy types.
(np.bool_, False),
(np.int_, False),
(np.float64, False),
(np.complex128, False),
(np.str_, True),
(np.unicode_, True),
(np.datetime64, False),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), False),
(np.int_(), False),
(np.float64(), False),
(np.complex128(), False),
(np.str_(), True),
(np.unicode_(), True),
(np.datetime64(), False),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), False),
(np.dtype("int"), False),
(np.dtype("float"), False),
(np.dtype("complex"), False),
(np.dtype("str"), True),
(np.dtype("unicode"), True),
(np.dtype("datetime64"), False),
(np.dtype("timedelta64"), False),
# (np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), False),
(np.array([], dtype=np.int_), False),
(np.array([], dtype=np.float64), False),
(np.array([], dtype=np.complex128), False),
(np.array([], dtype=np.str_), True),
(np.array([], dtype=np.unicode_), True),
(np.array([], dtype=np.datetime64), False),
(np.array([], dtype=np.timedelta64), False),
# (np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, False),
(pd.CategoricalDtype, False),
# Pandas objects.
(pd.Series(dtype="bool"), False),
(pd.Series(dtype="int"), False),
(pd.Series(dtype="float"), False),
(pd.Series(dtype="complex"), False),
(pd.Series(dtype="str"), True),
(pd.Series(dtype="unicode"), True),
(pd.Series(dtype="datetime64[s]"), False),
(pd.Series(dtype="timedelta64[s]"), False),
(pd.Series(dtype="category"), False),
# (pd.Series(dtype="object"), False),
# cuDF dtypes.
(cudf.CategoricalDtype, False),
(cudf.ListDtype, False),
(cudf.StructDtype, False),
(cudf.Decimal128Dtype, False),
(cudf.Decimal64Dtype, False),
(cudf.Decimal32Dtype, False),
(cudf.IntervalDtype, False),
# cuDF dtype instances.
(cudf.CategoricalDtype("a"), False),
(cudf.ListDtype(int), False),
(cudf.StructDtype({"a": int}), False),
(cudf.Decimal128Dtype(5, 2), False),
(cudf.Decimal64Dtype(5, 2), False),
(cudf.Decimal32Dtype(5, 2), False),
(cudf.IntervalDtype(int), False),
# cuDF objects
(cudf.Series(dtype="bool"), False),
(cudf.Series(dtype="int"), False),
(cudf.Series(dtype="float"), False),
(cudf.Series(dtype="str"), True),
(cudf.Series(dtype="datetime64[s]"), False),
(cudf.Series(dtype="timedelta64[s]"), False),
(cudf.Series(dtype="category"), False),
(cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), False),
(cudf.Series([[1, 2], [3, 4, 5]]), False),
(cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False),
(cudf.Series(dtype=cudf.IntervalDtype(int)), False),
),
)
def test_is_string_dtype(obj, expect):
assert types.is_string_dtype(obj) == expect
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), False),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, False),
(int, False),
(float, False),
(complex, False),
(str, False),
(object, False),
# NumPy types.
(np.bool_, False),
(np.int_, False),
(np.float64, False),
(np.complex128, False),
(np.str_, False),
(np.unicode_, False),
(np.datetime64, True),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), False),
(np.int_(), False),
(np.float64(), False),
(np.complex128(), False),
(np.str_(), False),
(np.unicode_(), False),
(np.datetime64(), True),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), False),
(np.dtype("int"), False),
(np.dtype("float"), False),
(np.dtype("complex"), False),
(np.dtype("str"), False),
(np.dtype("unicode"), False),
(np.dtype("datetime64"), True),
(np.dtype("timedelta64"), False),
(np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), False),
(np.array([], dtype=np.int_), False),
(np.array([], dtype=np.float64), False),
(np.array([], dtype=np.complex128), False),
(np.array([], dtype=np.str_), False),
(np.array([], dtype=np.unicode_), False),
(np.array([], dtype=np.datetime64), True),
(np.array([], dtype=np.timedelta64), False),
(np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, False),
(pd.CategoricalDtype, False),
# Pandas objects.
(pd.Series(dtype="bool"), False),
(pd.Series(dtype="int"), False),
(pd.Series(dtype="float"), False),
(pd.Series(dtype="complex"), False),
(pd.Series(dtype="str"), False),
(pd.Series(dtype="unicode"), False),
(pd.Series(dtype="datetime64[s]"), True),
(pd.Series(dtype="timedelta64[s]"), False),
(pd.Series(dtype="category"), False),
(pd.Series(dtype="object"), False),
# cuDF dtypes.
(cudf.CategoricalDtype, False),
(cudf.ListDtype, False),
(cudf.StructDtype, False),
(cudf.Decimal128Dtype, False),
(cudf.Decimal64Dtype, False),
(cudf.Decimal32Dtype, False),
(cudf.IntervalDtype, False),
# cuDF dtype instances.
(cudf.CategoricalDtype("a"), False),
(cudf.ListDtype(int), False),
(cudf.StructDtype({"a": int}), False),
(cudf.Decimal128Dtype(5, 2), False),
(cudf.Decimal64Dtype(5, 2), False),
(cudf.Decimal32Dtype(5, 2), False),
(cudf.IntervalDtype(int), False),
# cuDF objects
(cudf.Series(dtype="bool"), False),
(cudf.Series(dtype="int"), False),
(cudf.Series(dtype="float"), False),
(cudf.Series(dtype="str"), False),
(cudf.Series(dtype="datetime64[s]"), True),
(cudf.Series(dtype="timedelta64[s]"), False),
(cudf.Series(dtype="category"), False),
(cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), False),
(cudf.Series([[1, 2], [3, 4, 5]]), False),
(cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False),
(cudf.Series(dtype=cudf.IntervalDtype(int)), False),
),
)
def test_is_datetime_dtype(obj, expect):
assert types.is_datetime_dtype(obj) == expect
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), False),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, False),
(int, False),
(float, False),
(complex, False),
(str, False),
(object, False),
# NumPy types.
(np.bool_, False),
(np.int_, False),
(np.float64, False),
(np.complex128, False),
(np.str_, False),
(np.unicode_, False),
(np.datetime64, False),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), False),
(np.int_(), False),
(np.float64(), False),
(np.complex128(), False),
(np.str_(), False),
(np.unicode_(), False),
(np.datetime64(), False),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), False),
(np.dtype("int"), False),
(np.dtype("float"), False),
(np.dtype("complex"), False),
(np.dtype("str"), False),
(np.dtype("unicode"), False),
(np.dtype("datetime64"), False),
(np.dtype("timedelta64"), False),
(np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), False),
(np.array([], dtype=np.int_), False),
(np.array([], dtype=np.float64), False),
(np.array([], dtype=np.complex128), False),
(np.array([], dtype=np.str_), False),
(np.array([], dtype=np.unicode_), False),
(np.array([], dtype=np.datetime64), False),
(np.array([], dtype=np.timedelta64), False),
(np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, False),
(pd.CategoricalDtype, False),
# Pandas objects.
(pd.Series(dtype="bool"), False),
(pd.Series(dtype="int"), False),
(pd.Series(dtype="float"), False),
(pd.Series(dtype="complex"), False),
(pd.Series(dtype="str"), False),
(pd.Series(dtype="unicode"), False),
(pd.Series(dtype="datetime64[s]"), False),
(pd.Series(dtype="timedelta64[s]"), False),
(pd.Series(dtype="category"), False),
(pd.Series(dtype="object"), False),
# cuDF dtypes.
(cudf.CategoricalDtype, False),
(cudf.ListDtype, True),
(cudf.StructDtype, False),
(cudf.Decimal128Dtype, False),
(cudf.Decimal64Dtype, False),
(cudf.Decimal32Dtype, False),
(cudf.IntervalDtype, False),
# cuDF dtype instances.
(cudf.CategoricalDtype("a"), False),
(cudf.ListDtype(int), True),
(cudf.StructDtype({"a": int}), False),
(cudf.Decimal128Dtype(5, 2), False),
(cudf.Decimal64Dtype(5, 2), False),
(cudf.Decimal32Dtype(5, 2), False),
(cudf.IntervalDtype(int), False),
# cuDF objects
(cudf.Series(dtype="bool"), False),
(cudf.Series(dtype="int"), False),
(cudf.Series(dtype="float"), False),
(cudf.Series(dtype="str"), False),
(cudf.Series(dtype="datetime64[s]"), False),
(cudf.Series(dtype="timedelta64[s]"), False),
(cudf.Series(dtype="category"), False),
(cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), False),
(cudf.Series([[1, 2], [3, 4, 5]]), True),
(cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False),
(cudf.Series(dtype=cudf.IntervalDtype(int)), False),
),
)
def test_is_list_dtype(obj, expect):
assert types.is_list_dtype(obj) == expect
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), False),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, False),
(int, False),
(float, False),
(complex, False),
(str, False),
(object, False),
# NumPy types.
(np.bool_, False),
(np.int_, False),
(np.float64, False),
(np.complex128, False),
(np.str_, False),
(np.unicode_, False),
(np.datetime64, False),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), False),
(np.int_(), False),
(np.float64(), False),
(np.complex128(), False),
(np.str_(), False),
(np.unicode_(), False),
(np.datetime64(), False),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), False),
(np.dtype("int"), False),
(np.dtype("float"), False),
(np.dtype("complex"), False),
(np.dtype("str"), False),
(np.dtype("unicode"), False),
(np.dtype("datetime64"), False),
(np.dtype("timedelta64"), False),
(np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), False),
(np.array([], dtype=np.int_), False),
(np.array([], dtype=np.float64), False),
(np.array([], dtype=np.complex128), False),
(np.array([], dtype=np.str_), False),
(np.array([], dtype=np.unicode_), False),
(np.array([], dtype=np.datetime64), False),
(np.array([], dtype=np.timedelta64), False),
(np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, False),
(pd.CategoricalDtype, False),
# Pandas objects.
(pd.Series(dtype="bool"), False),
(pd.Series(dtype="int"), False),
(pd.Series(dtype="float"), False),
(pd.Series(dtype="complex"), False),
(pd.Series(dtype="str"), False),
(pd.Series(dtype="unicode"), False),
(pd.Series(dtype="datetime64[s]"), False),
(pd.Series(dtype="timedelta64[s]"), False),
(pd.Series(dtype="category"), False),
(pd.Series(dtype="object"), False),
# cuDF dtypes.
(cudf.CategoricalDtype, False),
(cudf.ListDtype, False),
(cudf.StructDtype, True),
(cudf.Decimal128Dtype, False),
(cudf.Decimal64Dtype, False),
(cudf.Decimal32Dtype, False),
# (cudf.IntervalDtype, False),
# cuDF dtype instances.
(cudf.CategoricalDtype("a"), False),
(cudf.ListDtype(int), False),
(cudf.StructDtype({"a": int}), True),
(cudf.Decimal128Dtype(5, 2), False),
(cudf.Decimal64Dtype(5, 2), False),
(cudf.Decimal32Dtype(5, 2), False),
# (cudf.IntervalDtype(int), False),
# cuDF objects
(cudf.Series(dtype="bool"), False),
(cudf.Series(dtype="int"), False),
(cudf.Series(dtype="float"), False),
(cudf.Series(dtype="str"), False),
(cudf.Series(dtype="datetime64[s]"), False),
(cudf.Series(dtype="timedelta64[s]"), False),
(cudf.Series(dtype="category"), False),
(cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), False),
(cudf.Series([[1, 2], [3, 4, 5]]), False),
(cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), True),
# (cudf.Series(dtype=cudf.IntervalDtype(int)), False),
),
)
def test_is_struct_dtype(obj, expect):
# TODO: All inputs of interval types are currently disabled due to
# inconsistent behavior of is_struct_dtype for interval types that will be
# fixed as part of the array refactor.
assert types.is_struct_dtype(obj) == expect
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), False),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, False),
(int, False),
(float, False),
(complex, False),
(str, False),
(object, False),
# NumPy types.
(np.bool_, False),
(np.int_, False),
(np.float64, False),
(np.complex128, False),
(np.str_, False),
(np.unicode_, False),
(np.datetime64, False),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), False),
(np.int_(), False),
(np.float64(), False),
(np.complex128(), False),
(np.str_(), False),
(np.unicode_(), False),
(np.datetime64(), False),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), False),
(np.dtype("int"), False),
(np.dtype("float"), False),
(np.dtype("complex"), False),
(np.dtype("str"), False),
(np.dtype("unicode"), False),
(np.dtype("datetime64"), False),
(np.dtype("timedelta64"), False),
(np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), False),
(np.array([], dtype=np.int_), False),
(np.array([], dtype=np.float64), False),
(np.array([], dtype=np.complex128), False),
(np.array([], dtype=np.str_), False),
(np.array([], dtype=np.unicode_), False),
(np.array([], dtype=np.datetime64), False),
(np.array([], dtype=np.timedelta64), False),
(np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, False),
(pd.CategoricalDtype, False),
# Pandas objects.
(pd.Series(dtype="bool"), False),
(pd.Series(dtype="int"), False),
( | pd.Series(dtype="float") | pandas.Series |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Plots
import matplotlib.pyplot as plt
import seaborn as sns
# Preprocessing
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA
from sklearn.cluster import MiniBatchKMeans
import gc
# LightGBM framework
import lightgbm as lgb
"""
From github:
A fast, distributed, high performance gradient boosting (GBDT, GBRT, GBM or MART)
framework based on decision tree algorithms,
used for ranking, classification and many other machine learning tasks.
LightGBM is a gradient boosting framework that uses tree based learning algorithms.
It is designed to be distributed and efficient with the following advantages:
Faster training speed and higher efficiency
Lower memory usage
Better accuracy
Parallel and GPU learning supported
Capable of handling large-scale data
"""
########
# Load the data
########
# load train and test data
train = pd.read_csv('train.csv')
test = | pd.read_csv('test.csv') | pandas.read_csv |
# Merge data from files and the database and load it into memory
from pandas import Timedelta, DataFrame, read_csv, to_datetime
from numpy import float32, polyfit, string_
from config import Config, str2array, PARAMS_TABLE_NAME, get_table_name, MINI_EPS, PARAMS_LIST
from sql_mapper import SQLMapper
from multiprocessing import Process
def view_data(data, num=20):
print(data.dtypes)
print(data[:num])
class DataPool:
ef_tables = dict()
params = None
save_start = dict()
# is_exist = dict()
    # Initialize configuration and parameters
@classmethod
def init(cls, by_file=True):
Config.init_from_file()
SQLMapper.class_init_by_config(Config.mysql_config)
cls.params = Config.PARAMS_TEMPLATE.copy(deep=True)
if SQLMapper.is_table_exist(PARAMS_TABLE_NAME):
cls.params = SQLMapper.select_params()
cls.params.set_index(["table_name"], drop=False, inplace=True)
        # Load all configured tables
if not by_file:
return
else:
for table_name in Config.device2path.keys():
cls.load_table(table_name)
@classmethod
def load_table(cls, table_name):
if SQLMapper.is_table_exist(table_name):
cls.ef_tables[table_name] = SQLMapper.select_16days(table_name)
else:
cls.ef_tables[table_name] = DataFrame()
print("start")
cls.save_start[table_name] = len(cls.ef_tables[table_name].index)
@classmethod
def read_instruction(cls, cmd):
cmd_arr = str2array(cmd)
new_df = DataFrame.from_dict({
"datetime": [cmd_arr[1]],
"temperature": [float32(cmd_arr[2])],
"strain": [float32(cmd_arr[3])],
})
new_df['height'] = new_df['stress'] = new_df['tsf'] = float32(0.0)
new_df['datetime'] = to_datetime(new_df['datetime'])
table_name = get_table_name(cmd_arr[0].strip())
cls.load_table(table_name)
print("Reading by cmd: " + cmd)
cls.ef_tables[table_name] = cls.ef_tables[table_name].append(new_df, ignore_index=True,
verify_integrity=True)
return [table_name]
@classmethod
def read_file(cls):
for table_name, import_file_name in Config.device2path.items():
print(table_name + ":" + import_file_name + "file is being read.")
file_data = read_csv(import_file_name, sep=',',
names=['datetime', 'temperature', 'strain'],
dtype={'datetime': string_, 'temperature': float32, 'strain': float32},
parse_dates=['datetime']
)
# datetime, temperature, strain, height, stress,
file_data['height'] = file_data['stress'] = file_data['tsf'] = float32(0.0)
cls.ef_tables[table_name] = cls.ef_tables[table_name].append(file_data, ignore_index=True,
verify_integrity=True)
# print(cls.ef_tables[table_name].info)
# view_data(cls.ef_tables[table_name])
return Config.device2path.keys()
@classmethod
def multi_process_fit(cls, table_names):
for table_name in table_names:
if table_name not in cls.params.index:
tmp = DataFrame([dict(zip(PARAMS_LIST, [table_name] + [0] * 8))], index=[table_name])
cls.params = cls.params.append(tmp)
print(cls.save_start)
process = [Process(target=cls.fit_one, args=(i,)) for i in table_names]
[p.start() for p in process]
[p.join() for p in process]
@classmethod
def fit_one(cls, table_name):
print(table_name + " SOLVING")
save_start = cls.save_start[table_name]
this_table = cls.ef_tables[table_name]
count = 0
if len(this_table.iloc[save_start:]) > 0:
for idx in range(save_start, len(this_table.index)):
count += 1
print("%s deal %d packet" %(table_name, count))
if cls.get_params(table_name, idx):
continue
cls.compute(table_name, idx)
@classmethod
def normal_fit_(cls, table_names):
for table_name in table_names:
if table_name not in cls.params.index:
tmp = DataFrame([dict(zip(PARAMS_LIST, [table_name] + [0] * 8))], index=[table_name])
cls.params = cls.params.append(tmp)
for table_name in table_names:
cls.fit_one(table_name)
@classmethod
def fit_params_by_least_square(cls, table_name, start, end):
this_table = cls.ef_tables[table_name].iloc[start: end]
x = this_table["temperature"].values.flatten()
y = this_table["strain"].values.flatten()
coefficient = polyfit(x, y, 1)
return coefficient[0], coefficient[1]
@classmethod
def get_params(cls, table_name, idx):
this_table = cls.ef_tables[table_name]
param_idx = cls.params.index.get_loc(table_name)
param_d = cls.params.iloc[param_idx].to_dict()
datetime_num = cls.ef_tables[table_name].columns.get_loc("datetime")
init_day = this_table.iloc[0, datetime_num].date()
now_day = this_table.iloc[idx, datetime_num].date()
yesterday = this_table.iloc[idx - 1, datetime_num].date()
is_diff_day = (now_day != yesterday)
past_days = (now_day - init_day).days
if past_days < 2:
return True
else:
if 2 <= past_days < 15 or (past_days == 15 and is_diff_day):
                # k, b: fit on all packets before the current one
param_d['k'], param_d['b'] = cls.fit_params_by_least_square(table_name, 0, idx)
param_d['k_packet_num'] = idx
else:
                # k, b: fit on a window of the same size as the previous calculation
param_d['k'], param_d['b'] = cls.fit_params_by_least_square(table_name,
idx - param_d["k_packet_num"] - 1, idx)
if is_diff_day and past_days in [2, 7, 15]:
                # k0, b0: fit on all packets before the current one
last_k0 = param_d['k0']
param_d['k0'], param_d['b0'] = cls.fit_params_by_least_square(table_name, 0, idx)
param_d['k0_packet_num'] = idx
if past_days == 2:
param_d['k0_accumulate'] = 0
elif past_days == 7:
param_d['k0_accumulate'] = param_d['k0'] - last_k0
elif past_days == 15:
param_d['k0_accumulate'] = param_d['k0'] + param_d['k0_accumulate'] - last_k0
for k, v in param_d.items():
cls.params.loc[table_name, k] = v
return False
@classmethod
def compute(cls, table_name, idx):
this_row = cls.ef_tables[table_name].iloc[idx].to_dict()
last_row = cls.ef_tables[table_name].iloc[idx - 1].to_dict()
param_d = cls.params.loc[table_name].to_dict()
mutation = (this_row["strain"] - param_d["mutation_accumulate"] - last_row["strain"]) - (
param_d["k0"] * (this_row["temperature"] - last_row["temperature"]))
delta_t = abs(this_row["temperature"] - last_row["temperature"])
if delta_t < MINI_EPS:
deviation = True
else:
deviation = abs(mutation / delta_t) - 180 > MINI_EPS
if abs(this_row["datetime"] - last_row["datetime"]) <= | Timedelta(hours=3) | pandas.Timedelta |
# Copyright 2021 The Kubric Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import pathlib
import shutil
import tarfile
import tempfile
import pandas as pd
import tensorflow as tf
import tensorflow_datasets.public_api as tfds
from typing import Optional
import weakref
from kubric.kubric_typing import PathLike
from kubric.core import objects
from kubric.core import materials
class ClosableResource:
_set_of_open_resources = weakref.WeakSet()
def __init__(self):
super().__init__()
self.is_closed = False
self._set_of_open_resources.add(self)
def close(self):
try:
self._set_of_open_resources.remove(self)
except (ValueError, KeyError):
pass # not listed anymore. Ignore.
@classmethod
def close_all(cls):
while True:
try:
r = cls._set_of_open_resources.pop()
except KeyError:
break
r.close()
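# Illustrative sketch (an assumption, not part of the library): every subclass that
# calls super().__init__() registers itself in the weak set above, so a single
# ClosableResource.close_all() at the end of a run releases anything left open.
def _example_close_all_at_shutdown(run_fn):
  try:
    return run_fn()
  finally:
    ClosableResource.close_all()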
class AssetSource(ClosableResource):
"""TODO(klausg): documentation."""
def __init__(self, path: PathLike, scratch_dir: Optional[PathLike] = None):
super().__init__()
self.remote_dir = tfds.core.as_path(path)
name = self.remote_dir.name
logging.info("Adding AssetSource '%s' with URI='%s'", name, self.remote_dir)
self.local_dir = pathlib.Path(tempfile.mkdtemp(prefix="assets", dir=scratch_dir))
manifest_path = self.remote_dir / "manifest.json"
if manifest_path.exists():
self.db = pd.read_json(tf.io.gfile.GFile(manifest_path, "r"))
logging.info("Found manifest file. Loaded information about %d assets", self.db.shape[0])
else:
assets_list = [p.name[:-7] for p in self.remote_dir.iterdir() if p.name.endswith(".tar.gz")]
self.db = pd.DataFrame(assets_list, columns=["id"])
logging.info("No manifest file. Found %d assets.", self.db.shape[0])
def close(self):
if self.is_closed:
return
try:
shutil.rmtree(self.local_dir)
finally:
super().close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def create(self, asset_id: str, **kwargs) -> objects.FileBasedObject:
assert asset_id in self.db["id"].values, kwargs
sim_filename, vis_filename, properties = self.fetch(asset_id)
for pname in ["mass", "friction", "restitution", "bounds", "render_import_kwargs"]:
if pname in properties and pname not in kwargs:
kwargs[pname] = properties[pname]
return objects.FileBasedObject(asset_id=asset_id,
simulation_filename=str(sim_filename),
render_filename=str(vis_filename),
**kwargs)
def fetch(self, object_id):
remote_path = self.remote_dir / (object_id + ".tar.gz")
local_path = self.local_dir / (object_id + ".tar.gz")
if not local_path.exists():
logging.debug("Copying %s to %s", str(remote_path), str(local_path))
tf.io.gfile.copy(remote_path, local_path)
with tarfile.open(local_path, "r:gz") as tar:
list_of_files = tar.getnames()
if object_id in list_of_files and tar.getmember(object_id).isdir():
# tarfile contains directory with name object_id, so we can just extract
assert f"{object_id}/data.json" in list_of_files, list_of_files
tar.extractall(self.local_dir)
else:
# tarfile contains files only, so extract into a new directory
assert "data.json" in list_of_files, list_of_files
tar.extractall(self.local_dir / object_id)
logging.debug("Extracted %s", repr([m.name for m in tar.getmembers()]))
json_path = self.local_dir / object_id / "data.json"
with open(json_path, "r", encoding="utf-8") as f:
properties = json.load(f)
logging.debug("Loaded properties %s", repr(properties))
# paths
vis_path = properties["paths"]["visual_geometry"]
if isinstance(vis_path, list):
vis_path = vis_path[0]
vis_path = self.local_dir / object_id / vis_path
urdf_path = properties["paths"]["urdf"]
if isinstance(urdf_path, list):
urdf_path = urdf_path[0]
urdf_path = self.local_dir / object_id / urdf_path
return urdf_path, vis_path, properties
def get_test_split(self, fraction=0.1):
"""
Generates a train/test split for the asset source.
Args:
fraction: the fraction of the asset source to use for the heldout set.
Returns:
train_objects: list of asset ID strings
held_out_objects: list of asset ID strings
"""
held_out_objects = list(self.db.sample(frac=fraction, replace=False, random_state=42)["id"])
train_objects = [i for i in self.db["id"] if i not in held_out_objects]
return train_objects, held_out_objects
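# --- Illustrative usage sketch (not part of the original kubric module): it shows how an
# --- AssetSource might be opened, split into train/held-out ids, and used to instantiate an
# --- object. The URI and the "_demo_" function name are assumptions made for this sketch only.
def _demo_asset_source_usage(asset_uri="gs://kubric-public/assets"):
  # The context manager guarantees the temporary scratch directory is removed afterwards.
  with AssetSource(asset_uri) as source:
    train_ids, held_out_ids = source.get_test_split(fraction=0.1)
    # Extra kwargs passed to create() would be forwarded to objects.FileBasedObject.
    first_obj = source.create(asset_id=train_ids[0])
    return first_obj, held_out_ids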
class TextureSource(ClosableResource):
"""TODO(klausg): documentation."""
def __init__(self, path: PathLike, scratch_dir: Optional[PathLike] = None):
super().__init__()
self.remote_dir = tfds.core.as_path(path)
name = self.remote_dir.name
logging.info("Adding TextureSource '%s' with URI='%s'", name, self.remote_dir)
self.local_dir = tfds.core.as_path(tempfile.mkdtemp(prefix="textures", dir=scratch_dir))
manifest_path = self.remote_dir / "manifest.json"
if manifest_path.exists():
self.db = pd.read_json(tf.io.gfile.GFile(manifest_path, "r"))
logging.info("Found manifest file. Loaded information about %d assets", self.db.shape[0])
else:
assets_list = [p.name for p in self.remote_dir.iterdir()]
self.db = | pd.DataFrame(assets_list, columns=["id"]) | pandas.DataFrame |
# ********************************************************************************** #
# #
#   Project: FastClassAI workbench                                                    #
# #
# Author: <NAME> #
# Contact: <EMAIL> #
# #
# This notebook is a part of Skin AanaliticAI development kit, created #
# for evaluation of public datasets used for skin cancer detection with #
# large number of AI models and data preparation pipelines. #
# #
# License: MIT #
# Copyright (C) 2021.01.30 <NAME> #
# https://opensource.org/licenses/MIT #
# #
# ********************************************************************************** #
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os # allow changing, and navigating files and folders,
import sys
import re # module to use regular expressions,
import glob # lists names in folders that match Unix shell patterns
import random # functions that use and generate random numbers
import cv2
import numpy as np # support for multi-dimensional arrays and matrices
import pandas as pd # library for data manipulation and analysis
import seaborn as sns # advance plots, for statistics,
import matplotlib as mpl # to get some basic functions, helping with plot making
import scipy.cluster.hierarchy as sch
import matplotlib.pyplot as plt # for making plots,
from src.utils.image_augmentation import * # to create batch_labels files,
from src.utils.data_loaders import load_encoded_imgbatch_using_logfile, load_raw_img_batch
from PIL import Image, ImageDraw
from matplotlib.font_manager import FontProperties
from sklearn.metrics import accuracy_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.model_selection import ParameterGrid
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA
from sklearn.dummy import DummyClassifier
# Function, ............................................................................................
def perfrom_grid_search(*, X, y, train_proportion=0.7, pipe, grid, method_name=np.nan, verbose=False):
# check the data, ................................
assert type(X)==np.ndarray, "Incorrect obj type" # Test input df,
assert type(y)==np.ndarray, "Incorrect obj type" # Test input df,
# Data preparation, ...............................
# .. Split data into train/test sets
X_tr, X_te, y_tr, y_te = train_test_split(
X, y,
train_size=train_proportion,
test_size=(1-train_proportion),
random_state=0
)
# .. test dimensions,
if verbose==True:
print('Number of combinations:', len(grid))
print("Input Data shapes are:", "train=",X_tr.shape," test=",X_te.shape)
else:
pass
# Grid Search, ...............................
# Save accuracy on test set
test_scores = []
# Enumerate combinations starting from 1
for i, params_dict in enumerate(grid, 1):
if verbose==True:
# Print progress
if i-1==0:
print(f"GridSearch: ", end="")
if i>1 and i<len(grid)-1:
print(".",end="")
if i==len(grid):
print(".", end="\n")
else:
pass
# Set parameters
pipe.set_params(**params_dict)
# Fit a k-NN classifier
pipe.fit(X_tr, y_tr)
# Save accuracy on test set
params_dict['train_accuracy'] = pipe.score(X_tr, y_tr)
params_dict['test_accuracy'] = pipe.score(X_te, y_te)
params_dict['method'] = method_name
# Save result
test_scores.append(params_dict)
if verbose==True:
print('done')
else:
pass
# prepare the results, ...................
scores_df = pd.DataFrame(test_scores)
return scores_df
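# --- Hypothetical usage sketch (added for illustration; not part of the original FastClassAI
# --- module). It runs perfrom_grid_search on synthetic data with a scaled k-NN pipeline; the
# --- "_demo_" name and the synthetic arrays are assumptions made for this example only.
def _demo_perfrom_grid_search():
    X_demo = np.random.rand(60, 4) # 60 samples with 4 extracted features
    y_demo = np.random.randint(0, 2, 60) # binary class labels
    pipe_demo = Pipeline([("scaler", StandardScaler()),
                          ("knn", KNeighborsClassifier())])
    grid_demo = ParameterGrid({"knn__n_neighbors": [3, 5, 7]})
    scores_demo = perfrom_grid_search(X=X_demo, y=y_demo, pipe=pipe_demo, grid=grid_demo,
                                      method_name="knn", verbose=False)
    # scores_demo is a DataFrame with one row per parameter combination, holding
    # train_accuracy, test_accuracy and the method name.
    return scores_demo.sort_values("test_accuracy", ascending=False)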
# Function, ...........................................................................................................
def knn_grid_search(*,
method_name="knn",
path,
dataset_name,
subset_names_tr,
subset_names_te,
module_names,
class_encoding,
grid,
param_names_for_Classifier,
train_proportion=0.7,
random_state_nr=0,
store_predictions=True,
verbose=False,
track_progresss=False
):
"""
================= ===============================================================================
Property Description
================= ===============================================================================
* Function, Custom function that performs grid search using decision trees, on features extracted
from images with different tf.hub modules.
Optionally, it allows using pca, for transforming
extracted features into a selected number of principal components,
later on used by the SVM algorithm
# Inputs,
.................................................................................................
. path : str, path to the directory where the data are stored,
. dataset_name : str, dataset's name, used while creating
. logfile_name : str, path to logfile
. dataset_name :
. subset_names_tr : list, eg: ["train", "valid"], these two datasets will be concatenated in that order
Used exclusively for training.
. subset_names_te : list, eg: ["test"], these datasets will be concatenated in that order
Caution, I assumed that more than one subset of data is kept in the dataset file folder,
eg, that you stored test and train data separately,
. module_names : list, with names given to different modules or methods used for feature extraction
from images,
. param_names_for_DecisionTreeClassifier : list, with parameters that will be used exclusively,
for DecisionTreeClassifier()
. grid : ParameterGrid() object, with parameters for DecisionTreeClassifier() and number
of principal axes used instead of extracted features,
eg:
grid = ParameterGrid({
'criterion': ["gini"],
'max_depth': [3,5],
'class_weight': ['balanced'],
'pca':[0, 10, 30]}) # pca will not be used, or the alg,
will use either 10 or 30 principal
components to train the decision tree
. store_predictions : bool, if True, predictions for all models, with train, validation and test datasets
will be performed and stored in model_predictions_dict
. class_encoding : dict, key:<original class name>:value<numerical value used by decision tree>
eg: dict(zip(list(class_colors.keys()), list(range(len(class_colors)))))
. random_state_nr : int, random state nr, used by sample split, and decision tree alg,
. train_proportion : proportion of samples in input data for training,
# Returns,
.................................................................................................
. model_acc_and_parameters_list : list, where each entry is a dict, with accuracy and parameters used to build
a given model, and model_ID that can be used to retrieve items from two other
objects returned by this function,
. dot_data_dict : dict, key=model_ID, stores decision trees in dot_data file format,
created using export_graphviz() for each model,
. model_predictions_dict : dict, key=model_ID ()
content: another dict, with "train, test and valid" keys
representing predictions made with eahc of these subsets
each of them is also a dict. with,
> "idx_in_used_batch": index of each image in original img_batch
as created, using, subset_names_tr, and
load_encoded_imgbatch_using_logfile() function
> "original_labels": array, with original class names for each image in a given dataset
> "model_predictions": array, with preducted class names for each image in a given dataset
> "acc_restuls_and_params": contains dict, with acc_restuls_and_params
to ensure reproducibility,
# Notes,
.................................................................................................
I DO NOT SAVE MODELS, BECAUSE THESE TAKE A LOT OF MEMORY, WHICH IS REALLY RESTRICTED ON MY COMPUTER;
MOREOVER, A KNN MODEL CARRIES THE ENTIRE INPUT DATASET IN IT!
in case you wish to save models use joblib library
or visit: https://machinelearningmastery.com/save-load-machine-learning-models-python-scikit-learn/
"""
# dist to store results,
dot_data_dict = dict() # decision trees stored in dot format,
model_acc_and_parameters_list = list()
model_predictions_dict = dict()
# ...
class_decoding = dict(zip(list(list(class_encoding.values())), list(class_encoding.keys()))) # reverse on class_encoding,
# ...
model_ID = -1 # id number for each model, its predictions, I started with -1 so the first id will be 0 !
for i, module_name in enumerate(module_names):
"""
Note: I decided to load the data and transform them at each iteration,
because I was working with relatively small datasets, and it was easier;
otherwise I would recommend creating a copy of the input for the models and modifying it with PCA,
instead of reloading the entire dataset.
Note: I am evaluating each model with the same set of X valid and X te, because it was a goal
of that task, and only once, because it was exploratory data analysis,
"""
if track_progresss==True:
print(f"{i} {module_name} _________________________________________ {pd.to_datetime('now')}")
else:
pass
# Grid search,
for params in grid:
# PARAMETERS, ...................................
model_ID +=1
pca_axes_nr = params["pca"]
dt_params_dct = dict(zip(param_names_for_Classifier,[params[x] for x in param_names_for_Classifier]))
# ...
Xy_names = ["train", "valid", "test"]
if track_progresss==True:
print('.', end="")
else:
pass
# DATA PREPARATION,..............................
# .................
# load and ecode X,y arrays
# find any logfile created while saving img files,
os.chdir(path)
logfiles = []
for file in glob.glob(f"{''.join([module_name,'_',dataset_name])}*_logfile.csv"):
logfiles.append(file)
# ... info,
if verbose==True:
print(f'{"".join(["."]*80)}')
print(f'{module_name}, logfie: {logfiles[0]}')
print(f" --- dt params: {dt_params_dct}")
print(f" --- pca params: {pca_axes_nr}")
else:
pass
# train data,
X, batch_labels = load_encoded_imgbatch_using_logfile(logfile_name=logfiles[0], load_datasetnames=subset_names_tr)
X = X.astype(np.float64)
y = | pd.Series(batch_labels.classname) | pandas.Series |
import json
from typing import Tuple, Union
import pandas as pd
import numpy as np
import re
import os
from tableone import TableOne
from collections import defaultdict
from io import StringIO
from .gene_patterns import *
import plotly.express as px
import pypeta
from pypeta import Peta
from pypeta import filter_description
class SampleIdError(RuntimeError):
def __init__(self, sample_id: str, message: str):
self.sample_id = sample_id
self.message = message
class NotNumericSeriesError(RuntimeError):
def __init__(self, message: str):
self.message = message
class UnknowSelectionTypeError(RuntimeError):
def __init__(self, message: str):
self.message = message
class NotInColumnError(RuntimeError):
def __init__(self, message: str):
self.message = message
class GenesRelationError(RuntimeError):
def __init__(self, message: str):
self.message = message
class VariantUndefinedError(RuntimeError):
def __init__(self, message: str):
self.message = message
class ListsUnEqualLengthError(RuntimeError):
def __init__(self, message: str):
self.message = message
class DatetimeFormatError(RuntimeError):
def __init__(self, message: str):
self.message = message
class CDx_Data():
"""[summary]
"""
def __init__(self,
mut_df: pd.DataFrame = None,
cli_df: pd.DataFrame = None,
cnv_df: pd.DataFrame = None,
sv_df: pd.DataFrame = None,
json_str: str = None):
"""Constructor method with DataFrames
Args:
mut_df (pd.DataFrame, optional): SNV and InDel info. Defaults to None.
cli_df (pd.DataFrame, optional): Clinical info. Defaults to None.
cnv_df (pd.DataFrame, optional): CNV info. Defaults to None.
sv_df (pd.DataFrame, optional): SV info. Defaults to None.
"""
self.json_str = json_str
self.mut = mut_df
self.cnv = cnv_df
self.sv = sv_df
if not cli_df is None:
self.cli = cli_df
self.cli = self._infer_datetime_columns()
else:
self._set_cli()
self.crosstab = self.get_crosstab()
def __len__(self):
return 0 if self.cli is None else len(self.cli)
def __getitem__(self, n):
return self.select_by_sample_ids([self.cli.sampleId.iloc[n]])
def __sub__(self, cdx):
if self.cli is None and cdx.cli is None:
return CDx_Data()
cli = None if self.cli is None and cdx.cli is None else pd.concat(
[self.cli, cdx.cli]).drop_duplicates(keep=False)
mut = None if self.mut is None and cdx.mut is None else pd.concat(
[self.mut, cdx.mut]).drop_duplicates(keep=False)
cnv = None if self.cnv is None and cdx.cnv is None else pd.concat(
[self.cnv, cdx.cnv]).drop_duplicates(keep=False)
sv = None if self.sv is None and cdx.sv is None else pd.concat(
[self.sv, cdx.sv]).drop_duplicates(keep=False)
return CDx_Data(cli_df=cli, mut_df=mut, cnv_df=cnv, sv_df=sv)
def __add__(self, cdx):
if self.cli is None and cdx.cli is None:
return CDx_Data()
cli = pd.concat([self.cli, cdx.cli]).drop_duplicates()
mut = pd.concat([self.mut, cdx.mut]).drop_duplicates()
cnv = pd.concat([self.cnv, cdx.cnv]).drop_duplicates()
sv = pd.concat([self.sv, cdx.sv]).drop_duplicates()
return CDx_Data(cli_df=cli, mut_df=mut, cnv_df=cnv, sv_df=sv)
def from_PETA(self,
token: str,
json_str: str,
host='https://peta.bgi.com/api'):
"""Retrieve CDx data from BGI-PETA database.
Args:
token (str): Effective token for BGI-PETA database
json_str (str): The json format restrictions communicating to the database
"""
self.json_str = json_str
peta = Peta(token=token, host=host)
peta.set_data_restriction_from_json_string(json_str)
# peta.fetch_clinical_data() does not process dtype inference correctly, so do it manually.
#self.cli = peta.fetch_clinical_data()
self.cli = pd.read_csv(
StringIO(peta.fetch_clinical_data().to_csv(None, index=False)))
self.mut = peta.fetch_mutation_data()
self.cnv = peta.fetch_cnv_data()
self.sv = peta.fetch_sv_data()
# dedup for the same sampleId in different studyIds, discard the duplicated ones from all tables
cli_original = self.cli
self.cli = self.cli.drop_duplicates('sampleId')
if (len(self.cli) < len(cli_original)):
print('Duplicated sampleId exists, drop duplicates and go on')
undup_tuple = [(x, y)
for x, y in zip(self.cli.sampleId, self.cli.studyId)]
self.sv = self.sv[self.sv.apply(
lambda x: (x['Tumor_Sample_Barcode'], x['studyId']) in undup_tuple,
axis=1)].drop_duplicates()
self.cnv = self.cnv[self.cnv.apply(
lambda x: (x['Tumor_Sample_Barcode'], x['studyId']) in undup_tuple,
axis=1)].drop_duplicates()
self.mut = self.mut[self.mut.apply(
lambda x: (x['Tumor_Sample_Barcode'], x['studyId']) in undup_tuple,
axis=1)].drop_duplicates()
# time series
self.cli = self._infer_datetime_columns()
self.crosstab = self.get_crosstab()
return filter_description(json_str)
def filter_description(self):
"""retrun filter description when data load from PETA
Returns:
str: description
"""
return filter_description(self.json_str) if self.json_str else None
def from_file(self,
mut_f: str = None,
cli_f: str = None,
cnv_f: str = None,
sv_f: str = None):
"""Get CDx data from files.
Args:
mut_f (str, optional): File in NCBI MAF format containing SNV and InDel info. Defaults to None.
cli_f (str, optional): File name containing clinical info. Defaults to None.
cnv_f (str, optional): File name containing CNV info. Defaults to None.
sv_f (str, optional): File name containing SV info. Defaults to None.
"""
if not mut_f is None:
self.mut = pd.read_csv(mut_f, sep='\t')
if not cnv_f is None:
self.cnv = pd.read_csv(cnv_f, sep='\t')
if not sv_f is None:
self.sv = pd.read_csv(sv_f, sep='\t')
if not cli_f is None:
self.cli = pd.read_csv(cli_f, sep='\t')
else:
self._set_cli()
self.cli = self._infer_datetime_columns()
self.crosstab = self.get_crosstab()
def to_tsvs(self, path: str = './'):
"""Write CDx_Data properties to 4 seprated files
Args:
path (str, optional): Path to write files. Defaults to './'.
"""
if not self.cli is None:
self.cli.to_csv(os.path.join(path, 'sample_info.txt'),
index=None,
sep='\t')
if not self.mut is None:
self.mut.to_csv(os.path.join(path, 'mut_info.txt'),
index=None,
sep='\t')
if not self.cnv is None:
self.cnv.to_csv(os.path.join(path, 'cnv_info.txt'),
index=None,
sep='\t')
if not self.sv is None:
self.sv.to_csv(os.path.join(path, 'fusion_info.txt'),
index=None,
sep='\t')
def to_excel(self, filename: str = './output.xlsx'):
"""Write CDx_Data properties to excel file
Args:
filename (str, optional): target filename. Defaults to './output.xlsx'.
"""
if not filename.endswith('xlsx'):
filename = filename + '.xlsx'
with pd.ExcelWriter(filename) as ew:
if not self.cli is None:
self.cli.to_excel(ew, sheet_name='clinical', index=None)
if not self.mut is None:
self.mut.to_excel(ew, sheet_name='mutations', index=None)
if not self.cnv is None:
self.cnv.to_excel(ew, sheet_name='cnv', index=None)
if not self.sv is None:
self.sv.to_excel(ew, sheet_name='sv', index=None)
def _set_cli(self):
"""Set the cli attribute, generate a void DataFrame when it is not specified.
"""
sample_id_series = []
if not self.mut is None:
sample_id_series.append(
self.mut['Tumor_Sample_Barcode'].drop_duplicates())
if not self.cnv is None:
sample_id_series.append(
self.cnv['Tumor_Sample_Barcode'].drop_duplicates())
if not self.sv is None:
sample_id_series.append(
self.sv['Tumor_Sample_Barcode'].drop_duplicates())
if len(sample_id_series) > 0:
self.cli = pd.DataFrame({
'sampleId': pd.concat(sample_id_series)
}).drop_duplicates()
else:
self.cli = None
def _infer_datetime_columns(self) -> pd.DataFrame:
"""To infer the datetime_columns and astype to datetime64 format
Returns:
pd.DataFrame: CDx.cli dataframe
"""
cli = self.cli
for column in cli.columns:
if column.endswith('DATE'):
try:
cli[column] = pd.to_datetime(cli[column])
except Exception as e:
raise DatetimeFormatError(
f'{column} column end with "DATE" can not be transformed to datetime format'
)
return cli
def get_crosstab(self) -> pd.DataFrame:
"""Generate a Gene vs. Sample_id cross table.
Raises:
SampleIdError: A sample id from the mut, cnv or sv table does not exist in the cli table.
Returns:
pd.DataFrame: the Gene vs. Sample_id cross table.
"""
# Duplicate sample IDs are not allowed in the cli table here: they would create duplicated columns in the crosstab, which boolean Series indexing cannot handle.
if (self.cli is None) or (len(self.cli) == 0):
return pd.DataFrame([])
sub_dfs = []
# cli
cli_crosstab = self.cli.copy().set_index('sampleId').T
cli_crosstab['track_type'] = 'CLINICAL'
sub_dfs.append(cli_crosstab)
# mut. represented by cHgvs, joined by '|' for multiple hits
if (not self.mut is None) and (len(self.mut) != 0):
mut_undup = self.mut[[
'Hugo_Symbol', 'Tumor_Sample_Barcode', 'HGVSp_Short'
]].groupby([
'Hugo_Symbol', 'Tumor_Sample_Barcode'
])['HGVSp_Short'].apply(lambda x: '|'.join(x)).reset_index()
mut_crosstab = mut_undup.pivot('Hugo_Symbol',
'Tumor_Sample_Barcode',
'HGVSp_Short')
mut_crosstab['track_type'] = 'MUTATIONS'
sub_dfs.append(mut_crosstab)
# cnv. represented by gain or loss. at first use the virtual column "copy_Num"
if (not self.cnv is None) and (len(self.cnv) != 0):
cnv_undup = self.cnv[[
'Hugo_Symbol', 'Tumor_Sample_Barcode', 'status'
]].groupby([
'Hugo_Symbol', 'Tumor_Sample_Barcode'
])['status'].apply(lambda x: '|'.join(x)).reset_index()
cnv_crosstab = cnv_undup.pivot('Hugo_Symbol',
'Tumor_Sample_Barcode', 'status')
cnv_crosstab['track_type'] = 'CNV'
sub_dfs.append(cnv_crosstab)
# sv. represented by the gene1 and gene2 combination. explode one record into 2 lines.
if (not self.sv is None) and (len(self.sv) != 0):
sv_undup = pd.concat([
self.sv,
self.sv.rename(columns={
'gene1': 'gene2',
'gene2': 'gene1'
})
])[['gene1', 'Tumor_Sample_Barcode', 'gene2']].groupby([
'gene1', 'Tumor_Sample_Barcode'
])['gene2'].apply(lambda x: '|'.join(x)).reset_index()
sv_crosstab = sv_undup.pivot('gene1', 'Tumor_Sample_Barcode',
'gene2')
sv_crosstab['track_type'] = 'FUSION'
sub_dfs.append(sv_crosstab)
# pandas does not support reindex with duplicated index, so turn into multiIndex
crosstab = pd.concat(sub_dfs)
crosstab = crosstab.set_index('track_type', append=True)
crosstab = crosstab.swaplevel()
return crosstab
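    # Illustrative layout of the returned crosstab (comment-only sketch; the sample ids,
    # genes and values below are invented for illustration):
    #
    #                                sample_A    sample_B
    # track_type
    # CLINICAL     studyId           study_1     study_1
    # MUTATIONS    EGFR              p.L858R     NaN
    # CNV          ERBB2             gain        NaN
    # FUSION       ALK               EML4        NaN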
    # How to build a universal selection interface: select by variant, gene, cancer type, etc., supporting both "or" and "and" logic.
    # This interface is critical, because the variant inclusion criteria chosen here affect the crosstab.
    # The selection returns a new CDx_Data object.
def select(self, conditions: dict = {}, update=True):
"""A universe interface to select data via different conditions.
Args:
conditions (dict, optional): Each key represents one column name of the CDx_Data attributes. Defaults to {}.
update (bool, optional): [description]. Defaults to True.
"""
return self
# 数据选择的辅助函数
def _numeric_selector(self, ser: pd.Series, range: str) -> pd.Series:
"""Compute a comparition expression on a numeric Series
Args:
ser (pd.Series): Numeric Series.
range (str): comparition expression like 'x>5'. 'x' is mandatory and represent the input.
Raises:
NotNumericSeriesError: Input Series`s dtype is not a numeric type.
Returns:
pd.Series: Series with boolean values.
"""
if ser.dtype == 'object':
raise NotNumericSeriesError(f'{ser.name} is not numeric')
#return ser.map(lambda x: eval(re.sub(r'x', str(x), range)))
return eval(re.sub(r'x', 'ser', range))
def _catagory_selector(self, ser: pd.Series, range: list) -> pd.Series:
"""Return True if the Series` value in the input range list.
Args:
ser (pd.Series): Catagory Series.
range (list): List of target options.
Returns:
pd.Series: Series with boolean values
"""
return ser.isin(range)
def _selector(self, df: pd.DataFrame, selections: dict) -> pd.DataFrame:
"""Filter the input DataFrame via the dict of conditions.
Args:
df (pd.DataFrame): Input.
selections (dict): Dict format of conditions like "{'Cancer_type':['lung','CRC'],'Age':'x>5'}".
The keys represent a column in the input DataFrame.
The list values represent a category target and str values represent a numeric target.
Raises:
NotInColumnError: A key in the dict is not in the df's columns.
UnknowSelectionTypeError: The type of a value in the dict is neither str nor list.
Returns:
pd.DataFrame: Filtered DataFrame
"""
columns = df.columns
for key, value in selections.items():
if key not in columns:
raise NotInColumnError(f'{key} is not in the columns')
if isinstance(value, str):
df = df[self._numeric_selector(df[key], value)]
elif isinstance(value, list):
df = df[self._catagory_selector(df[key], value)]
else:
raise UnknowSelectionTypeError(
f'{selections} have values not str nor list')
return df
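    # Illustrative sketch (not in the original class) of how _selector combines conditions:
    # assuming a DataFrame with 'Cancer_type' and 'Age' columns, the call
    #   self._selector(df, {'Cancer_type': ['lung', 'CRC'], 'Age': 'x>5'})
    # keeps rows whose Cancer_type is in the list AND whose Age satisfies the expression;
    # list values are routed to _catagory_selector and str values to _numeric_selector.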
def _fuzzy_id(self, regex: re.Pattern, text: str) -> str:
"""transform a sample id into fuzzy mode according the regex pattern
Args:
regex (re.Pattern): The info retains are in the capture patterns
text (str): input sample id
Returns:
str: fuzzy mode sample id
"""
matches = regex.findall(text)
if matches:
text = '_'.join(matches[0])
return text
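    # Illustrative sketch (not in the original class): with the default pattern
    # r'(\d+)[A-Z](\d+)', a sample id such as "12A345-XY" is reduced to "12_345", so ids
    # that differ only outside the captured digit groups compare as equal in fuzzy mode.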
def select_by_sample_ids(self,
sample_ids: list,
fuzzy: bool = False,
regex_str: str = r'(\d+)[A-Z](\d+)',
study_ids: list = []):
"""Select samples via a list of sample IDs.
Args:
sample_ids (list): sample ids list.
fuzzy (bool): fuzzy mode.
regex_str (str): The match principle for fuzzy match. The info in the regex capture patterns must be matched for a certified record. Defaults to r'(\d+)[A-Z](\d+)'.
study_ids: (list): The corresponding study id of each sample id. Lengths of sample_ids and study_ids must be the same.
Raises:
ListsUnEqualLengthError: Length of sample_ids and study_ids are not equal.
Returns:
CDx: CDx object of selected samples.
"""
if fuzzy:
regex = re.compile(regex_str)
# fuzzy the input ids
target_ids = []
fuzzy_to_origin = defaultdict(list)
transform = lambda x: self._fuzzy_id(regex, x)
for sample_id in sample_ids:
fuzzy_sample_id = self._fuzzy_id(regex, sample_id)
fuzzy_to_origin[fuzzy_sample_id].append(sample_id)
target_ids.append(fuzzy_sample_id)
else:
target_ids = sample_ids
transform = lambda x: x
# match
sample_id_bool = self.cli['sampleId'].map(transform).isin(target_ids)
# no match, return immediately
if not sample_id_bool.any():
return CDx_Data()
# with study ids
if len(study_ids):
if len(study_ids) != len(sample_ids):
raise ListsUnEqualLengthError('sample_ids and study_ids must have the same length')
sub_cli_df = self.cli[sample_id_bool]
study_id_bool = sub_cli_df.apply(
lambda x: x['studyId'] == study_ids[target_ids.index(
transform(x['sampleId']))],
axis=1)
sample_id_bool = sample_id_bool & study_id_bool
# construct new CDx_Data object
# CDx_Data always have a cli
cli_df = self.cli[sample_id_bool].copy()
# add a column of query ids for fuzzy match
# multiple hits are represented as a string
if fuzzy:
cli_df['queryId'] = cli_df['sampleId'].map(
lambda x: ','.join(fuzzy_to_origin[transform(x)])).copy()
if not self.mut is None and len(self.mut) != 0:
mut_df = self.mut[self.mut['Tumor_Sample_Barcode'].isin(
cli_df['sampleId'])].copy()
else:
mut_df = None
if not self.cnv is None and len(self.cnv) != 0:
cnv_df = self.cnv[self.cnv['Tumor_Sample_Barcode'].isin(
cli_df['sampleId'])].copy()
else:
cnv_df = None
if not self.sv is None and len(self.sv) != 0:
sv_df = self.sv[self.sv['Tumor_Sample_Barcode'].isin(
cli_df['sampleId'])].copy()
else:
sv_df = None
return CDx_Data(cli_df=cli_df,
mut_df=mut_df,
cnv_df=cnv_df,
sv_df=sv_df)
#
def set_mut_eligibility(self, **kwargs):
"""Set threshold for SNV/InDels to regrard as a positive sample
Raises:
VariantUndefinedError: mut info not provided by user.
Returns:
CDx_Data: CDx_Data object
"""
if self.mut is None or len(self.mut) == 0:
mut = None
else:
mut = self._selector(self.mut, kwargs)
return CDx_Data(cli_df=self.cli,
mut_df=mut,
cnv_df=self.cnv,
sv_df=self.sv)
def set_cnv_eligibility(self, **kwargs):
"""Set threshold for CNV to regrard as a positive sample.
Raises:
VariantUndefinedError: cnv info not provided by user.
Returns:
CDx_Data: CDx_Data object.
"""
if self.cnv is None or len(self.cnv) == 0:
cnv = None
else:
cnv = self._selector(self.cnv, kwargs)
return CDx_Data(cli_df=self.cli,
mut_df=self.mut,
cnv_df=cnv,
sv_df=self.sv)
def set_sv_eligibility(self, **kwargs):
"""Set threshold for SV to regrard as a positive sample.
Raises:
VariantUndefinedError: SV info not provided by user.
Returns:
CDx_Data: CDx_Data object.
"""
if self.sv is None or len(self.sv) == 0:
sv = None
else:
sv = self._selector(self.sv, kwargs)
return CDx_Data(cli_df=self.cli,
mut_df=self.mut,
cnv_df=self.cnv,
sv_df=sv)
# Specify a column name, then specify a range. Discrete types use a list; numeric types use an expression, e.g.:
# attrdict={'Cancer_type':['lung','CRC'],'Age':'x>5'}
def select_samples_by_clinical_attributes2(self, attr_dict: dict):
"""Select samples via a set of conditions corresponding to the columns in the cli DataFrame.
Args:
attr_dict (dict): Dict format of conditions like "{'Cancer_type':['lung','CRC'],'Age':'x>5'}".
The keys represent a column in the input DataFrame.
The list values represent a category target and str values represent a numeric target.
Returns:
CDx: CDx object of selected samples.
"""
cli_df = self._selector(self.cli, attr_dict)
return self.select_by_sample_ids(cli_df['sampleId'])
def select_samples_by_clinical_attributes(self, **kwargs):
"""Select samples via a set of conditions corresponding to the columns in the cli DataFrame.
Args:
Keywords arguments with each key represent a column in the input DataFrame.
like "Cancer_type=['lung','CRC'], Age='x>5'"
The list values represent a category target and str values represent a numeric target.
Returns:
CDx: CDx object of selected samples.
"""
cli_df = self._selector(self.cli, kwargs)
return self.select_by_sample_ids(cli_df['sampleId'])
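    # Illustrative call (not in the original class), assuming the cli table has
    # 'CANCER_TYPE' and 'AGE' columns:
    #   cdx.select_samples_by_clinical_attributes(CANCER_TYPE=['lung', 'CRC'], AGE='x>60')
    # returns a new CDx_Data restricted to the matching sampleIds.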
def select_samples_by_date_attributes(
self,
column_name: str = 'SAMPLE_RECEIVED_DATE',
start='',
end: str = '',
days: int = 0,
period: str = '',
):
"""Select samples using a datetime attribute in the cli dataframe
Args:
column_name (str, optional): Column used in the cli dataframe. Defaults to 'SAMPLE_RECEIVED_DATE'.
start (str, optional): Time start point. Defaults to ''.
end (str, optional): Time end point. Defaults to ''.
days (int, optional): Number of days the period lasts. Defaults to 0.
period (str, optional): Exact range, eg '202005' for May 2020 or '2021' for the whole year. Defaults to ''.
"""
date_ser = self.cli.set_index(column_name)['sampleId']
if period:
cdx = self.select_by_sample_ids(date_ser[period])
elif start and end:
cdx = self.select_by_sample_ids(date_ser[start:end])
elif start and days:
cdx = self.select_by_sample_ids(date_ser[start:(
pd.to_datetime(start) +
pd.to_timedelta(days, 'D')).strftime("%Y-%m-%d")])
elif end and days:
cdx = self.select_by_sample_ids(date_ser[(
| pd.to_datetime(end) | pandas.to_datetime |
from datetime import datetime
from decimal import Decimal
from io import StringIO
import numpy as np
import pytest
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series, Timestamp, date_range, read_csv
import pandas._testing as tm
from pandas.core.base import SpecificationError
import pandas.core.common as com
def test_repr():
# GH18203
result = repr(pd.Grouper(key="A", level="B"))
expected = "Grouper(key='A', level='B', axis=0, sort=False)"
assert result == expected
@pytest.mark.parametrize("dtype", ["int64", "int32", "float64", "float32"])
def test_basic(dtype):
data = Series(np.arange(9) // 3, index=np.arange(9), dtype=dtype)
index = np.arange(9)
np.random.shuffle(index)
data = data.reindex(index)
grouped = data.groupby(lambda x: x // 3)
for k, v in grouped:
assert len(v) == 3
agged = grouped.aggregate(np.mean)
assert agged[1] == 1
tm.assert_series_equal(agged, grouped.agg(np.mean)) # shorthand
tm.assert_series_equal(agged, grouped.mean())
tm.assert_series_equal(grouped.agg(np.sum), grouped.sum())
expected = grouped.apply(lambda x: x * x.sum())
transformed = grouped.transform(lambda x: x * x.sum())
assert transformed[7] == 12
tm.assert_series_equal(transformed, expected)
value_grouped = data.groupby(data)
tm.assert_series_equal(
value_grouped.aggregate(np.mean), agged, check_index_type=False
)
# complex agg
agged = grouped.aggregate([np.mean, np.std])
msg = r"nested renamer is not supported"
with pytest.raises(SpecificationError, match=msg):
grouped.aggregate({"one": np.mean, "two": np.std})
group_constants = {0: 10, 1: 20, 2: 30}
agged = grouped.agg(lambda x: group_constants[x.name] + x.mean())
assert agged[1] == 21
# corner cases
msg = "Must produce aggregated value"
# exception raised is type Exception
with pytest.raises(Exception, match=msg):
grouped.aggregate(lambda x: x * 2)
def test_groupby_nonobject_dtype(mframe, df_mixed_floats):
key = mframe.index.codes[0]
grouped = mframe.groupby(key)
result = grouped.sum()
expected = mframe.groupby(key.astype("O")).sum()
tm.assert_frame_equal(result, expected)
# GH 3911, mixed frame non-conversion
df = df_mixed_floats.copy()
df["value"] = range(len(df))
def max_value(group):
return group.loc[group["value"].idxmax()]
applied = df.groupby("A").apply(max_value)
result = applied.dtypes
expected = Series(
[np.dtype("object")] * 2 + [np.dtype("float64")] * 2 + [np.dtype("int64")],
index=["A", "B", "C", "D", "value"],
)
tm.assert_series_equal(result, expected)
def test_groupby_return_type():
# GH2893, return a reduced type
df1 = DataFrame(
[
{"val1": 1, "val2": 20},
{"val1": 1, "val2": 19},
{"val1": 2, "val2": 27},
{"val1": 2, "val2": 12},
]
)
def func(dataf):
return dataf["val2"] - dataf["val2"].mean()
with tm.assert_produces_warning(FutureWarning):
result = df1.groupby("val1", squeeze=True).apply(func)
assert isinstance(result, Series)
df2 = DataFrame(
[
{"val1": 1, "val2": 20},
{"val1": 1, "val2": 19},
{"val1": 1, "val2": 27},
{"val1": 1, "val2": 12},
]
)
def func(dataf):
return dataf["val2"] - dataf["val2"].mean()
with tm.assert_produces_warning(FutureWarning):
result = df2.groupby("val1", squeeze=True).apply(func)
assert isinstance(result, Series)
# GH3596, return a consistent type (regression in 0.11 from 0.10.1)
df = DataFrame([[1, 1], [1, 1]], columns=["X", "Y"])
with tm.assert_produces_warning(FutureWarning):
result = df.groupby("X", squeeze=False).count()
assert isinstance(result, DataFrame)
def test_inconsistent_return_type():
# GH5592
# inconsistent return type
df = DataFrame(
dict(
A=["Tiger", "Tiger", "Tiger", "Lamb", "Lamb", "Pony", "Pony"],
B=Series(np.arange(7), dtype="int64"),
C=date_range("20130101", periods=7),
)
)
def f(grp):
return grp.iloc[0]
expected = df.groupby("A").first()[["B"]]
result = df.groupby("A").apply(f)[["B"]]
tm.assert_frame_equal(result, expected)
def f(grp):
if grp.name == "Tiger":
return None
return grp.iloc[0]
result = df.groupby("A").apply(f)[["B"]]
e = expected.copy()
e.loc["Tiger"] = np.nan
tm.assert_frame_equal(result, e)
def f(grp):
if grp.name == "Pony":
return None
return grp.iloc[0]
result = df.groupby("A").apply(f)[["B"]]
e = expected.copy()
e.loc["Pony"] = np.nan
tm.assert_frame_equal(result, e)
# 5592 revisited, with datetimes
def f(grp):
if grp.name == "Pony":
return None
return grp.iloc[0]
result = df.groupby("A").apply(f)[["C"]]
e = df.groupby("A").first()[["C"]]
e.loc["Pony"] = pd.NaT
tm.assert_frame_equal(result, e)
# scalar outputs
def f(grp):
if grp.name == "Pony":
return None
return grp.iloc[0].loc["C"]
result = df.groupby("A").apply(f)
e = df.groupby("A").first()["C"].copy()
e.loc["Pony"] = np.nan
e.name = None
tm.assert_series_equal(result, e)
def test_pass_args_kwargs(ts, tsframe):
def f(x, q=None, axis=0):
return np.percentile(x, q, axis=axis)
g = lambda x: np.percentile(x, 80, axis=0)
# Series
ts_grouped = ts.groupby(lambda x: x.month)
agg_result = ts_grouped.agg(np.percentile, 80, axis=0)
apply_result = ts_grouped.apply(np.percentile, 80, axis=0)
trans_result = ts_grouped.transform(np.percentile, 80, axis=0)
agg_expected = ts_grouped.quantile(0.8)
trans_expected = ts_grouped.transform(g)
tm.assert_series_equal(apply_result, agg_expected)
tm.assert_series_equal(agg_result, agg_expected)
tm.assert_series_equal(trans_result, trans_expected)
agg_result = ts_grouped.agg(f, q=80)
apply_result = ts_grouped.apply(f, q=80)
trans_result = ts_grouped.transform(f, q=80)
tm.assert_series_equal(agg_result, agg_expected)
tm.assert_series_equal(apply_result, agg_expected)
tm.assert_series_equal(trans_result, trans_expected)
# DataFrame
df_grouped = tsframe.groupby(lambda x: x.month)
agg_result = df_grouped.agg(np.percentile, 80, axis=0)
apply_result = df_grouped.apply(DataFrame.quantile, 0.8)
expected = df_grouped.quantile(0.8)
tm.assert_frame_equal(apply_result, expected, check_names=False)
tm.assert_frame_equal(agg_result, expected)
agg_result = df_grouped.agg(f, q=80)
apply_result = df_grouped.apply(DataFrame.quantile, q=0.8)
tm.assert_frame_equal(agg_result, expected)
tm.assert_frame_equal(apply_result, expected, check_names=False)
def test_len():
df = tm.makeTimeDataFrame()
grouped = df.groupby([lambda x: x.year, lambda x: x.month, lambda x: x.day])
assert len(grouped) == len(df)
grouped = df.groupby([lambda x: x.year, lambda x: x.month])
expected = len({(x.year, x.month) for x in df.index})
assert len(grouped) == expected
# issue 11016
df = pd.DataFrame(dict(a=[np.nan] * 3, b=[1, 2, 3]))
assert len(df.groupby(("a"))) == 0
assert len(df.groupby(("b"))) == 3
assert len(df.groupby(["a", "b"])) == 3
def test_basic_regression():
# regression
result = Series([1.0 * x for x in list(range(1, 10)) * 10])
data = np.random.random(1100) * 10.0
groupings = Series(data)
grouped = result.groupby(groupings)
grouped.mean()
@pytest.mark.parametrize(
"dtype", ["float64", "float32", "int64", "int32", "int16", "int8"]
)
def test_with_na_groups(dtype):
index = Index(np.arange(10))
values = Series(np.ones(10), index, dtype=dtype)
labels = Series(
[np.nan, "foo", "bar", "bar", np.nan, np.nan, "bar", "bar", np.nan, "foo"],
index=index,
)
# this SHOULD be an int
grouped = values.groupby(labels)
agged = grouped.agg(len)
expected = Series([4, 2], index=["bar", "foo"])
tm.assert_series_equal(agged, expected, check_dtype=False)
# assert issubclass(agged.dtype.type, np.integer)
# explicitly return a float from my function
def f(x):
return float(len(x))
agged = grouped.agg(f)
expected = Series([4, 2], index=["bar", "foo"])
tm.assert_series_equal(agged, expected, check_dtype=False)
assert issubclass(agged.dtype.type, np.dtype(dtype).type)
def test_indices_concatenation_order():
# GH 2808
def f1(x):
y = x[(x.b % 2) == 1] ** 2
if y.empty:
multiindex = MultiIndex(levels=[[]] * 2, codes=[[]] * 2, names=["b", "c"])
res = DataFrame(columns=["a"], index=multiindex)
return res
else:
y = y.set_index(["b", "c"])
return y
def f2(x):
y = x[(x.b % 2) == 1] ** 2
if y.empty:
return DataFrame()
else:
y = y.set_index(["b", "c"])
return y
def f3(x):
y = x[(x.b % 2) == 1] ** 2
if y.empty:
multiindex = MultiIndex(
levels=[[]] * 2, codes=[[]] * 2, names=["foo", "bar"]
)
res = DataFrame(columns=["a", "b"], index=multiindex)
return res
else:
return y
df = DataFrame({"a": [1, 2, 2, 2], "b": range(4), "c": range(5, 9)})
df2 = DataFrame({"a": [3, 2, 2, 2], "b": range(4), "c": range(5, 9)})
# correct result
result1 = df.groupby("a").apply(f1)
result2 = df2.groupby("a").apply(f1)
tm.assert_frame_equal(result1, result2)
# should fail (not the same number of levels)
msg = "Cannot concat indices that do not have the same number of levels"
with pytest.raises(AssertionError, match=msg):
df.groupby("a").apply(f2)
with pytest.raises(AssertionError, match=msg):
df2.groupby("a").apply(f2)
# should fail (incorrect shape)
with pytest.raises(AssertionError, match=msg):
df.groupby("a").apply(f3)
with pytest.raises(AssertionError, match=msg):
df2.groupby("a").apply(f3)
def test_attr_wrapper(ts):
grouped = ts.groupby(lambda x: x.weekday())
result = grouped.std()
expected = grouped.agg(lambda x: np.std(x, ddof=1))
tm.assert_series_equal(result, expected)
# this is pretty cool
result = grouped.describe()
expected = {name: gp.describe() for name, gp in grouped}
expected = DataFrame(expected).T
tm.assert_frame_equal(result, expected)
# get attribute
result = grouped.dtype
expected = grouped.agg(lambda x: x.dtype)
# make sure raises error
msg = "'SeriesGroupBy' object has no attribute 'foo'"
with pytest.raises(AttributeError, match=msg):
getattr(grouped, "foo")
def test_frame_groupby(tsframe):
grouped = tsframe.groupby(lambda x: x.weekday())
# aggregate
aggregated = grouped.aggregate(np.mean)
assert len(aggregated) == 5
assert len(aggregated.columns) == 4
# by string
tscopy = tsframe.copy()
tscopy["weekday"] = [x.weekday() for x in tscopy.index]
stragged = tscopy.groupby("weekday").aggregate(np.mean)
tm.assert_frame_equal(stragged, aggregated, check_names=False)
# transform
grouped = tsframe.head(30).groupby(lambda x: x.weekday())
transformed = grouped.transform(lambda x: x - x.mean())
assert len(transformed) == 30
assert len(transformed.columns) == 4
# transform propagate
transformed = grouped.transform(lambda x: x.mean())
for name, group in grouped:
mean = group.mean()
for idx in group.index:
tm.assert_series_equal(transformed.xs(idx), mean, check_names=False)
# iterate
for weekday, group in grouped:
assert group.index[0].weekday() == weekday
# groups / group_indices
groups = grouped.groups
indices = grouped.indices
for k, v in groups.items():
samething = tsframe.index.take(indices[k])
assert (samething == v).all()
def test_frame_groupby_columns(tsframe):
mapping = {"A": 0, "B": 0, "C": 1, "D": 1}
grouped = tsframe.groupby(mapping, axis=1)
# aggregate
aggregated = grouped.aggregate(np.mean)
assert len(aggregated) == len(tsframe)
assert len(aggregated.columns) == 2
# transform
tf = lambda x: x - x.mean()
groupedT = tsframe.T.groupby(mapping, axis=0)
tm.assert_frame_equal(groupedT.transform(tf).T, grouped.transform(tf))
# iterate
for k, v in grouped:
assert len(v.columns) == 2
def test_frame_set_name_single(df):
grouped = df.groupby("A")
result = grouped.mean()
assert result.index.name == "A"
result = df.groupby("A", as_index=False).mean()
assert result.index.name != "A"
result = grouped.agg(np.mean)
assert result.index.name == "A"
result = grouped.agg({"C": np.mean, "D": np.std})
assert result.index.name == "A"
result = grouped["C"].mean()
assert result.index.name == "A"
result = grouped["C"].agg(np.mean)
assert result.index.name == "A"
result = grouped["C"].agg([np.mean, np.std])
assert result.index.name == "A"
msg = r"nested renamer is not supported"
with pytest.raises(SpecificationError, match=msg):
grouped["C"].agg({"foo": np.mean, "bar": np.std})
def test_multi_func(df):
col1 = df["A"]
col2 = df["B"]
grouped = df.groupby([col1.get, col2.get])
agged = grouped.mean()
expected = df.groupby(["A", "B"]).mean()
# TODO groupby get drops names
tm.assert_frame_equal(
agged.loc[:, ["C", "D"]], expected.loc[:, ["C", "D"]], check_names=False
)
# some "groups" with no data
df = DataFrame(
{
"v1": np.random.randn(6),
"v2": np.random.randn(6),
"k1": np.array(["b", "b", "b", "a", "a", "a"]),
"k2": np.array(["1", "1", "1", "2", "2", "2"]),
},
index=["one", "two", "three", "four", "five", "six"],
)
# only verify that it works for now
grouped = df.groupby(["k1", "k2"])
grouped.agg(np.sum)
def test_multi_key_multiple_functions(df):
grouped = df.groupby(["A", "B"])["C"]
agged = grouped.agg([np.mean, np.std])
expected = DataFrame({"mean": grouped.agg(np.mean), "std": grouped.agg(np.std)})
tm.assert_frame_equal(agged, expected)
def test_frame_multi_key_function_list():
data = DataFrame(
{
"A": [
"foo",
"foo",
"foo",
"foo",
"bar",
"bar",
"bar",
"bar",
"foo",
"foo",
"foo",
],
"B": [
"one",
"one",
"one",
"two",
"one",
"one",
"one",
"two",
"two",
"two",
"one",
],
"C": [
"dull",
"dull",
"shiny",
"dull",
"dull",
"shiny",
"shiny",
"dull",
"shiny",
"shiny",
"shiny",
],
"D": np.random.randn(11),
"E": np.random.randn(11),
"F": np.random.randn(11),
}
)
grouped = data.groupby(["A", "B"])
funcs = [np.mean, np.std]
agged = grouped.agg(funcs)
expected = pd.concat(
[grouped["D"].agg(funcs), grouped["E"].agg(funcs), grouped["F"].agg(funcs)],
keys=["D", "E", "F"],
axis=1,
)
assert isinstance(agged.index, MultiIndex)
assert isinstance(expected.index, MultiIndex)
tm.assert_frame_equal(agged, expected)
@pytest.mark.parametrize("op", [lambda x: x.sum(), lambda x: x.mean()])
def test_groupby_multiple_columns(df, op):
data = df
grouped = data.groupby(["A", "B"])
result1 = op(grouped)
keys = []
values = []
for n1, gp1 in data.groupby("A"):
for n2, gp2 in gp1.groupby("B"):
keys.append((n1, n2))
values.append(op(gp2.loc[:, ["C", "D"]]))
mi = MultiIndex.from_tuples(keys, names=["A", "B"])
expected = pd.concat(values, axis=1).T
expected.index = mi
# a little bit crude
for col in ["C", "D"]:
result_col = op(grouped[col])
pivoted = result1[col]
exp = expected[col]
tm.assert_series_equal(result_col, exp)
tm.assert_series_equal(pivoted, exp)
# test single series works the same
result = data["C"].groupby([data["A"], data["B"]]).mean()
expected = data.groupby(["A", "B"]).mean()["C"]
tm.assert_series_equal(result, expected)
def test_as_index_select_column():
# GH 5764
df = pd.DataFrame([[1, 2], [1, 4], [5, 6]], columns=["A", "B"])
result = df.groupby("A", as_index=False)["B"].get_group(1)
expected = pd.Series([2, 4], name="B")
tm.assert_series_equal(result, expected)
result = df.groupby("A", as_index=False)["B"].apply(lambda x: x.cumsum())
expected = pd.Series(
[2, 6, 6], name="B", index=pd.MultiIndex.from_tuples([(0, 0), (0, 1), (1, 2)])
)
tm.assert_series_equal(result, expected)
def test_groupby_as_index_select_column_sum_empty_df():
# GH 35246
df = DataFrame(columns=["A", "B", "C"])
left = df.groupby(by="A", as_index=False)["B"].sum()
assert type(left) is DataFrame
assert left.to_dict() == {"A": {}, "B": {}}
def test_groupby_as_index_agg(df):
grouped = df.groupby("A", as_index=False)
# single-key
result = grouped.agg(np.mean)
expected = grouped.mean()
tm.assert_frame_equal(result, expected)
result2 = grouped.agg({"C": np.mean, "D": np.sum})
expected2 = grouped.mean()
expected2["D"] = grouped.sum()["D"]
tm.assert_frame_equal(result2, expected2)
grouped = df.groupby("A", as_index=True)
msg = r"nested renamer is not supported"
with pytest.raises(SpecificationError, match=msg):
grouped["C"].agg({"Q": np.sum})
# multi-key
grouped = df.groupby(["A", "B"], as_index=False)
result = grouped.agg(np.mean)
expected = grouped.mean()
tm.assert_frame_equal(result, expected)
result2 = grouped.agg({"C": np.mean, "D": np.sum})
expected2 = grouped.mean()
expected2["D"] = grouped.sum()["D"]
tm.assert_frame_equal(result2, expected2)
expected3 = grouped["C"].sum()
expected3 = DataFrame(expected3).rename(columns={"C": "Q"})
result3 = grouped["C"].agg({"Q": np.sum})
tm.assert_frame_equal(result3, expected3)
# GH7115 & GH8112 & GH8582
df = DataFrame(np.random.randint(0, 100, (50, 3)), columns=["jim", "joe", "jolie"])
ts = Series(np.random.randint(5, 10, 50), name="jim")
gr = df.groupby(ts)
gr.nth(0) # invokes set_selection_from_grouper internally
tm.assert_frame_equal(gr.apply(sum), df.groupby(ts).apply(sum))
for attr in ["mean", "max", "count", "idxmax", "cumsum", "all"]:
gr = df.groupby(ts, as_index=False)
left = getattr(gr, attr)()
gr = df.groupby(ts.values, as_index=True)
right = getattr(gr, attr)().reset_index(drop=True)
tm.assert_frame_equal(left, right)
def test_ops_not_as_index(reduction_func):
# GH 10355, 21090
# Using as_index=False should not modify grouped column
if reduction_func in ("corrwith",):
pytest.skip("Test not applicable")
if reduction_func in ("nth", "ngroup",):
pytest.skip("Skip until behavior is determined (GH #5755)")
df = DataFrame(np.random.randint(0, 5, size=(100, 2)), columns=["a", "b"])
expected = getattr(df.groupby("a"), reduction_func)()
if reduction_func == "size":
expected = expected.rename("size")
expected = expected.reset_index()
g = df.groupby("a", as_index=False)
result = getattr(g, reduction_func)()
tm.assert_frame_equal(result, expected)
result = g.agg(reduction_func)
tm.assert_frame_equal(result, expected)
result = getattr(g["b"], reduction_func)()
tm.assert_frame_equal(result, expected)
result = g["b"].agg(reduction_func)
tm.assert_frame_equal(result, expected)
def test_as_index_series_return_frame(df):
grouped = df.groupby("A", as_index=False)
grouped2 = df.groupby(["A", "B"], as_index=False)
result = grouped["C"].agg(np.sum)
expected = grouped.agg(np.sum).loc[:, ["A", "C"]]
assert isinstance(result, DataFrame)
tm.assert_frame_equal(result, expected)
result2 = grouped2["C"].agg(np.sum)
expected2 = grouped2.agg(np.sum).loc[:, ["A", "B", "C"]]
assert isinstance(result2, DataFrame)
tm.assert_frame_equal(result2, expected2)
result = grouped["C"].sum()
expected = grouped.sum().loc[:, ["A", "C"]]
assert isinstance(result, DataFrame)
tm.assert_frame_equal(result, expected)
result2 = grouped2["C"].sum()
expected2 = grouped2.sum().loc[:, ["A", "B", "C"]]
assert isinstance(result2, DataFrame)
tm.assert_frame_equal(result2, expected2)
def test_as_index_series_column_slice_raises(df):
# GH15072
grouped = df.groupby("A", as_index=False)
msg = r"Column\(s\) C already selected"
with pytest.raises(IndexError, match=msg):
grouped["C"].__getitem__("D")
def test_groupby_as_index_cython(df):
data = df
# single-key
grouped = data.groupby("A", as_index=False)
result = grouped.mean()
expected = data.groupby(["A"]).mean()
expected.insert(0, "A", expected.index)
expected.index = np.arange(len(expected))
tm.assert_frame_equal(result, expected)
# multi-key
grouped = data.groupby(["A", "B"], as_index=False)
result = grouped.mean()
expected = data.groupby(["A", "B"]).mean()
arrays = list(zip(*expected.index.values))
expected.insert(0, "A", arrays[0])
expected.insert(1, "B", arrays[1])
expected.index = np.arange(len(expected))
tm.assert_frame_equal(result, expected)
def test_groupby_as_index_series_scalar(df):
grouped = df.groupby(["A", "B"], as_index=False)
# GH #421
result = grouped["C"].agg(len)
expected = grouped.agg(len).loc[:, ["A", "B", "C"]]
tm.assert_frame_equal(result, expected)
def test_groupby_as_index_corner(df, ts):
msg = "as_index=False only valid with DataFrame"
with pytest.raises(TypeError, match=msg):
ts.groupby(lambda x: x.weekday(), as_index=False)
msg = "as_index=False only valid for axis=0"
with pytest.raises(ValueError, match=msg):
df.groupby(lambda x: x.lower(), as_index=False, axis=1)
def test_groupby_multiple_key(df):
df = tm.makeTimeDataFrame()
grouped = df.groupby([lambda x: x.year, lambda x: x.month, lambda x: x.day])
agged = grouped.sum()
tm.assert_almost_equal(df.values, agged.values)
grouped = df.T.groupby(
[lambda x: x.year, lambda x: x.month, lambda x: x.day], axis=1
)
agged = grouped.agg(lambda x: x.sum())
tm.assert_index_equal(agged.index, df.columns)
tm.assert_almost_equal(df.T.values, agged.values)
agged = grouped.agg(lambda x: x.sum())
tm.assert_almost_equal(df.T.values, agged.values)
def test_groupby_multi_corner(df):
# test that having an all-NA column doesn't mess you up
df = df.copy()
df["bad"] = np.nan
agged = df.groupby(["A", "B"]).mean()
expected = df.groupby(["A", "B"]).mean()
expected["bad"] = np.nan
tm.assert_frame_equal(agged, expected)
def test_omit_nuisance(df):
grouped = df.groupby("A")
result = grouped.mean()
expected = df.loc[:, ["A", "C", "D"]].groupby("A").mean()
tm.assert_frame_equal(result, expected)
agged = grouped.agg(np.mean)
exp = grouped.mean()
tm.assert_frame_equal(agged, exp)
df = df.loc[:, ["A", "C", "D"]]
df["E"] = datetime.now()
grouped = df.groupby("A")
result = grouped.agg(np.sum)
expected = grouped.sum()
tm.assert_frame_equal(result, expected)
# won't work with axis = 1
grouped = df.groupby({"A": 0, "C": 0, "D": 1, "E": 1}, axis=1)
msg = "reduction operation 'sum' not allowed for this dtype"
with pytest.raises(TypeError, match=msg):
grouped.agg(lambda x: x.sum(0, numeric_only=False))
def test_omit_nuisance_python_multiple(three_group):
grouped = three_group.groupby(["A", "B"])
agged = grouped.agg(np.mean)
exp = grouped.mean()
tm.assert_frame_equal(agged, exp)
def test_empty_groups_corner(mframe):
# handle empty groups
df = DataFrame(
{
"k1": np.array(["b", "b", "b", "a", "a", "a"]),
"k2": np.array(["1", "1", "1", "2", "2", "2"]),
"k3": ["foo", "bar"] * 3,
"v1": np.random.randn(6),
"v2": np.random.randn(6),
}
)
grouped = df.groupby(["k1", "k2"])
result = grouped.agg(np.mean)
expected = grouped.mean()
tm.assert_frame_equal(result, expected)
grouped = mframe[3:5].groupby(level=0)
agged = grouped.apply(lambda x: x.mean())
agged_A = grouped["A"].apply(np.mean)
tm.assert_series_equal(agged["A"], agged_A)
assert agged.index.name == "first"
def test_nonsense_func():
df = DataFrame([0])
msg = r"unsupported operand type\(s\) for \+: 'int' and 'str'"
with pytest.raises(TypeError, match=msg):
df.groupby(lambda x: x + "foo")
def test_wrap_aggregated_output_multindex(mframe):
df = mframe.T
df["baz", "two"] = "peekaboo"
keys = [np.array([0, 0, 1]), np.array([0, 0, 1])]
agged = df.groupby(keys).agg(np.mean)
assert isinstance(agged.columns, MultiIndex)
def aggfun(ser):
if ser.name == ("foo", "one"):
raise TypeError
else:
return ser.sum()
agged2 = df.groupby(keys).aggregate(aggfun)
assert len(agged2.columns) + 1 == len(df.columns)
def test_groupby_level_apply(mframe):
result = mframe.groupby(level=0).count()
assert result.index.name == "first"
result = mframe.groupby(level=1).count()
assert result.index.name == "second"
result = mframe["A"].groupby(level=0).count()
assert result.index.name == "first"
def test_groupby_level_mapper(mframe):
deleveled = mframe.reset_index()
mapper0 = {"foo": 0, "bar": 0, "baz": 1, "qux": 1}
mapper1 = {"one": 0, "two": 0, "three": 1}
result0 = mframe.groupby(mapper0, level=0).sum()
result1 = mframe.groupby(mapper1, level=1).sum()
mapped_level0 = np.array([mapper0.get(x) for x in deleveled["first"]])
mapped_level1 = np.array([mapper1.get(x) for x in deleveled["second"]])
expected0 = mframe.groupby(mapped_level0).sum()
expected1 = mframe.groupby(mapped_level1).sum()
expected0.index.name, expected1.index.name = "first", "second"
tm.assert_frame_equal(result0, expected0)
tm.assert_frame_equal(result1, expected1)
def test_groupby_level_nonmulti():
# GH 1313, GH 13901
s = Series([1, 2, 3, 10, 4, 5, 20, 6], Index([1, 2, 3, 1, 4, 5, 2, 6], name="foo"))
expected = Series([11, 22, 3, 4, 5, 6], Index(range(1, 7), name="foo"))
result = s.groupby(level=0).sum()
tm.assert_series_equal(result, expected)
result = s.groupby(level=[0]).sum()
tm.assert_series_equal(result, expected)
result = s.groupby(level=-1).sum()
tm.assert_series_equal(result, expected)
result = s.groupby(level=[-1]).sum()
tm.assert_series_equal(result, expected)
msg = "level > 0 or level < -1 only valid with MultiIndex"
with pytest.raises(ValueError, match=msg):
s.groupby(level=1)
with pytest.raises(ValueError, match=msg):
s.groupby(level=-2)
msg = "No group keys passed!"
with pytest.raises(ValueError, match=msg):
s.groupby(level=[])
msg = "multiple levels only valid with MultiIndex"
with pytest.raises(ValueError, match=msg):
s.groupby(level=[0, 0])
with pytest.raises(ValueError, match=msg):
s.groupby(level=[0, 1])
msg = "level > 0 or level < -1 only valid with MultiIndex"
with pytest.raises(ValueError, match=msg):
s.groupby(level=[1])
def test_groupby_complex():
# GH 12902
a = Series(data=np.arange(4) * (1 + 2j), index=[0, 0, 1, 1])
expected = Series((1 + 2j, 5 + 10j))
result = a.groupby(level=0).sum()
tm.assert_series_equal(result, expected)
result = a.sum(level=0)
tm.assert_series_equal(result, expected)
def test_groupby_series_indexed_differently():
s1 = Series(
[5.0, -9.0, 4.0, 100.0, -5.0, 55.0, 6.7],
index=Index(["a", "b", "c", "d", "e", "f", "g"]),
)
s2 = Series(
[1.0, 1.0, 4.0, 5.0, 5.0, 7.0], index=Index(["a", "b", "d", "f", "g", "h"])
)
grouped = s1.groupby(s2)
agged = grouped.mean()
exp = s1.groupby(s2.reindex(s1.index).get).mean()
tm.assert_series_equal(agged, exp)
def test_groupby_with_hier_columns():
tuples = list(
zip(
*[
["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
["one", "two", "one", "two", "one", "two", "one", "two"],
]
)
)
index = MultiIndex.from_tuples(tuples)
columns = MultiIndex.from_tuples(
[("A", "cat"), ("B", "dog"), ("B", "cat"), ("A", "dog")]
)
df = DataFrame(np.random.randn(8, 4), index=index, columns=columns)
result = df.groupby(level=0).mean()
tm.assert_index_equal(result.columns, columns)
result = df.groupby(level=0, axis=1).mean()
tm.assert_index_equal(result.index, df.index)
result = df.groupby(level=0).agg(np.mean)
tm.assert_index_equal(result.columns, columns)
result = df.groupby(level=0).apply(lambda x: x.mean())
tm.assert_index_equal(result.columns, columns)
result = df.groupby(level=0, axis=1).agg(lambda x: x.mean(1))
tm.assert_index_equal(result.columns, Index(["A", "B"]))
tm.assert_index_equal(result.index, df.index)
# add a nuisance column
sorted_columns, _ = columns.sortlevel(0)
df["A", "foo"] = "bar"
result = df.groupby(level=0).mean()
tm.assert_index_equal(result.columns, df.columns[:-1])
def test_grouping_ndarray(df):
grouped = df.groupby(df["A"].values)
result = grouped.sum()
expected = df.groupby("A").sum()
tm.assert_frame_equal(
result, expected, check_names=False
) # Note: no names when grouping by value
def test_groupby_wrong_multi_labels():
data = """index,foo,bar,baz,spam,data
0,foo1,bar1,baz1,spam2,20
1,foo1,bar2,baz1,spam3,30
2,foo2,bar2,baz1,spam2,40
3,foo1,bar1,baz2,spam1,50
4,foo3,bar1,baz2,spam1,60"""
data = read_csv(StringIO(data), index_col=0)
grouped = data.groupby(["foo", "bar", "baz", "spam"])
result = grouped.agg(np.mean)
expected = grouped.mean()
tm.assert_frame_equal(result, expected)
def test_groupby_series_with_name(df):
result = df.groupby(df["A"]).mean()
result2 = df.groupby(df["A"], as_index=False).mean()
assert result.index.name == "A"
assert "A" in result2
result = df.groupby([df["A"], df["B"]]).mean()
result2 = df.groupby([df["A"], df["B"]], as_index=False).mean()
assert result.index.names == ("A", "B")
assert "A" in result2
assert "B" in result2
def test_seriesgroupby_name_attr(df):
# GH 6265
result = df.groupby("A")["C"]
assert result.count().name == "C"
assert result.mean().name == "C"
testFunc = lambda x: np.sum(x) * 2
assert result.agg(testFunc).name == "C"
def test_consistency_name():
# GH 12363
df = DataFrame(
{
"A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
"B": ["one", "one", "two", "two", "two", "two", "one", "two"],
"C": np.random.randn(8) + 1.0,
"D": np.arange(8),
}
)
expected = df.groupby(["A"]).B.count()
result = df.B.groupby(df.A).count()
tm.assert_series_equal(result, expected)
def test_groupby_name_propagation(df):
# GH 6124
def summarize(df, name=None):
return Series({"count": 1, "mean": 2, "omissions": 3}, name=name)
def summarize_random_name(df):
# Provide a different name for each Series. In this case, groupby
# should not attempt to propagate the Series name since they are
# inconsistent.
return Series({"count": 1, "mean": 2, "omissions": 3}, name=df.iloc[0]["A"])
metrics = df.groupby("A").apply(summarize)
assert metrics.columns.name is None
metrics = df.groupby("A").apply(summarize, "metrics")
assert metrics.columns.name == "metrics"
metrics = df.groupby("A").apply(summarize_random_name)
assert metrics.columns.name is None
def test_groupby_nonstring_columns():
df = DataFrame([np.arange(10) for x in range(10)])
grouped = df.groupby(0)
result = grouped.mean()
expected = df.groupby(df[0]).mean()
tm.assert_frame_equal(result, expected)
def test_groupby_mixed_type_columns():
# GH 13432, unorderable types in py3
df = DataFrame([[0, 1, 2]], columns=["A", "B", 0])
expected = DataFrame([[1, 2]], columns=["B", 0], index=Index([0], name="A"))
result = df.groupby("A").first()
tm.assert_frame_equal(result, expected)
result = df.groupby("A").sum()
tm.assert_frame_equal(result, expected)
# TODO: Ensure warning isn't emitted in the first place
@pytest.mark.filterwarnings("ignore:Mean of:RuntimeWarning")
def test_cython_grouper_series_bug_noncontig():
arr = np.empty((100, 100))
arr.fill(np.nan)
obj = Series(arr[:, 0])
inds = np.tile(range(10), 10)
result = obj.groupby(inds).agg(Series.median)
assert result.isna().all()
def test_series_grouper_noncontig_index():
index = Index(tm.rands_array(10, 100))
values = Series(np.random.randn(50), index=index[::2])
labels = np.random.randint(0, 5, 50)
# it works!
grouped = values.groupby(labels)
# accessing the index elements causes segfault
f = lambda x: len(set(map(id, x.index)))
grouped.agg(f)
def test_convert_objects_leave_decimal_alone():
s = Series(range(5))
labels = np.array(["a", "b", "c", "d", "e"], dtype="O")
def convert_fast(x):
return Decimal(str(x.mean()))
def convert_force_pure(x):
# base will be length 0
assert len(x.values.base) > 0
return Decimal(str(x.mean()))
grouped = s.groupby(labels)
result = grouped.agg(convert_fast)
assert result.dtype == np.object_
assert isinstance(result[0], Decimal)
result = grouped.agg(convert_force_pure)
assert result.dtype == np.object_
assert isinstance(result[0], Decimal)
def test_groupby_dtype_inference_empty():
# GH 6733
df = DataFrame({"x": [], "range": np.arange(0, dtype="int64")})
assert df["x"].dtype == np.float64
result = df.groupby("x").first()
exp_index = Index([], name="x", dtype=np.float64)
expected = DataFrame({"range": Series([], index=exp_index, dtype="int64")})
tm.assert_frame_equal(result, expected, by_blocks=True)
import os
import yaml
import json
import pandas as pd
import matplotlib.pyplot as plt
from pylab import rcParams
import seaborn as sns
import numpy as np
from sklearn.linear_model import LinearRegression
import glob
import time
##########################################################################################
# Designed and developed by <NAME>
# Date : 27 Dec 2018
# Function: convertYaml2PandasDataframeT20
# This function converts yaml files to Pandas dataframe and saves as CSV
#
###########################################################################################
def convertYaml2PandasDataframeT20(infile,source,dest):
'''
Converts and saves T20 yaml files to pandas dataframes
Description
This function converts a T20 Yaml file from the source directory to a pandas data frame.
The data frame is then stored as a .csv file. The saved file is of the format
team1-team2-date.csv For e.g. Kolkata Knight Riders-Sunrisers Hyderabad-2016-05-22.csv etc
Usage
convertYaml2PandasDataframeT20(yamlFile,sourceDir=".",targetDir=".")
Arguments
yamlFile
The yaml file to be converted to dataframe and saved
sourceDir
The source directory of the yaml file
targetDir
The target directory in which the data frame is stored as a .csv file
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.in/
See Also
convertYaml2PandasDataframeT20
Examples
# In the example below ../yamldir is the source directory of the yaml file
convertYaml2PandasDataframeT20("225171.yaml",".","../data")
'''
os.chdir(source)
os.path.join(source,infile)
# Read Yaml file and convert to json
print('Converting file:',infile)
with open(infile) as f:
a=yaml.load(f, Loader=yaml.FullLoader)  # newer PyYAML versions require an explicit Loader
# 1st innings
deliveries=a['innings'][0]['1st innings']['deliveries']
#Create empty dataframe for team1
team1=pd.DataFrame()
# Loop through all the deliveries of 1st innings and append each row to dataframe
for i in range(len(deliveries)):
df = pd.DataFrame(deliveries[i])
b= df.T
team1=pd.concat([team1,b])
# Rename batsman to striker/non-striker as there is another column batsman who scored runs
team1=team1.rename(columns={'batsman':'striker'})
# All extras column names
extras=[0,'wides','byes','legbyes','noballs','penalty']
if 'extras' in team1: #Check if extras are there
# Get the columns in extras for team1
b=team1.extras.apply(pd.Series).columns
# Find the missing extras columns
diff= list(set(extras) - set(b))
print('Team1:diff:',diff)
# Rename extras dict column as there is another column extras which comes from runs_dict
team1=team1.rename(columns={'extras':'extras_dict'})
#Create new columns by splitting dictionary columns - extras and runs
team1=pd.concat([team1,team1['extras_dict'].apply(pd.Series)], axis=1)
# Add the missing columns
for col in diff:
print("team1:",col)
team1[col]=0
team1=team1.drop(columns=0)
else:
print('Team1:Extras not present')
# Rename runs columns to runs_dict
if 'runs' in team1: #Check if runs in team1
team1=team1.rename(columns={'runs':'runs_dict'})
team1=pd.concat([team1,team1['runs_dict'].apply(pd.Series)], axis=1)
else:
print('Team1:Runs not present')
if 'wicket' in team1: #Check if wicket present
# Rename wicket as wicket_dict dict column as there is another wicket column
team1=team1.rename(columns={'wicket':'wicket_dict'})
team1=pd.concat([team1,team1['wicket_dict'].apply(pd.Series)], axis=1)
else:
print('Team1: Wicket not present')
team1['team']=a['innings'][0]['1st innings']['team']
team1=team1.reset_index(inplace=False)
#Rename index to delivery
team1=team1.rename(columns={'index':'delivery'})
# 2nd innings - Check if the 2nd inning was played
if len(a['innings']) > 1: # Team2 played
deliveries=a['innings'][1]['2nd innings']['deliveries']
#Create empty dataframe for team1
team2=pd.DataFrame()
# Loop through all the deliveries of 1st innings
for i in range(len(deliveries)):
df = pd.DataFrame(deliveries[i])
b= df.T
team2=pd.concat([team2,b])
# Rename batsman to striker/non-striker as there is another column batsman who scored runs
team2=team2.rename(columns={'batsman':'striker'})
# Get the columns in extras for team1
if 'extras' in team2: #Check if extras in team2
b=team2.extras.apply(pd.Series).columns
diff= list(set(extras) - set(b))
print('Team2:diff:',diff)
# Rename extras dict column as there is another column extras which comes from runs_dict
team2=team2.rename(columns={'extras':'extras_dict'})
#Create new columns by splitting dictionary columns - extras and runs
team2=pd.concat([team2,team2['extras_dict'].apply(pd.Series)], axis=1)
# Add the missing columns
for col in diff:
print("team2:",col)
team2[col]=0
team2=team2.drop(columns=0)
else:
print('Team2:Extras not present')
# Rename runs columns to runs_dict
if 'runs' in team2:
team2=team2.rename(columns={'runs':'runs_dict'})
team2=pd.concat([team2,team2['runs_dict'].apply(pd.Series)], axis=1)
else:
print('Team2:Runs not present')
if 'wicket' in team2:
# Rename wicket as wicket_dict column as there is another column wicket
team2=team2.rename(columns={'wicket':'wicket_dict'})
team2=pd.concat([team2,team2['wicket_dict'].apply(pd.Series)], axis=1)
else:
print('Team2:wicket not present')
team2['team']=a['innings'][1]['2nd innings']['team']
team2=team2.reset_index(inplace=False)
#Rename index to delivery
team2=team2.rename(columns={'index':'delivery'})
else: # Create empty columns for team2 so that the complete DF as all columns
team2 = pd.DataFrame()
cols=['delivery', 'striker', 'bowler', 'extras_dict', 'non_striker',\
'runs_dict', 'wicket_dict', 'wides', 'noballs', 'legbyes', 'byes', 'penalty',\
'kind','player_out','fielders',\
'batsman', 'extras', 'total', 'team']
team2 = team2.reindex(columns=cols)
#Check for missing columns. It is possible that no wickets were lost in the entire innings
cols=['delivery', 'striker', 'bowler', 'extras_dict', 'non_striker',\
'runs_dict', 'wicket_dict', 'wides', 'noballs', 'legbyes', 'byes', 'penalty',\
'kind','player_out','fielders',\
'batsman', 'extras', 'total', 'team']
# Team1 - missing columns
msngCols=list(set(cols) - set(team1.columns))
print('Team1-missing columns:', msngCols)
for col in msngCols:
print("Adding:team1:",col)
team1[col]=0
# Team2 - missing columns
msngCols=list(set(cols) - set(team2.columns))
print('Team2-missing columns:', msngCols)
for col in msngCols:
print("Adding:team2:",col)
team2[col]=0
# Now both team1 and team2 should have the same columns. Concatenate
team1=team1[['delivery', 'striker', 'bowler', 'extras_dict', 'non_striker',\
'runs_dict', 'wicket_dict', 'wides', 'noballs', 'legbyes', 'byes', 'penalty',\
'kind','player_out','fielders',\
'batsman', 'extras', 'total', 'team']]
team2=team2[['delivery', 'striker', 'bowler', 'extras_dict', 'non_striker',\
'runs_dict', 'wicket_dict', 'wides', 'noballs', 'legbyes', 'byes', 'penalty',\
'kind','player_out','fielders',\
'batsman', 'extras', 'total', 'team']]
df=pd.concat([team1,team2])
#Fill NA's with 0s
df=df.fillna(0)
# Fill in INFO
print("Length of info field=",len(a['info']))
#City
try:
df['city']=a['info']['city']
except:
df['city'] =0
#Date
df['date']=a['info']['dates'][0]
#Gender
df['gender']=a['info']['gender']
#Match type
df['match_type']=a['info']['match_type']
# Neutral venue
try:
df['neutral_venue'] = a['info']['neutral_venue']
except KeyError as error:
df['neutral_venue'] = 0
#Outcome - Winner
try:
df['winner']=a['info']['outcome']['winner']
# Get the win type - runs, wickets etc
df['winType']=list(a['info']['outcome']['by'].keys())[0]
print("Wintype=",list(a['info']['outcome']['by'].keys())[0])
#Get the value of wintype
winType=list(a['info']['outcome']['by'].keys())[0]
print("Win value=",list(a['info']['outcome']['by'].keys())[0] )
# Get the win margin - runs,wickets etc
df['winMargin']=a['info']['outcome']['by'][winType]
print("win margin=", a['info']['outcome']['by'][winType])
except:
df['winner']=0
df['winType']=0
df['winMargin']=0
# Outcome - Tie
try:
df['result']=a['info']['outcome']['result']
df['resultHow']=list(a['info']['outcome'].keys())[0]
df['resultTeam'] = a['info']['outcome']['eliminator']
print(a['info']['outcome']['result'])
print(list(a['info']['outcome'].keys())[0])
print(a['info']['outcome']['eliminator'])
except:
df['result']=0
df['resultHow']=0
df['resultTeam']=0
try:
df['non_boundary'] = a['info']['non_boundary']
except KeyError as error:
df['non_boundary'] = 0
try:
df['ManOfMatch']=a['info']['player_of_match'][0]
except:
df['ManOfMatch']=0
# Identify the winner
df['overs']=a['info']['overs']
df['team1']=a['info']['teams'][0]
df['team2']=a['info']['teams'][1]
df['tossWinner']=a['info']['toss']['winner']
df['tossDecision']=a['info']['toss']['decision']
df['venue']=a['info']['venue']
# Rename column 'striker' to batsman
# Rename column 'batsman' to runs as it signifies runs scored by batsman
df=df.rename(columns={'batsman':'runs'})
df=df.rename(columns={'striker':'batsman'})
if (type(a['info']['dates'][0]) == str):
outfile=a['info']['teams'][0]+ '-' + a['info']['teams'][1] + '-' +a['info']['dates'][0] + '.csv'
else:
outfile=a['info']['teams'][0]+ '-' + a['info']['teams'][1] + '-' +a['info']['dates'][0].strftime('%Y-%m-%d') + '.csv'
destFile=os.path.join(dest,outfile)
print(destFile)
df.to_csv(destFile,index=False)
print("Dataframe shape=",df.shape)
return df, outfile
##########################################################################################
# Designed and developed by <NAME>
# Date : 27 Dec 2018
# Function: convertAllYaml2PandasDataframesT20
# This function converts all yaml files to Pandas dataframes and saves as CSV
#
###########################################################################################
def convertAllYaml2PandasDataframesT20(source,dest):
'''
Convert and save all Yaml files to pandas dataframes and save as CSV
Description
This function converts all Yaml files from the source directory to data frames. The data frames are
then stored as .csv files. The saved files are of the format team1-team2-date.csv For
e.g. England-India-2008-04-06.csv etc
Usage
convertAllYaml2PandasDataframesT20(sourceDir=".",targetDir=".")
Arguments
sourceDir
The source directory of the yaml files
targetDir
The target directory in which the data frames are stored as .csv files
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.in/
See Also
convertYaml2PandasDataframe
Examples
# In the example below ../yamldir is the source dir for the yaml files
convertAllYaml2PandasDataframesT20("../yamldir","../data")
'''
files = os.listdir(source)
for index, file in enumerate(files):
print("\n\nFile no=",index)
if file.endswith(".yaml"):
df, filename = convertYaml2PandasDataframeT20(file, source, dest)
#print(filename)
##########################################################################################
# Designed and developed by <NAME>
# Date : 27 Dec 2018
# Function: getRuns
# This function gets the runs scored by batsmen
#
###########################################################################################
def getRuns(df):
df1=df[['batsman','runs','extras','total','non_boundary']]
# Determine number of deliveries faced and runs scored
runs=df1[['batsman','runs']].groupby(['batsman'],sort=False,as_index=False).agg(['count','sum'])
# Drop level 0
runs.columns = runs.columns.droplevel(0)
runs=runs.reset_index(inplace=False)
runs.columns=['batsman','balls','runs']
return(runs)
##########################################################################################
# Designed and developed by <NAME>
# Date : 27 Dec 2018
# Function: getFours
# This function gets the fours scored by batsmen
#
###########################################################################################
def getFours(df):
df1=df[['batsman','runs','extras','total','non_boundary']]
# Get number of 4s. Check if it is boundary (non_boundary=0)
m=df1.loc[(df1.runs >=4) & (df1.runs <6) & (df1.non_boundary==0)]
# Count the number of 4s
noFours= m[['batsman','runs']].groupby('batsman',sort=False,as_index=False).count()
noFours.columns=['batsman','4s']
return(noFours)
##########################################################################################
# Designed and developed by <NAME>
# Date : 27 Dec 2018
# Function: getSixes
# This function gets the sixes scored by batsmen
#
###########################################################################################
def getSixes(df):
df1=df[['batsman','runs','extras','total','non_boundary']]
df2= df1.loc[(df1.runs ==6)]
sixes= df2[['batsman','runs']].groupby('batsman',sort=False,as_index=False).count()
sixes.columns=['batsman','6s']
return(sixes)
##########################################################################################
# Designed and developed by <NAME>
# Date : 27 Dec 2018
# Function: getExtras
# This function gets the extras for the team
#
###########################################################################################
def getExtras(df):
df3= df[['total','wides', 'noballs', 'legbyes', 'byes', 'penalty', 'extras']]
a=df3.sum().astype(int)
#Convert series to dataframe
extras=a.to_frame().T
return(extras)
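##########################################################################################
# A minimal sketch of how the batting helpers above compose. The toy ball-by-ball frame
# below is an assumption: it only mimics the columns that getRuns/getFours/getSixes/
# getExtras select from the converted match CSVs.
##########################################################################################
def _sketch_batting_helpers():
    toy = pd.DataFrame({
        'batsman':      ['A', 'A', 'B', 'A', 'B', 'B'],
        'runs':         [1, 4, 0, 6, 2, 4],
        'wides':        [0, 0, 0, 0, 0, 0],
        'noballs':      [0, 0, 0, 0, 0, 0],
        'legbyes':      [0, 0, 1, 0, 0, 0],
        'byes':         [0, 0, 0, 0, 0, 0],
        'penalty':      [0, 0, 0, 0, 0, 0],
        'extras':       [0, 0, 1, 0, 0, 0],
        'total':        [1, 4, 1, 6, 2, 4],
        'non_boundary': [0, 0, 0, 0, 0, 0],
    })
    print(getRuns(toy))    # balls faced and runs per batsman
    print(getFours(toy))   # number of boundary 4s per batsman
    print(getSixes(toy))   # number of 6s per batsman
    print(getExtras(toy))  # single-row frame with the extras totals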
##########################################################################################
# Designed and developed by <NAME>
# Date : 27 Dec 2018
# Function: teamBattingScorecardMatch
# This function returns the team batting scorecard
#
###########################################################################################
def teamBattingScorecardMatch (match,theTeam):
'''
Team batting scorecard of a team in a match
Description
This function computes and returns the batting scorecard (runs, fours, sixes, balls played) for the team
Usage
teamBattingScorecardMatch(match,theTeam)
Arguments
match
The match for which the score card is required e.g.
theTeam
Team for which scorecard required
Value
scorecard A data frame with the batting scorecard
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.in/
See Also
teamBatsmenPartnershipMatch
teamBowlingScorecardMatch
teamBatsmenVsBowlersMatch
Examples
x1,y1=teamBattingScorecardMatch(kkr_sh,"<NAME>")
print(x1)
print(y1)
'''
scorecard=pd.DataFrame()
if(match.size != 0):
team=match.loc[match['team'] == theTeam]
else:
return(scorecard,-1)
a1= getRuns(team)
b1= getFours(team)
c1= getSixes(team)
# Merge columns
d1=pd.merge(a1, b1, how='outer', on='batsman')
e=pd.merge(d1,c1,how='outer', on='batsman')
e=e.fillna(0)
e['4s']=e['4s'].astype(int)
e['6s']=e['6s'].astype(int)
e['SR']=(e['runs']/e['balls']) *100
scorecard = e[['batsman','runs','balls','4s','6s','SR']]
extras=getExtras(match)
return(scorecard,extras)
##########################################################################################
# Designed and developed by <NAME>
# Date : 27 Dec 2018
# Function: getRunsConceded
# This function gets the runs conceded by bowler
#
###########################################################################################
def getRunsConceded(df):
# Note the column batsman has the runs scored by batsman
df1=df[['bowler','runs','wides', 'noballs']]
df2=df1.groupby('bowler').sum()
# Only wides and no balls included in runs conceded
df2['runs']=(df2['runs']+df2['wides']+df2['noballs']).astype(int)
df3 = df2['runs']
return(df3)
##########################################################################################
# Designed and developed by <NAME>
# Date : 27 Dec 2018
# Function: getOvers
# This function gets the overs for bowlers
#
###########################################################################################
def getOvers(df):
df1=df[['bowler','delivery']]
df2=(df1.groupby('bowler').count()/6).astype(int)
df2.columns=['overs']
return(df2)
##########################################################################################
# Designed and developed by <NAME>
# Date : 27 Dec 2018
# Function: getMaidens
# This function gets the maiden overs for bowlers
#
###########################################################################################
def getMaidens(df):
df1=df[['bowler','delivery','runs','wides', 'noballs']]
# Get the over
df1['over']=df1.delivery.astype(int)
# Runs conceded includes wides and noballs
df1['runsConceded']=df1['runs'] + df1['wides'] + df1['noballs']
df2=df1[['bowler','over','runsConceded']]
# Compute runs in each over by bowler
df3=df2.groupby(['bowler','over']).sum()
df4=df3.reset_index(inplace=False)
# If maiden set as 1 else as 0
df4.loc[df4.runsConceded !=0,'maiden']=0
df4.loc[df4.runsConceded ==0,'maiden']=1
# Sum the maidens
df5=df4[['bowler','maiden']].groupby('bowler').sum()
return(df5)
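##########################################################################################
# A minimal sketch of the maiden-over computation above, using a hand-made two-over spell
# (an assumption, not real match data). The over number is the integer part of 'delivery',
# and an over conceding zero runs (including wides and noballs) counts as a maiden.
##########################################################################################
def _sketch_maidens():
    spell = pd.DataFrame({
        'bowler':   ['X'] * 12,
        'delivery': [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6],
        'runs':     [0, 0, 0, 0, 0, 0, 1, 0, 4, 0, 2, 0],
        'wides':    [0] * 12,
        'noballs':  [0] * 12,
    })
    print(getMaidens(spell))  # bowler X gets credited with 1 maiden (the first over)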
##########################################################################################
# Designed and developed by <NAME>
# Date : 27 Dec 2018
# Function: getWickets
# This function gets the wickets for bowlers
#
###########################################################################################
def getWickets(df):
df1=df[['bowler','kind', 'player_out', 'fielders']]
# Check if the team took wickets. Then this column will be a string
if isinstance(df1.player_out.iloc[0],str):
df2= df1[df1.player_out !='0']
df3 = df2[['bowler','player_out']].groupby('bowler').count()
else: # Did not take wickets. Set wickets as 0
df3 = df1[['bowler','player_out']].groupby('bowler').count()
df3['player_out']=0 # Set wickets to 0
return(df3)
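##########################################################################################
# A minimal sketch of the wicket extraction above. getWickets treats the string '0' in
# player_out as "no dismissal on this delivery", so the toy frame below (an assumption)
# follows that convention.
##########################################################################################
def _sketch_wickets():
    balls = pd.DataFrame({
        'bowler':     ['X', 'X', 'Y', 'Y'],
        'kind':       ['0', 'bowled', '0', 'caught'],
        'player_out': ['0', 'A', '0', 'B'],
        'fielders':   ['0', '0', '0', 'C'],
    })
    print(getWickets(balls))  # one wicket each for bowlers X and Y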
##########################################################################################
# Designed and developed by <NAME>
# Date : 27 Dec 2018
# Function: teamBowlingScorecardMatch
# This function gets the bowling scorecard
#
###########################################################################################
def teamBowlingScorecardMatch (match,theTeam):
'''
Compute and return the bowling scorecard of a team in a match
Description
This function computes and returns the bowling scorecard of a team in a match
Usage
teamBowlingScorecardMatch(match,theTeam)
Arguments
match
The match between the teams
theTeam
Team for which bowling performance is required
Value
l A data frame with the bowling scorecard of the team in the match
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.in/
See Also
teamBowlingWicketMatch
teamBowlersVsBatsmenMatch
teamBattingScorecardMatch
Examples
m=teamBowlingScorecardMatch(kkr_sh,"<NAME>")
print(m)
'''
team=match.loc[match.team== theTeam]
# Compute overs bowled
a1= getOvers(team).reset_index(inplace=False)
# Compute runs conceded
b1= getRunsConceded(team).reset_index(inplace=False)
# Compute maidens
c1= getMaidens(team).reset_index(inplace=False)
# Compute wickets
d1= getWickets(team).reset_index(inplace=False)
e1=pd.merge(a1, b1, how='outer', on='bowler')
f1= pd.merge(e1,c1,how='outer', on='bowler')
g1= pd.merge(f1,d1,how='outer', on='bowler')
g1 = g1.fillna(0)
# Compute economy rate
g1['econrate'] = g1['runs']/g1['overs']
g1.columns=['bowler','overs','runs','maidens','wicket','econrate']
g1.maidens = g1.maidens.astype(int)
g1.wicket = g1.wicket.astype(int)
return(g1)
##########################################################################################
# Designed and developed by <NAME>
# Date : 27 Dec 2018
# Function: teamBatsmenPartnershipMatch
# This function gets the batting partnerships
#
###########################################################################################
def teamBatsmenPartnershipMatch(match,theTeam,opposition,plot=True,savePic=False, dir1=".",picFile="pic1.png"):
'''
Team batting partnerships of batsmen in a match
Description
This function plots the partnerships of batsmen in a match against an opposition or it can return the data frame
Usage
teamBatsmenPartnershipMatch(match,theTeam,opposition, plot=TRUE)
Arguments
match
The match between the teams
theTeam
The team for which the batting partnerships are sought
opposition
The opposition team
plot
If plot=TRUE then a plot is created otherwise a data frame is returned
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
df The data frame of the batsmen partnerships
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.in/
See Also
teamBattingScorecardMatch
teamBowlingWicketKindMatch
teamBatsmenVsBowlersMatch
matchWormChart
Examples
teamBatsmenPartnershipMatch(kkr_sh,"Kolkata Knight Riders","Sunrisers Hyderabad",plot=True)
m=teamBatsmenPartnershipMatch(kkr_sh,"Kolkata Knight Riders","Sunrisers Hyderabad",plot=False)
print(m)
'''
df1=match.loc[match.team== theTeam]
df2= df1[['batsman','runs','non_striker']]
if plot == True:
df3=df2.groupby(['batsman','non_striker']).sum().unstack().fillna(0)
rcParams['figure.figsize'] = 10, 6
df3.plot(kind='bar',stacked=True)
plt.xlabel('Batsman')
plt.ylabel('Runs')
plt.title(theTeam + ' -batting partnership- vs ' + opposition)
plt.text(4, 30,'Data source-Courtesy:http://cricsheet.org',
horizontalalignment='center',
verticalalignment='center',
)
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
else:
df3=df2.groupby(['batsman','non_striker']).sum().reset_index(inplace=False)
return(df3)
##########################################################################################
# Designed and developed by <NAME>
# Date : 27 Dec 2018
# Function: teamBatsmenVsBowlersMatch
# This function gives the performances of batsmen vs bowlers
#
###########################################################################################
def teamBatsmenVsBowlersMatch(match,theTeam,opposition, plot=True, savePic=False, dir1=".",picFile="pic1.png"):
'''
Team batsmen against bowlers in a match
Description
This function plots the performance of batsmen versus bowlers in a match or it can return the data frame
Usage
teamBatsmenVsBowlersMatch(match,theTeam,opposition, plot=TRUE)
Arguments
match
The match between the teams
theTeam
The team for which the batting performance is sought
opposition
The opposition team
plot
If plot=TRUE then a plot is created otherwise a data frame is returned
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
b The data frame of the batsmen vs bowlers performance
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.in/
See Also
teamBowlingWicketKindMatch
teamBowlingWicketMatch
Examples
teamBatsmenVsBowlersMatch(kkr_sh,"Kolkata Knight Riders","Sunrisers Hyderabad",plot=True)
'''
df1=match.loc[match.team== theTeam]
df2= df1[['batsman','runs','bowler']]
if plot == True:
df3=df2.groupby(['batsman','bowler']).sum().unstack().fillna(0)
df3.plot(kind='bar',stacked=True)
rcParams['figure.figsize'] = 10, 6
plt.xlabel('Batsman')
plt.ylabel('Runs')
plt.title(theTeam + ' -Batsman vs Bowler- in match against ' + opposition)
plt.text(4, 30,'Data source-Courtesy:http://cricsheet.org',
horizontalalignment='center',
verticalalignment='center',
)
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
else:
df3=df2.groupby(['batsman','bowler']).sum().reset_index(inplace=False)
return(df3)
##########################################################################################
# Designed and developed by <NAME>
# Date : 27 Dec 2018
# Function: teamBowlingWicketKindMatch
# This function gives the wicket kind for bowlers
#
###########################################################################################
def teamBowlingWicketKindMatch(match,theTeam,opposition, plot=True,savePic=False, dir1=".",picFile="pic1.png"):
'''
Compute and plot the wicket kinds by bowlers in match
Description
This function computes and returns the kind of wickets (caught, bowled etc) taken by bowlers in a match between 2 teams
Usage
teamBowlingWicketKindMatch(match,theTeam,opposition,plot=TRUE)
Arguments
match
The match between the teams
theTeam
Team for which bowling performance is required
opposition
The opposition team
plot
If plot= TRUE the dataframe will be plotted else a data frame will be returned
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
None or data frame A data frame with the wicket kinds taken by the bowlers in the match
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.in/
See Also
teamBowlingWicketMatch
teamBowlingWicketRunsMatch
teamBowlersVsBatsmenMatch
Examples
teamBowlingWicketKindMatch(kkr_sh,"Kolkata Knight Riders","Sunrisers Hyderabad",plot=True)
m=teamBowlingWicketKindMatch(kkr_sh,"Kolkata Knight Riders","Sunrisers Hyderabad",plot=False)
print(m)
'''
df1=match.loc[match.team== theTeam]
df2= df1[['bowler','kind','player_out']]
# Find all rows where there was a wicket
df3=df2[df2.player_out != '0']
if plot == True:
# Find the different types of wickets for each bowler
df4=df3.groupby(['bowler','kind']).count().unstack().fillna(0)
df4.plot(kind='bar',stacked=True)
rcParams['figure.figsize'] = 10, 6
plt.xlabel('Bowler')
plt.ylabel('Wickets')
plt.title(theTeam + ' -Wicketkind vs Runs- given against ' + opposition)
plt.text(4, 30,'Data source-Courtesy:http://cricsheet.org',
horizontalalignment='center',
verticalalignment='center',
)
if(savePic):
plt.savefig(os.path.join(dir1,picFile))
else:
plt.show()
plt.gcf().clear()
else:
# Find the different types of wickets for each bowler
df4=df3.groupby(['bowler','kind']).count().reset_index(inplace=False)
return(df4)
##########################################################################################
# Designed and developed by <NAME>
# Date : 27 Dec 2018
# Function: teamBowlingWicketMatch
# This function gives the wickets for bowlers
#
###########################################################################################
def teamBowlingWicketMatch(match,theTeam,opposition, plot=True,savePic=False, dir1=".",picFile="pic1.png"):
'''
Compute and plot wickets by bowlers in match
Description
This function computes and returns the wickets taken by bowlers in a match between 2 teams
Usage
teamBowlingWicketMatch(match,theTeam,opposition, plot=TRUE)
Arguments
match
The match between the teams
theTeam
Team for which bowling performance is required
opposition
The opposition team
plot
If plot= TRUE the dataframe will be plotted else a data frame will be returned
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
None or data frame A data frame with the wickets taken by the bowlers in the match
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.in/
See Also
teamBowlingWicketMatch
teamBowlingWicketRunsMatch
teamBowlersVsBatsmenMatch
Examples
teamBowlingWicketMatch(kkr_sh,"Kolkata Knight Riders","Sunrisers Hyderabad",plot=True)
'''
df1=match.loc[match.team== theTeam]
df2= df1[['bowler','kind','player_out']]
# Find all rows where there was a wicket
df3=df2[df2.player_out != '0']
if plot == True:
# Find the different types of wickets for each bowler
df4=df3.groupby(['bowler','player_out']).count().unstack().fillna(0)
df4.plot(kind='bar',stacked=True)
rcParams['figure.figsize'] = 10, 6
plt.xlabel('Bowler')
plt.ylabel('Wickets')
plt.title(theTeam + ' -No of Wickets vs Runs conceded- against ' + opposition)
plt.text(1, 1,'Data source-Courtesy:http://cricsheet.org',
horizontalalignment='center',
verticalalignment='center',
)
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
else:
# Find the different types of wickets for each bowler
df4=df3.groupby(['bowler','player_out']).count().reset_index(inplace=False)
return(df4)
##########################################################################################
# Designed and developed by <NAME>
# Date : 27 Dec 2018
# Function: teamBowlersVsBatsmenMatch
# This function gives the bowlers vs batsmen and runs conceded
#
###########################################################################################
def teamBowlersVsBatsmenMatch (match,theTeam,opposition, plot=True, savePic=False, dir1=".",picFile="pic1.png"):
'''
Team bowlers vs batsmen in a match
Description
This function computes performance of bowlers of a team against an opposition in a match
Usage
teamBowlersVsBatsmenMatch(match,theTeam,opposition, plot=TRUE)
Arguments
match
The data frame of the match. This can be obtained with the call for e.g a <- getMatchDetails("England","Pakistan","2006-09-05",dir="../temp")
theTeam
The team against which the performance is required
opposition
The opposition team
plot
This parameter specifies if a plot is required, If plot=FALSE then a data frame is returned
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
None or dataframe If plot=TRUE there is no return value. If plot=FALSE then the dataframe is returned
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.in/
See Also
teamBattingScorecardMatch
teamBowlingWicketKindMatch
matchWormChart
Examples
teamBowlersVsBatsmenMatch(kkr_sh,"Kolkata Knight Riders","Sunrisers Hyderabad",plot=True)
'''
df1=match.loc[match.team== theTeam]
df2= df1[['batsman','runs','bowler']]
if plot == True:
df3=df2.groupby(['batsman','bowler']).sum().unstack().fillna(0)
df3.plot(kind='bar',stacked=True)
rcParams['figure.figsize'] = 10, 6
plt.xlabel('Batsman')
plt.ylabel('Runs')
plt.title(theTeam + ' -Bowler vs Batsman- against ' + opposition)
plt.text(4, 20,'Data source-Courtesy:http://cricsheet.org',
horizontalalignment='center',
verticalalignment='center',
)
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
else:
df3=df2.groupby(['batsman','bowler']).sum().reset_index(inplace=False)
return(df3)
##########################################################################################
# Designed and developed by <NAME>
# Date : 27 Dec 2018
# Function: matchWormChart
# This function draws the match worm chart
#
###########################################################################################
def matchWormChart(match,team1,team2,plot=True, savePic=False, dir1=".",picFile="pic1.png"):
'''
Plot the match worm graph
Description
This function plots the match worm graph between 2 teams in a match
Usage
matchWormChart(match,team1,team2)
Arguments
match
The dataframe of the match
team1
The 1st team of the match
team2
the 2nd team in the match
plot
If plot=TRUE then a plot is created otherwise a data frame is returned
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
none
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.in/
See Also
teamBatsmenVsBowlersMatch
teamBowlingWicketKindMatch
Examples
## Not run:
#Get the match details
a <- getMatchDetails("England","Pakistan","2006-09-05",dir="../temp")
# Plot tne match worm plot
matchWormChart(kkr_sh,"Kolkata Knight Riders","Sunrisers Hyderabad")
'''
df1=match.loc[match.team==team1]
df2=match.loc[match.team==team2]
df3=df1[['delivery','total']]
df3['cumsum']=df3.total.cumsum()
df4 = df2[['delivery','total']]
df4['cumsum'] = df4.total.cumsum()
df31 = df3[['delivery','cumsum']]
df41 = df4[['delivery','cumsum']]
#plt.plot(df3.delivery.values,df3.cumsum.values)
df51= pd.merge(df31,df41,how='outer', on='delivery').dropna()
df52=df51.set_index('delivery')
df52.columns = [team1,team2]
df52.plot()
rcParams['figure.figsize'] = 10, 6
plt.xlabel('Delivery')
plt.ylabel('Runs')
plt.title('Match worm chart ' + team1 + ' vs ' + team2)
plt.text(10, 10,'Data source-Courtesy:http://cricsheet.org',
horizontalalignment='center',
verticalalignment='center',
)
if plot == True:
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
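##########################################################################################
# A minimal sketch of the worm-chart arithmetic used above, without the plotting: each
# side's worm is simply the cumulative sum of 'total' in delivery order. The toy innings
# below is an assumption.
##########################################################################################
def _sketch_worm_values():
    innings = pd.DataFrame({
        'delivery': [0.1, 0.2, 0.3, 0.4],
        'total':    [1, 4, 0, 6],
    })
    worm = innings[['delivery', 'total']].copy()
    worm['cumruns'] = worm.total.cumsum()
    print(worm)  # the cumruns column is the worm: 1, 5, 5, 11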
##########################################################################################
# Designed and developed by <NAME>
# Date : 26 Jan 2019
# Function: getAllMatchesBetweenTeams
# This function gets all the matches between 2 IPL teams
#
###########################################################################################
def getAllMatchesBetweenTeams(team1,team2,dir=".",save=False,odir="."):
'''
Get data on all matches between 2 opposing teams
Description
This function gets all the data on matches between opposing IPL teams This can be saved
by the user which can be used in function in which analyses are done for all matches
between these teams.
Usage
getAllMatchesBetweenTeams(team1,team2,dir=".",save=FALSE)
Arguments
team1
One of the team in consideration e.g (KKR, CSK etc)
team2
The other team for which matches are needed e.g( MI, GL)
dir
The directory which has the .csv files of matches between teams
save
Default=False. This parameter indicates whether the combined data frame
needs to be saved or not. It is recommended to save this large dataframe as
the creation of this data frame takes several seconds depending on the number of matches
Value
matches - The combined data frame
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
See Also
plotWinsbyTossDecision
teamBowlersVsBatsmenOppnAllMatches
'''
# Create the 2 combinations
t1 = team1 +'-' + team2 + '*.csv'
t2 = team2 + '-' + team1 + '*.csv'
path1= os.path.join(dir,t1)
path2 = os.path.join(dir,t2)
files = glob.glob(path1) + glob.glob(path2)
print(len(files))
# Save as CSV only if there are matches between the 2 teams
if len(files) !=0:
df = pd.DataFrame()
for file in files:
df1 = pd.read_csv(file)
df=pd.concat([df,df1])
if save==True:
dest= team1 +'-' + team2 + '-allMatches.csv'
output=os.path.join(odir,dest)
df.to_csv(output)
else:
return(df)
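##########################################################################################
# A hedged usage sketch for getAllMatchesBetweenTeams. The '../data' directory is an
# assumption: it stands for wherever the per-match CSVs written by
# convertAllYaml2PandasDataframesT20 live. With save=False the combined dataframe is
# returned (or None when no matching CSVs are found).
##########################################################################################
def _sketch_get_all_matches():
    kkr_sh = getAllMatchesBetweenTeams("Kolkata Knight Riders", "Sunrisers Hyderabad",
                                       dir="../data", save=False)
    if kkr_sh is not None:
        print(kkr_sh.shape)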
##########################################################################################
# Designed and developed by <NAME>
# Date : 26 Jan 2019
# Function: saveAllMatchesBetween2IPLTeams
# This function saves all the matches between all IPL teams
#
###########################################################################################
def saveAllMatchesBetween2IPLTeams(dir1,odir="."):
'''
Saves all matches between 2 IPL teams as dataframe
Description
This function saves all matches between 2 IPL teams as a single dataframe in the
current directory
Usage
saveAllMatchesBetween2IPLTeams(dir)
Arguments
dir
Directory to store saved matches
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.in/
See Also
teamBowlingScorecardOppnAllMatches
teamBatsmenVsBowlersOppnAllMatches
'''
teams = ["Chennai Super Kings","Deccan Chargers","Delhi Daredevils",
"Kings XI Punjab", 'Kochi Tuskers Kerala',"Kolkata Knight Riders",
"Mumbai Indians", "Pune Warriors","Rajasthan Royals",
"Royal Challengers Bangalore","Sunrisers Hyderabad","Gujarat Lions",
"Rising Pune Supergiants"]
for team1 in teams:
for team2 in teams:
if team1 != team2:
print("Team1=",team1,"team2=", team2)
getAllMatchesBetweenTeams(team1,team2,dir=dir1,save=True,odir=odir)
time.sleep(2) #Sleep before next save
return
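##########################################################################################
# A hedged usage sketch for saveAllMatchesBetween2IPLTeams. Both directories are
# assumptions: '../data' holds the converted per-match CSVs and '../allMatches' is where
# the combined team1-team2-allMatches.csv files get written.
##########################################################################################
def _sketch_save_all_team_pairs():
    saveAllMatchesBetween2IPLTeams("../data", odir="../allMatches")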
##########################################################################################
# Designed and developed by <NAME>
# Date : 26 Jan 2019
# Function: teamBatsmenPartnershiOppnAllMatches
# This function gets the partnerships for a team in all matches
#
###########################################################################################
def teamBatsmenPartnershiOppnAllMatches(matches,theTeam,report="summary",top=5):
'''
Team batting partnership against a opposition all IPL matches
Description
This function computes the performance of batsmen against all bowlers of an opposition in
all matches. This function returns a dataframe
Usage
teamBatsmenPartnershiOppnAllMatches(matches,theTeam,report="summary")
Arguments
matches
All the matches of the team against the oppositions
theTeam
The team for which the batting partnerships are sought
report
If the report="summary" then the list of top batsmen with the highest partnerships
is displayed. If report="detailed" then the detailed break up of partnership is returned
as a dataframe
top
The number of players to be displayed from the top
Value
partnerships The data frame of the partnerships
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
See Also
teamBatsmenVsBowlersOppnAllMatchesPlot
teamBatsmenPartnershipOppnAllMatchesChart
'''
df1 = matches[matches.team == theTeam]
df2 = df1[['batsman','non_striker','runs']]
# Compute partnerships
df3=df2.groupby(['batsman','non_striker']).sum().reset_index(inplace=False)
df3.columns = ['batsman','non_striker','partnershipRuns']
# Compute total partnerships
df4 = df3.groupby('batsman').sum().reset_index(inplace=False).sort_values('partnershipRuns',ascending=False)
df4.columns = ['batsman','totalPartnershipRuns']
# Select top 5
df5 = df4.head(top)
df6= pd.merge(df5,df3,on='batsman')
if report == 'summary':
return(df5)
elif report == 'detailed':
return(df6)
else:
print("Invalid option")
return
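##########################################################################################
# A hedged usage sketch for teamBatsmenPartnershiOppnAllMatches. kkr_sh is assumed to be
# the combined KKR-SRH dataframe returned by getAllMatchesBetweenTeams (or read back from
# a saved ...-allMatches.csv file).
##########################################################################################
def _sketch_partnership_reports(kkr_sh):
    summary = teamBatsmenPartnershiOppnAllMatches(kkr_sh, "Kolkata Knight Riders",
                                                  report="summary", top=5)
    detailed = teamBatsmenPartnershiOppnAllMatches(kkr_sh, "Kolkata Knight Riders",
                                                   report="detailed", top=5)
    print(summary)   # total partnership runs of the top 5 batsmen
    print(detailed)  # the same batsmen, broken up by partner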
##########################################################################################
# Designed and developed by <NAME>
# Date : 26 Jan 2019
# Function: teamBatsmenPartnershipOppnAllMatchesChart
# This function plots the partnerships for a team in all matches
#
###########################################################################################
def teamBatsmenPartnershipOppnAllMatchesChart(matches,main,opposition,plot=True,top=5,partnershipRuns=20,savePic=False, dir1=".",picFile="pic1.png"):
'''
Plot of team partnership in all IPL matches against an opposition
Description
This function plots the batting partnerships of a team against an opposition in all
matches. This function can also return a dataframe with the batting partnerships
Usage
teamBatsmenPartnershipOppnAllMatchesChart(matches,main,opposition, plot=TRUE,top=5,partnershipRuns=20))
Arguments
matches
All the matches of the team against all oppositions
main
The main team for which the batting partnerships are sought
opposition
The opposition team for which the batting partnerships are sought
plot
Whether the partnerships have to be rendered as a plot. If plot=FALSE the data frame is returned
top
The number of players from the top to be included in chart
partnershipRuns
The minimum number of partnership runs to include for the chart
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
None or partnerships
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
See Also
teamBatsmenPartnershiplOppnAllMatches
saveAllMatchesBetween2IPLTeams
teamBatsmenVsBowlersAllOppnAllMatchesPlot
teamBatsmenVsBowlersOppnAllMatches
'''
df1 = matches[matches.team == main]
df2 = df1[['batsman','non_striker','runs']]
# Compute partnerships
df3=df2.groupby(['batsman','non_striker']).sum().reset_index(inplace=False)
df3.columns = ['batsman','non_striker','partnershipRuns']
# Compute total partnerships
df4 = df3.groupby('batsman').sum().reset_index(inplace=False).sort_values('partnershipRuns',ascending=False)
df4.columns = ['batsman','totalPartnershipRuns']
# Select top 5
df5 = df4.head(top)
df6= pd.merge(df5,df3,on='batsman')
df7 = df6[['batsman','non_striker','partnershipRuns']]
# Keep only partnerships above the partnershipRuns cutoff, as there are too many rows otherwise
df8 = df7[df7['partnershipRuns'] > partnershipRuns]
df9=df8.groupby(['batsman','non_striker'])['partnershipRuns'].sum().unstack().fillna(0)
# Note: Can also use the below code -*************
#df8=df7.pivot(columns='non_striker',index='batsman').fillna(0)
if plot == True:
df9.plot(kind='bar',stacked=True,legend=False,fontsize=8)
plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.5),fontsize=8)
plt.title('Partnership runs between ' + main + '-' + opposition)
plt.xlabel('Batsman')
plt.ylabel('Partnership runs')
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
else:
return(df7)
##########################################################################################
# Designed and developed by <NAME>
# Date : 26 Jan 2019
# Function: teamBatsmenVsBowlersOppnAllMatches
# This function plots the performance of batsmen against bowlers
#
###########################################################################################
def teamBatsmenVsBowlersOppnAllMatches(matches,main,opposition,plot=True,top=5,runsScored=20,savePic=False, dir1=".",picFile="pic1.png"):
'''
Description
This function computes the performance of batsmen against the bowlers of an opposition in all matches
Usage
teamBatsmenVsBowlersOppnAllMatches(matches,main,opposition,plot=TRUE,top=5,runsScored=20)
Arguments
matches
All the matches of the team against one specific opposition
main
The team for which the the batting partnerships are sought
opposition
The opposition team
plot
If plot=True then a plot will be displayed else a data frame will be returned
top
The number of players to be plotted or returned as a dataframe. The default is 5
runsScored
The cutoff limit for runs scored against a bowler
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
None or dataframe
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
See Also
teamBatsmenVsBowlersOppnAllMatchesPlot
teamBatsmenPartnershipOppnAllMatchesChart
teamBatsmenVsBowlersOppnAllMatches
'''
df1 = matches[matches.team == main]
df2 = df1[['batsman','bowler','runs']]
# Runs scored by bowler
df3=df2.groupby(['batsman','bowler']).sum().reset_index(inplace=False)
df3.columns = ['batsman','bowler','runsScored']
# Need to pick the 'top' number of bowlers
df4 = df3.groupby('batsman').sum().reset_index(inplace=False).sort_values('runsScored',ascending=False)
df4.columns = ['batsman','totalRunsScored']
df5 = df4.head(top)
df6= pd.merge(df5,df3,on='batsman')
df7 = df6[['batsman','bowler','runsScored']]
# Keep only rows above the runsScored cutoff, as there are too many rows otherwise
df8 = df7[df7['runsScored'] >runsScored]
df9=df8.groupby(['batsman','bowler'])['runsScored'].sum().unstack().fillna(0)
# Note: Can also use the below code -*************
#df8=df7.pivot(columns='bowler',index='batsman').fillna(0)
if plot == True:
ax=df9.plot(kind='bar',stacked=False,legend=False,fontsize=8)
plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.5),fontsize=8)
plt.title('Runs against bowlers ' + main + '-' + opposition)
plt.xlabel('Batsman')
plt.ylabel('Runs scored')
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
else:
return(df7)
##########################################################################################
# Designed and developed by <NAME>
# Date : 26 Jan 2019
# Function: teamBattingScorecardOppnAllMatches
# This function computes the batting scorecard for all matches
#
###########################################################################################
def teamBattingScorecardOppnAllMatches(matches,main,opposition):
'''
Team batting scorecard of a team in all matches against an opposition
Description
This function computes and returns the batting scorecard (runs, fours, sixes, balls played)
for the team in all matches against an opposition
Usage
teamBattingScorecardOppnAllMatches(matches,main,opposition)
Arguments
matches
the data frame of all matches between a team and an opposition obtained with the call getAllMatchesBetweenTeams()
main
The main team for which scorecard required
opposition
The opposition team
Value
scorecard The scorecard of all the matches
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
See Also
teamBatsmenPartnershipAllOppnAllMatches
teamBowlingWicketKindOppositionAllMatches
'''
team=matches.loc[matches.team== main]
a1= getRuns(team)
b1= getFours(team)
c1= getSixes(team)
# Merge columns
d1=pd.merge(a1, b1, how='outer', on='batsman')
e=pd.merge(d1,c1,how='outer', on='batsman')
e=e.fillna(0)
e['4s']=e['4s'].astype(int)
e['6s']=e['6s'].astype(int)
e['SR']=(e['runs']/e['balls']) *100
scorecard = e[['batsman','runs','balls','4s','6s','SR']].sort_values('runs',ascending=False)
return(scorecard)
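##########################################################################################
# A hedged usage sketch for teamBattingScorecardOppnAllMatches, again assuming kkr_sh is
# the combined KKR-SRH dataframe from getAllMatchesBetweenTeams.
##########################################################################################
def _sketch_batting_scorecard_oppn(kkr_sh):
    sc = teamBattingScorecardOppnAllMatches(kkr_sh, "Kolkata Knight Riders",
                                            "Sunrisers Hyderabad")
    print(sc.head(10))  # runs, balls, 4s, 6s and strike rate, sorted by runs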
##########################################################################################
# Designed and developed by <NAME>
# Date : 26 Jan 2019
# Function: teamBattingScorecardOppnAllMatches
# This function computes the batting scorecard for all matches
#
###########################################################################################
def teamBowlingScorecardOppnAllMatches(matches,main,opposition):
'''
Team bowling scorecard opposition all matches
Description
This function computes and returns the bowling dataframe of the best bowlers
(overs, maidens, runs, wickets, economy rate) against an IPL opposition in all matches
Usage
teamBowlingScorecardOppnAllMatches(matches,main,opposition)
Arguments
matches
The matches of the team against all oppositions and all matches
main
Team for which bowling performance is required
opposition
The opposing IPL team
Value
l A data frame with the bowling performance in all matches against the opposition
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
See Also
teamBowlingWicketKindOppositionAllMatches
teamBatsmenVsBowlersOppnAllMatches
plotWinsbyTossDecision
'''
team=matches.loc[matches.team== main]
# Compute overs bowled
a1= getOvers(team).reset_index(inplace=False)
# Compute runs conceded
b1= getRunsConceded(team).reset_index(inplace=False)
# Compute maidens
c1= getMaidens(team).reset_index(inplace=False)
# Compute wickets
d1= getWickets(team).reset_index(inplace=False)
e1=pd.merge(a1, b1, how='outer', on='bowler')
f1= pd.merge(e1,c1,how='outer', on='bowler')
g1= pd.merge(f1,d1,how='outer', on='bowler')
g1 = g1.fillna(0)
# Compute economy rate
g1['econrate'] = g1['runs']/g1['overs']
g1.columns=['bowler','overs','runs','maidens','wicket','econrate']
g1.maidens = g1.maidens.astype(int)
g1.wicket = g1.wicket.astype(int)
g2 = g1.sort_values('wicket',ascending=False)
return(g2)
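##########################################################################################
# A hedged usage sketch for teamBowlingScorecardOppnAllMatches with the same assumed
# kkr_sh dataframe.
##########################################################################################
def _sketch_bowling_scorecard_oppn(kkr_sh):
    bowl = teamBowlingScorecardOppnAllMatches(kkr_sh, "Kolkata Knight Riders",
                                              "Sunrisers Hyderabad")
    print(bowl.head(10))  # overs, runs, maidens, wickets and economy rate, sorted by wickets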
##########################################################################################
# Designed and developed by <NAME>
# Date : 26 Jan 2019
# Function: teamBowlingWicketKindOppositionAllMatches
# This function plots the performance of bowlers and the kind of wickets
#
###########################################################################################
def teamBowlingWicketKindOppositionAllMatches(matches,main,opposition,plot=True,top=5,wickets=2,savePic=False, dir1=".",picFile="pic1.png"):
'''
Team bowlers wicket kind against an opposition in all matches
Description
This function computes performance of bowlers of a team and the wicket kind against
an opposition in all matches against the opposition
Usage
teamBowlersWicketKindOppnAllMatches(matches,main,opposition,plot=TRUE,top=5,wickets=2)
Arguments
matches
The data frame of all matches between a team and the opposition.
main
The team for which the performance is required
opposition
The opposing team
plot
If plot=True then a plot is displayed else a dataframe is returned
top
The top number of players to be considered
wickets
The minimum number of wickets as cutoff
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
None or dataframe The return depends on the value of the plot
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
See Also
plotWinsByRunOrWickets
teamBowlersVsBatsmenOppnAllMatches
'''
df1=matches.loc[matches.team== main]
df2= df1[['bowler','kind','player_out']]
# Find all rows where there was a wicket
df2=df2[df2.player_out != '0']
# Number of wickets taken by bowler
df3=df2.groupby(['bowler','kind']).count().reset_index(inplace=False)
df3.columns = ['bowler','kind','wickets']
# Need to pick the 'top' number of bowlers by wickets
df4 = df3.groupby('bowler').sum().reset_index(inplace=False).sort_values('wickets',ascending=False)
df4.columns = ['bowler','totalWickets']
df5 = df4.head(top)
df6= pd.merge(df5,df3,on='bowler')
df7 = df6[['bowler','kind','wickets']]
# Keep only bowlers above the wickets cutoff, as there are too many rows otherwise
df8 = df7[df7['wickets'] >wickets]
df9=df8.groupby(['bowler','kind'])['wickets'].sum().unstack().fillna(0)
# Note: Can also use the below code -*************
#df9=df8.pivot(columns='bowler',index='batsman').fillna(0)
if plot == True:
ax=df9.plot(kind='bar',stacked=False,legend=False,fontsize=8)
plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.5),fontsize=8)
plt.title('Wicket kind by bowlers of ' + main + '-' + opposition)
plt.xlabel('Bowler')
plt.ylabel('Total wickets')
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
else:
return(df7)
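##########################################################################################
# A hedged usage sketch for teamBowlingWicketKindOppositionAllMatches with the same
# assumed kkr_sh dataframe; plot=False returns the underlying dataframe instead of a chart.
##########################################################################################
def _sketch_wicket_kind_oppn(kkr_sh):
    wk = teamBowlingWicketKindOppositionAllMatches(kkr_sh, "Kolkata Knight Riders",
                                                   "Sunrisers Hyderabad", plot=False)
    print(wk)  # bowler, dismissal kind and wickets for the top wicket-takers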
##########################################################################################
# Designed and developed by <NAME>
# Date : 26 Jan 2019
# Function: teamBowlersVsBatsmenOppnAllMatches
# This function plots the performance of the bowlers against batsmen
#
###########################################################################################
def teamBowlersVsBatsmenOppnAllMatches(matches,main,opposition,plot=True,top=5,runsConceded=10, savePic=False, dir1=".",picFile="pic1.png"):
'''
Team bowlers vs batsmen against an opposition in all matches
Description
This function computes performance of bowlers of a team against an opposition in all
matches against the opposition
Usage
teamBowlersVsBatsmenOppnAllMatches(matches,main,opposition,plot=True,top=5,runsConceded=10))
Arguments
matches
The data frame of all matches between a team and the opposition.
main
The main team against which the performance is required
opposition
The opposition team against which the performance is require
plot
If true plot else return dataframe
top
The number of rows to be returned. 5 by default
runsConceded
The minimum number of runs to use as cutoff
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
dataframe The dataframe with all performances
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
See Also
teamBatsmenPartnershipOppnAllMatches
teamBowlersVsBatsmenOppnAllMatchesRept
'''
df1=matches.loc[matches.team== main]
df2= df1[['bowler','batsman','runs']]
# Number of wickets taken by bowler
df3=df2.groupby(['bowler','batsman']).sum().reset_index(inplace=False)
df3.columns = ['bowler','batsman','runsConceded']
# Need to pick the 'top' number of bowlers by wickets
df4 = df3.groupby('bowler').sum().reset_index(inplace=False).sort_values('runsConceded',ascending=False)
df4.columns = ['bowler','totalRunsConceded']
df5 = df4.head(top)
df6= pd.merge(df5,df3,on='bowler')
df7 = df6[['bowler','batsman','runsConceded']]
# Keep only rows above the runsConceded cutoff, as there are too many rows otherwise
df8 = df7[df7['runsConceded'] >runsConceded]
df9=df8.groupby(['bowler','batsman'])['runsConceded'].sum().unstack().fillna(0)
# Note: Can also use the below code -*************
#df9=df8.pivot(columns='bowler',index='batsman').fillna(0)
if plot == True:
ax=df9.plot(kind='bar',stacked=False,legend=False,fontsize=8)
plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.5),fontsize=8)
plt.title('Runs conceded by bowlers of ' + main + '-' + opposition)
plt.xlabel('Bowler')
plt.ylabel('Total runs')
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
else:
return(df7)
##########################################################################################
# Designed and developed by <NAME>
# Date : 26 Jan 2019
# Function: plotWinLossBetweenTeams
# This function plots the number of wins and losses in teams
#
###########################################################################################
def plotWinLossBetweenTeams(matches,team1,team2,plot=True, savePic=False, dir1=".",picFile="pic1.png"):
'''
Plot wins for each team
Description
This function computes and plots number of wins for each team in all their encounters.
The plot includes the number of wins by each team and the matches with no result
Usage
plotWinLossBetweenTeams(matches)
Arguments
matches
The dataframe with all matches between 2 IPL teams
team1
The 1st team
team2
The 2nd team
plot
If plot=TRUE then a plot is created otherwise a data frame is returned
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
https://github.com/tvganesh/yorkrData
See Also
teamBattingScorecardOppnAllMatches
teamBatsmenPartnershipOppnAllMatchesChart
getAllMatchesBetweenTeams
'''
a=matches[['date','winner']].groupby(['date','winner']).count().reset_index(inplace=False)
b=a.groupby('winner').count().reset_index(inplace=False)
b.columns = ['winner','number']
sns.barplot(x='winner',y='number',data=b)
plt.xlabel('Winner')
plt.ylabel('Number')
plt.title("Wins vs losses " + team1 + "-"+ team2)
if(plot==True):
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
return
##########################################################################################
# Designed and developed by <NAME>
# Date : 26 Jan 2019
# Function: plotWinsByRunOrWickets
# This function plots how the win for the team was whether by runs or wickets
#
###########################################################################################
def plotWinsByRunOrWickets(matches,team1,plot=True, savePic=False, dir1=".",picFile="pic1.png"):
'''
Plot whether the wins for the team was by runs or wickets
Description
    This function computes and plots the number of wins by runs vs the number of wins
by wickets
Usage
plotWinsByRunOrWickets(matches,team1)
Arguments
matches
The dataframe with all matches between 2 IPL teams
team1
The team for which the plot has to be done
plot
If plot=TRUE then a plot is created otherwise a data frame is returned
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
None
Note
    Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
See Also
teamBowlingScorecardOppnAllMatches
teamBatsmenPartnershipOppnAllMatchesChart
getAllMatchesBetweenTeams
'''
# Get the number of matches won
df= matches.loc[matches.winner == team1]
a=df[['date','winType']].groupby(['date','winType']).count().reset_index(inplace=False)
b=a.groupby('winType').count().reset_index(inplace=False)
b.columns = ['winType','number']
sns.barplot(x='winType',y='number',data=b)
plt.xlabel('Win Type - Runs or wickets')
plt.ylabel('Number')
plt.title("Win type for team -" + team1 )
if(plot==True):
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
return
##########################################################################################
# Designed and developed by <NAME>
# Date : 26 Jan 2019
# Function: plotWinsbyTossDecision
# This function plots the number of wins/losses for team based on its toss decision
#
###########################################################################################
def plotWinsbyTossDecision(matches,team1,tossDecision='bat', plot=True, savePic=False, dir1=".",picFile="pic1.png"):
'''
    Plot wins vs losses for the team based on its toss decision
    Description
    This function computes and plots the number of wins vs losses for the team
    when its toss decision was to bat or to field
Usage
plotWinsbyTossDecision(matches,team1,tossDecision='bat')
Arguments
matches
The dataframe with all matches between 2 IPL teams
team1
The team for which the plot has to be done
plot
If plot=TRUE then a plot is created otherwise a data frame is returned
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
See Also
teamBowlingScorecardOppnAllMatches
teamBatsmenPartnershipOppnAllMatchesChart
teamBowlingWicketKindOppositionAllMatches
'''
df=matches.loc[(matches.tossDecision==tossDecision) & (matches.tossWinner==team1)]
a=df[['date','winner']].groupby(['date','winner']).count().reset_index(inplace=False)
b=a.groupby('winner').count().reset_index(inplace=False)
b.columns = ['winner','number']
sns.barplot(x='winner',y='number',data=b)
plt.xlabel('Winner ' + 'when toss decision was to :' + tossDecision)
plt.ylabel('Number')
plt.title('Wins vs losses for ' + team1 + ' when toss decision was to ' + tossDecision )
if(plot==True):
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
return
##########################################################################################
# Designed and developed by <NAME>
# Date : 1 Feb 2019
# Function: getAllMatchesAllOpposition
# This function gets all the matches between a IPL team and all opposition
#
###########################################################################################
def getAllMatchesAllOpposition(team1,dir=".",save=False,odir="."):
'''
Get data on all matches against all opposition
Description
This function gets all the matches for a particular IPL team for
against all other oppositions. It constructs a huge dataframe of
all these matches. This can be saved by the user which can be used in
function in which analyses are done for all matches and for all oppositions.
Usage
getAllMatchesAllOpposition(team,dir=".",save=FALSE)
Arguments
team
The team for which all matches and all opposition has to be obtained e.g. India, Pakistan
dir
    The directory in which the match .csv files exist
save
    Default=False. This parameter indicates whether the combined data frame needs to be saved or not. It is recommended to save this large dataframe as the creation of this data frame takes several seconds depending on the number of matches
Value
match The combined data frame
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
See Also
saveAllMatchesAllOppositionIPLT20
teamBatsmenPartnershiAllOppnAllMatches
'''
    # Get all matches played by the team
t1 = '*' + team1 +'*.csv'
path= os.path.join(dir,t1)
files = glob.glob(path)
print(len(files))
    # Proceed only if there are matches for the team
if len(files) !=0:
df = pd.DataFrame()
for file in files:
df1 = pd.read_csv(file)
df=pd.concat([df,df1])
if save==True:
dest= team1 + '-allMatchesAllOpposition.csv'
output=os.path.join(odir,dest)
df.to_csv(output)
else:
return(df)
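# Illustrative usage (a sketch; the source directory of match CSVs is an assumption):
# csk_all = getAllMatchesAllOpposition('Chennai Super Kings', dir='./iplMatchesCSV', save=True, odir='.')
# The saved '<team>-allMatchesAllOpposition.csv' file is the input expected by the
# *AllOppnAllMatches functions further below.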
##########################################################################################
# Designed and developed by <NAME>
# Date : 1 Feb 2019
# Function: saveAllMatchesAllOppositionIPLT20
# This function saves all the matches between all IPL team and all opposition
#
###########################################################################################
def saveAllMatchesAllOppositionIPLT20(dir1,odir="."):
'''
Saves matches against all IPL teams as dataframe and CSV for an IPL team
Description
    This function saves all IPL matches against all opposition as a single
dataframe in the current directory
Usage
saveAllMatchesAllOppositionIPLT20(dir)
Arguments
dir
Directory to store saved matches
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
See Also
convertYaml2PandasDataframeT20
teamBattingScorecardMatch
'''
teams = ["Chennai Super Kings","Deccan Chargers","Delhi Daredevils",
"Kings XI Punjab", 'Kochi Tuskers Kerala',"Kolkata Knight Riders",
"Mumbai Indians", "Pune Warriors","Rajasthan Royals",
"Royal Challengers Bangalore","Sunrisers Hyderabad","Gujarat Lions",
"Rising Pune Supergiants"]
for team in teams:
print("Team=",team)
getAllMatchesAllOpposition(team,dir=dir1,save=True,odir=odir)
time.sleep(2) #Sleep before next save
##########################################################################################
# Designed and developed by <NAME>
# Date : 1 Feb 2019
# Function: teamBatsmenPartnershiAllOppnAllMatches
# This function computes the partnerships of an IPK team against all other IPL teams
#
###########################################################################################
def teamBatsmenPartnershiAllOppnAllMatches(matches,theTeam,report="summary",top=5):
'''
    Team batting partnerships against all oppositions in all IPL matches
    Description
    This function computes the batting partnerships of a team against all oppositions in
    all matches. This function returns a dataframe
Usage
teamBatsmenPartnershiAllOppnAllMatches(matches,theTeam,report="summary")
Arguments
matches
All the matches of the team against the oppositions
theTeam
    The team for which the batting partnerships are sought
report
If the report="summary" then the list of top batsmen with the highest partnerships
is displayed. If report="detailed" then the detailed break up of partnership is returned
as a dataframe
top
The number of players to be displayed from the top
Value
partnerships The data frame of the partnerships
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
See Also
teamBatsmenVsBowlersOppnAllMatchesPlot
teamBatsmenPartnershipOppnAllMatchesChart
'''
df1 = matches[matches.team == theTeam]
df2 = df1[['batsman','non_striker','runs']]
# Compute partnerships
df3=df2.groupby(['batsman','non_striker']).sum().reset_index(inplace=False)
df3.columns = ['batsman','non_striker','partnershipRuns']
# Compute total partnerships
df4 = df3.groupby('batsman').sum().reset_index(inplace=False).sort_values('partnershipRuns',ascending=False)
df4.columns = ['batsman','totalPartnershipRuns']
# Select top 5
df5 = df4.head(top)
df6= pd.merge(df5,df3,on='batsman')
if report == 'summary':
return(df5)
elif report == 'detailed':
return(df6)
else:
print("Invalid option")
##########################################################################################
# Designed and developed by <NAME>
# Date : 1 Feb 2019
# Function: teamBatsmenPartnershipAllOppnAllMatchesChart
# This function computes and plots the partnerships of an IPK team against all other IPL teams
#
###########################################################################################
def teamBatsmenPartnershipAllOppnAllMatchesChart(matches,main,plot=True,top=5,partnershipRuns=20, savePic=False, dir1=".",picFile="pic1.png"):
'''
Plots team batting partnership all matches all oppositions
Description
    This function plots the batting partnerships of a team against all oppositions in all matches. This function also returns a dataframe with the batting partnerships
Usage
    teamBatsmenPartnershipAllOppnAllMatchesChart(matches,main,plot=True,top=5,partnershipRuns=20)
Arguments
matches
All the matches of the team against all oppositions
    main
    The main team for which the batting partnerships are sought
plot
    Whether the partnerships have to be rendered as a plot. If plot=FALSE the data frame is returned
top
The number of players from the top to be included in chart
partnershipRuns
The minimum number of partnership runs to include for the chart
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
None or partnerships
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
'''
df1 = matches[matches.team == main]
df2 = df1[['batsman','non_striker','runs']]
# Compute partnerships
df3=df2.groupby(['batsman','non_striker']).sum().reset_index(inplace=False)
df3.columns = ['batsman','non_striker','partnershipRuns']
# Compute total partnerships
df4 = df3.groupby('batsman').sum().reset_index(inplace=False).sort_values('partnershipRuns',ascending=False)
df4.columns = ['batsman','totalPartnershipRuns']
# Select top 5
df5 = df4.head(top)
df6= pd.merge(df5,df3,on='batsman')
df7 = df6[['batsman','non_striker','partnershipRuns']]
    # Remove rows where partnershipRuns is below the cutoff as there are too many
df8 = df7[df7['partnershipRuns'] > partnershipRuns]
df9=df8.groupby(['batsman','non_striker'])['partnershipRuns'].sum().unstack(fill_value=0)
# Note: Can also use the below code -*************
#df8=df7.pivot(columns='non_striker',index='batsman').fillna(0)
if plot == True:
df9.plot(kind='bar',stacked=True,legend=False,fontsize=8)
plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.5),fontsize=8)
        plt.title('Batting partnerships of ' + main + ' against all teams')
plt.xlabel('Batsman')
plt.ylabel('Partnership runs')
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
else:
return(df7)
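# Illustrative usage (a sketch; assumes the CSV was created with getAllMatchesAllOpposition(..., save=True)):
# csk_all = pd.read_csv('Chennai Super Kings-allMatchesAllOpposition.csv')
# teamBatsmenPartnershipAllOppnAllMatchesChart(csk_all, 'Chennai Super Kings', plot=True,
#                                              top=5, partnershipRuns=20)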
##########################################################################################
# Designed and developed by <NAME>
# Date : 1 Feb 2019
# Function: teamBatsmenVsBowlersAllOppnAllMatches
# This function computes and plots the performance of batsmen
# of an IPL team against all other teams
#
###########################################################################################
def teamBatsmenVsBowlersAllOppnAllMatches(matches,main,plot=True,top=5,runsScored=20, savePic=False, dir1=".",picFile="pic1.png"):
'''
Report of team batsmen vs bowlers in all matches all oppositions
Description
This function computes the performance of batsmen against all bowlers of all oppositions in all matches
Usage
teamBatsmenVsBowlersAllOppnAllMatches(matches,main,plot=True,top=5,runsScored=20)
Arguments
matches
All the matches of the team against all oppositions
main
    The team for which the batting performance is sought
plot
Whether a plot is required or not
top
The number of top batsmen to be included
runsScored
    The minimum total runs scored by a batsman to use as a cutoff
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
The data frame of the batsman and the runs against bowlers
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
'''
df1 = matches[matches.team == main]
df2 = df1[['batsman','bowler','runs']]
# Runs scored by bowler
df3=df2.groupby(['batsman','bowler']).sum().reset_index(inplace=False)
df3.columns = ['batsman','bowler','runsScored']
print(df3.shape)
# Need to pick the 'top' number of bowlers
df4 = df3.groupby('batsman').sum().reset_index(inplace=False).sort_values('runsScored',ascending=False)
print(df4.shape)
df4.columns = ['batsman','totalRunsScored']
df5 = df4.head(top)
df6= pd.merge(df5,df3,on='batsman')
df7 = df6[['batsman','bowler','runsScored']]
    # Remove rows where runsScored is below the cutoff as there are too many
df8 = df7[df7['runsScored'] >runsScored]
df9=df8.groupby(['batsman','bowler'])['runsScored'].sum().unstack().fillna(0)
# Note: Can also use the below code -*************
#df8=df7.pivot(columns='bowler',index='batsman').fillna(0)
if plot == True:
ax=df9.plot(kind='bar',stacked=False,legend=False,fontsize=8)
plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.5),fontsize=8)
#ax.legend(fontsize=25)
plt.title('Runs by ' + main + ' against all T20 bowlers')
plt.xlabel('Batsman')
plt.ylabel('Runs scored')
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
else:
return(df7)
##########################################################################################
# Designed and developed by <NAME>
# Date : 1 Feb 2019
# Function: teamBattingScorecardAllOppnAllMatches
# This function computes and batting scorecard of an IPL team against all other
# IPL teams
#
###########################################################################################
def teamBattingScorecardAllOppnAllMatches(matches,main):
'''
Team batting scorecard against all oppositions in all matches
Description
    This function computes and returns the batting scorecard of a team in all matches against all oppositions. The data frame has the balls played, 4's, 6's and runs scored by batsman
Usage
    teamBattingScorecardAllOppnAllMatches(matches,main)
Arguments
matches
    All matches of the team against all oppositions
    main
    The team for which the batting scorecard is sought
Value
details The data frame of the scorecard of the team in all matches against all oppositions
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
'''
team=matches.loc[matches.team== main]
a1= getRuns(team)
b1= getFours(team)
c1= getSixes(team)
# Merge columns
d1=pd.merge(a1, b1, how='outer', on='batsman')
e=pd.merge(d1,c1,how='outer', on='batsman')
e=e.fillna(0)
e['4s']=e['4s'].astype(int)
e['6s']=e['6s'].astype(int)
e['SR']=(e['runs']/e['balls']) *100
scorecard = e[['batsman','runs','balls','4s','6s','SR']].sort_values('runs',ascending=False)
return(scorecard)
##########################################################################################
# Designed and developed by <NAME>
# Date : 1 Feb 2019
# Function: teamBowlingScorecardAllOppnAllMatches
# This function computes and bowling scorecard of an IPL team against all other
# IPL teams
#
###########################################################################################
def teamBowlingScorecardAllOppnAllMatches(matches,main):
'''
Team bowling scorecard all opposition all matches
Description
    This function computes and returns the bowling dataframe of bowlers' overs,
    maidens, runs conceded and wickets against all oppositions in all matches
Usage
    teamBowlingScorecardAllOppnAllMatches(matches,main)
Arguments
matches
The matches of the team against all oppositions and all matches
    main
Team for which bowling performance is required
Value
    l A data frame with the bowling performance in all matches against all oppositions
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
'''
team=matches.loc[matches.team== main]
# Compute overs bowled
a1= getOvers(team).reset_index(inplace=False)
# Compute runs conceded
b1= getRunsConceded(team).reset_index(inplace=False)
# Compute maidens
c1= getMaidens(team).reset_index(inplace=False)
# Compute wickets
d1= getWickets(team).reset_index(inplace=False)
e1=pd.merge(a1, b1, how='outer', on='bowler')
f1= pd.merge(e1,c1,how='outer', on='bowler')
g1= pd.merge(f1,d1,how='outer', on='bowler')
g1 = g1.fillna(0)
# Compute economy rate
g1['econrate'] = g1['runs']/g1['overs']
g1.columns=['bowler','overs','runs','maidens','wicket','econrate']
g1.maidens = g1.maidens.astype(int)
g1.wicket = g1.wicket.astype(int)
g2 = g1.sort_values('wicket',ascending=False)
return(g2)
##########################################################################################
# Designed and developed by <NAME>
# Date : 1 Feb 2019
# Function: teamBowlingWicketKindAllOppnAllMatches
# This function computes and plots the wicket kind of an IPL team against all other
# IPL teams
#
###########################################################################################
def teamBowlingWicketKindAllOppnAllMatches(matches,main,plot=True,top=5,wickets=2,savePic=False, dir1=".",picFile="pic1.png"):
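    '''
    Team bowling wicket kind against all oppositions in all matches
    Description
    This function computes and plots the kind of wickets (bowled, caught, lbw etc.) taken by
    the top bowlers of a team against all oppositions in all matches. If plot=False the
    dataframe of bowler, wicket kind and wickets is returned
    Usage
    teamBowlingWicketKindAllOppnAllMatches(matches,main,plot=True,top=5,wickets=2)
    Arguments
    matches
    All the matches of the team against all oppositions
    main
    The team for which the wicket kinds are sought
    plot
    If true plot else return dataframe
    top
    The number of top bowlers to be included
    wickets
    The minimum number of wickets to use as a cutoff
    savePic
    If savePic = True then the plot is saved
    dir1
    The directory where the plot is saved
    picFile
    The name of the savefile
    Value
    None or the dataframe with the wicket kinds
    References
    http://cricsheet.org/
    https://gigadom.wordpress.com/
    '''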
df1=matches.loc[matches.team== main]
df2= df1[['bowler','kind','player_out']]
# Find all rows where there was a wicket
df2=df2[df2.player_out != '0']
# Number of wickets taken by bowler
df3=df2.groupby(['bowler','kind']).count().reset_index(inplace=False)
df3.columns = ['bowler','kind','wickets']
# Need to pick the 'top' number of bowlers by wickets
df4 = df3.groupby('bowler').sum().reset_index(inplace=False).sort_values('wickets',ascending=False)
df4.columns = ['bowler','totalWickets']
df5 = df4.head(top)
df6= pd.merge(df5,df3,on='bowler')
df7 = df6[['bowler','kind','wickets']]
    # Remove rows where wickets is below the cutoff as there are too many
df8 = df7[df7['wickets'] >wickets]
df9=df8.groupby(['bowler','kind'])['wickets'].sum().unstack().fillna(0)
# Note: Can also use the below code -*************
#df9=df8.pivot(columns='bowler',index='batsman').fillna(0)
if plot == True:
ax=df9.plot(kind='bar',stacked=False,legend=False,fontsize=8)
plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.5),fontsize=8)
        plt.title('Wicket kind by bowlers of ' + main + ' against all T20 teams')
plt.xlabel('Bowler')
plt.ylabel('Total wickets')
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
else:
return(df7)
##########################################################################################
# Designed and developed by <NAME>
# Date : 1 Feb 2019
# Function: teamBowlersVsBatsmenAllOppnAllMatches
# This function computes and plots the performance of bowlers of an IPL team against all other
# IPL teams
#
###########################################################################################
def teamBowlersVsBatsmenAllOppnAllMatches(matches,main,plot=True,top=5,runsConceded=10,savePic=False, dir1=".",picFile="pic1.png"):
'''
Compute team bowlers vs batsmen all opposition all matches
Description
This function computes performance of bowlers of a team against all opposition in all matches
Usage
    teamBowlersVsBatsmenAllOppnAllMatches(matches,main,plot=True,top=5,runsConceded=10)
Arguments
matches
    The data frame of all matches between a team and all oppositions, obtained with the call getAllMatchesAllOpposition()
main
    The team for which the performance is required
plot
Whether a plot should be displayed or a dataframe to be returned
top
The top number of bowlers in result
    runsConceded
    The minimum number of runs conceded to use as a cutoff
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
dataframe The dataframe with all performances
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
'''
df1=matches.loc[matches.team== main]
df2= df1[['bowler','batsman','runs']]
    # Runs conceded by each bowler to each batsman
df3=df2.groupby(['bowler','batsman']).sum().reset_index(inplace=False)
df3.columns = ['bowler','batsman','runsConceded']
    # Pick the 'top' bowlers by total runs conceded
df4 = df3.groupby('bowler').sum().reset_index(inplace=False).sort_values('runsConceded',ascending=False)
df4.columns = ['bowler','totalRunsConceded']
df5 = df4.head(top)
df6= pd.merge(df5,df3,on='bowler')
df7 = df6[['bowler','batsman','runsConceded']]
    # Remove rows where runsConceded is below the cutoff as there are too many
df8 = df7[df7['runsConceded'] >runsConceded]
df9=df8.groupby(['bowler','batsman'])['runsConceded'].sum().unstack().fillna(0)
# Note: Can also use the below code -*************
#df9=df8.pivot(columns='bowler',index='batsman').fillna(0)
if plot == True:
ax=df9.plot(kind='bar',stacked=False,legend=False,fontsize=8)
plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.5),fontsize=8)
        plt.title('Performance of ' + main + ' bowlers vs batsmen')
plt.xlabel('Bowler')
plt.ylabel('Total runs')
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
else:
return(df7)
##########################################################################################
# Designed and developed by <NAME>
# Date : 1 Feb 2019
# Function: plotWinLossByTeamAllOpposition
# This function computes and plots the wins and losses of an IPL team against all other
# IPL teams
#
###########################################################################################
def plotWinLossByTeamAllOpposition(matches, team1, plot='summary',savePic=False, dir1=".",picFile="pic1.png"):
'''
Plot wins for each team
Description
This function computes and plots number of wins for each team in all their encounters.
    The plot includes the number of wins by each team and the matches with no result
Usage
    plotWinLossByTeamAllOpposition(matches, team1, plot='summary')
Arguments
matches
    The dataframe with all matches between an IPL team and all oppositions
    team1
    The team for which the plot is required
plot
Summary or detailed
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
'''
a=matches[['date','winner']].groupby(['date','winner']).count().reset_index(inplace=False)
# Plot the overall performance as wins and losses
if plot=="summary":
m= a.loc[a.winner==team1]['winner'].count()
n= a.loc[a.winner!=team1]['winner'].count()
df=pd.DataFrame({'outcome':['win','loss'],'number':[m,n]})
sns.barplot(x='outcome',y='number',data=df)
plt.xlabel('Outcome')
plt.ylabel('Number')
plt.title("Wins vs losses(summary) of " + team1 + ' against all Opposition' )
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
elif plot=="detailed" :
#Plot breakup by team
b=a.groupby('winner').count().reset_index(inplace=False)
# If 'winner' is '0' then the match is a tie.Set as 'tie'
b.loc[b.winner=='0','winner']='Tie'
b.columns = ['winner','number']
ax=sns.barplot(x='winner',y='number',data=b)
plt.xlabel('Winner')
plt.ylabel('Number')
plt.title("Wins vs losses(detailed) of " + team1 + ' against all Opposition' )
ax.set_xticklabels(ax.get_xticklabels(),rotation=60,fontsize=6)
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
else:
print("Unknown option")
##########################################################################################
# Designed and developed by <NAME>
# Date : 1 Feb 2019
# Function: plotWinsByRunOrWicketsAllOpposition
# This function computes and plots the wins and losses of an IPL team against all other
# IPL teams
#
###########################################################################################
def plotWinsByRunOrWicketsAllOpposition(matches,team1,plot=True, savePic=False, dir1=".",picFile="pic1.png"):
'''
Plot whether the wins for the team was by runs or wickets
Description
    This function computes and plots the number of wins by runs vs the number of wins
by wickets against all Opposition
Usage
plotWinsByRunOrWicketsAllOpposition(matches,team1)
Arguments
matches
The dataframe with all matches between an IPL team and all IPL teams
team1
The team for which the plot has to be done
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
'''
# Get the number of matches won
df= matches.loc[matches.winner == team1]
a=df[['date','winType']].groupby(['date','winType']).count().reset_index(inplace=False)
b=a.groupby('winType').count().reset_index(inplace=False)
b.columns = ['winType','number']
sns.barplot(x='winType',y='number',data=b)
plt.xlabel('Win Type - Runs or wickets')
plt.ylabel('Number')
plt.title("Win type for team -" + team1 + ' against all opposition' )
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
##########################################################################################
# Designed and developed by <NAME>
# Date : 1 Feb 2019
# Function: plotWinsbyTossDecisionAllOpposition
# This function computes and plots the wins and losses, based on the toss decision, of an IPL team against all
# IPL teams
#
###########################################################################################
def plotWinsbyTossDecisionAllOpposition(matches,team1,tossDecision='bat',plot="summary", savePic=False, dir1=".",picFile="pic1.png"):
'''
    Plot wins vs losses of a team against all oppositions based on its toss decision
    Description
    This function computes and plots the number of wins vs losses for the team
    against all oppositions when its toss decision was to bat or to field
Usage
plotWinsbyTossDecisionAllOpposition(matches,team1,tossDecision='bat',plot="summary")
Arguments
matches
    The dataframe with all matches between an IPL team and all oppositions
team1
The team for which the plot has to be done
plot
'summary' or 'detailed'
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
See Also
teamBowlingScorecardOppnAllMatches
teamBatsmenPartnershipOppnAllMatchesChart
teamBowlingWicketKindOppositionAllMatches
'''
df=matches.loc[(matches.tossDecision==tossDecision) & (matches.tossWinner==team1)]
a=df[['date','winner']].groupby(['date','winner']).count().reset_index(inplace=False)
if plot=="summary":
m= a.loc[a.winner==team1]['winner'].count()
n= a.loc[a.winner!=team1]['winner'].count()
df=pd.DataFrame({'outcome':['win','loss'],'number':[m,n]})
sns.barplot(x='outcome',y='number',data=df)
plt.xlabel('Outcome')
plt.ylabel('Number')
plt.title("Wins vs losses(summary) against all opposition when toss decision was to " + tossDecision + ' for ' + team1 )
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
elif plot=="detailed" :
#Plot breakup by team
b=a.groupby('winner').count().reset_index(inplace=False)
        # If 'winner' is '0' then the match is a tie. Set it to 'Tie'
b.loc[b.winner=='0','winner']='Tie'
b.columns = ['winner','number']
ax=sns.barplot(x='winner',y='number',data=b)
plt.xlabel(team1 + ' chose to ' + tossDecision)
plt.ylabel('Number')
plt.title('Wins vs losses(detailed) against all opposition for ' + team1 + ' when toss decision was to ' + tossDecision )
ax.set_xticklabels(ax.get_xticklabels(),rotation=60, fontsize=6)
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
return
##########################################################################################
# Designed and developed by <NAME>
# Date : 24 Feb 2019
# Function: getTeamBattingDetails
# This function computes the batting details of a team
# IPL teams
#
###########################################################################################
def getTeamBattingDetails(team,dir=".",save=False,odir="."):
'''
Description
    This function gets the batting details of a team in all matches against all oppositions. This gets all the details of the batsmen: balls faced, 4s, 6s, strike rate, runs, venue etc. This function is then used for analyses of batsmen. This function calls teamBattingScorecardMatch()
Usage
getTeamBattingDetails(team,dir=".",save=FALSE)
Arguments
team
The team for which batting details is required
dir
    The source directory of the match .csv files
save
    Whether the data frame needs to be saved as CSV or not. It is recommended to set save=True as the data can be used for a lot of analyses of batsmen
Value
battingDetails The dataframe with the batting details
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.in/
Examples
m=getTeamBattingDetails(team1,dir1,save=True)
'''
# Get all matches played by team
t1 = '*' + team +'*.csv'
path= os.path.join(dir,t1)
files = glob.glob(path)
# Create an empty dataframe
details = pd.DataFrame()
# Loop through all matches played by team
for file in files:
match=pd.read_csv(file)
scorecard,extras=teamBattingScorecardMatch(match,team)
if scorecard.empty:
continue
# Filter out only the rows played by team
match1 = match.loc[match.team==team]
        # Check if there were wickets; the 'kind' column will contain 'bowled', 'caught', etc.
        if len(match1) != 0:
if isinstance(match1.kind.iloc[0],str):
b=match1.loc[match1.kind != '0']
# Get the details of the wicket
wkts= b[['batsman','bowler','fielders','kind','player_out']]
#date','team2','winner','result','venue']]
df=pd.merge(scorecard,wkts,how='outer',on='batsman')
# Fill NA as not outs
df =df.fillna('notOut')
# Set other info
if len(b) != 0:
df['date']= b['date'].iloc[0]
df['team2']= b['team2'].iloc[0]
df['winner']= b['winner'].iloc[0]
df['result']= b['result'].iloc[0]
df['venue']= b['venue'].iloc[0]
details= pd.concat([details,df])
details = details.sort_values(['batsman','date'])
if save==True:
fileName = "./" + team + "-BattingDetails.csv"
output=os.path.join(odir,fileName)
details.to_csv(output)
return(details)
##########################################################################################
# Designed and developed by <NAME>
# Date : 24 Feb 2019
# Function: getBatsmanDetails
# This function gets the batsman details
# IPL teams
#
###########################################################################################
def getBatsmanDetails(team, name,dir="."):
'''
Get batting details of batsman from match
Description
    This function gets the batting details of a batsman given the match data as a CSV file
Usage
getBatsmanDetails(team,name,dir=".")
Arguments
team
The team of the batsman e.g. India
name
Name of batsman
dir
The directory where the source file exists
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.in/
See Also
batsmanRunsPredict
batsmanMovingAverage
bowlerWicketsVenue
bowlerMeanRunsConceded
Examples
## Not run:
name="<NAME>"
team='Chennai Super Kings'
#df=getBatsmanDetails(team, name,dir=".")
'''
path = dir + '/' + team + "-BattingDetails.csv"
battingDetails= pd.read_csv(path)
batsmanDetails = battingDetails.loc[battingDetails['batsman'].str.contains(name)]
return(batsmanDetails)
##########################################################################################
# Designed and developed by <NAME>
# Date : 24 Feb 2019
# Function: batsmanRunsVsDeliveries
# This function plots runs vs deliveries for the batsman
#
###########################################################################################
def batsmanRunsVsDeliveries(df,name= "A Late Cut",plot=True, savePic=False, dir1=".",picFile="pic1.png"):
'''
Runs versus deliveries faced
Description
This function plots the runs scored and the deliveries required. A regression smoothing function is used to fit the points
Usage
batsmanRunsVsDeliveries(df, name= "A Late Cut")
Arguments
df
Data frame
name
Name of batsman
plot
If plot=TRUE then a plot is created otherwise a data frame is returned
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
See Also
batsmanFoursSixes
batsmanRunsVsDeliveries
batsmanRunsVsStrikeRate
Examples
name="SK Raina"
team='Chennai Super Kings'
df=getBatsmanDetails(team, name,dir=".")
batsmanRunsVsDeliveries(df, name)
'''
rcParams['figure.figsize'] = 8, 5
plt.scatter(df.balls,df.runs)
sns.lmplot(x='balls',y='runs', data=df)
plt.xlabel("Balls faced",fontsize=8)
plt.ylabel('Runs',fontsize=8)
atitle=name + "- Runs vs balls faced"
plt.title(atitle,fontsize=8)
if(plot==True):
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
return
##########################################################################################
# Designed and developed by <NAME>
# Date : 24 Feb 2019
# Function: batsmanFoursSixes
# This function gets the batsman fours and sixes for batsman
#
#
###########################################################################################
def batsmanFoursSixes(df,name= "A Leg Glance", plot=True, savePic=False, dir1=".",picFile="pic1.png"):
'''
Description
This function computes and plots the total runs, fours and sixes of the batsman
Usage
batsmanFoursSixes(df,name= "A Leg Glance")
Arguments
df
Data frame
name
Name of batsman
    plot
    If plot=TRUE then a plot is created otherwise a data frame is returned
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
See Also
batsmanDismissals batsmanRunsVsDeliveries batsmanRunsVsStrikeRate batsmanRunsVsStrikeRate batsmanRunsPredict
Examples
name="SK Raina"
team='Chennai Super Kings'
df=getBatsmanDetails(team, name,dir=".")
batsmanFoursSixes(df,"SK Raina")
'''
# Compute runs from fours and sixes
rcParams['figure.figsize'] = 8, 5
df['RunsFromFours']=df['4s']*4
df['RunsFromSixes']=df['6s']*6
df1 = df[['balls','runs','RunsFromFours','RunsFromSixes']]
# Total runs
    sns.scatterplot(x='balls', y='runs', data=df1)
# Fit a linear regression line
    balls=df1.balls.values.reshape(-1,1)
linreg = LinearRegression().fit(balls, df1.runs)
x=np.linspace(0,120,10)
#Plot regression line balls vs runs
plt.plot(x, linreg.coef_ * x + linreg.intercept_, color='blue',label="Total runs")
# Runs from fours
    sns.scatterplot(x='balls', y='RunsFromFours', data=df1)
#Plot regression line balls vs Runs from fours
linreg = LinearRegression().fit(balls, df1.RunsFromFours)
plt.plot(x, linreg.coef_ * x + linreg.intercept_, color='red',label="Runs from fours")
# Runs from sixes
    sns.scatterplot(x='balls', y='RunsFromSixes', data=df1)
#Plot regression line balls vs Runs from sixes
linreg = LinearRegression().fit(balls, df1.RunsFromSixes)
plt.plot(x, linreg.coef_ * x + linreg.intercept_, color='green',label="Runs from sixes")
plt.xlabel("Balls faced",fontsize=8)
plt.ylabel('Runs',fontsize=8)
atitle=name + "- Total runs, fours and sixes"
plt.title(atitle,fontsize=8)
plt.legend(loc="upper left")
if(plot==True):
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
return
##########################################################################################
# Designed and developed by <NAME>
# Date : 24 Feb 2019
# Function: batsmanDismissals
# This function plots the batsman dismissals
#
###########################################################################################
def batsmanDismissals(df,name="A Leg Glance",plot=True, savePic=False, dir1=".",picFile="pic1.png"):
'''
Description
    This function computes and plots the type of dismissals of the batsman
Usage
batsmanDismissals(df,name="A Leg Glance")
Arguments
df
Data frame
name
Name of batsman
plot
If plot=TRUE then a plot is created otherwise a data frame is returned
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
See Also
batsmanFoursSixes
batsmanRunsVsDeliveries
batsmanRunsVsStrikeRate
Examples
name="SK Raina"
team='Chennai Super Kings'
df=getBatsmanDetails(team, name,dir=".")
batsmanDismissals(df,"SK Raina")
'''
# Count dismissals
rcParams['figure.figsize'] = 8, 5
df1 = df[['batsman','kind']]
df2 = df1.groupby('kind').count().reset_index(inplace=False)
df2.columns = ['dismissals','count']
plt.pie(df2['count'], labels=df2['dismissals'],autopct='%.1f%%')
atitle= name + "-Dismissals"
plt.title(atitle,fontsize=8)
if(plot==True):
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
return
##########################################################################################
# Designed and developed by <NAME>
# Date : 24 Feb 2019
# Function: batsmanRunsVsStrikeRate
# This function plots the runs vs strike rate
#
#
###########################################################################################
def batsmanRunsVsStrikeRate (df,name= "A Late Cut", plot=True, savePic=False, dir1=".",picFile="pic1.png"):
'''
Description
    This function plots the runs scored by the batsman against the strike rate. A loess line is fitted over the points
Usage
batsmanRunsVsStrikeRate(df, name= "A Late Cut")
Arguments
df
Data frame
name
Name of batsman
plot
If plot=TRUE then a plot is created otherwise a data frame is returned
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
https://github.com/tvganesh/yorkrData
See Also
batsmanDismissals
batsmanRunsVsDeliveries
batsmanRunsVsStrikeRate
teamBatsmenPartnershipAllOppnAllMatches
Examples
name="SK Raina"
team='Chennai Super Kings'
df=getBatsmanDetails(team, name,dir=".")
batsmanRunsVsStrikeRate(df,"SK Raina")
'''
rcParams['figure.figsize'] = 8, 5
plt.scatter(df.runs,df.SR)
sns.lmplot(x='runs',y='SR', data=df,order=2)
plt.xlabel("Runs",fontsize=8)
plt.ylabel('Strike Rate',fontsize=8)
atitle=name + "- Runs vs Strike rate"
plt.title(atitle,fontsize=8)
if(plot==True):
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
return
##########################################################################################
# Designed and developed by <NAME>
# Date : 24 Feb 2019
# Function: movingaverage
# This computes the moving average
#
#
###########################################################################################
def movingaverage(interval, window_size):
window= np.ones(int(window_size))/float(window_size)
return np.convolve(interval, window, 'same')
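# Example: movingaverage(df.runs, 10) gives the centred 10-innings moving average of the
# runs column; np.convolve with mode 'same' keeps the output the same length as the input.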
##########################################################################################
# Designed and developed by <NAME>
# Date : 24 Feb 2019
# Function: batsmanMovingAverage
# This function plots the moving average of runs
#
#
###########################################################################################
def batsmanMovingAverage(df, name, plot=True, savePic=False, dir1=".",picFile="pic1.png"):
'''
Description
    This function plots the runs scored by the batsman over the career as a time series. A loess regression line is plotted on the moving average of the batsman's runs
Usage
batsmanMovingAverage(df, name= "A Leg Glance")
Arguments
df
Data frame
name
Name of batsman
plot
If plot=TRUE then a plot is created otherwise a data frame is returned
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
See Also
batsmanDismissals
batsmanRunsVsDeliveries
batsmanRunsVsStrikeRate
teamBatsmenPartnershipAllOppnAllMatches
Examples
name="SK Raina"
team='Chennai Super Kings'
df=getBatsmanDetails(team, name,dir=".")
batsmanMovingAverage(df,"SK Raina")
'''
rcParams['figure.figsize'] = 8, 5
y_av = movingaverage(df.runs, 10)
date= pd.to_datetime(df['date'])
plt.plot(date, y_av,"b")
plt.xlabel('Date',fontsize=8)
plt.ylabel('Runs',fontsize=8)
plt.xticks(rotation=90)
atitle = name + "-Moving average of runs"
plt.title(atitle,fontsize=8)
if(plot==True):
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
return
##########################################################################################
# Designed and developed by <NAME>
# Date : 24 Feb 2019
# Function: batsmanCumulativeAverageRuns
# This function plots the cumulative average runs
#
#
###########################################################################################
def batsmanCumulativeAverageRuns(df,name="A Leg Glance",plot=True, savePic=False, dir1=".",picFile="pic1.png"):
'''
Batsman's cumulative average runs
Description
This function computes and plots the cumulative average runs of a batsman
Usage
batsmanCumulativeAverageRuns(df,name= "A Leg Glance")
Arguments
df
Data frame
name
Name of batsman
plot
If plot=TRUE then a plot is created otherwise a data frame is returned
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
See Also
batsmanCumulativeStrikeRate bowlerCumulativeAvgEconRate bowlerCumulativeAvgWickets batsmanRunsVsStrikeRate batsmanRunsPredict
Examples
name="SK Raina"
team='Chennai Super Kings'
df=getBatsmanDetails(team, name,dir=".")
batsmanCumulativeAverageRuns(df,"SK Raina")
'''
rcParams['figure.figsize'] = 8, 5
cumAvgRuns = df['runs'].cumsum()/pd.Series(np.arange(1, len( df['runs'])+1), df['runs'].index)
plt.plot(cumAvgRuns)
plt.xlabel('No of matches',fontsize=8)
plt.ylabel('Cumulative Average Runs',fontsize=8)
plt.xticks(rotation=90)
atitle = name + "-Cumulative Average Runs vs matches"
plt.title(atitle,fontsize=8)
if(plot==True):
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
return
##########################################################################################
# Designed and developed by <NAME>
# Date : 24 Feb 2019
# Function: batsmanCumulativeStrikeRate
# This function plots the cumulative average Strike rate
#
#
###########################################################################################
def batsmanCumulativeStrikeRate(df,name="A Leg Glance",plot=True, savePic=False, dir1=".",picFile="pic1.png"):
'''
Description
This function computes and plots the cumulative average strike rate of a batsman
Usage
batsmanCumulativeStrikeRate(df,name= "A Leg Glance")
Arguments
df
Data frame
name
Name of batsman
plot
If plot=TRUE then a plot is created otherwise a data frame is returned
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
https://github.com/tvganesh/yorkrData
See Also
batsmanCumulativeAverageRuns bowlerCumulativeAvgEconRate bowlerCumulativeAvgWickets batsmanRunsVsStrikeRate batsmanRunsPredict
Examples
name="<NAME>"
team='Chennai Super Kings'
df=getBatsmanDetails(team, name,dir=".")
    #batsmanCumulativeStrikeRate(df,name)
'''
rcParams['figure.figsize'] = 8, 5
cumAvgRuns = df['SR'].cumsum()/pd.Series(np.arange(1, len( df['SR'])+1), df['SR'].index)
plt.plot(cumAvgRuns)
plt.xlabel('No of matches',fontsize=8)
plt.ylabel('Cumulative Average Strike Rate',fontsize=8)
plt.xticks(rotation=70)
atitle = name + "-Cumulative Average Strike Rate vs matches"
plt.title(atitle,fontsize=8)
if(plot==True):
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
return
##########################################################################################
# Designed and developed by <NAME>
# Date : 24 Feb 2019
# Function: batsmanRunsAgainstOpposition
# This function plots the batsman's runs against opposition
#
#
###########################################################################################
def batsmanRunsAgainstOpposition(df,name= "A Leg Glance",plot=True, savePic=False, dir1=".",picFile="pic1.png"):
'''
Description
This function computes and plots the mean runs scored by the batsman against different oppositions
Usage
batsmanRunsAgainstOpposition(df, name= "A Leg Glance")
Arguments
df
Data frame
name
Name of batsman
plot
If plot=TRUE then a plot is created otherwise a data frame is returned
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
picFile
The name of the savefile
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
https://github.com/tvganesh/yorkrData
See Also
batsmanFoursSixes
batsmanRunsVsDeliveries
batsmanRunsVsStrikeRate
teamBatsmenPartnershipAllOppnAllMatches
Examples
name="<NAME>"
team='Chennai Super Kings'
df=getBatsmanDetails(team, name,dir=".")
batsmanRunsAgainstOpposition(df,name)
'''
rcParams['figure.figsize'] = 8, 5
df1 = df[['batsman', 'runs','team2']]
df2=df1.groupby('team2').agg(['sum','mean','count'])
df2.columns= ['_'.join(col).strip() for col in df2.columns.values]
# Reset index
df3=df2.reset_index(inplace=False)
ax=sns.barplot(x='team2', y="runs_mean", data=df3)
plt.xticks(rotation="vertical",fontsize=8)
plt.xlabel('Opposition',fontsize=8)
plt.ylabel('Mean Runs',fontsize=8)
atitle=name + "-Mean Runs against opposition"
plt.title(atitle,fontsize=8)
if(plot==True):
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
return
##########################################################################################
# Designed and developed by <NAME>
# Date : 24 Feb 2019
# Function: batsmanRunsVenue
# This function plots the batsman's runs at venues
#
#
###########################################################################################
def batsmanRunsVenue(df,name= "A Leg Glance",plot=True, savePic=False, dir1=".",picFile="pic1.png"):
'''
Description
This function computes and plots the mean runs scored by the batsman at different venues of the world
Usage
batsmanRunsVenue(df, name= "A Leg Glance")
Arguments
df
Data frame
name
Name of batsman
plot
If plot=TRUE then a plot is created otherwise a data frame is returned
savePic
If savePic = True then the plot is saved
dir1
The directory where the plot is saved
    picFile
    The name of the savefile
    Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
https://github.com/tvganesh/yorkrData
See Also
batsmanFoursSixes
batsmanRunsVsDeliveries
batsmanRunsVsStrikeRate
teamBatsmenPartnershipAllOppnAllMatches
batsmanRunsAgainstOpposition
Examples
name="SK Raina"
team='Chennai Super Kings'
df=getBatsmanDetails(team, name,dir=".")
#batsmanRunsVenue(df,name)
'''
rcParams['figure.figsize'] = 8, 5
df1 = df[['batsman', 'runs','venue']]
df2=df1.groupby('venue').agg(['sum','mean','count'])
df2.columns= ['_'.join(col).strip() for col in df2.columns.values]
# Reset index
df3=df2.reset_index(inplace=False)
ax=sns.barplot(x='venue', y="runs_mean", data=df3)
plt.xticks(rotation="vertical",fontsize=8)
plt.xlabel('Venue',fontsize=8)
plt.ylabel('Mean Runs',fontsize=8)
atitle=name + "-Mean Runs at venues"
plt.title(atitle,fontsize=8)
if(plot==True):
if(savePic):
plt.savefig(os.path.join(dir1,picFile),bbox_inches='tight')
else:
plt.show()
plt.gcf().clear()
return
##########################################################################################
# Designed and developed by <NAME>
# Date : 24 Feb 2019
# Function: teamBowlingPerDetails
# This function gets the bowling performances
#
#
###########################################################################################
def teamBowlingPerDetails(team):
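    '''
    Compute the bowling performance details of one innings
    Description
    Helper that computes, for every bowler in the given deliveries dataframe, the overs
    bowled, runs conceded, maidens, wickets and economy rate and returns them as a single
    dataframe. It is used by getTeamBowlingDetails()
    Arguments
    team
    The dataframe of deliveries bowled to the opposing team
    Value
    A dataframe with bowler, overs, runs, maidens, wicket and econrate
    '''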
# Compute overs bowled
a1= getOvers(team).reset_index(inplace=False)
# Compute runs conceded
b1= getRunsConceded(team).reset_index(inplace=False)
# Compute maidens
c1= getMaidens(team).reset_index(inplace=False)
# Compute wickets
d1= getWickets(team).reset_index(inplace=False)
e1=pd.merge(a1, b1, how='outer', on='bowler')
f1= pd.merge(e1,c1,how='outer', on='bowler')
g1= pd.merge(f1,d1,how='outer', on='bowler')
g1 = g1.fillna(0)
# Compute economy rate
g1['econrate'] = g1['runs']/g1['overs']
g1.columns=['bowler','overs','runs','maidens','wicket','econrate']
g1.maidens = g1.maidens.astype(int)
g1.wicket = g1.wicket.astype(int)
return(g1)
##########################################################################################
# Designed and developed by <NAME>
# Date : 24 Feb 2019
# Function: getTeamBowlingDetails
# This function gets the team bowling details
#
#
###########################################################################################
def getTeamBowlingDetails(team,dir=".",save=False,odir="."):
'''
Description
    This function gets the bowling details of a team in all matches against all oppositions. This gets all the details of the bowlers, e.g. overs, maidens, runs, wickets, venue, date, winner etc.
Usage
getTeamBowlingDetails(team,dir=".",save=FALSE)
Arguments
team
The team for which detailed bowling info is required
dir
    The source directory of the match .csv files
save
    Whether the data frame needs to be saved as CSV or not. It is recommended to set save=True as the data can be used for a lot of analyses of bowlers
Value
bowlingDetails The dataframe with the bowling details
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.wordpress.com/
https://github.com/tvganesh/yorkrData
See Also
getBatsmanDetails
getBowlerWicketDetails
batsmanDismissals
getTeamBattingDetails
Examples
dir1= "C:\\software\\cricket-package\\yorkpyIPLData\\data"
    team1='Delhi Daredevils'
m=getTeamBowlingDetails(team1,dir1,save=True)
'''
# Get all matches played by team
t1 = '*' + team +'*.csv'
path= os.path.join(dir,t1)
files = glob.glob(path)
# Create an empty dataframe
details = pd.DataFrame()
# Loop through all matches played by team
for file in files:
match=pd.read_csv(file)
if(match.size != 0):
team1=match.loc[match.team != team]
else:
continue
if len(team1) !=0:
scorecard=teamBowlingPerDetails(team1)
scorecard['date']= match['date'].iloc[0]
scorecard['team2']= match['team2'].iloc[0]
scorecard['winner']= match['winner'].iloc[0]
scorecard['result']= match['result'].iloc[0]
scorecard['venue']= match['venue'].iloc[0]
            details= pd.concat([details,scorecard])
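    # Assumed continuation (sketch), mirroring getTeamBattingDetails() above: sort the
    # collected details, optionally save them as CSV and return the dataframe.
    details = details.sort_values(['bowler','date'])
    if save==True:
        fileName = "./" + team + "-BowlingDetails.csv"
        output=os.path.join(odir,fileName)
        details.to_csv(output)
    return(details)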
import numpy as np
import numpy.matlib
import pandas as pd
import pvlib as pv
from scipy.interpolate import interp1d
DOY_LEAPDAY = 60
def _addHotwater(simData):
""" Calculate hot water demand profile in W
All load values are modified by a daily profile.
    The profile values have to be scaled by each agent's COC value.
Args:
simData (pandas data frame): Simulation time and data
Returns:
pandas data frame: simData complemented by hot water day profile factor
"""
    # all agents use the PHH profile,
    # since no statistics for business hot water demand are available
HWP = pd.read_hdf("./BoundaryConditions/Thermal/HotWaterProfile/"
"HotWaterDayProfile.h5", key='PHH')
simData.insert(simData.shape[1], ('HWPfactor', ''),
HWP.loc[simData[('time', '')].dt.hour,
'fProportion'].values)
return simData
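# Illustrative use of the profile factor (a sketch; 'agent_hot_water_COC_demand_W' is an
# assumed name, not defined in this module):
# hot_water_load_W = simData[('HWPfactor', '')] * agent_hot_water_COC_demand_W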
def _addSLPdata(simData):
""" Add standard load profile for different agents to time data frame.
The SLP is calculated for the time frame beginning at startDate
and ending at endDate (inclusive). For each day a curve with
15min steps is calculated, based on the SLP data (H0 for PHH,
    G0/L0 for business) from BDEW. The SLP differs between summer,
    winter and intermediate periods, as well as between weekdays, weekends and holidays.
    The PHH SLP is additionally modified according to BDEW
by a dynamic sampling profile.
Args:
simData (pandas data frame): Simulation time information
(is created by getSimTime method)
Returns:
pandas data frame: Data frame with sim time and SLP data
"""
# prepare columns
cIdx = [('SLP', 'PHH'), ('SLP', 'BSLa'), ('SLP', 'BSLc')]
newData = pd.DataFrame(index=np.arange(simData.shape[0]), columns=cIdx)
# load SLP base data
PHH = pd.read_hdf("./BoundaryConditions/Electrical/SLP/PHH.h5",
key='PHH')
G0 = pd.read_hdf("./BoundaryConditions/Electrical/SLP/G0.h5",
key='G0')
L0 = pd.read_hdf("./BoundaryConditions/Electrical/SLP/L0.h5",
key='L0')
# add SLP data
# Winter
mask = simData.winter & (simData.weekDaySLP < 5)
nDays = int(mask.sum() / 96)
newData.loc[mask, [('SLP', 'PHH'), ('SLP', 'BSLc'),
('SLP', 'BSLa')]] = np.tile(
np.array([PHH.Winter.WorkDay.values,
G0.Winter.WorkDay.values,
L0.Winter.WorkDay.values]), nDays).T
mask = simData.winter & (simData.weekDaySLP == 5)
nDays = int(mask.sum() / 96)
newData.loc[mask, [('SLP', 'PHH'), ('SLP', 'BSLc'),
('SLP', 'BSLa')]] = np.tile(
np.array([PHH.Winter.Saturday.values,
G0.Winter.Saturday.values,
L0.Winter.Saturday.values]), nDays).T
mask = simData.winter & (simData.weekDaySLP == 6)
nDays = int(mask.sum() / 96)
newData.loc[mask, [('SLP', 'PHH'), ('SLP', 'BSLc'),
('SLP', 'BSLa')]] = np.tile(
np.array([PHH.Winter.Sunday.values,
G0.Winter.Sunday.values,
L0.Winter.Sunday.values]), nDays).T
# Intermediate
mask = simData.intermediate & (simData.weekDaySLP < 5)
nDays = int(mask.sum() / 96)
newData.loc[mask, [('SLP', 'PHH'), ('SLP', 'BSLc'),
('SLP', 'BSLa')]] = np.tile(
np.array([PHH.InterimPeriod.WorkDay.values,
G0.InterimPeriod.WorkDay.values,
L0.InterimPeriod.WorkDay.values]), nDays).T
mask = simData.intermediate & (simData.weekDaySLP == 5)
nDays = int(mask.sum() / 96)
newData.loc[mask, [('SLP', 'PHH'), ('SLP', 'BSLc'),
('SLP', 'BSLa')]] = np.tile(
np.array([PHH.InterimPeriod.Saturday.values,
G0.InterimPeriod.Saturday.values,
L0.InterimPeriod.Saturday.values]), nDays).T
mask = simData.intermediate & (simData.weekDaySLP == 6)
nDays = int(mask.sum() / 96)
newData.loc[mask, [('SLP', 'PHH'), ('SLP', 'BSLc'),
('SLP', 'BSLa')]] = np.tile(
np.array([PHH.InterimPeriod.Sunday.values,
G0.InterimPeriod.Sunday.values,
L0.InterimPeriod.Sunday.values]), nDays).T
# Summer
mask = simData.summer & (simData.weekDaySLP < 5)
nDays = int(mask.sum() / 96)
newData.loc[mask, [('SLP', 'PHH'), ('SLP', 'BSLc'),
('SLP', 'BSLa')]] = np.tile(
np.array([PHH.Summer.WorkDay.values,
G0.Summer.WorkDay.values,
L0.Summer.WorkDay.values]), nDays).T
mask = simData.summer & (simData.weekDaySLP == 5)
nDays = int(mask.sum() / 96)
newData.loc[mask, [('SLP', 'PHH'), ('SLP', 'BSLc'),
('SLP', 'BSLa')]] = np.tile(
np.array([PHH.Summer.Saturday.values,
G0.Summer.Saturday.values,
L0.Summer.Saturday.values]), nDays).T
mask = simData.summer & (simData.weekDaySLP == 6)
nDays = int(mask.sum() / 96)
newData.loc[mask, [('SLP', 'PHH'), ('SLP', 'BSLc'),
('SLP', 'BSLa')]] = np.tile(
np.array([PHH.Summer.Sunday.values,
G0.Summer.Sunday.values,
L0.Summer.Sunday.values]), nDays).T
# Dynamic sampling of PHH profile
newData[('SLP', 'PHH')] *= (- 3.92*1e-10*simData.doy**4 +
3.2*1e-7*simData.doy**3 -
7.02*1e-5*simData.doy**2 +
2.1*1e-3*simData.doy + 1.24)
# merge data frames
simData = simData.join(newData.astype(np.float32))
return simData
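# --- Illustrative sketch (not part of the original module) --------------------
# Evaluates the BDEW dynamisation polynomial used above for the PHH (H0) profile
# on its own, so the seasonal scaling factor can be inspected without building
# the full simulation frame. The helper name is introduced here for illustration.
def _demo_phh_dynamisation():
    import numpy as np

    doy = np.arange(1, 366, dtype=float)
    factor = (-3.92e-10 * doy**4 + 3.2e-7 * doy**3 -
              7.02e-5 * doy**2 + 2.1e-3 * doy + 1.24)
    # factor is > 1 in winter and < 1 in summer, scaling the static H0 curve
    return doy[factor.argmax()], factor.max(), doy[factor.argmin()], factor.min()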
def _cleanSimData(simData):
""" Remove unnecessary columns
Args:
simData (pandas data frame): Simulation data
Returns:
pandas data frame: Data frame with sim data
"""
simData.drop(columns=["doy", "weekDaySLP", "summer",
"winter", "intermediate"], inplace=True)
return simData
def _getSimTime(startDate, endDate):
""" Prepare a pandas dataframe for simulation course
    This function will add all time related information
(Summer, Winter, day of year, hour of day, correct week days for SLP)
Info to pandas WeekDays: Monday=0, Sunday=6.
Args:
startDate (string): Start date DD.MM.YYYY
(start time is hard coded to 00:00)
endDate (string): End date DD.MM.YYYY
(end day is not in time range, so end date
should be end date + 1 day)
Return:
pandas data frame: Time course and additional informations
for preparing boundary conditions of
a simulation run
"""
startDate = startDate.split(".")
startDate = "/".join([startDate[1], startDate[0], startDate[2]])
endDate = endDate.split(".")
endDate = "/".join([endDate[1], endDate[0], endDate[2]])
time = pd.date_range(startDate, endDate, freq='0.25H', closed='left')
doy = time.dayofyear
weekDaySLP = time.dayofweek
df = pd.DataFrame({('time', ''): time, 'doy': doy,
'weekDaySLP': weekDaySLP})
# add relevant time periods
df['summer'] = (((time.month > 5) & (time.month < 9)) |
((time.month == 5) & (time.day >= 15)) |
((time.month == 9) & (time.day <= 14)))
df['winter'] = (((time.month >= 11) | (time.month < 3)) |
((time.month == 3) & (time.day <= 20)))
df['intermediate'] = ~(df['summer'] | df['winter'])
# correct week days of SLP days
# -> add Christmas Eve and New Years Eve to Sat if Week
mask = ((time.month == 12) &
((time.day == 24) | (time.day == 31)) &
(df.weekDaySLP < 5))
df.loc[mask, 'weekDaySLP'] = 5
# load and check holidays and set them to sunday
holidays = pd.read_csv("./BoundaryConditions/Simulation/holydaysSN.csv",
parse_dates=[0],
dayfirst=True)
mask = pd.to_datetime(time.date).isin(holidays.date)
df.loc[mask, 'weekDaySLP'] = 6
return df
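# --- Illustrative sketch (not part of the original module) --------------------
# Minimal call of _getSimTime for a single week. It assumes the holiday CSV
# referenced above is available relative to the working directory; dates use the
# DD.MM.YYYY format expected by the function (the end date is exclusive).
def _demo_simtime():
    df = _getSimTime("01.01.2020", "08.01.2020")
    # 7 days x 96 quarter-hour steps = 672 rows
    assert len(df) == 7 * 96
    return df[['doy', 'weekDaySLP', 'summer', 'winter', 'intermediate']].head()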
def _getWeather(simData, region):
"""Calculate temperature and irradiation curve for
given simulation time and region
    Test reference year data of DWD consists of:
- Data for reference year
- Data for year with extreme summer
- Data for extreme winter
By randomly weighting those curves a new weather curve is generated. The
random weights are updated per simulated year.
Arguments:
simData {pandas data frame} -- Simulation data
region {string} -- Location of simulation
(determines climate / weather)
Supported regions:
East, West, South, North
Returns:
pandas data frame -- Simulation data extended by weather course
"""
RefWeather = pd.read_hdf("./BoundaryConditions/Weather/" +
region + ".h5", 'Weather')
cols = RefWeather.reference.columns
# at first create simulation weather data without interpolation
SimWeather = pd.DataFrame(columns=['t [s]'] + cols.to_list())
# ensure ref Weather time steps are hourly
if RefWeather.date_time.dt.freq != 'H':
# TODO: Catch -> Create hourly stepped ref Data
raise ValueError("Weather data time step must be one hour")
# Fill sim time in seconds hourly stepped
SimWeather['time'] = pd.date_range(simData[('time', '')].iloc[0],
simData[('time', '')].iloc[-1],
freq='H')
SimWeather['doy'] = SimWeather.time.dt.dayofyear
SimWeather['t [s]'] = ((SimWeather.time - SimWeather.time[0])
.dt.total_seconds())
# get mask of all non leap year days once -> keep out doy 366
maskDoy = (SimWeather.doy >= 1) & (SimWeather.doy <= 365)
    # create the weight function once to get smooth transitions
    # between years and for the December extrapolation
lenDay = 24 # h -> since ref weather data is hourly stepped
wDay = np.vstack(np.arange(lenDay-1, -1., -1.) / lenDay)
wDay = wDay**10
wDayInv = 1 - wDay
yearEnd = None
# Split up Eg data generation into linked doy sequences
for year in range(SimWeather.time.dt.year.min(),
SimWeather.time.dt.year.max()+1):
# for now ignore the possibility of leap year
maskY = ((SimWeather.time.dt.year == year) & maskDoy)
# get start and end Idx for current year
doyStart = SimWeather.doy[(SimWeather.time.dt.year == year).idxmax()]
startY = (RefWeather.doy == doyStart).idxmax()
endY = startY + maskY.sum()-1
# get weighting factors
w = np.random.random(3)
w /= w.sum() # sum of all factors must be 1
# Calculate simulation data
SimWeather.loc[maskY, cols] = (
RefWeather.reference.loc[startY:endY, cols]*w[0] +
RefWeather.winter_extreme.loc[startY:endY, cols]*w[1] +
RefWeather.summer_extreme.loc[startY:endY, cols]*w[2]
).values
# get smooth transition if there is a year before
if yearEnd is not None:
mask_new = maskY & (SimWeather.doy == 1)
SimWeather.loc[mask_new, cols] = (
wDay * yearEnd +
wDayInv * SimWeather.loc[maskY, cols].values[:lenDay])
# leap day treatment
if SimWeather.time[maskY].dt.is_leap_year.any():
# update year mask
maskY = SimWeather.time.dt.year == year
# handle different cases
doyEnd = SimWeather.doy[maskY].max()
# there is missing data, only if last day of year is considered
if doyEnd == 366:
# prepare
doyStart = SimWeather.doy[maskY].min()
# random weights for inter-/extrapolation
w = np.random.random(2)
w /= w.sum()
# two cases:
# 1. Start before leap -> interpolate leap
                # 2. Start after leap -> extrapolate end of year
if doyStart < DOY_LEAPDAY:
# move data beginning from leap day
mask_new = maskY & ((SimWeather.doy >= DOY_LEAPDAY+1) &
(SimWeather.doy <= 366))
mask_old = maskY & ((SimWeather.doy >= DOY_LEAPDAY) &
(SimWeather.doy <= 365))
SimWeather.loc[mask_new, cols] = (
SimWeather.loc[mask_old, cols].values)
# interpolate leap day data with surrounding days
                    # leap day holds March 1st data for now -> add Feb 28th
mask_new = maskY & (SimWeather.doy == DOY_LEAPDAY)
mask_old = maskY & (SimWeather.doy == DOY_LEAPDAY-1)
New = (w[0] * SimWeather.loc[mask_new, cols].values +
w[1] * SimWeather.loc[mask_old, cols].values)
Last = SimWeather.loc[mask_old, cols].values[-1]
# first transition
SimWeather.loc[mask_new, cols] = (wDay*Last + wDayInv*New)
# second transition -> new is now old
mask_old = maskY & (SimWeather.doy == DOY_LEAPDAY+1)
New = SimWeather.loc[mask_old, cols].values[-1]
Last = SimWeather.loc[mask_new, cols].values
SimWeather.loc[mask_old, cols] = (wDayInv*Last + wDay*New)
else:
# just add missing data to last day of year
# since information is missing
# for time before doyStart,
# the last two known days will be extrapolated
mask_new = maskY & (SimWeather.doy == 366)
mask_old_1 = maskY & (SimWeather.doy == 364)
mask_old_2 = maskY & (SimWeather.doy == 365)
# scale new temperature in relation to
# last temperature of day before
Last = SimWeather.loc[mask_old_2, cols].values[-1]
New = (w[0] * SimWeather.loc[mask_old_1, cols].values +
w[1] * SimWeather.loc[mask_old_2, cols].values)
SimWeather.loc[mask_new, cols] = (wDay*Last + wDayInv*New)
# set year Flag
yearEnd = SimWeather.loc[maskY, cols].values[-1]
    # go through the simulated weather data and interpolate it for simData
simTime = (simData[('time', '')] -
simData[('time', '')][0]).dt.total_seconds()
for col in cols:
fWeather = interp1d(SimWeather['t [s]'], SimWeather[col], 'linear',
bounds_error=False, fill_value='extrapolate')
simData[('Weather', col)] = fWeather(simTime).astype(np.float32)
return simData
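# --- Illustrative sketch (not part of the original module) --------------------
# The core idea of _getWeather is a random convex combination of the reference,
# extreme-winter and extreme-summer curves. This standalone helper shows that
# step on plain arrays; the argument names are placeholders, not the HDF5 columns.
def _demo_weather_mix(reference, winter_extreme, summer_extreme):
    w = np.random.random(3)
    w /= w.sum()  # weights sum to one, so the mix stays within the input range
    return reference * w[0] + winter_extreme * w[1] + summer_extreme * w[2]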
def _getSolarPosition(simData, latitude, longitude):
""" Get position of sun from time and location
Args:
simData (pandas data frame): Simulation data
latitude (float): Latitude in decimal degrees. Positive north of
equator, negative to south
longitude (float): Longitude in decimal degrees. Positive east of
prime meridian, negative to west
Returns:
pandas data frame: Data frame with sim data
"""
# TODO: calculation assumes UTC-time if not localized
solarPosition = pv.solarposition.get_solarposition(
simData[('time', '')],
latitude,
longitude
)
simData[('SolarPosition',
'elevation [degree]')] = (solarPosition
.elevation
.values
.astype(np.float32))
simData[('SolarPosition',
'azimuth [degree]')] = (solarPosition
.azimuth
.values
.astype(np.float32) + 180.)
return simData
def getSimData_df(startDate, endDate, region):
""" Get all boundary condition data needed for a simulation run
Args:
startDate (string): Start date DD.MM.YYYY
(start time is hard coded to 00:00)
endDate (string): End date DD.MM.YYYY
(end day is not in time range, so end date
should be end date + 1 day)
region (string): Location of simulation (determines climate / weather)
Supported regions:
East, West, South, North
Returns:
pandas data frame: All simulation data needed
"""
data = _getSimTime(startDate, endDate)
data = _addSLPdata(data)
data = _addHotwater(data)
data = _getWeather(data, region)
    # geographic centre of Germany (Mittelpunkt Deutschland)
latitude = 51.164305
longitude = 10.4541205
data = _getSolarPosition(data, latitude, longitude)
data = _cleanSimData(data)
data.columns = | pd.MultiIndex.from_tuples(data.columns) | pandas.MultiIndex.from_tuples |
import numpy as np
import pandas as pd
from sklearn.metrics import accuracy_score
import lightgbm as lgb
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import OneHotEncoder
from sklearn.linear_model import Lasso,SGDRegressor,PassiveAggressiveRegressor,ElasticNet,LinearRegression
import heapq
path = '../AwA2/'
classname = pd.read_csv(path+'classes.txt',header=None,sep = '\t')
dic_class2name = {classname.index[i]:classname.loc[i][1] for i in range(classname.shape[0])}
dic_name2class = {classname.loc[i][1]:classname.index[i] for i in range(classname.shape[0])}
def make_test_attributetable():
attribut_bmatrix = np.loadtxt(path+'predicate-matrix-binary.txt')
#attribut_bmatrix = np.loadtxt(path+'food-label-vector-normed.txt')
attribut_bmatrix = pd.DataFrame(attribut_bmatrix)
test_classes = pd.read_csv(path+'testclasses.txt',header=None)
test_classes_flag = []
for item in test_classes.iloc[:,0].values.tolist():
test_classes_flag.append(dic_name2class[item])
return attribut_bmatrix.iloc[test_classes_flag,:]
def make_train_attributetable():
attribut_bmatrix = np.loadtxt(path+'predicate-matrix-binary.txt')
#attribut_bmatrix = np.loadtxt(path+'food-label-vector-normed.txt')
attribut_bmatrix = pd.DataFrame(attribut_bmatrix)
train_classes = pd.read_csv(path+'trainclasses.txt',header=None)
train_classes_flag = []
for item in train_classes.iloc[:,0].values.tolist():
train_classes_flag.append(dic_name2class[item])
return attribut_bmatrix.iloc[train_classes_flag,:]
def construct_Y(label_onehot):
    # map the zeros of the one-hot matrix to -1 (vectorised, same result as the
    # original element-wise double loop)
    label_onehot[label_onehot == 0] = -1
    return np.mat(label_onehot)
def generate_data(data_mean,data_std,attribute_table,num):
class_num = data_mean.shape[0]
feature_num = data_mean.shape[1]
data_list = []
label_list = []
for i in range(class_num):
data = []
for j in range(feature_num):
data.append(list(np.random.normal(data_mean[i,j],np.abs(data_std[i,j]),num)))
data = np.row_stack(data).T
data_list.append(data)
        label_list += [attribute_table.iloc[i, :].values] * num  # use the passed attribute table, not the global
return np.row_stack(data_list),np.row_stack(label_list)
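# --- Illustrative sketch (not part of the original script) --------------------
# generate_data draws `num` synthetic feature vectors per unseen class from
# N(mean, std) and pairs them with that class's attribute vector. The shapes
# below are made up for illustration (5 classes, 8 features, 85 attributes),
# not the real AwA2 dimensions.
def _demo_generate_data():
    class_mean = np.random.rand(5, 8)
    class_std = np.abs(np.random.rand(5, 8)) * 0.1
    attrs = pd.DataFrame(np.random.randint(0, 2, (5, 85)))
    X, A = generate_data(class_mean, class_std, attrs, num=10)
    return X.shape, A.shape  # expected: (50, 8) and (50, 85)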
def cosinedist(gt, pre, top):
dist_list = []
labels = []
for i in range(gt.values.shape[0]):
dist = 1 - np.dot(gt.values[i],pre.transpose())/(np.linalg.norm(gt.values[i])*np.linalg.norm(pre))
dist_list.append(dist)
result = map(dist_list.index, heapq.nsmallest(top, dist_list))
result = list(result)
for loc in result:
labels.append(gt.index[loc])
#print("Result:", result)
return labels
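# --- Illustrative sketch (not part of the original script) --------------------
# cosinedist returns the index labels of the `top` attribute rows closest (by
# cosine distance) to a predicted attribute vector. Toy data below; the index
# values stand in for class ids.
def _demo_cosinedist():
    gt = pd.DataFrame(np.eye(4), index=[10, 20, 30, 40])
    pre = np.array([0.9, 0.1, 0.0, 0.0])
    return cosinedist(gt, pre, top=2)  # expected: [10, 20]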
#trainlabel = np.load(path+'Food11_trainlabel.npy')
trainlabel = np.load(path+'AWA2_trainlabel.npy')
#train_attributelabel = np.load(path+'AWA2_train_Label_attributelabel.npy')
testlabel = np.load(path+'AWA2_testlabel.npy')
#test_attributelabel = np.load(path+'AWA2_test_Label_attributelabel.npy')
enc1 = OneHotEncoder()
enc1.fit(np.mat(trainlabel).T)
trainlabel_onehot = enc1.transform(np.mat(trainlabel).T).toarray()
enc2 = OneHotEncoder()
enc2.fit(np.mat(testlabel).T)
testlabel_onehot = enc2.transform(np.mat(testlabel).T).toarray()
trainfeatures = np.load(path+'resnet101_trainfeatures.npy')
print("train feature:", trainfeatures.shape)
testfeatures = np.load(path+'resnet101_testfeatures.npy')
print("test feature:", testfeatures.shape)
train_attributetable = make_train_attributetable()
test_attributetable = make_test_attributetable()
trainfeatures_tabel = | pd.DataFrame(trainfeatures) | pandas.DataFrame |
from django.shortcuts import render
from rest_framework import status
from rest_framework import viewsets
from rest_framework import filters
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.authentication import TokenAuthentication
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.settings import api_settings
from django.views.generic.list import ListView
from django.views.generic.detail import DetailView
from django.shortcuts import get_object_or_404
from .models import UserProfile
from profiles_api import serializers
from profiles_api import models
from profiles_api import permissions
from .forms import GuestForm
from django.shortcuts import redirect
from django.contrib import messages
import requests
import json
import pandas as pd
from functools import reduce
import base64
import os
import os.path
from datetime import datetime
import time
from django.db.models import Q
from profiles_project.secrets import YITU_AUTH
from django.core.files.storage import default_storage
from django.core.files.base import ContentFile
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
url_visitors = "https://10.12.201.64:9812/visitors"
url_history = "https://10.12.201.64:9812/visitors/history"
headers = {
"Content-Type": "application/json",
"Authorization": YITU_AUTH
}
# Create your views here.
class UserProfileViewSet(viewsets.ModelViewSet):
"""Handle creating and update profiles"""
serializer_class = serializers.UserProfileSerializer
queryset = models.UserProfile.objects.all()
authentication_classes = (TokenAuthentication,)
permission_classes = (permissions.UpdateOwnProfile,)
filter_backends = (filters.SearchFilter,)
search_fields = ('name', 'nric_number',)
class UserLoginApiView(ObtainAuthToken):
"""Handle user authentication token"""
renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES
class VisitorList(ListView):
template_name = 'userprofile_list.html'
model = UserProfile
context_object_name = 'visitors'
ordering = ['reg_date']
paginate_by = 3
def get_queryset(self): # new
query = self.request.GET.get('q', '')
object_list = UserProfile.objects.filter(
Q(name__icontains=query) | Q(nric_number__icontains=query)
)
return object_list
# Granting access to a registered guest
class VisitorDetail(DetailView):
model = UserProfile
template_name = 'userprofile.html'
context_object_name = 'visitor'
def post(self, request, pk=None):
if request.method == 'POST':
form = UserProfile.objects.get(pk=pk)
form_img = form.photo
form_nric = form.nric_number
form_name = form.name
form_mobile_number = form.mobile_number
form_company = form.company
form_img_url = "/vagrant/media/" + str(form_img)
last_access = form.last_access_date.strftime("%Y-%m-%d")
now_access = datetime.today().strftime("%Y-%m-%d")
if last_access == now_access:
messages.success(request, 'Visitor access in on going')
return redirect('userprofile', pk=pk)
else:
form.last_access_date = datetime.today()
form.save(update_fields=['last_access_date'])
print(form_name)
print(last_access)
with open(form_img_url, "rb") as file:
enc_img = base64.b64encode(file.read())
dec_img = enc_img.decode("utf-8")
print(dec_img)
payload = {
"visitor_list" : [ {
"card_numbers" : [ form_nric ],
"face_image_content" : dec_img,
"meta" : {},
"person_information" : {
"company" : form_company,
"identity_number" : form_nric,
"name" : form_name,
"phone" : form_mobile_number,
"remark" : "",
"visit_end_timestamp" : 0,
"visit_start_timestamp" : 0,
"visit_time_type" : 1,
"visitee_name" : ""
},
"tag_id_list" : [ "5e58b6d9e2e6a700014a2b19" ]
} ]
}
jsonpayload = json.dumps(payload)
response = requests.request("POST", url_visitors, headers=headers, data=jsonpayload, verify=False)
print(response.text)
messages.success(request, 'Visitor is allowed to access')
return redirect('userprofile', pk=pk)
else:
form = GuestForm()
return render(request, 'userprofile.html', {
'form': form
})
def search(request):
return render(request, 'search.html', {})
def history(request):
# response = requests.request("GET", url_history, headers=headers, verify=False)
#
# history = json.loads(response.text)
# info = history.get("result", {})
# info_for_csv = history.get("result", {})
#
# infoarr = json.dumps(info, indent=2)
#
# # pagination
#
# page = request.GET.get('page', 1)
#
# paginator = Paginator(info, 3)
#
# try:
# info = paginator.page(page)
# except PageNotAnInteger:
# info = paginator.page(1)
# except EmptyPage:
# info = paginator.page(paginator.num_pages)
#
# context = {'info': info }
#
# file = []
#
# for item in info_for_csv:
# name = item['person_information']['name']
# inf = item['person_information']
# file.append(inf)
#
# for f in file:
# t_start = datetime.fromtimestamp(f['visit_start_timestamp'])
# t_end = datetime.fromtimestamp(f['visit_end_timestamp'])
# t_check = datetime.fromtimestamp(f['check_out_timestamp'])
# f['visit_start_timestamp'] = t_start.strftime("%d-%m-%Y %H:%M:%S")
# f['visit_end_timestamp'] = t_end.strftime("%d-%m-%Y %H:%M:%S")
# f['check_out_timestamp'] = t_check.strftime("%d-%m-%Y %H:%M:%S")
#
#
# outname = 'history.csv'
# outdir = './media/logs'
#
# fullname = os.path.join(outdir, outname)
#
# result = pd.DataFrame(file)
#
# result.to_csv(fullname, index=False)
context = {}
df1 = | pd.read_table('./media/logs/history.csv', sep=',') | pandas.read_table |
import pandas as pd
import numpy as np
import pytest
from conftest import DATA_DIR, assert_series_equal
from numpy.testing import assert_allclose
from pvlib import temperature
@pytest.fixture
def sapm_default():
return temperature.TEMPERATURE_MODEL_PARAMETERS['sapm'][
'open_rack_glass_glass']
def test_sapm_cell(sapm_default):
default = temperature.sapm_cell(900, 20, 5, sapm_default['a'],
sapm_default['b'], sapm_default['deltaT'])
assert_allclose(default, 43.509, 3)
def test_sapm_module(sapm_default):
default = temperature.sapm_module(900, 20, 5, sapm_default['a'],
sapm_default['b'])
assert_allclose(default, 40.809, 3)
def test_sapm_cell_from_module(sapm_default):
default = temperature.sapm_cell_from_module(50, 900,
sapm_default['deltaT'])
assert_allclose(default, 50 + 900 / 1000 * sapm_default['deltaT'])
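# Illustrative addition (not part of the upstream pvlib test suite): the values
# asserted above follow from the published SAPM equations,
#   T_module = POA * exp(a + b * wind_speed) + T_air
#   T_cell   = T_module + POA / 1000 W/m^2 * deltaT
# evaluated with the open_rack_glass_glass parameters taken from the fixture.
def test_sapm_by_hand(sapm_default):
    module = 900 * np.exp(sapm_default['a'] + sapm_default['b'] * 5) + 20
    cell = module + 900 / 1000 * sapm_default['deltaT']
    assert_allclose(module, 40.809, atol=0.01)
    assert_allclose(cell, 43.509, atol=0.01)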
def test_sapm_ndarray(sapm_default):
temps = np.array([0, 10, 5])
irrads = np.array([0, 500, 0])
winds = np.array([10, 5, 0])
cell_temps = temperature.sapm_cell(irrads, temps, winds, sapm_default['a'],
sapm_default['b'],
sapm_default['deltaT'])
module_temps = temperature.sapm_module(irrads, temps, winds,
sapm_default['a'],
sapm_default['b'])
expected_cell = np.array([0., 23.06066166, 5.])
expected_module = np.array([0., 21.56066166, 5.])
assert_allclose(expected_cell, cell_temps, 3)
assert_allclose(expected_module, module_temps, 3)
def test_sapm_series(sapm_default):
times = pd.date_range(start='2015-01-01', end='2015-01-02', freq='12H')
temps = pd.Series([0, 10, 5], index=times)
irrads = pd.Series([0, 500, 0], index=times)
winds = | pd.Series([10, 5, 0], index=times) | pandas.Series |
import os
import sys
import warnings
if not sys.warnoptions:
warnings.simplefilter("ignore")
with warnings.catch_warnings():
warnings.simplefilter("ignore")
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.neural_network import MLPRegressor
from sklearn.impute import SimpleImputer
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import LabelEncoder
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import RobustScaler
from sklearn.metrics import mean_squared_log_error
from sklearn.model_selection import GridSearchCV, cross_val_score
print(os.listdir("data"))
train_data = pd.read_csv('data/train.csv')
test_data = | pd.read_csv('data/test.csv') | pandas.read_csv |
# TODO: get a cleaner inclusion, maybe more than one
# TODO: naming rule on the combination of extractor and Cleaner
import pandas as pd
import os
import sys
import glob
from tqdm import tqdm
from tqdm.auto import tqdm as tqdma
def get_wave_files(base_folder,FileFindDict, FileCountLimit):
fn = dict()
fa = dict()
SNR = FileFindDict['SNR']
machine = FileFindDict['machine']
ID = FileFindDict['ID']
#print(base_folder, machine, SNR, ID)
for idx in ID:
fn[idx] = sorted(glob.glob(os.path.abspath( "{base}/{SNR}/{machine}/id_{ID}/{n}/*.{ext}".format(
base=base_folder+'dataset',SNR=SNR,machine=machine,ID=idx, n='normal',ext='wav' ))))
fa[idx] = sorted(glob.glob(os.path.abspath( "{base}/{SNR}/{machine}/id_{ID}/{n}/*.{ext}".format(
base=base_folder+'dataset',SNR=SNR,machine=machine,ID=idx, n='abnormal',ext='wav' ))))
for idx in fn:
if FileCountLimit:
if FileCountLimit < len(fn[idx]):
fn[idx] = fn[idx][:FileCountLimit]
if FileCountLimit < len(fa[idx]):
fa[idx] = fa[idx][:FileCountLimit]
return fn, fa
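# --- Illustrative sketch (not part of the original module) --------------------
# Typical call of get_wave_files for the MIMII-style folder layout assumed by
# the glob pattern above. The base folder, SNR and machine IDs are placeholders.
def _demo_get_wave_files():
    file_find = {'SNR': '6dB', 'machine': 'pump', 'ID': ['00', '02']}
    normal, abnormal = get_wave_files('./data/', file_find, FileCountLimit=100)
    # both are dicts keyed by machine ID, each holding at most 100 sorted paths
    return {idx: (len(normal[idx]), len(abnormal[idx])) for idx in normal}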
def BaseDataFrame(nf, af, FileFindDict):
get_filename = lambda l: [os.path.basename(pl).replace('.'+'wav','') for pl in l]
df = pd.DataFrame(columns=['path','abnormal','ID'])
for idx in nf:
df_temp_n = | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Consensus non-negative matrix factorization (cNMF) adapted from (Kotliar, et al. 2019)
"""
import numpy as np
import pandas as pd
import os, errno
import glob
import shutil
import datetime
import uuid
import itertools
import yaml
import subprocess
import scipy.sparse as sp
import warnings
from scipy.spatial.distance import squareform
from sklearn.decomposition import non_negative_factorization
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
from sklearn.utils import sparsefuncs
from sklearn.preprocessing import normalize
from fastcluster import linkage
from scipy.cluster.hierarchy import leaves_list
import matplotlib.pyplot as plt
import scanpy as sc
from ._version import get_versions
def save_df_to_npz(obj, filename):
"""
Saves numpy array to `.npz` file
"""
with warnings.catch_warnings():
warnings.filterwarnings("ignore")
np.savez_compressed(
filename,
data=obj.values,
index=obj.index.values,
columns=obj.columns.values,
)
def save_df_to_text(obj, filename):
"""
Saves numpy array to tab-delimited text file
"""
obj.to_csv(filename, sep="\t")
def load_df_from_npz(filename):
"""
Loads numpy array from `.npz` file
"""
with warnings.catch_warnings():
warnings.filterwarnings("ignore")
with np.load(filename, allow_pickle=True) as f:
obj = pd.DataFrame(**f)
return obj
def check_dir_exists(path):
"""
Checks if directory already exists or not and creates it if it doesn't
"""
try:
os.makedirs(path)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
def worker_filter(iterable, worker_index, total_workers):
return (
p for i, p in enumerate(iterable) if (i - worker_index) % total_workers == 0
)
def fast_euclidean(mat):
D = mat.dot(mat.T)
squared_norms = np.diag(D).copy()
D *= -2.0
D += squared_norms.reshape((-1, 1))
D += squared_norms.reshape((1, -1))
D = np.sqrt(D)
D[D < 0] = 0
return squareform(D, checks=False)
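# Illustrative note (not part of the original module): fast_euclidean expands
# ||x - y||^2 = ||x||^2 + ||y||^2 - 2 * x.y from the Gram matrix and returns the
# condensed distance vector. The check below compares it against scipy's pdist.
def _demo_fast_euclidean_check():
    from scipy.spatial.distance import pdist

    mat = np.random.rand(20, 5)
    return np.allclose(fast_euclidean(mat), pdist(mat), atol=1e-8)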
def fast_ols_all_cols(X, Y):
pinv = np.linalg.pinv(X)
beta = np.dot(pinv, Y)
return beta
def fast_ols_all_cols_df(X, Y):
beta = fast_ols_all_cols(X, Y)
beta = pd.DataFrame(beta, index=X.columns, columns=Y.columns)
return beta
def var_sparse_matrix(X):
mean = np.array(X.mean(axis=0)).reshape(-1)
Xcopy = X.copy()
Xcopy.data **= 2
var = np.array(Xcopy.mean(axis=0)).reshape(-1) - (mean ** 2)
return var
def get_highvar_genes_sparse(
expression, expected_fano_threshold=None, minimal_mean=0.01, numgenes=None
):
# Find high variance genes within those cells
gene_mean = np.array(expression.mean(axis=0)).astype(float).reshape(-1)
E2 = expression.copy()
E2.data **= 2
gene2_mean = np.array(E2.mean(axis=0)).reshape(-1)
gene_var = pd.Series(gene2_mean - (gene_mean ** 2))
del E2
gene_mean = pd.Series(gene_mean)
gene_fano = gene_var / gene_mean
# Find parameters for expected fano line
top_genes = gene_mean.sort_values(ascending=False)[:20].index
A = (np.sqrt(gene_var) / gene_mean)[top_genes].min()
w_mean_low, w_mean_high = gene_mean.quantile([0.10, 0.90])
w_fano_low, w_fano_high = gene_fano.quantile([0.10, 0.90])
winsor_box = (
(gene_fano > w_fano_low)
& (gene_fano < w_fano_high)
& (gene_mean > w_mean_low)
& (gene_mean < w_mean_high)
)
fano_median = gene_fano[winsor_box].median()
B = np.sqrt(fano_median)
gene_expected_fano = (A ** 2) * gene_mean + (B ** 2)
fano_ratio = gene_fano / gene_expected_fano
# Identify high var genes
if numgenes is not None:
highvargenes = fano_ratio.sort_values(ascending=False).index[:numgenes]
high_var_genes_ind = fano_ratio.index.isin(highvargenes)
T = None
    else:
        if not expected_fano_threshold:
            T = 1.0 + gene_fano[winsor_box].std()
        else:
            T = expected_fano_threshold
        high_var_genes_ind = (fano_ratio > T) & (gene_mean > minimal_mean)
gene_counts_stats = pd.DataFrame(
{
"mean": gene_mean,
"var": gene_var,
"fano": gene_fano,
"expected_fano": gene_expected_fano,
"high_var": high_var_genes_ind,
"fano_ratio": fano_ratio,
}
)
gene_fano_parameters = {
"A": A,
"B": B,
"T": T,
"minimal_mean": minimal_mean,
}
return (gene_counts_stats, gene_fano_parameters)
def get_highvar_genes(
input_counts, expected_fano_threshold=None, minimal_mean=0.01, numgenes=None
):
# Find high variance genes within those cells
gene_counts_mean = pd.Series(input_counts.mean(axis=0).astype(float))
gene_counts_var = pd.Series(input_counts.var(ddof=0, axis=0).astype(float))
gene_counts_fano = pd.Series(gene_counts_var / gene_counts_mean)
# Find parameters for expected fano line
top_genes = gene_counts_mean.sort_values(ascending=False)[:20].index
A = (np.sqrt(gene_counts_var) / gene_counts_mean)[top_genes].min()
w_mean_low, w_mean_high = gene_counts_mean.quantile([0.10, 0.90])
w_fano_low, w_fano_high = gene_counts_fano.quantile([0.10, 0.90])
winsor_box = (
(gene_counts_fano > w_fano_low)
& (gene_counts_fano < w_fano_high)
& (gene_counts_mean > w_mean_low)
& (gene_counts_mean < w_mean_high)
)
fano_median = gene_counts_fano[winsor_box].median()
B = np.sqrt(fano_median)
gene_expected_fano = (A ** 2) * gene_counts_mean + (B ** 2)
fano_ratio = gene_counts_fano / gene_expected_fano
# Identify high var genes
if numgenes is not None:
highvargenes = fano_ratio.sort_values(ascending=False).index[:numgenes]
high_var_genes_ind = fano_ratio.index.isin(highvargenes)
T = None
else:
if not expected_fano_threshold:
T = 1.0 + gene_counts_fano[winsor_box].std()
else:
T = expected_fano_threshold
high_var_genes_ind = (fano_ratio > T) & (gene_counts_mean > minimal_mean)
gene_counts_stats = pd.DataFrame(
{
"mean": gene_counts_mean,
"var": gene_counts_var,
"fano": gene_counts_fano,
"expected_fano": gene_expected_fano,
"high_var": high_var_genes_ind,
"fano_ratio": fano_ratio,
}
)
gene_fano_parameters = {
"A": A,
"B": B,
"T": T,
"minimal_mean": minimal_mean,
}
return (gene_counts_stats, gene_fano_parameters)
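# Illustrative sketch (not part of the original module): the high-variance gene
# filter compares each gene's Fano factor (variance / mean) with the expected
# curve A^2 * mean + B^2 fitted from the data and keeps the genes whose ratio is
# largest. Toy Poisson counts below (200 cells x 50 genes); sizes are arbitrary.
def _demo_highvar_genes():
    counts = np.random.poisson(lam=np.random.gamma(2.0, 1.0, size=50),
                               size=(200, 50))
    stats, params = get_highvar_genes(counts, numgenes=10)
    return stats.loc[stats.high_var, ["mean", "fano", "expected_fano"]]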
def compute_tpm(input_counts):
"""
Default TPM normalization
"""
tpm = input_counts.copy()
tpm.layers["raw_counts"] = tpm.X.copy()
sc.pp.normalize_total(tpm, target_sum=1e6)
return tpm
def subset_adata(adata, subset):
"""
Subsets anndata object on one or more `.obs` columns
"""
print("Subsetting AnnData on {}".format(subset), end="")
# initialize .obs column for choosing cells
adata.obs["adata_subset_combined"] = 0
# create label as union of given subset args
for i in range(len(subset)):
adata.obs.loc[
adata.obs[subset[i]].isin(["True", True, 1.0, 1]), "adata_subset_combined"
] = 1
adata = adata[adata.obs["adata_subset_combined"] == 1, :].copy()
adata.obs.drop(columns="adata_subset_combined", inplace=True)
print(" - now {} cells and {} genes".format(adata.n_obs, adata.n_vars))
return adata
def cnmf_markers(adata, spectra_score_file, n_genes=30, key="cnmf"):
"""
Read cNMF spectra into AnnData object
Reads in gene spectra score output from cNMF and saves top gene loadings for
each usage as dataframe in adata.uns
Parameters
----------
adata : AnnData.AnnData
AnnData object
spectra_score_file : str
`<name>.gene_spectra_score.<k>.<dt>.txt` file from cNMF containing gene
loadings
n_genes : int, optional (default=30)
number of top genes to list for each usage (rows of df)
key : str, optional (default="cnmf")
prefix of `adata.uns` keys to save
Returns
-------
adata : AnnData.AnnData
adata is edited in place to include gene spectra scores
(`adata.varm["cnmf_spectra"]`) and list of top genes by spectra score
(`adata.uns["cnmf_markers"]`)
"""
# load Z-scored GEPs which reflect gene enrichment, save to adata.varm
spectra = pd.read_csv(spectra_score_file, sep="\t", index_col=0).T
spectra = adata.var[[]].merge(
spectra, how="left", left_index=True, right_index=True
)
adata.varm["{}_spectra".format(key)] = spectra.values
# obtain top n_genes for each GEP in sorted order and combine them into df
top_genes = []
for gep in spectra.columns:
top_genes.append(
list(spectra.sort_values(by=gep, ascending=False).index[:n_genes])
)
# save output to adata.uns
adata.uns["{}_markers".format(key)] = pd.DataFrame(
top_genes, index=spectra.columns.astype(str)
).T
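# Illustrative sketch (not part of the original module): a typical cnmf_markers
# call after a consensus run. The file name follows the pattern written by
# cNMF.consensus() further below; the output directory, run name, k and dt
# values here are placeholders.
def _demo_cnmf_markers(adata):
    cnmf_markers(
        adata,
        "cnmf_out/demo/demo.gene_spectra_score.k_10.dt_0_5.txt",
        n_genes=20,
        key="cnmf",
    )
    return adata.uns["cnmf_markers"].head()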
def cnmf_load_results(adata, cnmf_dir, name, k, dt, key="cnmf", **kwargs):
"""
Load results of cNMF
Given adata object and corresponding cNMF output (cnmf_dir, name, k, dt to
identify), read in relevant results and save to adata object inplace, and
output plot of gene loadings for each GEP usage.
Parameters
----------
adata : AnnData.AnnData
AnnData object
cnmf_dir : str
relative path to directory containing cNMF outputs
name : str
name of cNMF replicate
k : int
value used for consensus factorization
dt : int
distance threshold value used for consensus clustering
key : str, optional (default="cnmf")
prefix of adata.uns keys to save
n_points : int
how many top genes to include in rank_genes() plot
**kwargs : optional (default=None)
keyword args to pass to cnmf_markers()
Returns
-------
adata : AnnData.AnnData
`adata` is edited in place to include overdispersed genes
(`adata.var["cnmf_overdispersed"]`), usages (`adata.obs["usage_#"]`,
`adata.obsm["cnmf_usages"]`), gene spectra scores
(`adata.varm["cnmf_spectra"]`), and list of top genes by spectra score
(`adata.uns["cnmf_markers"]`).
"""
# read in cell usages
usage = pd.read_csv(
"{}/{}/{}.usages.k_{}.dt_{}.consensus.txt".format(
cnmf_dir, name, name, str(k), str(dt).replace(".", "_")
),
sep="\t",
index_col=0,
)
usage.columns = ["usage_" + str(col) for col in usage.columns]
# normalize usages to total for each cell
usage_norm = usage.div(usage.sum(axis=1), axis=0)
usage_norm.index = usage_norm.index.astype(str)
# add usages to .obs for visualization
adata.obs = pd.merge(
left=adata.obs, right=usage_norm, how="left", left_index=True, right_index=True
)
# replace missing values with zeros for all factors
adata.obs.loc[:, usage_norm.columns].fillna(value=0, inplace=True)
# add usages as array in .obsm for dimension reduction
adata.obsm["cnmf_usages"] = adata.obs.loc[:, usage_norm.columns].values
# read in overdispersed genes determined by cNMF and add as metadata to adata.var
overdispersed = np.genfromtxt(
"{}/{}/{}.overdispersed_genes.txt".format(cnmf_dir, name, name),
delimiter="\t",
dtype=str,
)
adata.var["cnmf_overdispersed"] = 0
adata.var.loc[
[x for x in adata.var.index if x in overdispersed], "cnmf_overdispersed"
] = 1
# read top gene loadings for each GEP usage and save to adata.uns['cnmf_markers']
cnmf_markers(
adata,
"{}/{}/{}.gene_spectra_score.k_{}.dt_{}.txt".format(
cnmf_dir, name, name, str(k), str(dt).replace(".", "_")
),
key=key,
**kwargs
)
class cNMF:
"""
Consensus NMF object
Containerizes the cNMF inputs and outputs to allow for easy pipelining
"""
def __init__(self, output_dir=".", name=None):
"""
Parameters
----------
output_dir : path, optional (default=".")
Output directory for analysis files.
name : string, optional (default=None)
A name for this analysis. Will be prefixed to all output files.
If set to None, will be automatically generated from date (and random string).
"""
self.output_dir = output_dir
if name is None:
now = datetime.datetime.now()
rand_hash = uuid.uuid4().hex[:6]
name = "%s_%s" % (now.strftime("%Y_%m_%d"), rand_hash)
self.name = name
self.paths = None
def _initialize_dirs(self):
if self.paths is None:
# Check that output directory exists, create it if needed.
check_dir_exists(self.output_dir)
check_dir_exists(os.path.join(self.output_dir, self.name))
check_dir_exists(os.path.join(self.output_dir, self.name, "cnmf_tmp"))
self.paths = {
"normalized_counts": os.path.join(
self.output_dir,
self.name,
"cnmf_tmp",
self.name + ".norm_counts.h5ad",
),
"nmf_replicate_parameters": os.path.join(
self.output_dir,
self.name,
"cnmf_tmp",
self.name + ".nmf_params.df.npz",
),
"nmf_run_parameters": os.path.join(
self.output_dir,
self.name,
"cnmf_tmp",
self.name + ".nmf_idvrun_params.yaml",
),
"nmf_genes_list": os.path.join(
self.output_dir, self.name, self.name + ".overdispersed_genes.txt"
),
"tpm": os.path.join(
self.output_dir, self.name, "cnmf_tmp", self.name + ".tpm.h5ad"
),
"tpm_stats": os.path.join(
self.output_dir,
self.name,
"cnmf_tmp",
self.name + ".tpm_stats.df.npz",
),
"iter_spectra": os.path.join(
self.output_dir,
self.name,
"cnmf_tmp",
self.name + ".spectra.k_%d.iter_%d.df.npz",
),
"iter_usages": os.path.join(
self.output_dir,
self.name,
"cnmf_tmp",
self.name + ".usages.k_%d.iter_%d.df.npz",
),
"merged_spectra": os.path.join(
self.output_dir,
self.name,
"cnmf_tmp",
self.name + ".spectra.k_%d.merged.df.npz",
),
"local_density_cache": os.path.join(
self.output_dir,
self.name,
"cnmf_tmp",
self.name + ".local_density_cache.k_%d.merged.df.npz",
),
"consensus_spectra": os.path.join(
self.output_dir,
self.name,
"cnmf_tmp",
self.name + ".spectra.k_%d.dt_%s.consensus.df.npz",
),
"consensus_spectra__txt": os.path.join(
self.output_dir,
self.name,
self.name + ".spectra.k_%d.dt_%s.consensus.txt",
),
"consensus_usages": os.path.join(
self.output_dir,
self.name,
"cnmf_tmp",
self.name + ".usages.k_%d.dt_%s.consensus.df.npz",
),
"consensus_usages__txt": os.path.join(
self.output_dir,
self.name,
self.name + ".usages.k_%d.dt_%s.consensus.txt",
),
"consensus_stats": os.path.join(
self.output_dir,
self.name,
"cnmf_tmp",
self.name + ".stats.k_%d.dt_%s.df.npz",
),
"clustering_plot": os.path.join(
self.output_dir, self.name, self.name + ".clustering.k_%d.dt_%s.png"
),
"gene_spectra_score": os.path.join(
self.output_dir,
self.name,
"cnmf_tmp",
self.name + ".gene_spectra_score.k_%d.dt_%s.df.npz",
),
"gene_spectra_score__txt": os.path.join(
self.output_dir,
self.name,
self.name + ".gene_spectra_score.k_%d.dt_%s.txt",
),
"gene_spectra_tpm": os.path.join(
self.output_dir,
self.name,
"cnmf_tmp",
self.name + ".gene_spectra_tpm.k_%d.dt_%s.df.npz",
),
"gene_spectra_tpm__txt": os.path.join(
self.output_dir,
self.name,
self.name + ".gene_spectra_tpm.k_%d.dt_%s.txt",
),
"k_selection_plot": os.path.join(
self.output_dir, self.name, self.name + ".k_selection.png"
),
"k_selection_stats": os.path.join(
self.output_dir, self.name, self.name + ".k_selection_stats.df.npz"
),
}
def get_norm_counts(
self, counts, tpm, high_variance_genes_filter=None, num_highvar_genes=None
):
"""
Parameters
----------
counts : anndata.AnnData
Scanpy AnnData object (cells x genes) containing raw counts. Filtered such
that no genes or cells with 0 counts
tpm : anndata.AnnData
Scanpy AnnData object (cells x genes) containing tpm normalized data
matching counts
high_variance_genes_filter : np.array, optional (default=None)
A pre-specified list of genes considered to be high-variance.
Only these genes will be used during factorization of the counts matrix.
Must match the .var index of counts and tpm.
If set to None, high-variance genes will be automatically computed, using
the parameters below.
num_highvar_genes : int, optional (default=None)
Instead of providing an array of high-variance genes, identify this many
most overdispersed genes for filtering
Returns
-------
normcounts : anndata.AnnData, shape (cells, num_highvar_genes)
A counts matrix containing only the high variance genes and with columns
(genes) normalized to unit variance
"""
if high_variance_genes_filter is None:
## Get list of high-var genes if one wasn't provided
if sp.issparse(tpm.X):
(gene_counts_stats, gene_fano_params) = get_highvar_genes_sparse(
tpm.X, numgenes=num_highvar_genes
)
else:
(gene_counts_stats, gene_fano_params) = get_highvar_genes(
np.array(tpm.X), numgenes=num_highvar_genes
)
high_variance_genes_filter = list(
tpm.var.index[gene_counts_stats.high_var.values]
)
## Subset out high-variance genes
print(
"Selecting {} highly variable genes".format(len(high_variance_genes_filter))
)
norm_counts = counts[:, high_variance_genes_filter]
norm_counts = norm_counts[tpm.obs_names, :].copy()
## Scale genes to unit variance
if sp.issparse(tpm.X):
sc.pp.scale(norm_counts, zero_center=False)
if np.isnan(norm_counts.X.data).sum() > 0:
print("Warning: NaNs in normalized counts matrix")
else:
norm_counts.X /= norm_counts.X.std(axis=0, ddof=1)
if np.isnan(norm_counts.X).sum().sum() > 0:
print("Warning: NaNs in normalized counts matrix")
## Save a \n-delimited list of the high-variance genes used for factorization
open(self.paths["nmf_genes_list"], "w").write(
"\n".join(high_variance_genes_filter)
)
## Check for any cells that have 0 counts of the overdispersed genes
zerocells = norm_counts.X.sum(axis=1) == 0
if zerocells.sum() > 0:
print(
"Warning: %d cells have zero counts of overdispersed genes - ignoring these cells for factorization."
% (zerocells.sum())
)
sc.pp.filter_cells(norm_counts, min_counts=1)
return norm_counts
def save_norm_counts(self, norm_counts):
self._initialize_dirs()
norm_counts.write(self.paths["normalized_counts"], compression="gzip")
def get_nmf_iter_params(
self, ks, n_iter=100, random_state_seed=None, beta_loss="kullback-leibler"
):
"""
Creates a DataFrame with parameters for NMF iterations
Parameters
----------
ks : integer, or list-like.
Number of topics (components) for factorization.
Several values can be specified at the same time, which will be run
independently.
        n_iter : integer, optional (default=100)
Number of iterations for factorization. If several `k` are specified,
this many iterations will be run for each value of `k`.
random_state_seed : int or None, optional (default=None)
Seed for sklearn random state.
"""
if type(ks) is int:
ks = [ks]
# Remove any repeated k values, and order.
k_list = sorted(set(list(ks)))
n_runs = len(ks) * n_iter
np.random.seed(seed=random_state_seed)
nmf_seeds = np.random.randint(low=1, high=(2 ** 32) - 1, size=n_runs)
replicate_params = []
for i, (k, r) in enumerate(itertools.product(k_list, range(n_iter))):
replicate_params.append([k, r, nmf_seeds[i]])
replicate_params = pd.DataFrame(
replicate_params, columns=["n_components", "iter", "nmf_seed"]
)
_nmf_kwargs = dict(
alpha=0.0,
l1_ratio=0.0,
beta_loss=beta_loss,
solver="mu",
tol=1e-4,
max_iter=400,
regularization=None,
init="random",
)
## Coordinate descent is faster than multiplicative update but only works for frobenius
if beta_loss == "frobenius":
_nmf_kwargs["solver"] = "cd"
return (replicate_params, _nmf_kwargs)
def save_nmf_iter_params(self, replicate_params, run_params):
self._initialize_dirs()
save_df_to_npz(replicate_params, self.paths["nmf_replicate_parameters"])
with open(self.paths["nmf_run_parameters"], "w") as F:
yaml.dump(run_params, F)
def _nmf(self, X, nmf_kwargs):
"""
Parameters
----------
X : pandas.DataFrame,
Normalized counts dataFrame to be factorized.
nmf_kwargs : dict,
Arguments to be passed to `non_negative_factorization`
"""
(usages, spectra, niter) = non_negative_factorization(X, **nmf_kwargs)
return (spectra, usages)
def run_nmf(
self, worker_i=1, total_workers=1,
):
"""
Iteratively runs NMF with prespecified parameters
Use the `worker_i` and `total_workers` parameters for parallelization.
Generic kwargs for NMF are loaded from `self.paths['nmf_run_parameters']`,
defaults below::
`non_negative_factorization` default arguments:
alpha=0.0
l1_ratio=0.0
beta_loss='kullback-leibler'
solver='mu'
tol=1e-4,
max_iter=200
regularization=None
init='random'
random_state, n_components are both set by the prespecified
self.paths['nmf_replicate_parameters'].
Parameters
----------
norm_counts : pandas.DataFrame,
Normalized counts dataFrame to be factorized.
(Output of `normalize_counts`)
run_params : pandas.DataFrame,
Parameters for NMF iterations.
(Output of `prepare_nmf_iter_params`)
"""
self._initialize_dirs()
run_params = load_df_from_npz(self.paths["nmf_replicate_parameters"])
norm_counts = sc.read(self.paths["normalized_counts"])
_nmf_kwargs = yaml.load(
open(self.paths["nmf_run_parameters"]), Loader=yaml.FullLoader
)
jobs_for_this_worker = worker_filter(
range(len(run_params)), worker_i, total_workers
)
for idx in jobs_for_this_worker:
p = run_params.iloc[idx, :]
print("[Worker %d]. Starting task %d." % (worker_i, idx))
_nmf_kwargs["random_state"] = p["nmf_seed"]
_nmf_kwargs["n_components"] = p["n_components"]
(spectra, usages) = self._nmf(norm_counts.X, _nmf_kwargs)
spectra = pd.DataFrame(
spectra,
index=np.arange(1, _nmf_kwargs["n_components"] + 1),
columns=norm_counts.var.index,
)
save_df_to_npz(
spectra, self.paths["iter_spectra"] % (p["n_components"], p["iter"])
)
def combine_nmf(self, k, remove_individual_iterations=False):
run_params = load_df_from_npz(self.paths["nmf_replicate_parameters"])
print("Combining factorizations for k=%d." % k)
self._initialize_dirs()
combined_spectra = None
n_iter = sum(run_params.n_components == k)
run_params_subset = run_params[run_params.n_components == k].sort_values("iter")
spectra_labels = []
for i, p in run_params_subset.iterrows():
spectra = load_df_from_npz(
self.paths["iter_spectra"] % (p["n_components"], p["iter"])
)
if combined_spectra is None:
combined_spectra = np.zeros((n_iter, k, spectra.shape[1]))
combined_spectra[p["iter"], :, :] = spectra.values
for t in range(k):
spectra_labels.append("iter%d_topic%d" % (p["iter"], t + 1))
combined_spectra = combined_spectra.reshape(-1, combined_spectra.shape[-1])
combined_spectra = pd.DataFrame(
combined_spectra, columns=spectra.columns, index=spectra_labels
)
save_df_to_npz(combined_spectra, self.paths["merged_spectra"] % k)
return combined_spectra
def consensus(
self,
k,
density_threshold_str="0.5",
local_neighborhood_size=0.30,
show_clustering=True,
skip_density_and_return_after_stats=False,
close_clustergram_fig=True,
):
merged_spectra = load_df_from_npz(self.paths["merged_spectra"] % k)
norm_counts = sc.read(self.paths["normalized_counts"])
if skip_density_and_return_after_stats:
density_threshold_str = "2"
density_threshold_repl = density_threshold_str.replace(".", "_")
density_threshold = float(density_threshold_str)
n_neighbors = int(local_neighborhood_size * merged_spectra.shape[0] / k)
        # Rescale topics so that each spectrum has unit L2 norm.
l2_spectra = (merged_spectra.T / np.sqrt((merged_spectra ** 2).sum(axis=1))).T
if not skip_density_and_return_after_stats:
# Compute the local density matrix (if not previously cached)
topics_dist = None
if os.path.isfile(self.paths["local_density_cache"] % k):
local_density = load_df_from_npz(self.paths["local_density_cache"] % k)
else:
# first find the full distance matrix
topics_dist = squareform(fast_euclidean(l2_spectra.values))
# partition based on the first n neighbors
partitioning_order = np.argpartition(topics_dist, n_neighbors + 1)[
:, : n_neighbors + 1
]
# find the mean over those n_neighbors (excluding self, which has a distance of 0)
distance_to_nearest_neighbors = topics_dist[
np.arange(topics_dist.shape[0])[:, None], partitioning_order
]
local_density = pd.DataFrame(
distance_to_nearest_neighbors.sum(1) / (n_neighbors),
columns=["local_density"],
index=l2_spectra.index,
)
save_df_to_npz(local_density, self.paths["local_density_cache"] % k)
del partitioning_order
del distance_to_nearest_neighbors
density_filter = local_density.iloc[:, 0] < density_threshold
l2_spectra = l2_spectra.loc[density_filter, :]
kmeans_model = KMeans(n_clusters=k, n_init=10, random_state=1)
kmeans_model.fit(l2_spectra)
kmeans_cluster_labels = pd.Series(
kmeans_model.labels_ + 1, index=l2_spectra.index
)
        # Find the median spectra value for each gene within each cluster
median_spectra = l2_spectra.groupby(kmeans_cluster_labels).median()
# Normalize median spectra to probability distributions.
median_spectra = (median_spectra.T / median_spectra.sum(1)).T
# Compute the silhouette score
stability = silhouette_score(
l2_spectra.values, kmeans_cluster_labels, metric="euclidean"
)
# Obtain the reconstructed count matrix by re-fitting the usage matrix and computing the dot product: usage.dot(spectra)
refit_nmf_kwargs = yaml.load(
open(self.paths["nmf_run_parameters"]), Loader=yaml.FullLoader
)
refit_nmf_kwargs.update(
dict(n_components=k, H=median_spectra.values, update_H=False)
)
# ensure dtypes match for factorization
if median_spectra.values.dtype != norm_counts.X.dtype:
norm_counts.X = norm_counts.X.astype(median_spectra.values.dtype)
_, rf_usages = self._nmf(norm_counts.X, nmf_kwargs=refit_nmf_kwargs)
rf_usages = pd.DataFrame(
rf_usages, index=norm_counts.obs.index, columns=median_spectra.index
)
rf_pred_norm_counts = rf_usages.dot(median_spectra)
# Compute prediction error as a frobenius norm
if sp.issparse(norm_counts.X):
prediction_error = (
((norm_counts.X.todense() - rf_pred_norm_counts) ** 2).sum().sum()
)
else:
prediction_error = ((norm_counts.X - rf_pred_norm_counts) ** 2).sum().sum()
consensus_stats = pd.DataFrame(
[k, density_threshold, stability, prediction_error],
index=["k", "local_density_threshold", "stability", "prediction_error"],
columns=["stats"],
)
if skip_density_and_return_after_stats:
return consensus_stats
save_df_to_npz(
median_spectra,
self.paths["consensus_spectra"] % (k, density_threshold_repl),
)
save_df_to_npz(
rf_usages, self.paths["consensus_usages"] % (k, density_threshold_repl)
)
save_df_to_npz(
consensus_stats, self.paths["consensus_stats"] % (k, density_threshold_repl)
)
save_df_to_text(
median_spectra,
self.paths["consensus_spectra__txt"] % (k, density_threshold_repl),
)
save_df_to_text(
rf_usages, self.paths["consensus_usages__txt"] % (k, density_threshold_repl)
)
# Compute gene-scores for each GEP by regressing usage on Z-scores of TPM
tpm = sc.read(self.paths["tpm"])
# ignore cells not present in norm_counts
if tpm.n_obs != norm_counts.n_obs:
tpm = tpm[norm_counts.obs_names, :].copy()
tpm_stats = load_df_from_npz(self.paths["tpm_stats"])
if sp.issparse(tpm.X):
norm_tpm = (
np.array(tpm.X.todense()) - tpm_stats["__mean"].values
) / tpm_stats["__std"].values
else:
norm_tpm = (tpm.X - tpm_stats["__mean"].values) / tpm_stats["__std"].values
usage_coef = fast_ols_all_cols(rf_usages.values, norm_tpm)
usage_coef = pd.DataFrame(
usage_coef, index=rf_usages.columns, columns=tpm.var.index
)
save_df_to_npz(
usage_coef, self.paths["gene_spectra_score"] % (k, density_threshold_repl)
)
save_df_to_text(
usage_coef,
self.paths["gene_spectra_score__txt"] % (k, density_threshold_repl),
)
# Convert spectra to TPM units, and obtain results for all genes by running
# last step of NMF with usages fixed and TPM as the input matrix
norm_usages = rf_usages.div(rf_usages.sum(axis=1), axis=0)
refit_nmf_kwargs.update(dict(H=norm_usages.T.values,))
# ensure dtypes match for factorization
if norm_usages.values.dtype != tpm.X.dtype:
tpm.X = tpm.X.astype(norm_usages.values.dtype)
_, spectra_tpm = self._nmf(tpm.X.T, nmf_kwargs=refit_nmf_kwargs)
spectra_tpm = pd.DataFrame(
spectra_tpm.T, index=rf_usages.columns, columns=tpm.var.index
)
save_df_to_npz(
spectra_tpm, self.paths["gene_spectra_tpm"] % (k, density_threshold_repl)
)
save_df_to_text(
spectra_tpm,
self.paths["gene_spectra_tpm__txt"] % (k, density_threshold_repl),
)
if show_clustering:
if topics_dist is None:
topics_dist = squareform(fast_euclidean(l2_spectra.values))
# (l2_spectra was already filtered using the density filter)
else:
# (but the previously computed topics_dist was not!)
topics_dist = topics_dist[density_filter.values, :][
:, density_filter.values
]
spectra_order = []
for cl in sorted(set(kmeans_cluster_labels)):
cl_filter = kmeans_cluster_labels == cl
if cl_filter.sum() > 1:
cl_dist = squareform(topics_dist[cl_filter, :][:, cl_filter])
cl_dist[
cl_dist < 0
] = 0 # Rarely get floating point arithmetic issues
cl_link = linkage(cl_dist, "average")
cl_leaves_order = leaves_list(cl_link)
spectra_order += list(np.where(cl_filter)[0][cl_leaves_order])
else:
## Corner case where a component only has one element
spectra_order += list(np.where(cl_filter)[0])
from matplotlib import gridspec
import matplotlib.pyplot as plt
width_ratios = [0.5, 9, 0.5, 4, 1]
height_ratios = [0.5, 9]
fig = plt.figure(figsize=(sum(width_ratios), sum(height_ratios)))
gs = gridspec.GridSpec(
len(height_ratios),
len(width_ratios),
fig,
0.01,
0.01,
0.98,
0.98,
height_ratios=height_ratios,
width_ratios=width_ratios,
wspace=0,
hspace=0,
)
dist_ax = fig.add_subplot(
gs[1, 1],
xscale="linear",
yscale="linear",
xticks=[],
yticks=[],
xlabel="",
ylabel="",
frameon=True,
)
D = topics_dist[spectra_order, :][:, spectra_order]
dist_im = dist_ax.imshow(
D, interpolation="none", cmap="viridis", aspect="auto", rasterized=True
)
left_ax = fig.add_subplot(
gs[1, 0],
xscale="linear",
yscale="linear",
xticks=[],
yticks=[],
xlabel="",
ylabel="",
frameon=True,
)
left_ax.imshow(
kmeans_cluster_labels.values[spectra_order].reshape(-1, 1),
interpolation="none",
cmap="Spectral",
aspect="auto",
rasterized=True,
)
top_ax = fig.add_subplot(
gs[0, 1],
xscale="linear",
yscale="linear",
xticks=[],
yticks=[],
xlabel="",
ylabel="",
frameon=True,
)
top_ax.imshow(
kmeans_cluster_labels.values[spectra_order].reshape(1, -1),
interpolation="none",
cmap="Spectral",
aspect="auto",
rasterized=True,
)
hist_gs = gridspec.GridSpecFromSubplotSpec(
3, 1, subplot_spec=gs[1, 3], wspace=0, hspace=0
)
hist_ax = fig.add_subplot(
hist_gs[0, 0],
xscale="linear",
yscale="linear",
xlabel="",
ylabel="",
frameon=True,
title="Local density histogram",
)
hist_ax.hist(local_density.values, bins=np.linspace(0, 1, 50))
hist_ax.yaxis.tick_right()
xlim = hist_ax.get_xlim()
ylim = hist_ax.get_ylim()
if density_threshold < xlim[1]:
hist_ax.axvline(density_threshold, linestyle="--", color="k")
hist_ax.text(
density_threshold + 0.02,
ylim[1] * 0.95,
"filtering\nthreshold\n\n",
va="top",
)
hist_ax.set_xlim(xlim)
hist_ax.set_xlabel(
"Mean distance to k nearest neighbors\n\n%d/%d (%.0f%%) spectra above threshold\nwere removed prior to clustering"
% (
sum(~density_filter),
len(density_filter),
100 * (~density_filter).mean(),
)
)
fig.savefig(
self.paths["clustering_plot"] % (k, density_threshold_repl), dpi=250
)
if close_clustergram_fig:
plt.close(fig)
def k_selection_plot(self, close_fig=True):
"""
Borrowed from <NAME>. 2013 Deciphering Mutational Signatures
publication in Cell Reports
"""
run_params = load_df_from_npz(self.paths["nmf_replicate_parameters"])
stats = []
for k in sorted(set(run_params.n_components)):
stats.append(
self.consensus(k, skip_density_and_return_after_stats=True).stats
)
stats = pd.DataFrame(stats)
stats.reset_index(drop=True, inplace=True)
save_df_to_npz(stats, self.paths["k_selection_stats"])
fig = plt.figure(figsize=(6, 4))
ax1 = fig.add_subplot(111)
ax2 = ax1.twinx()
ax1.plot(stats.k, stats.stability, "o-", color="b")
ax1.set_ylabel("Stability", color="b", fontsize=15)
for tl in ax1.get_yticklabels():
tl.set_color("b")
# ax1.set_xlabel('K', fontsize=15)
ax2.plot(stats.k, stats.prediction_error, "o-", color="r")
ax2.set_ylabel("Error", color="r", fontsize=15)
for tl in ax2.get_yticklabels():
tl.set_color("r")
ax1.set_xlabel("Number of Components", fontsize=15)
ax1.grid(True)
plt.tight_layout()
fig.savefig(self.paths["k_selection_plot"], dpi=250)
if close_fig:
plt.close(fig)
def pick_k(k_selection_stats_path):
k_sel_stats = load_df_from_npz(k_selection_stats_path)
return int(k_sel_stats.loc[k_sel_stats.stability.idxmax, "k"])
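# Illustrative sketch (not part of the original module): the intended order of
# the factorization steps when driving the cNMF class directly. Output paths and
# k values are placeholders; in practice the prepare() entry point below also
# caches the TPM matrix and per-gene stats that consensus() reads back, so this
# sketch stops after picking k.
def _demo_cnmf_steps(counts_adata):
    cnmf_obj = cNMF(output_dir="./cnmf_out", name="demo")
    cnmf_obj._initialize_dirs()
    tpm = compute_tpm(counts_adata)
    norm = cnmf_obj.get_norm_counts(counts_adata, tpm, num_highvar_genes=2000)
    cnmf_obj.save_norm_counts(norm)
    params, nmf_kwargs = cnmf_obj.get_nmf_iter_params(ks=[5, 6, 7], n_iter=20)
    cnmf_obj.save_nmf_iter_params(params, nmf_kwargs)
    cnmf_obj.run_nmf()  # can be sharded across workers via worker_i/total_workers
    for k in (5, 6, 7):
        cnmf_obj.combine_nmf(k)
    cnmf_obj.k_selection_plot()
    return pick_k(cnmf_obj.paths["k_selection_stats"])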
def prepare(args):
argdict = vars(args)
cnmf_obj = cNMF(output_dir=argdict["output_dir"], name=argdict["name"])
cnmf_obj._initialize_dirs()
print("Reading in counts from {} - ".format(argdict["counts"]), end="")
if argdict["counts"].endswith(".h5ad"):
input_counts = sc.read(argdict["counts"])
else:
## Load txt or compressed dataframe and convert to scanpy object
if argdict["counts"].endswith(".npz"):
input_counts = load_df_from_npz(argdict["counts"])
else:
input_counts = pd.read_csv(argdict["counts"], sep="\t", index_col=0)
if argdict["densify"]:
input_counts = sc.AnnData(
X=input_counts.values,
obs=pd.DataFrame(index=input_counts.index),
var=pd.DataFrame(index=input_counts.columns),
)
else:
input_counts = sc.AnnData(
X=sp.csr_matrix(input_counts.values),
obs=pd.DataFrame(index=input_counts.index),
var=pd.DataFrame(index=input_counts.columns),
)
print("{} cells and {} genes".format(input_counts.n_obs, input_counts.n_vars))
# use desired layer if not .X
if args.layer is not None:
print("Using layer '{}' for cNMF".format(args.layer))
input_counts.X = input_counts.layers[args.layer].copy()
if sp.issparse(input_counts.X) & argdict["densify"]:
input_counts.X = np.array(input_counts.X.todense())
if argdict["tpm"] is None:
tpm = compute_tpm(input_counts)
elif argdict["tpm"].endswith(".h5ad"):
subprocess.call(
"cp %s %s" % (argdict["tpm"], cnmf_obj.paths["tpm"]), shell=True
)
tpm = sc.read(cnmf_obj.paths["tpm"])
else:
if argdict["tpm"].endswith(".npz"):
tpm = load_df_from_npz(argdict["tpm"])
else:
tpm = pd.read_csv(argdict["tpm"], sep="\t", index_col=0)
if argdict["densify"]:
tpm = sc.AnnData(
X=tpm.values,
obs=pd.DataFrame(index=tpm.index),
var=pd.DataFrame(index=tpm.columns),
)
else:
tpm = sc.AnnData(
X=sp.csr_matrix(tpm.values),
obs=pd.DataFrame(index=tpm.index),
var= | pd.DataFrame(index=tpm.columns) | pandas.DataFrame |
from typing import Tuple
import numpy as np
import pandas as pd
import pytest
from arch.typing import ArrayLike2D, Float64Array
@pytest.fixture(scope="module", params=[True, False])
def data(request) -> Tuple[Float64Array, Float64Array]:
g = np.random.RandomState([12839028, 3092183, 902813])
e = g.standard_normal((2000, 2))
phi = g.random_sample((3, 2, 2))
phi[:, 0, 0] *= 0.8 / phi[:, 0, 0].sum()
phi[:, 1, 1] *= 0.8 / phi[:, 1, 1].sum()
phi[:, 0, 1] *= 0.2 / phi[:, 0, 1].sum()
phi[:, 1, 0] *= 0.2 / phi[:, 1, 0].sum()
y = e.copy()
for i in range(3, y.shape[0]):
y[i] = e[i]
for j in range(3):
y[i] += (phi[j] @ y[i - j - 1].T).T
y = y[-1000:]
if request.param:
df = | pd.DataFrame(y, columns=["y", "x"]) | pandas.DataFrame |
import json
import pandas as pd
from glob import glob
import os
import numpy as np
def find_file(files_list,file_name):
for file in files_list:
if file.split(os.sep)[-1] == file_name:
return file
return None
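# Illustrative sketch (not part of the original script): find_file performs a
# plain basename match over a list of paths; the paths here are placeholders.
def _demo_find_file():
    files = ['./data/perspective_pool_v1.0.json', './data/evidence_pool_v1.0.json']
    return find_file(files, 'evidence_pool_v1.0.json')  # -> './data/evidence_pool_v1.0.json'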
def get_raw_files():
""" findes combines and splits to train dev test """
all_jsons = glob('./**/*.json',recursive=True)
for file_name in all_jsons:
name = file_name.split(os.sep)[-1]
if name == 'perspective_pool_v1.0.json':
perspective = file_name
elif name == 'evidence_pool_v1.0.json':
evidence = file_name
elif name == 'dataset_split_v1.0.json':
split = file_name
elif name == 'perspectrum_with_answers_v1.0.json':
merger = file_name
perspective = pd.read_json(perspective)
perspective.columns= ['pId','perspective','source']
evidence = pd.read_json(evidence)
split = pd.read_json(split,typ='series').reset_index()
split.columns = ['id','split']
merger = | pd.read_json(merger) | pandas.read_json |
# -*- coding: utf-8 -*-
"""
module for trade class
"""
import math
import datetime as dt
import logging
import pandas as pd
from pyecharts.charts import Bar, Line
from pyecharts import options as opts
import xalpha.remain as rm
from xalpha.cons import convert_date, line_opts, myround, xirr, yesterdayobj
from xalpha.exceptions import ParserFailure, TradeBehaviorError
from xalpha.record import irecord
import xalpha.universal as xu
from xalpha.universal import get_rt
logger = logging.getLogger(__name__)
def xirrcal(cftable, trades, date, startdate=None, guess=0.01):
"""
calculate the xirr rate
    :param cftable: cftable (pd.DataFrame) with date and cash columns
:param trades: list [trade1, ...], every item is an trade object,
whose shares would be sold out virtually
:param date: string of date or datetime object,
the date when virtually all holding positions being sold
:param guess: floating number, a guess at the xirr rate solution to be used
as a starting point for the numerical solution
:returns: the IRR as a single floating number
"""
date = convert_date(date)
partcftb = cftable[cftable["date"] <= date]
if len(partcftb) == 0:
return 0
if not startdate:
cashflow = [(row["date"], row["cash"]) for i, row in partcftb.iterrows()]
else:
if not isinstance(startdate, dt.datetime):
startdate = dt.datetime.strptime(
startdate.replace("-", "").replace("/", ""), "%Y%m%d"
)
start_cash = 0
for fund in trades:
start_cash += fund.briefdailyreport(startdate).get("currentvalue", 0)
cashflow = [(startdate, -start_cash)]
partcftb = partcftb[partcftb["date"] > startdate]
cashflow.extend([(row["date"], row["cash"]) for i, row in partcftb.iterrows()])
rede = 0
for fund in trades:
if not isinstance(fund, itrade):
partremtb = fund.remtable[fund.remtable["date"] <= date]
if len(partremtb) > 0:
rem = partremtb.iloc[-1]["rem"]
else:
rem = []
rede += fund.aim.shuhui(
fund.briefdailyreport(date).get("currentshare", 0), date, rem
)[1]
        else:  # exchange-traded positions
rede += fund.briefdailyreport(date).get("currentvalue", 0)
cashflow.append((date, rede))
return xirr(cashflow, guess)
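# A minimal sketch (hypothetical numbers) of the cashflow list that xirr() receives:
# an investment of 1000 on 2020-01-01 plus a virtual redemption of 1100 exactly one
# year later corresponds to an IRR of roughly 10%:
#
#   cashflow = [(dt.datetime(2020, 1, 1), -1000), (dt.datetime(2021, 1, 1), 1100)]
#   xirr(cashflow, guess=0.01)  # ~= 0.10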
def bottleneck(cftable):
"""
find the max total input in the history given cftable with cash column
:param cftable: pd.DataFrame of cftable
"""
if len(cftable) == 0:
return 0
    # cftable = cftable.reset_index(drop=True)  # unnecessary, as iloc uses positional rows instead of the default index
inputl = [-sum(cftable.iloc[:i].cash) for i in range(1, len(cftable) + 1)]
return myround(max(inputl))
def turnoverrate(cftable, end=yesterdayobj()):
"""
    calculate the annualized turnover rate
:param cftable: pd.DataFrame of cftable
:param end: str or obj of datetime for the end date of the estimation
"""
if len(cftable) == 0:
return 0
end = convert_date(end)
start = cftable.iloc[0].date
tradeamount = sum(abs(cftable.loc[:, "cash"]))
turnover = tradeamount / bottleneck(cftable) / 2.0
if (end - start).days <= 0:
return 0
return turnover * 365 / (end - start).days
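# Worked example (hypothetical numbers): a cftable with a single buy of -1000 and a
# later sell of +500 gives tradeamount = 1500 and bottleneck = 1000, so the raw
# turnover is 1500 / 1000 / 2 = 0.75, which is then annualized by * 365 / holding days.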
def vtradevolume(cftable, freq="D", rendered=True):
"""
aid function on visualization of trade summary
:param cftable: cftable (pandas.DataFrame) with at least date and cash columns
:param freq: one character string, frequency label, now supporting D for date,
W for week and M for month, namely the trade volume is shown based on the time unit
:returns: the Bar object
"""
    ### WARN: datazoom and time conflict, sliding till 1970..., needs a further look into pyecharts
startdate = cftable.iloc[0]["date"]
if freq == "D":
# datedata = [d.to_pydatetime() for d in cftable["date"]]
datedata = pd.date_range(startdate, yesterdayobj(), freq="D")
selldata = [
[row["date"].to_pydatetime(), row["cash"]]
for _, row in cftable.iterrows()
if row["cash"] > 0
]
buydata = [
[row["date"].to_pydatetime(), row["cash"]]
for _, row in cftable.iterrows()
if row["cash"] < 0
]
elif freq == "W":
cfmerge = cftable.groupby([cftable["date"].dt.year, cftable["date"].dt.week])[
"cash"
].sum()
# datedata = [
# dt.datetime.strptime(str(a) + "4", "(%Y, %W)%w")
# for a, _ in cfmerge.iteritems()
# ]
datedata = pd.date_range(
startdate, yesterdayobj() + pd.Timedelta(days=7), freq="W-THU"
)
selldata = [
[dt.datetime.strptime(str(a) + "4", "(%G, %V)%w"), b]
for a, b in cfmerge.iteritems()
if b > 0
]
buydata = [
[dt.datetime.strptime(str(a) + "4", "(%G, %V)%w"), b]
for a, b in cfmerge.iteritems()
if b < 0
]
        # %V in pandas gives the ISO week number, which differs from Python's original %W or %U,
        # see https://stackoverflow.com/questions/5882405/get-date-from-iso-week-number-in-python for more details
        # python3.6+ is required for %G and %V
        # but the sell and buy bars still don't look evenly spaced; the cause is unclear
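        # e.g. dt.datetime(2021, 1, 1).isocalendar() gives (2020, 53, 5): pandas' dt.week
        # follows this ISO numbering, whereas "%W" counts weeks from the first Monday of
        # the calendar year, hence the "(%G, %V)%w" based parsing above.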
elif freq == "M":
cfmerge = cftable.groupby([cftable["date"].dt.year, cftable["date"].dt.month])[
"cash"
].sum()
# datedata = [
# dt.datetime.strptime(str(a) + "15", "(%Y, %m)%d")
# for a, _ in cfmerge.iteritems()
# ]
datedata = pd.date_range(
startdate, yesterdayobj() + | pd.Timedelta(days=31) | pandas.Timedelta |
# FIT DATA TO A CURVE
# <NAME> - MIT Licence
# inspired by @dimgrr. Based on
# https://towardsdatascience.com/basic-curve-fitting-of-scientific-data-with-python-9592244a2509?gi=9c7c4ade0880
# https://github.com/venkatesannaveen/python-science-tutorial/blob/master/curve-fitting/curve-fitting-tutorial.ipynb
# https://www.reddit.com/r/CoronavirusUS/comments/fqx8fn/ive_been_working_on_this_extrapolation_for_the/
# to explore : https://github.com/fcpenha/Gompertz-Makehan-Fit/blob/master/script.py
# Import required packages
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import curve_fit
import matplotlib.dates as mdates
import copy, math
from lmfit import Model
import pandas as pd
import streamlit as st
import datetime as dt
from datetime import datetime, timedelta
import matplotlib.animation as animation
import imageio
import streamlit.components.v1 as components
import os
import platform
import webbrowser
from pandas import read_csv, Timestamp, Timedelta, date_range
from io import StringIO
from numpy import log, exp, sqrt, clip, argmax, put
from scipy.special import erfc, erf
from matplotlib.pyplot import subplots
from matplotlib.ticker import StrMethodFormatter
from matplotlib.dates import ConciseDateFormatter, AutoDateLocator
from matplotlib.backends.backend_agg import RendererAgg
from matplotlib.backends.backend_agg import RendererAgg
_lock = RendererAgg.lock
from PIL import Image
import glob
# Functions to calculate values a,b and c ##########################
def exponential(x, a, b, c):
    ''' Standard Gompertz function
        a = height, b = halfway point, c = growth rate
        https://en.wikipedia.org/wiki/Gompertz_function '''
return a * np.exp(-b * np.exp(-c * x))
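# Quick sanity check (hypothetical parameters): with a=1000, b=10, c=0.1 the curve
# starts near 1000 * exp(-10) ~= 0.05, passes its inflection point a/e ~= 368 around
# x = ln(b)/c ~= 23, and saturates towards a = 1000:
#
#   exponential(np.array([0, 23, 100]), 1000, 10, 0.1)  # ~ [0.05, 367, 999.5]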
def derivate(x, a, b, c):
    ''' First derivative of the Gompertz function, shifted vertically by BASEVALUE. Might contain an error '''
return (np.exp(b * (-1 * np.exp(-c * x)) - c * x) * a * b * c ) + BASEVALUE
#return a * b * c * np.exp(-b*np.exp(-c*x))*np.exp(-c*x)
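# For reference: d/dx [a * exp(-b * exp(-c*x))] = a*b*c * exp(-b*exp(-c*x) - c*x),
# which is the expression returned above; BASEVALUE (assumed to be defined elsewhere
# in this app) only adds a constant vertical offset.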
def derivate_of_derivate(x,a,b,c):
return a*b*c*(b*c*exp(-c*x) - c)*exp(-b*exp(-c*x) - c*x)
def gaussian(x, a, b, c):
    ''' Standard Gaussian function. Doesn't give results; not in use '''
return a * np.exp(-np.power(x - b, 2) / (2 * np.power(c, 2)))
def gaussian_2(x, a, b, c):
    ''' Another Gaussian function, in use
        a = height, b = center (?), c = width '''
return a * np.exp(-((x - b) ** 2) / c)
def growth(x, a, b):
""" Growth model. a is the value at t=0. b is the so-called R number.
Doesnt work. FIX IT """
return np.power(a * 0.5, (x / (4 * (math.log(0.5) / math.log(b)))))
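# Sanity check (hypothetical values): growth(0, 100, 1.1) returns 100 (the value at
# t=0) and growth(4, 100, 1.1) returns 100 * 1.1 = 110 (up to floating point),
# i.e. one factor of the R number per 4-day interval.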
# https://replit.com/@jsalsman/COVID19USlognormals
def lognormal_c(x, s, mu, h): # x, sigma, mean, height
return h * 0.5 * erfc(- (log(x) - mu) / (s * sqrt(2)))
# https://en.wikipedia.org/wiki/Log-normal_distribution#Cumulative_distribution_function
def normal_c(x, s, mu, h): # x, sigma, mean, height
return h * 0.5 * (1 + erf((x - mu) / (s * sqrt(2))))
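# Sanity check (hypothetical values): normal_c(x=4.3, s=0.3, mu=4.3, h=16.5) returns
# h * 0.5 = 8.25, i.e. the fitted cumulative curve passes half its height at x == mu.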
# #####################################################################
def find_gaussian_curvefit(x_values, y_values):
try:
popt_g2, pcov_g2 = curve_fit(
f=gaussian_2,
xdata=x_values,
ydata=y_values,
p0=[0, 0, 0],
bounds=(-np.inf, np.inf),
maxfev=10000,
)
except RuntimeError as e:
str_e = str(e)
st.error(f"gaussian fit :\n{str_e}")
return tuple(popt_g2)
def use_curvefit(x_values, x_values_extra, y_values, title, daterange,i):
"""
Use the curve-fit from scipy.
    IN : x- and y-values. The x_values_extra array is used for "predicting" (extrapolating) the curve beyond the data
"""
with _lock:
st.subheader(f"Curvefit (scipy) - {title}")
fig1x = plt.figure()
try:
a_start, b_start, c_start = 0,0,0
popt, pcov = curve_fit(
f=exponential,
xdata=x_values,
ydata=y_values,
#p0=[4600, 11, 0.5],
p0 = [a_start, b_start, c_start ], # IC BEDDEN MAART APRIL
bounds=(-np.inf, np.inf),
maxfev=10000,
)
plt.plot(
x_values_extra,
exponential(x_values_extra, *popt),
"r-",
label="exponential fit: a=%5.3f, b=%5.3f, c=%5.3f" % tuple(popt),
)
except RuntimeError as e:
str_e = str(e)
st.error(f"Exponential fit :\n{str_e}")
try:
popt_d, pcov_d = curve_fit(
f=derivate,
xdata=x_values,
ydata=y_values,
#p0=[0, 0, 0],
p0 = [a_start, b_start, c_start ], # IC BEDDEN MAART APRIL
bounds=(-np.inf, np.inf),
maxfev=10000,
)
plt.plot(
x_values_extra,
derivate(x_values_extra, *popt_d),
"g-",
label="derivate fit: a=%5.3f, b=%5.3f, c=%5.3f" % tuple(popt_d),
)
except RuntimeError as e:
str_e = str(e)
st.error(f"Derivate fit :\n{str_e}")
# FIXIT
# try:
# popt_growth, pcov_growth = curve_fit(
# f=growth,
# xdata=x_values,
# ydata=y_values,
# p0=[500, 0.0001],
# bounds=(-np.inf, np.inf),
# maxfev=10000,
# )
# plt.plot(
# x_values_extra,
# growth(x_values_extra, *popt_growth),
# "y-",
# label="growth: a=%5.3f, b=%5.3f" % tuple(popt_growth),
# )
# except:
# st.write("Error with growth model fit")
try:
popt_g, pcov_g = curve_fit(
f=gaussian_2,
xdata=x_values,
ydata=y_values,
p0=[a_start, b_start, c_start ],
bounds=(-np.inf, np.inf),
maxfev=10000,
)
plt.plot(
x_values_extra,
gaussian_2(x_values_extra, *popt_g),
"b-",
label="gaussian fit: a=%5.3f, b=%5.3f, c=%5.3f" % tuple(popt_g),
)
except RuntimeError as e:
str_e = str(e)
st.error(f"Gaussian fit :\n{str_e}")
plt.scatter(x_values, y_values, s=20, color="#00b3b3", label="Data")
plt.legend()
plt.title(f"{title} / curve_fit (scipy)")
plt.ylim(bottom=0)
plt.xlabel(f"Days from {from_}")
# POGING OM DATUMS OP DE X-AS TE KRIJGEN (TOFIX)
# plt.xlim(daterange[0], daterange[-1])
# lay-out of the x axis
# plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
# interval_ = 5
# plt.gca().xaxis.set_major_locator(mdates.DayLocator(interval=interval_))
# plt.gcf().autofmt_xdate()
#plt.show()
filename= (f"{OUTPUT_DIR}scipi_{title}_{i}")
plt.savefig(filename, dpi=100, bbox_inches="tight")
st.pyplot(fig1x)
# def make_gif(filelist):
# # Create the frames
# frames = []
# imgs = glob.glob("*.png")
# for i in imgs:
# new_frame = Image.open(i)
# frames.append(new_frame)
#
# # Save into a GIF file that loops forever
# frames[0].save('png_to_gif.gif', format='GIF',
# append_images=frames[1:],
# save_all=True,
# duration=300, loop=0)
def use_lmfit(x_values, y_values, functionlist, title,i, max_y_values):
"""
Use lmfit.
IN : x- and y-values.
functionlist (which functions to use)
adapted from https://stackoverflow.com/a/49843706/4173718
TODO: Make all graphs in one graph
"""
a_start, b_start, c_start = 0,0,0
for function in functionlist:
#placeholder0.subheader(f"LMFIT - {title} - {function}")
# create a Model from the model function
if function == "exponential":
bmodel = Model(exponential)
formula = "a * np.exp(-b * np.exp(-c * x))"
elif function == "derivate":
bmodel = Model(derivate)
formula = "a * b * c * np.exp(b * (-1 * np.exp(-c * x)) - c * x)"
elif function == "gaussian":
bmodel = Model(gaussian_2)
formula = "a * np.exp(-((x - b) ** 2) / c)"
else:
st.write("Please choose a function")
st.stop()
# create Parameters, giving initial values
#params = bmodel.make_params(a=4711, b=12, c=0.06)
params = bmodel.make_params(a=a_start, b=b_start, c=c_start) # IC BEDDEN MAART APRIL
# params = bmodel.make_params()
params["a"].min = a_start
params["b"].min = b_start
params["c"].min = c_start
# do fit, st.write result
result = bmodel.fit(y_values, params, x=x_values)
a = round(result.params['a'].value,5)
b= round(result.params['b'].value,5)
c =round(result.params['c'].value,5)
placeholder1.text(result.fit_report())
with _lock:
#fig1y = plt.figure()
fig1y, ax1 = plt.subplots()
ax2 = ax1.twinx()
# plot results -- note that `best_fit` is already available
ax1.scatter(x_values, y_values, color="#00b3b3", s=2)
#ax1.plot(x_values, result.best_fit, "g")
res = (f"a: {a} / b: {b} / c: {c}")
plt.title(f"{title} / lmfit - {function}\n{formula}\n{res}")
t = np.linspace(0.0, TOTAL_DAYS_IN_GRAPH, 10000)
# use `result.eval()` to evaluate model given params and x
ax1.plot(t, bmodel.eval(result.params, x=t), "r-")
ax2.plot (t, derivate_of_derivate(t,a,b,c), color = 'purple')
ax2.axhline(linewidth=1, color='purple', alpha=0.5, linestyle="--")
#ax1.plot (t, derivate(t,26660.1, 9.01298, 0.032198), color = 'purple')
#ax2.plot (t, derivate_of_derivate(t,26660.1, 9.01298, 0.032198), color = 'yellow')
#plt.ylim(bottom=0)
#ax1.ylim(0, max_y_values*1.1)
#ax1.set_ylim(510,1200)
#ax2.set_ylim(0,12)
ax1.set_xlabel(f"Days from {from_}")
ax1.set_ylabel(f"{title} - red")
ax2.set_ylabel("delta - purple")
#plt.show()
filename= (f"{OUTPUT_DIR}lmfit_{title}_{function}_{i}")
plt.savefig(filename, dpi=100, bbox_inches="tight")
placeholder.pyplot(fig1y)
if prepare_for_animation == False:
with _lock:
fig1z = plt.figure()
# plot results -- note that `best_fit` is already available
if function == "exponential":
plt.plot(t, derivate(t,a,b,c))
function_x = "derivate"
formula_x = "a * b * c * np.exp(b * (-1 * np.exp(-c * x)) - c * x)"
elif function == "derivate":
plt.plot(t, exponential(t, a,b,c))
function_x = "exponential"
formula_x = "a * np.exp(-b * np.exp(-c * x))"
else:
st.error("ERROR")
st.stop()
plt.title(f"{title} / {function_x}\n{formula_x}\n{res}")
t = np.linspace(0.0, TOTAL_DAYS_IN_GRAPH, 10000)
# use `result.eval()` to evaluate model given params and x
#plt.plot(t, bmodel.eval(result.params, x=t), "r-")
plt.ylim(bottom=0)
plt.xlabel(f"Days from {from_}")
plt.ylabel(title)
#plt.show()
#filename= (f"{OUTPUT_DIR}lmfit_{title}_{function}_{i}")
#plt.savefig(filename, dpi=100, bbox_inches="tight")
st.pyplot(fig1z)
return filename
def fit_the_values_really(x_values, y_values, which_method, title, daterange,i, max_y_values):
x_values_extra = np.linspace(
start=0, stop=TOTAL_DAYS_IN_GRAPH - 1, num=TOTAL_DAYS_IN_GRAPH
)
x_values = x_values[:i]
y_values = y_values[:i]
if prepare_for_animation == False:
use_curvefit(x_values, x_values_extra, y_values, title, daterange,i)
return use_lmfit(x_values,y_values, [which_method], title,i, max_y_values)
def fit_the_values(to_do_list , total_days, daterange, which_method, prepare_for_animation):
"""
We are going to fit the values
"""
# Here we go !
st.header("Fitting data to formulas")
infox = (
'<br>Exponential / Standard gompertz function : <i>a * exp(-b * np.exp(-c * x))</i></li>'
'<br>First derivate of the Gompertz function : <i>a * b * c * exp(b * (-1 * exp(-c * x)) - c * x)</i></li>'
'<br>Gaussian : <i>a * exp(-((x - b) ** 2) / c)</i></li>'
'<br>Working on growth model: <i>(a * 0.5 ^ (x / (4 * (math.log(0.5) / math.log(b)))))</i> (b will be the Rt-number)</li>'
)
st.markdown(infox, unsafe_allow_html=True)
global placeholder0, placeholder, placeholder1
placeholder0 = st.empty()
placeholder = st.empty()
placeholder1 = st.empty()
el = st.empty()
for v in to_do_list:
title = v[0]
y_values = v[1]
max_y_values = max(y_values)
# some preperations
number_of_y_values = len(y_values)
global TOTAL_DAYS_IN_GRAPH
TOTAL_DAYS_IN_GRAPH = total_days # number of total days
x_values = np.linspace(start=0, stop=number_of_y_values - 1, num=number_of_y_values)
if prepare_for_animation == True:
filenames = []
for i in range(5, len(x_values)):
filename = fit_the_values_really(x_values, y_values, which_method, title, daterange, i, max_y_values)
filenames.append(filename)
# build gif
with imageio.get_writer('mygif.gif', mode='I') as writer:
for filename_ in filenames:
image = imageio.imread(f"{filename_}.png")
writer.append_data(image)
webbrowser.open('mygif.gif')
# Remove files
for filename__ in set(filenames):
os.remove(f"{filename__}.png")
else:
for i in range(len(x_values)-1, len(x_values)):
filename = fit_the_values_really(x_values, y_values, which_method, title, daterange, i, max_y_values)
# FIXIT
# aq, bq, cq = find_gaussian_curvefit(x_values, y_values)
# st.write(f"Find Gaussian curvefit - a:{aq} b:{bq} c: {cq}")
def select_period(df, show_from, show_until):
""" _ _ _ """
if show_from is None:
show_from = "2020-2-27"
if show_until is None:
show_until = "2020-4-1"
mask = (df[DATEFIELD].dt.date >= show_from) & (df[DATEFIELD].dt.date <= show_until)
df = df.loc[mask]
df = df.reset_index()
return df
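# Typical call (hypothetical dates): select_period(df, dt.date(2021, 1, 1), dt.date(2021, 3, 1))
# keeps only the rows whose DATEFIELD falls inside that window and resets the index.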
def normal_c(df):
#https://replit.com/@jsalsman/COVID19USlognormals
st.subheader("Normal_c")
df = df.set_index(DATEFIELD)
firstday = df.index[0] + Timedelta('1d')
nextday = df.index[-1] + Timedelta('1d')
lastday = df.index[-1] + Timedelta(TOTAL_DAYS_IN_GRAPH - len(df), 'd') # extrapolate
with _lock:
#fig1y = plt.figure()
fig1yz, ax = subplots()
ax.set_title('NL COVID-19 cumulative log-lognormal extrapolations\n'
+ 'Source: repl.it/@jsalsman/COVID19USlognormals')
x = ((df.index - Timestamp('2020-01-01')) # independent
// Timedelta('1d')).values # small day-of-year integers
yi = df['Total_reported_cumm'].values # dependent
yd = df['Deceased_cumm'].values # dependent
exrange = range((Timestamp(nextday)
- Timestamp(firstday)) // Timedelta('1d'),
(Timestamp(lastday) + Timedelta('1d')
- Timestamp(firstday)) // Timedelta('1d')) # day-of-year ints
indates = date_range(df.index[0], df.index[-1])
exdates = date_range(nextday, lastday)
ax.scatter(indates, yi, color="#00b3b3", label='Infected')
ax.scatter(indates, yd, color="#00b3b3", label='Dead')
sqrt2 = sqrt(2)
im = Model(normal_c)
st.write (x)
iparams = im.make_params(s=0.3, mu=4.3, h=16.5)
st.write (iparams)
#iparams['s'].min = 0; iparams['h'].min = 0
iresult = im.fit(log(yi+1), iparams, x=x)
st.text('---- Infections:\n' + iresult.fit_report())
ax.plot(indates, exp(iresult.best_fit)-1, 'b', label='Infections fit')
ipred = iresult.eval(x=exrange)
ax.plot(exdates, exp(ipred)-1, 'b--',
label='Forecast: {:,.0f}'.format(exp(ipred[-1])-1))
iupred = iresult.eval_uncertainty(x=exrange, sigma=0.95) # 95% interval
iintlow = clip(ipred-iupred, ipred[0], None)
put(iintlow, range(argmax(iintlow), len(iintlow)), iintlow[argmax(iintlow)])
ax.fill_between(exdates, exp(iintlow), exp(ipred+iupred), alpha=0.35, color='b')
dm = Model(normal_c)
dparams = dm.make_params(s=19.8, mu=79.1, h=11.4) # initial guesses
        dparams['s'].min = 0; dparams['h'].min = 0
dresult = dm.fit(log(yd+1), dparams, x=x)
st.text('---- Deaths:\n' + dresult.fit_report())
ax.plot(indates, exp(dresult.best_fit)-1, 'r', label='Deaths fit')
dpred = dresult.eval(x=exrange)
ax.plot(exdates, exp(dpred)-1, 'r--',
label='Forecast: {:,.0f}'.format(exp(dpred[-1])-1))
dupred = dresult.eval_uncertainty(x=exrange, sigma=0.95) # 95% interval
dintlow = clip(dpred-dupred, log(max(yd)+1), None)
put(dintlow, range(argmax(dintlow), len(dintlow)), dintlow[argmax(dintlow)])
ax.fill_between(exdates, exp(dintlow), exp(dpred+dupred), alpha=0.35, color='r')
ax.fill_between(exdates, 0.012 * (exp(iintlow)), 0.012 * (exp(ipred+iupred)),
alpha=0.85, color='g', label='Deaths from observed fatality rate')
ax.set_xlim(df.index[0], lastday)
#ax.set_yscale('log') # semilog
#ax.set_ylim(0, 1500000)
ax.yaxis.set_major_formatter(StrMethodFormatter('{x:,.0f}')) # comma separators
ax.grid()
ax.legend(loc="upper left")
ax.xaxis.set_major_formatter(ConciseDateFormatter(AutoDateLocator(), show_offset=False))
ax.set_xlabel('95% prediction confidence intervals shaded')
#fig.savefig('plot.png', bbox_inches='tight')
#print('\nTO VIEW GRAPH: click on plot.png in the file pane to the left.')
#fig.show()
st.pyplot(fig1yz)
st.text('Infections at end of period shown: {:,.0f}. Deaths: {:,.0f}.'.format(
exp(ipred[-1])-1, exp(dpred[-1])-1))
def loglognormal(df, what_to_display):
#https://replit.com/@jsalsman/COVID19USlognormals
st.subheader("Log Normal")
df = df.set_index(DATEFIELD)
firstday = df.index[0] + Timedelta('1d')
nextday = df.index[-1] + Timedelta('1d')
lastday = df.index[-1] + Timedelta(TOTAL_DAYS_IN_GRAPH - len(df), 'd') # extrapolate
with _lock:
#fig1y = plt.figure()
fig1yz, ax = subplots()
ax.set_title('NL COVID-19 cumulative log-lognormal extrapolations\n'
+ 'Source: repl.it/@jsalsman/COVID19USlognormals')
x = ((df.index - Timestamp('2020-01-01')) # independent
// | Timedelta('1d') | pandas.Timedelta |
from datetime import datetime
from decimal import Decimal
from io import StringIO
import numpy as np
import pytest
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series, Timestamp, date_range, read_csv
import pandas._testing as tm
from pandas.core.base import SpecificationError
import pandas.core.common as com
def test_repr():
# GH18203
result = repr(pd.Grouper(key="A", level="B"))
expected = "Grouper(key='A', level='B', axis=0, sort=False)"
assert result == expected
@pytest.mark.parametrize("dtype", ["int64", "int32", "float64", "float32"])
def test_basic(dtype):
data = Series(np.arange(9) // 3, index=np.arange(9), dtype=dtype)
index = np.arange(9)
np.random.shuffle(index)
data = data.reindex(index)
grouped = data.groupby(lambda x: x // 3)
for k, v in grouped:
assert len(v) == 3
agged = grouped.aggregate(np.mean)
assert agged[1] == 1
tm.assert_series_equal(agged, grouped.agg(np.mean)) # shorthand
tm.assert_series_equal(agged, grouped.mean())
tm.assert_series_equal(grouped.agg(np.sum), grouped.sum())
expected = grouped.apply(lambda x: x * x.sum())
transformed = grouped.transform(lambda x: x * x.sum())
assert transformed[7] == 12
tm.assert_series_equal(transformed, expected)
value_grouped = data.groupby(data)
tm.assert_series_equal(
value_grouped.aggregate(np.mean), agged, check_index_type=False
)
# complex agg
agged = grouped.aggregate([np.mean, np.std])
msg = r"nested renamer is not supported"
with pytest.raises(SpecificationError, match=msg):
grouped.aggregate({"one": np.mean, "two": np.std})
group_constants = {0: 10, 1: 20, 2: 30}
agged = grouped.agg(lambda x: group_constants[x.name] + x.mean())
assert agged[1] == 21
# corner cases
msg = "Must produce aggregated value"
# exception raised is type Exception
with pytest.raises(Exception, match=msg):
grouped.aggregate(lambda x: x * 2)
def test_groupby_nonobject_dtype(mframe, df_mixed_floats):
key = mframe.index.codes[0]
grouped = mframe.groupby(key)
result = grouped.sum()
expected = mframe.groupby(key.astype("O")).sum()
tm.assert_frame_equal(result, expected)
# GH 3911, mixed frame non-conversion
df = df_mixed_floats.copy()
df["value"] = range(len(df))
def max_value(group):
return group.loc[group["value"].idxmax()]
applied = df.groupby("A").apply(max_value)
result = applied.dtypes
expected = Series(
[np.dtype("object")] * 2 + [np.dtype("float64")] * 2 + [np.dtype("int64")],
index=["A", "B", "C", "D", "value"],
)
tm.assert_series_equal(result, expected)
def test_groupby_return_type():
# GH2893, return a reduced type
df1 = DataFrame(
[
{"val1": 1, "val2": 20},
{"val1": 1, "val2": 19},
{"val1": 2, "val2": 27},
{"val1": 2, "val2": 12},
]
)
def func(dataf):
return dataf["val2"] - dataf["val2"].mean()
with tm.assert_produces_warning(FutureWarning):
result = df1.groupby("val1", squeeze=True).apply(func)
assert isinstance(result, Series)
df2 = DataFrame(
[
{"val1": 1, "val2": 20},
{"val1": 1, "val2": 19},
{"val1": 1, "val2": 27},
{"val1": 1, "val2": 12},
]
)
def func(dataf):
return dataf["val2"] - dataf["val2"].mean()
with tm.assert_produces_warning(FutureWarning):
result = df2.groupby("val1", squeeze=True).apply(func)
assert isinstance(result, Series)
# GH3596, return a consistent type (regression in 0.11 from 0.10.1)
df = DataFrame([[1, 1], [1, 1]], columns=["X", "Y"])
with tm.assert_produces_warning(FutureWarning):
result = df.groupby("X", squeeze=False).count()
assert isinstance(result, DataFrame)
def test_inconsistent_return_type():
# GH5592
# inconsistent return type
df = DataFrame(
dict(
A=["Tiger", "Tiger", "Tiger", "Lamb", "Lamb", "Pony", "Pony"],
B=Series(np.arange(7), dtype="int64"),
C=date_range("20130101", periods=7),
)
)
def f(grp):
return grp.iloc[0]
expected = df.groupby("A").first()[["B"]]
result = df.groupby("A").apply(f)[["B"]]
tm.assert_frame_equal(result, expected)
def f(grp):
if grp.name == "Tiger":
return None
return grp.iloc[0]
result = df.groupby("A").apply(f)[["B"]]
e = expected.copy()
e.loc["Tiger"] = np.nan
tm.assert_frame_equal(result, e)
def f(grp):
if grp.name == "Pony":
return None
return grp.iloc[0]
result = df.groupby("A").apply(f)[["B"]]
e = expected.copy()
e.loc["Pony"] = np.nan
tm.assert_frame_equal(result, e)
# 5592 revisited, with datetimes
def f(grp):
if grp.name == "Pony":
return None
return grp.iloc[0]
result = df.groupby("A").apply(f)[["C"]]
e = df.groupby("A").first()[["C"]]
e.loc["Pony"] = pd.NaT
tm.assert_frame_equal(result, e)
# scalar outputs
def f(grp):
if grp.name == "Pony":
return None
return grp.iloc[0].loc["C"]
result = df.groupby("A").apply(f)
e = df.groupby("A").first()["C"].copy()
e.loc["Pony"] = np.nan
e.name = None
tm.assert_series_equal(result, e)
def test_pass_args_kwargs(ts, tsframe):
def f(x, q=None, axis=0):
return np.percentile(x, q, axis=axis)
g = lambda x: np.percentile(x, 80, axis=0)
# Series
ts_grouped = ts.groupby(lambda x: x.month)
agg_result = ts_grouped.agg(np.percentile, 80, axis=0)
apply_result = ts_grouped.apply(np.percentile, 80, axis=0)
trans_result = ts_grouped.transform(np.percentile, 80, axis=0)
agg_expected = ts_grouped.quantile(0.8)
trans_expected = ts_grouped.transform(g)
tm.assert_series_equal(apply_result, agg_expected)
tm.assert_series_equal(agg_result, agg_expected)
tm.assert_series_equal(trans_result, trans_expected)
agg_result = ts_grouped.agg(f, q=80)
apply_result = ts_grouped.apply(f, q=80)
trans_result = ts_grouped.transform(f, q=80)
tm.assert_series_equal(agg_result, agg_expected)
tm.assert_series_equal(apply_result, agg_expected)
tm.assert_series_equal(trans_result, trans_expected)
# DataFrame
df_grouped = tsframe.groupby(lambda x: x.month)
agg_result = df_grouped.agg(np.percentile, 80, axis=0)
apply_result = df_grouped.apply(DataFrame.quantile, 0.8)
expected = df_grouped.quantile(0.8)
tm.assert_frame_equal(apply_result, expected, check_names=False)
tm.assert_frame_equal(agg_result, expected)
agg_result = df_grouped.agg(f, q=80)
apply_result = df_grouped.apply(DataFrame.quantile, q=0.8)
tm.assert_frame_equal(agg_result, expected)
tm.assert_frame_equal(apply_result, expected, check_names=False)
def test_len():
df = tm.makeTimeDataFrame()
grouped = df.groupby([lambda x: x.year, lambda x: x.month, lambda x: x.day])
assert len(grouped) == len(df)
grouped = df.groupby([lambda x: x.year, lambda x: x.month])
expected = len({(x.year, x.month) for x in df.index})
assert len(grouped) == expected
# issue 11016
df = pd.DataFrame(dict(a=[np.nan] * 3, b=[1, 2, 3]))
assert len(df.groupby(("a"))) == 0
assert len(df.groupby(("b"))) == 3
assert len(df.groupby(["a", "b"])) == 3
def test_basic_regression():
# regression
result = Series([1.0 * x for x in list(range(1, 10)) * 10])
data = np.random.random(1100) * 10.0
groupings = Series(data)
grouped = result.groupby(groupings)
grouped.mean()
@pytest.mark.parametrize(
"dtype", ["float64", "float32", "int64", "int32", "int16", "int8"]
)
def test_with_na_groups(dtype):
index = Index(np.arange(10))
values = Series(np.ones(10), index, dtype=dtype)
labels = Series(
[np.nan, "foo", "bar", "bar", np.nan, np.nan, "bar", "bar", np.nan, "foo"],
index=index,
)
# this SHOULD be an int
grouped = values.groupby(labels)
agged = grouped.agg(len)
expected = Series([4, 2], index=["bar", "foo"])
tm.assert_series_equal(agged, expected, check_dtype=False)
# assert issubclass(agged.dtype.type, np.integer)
# explicitly return a float from my function
def f(x):
return float(len(x))
agged = grouped.agg(f)
expected = Series([4, 2], index=["bar", "foo"])
tm.assert_series_equal(agged, expected, check_dtype=False)
assert issubclass(agged.dtype.type, np.dtype(dtype).type)
def test_indices_concatenation_order():
# GH 2808
def f1(x):
y = x[(x.b % 2) == 1] ** 2
if y.empty:
multiindex = MultiIndex(levels=[[]] * 2, codes=[[]] * 2, names=["b", "c"])
res = DataFrame(columns=["a"], index=multiindex)
return res
else:
y = y.set_index(["b", "c"])
return y
def f2(x):
y = x[(x.b % 2) == 1] ** 2
if y.empty:
return DataFrame()
else:
y = y.set_index(["b", "c"])
return y
def f3(x):
y = x[(x.b % 2) == 1] ** 2
if y.empty:
multiindex = MultiIndex(
levels=[[]] * 2, codes=[[]] * 2, names=["foo", "bar"]
)
res = DataFrame(columns=["a", "b"], index=multiindex)
return res
else:
return y
df = DataFrame({"a": [1, 2, 2, 2], "b": range(4), "c": range(5, 9)})
df2 = DataFrame({"a": [3, 2, 2, 2], "b": range(4), "c": range(5, 9)})
# correct result
result1 = df.groupby("a").apply(f1)
result2 = df2.groupby("a").apply(f1)
tm.assert_frame_equal(result1, result2)
# should fail (not the same number of levels)
msg = "Cannot concat indices that do not have the same number of levels"
with pytest.raises(AssertionError, match=msg):
df.groupby("a").apply(f2)
with pytest.raises(AssertionError, match=msg):
df2.groupby("a").apply(f2)
# should fail (incorrect shape)
with pytest.raises(AssertionError, match=msg):
df.groupby("a").apply(f3)
with pytest.raises(AssertionError, match=msg):
df2.groupby("a").apply(f3)
def test_attr_wrapper(ts):
grouped = ts.groupby(lambda x: x.weekday())
result = grouped.std()
expected = grouped.agg(lambda x: np.std(x, ddof=1))
tm.assert_series_equal(result, expected)
# this is pretty cool
result = grouped.describe()
expected = {name: gp.describe() for name, gp in grouped}
expected = DataFrame(expected).T
tm.assert_frame_equal(result, expected)
# get attribute
result = grouped.dtype
expected = grouped.agg(lambda x: x.dtype)
# make sure raises error
msg = "'SeriesGroupBy' object has no attribute 'foo'"
with pytest.raises(AttributeError, match=msg):
getattr(grouped, "foo")
def test_frame_groupby(tsframe):
grouped = tsframe.groupby(lambda x: x.weekday())
# aggregate
aggregated = grouped.aggregate(np.mean)
assert len(aggregated) == 5
assert len(aggregated.columns) == 4
# by string
tscopy = tsframe.copy()
tscopy["weekday"] = [x.weekday() for x in tscopy.index]
stragged = tscopy.groupby("weekday").aggregate(np.mean)
tm.assert_frame_equal(stragged, aggregated, check_names=False)
# transform
grouped = tsframe.head(30).groupby(lambda x: x.weekday())
transformed = grouped.transform(lambda x: x - x.mean())
assert len(transformed) == 30
assert len(transformed.columns) == 4
# transform propagate
transformed = grouped.transform(lambda x: x.mean())
for name, group in grouped:
mean = group.mean()
for idx in group.index:
tm.assert_series_equal(transformed.xs(idx), mean, check_names=False)
# iterate
for weekday, group in grouped:
assert group.index[0].weekday() == weekday
# groups / group_indices
groups = grouped.groups
indices = grouped.indices
for k, v in groups.items():
samething = tsframe.index.take(indices[k])
assert (samething == v).all()
def test_frame_groupby_columns(tsframe):
mapping = {"A": 0, "B": 0, "C": 1, "D": 1}
grouped = tsframe.groupby(mapping, axis=1)
# aggregate
aggregated = grouped.aggregate(np.mean)
assert len(aggregated) == len(tsframe)
assert len(aggregated.columns) == 2
# transform
tf = lambda x: x - x.mean()
groupedT = tsframe.T.groupby(mapping, axis=0)
tm.assert_frame_equal(groupedT.transform(tf).T, grouped.transform(tf))
# iterate
for k, v in grouped:
assert len(v.columns) == 2
def test_frame_set_name_single(df):
grouped = df.groupby("A")
result = grouped.mean()
assert result.index.name == "A"
result = df.groupby("A", as_index=False).mean()
assert result.index.name != "A"
result = grouped.agg(np.mean)
assert result.index.name == "A"
result = grouped.agg({"C": np.mean, "D": np.std})
assert result.index.name == "A"
result = grouped["C"].mean()
assert result.index.name == "A"
result = grouped["C"].agg(np.mean)
assert result.index.name == "A"
result = grouped["C"].agg([np.mean, np.std])
assert result.index.name == "A"
msg = r"nested renamer is not supported"
with pytest.raises(SpecificationError, match=msg):
grouped["C"].agg({"foo": np.mean, "bar": np.std})
def test_multi_func(df):
col1 = df["A"]
col2 = df["B"]
grouped = df.groupby([col1.get, col2.get])
agged = grouped.mean()
expected = df.groupby(["A", "B"]).mean()
# TODO groupby get drops names
tm.assert_frame_equal(
agged.loc[:, ["C", "D"]], expected.loc[:, ["C", "D"]], check_names=False
)
# some "groups" with no data
df = DataFrame(
{
"v1": np.random.randn(6),
"v2": np.random.randn(6),
"k1": np.array(["b", "b", "b", "a", "a", "a"]),
"k2": np.array(["1", "1", "1", "2", "2", "2"]),
},
index=["one", "two", "three", "four", "five", "six"],
)
# only verify that it works for now
grouped = df.groupby(["k1", "k2"])
grouped.agg(np.sum)
def test_multi_key_multiple_functions(df):
grouped = df.groupby(["A", "B"])["C"]
agged = grouped.agg([np.mean, np.std])
expected = DataFrame({"mean": grouped.agg(np.mean), "std": grouped.agg(np.std)})
tm.assert_frame_equal(agged, expected)
def test_frame_multi_key_function_list():
data = DataFrame(
{
"A": [
"foo",
"foo",
"foo",
"foo",
"bar",
"bar",
"bar",
"bar",
"foo",
"foo",
"foo",
],
"B": [
"one",
"one",
"one",
"two",
"one",
"one",
"one",
"two",
"two",
"two",
"one",
],
"C": [
"dull",
"dull",
"shiny",
"dull",
"dull",
"shiny",
"shiny",
"dull",
"shiny",
"shiny",
"shiny",
],
"D": np.random.randn(11),
"E": np.random.randn(11),
"F": np.random.randn(11),
}
)
grouped = data.groupby(["A", "B"])
funcs = [np.mean, np.std]
agged = grouped.agg(funcs)
expected = pd.concat(
[grouped["D"].agg(funcs), grouped["E"].agg(funcs), grouped["F"].agg(funcs)],
keys=["D", "E", "F"],
axis=1,
)
assert isinstance(agged.index, MultiIndex)
assert isinstance(expected.index, MultiIndex)
tm.assert_frame_equal(agged, expected)
@pytest.mark.parametrize("op", [lambda x: x.sum(), lambda x: x.mean()])
def test_groupby_multiple_columns(df, op):
data = df
grouped = data.groupby(["A", "B"])
result1 = op(grouped)
keys = []
values = []
for n1, gp1 in data.groupby("A"):
for n2, gp2 in gp1.groupby("B"):
keys.append((n1, n2))
values.append(op(gp2.loc[:, ["C", "D"]]))
mi = MultiIndex.from_tuples(keys, names=["A", "B"])
expected = pd.concat(values, axis=1).T
expected.index = mi
# a little bit crude
for col in ["C", "D"]:
result_col = op(grouped[col])
pivoted = result1[col]
exp = expected[col]
tm.assert_series_equal(result_col, exp)
tm.assert_series_equal(pivoted, exp)
# test single series works the same
result = data["C"].groupby([data["A"], data["B"]]).mean()
expected = data.groupby(["A", "B"]).mean()["C"]
tm.assert_series_equal(result, expected)
def test_as_index_select_column():
# GH 5764
df = pd.DataFrame([[1, 2], [1, 4], [5, 6]], columns=["A", "B"])
result = df.groupby("A", as_index=False)["B"].get_group(1)
expected = pd.Series([2, 4], name="B")
tm.assert_series_equal(result, expected)
result = df.groupby("A", as_index=False)["B"].apply(lambda x: x.cumsum())
expected = pd.Series(
[2, 6, 6], name="B", index=pd.MultiIndex.from_tuples([(0, 0), (0, 1), (1, 2)])
)
tm.assert_series_equal(result, expected)
def test_groupby_as_index_select_column_sum_empty_df():
# GH 35246
df = DataFrame(columns=["A", "B", "C"])
left = df.groupby(by="A", as_index=False)["B"].sum()
assert type(left) is DataFrame
assert left.to_dict() == {"A": {}, "B": {}}
def test_groupby_as_index_agg(df):
grouped = df.groupby("A", as_index=False)
# single-key
result = grouped.agg(np.mean)
expected = grouped.mean()
tm.assert_frame_equal(result, expected)
result2 = grouped.agg({"C": np.mean, "D": np.sum})
expected2 = grouped.mean()
expected2["D"] = grouped.sum()["D"]
tm.assert_frame_equal(result2, expected2)
grouped = df.groupby("A", as_index=True)
msg = r"nested renamer is not supported"
with pytest.raises(SpecificationError, match=msg):
grouped["C"].agg({"Q": np.sum})
# multi-key
grouped = df.groupby(["A", "B"], as_index=False)
result = grouped.agg(np.mean)
expected = grouped.mean()
tm.assert_frame_equal(result, expected)
result2 = grouped.agg({"C": np.mean, "D": np.sum})
expected2 = grouped.mean()
expected2["D"] = grouped.sum()["D"]
tm.assert_frame_equal(result2, expected2)
expected3 = grouped["C"].sum()
expected3 = DataFrame(expected3).rename(columns={"C": "Q"})
result3 = grouped["C"].agg({"Q": np.sum})
tm.assert_frame_equal(result3, expected3)
# GH7115 & GH8112 & GH8582
df = DataFrame(np.random.randint(0, 100, (50, 3)), columns=["jim", "joe", "jolie"])
ts = Series(np.random.randint(5, 10, 50), name="jim")
gr = df.groupby(ts)
gr.nth(0) # invokes set_selection_from_grouper internally
tm.assert_frame_equal(gr.apply(sum), df.groupby(ts).apply(sum))
for attr in ["mean", "max", "count", "idxmax", "cumsum", "all"]:
gr = df.groupby(ts, as_index=False)
left = getattr(gr, attr)()
gr = df.groupby(ts.values, as_index=True)
right = getattr(gr, attr)().reset_index(drop=True)
tm.assert_frame_equal(left, right)
def test_ops_not_as_index(reduction_func):
# GH 10355, 21090
# Using as_index=False should not modify grouped column
if reduction_func in ("corrwith",):
pytest.skip("Test not applicable")
if reduction_func in ("nth", "ngroup",):
pytest.skip("Skip until behavior is determined (GH #5755)")
df = DataFrame(np.random.randint(0, 5, size=(100, 2)), columns=["a", "b"])
expected = getattr(df.groupby("a"), reduction_func)()
if reduction_func == "size":
expected = expected.rename("size")
expected = expected.reset_index()
g = df.groupby("a", as_index=False)
result = getattr(g, reduction_func)()
tm.assert_frame_equal(result, expected)
result = g.agg(reduction_func)
tm.assert_frame_equal(result, expected)
result = getattr(g["b"], reduction_func)()
tm.assert_frame_equal(result, expected)
result = g["b"].agg(reduction_func)
tm.assert_frame_equal(result, expected)
def test_as_index_series_return_frame(df):
grouped = df.groupby("A", as_index=False)
grouped2 = df.groupby(["A", "B"], as_index=False)
result = grouped["C"].agg(np.sum)
expected = grouped.agg(np.sum).loc[:, ["A", "C"]]
assert isinstance(result, DataFrame)
tm.assert_frame_equal(result, expected)
result2 = grouped2["C"].agg(np.sum)
expected2 = grouped2.agg(np.sum).loc[:, ["A", "B", "C"]]
assert isinstance(result2, DataFrame)
tm.assert_frame_equal(result2, expected2)
result = grouped["C"].sum()
expected = grouped.sum().loc[:, ["A", "C"]]
assert isinstance(result, DataFrame)
tm.assert_frame_equal(result, expected)
result2 = grouped2["C"].sum()
expected2 = grouped2.sum().loc[:, ["A", "B", "C"]]
assert isinstance(result2, DataFrame)
tm.assert_frame_equal(result2, expected2)
def test_as_index_series_column_slice_raises(df):
# GH15072
grouped = df.groupby("A", as_index=False)
msg = r"Column\(s\) C already selected"
with pytest.raises(IndexError, match=msg):
grouped["C"].__getitem__("D")
def test_groupby_as_index_cython(df):
data = df
# single-key
grouped = data.groupby("A", as_index=False)
result = grouped.mean()
expected = data.groupby(["A"]).mean()
expected.insert(0, "A", expected.index)
expected.index = np.arange(len(expected))
tm.assert_frame_equal(result, expected)
# multi-key
grouped = data.groupby(["A", "B"], as_index=False)
result = grouped.mean()
expected = data.groupby(["A", "B"]).mean()
arrays = list(zip(*expected.index.values))
expected.insert(0, "A", arrays[0])
expected.insert(1, "B", arrays[1])
expected.index = np.arange(len(expected))
tm.assert_frame_equal(result, expected)
def test_groupby_as_index_series_scalar(df):
grouped = df.groupby(["A", "B"], as_index=False)
# GH #421
result = grouped["C"].agg(len)
expected = grouped.agg(len).loc[:, ["A", "B", "C"]]
tm.assert_frame_equal(result, expected)
def test_groupby_as_index_corner(df, ts):
msg = "as_index=False only valid with DataFrame"
with pytest.raises(TypeError, match=msg):
ts.groupby(lambda x: x.weekday(), as_index=False)
msg = "as_index=False only valid for axis=0"
with pytest.raises(ValueError, match=msg):
df.groupby(lambda x: x.lower(), as_index=False, axis=1)
def test_groupby_multiple_key(df):
df = tm.makeTimeDataFrame()
grouped = df.groupby([lambda x: x.year, lambda x: x.month, lambda x: x.day])
agged = grouped.sum()
tm.assert_almost_equal(df.values, agged.values)
grouped = df.T.groupby(
[lambda x: x.year, lambda x: x.month, lambda x: x.day], axis=1
)
agged = grouped.agg(lambda x: x.sum())
tm.assert_index_equal(agged.index, df.columns)
tm.assert_almost_equal(df.T.values, agged.values)
agged = grouped.agg(lambda x: x.sum())
tm.assert_almost_equal(df.T.values, agged.values)
def test_groupby_multi_corner(df):
# test that having an all-NA column doesn't mess you up
df = df.copy()
df["bad"] = np.nan
agged = df.groupby(["A", "B"]).mean()
expected = df.groupby(["A", "B"]).mean()
expected["bad"] = np.nan
tm.assert_frame_equal(agged, expected)
def test_omit_nuisance(df):
grouped = df.groupby("A")
result = grouped.mean()
expected = df.loc[:, ["A", "C", "D"]].groupby("A").mean()
tm.assert_frame_equal(result, expected)
agged = grouped.agg(np.mean)
exp = grouped.mean()
tm.assert_frame_equal(agged, exp)
df = df.loc[:, ["A", "C", "D"]]
df["E"] = datetime.now()
grouped = df.groupby("A")
result = grouped.agg(np.sum)
expected = grouped.sum()
tm.assert_frame_equal(result, expected)
# won't work with axis = 1
grouped = df.groupby({"A": 0, "C": 0, "D": 1, "E": 1}, axis=1)
msg = "reduction operation 'sum' not allowed for this dtype"
with pytest.raises(TypeError, match=msg):
grouped.agg(lambda x: x.sum(0, numeric_only=False))
def test_omit_nuisance_python_multiple(three_group):
grouped = three_group.groupby(["A", "B"])
agged = grouped.agg(np.mean)
exp = grouped.mean()
tm.assert_frame_equal(agged, exp)
def test_empty_groups_corner(mframe):
# handle empty groups
df = DataFrame(
{
"k1": np.array(["b", "b", "b", "a", "a", "a"]),
"k2": np.array(["1", "1", "1", "2", "2", "2"]),
"k3": ["foo", "bar"] * 3,
"v1": np.random.randn(6),
"v2": np.random.randn(6),
}
)
grouped = df.groupby(["k1", "k2"])
result = grouped.agg(np.mean)
expected = grouped.mean()
tm.assert_frame_equal(result, expected)
grouped = mframe[3:5].groupby(level=0)
agged = grouped.apply(lambda x: x.mean())
agged_A = grouped["A"].apply(np.mean)
tm.assert_series_equal(agged["A"], agged_A)
assert agged.index.name == "first"
def test_nonsense_func():
df = DataFrame([0])
msg = r"unsupported operand type\(s\) for \+: 'int' and 'str'"
with pytest.raises(TypeError, match=msg):
df.groupby(lambda x: x + "foo")
def test_wrap_aggregated_output_multindex(mframe):
df = mframe.T
df["baz", "two"] = "peekaboo"
keys = [np.array([0, 0, 1]), np.array([0, 0, 1])]
agged = df.groupby(keys).agg(np.mean)
assert isinstance(agged.columns, MultiIndex)
def aggfun(ser):
if ser.name == ("foo", "one"):
raise TypeError
else:
return ser.sum()
agged2 = df.groupby(keys).aggregate(aggfun)
assert len(agged2.columns) + 1 == len(df.columns)
def test_groupby_level_apply(mframe):
result = mframe.groupby(level=0).count()
assert result.index.name == "first"
result = mframe.groupby(level=1).count()
assert result.index.name == "second"
result = mframe["A"].groupby(level=0).count()
assert result.index.name == "first"
def test_groupby_level_mapper(mframe):
deleveled = mframe.reset_index()
mapper0 = {"foo": 0, "bar": 0, "baz": 1, "qux": 1}
mapper1 = {"one": 0, "two": 0, "three": 1}
result0 = mframe.groupby(mapper0, level=0).sum()
result1 = mframe.groupby(mapper1, level=1).sum()
mapped_level0 = np.array([mapper0.get(x) for x in deleveled["first"]])
mapped_level1 = np.array([mapper1.get(x) for x in deleveled["second"]])
expected0 = mframe.groupby(mapped_level0).sum()
expected1 = mframe.groupby(mapped_level1).sum()
expected0.index.name, expected1.index.name = "first", "second"
tm.assert_frame_equal(result0, expected0)
tm.assert_frame_equal(result1, expected1)
def test_groupby_level_nonmulti():
# GH 1313, GH 13901
s = Series([1, 2, 3, 10, 4, 5, 20, 6], Index([1, 2, 3, 1, 4, 5, 2, 6], name="foo"))
expected = Series([11, 22, 3, 4, 5, 6], Index(range(1, 7), name="foo"))
result = s.groupby(level=0).sum()
tm.assert_series_equal(result, expected)
result = s.groupby(level=[0]).sum()
tm.assert_series_equal(result, expected)
result = s.groupby(level=-1).sum()
tm.assert_series_equal(result, expected)
result = s.groupby(level=[-1]).sum()
tm.assert_series_equal(result, expected)
msg = "level > 0 or level < -1 only valid with MultiIndex"
with pytest.raises(ValueError, match=msg):
s.groupby(level=1)
with pytest.raises(ValueError, match=msg):
s.groupby(level=-2)
msg = "No group keys passed!"
with pytest.raises(ValueError, match=msg):
s.groupby(level=[])
msg = "multiple levels only valid with MultiIndex"
with pytest.raises(ValueError, match=msg):
s.groupby(level=[0, 0])
with pytest.raises(ValueError, match=msg):
s.groupby(level=[0, 1])
msg = "level > 0 or level < -1 only valid with MultiIndex"
with pytest.raises(ValueError, match=msg):
s.groupby(level=[1])
def test_groupby_complex():
# GH 12902
a = Series(data=np.arange(4) * (1 + 2j), index=[0, 0, 1, 1])
expected = Series((1 + 2j, 5 + 10j))
result = a.groupby(level=0).sum()
tm.assert_series_equal(result, expected)
result = a.sum(level=0)
tm.assert_series_equal(result, expected)
def test_groupby_series_indexed_differently():
s1 = Series(
[5.0, -9.0, 4.0, 100.0, -5.0, 55.0, 6.7],
index=Index(["a", "b", "c", "d", "e", "f", "g"]),
)
s2 = Series(
[1.0, 1.0, 4.0, 5.0, 5.0, 7.0], index=Index(["a", "b", "d", "f", "g", "h"])
)
grouped = s1.groupby(s2)
agged = grouped.mean()
exp = s1.groupby(s2.reindex(s1.index).get).mean()
tm.assert_series_equal(agged, exp)
def test_groupby_with_hier_columns():
tuples = list(
zip(
*[
["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
["one", "two", "one", "two", "one", "two", "one", "two"],
]
)
)
index = MultiIndex.from_tuples(tuples)
columns = MultiIndex.from_tuples(
[("A", "cat"), ("B", "dog"), ("B", "cat"), ("A", "dog")]
)
df = DataFrame(np.random.randn(8, 4), index=index, columns=columns)
result = df.groupby(level=0).mean()
tm.assert_index_equal(result.columns, columns)
result = df.groupby(level=0, axis=1).mean()
tm.assert_index_equal(result.index, df.index)
result = df.groupby(level=0).agg(np.mean)
tm.assert_index_equal(result.columns, columns)
result = df.groupby(level=0).apply(lambda x: x.mean())
tm.assert_index_equal(result.columns, columns)
result = df.groupby(level=0, axis=1).agg(lambda x: x.mean(1))
tm.assert_index_equal(result.columns, Index(["A", "B"]))
tm.assert_index_equal(result.index, df.index)
# add a nuisance column
sorted_columns, _ = columns.sortlevel(0)
df["A", "foo"] = "bar"
result = df.groupby(level=0).mean()
tm.assert_index_equal(result.columns, df.columns[:-1])
def test_grouping_ndarray(df):
grouped = df.groupby(df["A"].values)
result = grouped.sum()
expected = df.groupby("A").sum()
tm.assert_frame_equal(
result, expected, check_names=False
) # Note: no names when grouping by value
def test_groupby_wrong_multi_labels():
data = """index,foo,bar,baz,spam,data
0,foo1,bar1,baz1,spam2,20
1,foo1,bar2,baz1,spam3,30
2,foo2,bar2,baz1,spam2,40
3,foo1,bar1,baz2,spam1,50
4,foo3,bar1,baz2,spam1,60"""
data = read_csv(StringIO(data), index_col=0)
grouped = data.groupby(["foo", "bar", "baz", "spam"])
result = grouped.agg(np.mean)
expected = grouped.mean()
tm.assert_frame_equal(result, expected)
def test_groupby_series_with_name(df):
result = df.groupby(df["A"]).mean()
result2 = df.groupby(df["A"], as_index=False).mean()
assert result.index.name == "A"
assert "A" in result2
result = df.groupby([df["A"], df["B"]]).mean()
result2 = df.groupby([df["A"], df["B"]], as_index=False).mean()
assert result.index.names == ("A", "B")
assert "A" in result2
assert "B" in result2
def test_seriesgroupby_name_attr(df):
# GH 6265
result = df.groupby("A")["C"]
assert result.count().name == "C"
assert result.mean().name == "C"
testFunc = lambda x: np.sum(x) * 2
assert result.agg(testFunc).name == "C"
def test_consistency_name():
# GH 12363
df = DataFrame(
{
"A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
"B": ["one", "one", "two", "two", "two", "two", "one", "two"],
"C": np.random.randn(8) + 1.0,
"D": np.arange(8),
}
)
expected = df.groupby(["A"]).B.count()
result = df.B.groupby(df.A).count()
tm.assert_series_equal(result, expected)
def test_groupby_name_propagation(df):
# GH 6124
def summarize(df, name=None):
return Series({"count": 1, "mean": 2, "omissions": 3}, name=name)
def summarize_random_name(df):
# Provide a different name for each Series. In this case, groupby
# should not attempt to propagate the Series name since they are
# inconsistent.
return Series({"count": 1, "mean": 2, "omissions": 3}, name=df.iloc[0]["A"])
metrics = df.groupby("A").apply(summarize)
assert metrics.columns.name is None
metrics = df.groupby("A").apply(summarize, "metrics")
assert metrics.columns.name == "metrics"
metrics = df.groupby("A").apply(summarize_random_name)
assert metrics.columns.name is None
def test_groupby_nonstring_columns():
df = DataFrame([np.arange(10) for x in range(10)])
grouped = df.groupby(0)
result = grouped.mean()
expected = df.groupby(df[0]).mean()
tm.assert_frame_equal(result, expected)
def test_groupby_mixed_type_columns():
# GH 13432, unorderable types in py3
df = DataFrame([[0, 1, 2]], columns=["A", "B", 0])
expected = DataFrame([[1, 2]], columns=["B", 0], index=Index([0], name="A"))
result = df.groupby("A").first()
tm.assert_frame_equal(result, expected)
result = df.groupby("A").sum()
tm.assert_frame_equal(result, expected)
# TODO: Ensure warning isn't emitted in the first place
@pytest.mark.filterwarnings("ignore:Mean of:RuntimeWarning")
def test_cython_grouper_series_bug_noncontig():
arr = np.empty((100, 100))
arr.fill(np.nan)
obj = Series(arr[:, 0])
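    # arr[:, 0] is a strided (non-contiguous) view of the 2-D array, which is the
    # case this regression test exercises.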
inds = np.tile(range(10), 10)
result = obj.groupby(inds).agg(Series.median)
assert result.isna().all()
def test_series_grouper_noncontig_index():
index = Index(tm.rands_array(10, 100))
values = Series(np.random.randn(50), index=index[::2])
labels = np.random.randint(0, 5, 50)
# it works!
grouped = values.groupby(labels)
# accessing the index elements causes segfault
f = lambda x: len(set(map(id, x.index)))
grouped.agg(f)
def test_convert_objects_leave_decimal_alone():
s = Series(range(5))
labels = np.array(["a", "b", "c", "d", "e"], dtype="O")
def convert_fast(x):
return Decimal(str(x.mean()))
def convert_force_pure(x):
# base will be length 0
assert len(x.values.base) > 0
return Decimal(str(x.mean()))
grouped = s.groupby(labels)
result = grouped.agg(convert_fast)
assert result.dtype == np.object_
assert isinstance(result[0], Decimal)
result = grouped.agg(convert_force_pure)
assert result.dtype == np.object_
assert isinstance(result[0], Decimal)
def test_groupby_dtype_inference_empty():
# GH 6733
df = DataFrame({"x": [], "range": np.arange(0, dtype="int64")})
assert df["x"].dtype == np.float64
result = df.groupby("x").first()
exp_index = Index([], name="x", dtype=np.float64)
expected = DataFrame({"range": Series([], index=exp_index, dtype="int64")})
tm.assert_frame_equal(result, expected, by_blocks=True)
def test_groupby_list_infer_array_like(df):
result = df.groupby(list(df["A"])).mean()
expected = df.groupby(df["A"]).mean()
tm.assert_frame_equal(result, expected, check_names=False)
with pytest.raises(KeyError, match=r"^'foo'$"):
df.groupby(list(df["A"][:-1]))
# pathological case of ambiguity
df = DataFrame({"foo": [0, 1], "bar": [3, 4], "val": np.random.randn(2)})
result = df.groupby(["foo", "bar"]).mean()
expected = df.groupby([df["foo"], df["bar"]]).mean()[["val"]]
def test_groupby_keys_same_size_as_index():
# GH 11185
freq = "s"
index = pd.date_range(
start=pd.Timestamp("2015-09-29T11:34:44-0700"), periods=2, freq=freq
)
df = pd.DataFrame([["A", 10], ["B", 15]], columns=["metric", "values"], index=index)
result = df.groupby([pd.Grouper(level=0, freq=freq), "metric"]).mean()
expected = df.set_index([df.index, "metric"])
tm.assert_frame_equal(result, expected)
def test_groupby_one_row():
# GH 11741
msg = r"^'Z'$"
df1 = pd.DataFrame(np.random.randn(1, 4), columns=list("ABCD"))
with pytest.raises(KeyError, match=msg):
df1.groupby("Z")
df2 = pd.DataFrame(np.random.randn(2, 4), columns=list("ABCD"))
with pytest.raises(KeyError, match=msg):
df2.groupby("Z")
def test_groupby_nat_exclude():
# GH 6992
df = pd.DataFrame(
{
"values": np.random.randn(8),
"dt": [
np.nan,
pd.Timestamp("2013-01-01"),
np.nan,
pd.Timestamp("2013-02-01"),
np.nan,
pd.Timestamp("2013-02-01"),
np.nan,
pd.Timestamp("2013-01-01"),
],
"str": [np.nan, "a", np.nan, "a", np.nan, "a", np.nan, "b"],
}
)
grouped = df.groupby("dt")
expected = [pd.Index([1, 7]), pd.Index([3, 5])]
keys = sorted(grouped.groups.keys())
assert len(keys) == 2
for k, e in zip(keys, expected):
# grouped.groups keys are np.datetime64 with system tz
# not to be affected by tz, only compare values
tm.assert_index_equal(grouped.groups[k], e)
# confirm obj is not filtered
tm.assert_frame_equal(grouped.grouper.groupings[0].obj, df)
assert grouped.ngroups == 2
expected = {
Timestamp("2013-01-01 00:00:00"): np.array([1, 7], dtype=np.intp),
Timestamp("2013-02-01 00:00:00"): np.array([3, 5], dtype=np.intp),
}
for k in grouped.indices:
tm.assert_numpy_array_equal(grouped.indices[k], expected[k])
tm.assert_frame_equal(grouped.get_group(Timestamp("2013-01-01")), df.iloc[[1, 7]])
tm.assert_frame_equal(grouped.get_group(Timestamp("2013-02-01")), df.iloc[[3, 5]])
with pytest.raises(KeyError, match=r"^NaT$"):
grouped.get_group(pd.NaT)
nan_df = DataFrame(
{"nan": [np.nan, np.nan, np.nan], "nat": [pd.NaT, pd.NaT, pd.NaT]}
)
assert nan_df["nan"].dtype == "float64"
assert nan_df["nat"].dtype == "datetime64[ns]"
for key in ["nan", "nat"]:
grouped = nan_df.groupby(key)
assert grouped.groups == {}
assert grouped.ngroups == 0
assert grouped.indices == {}
with pytest.raises(KeyError, match=r"^nan$"):
grouped.get_group(np.nan)
with pytest.raises(KeyError, match=r"^NaT$"):
grouped.get_group(pd.NaT)
def test_groupby_2d_malformed():
d = DataFrame(index=range(2))
d["group"] = ["g1", "g2"]
d["zeros"] = [0, 0]
d["ones"] = [1, 1]
d["label"] = ["l1", "l2"]
tmp = d.groupby(["group"]).mean()
res_values = np.array([[0, 1], [0, 1]], dtype=np.int64)
tm.assert_index_equal(tmp.columns, Index(["zeros", "ones"]))
tm.assert_numpy_array_equal(tmp.values, res_values)
def test_int32_overflow():
B = np.concatenate((np.arange(10000), np.arange(10000), np.arange(5000)))
A = np.arange(25000)
df = DataFrame({"A": A, "B": B, "C": A, "D": B, "E": np.random.randn(25000)})
left = df.groupby(["A", "B", "C", "D"]).sum()
right = df.groupby(["D", "C", "B", "A"]).sum()
assert len(left) == len(right)
def test_groupby_sort_multi():
df = DataFrame(
{
"a": ["foo", "bar", "baz"],
"b": [3, 2, 1],
"c": [0, 1, 2],
"d": np.random.randn(3),
}
)
tups = [tuple(row) for row in df[["a", "b", "c"]].values]
tups = com.asarray_tuplesafe(tups)
result = df.groupby(["a", "b", "c"], sort=True).sum()
tm.assert_numpy_array_equal(result.index.values, tups[[1, 2, 0]])
tups = [tuple(row) for row in df[["c", "a", "b"]].values]
tups = com.asarray_tuplesafe(tups)
result = df.groupby(["c", "a", "b"], sort=True).sum()
tm.assert_numpy_array_equal(result.index.values, tups)
tups = [tuple(x) for x in df[["b", "c", "a"]].values]
tups = com.asarray_tuplesafe(tups)
result = df.groupby(["b", "c", "a"], sort=True).sum()
tm.assert_numpy_array_equal(result.index.values, tups[[2, 1, 0]])
df = DataFrame(
{"a": [0, 1, 2, 0, 1, 2], "b": [0, 0, 0, 1, 1, 1], "d": np.random.randn(6)}
)
grouped = df.groupby(["a", "b"])["d"]
result = grouped.sum()
def _check_groupby(df, result, keys, field, f=lambda x: x.sum()):
tups = [tuple(row) for row in df[keys].values]
tups = com.asarray_tuplesafe(tups)
expected = f(df.groupby(tups)[field])
for k, v in expected.items():
assert result[k] == v
_check_groupby(df, result, ["a", "b"], "d")
def test_dont_clobber_name_column():
df = DataFrame(
{"key": ["a", "a", "a", "b", "b", "b"], "name": ["foo", "bar", "baz"] * 2}
)
result = df.groupby("key").apply(lambda x: x)
tm.assert_frame_equal(result, df)
def test_skip_group_keys():
tsf = tm.makeTimeDataFrame()
grouped = tsf.groupby(lambda x: x.month, group_keys=False)
result = grouped.apply(lambda x: x.sort_values(by="A")[:3])
pieces = [group.sort_values(by="A")[:3] for key, group in grouped]
expected = pd.concat(pieces)
tm.assert_frame_equal(result, expected)
grouped = tsf["A"].groupby(lambda x: x.month, group_keys=False)
result = grouped.apply(lambda x: x.sort_values()[:3])
pieces = [group.sort_values()[:3] for key, group in grouped]
expected = pd.concat(pieces)
tm.assert_series_equal(result, expected)
def test_no_nonsense_name(float_frame):
# GH #995
s = float_frame["C"].copy()
s.name = None
result = s.groupby(float_frame["A"]).agg(np.sum)
assert result.name is None
def test_multifunc_sum_bug():
# GH #1065
x = DataFrame(np.arange(9).reshape(3, 3))
x["test"] = 0
x["fl"] = [1.3, 1.5, 1.6]
grouped = x.groupby("test")
result = grouped.agg({"fl": "sum", 2: "size"})
assert result["fl"].dtype == np.float64
def test_handle_dict_return_value(df):
def f(group):
return {"max": group.max(), "min": group.min()}
def g(group):
return Series({"max": group.max(), "min": group.min()})
result = df.groupby("A")["C"].apply(f)
expected = df.groupby("A")["C"].apply(g)
assert isinstance(result, Series)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("grouper", ["A", ["A", "B"]])
def test_set_group_name(df, grouper):
def f(group):
assert group.name is not None
return group
def freduce(group):
assert group.name is not None
return group.sum()
def foo(x):
return freduce(x)
grouped = df.groupby(grouper)
# make sure all these work
grouped.apply(f)
grouped.aggregate(freduce)
grouped.aggregate({"C": freduce, "D": freduce})
grouped.transform(f)
grouped["C"].apply(f)
grouped["C"].aggregate(freduce)
grouped["C"].aggregate([freduce, foo])
grouped["C"].transform(f)
def test_group_name_available_in_inference_pass():
# gh-15062
df = pd.DataFrame({"a": [0, 0, 1, 1, 2, 2], "b": np.arange(6)})
names = []
def f(group):
names.append(group.name)
return group.copy()
df.groupby("a", sort=False, group_keys=False).apply(f)
expected_names = [0, 1, 2]
assert names == expected_names
def test_no_dummy_key_names(df):
# see gh-1291
result = df.groupby(df["A"].values).sum()
assert result.index.name is None
result = df.groupby([df["A"].values, df["B"].values]).sum()
assert result.index.names == (None, None)
def test_groupby_sort_multiindex_series():
# series multiindex groupby sort argument was not being passed through
# _compress_group_index
# GH 9444
index = MultiIndex(
levels=[[1, 2], [1, 2]],
codes=[[0, 0, 0, 0, 1, 1], [1, 1, 0, 0, 0, 0]],
names=["a", "b"],
)
mseries = Series([0, 1, 2, 3, 4, 5], index=index)
index = MultiIndex(
levels=[[1, 2], [1, 2]], codes=[[0, 0, 1], [1, 0, 0]], names=["a", "b"]
)
mseries_result = Series([0, 2, 4], index=index)
result = mseries.groupby(level=["a", "b"], sort=False).first()
tm.assert_series_equal(result, mseries_result)
result = mseries.groupby(level=["a", "b"], sort=True).first()
tm.assert_series_equal(result, mseries_result.sort_index())
def test_groupby_reindex_inside_function():
periods = 1000
ind = date_range(start="2012/1/1", freq="5min", periods=periods)
df = DataFrame({"high": np.arange(periods), "low": np.arange(periods)}, index=ind)
def agg_before(hour, func, fix=False):
"""
Run an aggregate func on the subset of data.
"""
def _func(data):
d = data.loc[data.index.map(lambda x: x.hour < 11)].dropna()
if fix:
data[data.index[0]]
if len(d) == 0:
return None
return func(d)
return _func
def afunc(data):
d = data.select(lambda x: x.hour < 11).dropna()
return np.max(d)
grouped = df.groupby(lambda x: datetime(x.year, x.month, x.day))
closure_bad = grouped.agg({"high": agg_before(11, np.max)})
closure_good = grouped.agg({"high": agg_before(11, np.max, True)})
tm.assert_frame_equal(closure_bad, closure_good)
def test_groupby_multiindex_missing_pair():
# GH9049
df = DataFrame(
{
"group1": ["a", "a", "a", "b"],
"group2": ["c", "c", "d", "c"],
"value": [1, 1, 1, 5],
}
)
df = df.set_index(["group1", "group2"])
df_grouped = df.groupby(level=["group1", "group2"], sort=True)
res = df_grouped.agg("sum")
idx = MultiIndex.from_tuples(
[("a", "c"), ("a", "d"), ("b", "c")], names=["group1", "group2"]
)
exp = DataFrame([[2], [1], [5]], index=idx, columns=["value"])
tm.assert_frame_equal(res, exp)
def test_groupby_multiindex_not_lexsorted():
# GH 11640
# define the lexsorted version
lexsorted_mi = MultiIndex.from_tuples(
[("a", ""), ("b1", "c1"), ("b2", "c2")], names=["b", "c"]
)
lexsorted_df = DataFrame([[1, 3, 4]], columns=lexsorted_mi)
assert lexsorted_df.columns.is_lexsorted()
# define the non-lexsorted version
not_lexsorted_df = DataFrame(
columns=["a", "b", "c", "d"], data=[[1, "b1", "c1", 3], [1, "b2", "c2", 4]]
)
not_lexsorted_df = not_lexsorted_df.pivot_table(
index="a", columns=["b", "c"], values="d"
)
not_lexsorted_df = not_lexsorted_df.reset_index()
assert not not_lexsorted_df.columns.is_lexsorted()
# compare the results
tm.assert_frame_equal(lexsorted_df, not_lexsorted_df)
expected = lexsorted_df.groupby("a").mean()
with tm.assert_produces_warning(PerformanceWarning):
result = not_lexsorted_df.groupby("a").mean()
tm.assert_frame_equal(expected, result)
# a transforming function should work regardless of sort
# GH 14776
df = DataFrame(
{"x": ["a", "a", "b", "a"], "y": [1, 1, 2, 2], "z": [1, 2, 3, 4]}
).set_index(["x", "y"])
assert not df.index.is_lexsorted()
for level in [0, 1, [0, 1]]:
for sort in [False, True]:
result = df.groupby(level=level, sort=sort).apply(DataFrame.drop_duplicates)
expected = df
tm.assert_frame_equal(expected, result)
result = (
df.sort_index()
.groupby(level=level, sort=sort)
.apply(DataFrame.drop_duplicates)
)
expected = df.sort_index()
tm.assert_frame_equal(expected, result)
def test_index_label_overlaps_location():
# checking we don't have any label/location confusion in the
# the wake of GH5375
df = DataFrame(list("ABCDE"), index=[2, 0, 2, 1, 1])
g = df.groupby(list("ababb"))
actual = g.filter(lambda x: len(x) > 2)
expected = df.iloc[[1, 3, 4]]
tm.assert_frame_equal(actual, expected)
ser = df[0]
g = ser.groupby(list("ababb"))
actual = g.filter(lambda x: len(x) > 2)
expected = ser.take([1, 3, 4])
tm.assert_series_equal(actual, expected)
# ... and again, with a generic Index of floats
df.index = df.index.astype(float)
g = df.groupby(list("ababb"))
actual = g.filter(lambda x: len(x) > 2)
expected = df.iloc[[1, 3, 4]]
tm.assert_frame_equal(actual, expected)
ser = df[0]
g = ser.groupby(list("ababb"))
actual = g.filter(lambda x: len(x) > 2)
expected = ser.take([1, 3, 4])
tm.assert_series_equal(actual, expected)
def test_transform_doesnt_clobber_ints():
# GH 7972
n = 6
x = np.arange(n)
df = DataFrame({"a": x // 2, "b": 2.0 * x, "c": 3.0 * x})
df2 = DataFrame({"a": x // 2 * 1.0, "b": 2.0 * x, "c": 3.0 * x})
gb = df.groupby("a")
result = gb.transform("mean")
gb2 = df2.groupby("a")
expected = gb2.transform("mean")
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"sort_column",
["ints", "floats", "strings", ["ints", "floats"], ["ints", "strings"]],
)
@pytest.mark.parametrize(
"group_column", ["int_groups", "string_groups", ["int_groups", "string_groups"]]
)
def test_groupby_preserves_sort(sort_column, group_column):
# Test to ensure that groupby always preserves sort order of original
# object. Issue #8588 and #9651
df = DataFrame(
{
"int_groups": [3, 1, 0, 1, 0, 3, 3, 3],
"string_groups": ["z", "a", "z", "a", "a", "g", "g", "g"],
"ints": [8, 7, 4, 5, 2, 9, 1, 1],
"floats": [2.3, 5.3, 6.2, -2.4, 2.2, 1.1, 1.1, 5],
"strings": ["z", "d", "a", "e", "word", "word2", "42", "47"],
}
)
# Try sorting on different types and with different group types
df = df.sort_values(by=sort_column)
g = df.groupby(group_column)
def test_sort(x):
tm.assert_frame_equal(x, x.sort_values(by=sort_column))
g.apply(test_sort)
def test_group_shift_with_null_key():
# This test is designed to replicate the segfault in issue #13813.
n_rows = 1200
# Generate a moderately large dataframe with occasional missing
# values in column `B`, and then group by [`A`, `B`]. This should
# force `-1` in `labels` array of `g.grouper.group_info` exactly
# at those places, where the group-by key is partially missing.
df = DataFrame(
[(i % 12, i % 3 if i % 3 else np.nan, i) for i in range(n_rows)],
dtype=float,
columns=["A", "B", "Z"],
index=None,
)
g = df.groupby(["A", "B"])
expected = DataFrame(
[(i + 12 if i % 3 and i < n_rows - 12 else np.nan) for i in range(n_rows)],
dtype=float,
columns=["Z"],
index=None,
)
result = g.shift(-1)
tm.assert_frame_equal(result, expected)
def test_group_shift_with_fill_value():
# GH #24128
n_rows = 24
df = DataFrame(
[(i % 12, i % 3, i) for i in range(n_rows)],
dtype=float,
columns=["A", "B", "Z"],
index=None,
)
g = df.groupby(["A", "B"])
expected = DataFrame(
[(i + 12 if i < n_rows - 12 else 0) for i in range(n_rows)],
dtype=float,
columns=["Z"],
index=None,
)
result = g.shift(-1, fill_value=0)[["Z"]]
tm.assert_frame_equal(result, expected)
def test_group_shift_lose_timezone():
# GH 30134
now_dt = pd.Timestamp.utcnow()
df = DataFrame({"a": [1, 1], "date": now_dt})
result = df.groupby("a").shift(0).iloc[0]
expected = Series({"date": now_dt}, name=result.name)
tm.assert_series_equal(result, expected)
def test_pivot_table_values_key_error():
# This test is designed to replicate the error in issue #14938
df = pd.DataFrame(
{
"eventDate": pd.date_range(datetime.today(), periods=20, freq="M").tolist(),
"thename": range(0, 20),
}
)
df["year"] = df.set_index("eventDate").index.year
df["month"] = df.set_index("eventDate").index.month
with pytest.raises(KeyError, match="'badname'"):
df.reset_index().pivot_table(
index="year", columns="month", values="badname", aggfunc="count"
)
def test_empty_dataframe_groupby():
# GH8093
df = DataFrame(columns=["A", "B", "C"])
result = df.groupby("A").sum()
expected = DataFrame(columns=["B", "C"], dtype=np.float64)
expected.index.name = "A"
tm.assert_frame_equal(result, expected)
def test_tuple_as_grouping():
# https://github.com/pandas-dev/pandas/issues/18314
df = pd.DataFrame(
{
("a", "b"): [1, 1, 1, 1],
"a": [2, 2, 2, 2],
"b": [2, 2, 2, 2],
"c": [1, 1, 1, 1],
}
)
with pytest.raises(KeyError, match=r"('a', 'b')"):
df[["a", "b", "c"]].groupby(("a", "b"))
result = df.groupby(("a", "b"))["c"].sum()
expected = pd.Series([4], name="c", index=pd.Index([1], name=("a", "b")))
tm.assert_series_equal(result, expected)
def test_tuple_correct_keyerror():
# https://github.com/pandas-dev/pandas/issues/18798
df = pd.DataFrame(
1, index=range(3), columns=pd.MultiIndex.from_product([[1, 2], [3, 4]])
)
with pytest.raises(KeyError, match=r"^\(7, 8\)$"):
df.groupby((7, 8)).mean()
def test_groupby_agg_ohlc_non_first():
# GH 21716
df = pd.DataFrame(
[[1], [1]],
columns=["foo"],
index=pd.date_range("2018-01-01", periods=2, freq="D"),
)
expected = pd.DataFrame(
[[1, 1, 1, 1, 1], [1, 1, 1, 1, 1]],
columns=pd.MultiIndex.from_tuples(
(
("foo", "sum", "foo"),
("foo", "ohlc", "open"),
("foo", "ohlc", "high"),
("foo", "ohlc", "low"),
("foo", "ohlc", "close"),
)
),
index=pd.date_range("2018-01-01", periods=2, freq="D"),
)
result = df.groupby(pd.Grouper(freq="D")).agg(["sum", "ohlc"])
tm.assert_frame_equal(result, expected)
def test_groupby_multiindex_nat():
# GH 9236
values = [
(pd.NaT, "a"),
(datetime(2012, 1, 2), "a"),
(datetime(2012, 1, 2), "b"),
(datetime(2012, 1, 3), "a"),
]
mi = pd.MultiIndex.from_tuples(values, names=["date", None])
ser = pd.Series([3, 2, 2.5, 4], index=mi)
result = ser.groupby(level=1).mean()
expected = pd.Series([3.0, 2.5], index=["a", "b"])
tm.assert_series_equal(result, expected)
def test_groupby_empty_list_raises():
# GH 5289
values = zip(range(10), range(10))
df = DataFrame(values, columns=["apple", "b"])
msg = "Grouper and axis must be same length"
with pytest.raises(ValueError, match=msg):
df.groupby([[]])
def test_groupby_multiindex_series_keys_len_equal_group_axis():
# GH 25704
index_array = [["x", "x"], ["a", "b"], ["k", "k"]]
index_names = ["first", "second", "third"]
ri = pd.MultiIndex.from_arrays(index_array, names=index_names)
s = pd.Series(data=[1, 2], index=ri)
result = s.groupby(["first", "third"]).sum()
index_array = [["x"], ["k"]]
index_names = ["first", "third"]
ei = pd.MultiIndex.from_arrays(index_array, names=index_names)
expected = pd.Series([3], index=ei)
tm.assert_series_equal(result, expected)
def test_groupby_groups_in_BaseGrouper():
# GH 26326
# Test if DataFrame grouped with a pandas.Grouper has correct groups
mi = pd.MultiIndex.from_product([["A", "B"], ["C", "D"]], names=["alpha", "beta"])
df = pd.DataFrame({"foo": [1, 2, 1, 2], "bar": [1, 2, 3, 4]}, index=mi)
result = df.groupby([pd.Grouper(level="alpha"), "beta"])
expected = df.groupby(["alpha", "beta"])
assert result.groups == expected.groups
result = df.groupby(["beta", pd.Grouper(level="alpha")])
expected = df.groupby(["beta", "alpha"])
assert result.groups == expected.groups
@pytest.mark.parametrize("group_name", ["x", ["x"]])
def test_groupby_axis_1(group_name):
# GH 27614
df = pd.DataFrame(
np.arange(12).reshape(3, 4), index=[0, 1, 0], columns=[10, 20, 10, 20]
)
df.index.name = "y"
df.columns.name = "x"
results = df.groupby(group_name, axis=1).sum()
expected = df.T.groupby(group_name).sum().T
tm.assert_frame_equal(results, expected)
# test on MI column
iterables = [["bar", "baz", "foo"], ["one", "two"]]
mi = pd.MultiIndex.from_product(iterables=iterables, names=["x", "x1"])
df = pd.DataFrame(np.arange(18).reshape(3, 6), index=[0, 1, 0], columns=mi)
results = df.groupby(group_name, axis=1).sum()
expected = df.T.groupby(group_name).sum().T
tm.assert_frame_equal(results, expected)
@pytest.mark.parametrize(
"op, expected",
[
(
"shift",
{
"time": [
None,
None,
Timestamp("2019-01-01 12:00:00"),
Timestamp("2019-01-01 12:30:00"),
None,
None,
]
},
),
(
"bfill",
{
"time": [
Timestamp("2019-01-01 12:00:00"),
Timestamp("2019-01-01 12:30:00"),
Timestamp("2019-01-01 14:00:00"),
Timestamp("2019-01-01 14:30:00"),
Timestamp("2019-01-01 14:00:00"),
Timestamp("2019-01-01 14:30:00"),
]
},
),
(
"ffill",
{
"time": [
Timestamp("2019-01-01 12:00:00"),
Timestamp("2019-01-01 12:30:00"),
Timestamp("2019-01-01 12:00:00"),
Timestamp("2019-01-01 12:30:00"),
Timestamp("2019-01-01 14:00:00"),
Timestamp("2019-01-01 14:30:00"),
]
},
),
],
)
def test_shift_bfill_ffill_tz(tz_naive_fixture, op, expected):
# GH19995, GH27992: Check that timezone does not drop in shift, bfill, and ffill
tz = tz_naive_fixture
data = {
"id": ["A", "B", "A", "B", "A", "B"],
"time": [
Timestamp("2019-01-01 12:00:00"),
Timestamp("2019-01-01 12:30:00"),
None,
None,
Timestamp("2019-01-01 14:00:00"),
| Timestamp("2019-01-01 14:30:00") | pandas.Timestamp |
import argparse
import numpy as np
import pandas as pd
from pybedtools import BedTool
def parse_args(args):
"""define arguments"""
# create argparse variables
parser = argparse.ArgumentParser(description="FIMO_filter")
parser.add_argument(
"fimo_file", type=str, help="Input location of FIMO.tsv file"
)
parser.add_argument(
"promoter_bedfile", type=str, help="Input location of promoter bedfile"
)
parser.add_argument(
"motifs_bed", type=str, help="Output location of motifs bed file"
)
parser.add_argument(
"q_value", type=float, help="q_value threshold for filtering"
)
parser.add_argument(
"--prevent_shorten_sequence_name",
help="Option to prevent shortening of sequence name up to the first colon.",
action="store_true",
)
return parser.parse_args(
args
) # let argparse grab args from sys.argv itself to allow for testing in module import
def fimo_qfilter(fimo_file, q_value, prevent_shorten_sequence_name):
"""this uses a meme-suite version 5 fimo.tsv file, filters by a q-value, and returns a pandas df"""
# read in fimo.tsv file
fimo = | pd.read_table(fimo_file, sep="\t") | pandas.read_table |
import argparse
import pickle
import pandas as pd
teamColumns = ['rebounds', 'disposals', 'kicks', 'handballs', 'clearances', 'hitouts', 'marks', 'inside50s', 'tackles', 'clangers', 'frees', 'contested', 'uncontested', 'contestedMarks', 'marksIn50', 'onePercenters', 'bounces']
def get_team_probability(modelStorage, data_frame, home_team, away_team):
home = data_frame[data_frame['team'] == home_team][teamColumns].mean()
away = data_frame[data_frame['team'] == away_team][teamColumns].mean()
    # Express each home-team stat relative to the away team's average for the same stat
    # (e.g. relRebounds = home rebounds / away rebounds), matching the model's feature names.
    for col in teamColumns:
        home['rel' + col[0].upper() + col[1:]] = home[col] / away[col]
home['home'] = 1
return modelStorage.randomForest.predict_proba([home[modelStorage.columns]])[0][1]
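
# Example invocation (the script and file names below are placeholders):
#   python predict_match.py model.pkl team_stats.csv "Home Team" "Away Team"
# The pickled model is loaded, each team's historical stats are averaged, and
# get_team_probability() returns the random forest's probability of a home win.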
parser = argparse.ArgumentParser(description="Predict match result given a model and historical data for teams")
parser.add_argument("model")
parser.add_argument("stats")
parser.add_argument("home")
parser.add_argument("away")
parser.add_argument('--min_round', type=int)
parser.add_argument('--max_round', type=int)
args = parser.parse_args()
with open(args.model, 'rb') as file:
modelStorage = pickle.load(file)
randomForest = modelStorage.randomForest
#Load stats
data = | pd.read_csv(args.stats) | pandas.read_csv |
# Copyright (c) 2017 pandas-gbq Authors All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
# -*- coding: utf-8 -*-
import datetime
import decimal
from io import StringIO
import textwrap
from unittest import mock
import db_dtypes
import numpy
import pandas
import pandas.testing
import pytest
from pandas_gbq import exceptions
from pandas_gbq.features import FEATURES
from pandas_gbq import load
def load_method(bqclient, api_method):
if not FEATURES.bigquery_has_from_dataframe_with_csv and api_method == "load_csv":
return bqclient.load_table_from_file
return bqclient.load_table_from_dataframe
def test_encode_chunk_with_unicode():
"""Test that a dataframe containing unicode can be encoded as a file.
See: https://github.com/pydata/pandas-gbq/issues/106
"""
df = pandas.DataFrame(
numpy.random.randn(6, 4), index=range(6), columns=list("ABCD")
)
df["s"] = "信用卡"
csv_buffer = load.encode_chunk(df)
csv_bytes = csv_buffer.read()
csv_string = csv_bytes.decode("utf-8")
assert "信用卡" in csv_string
def test_encode_chunk_with_floats():
"""Test that floats in a dataframe are encoded with at most 17 significant
figures.
See: https://github.com/pydata/pandas-gbq/issues/192 and
https://github.com/pydata/pandas-gbq/issues/326
"""
input_csv = textwrap.dedent(
"""01/01/17 23:00,0.14285714285714285,4
01/02/17 22:00,1.05148,3
01/03/17 21:00,1.05153,2
01/04/17 20:00,3.141592653589793,1
01/05/17 19:00,2.0988936657440586e+43,0
"""
)
input_df = pandas.read_csv(
StringIO(input_csv), header=None, float_precision="round_trip"
)
csv_buffer = load.encode_chunk(input_df)
round_trip = pandas.read_csv(csv_buffer, header=None, float_precision="round_trip")
pandas.testing.assert_frame_equal(
round_trip,
input_df,
check_exact=True,
)
def test_encode_chunk_with_newlines():
"""See: https://github.com/pydata/pandas-gbq/issues/180"""
df = pandas.DataFrame({"s": ["abcd", "ef\ngh", "ij\r\nkl"]})
csv_buffer = load.encode_chunk(df)
csv_bytes = csv_buffer.read()
csv_string = csv_bytes.decode("utf-8")
assert "abcd" in csv_string
assert '"ef\ngh"' in csv_string
assert '"ij\r\nkl"' in csv_string
def test_split_dataframe():
df = pandas.DataFrame(numpy.random.randn(6, 4), index=range(6))
chunks = list(load.split_dataframe(df, chunksize=2))
assert len(chunks) == 3
remaining, chunk = chunks[0]
assert remaining == 4
assert len(chunk.index) == 2
def test_encode_chunks_with_chunksize_none():
df = pandas.DataFrame(numpy.random.randn(6, 4), index=range(6))
chunks = list(load.split_dataframe(df))
assert len(chunks) == 1
remaining, chunk = chunks[0]
assert remaining == 0
assert len(chunk.index) == 6
def test_load_csv_from_dataframe_allows_client_to_generate_schema(mock_bigquery_client):
import google.cloud.bigquery
df = | pandas.DataFrame({"int_col": [1, 2, 3]}) | pandas.DataFrame |
"""Metadata data classes."""
import copy
import datetime
import logging
import os
import re
import sys
from functools import lru_cache
from pathlib import Path
from typing import (
Any,
Callable,
Dict,
Iterable,
List,
Literal,
Optional,
Tuple,
Type,
Union,
)
import jinja2
import pandas as pd
import pyarrow as pa
import pydantic
import sqlalchemy as sa
from pydantic.types import DirectoryPath
from pudl.metadata.codes import CODE_METADATA
from pudl.metadata.constants import (
CONSTRAINT_DTYPES,
CONTRIBUTORS,
FIELD_DTYPES_PANDAS,
FIELD_DTYPES_PYARROW,
FIELD_DTYPES_SQL,
LICENSES,
PERIODS,
)
from pudl.metadata.fields import (
FIELD_METADATA,
FIELD_METADATA_BY_GROUP,
FIELD_METADATA_BY_RESOURCE,
)
from pudl.metadata.helpers import (
expand_periodic_column_names,
format_errors,
groupby_aggregate,
most_and_more_frequent,
split_period,
)
from pudl.metadata.resources import FOREIGN_KEYS, RESOURCE_METADATA, eia861
from pudl.metadata.sources import SOURCES
logger = logging.getLogger(__name__)
# ---- Helpers ---- #
def _unique(*args: Iterable) -> list:
"""Return a list of all unique values, in order of first appearance.
Args:
args: Iterables of values.
Examples:
>>> _unique([0, 2], (2, 1))
[0, 2, 1]
>>> _unique([{'x': 0, 'y': 1}, {'y': 1, 'x': 0}], [{'z': 2}])
[{'x': 0, 'y': 1}, {'z': 2}]
"""
values = []
for parent in args:
for child in parent:
if child not in values:
values.append(child)
return values
def _format_for_sql(x: Any, identifier: bool = False) -> str: # noqa: C901
"""Format value for use in raw SQL(ite).
Args:
x: Value to format.
identifier: Whether `x` represents an identifier
(e.g. table, column) name.
Examples:
>>> _format_for_sql('table_name', identifier=True)
'"table_name"'
>>> _format_for_sql('any string')
"'any string'"
>>> _format_for_sql("Single's quote")
"'Single''s quote'"
>>> _format_for_sql(None)
'null'
>>> _format_for_sql(1)
'1'
>>> _format_for_sql(True)
'True'
>>> _format_for_sql(False)
'False'
>>> _format_for_sql(re.compile("^[^']*$"))
"'^[^'']*$'"
>>> _format_for_sql(datetime.date(2020, 1, 2))
"'2020-01-02'"
>>> _format_for_sql(datetime.datetime(2020, 1, 2, 3, 4, 5, 6))
"'2020-01-02 03:04:05'"
"""
if identifier:
if isinstance(x, str):
# Table and column names are escaped with double quotes (")
return f'"{x}"'
raise ValueError("Identifier must be a string")
if x is None:
return "null"
elif isinstance(x, (int, float)):
# NOTE: nan and (-)inf are TEXT in sqlite but numeric in postgresSQL
return str(x)
    elif x is True:
        # NOTE: unreachable in practice, since bool is a subclass of int and
        # True/False are caught by the isinstance check above (rendering as 'True'/'False').
        return "TRUE"
    elif x is False:
        return "FALSE"
elif isinstance(x, re.Pattern):
x = x.pattern
elif isinstance(x, datetime.datetime):
# Check datetime.datetime first, since also datetime.date
x = x.strftime("%Y-%m-%d %H:%M:%S")
elif isinstance(x, datetime.date):
x = x.strftime("%Y-%m-%d")
if not isinstance(x, str):
raise ValueError(f"Cannot format type {type(x)} for SQL")
# Single quotes (') are escaped by doubling them ('')
x = x.replace("'", "''")
return f"'{x}'"
JINJA_ENVIRONMENT: jinja2.Environment = jinja2.Environment(
loader=jinja2.FileSystemLoader(
os.path.join(os.path.dirname(__file__), "templates")
),
autoescape=True,
)
# ---- Base ---- #
class Base(pydantic.BaseModel):
"""Custom Pydantic base class.
It overrides :meth:`fields` and :meth:`schema` to allow properties with those names.
To use them in a class, use an underscore prefix and an alias.
Examples:
>>> class Class(Base):
... fields_: List[str] = pydantic.Field(alias="fields")
>>> m = Class(fields=['x'])
>>> m
Class(fields=['x'])
>>> m.fields
['x']
>>> m.fields = ['y']
>>> m.dict()
{'fields': ['y']}
"""
class Config:
"""Custom Pydantic configuration."""
validate_all: bool = True
validate_assignment: bool = True
extra: str = "forbid"
arbitrary_types_allowed = True
def dict(self, *args, by_alias=True, **kwargs) -> dict: # noqa: A003
"""Return as a dictionary."""
return super().dict(*args, by_alias=by_alias, **kwargs)
def json(self, *args, by_alias=True, **kwargs) -> str:
"""Return as JSON."""
return super().json(*args, by_alias=by_alias, **kwargs)
def __getattribute__(self, name: str) -> Any:
"""Get attribute."""
if name in ("fields", "schema") and f"{name}_" in self.__dict__:
name = f"{name}_"
return super().__getattribute__(name)
def __setattr__(self, name, value) -> None:
"""Set attribute."""
if name in ("fields", "schema") and f"{name}_" in self.__dict__:
name = f"{name}_"
super().__setattr__(name, value)
def __repr_args__(self) -> List[Tuple[str, Any]]:
"""Returns the attributes to show in __str__, __repr__, and __pretty__."""
return [
(a[:-1] if a in ("fields_", "schema_") else a, v)
for a, v in self.__dict__.items()
]
# ---- Class attribute types ---- #
# NOTE: Using regex=r"^\S(.*\S)*$" to fail on whitespace is too slow
String = pydantic.constr(min_length=1, strict=True, regex=r"^\S+(\s+\S+)*$")
"""Non-empty :class:`str` with no trailing or leading whitespace."""
SnakeCase = pydantic.constr(
min_length=1, strict=True, regex=r"^[a-z][a-z0-9]*(_[a-z0-9]+)*$"
)
"""Snake-case variable name :class:`str` (e.g. 'pudl', 'entity_eia860')."""
Bool = pydantic.StrictBool
"""Any :class:`bool` (`True` or `False`)."""
Float = pydantic.StrictFloat
"""Any :class:`float`."""
Int = pydantic.StrictInt
"""Any :class:`int`."""
PositiveInt = pydantic.conint(ge=0, strict=True)
"""Positive :class:`int`."""
PositiveFloat = pydantic.confloat(ge=0, strict=True)
"""Positive :class:`float`."""
Email = pydantic.EmailStr
"""String representing an email."""
HttpUrl = pydantic.AnyHttpUrl
"""Http(s) URL."""
class BaseType:
"""Base class for custom pydantic types."""
@classmethod
def __get_validators__(cls) -> Callable:
"""Yield validator methods."""
yield cls.validate
class Date(BaseType):
"""Any :class:`datetime.date`."""
@classmethod
def validate(cls, value: Any) -> datetime.date:
"""Validate as date."""
if not isinstance(value, datetime.date):
raise TypeError("value is not a date")
return value
class Datetime(BaseType):
"""Any :class:`datetime.datetime`."""
@classmethod
def validate(cls, value: Any) -> datetime.datetime:
"""Validate as datetime."""
if not isinstance(value, datetime.datetime):
raise TypeError("value is not a datetime")
return value
class Pattern(BaseType):
"""Regular expression pattern."""
@classmethod
def validate(cls, value: Any) -> re.Pattern:
"""Validate as pattern."""
if not isinstance(value, (str, re.Pattern)):
raise TypeError("value is not a string or compiled regular expression")
if isinstance(value, str):
try:
value = re.compile(value)
except re.error:
raise ValueError("string is not a valid regular expression")
return value
def StrictList(item_type: Type = Any) -> pydantic.ConstrainedList: # noqa: N802
"""Non-empty :class:`list`.
Allows :class:`list`, :class:`tuple`, :class:`set`, :class:`frozenset`,
:class:`collections.deque`, or generators and casts to a :class:`list`.
"""
return pydantic.conlist(item_type=item_type, min_items=1)
# ---- Class attribute validators ---- #
def _check_unique(value: list = None) -> Optional[list]:
"""Check that input list has unique values."""
if value:
for i in range(len(value)):
if value[i] in value[:i]:
raise ValueError(f"contains duplicate {value[i]}")
return value
def _validator(*names, fn: Callable) -> Callable:
"""Construct reusable Pydantic validator.
Args:
names: Names of attributes to validate.
fn: Validation function (see :meth:`pydantic.validator`).
Examples:
>>> class Class(Base):
... x: list = None
... _check_unique = _validator("x", fn=_check_unique)
>>> Class(y=[0, 0])
Traceback (most recent call last):
ValidationError: ...
"""
return pydantic.validator(*names, allow_reuse=True)(fn)
# ---- Classes: Field ---- #
class FieldConstraints(Base):
"""Field constraints (`resource.schema.fields[...].constraints`).
See https://specs.frictionlessdata.io/table-schema/#constraints.
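
    Examples:
        An illustrative cross-field check (the values are made up):

        >>> FieldConstraints(minimum=1, maximum=0)
        Traceback (most recent call last):
        ValidationError: ...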
"""
required: Bool = False
unique: Bool = False
min_length: PositiveInt = None
max_length: PositiveInt = None
minimum: Union[Int, Float, Date, Datetime] = None
maximum: Union[Int, Float, Date, Datetime] = None
pattern: Pattern = None
# TODO: Replace with String (min_length=1) once "" removed from enums
enum: StrictList(Union[pydantic.StrictStr, Int, Float, Bool, Date, Datetime]) = None
_check_unique = _validator("enum", fn=_check_unique)
@pydantic.validator("max_length")
def _check_max_length(cls, value, values): # noqa: N805
minimum, maximum = values.get("min_length"), value
if minimum is not None and maximum is not None:
if type(minimum) is not type(maximum):
raise ValueError("must be same type as min_length")
if maximum < minimum:
raise ValueError("must be greater or equal to min_length")
return value
@pydantic.validator("maximum")
def _check_max(cls, value, values): # noqa: N805
minimum, maximum = values.get("minimum"), value
if minimum is not None and maximum is not None:
if type(minimum) is not type(maximum):
raise ValueError("must be same type as minimum")
if maximum < minimum:
raise ValueError("must be greater or equal to minimum")
return value
class FieldHarvest(Base):
"""Field harvest parameters (`resource.schema.fields[...].harvest`)."""
# NOTE: Callables with defaults must use pydantic.Field() to not bind to self
aggregate: Callable[[pd.Series], pd.Series] = pydantic.Field(
default=lambda x: most_and_more_frequent(x, min_frequency=0.7)
)
"""Computes a single value from all field values in a group."""
tolerance: PositiveFloat = 0.0
"""Fraction of invalid groups above which result is considered invalid."""
class Encoder(Base):
"""A class that allows us to standardize reported categorical codes.
Often the original data we are integrating uses short codes to indicate a
categorical value, like ``ST`` in place of "steam turbine" or ``LIG`` in place of
"lignite coal". Many of these coded fields contain non-standard codes due to
data-entry errors. The codes have also evolved over the years.
In order to allow easy comparison of records across all years and tables, we define
a standard set of codes, a mapping from non-standard codes to standard codes (where
possible), and a set of known but unfixable codes which will be ignored and replaced
with NA values. These definitions can be found in :mod:`pudl.metadata.codes` and we
refer to these as coding tables.
In our metadata structures, each coding table is defined just like any other DB
table, with the addition of an associated ``Encoder`` object defining the standard,
fixable, and ignored codes.
In addition, a :class:`Package` class that has been instantiated using the
:meth:`Package.from_resource_ids` method will associate an `Encoder` object with any
column that has a foreign key constraint referring to a coding table (This
column-level encoder is same as the encoder associated with the referenced table).
This `Encoder` can be used to standardize the codes found within the column.
:class:`Field` and :class:`Resource` objects have ``encode()`` methods that will
use the column-level encoders to recode the original values, either for a single
column or for all coded columns within a Resource, given either a corresponding
:class:`pandas.Series` or :class:`pandas.DataFrame` containing actual values.
If any unrecognized values are encountered, an exception will be raised, alerting
us that a new code has been identified, and needs to be classified as fixable or
to be ignored.
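
    Examples:
        A minimal, illustrative sketch (the codes below are invented and do not
        come from any real PUDL coding table):

        >>> enc = Encoder(
        ...     df=pd.DataFrame({"code": ["ST", "GT"], "description": ["steam", "gas"]}),
        ...     code_fixes={"st": "ST"},
        ...     ignored_codes=["xx"],
        ...     name="example_codes",
        ... )
        >>> enc.code_map["st"]
        'ST'
        >>> enc.code_map["xx"] is pd.NA
        True
        >>> enc.encode(pd.Series(["st", "GT"])).tolist()
        ['ST', 'GT']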
"""
df: pd.DataFrame
"""A table associating short codes with long descriptions and other information.
Each coding table contains at least a ``code`` column containing the standard codes
and a ``description`` column with a human readable explanation of what the code
stands for. Additional metadata pertaining to the codes and their categories may
also appear in this dataframe, which will be loaded into the PUDL DB as a static
table. The ``code`` column is a natural primary key and must contain no duplicate
values.
"""
ignored_codes: List[Union[Int, str]] = []
"""A list of non-standard codes which appear in the data, and will be set to NA.
These codes may be the result of data entry errors, and we are unable to map them
to the appropriate canonical code. They are discarded from the raw input data.
"""
code_fixes: Dict[Union[Int, String], Union[Int, String]] = {}
"""A dictionary mapping non-standard codes to canonical, standardized codes.
The intended meanings of some non-standard codes are clear, and therefore they can
be mapped to the standardized, canonical codes with confidence. Sometimes these are
    the result of data entry errors or changes in the standard codes over time.
"""
name: String = None
"""The name of the code. """
@pydantic.validator("df")
def _df_is_encoding_table(cls, df): # noqa: N805
"""Verify that the coding table provides both codes and descriptions."""
errors = []
if "code" not in df.columns or "description" not in df.columns:
errors.append(
"Encoding tables must contain both 'code' & 'description' columns."
)
if len(df.code) != len(df.code.unique()):
dupes = df[df.duplicated("code")].code.to_list()
errors.append(f"Duplicate codes {dupes} found in coding table")
if errors:
raise ValueError(format_errors(*errors, pydantic=True))
return df
@pydantic.validator("ignored_codes")
def _good_and_ignored_codes_are_disjoint(cls, ignored_codes, values): # noqa: N805
"""Check that there's no overlap between good and ignored codes."""
if "df" not in values:
return ignored_codes
errors = []
overlap = set(values["df"]["code"]).intersection(ignored_codes)
if overlap:
errors.append(f"Overlap found between good and ignored codes: {overlap}.")
if errors:
raise ValueError(format_errors(*errors, pydantic=True))
return ignored_codes
@pydantic.validator("code_fixes")
def _good_and_fixable_codes_are_disjoint(cls, code_fixes, values): # noqa: N805
"""Check that there's no overlap between the good and fixable codes."""
if "df" not in values:
return code_fixes
errors = []
overlap = set(values["df"]["code"]).intersection(code_fixes)
if overlap:
errors.append(f"Overlap found between good and fixable codes: {overlap}")
if errors:
raise ValueError(format_errors(*errors, pydantic=True))
return code_fixes
@pydantic.validator("code_fixes")
def _fixable_and_ignored_codes_are_disjoint(cls, code_fixes, values): # noqa: N805
"""Check that there's no overlap between the ignored and fixable codes."""
if "ignored_codes" not in values:
return code_fixes
errors = []
overlap = set(code_fixes).intersection(values["ignored_codes"])
if overlap:
errors.append(f"Overlap found between fixable and ignored codes: {overlap}")
if errors:
raise ValueError(format_errors(*errors, pydantic=True))
return code_fixes
@pydantic.validator("code_fixes")
def _check_fixed_codes_are_good_codes(cls, code_fixes, values): # noqa: N805
"""Check that every every fixed code is also one of the good codes."""
if "df" not in values:
return code_fixes
errors = []
bad_codes = set(code_fixes.values()).difference(values["df"]["code"])
if bad_codes:
errors.append(
f"Some fixed codes aren't in the list of good codes: {bad_codes}"
)
if errors:
raise ValueError(format_errors(*errors, pydantic=True))
return code_fixes
@property
def code_map(self) -> Dict[str, Union[str, type(pd.NA)]]:
"""A mapping of all known codes to their standardized values, or NA."""
code_map = {code: code for code in self.df["code"]}
code_map.update(self.code_fixes)
code_map.update({code: pd.NA for code in self.ignored_codes})
return code_map
def encode(
self,
col: pd.Series,
dtype: Union[type, None] = None,
) -> pd.Series:
"""Apply the stored code mapping to an input Series."""
# Every value in the Series should appear in the map. If that's not the
# case we want to hear about it so we don't wipe out data unknowingly.
unknown_codes = set(col.dropna()).difference(self.code_map)
if unknown_codes:
raise ValueError(f"Found unknown codes while encoding: {unknown_codes=}")
col = col.map(self.code_map)
if dtype:
col = col.astype(dtype)
return col
@staticmethod
def dict_from_id(x: str) -> dict:
"""Look up the encoder by coding table name in the metadata."""
return copy.deepcopy(RESOURCE_METADATA[x]).get("encoder", None)
@classmethod
def from_id(cls, x: str) -> "Encoder":
"""Construct an Encoder based on `Resource.name` of a coding table."""
return cls(**cls.dict_from_id(x))
@classmethod
def from_code_id(cls, x: str) -> "Encoder":
"""Construct an Encoder based on looking up the name of a coding table directly in the codes metadata."""
return cls(**copy.deepcopy(CODE_METADATA[x]), name=x)
def to_rst(
self, top_dir: DirectoryPath, csv_subdir: DirectoryPath, is_header: Bool
) -> String:
"""Ouput dataframe to a csv for use in jinja template. Then output to an RST file."""
self.df.to_csv(Path(top_dir) / csv_subdir / f"{self.name}.csv", index=False)
template = JINJA_ENVIRONMENT.get_template("codemetadata.rst.jinja")
rendered = template.render(
Encoder=self,
description=RESOURCE_METADATA[self.name]["description"],
csv_filepath=(Path("/") / csv_subdir / f"{self.name}.csv"),
is_header=is_header,
)
return rendered
class Field(Base):
"""Field (`resource.schema.fields[...]`).
See https://specs.frictionlessdata.io/table-schema/#field-descriptors.
Examples:
>>> field = Field(name='x', type='string', constraints={'enum': ['x', 'y']})
>>> field.to_pandas_dtype()
CategoricalDtype(categories=['x', 'y'], ordered=False)
>>> field.to_sql()
Column('x', Enum('x', 'y'), CheckConstraint(...), table=None)
>>> field = Field.from_id('utility_id_eia')
>>> field.name
'utility_id_eia'
"""
name: SnakeCase
type: Literal[ # noqa: A003
"string", "number", "integer", "boolean", "date", "datetime", "year"
]
format: Literal["default"] = "default" # noqa: A003
description: String = None
unit: String = None
constraints: FieldConstraints = {}
harvest: FieldHarvest = {}
encoder: Encoder = None
@pydantic.validator("constraints")
def _check_constraints(cls, value, values): # noqa: N805, C901
if "type" not in values:
return value
dtype = values["type"]
errors = []
for key in ("min_length", "max_length", "pattern"):
if getattr(value, key) is not None and dtype != "string":
errors.append(f"{key} not supported by {dtype} field")
for key in ("minimum", "maximum"):
x = getattr(value, key)
if x is not None:
if dtype in ("string", "boolean"):
errors.append(f"{key} not supported by {dtype} field")
elif not isinstance(x, CONSTRAINT_DTYPES[dtype]):
errors.append(f"{key} not {dtype}")
if value.enum:
for x in value.enum:
if not isinstance(x, CONSTRAINT_DTYPES[dtype]):
errors.append(f"enum value {x} not {dtype}")
if errors:
raise ValueError(format_errors(*errors, pydantic=True))
return value
@pydantic.validator("encoder")
def _check_encoder(cls, value, values): # noqa: N805
if "type" not in values or value is None:
return value
errors = []
dtype = values["type"]
if dtype not in ["string", "integer"]:
errors.append(
"Encoding only supported for string and integer fields, found "
f"{dtype}"
)
if errors:
raise ValueError(format_errors(*errors, pydantic=True))
return value
@staticmethod
def dict_from_id(x: str) -> dict:
"""Construct dictionary from PUDL identifier (`Field.name`)."""
return {"name": x, **copy.deepcopy(FIELD_METADATA[x])}
@classmethod
def from_id(cls, x: str) -> "Field":
"""Construct from PUDL identifier (`Field.name`)."""
return cls(**cls.dict_from_id(x))
def to_pandas_dtype(self, compact: bool = False) -> Union[str, pd.CategoricalDtype]:
"""Return Pandas data type.
Args:
compact: Whether to return a low-memory data type
(32-bit integer or float).
"""
if self.constraints.enum:
return pd.CategoricalDtype(self.constraints.enum)
if compact:
if self.type == "integer":
return "Int32"
if self.type == "number":
return "float32"
return FIELD_DTYPES_PANDAS[self.type]
def to_sql_dtype(self) -> sa.sql.visitors.VisitableType:
"""Return SQLAlchemy data type."""
if self.constraints.enum and self.type == "string":
return sa.Enum(*self.constraints.enum)
return FIELD_DTYPES_SQL[self.type]
def to_pyarrow_dtype(self) -> pa.lib.DataType:
"""Return PyArrow data type."""
if self.constraints.enum and self.type == "string":
return pa.dictionary(pa.int32(), pa.string(), ordered=False)
return FIELD_DTYPES_PYARROW[self.type]
def to_pyarrow(self) -> pa.Field:
"""Return a PyArrow Field appropriate to the field."""
return pa.field(
name=self.name,
type=self.to_pyarrow_dtype(),
nullable=(not self.constraints.required),
metadata={"description": self.description},
)
def to_sql( # noqa: C901
self,
dialect: Literal["sqlite"] = "sqlite",
check_types: bool = True,
check_values: bool = True,
) -> sa.Column:
"""Return equivalent SQL column."""
if dialect != "sqlite":
raise NotImplementedError(f"Dialect {dialect} is not supported")
checks = []
name = _format_for_sql(self.name, identifier=True)
if check_types:
# Required with TYPEOF since TYPEOF(NULL) = 'null'
prefix = "" if self.constraints.required else f"{name} IS NULL OR "
# Field type
if self.type == "string":
checks.append(f"{prefix}TYPEOF({name}) = 'text'")
elif self.type in ("integer", "year"):
checks.append(f"{prefix}TYPEOF({name}) = 'integer'")
elif self.type == "number":
checks.append(f"{prefix}TYPEOF({name}) = 'real'")
elif self.type == "boolean":
# Just IN (0, 1) accepts floats equal to 0, 1 (0.0, 1.0)
checks.append(
f"{prefix}(TYPEOF({name}) = 'integer' AND {name} IN (0, 1))"
)
elif self.type == "date":
checks.append(f"{name} IS DATE({name})")
elif self.type == "datetime":
checks.append(f"{name} IS DATETIME({name})")
if check_values:
# Field constraints
if self.constraints.min_length is not None:
checks.append(f"LENGTH({name}) >= {self.constraints.min_length}")
if self.constraints.max_length is not None:
checks.append(f"LENGTH({name}) <= {self.constraints.max_length}")
if self.constraints.minimum is not None:
minimum = _format_for_sql(self.constraints.minimum)
checks.append(f"{name} >= {minimum}")
if self.constraints.maximum is not None:
maximum = _format_for_sql(self.constraints.maximum)
checks.append(f"{name} <= {maximum}")
if self.constraints.pattern:
pattern = _format_for_sql(self.constraints.pattern)
checks.append(f"{name} REGEXP {pattern}")
if self.constraints.enum:
enum = [_format_for_sql(x) for x in self.constraints.enum]
checks.append(f"{name} IN ({', '.join(enum)})")
return sa.Column(
self.name,
self.to_sql_dtype(),
*[sa.CheckConstraint(check) for check in checks],
nullable=not self.constraints.required,
unique=self.constraints.unique,
comment=self.description,
)
def encode(self, col: pd.Series, dtype: Union[type, None] = None) -> pd.Series:
"""Recode the Field if it has an associated encoder."""
return self.encoder.encode(col, dtype=dtype) if self.encoder else col
# ---- Classes: Resource ---- #
class ForeignKeyReference(Base):
"""Foreign key reference (`resource.schema.foreign_keys[...].reference`).
See https://specs.frictionlessdata.io/table-schema/#foreign-keys.
"""
resource: SnakeCase
fields_: StrictList(SnakeCase) = pydantic.Field(alias="fields")
_check_unique = _validator("fields_", fn=_check_unique)
class ForeignKey(Base):
"""Foreign key (`resource.schema.foreign_keys[...]`).
See https://specs.frictionlessdata.io/table-schema/#foreign-keys.
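
    Examples:
        A single-column key (the resource and field names are illustrative):

        >>> fk = ForeignKey(fields=["x"], reference={"resource": "b", "fields": ["x"]})
        >>> fk.is_simple()
        True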
"""
fields_: StrictList(SnakeCase) = pydantic.Field(alias="fields")
reference: ForeignKeyReference
_check_unique = _validator("fields_", fn=_check_unique)
@pydantic.validator("reference")
def _check_fields_equal_length(cls, value, values): # noqa: N805
if "fields_" in values:
if len(value.fields) != len(values["fields_"]):
raise ValueError("fields and reference.fields are not equal length")
return value
def is_simple(self) -> bool:
"""Indicate whether the FK relationship contains a single column."""
return True if len(self.fields) == 1 else False
def to_sql(self) -> sa.ForeignKeyConstraint:
"""Return equivalent SQL Foreign Key."""
return sa.ForeignKeyConstraint(
self.fields,
[f"{self.reference.resource}.{field}" for field in self.reference.fields],
)
class Schema(Base):
"""Table schema (`resource.schema`).
See https://specs.frictionlessdata.io/table-schema.
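
    Examples:
        A minimal schema (the field names are illustrative):

        >>> schema = Schema(fields=[{"name": "id", "type": "integer"}], primary_key=["id"])
        >>> schema.primary_key
        ['id']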
"""
fields_: StrictList(Field) = pydantic.Field(alias="fields")
missing_values: List[pydantic.StrictStr] = [""]
primary_key: StrictList(SnakeCase) = None
foreign_keys: List[ForeignKey] = []
_check_unique = _validator(
"missing_values", "primary_key", "foreign_keys", fn=_check_unique
)
@pydantic.validator("fields_")
def _check_field_names_unique(cls, value): # noqa: N805
_check_unique([f.name for f in value])
return value
@pydantic.validator("primary_key")
def _check_primary_key_in_fields(cls, value, values): # noqa: N805
if value is not None and "fields_" in values:
missing = []
names = [f.name for f in values["fields_"]]
for name in value:
if name in names:
# Flag primary key fields as required
field = values["fields_"][names.index(name)]
field.constraints.required = True
else:
                    missing.append(name)
if missing:
raise ValueError(f"names {missing} missing from fields")
return value
@pydantic.validator("foreign_keys", each_item=True)
def _check_foreign_key_in_fields(cls, value, values): # noqa: N805
if value and "fields_" in values:
names = [f.name for f in values["fields_"]]
missing = [x for x in value.fields if x not in names]
if missing:
raise ValueError(f"names {missing} missing from fields")
return value
class License(Base):
"""Data license (`package|resource.licenses[...]`).
See https://specs.frictionlessdata.io/data-package/#licenses.
"""
name: String
title: String
path: HttpUrl
@staticmethod
def dict_from_id(x: str) -> dict:
"""Construct dictionary from PUDL identifier."""
return copy.deepcopy(LICENSES[x])
@classmethod
def from_id(cls, x: str) -> "License":
"""Construct from PUDL identifier."""
return cls(**cls.dict_from_id(x))
class Contributor(Base):
"""Data contributor (`package.contributors[...]`).
See https://specs.frictionlessdata.io/data-package/#contributors.
"""
title: String
path: HttpUrl = None
email: Email = None
role: Literal[
"author", "contributor", "maintainer", "publisher", "wrangler"
] = "contributor"
organization: String = None
orcid: String = None
@staticmethod
def dict_from_id(x: str) -> dict:
"""Construct dictionary from PUDL identifier."""
return copy.deepcopy(CONTRIBUTORS[x])
@classmethod
def from_id(cls, x: str) -> "Contributor":
"""Construct from PUDL identifier."""
return cls(**cls.dict_from_id(x))
def __hash__(self):
"""Implements simple hash method.
Allows use of `set()` on a list of Contributor
"""
return hash(str(self))
class DataSource(Base):
"""A data source that has been integrated into PUDL.
This metadata is used for:
* Generating PUDL documentation.
* Annotating long-term archives of the raw input data on Zenodo.
* Defining what data partitions can be processed using PUDL.
It can also be used to populate the "source" fields of frictionless
data packages and data resources (`package|resource.sources[...]`).
See https://specs.frictionlessdata.io/data-package/#sources.
"""
name: SnakeCase
title: String = None
description: String = None
field_namespace: String = None
keywords: List[str] = []
path: HttpUrl = None
contributors: List[Contributor] = [] # Or should this be compiled from Resources?
license_raw: License
license_pudl: License
# concept_doi: Doi = None # Need to define a Doi type?
working_partitions: Dict[SnakeCase, Any] = {}
# agency: Agency # needs to be defined
email: Email = None
def get_resource_ids(self) -> List[str]:
"""Compile list of resoruce IDs associated with this data source."""
# Temporary check to use eia861.RESOURCE_METADATA directly
# eia861 is not currently included in the general RESOURCE_METADATA dict
resources = RESOURCE_METADATA
if self.name == "eia861":
resources = eia861.RESOURCE_METADATA
return sorted(
[
name
for name, value in resources.items()
if value.get("etl_group") == self.name
]
)
def get_temporal_coverage(self) -> str:
"""Return a string describing the time span covered by the data source."""
if "years" in self.working_partitions:
return f"{min(self.working_partitions['years'])}-{max(self.working_partitions['years'])}"
elif "year_month" in self.working_partitions:
return f"through {self.working_partitions['year_month']}"
else:
return ""
def to_rst(self) -> None:
"""Output a representation of the data source in RST for documentation."""
pass
@classmethod
def from_field_namespace(cls, x: str) -> List["DataSource"]:
"""Return list of DataSource objects by field namespace."""
return [
cls(**cls.dict_from_id(name))
for name, val in SOURCES.items()
if val.get("field_namespace") == x
]
@staticmethod
def dict_from_id(x: str) -> dict:
"""Look up the source by source name in the metadata."""
return {"name": x, **copy.deepcopy(SOURCES[x])}
@classmethod
def from_id(cls, x: str) -> "DataSource":
"""Construct Source by source name in the metadata."""
return cls(**cls.dict_from_id(x))
class ResourceHarvest(Base):
"""Resource harvest parameters (`resource.harvest`)."""
harvest: Bool = False
"""Whether to harvest from dataframes based on field names.
If `False`, the dataframe with the same name is used
and the process is limited to dropping unwanted fields.
"""
tolerance: PositiveFloat = 0.0
"""Fraction of invalid fields above which result is considerd invalid."""
class Resource(Base):
"""Tabular data resource (`package.resources[...]`).
See https://specs.frictionlessdata.io/tabular-data-resource.
Examples:
A simple example illustrates the conversion to SQLAlchemy objects.
>>> fields = [{'name': 'x', 'type': 'year'}, {'name': 'y', 'type': 'string'}]
>>> fkeys = [{'fields': ['x', 'y'], 'reference': {'resource': 'b', 'fields': ['x', 'y']}}]
>>> schema = {'fields': fields, 'primary_key': ['x'], 'foreign_keys': fkeys}
>>> resource = Resource(name='a', schema=schema)
>>> table = resource.to_sql()
>>> table.columns.x
Column('x', Integer(), ForeignKey('b.x'), CheckConstraint(...), table=<a>, primary_key=True, nullable=False)
>>> table.columns.y
Column('y', Text(), ForeignKey('b.y'), CheckConstraint(...), table=<a>)
To illustrate harvesting operations,
say we have a resource with two fields - a primary key (`id`) and a data field -
which we want to harvest from two different dataframes.
>>> from pudl.metadata.helpers import unique, as_dict
>>> fields = [
... {'name': 'id', 'type': 'integer'},
... {'name': 'x', 'type': 'integer', 'harvest': {'aggregate': unique, 'tolerance': 0.25}}
... ]
>>> resource = Resource(**{
... 'name': 'a',
... 'harvest': {'harvest': True},
... 'schema': {'fields': fields, 'primary_key': ['id']}
... })
>>> dfs = {
... 'a': pd.DataFrame({'id': [1, 1, 2, 2], 'x': [1, 1, 2, 2]}),
... 'b': pd.DataFrame({'id': [2, 3, 3], 'x': [3, 4, 4]})
... }
Skip aggregation to access all the rows concatenated from the input dataframes.
The names of the input dataframes are used as the index.
>>> df, _ = resource.harvest_dfs(dfs, aggregate=False)
>>> df
id x
df
a 1 1
a 1 1
a 2 2
a 2 2
b 2 3
b 3 4
b 3 4
Field names and data types are enforced.
>>> resource.to_pandas_dtypes() == df.dtypes.apply(str).to_dict()
True
Alternatively, aggregate by primary key
(the default when :attr:`harvest`. `harvest=True`)
and report aggregation errors.
>>> df, report = resource.harvest_dfs(dfs)
>>> df
x
id
1 1
2 <NA>
3 4
>>> report['stats']
{'all': 2, 'invalid': 1, 'tolerance': 0.0, 'actual': 0.5}
>>> report['fields']['x']['stats']
{'all': 3, 'invalid': 1, 'tolerance': 0.25, 'actual': 0.33...}
>>> report['fields']['x']['errors']
id
2 Not unique.
Name: x, dtype: object
Customize the error values in the error report.
>>> error = lambda x, e: as_dict(x)
>>> df, report = resource.harvest_dfs(
... dfs, aggregate_kwargs={'raised': False, 'error': error}
... )
>>> report['fields']['x']['errors']
id
2 {'a': [2, 2], 'b': [3]}
Name: x, dtype: object
Limit harvesting to the input dataframe of the same name
by setting :attr:`harvest`. `harvest=False`.
>>> resource.harvest.harvest = False
>>> df, _ = resource.harvest_dfs(dfs, aggregate_kwargs={'raised': False})
>>> df
id x
df
a 1 1
a 1 1
a 2 2
a 2 2
Harvesting can also handle conversion to longer time periods.
Period harvesting requires primary key fields with a `datetime` data type,
except for `year` fields which can be integer.
>>> fields = [{'name': 'report_year', 'type': 'year'}]
>>> resource = Resource(**{
... 'name': 'table', 'harvest': {'harvest': True},
... 'schema': {'fields': fields, 'primary_key': ['report_year']}
... })
>>> df = pd.DataFrame({'report_date': ['2000-02-02', '2000-03-03']})
>>> resource.format_df(df)
report_year
0 2000-01-01
1 2000-01-01
>>> df = pd.DataFrame({'report_year': [2000, 2000]})
>>> resource.format_df(df)
report_year
0 2000-01-01
1 2000-01-01
"""
name: SnakeCase
title: String = None
description: String = None
harvest: ResourceHarvest = {}
schema_: Schema = pydantic.Field(alias="schema")
contributors: List[Contributor] = []
licenses: List[License] = []
sources: List[DataSource] = []
keywords: List[String] = []
encoder: Encoder = None
field_namespace: Literal[
"eia", "epacems", "ferc1", "ferc714", "glue", "pudl"
] = None
etl_group: Literal[
"eia860",
"eia861",
"eia923",
"entity_eia",
"epacems",
"ferc1",
"ferc1_disabled",
"ferc714",
"glue",
"static_ferc1",
"static_eia",
] = None
_check_unique = _validator(
"contributors", "keywords", "licenses", "sources", fn=_check_unique
)
@pydantic.validator("schema_")
def _check_harvest_primary_key(cls, value, values): # noqa: N805
if values["harvest"].harvest:
if not value.primary_key:
raise ValueError("Harvesting requires a primary key")
return value
@staticmethod
def dict_from_id(x: str) -> dict: # noqa: C901
"""Construct dictionary from PUDL identifier (`resource.name`).
* `schema.fields`
* Field names are expanded (:meth:`Field.from_id`).
* Field attributes are replaced with any specific to the
`resource.group` and `field.name`.
* `sources`: Source ids are expanded (:meth:`Source.from_id`).
* `licenses`: License ids are expanded (:meth:`License.from_id`).
* `contributors`: Contributor ids are fetched by source ids,
then expanded (:meth:`Contributor.from_id`).
* `keywords`: Keywords are fetched by source ids.
* `schema.foreign_keys`: Foreign keys are fetched by resource name.
"""
obj = copy.deepcopy(RESOURCE_METADATA[x])
obj["name"] = x
schema = obj["schema"]
# Expand fields
if "fields" in schema:
fields = []
for name in schema["fields"]:
# Lookup field by name
value = Field.dict_from_id(name)
# Update with any custom group-level metadata
namespace = obj.get("field_namespace")
if name in FIELD_METADATA_BY_GROUP.get(namespace, {}):
value = {**value, **FIELD_METADATA_BY_GROUP[namespace][name]}
# Update with any custom resource-level metadata
if name in FIELD_METADATA_BY_RESOURCE.get(x, {}):
value = {**value, **FIELD_METADATA_BY_RESOURCE[x][name]}
fields.append(value)
schema["fields"] = fields
# Expand sources
sources = obj.get("sources", [])
obj["sources"] = [
DataSource.from_id(value) for value in sources if value in SOURCES
]
encoder = obj.get("encoder", None)
obj["encoder"] = encoder
# Expand licenses (assign CC-BY-4.0 by default)
licenses = obj.get("licenses", ["cc-by-4.0"])
obj["licenses"] = [License.dict_from_id(value) for value in licenses]
# Lookup and insert contributors
if "contributors" in schema:
raise ValueError("Resource metadata contains explicit contributors")
contributors = []
for source in sources:
if source in SOURCES:
contributors.extend(DataSource.from_id(source).contributors)
obj["contributors"] = set(contributors)
# Lookup and insert keywords
if "keywords" in schema:
raise ValueError("Resource metadata contains explicit keywords")
keywords = []
for source in sources:
if source in SOURCES:
keywords.extend(DataSource.from_id(source).keywords)
obj["keywords"] = sorted(set(keywords))
# Insert foreign keys
if "foreign_keys" in schema:
raise ValueError("Resource metadata contains explicit foreign keys")
schema["foreign_keys"] = FOREIGN_KEYS.get(x, [])
# Delete foreign key rules
if "foreign_key_rules" in schema:
del schema["foreign_key_rules"]
# Add encoders to columns as appropriate, based on FKs.
# Foreign key relationships determine the set of codes to use
for fk in obj["schema"]["foreign_keys"]:
# Only referenced tables with an associated encoder indicate
# that the column we're looking at should have an encoder
# attached to it. All of these FK relationships must have simple
# single-column keys.
encoder = Encoder.dict_from_id(fk["reference"]["resource"])
if len(fk["fields"]) != 1 and encoder:
raise ValueError(
"Encoder for table with a composite primary key: "
f"{fk['reference']['resource']}"
)
if len(fk["fields"]) == 1 and encoder:
# fk["fields"] is a one element list, get the one element:
field = fk["fields"][0]
for f in obj["schema"]["fields"]:
if f["name"] == field:
f["encoder"] = encoder
break
return obj
@classmethod
def from_id(cls, x: str) -> "Resource":
"""Construct from PUDL identifier (`resource.name`)."""
return cls(**cls.dict_from_id(x))
def get_field(self, name: str) -> Field:
"""Return field with the given name if it's part of the Resources."""
names = [field.name for field in self.schema.fields]
if name not in names:
raise KeyError(f"The field {name} is not part of the {self.name} schema.")
return self.schema.fields[names.index(name)]
def to_sql(
self,
metadata: sa.MetaData = None,
check_types: bool = True,
check_values: bool = True,
) -> sa.Table:
"""Return equivalent SQL Table."""
if metadata is None:
metadata = sa.MetaData()
columns = [
f.to_sql(
check_types=check_types,
check_values=check_values,
)
for f in self.schema.fields
]
constraints = []
if self.schema.primary_key:
constraints.append(sa.PrimaryKeyConstraint(*self.schema.primary_key))
for key in self.schema.foreign_keys:
constraints.append(key.to_sql())
return sa.Table(self.name, metadata, *columns, *constraints)
def to_pyarrow(self) -> pa.Schema:
"""Construct a PyArrow schema for the resource."""
fields = [field.to_pyarrow() for field in self.schema.fields]
metadata = {
"description": self.description,
"primary_key": ",".join(self.schema.primary_key),
}
return pa.schema(fields=fields, metadata=metadata)
def to_pandas_dtypes(
self, **kwargs: Any
) -> Dict[str, Union[str, pd.CategoricalDtype]]:
"""Return Pandas data type of each field by field name.
Args:
kwargs: Arguments to :meth:`Field.to_pandas_dtype`.
"""
return {f.name: f.to_pandas_dtype(**kwargs) for f in self.schema.fields}
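    # Editorial sketch (assumed usage): the mapping can be applied directly to a
    # dataframe to coerce it to the schema's types, which is what `format_df` does
    # internally, e.g. `df = df.astype(resource.to_pandas_dtypes())`.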
def match_primary_key(self, names: Iterable[str]) -> Optional[Dict[str, str]]:
"""Match primary key fields to input field names.
An exact match is required unless :attr:`harvest` .`harvest=True`,
in which case periodic names may also match a basename with a smaller period.
Args:
names: Field names.
Raises:
ValueError: Field names are not unique.
ValueError: Multiple field names match primary key field.
Returns:
The name matching each primary key field (if any) as a :class:`dict`,
or `None` if not all primary key fields have a match.
Examples:
>>> fields = [{'name': 'x_year', 'type': 'year'}]
>>> schema = {'fields': fields, 'primary_key': ['x_year']}
>>> resource = Resource(name='r', schema=schema)
By default, when :attr:`harvest` .`harvest=False`,
exact matches are required.
>>> resource.harvest.harvest
False
>>> resource.match_primary_key(['x_month']) is None
True
>>> resource.match_primary_key(['x_year', 'x_month'])
{'x_year': 'x_year'}
When :attr:`harvest` .`harvest=True`,
in the absence of an exact match,
periodic names may also match a basename with a smaller period.
>>> resource.harvest.harvest = True
>>> resource.match_primary_key(['x_year', 'x_month'])
{'x_year': 'x_year'}
>>> resource.match_primary_key(['x_month'])
{'x_month': 'x_year'}
>>> resource.match_primary_key(['x_month', 'x_date'])
Traceback (most recent call last):
ValueError: ... {'x_month', 'x_date'} match primary key field 'x_year'
"""
if len(names) != len(set(names)):
raise ValueError("Field names are not unique")
keys = self.schema.primary_key or []
if self.harvest.harvest:
remaining = set(names)
matches = {}
for key in keys:
match = None
if key in remaining:
# Use exact match if present
match = key
elif split_period(key)[1]:
# Try periodic alternatives
periods = expand_periodic_column_names([key])
matching = remaining.intersection(periods)
if len(matching) > 1:
raise ValueError(
f"Multiple field names {matching} "
f"match primary key field '{key}'"
)
if len(matching) == 1:
match = list(matching)[0]
if match:
matches[match] = key
remaining.remove(match)
else:
matches = {key: key for key in keys if key in names}
return matches if len(matches) == len(keys) else None
def format_df(self, df: pd.DataFrame = None, **kwargs: Any) -> pd.DataFrame:
"""Format a dataframe.
Args:
df: Dataframe to format.
kwargs: Arguments to :meth:`Field.to_pandas_dtypes`.
Returns:
Dataframe with column names and data types matching the resource fields.
Periodic primary key fields are snapped to the start of the desired period.
If the primary key fields could not be matched to columns in `df`
(:meth:`match_primary_key`) or if `df=None`, an empty dataframe is returned.
"""
dtypes = self.to_pandas_dtypes(**kwargs)
if df is None:
return pd.DataFrame({n: pd.Series(dtype=d) for n, d in dtypes.items()})
matches = self.match_primary_key(df.columns)
if matches is None:
# Primary key present but no matches were found
return self.format_df()
df = df.copy()
# Rename periodic key columns (if any) to the requested period
df.rename(columns=matches, inplace=True)
# Cast integer year fields to datetime
for field in self.schema.fields:
if (
field.type == "year"
and field.name in df
and pd.api.types.is_integer_dtype(df[field.name])
):
df[field.name] = pd.to_datetime(df[field.name], format="%Y")
df = (
# Reorder columns and insert missing columns
df.reindex(columns=dtypes.keys(), copy=False)
# Coerce columns to correct data type
.astype(dtypes, copy=False)
)
# Convert periodic key columns to the requested period
for df_key, key in matches.items():
_, period = split_period(key)
if period and df_key != key:
df[key] = PERIODS[period](df[key])
return df
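    # Editorial sketch (hypothetical column names): for a resource keyed on a
    # periodic field such as "report_year", `format_df` accepts a finer-grained
    # column, renames it, and snaps values to the start of the period, e.g.
    #
    #     raw = pd.DataFrame({"report_date": ["2020-07-01"], "value": [1.0]})
    #     tidy = resource.format_df(raw)  # -> report_year == 2020-01-01, value == 1.0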
def aggregate_df(
self, df: pd.DataFrame, raised: bool = False, error: Callable = None
) -> Tuple[pd.DataFrame, dict]:
"""Aggregate dataframe by primary key.
The dataframe is grouped by primary key fields
and aggregated with the aggregate function of each field
(:attr:`schema_`. `fields[*].harvest.aggregate`).
The report is formatted as follows:
        * `valid` (bool): Whether the resource is valid.
* `stats` (dict): Error statistics for resource fields.
* `fields` (dict):
* `<field_name>` (str)
* `valid` (bool): Whether field is valid.
* `stats` (dict): Error statistics for field groups.
* `errors` (:class:`pandas.Series`): Error values indexed by primary key.
* ...
Each `stats` (dict) contains the following:
* `all` (int): Number of entities (field or field group).
        * `invalid` (int): Number of invalid entities.
* `tolerance` (float): Fraction of invalid entities below which
parent entity is considered valid.
* `actual` (float): Actual fraction of invalid entities.
Args:
df: Dataframe to aggregate. It is assumed to have column names and
data types matching the resource fields.
raised: Whether aggregation errors are raised or
replaced with :obj:`np.nan` and returned in an error report.
error: A function with signature `f(x, e) -> Any`,
where `x` are the original field values as a :class:`pandas.Series`
and `e` is the original error.
If provided, the returned value is reported instead of `e`.
Raises:
ValueError: A primary key is required for aggregating.
Returns:
The aggregated dataframe indexed by primary key fields,
            and an aggregation report (described above)
that includes all aggregation errors and whether the result
meets the resource's and fields' tolerance.
"""
if not self.schema.primary_key:
raise ValueError("A primary key is required for aggregating")
aggfuncs = {
f.name: f.harvest.aggregate
for f in self.schema.fields
if f.name not in self.schema.primary_key
}
df, report = groupby_aggregate(
df,
by=self.schema.primary_key,
aggfuncs=aggfuncs,
raised=raised,
error=error,
)
report = self._build_aggregation_report(df, report)
return df, report
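    # Editorial sketch (assumed usage): callers would normally inspect the report
    # before trusting the aggregated frame, e.g.
    #
    #     agg, report = resource.aggregate_df(df, raised=False)
    #     if not report["valid"]:
    #         bad = {k: v["stats"] for k, v in report["fields"].items() if not v["valid"]}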
def _build_aggregation_report(self, df: pd.DataFrame, errors: dict) -> dict:
"""Build report from aggregation errors.
Args:
df: Harvested dataframe (see :meth:`harvest_dfs`).
errors: Aggregation errors (see :func:`groupby_aggregate`).
Returns:
Aggregation report, as described in :meth:`aggregate_df`.
"""
nrows, ncols = df.reset_index().shape
freports = {}
for field in self.schema.fields:
if field.name in errors:
nerrors = errors[field.name].size
else:
nerrors = 0
stats = {
"all": nrows,
"invalid": nerrors,
"tolerance": field.harvest.tolerance,
"actual": nerrors / nrows if nrows else 0,
}
freports[field.name] = {
"valid": stats["actual"] <= stats["tolerance"],
"stats": stats,
"errors": errors.get(field.name, None),
}
nerrors = sum([not f["valid"] for f in freports.values()])
stats = {
"all": ncols,
"invalid": nerrors,
"tolerance": self.harvest.tolerance,
"actual": nerrors / ncols,
}
return {
"valid": stats["actual"] <= stats["tolerance"],
"stats": stats,
"fields": freports,
}
def harvest_dfs(
self,
dfs: Dict[str, pd.DataFrame],
aggregate: bool = None,
aggregate_kwargs: Dict[str, Any] = {},
format_kwargs: Dict[str, Any] = {},
) -> Tuple[pd.DataFrame, dict]:
"""Harvest from named dataframes.
For standard resources (:attr:`harvest`. `harvest=False`), the columns
matching all primary key fields and any data fields are extracted from
the input dataframe of the same name.
For harvested resources (:attr:`harvest`. `harvest=True`), the columns
matching all primary key fields and any data fields are extracted from
each compatible input dataframe, and concatenated into a single
        dataframe. Periodic key fields (e.g. 'report_month') are matched to any
        column with the same basename and an equal or smaller period (e.g.
        'report_day') and snapped to the start of the desired period.
If `aggregate=False`, rows are indexed by the name of the input dataframe.
If `aggregate=True`, rows are indexed by primary key fields.
Args:
dfs: Dataframes to harvest.
aggregate: Whether to aggregate the harvested rows by their primary key.
By default, this is `True` if `self.harvest.harvest=True` and
`False` otherwise.
aggregate_kwargs: Optional arguments to :meth:`aggregate_df`.
format_kwargs: Optional arguments to :meth:`format_df`.
Returns:
A dataframe harvested from the dataframes, with column names and
data types matching the resource fields, alongside an aggregation
report.
"""
if aggregate is None:
aggregate = self.harvest.harvest
if self.harvest.harvest:
# Harvest resource from all inputs where all primary key fields are present
samples = {}
for name, df in dfs.items():
samples[name] = self.format_df(df, **format_kwargs)
# Pass input names to aggregate via the index
samples[name].index = pd.Index([name] * len(samples[name]), name="df")
df = pd.concat(samples.values())
elif self.name in dfs:
# Subset resource from input of same name
df = self.format_df(dfs[self.name], **format_kwargs)
# Pass input names to aggregate via the index
            df.index = pd.Index([self.name] * df.shape[0], name="df")
# pylint: disable=E1101,E1103,W0232
from datetime import datetime, timedelta
from pandas.compat import range, lrange, lzip, u, zip
import operator
import re
import nose
import warnings
import os
import numpy as np
from numpy.testing import assert_array_equal
from pandas import period_range, date_range
from pandas.core.index import (Index, Float64Index, Int64Index, MultiIndex,
InvalidIndexError, NumericIndex)
from pandas.tseries.index import DatetimeIndex
from pandas.tseries.tdi import TimedeltaIndex
from pandas.tseries.period import PeriodIndex
from pandas.core.series import Series
from pandas.util.testing import (assert_almost_equal, assertRaisesRegexp,
assert_copy)
from pandas import compat
from pandas.compat import long
import pandas.util.testing as tm
import pandas.core.config as cf
from pandas.tseries.index import _to_m8
import pandas.tseries.offsets as offsets
import pandas as pd
from pandas.lib import Timestamp
class Base(object):
""" base class for index sub-class tests """
_holder = None
_compat_props = ['shape', 'ndim', 'size', 'itemsize', 'nbytes']
def verify_pickle(self,index):
unpickled = self.round_trip_pickle(index)
self.assertTrue(index.equals(unpickled))
def test_pickle_compat_construction(self):
# this is testing for pickle compat
if self._holder is None:
return
# need an object to create with
self.assertRaises(TypeError, self._holder)
def test_numeric_compat(self):
idx = self.create_index()
tm.assertRaisesRegexp(TypeError,
"cannot perform __mul__",
lambda : idx * 1)
tm.assertRaisesRegexp(TypeError,
"cannot perform __mul__",
lambda : 1 * idx)
div_err = "cannot perform __truediv__" if compat.PY3 else "cannot perform __div__"
tm.assertRaisesRegexp(TypeError,
div_err,
lambda : idx / 1)
tm.assertRaisesRegexp(TypeError,
div_err,
lambda : 1 / idx)
tm.assertRaisesRegexp(TypeError,
"cannot perform __floordiv__",
lambda : idx // 1)
tm.assertRaisesRegexp(TypeError,
"cannot perform __floordiv__",
lambda : 1 // idx)
def test_boolean_context_compat(self):
# boolean context compat
idx = self.create_index()
def f():
if idx:
pass
tm.assertRaisesRegexp(ValueError,'The truth value of a',f)
def test_ndarray_compat_properties(self):
idx = self.create_index()
self.assertTrue(idx.T.equals(idx))
self.assertTrue(idx.transpose().equals(idx))
values = idx.values
for prop in self._compat_props:
self.assertEqual(getattr(idx, prop), getattr(values, prop))
# test for validity
idx.nbytes
idx.values.nbytes
class TestIndex(Base, tm.TestCase):
_holder = Index
_multiprocess_can_split_ = True
def setUp(self):
self.indices = dict(
unicodeIndex = tm.makeUnicodeIndex(100),
strIndex = tm.makeStringIndex(100),
dateIndex = tm.makeDateIndex(100),
intIndex = tm.makeIntIndex(100),
floatIndex = tm.makeFloatIndex(100),
boolIndex = Index([True,False]),
empty = Index([]),
tuples = MultiIndex.from_tuples(lzip(['foo', 'bar', 'baz'],
[1, 2, 3]))
)
for name, ind in self.indices.items():
setattr(self, name, ind)
def create_index(self):
return Index(list('abcde'))
def test_wrong_number_names(self):
def testit(ind):
ind.names = ["apple", "banana", "carrot"]
for ind in self.indices.values():
assertRaisesRegexp(ValueError, "^Length", testit, ind)
def test_set_name_methods(self):
new_name = "This is the new name for this index"
indices = (self.dateIndex, self.intIndex, self.unicodeIndex,
self.empty)
for ind in indices:
original_name = ind.name
new_ind = ind.set_names([new_name])
self.assertEqual(new_ind.name, new_name)
self.assertEqual(ind.name, original_name)
res = ind.rename(new_name, inplace=True)
# should return None
self.assertIsNone(res)
self.assertEqual(ind.name, new_name)
self.assertEqual(ind.names, [new_name])
#with assertRaisesRegexp(TypeError, "list-like"):
# # should still fail even if it would be the right length
# ind.set_names("a")
with assertRaisesRegexp(ValueError, "Level must be None"):
ind.set_names("a", level=0)
# rename in place just leaves tuples and other containers alone
name = ('A', 'B')
ind = self.intIndex
ind.rename(name, inplace=True)
self.assertEqual(ind.name, name)
self.assertEqual(ind.names, [name])
def test_hash_error(self):
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(self.strIndex).__name__):
hash(self.strIndex)
def test_new_axis(self):
new_index = self.dateIndex[None, :]
self.assertEqual(new_index.ndim, 2)
tm.assert_isinstance(new_index, np.ndarray)
def test_copy_and_deepcopy(self):
from copy import copy, deepcopy
for func in (copy, deepcopy):
idx_copy = func(self.strIndex)
self.assertIsNot(idx_copy, self.strIndex)
self.assertTrue(idx_copy.equals(self.strIndex))
new_copy = self.strIndex.copy(deep=True, name="banana")
self.assertEqual(new_copy.name, "banana")
new_copy2 = self.intIndex.copy(dtype=int)
self.assertEqual(new_copy2.dtype.kind, 'i')
def test_duplicates(self):
idx = Index([0, 0, 0])
self.assertFalse(idx.is_unique)
def test_sort(self):
self.assertRaises(TypeError, self.strIndex.sort)
def test_mutability(self):
self.assertRaises(TypeError, self.strIndex.__setitem__, 0, 'foo')
def test_constructor(self):
# regular instance creation
tm.assert_contains_all(self.strIndex, self.strIndex)
tm.assert_contains_all(self.dateIndex, self.dateIndex)
# casting
arr = np.array(self.strIndex)
index = Index(arr)
tm.assert_contains_all(arr, index)
self.assert_numpy_array_equal(self.strIndex, index)
# copy
arr = np.array(self.strIndex)
index = Index(arr, copy=True, name='name')
tm.assert_isinstance(index, Index)
self.assertEqual(index.name, 'name')
assert_array_equal(arr, index)
arr[0] = "SOMEBIGLONGSTRING"
self.assertNotEqual(index[0], "SOMEBIGLONGSTRING")
# what to do here?
# arr = np.array(5.)
# self.assertRaises(Exception, arr.view, Index)
def test_constructor_corner(self):
# corner case
self.assertRaises(TypeError, Index, 0)
def test_constructor_from_series(self):
expected = DatetimeIndex([Timestamp('20110101'),Timestamp('20120101'),Timestamp('20130101')])
s = Series([Timestamp('20110101'),Timestamp('20120101'),Timestamp('20130101')])
result = Index(s)
self.assertTrue(result.equals(expected))
result = DatetimeIndex(s)
self.assertTrue(result.equals(expected))
# GH 6273
# create from a series, passing a freq
s = Series(pd.to_datetime(['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990']))
result = DatetimeIndex(s, freq='MS')
expected = DatetimeIndex(['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990'],freq='MS')
self.assertTrue(result.equals(expected))
df = pd.DataFrame(np.random.rand(5,3))
df['date'] = ['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990']
result = DatetimeIndex(df['date'], freq='MS')
# GH 6274
# infer freq of same
result = pd.infer_freq(df['date'])
self.assertEqual(result,'MS')
def test_constructor_ndarray_like(self):
# GH 5460#issuecomment-44474502
# it should be possible to convert any object that satisfies the numpy
# ndarray interface directly into an Index
class ArrayLike(object):
def __init__(self, array):
self.array = array
def __array__(self, dtype=None):
return self.array
for array in [np.arange(5),
np.array(['a', 'b', 'c']),
date_range('2000-01-01', periods=3).values]:
expected = pd.Index(array)
result = pd.Index(ArrayLike(array))
self.assertTrue(result.equals(expected))
def test_index_ctor_infer_periodindex(self):
xp = period_range('2012-1-1', freq='M', periods=3)
rs = Index(xp)
assert_array_equal(rs, xp)
tm.assert_isinstance(rs, PeriodIndex)
def test_constructor_simple_new(self):
idx = Index([1, 2, 3, 4, 5], name='int')
result = idx._simple_new(idx, 'int')
self.assertTrue(result.equals(idx))
idx = Index([1.1, np.nan, 2.2, 3.0], name='float')
result = idx._simple_new(idx, 'float')
self.assertTrue(result.equals(idx))
idx = Index(['A', 'B', 'C', np.nan], name='obj')
result = idx._simple_new(idx, 'obj')
self.assertTrue(result.equals(idx))
def test_copy(self):
i = Index([], name='Foo')
i_copy = i.copy()
self.assertEqual(i_copy.name, 'Foo')
def test_view(self):
i = Index([], name='Foo')
i_view = i.view()
self.assertEqual(i_view.name, 'Foo')
def test_legacy_pickle_identity(self):
# GH 8431
pth = tm.get_data_path()
s1 = pd.read_pickle(os.path.join(pth,'s1-0.12.0.pickle'))
s2 = pd.read_pickle(os.path.join(pth,'s2-0.12.0.pickle'))
self.assertFalse(s1.index.identical(s2.index))
self.assertFalse(s1.index.equals(s2.index))
def test_astype(self):
casted = self.intIndex.astype('i8')
# it works!
casted.get_loc(5)
# pass on name
self.intIndex.name = 'foobar'
casted = self.intIndex.astype('i8')
self.assertEqual(casted.name, 'foobar')
def test_compat(self):
self.strIndex.tolist()
def test_equals(self):
# same
self.assertTrue(Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'c'])))
# different length
self.assertFalse(Index(['a', 'b', 'c']).equals(Index(['a', 'b'])))
# same length, different values
self.assertFalse(Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'd'])))
# Must also be an Index
self.assertFalse(Index(['a', 'b', 'c']).equals(['a', 'b', 'c']))
def test_insert(self):
# GH 7256
# validate neg/pos inserts
result = Index(['b', 'c', 'd'])
#test 0th element
self.assertTrue(Index(['a', 'b', 'c', 'd']).equals(
result.insert(0, 'a')))
#test Nth element that follows Python list behavior
self.assertTrue(Index(['b', 'c', 'e', 'd']).equals(
result.insert(-1, 'e')))
#test loc +/- neq (0, -1)
self.assertTrue(result.insert(1, 'z').equals(
result.insert(-2, 'z')))
#test empty
null_index = Index([])
self.assertTrue(Index(['a']).equals(
null_index.insert(0, 'a')))
def test_delete(self):
idx = Index(['a', 'b', 'c', 'd'], name='idx')
expected = Index(['b', 'c', 'd'], name='idx')
result = idx.delete(0)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
expected = Index(['a', 'b', 'c'], name='idx')
result = idx.delete(-1)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
with tm.assertRaises((IndexError, ValueError)):
            # either, depending on the numpy version
result = idx.delete(5)
def test_identical(self):
# index
i1 = Index(['a', 'b', 'c'])
i2 = Index(['a', 'b', 'c'])
self.assertTrue(i1.identical(i2))
i1 = i1.rename('foo')
self.assertTrue(i1.equals(i2))
self.assertFalse(i1.identical(i2))
i2 = i2.rename('foo')
self.assertTrue(i1.identical(i2))
i3 = Index([('a', 'a'), ('a', 'b'), ('b', 'a')])
i4 = Index([('a', 'a'), ('a', 'b'), ('b', 'a')], tupleize_cols=False)
self.assertFalse(i3.identical(i4))
def test_is_(self):
ind = Index(range(10))
self.assertTrue(ind.is_(ind))
self.assertTrue(ind.is_(ind.view().view().view().view()))
self.assertFalse(ind.is_(Index(range(10))))
self.assertFalse(ind.is_(ind.copy()))
self.assertFalse(ind.is_(ind.copy(deep=False)))
self.assertFalse(ind.is_(ind[:]))
self.assertFalse(ind.is_(ind.view(np.ndarray).view(Index)))
self.assertFalse(ind.is_(np.array(range(10))))
# quasi-implementation dependent
self.assertTrue(ind.is_(ind.view()))
ind2 = ind.view()
ind2.name = 'bob'
self.assertTrue(ind.is_(ind2))
self.assertTrue(ind2.is_(ind))
# doesn't matter if Indices are *actually* views of underlying data,
self.assertFalse(ind.is_(Index(ind.values)))
arr = np.array(range(1, 11))
ind1 = Index(arr, copy=False)
ind2 = Index(arr, copy=False)
self.assertFalse(ind1.is_(ind2))
def test_asof(self):
d = self.dateIndex[0]
self.assertIs(self.dateIndex.asof(d), d)
self.assertTrue(np.isnan(self.dateIndex.asof(d - timedelta(1))))
d = self.dateIndex[-1]
self.assertEqual(self.dateIndex.asof(d + timedelta(1)), d)
d = self.dateIndex[0].to_datetime()
tm.assert_isinstance(self.dateIndex.asof(d), Timestamp)
def test_asof_datetime_partial(self):
idx = pd.date_range('2010-01-01', periods=2, freq='m')
expected = Timestamp('2010-01-31')
result = idx.asof('2010-02')
self.assertEqual(result, expected)
def test_nanosecond_index_access(self):
s = Series([Timestamp('20130101')]).values.view('i8')[0]
r = DatetimeIndex([s + 50 + i for i in range(100)])
x = Series(np.random.randn(100), index=r)
first_value = x.asof(x.index[0])
# this does not yet work, as parsing strings is done via dateutil
#self.assertEqual(first_value, x['2013-01-01 00:00:00.000000050+0000'])
self.assertEqual(first_value, x[Timestamp(np.datetime64('2013-01-01 00:00:00.000000050+0000', 'ns'))])
def test_argsort(self):
result = self.strIndex.argsort()
expected = np.array(self.strIndex).argsort()
self.assert_numpy_array_equal(result, expected)
def test_comparators(self):
index = self.dateIndex
element = index[len(index) // 2]
element = _to_m8(element)
arr = np.array(index)
def _check(op):
arr_result = op(arr, element)
index_result = op(index, element)
self.assertIsInstance(index_result, np.ndarray)
self.assert_numpy_array_equal(arr_result, index_result)
_check(operator.eq)
_check(operator.ne)
_check(operator.gt)
_check(operator.lt)
_check(operator.ge)
_check(operator.le)
def test_booleanindex(self):
boolIdx = np.repeat(True, len(self.strIndex)).astype(bool)
boolIdx[5:30:2] = False
subIndex = self.strIndex[boolIdx]
for i, val in enumerate(subIndex):
self.assertEqual(subIndex.get_loc(val), i)
subIndex = self.strIndex[list(boolIdx)]
for i, val in enumerate(subIndex):
self.assertEqual(subIndex.get_loc(val), i)
def test_fancy(self):
sl = self.strIndex[[1, 2, 3]]
for i in sl:
self.assertEqual(i, sl[sl.get_loc(i)])
def test_empty_fancy(self):
empty_farr = np.array([], dtype=np.float_)
empty_iarr = np.array([], dtype=np.int_)
empty_barr = np.array([], dtype=np.bool_)
# pd.DatetimeIndex is excluded, because it overrides getitem and should
# be tested separately.
for idx in [self.strIndex, self.intIndex, self.floatIndex]:
empty_idx = idx.__class__([])
values = idx.values
self.assertTrue(idx[[]].identical(empty_idx))
self.assertTrue(idx[empty_iarr].identical(empty_idx))
self.assertTrue(idx[empty_barr].identical(empty_idx))
# np.ndarray only accepts ndarray of int & bool dtypes, so should
# Index.
self.assertRaises(IndexError, idx.__getitem__, empty_farr)
def test_getitem(self):
arr = np.array(self.dateIndex)
exp = self.dateIndex[5]
exp = _to_m8(exp)
self.assertEqual(exp, arr[5])
def test_shift(self):
shifted = self.dateIndex.shift(0, timedelta(1))
self.assertIs(shifted, self.dateIndex)
shifted = self.dateIndex.shift(5, timedelta(1))
self.assert_numpy_array_equal(shifted, self.dateIndex + timedelta(5))
shifted = self.dateIndex.shift(1, 'B')
self.assert_numpy_array_equal(shifted, self.dateIndex + offsets.BDay())
shifted.name = 'shifted'
self.assertEqual(shifted.name, shifted.shift(1, 'D').name)
def test_intersection(self):
first = self.strIndex[:20]
second = self.strIndex[:10]
intersect = first.intersection(second)
self.assertTrue(tm.equalContents(intersect, second))
# Corner cases
inter = first.intersection(first)
self.assertIs(inter, first)
# non-iterable input
assertRaisesRegexp(TypeError, "iterable", first.intersection, 0.5)
idx1 = Index([1, 2, 3, 4, 5], name='idx')
# if target has the same name, it is preserved
idx2 = Index([3, 4, 5, 6, 7], name='idx')
expected2 = Index([3, 4, 5], name='idx')
result2 = idx1.intersection(idx2)
self.assertTrue(result2.equals(expected2))
self.assertEqual(result2.name, expected2.name)
# if target name is different, it will be reset
idx3 = Index([3, 4, 5, 6, 7], name='other')
expected3 = Index([3, 4, 5], name=None)
result3 = idx1.intersection(idx3)
self.assertTrue(result3.equals(expected3))
self.assertEqual(result3.name, expected3.name)
# non monotonic
idx1 = Index([5, 3, 2, 4, 1], name='idx')
idx2 = Index([4, 7, 6, 5, 3], name='idx')
result2 = idx1.intersection(idx2)
self.assertTrue(tm.equalContents(result2, expected2))
self.assertEqual(result2.name, expected2.name)
idx3 = Index([4, 7, 6, 5, 3], name='other')
result3 = idx1.intersection(idx3)
self.assertTrue(tm.equalContents(result3, expected3))
self.assertEqual(result3.name, expected3.name)
# non-monotonic non-unique
idx1 = Index(['A','B','A','C'])
idx2 = Index(['B','D'])
expected = Index(['B'], dtype='object')
result = idx1.intersection(idx2)
self.assertTrue(result.equals(expected))
def test_union(self):
first = self.strIndex[5:20]
second = self.strIndex[:10]
everything = self.strIndex[:20]
union = first.union(second)
self.assertTrue(tm.equalContents(union, everything))
# Corner cases
union = first.union(first)
self.assertIs(union, first)
union = first.union([])
self.assertIs(union, first)
union = Index([]).union(first)
self.assertIs(union, first)
# non-iterable input
assertRaisesRegexp(TypeError, "iterable", first.union, 0.5)
# preserve names
first.name = 'A'
second.name = 'A'
union = first.union(second)
self.assertEqual(union.name, 'A')
second.name = 'B'
union = first.union(second)
self.assertIsNone(union.name)
def test_add(self):
# - API change GH 8226
with tm.assert_produces_warning():
self.strIndex + self.strIndex
firstCat = self.strIndex.union(self.dateIndex)
secondCat = self.strIndex.union(self.strIndex)
if self.dateIndex.dtype == np.object_:
appended = np.append(self.strIndex, self.dateIndex)
else:
appended = np.append(self.strIndex, self.dateIndex.astype('O'))
self.assertTrue(tm.equalContents(firstCat, appended))
self.assertTrue(tm.equalContents(secondCat, self.strIndex))
tm.assert_contains_all(self.strIndex, firstCat)
tm.assert_contains_all(self.strIndex, secondCat)
tm.assert_contains_all(self.dateIndex, firstCat)
def test_append_multiple(self):
index = Index(['a', 'b', 'c', 'd', 'e', 'f'])
foos = [index[:2], index[2:4], index[4:]]
result = foos[0].append(foos[1:])
self.assertTrue(result.equals(index))
# empty
result = index.append([])
self.assertTrue(result.equals(index))
def test_append_empty_preserve_name(self):
left = Index([], name='foo')
right = Index([1, 2, 3], name='foo')
result = left.append(right)
self.assertEqual(result.name, 'foo')
left = Index([], name='foo')
right = Index([1, 2, 3], name='bar')
result = left.append(right)
self.assertIsNone(result.name)
def test_add_string(self):
# from bug report
index = Index(['a', 'b', 'c'])
index2 = index + 'foo'
self.assertNotIn('a', index2)
self.assertIn('afoo', index2)
def test_iadd_string(self):
index = pd.Index(['a', 'b', 'c'])
# doesn't fail test unless there is a check before `+=`
self.assertIn('a', index)
index += '_x'
self.assertIn('a_x', index)
def test_difference(self):
first = self.strIndex[5:20]
second = self.strIndex[:10]
answer = self.strIndex[10:20]
first.name = 'name'
# different names
result = first.difference(second)
self.assertTrue(tm.equalContents(result, answer))
self.assertEqual(result.name, None)
# same names
second.name = 'name'
result = first.difference(second)
self.assertEqual(result.name, 'name')
# with empty
result = first.difference([])
self.assertTrue(tm.equalContents(result, first))
self.assertEqual(result.name, first.name)
        # with everything
result = first.difference(first)
self.assertEqual(len(result), 0)
self.assertEqual(result.name, first.name)
# non-iterable input
assertRaisesRegexp(TypeError, "iterable", first.diff, 0.5)
def test_symmetric_diff(self):
# smoke
idx1 = Index([1, 2, 3, 4], name='idx1')
idx2 = Index([2, 3, 4, 5])
result = idx1.sym_diff(idx2)
expected = Index([1, 5])
self.assertTrue(tm.equalContents(result, expected))
self.assertIsNone(result.name)
# __xor__ syntax
expected = idx1 ^ idx2
self.assertTrue(tm.equalContents(result, expected))
self.assertIsNone(result.name)
# multiIndex
idx1 = MultiIndex.from_tuples(self.tuples)
idx2 = MultiIndex.from_tuples([('foo', 1), ('bar', 3)])
result = idx1.sym_diff(idx2)
expected = MultiIndex.from_tuples([('bar', 2), ('baz', 3), ('bar', 3)])
self.assertTrue(tm.equalContents(result, expected))
# nans:
# GH #6444, sorting of nans. Make sure the number of nans is right
# and the correct non-nan values are there. punt on sorting.
idx1 = Index([1, 2, 3, np.nan])
idx2 = Index([0, 1, np.nan])
result = idx1.sym_diff(idx2)
# expected = Index([0.0, np.nan, 2.0, 3.0, np.nan])
nans = pd.isnull(result)
self.assertEqual(nans.sum(), 2)
self.assertEqual((~nans).sum(), 3)
[self.assertIn(x, result) for x in [0.0, 2.0, 3.0]]
# other not an Index:
idx1 = Index([1, 2, 3, 4], name='idx1')
idx2 = np.array([2, 3, 4, 5])
expected = Index([1, 5])
result = idx1.sym_diff(idx2)
self.assertTrue(tm.equalContents(result, expected))
self.assertEqual(result.name, 'idx1')
result = idx1.sym_diff(idx2, result_name='new_name')
self.assertTrue(tm.equalContents(result, expected))
self.assertEqual(result.name, 'new_name')
# other isn't iterable
with tm.assertRaises(TypeError):
Index(idx1,dtype='object') - 1
def test_pickle(self):
self.verify_pickle(self.strIndex)
self.strIndex.name = 'foo'
self.verify_pickle(self.strIndex)
self.verify_pickle(self.dateIndex)
def test_is_numeric(self):
self.assertFalse(self.dateIndex.is_numeric())
self.assertFalse(self.strIndex.is_numeric())
self.assertTrue(self.intIndex.is_numeric())
self.assertTrue(self.floatIndex.is_numeric())
def test_is_object(self):
self.assertTrue(self.strIndex.is_object())
self.assertTrue(self.boolIndex.is_object())
self.assertFalse(self.intIndex.is_object())
self.assertFalse(self.dateIndex.is_object())
self.assertFalse(self.floatIndex.is_object())
def test_is_all_dates(self):
self.assertTrue(self.dateIndex.is_all_dates)
self.assertFalse(self.strIndex.is_all_dates)
self.assertFalse(self.intIndex.is_all_dates)
def test_summary(self):
self._check_method_works(Index.summary)
# GH3869
ind = Index(['{other}%s', "~:{range}:0"], name='A')
result = ind.summary()
# shouldn't be formatted accidentally.
self.assertIn('~:{range}:0', result)
self.assertIn('{other}%s', result)
def test_format(self):
self._check_method_works(Index.format)
index = Index([datetime.now()])
formatted = index.format()
expected = [str(index[0])]
self.assertEqual(formatted, expected)
# 2845
index = Index([1, 2.0+3.0j, np.nan])
formatted = index.format()
expected = [str(index[0]), str(index[1]), u('NaN')]
self.assertEqual(formatted, expected)
# is this really allowed?
index = Index([1, 2.0+3.0j, None])
formatted = index.format()
expected = [str(index[0]), str(index[1]), u('NaN')]
self.assertEqual(formatted, expected)
self.strIndex[:0].format()
def test_format_with_name_time_info(self):
# bug I fixed 12/20/2011
inc = timedelta(hours=4)
dates = Index([dt + inc for dt in self.dateIndex], name='something')
formatted = dates.format(name=True)
self.assertEqual(formatted[0], 'something')
def test_format_datetime_with_time(self):
t = Index([datetime(2012, 2, 7), datetime(2012, 2, 7, 23)])
result = t.format()
expected = ['2012-02-07 00:00:00', '2012-02-07 23:00:00']
self.assertEqual(len(result), 2)
self.assertEqual(result, expected)
def test_format_none(self):
values = ['a', 'b', 'c', None]
idx = Index(values)
idx.format()
self.assertIsNone(idx[3])
def test_take(self):
indexer = [4, 3, 0, 2]
result = self.dateIndex.take(indexer)
expected = self.dateIndex[indexer]
self.assertTrue(result.equals(expected))
def _check_method_works(self, method):
method(self.empty)
method(self.dateIndex)
method(self.unicodeIndex)
method(self.strIndex)
method(self.intIndex)
method(self.tuples)
def test_get_indexer(self):
idx1 = Index([1, 2, 3, 4, 5])
idx2 = Index([2, 4, 6])
r1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, [1, 3, -1])
r1 = idx2.get_indexer(idx1, method='pad')
assert_almost_equal(r1, [-1, 0, 0, 1, 1])
rffill1 = idx2.get_indexer(idx1, method='ffill')
assert_almost_equal(r1, rffill1)
r1 = idx2.get_indexer(idx1, method='backfill')
assert_almost_equal(r1, [0, 0, 1, 1, 2])
rbfill1 = idx2.get_indexer(idx1, method='bfill')
assert_almost_equal(r1, rbfill1)
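    # Editorial note (not from the original suite): with method='pad'/'ffill' each
    # target label is mapped to the position of the last idx2 value <= that label,
    # 'backfill'/'bfill' maps to the first idx2 value >= it, and -1 means no such
    # value exists, which is what the expected arrays above encode.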
def test_slice_locs(self):
for dtype in [int, float]:
idx = Index(np.array([0, 1, 2, 5, 6, 7, 9, 10], dtype=dtype))
n = len(idx)
self.assertEqual(idx.slice_locs(start=2), (2, n))
self.assertEqual(idx.slice_locs(start=3), (3, n))
self.assertEqual(idx.slice_locs(3, 8), (3, 6))
self.assertEqual(idx.slice_locs(5, 10), (3, n))
self.assertEqual(idx.slice_locs(5.0, 10.0), (3, n))
self.assertEqual(idx.slice_locs(4.5, 10.5), (3, 8))
self.assertEqual(idx.slice_locs(end=8), (0, 6))
self.assertEqual(idx.slice_locs(end=9), (0, 7))
idx2 = idx[::-1]
self.assertEqual(idx2.slice_locs(8, 2), (2, 6))
self.assertEqual(idx2.slice_locs(8.5, 1.5), (2, 6))
self.assertEqual(idx2.slice_locs(7, 3), (2, 5))
self.assertEqual(idx2.slice_locs(10.5, -1), (0, n))
def test_slice_locs_dup(self):
idx = Index(['a', 'a', 'b', 'c', 'd', 'd'])
self.assertEqual(idx.slice_locs('a', 'd'), (0, 6))
self.assertEqual(idx.slice_locs(end='d'), (0, 6))
self.assertEqual(idx.slice_locs('a', 'c'), (0, 4))
self.assertEqual(idx.slice_locs('b', 'd'), (2, 6))
idx2 = idx[::-1]
self.assertEqual(idx2.slice_locs('d', 'a'), (0, 6))
self.assertEqual(idx2.slice_locs(end='a'), (0, 6))
self.assertEqual(idx2.slice_locs('d', 'b'), (0, 4))
self.assertEqual(idx2.slice_locs('c', 'a'), (2, 6))
for dtype in [int, float]:
idx = Index(np.array([10, 12, 12, 14], dtype=dtype))
self.assertEqual(idx.slice_locs(12, 12), (1, 3))
self.assertEqual(idx.slice_locs(11, 13), (1, 3))
idx2 = idx[::-1]
self.assertEqual(idx2.slice_locs(12, 12), (1, 3))
self.assertEqual(idx2.slice_locs(13, 11), (1, 3))
def test_slice_locs_na(self):
idx = Index([np.nan, 1, 2])
self.assertRaises(KeyError, idx.slice_locs, start=1.5)
self.assertRaises(KeyError, idx.slice_locs, end=1.5)
self.assertEqual(idx.slice_locs(1), (1, 3))
self.assertEqual(idx.slice_locs(np.nan), (0, 3))
idx = Index([np.nan, np.nan, 1, 2])
self.assertRaises(KeyError, idx.slice_locs, np.nan)
def test_drop(self):
n = len(self.strIndex)
dropped = self.strIndex.drop(self.strIndex[lrange(5, 10)])
expected = self.strIndex[lrange(5) + lrange(10, n)]
self.assertTrue(dropped.equals(expected))
self.assertRaises(ValueError, self.strIndex.drop, ['foo', 'bar'])
dropped = self.strIndex.drop(self.strIndex[0])
expected = self.strIndex[1:]
self.assertTrue(dropped.equals(expected))
ser = Index([1, 2, 3])
dropped = ser.drop(1)
expected = Index([2, 3])
self.assertTrue(dropped.equals(expected))
def test_tuple_union_bug(self):
import pandas
import numpy as np
aidx1 = np.array([(1, 'A'), (2, 'A'), (1, 'B'), (2, 'B')],
dtype=[('num', int), ('let', 'a1')])
aidx2 = np.array([(1, 'A'), (2, 'A'), (1, 'B'), (2, 'B'), (1, 'C'), (2,
'C')], dtype=[('num', int), ('let', 'a1')])
idx1 = pandas.Index(aidx1)
idx2 = pandas.Index(aidx2)
# intersection broken?
int_idx = idx1.intersection(idx2)
# needs to be 1d like idx1 and idx2
expected = idx1[:4] # pandas.Index(sorted(set(idx1) & set(idx2)))
self.assertEqual(int_idx.ndim, 1)
self.assertTrue(int_idx.equals(expected))
# union broken
union_idx = idx1.union(idx2)
expected = idx2
self.assertEqual(union_idx.ndim, 1)
self.assertTrue(union_idx.equals(expected))
def test_is_monotonic_incomparable(self):
index = Index([5, datetime.now(), 7])
self.assertFalse(index.is_monotonic)
self.assertFalse(index.is_monotonic_decreasing)
def test_get_set_value(self):
values = np.random.randn(100)
date = self.dateIndex[67]
assert_almost_equal(self.dateIndex.get_value(values, date),
values[67])
self.dateIndex.set_value(values, date, 10)
self.assertEqual(values[67], 10)
def test_isin(self):
values = ['foo', 'bar', 'quux']
idx = Index(['qux', 'baz', 'foo', 'bar'])
result = idx.isin(values)
expected = np.array([False, False, True, True])
self.assert_numpy_array_equal(result, expected)
# empty, return dtype bool
idx = Index([])
result = idx.isin(values)
self.assertEqual(len(result), 0)
self.assertEqual(result.dtype, np.bool_)
def test_isin_nan(self):
self.assert_numpy_array_equal(
Index(['a', np.nan]).isin([np.nan]), [False, True])
self.assert_numpy_array_equal(
Index(['a', pd.NaT]).isin([pd.NaT]), [False, True])
self.assert_numpy_array_equal(
Index(['a', np.nan]).isin([float('nan')]), [False, False])
self.assert_numpy_array_equal(
Index(['a', np.nan]).isin([pd.NaT]), [False, False])
# Float64Index overrides isin, so must be checked separately
self.assert_numpy_array_equal(
Float64Index([1.0, np.nan]).isin([np.nan]), [False, True])
self.assert_numpy_array_equal(
Float64Index([1.0, np.nan]).isin([float('nan')]), [False, True])
self.assert_numpy_array_equal(
Float64Index([1.0, np.nan]).isin([pd.NaT]), [False, True])
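    # Editorial note (not from the original suite): the expectations above encode
    # that object-dtype isin only matches NaN when the probe is the same np.nan
    # object, while Float64Index.isin compares numerically and treats any NaN-like
    # probe (np.nan, float('nan'), pd.NaT) as matching its NaN entries.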
def test_isin_level_kwarg(self):
def check_idx(idx):
values = idx.tolist()[-2:] + ['nonexisting']
expected = np.array([False, False, True, True])
self.assert_numpy_array_equal(expected, idx.isin(values, level=0))
self.assert_numpy_array_equal(expected, idx.isin(values, level=-1))
self.assertRaises(IndexError, idx.isin, values, level=1)
self.assertRaises(IndexError, idx.isin, values, level=10)
self.assertRaises(IndexError, idx.isin, values, level=-2)
self.assertRaises(KeyError, idx.isin, values, level=1.0)
self.assertRaises(KeyError, idx.isin, values, level='foobar')
idx.name = 'foobar'
self.assert_numpy_array_equal(expected,
idx.isin(values, level='foobar'))
self.assertRaises(KeyError, idx.isin, values, level='xyzzy')
self.assertRaises(KeyError, idx.isin, values, level=np.nan)
check_idx(Index(['qux', 'baz', 'foo', 'bar']))
# Float64Index overrides isin, so must be checked separately
check_idx(Float64Index([1.0, 2.0, 3.0, 4.0]))
def test_boolean_cmp(self):
values = [1, 2, 3, 4]
idx = Index(values)
res = (idx == values)
self.assert_numpy_array_equal(res,np.array([True,True,True,True],dtype=bool))
def test_get_level_values(self):
result = self.strIndex.get_level_values(0)
self.assertTrue(result.equals(self.strIndex))
def test_slice_keep_name(self):
idx = Index(['a', 'b'], name='asdf')
self.assertEqual(idx.name, idx[1:].name)
def test_join_self(self):
# instance attributes of the form self.<name>Index
indices = 'unicode', 'str', 'date', 'int', 'float'
kinds = 'outer', 'inner', 'left', 'right'
for index_kind in indices:
res = getattr(self, '{0}Index'.format(index_kind))
for kind in kinds:
joined = res.join(res, how=kind)
self.assertIs(res, joined)
def test_indexing_doesnt_change_class(self):
idx = Index([1, 2, 3, 'a', 'b', 'c'])
self.assertTrue(idx[1:3].identical(
pd.Index([2, 3], dtype=np.object_)))
self.assertTrue(idx[[0,1]].identical(
pd.Index([1, 2], dtype=np.object_)))
def test_outer_join_sort(self):
left_idx = Index(np.random.permutation(15))
right_idx = tm.makeDateIndex(10)
with tm.assert_produces_warning(RuntimeWarning):
joined = left_idx.join(right_idx, how='outer')
# right_idx in this case because DatetimeIndex has join precedence over
# Int64Index
expected = right_idx.astype(object).union(left_idx.astype(object))
tm.assert_index_equal(joined, expected)
def test_nan_first_take_datetime(self):
idx = Index([pd.NaT, Timestamp('20130101'), Timestamp('20130102')])
res = idx.take([-1, 0, 1])
exp = Index([idx[-1], idx[0], idx[1]])
tm.assert_index_equal(res, exp)
def test_reindex_preserves_name_if_target_is_list_or_ndarray(self):
# GH6552
idx = pd.Index([0, 1, 2])
dt_idx = pd.date_range('20130101', periods=3)
idx.name = None
self.assertEqual(idx.reindex([])[0].name, None)
self.assertEqual(idx.reindex(np.array([]))[0].name, None)
self.assertEqual(idx.reindex(idx.tolist())[0].name, None)
self.assertEqual(idx.reindex(idx.tolist()[:-1])[0].name, None)
self.assertEqual(idx.reindex(idx.values)[0].name, None)
self.assertEqual(idx.reindex(idx.values[:-1])[0].name, None)
# Must preserve name even if dtype changes.
self.assertEqual(idx.reindex(dt_idx.values)[0].name, None)
self.assertEqual(idx.reindex(dt_idx.tolist())[0].name, None)
idx.name = 'foobar'
self.assertEqual(idx.reindex([])[0].name, 'foobar')
self.assertEqual(idx.reindex(np.array([]))[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.tolist())[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.tolist()[:-1])[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.values)[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.values[:-1])[0].name, 'foobar')
# Must preserve name even if dtype changes.
self.assertEqual(idx.reindex(dt_idx.values)[0].name, 'foobar')
self.assertEqual(idx.reindex(dt_idx.tolist())[0].name, 'foobar')
def test_reindex_preserves_type_if_target_is_empty_list_or_array(self):
# GH7774
idx = pd.Index(list('abc'))
def get_reindex_type(target):
return idx.reindex(target)[0].dtype.type
self.assertEqual(get_reindex_type([]), np.object_)
self.assertEqual(get_reindex_type(np.array([])), np.object_)
self.assertEqual(get_reindex_type(np.array([], dtype=np.int64)),
np.object_)
def test_reindex_doesnt_preserve_type_if_target_is_empty_index(self):
# GH7774
idx = pd.Index(list('abc'))
def get_reindex_type(target):
return idx.reindex(target)[0].dtype.type
self.assertEqual(get_reindex_type(pd.Int64Index([])), np.int64)
self.assertEqual(get_reindex_type(pd.Float64Index([])), np.float64)
self.assertEqual(get_reindex_type(pd.DatetimeIndex([])), np.datetime64)
reindexed = idx.reindex(pd.MultiIndex([pd.Int64Index([]),
pd.Float64Index([])],
[[], []]))[0]
self.assertEqual(reindexed.levels[0].dtype.type, np.int64)
self.assertEqual(reindexed.levels[1].dtype.type, np.float64)
class Numeric(Base):
def test_numeric_compat(self):
idx = self._holder(np.arange(5,dtype='int64'))
        didx = self._holder(np.arange(5, dtype='int64') ** 2)
result = idx * 1
tm.assert_index_equal(result, idx)
result = 1 * idx
tm.assert_index_equal(result, idx)
result = idx * idx
tm.assert_index_equal(result, didx)
result = idx / 1
tm.assert_index_equal(result, idx)
result = idx // 1
tm.assert_index_equal(result, idx)
result = idx * np.array(5,dtype='int64')
tm.assert_index_equal(result, self._holder(np.arange(5,dtype='int64')*5))
result = idx * np.arange(5,dtype='int64')
tm.assert_index_equal(result, didx)
result = idx * Series(np.arange(5,dtype='int64'))
tm.assert_index_equal(result, didx)
result = idx * Series(np.arange(5,dtype='float64')+0.1)
tm.assert_index_equal(result,
Float64Index(np.arange(5,dtype='float64')*(np.arange(5,dtype='float64')+0.1)))
# invalid
self.assertRaises(TypeError, lambda : idx * date_range('20130101',periods=5))
self.assertRaises(ValueError, lambda : idx * self._holder(np.arange(3)))
self.assertRaises(ValueError, lambda : idx * np.array([1,2]))
def test_explicit_conversions(self):
# GH 8608
        # add/sub are overridden explicitly for Float/Int Index
idx = self._holder(np.arange(5,dtype='int64'))
# float conversions
arr = np.arange(5,dtype='int64')*3.2
expected = Float64Index(arr)
fidx = idx * 3.2
tm.assert_index_equal(fidx,expected)
fidx = 3.2 * idx
tm.assert_index_equal(fidx,expected)
# interops with numpy arrays
expected = Float64Index(arr)
a = np.zeros(5,dtype='float64')
result = fidx - a
tm.assert_index_equal(result,expected)
expected = Float64Index(-arr)
a = np.zeros(5,dtype='float64')
result = a - fidx
tm.assert_index_equal(result,expected)
def test_ufunc_compat(self):
idx = self._holder(np.arange(5,dtype='int64'))
result = np.sin(idx)
expected = Float64Index(np.sin(np.arange(5,dtype='int64')))
tm.assert_index_equal(result, expected)
class TestFloat64Index(Numeric, tm.TestCase):
_holder = Float64Index
_multiprocess_can_split_ = True
def setUp(self):
self.mixed = Float64Index([1.5, 2, 3, 4, 5])
self.float = Float64Index(np.arange(5) * 2.5)
def create_index(self):
return Float64Index(np.arange(5,dtype='float64'))
def test_hash_error(self):
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(self.float).__name__):
hash(self.float)
def test_repr_roundtrip(self):
for ind in (self.mixed, self.float):
tm.assert_index_equal(eval(repr(ind)), ind)
def check_is_index(self, i):
self.assertIsInstance(i, Index)
self.assertNotIsInstance(i, Float64Index)
def check_coerce(self, a, b, is_float_index=True):
self.assertTrue(a.equals(b))
if is_float_index:
self.assertIsInstance(b, Float64Index)
else:
self.check_is_index(b)
def test_constructor(self):
# explicit construction
index = Float64Index([1,2,3,4,5])
self.assertIsInstance(index, Float64Index)
self.assertTrue((index.values == np.array([1,2,3,4,5],dtype='float64')).all())
index = Float64Index(np.array([1,2,3,4,5]))
self.assertIsInstance(index, Float64Index)
index = Float64Index([1.,2,3,4,5])
self.assertIsInstance(index, Float64Index)
index = Float64Index(np.array([1.,2,3,4,5]))
self.assertIsInstance(index, Float64Index)
self.assertEqual(index.dtype, float)
index = Float64Index(np.array([1.,2,3,4,5]),dtype=np.float32)
self.assertIsInstance(index, Float64Index)
self.assertEqual(index.dtype, np.float64)
index = Float64Index(np.array([1,2,3,4,5]),dtype=np.float32)
self.assertIsInstance(index, Float64Index)
self.assertEqual(index.dtype, np.float64)
# nan handling
result = Float64Index([np.nan, np.nan])
self.assertTrue(pd.isnull(result.values).all())
result = Float64Index(np.array([np.nan]))
self.assertTrue(pd.isnull(result.values).all())
result = Index(np.array([np.nan]))
self.assertTrue(pd.isnull(result.values).all())
def test_constructor_invalid(self):
# invalid
self.assertRaises(TypeError, Float64Index, 0.)
self.assertRaises(TypeError, Float64Index, ['a','b',0.])
self.assertRaises(TypeError, Float64Index, [Timestamp('20130101')])
def test_constructor_coerce(self):
self.check_coerce(self.mixed,Index([1.5, 2, 3, 4, 5]))
self.check_coerce(self.float,Index(np.arange(5) * 2.5))
self.check_coerce(self.float,Index(np.array(np.arange(5) * 2.5, dtype=object)))
def test_constructor_explicit(self):
# these don't auto convert
self.check_coerce(self.float,Index((np.arange(5) * 2.5), dtype=object),
is_float_index=False)
self.check_coerce(self.mixed,Index([1.5, 2, 3, 4, 5],dtype=object),
is_float_index=False)
def test_astype(self):
result = self.float.astype(object)
self.assertTrue(result.equals(self.float))
self.assertTrue(self.float.equals(result))
self.check_is_index(result)
i = self.mixed.copy()
i.name = 'foo'
result = i.astype(object)
self.assertTrue(result.equals(i))
self.assertTrue(i.equals(result))
self.check_is_index(result)
def test_equals(self):
i = Float64Index([1.0,2.0])
self.assertTrue(i.equals(i))
self.assertTrue(i.identical(i))
i2 = Float64Index([1.0,2.0])
self.assertTrue(i.equals(i2))
i = Float64Index([1.0,np.nan])
self.assertTrue(i.equals(i))
self.assertTrue(i.identical(i))
i2 = Float64Index([1.0,np.nan])
self.assertTrue(i.equals(i2))
def test_get_loc_na(self):
idx = Float64Index([np.nan, 1, 2])
self.assertEqual(idx.get_loc(1), 1)
self.assertEqual(idx.get_loc(np.nan), 0)
idx = Float64Index([np.nan, 1, np.nan])
self.assertEqual(idx.get_loc(1), 1)
self.assertRaises(KeyError, idx.slice_locs, np.nan)
def test_contains_nans(self):
i = Float64Index([1.0, 2.0, np.nan])
self.assertTrue(np.nan in i)
def test_contains_not_nans(self):
i = Float64Index([1.0, 2.0, np.nan])
self.assertTrue(1.0 in i)
def test_doesnt_contain_all_the_things(self):
i = Float64Index([np.nan])
self.assertFalse(i.isin([0]).item())
self.assertFalse(i.isin([1]).item())
self.assertTrue(i.isin([np.nan]).item())
def test_nan_multiple_containment(self):
i = Float64Index([1.0, np.nan])
np.testing.assert_array_equal(i.isin([1.0]), np.array([True, False]))
np.testing.assert_array_equal(i.isin([2.0, np.pi]),
np.array([False, False]))
np.testing.assert_array_equal(i.isin([np.nan]),
np.array([False, True]))
np.testing.assert_array_equal(i.isin([1.0, np.nan]),
np.array([True, True]))
i = Float64Index([1.0, 2.0])
np.testing.assert_array_equal(i.isin([np.nan]),
np.array([False, False]))
def test_astype_from_object(self):
index = Index([1.0, np.nan, 0.2], dtype='object')
result = index.astype(float)
expected = Float64Index([1.0, np.nan, 0.2])
tm.assert_equal(result.dtype, expected.dtype)
tm.assert_index_equal(result, expected)
class TestInt64Index(Numeric, tm.TestCase):
_holder = Int64Index
_multiprocess_can_split_ = True
def setUp(self):
self.index = Int64Index(np.arange(0, 20, 2))
def create_index(self):
return Int64Index(np.arange(5,dtype='int64'))
def test_too_many_names(self):
def testit():
self.index.names = ["roger", "harold"]
assertRaisesRegexp(ValueError, "^Length", testit)
def test_constructor(self):
# pass list, coerce fine
index = Int64Index([-5, 0, 1, 2])
expected = np.array([-5, 0, 1, 2], dtype=np.int64)
self.assert_numpy_array_equal(index, expected)
# from iterable
index = Int64Index(iter([-5, 0, 1, 2]))
self.assert_numpy_array_equal(index, expected)
# scalar raise Exception
self.assertRaises(TypeError, Int64Index, 5)
# copy
arr = self.index.values
new_index = Int64Index(arr, copy=True)
self.assert_numpy_array_equal(new_index, self.index)
val = arr[0] + 3000
# this should not change index
arr[0] = val
self.assertNotEqual(new_index[0], val)
def test_constructor_corner(self):
arr = np.array([1, 2, 3, 4], dtype=object)
index = Int64Index(arr)
self.assertEqual(index.values.dtype, np.int64)
self.assertTrue(index.equals(arr))
# preventing casting
arr = np.array([1, '2', 3, '4'], dtype=object)
with tm.assertRaisesRegexp(TypeError, 'casting'):
Int64Index(arr)
arr_with_floats = [0, 2, 3, 4, 5, 1.25, 3, -1]
with tm.assertRaisesRegexp(TypeError, 'casting'):
Int64Index(arr_with_floats)
def test_hash_error(self):
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(self.index).__name__):
hash(self.index)
def test_copy(self):
i = Int64Index([], name='Foo')
i_copy = i.copy()
self.assertEqual(i_copy.name, 'Foo')
def test_view(self):
i = Int64Index([], name='Foo')
i_view = i.view()
self.assertEqual(i_view.name, 'Foo')
def test_coerce_list(self):
# coerce things
arr = Index([1, 2, 3, 4])
tm.assert_isinstance(arr, Int64Index)
# but not if explicit dtype passed
arr = Index([1, 2, 3, 4], dtype=object)
tm.assert_isinstance(arr, Index)
def test_dtype(self):
self.assertEqual(self.index.dtype, np.int64)
def test_is_monotonic(self):
self.assertTrue(self.index.is_monotonic)
self.assertTrue(self.index.is_monotonic_increasing)
self.assertFalse(self.index.is_monotonic_decreasing)
index = Int64Index([4, 3, 2, 1])
self.assertFalse(index.is_monotonic)
self.assertTrue(index.is_monotonic_decreasing)
index = Int64Index([1])
self.assertTrue(index.is_monotonic)
self.assertTrue(index.is_monotonic_increasing)
self.assertTrue(index.is_monotonic_decreasing)
def test_is_monotonic_na(self):
examples = [Index([np.nan]),
Index([np.nan, 1]),
Index([1, 2, np.nan]),
Index(['a', 'b', np.nan]),
pd.to_datetime(['NaT']),
pd.to_datetime(['NaT', '2000-01-01']),
pd.to_datetime(['2000-01-01', 'NaT', '2000-01-02']),
pd.to_timedelta(['1 day', 'NaT']),
]
for index in examples:
self.assertFalse(index.is_monotonic_increasing)
self.assertFalse(index.is_monotonic_decreasing)
def test_equals(self):
same_values = Index(self.index, dtype=object)
self.assertTrue(self.index.equals(same_values))
self.assertTrue(same_values.equals(self.index))
def test_identical(self):
i = Index(self.index.copy())
self.assertTrue(i.identical(self.index))
same_values_different_type = Index(i, dtype=object)
self.assertFalse(i.identical(same_values_different_type))
i = self.index.copy(dtype=object)
i = i.rename('foo')
same_values = Index(i, dtype=object)
self.assertTrue(same_values.identical(self.index.copy(dtype=object)))
self.assertFalse(i.identical(self.index))
self.assertTrue(Index(same_values, name='foo', dtype=object
).identical(i))
self.assertFalse(
self.index.copy(dtype=object)
.identical(self.index.copy(dtype='int64')))
def test_get_indexer(self):
target = Int64Index(np.arange(10))
indexer = self.index.get_indexer(target)
expected = np.array([0, -1, 1, -1, 2, -1, 3, -1, 4, -1])
self.assert_numpy_array_equal(indexer, expected)
def test_get_indexer_pad(self):
target = Int64Index(np.arange(10))
indexer = self.index.get_indexer(target, method='pad')
expected = np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4])
self.assert_numpy_array_equal(indexer, expected)
def test_get_indexer_backfill(self):
target = Int64Index(np.arange(10))
indexer = self.index.get_indexer(target, method='backfill')
expected = np.array([0, 1, 1, 2, 2, 3, 3, 4, 4, 5])
self.assert_numpy_array_equal(indexer, expected)
def test_join_outer(self):
other = Int64Index([7, 12, 25, 1, 2, 5])
other_mono = Int64Index([1, 2, 5, 7, 12, 25])
# not monotonic
# guarantee of sortedness
res, lidx, ridx = self.index.join(other, how='outer',
return_indexers=True)
noidx_res = self.index.join(other, how='outer')
self.assertTrue(res.equals(noidx_res))
eres = Int64Index([0, 1, 2, 4, 5, 6, 7, 8, 10, 12, 14, 16, 18, 25])
elidx = np.array([0, -1, 1, 2, -1, 3, -1, 4, 5, 6, 7, 8, 9, -1],
dtype=np.int64)
eridx = np.array([-1, 3, 4, -1, 5, -1, 0, -1, -1, 1, -1, -1, -1, 2],
dtype=np.int64)
tm.assert_isinstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assert_numpy_array_equal(ridx, eridx)
# monotonic
res, lidx, ridx = self.index.join(other_mono, how='outer',
return_indexers=True)
noidx_res = self.index.join(other_mono, how='outer')
self.assertTrue(res.equals(noidx_res))
eridx = np.array([-1, 0, 1, -1, 2, -1, 3, -1, -1, 4, -1, -1, -1, 5],
dtype=np.int64)
tm.assert_isinstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assert_numpy_array_equal(ridx, eridx)
def test_join_inner(self):
other = Int64Index([7, 12, 25, 1, 2, 5])
other_mono = Int64Index([1, 2, 5, 7, 12, 25])
# not monotonic
res, lidx, ridx = self.index.join(other, how='inner',
return_indexers=True)
# no guarantee of sortedness, so sort for comparison purposes
ind = res.argsort()
res = res.take(ind)
lidx = lidx.take(ind)
ridx = ridx.take(ind)
eres = Int64Index([2, 12])
elidx = np.array([1, 6])
eridx = np.array([4, 1])
tm.assert_isinstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assert_numpy_array_equal(ridx, eridx)
# monotonic
res, lidx, ridx = self.index.join(other_mono, how='inner',
return_indexers=True)
res2 = self.index.intersection(other_mono)
self.assertTrue(res.equals(res2))
eridx = np.array([1, 4])
tm.assert_isinstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assert_numpy_array_equal(ridx, eridx)
def test_join_left(self):
other = Int64Index([7, 12, 25, 1, 2, 5])
other_mono = Int64Index([1, 2, 5, 7, 12, 25])
# not monotonic
res, lidx, ridx = self.index.join(other, how='left',
return_indexers=True)
eres = self.index
eridx = np.array([-1, 4, -1, -1, -1, -1, 1, -1, -1, -1],
dtype=np.int64)
tm.assert_isinstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assertIsNone(lidx)
self.assert_numpy_array_equal(ridx, eridx)
# monotonic
res, lidx, ridx = self.index.join(other_mono, how='left',
return_indexers=True)
eridx = np.array([-1, 1, -1, -1, -1, -1, 4, -1, -1, -1],
dtype=np.int64)
tm.assert_isinstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assertIsNone(lidx)
self.assert_numpy_array_equal(ridx, eridx)
# non-unique
"""
idx = Index([1,1,2,5])
idx2 = Index([1,2,5,7,9])
res, lidx, ridx = idx2.join(idx, how='left', return_indexers=True)
eres = idx2
eridx = np.array([0, 2, 3, -1, -1])
elidx = np.array([0, 1, 2, 3, 4])
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assert_numpy_array_equal(ridx, eridx)
"""
def test_join_right(self):
other = Int64Index([7, 12, 25, 1, 2, 5])
other_mono = Int64Index([1, 2, 5, 7, 12, 25])
# not monotonic
res, lidx, ridx = self.index.join(other, how='right',
return_indexers=True)
eres = other
elidx = np.array([-1, 6, -1, -1, 1, -1],
dtype=np.int64)
tm.assert_isinstance(other, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assertIsNone(ridx)
# monotonic
res, lidx, ridx = self.index.join(other_mono, how='right',
return_indexers=True)
eres = other_mono
elidx = np.array([-1, 1, -1, -1, 6, -1],
dtype=np.int64)
tm.assert_isinstance(other, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assertIsNone(ridx)
# non-unique
"""
idx = Index([1,1,2,5])
idx2 = Index([1,2,5,7,9])
res, lidx, ridx = idx.join(idx2, how='right', return_indexers=True)
eres = idx2
elidx = np.array([0, 2, 3, -1, -1])
eridx = np.array([0, 1, 2, 3, 4])
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assert_numpy_array_equal(ridx, eridx)
idx = Index([1,1,2,5])
idx2 = Index([1,2,5,9,7])
res = idx.join(idx2, how='right', return_indexers=False)
eres = idx2
self.assert(res.equals(eres))
"""
def test_join_non_int_index(self):
other = Index([3, 6, 7, 8, 10], dtype=object)
outer = self.index.join(other, how='outer')
outer2 = other.join(self.index, how='outer')
expected = Index([0, 2, 3, 4, 6, 7, 8, 10, 12, 14,
16, 18], dtype=object)
self.assertTrue(outer.equals(outer2))
self.assertTrue(outer.equals(expected))
inner = self.index.join(other, how='inner')
inner2 = other.join(self.index, how='inner')
expected = Index([6, 8, 10], dtype=object)
self.assertTrue(inner.equals(inner2))
self.assertTrue(inner.equals(expected))
left = self.index.join(other, how='left')
self.assertTrue(left.equals(self.index))
left2 = other.join(self.index, how='left')
self.assertTrue(left2.equals(other))
right = self.index.join(other, how='right')
self.assertTrue(right.equals(other))
right2 = other.join(self.index, how='right')
self.assertTrue(right2.equals(self.index))
def test_join_non_unique(self):
left = Index([4, 4, 3, 3])
joined, lidx, ridx = left.join(left, return_indexers=True)
exp_joined = Index([3, 3, 3, 3, 4, 4, 4, 4])
self.assertTrue(joined.equals(exp_joined))
exp_lidx = np.array([2, 2, 3, 3, 0, 0, 1, 1], dtype=np.int64)
self.assert_numpy_array_equal(lidx, exp_lidx)
exp_ridx = np.array([2, 3, 2, 3, 0, 1, 0, 1], dtype=np.int64)
self.assert_numpy_array_equal(ridx, exp_ridx)
def test_join_self(self):
kinds = 'outer', 'inner', 'left', 'right'
for kind in kinds:
joined = self.index.join(self.index, how=kind)
self.assertIs(self.index, joined)
def test_intersection(self):
other = Index([1, 2, 3, 4, 5])
result = self.index.intersection(other)
expected = np.sort(np.intersect1d(self.index.values, other.values))
self.assert_numpy_array_equal(result, expected)
result = other.intersection(self.index)
expected = np.sort(np.asarray(np.intersect1d(self.index.values,
other.values)))
self.assert_numpy_array_equal(result, expected)
def test_intersect_str_dates(self):
dt_dates = [datetime(2012, 2, 9), datetime(2012, 2, 22)]
i1 = Index(dt_dates, dtype=object)
i2 = Index(['aa'], dtype=object)
res = i2.intersection(i1)
self.assertEqual(len(res), 0)
def test_union_noncomparable(self):
from datetime import datetime, timedelta
# corner case, non-Int64Index
now = datetime.now()
other = Index([now + timedelta(i) for i in range(4)], dtype=object)
result = self.index.union(other)
expected = np.concatenate((self.index, other))
self.assert_numpy_array_equal(result, expected)
result = other.union(self.index)
expected = np.concatenate((other, self.index))
self.assert_numpy_array_equal(result, expected)
def test_cant_or_shouldnt_cast(self):
# can't
data = ['foo', 'bar', 'baz']
self.assertRaises(TypeError, Int64Index, data)
# shouldn't
data = ['0', '1', '2']
self.assertRaises(TypeError, Int64Index, data)
def test_view_Index(self):
self.index.view(Index)
def test_prevent_casting(self):
result = self.index.astype('O')
self.assertEqual(result.dtype, np.object_)
def test_take_preserve_name(self):
index = Int64Index([1, 2, 3, 4], name='foo')
taken = index.take([3, 0, 1])
self.assertEqual(index.name, taken.name)
def test_int_name_format(self):
from pandas import Series, DataFrame
index = Index(['a', 'b', 'c'], name=0)
s = Series(lrange(3), index)
df = DataFrame(lrange(3), index=index)
repr(s)
repr(df)
def test_print_unicode_columns(self):
df = pd.DataFrame(
{u("\u05d0"): [1, 2, 3], "\u05d1": [4, 5, 6], "c": [7, 8, 9]})
repr(df.columns) # should not raise UnicodeDecodeError
def test_repr_summary(self):
with cf.option_context('display.max_seq_items', 10):
r = repr(pd.Index(np.arange(1000)))
self.assertTrue(len(r) < 100)
self.assertTrue("..." in r)
def test_repr_roundtrip(self):
tm.assert_index_equal(eval(repr(self.index)), self.index)
def test_unicode_string_with_unicode(self):
idx = Index(lrange(1000))
if compat.PY3:
str(idx)
else:
compat.text_type(idx)
def test_bytestring_with_unicode(self):
idx = Index(lrange(1000))
if compat.PY3:
bytes(idx)
else:
str(idx)
def test_slice_keep_name(self):
idx = Int64Index([1, 2], name='asdf')
self.assertEqual(idx.name, idx[1:].name)
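def _join_indexer_sketch():
    # Illustrative sketch, not part of the test suite above: Index.join with
    # return_indexers=True yields (joined_index, left_indexer, right_indexer),
    # with -1 marking positions that have no match -- the contract the join
    # tests above assert on. The example values are assumptions chosen only
    # to make the indexers easy to read.
    import pandas as pd
    left = pd.Index([0, 2, 4, 6, 8])
    right = pd.Index([2, 3, 6, 7])
    joined, lidx, ridx = left.join(right, how='inner', return_indexers=True)
    # joined -> [2, 6]; lidx -> [1, 3] (positions in left); ridx -> [0, 2]
    return joined, lidx, ridx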
class TestDatetimeIndex(Base, tm.TestCase):
_holder = DatetimeIndex
_multiprocess_can_split_ = True
def create_index(self):
return date_range('20130101',periods=5)
def test_pickle_compat_construction(self):
pass
def test_numeric_compat(self):
super(TestDatetimeIndex, self).test_numeric_compat()
if not compat.PY3_2:
for f in [lambda : np.timedelta64(1, 'D').astype('m8[ns]') * pd.date_range('2000-01-01', periods=3),
lambda : pd.date_range('2000-01-01', periods=3) * np.timedelta64(1, 'D').astype('m8[ns]') ]:
self.assertRaises(TypeError, f)
def test_roundtrip_pickle_with_tz(self):
# GH 8367
# round-trip of timezone
index=date_range('20130101',periods=3,tz='US/Eastern',name='foo')
unpickled = self.round_trip_pickle(index)
self.assertTrue(index.equals(unpickled))
def test_reindex_preserves_tz_if_target_is_empty_list_or_array(self):
# GH7774
index = date_range('20130101', periods=3, tz='US/Eastern')
self.assertEqual(str(index.reindex([])[0].tz), 'US/Eastern')
self.assertEqual(str(index.reindex(np.array([]))[0].tz), 'US/Eastern')
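def _tz_reindex_sketch():
    # Illustrative sketch of the behaviour asserted in
    # test_reindex_preserves_tz_if_target_is_empty_list_or_array above:
    # reindexing a tz-aware DatetimeIndex against an empty target keeps its tz.
    import pandas as pd
    idx = pd.date_range('20130101', periods=3, tz='US/Eastern')
    reindexed, _indexer = idx.reindex([])
    return str(reindexed.tz)  # 'US/Eastern'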
class TestPeriodIndex(Base, tm.TestCase):
_holder = PeriodIndex
_multiprocess_can_split_ = True
def create_index(self):
return period_range('20130101',periods=5,freq='D')
def test_pickle_compat_construction(self):
pass
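def _period_range_sketch():
    # Illustrative sketch of the fixture TestPeriodIndex.create_index returns
    # above: a daily PeriodIndex, shown here with its timestamp conversion.
    import pandas as pd
    pidx = pd.period_range('20130101', periods=5, freq='D')
    return pidx.to_timestamp()  # DatetimeIndex of each period's start time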
class TestTimedeltaIndex(Base, tm.TestCase):
_holder = TimedeltaIndex
_multiprocess_can_split_ = True
def create_index(self):
return pd.to_timedelta(range(5),unit='d') + pd.offsets.Hour(1)
def test_numeric_compat(self):
idx = self._holder(np.arange(5,dtype='int64'))
didx = self._holder(np.arange(5, dtype='int64') ** 2)
result = idx * 1
tm.assert_index_equal(result, idx)
result = 1 * idx
tm.assert_index_equal(result, idx)
result = idx / 1
tm.assert_index_equal(result, idx)
result = idx // 1
tm.assert_index_equal(result, idx)
result = idx * np.array(5,dtype='int64')
tm.assert_index_equal(result, self._holder(np.arange(5,dtype='int64')*5))
result = idx * np.arange(5,dtype='int64')
tm.assert_index_equal(result, didx)
result = idx * Series(np.arange(5,dtype='int64'))
tm.assert_index_equal(result, didx)
result = idx * Series(np.arange(5,dtype='float64')+0.1)
tm.assert_index_equal(result,
Float64Index(np.arange(5,dtype='float64')*(np.arange(5,dtype='float64')+0.1)))
# invalid
self.assertRaises(TypeError, lambda : idx * idx)
self.assertRaises(ValueError, lambda : idx * self._holder(np.arange(3)))
self.assertRaises(ValueError, lambda : idx * np.array([1,2]))
def test_pickle_compat_construction(self):
pass
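def _timedelta_numeric_sketch():
    # Illustrative sketch of the arithmetic exercised by
    # TestTimedeltaIndex.test_numeric_compat above: a TimedeltaIndex scales
    # elementwise when multiplied by an integer array, while multiplying two
    # TimedeltaIndexes is rejected (as the assertRaises checks verify).
    import numpy as np
    import pandas as pd
    tdi = pd.to_timedelta(range(5), unit='d')
    return tdi * np.arange(5, dtype='int64')  # 0, 1, 4, 9, 16 days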
class TestMultiIndex(Base, tm.TestCase):
_holder = MultiIndex
_multiprocess_can_split_ = True
_compat_props = ['shape', 'ndim', 'size', 'itemsize']
def setUp(self):
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
self.index_names = ['first', 'second']
self.index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=self.index_names, verify_integrity=False)
def create_index(self):
return self.index
def test_boolean_context_compat2(self):
# boolean context compat
# GH7897
i1 = MultiIndex.from_tuples([('A', 1), ('A', 2)])
i2 = MultiIndex.from_tuples([('A', 1), ('A', 3)])
common = i1.intersection(i2)
def f():
if common:
pass
tm.assertRaisesRegexp(ValueError,'The truth value of a',f)
def test_labels_dtypes(self):
# GH 8456
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
self.assertTrue(i.labels[0].dtype == 'int8')
self.assertTrue(i.labels[1].dtype == 'int8')
i = MultiIndex.from_product([['a'],range(40)])
self.assertTrue(i.labels[1].dtype == 'int8')
i = MultiIndex.from_product([['a'],range(400)])
self.assertTrue(i.labels[1].dtype == 'int16')
i = MultiIndex.from_product([['a'],range(40000)])
self.assertTrue(i.labels[1].dtype == 'int32')
i = pd.MultiIndex.from_product([['a'],range(1000)])
self.assertTrue((i.labels[0]>=0).all())
self.assertTrue((i.labels[1]>=0).all())
def test_hash_error(self):
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(self.index).__name__):
hash(self.index)
def test_set_names_and_rename(self):
# so long as these are synonyms, we don't need to test set_names
self.assertEqual(self.index.rename, self.index.set_names)
new_names = [name + "SUFFIX" for name in self.index_names]
ind = self.index.set_names(new_names)
self.assertEqual(self.index.names, self.index_names)
self.assertEqual(ind.names, new_names)
with assertRaisesRegexp(ValueError, "^Length"):
ind.set_names(new_names + new_names)
new_names2 = [name + "SUFFIX2" for name in new_names]
res = ind.set_names(new_names2, inplace=True)
self.assertIsNone(res)
self.assertEqual(ind.names, new_names2)
# set names for specific level (# GH7792)
ind = self.index.set_names(new_names[0], level=0)
self.assertEqual(self.index.names, self.index_names)
self.assertEqual(ind.names, [new_names[0], self.index_names[1]])
res = ind.set_names(new_names2[0], level=0, inplace=True)
self.assertIsNone(res)
self.assertEqual(ind.names, [new_names2[0], self.index_names[1]])
# set names for multiple levels
ind = self.index.set_names(new_names, level=[0, 1])
self.assertEqual(self.index.names, self.index_names)
self.assertEqual(ind.names, new_names)
res = ind.set_names(new_names2, level=[0, 1], inplace=True)
self.assertIsNone(res)
self.assertEqual(ind.names, new_names2)
def test_set_levels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
levels, labels = self.index.levels, self.index.labels
new_levels = [[lev + 'a' for lev in level] for level in levels]
def assert_matching(actual, expected):
# avoid specifying internal representation
# as much as possible
self.assertEqual(len(actual), len(expected))
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp)
assert_almost_equal(act, exp)
# level changing [w/o mutation]
ind2 = self.index.set_levels(new_levels)
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.levels, new_levels)
# level changing specific level [w/o mutation]
ind2 = self.index.set_levels(new_levels[0], level=0)
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.set_levels(new_levels[1], level=1)
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/o mutation]
ind2 = self.index.set_levels(new_levels, level=[0, 1])
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[0], level=0, inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[1], level=1, inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, level=[0, 1], inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
def test_set_labels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
levels, labels = self.index.levels, self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
def assert_matching(actual, expected):
# avoid specifying internal representation
# as much as possible
self.assertEqual(len(actual), len(expected))
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp)
assert_almost_equal(act, exp)
# label changing [w/o mutation]
ind2 = self.index.set_labels(new_labels)
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.labels, new_labels)
# label changing specific level [w/o mutation]
ind2 = self.index.set_labels(new_labels[0], level=0)
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.set_labels(new_labels[1], level=1)
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/o mutation]
ind2 = self.index.set_labels(new_labels, level=[0, 1])
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[0], level=0, inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[1], level=1, inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, level=[0, 1], inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
def test_set_levels_labels_names_bad_input(self):
levels, labels = self.index.levels, self.index.labels
names = self.index.names
with tm.assertRaisesRegexp(ValueError, 'Length of levels'):
self.index.set_levels([levels[0]])
with tm.assertRaisesRegexp(ValueError, 'Length of labels'):
self.index.set_labels([labels[0]])
with tm.assertRaisesRegexp(ValueError, 'Length of names'):
self.index.set_names([names[0]])
# shouldn't scalar data error, instead should demand list-like
with tm.assertRaisesRegexp(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0])
# shouldn't scalar data error, instead should demand list-like
with tm.assertRaisesRegexp(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0])
# shouldn't scalar data error, instead should demand list-like
with tm.assertRaisesRegexp(TypeError, 'list-like'):
self.index.set_names(names[0])
# should have equal lengths
with tm.assertRaisesRegexp(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0], level=[0, 1])
with tm.assertRaisesRegexp(TypeError, 'list-like'):
self.index.set_levels(levels, level=0)
# should have equal lengths
with tm.assertRaisesRegexp(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0], level=[0, 1])
with tm.assertRaisesRegexp(TypeError, 'list-like'):
self.index.set_labels(labels, level=0)
# should have equal lengths
with tm.assertRaisesRegexp(ValueError, 'Length of names'):
self.index.set_names(names[0], level=[0, 1])
with tm.assertRaisesRegexp(TypeError, 'string'):
self.index.set_names(names, level=0)
def test_metadata_immutable(self):
levels, labels = self.index.levels, self.index.labels
# shouldn't be able to set at either the top level or base level
mutable_regex = re.compile('does not support mutable operations')
with assertRaisesRegexp(TypeError, mutable_regex):
levels[0] = levels[0]
with assertRaisesRegexp(TypeError, mutable_regex):
levels[0][0] = levels[0][0]
# ditto for labels
with assertRaisesRegexp(TypeError, mutable_regex):
labels[0] = labels[0]
with assertRaisesRegexp(TypeError, mutable_regex):
labels[0][0] = labels[0][0]
# and for names
names = self.index.names
with assertRaisesRegexp(TypeError, mutable_regex):
names[0] = names[0]
def test_inplace_mutation_resets_values(self):
levels = [['a', 'b', 'c'], [4]]
levels2 = [[1, 2, 3], ['a']]
labels = [[0, 1, 0, 2, 2, 0], [0, 0, 0, 0, 0, 0]]
mi1 = MultiIndex(levels=levels, labels=labels)
mi2 = MultiIndex(levels=levels2, labels=labels)
vals = mi1.values.copy()
vals2 = mi2.values.copy()
self.assertIsNotNone(mi1._tuples)
# make sure level setting works
new_vals = mi1.set_levels(levels2).values
assert_almost_equal(vals2, new_vals)
# non-inplace doesn't kill _tuples [implementation detail]
assert_almost_equal(mi1._tuples, vals)
# and values is still same too
assert_almost_equal(mi1.values, vals)
# inplace should kill _tuples
mi1.set_levels(levels2, inplace=True)
assert_almost_equal(mi1.values, vals2)
# make sure label setting works too
labels2 = [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]
exp_values = np.array([(long(1), 'a')] * 6, dtype=object)
new_values = mi2.set_labels(labels2).values
# not inplace shouldn't change
assert_almost_equal(mi2._tuples, vals2)
# should have correct values
assert_almost_equal(exp_values, new_values)
# and again setting inplace should kill _tuples, etc
mi2.set_labels(labels2, inplace=True)
assert_almost_equal(mi2.values, new_values)
def test_copy_in_constructor(self):
levels = np.array(["a", "b", "c"])
labels = np.array([1, 1, 2, 0, 0, 1, 1])
val = labels[0]
mi = MultiIndex(levels=[levels, levels], labels=[labels, labels],
copy=True)
self.assertEqual(mi.labels[0][0], val)
labels[0] = 15
self.assertEqual(mi.labels[0][0], val)
val = levels[0]
levels[0] = "PANDA"
self.assertEqual(mi.levels[0][0], val)
def test_set_value_keeps_names(self):
# motivating example from #3742
lev1 = ['hans', 'hans', 'hans', 'grethe', 'grethe', 'grethe']
lev2 = ['1', '2', '3'] * 2
idx = pd.MultiIndex.from_arrays(
[lev1, lev2],
names=['Name', 'Number'])
df = pd.DataFrame(
np.random.randn(6, 4),
columns=['one', 'two', 'three', 'four'],
index=idx)
df = df.sortlevel()
self.assertIsNone(df.is_copy)
self.assertEqual(df.index.names, ('Name', 'Number'))
df = df.set_value(('grethe', '4'), 'one', 99.34)
self.assertIsNone(df.is_copy)
self.assertEqual(df.index.names, ('Name', 'Number'))
def test_names(self):
# names are assigned in __init__
names = self.index_names
level_names = [level.name for level in self.index.levels]
self.assertEqual(names, level_names)
# setting bad names on existing
index = self.index
assertRaisesRegexp(ValueError, "^Length of names", setattr, index,
"names", list(index.names) + ["third"])
assertRaisesRegexp(ValueError, "^Length of names", setattr, index,
"names", [])
# initializing with bad names (should always be equivalent)
major_axis, minor_axis = self.index.levels
major_labels, minor_labels = self.index.labels
assertRaisesRegexp(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first'])
assertRaisesRegexp(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first', 'second', 'third'])
# names are assigned
index.names = ["a", "b"]
ind_names = list(index.names)
level_names = [level.name for level in index.levels]
self.assertEqual(ind_names, level_names)
def test_reference_duplicate_name(self):
idx = MultiIndex.from_tuples([('a', 'b'), ('c', 'd')], names=['x', 'x'])
self.assertTrue(idx._reference_duplicate_name('x'))
idx = MultiIndex.from_tuples([('a', 'b'), ('c', 'd')], names=['x', 'y'])
self.assertFalse(idx._reference_duplicate_name('x'))
def test_astype(self):
expected = self.index.copy()
actual = self.index.astype('O')
assert_copy(actual.levels, expected.levels)
assert_copy(actual.labels, expected.labels)
self.check_level_names(actual, expected.names)
with assertRaisesRegexp(TypeError, "^Setting.*dtype.*object"):
self.index.astype(np.dtype(int))
def test_constructor_single_level(self):
single_level = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]],
names=['first'])
tm.assert_isinstance(single_level, Index)
self.assertNotIsInstance(single_level, MultiIndex)
self.assertEqual(single_level.name, 'first')
single_level = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]])
self.assertIsNone(single_level.name)
def test_constructor_no_levels(self):
assertRaisesRegexp(ValueError, "non-zero number of levels/labels",
MultiIndex, levels=[], labels=[])
both_re = re.compile('Must pass both levels and labels')
with tm.assertRaisesRegexp(TypeError, both_re):
MultiIndex(levels=[])
with tm.assertRaisesRegexp(TypeError, both_re):
MultiIndex(labels=[])
def test_constructor_mismatched_label_levels(self):
labels = [np.array([1]), np.array([2]), np.array([3])]
levels = ["a"]
assertRaisesRegexp(ValueError, "Length of levels and labels must be"
" the same", MultiIndex, levels=levels,
labels=labels)
length_error = re.compile('>= length of level')
label_error = re.compile(r'Unequal label lengths: \[4, 2\]')
# important to check that it's looking at the right thing.
with tm.assertRaisesRegexp(ValueError, length_error):
MultiIndex(levels=[['a'], ['b']], labels=[[0, 1, 2, 3], [0, 3, 4, 1]])
with tm.assertRaisesRegexp(ValueError, label_error):
MultiIndex(levels=[['a'], ['b']], labels=[[0, 0, 0, 0], [0, 0]])
# external API
with tm.assertRaisesRegexp(ValueError, length_error):
self.index.copy().set_levels([['a'], ['b']])
with tm.assertRaisesRegexp(ValueError, label_error):
self.index.copy().set_labels([[0, 0, 0, 0], [0, 0]])
# deprecated properties
with warnings.catch_warnings():
warnings.simplefilter('ignore')
with tm.assertRaisesRegexp(ValueError, length_error):
self.index.copy().levels = [['a'], ['b']]
with tm.assertRaisesRegexp(ValueError, label_error):
self.index.copy().labels = [[0, 0, 0, 0], [0, 0]]
def assert_multiindex_copied(self, copy, original):
# levels should be (at least) shallow copied
assert_copy(copy.levels, original.levels)
assert_almost_equal(copy.labels, original.labels)
# labels doesn't matter which way copied
assert_almost_equal(copy.labels, original.labels)
self.assertIsNot(copy.labels, original.labels)
# names doesn't matter which way copied
self.assertEqual(copy.names, original.names)
self.assertIsNot(copy.names, original.names)
# sort order should be copied
self.assertEqual(copy.sortorder, original.sortorder)
def test_copy(self):
i_copy = self.index.copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_shallow_copy(self):
i_copy = self.index._shallow_copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_view(self):
i_view = self.index.view()
self.assert_multiindex_copied(i_view, self.index)
def check_level_names(self, index, names):
self.assertEqual([level.name for level in index.levels], list(names))
def test_changing_names(self):
# names should be applied to levels
level_names = [level.name for level in self.index.levels]
self.check_level_names(self.index, self.index.names)
view = self.index.view()
copy = self.index.copy()
shallow_copy = self.index._shallow_copy()
# changing names should change level names on object
new_names = [name + "a" for name in self.index.names]
self.index.names = new_names
self.check_level_names(self.index, new_names)
# but not on copies
self.check_level_names(view, level_names)
self.check_level_names(copy, level_names)
self.check_level_names(shallow_copy, level_names)
# and copies shouldn't change original
shallow_copy.names = [name + "c" for name in shallow_copy.names]
self.check_level_names(self.index, new_names)
def test_duplicate_names(self):
self.index.names = ['foo', 'foo']
assertRaisesRegexp(KeyError, 'Level foo not found',
self.index._get_level_number, 'foo')
def test_get_level_number_integer(self):
self.index.names = [1, 0]
self.assertEqual(self.index._get_level_number(1), 0)
self.assertEqual(self.index._get_level_number(0), 1)
self.assertRaises(IndexError, self.index._get_level_number, 2)
assertRaisesRegexp(KeyError, 'Level fourth not found',
self.index._get_level_number, 'fourth')
def test_from_arrays(self):
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
result = MultiIndex.from_arrays(arrays)
self.assertEqual(list(result), list(self.index))
# infer correctly
result = MultiIndex.from_arrays([[pd.NaT, Timestamp('20130101')], ['a', 'b']])
self.assertTrue(result.levels[0].equals(Index([Timestamp('20130101')])))
self.assertTrue(result.levels[1].equals(Index(['a','b'])))
def test_from_product(self):
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
result = MultiIndex.from_product([first, second], names=names)
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'),
('bar', 'a'), ('bar', 'b'), ('bar', 'c'),
('buz', 'a'), ('buz', 'b'), ('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
assert_array_equal(result, expected)
self.assertEqual(result.names, names)
def test_from_product_datetimeindex(self):
dt_index = date_range('2000-01-01', periods=2)
mi = pd.MultiIndex.from_product([[1, 2], dt_index])
etalon = pd.lib.list_to_object_array([(1, pd.Timestamp('2000-01-01')),
(1, pd.Timestamp('2000-01-02')),
(2, pd.Timestamp('2000-01-01')),
(2, pd.Timestamp('2000-01-02'))])
assert_array_equal(mi.values, etalon)
def test_values_boxed(self):
tuples = [(1, pd.Timestamp('2000-01-01')),
(2, pd.NaT),
(3, pd.Timestamp('2000-01-03')),
(1, pd.Timestamp('2000-01-04')),
(2, pd.Timestamp('2000-01-02')),
(3, pd.Timestamp('2000-01-03'))]
mi = pd.MultiIndex.from_tuples(tuples)
assert_array_equal(mi.values, pd.lib.list_to_object_array(tuples))
# Check that code branches for boxed values produce identical results
assert_array_equal(mi.values[:4], mi[:4].values)
def test_append(self):
result = self.index[:3].append(self.index[3:])
self.assertTrue(result.equals(self.index))
foos = [self.index[:1], self.index[1:3], self.index[3:]]
result = foos[0].append(foos[1:])
self.assertTrue(result.equals(self.index))
# empty
result = self.index.append([])
self.assertTrue(result.equals(self.index))
def test_get_level_values(self):
result = self.index.get_level_values(0)
expected = ['foo', 'foo', 'bar', 'baz', 'qux', 'qux']
self.assert_numpy_array_equal(result, expected)
self.assertEqual(result.name, 'first')
result = self.index.get_level_values('first')
expected = self.index.get_level_values(0)
self.assert_numpy_array_equal(result, expected)
def test_get_level_values_na(self):
arrays = [['a', 'b', 'b'], [1, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
values = index.get_level_values(1)
expected = [1, np.nan, 2]
assert_array_equal(values.values.astype(float), expected)
arrays = [['a', 'b', 'b'], [np.nan, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
values = index.get_level_values(1)
expected = [np.nan, np.nan, 2]
assert_array_equal(values.values.astype(float), expected)
arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]
index = pd.MultiIndex.from_arrays(arrays)
values = index.get_level_values(0)
expected = [np.nan, np.nan, np.nan]
assert_array_equal(values.values.astype(float), expected)
values = index.get_level_values(1)
expected = np.array(['a', np.nan, 1],dtype=object)
assert_array_equal(values.values, expected)
arrays = [['a', 'b', 'b'], pd.DatetimeIndex([0, 1, pd.NaT])]
index = pd.MultiIndex.from_arrays(arrays)
values = index.get_level_values(1)
expected = pd.DatetimeIndex([0, 1, pd.NaT])
assert_array_equal(values.values, expected.values)
arrays = [[], []]
index = pd.MultiIndex.from_arrays(arrays)
values = index.get_level_values(0)
self.assertEqual(values.shape, (0,))
def test_reorder_levels(self):
# this blows up
assertRaisesRegexp(IndexError, '^Too many levels',
self.index.reorder_levels, [2, 1, 0])
def test_nlevels(self):
self.assertEqual(self.index.nlevels, 2)
def test_iter(self):
result = list(self.index)
expected = [('foo', 'one'), ('foo', 'two'), ('bar', 'one'),
('baz', 'two'), ('qux', 'one'), ('qux', 'two')]
self.assertEqual(result, expected)
def test_legacy_pickle(self):
if compat.PY3:
raise nose.SkipTest("testing for legacy pickles not supported on py3")
path = tm.get_data_path('multiindex_v1.pickle')
obj = pd.read_pickle(path)
obj2 = MultiIndex.from_tuples(obj.values)
self.assertTrue(obj.equals(obj2))
res = obj.get_indexer(obj)
exp = np.arange(len(obj))
assert_almost_equal(res, exp)
res = obj.get_indexer(obj2[::-1])
exp = obj.get_indexer(obj[::-1])
exp2 = obj2.get_indexer(obj2[::-1])
assert_almost_equal(res, exp)
| assert_almost_equal(exp, exp2) | pandas.util.testing.assert_almost_equal |
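def _multiindex_api_sketch():
    # Illustrative sketch appended after (not part of) the MultiIndex tests
    # above, summarising the construction and level-manipulation APIs they
    # exercise; the level values here are assumptions chosen for illustration.
    import pandas as pd
    mi = pd.MultiIndex.from_product([['foo', 'bar'], ['one', 'two']],
                                    names=['first', 'second'])
    renamed = mi.set_names(['FIRST', 'SECOND'])          # returns a new index
    relevelled = mi.set_levels([['baz', 'qux'], ['one', 'two']])
    return mi.get_level_values('first'), renamed.names, relevelled.levels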
import numpy as np
np.random.seed(2018)
import pandas as pd
import xgboost as xgb
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
from scipy.sparse import hstack
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import mean_squared_error
import jieba
import re
from sklearn.externals import joblib
import os
from sklearn.model_selection import KFold
from sklearn.naive_bayes import MultinomialNB
reg_chinese = re.compile(r'[^\u4e00-\u9fa5]+')  # matches runs of characters outside the CJK Unified Ideographs range (i.e. non-Chinese text)
def read_csv(filename, dir='./input/'):
path = os.path.join(dir, filename+'.csv')
return | pd.read_csv(path) | pandas.read_csv |
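def _tfidf_baseline_sketch(df):
    # Hypothetical sketch only: the original script is cut off after read_csv,
    # so this is not the author's pipeline. It just shows one conventional way
    # the imports above (jieba, reg_chinese, TfidfVectorizer, train_test_split,
    # LogisticRegression) could fit together; the 'text' and 'label' column
    # names are assumptions made for illustration.
    texts = df['text'].fillna('').map(
        lambda s: ' '.join(jieba.cut(reg_chinese.sub(' ', s))))
    X = TfidfVectorizer(max_features=50000).fit_transform(texts)
    y = df['label'].values
    X_tr, X_va, y_tr, y_va = train_test_split(X, y, test_size=0.2,
                                              random_state=2018)
    clf = LogisticRegression()
    clf.fit(X_tr, y_tr)
    return clf.score(X_va, y_va)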