| prompt | completion | api |
|---|---|---|
| stringlengths 19 to 1.03M | stringlengths 4 to 2.12k | stringlengths 8 to 90 |
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 6 11:33:59 2017
Parse the TinySoft (天软) data format.
@author: ws
"""
import pandas as pd
_max_iter_stocks = 100
def _int2date(int_date):
if int_date < 10000000:
return pd.NaT
    return pd.Timestamp(int_date // 10000, int_date % 10000 // 100, int_date % 100)  # pd.datetime was removed from pandas
def parseByStock(TSData, date_parse=None):
"""
    Parse the data stock by stock.
    The input is a two-column array: the first column holds the stock code and the
    second column holds the corresponding sub-array. Every stock's sub-array shares the
    same column names, so the per-stock frames can be concatenated row-wise.
Return:
=======
DataFrame(index=IDs, columns=data)
"""
if TSData[0] != 0:
raise ValueError("天软数据提取失败!")
iter_stock = 0
table = pd.DataFrame()
temp_table = []
for idata in TSData[1]:
stockID = idata[b'IDs'].decode('utf8')[2:]
stockData = []
iter_stock += 1
for itable in idata[b'data']:
new_dict = {k.decode('gbk'): v for k, v in itable.items()}
new_data = pd.DataFrame(new_dict, index=pd.Index([stockID], name='IDs'))
stockData.append(new_data)
if stockData:
stockData = pd.concat(stockData)
else:
continue
temp_table.append(stockData)
if iter_stock >= _max_iter_stocks:
_ = pd.concat(temp_table)
table = pd.concat([table, _])
temp_table = []
iter_stock = 0
if temp_table:
_ = pd.concat(temp_table)
table = pd.concat([table, _])
if date_parse:
table[date_parse] = table[date_parse].applymap(_int2date)
return table.sort_index()
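# Hypothetical usage sketch (not from the original module): the exact structure of the
# TinySoft return value is an assumption here -- a (status, payload) pair whose payload
# uses the byte-string keys that parseByStock expects.
def _example_parse_by_stock():
    fake_tsdata = (0, [
        {b'IDs': b'SH600000', b'data': [{b'date': 20171106, b'close': 12.3}]},
        {b'IDs': b'SZ000001', b'data': [{b'date': 20171106, b'close': 9.8}]},
    ])
    return parseByStock(fake_tsdata, date_parse=['date'])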
def parseCrossSection2DArray(TSData, date):
"""
    Parse a cross-sectional 2-D array whose row index is the stock code and whose
    column index is the factor name.
"""
if TSData[0] != 0:
raise ValueError("天软数据提取失败!")
iter_stock = 0
    table = pd.DataFrame()
# Script for predicting Enamine 2M compounds using pre-extracted TPATF features
# This script does the following things:
# 1. Reads large csv files in chunks
# 2. For each of the chunks, create pre-defined number of processes and
# 3. In each of the processes, reads the features and evaluates using all the models
#
# Author: <NAME>
# Department of Computer Science and School of Pharmacy, UTEP
# Last modified: 10/16/2018
from __future__ import print_function
import pandas as pd
import numpy as np
import multiprocessing as mp
import os
try:
    import joblib
except ImportError:  # older scikit-learn releases bundled joblib
    from sklearn.externals import joblib
import sys
py_version = "27" if sys.version_info[0] < 3 else "35"
cores = 28 #mp.cpu_count()
partitions = cores
dataset_dir = "/data2/datasets/enamine"
fingerprint_files = ["Enamine_advance_TPATF_fingerprints.csv", "Enamine_hts_tpatf_fingerprint.csv"]
models_dir = "/data2/mhassan/LigandNet/Models/models_generated/all_models"
output_dir = "output/py" + py_version
def get_models():
# Read the model names
with open("/data2/mhassan/LigandNet/Models/models_generated/niners_py" + py_version + ".txt", 'r') as f:
model_names = [line.split('\n')[0] for line in f.readlines()]
names = [i.split("classifier_")[1] for i in model_names]
# Yield models
for name, model in zip(names, model_names):
yield name, joblib.load(os.path.join(models_dir, model))
def work(data):
# Predict SMILES in a chunk
output = []
data = data.apply(lambda x: x.tolist(), axis=1)
ids = []
smiles = []
features = []
for d in data:
ids.append(d[0])
smiles.append(d[1])
features.append(np.array([float(i) for i in d[2].split(' ')], dtype=np.float32))
features = np.array(features, dtype=np.float32)
for model_name, model in get_models():
# Predict and add to the output if there is a hit
pred = []
try:
pred = model.predict_proba(features)
pred = [i[1] for i in pred]
hits = [i>0.5 for i in pred]
        except AttributeError:  # model has no predict_proba; fall back to hard class labels
pred = model.predict(features)
hits = [i==1.0 for i in pred]
if len(hits) > 0: output.extend([[model_name, i, s, p] for i, s, p, h in zip(ids, smiles, pred, hits) if h])
return output
def parallelize(_file, chunk_number, data, func):
data_split = np.array_split(data, partitions)
pool = mp.Pool(cores)
output = pool.map(func, data_split)
pool.close()
# with mp.Pool(cores) as pool:
# output = pool.map(func, data_split)
# Filtering out empty results
output = [i for i in output if len(i)>0]
# Flatten the output
result = []
for i in output:
result.extend(i)
labels = ["protein", "id", "smiles", "prediction"]
    output_df = pd.DataFrame.from_records(result, columns=labels)
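# Hypothetical driver sketch (not part of the original script): step 1 of the header
# comments -- stream each large fingerprint CSV in chunks and hand every chunk to
# parallelize(). The chunk size and the (id, smiles, feature-string) column order are
# assumptions.
if __name__ == "__main__":
    os.makedirs(output_dir, exist_ok=True)
    for _file in fingerprint_files:
        reader = pd.read_csv(os.path.join(dataset_dir, _file), chunksize=10000)
        for chunk_number, chunk in enumerate(reader):
            parallelize(_file, chunk_number, chunk, work)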
import numpy as np
import pytest
from pandas import (
DataFrame,
Index,
MultiIndex,
Series,
Timestamp,
date_range,
to_datetime,
)
import pandas._testing as tm
import pandas.tseries.offsets as offsets
class TestRollingTS:
# rolling time-series friendly
# xref GH13327
def setup_method(self, method):
self.regular = DataFrame(
{"A": date_range("20130101", periods=5, freq="s"), "B": range(5)}
).set_index("A")
self.ragged = DataFrame({"B": range(5)})
self.ragged.index = [
Timestamp("20130101 09:00:00"),
Timestamp("20130101 09:00:02"),
Timestamp("20130101 09:00:03"),
Timestamp("20130101 09:00:05"),
Timestamp("20130101 09:00:06"),
]
def test_doc_string(self):
df = DataFrame(
{"B": [0, 1, 2, np.nan, 4]},
index=[
Timestamp("20130101 09:00:00"),
Timestamp("20130101 09:00:02"),
Timestamp("20130101 09:00:03"),
Timestamp("20130101 09:00:05"),
Timestamp("20130101 09:00:06"),
],
)
df
df.rolling("2s").sum()
def test_invalid_window_non_int(self):
# not a valid freq
msg = "passed window foobar is not compatible with a datetimelike index"
with pytest.raises(ValueError, match=msg):
self.regular.rolling(window="foobar")
# not a datetimelike index
msg = "window must be an integer"
with pytest.raises(ValueError, match=msg):
self.regular.reset_index().rolling(window="foobar")
@pytest.mark.parametrize("freq", ["2MS", offsets.MonthBegin(2)])
def test_invalid_window_nonfixed(self, freq):
# non-fixed freqs
msg = "\\<2 \\* MonthBegins\\> is a non-fixed frequency"
with pytest.raises(ValueError, match=msg):
self.regular.rolling(window=freq)
@pytest.mark.parametrize("freq", ["1D", offsets.Day(2), "2ms"])
def test_valid_window(self, freq):
self.regular.rolling(window=freq)
@pytest.mark.parametrize("minp", [1.0, "foo", np.array([1, 2, 3])])
def test_invalid_minp(self, minp):
# non-integer min_periods
msg = (
r"local variable 'minp' referenced before assignment|"
"min_periods must be an integer"
)
with pytest.raises(ValueError, match=msg):
self.regular.rolling(window="1D", min_periods=minp)
def test_invalid_center_datetimelike(self):
# center is not implemented
msg = "center is not implemented for datetimelike and offset based windows"
with pytest.raises(NotImplementedError, match=msg):
self.regular.rolling(window="1D", center=True)
def test_on(self):
df = self.regular
# not a valid column
msg = (
r"invalid on specified as foobar, must be a column "
"\\(of DataFrame\\), an Index or None"
)
with pytest.raises(ValueError, match=msg):
df.rolling(window="2s", on="foobar")
# column is valid
df = df.copy()
df["C"] = date_range("20130101", periods=len(df))
df.rolling(window="2d", on="C").sum()
# invalid columns
msg = "window must be an integer"
with pytest.raises(ValueError, match=msg):
df.rolling(window="2d", on="B")
# ok even though on non-selected
df.rolling(window="2d", on="C").B.sum()
def test_monotonic_on(self):
# on/index must be monotonic
df = DataFrame(
{"A": date_range("20130101", periods=5, freq="s"), "B": range(5)}
)
assert df.A.is_monotonic
df.rolling("2s", on="A").sum()
df = df.set_index("A")
assert df.index.is_monotonic
df.rolling("2s").sum()
def test_non_monotonic_on(self):
# GH 19248
df = DataFrame(
{"A": date_range("20130101", periods=5, freq="s"), "B": range(5)}
)
df = df.set_index("A")
non_monotonic_index = df.index.to_list()
non_monotonic_index[0] = non_monotonic_index[3]
df.index = non_monotonic_index
assert not df.index.is_monotonic
msg = "index must be monotonic"
with pytest.raises(ValueError, match=msg):
df.rolling("2s").sum()
df = df.reset_index()
msg = (
r"invalid on specified as A, must be a column "
"\\(of DataFrame\\), an Index or None"
)
with pytest.raises(ValueError, match=msg):
df.rolling("2s", on="A").sum()
def test_frame_on(self):
df = DataFrame(
{"B": range(5), "C": date_range("20130101 09:00:00", periods=5, freq="3s")}
)
df["A"] = [
Timestamp("20130101 09:00:00"),
Timestamp("20130101 09:00:02"),
Timestamp("20130101 09:00:03"),
Timestamp("20130101 09:00:05"),
Timestamp("20130101 09:00:06"),
]
        # we are simulating using 'on'
expected = df.set_index("A").rolling("2s").B.sum().reset_index(drop=True)
result = df.rolling("2s", on="A").B.sum()
tm.assert_series_equal(result, expected)
# test as a frame
# we should be ignoring the 'on' as an aggregation column
# note that the expected is setting, computing, and resetting
# so the columns need to be switched compared
# to the actual result where they are ordered as in the
# original
expected = (
df.set_index("A").rolling("2s")[["B"]].sum().reset_index()[["B", "A"]]
)
result = df.rolling("2s", on="A")[["B"]].sum()
tm.assert_frame_equal(result, expected)
def test_frame_on2(self):
# using multiple aggregation columns
df = DataFrame(
{
"A": [0, 1, 2, 3, 4],
"B": [0, 1, 2, np.nan, 4],
"C": Index(
[
Timestamp("20130101 09:00:00"),
Timestamp("20130101 09:00:02"),
Timestamp("20130101 09:00:03"),
Timestamp("20130101 09:00:05"),
Timestamp("20130101 09:00:06"),
]
),
},
columns=["A", "C", "B"],
)
expected1 = DataFrame(
{"A": [0.0, 1, 3, 3, 7], "B": [0, 1, 3, np.nan, 4], "C": df["C"]},
columns=["A", "C", "B"],
)
result = df.rolling("2s", on="C").sum()
expected = expected1
tm.assert_frame_equal(result, expected)
expected = Series([0, 1, 3, np.nan, 4], name="B")
result = df.rolling("2s", on="C").B.sum()
tm.assert_series_equal(result, expected)
expected = expected1[["A", "B", "C"]]
result = df.rolling("2s", on="C")[["A", "B", "C"]].sum()
tm.assert_frame_equal(result, expected)
def test_basic_regular(self):
df = self.regular.copy()
df.index = date_range("20130101", periods=5, freq="D")
expected = df.rolling(window=1, min_periods=1).sum()
result = df.rolling(window="1D").sum()
tm.assert_frame_equal(result, expected)
df.index = date_range("20130101", periods=5, freq="2D")
expected = df.rolling(window=1, min_periods=1).sum()
result = df.rolling(window="2D", min_periods=1).sum()
tm.assert_frame_equal(result, expected)
expected = df.rolling(window=1, min_periods=1).sum()
result = df.rolling(window="2D", min_periods=1).sum()
tm.assert_frame_equal(result, expected)
expected = df.rolling(window=1).sum()
result = df.rolling(window="2D").sum()
tm.assert_frame_equal(result, expected)
def test_min_periods(self):
# compare for min_periods
df = self.regular
        # these are slightly different
expected = df.rolling(2, min_periods=1).sum()
result = df.rolling("2s").sum()
tm.assert_frame_equal(result, expected)
expected = df.rolling(2, min_periods=1).sum()
result = df.rolling("2s", min_periods=1).sum()
tm.assert_frame_equal(result, expected)
def test_closed(self):
# xref GH13965
df = DataFrame(
{"A": [1] * 5},
index=[
Timestamp("20130101 09:00:01"),
Timestamp("20130101 09:00:02"),
Timestamp("20130101 09:00:03"),
Timestamp("20130101 09:00:04"),
Timestamp("20130101 09:00:06"),
],
)
# closed must be 'right', 'left', 'both', 'neither'
msg = "closed must be 'right', 'left', 'both' or 'neither'"
with pytest.raises(ValueError, match=msg):
self.regular.rolling(window="2s", closed="blabla")
expected = df.copy()
expected["A"] = [1.0, 2, 2, 2, 1]
result = df.rolling("2s", closed="right").sum()
tm.assert_frame_equal(result, expected)
# default should be 'right'
result = df.rolling("2s").sum()
tm.assert_frame_equal(result, expected)
expected = df.copy()
expected["A"] = [1.0, 2, 3, 3, 2]
result = df.rolling("2s", closed="both").sum()
tm.assert_frame_equal(result, expected)
expected = df.copy()
expected["A"] = [np.nan, 1.0, 2, 2, 1]
result = df.rolling("2s", closed="left").sum()
tm.assert_frame_equal(result, expected)
expected = df.copy()
expected["A"] = [np.nan, 1.0, 1, 1, np.nan]
result = df.rolling("2s", closed="neither").sum()
tm.assert_frame_equal(result, expected)
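    # Reference note (assuming offset-window semantics of recent pandas): for a "2s"
    # window ending at timestamp t, `closed` selects which endpoints are included:
    #   closed="right"   -> (t - 2s, t]   (default for offset-based windows)
    #   closed="left"    -> [t - 2s, t)
    #   closed="both"    -> [t - 2s, t]
    #   closed="neither" -> (t - 2s, t)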
def test_ragged_sum(self):
df = self.ragged
result = df.rolling(window="1s", min_periods=1).sum()
expected = df.copy()
expected["B"] = [0.0, 1, 2, 3, 4]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="2s", min_periods=1).sum()
expected = df.copy()
expected["B"] = [0.0, 1, 3, 3, 7]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="2s", min_periods=2).sum()
expected = df.copy()
expected["B"] = [np.nan, np.nan, 3, np.nan, 7]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="3s", min_periods=1).sum()
expected = df.copy()
expected["B"] = [0.0, 1, 3, 5, 7]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="3s").sum()
expected = df.copy()
expected["B"] = [0.0, 1, 3, 5, 7]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="4s", min_periods=1).sum()
expected = df.copy()
expected["B"] = [0.0, 1, 3, 6, 9]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="4s", min_periods=3).sum()
expected = df.copy()
expected["B"] = [np.nan, np.nan, 3, 6, 9]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="5s", min_periods=1).sum()
expected = df.copy()
expected["B"] = [0.0, 1, 3, 6, 10]
tm.assert_frame_equal(result, expected)
def test_ragged_mean(self):
df = self.ragged
result = df.rolling(window="1s", min_periods=1).mean()
expected = df.copy()
expected["B"] = [0.0, 1, 2, 3, 4]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="2s", min_periods=1).mean()
expected = df.copy()
expected["B"] = [0.0, 1, 1.5, 3.0, 3.5]
tm.assert_frame_equal(result, expected)
def test_ragged_median(self):
df = self.ragged
result = df.rolling(window="1s", min_periods=1).median()
expected = df.copy()
expected["B"] = [0.0, 1, 2, 3, 4]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="2s", min_periods=1).median()
expected = df.copy()
expected["B"] = [0.0, 1, 1.5, 3.0, 3.5]
tm.assert_frame_equal(result, expected)
def test_ragged_quantile(self):
df = self.ragged
result = df.rolling(window="1s", min_periods=1).quantile(0.5)
expected = df.copy()
expected["B"] = [0.0, 1, 2, 3, 4]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="2s", min_periods=1).quantile(0.5)
expected = df.copy()
expected["B"] = [0.0, 1, 1.5, 3.0, 3.5]
tm.assert_frame_equal(result, expected)
def test_ragged_std(self):
df = self.ragged
result = df.rolling(window="1s", min_periods=1).std(ddof=0)
expected = df.copy()
expected["B"] = [0.0] * 5
tm.assert_frame_equal(result, expected)
result = df.rolling(window="1s", min_periods=1).std(ddof=1)
expected = df.copy()
expected["B"] = [np.nan] * 5
tm.assert_frame_equal(result, expected)
result = df.rolling(window="3s", min_periods=1).std(ddof=0)
expected = df.copy()
expected["B"] = [0.0] + [0.5] * 4
tm.assert_frame_equal(result, expected)
result = df.rolling(window="5s", min_periods=1).std(ddof=1)
expected = df.copy()
expected["B"] = [np.nan, 0.707107, 1.0, 1.0, 1.290994]
tm.assert_frame_equal(result, expected)
def test_ragged_var(self):
df = self.ragged
result = df.rolling(window="1s", min_periods=1).var(ddof=0)
expected = df.copy()
expected["B"] = [0.0] * 5
tm.assert_frame_equal(result, expected)
result = df.rolling(window="1s", min_periods=1).var(ddof=1)
expected = df.copy()
expected["B"] = [np.nan] * 5
tm.assert_frame_equal(result, expected)
result = df.rolling(window="3s", min_periods=1).var(ddof=0)
expected = df.copy()
expected["B"] = [0.0] + [0.25] * 4
tm.assert_frame_equal(result, expected)
result = df.rolling(window="5s", min_periods=1).var(ddof=1)
expected = df.copy()
expected["B"] = [np.nan, 0.5, 1.0, 1.0, 1 + 2 / 3.0]
tm.assert_frame_equal(result, expected)
def test_ragged_skew(self):
df = self.ragged
result = df.rolling(window="3s", min_periods=1).skew()
expected = df.copy()
expected["B"] = [np.nan] * 5
tm.assert_frame_equal(result, expected)
result = df.rolling(window="5s", min_periods=1).skew()
expected = df.copy()
expected["B"] = [np.nan] * 2 + [0.0, 0.0, 0.0]
tm.assert_frame_equal(result, expected)
def test_ragged_kurt(self):
df = self.ragged
result = df.rolling(window="3s", min_periods=1).kurt()
expected = df.copy()
expected["B"] = [np.nan] * 5
tm.assert_frame_equal(result, expected)
result = df.rolling(window="5s", min_periods=1).kurt()
expected = df.copy()
expected["B"] = [np.nan] * 4 + [-1.2]
tm.assert_frame_equal(result, expected)
def test_ragged_count(self):
df = self.ragged
result = df.rolling(window="1s", min_periods=1).count()
expected = df.copy()
expected["B"] = [1.0, 1, 1, 1, 1]
tm.assert_frame_equal(result, expected)
df = self.ragged
result = df.rolling(window="1s").count()
tm.assert_frame_equal(result, expected)
result = df.rolling(window="2s", min_periods=1).count()
expected = df.copy()
expected["B"] = [1.0, 1, 2, 1, 2]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="2s", min_periods=2).count()
expected = df.copy()
expected["B"] = [np.nan, np.nan, 2, np.nan, 2]
tm.assert_frame_equal(result, expected)
def test_regular_min(self):
df = DataFrame(
{"A": date_range("20130101", periods=5, freq="s"), "B": [0.0, 1, 2, 3, 4]}
).set_index("A")
result = df.rolling("1s").min()
expected = df.copy()
expected["B"] = [0.0, 1, 2, 3, 4]
tm.assert_frame_equal(result, expected)
df = DataFrame(
{"A": date_range("20130101", periods=5, freq="s"), "B": [5, 4, 3, 4, 5]}
).set_index("A")
tm.assert_frame_equal(result, expected)
result = df.rolling("2s").min()
expected = df.copy()
expected["B"] = [5.0, 4, 3, 3, 4]
tm.assert_frame_equal(result, expected)
result = df.rolling("5s").min()
expected = df.copy()
expected["B"] = [5.0, 4, 3, 3, 3]
tm.assert_frame_equal(result, expected)
def test_ragged_min(self):
df = self.ragged
result = df.rolling(window="1s", min_periods=1).min()
expected = df.copy()
expected["B"] = [0.0, 1, 2, 3, 4]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="2s", min_periods=1).min()
expected = df.copy()
expected["B"] = [0.0, 1, 1, 3, 3]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="5s", min_periods=1).min()
expected = df.copy()
expected["B"] = [0.0, 0, 0, 1, 1]
        tm.assert_frame_equal(result, expected)
import pandas as pd
from impedance.models.circuits import CustomCircuit
def getRaw(x):
return complex(x["Re"],x["Im"])
def getReal(x):
return x.real
def getImag(x):
return x.imag
def fitData(rawDF,circuit,initial):
circuit = circuit.replace("Q","CPE")
df=rawDF.copy()
freq = df["f"]
freq = freq.to_numpy()
df["raw"] = df.apply(getRaw,axis=1)
Z = df["raw"].to_numpy()
initial_guess = initial
circuit = CustomCircuit(circuit, initial_guess=initial_guess)
circuit.fit(freq,Z)
print(circuit.parameters_)
freqList = df["f"].to_numpy()
fit = circuit.predict(freqList)
model = pd.Series(fit)
df["model"] = model
df["mRe"] = df["model"].apply(getReal)
df["mIm"] = -df["model"].apply(getImag)
df = df.drop(["raw"],axis=1)
df = df.drop(["model"],axis=1)
    outDF = pd.DataFrame()
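# Hypothetical usage sketch (not part of the original script): the column names "f",
# "Re", "Im", the circuit string and the initial guess are assumptions chosen to match
# what fitData() reads; the values roughly follow an R0 + (R1 || C1) response.
def _example_fit():
    raw = pd.DataFrame({
        "f":  [1e5, 1e4, 3e3, 1e3, 1e2, 1e1],
        "Re": [10.0, 10.25, 12.20, 17.17, 19.96, 20.0],
        "Im": [-0.16, -1.55, -4.14, -4.50, -0.63, -0.06],
    })
    # three parameters for "R0-p(R1,C1)": R0, R1, C1
    fitData(raw, "R0-p(R1,C1)", initial=[10, 10, 1e-5])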
import numpy as np
import pytest
import pandas as pd
from pandas import PeriodIndex
import pandas._testing as tm
def test_to_native_types():
index = PeriodIndex(["2017-01-01", "2017-01-02", "2017-01-03"], freq="D")
# First, with no arguments.
expected = np.array(["2017-01-01", "2017-01-02", "2017-01-03"], dtype="=U10")
result = index.to_native_types()
tm.assert_numpy_array_equal(result, expected)
# No NaN values, so na_rep has no effect
result = index.to_native_types(na_rep="pandas")
tm.assert_numpy_array_equal(result, expected)
# Make sure slicing works
expected = np.array(["2017-01-01", "2017-01-03"], dtype="=U10")
result = index.to_native_types([0, 2])
tm.assert_numpy_array_equal(result, expected)
# Make sure date formatting works
expected = np.array(["01-2017-01", "01-2017-02", "01-2017-03"], dtype="=U10")
result = index.to_native_types(date_format="%m-%Y-%d")
tm.assert_numpy_array_equal(result, expected)
# NULL object handling should work
index = PeriodIndex(["2017-01-01", pd.NaT, "2017-01-03"], freq="D")
expected = np.array(["2017-01-01", "NaT", "2017-01-03"], dtype=object)
result = index.to_native_types()
    tm.assert_numpy_array_equal(result, expected)
from scipy.signal import find_peaks
import matplotlib
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import re
import os
import pandas as pd
from cell_cycle_gating.findpeaks import get_kde, findpeaks
from cell_cycle_gating import smooth
from scipy.stats.mstats import mquantiles as quantile
def get_ldrgates(ldrint, ldr_control_cutoff=2, peak_loc=1.2):
"""Gating based on ldr intensities
Parameters
----------
ldrint : 1d array
        LDR intensity feature across all cells in a well
    ldr_control_cutoff : float
        default cutoff used if automatic gating fails.
Returns
-------
ldr_gates : list of floats
ldr_lims : list of floats
        limits of the LDR intensity feature that define x_lims for plots
"""
ldrint = ldrint[ldrint > 0]
logint = np.log10(ldrint)
logint = logint[~np.isnan(logint)]
logint = logint[~np.isinf(logint)]
fig, ax = plt.subplots()
x, y = sns.kdeplot(logint, ax=ax).get_lines()[0].get_data()
plt.close()
peak_locs, _ = find_peaks(-y)
cc = x[peak_locs]
try:
ldr_cutoff = cc[cc > peak_loc][0]
except IndexError:
ldr_cutoff = np.quantile(logint, 0.99)
#ldr_cutoff = ldr_control_cutoff
ldr_gates = np.array([-np.inf, ldr_cutoff])
ldr_lims = np.array([x.min(), x.max()])
return ldr_gates, ldr_lims
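# Hypothetical usage sketch (not from the original module): synthetic LDR intensities
# with a large low-intensity (live) population and a small high-intensity (dead) one.
def _example_ldr_gating():
    rng = np.random.default_rng(0)
    live = 10 ** rng.normal(0.5, 0.2, 5000)   # log10 intensity centred near 0.5
    dead = 10 ** rng.normal(2.0, 0.2, 500)    # log10 intensity centred near 2.0
    ldrint = np.concatenate([live, dead])
    ldr_gates, ldr_lims = get_ldrgates(ldrint, peak_loc=1.2)
    return ldr_gates, ldr_lims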
def compute_log_dna(dna, x_dna=None):
"""Computes log of DNA content bounded by x_dna[2], x_dna[-3]
Parameters
----------
dna : 1D array
DNA content of cells in a given well
x_dna : 1D array
Expected distribution of DNA content (used as x-axis grid)
Return
------
log_dna : 1D array
log transformed DNA content
"""
if x_dna is None:
x_dna = np.arange(2.5, 8, 0.02)
dna_upper_bound = 10 ** x_dna[-3]
dna_lower_bound = 10 ** x_dna[2]
dna_upper_bounded = [d if d < dna_upper_bound else dna_upper_bound
for d in dna]
dna_bounded = [d if d > dna_lower_bound else dna_lower_bound
for d in dna_upper_bounded]
log_dna = np.array([np.log10(d) for d in dna_bounded])
return log_dna
def get_g1_location(log_dna, x_dna, ldrint, ldr_gates):
"""Computes ocation of G1 based on DNA content
Parameters
----------
log_dna : 1d array
log DNA content of cells in a given well
x_dna : 1d array
Expected distribution of DNA content (used as x-axis grid)
    ldrint : 1d array
        LDR intensity feature across all cells in a well
ldr_gates : list of floats
Returns
-------
g1_loc : float
G1 location on log DNA axis
"""
logint = np.log10(ldrint)
logint[np.isnan(logint)] = -10 #dummy value
if x_dna is None:
x_dna = np.arange(2.5, 8, 0.02)
    # Only consider the subset of cells with LDR within ldr_gates
log_dna_low_ldr = log_dna[(ldr_gates[1] >= logint) &
(logint >= ldr_gates[0])]
f_dna_low_ldr = get_kde(log_dna_low_ldr, x_dna)
dna_peaks_amp, dna_peaks_loc, _ = findpeaks(f_dna_low_ldr.tolist())
# Remove lesser peaks
dna_peaks_loc = dna_peaks_loc[dna_peaks_amp > np.max(dna_peaks_amp/10)]
dna_peaks_amp = dna_peaks_amp[dna_peaks_amp > np.max(dna_peaks_amp/10)]
xdna_loc = x_dna[dna_peaks_loc[:4]] # take the 4 highest peaks
# compute dna density surrounding peaks
dna_density = [np.mean(np.array(log_dna > (x - 0.2 * np.log10(2))) &
np.array(log_dna < (x + 1.2 * np.log10(2))))
for x in xdna_loc] + dna_peaks_amp
# Find G1 peak
if len(xdna_loc) == 2:
g1_loc = np.min(xdna_loc)
else:
g1_loc = xdna_loc[np.argmax(dna_density)]
return g1_loc
def get_g2_location(log_dna, x_dna, ldrint, ldr_gates, g1_loc):
"""Computes location of G2 based on DNA content
Parameters
----------
log_dna : 1d array
log DNA content of cells in a given well
x_dna : 1d array
Expected distribution of DNA content (used as x-axis grid)
    ldrint : 1d array
        LDR intensity feature across all cells in a well
ldr_gates : list of floats
g1_loc : numpy float
G1 location on log DNA scale
Returns
-------
g2_loc : numpy float
G2 location on log DNA scale
"""
# Get G2 peak and location
    # Only consider the subset of cells with LDR intensity within ldr_gates and
# DNA content > (g1_loc + 0.4 * log10(2))
logint = np.log10(ldrint)
logint[np.isnan(logint)] = -10 #dummy value
if x_dna is None:
x_dna = np.arange(2.5, 8, 0.02)
log_dna_g2_range = log_dna[(log_dna > (g1_loc + 0.4 * np.log10(2))) &
(ldr_gates[1] >= logint) &
(logint >= ldr_gates[0])]
f_dna_g2_range = get_kde(log_dna_g2_range, x_dna)
f_smooth = smooth.smooth(f_dna_g2_range, 5, 'flat')
peak_amp, peak_loc, _ = findpeaks(f_smooth.tolist())
peak_loc = peak_loc[peak_amp > np.max(peak_amp/10)]
xdna_loc = x_dna[peak_loc]
xdna_loc = xdna_loc[xdna_loc > (g1_loc + 0.5 * np.log10(2))]
if len(xdna_loc) > 1:
g2_loc = xdna_loc[np.argmin(
np.abs((xdna_loc - (g1_loc + np.log10(2))))
)]
elif len(xdna_loc) == 1:
g2_loc = xdna_loc[0]
else:
g2_loc = g1_loc + np.log10(2)
return g2_loc
def get_g1_g2_position(log_dna, x_dna, ldrint, ldr_gates):
"""Wrapper function that returns G1 and G2 location
based on log DNA content
Parameters
----------
log_dna : 1d array
log DNA content of cells in a given well
x_dna : 1d array
Expected distribution of DNA content (used as x-axis grid)
ldrint : 1d array
        LDR intensity feature across all cells in a well
ldr_gates : list of floats
Returns
-------
g1_g2_pos : list of floats
G1 and G2 location on log DNA scale
"""
if x_dna is None:
x_dna = np.arange(2.5, 8, 0.02)
g1_loc = get_g1_location(log_dna, x_dna, ldrint, ldr_gates)
g2_loc = get_g2_location(log_dna, x_dna, ldrint, ldr_gates, g1_loc)
g1_g2_pos = [g1_loc, g2_loc]
return g1_g2_pos
def get_dnalims(log_dna, x_dna=None):
""" Outer bounds on DNA content to use as x_lim for plots
Parameters
----------
log_dna : 1d array
log DNA content of cells in a given well
x_dna : 1d array
Expected distribution of DNA content (used as x-axis grid)
Returns
-------
dna_lims : list of floats
"""
if x_dna is None:
x_dna = np.arange(2.5, 8, 0.02)
dna_lims = (quantile(log_dna, [5e-3, 0.995]) +
[(2.5 * (x_dna[1] - x_dna[0])) * x for x in [-1, 1]])
return dna_lims
def get_dna_gating(dna, ldrint, ldr_gates, x_dna=None, ax=None):
"""Computes gating to claissfy live/dead cells based on DNA content
Parameters
----------
dna : 1d array
DNA content of cells in a given well
    ldrint : 1d array
        LDR intensity feature across all cells in a well
ldr_gates : list of floats
x_dna : 1d array
Expected distribution of DNA content (used as x-axis grid)
ax : subplot object
provides positional reference for master plot
Returns
-------
dna_gates : list of floats
inner and outer gates to classify live/dead cells
"""
logint = np.log10(ldrint)
logint[np.isnan(logint)] = -10 #dummy value
if x_dna is None:
x_dna = np.arange(2.5, 8, 0.02)
log_dna = compute_log_dna(dna, x_dna)
f_dna = get_kde(np.array(log_dna), x_dna)
log_dna_low_ldr = log_dna[ (ldr_gates[1] >= logint) &
(logint >= ldr_gates[0])]
f_dna_low_ldr = get_kde(log_dna_low_ldr, x_dna)
g1_loc = get_g1_location(log_dna, x_dna, ldrint, ldr_gates)
log_dna_g2_range = log_dna[(log_dna > (g1_loc + 0.4 * np.log10(2))) &
(ldr_gates[1] >= logint) &
(logint >= ldr_gates[0])]
try:
f_dna_g2_range = get_kde(log_dna_g2_range, x_dna)
g1_g2_pos = get_g1_g2_position(log_dna, x_dna, ldrint, ldr_gates)
g1_loc = g1_g2_pos[0]
g2_loc = g1_g2_pos[1]
dna_gates = [a + b for a, b in zip(
[g1_g2_pos[i] for i in [0, 0, 1, 1]],
[(g2_loc-g1_loc) * s for s in [-1.5, -.9, 1.3, 2.2]]
)]
y_vals = [np.max(f_dna) * y for y in [0, 1.02, 1.02, 0]]
inner_x_vals = [dna_gates[i] for i in [1, 1, 2, 2]]
outer_x_vals = [dna_gates[i] for i in [0, 0, 3, 3]]
dna_lims = get_dnalims(log_dna, x_dna)
dna_lims = [np.min((dna_lims[0], dna_gates[0]-0.1)),
np.max((dna_lims[1], dna_gates[3]+0.1))]
return np.array(dna_gates)
except ValueError:
return None
def live_dead(ldrint, ldr_gates=None,
dna=None, dna_gates=None,
x_dna=None, ax=None, ldr_control_cutoff=2):
"""Assign classification to individual cells as live/dead based on
ldrint and DNA content.
If ax is not None, plots pie chart of fraction live/dead
1. alive = selected+others, where selected is within
inner DNA gate and within LDR
2. dead = anything outside of DNA outer gating and LDR gating
3. total = alive + dead; selected + others + dead
Parameters
----------
ldrint : 1d array
ldr int feature across all cells in a well
ldr_gates : list of floats
dna : 1d array
DNA content of cells in a given well
dna_gates : list of floats
    ldr_control_cutoff : float
        default LDR cutoff used if automatic gating fails
x_dna : 1d array
Expected distribution of DNA content (used as x-axis grid)
ax : subplot object
Returns
-------
alive : int
number of cells classied as alive
dead : int
numer of cells classified as dead
outcome : 1d array
classification of each cell as live(>=0) or dead (-1).
should have same length as ldrtxt
"""
if x_dna is None:
x_dna = np.arange(2.5, 8, 0.02)
outcome = [0] * len(ldrint)
logint = np.log10(ldrint)
logint[np.isnan(logint)] = -10 #dummy value
if ldr_gates is None:
ldr_gates, _ = get_ldrgates(ldrint, ldr_control_cutoff)
ldr_outer = (logint < ldr_gates[0]) | (logint > ldr_gates[1])
outcome = [-1 if b else 0 for b in ldr_outer]
#dead = np.sum([1 for ot in outcome if ot == -1])
alive = np.sum([1 for ot in outcome if ot >= 0])
selected = 'DNA information unavailable'
others = 'DNA information unavailable'
dead_ldrpos = np.sum(ldr_outer)
cell_fate_dict = {'alive': alive, 'alive_subg1': 0, 'alive_beyondg2': 0,
'dead_ldrpos': dead_ldrpos, 'dead_subg1': 0}
if dna_gates is not None:
log_dna = compute_log_dna(dna, x_dna)
dna_outermost = (log_dna < dna_gates[0]) | (log_dna > dna_gates[3])
dead_ldrpos = np.sum(ldr_outer)
dead_subg1 = np.sum((ldr_outer==False) & (log_dna < dna_gates[0]))
alive_beyondg2 = np.sum((ldr_outer==False) & (log_dna > dna_gates[2]))
alive_subg1 = np.sum((ldr_outer==False) & (log_dna > dna_gates[0]) & (log_dna < dna_gates[1]))
dna_inner = ((log_dna > dna_gates[1]) &
(log_dna < dna_gates[2]) &
(ldr_outer==False))
alive = np.sum(dna_inner)
#outcome = [-1 if d else 1 if s else 0
# for d, s in zip((ldr_outer | dna_outermost), dna_inner)]
outcome = ((1 * dna_inner) # normal live cells
+ (1.5 * ((ldr_outer==False) & (log_dna > dna_gates[2]))) # live but higher than G2
+ (-1 * ((ldr_outer==False) & (log_dna < dna_gates[0]))) # dead very low G1
+ (1.25 * ((ldr_outer==False) & (log_dna > dna_gates[0]) & (log_dna < dna_gates[1]))) # alive lower than G1
+ (-2 * ldr_outer))
cell_fate_dict = {'alive': alive, 'alive_subg1': alive_subg1, 'alive_beyondg2': alive_beyondg2,
'dead_ldrpos': dead_ldrpos, 'dead_subg1': dead_subg1}
#alive = np.sum([1 for ot in outcome if ot >= 0])
#dead = np.sum([1 for s in outcome if s == -1])
#selected = np.sum([1 for s in outcome if s == 1])
#others = np.sum([1 for s in outcome if s == 0])
if ax is not None:
ax.pie([alive, alive_subg1, alive_beyondg2, dead_ldrpos, dead_subg1],
labels=['alive', 'alive_subg1', 'alive_beyondg2', 'dead_ldrpos', 'dead_subg1'],
explode=(0.1, 0.1, 0.1, 0.1, 0.1), autopct='%1.1f%%')
ax.axis('equal')
else:
if ax is not None:
ax.pie([alive, dead_ldrpos], labels=['alive', 'dead_ldrpos'],
explode=(0.1, 0.1), autopct='%1.1f%%')
ax.axis('equal')
return cell_fate_dict, outcome
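# Hypothetical end-to-end sketch (not from the original module): gate one well with the
# functions above; ldrint and dna would normally come from the per-well object table.
def _example_live_dead_workflow(ldrint, dna):
    ldr_gates, _ = get_ldrgates(ldrint)
    dna_gates = get_dna_gating(dna, ldrint, ldr_gates)
    cell_fate_dict, outcome = live_dead(ldrint, ldr_gates=ldr_gates,
                                        dna=dna, dna_gates=dna_gates)
    return cell_fate_dict, outcome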
def get_counts(batch, filename, ndict, ldr_control_cutoff=2):
    well = re.search(r'result.(.*?)\[', filename).group(1)
well = "%s%s" % (well[0], well[1:].zfill(2))
    df = pd.read_table("%s/%s" % (batch, filename))
import numpy as np
import pandas as pd
TZ_LOOKUP = {
'America/Anchorage': 9,
'America/Chicago': 6,
'America/Denver': 7,
'America/Los_Angeles': 8,
'America/New_York': 5,
'America/Phoenix': 7,
'Pacific/Honolulu': 10
}
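# Illustrative sketch (an assumption-labelled helper mirroring what load_sys() does
# further down): converting to a fixed standard-time offset keeps the index free of
# daylight-saving jumps, so a regular 5-minute grid lines up year-round.
def _example_fix_dst(naive_index, tz='America/Los_Angeles'):
    return naive_index.tz_localize(tz).tz_convert('Etc/GMT+{}'.format(TZ_LOOKUP[tz]))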
def load_results():
base = 's3://pvinsight.nrel/output/'
nrel_data = pd.read_csv(base + 'pvo_results.csv')
slac_data = pd.read_csv(base + 'scsf-unified-results.csv')
slac_data['all-pass'] = np.logical_and(
        np.all(np.logical_not(slac_data[['solver-error', 'f1-increase', 'obj-increase']]), axis=1),
np.isfinite(slac_data['deg'])
)
cols = ['ID', 'rd', 'deg', 'rd_low', 'rd_high', 'all-pass',
'fix-ts', 'num-days', 'num-days-used', 'use-frac',
'res-median', 'res-var', 'res-L0norm']
df = pd.merge(nrel_data, slac_data, how='left', left_on='datastream', right_on='ID')
df = df[cols]
df.set_index('ID', inplace=True)
df = df[df['all-pass'] == True]
df['deg'] = df['deg'] * 100
df['difference'] = df['rd'] - df['deg']
df['rd_range'] = df['rd_high'] - df['rd_low']
cols = ['rd', 'deg', 'difference', 'rd_range',
'res-median', 'res-var', 'res-L0norm', 'rd_low', 'rd_high', 'all-pass',
'fix-ts', 'num-days', 'num-days-used', 'use-frac']
df = df[cols]
return df
def load_sys(n=None, idnum=None, local=True, meta=None):
if local:
base = '../data/PVO/'
if not local:
base = 's3://pvinsight.nrel/PVO/'
if meta is None:
meta = pd.read_csv('s3://pvinsight.nrel/PVO/sys_meta.csv')
if n is not None:
idnum = meta['ID'][n]
elif idnum is not None:
n = meta[meta['ID'] == idnum].index[0]
else:
print('must provide index or ID')
return
df = pd.read_csv(base+'PVOutput/{}.csv'.format(idnum), index_col=0,
parse_dates=[0], usecols=[1, 3])
tz = meta['TimeZone'][n]
df.index = df.index.tz_localize(tz).tz_convert('Etc/GMT+{}'.format(TZ_LOOKUP[tz])) # fix daylight savings
start = df.index[0]
end = df.index[-1]
    time_index = pd.date_range(start=start, end=end, freq='5min')
#!/usr/bin/env python3
import artistools as at
# import artistools.spectra
# import artistools.lightcurve.writebollightcurvedata
from pathlib import Path
import pandas as pd
import matplotlib.pyplot as plt
import os
def plot_hesma_spectrum(timeavg, axes):
hesma_file = Path("/Users/ccollins/Downloads/hesma_files/M2a/hesma_specseq.dat")
hesma_spec = pd.read_csv(hesma_file, comment="#", delim_whitespace=True, dtype=float)
# print(hesma_spec)
def match_closest_time(reftime):
return str("{}".format(min([float(x) for x in hesma_spec.keys()[1:]], key=lambda x: abs(x - reftime))))
    closest_time = float(match_closest_time(timeavg))
    closest_time = f'{closest_time:.2f}'  # formatting a str with ':.2f' raises, so convert to float first
print(closest_time)
#Scale distance to 1 Mpc
dist_mpc = 1e-5 # HESMA specta at 10 pc
hesma_spec[closest_time] = hesma_spec[closest_time] * (1e-5) ** 2 # refspecditance Mpc / 1 Mpc ** 2
for ax in axes:
ax.plot(hesma_spec['0.00'], hesma_spec[closest_time], label='HESMA model')
def plothesmaresspec(fig, ax):
# specfiles = ["/Users/ccollins/Downloads/hesma_files/M2a_i55/hesma_specseq_theta.dat"]
specfiles = ["/Users/ccollins/Downloads/hesma_files/M2a/hesma_virtualspecseq_theta.dat"]
for specfilename in specfiles:
specdata = pd.read_csv(specfilename, delim_whitespace=True, header=None, dtype=float)
# index_to_split = specdata.index[specdata.iloc[:, 1] == specdata.iloc[0, 1]]
# res_specdata = []
# for i, index_value in enumerate(index_to_split):
# if index_value != index_to_split[-1]:
# chunk = specdata.iloc[index_to_split[i]:index_to_split[i + 1], :]
# else:
# chunk = specdata.iloc[index_to_split[i]:, :]
# res_specdata.append(chunk)
res_specdata = at.gather_res_data(specdata)
column_names = res_specdata[0].iloc[0]
column_names[0] = 'lambda'
print(column_names)
for i, res_spec in enumerate(res_specdata):
res_specdata[i] = res_specdata[i].rename(columns=column_names).drop(res_specdata[i].index[0])
ax.plot(res_specdata[0]['lambda'], res_specdata[0][11.7935] * (1e-5) ** 2, label="hesma 0")
ax.plot(res_specdata[1]['lambda'], res_specdata[1][11.7935] * (1e-5) ** 2, label="hesma 1")
ax.plot(res_specdata[2]['lambda'], res_specdata[2][11.7935] * (1e-5) ** 2, label="hesma 2")
ax.plot(res_specdata[3]['lambda'], res_specdata[3][11.7935] * (1e-5) ** 2, label="hesma 3")
ax.plot(res_specdata[4]['lambda'], res_specdata[4][11.7935] * (1e-5) ** 2, label="hesma 4")
fig.legend()
# plt.show()
def make_hesma_vspecfiles(modelpath):
angles = [0, 1, 2, 3, 4]
for angle in angles:
vspecdata_all = at.spectra.get_specpol_data(angle=angle, modelpath=modelpath)
vspecdata = vspecdata_all['I']
timearray = vspecdata.columns.values[1:]
vspecdata.sort_values(by='nu', ascending=False, inplace=True)
vspecdata.eval('lambda_angstroms = 2.99792458e+18 / nu', inplace=True)
for time in timearray:
vspecdata[time] = vspecdata[time] * vspecdata['nu'] / vspecdata['lambda_angstroms']
vspecdata[time] = vspecdata[time] * (1e5) ** 2 # Scale to 10 pc (1 Mpc/10 pc) ** 2
vspecdata = vspecdata.set_index('lambda_angstroms').reset_index()
vspecdata = vspecdata.drop(['nu'], axis=1)
vspecdata = vspecdata.rename(columns={'lambda_angstroms': '0'})
print(vspecdata)
if angle == 0:
vspecdata.to_csv(modelpath / 'hesma_virtualspecseq_theta.dat', sep=' ', index=False) # create file
else:
# append to file
vspecdata.to_csv(modelpath / 'hesma_virtualspecseq_theta.dat', mode='a', sep=' ', index=False)
def make_hesma_bol_lightcurve(modelpath, outpath, timemin, timemax):
"""UVOIR bolometric light curve (angle-averaged)"""
lightcurvedataframe = at.lightcurve.writebollightcurvedata.get_bol_lc_from_lightcurveout(modelpath)
print(lightcurvedataframe)
lightcurvedataframe = lightcurvedataframe[lightcurvedataframe.time > timemin]
lightcurvedataframe = lightcurvedataframe[lightcurvedataframe.time < timemax]
modelname = at.get_model_name(modelpath)
outfilename = f'doubledet_2021_{modelname}.dat'
lightcurvedataframe.to_csv(outpath / outfilename, sep=' ', index=False, header=False)
def make_hesma_peakmag_dm15_dm40(pathtofiles, modelname, outpath):
dm15filename = f"bolband_{modelname}_viewing_angle_data.txt"
dm40filename = f"bolband_{modelname}_viewing_angle_data_deltam40.txt"
dm15data = pd.read_csv(pathtofiles / dm15filename, delim_whitespace=True, header=None, names=['peakmag', 'risetime', 'dm15'], skiprows=1)
dm40data = pd.read_csv(pathtofiles / dm40filename, delim_whitespace=True, header=None, names=['peakmag', 'risetime', 'dm40'], skiprows=1)
outdata = {}
outdata['peakmag'] = dm15data['peakmag'] # dm15 peak mag probably more accurate - shorter time window
outdata['dm15'] = dm15data['dm15']
outdata['dm40'] = dm40data['dm40']
outdataframe = pd.DataFrame(outdata)
outdataframe = outdataframe.round(decimals=4)
outdataframe.to_csv(outpath / f"doubledet_2021_bol_dm_{modelname}.dat", sep=' ', index=False, header=True)
def read_hesma_peakmag_dm15_dm40(pathtofiles):
data = []
for filename in os.listdir(pathtofiles):
print(filename)
        data.append(pd.read_csv(pathtofiles / filename, delim_whitespace=True))
"""
Area Weighted Interpolation
"""
import numpy as np
import geopandas as gpd
from .vectorized_raster_interpolation import fast_append_profile_in_gdf
import warnings
from scipy.sparse import dok_matrix, diags
import pandas as pd
from tobler.util.util import _check_crs, _nan_check, _check_presence_of_crs
def area_tables_binning(source_df, target_df):
"""Construct area allocation and source-target correspondence tables using a spatial indexing approach
Parameters
----------
source_df : geopandas.GeoDataFrame
GeoDataFrame containing input data and polygons
    target_df : geopandas.GeoDataFrame
GeoDataFrame defining the output geometries
Returns
-------
tables : scipy.sparse.dok_matrix
"""
if _check_crs(source_df, target_df):
pass
else:
return None
df1 = source_df
df2 = target_df
l1, b1, r1, t1 = df1.total_bounds
l2, b2, r2, t2 = df2.total_bounds
total_bounds = [min(l1, l2), min(b1, b2), max(r1, r2), max(t1, t2)]
n1, k1 = df1.shape
n2, k2 = df2.shape
numPoly = n1 + n2
DELTA = 0.000001
# constants for bucket sizes
BUCK_SM = 8
BUCK_LG = 80
SHP_SMALL = 1000
shapebox = total_bounds
# bucket size
if numPoly < SHP_SMALL:
bucketmin = numPoly // BUCK_SM + 2
else:
bucketmin = numPoly // BUCK_LG + 2
# print 'bucketmin: ', bucketmin
# bucket length
lengthx = ((shapebox[2] + DELTA) - shapebox[0]) / bucketmin
lengthy = ((shapebox[3] + DELTA) - shapebox[1]) / bucketmin
# initialize buckets
columns1 = [set() for i in range(bucketmin)]
rows1 = [set() for i in range(bucketmin)]
columns2 = [set() for i in range(bucketmin)]
rows2 = [set() for i in range(bucketmin)]
minbox = shapebox[:2] * 2 # minx,miny,minx,miny
binWidth = [lengthx, lengthy] * 2 # lenx,leny,lenx,leny
bbcache = {}
poly2Column1 = [set() for i in range(n1)]
poly2Row1 = [set() for i in range(n1)]
poly2Column2 = [set() for i in range(n2)]
poly2Row2 = [set() for i in range(n2)]
for i in range(n1):
shpObj = df1.geometry.iloc[i]
bbcache[i] = shpObj.bounds
projBBox = [
int((shpObj.bounds[:][j] - minbox[j]) / binWidth[j]) for j in range(4)
]
for j in range(projBBox[0], projBBox[2] + 1):
columns1[j].add(i)
poly2Column1[i].add(j)
for j in range(projBBox[1], projBBox[3] + 1):
rows1[j].add(i)
poly2Row1[i].add(j)
for i in range(n2):
shpObj = df2.geometry.iloc[i]
bbcache[i] = shpObj.bounds
projBBox = [
int((shpObj.bounds[:][j] - minbox[j]) / binWidth[j]) for j in range(4)
]
for j in range(projBBox[0], projBBox[2] + 1):
columns2[j].add(i)
poly2Column2[i].add(j)
for j in range(projBBox[1], projBBox[3] + 1):
rows2[j].add(i)
poly2Row2[i].add(j)
table = dok_matrix((n1, n2), dtype=np.float32)
for polyId in range(n1):
idRows = poly2Row1[polyId]
idCols = poly2Column1[polyId]
rowNeighbors = set()
colNeighbors = set()
for row in idRows:
rowNeighbors = rowNeighbors.union(rows2[row])
for col in idCols:
colNeighbors = colNeighbors.union(columns2[col])
neighbors = rowNeighbors.intersection(colNeighbors)
for neighbor in neighbors:
if df1.geometry.iloc[polyId].intersects(df2.geometry.iloc[neighbor]):
intersection = df1.geometry.iloc[polyId].intersection(
df2.geometry.iloc[neighbor]
)
table[polyId, neighbor] = intersection.area
return table
def area_tables(source_df, target_df):
"""
Construct area allocation and source-target correspondence tables.
Parameters
----------
source_df : geopandas.GeoDataFrame
target_df : geopandas.GeoDataFrame
Returns
-------
tables : tuple (optional)
two 2-D numpy arrays
SU: area of intersection of source geometry i with union geometry j
UT: binary mapping of union geometry j to target geometry t
Notes
-----
The assumption is both dataframes have the same coordinate reference system.
Union geometry is a geometry formed by the intersection of a source geometry and a target geometry
SU Maps source geometry to union geometry, UT maps union geometry to target geometry
"""
if _check_crs(source_df, target_df):
pass
else:
return None
n_s = source_df.shape[0]
n_t = target_df.shape[0]
_left = np.arange(n_s)
_right = np.arange(n_t)
source_df.loc[:, "_left"] = _left # create temporary index for union
target_df.loc[:, "_right"] = _right # create temporary index for union
res_union = gpd.overlay(source_df, target_df, how="union")
n_u, _ = res_union.shape
SU = np.zeros(
(n_s, n_u)
) # holds area of intersection of source geom with union geom
UT = np.zeros((n_u, n_t)) # binary table mapping union geom to target geom
for index, row in res_union.iterrows():
# only union polygons that intersect both a source and a target geometry matter
if not np.isnan(row["_left"]) and not np.isnan(row["_right"]):
s_id = int(row["_left"])
t_id = int(row["_right"])
SU[s_id, index] = row["geometry"].area
UT[index, t_id] = 1
source_df.drop(["_left"], axis=1, inplace=True)
target_df.drop(["_right"], axis=1, inplace=True)
return SU, UT
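# Shape note (not part of the original module): with n_s source, n_t target and n_u
# union polygons, SU has shape (n_s, n_u) and UT has shape (n_u, n_t), so SU @ UT is
# the (n_s, n_t) matrix of source-target intersection areas used by area_interpolate.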
def area_interpolate_binning(
source_df,
target_df,
extensive_variables=None,
intensive_variables=None,
table=None,
allocate_total=True,
):
"""
Area interpolation for extensive and intensive variables.
Parameters
----------
source_df : geopandas.GeoDataFrame
target_df : geopandas.GeoDataFrame
extensive_variables : list
columns in dataframes for extensive variables
intensive_variables : list
columns in dataframes for intensive variables
table : scipy.sparse.dok_matrix
allocate_total : boolean
True if total value of source area should be allocated.
False if denominator is area of i. Note that the two cases
would be identical when the area of the source polygon is
exhausted by intersections. See Notes for more details.
Returns
-------
estimates : geopandas.GeoDataFrame
new geodaraframe with interpolated variables as columns and target_df geometry
as output geometry
Notes
-----
The assumption is both dataframes have the same coordinate reference system.
For an extensive variable, the estimate at target polygon j (default case) is:
.. math::
v_j = \\sum_i v_i w_{i,j}
w_{i,j} = a_{i,j} / \\sum_k a_{i,k}
If the area of the source polygon is not exhausted by intersections with
target polygons and there is reason to not allocate the complete value of
an extensive attribute, then setting allocate_total=False will use the
following weights:
.. math::
v_j = \\sum_i v_i w_{i,j}
w_{i,j} = a_{i,j} / a_i
where a_i is the total area of source polygon i.
For an intensive variable, the estimate at target polygon j is:
.. math::
v_j = \\sum_i v_i w_{i,j}
w_{i,j} = a_{i,j} / \\sum_k a_{k,j}
"""
if _check_crs(source_df, target_df):
pass
else:
return None
if table is None:
table = area_tables_binning(source_df, target_df)
den = source_df["geometry"].area.values
if allocate_total:
den = np.asarray(table.sum(axis=1))
den = den + (den == 0)
den = 1.0 / den
n = den.shape[0]
den = den.reshape((n,))
den = diags([den], [0])
weights = den.dot(table) # row standardize table
dfs = []
extensive = []
if extensive_variables:
for variable in extensive_variables:
vals = _nan_check(source_df, variable)
estimates = diags([vals], [0]).dot(weights)
estimates = estimates.sum(axis=0)
extensive.append(estimates.tolist()[0])
extensive = np.asarray(extensive)
extensive = np.array(extensive)
extensive = pd.DataFrame(extensive.T, columns=extensive_variables)
area = np.asarray(table.sum(axis=0))
den = 1.0 / (area + (area == 0))
n, k = den.shape
den = den.reshape((k,))
den = diags([den], [0])
weights = table.dot(den)
intensive = []
if intensive_variables:
for variable in intensive_variables:
vals = _nan_check(source_df, variable)
n = vals.shape[0]
vals = vals.reshape((n,))
estimates = diags([vals], [0])
estimates = estimates.dot(weights).sum(axis=0)
intensive.append(estimates.tolist()[0])
intensive = np.asarray(intensive)
intensive = pd.DataFrame(intensive.T, columns=intensive_variables)
if extensive_variables:
dfs.append(extensive)
if intensive_variables:
dfs.append(intensive)
df = pd.concat(dfs, axis=0)
df["geometry"] = target_df["geometry"]
df = gpd.GeoDataFrame(df)
return df
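# Hypothetical usage sketch (not part of the module): two unit squares interpolated onto
# one target polygon that straddles them; geometries, CRS and values are made up purely
# for illustration.
def _example_area_interpolate_binning():
    from shapely.geometry import box
    source = gpd.GeoDataFrame(
        {"population": [100, 200]},
        geometry=[box(0, 0, 1, 1), box(1, 0, 2, 1)],
        crs="EPSG:3857",
    )
    target = gpd.GeoDataFrame(geometry=[box(0.5, 0, 1.5, 1)], crs="EPSG:3857")
    return area_interpolate_binning(source, target, extensive_variables=["population"])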
def area_interpolate(
source_df,
target_df,
extensive_variables=None,
intensive_variables=None,
tables=None,
allocate_total=True,
):
"""
Area interpolation for extensive and intensive variables.
Parameters
----------
source_df : geopandas.GeoDataFrame (required)
geodataframe with polygon geometries
target_df : geopandas.GeoDataFrame (required)
geodataframe with polygon geometries
extensive_variables : list, (optional)
columns in dataframes for extensive variables
intensive_variables : list, (optional)
columns in dataframes for intensive variables
tables : tuple (optional)
two 2-D numpy arrays
SU: area of intersection of source geometry i with union geometry j
UT: binary mapping of union geometry j to target geometry t
allocate_total : boolean
True if total value of source area should be allocated.
False if denominator is area of i. Note that the two cases
would be identical when the area of the source polygon is
exhausted by intersections. See Notes for more details.
Returns
-------
estimates : geopandas.GeoDataFrame
new geodaraframe with interpolated variables as columns and target_df geometry
as output geometry
Notes
-----
The assumption is both dataframes have the same coordinate reference system.
For an extensive variable, the estimate at target polygon j (default case) is:
v_j = \sum_i v_i w_{i,j}
w_{i,j} = a_{i,j} / \sum_k a_{i,k}
If the area of the source polygon is not exhausted by intersections with
target polygons and there is reason to not allocate the complete value of
an extensive attribute, then setting allocate_total=False will use the
following weights:
v_j = \sum_i v_i w_{i,j}
w_{i,j} = a_{i,j} / a_i
where a_i is the total area of source polygon i.
For an intensive variable, the estimate at target polygon j is:
v_j = \sum_i v_i w_{i,j}
w_{i,j} = a_{i,j} / \sum_k a_{k,j}
"""
if _check_crs(source_df, target_df):
pass
else:
return None
if tables is None:
SU, UT = area_tables(source_df, target_df)
else:
SU, UT = tables
den = source_df["geometry"].area.values
if allocate_total:
den = SU.sum(axis=1)
den = den + (den == 0)
weights = np.dot(np.diag(1 / den), SU)
dfs = []
extensive = []
if extensive_variables:
for variable in extensive_variables:
vals = _nan_check(source_df, variable)
estimates = np.dot(np.diag(vals), weights)
estimates = np.dot(estimates, UT)
estimates = estimates.sum(axis=0)
extensive.append(estimates)
extensive = np.array(extensive)
extensive = pd.DataFrame(extensive.T, columns=extensive_variables)
ST = np.dot(SU, UT)
area = ST.sum(axis=0)
den = np.diag(1.0 / (area + (area == 0)))
weights = np.dot(ST, den)
intensive = []
if intensive_variables:
for variable in intensive_variables:
vals = _nan_check(source_df, variable)
vals.shape = (len(vals), 1)
est = (vals * weights).sum(axis=0)
intensive.append(est)
intensive = np.array(intensive)
intensive = pd.DataFrame(intensive.T, columns=intensive_variables)
if extensive_variables:
dfs.append(extensive)
if intensive_variables:
dfs.append(intensive)
    df = pd.concat(dfs, axis=0)
import pytest
from pandas._libs.tslibs.frequencies import INVALID_FREQ_ERR_MSG, _period_code_map
from pandas.errors import OutOfBoundsDatetime
from pandas import Period, Timestamp, offsets
class TestFreqConversion:
"""Test frequency conversion of date objects"""
@pytest.mark.parametrize("freq", ["A", "Q", "M", "W", "B", "D"])
def test_asfreq_near_zero(self, freq):
# GH#19643, GH#19650
per = Period("0001-01-01", freq=freq)
tup1 = (per.year, per.hour, per.day)
prev = per - 1
assert prev.ordinal == per.ordinal - 1
tup2 = (prev.year, prev.month, prev.day)
assert tup2 < tup1
def test_asfreq_near_zero_weekly(self):
# GH#19834
per1 = Period("0001-01-01", "D") + 6
per2 = Period("0001-01-01", "D") - 6
week1 = per1.asfreq("W")
week2 = per2.asfreq("W")
assert week1 != week2
assert week1.asfreq("D", "E") >= per1
assert week2.asfreq("D", "S") <= per2
def test_to_timestamp_out_of_bounds(self):
# GH#19643, used to incorrectly give Timestamp in 1754
per = Period("0001-01-01", freq="B")
msg = "Out of bounds nanosecond timestamp"
with pytest.raises(OutOfBoundsDatetime, match=msg):
per.to_timestamp()
def test_asfreq_corner(self):
val = Period(freq="A", year=2007)
result1 = val.asfreq("5t")
result2 = val.asfreq("t")
expected = Period("2007-12-31 23:59", freq="t")
assert result1.ordinal == expected.ordinal
assert result1.freqstr == "5T"
assert result2.ordinal == expected.ordinal
assert result2.freqstr == "T"
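    # Quick reference (not an original test case), assuming standard Period semantics:
    #   Period("2007", freq="A").asfreq("M", how="S")  -> Period('2007-01', 'M')
    #   Period("2007", freq="A").asfreq("M", how="E")  -> Period('2007-12', 'M')
    #   Period("2007-03", freq="M").asfreq("Q")        -> Period('2007Q1', 'Q-DEC')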
def test_conv_annual(self):
# frequency conversion tests: from Annual Frequency
ival_A = Period(freq="A", year=2007)
ival_AJAN = Period(freq="A-JAN", year=2007)
ival_AJUN = Period(freq="A-JUN", year=2007)
ival_ANOV = Period(freq="A-NOV", year=2007)
ival_A_to_Q_start = Period(freq="Q", year=2007, quarter=1)
ival_A_to_Q_end = Period(freq="Q", year=2007, quarter=4)
ival_A_to_M_start = Period(freq="M", year=2007, month=1)
ival_A_to_M_end = Period(freq="M", year=2007, month=12)
ival_A_to_W_start = Period(freq="W", year=2007, month=1, day=1)
ival_A_to_W_end = Period(freq="W", year=2007, month=12, day=31)
ival_A_to_B_start = Period(freq="B", year=2007, month=1, day=1)
ival_A_to_B_end = Period(freq="B", year=2007, month=12, day=31)
ival_A_to_D_start = Period(freq="D", year=2007, month=1, day=1)
ival_A_to_D_end = Period(freq="D", year=2007, month=12, day=31)
ival_A_to_H_start = Period(freq="H", year=2007, month=1, day=1, hour=0)
ival_A_to_H_end = Period(freq="H", year=2007, month=12, day=31, hour=23)
ival_A_to_T_start = Period(
freq="Min", year=2007, month=1, day=1, hour=0, minute=0
)
ival_A_to_T_end = Period(
freq="Min", year=2007, month=12, day=31, hour=23, minute=59
)
ival_A_to_S_start = Period(
freq="S", year=2007, month=1, day=1, hour=0, minute=0, second=0
)
ival_A_to_S_end = Period(
freq="S", year=2007, month=12, day=31, hour=23, minute=59, second=59
)
ival_AJAN_to_D_end = Period(freq="D", year=2007, month=1, day=31)
ival_AJAN_to_D_start = Period(freq="D", year=2006, month=2, day=1)
ival_AJUN_to_D_end = Period(freq="D", year=2007, month=6, day=30)
ival_AJUN_to_D_start = Period(freq="D", year=2006, month=7, day=1)
ival_ANOV_to_D_end = Period(freq="D", year=2007, month=11, day=30)
ival_ANOV_to_D_start = Period(freq="D", year=2006, month=12, day=1)
assert ival_A.asfreq("Q", "S") == ival_A_to_Q_start
assert ival_A.asfreq("Q", "e") == ival_A_to_Q_end
assert ival_A.asfreq("M", "s") == ival_A_to_M_start
assert ival_A.asfreq("M", "E") == ival_A_to_M_end
assert ival_A.asfreq("W", "S") == ival_A_to_W_start
assert ival_A.asfreq("W", "E") == ival_A_to_W_end
assert ival_A.asfreq("B", "S") == ival_A_to_B_start
assert ival_A.asfreq("B", "E") == ival_A_to_B_end
assert ival_A.asfreq("D", "S") == ival_A_to_D_start
assert ival_A.asfreq("D", "E") == ival_A_to_D_end
assert ival_A.asfreq("H", "S") == ival_A_to_H_start
assert ival_A.asfreq("H", "E") == ival_A_to_H_end
assert ival_A.asfreq("min", "S") == ival_A_to_T_start
assert ival_A.asfreq("min", "E") == ival_A_to_T_end
assert ival_A.asfreq("T", "S") == ival_A_to_T_start
assert ival_A.asfreq("T", "E") == ival_A_to_T_end
assert ival_A.asfreq("S", "S") == ival_A_to_S_start
assert ival_A.asfreq("S", "E") == ival_A_to_S_end
assert ival_AJAN.asfreq("D", "S") == ival_AJAN_to_D_start
assert ival_AJAN.asfreq("D", "E") == ival_AJAN_to_D_end
assert ival_AJUN.asfreq("D", "S") == ival_AJUN_to_D_start
assert ival_AJUN.asfreq("D", "E") == ival_AJUN_to_D_end
assert ival_ANOV.asfreq("D", "S") == ival_ANOV_to_D_start
assert ival_ANOV.asfreq("D", "E") == ival_ANOV_to_D_end
assert ival_A.asfreq("A") == ival_A
def test_conv_quarterly(self):
# frequency conversion tests: from Quarterly Frequency
ival_Q = Period(freq="Q", year=2007, quarter=1)
ival_Q_end_of_year = Period(freq="Q", year=2007, quarter=4)
ival_QEJAN = Period(freq="Q-JAN", year=2007, quarter=1)
ival_QEJUN = Period(freq="Q-JUN", year=2007, quarter=1)
ival_Q_to_A = Period(freq="A", year=2007)
ival_Q_to_M_start = Period(freq="M", year=2007, month=1)
ival_Q_to_M_end = Period(freq="M", year=2007, month=3)
ival_Q_to_W_start = Period(freq="W", year=2007, month=1, day=1)
ival_Q_to_W_end = Period(freq="W", year=2007, month=3, day=31)
ival_Q_to_B_start = Period(freq="B", year=2007, month=1, day=1)
ival_Q_to_B_end = Period(freq="B", year=2007, month=3, day=30)
ival_Q_to_D_start = Period(freq="D", year=2007, month=1, day=1)
ival_Q_to_D_end = Period(freq="D", year=2007, month=3, day=31)
ival_Q_to_H_start = Period(freq="H", year=2007, month=1, day=1, hour=0)
ival_Q_to_H_end = Period(freq="H", year=2007, month=3, day=31, hour=23)
ival_Q_to_T_start = Period(
freq="Min", year=2007, month=1, day=1, hour=0, minute=0
)
ival_Q_to_T_end = Period(
freq="Min", year=2007, month=3, day=31, hour=23, minute=59
)
ival_Q_to_S_start = Period(
freq="S", year=2007, month=1, day=1, hour=0, minute=0, second=0
)
ival_Q_to_S_end = Period(
freq="S", year=2007, month=3, day=31, hour=23, minute=59, second=59
)
ival_QEJAN_to_D_start = Period(freq="D", year=2006, month=2, day=1)
ival_QEJAN_to_D_end = Period(freq="D", year=2006, month=4, day=30)
ival_QEJUN_to_D_start = Period(freq="D", year=2006, month=7, day=1)
ival_QEJUN_to_D_end = Period(freq="D", year=2006, month=9, day=30)
assert ival_Q.asfreq("A") == ival_Q_to_A
assert ival_Q_end_of_year.asfreq("A") == ival_Q_to_A
assert ival_Q.asfreq("M", "S") == ival_Q_to_M_start
assert ival_Q.asfreq("M", "E") == ival_Q_to_M_end
assert ival_Q.asfreq("W", "S") == ival_Q_to_W_start
assert ival_Q.asfreq("W", "E") == ival_Q_to_W_end
assert ival_Q.asfreq("B", "S") == ival_Q_to_B_start
assert ival_Q.asfreq("B", "E") == ival_Q_to_B_end
assert ival_Q.asfreq("D", "S") == ival_Q_to_D_start
assert ival_Q.asfreq("D", "E") == ival_Q_to_D_end
assert ival_Q.asfreq("H", "S") == ival_Q_to_H_start
assert ival_Q.asfreq("H", "E") == ival_Q_to_H_end
assert ival_Q.asfreq("Min", "S") == ival_Q_to_T_start
assert ival_Q.asfreq("Min", "E") == ival_Q_to_T_end
assert ival_Q.asfreq("S", "S") == ival_Q_to_S_start
assert ival_Q.asfreq("S", "E") == ival_Q_to_S_end
assert ival_QEJAN.asfreq("D", "S") == ival_QEJAN_to_D_start
assert ival_QEJAN.asfreq("D", "E") == ival_QEJAN_to_D_end
assert ival_QEJUN.asfreq("D", "S") == ival_QEJUN_to_D_start
assert ival_QEJUN.asfreq("D", "E") == ival_QEJUN_to_D_end
assert ival_Q.asfreq("Q") == ival_Q
def test_conv_monthly(self):
# frequency conversion tests: from Monthly Frequency
ival_M = Period(freq="M", year=2007, month=1)
ival_M_end_of_year = Period(freq="M", year=2007, month=12)
ival_M_end_of_quarter = Period(freq="M", year=2007, month=3)
ival_M_to_A = Period(freq="A", year=2007)
ival_M_to_Q = Period(freq="Q", year=2007, quarter=1)
ival_M_to_W_start = Period(freq="W", year=2007, month=1, day=1)
ival_M_to_W_end = Period(freq="W", year=2007, month=1, day=31)
ival_M_to_B_start = Period(freq="B", year=2007, month=1, day=1)
ival_M_to_B_end = Period(freq="B", year=2007, month=1, day=31)
ival_M_to_D_start = Period(freq="D", year=2007, month=1, day=1)
ival_M_to_D_end = Period(freq="D", year=2007, month=1, day=31)
ival_M_to_H_start = Period(freq="H", year=2007, month=1, day=1, hour=0)
ival_M_to_H_end = Period(freq="H", year=2007, month=1, day=31, hour=23)
ival_M_to_T_start = Period(
freq="Min", year=2007, month=1, day=1, hour=0, minute=0
)
ival_M_to_T_end = Period(
freq="Min", year=2007, month=1, day=31, hour=23, minute=59
)
ival_M_to_S_start = Period(
freq="S", year=2007, month=1, day=1, hour=0, minute=0, second=0
)
ival_M_to_S_end = Period(
freq="S", year=2007, month=1, day=31, hour=23, minute=59, second=59
)
assert ival_M.asfreq("A") == ival_M_to_A
assert ival_M_end_of_year.asfreq("A") == ival_M_to_A
assert ival_M.asfreq("Q") == ival_M_to_Q
assert ival_M_end_of_quarter.asfreq("Q") == ival_M_to_Q
assert ival_M.asfreq("W", "S") == ival_M_to_W_start
assert ival_M.asfreq("W", "E") == ival_M_to_W_end
assert ival_M.asfreq("B", "S") == ival_M_to_B_start
assert ival_M.asfreq("B", "E") == ival_M_to_B_end
assert ival_M.asfreq("D", "S") == ival_M_to_D_start
assert ival_M.asfreq("D", "E") == ival_M_to_D_end
assert ival_M.asfreq("H", "S") == ival_M_to_H_start
assert ival_M.asfreq("H", "E") == ival_M_to_H_end
assert ival_M.asfreq("Min", "S") == ival_M_to_T_start
assert ival_M.asfreq("Min", "E") == ival_M_to_T_end
assert ival_M.asfreq("S", "S") == ival_M_to_S_start
assert ival_M.asfreq("S", "E") == ival_M_to_S_end
assert ival_M.asfreq("M") == ival_M
def test_conv_weekly(self):
# frequency conversion tests: from Weekly Frequency
ival_W = Period(freq="W", year=2007, month=1, day=1)
ival_WSUN = Period(freq="W", year=2007, month=1, day=7)
ival_WSAT = Period(freq="W-SAT", year=2007, month=1, day=6)
ival_WFRI = Period(freq="W-FRI", year=2007, month=1, day=5)
ival_WTHU = Period(freq="W-THU", year=2007, month=1, day=4)
ival_WWED = Period(freq="W-WED", year=2007, month=1, day=3)
ival_WTUE = Period(freq="W-TUE", year=2007, month=1, day=2)
ival_WMON = Period(freq="W-MON", year=2007, month=1, day=1)
ival_WSUN_to_D_start = Period(freq="D", year=2007, month=1, day=1)
ival_WSUN_to_D_end = Period(freq="D", year=2007, month=1, day=7)
ival_WSAT_to_D_start = Period(freq="D", year=2006, month=12, day=31)
ival_WSAT_to_D_end = Period(freq="D", year=2007, month=1, day=6)
ival_WFRI_to_D_start = Period(freq="D", year=2006, month=12, day=30)
ival_WFRI_to_D_end = Period(freq="D", year=2007, month=1, day=5)
ival_WTHU_to_D_start = Period(freq="D", year=2006, month=12, day=29)
ival_WTHU_to_D_end = Period(freq="D", year=2007, month=1, day=4)
ival_WWED_to_D_start = Period(freq="D", year=2006, month=12, day=28)
ival_WWED_to_D_end = Period(freq="D", year=2007, month=1, day=3)
ival_WTUE_to_D_start = Period(freq="D", year=2006, month=12, day=27)
ival_WTUE_to_D_end = Period(freq="D", year=2007, month=1, day=2)
ival_WMON_to_D_start = Period(freq="D", year=2006, month=12, day=26)
ival_WMON_to_D_end = Period(freq="D", year=2007, month=1, day=1)
ival_W_end_of_year = Period(freq="W", year=2007, month=12, day=31)
ival_W_end_of_quarter = Period(freq="W", year=2007, month=3, day=31)
ival_W_end_of_month = Period(freq="W", year=2007, month=1, day=31)
ival_W_to_A = Period(freq="A", year=2007)
ival_W_to_Q = Period(freq="Q", year=2007, quarter=1)
ival_W_to_M = Period(freq="M", year=2007, month=1)
if Period(freq="D", year=2007, month=12, day=31).weekday == 6:
ival_W_to_A_end_of_year = Period(freq="A", year=2007)
else:
ival_W_to_A_end_of_year = Period(freq="A", year=2008)
if Period(freq="D", year=2007, month=3, day=31).weekday == 6:
ival_W_to_Q_end_of_quarter = Period(freq="Q", year=2007, quarter=1)
else:
ival_W_to_Q_end_of_quarter = Period(freq="Q", year=2007, quarter=2)
if Period(freq="D", year=2007, month=1, day=31).weekday == 6:
ival_W_to_M_end_of_month = Period(freq="M", year=2007, month=1)
else:
ival_W_to_M_end_of_month = Period(freq="M", year=2007, month=2)
ival_W_to_B_start = Period(freq="B", year=2007, month=1, day=1)
ival_W_to_B_end = Period(freq="B", year=2007, month=1, day=5)
ival_W_to_D_start = Period(freq="D", year=2007, month=1, day=1)
ival_W_to_D_end = Period(freq="D", year=2007, month=1, day=7)
ival_W_to_H_start = Period(freq="H", year=2007, month=1, day=1, hour=0)
ival_W_to_H_end = Period(freq="H", year=2007, month=1, day=7, hour=23)
ival_W_to_T_start = Period(
freq="Min", year=2007, month=1, day=1, hour=0, minute=0
)
ival_W_to_T_end = Period(
freq="Min", year=2007, month=1, day=7, hour=23, minute=59
)
ival_W_to_S_start = Period(
freq="S", year=2007, month=1, day=1, hour=0, minute=0, second=0
)
ival_W_to_S_end = Period(
freq="S", year=2007, month=1, day=7, hour=23, minute=59, second=59
)
assert ival_W.asfreq("A") == ival_W_to_A
assert ival_W_end_of_year.asfreq("A") == ival_W_to_A_end_of_year
assert ival_W.asfreq("Q") == ival_W_to_Q
assert ival_W_end_of_quarter.asfreq("Q") == ival_W_to_Q_end_of_quarter
assert ival_W.asfreq("M") == ival_W_to_M
assert ival_W_end_of_month.asfreq("M") == ival_W_to_M_end_of_month
assert ival_W.asfreq("B", "S") == ival_W_to_B_start
assert ival_W.asfreq("B", "E") == ival_W_to_B_end
assert ival_W.asfreq("D", "S") == ival_W_to_D_start
assert ival_W.asfreq("D", "E") == ival_W_to_D_end
assert ival_WSUN.asfreq("D", "S") == ival_WSUN_to_D_start
assert ival_WSUN.asfreq("D", "E") == ival_WSUN_to_D_end
assert ival_WSAT.asfreq("D", "S") == ival_WSAT_to_D_start
assert ival_WSAT.asfreq("D", "E") == ival_WSAT_to_D_end
assert ival_WFRI.asfreq("D", "S") == ival_WFRI_to_D_start
assert ival_WFRI.asfreq("D", "E") == ival_WFRI_to_D_end
assert ival_WTHU.asfreq("D", "S") == ival_WTHU_to_D_start
assert ival_WTHU.asfreq("D", "E") == ival_WTHU_to_D_end
assert ival_WWED.asfreq("D", "S") == ival_WWED_to_D_start
assert ival_WWED.asfreq("D", "E") == ival_WWED_to_D_end
assert ival_WTUE.asfreq("D", "S") == ival_WTUE_to_D_start
assert ival_WTUE.asfreq("D", "E") == ival_WTUE_to_D_end
assert ival_WMON.asfreq("D", "S") == ival_WMON_to_D_start
assert ival_WMON.asfreq("D", "E") == ival_WMON_to_D_end
assert ival_W.asfreq("H", "S") == ival_W_to_H_start
assert ival_W.asfreq("H", "E") == ival_W_to_H_end
assert ival_W.asfreq("Min", "S") == ival_W_to_T_start
assert ival_W.asfreq("Min", "E") == ival_W_to_T_end
assert ival_W.asfreq("S", "S") == ival_W_to_S_start
assert ival_W.asfreq("S", "E") == ival_W_to_S_end
assert ival_W.asfreq("W") == ival_W
msg = INVALID_FREQ_ERR_MSG
with pytest.raises(ValueError, match=msg):
ival_W.asfreq("WK")
def test_conv_weekly_legacy(self):
# frequency conversion tests: from Weekly Frequency
msg = INVALID_FREQ_ERR_MSG
with pytest.raises(ValueError, match=msg):
Period(freq="WK", year=2007, month=1, day=1)
with pytest.raises(ValueError, match=msg):
Period(freq="WK-SAT", year=2007, month=1, day=6)
with pytest.raises(ValueError, match=msg):
Period(freq="WK-FRI", year=2007, month=1, day=5)
with pytest.raises(ValueError, match=msg):
Period(freq="WK-THU", year=2007, month=1, day=4)
with pytest.raises(ValueError, match=msg):
Period(freq="WK-WED", year=2007, month=1, day=3)
with pytest.raises(ValueError, match=msg):
Period(freq="WK-TUE", year=2007, month=1, day=2)
with pytest.raises(ValueError, match=msg):
Period(freq="WK-MON", year=2007, month=1, day=1)
def test_conv_business(self):
        # frequency conversion tests: from Business Frequency
ival_B = Period(freq="B", year=2007, month=1, day=1)
ival_B_end_of_year = Period(freq="B", year=2007, month=12, day=31)
ival_B_end_of_quarter = Period(freq="B", year=2007, month=3, day=30)
ival_B_end_of_month = Period(freq="B", year=2007, month=1, day=31)
ival_B_end_of_week = Period(freq="B", year=2007, month=1, day=5)
ival_B_to_A = Period(freq="A", year=2007)
ival_B_to_Q = Period(freq="Q", year=2007, quarter=1)
ival_B_to_M = Period(freq="M", year=2007, month=1)
ival_B_to_W = Period(freq="W", year=2007, month=1, day=7)
ival_B_to_D = Period(freq="D", year=2007, month=1, day=1)
ival_B_to_H_start = Period(freq="H", year=2007, month=1, day=1, hour=0)
ival_B_to_H_end = Period(freq="H", year=2007, month=1, day=1, hour=23)
ival_B_to_T_start = Period(
freq="Min", year=2007, month=1, day=1, hour=0, minute=0
)
ival_B_to_T_end = Period(
freq="Min", year=2007, month=1, day=1, hour=23, minute=59
)
ival_B_to_S_start = Period(
freq="S", year=2007, month=1, day=1, hour=0, minute=0, second=0
)
ival_B_to_S_end = Period(
freq="S", year=2007, month=1, day=1, hour=23, minute=59, second=59
)
assert ival_B.asfreq("A") == ival_B_to_A
assert ival_B_end_of_year.asfreq("A") == ival_B_to_A
assert ival_B.asfreq("Q") == ival_B_to_Q
assert ival_B_end_of_quarter.asfreq("Q") == ival_B_to_Q
assert ival_B.asfreq("M") == ival_B_to_M
assert ival_B_end_of_month.asfreq("M") == ival_B_to_M
assert ival_B.asfreq("W") == ival_B_to_W
assert ival_B_end_of_week.asfreq("W") == ival_B_to_W
assert ival_B.asfreq("D") == ival_B_to_D
assert ival_B.asfreq("H", "S") == ival_B_to_H_start
assert ival_B.asfreq("H", "E") == ival_B_to_H_end
assert ival_B.asfreq("Min", "S") == ival_B_to_T_start
assert ival_B.asfreq("Min", "E") == ival_B_to_T_end
assert ival_B.asfreq("S", "S") == ival_B_to_S_start
assert ival_B.asfreq("S", "E") == ival_B_to_S_end
assert ival_B.asfreq("B") == ival_B
def test_conv_daily(self):
        # frequency conversion tests: from Daily Frequency
ival_D = Period(freq="D", year=2007, month=1, day=1)
ival_D_end_of_year = Period(freq="D", year=2007, month=12, day=31)
ival_D_end_of_quarter = Period(freq="D", year=2007, month=3, day=31)
ival_D_end_of_month = Period(freq="D", year=2007, month=1, day=31)
ival_D_end_of_week = Period(freq="D", year=2007, month=1, day=7)
ival_D_friday = Period(freq="D", year=2007, month=1, day=5)
ival_D_saturday = Period(freq="D", year=2007, month=1, day=6)
ival_D_sunday = Period(freq="D", year=2007, month=1, day=7)
# TODO: unused?
# ival_D_monday = Period(freq='D', year=2007, month=1, day=8)
ival_B_friday = Period(freq="B", year=2007, month=1, day=5)
ival_B_monday = Period(freq="B", year=2007, month=1, day=8)
ival_D_to_A = Period(freq="A", year=2007)
ival_Deoq_to_AJAN = Period(freq="A-JAN", year=2008)
ival_Deoq_to_AJUN = Period(freq="A-JUN", year=2007)
ival_Deoq_to_ADEC = Period(freq="A-DEC", year=2007)
ival_D_to_QEJAN = Period(freq="Q-JAN", year=2007, quarter=4)
ival_D_to_QEJUN = Period(freq="Q-JUN", year=2007, quarter=3)
ival_D_to_QEDEC = Period(freq="Q-DEC", year=2007, quarter=1)
ival_D_to_M = Period(freq="M", year=2007, month=1)
ival_D_to_W = Period(freq="W", year=2007, month=1, day=7)
ival_D_to_H_start = Period(freq="H", year=2007, month=1, day=1, hour=0)
ival_D_to_H_end = Period(freq="H", year=2007, month=1, day=1, hour=23)
ival_D_to_T_start = Period(
freq="Min", year=2007, month=1, day=1, hour=0, minute=0
)
ival_D_to_T_end = Period(
freq="Min", year=2007, month=1, day=1, hour=23, minute=59
)
ival_D_to_S_start = Period(
freq="S", year=2007, month=1, day=1, hour=0, minute=0, second=0
)
ival_D_to_S_end = Period(
freq="S", year=2007, month=1, day=1, hour=23, minute=59, second=59
)
assert ival_D.asfreq("A") == ival_D_to_A
assert ival_D_end_of_quarter.asfreq("A-JAN") == ival_Deoq_to_AJAN
assert ival_D_end_of_quarter.asfreq("A-JUN") == ival_Deoq_to_AJUN
assert ival_D_end_of_quarter.asfreq("A-DEC") == ival_Deoq_to_ADEC
assert ival_D_end_of_year.asfreq("A") == ival_D_to_A
assert ival_D_end_of_quarter.asfreq("Q") == ival_D_to_QEDEC
assert ival_D.asfreq("Q-JAN") == ival_D_to_QEJAN
assert ival_D.asfreq("Q-JUN") == ival_D_to_QEJUN
assert ival_D.asfreq("Q-DEC") == ival_D_to_QEDEC
assert ival_D.asfreq("M") == ival_D_to_M
assert ival_D_end_of_month.asfreq("M") == ival_D_to_M
assert ival_D.asfreq("W") == ival_D_to_W
assert ival_D_end_of_week.asfreq("W") == ival_D_to_W
assert ival_D_friday.asfreq("B") == ival_B_friday
assert ival_D_saturday.asfreq("B", "S") == ival_B_friday
assert ival_D_saturday.asfreq("B", "E") == ival_B_monday
assert ival_D_sunday.asfreq("B", "S") == ival_B_friday
assert ival_D_sunday.asfreq("B", "E") == ival_B_monday
assert ival_D.asfreq("H", "S") == ival_D_to_H_start
assert ival_D.asfreq("H", "E") == ival_D_to_H_end
assert ival_D.asfreq("Min", "S") == ival_D_to_T_start
assert ival_D.asfreq("Min", "E") == ival_D_to_T_end
assert ival_D.asfreq("S", "S") == ival_D_to_S_start
assert ival_D.asfreq("S", "E") == ival_D_to_S_end
assert ival_D.asfreq("D") == ival_D
def test_conv_hourly(self):
        # frequency conversion tests: from Hourly Frequency
ival_H = Period(freq="H", year=2007, month=1, day=1, hour=0)
ival_H_end_of_year = Period(freq="H", year=2007, month=12, day=31, hour=23)
ival_H_end_of_quarter = Period(freq="H", year=2007, month=3, day=31, hour=23)
ival_H_end_of_month = Period(freq="H", year=2007, month=1, day=31, hour=23)
ival_H_end_of_week = Period(freq="H", year=2007, month=1, day=7, hour=23)
ival_H_end_of_day = Period(freq="H", year=2007, month=1, day=1, hour=23)
ival_H_end_of_bus = Period(freq="H", year=2007, month=1, day=1, hour=23)
ival_H_to_A = Period(freq="A", year=2007)
ival_H_to_Q = Period(freq="Q", year=2007, quarter=1)
ival_H_to_M = Period(freq="M", year=2007, month=1)
ival_H_to_W = Period(freq="W", year=2007, month=1, day=7)
        ival_H_to_D = Period(freq="D", year=2007, month=1, day=1)
"""Functions for plotting sipper data."""
from collections import defaultdict
import datetime
import matplotlib as mpl
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy import stats
import seaborn as sns
from sipper import SipperError
#---dates and shading
def convert_dt64_to_dt(dt64):
"""Converts numpy datetime to standard datetime (needed for shade_darkness
function in most cases)."""
new_date = ((dt64 - np.datetime64('1970-01-01T00:00:00'))/
np.timedelta64(1, 's'))
new_date = datetime.datetime.utcfromtimestamp(new_date)
return new_date
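# Illustrative usage (added comment; the value below is hypothetical):
#   convert_dt64_to_dt(np.datetime64('2020-01-01T12:30:00'))
#   -> datetime.datetime(2020, 1, 1, 12, 30)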
def hours_between(start, end, convert=True):
"""
Create a range of hours between two dates.
Parameters
----------
start, end : datetime-like object
When to begin and end the data range
convert : bool, optional
Whether to convert the start/end arguments from numpy datetime to
standard datetime. The default is True.
Returns
-------
pandas DateTimeIndex
Index array of all hours between start and end.
"""
if convert:
start = convert_dt64_to_dt(start)
end = convert_dt64_to_dt(end)
rounded_start = datetime.datetime(year=start.year,
month=start.month,
day=start.day,
hour=start.hour)
rounded_end = datetime.datetime(year=end.year,
month=end.month,
day=end.day,
hour=end.hour)
return pd.date_range(rounded_start,rounded_end,freq='1H')
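# Illustrative usage (hypothetical values; convert=False lets plain datetimes
# be passed directly):
#   hours_between(datetime.datetime(2020, 1, 1, 7, 30),
#                 datetime.datetime(2020, 1, 1, 10, 5), convert=False)
#   -> hourly DatetimeIndex covering 07:00, 08:00, 09:00 and 10:00 on 2020-01-01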
def is_day_or_night(time, period, lights_on=7, lights_off=19):
"""
Check if a datetime occured at day or night
Parameters
----------
time : datetime or pandas.Timestamp
time to check
period : str
'day' or 'night', which period to check if the date is part of,
based on the lights_on and lights_off arguments
lights_on : int, optional
Hour of the day (0-23) when lights turn on. The default is 7.
lights_off : int, optional
Hour of the day (0-23) when lights turn off. The default is 19.
Returns
-------
Bool
"""
lights_on = datetime.time(hour=lights_on)
lights_off = datetime.time(hour=lights_off)
val = False
#defaults to checking if at night
if lights_off > lights_on:
val = time.time() >= lights_off or time.time() < lights_on
elif lights_off < lights_on:
val = time.time() >= lights_off and time.time() < lights_on
#reverses if period='day'
return val if period=='night' else not val
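# Illustrative usage (hypothetical values): with the default 7:00/19:00 cycle,
# 3 AM falls in the dark phase, so
#   is_day_or_night(datetime.datetime(2020, 1, 1, 3), 'night') -> True
#   is_day_or_night(datetime.datetime(2020, 1, 1, 3), 'day')   -> False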
def get_daynight_count(start_time, end_time, lights_on=7, lights_off=19):
"""
Compute the (fractional) number of completed light and dark periods
between two dates. Used for normalizing values grouped by day & nightime.
Parameters
----------
start_time : datetime
starting time
end_time : datetime
ending time
lights_on : int, optional
Hour of the day (0-23) when lights turn on. The default is 7.
lights_off : int, optional
Hour of the day (0-23) when lights turn off. The default is 19.
Returns
-------
dict
dictionary with keys "day" and "night", values are the
number of completed periods for each key.
"""
cuts = []
cuts.append(start_time)
loop_time = start_time.replace(minute=0,second=0)
while loop_time < end_time:
loop_time += pd.Timedelta(hours=1)
if loop_time.hour == lights_on:
cuts.append(loop_time)
elif loop_time.hour == lights_off:
cuts.append(loop_time)
cuts.append(end_time)
days = []
nights = []
if lights_off > lights_on:
day_hours = lights_off - lights_on
night_hours = 24 - day_hours
else:
night_hours = lights_on - lights_off
day_hours = 24 - night_hours
day_hours = pd.Timedelta(hours = day_hours)
night_hours = pd.Timedelta(hours = night_hours)
for i, t in enumerate(cuts[:-1]):
if is_day_or_night(t, 'day', lights_on, lights_off):
days.append((cuts[i+1] - t)/day_hours)
else:
nights.append((cuts[i+1] - t)/night_hours)
return {'day':sum(days),'night':sum(nights)}
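# Illustrative usage (hypothetical values): a span covering exactly one full
# light phase counts as one completed "day" and no "night":
#   get_daynight_count(datetime.datetime(2020, 1, 1, 7),
#                      datetime.datetime(2020, 1, 1, 19),
#                      lights_on=7, lights_off=19)
#   -> {'day': 1.0, 'night': 0.0}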
def night_intervals(array, lights_on, lights_off, instead_days=False):
"""
Find intervals of a date-array corresponding to night time.
Parameters
----------
array : array-like
Array of datetimes (e.g. generated by hours_between).
lights_on : int
Integer between 0 and 23 representing when the light cycle begins.
lights_off : int
Integer between 0 and 23 representing when the light cycle ends.
instead_days : bool, optional
Return intervals during daytime instead of nighttime.
The default is False.
Returns
-------
night_intervals : list
List of tuples with structure (start of nighttime, end of nighttime).
"""
night_intervals = []
on_time = datetime.time(hour=lights_on)
off_time = datetime.time(hour=lights_off)
if on_time == off_time:
return night_intervals
else:
at_night = [is_day_or_night(i, 'night',
lights_on=lights_on,
lights_off=lights_off) for i in array]
if instead_days:
at_night = [not i for i in at_night]
if len(at_night) == 0:
return night_intervals
night_starts = []
night_ends = []
if at_night[0] == True:
night_starts.append(array[0])
for i, _ in enumerate(at_night[1:],start=1):
if at_night[i] == True and at_night[i-1] == False:
night_starts.append(array[i])
elif at_night[i] == False and at_night[i-1] == True:
night_ends.append(array[i])
if at_night[-1] == True:
night_ends.append(array[-1])
night_intervals = list(zip(night_starts, night_ends))
return night_intervals
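# Illustrative behaviour (hypothetical values): for hourly timestamps spanning
# 2020-01-01 00:00 through 23:00 with lights_on=7 and lights_off=19, two dark
# intervals are returned: (00:00, 07:00) and (19:00, 23:00).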
def shade_darkness(ax, min_date, max_date, lights_on, lights_off,
convert=True):
"""
Shade the night periods of a matplotlib Axes with a datetime x-axis.
Parameters
----------
ax : matplotlib.axes.Axes
Plot Axes.
min_date : datetime
Earliest date to shade.
max_date : datetime
Latest date to shade.
lights_on : int
Integer between 0 and 23 representing when the light cycle begins.
lights_off : int
Integer between 0 and 23 representing when the light cycle ends.
convert : bool, optional
Whether to convert the start/end arguments from numpy datetime to
standard datetime. The default is True.
Returns
-------
None.
"""
hours_list = hours_between(min_date, max_date,convert=convert)
nights = night_intervals(hours_list, lights_on=lights_on,
lights_off=lights_off)
if nights:
for i, interval in enumerate(nights):
start = interval[0]
end = interval[1]
if start != end:
ax.axvspan(start,
end,
color='gray',
alpha=.2,
label='_'*i + 'lights off',
zorder=0)
def date_format_x(ax, start, end):
"""
Format the x-ticks of datetime plots created by FED3 Viz. Handles various
incoming dates by lowering the (time) frequency of ticks with longer
date ranges.
Parameters
----------
ax : matplotlib.axes.Axes
Graph Axes
start : datetime
Earliest x-position of the graph
end : datetime
Latest x-position of the graph
Returns
-------
None.
"""
quarter_hours = mdates.MinuteLocator(byminute=[0,15,30,45])
all_hours = mdates.HourLocator()
quarter_days = mdates.HourLocator(byhour=[0,6,12,18])
days = mdates.DayLocator()
two_days = mdates.DayLocator(interval=2)
three_days = mdates.DayLocator(interval=3)
months = mdates.MonthLocator()
d8_span = end - start
if d8_span < datetime.timedelta(hours=12):
xfmt = mdates.DateFormatter('%H:%M')
major = all_hours
minor = quarter_hours
elif ((d8_span >= datetime.timedelta(hours=12))
and (d8_span < datetime.timedelta(hours=24))):
xfmt = mdates.DateFormatter('%b %d %H:%M')
major = quarter_days
minor = all_hours
elif ((d8_span >= datetime.timedelta(hours=24))
and (d8_span < datetime.timedelta(days=3))):
xfmt = mdates.DateFormatter('%b %d %H:%M')
major = days
minor = quarter_days
elif (d8_span >= datetime.timedelta(days=3)
and (d8_span < datetime.timedelta(days=6))):
xfmt = mdates.DateFormatter('%b %d %H:%M')
major = two_days
minor = days
elif ((d8_span >= datetime.timedelta(days=6))
and (d8_span < datetime.timedelta(days=20))):
xfmt = mdates.DateFormatter('%b %d')
major = three_days
minor = days
elif d8_span >= datetime.timedelta(days=20):
xfmt = mdates.DateFormatter("%b '%y")
major = months
minor = three_days
plt.setp(ax.xaxis.get_majorticklabels(), rotation=45, ha='right')
ax.xaxis.set_major_locator(major)
ax.xaxis.set_major_formatter(xfmt)
ax.xaxis.set_minor_locator(minor)
#---interdrink interval helpers
def get_any_idi(sipper):
"""
Returns the interdrink intervals for a Sipper,
disregarding side or bottle contents
Parameters
----------
sipper : Sipper
sipper data loaded into the Sipper class
Returns
-------
idi_minutes : pandas.Series
array of the interdrink intervals in minutes
"""
data = sipper.data
combined = data['LeftCount'].diff() + data['RightCount'].diff()
combined.dropna(inplace=True)
combined = combined[combined > 0]
idi_delta = combined.index.to_series().diff().dropna()
idi_minutes = idi_delta.dt.total_seconds()/60
return idi_minutes
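# Sketch of the computation (explanatory comment, not from the original code):
# if the combined left+right count increases at 12:00, 12:03 and 12:10, the
# positive diffs fall on those three timestamps and the returned interdrink
# intervals are the gaps between them, i.e. 3.0 and 7.0 minutes.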
def get_side_idi(sipper, side):
"""
Returns the interdrink intervals for the left or right bottle of a Sipper
Parameters
----------
sipper : Sipper
sipper data loaded into the Sipper class
side : str ('left' or 'right')
side to return the interdrink intervals for
Returns
-------
idi_minutes : pandas.Series
array of the interdrink intervals in minutes
"""
data = sipper.data
col = 'LeftCount' if side.lower() == 'left' else 'RightCount'
diff = data[col].diff().dropna()
diff = diff[diff > 0]
idi_delta = diff.index.to_series().diff().dropna()
idi_minutes = idi_delta.dt.total_seconds()/60
return idi_minutes
def get_content_idi(sipper, content, df=pd.DataFrame()):
"""
Returns the interdrink intervals for specific bottle contents of a Sipper
Parameters
----------
sipper : Sipper
sipper data loaded into the Sipper class
content : str
str name of content to get values for
df : pandas.DataFrame, optional
a DataFrame of sipper data to get the values for, can be passed
when you want values for a modified version of data (e.g.
after doing a global date filter)
Returns
-------
idi_minutes : pandas.Series
array of the interdrink intervals in minutes
"""
vals = sipper.get_content_values(content, out='Count', df=df)
if vals.empty:
return vals
diff = vals.diff().dropna()
diff = diff[diff > 0]
idi_delta = diff.index.to_series().diff().dropna()
idi_minutes = idi_delta.dt.total_seconds()/60
return idi_minutes
def setup_idi_axes(ax, logx):
"""
Helper to prepare plots for interdrink interval histograms
Parameters
----------
ax : matplotlib.axes.Axes
plot axes
logx : bool
whether or not to create a logarithmic x-axis
Returns
-------
None.
"""
ax.set_xlabel('Minutes Between Drinks')
ax.set_title('Interdrink Interval Plot')
if logx:
lowest = -2
highest = 5
ax.set_xticks(range(lowest,highest))
ax.set_xticklabels([10**num for num in range(-2,5)], rotation=45)
ax.set_xlim(-2.5, 5.1)
else:
ax.set_xticks([0,300,600,900])
ax.set_xlim(-100,1000)
#---circadian helpers
def get_chronogram_vals(series, lights_on, lights_off):
"""
    Convert a time series to chronogram values (i.e. averaged
by hour for the light cycle)
Parameters
----------
series : pandas.Series
time series data
lights_on : int
Integer from 0-23 denoting start of light cycle
lights_off : int
Integer from 0-23 denoting end of light cycle
Returns
-------
reindexed : pandas.Series
Series of chronogram values, with 0 being start of the light cycle
"""
byhour = series.groupby([series.index.hour]).sum()
byhourday = series.groupby([series.index.hour, series.index.date])
num_days_by_hour = byhourday.sum().index.get_level_values(0).value_counts()
byhour = byhour.divide(num_days_by_hour, axis=0)
new_index = list(range(lights_on, 24)) + list(range(0,lights_on))
reindexed = byhour.reindex(new_index)
reindexed.index.name = 'hour'
reindexed = reindexed.fillna(0)
return reindexed
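# Illustrative behaviour (hypothetical values): with lights_on=7 the result is
# indexed 7, 8, ..., 23, 0, ..., 6, so position 0 of the chronogram is the
# first hour of the light phase, and each value is that hour's total divided
# by the number of days on which that hour was recorded.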
#---averaging helpers
def preproc_averaging(data, averaging='datetime', avg_bins='1H',
agg='sum'):
"""
Average data for SipperViz
Parameters
----------
data : collection
collection of pandas.Series to average
averaging : str, optional
Style of averaging. The default is 'datetime'.
- 'datetime' = average in absolute time (no alignment, fails for
time series which did not cooccur)
- 'time' = align by time of day and then average
- 'elapsed' = align by start of recording and then average
avg_bins : str, optional
Bin size to use for downsampling. The default is '1H'.
agg : str, optional
Function to aggregate data after downsampling; this is a
string name of a function used by pandas for resampling.
The default is 'sum'.
Raises
------
SipperError
When "averaging" parameter is not recognized
Returns
-------
output : dict
Dictionary of results, with keys:
            - 'x' : x positions of data
- 'ys' : averaged data
"""
if averaging not in ['datetime','time','elapsed']:
raise SipperError('averaging must be "datetime", "time", or "elapsed"')
output = {}
output['ys'] = []
if averaging == 'datetime':
earliest_end = pd.Timestamp(2200,1,1,0,0,0)
latest_start = pd.Timestamp(1970,1,1,0,0,0)
for d in data:
if min(d.index) > latest_start:
latest_start = min(d.index)
if max(d.index) < earliest_end:
earliest_end = max(d.index)
for d in data:
if latest_start not in d.index:
d.loc[latest_start] = np.nan
r = d.resample(avg_bins).apply(agg)
r = r[(r.index >= latest_start) &
(r.index <= earliest_end)].copy()
output['ys'].append(r)
output['x'] = r.index
elif averaging == 'time':
earliest_start = pd.Timestamp(2200,1,1,0,0,0)
latest_end = pd.Timestamp(1970,1,1,0,0,0)
shifted = []
for d in data:
r = d.resample(avg_bins).apply(agg)
first = r.index[0]
aligned = pd.Timestamp(year=1970, month=1, day=1, hour=first.hour)
shift = first - aligned
r.index = [i-shift for i in r.index]
if r.index.min() < earliest_start:
earliest_start = r.index.min()
if r.index.max() > latest_end:
latest_end = r.index.max()
shifted.append(r)
full_dr = pd.date_range(earliest_start, latest_end, freq=avg_bins)
output['x'] = full_dr
for s in shifted:
reindexed = s.reindex(full_dr)
output['ys'].append(reindexed)
elif averaging == 'elapsed':
maxx = pd.Timedelta(0)
elapsed_data = []
for d in data:
origin = d.index[0]
elapsed = [i - origin for i in d.index]
d.index = elapsed
r = d.resample(avg_bins).apply(agg)
            if r.index.max() > maxx:
                maxx = r.index.max()
                longest_index = r.index
            elapsed_data.append(r)
output['x'] = longest_index.total_seconds()/3600
for s in elapsed_data:
reindexed = s.reindex(longest_index)
reindexed.index = reindexed.index.total_seconds()/3600
output['ys'].append(reindexed)
return output
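# Illustrative usage (hypothetical names; series_a and series_b are
# datetime-indexed pandas Series): recordings that did not overlap in absolute
# time can still be averaged with 'time' or 'elapsed' alignment, e.g.
#   out = preproc_averaging([series_a, series_b], averaging='elapsed',
#                           avg_bins='1H', agg='sum')
#   mean_curve = pd.concat(out['ys'], axis=1).mean(axis=1)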
def format_averaging_axes(ax, averaging, xdata, shade_dark=True,
lights_on=7, lights_off=19):
"""
Helper function to setup axes for average plots in SipperViz
Parameters
----------
ax : matplotlib.axes.Axes
plot axes
averaging : str, optional
Style of averaging. The default is 'datetime', other options
are 'time' and 'elapsed'
xdata : array
x-positions of plotted data. If multiple lines were plotted,
this array should encompass all of them
shade_dark : bool, optional
Whether or not to shade nighttime periods of the ax.
Has no effect when averaging is 'elapsed'. The default is True.
lights_on : int
Integer from 0-23 denoting start of light cycle. The default is 7.
lights_off : int
Integer from 0-23 denoting end of light cycle. The default is 19.
Returns
-------
None.
"""
if averaging == 'datetime':
mindate = pd.Timestamp(2200,1,1,0,0,0)
maxdate = pd.Timestamp(1970,1,1,0,0,0)
for x in xdata:
if x.min() < mindate:
mindate = x.min()
if x.max() > maxdate:
maxdate = x.max()
ax.set_xlabel('Date')
date_format_x(ax, mindate, maxdate)
if shade_dark:
shade_darkness(ax, mindate, maxdate, lights_on, lights_off)
elif averaging == 'time':
mindate = pd.Timestamp(2200,1,1,0,0,0)
maxdate = pd.Timestamp(1970,1,1,0,0,0)
for x in xdata:
if x.min() < mindate:
mindate = x.min()
if x.max() > maxdate:
maxdate = x.max()
start_hour = mindate.strftime('%I%p')
if start_hour[0] == '0':
start_hour = start_hour[1:]
ax.set_xlabel('Hours Since {} on First Day'.format(start_hour))
if shade_dark:
shade_darkness(ax, mindate, maxdate,
lights_on=lights_on,
lights_off=lights_off,
convert=False)
c = 12
ticks = pd.date_range(mindate, maxdate, freq='{}H'.format(str(c)))
tick_labels = [i*c for i in range(len(ticks))]
while len(ticks) > 10:
c += 12
ticks = pd.date_range(mindate, maxdate, freq='{}H'.format(str(c)))
tick_labels = [i*c for i in range(len(ticks))]
ax.set_xticks(ticks)
ax.set_xticklabels(tick_labels)
ax.set_xlim(mindate, maxdate + datetime.timedelta(hours=5))
elif averaging == 'elapsed':
maxx = 0
for x in xdata:
if x.max() > maxx:
maxx = x.max()
ax.set_xlabel('Elapsed Hours')
c = 12
ticks = range(0, int(maxx + 1), c)
while len(ticks) > 10:
c += 12
            ticks = range(0, int(maxx + 1), c)
ax.set_xticks(ticks)
#---drink plots
def drinkcount_cumulative(sipper, show_left=True, show_right=True,
show_content=[], shade_dark=True,
lights_on=7, lights_off=19, **kwargs):
"""
Plot the cumulative drink count of a Sipper.
Parameters
----------
sipper : Sipper
sipper data loaded into the Sipper class
show_left : bool, optional
Show cumulative drinks for the left bottle
show_right : bool, optional
Show cumulative drinks for the right bottle
show_content : collection, optional
Array of contents to show drinks for. The default is [].
shade_dark : bool, optional
Whether or not to shade nighttime periods of the ax.
lights_on : int
Integer from 0-23 denoting start of light cycle. The default is 7.
lights_off : int
Integer from 0-23 denoting end of light cycle. The default is 19.
**kwargs :
date_filter : two-tuple of start and end date to filter data
ax : matplotlib axes to plot on
        **kwargs also allow SipperViz to be lazy about passing settings
to functions.
Returns
-------
matplotlib.figure.Figure (unless ax is passed, in which case none)
"""
if 'ax' in kwargs:
ax = kwargs['ax']
else:
fig, ax = plt.subplots()
df = sipper.data
if 'date_filter' in kwargs:
s, e = kwargs['date_filter']
df = df[(df.index >= s) &
(df.index <= e)].copy()
if show_left:
ax.plot(df.index, df['LeftCount'], drawstyle='steps', color='red',
label=sipper.left_name)
if show_right:
ax.plot(df.index, df['RightCount'], drawstyle='steps', color='blue',
label=sipper.right_name)
content_max = df.index.min()
content_min = df.index.max()
if show_content:
for c in show_content:
count = sipper.get_content_values(c, out='Count', df=df)
if not count.empty:
ax.plot(count.index, count, drawstyle='steps', label=c)
if count.index.max() > content_max:
content_max = count.index.max()
if count.index.min() < content_min:
content_min = count.index.min()
if show_content and all([not show_left, not show_right]):
dformat_min = content_min
dformat_max = content_max
else:
dformat_min = df.index[0]
dformat_max = df.index[-1]
date_format_x(ax, dformat_min, dformat_max)
ax.set_title('Drink Count for ' + sipper.filename)
ax.set_ylabel('Total Drinks')
ax.set_xlabel('Date')
if shade_dark:
shade_darkness(ax, dformat_min, dformat_max, lights_on, lights_off)
ax.legend()
plt.tight_layout()
return fig if 'ax' not in kwargs else None
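# Illustrative usage (hypothetical object name `sip` for a loaded Sipper):
#   fig = drinkcount_cumulative(sip, show_left=True, show_right=True,
#                               date_filter=(pd.Timestamp('2020-01-01'),
#                                            pd.Timestamp('2020-01-08')))
# restricts the plot to one week of data via the optional date_filter kwarg.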
def drinkcount_binned(sipper, binsize='1H', show_left=True, show_right=True,
show_content=[], shade_dark=True,
lights_on=7, lights_off=19, **kwargs):
"""
Plot the binned drink count of a Sipper.
Parameters
----------
sipper : Sipper
sipper data loaded into the Sipper class
binsize : str, optional
Pandas time offset string to resample data. The default is '1H'.
show_left : bool, optional
        Show binned drink counts for the left bottle
show_right : bool, optional
        Show binned drink counts for the right bottle
show_content : collection, optional
Array of contents to show drinks for. The default is [].
shade_dark : bool, optional
Whether or not to shade nighttime periods of the ax.
lights_on : int
Integer from 0-23 denoting start of light cycle. The default is 7.
lights_off : int
Integer from 0-23 denoting end of light cycle. The default is 19.
**kwargs :
date_filter : two-tuple of start and end date to filter data
ax : matplotlib axes to plot on
        **kwargs also allow SipperViz to be lazy about passing settings
to functions.
Returns
-------
matplotlib.figure.Figure (unless ax is passed, in which case none)
"""
if 'ax' in kwargs:
ax = kwargs['ax']
else:
fig, ax = plt.subplots()
df = sipper.data.copy()
if 'date_filter' in kwargs:
s, e = kwargs['date_filter']
df = df[(df.index >= s) &
(df.index <= e)].copy()
base = df.index[0].hour
if show_left:
l = df['LeftCount'].diff().resample(binsize, base=base).sum()
ax.plot(l.index, l, color='red',
label=sipper.left_name)
if show_right:
r = df['RightCount'].diff().resample(binsize, base=base).sum()
ax.plot(r.index, r, color='blue',
label=sipper.right_name)
content_max = df.index.min()
content_min = df.index.max()
if show_content:
for c in show_content:
count = sipper.get_content_values(c, out='Count', df=df)
binned = count.diff().resample(binsize, base=base).sum()
if not count.empty:
ax.plot(binned.index, binned, label=c)
if count.index.max() > content_max:
content_max = count.index.max()
if count.index.min() < content_min:
content_min = count.index.min()
if show_content and all([not show_left, not show_right]):
dformat_min = content_min
dformat_max = content_max
else:
dformat_min = df.index[0]
dformat_max = df.index[-1]
date_format_x(ax, dformat_min, dformat_max)
ax.set_title('Drink Count for ' + sipper.filename)
ax.set_ylabel('Drinks')
ax.set_xlabel('Date')
if shade_dark:
shade_darkness(ax, dformat_min, dformat_max, lights_on, lights_off)
ax.legend()
plt.tight_layout()
return fig if 'ax' not in kwargs else None
def drinkduration_cumulative(sipper, show_left=True, show_right=True,
show_content=[], shade_dark=True,
lights_on=7, lights_off=19, **kwargs):
"""
Plot the cumulative drink duration of a Sipper.
Parameters
----------
sipper : Sipper
sipper data loaded into the Sipper class
show_left : bool, optional
        Show cumulative drink duration for the left bottle
show_right : bool, optional
        Show cumulative drink duration for the right bottle
show_content : collection, optional
Array of contents to show drinks for. The default is [].
shade_dark : bool, optional
Whether or not to shade nighttime periods of the ax.
lights_on : int
Integer from 0-23 denoting start of light cycle. The default is 7.
lights_off : int
Integer from 0-23 denoting end of light cycle. The default is 19.
**kwargs :
date_filter : two-tuple of start and end date to filter data
ax : matplotlib axes to plot on
        **kwargs also allow SipperViz to be lazy about passing settings
to functions.
Returns
-------
matplotlib.figure.Figure (unless ax is passed, in which case none)
"""
if 'ax' in kwargs:
ax = kwargs['ax']
else:
fig, ax = plt.subplots()
df = sipper.data
if 'date_filter' in kwargs:
s, e = kwargs['date_filter']
df = df[(df.index >= s) &
(df.index <= e)].copy()
if show_left:
ax.plot(df.index, df['LeftDuration'], drawstyle='steps', color='red',
label=sipper.left_name)
if show_right:
ax.plot(df.index, df['RightDuration'], drawstyle='steps', color='blue',
label=sipper.right_name)
content_max = df.index.min()
content_min = df.index.max()
if show_content:
for c in show_content:
count = sipper.get_content_values(c, out='Count', df=df)
if not count.empty:
ax.plot(count.index, count, drawstyle='steps', label=c)
if count.index.max() > content_max:
content_max = count.index.max()
if count.index.min() < content_min:
content_min = count.index.min()
if show_content and all([not show_left, not show_right]):
dformat_min = content_min
dformat_max = content_max
else:
dformat_min = df.index[0]
dformat_max = df.index[-1]
date_format_x(ax, dformat_min, dformat_max)
ax.set_title('Drink Duration for ' + sipper.filename)
ax.set_ylabel('Total Drink Duration (s)')
ax.set_xlabel('Date')
if shade_dark:
shade_darkness(ax, dformat_min, dformat_max, lights_on, lights_off)
ax.legend()
plt.tight_layout()
return fig if 'ax' not in kwargs else None
def drinkduration_binned(sipper, binsize='1H', show_left=True, show_right=True,
show_content=[], shade_dark=True,
lights_on=7, lights_off=19, **kwargs):
"""
    Plot the binned drink duration of a Sipper.
Parameters
----------
sipper : Sipper
sipper data loaded into the Sipper class
binsize : str, optional
Pandas time offset string to resample data. The default is '1H'.
show_left : bool, optional
        Show binned drink duration for the left bottle
show_right : bool, optional
        Show binned drink duration for the right bottle
show_content : collection, optional
Array of contents to show drinks for. The default is [].
shade_dark : bool, optional
Whether or not to shade nighttime periods of the ax.
lights_on : int
Integer from 0-23 denoting start of light cycle. The default is 7.
lights_off : int
Integer from 0-23 denoting end of light cycle. The default is 19.
**kwargs :
date_filter : two-tuple of start and end date to filter data
ax : matplotlib axes to plot on
        **kwargs also allow SipperViz to be lazy about passing settings
to functions.
Returns
-------
matplotlib.figure.Figure (unless ax is passed, in which case none)
"""
if 'ax' in kwargs:
ax = kwargs['ax']
else:
fig, ax = plt.subplots()
df = sipper.data.copy()
if 'date_filter' in kwargs:
s, e = kwargs['date_filter']
df = df[(df.index >= s) &
(df.index <= e)].copy()
base = df.index[0].hour
if show_left:
l = df['LeftDuration'].diff().resample(binsize, base=base).sum()
ax.plot(l.index, l, color='red',
label=sipper.left_name)
if show_right:
r = df['RightDuration'].diff().resample(binsize, base=base).sum()
ax.plot(r.index, r, color='blue',
label=sipper.right_name)
content_max = df.index.min()
content_min = df.index.max()
if show_content:
for c in show_content:
count = sipper.get_content_values(c, out='Duration', df=df)
binned = count.diff().resample(binsize, base=base).sum()
if not count.empty:
ax.plot(binned.index, binned, label=c)
if count.index.max() > content_max:
content_max = count.index.max()
if count.index.min() < content_min:
content_min = count.index.min()
if show_content and all([not show_left, not show_right]):
dformat_min = content_min
dformat_max = content_max
else:
dformat_min = df.index[0]
dformat_max = df.index[-1]
date_format_x(ax, dformat_min, dformat_max)
ax.set_title('Drink Duration for ' + sipper.filename)
ax.set_ylabel('Drink Duration (s)')
ax.set_xlabel('Date')
if shade_dark:
shade_darkness(ax, dformat_min, dformat_max, lights_on, lights_off)
ax.legend()
plt.tight_layout()
return fig if 'ax' not in kwargs else None
#---interdrink intervals
def interdrink_intervals(sippers, kde=True, logx=True,
combine=False, **kwargs):
"""
Plot a histogram of the interdrink intervals of multiple Sippers
Parameters
----------
sippers : collection
Array of Sipper objects
kde : bool, optional
Include a kernel density estimation. The default is True.
logx : bool, optional
Use a logarithmic x-axis. The default is True.
combine : bool, optional
        Concatenate the data from all sippers. The default is False,
which plots data for each Sipper separately.
**kwargs :
date_filter : two-tuple of start and end date to filter data
ax : matplotlib axes to plot on
        **kwargs also allow SipperViz to be lazy about passing settings
to functions.
Returns
-------
matplotlib.figure.Figure (unless ax is passed, in which case none)
"""
if 'ax' not in kwargs:
fig, ax = plt.subplots()
else:
ax = kwargs['ax']
setup_idi_axes(ax, logx)
combined = []
for sipper in sippers:
df = sipper.data.copy()
if 'date_filter' in kwargs:
s, e = kwargs['date_filter']
df = df[(df.index >= s) &
(df.index <= e)].copy()
y = get_any_idi(sipper)
if logx:
            y = [np.log10(val) for val in y if not pd.isna(val)]
# coding: utf-8
import numpy as np
import pandas as pd
import os
import time
import multiprocessing
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score, accuracy_score
from sklearn.multiclass import OneVsRestClassifier
from sklearn import preprocessing
from utils import check_and_make_path
# Generate data used for node classification
class DataGenerator(object):
base_path: str
input_base_path: str
output_base_path: str
label_base_path: str
file_sep: str
full_node_list: list
node2idx_dict: dict
node_num: int
train_ratio: float
val_ratio: float
test_ratio: float
def __init__(self, base_path, input_folder, output_folder, node_file, label_folder, file_sep='\t', train_ratio=0.7, val_ratio=0.2, test_ratio=0.1):
self.base_path = base_path
self.input_base_path = os.path.abspath(os.path.join(base_path, input_folder))
self.output_base_path = os.path.abspath(os.path.join(base_path, output_folder))
self.label_base_path = os.path.abspath(os.path.join(base_path, label_folder))
self.file_sep = file_sep
node_file_path = os.path.abspath(os.path.join(base_path, node_file))
nodes_set = pd.read_csv(node_file_path, names=['node'])
self.full_node_list = nodes_set['node'].tolist()
self.node_num = len(self.full_node_list)
self.node2idx_dict = dict(zip(self.full_node_list, np.arange(self.node_num)))
assert train_ratio + test_ratio + val_ratio <= 1.0
self.train_ratio = train_ratio
self.val_ratio = val_ratio
self.test_ratio = test_ratio
check_and_make_path(self.input_base_path)
check_and_make_path(self.output_base_path)
def generate_node_samples(self, file_name, sep='\t'):
date = file_name.split('.')[0]
file_path = os.path.join(self.label_base_path, file_name)
        df_nodes = pd.read_csv(file_path, sep=sep, header=0, names=['node', 'label'])
"""
a function to draw the bar plots in SI for evaluation based on the cases data from JHU
"""
import more_itertools
import datetime
from datetime import timedelta
import os
import json
import pandas as pd
import numpy as np
import glob
import matplotlib.pyplot as plt
from precomputing import read_countries, repair_increasing
from compute_WIS_coverage import compute_WIS, compute_coverage
from paper_visualization import plot_forecast_baseline, plot_CI
def substring_index(l, substr):
"""
select index of substring substr in list l
"""
index = [idx for idx, s in enumerate(l) if substr in s]
return index
def substring_two_indeces(l,substr):
"""
    find the indices of the first elements of list l containing substr[0] and substr[1], respectively
"""
substr1, substr2 = substr[0], substr[1]
index1 = substring_index(l,substr1)
index2 = substring_index(l,substr2)
if len(index1)==0:
index1 = 0
else:
index1 = index1[0]
if len(index2)==0:
index2 = len(l)-1
else:
index2 = index2[0]
return index1, index2
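# Illustrative usage (hypothetical values):
#   substring_two_indeces(['fc_2020-04-01.csv', 'fc_2020-04-08.csv',
#                          'fc_2020-04-15.csv'], ['2020-04-08', '2020-04-15'])
#   -> (1, 2)
# If a substring is not found, the corresponding index falls back to 0 (start)
# or len(l) - 1 (end).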
def get_df_limits(dir_, date_limits, country_list, add_days=0):
"""
merge forecasts (CI) from the path in "dir_" within the limits in "date_limits"
for countries in "country_list"
Output:
DataFrame with forecasts (CI)
"""
paths = sorted(glob.glob(dir_))
date_limit_2 = datetime.datetime.strptime(date_limits[1], "%Y-%m-%d")
date_limits_1 = datetime.datetime.strptime(date_limits[0], "%Y-%m-%d")+ timedelta(days=-add_days)
start_f,end_f = substring_two_indeces(paths, [str(date_limits_1)[:10], str(date_limit_2)[:10]])
paths = paths[start_f:end_f+1]
df = pd.read_csv(paths[0])
if len(country_list)>0:
df = df[df["country"].isin(country_list)]
for path in paths[1:]:
df_ = pd.read_csv(path)
if len(country_list)>0:
df_ = df_[df_["country"].isin(country_list)]
df = df.append(df_,ignore_index=True)
return df
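# Illustrative usage (hypothetical path and countries):
#   df_point = get_df_limits("TrendForecast/Point/*",
#                            ['2020-04-01', '2021-03-07'],
#                            ["South Sudan", "Ethiopia"])
# add_days widens the window backwards, which is used below to also load the
# extra target days needed for the H-day-ahead baseline.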
def evaluation(country_list, date_limits=['2020-04-01','2021-03-07'], path_data=[], datasource="JHU",typedata="cases",addon="",
path_results = "../data/paper/", H=7, raw_ground_truth=False):
"""
run the evaluations based on the forecasts saved in path_results folder
computes the RMAE, RmedianAE, RC and RWIS for the forecasting methodology in the dashboard compared to the constant forecast baseline
for the list of countries in country_list
"""
path_data = "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/b849410bace2cc777227f0f1ac747a74fd8dc4be/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv" #Link to the data of 2022-01-10
datasource, parse_column = "JHU", "Country/Region"
df = pd.read_csv(path_data)
data_type = "horison_"+str(int(H/7))+"_week/" + datasource+ "_"+typedata +"/"
path_TrendForecast = path_results + data_type + "TrendForecast" + addon
#-------------------------------forecast of our model---------------------------------------------------
df_forecasts = get_df_limits(path_TrendForecast+"/Point/*", date_limits, country_list)
df_forecasts = df_forecasts[(df_forecasts["target_date"]>=date_limits[0])&(df_forecasts["target_date"]<=date_limits[1])]
df_target = get_df_limits(path_TrendForecast+"/Target/*", date_limits, country_list, add_days=H)
df_forecasts_CI = get_df_limits(path_TrendForecast+"/CI/*", date_limits,country_list)
df_errors = pd.DataFrame(columns=["country","forecast_MAE","forecast_MedianAE","baseline_MAE","baseline_MedianAE","coverage"])
#-----------------------------fix confidence intervals--------------------------------------------------
type_norm = ["sqrt"]
df_forecasts_CI = df_forecasts_CI[df_forecasts_CI["confidence_norm"].isin(type_norm)]
df_forecasts_CI = df_forecasts_CI[(df_forecasts_CI["target_date"]>=date_limits[0])&(df_forecasts_CI["target_date"]<=date_limits[1])]
df_forecasts_CI["type"] = "day"
#--------------------------------------------------------------------------------------------------------------
RI = pd.DataFrame(columns=["country","$RI_{MAE}$","$RI_{MedianAE}$","$RI_old0$"])
WIS = pd.DataFrame(columns=["country","forecast_WIS","baseline_WIS"])
coverage = pd.DataFrame(columns=["country","forecast","baseline"])
for numit, country in enumerate(country_list):
try:
res = read_countries(df, [country], 0, datasource, typedata)
except:
print(country)
cumulative_, date_ = res[0][0], res[1][0]
cumulative_ = repair_increasing(cumulative_)
target_now = pd.DataFrame()
target_now["target_uptodate"] = pd.Series(np.diff(np.ravel(cumulative_))).rolling(H).mean()[H:]*H
target_now["target_date"] = date_[H+1:]
target_now["target_date"] = target_now["target_date"].astype(str)
country_forecast = df_forecasts[df_forecasts["country"]==country].sort_values(by="target_date")
country_target = df_target[df_target["country"]==country].sort_values(by="target_date").merge(target_now[["target_date","target_uptodate"]],on="target_date")
country_forecast = country_forecast.merge(country_target[["target_date","target"]],on="target_date")
country_forecast = country_forecast.merge(target_now[["target_date","target_uptodate"]],on="target_date")
country_forecast["fc_AE"] = np.abs(country_forecast["target_uptodate"]-country_forecast["forecast"])
country_target["bl_AE"] = np.nan
if country_target.shape[0]>H:
country_baseline_forecast = country_target.copy()
country_baseline_forecast["target_date"] = pd.to_datetime(country_baseline_forecast["target_date"]) + timedelta(days=H)
country_baseline_forecast["target_date"] = country_baseline_forecast["target_date"].astype(str)
country_baseline_forecast =country_baseline_forecast.rename(columns={"target":"baseline_forecast"})
country_target = country_target.merge(country_baseline_forecast[["target_date","baseline_forecast"]], on=["target_date"], how="inner")
country_target["bl_AE"] = np.abs(country_target["target_uptodate"]-country_target["baseline_forecast"])
AE = pd.merge(country_target[["bl_AE","target_date","target_uptodate"]],
country_forecast[["fc_AE","target_date"]],
on = "target_date", how="inner")
AE = AE.dropna(axis=0).reset_index(drop=True)
CI_baseline = compute_baseline_quantiles(AE[["target_date","bl_AE"]], AE["target_uptodate"], H=H)
CI_baseline["target_date"] =[0]*H + list(CI_baseline["target_date"][:-H])
CI_baseline = CI_baseline.iloc[H:]
CI_baseline = CI_baseline.merge(country_target[["target_uptodate","target_date"]],on=["target_date"])
country_CI_ = df_forecasts_CI[df_forecasts_CI["country"]==country]
country_CI_ = country_CI_[country_CI_["target_date"].isin(list(country_forecast["target_date"]))].reset_index().sort_values(by="target_date")
country_CI_ = country_CI_.merge(country_target[["target_date","target"]], on="target_date")
country_CI = country_CI_.merge(target_now[["target_date","target_uptodate"]], on="target_date")
wis_forecast = compute_WIS(country_CI, "forecast","day")
wis_baseline = compute_WIS(CI_baseline, "baseline","day")
coverage_forecast = compute_coverage(country_CI, "forecast","day")
coverage_baseline = compute_coverage(CI_baseline, "baseline","day")
WIS = WIS.append(pd.DataFrame([[country]+ [np.mean(wis_forecast["forecastWIS"]),np.mean(wis_baseline["baselineWIS"])]],
columns=WIS.columns), ignore_index=True)
coverage = coverage.append(pd.DataFrame([[country] + [np.mean(coverage_forecast["forecast_coverage"]),
np.mean(coverage_baseline["baseline_coverage"])]],
columns=coverage.columns), ignore_index=True)
AE["cover"] = 1*(AE["bl_AE"]>AE["fc_AE"])
errors = pd.DataFrame([[country, np.mean(AE["fc_AE"]),np.nanmedian(AE["fc_AE"]),
np.nanmean(AE["bl_AE"].values), np.nanmedian(AE["bl_AE"].values), np.mean(AE["cover"])]],
columns = df_errors.columns)
df_errors = df_errors.append(errors,ignore_index=True)
ri_mean = (errors["baseline_MAE"].values-errors["forecast_MAE"].values)/(1+errors["baseline_MAE"].values)
ri_median = (errors["baseline_MedianAE"].values-errors["forecast_MedianAE"].values)/(1+errors["baseline_MedianAE"].values)
ri_0 = errors["coverage"].values[0]
RI = RI.append(pd.DataFrame([[country, ri_mean[0], ri_median[0], ri_0]], columns = RI.columns),ignore_index=True)
return RI, WIS, coverage
def compute_baseline_quantiles(AE, forecast, H=7):
"""
compute empirical quantiles for the forecast by baseline
motivated by https://github.com/reichlab/covidModels/blob/master/R-package/R/quantile_baseline.R
"""
AE = AE.sort_values(by="target_date").reset_index(drop=True)
quantiles = [0.01,0.025] + list(np.arange(0.05,1,0.05)) + [0.975,0.99]
ind_median = int(len(quantiles)/2)+1
col_names = [str(round(q,3)) for q in quantiles]+["target_date"]
q_AE = pd.DataFrame(columns=col_names)
for i in range(2,AE.shape[0]):
vals = AE["bl_AE"].values[:i]
vals = list(vals) + list(-vals)
quant = np.quantile(vals, quantiles)
quant = quant - quant[ind_median] + forecast[i-1]
q_ = [list(quant)+ [AE["target_date"].values[i-1]]]
q_AE = q_AE.append(pd.DataFrame(q_, columns=col_names))
return q_AE
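# Sketch of the idea (explanatory comment): past baseline absolute errors are
# mirrored around zero (vals and -vals), their empirical quantiles are taken,
# and the quantile set is re-centred on the current point forecast, giving a
# symmetric prediction interval that widens as historical errors grow.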
def evaluation_AE(country_list, date_limits=['2020-04-01','2021-03-07'], path_data=[], datasource="JHU",typedata="cases",addon="",
path_results = "../paper/", H=7, raw_ground_truth=False):
"""
run the evaluations to obtain absolute deviations of TrendModel and baseline needed for slope computation (see the notebook notebooks/paper_error_vs_slope_Fig_SI4.ipynb)
"""
path_data = "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/b849410bace2cc777227f0f1ac747a74fd8dc4be/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv" #Link to the data of 2022-01-10
datasource, parse_column = "JHU", "Country/Region"
df = pd.read_csv(path_data)
data_type = "horison_"+str(int(H/7))+"_week/" + datasource+ "_"+typedata +"/"
path_TrendForecast = path_results + data_type + "TrendForecast" + addon
#-------------------------------forecast of our model---------------------------------------------------
df_forecasts = get_df_limits(path_TrendForecast+"/Point/*", date_limits, country_list)
df_forecasts = df_forecasts[(df_forecasts["target_date"]>=date_limits[0])&(df_forecasts["target_date"]<=date_limits[1])]
df_target = get_df_limits(path_TrendForecast+"/Target/*", date_limits, country_list, add_days=H)
stl,baseline,target = {},{},{}
for numit, country in enumerate(country_list):
res = read_countries(df, [country], 0, datasource, typedata)
cumulative_, date_ = res[0][0], res[1][0]
cumulative_ = repair_increasing(cumulative_)
        target_now = pd.DataFrame()
import sys
import os
import yaml
import argparse
import numpy as np
import pandas as pd
import csv
import random
import stat
import glob
import subprocess
from statistics import mean
from pprint import pprint, pformat
import geopandas
from shapely.geometry import Point
from math import sin, cos, atan2, sqrt, pi
from pymoo.algorithms.moo.nsga2 import NSGA2
from pymoo.algorithms.moo.nsga3 import NSGA3
from pymoo.algorithms.moo.moead import MOEAD, ParallelMOEAD
from pymoo.factory import get_sampling, get_crossover, get_mutation, \
get_problem, get_reference_directions
from pymoo.optimize import minimize
from pymoo.visualization.scatter import Scatter
from pymoo.core.problem import Problem
from pymoo.factory import get_performance_indicator
from moo_algs.bce_moead import BCEMOEAD
import time
from datetime import timedelta
work_dir = os.path.dirname(os.path.abspath(__file__))
EXEC_LOG_FILE = None
USE_PJ = False
QCG_MANAGER = None
class dict_to_obj:
def __init__(self, in_dict: dict):
assert isinstance(in_dict, dict)
for key, val in in_dict.items():
if isinstance(val, (list, tuple)):
setattr(self, key, [dict_to_obj(x) if isinstance(
x, dict) else x for x in val])
else:
setattr(self, key, dict_to_obj(val)
if isinstance(val, dict) else val)
def MOO_log(msg):
with open(EXEC_LOG_FILE, "a") as log_file:
print("{}".format(msg), file=log_file)
def read_MOO_setting_yaml():
"""
read MOO setting from yaml file
"""
with open(os.path.join(work_dir, "MOO_setting.yaml")) as f:
MOO_CONFIG = yaml.safe_load(f)
# convert the json to a nested object
# MOO_CONFIG_DICT = dict_to_obj(MOO_CONFIG)
# return MOO_CONFIG_DICT
return MOO_CONFIG
class FLEE_MOO_Problem(Problem):
def __init__(self, execution_mode, simulation_period, cores,
work_dir=work_dir):
# TODO: add input vraibles to MOO_setting.yaml file
super().__init__(n_var=1,
n_obj=5,
xl=np.array([0]), #
xu=np.array([19688])) #
self.work_dir = work_dir
self.cnt_SWEEP_dir = 0
self.execution_mode = execution_mode
self.simulation_period = simulation_period
self.cores = cores
def avg_distance(self, agents_out_files, camp_name):
df_array = [pd.read_csv(filename, index_col=None, header=0)
for filename in agents_out_files]
df = pd.concat(df_array, axis=0, ignore_index=True)
# filter rows for agent location == camp_name
df = df[(df["agent location"] == camp_name) &
(df["distance_moved_this_timestep"] > 0)
]
df.to_csv(os.path.join(
os.path.dirname(agents_out_files[0]), "df_agents.out.csv"),
sep=",",
mode="w",
index=False,
encoding='utf-8'
)
return df["distance_travelled"].mean()
def find_closest_location_to_camp(self, camp_lon, camp_lat):
# in kilometres
R = 6371
p = pi/180
dist = []
locations=[]
# Read lat(Latitude) and lon(Longitude) column in locations.csv file row by row.
locations_path = os.path.join(self.work_dir, "input_csv", "locations.csv")
with open(locations_path, newline='') as csvfile:
reader = csv.reader(csvfile)
next(reader)
# Iterate over each row after the header in the csv
for row in reader:
# row variable is a list that represents a row in csv
# print(row)
if row[2] == 'South_Sudan':
locations.append(row[0])
lat = float(row[3])
lon = float(row[4])
MOO_log(msg="\tlocation ={}".format(row[0]))
MOO_log(msg="\tlongitude ={}".format(lon))
MOO_log(msg="\tlatitude ={}".format(lat))
# calculate the haversine distance between Z and other locations in south sudan, respectively.
phi = (camp_lat-lat) * p
lam = (lon-camp_lon) * p
a = sin(phi/2)*sin(phi/2)+cos(lat*p)*cos(camp_lat*p)*sin(lam/2)*sin(lam/2);
c = 2*atan2(sqrt(a),sqrt(1-a))
dist.append(R * c)
MOO_log(msg="\tall locations ={}".format(locations))
MOO_log(msg="\tdistance between these locations and Z={}".format(dist))
# find the shortest path
min_dist = np.amin(dist)
index_min_dist = dist.index(min_dist)
nearest_loc = locations[index_min_dist]
return nearest_loc, min_dist
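    # The distance above is the haversine great-circle distance; in formula
    # form (explanatory comment):
    #   a = sin^2(dlat/2) + cos(lat1) * cos(lat2) * sin^2(dlon/2)
    #   d = 2 * R * atan2(sqrt(a), sqrt(1 - a)),  with R = 6371 km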
# --------------------------------------------------------------------------
def change_route_to_camp(self, csv_name):
"""
Change the location that connect to the camp
"""
MOO_log(msg="\n[change_route_to_camp]")
selectedCamps_csv_PATH = os.path.join(self.work_dir, "input_csv", csv_name)
# Read the data in selectedCamps.csv file row by row.
with open(selectedCamps_csv_PATH, newline='') as csvfile:
reader = csv.reader(csvfile)
next(reader)
# print(header)
# Iterate over each row after the header in the csv
for row in reader:
# row variable is a list that represents a row in csv
# print(row)
lon = float(row[0])
lat = float(row[1])
ipc = float(row[2])
accessibility = float(row[3])
MOO_log(msg="\tcamp lon ={}".format(lon))
MOO_log(msg="\tcamp lat ={}".format(lat))
# 1. Find the nearest location to camp and calculate the distance
# between them.
nearest_loc, min_dist = self.find_closest_location_to_camp(
camp_lon=float(lon), camp_lat=float(lat)
)
# 2. Read routes.csv and modify the data (i.e., the nearest
# location to camp and the distance between them)
routes_csv_PATH = os.path.join(self.work_dir, "input_csv", "routes.csv")
df = pd.read_csv(routes_csv_PATH)
# change one value of a row
df.loc[lambda df: df['name2'] == 'Z', lambda df:'#name1'] = nearest_loc
df.loc[lambda df: df['name2'] == 'Z', lambda df:'distance'] = str(min_dist)
MOO_log(msg="\tLatitude of camp Z: {} \n\t"
"Longitude of camp Z: {}\n\t"
"nearest location: {}\n\t"
"distance to {}:{}".format(
float(lon),
float(lat),
nearest_loc,
nearest_loc, min_dist)
)
# 3. Write the updated route.csv in the moo_ssudan SWEEP
# directory.
sweep_dir = os.path.join(self.work_dir, "SWEEP")
# curr_dir_count = len(os.listdir(sweep_dir))
curr_dir_count = self.cnt_SWEEP_dir
sub_dir_SWEEP = os.path.join(
sweep_dir, "{}".format(curr_dir_count + 1), "input_csv"
)
if os.path.exists(sub_dir_SWEEP):
raise RuntimeError(
"SWEEP dir {} is exists !!!!!".format(sub_dir_SWEEP)
)
os.makedirs(sub_dir_SWEEP)
MOO_log(msg="\tgenerates SWEEP : {}".format(sub_dir_SWEEP))
updated_routes_csv_PATH = os.path.join(sub_dir_SWEEP, "routes.csv")
df.to_csv(updated_routes_csv_PATH, index = False)
# 4. Write campIPC.csv in the moo_ssudan SWEEP directory
campIPC_PATH = os.path.join(sub_dir_SWEEP, "campIPC.csv")
with open(campIPC_PATH, "w", newline="") as fout:
writer = csv.writer(fout, delimiter=",")
writer.writerow(["lon", "lat", "ipc", "accessibility"])
writer.writerow([lon, lat, ipc, accessibility])
self.cnt_SWEEP_dir += 1
MOO_log(msg="\t{}".format("-" * 30))
# --------------------------------------------------------------------------
def flee_optimization(self, run_dir, camp_name):
MOO_log(msg="\n[flee_optimization] called for "
"run_dir = {} camp_name = {}".format(run_dir, camp_name)
)
# calculate camp population, obj#2
df = pd.read_csv(os.path.join(run_dir, "out.csv"))
sim_camp_population_last_day = df["{} sim".format(camp_name)].iloc[-1]
sim_camp_population = df["{} sim".format(camp_name)].tolist()
MOO_log(msg="\tsim camp {} population of the last day = {}".format(
camp_name, sim_camp_population_last_day)
)
MOO_log(msg="\tsim camp {} population = {}".format(
camp_name, sim_camp_population)
)
# find the agents.out files
agents_out_files = glob.glob(
"{}".format(os.path.join(run_dir, "agents.out.*"))
)
# obj#1
avg_distance_travelled = self.avg_distance(
agents_out_files=agents_out_files, camp_name=camp_name
)
MOO_log(
msg="\tInput file : {}"
"\n\t\tavg distance travelled for agents "
"to camp name {} = {}".format(
[os.path.basename(filename) for filename in agents_out_files],
camp_name,
avg_distance_travelled
)
)
# clean agents.out files to reduce the disk space usage
clean_agents_cmd = "rm {}".format(os.path.join(
os.path.dirname(agents_out_files[0]), "agents.out.*"))
subprocess.check_output(
clean_agents_cmd,
shell=True,
)
# calculate camp capacity
PopulationScaledownFactor = 100
df = pd.read_csv(os.path.join(run_dir, "input_csv", "locations.csv"))
camp_population = df[df["#name"] == camp_name]["population"].values[0]
camp_population = camp_population/PopulationScaledownFactor
MOO_log(msg="\tmax camp {} population = {}".format(
camp_name, camp_population)
)
# calculate the average remaining camp capacity over simulation days, obj#3
remain_camp_capacity = mean(
[abs(camp_population - i) for i in sim_camp_population]
)
MOO_log(msg="\tremain camp {} capacity = {}".format(
camp_name, remain_camp_capacity)
)
# calculate IPC phase, obj#4
input_dir_SWEEP = os.path.join(run_dir, "input_csv")
ipc_df = pd.read_csv(os.path.join(input_dir_SWEEP, "campIPC.csv"))
camp_ipc = float(ipc_df.loc[0,"ipc"])
# calculate accessibility score, obj#5
camp_accessibility = float(ipc_df.loc[0,"accessibility"])
MOO_log(msg="\tcamp {}: IPC phase = {},\taccessibility score = {}".format(
camp_name, camp_ipc, camp_accessibility)
)
# return values [obj#1, obj#2, obj#3, obj#4, obj#5]
return [avg_distance_travelled, sim_camp_population_last_day,
remain_camp_capacity, camp_ipc, camp_accessibility]
#------------------------------------start-----------------------------------
def run_simulation_with_PJ(self, sh_jobs_scripts):
"""
Run the simulation in each SWEEP dir using QCG PilotJob (PJ).
"""
from qcg.pilotjob.api.job import Jobs
jobs = Jobs()
for sh_job_scripts in sh_jobs_scripts:
sweep_dir_name = os.path.basename(os.path.dirname(sh_job_scripts))
jobs.add(
name="SWEEP_{}".format(sweep_dir_name),
exec="bash",
args=["-l", sh_job_scripts],
stdout="{}/{}.stdout".format(
os.path.dirname(sh_job_scripts),
"${jname}__${uniq}"
),
stderr="{}/{}.stderr".format(
os.path.dirname(sh_job_scripts),
"${jname}__${uniq}"
),
numCores={"exact": self.cores},
model="default"
)
print("\nAdd job with :")
print("name=SWEEP_{}".format(sweep_dir_name))
print("args = [-l,{}]".format(sh_job_scripts))
print("stdout = {}/{}.stdout".format(
os.path.dirname(sh_job_scripts),
"${jname}__${uniq}")
)
print("stderr = {}/{}.stderr".format(
os.path.dirname(sh_job_scripts),
"${jname}__${uniq}")
)
print("numCores=exact: {}".format(self.cores))
ids = QCG_MANAGER.submit(jobs)
# wait until submitted jobs finish
QCG_MANAGER.wait4(ids)
print("\nAll new SWEEP dirs are finished...\n")
def run_simulation_without_PJ(self, sh_jobs_scripts):
"""
Run the simulation in each SWEEP dir without using PJ.
"""
for sh_job_scripts in sh_jobs_scripts:
# subprocess.check_output(sh_job_scripts, shell=True)
try:
p = subprocess.Popen(sh_job_scripts, shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
except Exception as e:
raise RuntimeError("Unexpected error: {}".format(e))
acceptable_ret_codes = [0]
if p.returncode not in acceptable_ret_codes:
raise RuntimeError(
"\njob execution encountered an error (return code {}) "
"while executing '{}'".format(p.returncode, sh_job_scripts)
)
#-------------------------------------end------------------------------------
def _evaluate(self, x, out, *args, **kwargs):
"""
The _evaluate method receives a two-dimensional NumPy array x with n rows,
one row per individual; here each individual encodes the index of a
candidate camp location. After doing the necessary calculations, the
objective values must be added to the dictionary out under the key F.
"""
# ---------------------------------start--------------------------------
# read accessible_camp_ipc.csv
df = pd.read_csv("accessible_camp_ipc.csv")
camp_coords_df = df[['lon', 'lat']]
coords = camp_coords_df.to_numpy()
# obtain coordinates of selected camps
X_1D = x.flatten()
X_1D = X_1D.astype('int64')
population = coords[X_1D, :]
pop_size = len(population)
MOO_log(
msg="\n{}\nExecuting _evaluate function with input "
"population : \n{}\n".format("-" * 30, pformat(population))
)
n = 1
for row in population:
MOO_log("\tpotential location {}: {}".format(n, row))
n += 1
# Get IPC phase data of each camp location
ipc = df.loc[X_1D, 'IPC']
ipc_list = ipc.tolist()
# Get accessibility score of each camp location
accessibility_score = df.loc[X_1D, 'landcover']
accessibility_list = accessibility_score.tolist()
selected_camps = [[*a, b, c] for a, b, c in zip(population, ipc_list, accessibility_list)]
selectedCamps_csv_PATH = os.path.join(
self.work_dir, "input_csv", "selectedCamps.csv"
)
# Save data to CSV
with open(selectedCamps_csv_PATH, "w", newline="") as file:
writer = csv.writer(file, delimiter=",")
writer.writerow(["Camp Longitude", "Camp Latitude", "IPC Score", "Accessibility Score"]) # header
writer.writerows(selected_camps)
# ------------------------------end-----------------------------------
# SWEEP dir that holds the per-candidate run folders
sweep_dir = os.path.join(self.work_dir, "SWEEP")
####################################################################
# Run change_route_to_camp function to update the routes.csv file #
# according to the selected camp coordinates in selectedCamps.csv.  #
####################################################################
cnt_SWEEP_dir_before = self.cnt_SWEEP_dir
self.change_route_to_camp(csv_name="selectedCamps.csv")
####################################
# job_script parameter preparation #
####################################
# list of files and folders to be included
sel_files_folders = ["**input_csv/***", "**source_data/***",
"run.py",
"run_par.py", "simsetting.csv"
]
# Note: be careful with rsync command arguments
rsync_cmd = " ".join([
*["rsync -pthrvz --ignore-existing"],
*["--include='{}' ".format(sel) for sel in sel_files_folders],
*["--exclude='*'"],
*["--exclude='SWEEP'"],
*["{}/ .".format(self.work_dir)]
])
# set the execution command for flee simulation
if self.execution_mode.lower() == "serial":
flee_exec_cmd = "python3 run.py input_csv source_data " \
"{} simsetting.csv > out.csv".format(
self.simulation_period)
elif self.execution_mode.lower() == "parallel":
flee_exec_cmd = "mpirun -np {} " \
"python3 run_par.py input_csv source_data " \
"{} simsetting.csv > out.csv".format(
self.cores,
self.simulation_period)
else:
raise RuntimeError(
"The input execution_mode {} not valid!".format(
self.execution_mode)
)
# clean the SWEEP dir after simulation finished
clean_cmd = "find . -type f ! \( -name 'out.csv' " \
"-o -name 'routes.csv' -o -name 'agents.out.*' " \
"-o -name 'flee_exec_cmd.sh' "\
"-o -name '*.stdout' "\
"-o -name '*.stderr' "\
"-o -name 'selectedCamps.csv' "\
"-o -name 'campIPC.csv' "\
"-o -name 'locations.csv' \) -exec rm -rf {} \; ;" \
"rm -rf source_data"
###################################################
# save job_script in each new generated SWEEP dir #
###################################################
print("cnt_SWEEP_dir_before = {}\nself.cnt_SWEEP_dir={}\n".format(
cnt_SWEEP_dir_before, self.cnt_SWEEP_dir)
)
sh_jobs_scripts = []
for i in range(cnt_SWEEP_dir_before, self.cnt_SWEEP_dir):
dest_SWEEP_dir = os.path.join(work_dir, "SWEEP", str(i + 1))
# here we create a bash script to call the execution part
flee_exec_sh = os.path.join(dest_SWEEP_dir, "flee_exec_cmd.sh")
with open(flee_exec_sh, "w") as f:
f.write("#!/bin/bash\n\n")
f.write("# change dir\n\n")
f.write("cd {}\n\n".format(dest_SWEEP_dir))
f.write("# copying the required input files\n")
f.write("{}\n\n".format(rync_cmd))
f.write("# running simulation\n")
# f.write("cd {}\n".format(dest_SWEEP_dir))
f.write("{}\n\n".format(flee_exec_cmd))
f.write("# cleaning the SWEEP dir after simulation finished\n")
f.write("{}\n\n".format(clean_cmd))
f.write("touch DONE\n")
# change file permission to executable
st = os.stat(flee_exec_sh)
os.chmod(flee_exec_sh, st.st_mode | stat.S_IEXEC)
sh_jobs_scripts.append(flee_exec_sh)
#####################################
# run simulation for each SWEEP dir #
#####################################
if USE_PJ is False:
self.run_simulation_without_PJ(sh_jobs_scripts)
else:
self.run_simulation_with_PJ(sh_jobs_scripts)
# Step 3: Calculate objective values
# Create a csv file that only contains the header
with open("objectives.csv", "w", newline="") as file:
writer = csv.writer(file, delimiter=",")
# add header
writer.writerow(["Objective #1", "Objective #2", "Objective #3", "Objective #4", "Objective #5"])
# Calculate objective values and save the data in objectives.csv file
for i in range(cnt_SWEEP_dir_before, self.cnt_SWEEP_dir):
dest_SWEEP_dir = os.path.join("SWEEP", str(i + 1))
row = self.flee_optimization(run_dir=dest_SWEEP_dir, camp_name="Z")
with open("objectives.csv", "a", newline="") as file:
writer = csv.writer(file)
writer.writerow(row)
MOO_log(msg="=" * 50)
# Fetch the objective values
objectives = pd.read_csv("objectives.csv")
MOO_log(msg="objectives.csv =\n{}".format(pformat(objectives)))
# objective 1: minimize average distance travelled by each arriving
# refugee.
f1 = objectives["Objective #1"].values
MOO_log(msg="\tf1: {}".format(f1))
# objective 2: maximize camp population, i.e.,the number of people in
# the camp at the end of the simulation.
f2 = -objectives["Objective #2"].values
MOO_log(msg="\tf2: {}".format(f2))
# objective 3: minimize the average remaining camp capacity over simulation days
f3 = objectives["Objective #3"].values
MOO_log(msg="\tf3: {}".format(f3))
# objective 4: minimize the IPC phase score of camp
f4 = objectives["Objective #4"].values
MOO_log(msg="\tf4: {}".format(f4))
# objective 5: maximize accessibility
f5 = -objectives["Objective #5"].values
MOO_log(msg="\tf5: {}".format(f5))
MOO_log(msg="=" * 50)
out["F"] = np.column_stack([f1, f2, f3, f4, f5])
if __name__ == "__main__":
start_time = time.monotonic()
# Instantiate the parser
parser = argparse.ArgumentParser()
parser.add_argument("--execution_mode", action="store", default="serial")
parser.add_argument("--simulation_period", action="store", type=int,
default="-1")
parser.add_argument("--exec_log_file", action="store",
default="log_MOO.txt")
parser.add_argument("--cores", action="store", type=int, default="1")
parser.add_argument("--USE_PJ", action="store", default="False")
args = parser.parse_args()
execution_mode = args.execution_mode
simulation_period = args.simulation_period
cores = args.cores
if args.USE_PJ.lower() == "true":
USE_PJ = True
from qcg.pilotjob.api.manager import LocalManager
QCG_MANAGER = LocalManager(
cfg={'log_level': 'DEBUG'}, server_args=['--log', 'debug']
)
else:
USE_PJ = False
EXEC_LOG_FILE = os.path.join(work_dir, args.exec_log_file)
MOO_log(msg="run_MOO input args : {}".format(args))
# read MOO setting from config yaml file
MOO_CONFIG = read_MOO_setting_yaml()
MOO_log(msg="MOO_CONFIG =\n{}".format(pformat(MOO_CONFIG)))
problem = FLEE_MOO_Problem(
execution_mode=execution_mode,
simulation_period=simulation_period,
cores=cores,
)
algorithm = None
alg_name = MOO_CONFIG["alg_name"]
crossover_func = MOO_CONFIG["crossover_func"]
crossover_func_args = MOO_CONFIG["crossover_func_args"][crossover_func]
mutation_func = MOO_CONFIG["mutation_func"]
mutation_func_args = MOO_CONFIG["mutation_func_args"][mutation_func]
alg_specific_args = MOO_CONFIG["alg_specific_args"][alg_name]
try:
ref_dir_func = alg_specific_args["ref_dir_name"]
ref_dir_func_args = MOO_CONFIG["ref_dir_func"][ref_dir_func]
ref_dir_func_args.update({"n_dim": problem.n_obj})
except KeyError as e:
# DO NOT raise any Exception if the alg_name does not require
# any input reference direction function
pass
except Exception as e:
print(e)
sys.exit()
if alg_name == "NSGA2":
sampling_func = MOO_CONFIG["sampling_func"]
pop_size = alg_specific_args["pop_size"]
#################
# set algorithm #
#################
algorithm = NSGA2(
pop_size=pop_size,
sampling=get_sampling(sampling_func),
crossover=get_crossover(crossover_func, **crossover_func_args),
mutation=get_mutation(mutation_func, **mutation_func_args),
eliminate_duplicates=True
)
#####################
# algorithm logging #
#####################
MOO_log(
msg="algorithm = {}(\n"
"pop_size={},\n"
"sampling=get_sampling({}),\n"
"crossover=get_crossover({},{}),\n"
"mutation=get_mutation({},{}),\n"
"eliminate_duplicates=True\n"
")".format(
alg_name,
pop_size,
sampling_func,
crossover_func, crossover_func_args,
mutation_func, mutation_func_args,
)
)
elif alg_name == "MOEAD":
alg_specific_args = MOO_CONFIG["alg_specific_args"]["MOEAD"]
n_neighbors = alg_specific_args["n_neighbors"]
prob_neighbor_mating = alg_specific_args["prob_neighbor_mating"]
#################
# set algorithm #
#################
algorithm = MOEAD(
ref_dirs=get_reference_directions(ref_dir_func,
**ref_dir_func_args),
n_neighbors=n_neighbors,
prob_neighbor_mating=prob_neighbor_mating,
crossover=get_crossover(crossover_func, **crossover_func_args),
mutation=get_mutation(mutation_func, **mutation_func_args),
)
#####################
# algorithm logging #
#####################
MOO_log(
msg="algorithm = {}(\n"
"ref_dirs = get_reference_directions({},{}),\n"
"n_neighbors = {}\n"
"prob_neighbor_mating = {}\n"
"crossover=get_crossover({},{}),\n"
"mutation=get_mutation({},{}),\n"
")".format(
alg_name,
ref_dir_func, ref_dir_func_args,
n_neighbors,
prob_neighbor_mating,
crossover_func, crossover_func_args,
mutation_func, mutation_func_args,
)
)
elif alg_name == "BCE-MOEAD":
alg_specific_args = MOO_CONFIG["alg_specific_args"]["BCE-MOEAD"]
n_neighbors = alg_specific_args["n_neighbors"]
prob_neighbor_mating = alg_specific_args["prob_neighbor_mating"]
#################
# set algorithm #
#################
algorithm = BCEMOEAD(
ref_dirs=get_reference_directions(ref_dir_func,
**ref_dir_func_args),
n_neighbors=n_neighbors,
prob_neighbor_mating=prob_neighbor_mating,
crossover=get_crossover(crossover_func, **crossover_func_args),
mutation=get_mutation(mutation_func, **mutation_func_args),
)
#####################
# algorithm logging #
#####################
MOO_log(
msg="algorithm = {}(\n"
"ref_dirs = get_reference_directions({},{}),\n"
"n_neighbors = {}\n"
"prob_neighbor_mating = {}\n"
"crossover=get_crossover({},{}),\n"
"mutation=get_mutation({},{}),\n"
")".format(
alg_name,
ref_dir_func, ref_dir_func_args,
n_neighbors,
prob_neighbor_mating,
crossover_func, crossover_func_args,
mutation_func, mutation_func_args,
)
)
elif alg_name == "NSGA3":
pop_size = alg_specific_args["pop_size"]
#################
# set algorithm #
#################
algorithm = NSGA3(
pop_size=pop_size,
ref_dirs=get_reference_directions(ref_dir_func,
**ref_dir_func_args),
crossover=get_crossover(crossover_func, **crossover_func_args),
mutation=get_mutation(mutation_func, **mutation_func_args),
)
#####################
# algorithm logging #
#####################
MOO_log(
msg="algorithm = {}(\n"
"pop_size = {}\n`"
"ref_dirs = get_reference_directions({},{}),\n"
"crossover=get_crossover({},{}),\n"
"mutation=get_mutation({},{}),\n"
")".format(
alg_name,
pop_size,
ref_dir_func, ref_dir_func_args,
crossover_func, crossover_func_args,
mutation_func, mutation_func_args,
)
)
if algorithm is None:
raise RuntimeError(
"Input alg_name = {} is not valid or "
"not supported within run_MOO.py".format(
MOO_CONFIG.alg_name)
)
# convert dict {'n_gen': 2} to a tuple ('n_gen', 2)
termination = list(MOO_CONFIG["termination"].items())[0]
MOO_log(msg="termination = {}".format(termination))
res = minimize(
problem=problem,
algorithm=algorithm,
termination=termination,
verbose=True
)
x = res.pop.get("X")
MOO_log(msg="location index = \n {}".format(x))
X_1D = x.flatten()
X_1D = X_1D.astype('int64')
# read accessible_camp_ipc.csv
df = | pd.read_csv("accessible_camp_ipc.csv") | pandas.read_csv |
import pytz
import pytest
import dateutil
import warnings
import numpy as np
from datetime import timedelta
from itertools import product
import pandas as pd
import pandas._libs.tslib as tslib
import pandas.util.testing as tm
from pandas.errors import PerformanceWarning
from pandas.core.indexes.datetimes import cdate_range
from pandas import (DatetimeIndex, PeriodIndex, Series, Timestamp, Timedelta,
date_range, TimedeltaIndex, _np_version_under1p10, Index,
datetime, Float64Index, offsets, bdate_range)
from pandas.tseries.offsets import BMonthEnd, CDay, BDay
from pandas.tests.test_base import Ops
START, END = datetime(2009, 1, 1), datetime(2010, 1, 1)
class TestDatetimeIndexOps(Ops):
tz = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/Asia/Singapore',
'dateutil/US/Pacific']
def setup_method(self, method):
super(TestDatetimeIndexOps, self).setup_method(method)
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
f = lambda x: isinstance(x, DatetimeIndex)
self.check_ops_properties(DatetimeIndex._field_ops, f)
self.check_ops_properties(DatetimeIndex._object_ops, f)
self.check_ops_properties(DatetimeIndex._bool_ops, f)
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH7206
for op in ['year', 'day', 'second', 'weekday']:
pytest.raises(TypeError, lambda x: getattr(self.dt_series, op))
# attribute access should still work!
s = Series(dict(year=2000, month=1, day=10))
assert s.year == 2000
assert s.month == 1
assert s.day == 10
pytest.raises(AttributeError, lambda: s.weekday)
def test_asobject_tolist(self):
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [Timestamp('2013-01-31'),
Timestamp('2013-02-28'),
Timestamp('2013-03-31'),
Timestamp('2013-04-30')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
assert isinstance(result, Index)
assert result.dtype == object
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert idx.tolist() == expected_list
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx', tz='Asia/Tokyo')
expected_list = [Timestamp('2013-01-31', tz='Asia/Tokyo'),
Timestamp('2013-02-28', tz='Asia/Tokyo'),
Timestamp('2013-03-31', tz='Asia/Tokyo'),
Timestamp('2013-04-30', tz='Asia/Tokyo')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
assert isinstance(result, Index)
assert result.dtype == object
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert idx.tolist() == expected_list
idx = DatetimeIndex([datetime(2013, 1, 1), datetime(2013, 1, 2),
pd.NaT, datetime(2013, 1, 4)], name='idx')
expected_list = [Timestamp('2013-01-01'),
Timestamp('2013-01-02'), pd.NaT,
Timestamp('2013-01-04')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
assert isinstance(result, Index)
assert result.dtype == object
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert idx.tolist() == expected_list
def test_minmax(self):
for tz in self.tz:
# monotonic
idx1 = pd.DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], tz=tz)
assert idx1.is_monotonic
# non-monotonic
idx2 = pd.DatetimeIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], tz=tz)
assert not idx2.is_monotonic
for idx in [idx1, idx2]:
assert idx.min() == Timestamp('2011-01-01', tz=tz)
assert idx.max() == Timestamp('2011-01-03', tz=tz)
assert idx.argmin() == 0
assert idx.argmax() == 2
for op in ['min', 'max']:
# Return NaT
obj = DatetimeIndex([])
assert pd.isna(getattr(obj, op)())
obj = DatetimeIndex([pd.NaT])
assert pd.isna(getattr(obj, op)())
obj = DatetimeIndex([pd.NaT, pd.NaT, pd.NaT])
assert pd.isna(getattr(obj, op)())
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
assert np.min(dr) == Timestamp('2016-01-15 00:00:00', freq='D')
assert np.max(dr) == Timestamp('2016-01-20 00:00:00', freq='D')
errmsg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, errmsg, np.min, dr, out=0)
tm.assert_raises_regex(ValueError, errmsg, np.max, dr, out=0)
assert np.argmin(dr) == 0
assert np.argmax(dr) == 5
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assert_raises_regex(
ValueError, errmsg, np.argmin, dr, out=0)
tm.assert_raises_regex(
ValueError, errmsg, np.argmax, dr, out=0)
def test_round(self):
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=5,
freq='30Min', tz=tz)
elt = rng[1]
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 01:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(rng.round(freq='H'), expected_rng)
assert elt.round(freq='H') == expected_elt
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with tm.assert_raises_regex(ValueError, msg):
rng.round(freq='foo')
with tm.assert_raises_regex(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assert_raises_regex(ValueError, msg, rng.round, freq='M')
tm.assert_raises_regex(ValueError, msg, elt.round, freq='M')
# GH 14440 & 15578
index = pd.DatetimeIndex(['2016-10-17 12:00:00.0015'], tz=tz)
result = index.round('ms')
expected = pd.DatetimeIndex(['2016-10-17 12:00:00.002000'], tz=tz)
tm.assert_index_equal(result, expected)
for freq in ['us', 'ns']:
tm.assert_index_equal(index, index.round(freq))
index = pd.DatetimeIndex(['2016-10-17 12:00:00.00149'], tz=tz)
result = index.round('ms')
expected = pd.DatetimeIndex(['2016-10-17 12:00:00.001000'], tz=tz)
tm.assert_index_equal(result, expected)
index = pd.DatetimeIndex(['2016-10-17 12:00:00.001501031'])
result = index.round('10ns')
expected = pd.DatetimeIndex(['2016-10-17 12:00:00.001501030'])
tm.assert_index_equal(result, expected)
with tm.assert_produces_warning():
ts = '2016-10-17 12:00:00.001501031'
pd.DatetimeIndex([ts]).round('1010ns')
def test_repeat_range(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
assert result.freq is None
assert len(result) == 5 * len(rng)
for tz in self.tz:
index = pd.date_range('2001-01-01', periods=2, freq='D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-02', '2001-01-02'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
index = pd.date_range('2001-01-01', periods=2, freq='2D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-03', '2001-01-03'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
index = pd.DatetimeIndex(['2001-01-01', 'NaT', '2003-01-01'],
tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01', '2001-01-01',
'NaT', 'NaT', 'NaT',
'2003-01-01', '2003-01-01', '2003-01-01'],
tz=tz)
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
def test_repeat(self):
reps = 2
msg = "the 'axis' parameter is not supported"
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=2,
freq='30Min', tz=tz)
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
])
res = rng.repeat(reps)
tm.assert_index_equal(res, expected_rng)
assert res.freq is None
tm.assert_index_equal(np.repeat(rng, reps), expected_rng)
tm.assert_raises_regex(ValueError, msg, np.repeat,
rng, reps, axis=1)
def test_representation(self):
idx = []
idx.append(DatetimeIndex([], freq='D'))
idx.append(DatetimeIndex(['2011-01-01'], freq='D'))
idx.append(DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'
], freq='H', tz='Asia/Tokyo'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='US/Eastern'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='UTC'))
exp = []
exp.append("""DatetimeIndex([], dtype='datetime64[ns]', freq='D')""")
exp.append("DatetimeIndex(['2011-01-01'], dtype='datetime64[ns]', "
"freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+09:00', "
"'2011-01-01 10:00:00+09:00', '2011-01-01 11:00:00+09:00']"
", dtype='datetime64[ns, Asia/Tokyo]', freq='H')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00-05:00', "
"'2011-01-01 10:00:00-05:00', 'NaT'], "
"dtype='datetime64[ns, US/Eastern]', freq=None)")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+00:00', "
"'2011-01-01 10:00:00+00:00', 'NaT'], "
"dtype='datetime64[ns, UTC]', freq=None)""")
with pd.option_context('display.width', 300):
for indx, expected in zip(idx, exp):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(indx, func)()
assert result == expected
def test_representation_to_series(self):
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
idx7 = DatetimeIndex(['2011-01-01 09:00', '2011-01-02 10:15'])
exp1 = """Series([], dtype: datetime64[ns])"""
exp2 = """0 2011-01-01
dtype: datetime64[ns]"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: datetime64[ns]"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: datetime64[ns]"""
exp5 = """0 2011-01-01 09:00:00+09:00
1 2011-01-01 10:00:00+09:00
2 2011-01-01 11:00:00+09:00
dtype: datetime64[ns, Asia/Tokyo]"""
exp6 = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 NaT
dtype: datetime64[ns, US/Eastern]"""
exp7 = """0 2011-01-01 09:00:00
1 2011-01-02 10:15:00
dtype: datetime64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4,
idx5, idx6, idx7],
[exp1, exp2, exp3, exp4,
exp5, exp6, exp7]):
result = repr(Series(idx))
assert result == expected
def test_summary(self):
# GH9116
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'],
freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
exp1 = """DatetimeIndex: 0 entries
Freq: D"""
exp2 = """DatetimeIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """DatetimeIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """DatetimeIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = ("DatetimeIndex: 3 entries, 2011-01-01 09:00:00+09:00 "
"to 2011-01-01 11:00:00+09:00\n"
"Freq: H")
exp6 = """DatetimeIndex: 3 entries, 2011-01-01 09:00:00-05:00 to NaT"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6],
[exp1, exp2, exp3, exp4, exp5, exp6]):
result = idx.summary()
assert result == expected
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T',
'S', 'L', 'U'],
['day', 'day', 'day', 'day', 'hour',
'minute', 'second', 'millisecond',
'microsecond']):
for tz in self.tz:
idx = pd.date_range(start='2013-04-01', periods=30, freq=freq,
tz=tz)
assert idx.resolution == expected
def test_union(self):
for tz in self.tz:
# union
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=10, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=8, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_union = rng.union(other)
tm.assert_index_equal(result_union, expected)
def test_add_iadd(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
result = rng + delta
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng + 1
expected = pd.date_range('2000-01-01 10:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add a datelike to a DatetimeIndex"
with tm.assert_raises_regex(TypeError, msg):
idx + Timestamp('2011-01-01')
with tm.assert_raises_regex(TypeError, msg):
Timestamp('2011-01-01') + idx
def test_add_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now raises
# TypeError (GH14164)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
with pytest.raises(TypeError):
dti + dti
with pytest.raises(TypeError):
dti_tz + dti_tz
with pytest.raises(TypeError):
dti_tz + dti
with pytest.raises(TypeError):
dti + dti_tz
def test_difference(self):
for tz in self.tz:
# diff
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=3, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_diff = rng.difference(other)
tm.assert_index_equal(result_diff, expected)
def test_sub_isub(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
result = rng - delta
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng - 1
expected = pd.date_range('2000-01-01 08:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
def test_sub_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now changed to
# return subtraction -> TimeDeltaIndex (GH ...)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
dti_tz2 = date_range('20130101', periods=3).tz_localize('UTC')
expected = TimedeltaIndex([0, 0, 0])
result = dti - dti
tm.assert_index_equal(result, expected)
result = dti_tz - dti_tz
tm.assert_index_equal(result, expected)
with pytest.raises(TypeError):
dti_tz - dti
with pytest.raises(TypeError):
dti - dti_tz
with pytest.raises(TypeError):
dti_tz - dti_tz2
# isub
dti -= dti
tm.assert_index_equal(dti, expected)
# different length raises ValueError
dti1 = date_range('20130101', periods=3)
dti2 = date_range('20130101', periods=4)
with pytest.raises(ValueError):
dti1 - dti2
# NaN propagation
dti1 = DatetimeIndex(['2012-01-01', np.nan, '2012-01-03'])
dti2 = DatetimeIndex(['2012-01-02', '2012-01-03', np.nan])
expected = TimedeltaIndex(['1 days', np.nan, np.nan])
result = dti2 - dti1
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'D']:
idx = | pd.DatetimeIndex(['2011-01-01', '2011-01-02'], freq=freq) | pandas.DatetimeIndex |
import numpy as np
import pytest
from pandas.core.dtypes.common import is_integer_dtype
import pandas as pd
from pandas import Categorical, CategoricalIndex, DataFrame, Series, get_dummies
import pandas._testing as tm
from pandas.core.arrays.sparse import SparseArray, SparseDtype
class TestGetDummies:
@pytest.fixture
def df(self):
return DataFrame({"A": ["a", "b", "a"], "B": ["b", "b", "c"], "C": [1, 2, 3]})
@pytest.fixture(params=["uint8", "i8", np.float64, bool, None])
def dtype(self, request):
return np.dtype(request.param)
@pytest.fixture(params=["dense", "sparse"])
def sparse(self, request):
# params are strings to simplify reading test results,
# e.g. TestGetDummies::test_basic[uint8-sparse] instead of [uint8-True]
return request.param == "sparse"
def effective_dtype(self, dtype):
if dtype is None:
return np.uint8
return dtype
def test_get_dummies_raises_on_dtype_object(self, df):
with pytest.raises(ValueError):
get_dummies(df, dtype="object")
def test_get_dummies_basic(self, sparse, dtype):
s_list = list("abc")
s_series = Series(s_list)
s_series_index = Series(s_list, list("ABC"))
expected = DataFrame(
{"a": [1, 0, 0], "b": [0, 1, 0], "c": [0, 0, 1]},
dtype=self.effective_dtype(dtype),
)
if sparse:
expected = expected.apply(SparseArray, fill_value=0.0)
result = get_dummies(s_list, sparse=sparse, dtype=dtype)
tm.assert_frame_equal(result, expected)
result = get_dummies(s_series, sparse=sparse, dtype=dtype)
tm.assert_frame_equal(result, expected)
expected.index = list("ABC")
result = get_dummies(s_series_index, sparse=sparse, dtype=dtype)
tm.assert_frame_equal(result, expected)
def test_get_dummies_basic_types(self, sparse, dtype):
# GH 10531
s_list = list("abc")
s_series = Series(s_list)
s_df = DataFrame(
{"a": [0, 1, 0, 1, 2], "b": ["A", "A", "B", "C", "C"], "c": [2, 3, 3, 3, 2]}
)
expected = DataFrame(
{"a": [1, 0, 0], "b": [0, 1, 0], "c": [0, 0, 1]},
dtype=self.effective_dtype(dtype),
columns=list("abc"),
)
if sparse:
if is_integer_dtype(dtype):
fill_value = 0
elif dtype == bool:
fill_value = False
else:
fill_value = 0.0
expected = expected.apply(SparseArray, fill_value=fill_value)
result = get_dummies(s_list, sparse=sparse, dtype=dtype)
tm.assert_frame_equal(result, expected)
result = get_dummies(s_series, sparse=sparse, dtype=dtype)
tm.assert_frame_equal(result, expected)
result = get_dummies(s_df, columns=s_df.columns, sparse=sparse, dtype=dtype)
if sparse:
dtype_name = f"Sparse[{self.effective_dtype(dtype).name}, {fill_value}]"
else:
dtype_name = self.effective_dtype(dtype).name
expected = Series({dtype_name: 8})
result = result.dtypes.value_counts()
result.index = [str(i) for i in result.index]
tm.assert_series_equal(result, expected)
result = get_dummies(s_df, columns=["a"], sparse=sparse, dtype=dtype)
expected_counts = {"int64": 1, "object": 1}
expected_counts[dtype_name] = 3 + expected_counts.get(dtype_name, 0)
expected = Series(expected_counts).sort_index()
result = result.dtypes.value_counts()
result.index = [str(i) for i in result.index]
result = result.sort_index()
tm.assert_series_equal(result, expected)
def test_get_dummies_just_na(self, sparse):
just_na_list = [np.nan]
just_na_series = Series(just_na_list)
just_na_series_index = Series(just_na_list, index=["A"])
res_list = get_dummies(just_na_list, sparse=sparse)
res_series = get_dummies(just_na_series, sparse=sparse)
res_series_index = get_dummies(just_na_series_index, sparse=sparse)
assert res_list.empty
assert res_series.empty
assert res_series_index.empty
assert res_list.index.tolist() == [0]
assert res_series.index.tolist() == [0]
assert res_series_index.index.tolist() == ["A"]
def test_get_dummies_include_na(self, sparse, dtype):
s = ["a", "b", np.nan]
res = get_dummies(s, sparse=sparse, dtype=dtype)
exp = DataFrame(
{"a": [1, 0, 0], "b": [0, 1, 0]}, dtype=self.effective_dtype(dtype)
)
if sparse:
exp = exp.apply(SparseArray, fill_value=0.0)
tm.assert_frame_equal(res, exp)
# Sparse dataframes do not allow nan labelled columns, see #GH8822
res_na = get_dummies(s, dummy_na=True, sparse=sparse, dtype=dtype)
exp_na = DataFrame(
{np.nan: [0, 0, 1], "a": [1, 0, 0], "b": [0, 1, 0]},
dtype=self.effective_dtype(dtype),
)
exp_na = exp_na.reindex(["a", "b", np.nan], axis=1)
# hack (NaN handling in assert_index_equal)
exp_na.columns = res_na.columns
if sparse:
exp_na = exp_na.apply(SparseArray, fill_value=0.0)
tm.assert_frame_equal(res_na, exp_na)
res_just_na = get_dummies([np.nan], dummy_na=True, sparse=sparse, dtype=dtype)
exp_just_na = DataFrame(
Series(1, index=[0]), columns=[np.nan], dtype=self.effective_dtype(dtype)
)
tm.assert_numpy_array_equal(res_just_na.values, exp_just_na.values)
def test_get_dummies_unicode(self, sparse):
# See GH 6885 - get_dummies chokes on unicode values
import unicodedata
e = "e"
eacute = unicodedata.lookup("LATIN SMALL LETTER E WITH ACUTE")
s = [e, eacute, eacute]
res = get_dummies(s, prefix="letter", sparse=sparse)
exp = DataFrame(
{"letter_e": [1, 0, 0], f"letter_{eacute}": [0, 1, 1]}, dtype=np.uint8
)
if sparse:
exp = exp.apply(SparseArray, fill_value=0)
tm.assert_frame_equal(res, exp)
def test_dataframe_dummies_all_obj(self, df, sparse):
df = df[["A", "B"]]
result = get_dummies(df, sparse=sparse)
expected = DataFrame(
{"A_a": [1, 0, 1], "A_b": [0, 1, 0], "B_b": [1, 1, 0], "B_c": [0, 0, 1]},
dtype=np.uint8,
)
if sparse:
expected = DataFrame(
{
"A_a": SparseArray([1, 0, 1], dtype="uint8"),
"A_b": SparseArray([0, 1, 0], dtype="uint8"),
"B_b": SparseArray([1, 1, 0], dtype="uint8"),
"B_c": SparseArray([0, 0, 1], dtype="uint8"),
}
)
tm.assert_frame_equal(result, expected)
def test_dataframe_dummies_mix_default(self, df, sparse, dtype):
result = get_dummies(df, sparse=sparse, dtype=dtype)
if sparse:
arr = SparseArray
typ = SparseDtype(dtype, 0)
else:
arr = np.array
typ = dtype
expected = DataFrame(
{
"C": [1, 2, 3],
"A_a": arr([1, 0, 1], dtype=typ),
"A_b": arr([0, 1, 0], dtype=typ),
"B_b": arr([1, 1, 0], dtype=typ),
"B_c": arr([0, 0, 1], dtype=typ),
}
)
expected = expected[["C", "A_a", "A_b", "B_b", "B_c"]]
tm.assert_frame_equal(result, expected)
def test_dataframe_dummies_prefix_list(self, df, sparse):
prefixes = ["from_A", "from_B"]
result = get_dummies(df, prefix=prefixes, sparse=sparse)
expected = DataFrame(
{
"C": [1, 2, 3],
"from_A_a": [1, 0, 1],
"from_A_b": [0, 1, 0],
"from_B_b": [1, 1, 0],
"from_B_c": [0, 0, 1],
},
dtype=np.uint8,
)
expected[["C"]] = df[["C"]]
cols = ["from_A_a", "from_A_b", "from_B_b", "from_B_c"]
expected = expected[["C"] + cols]
typ = SparseArray if sparse else Series
expected[cols] = expected[cols].apply(lambda x: typ(x))
tm.assert_frame_equal(result, expected)
def test_dataframe_dummies_prefix_str(self, df, sparse):
# not that you should do this...
result = get_dummies(df, prefix="bad", sparse=sparse)
bad_columns = ["bad_a", "bad_b", "bad_b", "bad_c"]
expected = DataFrame(
[[1, 1, 0, 1, 0], [2, 0, 1, 1, 0], [3, 1, 0, 0, 1]],
columns=["C"] + bad_columns,
dtype=np.uint8,
)
expected = expected.astype({"C": np.int64})
if sparse:
# work around astyping & assigning with duplicate columns
# https://github.com/pandas-dev/pandas/issues/14427
expected = pd.concat(
[
Series([1, 2, 3], name="C"),
Series([1, 0, 1], name="bad_a", dtype="Sparse[uint8]"),
Series([0, 1, 0], name="bad_b", dtype="Sparse[uint8]"),
Series([1, 1, 0], name="bad_b", dtype="Sparse[uint8]"),
Series([0, 0, 1], name="bad_c", dtype="Sparse[uint8]"),
],
axis=1,
)
tm.assert_frame_equal(result, expected)
def test_dataframe_dummies_subset(self, df, sparse):
result = get_dummies(df, prefix=["from_A"], columns=["A"], sparse=sparse)
expected = DataFrame(
{
"B": ["b", "b", "c"],
"C": [1, 2, 3],
"from_A_a": [1, 0, 1],
"from_A_b": [0, 1, 0],
},
dtype=np.uint8,
)
expected[["C"]] = df[["C"]]
if sparse:
cols = ["from_A_a", "from_A_b"]
expected[cols] = expected[cols].astype(SparseDtype("uint8", 0))
tm.assert_frame_equal(result, expected)
def test_dataframe_dummies_prefix_sep(self, df, sparse):
result = get_dummies(df, prefix_sep="..", sparse=sparse)
expected = DataFrame(
{
"C": [1, 2, 3],
"A..a": [1, 0, 1],
"A..b": [0, 1, 0],
"B..b": [1, 1, 0],
"B..c": [0, 0, 1],
},
dtype=np.uint8,
)
expected[["C"]] = df[["C"]]
expected = expected[["C", "A..a", "A..b", "B..b", "B..c"]]
if sparse:
cols = ["A..a", "A..b", "B..b", "B..c"]
expected[cols] = expected[cols].astype(SparseDtype("uint8", 0))
tm.assert_frame_equal(result, expected)
result = get_dummies(df, prefix_sep=["..", "__"], sparse=sparse)
expected = expected.rename(columns={"B..b": "B__b", "B..c": "B__c"})
tm.assert_frame_equal(result, expected)
result = get_dummies(df, prefix_sep={"A": "..", "B": "__"}, sparse=sparse)
tm.assert_frame_equal(result, expected)
def test_dataframe_dummies_prefix_bad_length(self, df, sparse):
with pytest.raises(ValueError):
get_dummies(df, prefix=["too few"], sparse=sparse)
def test_dataframe_dummies_prefix_sep_bad_length(self, df, sparse):
with pytest.raises(ValueError):
get_dummies(df, prefix_sep=["bad"], sparse=sparse)
def test_dataframe_dummies_prefix_dict(self, sparse):
prefixes = {"A": "from_A", "B": "from_B"}
df = DataFrame({"C": [1, 2, 3], "A": ["a", "b", "a"], "B": ["b", "b", "c"]})
result = get_dummies(df, prefix=prefixes, sparse=sparse)
expected = DataFrame(
{
"C": [1, 2, 3],
"from_A_a": [1, 0, 1],
"from_A_b": [0, 1, 0],
"from_B_b": [1, 1, 0],
"from_B_c": [0, 0, 1],
}
)
columns = ["from_A_a", "from_A_b", "from_B_b", "from_B_c"]
expected[columns] = expected[columns].astype(np.uint8)
if sparse:
expected[columns] = expected[columns].astype(SparseDtype("uint8", 0))
tm.assert_frame_equal(result, expected)
def test_dataframe_dummies_with_na(self, df, sparse, dtype):
df.loc[3, :] = [np.nan, np.nan, np.nan]
result = get_dummies(df, dummy_na=True, sparse=sparse, dtype=dtype).sort_index(
axis=1
)
if sparse:
arr = SparseArray
typ = SparseDtype(dtype, 0)
else:
arr = np.array
typ = dtype
expected = DataFrame(
{
"C": [1, 2, 3, np.nan],
"A_a": arr([1, 0, 1, 0], dtype=typ),
"A_b": arr([0, 1, 0, 0], dtype=typ),
"A_nan": arr([0, 0, 0, 1], dtype=typ),
"B_b": arr([1, 1, 0, 0], dtype=typ),
"B_c": arr([0, 0, 1, 0], dtype=typ),
"B_nan": arr([0, 0, 0, 1], dtype=typ),
}
).sort_index(axis=1)
tm.assert_frame_equal(result, expected)
result = get_dummies(df, dummy_na=False, sparse=sparse, dtype=dtype)
expected = expected[["C", "A_a", "A_b", "B_b", "B_c"]]
tm.assert_frame_equal(result, expected)
def test_dataframe_dummies_with_categorical(self, df, sparse, dtype):
df["cat"] = Categorical(["x", "y", "y"])
result = get_dummies(df, sparse=sparse, dtype=dtype).sort_index(axis=1)
if sparse:
arr = SparseArray
typ = SparseDtype(dtype, 0)
else:
arr = np.array
typ = dtype
expected = DataFrame(
{
"C": [1, 2, 3],
"A_a": arr([1, 0, 1], dtype=typ),
"A_b": arr([0, 1, 0], dtype=typ),
"B_b": arr([1, 1, 0], dtype=typ),
"B_c": arr([0, 0, 1], dtype=typ),
"cat_x": arr([1, 0, 0], dtype=typ),
"cat_y": arr([0, 1, 1], dtype=typ),
}
).sort_index(axis=1)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"get_dummies_kwargs,expected",
[
(
{"data": DataFrame({"ä": ["a"]})},
DataFrame({"ä_a": [1]}, dtype=np.uint8),
),
(
{"data": DataFrame({"x": ["ä"]})},
DataFrame({"x_ä": [1]}, dtype=np.uint8),
),
(
{"data": DataFrame({"x": ["a"]}), "prefix": "ä"},
DataFrame({"ä_a": [1]}, dtype=np.uint8),
),
(
{"data": DataFrame({"x": ["a"]}), "prefix_sep": "ä"},
DataFrame({"xäa": [1]}, dtype=np.uint8),
),
],
)
def test_dataframe_dummies_unicode(self, get_dummies_kwargs, expected):
# GH22084 get_dummies incorrectly encodes unicode characters
# in dataframe column names
result = get_dummies(**get_dummies_kwargs)
tm.assert_frame_equal(result, expected)
def test_get_dummies_basic_drop_first(self, sparse):
# GH12402 Add a new parameter `drop_first` to avoid collinearity
# Basic case
s_list = list("abc")
s_series = Series(s_list)
s_series_index = Series(s_list, list("ABC"))
expected = DataFrame({"b": [0, 1, 0], "c": [0, 0, 1]}, dtype=np.uint8)
result = get_dummies(s_list, drop_first=True, sparse=sparse)
if sparse:
expected = expected.apply(SparseArray, fill_value=0)
tm.assert_frame_equal(result, expected)
result = get_dummies(s_series, drop_first=True, sparse=sparse)
tm.assert_frame_equal(result, expected)
expected.index = list("ABC")
result = get_dummies(s_series_index, drop_first=True, sparse=sparse)
tm.assert_frame_equal(result, expected)
def test_get_dummies_basic_drop_first_one_level(self, sparse):
# Test the case that categorical variable only has one level.
s_list = list("aaa")
s_series = Series(s_list)
s_series_index = Series(s_list, list("ABC"))
expected = DataFrame(index=np.arange(3))
result = get_dummies(s_list, drop_first=True, sparse=sparse)
tm.assert_frame_equal(result, expected)
result = get_dummies(s_series, drop_first=True, sparse=sparse)
tm.assert_frame_equal(result, expected)
expected = DataFrame(index=list("ABC"))
result = get_dummies(s_series_index, drop_first=True, sparse=sparse)
tm.assert_frame_equal(result, expected)
def test_get_dummies_basic_drop_first_NA(self, sparse):
# Test NA handling together with drop_first
s_NA = ["a", "b", np.nan]
res = | get_dummies(s_NA, drop_first=True, sparse=sparse) | pandas.get_dummies |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# (ax11, ax12, ax13, ax14, ax15, ax16, ax17, ax18),
# (ax21, ax22, ax23, ax24, ax25, ax26, ax27, ax28),
# (ax31, ax32, ax33, ax34, ax35, ax36, ax37, ax38),
# (ax41, ax42, ax43, ax44, ax45, ax46, ax47, ax48),
# (ax51, ax52, ax53, ax54, ax55, ax56, ax57, ax58),
# (ax61, ax62, ax63, ax64, ax65, ax66, ax67, ax68),
# (ax71, ax72, ax73, ax74, ax75, ax76, ax77, ax78),
# (ax81, ax82, ax83, ax84, ax85, ax86, ax87, ax88)
val2012pipe = pd.read_csv('2012pipe.csv', index_col=0)
val2012rail = pd.read_csv('2012rail.csv', index_col=0)
val2012ship = pd.read_csv('2012ship.csv', index_col=0)
ref2012pipe = pd.read_csv('ref2012pipe.csv', index_col=0)
ref2012rail = | pd.read_csv('ref2012rail.csv', index_col=0) | pandas.read_csv |
"""
A warehouse for constant values required to initilize the PUDL Database.
This constants module stores and organizes a bunch of constant values which are
used throughout PUDL to populate static lists within the data packages or for
data cleaning purposes.
"""
import pandas as pd
import sqlalchemy as sa
######################################################################
# Constants used within the init.py module.
######################################################################
prime_movers = [
'steam_turbine',
'gas_turbine',
'hydro',
'internal_combustion',
'solar_pv',
'wind_turbine'
]
"""list: A list of the types of prime movers"""
rto_iso = {
'CAISO': 'California ISO',
'ERCOT': 'Electric Reliability Council of Texas',
'MISO': 'Midcontinent ISO',
'ISO-NE': 'ISO New England',
'NYISO': 'New York ISO',
'PJM': 'PJM Interconnection',
'SPP': 'Southwest Power Pool'
}
"""dict: A dictionary containing ISO/RTO abbreviations (keys) and names (values)
"""
us_states = {
'AK': 'Alaska',
'AL': 'Alabama',
'AR': 'Arkansas',
'AS': 'American Samoa',
'AZ': 'Arizona',
'CA': 'California',
'CO': 'Colorado',
'CT': 'Connecticut',
'DC': 'District of Columbia',
'DE': 'Delaware',
'FL': 'Florida',
'GA': 'Georgia',
'GU': 'Guam',
'HI': 'Hawaii',
'IA': 'Iowa',
'ID': 'Idaho',
'IL': 'Illinois',
'IN': 'Indiana',
'KS': 'Kansas',
'KY': 'Kentucky',
'LA': 'Louisiana',
'MA': 'Massachusetts',
'MD': 'Maryland',
'ME': 'Maine',
'MI': 'Michigan',
'MN': 'Minnesota',
'MO': 'Missouri',
'MP': 'Northern Mariana Islands',
'MS': 'Mississippi',
'MT': 'Montana',
'NA': 'National',
'NC': 'North Carolina',
'ND': 'North Dakota',
'NE': 'Nebraska',
'NH': 'New Hampshire',
'NJ': 'New Jersey',
'NM': 'New Mexico',
'NV': 'Nevada',
'NY': 'New York',
'OH': 'Ohio',
'OK': 'Oklahoma',
'OR': 'Oregon',
'PA': 'Pennsylvania',
'PR': 'Puerto Rico',
'RI': 'Rhode Island',
'SC': 'South Carolina',
'SD': 'South Dakota',
'TN': 'Tennessee',
'TX': 'Texas',
'UT': 'Utah',
'VA': 'Virginia',
'VI': 'Virgin Islands',
'VT': 'Vermont',
'WA': 'Washington',
'WI': 'Wisconsin',
'WV': 'West Virginia',
'WY': 'Wyoming'
}
"""dict: A dictionary containing US state abbreviations (keys) and names
(values)
"""
canada_prov_terr = {
'AB': 'Alberta',
'BC': 'British Columbia',
'CN': 'Canada',
'MB': 'Manitoba',
'NB': 'New Brunswick',
'NS': 'Nova Scotia',
'NL': 'Newfoundland and Labrador',
'NT': 'Northwest Territories',
'NU': 'Nunavut',
'ON': 'Ontario',
'PE': 'Prince Edwards Island',
'QC': 'Quebec',
'SK': 'Saskatchewan',
'YT': 'Yukon Territory',
}
"""dict: A dictionary containing Canadian provinces' and territories'
abbreviations (keys) and names (values)
"""
cems_states = {k: v for k, v in us_states.items() if v not in
{'Alaska',
'American Samoa',
'Guam',
'Hawaii',
'Northern Mariana Islands',
'National',
'Puerto Rico',
'Virgin Islands'}
}
"""dict: A dictionary containing US state abbreviations (keys) and names
(values) that are present in the CEMS dataset
"""
# This is imperfect for states that have split timezones. See:
# https://en.wikipedia.org/wiki/List_of_time_offsets_by_U.S._state_and_territory
# For states that are split, I went with where there seem to be more people
# List of timezones in pytz.common_timezones
# Canada: https://en.wikipedia.org/wiki/Time_in_Canada#IANA_time_zone_database
state_tz_approx = {
"AK": "US/Alaska", # Alaska; Not in CEMS
"AL": "US/Central", # Alabama
"AR": "US/Central", # Arkansas
"AS": "Pacific/Pago_Pago", # American Samoa; Not in CEMS
"AZ": "US/Arizona", # Arizona
"CA": "US/Pacific", # California
"CO": "US/Mountain", # Colorado
"CT": "US/Eastern", # Connecticut
"DC": "US/Eastern", # District of Columbia
"DE": "US/Eastern", # Delaware
"FL": "US/Eastern", # Florida (split state)
"GA": "US/Eastern", # Georgia
"GU": "Pacific/Guam", # Guam; Not in CEMS
"HI": "US/Hawaii", # Hawaii; Not in CEMS
"IA": "US/Central", # Iowa
"ID": "US/Mountain", # Idaho (split state)
"IL": "US/Central", # Illinois
"IN": "US/Eastern", # Indiana (split state)
"KS": "US/Central", # Kansas (split state)
"KY": "US/Eastern", # Kentucky (split state)
"LA": "US/Central", # Louisiana
"MA": "US/Eastern", # Massachusetts
"MD": "US/Eastern", # Maryland
"ME": "US/Eastern", # Maine
"MI": "America/Detroit", # Michigan (split state)
"MN": "US/Central", # Minnesota
"MO": "US/Central", # Missouri
"MP": "Pacific/Saipan", # Northern Mariana Islands; Not in CEMS
"MS": "US/Central", # Mississippi
"MT": "US/Mountain", # Montana
"NC": "US/Eastern", # North Carolina
"ND": "US/Central", # North Dakota (split state)
"NE": "US/Central", # Nebraska (split state)
"NH": "US/Eastern", # New Hampshire
"NJ": "US/Eastern", # New Jersey
"NM": "US/Mountain", # New Mexico
"NV": "US/Pacific", # Nevada
"NY": "US/Eastern", # New York
"OH": "US/Eastern", # Ohio
"OK": "US/Central", # Oklahoma
"OR": "US/Pacific", # Oregon (split state)
"PA": "US/Eastern", # Pennsylvania
"PR": "America/Puerto_Rico", # Puerto Rico; Not in CEMS
"RI": "US/Eastern", # Rhode Island
"SC": "US/Eastern", # South Carolina
"SD": "US/Central", # South Dakota (split state)
"TN": "US/Central", # Tennessee
"TX": "US/Central", # Texas
"UT": "US/Mountain", # Utah
"VA": "US/Eastern", # Virginia
"VI": "America/Puerto_Rico", # Virgin Islands; Not in CEMS
"VT": "US/Eastern", # Vermont
"WA": "US/Pacific", # Washington
"WI": "US/Central", # Wisconsin
"WV": "US/Eastern", # West Virginia
"WY": "US/Mountain", # Wyoming
# Canada (none of these are in CEMS)
"AB": "America/Edmonton", # Alberta
"BC": "America/Vancouver", # British Columbia (split province)
"MB": "America/Winnipeg", # Manitoba
"NB": "America/Moncton", # New Brunswick
"NS": "America/Halifax", # Nova Scotia
"NL": "America/St_Johns", # Newfoundland and Labrador (split province)
"NT": "America/Yellowknife", # Northwest Territories (split province)
"NU": "America/Iqaluit", # Nunavut (split province)
"ON": "America/Toronto", # Ontario (split province)
"PE": "America/Halifax", # Prince Edwards Island
"QC": "America/Montreal", # Quebec (split province)
"SK": "America/Regina", # Saskatchewan (split province)
"YT": "America/Whitehorse", # Yukon Territory
}
"""dict: A dictionary containing US and Canadian state/territory abbreviations
(keys) and timezones (values)
"""
ferc1_power_purchase_type = {
'RQ': 'requirement',
'LF': 'long_firm',
'IF': 'intermediate_firm',
'SF': 'short_firm',
'LU': 'long_unit',
'IU': 'intermediate_unit',
'EX': 'electricity_exchange',
'OS': 'other_service',
'AD': 'adjustment'
}
"""dict: A dictionary of abbreviations (keys) and types (values) for power
purchase agreements from FERC Form 1.
"""
# Dictionary mapping DBF files (w/o .DBF file extension) to DB table names
ferc1_dbf2tbl = {
'F1_1': 'f1_respondent_id',
'F1_2': 'f1_acb_epda',
'F1_3': 'f1_accumdepr_prvsn',
'F1_4': 'f1_accumdfrrdtaxcr',
'F1_5': 'f1_adit_190_detail',
'F1_6': 'f1_adit_190_notes',
'F1_7': 'f1_adit_amrt_prop',
'F1_8': 'f1_adit_other',
'F1_9': 'f1_adit_other_prop',
'F1_10': 'f1_allowances',
'F1_11': 'f1_bal_sheet_cr',
'F1_12': 'f1_capital_stock',
'F1_13': 'f1_cash_flow',
'F1_14': 'f1_cmmn_utlty_p_e',
'F1_15': 'f1_comp_balance_db',
'F1_16': 'f1_construction',
'F1_17': 'f1_control_respdnt',
'F1_18': 'f1_co_directors',
'F1_19': 'f1_cptl_stk_expns',
'F1_20': 'f1_csscslc_pcsircs',
'F1_21': 'f1_dacs_epda',
'F1_22': 'f1_dscnt_cptl_stk',
'F1_23': 'f1_edcfu_epda',
'F1_24': 'f1_elctrc_erg_acct',
'F1_25': 'f1_elctrc_oper_rev',
'F1_26': 'f1_elc_oper_rev_nb',
'F1_27': 'f1_elc_op_mnt_expn',
'F1_28': 'f1_electric',
'F1_29': 'f1_envrnmntl_expns',
'F1_30': 'f1_envrnmntl_fclty',
'F1_31': 'f1_fuel',
'F1_32': 'f1_general_info',
'F1_33': 'f1_gnrt_plant',
'F1_34': 'f1_important_chg',
'F1_35': 'f1_incm_stmnt_2',
'F1_36': 'f1_income_stmnt',
'F1_37': 'f1_miscgen_expnelc',
'F1_38': 'f1_misc_dfrrd_dr',
'F1_39': 'f1_mthly_peak_otpt',
'F1_40': 'f1_mtrl_spply',
'F1_41': 'f1_nbr_elc_deptemp',
'F1_42': 'f1_nonutility_prop',
'F1_43': 'f1_note_fin_stmnt', # 37% of DB
'F1_44': 'f1_nuclear_fuel',
'F1_45': 'f1_officers_co',
'F1_46': 'f1_othr_dfrrd_cr',
'F1_47': 'f1_othr_pd_in_cptl',
'F1_48': 'f1_othr_reg_assets',
'F1_49': 'f1_othr_reg_liab',
'F1_50': 'f1_overhead',
'F1_51': 'f1_pccidica',
'F1_52': 'f1_plant_in_srvce',
'F1_53': 'f1_pumped_storage',
'F1_54': 'f1_purchased_pwr',
'F1_55': 'f1_reconrpt_netinc',
'F1_56': 'f1_reg_comm_expn',
'F1_57': 'f1_respdnt_control',
'F1_58': 'f1_retained_erng',
'F1_59': 'f1_r_d_demo_actvty',
'F1_60': 'f1_sales_by_sched',
'F1_61': 'f1_sale_for_resale',
'F1_62': 'f1_sbsdry_totals',
'F1_63': 'f1_schedules_list',
'F1_64': 'f1_security_holder',
'F1_65': 'f1_slry_wg_dstrbtn',
'F1_66': 'f1_substations',
'F1_67': 'f1_taxacc_ppchrgyr',
'F1_68': 'f1_unrcvrd_cost',
'F1_69': 'f1_utltyplnt_smmry',
'F1_70': 'f1_work',
'F1_71': 'f1_xmssn_adds',
'F1_72': 'f1_xmssn_elc_bothr',
'F1_73': 'f1_xmssn_elc_fothr',
'F1_74': 'f1_xmssn_line',
'F1_75': 'f1_xtraordnry_loss',
'F1_76': 'f1_codes_val',
'F1_77': 'f1_sched_lit_tbl',
'F1_78': 'f1_audit_log',
'F1_79': 'f1_col_lit_tbl',
'F1_80': 'f1_load_file_names',
'F1_81': 'f1_privilege',
'F1_82': 'f1_sys_error_log',
'F1_83': 'f1_unique_num_val',
'F1_84': 'f1_row_lit_tbl',
'F1_85': 'f1_footnote_data',
'F1_86': 'f1_hydro',
'F1_87': 'f1_footnote_tbl', # 52% of DB
'F1_88': 'f1_ident_attsttn',
'F1_89': 'f1_steam',
'F1_90': 'f1_leased',
'F1_91': 'f1_sbsdry_detail',
'F1_92': 'f1_plant',
'F1_93': 'f1_long_term_debt',
'F1_106_2009': 'f1_106_2009',
'F1_106A_2009': 'f1_106a_2009',
'F1_106B_2009': 'f1_106b_2009',
'F1_208_ELC_DEP': 'f1_208_elc_dep',
'F1_231_TRN_STDYCST': 'f1_231_trn_stdycst',
'F1_324_ELC_EXPNS': 'f1_324_elc_expns',
'F1_325_ELC_CUST': 'f1_325_elc_cust',
'F1_331_TRANSISO': 'f1_331_transiso',
'F1_338_DEP_DEPL': 'f1_338_dep_depl',
'F1_397_ISORTO_STL': 'f1_397_isorto_stl',
'F1_398_ANCL_PS': 'f1_398_ancl_ps',
'F1_399_MTH_PEAK': 'f1_399_mth_peak',
'F1_400_SYS_PEAK': 'f1_400_sys_peak',
'F1_400A_ISO_PEAK': 'f1_400a_iso_peak',
'F1_429_TRANS_AFF': 'f1_429_trans_aff',
'F1_ALLOWANCES_NOX': 'f1_allowances_nox',
'F1_CMPINC_HEDGE_A': 'f1_cmpinc_hedge_a',
'F1_CMPINC_HEDGE': 'f1_cmpinc_hedge',
'F1_EMAIL': 'f1_email',
'F1_RG_TRN_SRV_REV': 'f1_rg_trn_srv_rev',
'F1_S0_CHECKS': 'f1_s0_checks',
'F1_S0_FILING_LOG': 'f1_s0_filing_log',
'F1_SECURITY': 'f1_security'
# 'F1_PINS': 'f1_pins', # private data, not publicized.
# 'F1_FREEZE': 'f1_freeze', # private data, not publicized
}
"""dict: A dictionary mapping FERC Form 1 DBF files(w / o .DBF file extension)
(keys) to database table names (values).
"""
ferc1_huge_tables = {
'f1_footnote_tbl',
'f1_footnote_data',
'f1_note_fin_stmnt',
}
"""set: A set containing large FERC Form 1 tables.
"""
# Invert the map above so we can go either way as needed
ferc1_tbl2dbf = {v: k for k, v in ferc1_dbf2tbl.items()}
"""dict: A dictionary mapping database table names (keys) to FERC Form 1 DBF
files (without the .DBF file extension) (values).
"""
# This dictionary maps the strings which are used to denote field types in the
# DBF objects to the corresponding generic SQLAlchemy Column types:
# These definitions come from a combination of the dbfread example program
# dbf2sqlite and this DBF file format documentation page:
# http://www.dbase.com/KnowledgeBase/int/db7_file_fmt.htm
# Un-mapped types left as 'XXX' which should obviously make an error...
dbf_typemap = {
'C': sa.String,
'D': sa.Date,
'F': sa.Float,
'I': sa.Integer,
'L': sa.Boolean,
'M': sa.Text, # 10 digit .DBT block number, stored as a string...
'N': sa.Float,
'T': sa.DateTime,
'0': sa.Integer, # based on dbf2sqlite mapping
'B': 'XXX', # .DBT block number, binary string
'@': 'XXX', # Timestamp... Date = Julian Day, Time is in milliseconds?
'+': 'XXX', # Autoincrement (e.g. for IDs)
'O': 'XXX', # Double, 8 bytes
'G': 'XXX', # OLE 10 digit/byte number of a .DBT block, stored as string
}
"""dict: A dictionary mapping field types in the DBF objects (keys) to the
corresponding generic SQLAlchemy Column types.
"""
# This is the set of tables which have been successfully integrated into PUDL:
ferc1_pudl_tables = (
'fuel_ferc1', # Plant-level data, linked to plants_steam_ferc1
'plants_steam_ferc1', # Plant-level data
'plants_small_ferc1', # Plant-level data
'plants_hydro_ferc1', # Plant-level data
'plants_pumped_storage_ferc1', # Plant-level data
'purchased_power_ferc1', # Inter-utility electricity transactions
'plant_in_service_ferc1', # Row-mapped plant accounting data.
# 'accumulated_depreciation_ferc1' # Requires row-mapping to be useful.
)
"""tuple: A tuple containing the FERC Form 1 tables that can be successfully
integrated into PUDL.
"""
table_map_ferc1_pudl = {
'fuel_ferc1': 'f1_fuel',
'plants_steam_ferc1': 'f1_steam',
'plants_small_ferc1': 'f1_gnrt_plant',
'plants_hydro_ferc1': 'f1_hydro',
'plants_pumped_storage_ferc1': 'f1_pumped_storage',
'plant_in_service_ferc1': 'f1_plant_in_srvce',
'purchased_power_ferc1': 'f1_purchased_pwr',
# 'accumulated_depreciation_ferc1': 'f1_accumdepr_prvsn'
}
"""dict: A dictionary mapping PUDL table names (keys) to the corresponding FERC
Form 1 DBF table names.
"""
# This is the list of EIA923 tables that can be successfully pulled into PUDL
eia923_pudl_tables = ('generation_fuel_eia923',
'boiler_fuel_eia923',
'generation_eia923',
'coalmine_eia923',
'fuel_receipts_costs_eia923')
"""tuple: A tuple containing the EIA923 tables that can be successfully
integrated into PUDL.
"""
epaipm_pudl_tables = (
'transmission_single_epaipm',
'transmission_joint_epaipm',
'load_curves_epaipm',
'plant_region_map_epaipm',
)
"""tuple: A tuple containing the EPA IPM tables that can be successfully
integrated into PUDL.
"""
# List of entity tables
entity_tables = ['utilities_entity_eia',
'plants_entity_eia',
'generators_entity_eia',
'boilers_entity_eia',
'regions_entity_epaipm', ]
"""list: A list of PUDL entity tables.
"""
xlsx_maps_pkg = 'pudl.package_data.meta.xlsx_maps'
"""string: The location of the xlsx maps within the PUDL package data."""
##############################################################################
# EIA 860 Spreadsheet Metadata
##############################################################################
# This is the list of EIA860 tables that can be successfully pulled into PUDL
eia860_pudl_tables = (
'boiler_generator_assn_eia860',
'utilities_eia860',
'plants_eia860',
'generators_eia860',
'ownership_eia860'
)
"""tuple: A tuple enumerating EIA 860 tables for which PUDL's ETL works."""
# The set of FERC Form 1 tables that have the same composite primary keys: [
# respondent_id, report_year, report_prd, row_number, spplmnt_num ].
# TODO: THIS ONLY PERTAINS TO 2015 AND MAY NEED TO BE ADJUSTED BY YEAR...
ferc1_data_tables = (
'f1_acb_epda', 'f1_accumdepr_prvsn', 'f1_accumdfrrdtaxcr',
'f1_adit_190_detail', 'f1_adit_190_notes', 'f1_adit_amrt_prop',
'f1_adit_other', 'f1_adit_other_prop', 'f1_allowances', 'f1_bal_sheet_cr',
'f1_capital_stock', 'f1_cash_flow', 'f1_cmmn_utlty_p_e',
'f1_comp_balance_db', 'f1_construction', 'f1_control_respdnt',
'f1_co_directors', 'f1_cptl_stk_expns', 'f1_csscslc_pcsircs',
'f1_dacs_epda', 'f1_dscnt_cptl_stk', 'f1_edcfu_epda', 'f1_elctrc_erg_acct',
'f1_elctrc_oper_rev', 'f1_elc_oper_rev_nb', 'f1_elc_op_mnt_expn',
'f1_electric', 'f1_envrnmntl_expns', 'f1_envrnmntl_fclty', 'f1_fuel',
'f1_general_info', 'f1_gnrt_plant', 'f1_important_chg', 'f1_incm_stmnt_2',
'f1_income_stmnt', 'f1_miscgen_expnelc', 'f1_misc_dfrrd_dr',
'f1_mthly_peak_otpt', 'f1_mtrl_spply', 'f1_nbr_elc_deptemp',
'f1_nonutility_prop', 'f1_note_fin_stmnt', 'f1_nuclear_fuel',
'f1_officers_co', 'f1_othr_dfrrd_cr', 'f1_othr_pd_in_cptl',
'f1_othr_reg_assets', 'f1_othr_reg_liab', 'f1_overhead', 'f1_pccidica',
'f1_plant_in_srvce', 'f1_pumped_storage', 'f1_purchased_pwr',
'f1_reconrpt_netinc', 'f1_reg_comm_expn', 'f1_respdnt_control',
'f1_retained_erng', 'f1_r_d_demo_actvty', 'f1_sales_by_sched',
'f1_sale_for_resale', 'f1_sbsdry_totals', 'f1_schedules_list',
'f1_security_holder', 'f1_slry_wg_dstrbtn', 'f1_substations',
'f1_taxacc_ppchrgyr', 'f1_unrcvrd_cost', 'f1_utltyplnt_smmry', 'f1_work',
'f1_xmssn_adds', 'f1_xmssn_elc_bothr', 'f1_xmssn_elc_fothr',
'f1_xmssn_line', 'f1_xtraordnry_loss',
'f1_hydro', 'f1_steam', 'f1_leased', 'f1_sbsdry_detail',
'f1_plant', 'f1_long_term_debt', 'f1_106_2009', 'f1_106a_2009',
'f1_106b_2009', 'f1_208_elc_dep', 'f1_231_trn_stdycst', 'f1_324_elc_expns',
'f1_325_elc_cust', 'f1_331_transiso', 'f1_338_dep_depl',
'f1_397_isorto_stl', 'f1_398_ancl_ps', 'f1_399_mth_peak',
'f1_400_sys_peak', 'f1_400a_iso_peak', 'f1_429_trans_aff',
'f1_allowances_nox', 'f1_cmpinc_hedge_a', 'f1_cmpinc_hedge',
'f1_rg_trn_srv_rev')
"""tuple: A tuple containing the FERC Form 1 tables that have the same composite
primary keys: [respondent_id, report_year, report_prd, row_number,
spplmnt_num].
"""
# Line numbers, and corresponding FERC account number
# from FERC Form 1 pages 204-207, Electric Plant in Service.
# Descriptions from: https://www.law.cornell.edu/cfr/text/18/part-101
ferc_electric_plant_accounts = pd.DataFrame.from_records([
# 1. Intangible Plant
(2, '301', 'Intangible: Organization'),
(3, '302', 'Intangible: Franchises and consents'),
(4, '303', 'Intangible: Miscellaneous intangible plant'),
(5, 'subtotal_intangible', 'Subtotal: Intangible Plant'),
# 2. Production Plant
# A. steam production
(8, '310', 'Steam production: Land and land rights'),
(9, '311', 'Steam production: Structures and improvements'),
(10, '312', 'Steam production: Boiler plant equipment'),
(11, '313', 'Steam production: Engines and engine-driven generators'),
(12, '314', 'Steam production: Turbogenerator units'),
(13, '315', 'Steam production: Accessory electric equipment'),
(14, '316', 'Steam production: Miscellaneous power plant equipment'),
(15, '317', 'Steam production: Asset retirement costs for steam production\
plant'),
(16, 'subtotal_steam_production', 'Subtotal: Steam Production Plant'),
# B. nuclear production
(18, '320', 'Nuclear production: Land and land rights (Major only)'),
(19, '321', 'Nuclear production: Structures and improvements (Major\
only)'),
(20, '322', 'Nuclear production: Reactor plant equipment (Major only)'),
(21, '323', 'Nuclear production: Turbogenerator units (Major only)'),
(22, '324', 'Nuclear production: Accessory electric equipment (Major\
only)'),
(23, '325', 'Nuclear production: Miscellaneous power plant equipment\
(Major only)'),
(24, '326', 'Nuclear production: Asset retirement costs for nuclear\
production plant (Major only)'),
(25, 'subtotal_nuclear_produciton', 'Subtotal: Nuclear Production Plant'),
# C. hydraulic production
(27, '330', 'Hydraulic production: Land and land rights'),
(28, '331', 'Hydraulic production: Structures and improvements'),
(29, '332', 'Hydraulic production: Reservoirs, dams, and waterways'),
(30, '333', 'Hydraulic production: Water wheels, turbines and generators'),
(31, '334', 'Hydraulic production: Accessory electric equipment'),
(32, '335', 'Hydraulic production: Miscellaneous power plant equipment'),
(33, '336', 'Hydraulic production: Roads, railroads and bridges'),
(34, '337', 'Hydraulic production: Asset retirement costs for hydraulic\
production plant'),
(35, 'subtotal_hydraulic_production', 'Subtotal: Hydraulic Production\
Plant'),
# D. other production
(37, '340', 'Other production: Land and land rights'),
(38, '341', 'Other production: Structures and improvements'),
(39, '342', 'Other production: Fuel holders, producers, and accessories'),
(40, '343', 'Other production: Prime movers'),
(41, '344', 'Other production: Generators'),
(42, '345', 'Other production: Accessory electric equipment'),
(43, '346', 'Other production: Miscellaneous power plant equipment'),
(44, '347', 'Other production: Asset retirement costs for other production\
plant'),
(None, '348', 'Other production: Energy Storage Equipment'),
(45, 'subtotal_other_production', 'Subtotal: Other Production Plant'),
(46, 'subtotal_production', 'Subtotal: Production Plant'),
# 3. Transmission Plant,
(48, '350', 'Transmission: Land and land rights'),
(None, '351', 'Transmission: Energy Storage Equipment'),
(49, '352', 'Transmission: Structures and improvements'),
(50, '353', 'Transmission: Station equipment'),
(51, '354', 'Transmission: Towers and fixtures'),
(52, '355', 'Transmission: Poles and fixtures'),
(53, '356', 'Transmission: Overhead conductors and devices'),
(54, '357', 'Transmission: Underground conduit'),
(55, '358', 'Transmission: Underground conductors and devices'),
(56, '359', 'Transmission: Roads and trails'),
(57, '359.1', 'Transmission: Asset retirement costs for transmission\
plant'),
(58, 'subtotal_transmission', 'Subtotal: Transmission Plant'),
# 4. Distribution Plant
(60, '360', 'Distribution: Land and land rights'),
(61, '361', 'Distribution: Structures and improvements'),
(62, '362', 'Distribution: Station equipment'),
(63, '363', 'Distribution: Storage battery equipment'),
(64, '364', 'Distribution: Poles, towers and fixtures'),
(65, '365', 'Distribution: Overhead conductors and devices'),
(66, '366', 'Distribution: Underground conduit'),
(67, '367', 'Distribution: Underground conductors and devices'),
(68, '368', 'Distribution: Line transformers'),
(69, '369', 'Distribution: Services'),
(70, '370', 'Distribution: Meters'),
(71, '371', 'Distribution: Installations on customers\' premises'),
(72, '372', 'Distribution: Leased property on customers\' premises'),
(73, '373', 'Distribution: Street lighting and signal systems'),
(74, '374', 'Distribution: Asset retirement costs for distribution plant'),
(75, 'subtotal_distribution', 'Subtotal: Distribution Plant'),
# 5. Regional Transmission and Market Operation Plant
(77, '380', 'Regional transmission: Land and land rights'),
(78, '381', 'Regional transmission: Structures and improvements'),
(79, '382', 'Regional transmission: Computer hardware'),
(80, '383', 'Regional transmission: Computer software'),
(81, '384', 'Regional transmission: Communication Equipment'),
(82, '385', 'Regional transmission: Miscellaneous Regional Transmission\
and Market Operation Plant'),
(83, '386', 'Regional transmission: Asset Retirement Costs for Regional\
Transmission and Market Operation\
Plant'),
(84, 'subtotal_regional_transmission', 'Subtotal: Transmission and Market\
Operation Plant'),
(None, '387', 'Regional transmission: [Reserved]'),
# 6. General Plant
(86, '389', 'General: Land and land rights'),
(87, '390', 'General: Structures and improvements'),
(88, '391', 'General: Office furniture and equipment'),
(89, '392', 'General: Transportation equipment'),
(90, '393', 'General: Stores equipment'),
(91, '394', 'General: Tools, shop and garage equipment'),
(92, '395', 'General: Laboratory equipment'),
(93, '396', 'General: Power operated equipment'),
(94, '397', 'General: Communication equipment'),
(95, '398', 'General: Miscellaneous equipment'),
(96, 'subtotal_general', 'Subtotal: General Plant'),
(97, '399', 'General: Other tangible property'),
(98, '399.1', 'General: Asset retirement costs for general plant'),
(99, 'total_general', 'TOTAL General Plant'),
(100, '101_and_106', 'Electric plant in service (Major only)'),
(101, '102_purchased', 'Electric plant purchased'),
(102, '102_sold', 'Electric plant sold'),
(103, '103', 'Experimental plant unclassified'),
(104, 'total_electric_plant', 'TOTAL Electric Plant in Service')],
columns=['row_number', 'ferc_account_id', 'ferc_account_description'])
"""list: A list of tuples containing row numbers, FERC account IDs, and FERC
account descriptions from FERC Form 1 pages 204 - 207, Electric Plant in
Service.
"""
# Line numbers, and corresponding FERC account number
# from FERC Form 1 page 219, ACCUMULATED PROVISION FOR DEPRECIATION
# OF ELECTRIC UTILITY PLANT (Account 108).
ferc_accumulated_depreciation = pd.DataFrame.from_records([
# Section A. Balances and Changes During Year
(1, 'balance_beginning_of_year', 'Balance Beginning of Year'),
(3, 'depreciation_expense', '(403) Depreciation Expense'),
(4, 'depreciation_expense_asset_retirement', \
'(403.1) Depreciation Expense for Asset Retirement Costs'),
(5, 'expense_electric_plant_leased_to_others', \
'(413) Exp. of Elec. Plt. Leas. to Others'),
(6, 'transportation_expenses_clearing',\
'Transportation Expenses-Clearing'),
(7, 'other_clearing_accounts', 'Other Clearing Accounts'),
(8, 'other_accounts_specified',\
'Other Accounts (Specify, details in footnote):'),
# blank: might also be other charges like line 17.
(9, 'other_charges', 'Other Charges:'),
(10, 'total_depreciation_provision_for_year',\
'TOTAL Deprec. Prov for Year (Enter Total of lines 3 thru 9)'),
(11, 'net_charges_for_plant_retired', 'Net Charges for Plant Retired:'),
(12, 'book_cost_of_plant_retired', 'Book Cost of Plant Retired'),
(13, 'cost_of_removal', 'Cost of Removal'),
(14, 'salvage_credit', 'Salvage (Credit)'),
(15, 'total_net_charges_for_plant_retired',\
'TOTAL Net Chrgs. for Plant Ret. (Enter Total of lines 12 thru 14)'),
(16, 'other_debit_or_credit_items',\
'Other Debit or Cr. Items (Describe, details in footnote):'),
# blank: can be "Other Charges", e.g. in 2012 for PSCo.
(17, 'other_charges_2', 'Other Charges 2'),
(18, 'book_cost_or_asset_retirement_costs_retired',\
'Book Cost or Asset Retirement Costs Retired'),
(19, 'balance_end_of_year', \
'Balance End of Year (Enter Totals of lines 1, 10, 15, 16, and 18)'),
# Section B. Balances at End of Year According to Functional Classification
(20, 'steam_production_end_of_year', 'Steam Production'),
(21, 'nuclear_production_end_of_year', 'Nuclear Production'),
(22, 'hydraulic_production_end_of_year',\
'Hydraulic Production-Conventional'),
(23, 'pumped_storage_end_of_year', 'Hydraulic Production-Pumped Storage'),
(24, 'other_production', 'Other Production'),
(25, 'transmission', 'Transmission'),
(26, 'distribution', 'Distribution'),
(27, 'regional_transmission_and_market_operation',
'Regional Transmission and Market Operation'),
(28, 'general', 'General'),
(29, 'total', 'TOTAL (Enter Total of lines 20 thru 28)')],
columns=['row_number', 'line_id', 'ferc_account_description'])
"""list: A list of tuples containing row numbers, FERC account IDs, and FERC
account descriptions from FERC Form 1 page 219, Accumulated Provision for
Depreciation of electric utility plant(Account 108).
"""
######################################################################
# Constants from EIA From 923 used within init.py module
######################################################################
# From Page 7 of EIA Form 923, Census Region a US state is located in
census_region = {
'NEW': 'New England',
'MAT': 'Middle Atlantic',
'SAT': 'South Atlantic',
'ESC': 'East South Central',
'WSC': 'West South Central',
'ENC': 'East North Central',
'WNC': 'West North Central',
'MTN': 'Mountain',
'PACC': 'Pacific Contiguous (OR, WA, CA)',
'PACN': 'Pacific Non-Contiguous (AK, HI)',
}
"""dict: A dictionary mapping Census Region abbreviations (keys) to Census
Region names (values).
"""
# From Page 7 of EIA Form 923
# Static list of NERC (North American Electric Reliability Corporation)
# regions, used for where plant is located
nerc_region = {
'NPCC': 'Northeast Power Coordinating Council',
'ASCC': 'Alaska Systems Coordinating Council',
'HICC': 'Hawaiian Islands Coordinating Council',
'MRO': 'Midwest Reliability Organization',
'SERC': 'SERC Reliability Corporation',
'RFC': 'Reliability First Corporation',
'SPP': 'Southwest Power Pool',
'TRE': 'Texas Regional Entity',
'FRCC': 'Florida Reliability Coordinating Council',
'WECC': 'Western Electricity Coordinating Council'
}
"""dict: A dictionary mapping NERC Region abbreviations (keys) to NERC
Region names (values).
"""
# From Page 7 of EIA Form 923: EIA’s internal consolidated NAICS sectors.
# For internal purposes, EIA consolidates NAICS categories into seven groups.
sector_eia = {
# traditional regulated electric utilities
'1': 'Electric Utility',
# Independent power producers which are not cogenerators
'2': 'NAICS-22 Non-Cogen',
# Independent power producers which are cogenerators, but whose
# primary business purpose is the sale of electricity to the public
'3': 'NAICS-22 Cogen',
# Commercial non-cogeneration facilities that produce electric power,
# are connected to the grid, and can sell power to the public
'4': 'Commercial NAICS Non-Cogen',
# Commercial cogeneration facilities that produce electric power, are
# connected to the grid, and can sell power to the public
'5': 'Commercial NAICS Cogen',
# Industrial non-cogeneration facilities that produce electric power, are
# connected to the grid, and can sell power to the public
'6': 'Industrial NAICS Non-Cogen',
# Industrial cogeneration facilities that produce electric power, are
# connected to the grid, and can sell power to the public
'7': 'Industrial NAICS Cogen'
}
"""dict: A dictionary mapping EIA numeric codes (keys) to EIA’s internal
consolidated NAICS sectors (values).
"""
# EIA 923: EIA Type of prime mover:
prime_movers_eia923 = {
'BA': 'Energy Storage, Battery',
'BT': 'Turbines Used in a Binary Cycle. Including those used for geothermal applications',
'CA': 'Combined-Cycle -- Steam Part',
'CC': 'Combined-Cycle, Total Unit',
'CE': 'Energy Storage, Compressed Air',
'CP': 'Energy Storage, Concentrated Solar Power',
'CS': 'Combined-Cycle Single-Shaft Combustion Turbine and Steam Turbine share of single',
'CT': 'Combined-Cycle Combustion Turbine Part',
'ES': 'Energy Storage, Other (Specify on Schedule 9, Comments)',
'FC': 'Fuel Cell',
'FW': 'Energy Storage, Flywheel',
'GT': 'Combustion (Gas) Turbine. Including Jet Engine design',
'HA': 'Hydrokinetic, Axial Flow Turbine',
'HB': 'Hydrokinetic, Wave Buoy',
'HK': 'Hydrokinetic, Other',
'HY': 'Hydraulic Turbine. Including turbines associated with delivery of water by pipeline.',
'IC': 'Internal Combustion (diesel, piston, reciprocating) Engine',
'PS': 'Energy Storage, Reversible Hydraulic Turbine (Pumped Storage)',
'OT': 'Other',
'ST': 'Steam Turbine. Including Nuclear, Geothermal, and Solar Steam (does not include Combined Cycle).',
'PV': 'Photovoltaic',
'WT': 'Wind Turbine, Onshore',
'WS': 'Wind Turbine, Offshore'
}
"""dict: A dictionary mapping EIA 923 prime mover codes (keys) and prime mover
names / descriptions (values).
"""
# EIA 923: The fuel code reported to EIA. Two or three letter alphanumeric:
fuel_type_eia923 = {
'AB': 'Agricultural By-Products',
'ANT': 'Anthracite Coal',
'BFG': 'Blast Furnace Gas',
'BIT': 'Bituminous Coal',
'BLQ': 'Black Liquor',
'CBL': 'Coal, Blended',
'DFO': 'Distillate Fuel Oil. Including diesel, No. 1, No. 2, and No. 4 fuel oils.',
'GEO': 'Geothermal',
'JF': 'Jet Fuel',
'KER': 'Kerosene',
'LFG': 'Landfill Gas',
'LIG': 'Lignite Coal',
'MSB': 'Biogenic Municipal Solid Waste',
'MSN': 'Non-biogenic Municipal Solid Waste',
'MSW': 'Municipal Solid Waste',
'MWH': 'Electricity used for energy storage',
'NG': 'Natural Gas',
'NUC': 'Nuclear. Including Uranium, Plutonium, and Thorium.',
'OBG': 'Other Biomass Gas. Including digester gas, methane, and other biomass gases.',
'OBL': 'Other Biomass Liquids',
'OBS': 'Other Biomass Solids',
'OG': 'Other Gas',
'OTH': 'Other Fuel',
'PC': 'Petroleum Coke',
'PG': 'Gaseous Propane',
'PUR': 'Purchased Steam',
'RC': 'Refined Coal',
'RFO': 'Residual Fuel Oil. Including No. 5 & 6 fuel oils and bunker C fuel oil.',
'SC': 'Coal-based Synfuel. Including briquettes, pellets, or extrusions, which are formed by binding materials or processes that recycle materials.',
'SGC': 'Coal-Derived Synthesis Gas',
'SGP': 'Synthesis Gas from Petroleum Coke',
'SLW': 'Sludge Waste',
'SUB': 'Subbituminous Coal',
'SUN': 'Solar',
'TDF': 'Tire-derived Fuels',
'WAT': 'Water at a Conventional Hydroelectric Turbine and water used in Wave Buoy Hydrokinetic Technology, current Hydrokinetic Technology, Tidal Hydrokinetic Technology, and Pumping Energy for Reversible (Pumped Storage) Hydroelectric Turbines.',
'WC': 'Waste/Other Coal. Including anthracite culm, bituminous gob, fine coal, lignite waste, waste coal.',
'WDL': 'Wood Waste Liquids, excluding Black Liquor. Including red liquor, sludge wood, spent sulfite liquor, and other wood-based liquids.',
'WDS': 'Wood/Wood Waste Solids. Including paper pellets, railroad ties, utility poles, wood chips, bark, and other wood waste solids.',
'WH': 'Waste Heat not directly attributed to a fuel source',
'WND': 'Wind',
'WO': 'Waste/Other Oil. Including crude oil, liquid butane, liquid propane, naphtha, oil waste, re-refined motor oil, sludge oil, tar oil, or other petroleum-based liquid wastes.'
}
"""dict: A dictionary mapping EIA 923 fuel type codes (keys) and fuel type
names / descriptions (values).
"""
# Fuel type strings for EIA 923 generator fuel table
fuel_type_eia923_gen_fuel_coal_strings = [
'ant', 'bit', 'cbl', 'lig', 'pc', 'rc', 'sc', 'sub', 'wc', ]
"""list: The list of EIA 923 Generation Fuel strings associated with coal fuel.
"""
fuel_type_eia923_gen_fuel_oil_strings = [
'dfo', 'rfo', 'wo', 'jf', 'ker', ]
"""list: The list of EIA 923 Generation Fuel strings associated with oil fuel.
"""
fuel_type_eia923_gen_fuel_gas_strings = [
'bfg', 'lfg', 'ng', 'og', 'obg', 'pg', 'sgc', 'sgp', ]
"""list: The list of EIA 923 Generation Fuel strings associated with gas fuel.
"""
fuel_type_eia923_gen_fuel_solar_strings = ['sun', ]
"""list: The list of EIA 923 Generation Fuel strings associated with solar
power.
"""
fuel_type_eia923_gen_fuel_wind_strings = ['wnd', ]
"""list: The list of EIA 923 Generation Fuel strings associated with wind
power.
"""
fuel_type_eia923_gen_fuel_hydro_strings = ['wat', ]
"""list: The list of EIA 923 Generation Fuel strings associated with hydro
power.
"""
fuel_type_eia923_gen_fuel_nuclear_strings = ['nuc', ]
"""list: The list of EIA 923 Generation Fuel strings associated with nuclear
power.
"""
fuel_type_eia923_gen_fuel_waste_strings = [
'ab', 'blq', 'msb', 'msn', 'msw', 'obl', 'obs', 'slw', 'tdf', 'wdl', 'wds']
"""list: The list of EIA 923 Generation Fuel strings associated with solid waste
fuel.
"""
fuel_type_eia923_gen_fuel_other_strings = ['geo', 'mwh', 'oth', 'pur', 'wh', ]
"""list: The list of EIA 923 Generation Fuel strings associated with geothermal
power.
"""
fuel_type_eia923_gen_fuel_simple_map = {
'coal': fuel_type_eia923_gen_fuel_coal_strings,
'oil': fuel_type_eia923_gen_fuel_oil_strings,
'gas': fuel_type_eia923_gen_fuel_gas_strings,
'solar': fuel_type_eia923_gen_fuel_solar_strings,
'wind': fuel_type_eia923_gen_fuel_wind_strings,
'hydro': fuel_type_eia923_gen_fuel_hydro_strings,
'nuclear': fuel_type_eia923_gen_fuel_nuclear_strings,
'waste': fuel_type_eia923_gen_fuel_waste_strings,
'other': fuel_type_eia923_gen_fuel_other_strings,
}
"""dict: A dictionary mapping EIA 923 Generation Fuel fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# Fuel type strings for EIA 923 boiler fuel table
fuel_type_eia923_boiler_fuel_coal_strings = [
'ant', 'bit', 'lig', 'pc', 'rc', 'sc', 'sub', 'wc', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
coal.
"""
fuel_type_eia923_boiler_fuel_oil_strings = ['dfo', 'rfo', 'wo', 'jf', 'ker', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
oil.
"""
fuel_type_eia923_boiler_fuel_gas_strings = [
'bfg', 'lfg', 'ng', 'og', 'obg', 'pg', 'sgc', 'sgp', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
gas.
"""
fuel_type_eia923_boiler_fuel_waste_strings = [
'ab', 'blq', 'msb', 'msn', 'obl', 'obs', 'slw', 'tdf', 'wdl', 'wds', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
waste.
"""
fuel_type_eia923_boiler_fuel_other_strings = ['oth', 'pur', 'wh', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
other.
"""
fuel_type_eia923_boiler_fuel_simple_map = {
'coal': fuel_type_eia923_boiler_fuel_coal_strings,
'oil': fuel_type_eia923_boiler_fuel_oil_strings,
'gas': fuel_type_eia923_boiler_fuel_gas_strings,
'waste': fuel_type_eia923_boiler_fuel_waste_strings,
'other': fuel_type_eia923_boiler_fuel_other_strings,
}
"""dict: A dictionary mapping EIA 923 Boiler Fuel fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# PUDL consolidation of EIA923 AER fuel type strings into the same categories as
# 'energy_source_eia923' plus additional renewable and nuclear categories.
# These classifications are not currently used, as the EIA fuel type and energy
# source designations provide more detailed information.
aer_coal_strings = ['col', 'woc', 'pc']
"""list: A list of EIA 923 AER fuel type strings associated with coal.
"""
aer_gas_strings = ['mlg', 'ng', 'oog']
"""list: A list of EIA 923 AER fuel type strings associated with gas.
"""
aer_oil_strings = ['dfo', 'rfo', 'woo']
"""list: A list of EIA 923 AER fuel type strings associated with oil.
"""
aer_solar_strings = ['sun']
"""list: A list of EIA 923 AER fuel type strings associated with solar power.
"""
aer_wind_strings = ['wnd']
"""list: A list of EIA 923 AER fuel type strings associated with wind power.
"""
aer_hydro_strings = ['hps', 'hyc']
"""list: A list of EIA 923 AER fuel type strings associated with hydro power.
"""
aer_nuclear_strings = ['nuc']
"""list: A list of EIA 923 AER fuel type strings associated with nuclear power.
"""
aer_waste_strings = ['www']
"""list: A list of EIA 923 AER fuel type strings associated with waste.
"""
aer_other_strings = ['geo', 'orw', 'oth']
"""list: A list of EIA 923 AER fuel type strings associated with other fuel.
"""
aer_fuel_type_strings = {
'coal': aer_coal_strings,
'gas': aer_gas_strings,
'oil': aer_oil_strings,
'solar': aer_solar_strings,
'wind': aer_wind_strings,
'hydro': aer_hydro_strings,
'nuclear': aer_nuclear_strings,
'waste': aer_waste_strings,
'other': aer_other_strings
}
"""dict: A dictionary mapping EIA 923 AER fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# EIA 923: A partial aggregation of the reported fuel type codes into
# larger categories used by EIA in, for example,
# the Annual Energy Review (AER). Two or three letter alphanumeric.
# See the Fuel Code table (Table 5), below:
fuel_type_aer_eia923 = {
'SUN': 'Solar PV and thermal',
'COL': 'Coal',
'DFO': 'Distillate Petroleum',
'GEO': 'Geothermal',
'HPS': 'Hydroelectric Pumped Storage',
'HYC': 'Hydroelectric Conventional',
'MLG': 'Biogenic Municipal Solid Waste and Landfill Gas',
'NG': 'Natural Gas',
'NUC': 'Nuclear',
'OOG': 'Other Gases',
'ORW': 'Other Renewables',
'OTH': 'Other (including nonbiogenic MSW)',
'PC': 'Petroleum Coke',
'RFO': 'Residual Petroleum',
'WND': 'Wind',
'WOC': 'Waste Coal',
'WOO': 'Waste Oil',
'WWW': 'Wood and Wood Waste'
}
"""dict: A dictionary mapping EIA 923 AER fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
fuel_type_eia860_coal_strings = ['ant', 'bit', 'cbl', 'lig', 'pc', 'rc', 'sc',
'sub', 'wc', 'coal', 'petroleum coke', 'col',
'woc']
"""list: A list of strings from EIA 860 associated with fuel type coal.
"""
fuel_type_eia860_oil_strings = ['dfo', 'jf', 'ker', 'rfo', 'wo', 'woo',
'petroleum']
"""list: A list of strings from EIA 860 associated with fuel type oil.
"""
fuel_type_eia860_gas_strings = ['bfg', 'lfg', 'mlg', 'ng', 'obg', 'og', 'pg',
'sgc', 'sgp', 'natural gas', 'other gas',
'oog', 'sg']
"""list: A list of strings from EIA 860 associated with fuel type gas.
"""
fuel_type_eia860_solar_strings = ['sun', 'solar']
"""list: A list of strings from EIA 860 associated with solar power.
"""
fuel_type_eia860_wind_strings = ['wnd', 'wind', 'wt']
"""list: A list of strings from EIA 860 associated with wind power.
"""
fuel_type_eia860_hydro_strings = ['wat', 'hyc', 'hps', 'hydro']
"""list: A list of strings from EIA 860 associated with hydro power.
"""
fuel_type_eia860_nuclear_strings = ['nuc', 'nuclear']
"""list: A list of strings from EIA 860 associated with nuclear power.
"""
fuel_type_eia860_waste_strings = ['ab', 'blq', 'bm', 'msb', 'msn', 'obl',
'obs', 'slw', 'tdf', 'wdl', 'wds', 'biomass',
'msw', 'www']
"""list: A list of strings from EIA 860 associated with fuel type waste.
"""
fuel_type_eia860_other_strings = ['mwh', 'oth', 'pur', 'wh', 'geo', 'none',
'orw', 'other']
"""list: A list of strings from EIA 860 associated with fuel type other.
"""
fuel_type_eia860_simple_map = {
'coal': fuel_type_eia860_coal_strings,
'oil': fuel_type_eia860_oil_strings,
'gas': fuel_type_eia860_gas_strings,
'solar': fuel_type_eia860_solar_strings,
'wind': fuel_type_eia860_wind_strings,
'hydro': fuel_type_eia860_hydro_strings,
'nuclear': fuel_type_eia860_nuclear_strings,
'waste': fuel_type_eia860_waste_strings,
'other': fuel_type_eia860_other_strings,
}
"""dict: A dictionary mapping EIA 860 fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# EIA 923/860: Lumping of energy source categories.
energy_source_eia_simple_map = {
'coal': ['ANT', 'BIT', 'LIG', 'PC', 'SUB', 'WC', 'RC'],
'oil': ['DFO', 'JF', 'KER', 'RFO', 'WO'],
'gas': ['BFG', 'LFG', 'NG', 'OBG', 'OG', 'PG', 'SG', 'SGC', 'SGP'],
'solar': ['SUN'],
'wind': ['WND'],
'hydro': ['WAT'],
'nuclear': ['NUC'],
'waste': ['AB', 'BLQ', 'MSW', 'OBL', 'OBS', 'SLW', 'TDF', 'WDL', 'WDS'],
'other': ['GEO', 'MWH', 'OTH', 'PUR', 'WH']
}
"""dict: A dictionary mapping EIA fuel types (keys) to fuel codes (values).
"""
fuel_group_eia923_simple_map = {
'coal': ['coal', 'petroleum coke'],
'oil': ['petroleum'],
'gas': ['natural gas', 'other gas']
}
"""dict: A dictionary mapping EIA 923 simple fuel types("oil", "coal", "gas")
(keys) to fuel types (values).
"""
# EIA 923: The type of physical units fuel consumption is reported in.
# All consumption is reported in short tons for solids,
# thousands of cubic feet for gases, or barrels for liquids.
fuel_units_eia923 = {
'mcf': 'Thousands of cubic feet (for gases)',
'short_tons': 'Short tons (for solids)',
'barrels': 'Barrels (for liquids)'
}
"""dict: A dictionary mapping EIA 923 fuel units (keys) to fuel unit
descriptions (values).
"""
# EIA 923: Designates the purchase type under which receipts occurred
# in the reporting month. One or two character alphanumeric:
contract_type_eia923 = {
'C': 'Contract - Fuel received under a purchase order or contract with a term of one year or longer. Contracts with a shorter term are considered spot purchases ',
'NC': 'New Contract - Fuel received under a purchase order or contract with duration of one year or longer, under which deliveries were first made during the reporting month',
'N': 'New Contract - see NC code. This abbreviation existed only in 2008 before being replaced by NC.',
'S': 'Spot Purchase',
'T': 'Tolling Agreement – Fuel received under a tolling agreement (bartering arrangement of fuel for generation)'
}
"""dict: A dictionary mapping EIA 923 contract codes (keys) to contract
descriptions (values) for each month in the Fuel Receipts and Costs table.
"""
# EIA 923: The fuel code associated with the fuel receipt.
# Defined on Page 7 of EIA Form 923
# Two or three character alphanumeric:
energy_source_eia923 = {
'ANT': 'Anthracite Coal',
'BFG': 'Blast Furnace Gas',
'BM': 'Biomass',
'BIT': 'Bituminous Coal',
'DFO': 'Distillate Fuel Oil. Including diesel, No. 1, No. 2, and No. 4 fuel oils.',
'JF': 'Jet Fuel',
'KER': 'Kerosene',
'LIG': 'Lignite Coal',
'NG': 'Natural Gas',
'PC': 'Petroleum Coke',
'PG': 'Gaseous Propane',
'OG': 'Other Gas',
'RC': 'Refined Coal',
'RFO': 'Residual Fuel Oil. Including No. 5 & 6 fuel oils and bunker C fuel oil.',
'SG': 'Synthesis Gas from Petroleum Coke',
'SGP': 'Petroleum Coke Derived Synthesis Gas',
'SC': 'Coal-based Synfuel. Including briquettes, pellets, or extrusions, which are formed by binding materials or processes that recycle materials.',
'SUB': 'Subbituminous Coal',
'WC': 'Waste/Other Coal. Including anthracite culm, bituminous gob, fine coal, lignite waste, waste coal.',
'WO': 'Waste/Other Oil. Including crude oil, liquid butane, liquid propane, naphtha, oil waste, re-refined motor oil, sludge oil, tar oil, or other petroleum-based liquid wastes.',
}
"""dict: A dictionary mapping fuel codes (keys) to fuel descriptions (values)
for each fuel receipt from the EIA 923 Fuel Receipts and Costs table.
"""
# EIA 923 Fuel Group, from Page 7 EIA Form 923
# Groups fossil fuel energy sources into fuel groups that are located in the
# Electric Power Monthly: Coal, Natural Gas, Petroleum, Petroleum Coke.
fuel_group_eia923 = (
'coal',
'natural_gas',
'petroleum',
'petroleum_coke',
'other_gas'
)
"""tuple: A tuple containing EIA 923 fuel groups.
"""
# EIA 923: Type of Coal Mine as defined on Page 7 of EIA Form 923
coalmine_type_eia923 = {
'P': 'Preparation Plant',
'S': 'Surface',
'U': 'Underground',
'US': 'Both an underground and surface mine with most coal extracted from underground',
'SU': 'Both an underground and surface mine with most coal extracted from surface',
}
"""dict: A dictionary mapping EIA 923 coal mine type codes (keys) to
descriptions (values).
"""
# EIA 923: State abbreviation related to coal mine location.
# Country abbreviations are also used in this category, but they are
# non-standard because of collisions with US state names. Instead of using
# the provided non-standard names, we convert to ISO-3166-1 three letter
# country codes https://en.wikipedia.org/wiki/ISO_3166-1_alpha-3
coalmine_country_eia923 = {
'AU': 'AUS', # Australia
'CL': 'COL', # Colombia
'CN': 'CAN', # Canada
'IS': 'IDN', # Indonesia
'PL': 'POL', # Poland
'RS': 'RUS', # Russia
'UK': 'GBR', # United Kingdom of Great Britain
'VZ': 'VEN', # Venezuela
'OT': 'other_country',
'IM': 'unknown'
}
"""dict: A dictionary mapping coal mine country codes (keys) to ISO-3166-1 three
letter country codes (values).
"""
# EIA 923: Mode for the longest / second longest distance.
transport_modes_eia923 = {
'RR': 'Rail: Shipments of fuel moved to consumers by rail \
(private or public/commercial). Included is coal hauled to or \
away from a railroad siding by truck if the truck did not use public\
roads.',
'RV': 'River: Shipments of fuel moved to consumers via river by barge. \
Not included are shipments to Great Lakes coal loading docks, \
tidewater piers, or coastal ports.',
'GL': 'Great Lakes: Shipments of coal moved to consumers via \
the Great Lakes. These shipments are moved via the Great Lakes \
coal loading docks, which are identified by name and location as \
follows: Conneaut Coal Storage & Transfer, Conneaut, Ohio; \
NS Coal Dock (Ashtabula Coal Dock), Ashtabula, Ohio; \
Sandusky Coal Pier, Sandusky, Ohio; Toledo Docks, Toledo, Ohio; \
KCBX Terminals Inc., Chicago, Illinois; \
Superior Midwest Energy Terminal, Superior, Wisconsin',
'TP': 'Tidewater Piers and Coastal Ports: Shipments of coal moved to \
Tidewater Piers and Coastal Ports for further shipments to consumers \
via coastal water or ocean. The Tidewater Piers and Coastal Ports \
are identified by name and location as follows: Dominion Terminal \
Associates, Newport News, Virginia; McDuffie Coal Terminal, Mobile, \
Alabama; IC Railmarine Terminal, Convent, Louisiana; \
International Marine Terminals, Myrtle Grove, Louisiana; \
Cooper/T. Smith Stevedoring Co. Inc., Darrow, Louisiana; \
Seward Terminal Inc., Seward, Alaska; Los Angeles Export Terminal, \
Inc., Los Angeles, California; Levin-Richmond Terminal Corp., \
Richmond, California; Baltimore Terminal, Baltimore, Maryland; \
Norfolk Southern Lamberts Point P-6, Norfolk, Virginia; \
Chesapeake Bay Piers, Baltimore, Maryland; Pier IX Terminal Company, \
Newport News, Virginia; Electro-Coal Transport Corp., Davant, \
Louisiana',
'WT': 'Water: Shipments of fuel moved to consumers by other waterways.',
'TR': 'Truck: Shipments of fuel moved to consumers by truck. \
Not included is fuel hauled to or away from a railroad siding by \
truck on non-public roads.',
'tr': 'Truck: Shipments of fuel moved to consumers by truck. \
Not included is fuel hauled to or away from a railroad siding by \
truck on non-public roads.',
'TC': 'Tramway/Conveyor: Shipments of fuel moved to consumers \
by tramway or conveyor.',
'SP': 'Slurry Pipeline: Shipments of coal moved to consumers \
by slurry pipeline.',
'PL': 'Pipeline: Shipments of fuel moved to consumers by pipeline'
}
"""dict: A dictionary mapping primary and secondary transportation mode codes
(keys) to descriptions (values).
"""
# We need to include all of the columns which we want to keep for either the
# entity or annual tables. The order here matters: we need to harvest the plant
# location before harvesting the location of the utilities, for example.
entities = {
'plants': [
# base cols
['plant_id_eia'],
# static cols
['balancing_authority_code_eia', 'balancing_authority_name_eia',
'city', 'county', 'ferc_cogen_status',
'ferc_exempt_wholesale_generator', 'ferc_small_power_producer',
'grid_voltage_2_kv', 'grid_voltage_3_kv', 'grid_voltage_kv',
'iso_rto_code', 'latitude', 'longitude', 'service_area',
'plant_name_eia', 'primary_purpose_naics_id',
'sector_id', 'sector_name', 'state', 'street_address', 'zip_code'],
# annual cols
['ash_impoundment', 'ash_impoundment_lined', 'ash_impoundment_status',
'datum', 'energy_storage', 'ferc_cogen_docket_no', 'water_source',
'ferc_exempt_wholesale_generator_docket_no',
'ferc_small_power_producer_docket_no',
'liquefied_natural_gas_storage',
'natural_gas_local_distribution_company', 'natural_gas_storage',
'natural_gas_pipeline_name_1', 'natural_gas_pipeline_name_2',
'natural_gas_pipeline_name_3', 'nerc_region', 'net_metering',
'pipeline_notes', 'regulatory_status_code',
'transmission_distribution_owner_id',
'transmission_distribution_owner_name',
'transmission_distribution_owner_state', 'utility_id_eia'],
# need type fixing
{},
],
'generators': [
# base cols
['plant_id_eia', 'generator_id'],
# static cols
['prime_mover_code', 'duct_burners', 'operating_date',
'topping_bottoming_code', 'solid_fuel_gasification',
'pulverized_coal_tech', 'fluidized_bed_tech', 'subcritical_tech',
'supercritical_tech', 'ultrasupercritical_tech', 'stoker_tech',
'other_combustion_tech', 'bypass_heat_recovery',
'rto_iso_lmp_node_id', 'rto_iso_location_wholesale_reporting_id',
'associated_combined_heat_power', 'original_planned_operating_date',
'operating_switch', 'previously_canceled'],
# annual cols
['capacity_mw', 'fuel_type_code_pudl', 'multiple_fuels',
'ownership_code', 'owned_by_non_utility', 'deliver_power_transgrid',
'summer_capacity_mw', 'winter_capacity_mw', 'summer_capacity_estimate',
'winter_capacity_estimate', 'minimum_load_mw', 'distributed_generation',
'technology_description', 'reactive_power_output_mvar',
'energy_source_code_1', 'energy_source_code_2',
'energy_source_code_3', 'energy_source_code_4',
'energy_source_code_5', 'energy_source_code_6',
'energy_source_1_transport_1', 'energy_source_1_transport_2',
'energy_source_1_transport_3', 'energy_source_2_transport_1',
'energy_source_2_transport_2', 'energy_source_2_transport_3',
'startup_source_code_1', 'startup_source_code_2',
'startup_source_code_3', 'startup_source_code_4',
'time_cold_shutdown_full_load_code', 'syncronized_transmission_grid',
'turbines_num', 'operational_status_code', 'operational_status',
'planned_modifications', 'planned_net_summer_capacity_uprate_mw',
'planned_net_winter_capacity_uprate_mw', 'planned_new_capacity_mw',
'planned_uprate_date', 'planned_net_summer_capacity_derate_mw',
'planned_net_winter_capacity_derate_mw', 'planned_derate_date',
'planned_new_prime_mover_code', 'planned_energy_source_code_1',
'planned_repower_date', 'other_planned_modifications',
'other_modifications_date', 'planned_retirement_date',
'carbon_capture', 'cofire_fuels', 'switch_oil_gas',
'turbines_inverters_hydrokinetics', 'nameplate_power_factor',
'uprate_derate_during_year', 'uprate_derate_completed_date',
'current_planned_operating_date', 'summer_estimated_capability_mw',
'winter_estimated_capability_mw', 'retirement_date',
'utility_id_eia', 'data_source'],
# need type fixing
{}
],
# utilities must come after plants. plant location needs to be
# removed before the utility locations are compiled
'utilities': [
# base cols
['utility_id_eia'],
# static cols
['utility_name_eia'],
# annual cols
['street_address', 'city', 'state', 'zip_code', 'entity_type',
'plants_reported_owner', 'plants_reported_operator',
'plants_reported_asset_manager', 'plants_reported_other_relationship',
'attention_line', 'address_2', 'zip_code_4',
'contact_firstname', 'contact_lastname', 'contact_title',
'contact_firstname_2', 'contact_lastname_2', 'contact_title_2',
'phone_extension_1', 'phone_extension_2', 'phone_number_1',
'phone_number_2'],
# need type fixing
{'utility_id_eia': 'int64', }, ],
'boilers': [
# base cols
['plant_id_eia', 'boiler_id'],
# static cols
['prime_mover_code'],
# annual cols
[],
# need type fixing
{},
]
}
"""dict: A dictionary containing table name strings (keys) and lists of columns
to keep for those tables (values).
"""
epacems_tables = ("hourly_emissions_epacems")
"""tuple: A tuple containing tables of EPA CEMS data to pull into PUDL.
"""
files_dict_epaipm = {
'transmission_single_epaipm': '*table_3-21*',
'transmission_joint_epaipm': '*transmission_joint_ipm*',
'load_curves_epaipm': '*table_2-2_*',
'plant_region_map_epaipm': '*needs_v6*',
}
"""dict: A dictionary of EPA IPM tables and strings that files of those tables
contain.
"""
epaipm_url_ext = {
'transmission_single_epaipm': 'table_3-21_annual_transmission_capabilities_of_u.s._model_regions_in_epa_platform_v6_-_2021.xlsx',
'load_curves_epaipm': 'table_2-2_load_duration_curves_used_in_epa_platform_v6.xlsx',
'plant_region_map_epaipm': 'needs_v6_november_2018_reference_case_0.xlsx',
}
"""dict: A dictionary of EPA IPM tables and associated URLs extensions for
downloading that table's data.
"""
epaipm_region_names = [
'ERC_PHDL', 'ERC_REST', 'ERC_FRNT', 'ERC_GWAY', 'ERC_WEST',
'FRCC', 'NENG_CT', 'NENGREST', 'NENG_ME', 'MIS_AR', 'MIS_IL',
'MIS_INKY', 'MIS_IA', 'MIS_MIDA', 'MIS_LA', 'MIS_LMI', 'MIS_MNWI',
'MIS_D_MS', 'MIS_MO', 'MIS_MAPP', 'MIS_AMSO', 'MIS_WOTA',
'MIS_WUMS', 'NY_Z_A', 'NY_Z_B', 'NY_Z_C&E', 'NY_Z_D', 'NY_Z_F',
'NY_Z_G-I', 'NY_Z_J', 'NY_Z_K', 'PJM_West', 'PJM_AP', 'PJM_ATSI',
'PJM_COMD', 'PJM_Dom', 'PJM_EMAC', 'PJM_PENE', 'PJM_SMAC',
'PJM_WMAC', 'S_C_KY', 'S_C_TVA', 'S_D_AECI', 'S_SOU', 'S_VACA',
'SPP_NEBR', 'SPP_N', 'SPP_SPS', 'SPP_WEST', 'SPP_KIAM', 'SPP_WAUE',
'WECC_AZ', 'WEC_BANC', 'WECC_CO', 'WECC_ID', 'WECC_IID',
'WEC_LADW', 'WECC_MT', 'WECC_NM', 'WEC_CALN', 'WECC_NNV',
'WECC_PNW', 'WEC_SDGE', 'WECC_SCE', 'WECC_SNV', 'WECC_UT',
'WECC_WY', 'CN_AB', 'CN_BC', 'CN_NL', 'CN_MB', 'CN_NB', 'CN_NF',
'CN_NS', 'CN_ON', 'CN_PE', 'CN_PQ', 'CN_SK',
]
"""list: A list of EPA IPM region names."""
epaipm_region_aggregations = {
'PJM': [
'PJM_AP', 'PJM_ATSI', 'PJM_COMD', 'PJM_Dom',
'PJM_EMAC', 'PJM_PENE', 'PJM_SMAC', 'PJM_WMAC'
],
'NYISO': [
'NY_Z_A', 'NY_Z_B', 'NY_Z_C&E', 'NY_Z_D',
'NY_Z_F', 'NY_Z_G-I', 'NY_Z_J', 'NY_Z_K'
],
'ISONE': ['NENG_CT', 'NENGREST', 'NENG_ME'],
'MISO': [
'MIS_AR', 'MIS_IL', 'MIS_INKY', 'MIS_IA',
'MIS_MIDA', 'MIS_LA', 'MIS_LMI', 'MIS_MNWI', 'MIS_D_MS',
'MIS_MO', 'MIS_MAPP', 'MIS_AMSO', 'MIS_WOTA', 'MIS_WUMS'
],
'SPP': [
'SPP_NEBR', 'SPP_N', 'SPP_SPS', 'SPP_WEST', 'SPP_KIAM', 'SPP_WAUE'
],
'WECC_NW': [
'WECC_CO', 'WECC_ID', 'WECC_MT', 'WECC_NNV',
'WECC_PNW', 'WECC_UT', 'WECC_WY'
]
}
"""
dict: A dictionary containing EPA IPM regions (keys) and lists of their
associated abbreviations (values).
"""
epaipm_rename_dict = {
'transmission_single_epaipm': {
'From': 'region_from',
'To': 'region_to',
'Capacity TTC (MW)': 'firm_ttc_mw',
'Energy TTC (MW)': 'nonfirm_ttc_mw',
'Transmission Tariff (2016 mills/kWh)': 'tariff_mills_kwh',
},
'load_curves_epaipm': {
'day': 'day_of_year',
'region': 'region_id_epaipm',
},
'plant_region_map_epaipm': {
'ORIS Plant Code': 'plant_id_eia',
'Region Name': 'region',
},
}
"""
dict: A dictionary of dictionaries containing EPA IPM tables (keys) and the
columns in each table to be renamed along with the replacement names (values).
"""
glue_pudl_tables = ('plants_eia', 'plants_ferc', 'plants', 'utilities_eia',
'utilities_ferc', 'utilities', 'utility_plant_assn')
"""tuple: A tuple containing the glue tables that link FERC Form 1 and EIA
plants and utilities to each other within PUDL.
"""
data_sources = (
'eia860',
'eia861',
'eia923',
'epacems',
'epaipm',
'ferc1',
'ferc714',
# 'pudl'
)
"""tuple: A tuple containing the data sources we are able to pull into PUDL."""
# All the years for which we ought to be able to download these data sources
data_years = {
'eia860': tuple(range(2001, 2020)),
'eia861': tuple(range(1990, 2020)),
'eia923': tuple(range(2001, 2020)),
'epacems': tuple(range(1995, 2021)),
'epaipm': (None, ),
'ferc1': tuple(range(1994, 2020)),
'ferc714': (None, ),
}
"""
dict: A dictionary of data sources (keys) and tuples containing the years
that we expect to be able to download for each data source (values).
"""
# The full set of partitions we currently expect to be able to ingest, per source:
working_partitions = {
'eia860': {
'years': tuple(range(2004, 2020))
},
'eia860m': {
'year_month': '2020-11'
},
'eia861': {
'years': tuple(range(2001, 2020))
},
'eia923': {
'years': tuple(range(2001, 2020))
},
'epacems': {
'years': tuple(range(1995, 2021)),
'states': tuple(cems_states.keys())},
'ferc1': {
'years': tuple(range(1994, 2020))
},
'ferc714': {},
}
"""
dict: A dictionary of data sources (keys) and dictionaries (values) mapping
partition type names (sub-keys) to the partitions themselves (sub-values),
such as the tuples of years for each data source that can be ingested into
PUDL.
"""
pudl_tables = {
'eia860': eia860_pudl_tables,
'eia861': (
"service_territory_eia861",
"balancing_authority_eia861",
"sales_eia861",
"advanced_metering_infrastructure_eia861",
"demand_response_eia861",
"demand_side_management_eia861",
"distributed_generation_eia861",
"distribution_systems_eia861",
"dynamic_pricing_eia861",
"energy_efficiency_eia861",
"green_pricing_eia861",
"mergers_eia861",
"net_metering_eia861",
"non_net_metering_eia861",
"operational_data_eia861",
"reliability_eia861",
"utility_data_eia861",
),
'eia923': eia923_pudl_tables,
'epacems': epacems_tables,
'epaipm': epaipm_pudl_tables,
'ferc1': ferc1_pudl_tables,
'ferc714': (
"respondent_id_ferc714",
"id_certification_ferc714",
"gen_plants_ba_ferc714",
"demand_monthly_ba_ferc714",
"net_energy_load_ba_ferc714",
"adjacency_ba_ferc714",
"interchange_ba_ferc714",
"lambda_hourly_ba_ferc714",
"lambda_description_ferc714",
"description_pa_ferc714",
"demand_forecast_pa_ferc714",
"demand_hourly_pa_ferc714",
),
'glue': glue_pudl_tables,
}
"""
dict: A dictionary containing data sources (keys) and the list of associated
tables from that datasource that can be pulled into PUDL (values).
"""
base_data_urls = {
'eia860': 'https://www.eia.gov/electricity/data/eia860',
'eia861': 'https://www.eia.gov/electricity/data/eia861/zip',
'eia923': 'https://www.eia.gov/electricity/data/eia923',
'epacems': 'ftp://newftp.epa.gov/dmdnload/emissions/hourly/monthly',
'ferc1': 'ftp://eforms1.ferc.gov/f1allyears',
'ferc714': 'https://www.ferc.gov/docs-filing/forms/form-714/data',
'ferceqr': 'ftp://eqrdownload.ferc.gov/DownloadRepositoryProd/BulkNew/CSV',
'msha': 'https://arlweb.msha.gov/OpenGovernmentData/DataSets',
'epaipm': 'https://www.epa.gov/sites/production/files/2019-03',
'pudl': 'https://catalyst.coop/pudl/'
}
"""
dict: A dictionary containing data sources (keys) and their base data URLs
(values).
"""
need_fix_inting = {
'plants_steam_ferc1': ('construction_year', 'installation_year'),
'plants_small_ferc1': ('construction_year', 'ferc_license_id'),
'plants_hydro_ferc1': ('construction_year', 'installation_year',),
'plants_pumped_storage_ferc1': ('construction_year', 'installation_year',),
'hourly_emissions_epacems': ('facility_id', 'unit_id_epa',),
}
"""
dict: A dictionary containing tables (keys) and tuples of the names of their
integer-type columns whose null values need fixing (values).
"""
contributors = {
"catalyst-cooperative": {
"title": "Catalyst Cooperative",
"path": "https://catalyst.coop/",
"role": "publisher",
"email": "<EMAIL>",
"organization": "Catalyst Cooperative",
},
"zane-selvans": {
"title": "<NAME>",
"email": "<EMAIL>",
"path": "https://amateurearthling.org/",
"role": "wrangler",
"organization": "Catalyst Cooperative"
},
"christina-gosnell": {
"title": "<NAME>",
"email": "<EMAIL>",
"role": "contributor",
"organization": "Catalyst Cooperative",
},
"steven-winter": {
"title": "<NAME>",
"email": "<EMAIL>",
"role": "contributor",
"organization": "Catalyst Cooperative",
},
"alana-wilson": {
"title": "<NAME>",
"email": "<EMAIL>",
"role": "contributor",
"organization": "Catalyst Cooperative",
},
"karl-dunkle-werner": {
"title": "<NAME>",
"email": "<EMAIL>",
"path": "https://karldw.org/",
"role": "contributor",
"organization": "UC Berkeley",
},
'greg-schivley': {
"title": "<NAME>",
"role": "contributor",
},
}
"""
dict: A dictionary of dictionaries containing contributor names (keys) and
their attributes (values).
"""
data_source_info = {
"eia860": {
"title": "EIA Form 860",
"path": "https://www.eia.gov/electricity/data/eia860/",
},
"eia861": {
"title": "EIA Form 861",
"path": "https://www.eia.gov/electricity/data/eia861/",
},
"eia923": {
"title": "EIA Form 923",
"path": "https://www.eia.gov/electricity/data/eia923/",
},
"eiawater": {
"title": "EIA Water Use for Power",
"path": "https://www.eia.gov/electricity/data/water/",
},
"epacems": {
"title": "EPA Air Markets Program Data",
"path": "https://ampd.epa.gov/ampd/",
},
"epaipm": {
"title": "EPA Integrated Planning Model",
"path": "https://www.epa.gov/airmarkets/national-electric-energy-data-system-needs-v6",
},
"ferc1": {
"title": "FERC Form 1",
"path": "https://www.ferc.gov/docs-filing/forms/form-1/data.asp",
},
"ferc714": {
"title": "FERC Form 714",
"path": "https://www.ferc.gov/docs-filing/forms/form-714/data.asp",
},
"ferceqr": {
"title": "FERC Electric Quarterly Report",
"path": "https://www.ferc.gov/docs-filing/eqr.asp",
},
"msha": {
"title": "Mining Safety and Health Administration",
"path": "https://www.msha.gov/mine-data-retrieval-system",
},
"phmsa": {
"title": "Pipelines and Hazardous Materials Safety Administration",
"path": "https://www.phmsa.dot.gov/data-and-statistics/pipeline/data-and-statistics-overview",
},
"pudl": {
"title": "The Public Utility Data Liberation Project (PUDL)",
"path": "https://catalyst.coop/pudl/",
"email": "<EMAIL>",
},
}
"""
dict: A dictionary of dictionaries containing data sources (keys) and
associated attributes (values).
"""
contributors_by_source = {
"pudl": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
"alana-wilson",
"karl-dunkle-werner",
],
"eia923": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
],
"eia860": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
"alana-wilson",
],
"ferc1": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
"alana-wilson",
],
"epacems": [
"catalyst-cooperative",
"karl-dunkle-werner",
"zane-selvans",
],
"epaipm": [
"greg-schivley",
],
}
"""
dict: A dictionary of data sources (keys) and lists of contributors (values).
"""
licenses = {
"cc-by-4.0": {
"name": "CC-BY-4.0",
"title": "Creative Commons Attribution 4.0",
"path": "https://creativecommons.org/licenses/by/4.0/"
},
"us-govt": {
"name": "other-pd",
"title": "U.S. Government Work",
"path": "http://www.usa.gov/publicdomain/label/1.0/",
}
}
"""
dict: A dictionary of dictionaries containing license types and their
attributes.
"""
output_formats = [
'sqlite',
'parquet',
'datapkg',
]
"""list: A list of types of PUDL output formats."""
keywords_by_data_source = {
'pudl': [
'us', 'electricity',
],
'eia860': [
'electricity', 'electric', 'boiler', 'generator', 'plant', 'utility',
'fuel', 'coal', 'natural gas', 'prime mover', 'eia860', 'retirement',
'capacity', 'planned', 'proposed', 'energy', 'hydro', 'solar', 'wind',
'nuclear', 'form 860', 'eia', 'annual', 'gas', 'ownership', 'steam',
'turbine', 'combustion', 'combined cycle', 'eia',
'energy information administration'
],
'eia923': [
'fuel', 'boiler', 'generator', 'plant', 'utility', 'cost', 'price',
'natural gas', 'coal', 'eia923', 'energy', 'electricity', 'form 923',
'receipts', 'generation', 'net generation', 'monthly', 'annual', 'gas',
'fuel consumption', 'MWh', 'energy information administration', 'eia',
'mercury', 'sulfur', 'ash', 'lignite', 'bituminous', 'subbituminous',
'heat content'
],
'epacems': [
'epa', 'us', 'emissions', 'pollution', 'ghg', 'so2', 'co2', 'sox',
'nox', 'load', 'utility', 'electricity', 'plant', 'generator', 'unit',
'generation', 'capacity', 'output', 'power', 'heat content', 'mmbtu',
'steam', 'cems', 'continuous emissions monitoring system', 'hourly',
'environmental protection agency', 'ampd', 'air markets program data',
],
'ferc1': [
'electricity', 'electric', 'utility', 'plant', 'steam', 'generation',
'cost', 'expense', 'price', 'heat content', 'ferc', 'form 1',
'federal energy regulatory commission', 'capital', 'accounting',
'depreciation', 'finance', 'plant in service', 'hydro', 'coal',
'natural gas', 'gas', 'opex', 'capex', 'accounts', 'investment',
'capacity'
],
'ferc714': [
'electricity', 'electric', 'utility', 'planning area', 'form 714',
'balancing authority', 'demand', 'system lambda', 'ferc',
'federal energy regulatory commission', "hourly", "generation",
"interchange", "forecast", "load", "adjacency", "plants",
],
'epaipm': [
'epaipm', 'integrated planning',
]
}
"""dict: A dictionary of datasets (keys) and keywords (values). """
ENTITY_TYPE_DICT = {
'M': 'Municipal',
'C': 'Cooperative',
'R': 'Retail Power Marketer',
'I': 'Investor Owned',
'P': 'Political Subdivision',
'T': 'Transmission',
'S': 'State',
'W': 'Wholesale Power Marketer',
'F': 'Federal',
'A': 'Municipal Mktg Authority',
'G': 'Community Choice Aggregator',
'D': 'Nonutility DSM Administrator',
'B': 'Behind the Meter',
'Q': 'Independent Power Producer',
'IND': 'Industrial',
'COM': 'Commercial',
'PR': 'Private', # Added by AES for OD table (Arbitrary moniker)
'PO': 'Power Marketer', # Added by AES for OD table
'U': 'Unknown', # Added by AES for OD table
'O': 'Other' # Added by AES for OD table
}
# Confirm these designations -- educated guess based on the form instructions
MOMENTARY_INTERRUPTION_DEF = { # Added by AES for R table
'L': 'Less than 1 minute',
'F': 'Less than or equal to 5 minutes',
'O': 'Other',
}
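
# Illustrative sketch (added for clarity; not part of the upstream constants
# module): dicts such as ENTITY_TYPE_DICT and MOMENTARY_INTERRUPTION_DEF map the
# raw single-letter codes reported on the EIA forms to human-readable labels.
# The helper name and sample codes below are hypothetical, and pandas is assumed
# to be imported as ``pd`` at the top of this module, as elsewhere in the file.
def _example_decode_entity_type(codes=("M", "C", "Q")):
    """Map raw EIA entity-type codes to descriptive labels (unknown codes -> NaN)."""
    return pd.Series(list(codes)).map(ENTITY_TYPE_DICT)
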
# https://www.eia.gov/electricity/data/eia411/#tabs_NERC-3
RECOGNIZED_NERC_REGIONS = [
'BASN', # ASSESSMENT AREA Basin (WECC)
'CALN', # ASSESSMENT AREA California (WECC)
'CALS', # ASSESSMENT AREA California (WECC)
'DSW', # ASSESSMENT AREA Desert Southwest (WECC)
'ASCC', # Alaska
'ISONE', # ISO New England (NPCC)
'ERCOT', # lumped under TRE in 2017 Form instructions
'NORW', # ASSESSMENT AREA Northwest (WECC)
'NYISO', # ISO (NPCC)
'PJM', # RTO
'ROCK', # ASSESSMENT AREA Rockies (WECC)
'ECAR', # OLD RE Now part of RFC and SERC
'FRCC', # included in 2017 Form instructions, recently joined with SERC
'HICC', # Hawaii
'MAAC', # OLD RE Now part of RFC
'MAIN', # OLD RE Now part of SERC, RFC, MRO
'MAPP', # OLD/NEW RE Became part of MRO, resurfaced in 2010
'MRO', # RE included in 2017 Form instructions
'NPCC', # RE included in 2017 Form instructions
'RFC', # RE included in 2017 Form instructions
'SERC', # RE included in 2017 Form instructions
'SPP', # RE included in 2017 Form instructions
'TRE', # RE included in 2017 Form instructions (included ERCOT)
'WECC', # RE included in 2017 Form instructions
'WSCC', # OLD RE pre-2002 version of WECC
'MISO', # ISO unclear whether technically a regional entity, but lots of entries
'ECAR_MAAC',
'MAPP_WECC',
'RFC_SERC',
'SPP_WECC',
'MRO_WECC',
'ERCOT_SPP',
'SPP_TRE',
'ERCOT_TRE',
'MISO_TRE',
'VI', # Virgin Islands
'GU', # Guam
'PR', # Puerto Rico
'AS', # American Samoa
'UNK',
]
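
# Illustrative sketch (added for clarity; not part of the upstream constants
# module): lists like RECOGNIZED_NERC_REGIONS feed the CategoricalDtype entries
# in the dtype map later in this module, so a column being cast must already
# hold one of the recognized codes; anything else is coerced to NaN. The helper
# name and the sample values (including the bogus "XYZ") are hypothetical, and
# pandas is assumed to be imported as ``pd`` at the top of this module.
def _example_nerc_region_cast(values=("WECC", "MRO", "XYZ")):
    """Cast sample codes to the NERC-region categorical; "XYZ" becomes NaN."""
    dtype = pd.CategoricalDtype(categories=RECOGNIZED_NERC_REGIONS)
    return pd.Series(list(values)).astype(dtype)
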
CUSTOMER_CLASSES = [
"commercial",
"industrial",
"direct_connection",
"other",
"residential",
"total",
"transportation"
]
TECH_CLASSES = [
    'backup',  # Where is this used? It was removed from the DG table because it is not a real component.
'chp_cogen',
'combustion_turbine',
'fuel_cell',
'hydro',
'internal_combustion',
'other',
'pv',
'steam',
'storage_pv',
'all_storage', # need 'all' as prefix so as not to confuse with other storage category
'total',
'virtual_pv',
'wind',
]
REVENUE_CLASSES = [
'retail_sales',
'unbundled',
'delivery_customers',
'sales_for_resale',
'credits_or_adjustments',
'other',
'transmission',
'total',
]
RELIABILITY_STANDARDS = [
'ieee_standard',
'other_standard'
]
FUEL_CLASSES = [
'gas',
'oil',
'other',
'renewable',
'water',
'wind',
'wood',
]
RTO_CLASSES = [
'caiso',
'ercot',
'pjm',
'nyiso',
'spp',
'miso',
'isone',
'other'
]
ESTIMATED_OR_ACTUAL = {'E': 'estimated', 'A': 'actual'}
TRANSIT_TYPE_DICT = {
    'CV': 'conveyer',
    'PL': 'pipeline',
    'RR': 'railroad',
    'TK': 'truck',
    'WA': 'water',
    'UN': 'unknown',
}
"""dict: A dictionary of fuel transport mode codes (keys) and descriptions (values)."""
column_dtypes = {
"ferc1": { # Obviously this is not yet a complete list...
"construction_year": pd.Int64Dtype(),
"installation_year": pd.Int64Dtype(),
"plant_id_ferc1": pd.Int64Dtype(),
"plant_id_pudl": pd.Int64Dtype(),
"report_date": "datetime64[ns]",
"report_year": pd.Int64Dtype(),
"utility_id_ferc1": pd.Int64Dtype(),
"utility_id_pudl": pd.Int64Dtype(),
},
"ferc714": { # INCOMPLETE
"demand_mwh": float,
"demand_annual_mwh": float,
"eia_code": pd.Int64Dtype(),
"peak_demand_summer_mw": float,
"peak_demand_winter_mw": float,
"report_date": "datetime64[ns]",
"respondent_id_ferc714": pd.Int64Dtype(),
"respondent_name_ferc714": pd.StringDtype(),
"respondent_type": pd.CategoricalDtype(categories=[
"utility", "balancing_authority",
]),
"timezone": pd.CategoricalDtype(categories=[
"America/New_York", "America/Chicago", "America/Denver",
"America/Los_Angeles", "America/Anchorage", "Pacific/Honolulu"]),
"utc_datetime": "datetime64[ns]",
},
"epacems": {
'state': pd.StringDtype(),
'plant_id_eia': pd.Int64Dtype(), # Nullable Integer
'unitid': pd.StringDtype(),
'operating_datetime_utc': "datetime64[ns]",
'operating_time_hours': float,
'gross_load_mw': float,
'steam_load_1000_lbs': float,
'so2_mass_lbs': float,
'so2_mass_measurement_code': pd.StringDtype(),
'nox_rate_lbs_mmbtu': float,
'nox_rate_measurement_code': pd.StringDtype(),
'nox_mass_lbs': float,
'nox_mass_measurement_code': pd.StringDtype(),
'co2_mass_tons': float,
'co2_mass_measurement_code': pd.StringDtype(),
'heat_content_mmbtu': float,
'facility_id': pd.Int64Dtype(), # Nullable Integer
'unit_id_epa': pd.Int64Dtype(), # Nullable Integer
},
"eia": {
'actual_peak_demand_savings_mw': float, # Added by AES for DR table
'address_2': pd.StringDtype(), # Added by AES for 860 utilities table
'advanced_metering_infrastructure': pd.Int64Dtype(), # Added by AES for AMI table
# Added by AES for UD misc table
'alternative_fuel_vehicle_2_activity': pd.BooleanDtype(),
'alternative_fuel_vehicle_activity': pd.BooleanDtype(),
'annual_indirect_program_cost': float,
'annual_total_cost': float,
'ash_content_pct': float,
'ash_impoundment': pd.BooleanDtype(),
'ash_impoundment_lined': pd.BooleanDtype(),
# TODO: convert this field to more descriptive words
'ash_impoundment_status': pd.StringDtype(),
'associated_combined_heat_power': pd.BooleanDtype(),
'attention_line': pd.StringDtype(),
'automated_meter_reading': pd.Int64Dtype(), # Added by AES for AMI table
'backup_capacity_mw': float, # Added by AES for NNM & DG misc table
'balancing_authority_code_eia': pd.CategoricalDtype(),
'balancing_authority_id_eia': pd.Int64Dtype(),
'balancing_authority_name_eia': pd.StringDtype(),
'bga_source': pd.StringDtype(),
'boiler_id': pd.StringDtype(),
'bunded_activity': pd.BooleanDtype(),
'business_model': pd.CategoricalDtype(categories=[
"retail", "energy_services"]),
'buy_distribution_activity': pd.BooleanDtype(),
'buying_transmission_activity': pd.BooleanDtype(),
'bypass_heat_recovery': pd.BooleanDtype(),
'caidi_w_major_event_days_minus_loss_of_service_minutes': float,
'caidi_w_major_event_dats_minutes': float,
'caidi_wo_major_event_days_minutes': float,
'capacity_mw': float,
'carbon_capture': pd.BooleanDtype(),
'chlorine_content_ppm': float,
'circuits_with_voltage_optimization': pd.Int64Dtype(),
'city': pd.StringDtype(),
'cofire_fuels': pd.BooleanDtype(),
'consumed_by_facility_mwh': float,
'consumed_by_respondent_without_charge_mwh': float,
'contact_firstname': pd.StringDtype(),
'contact_firstname_2': pd.StringDtype(),
'contact_lastname': pd.StringDtype(),
'contact_lastname_2': pd.StringDtype(),
'contact_title': pd.StringDtype(),
'contact_title_2': pd.StringDtype(),
'contract_expiration_date': 'datetime64[ns]',
'contract_type_code': pd.StringDtype(),
'county': pd.StringDtype(),
'county_id_fips': pd.StringDtype(), # Must preserve leading zeroes
'credits_or_adjustments': float,
'critical_peak_pricing': pd.BooleanDtype(),
'critical_peak_rebate': pd.BooleanDtype(),
'current_planned_operating_date': 'datetime64[ns]',
'customers': float,
'customer_class': pd.CategoricalDtype(categories=CUSTOMER_CLASSES),
'customer_incentives_cost': float,
'customer_incentives_incremental_cost': float,
'customer_incentives_incremental_life_cycle_cost': float,
'customer_other_costs_incremental_life_cycle_cost': float,
'daily_digital_access_customers': pd.Int64Dtype(),
'data_observed': pd.BooleanDtype(),
'datum': pd.StringDtype(),
'deliver_power_transgrid': pd.BooleanDtype(),
'delivery_customers': float,
'direct_load_control_customers': pd.Int64Dtype(),
'distributed_generation': pd.BooleanDtype(),
'distributed_generation_owned_capacity_mw': float,
'distribution_activity': pd.BooleanDtype(),
'distribution_circuits': pd.Int64Dtype(),
'duct_burners': pd.BooleanDtype(),
'energy_displaced_mwh': float,
'energy_efficiency_annual_cost': float,
'energy_efficiency_annual_actual_peak_reduction_mw': float,
'energy_efficiency_annual_effects_mwh': float,
'energy_efficiency_annual_incentive_payment': float,
'energy_efficiency_incremental_actual_peak_reduction_mw': float,
'energy_efficiency_incremental_effects_mwh': float,
'energy_savings_estimates_independently_verified': pd.BooleanDtype(),
'energy_savings_independently_verified': pd.BooleanDtype(),
'energy_savings_mwh': float,
'energy_served_ami_mwh': float,
'energy_source_1_transport_1': pd.CategoricalDtype(categories=TRANSIT_TYPE_DICT.values()),
'energy_source_1_transport_2': pd.CategoricalDtype(categories=TRANSIT_TYPE_DICT.values()),
'energy_source_1_transport_3': pd.CategoricalDtype(categories=TRANSIT_TYPE_DICT.values()),
'energy_source_2_transport_1': pd.CategoricalDtype(categories=TRANSIT_TYPE_DICT.values()),
'energy_source_2_transport_2': pd.CategoricalDtype(categories=TRANSIT_TYPE_DICT.values()),
'energy_source_2_transport_3': pd.CategoricalDtype(categories=TRANSIT_TYPE_DICT.values()),
'energy_source_code': pd.StringDtype(),
'energy_source_code_1': pd.StringDtype(),
'energy_source_code_2': pd.StringDtype(),
'energy_source_code_3': pd.StringDtype(),
'energy_source_code_4': pd.StringDtype(),
'energy_source_code_5': pd.StringDtype(),
'energy_source_code_6': pd.StringDtype(),
'energy_storage': pd.BooleanDtype(),
'entity_type': pd.CategoricalDtype(categories=ENTITY_TYPE_DICT.values()),
'estimated_or_actual_capacity_data': pd.CategoricalDtype(categories=ESTIMATED_OR_ACTUAL.values()),
'estimated_or_actual_fuel_data': pd.CategoricalDtype(categories=ESTIMATED_OR_ACTUAL.values()),
'estimated_or_actual_tech_data': pd.CategoricalDtype(categories=ESTIMATED_OR_ACTUAL.values()),
'exchange_energy_delivered_mwh': float,
'exchange_energy_recieved_mwh': float,
'ferc_cogen_docket_no': pd.StringDtype(),
'ferc_cogen_status': pd.BooleanDtype(),
'ferc_exempt_wholesale_generator': pd.BooleanDtype(),
'ferc_exempt_wholesale_generator_docket_no': pd.StringDtype(),
'ferc_small_power_producer': pd.BooleanDtype(),
'ferc_small_power_producer_docket_no': pd.StringDtype(),
'fluidized_bed_tech': pd.BooleanDtype(),
'fraction_owned': float,
'fuel_class': pd.StringDtype(),
'fuel_consumed_for_electricity_mmbtu': float,
'fuel_consumed_for_electricity_units': float,
'fuel_consumed_mmbtu': float,
'fuel_consumed_units': float,
'fuel_cost_per_mmbtu': float,
'fuel_group_code': pd.StringDtype(),
'fuel_group_code_simple': pd.StringDtype(),
'fuel_mmbtu_per_unit': float,
'fuel_pct': float,
'fuel_qty_units': float,
# are fuel_type and fuel_type_code the same??
# fuel_type includes 40 code-like things.. WAT, SUN, NUC, etc.
'fuel_type': pd.StringDtype(),
# from the boiler_fuel_eia923 table, there are 30 code-like things, like NG, BIT, LIG
'fuel_type_code': pd.StringDtype(),
'fuel_type_code_aer': pd.StringDtype(),
'fuel_type_code_pudl': pd.StringDtype(),
'furnished_without_charge_mwh': float,
'generation_activity': pd.BooleanDtype(),
# this is a mix of integer-like values (2 or 5) and strings like AUGSF
'generator_id': pd.StringDtype(),
'generators_number': float,
'generators_num_less_1_mw': float,
'green_pricing_revenue': float,
'grid_voltage_2_kv': float,
'grid_voltage_3_kv': float,
'grid_voltage_kv': float,
'heat_content_mmbtu_per_unit': float,
'highest_distribution_voltage_kv': float,
'home_area_network': pd.Int64Dtype(),
'inactive_accounts_included': pd.BooleanDtype(),
'incremental_energy_savings_mwh': float,
'incremental_life_cycle_energy_savings_mwh': float,
'incremental_life_cycle_peak_reduction_mwh': float,
'incremental_peak_reduction_mw': float,
'iso_rto_code': pd.StringDtype(),
'latitude': float,
'liquefied_natural_gas_storage': pd.BooleanDtype(),
'load_management_annual_cost': float,
'load_management_annual_actual_peak_reduction_mw': float,
'load_management_annual_effects_mwh': float,
'load_management_annual_incentive_payment': float,
'load_management_annual_potential_peak_reduction_mw': float,
'load_management_incremental_actual_peak_reduction_mw': float,
'load_management_incremental_effects_mwh': float,
'load_management_incremental_potential_peak_reduction_mw': float,
'longitude': float,
'major_program_changes': pd.BooleanDtype(),
'mercury_content_ppm': float,
'merge_address': pd.StringDtype(),
'merge_city': pd.StringDtype(),
'merge_company': pd.StringDtype(),
'merge_date': 'datetime64[ns]',
'merge_state': pd.StringDtype(),
'mine_id_msha': pd.Int64Dtype(),
'mine_id_pudl': pd.Int64Dtype(),
'mine_name': pd.StringDtype(),
'mine_type_code': pd.StringDtype(),
'minimum_load_mw': float,
'moisture_content_pct': float,
'momentary_interruption_definition': pd.CategoricalDtype(categories=MOMENTARY_INTERRUPTION_DEF.values()),
'multiple_fuels': pd.BooleanDtype(),
'nameplate_power_factor': float,
'natural_gas_delivery_contract_type_code': pd.StringDtype(),
'natural_gas_local_distribution_company': pd.StringDtype(),
'natural_gas_pipeline_name_1': pd.StringDtype(),
'natural_gas_pipeline_name_2': pd.StringDtype(),
'natural_gas_pipeline_name_3': pd.StringDtype(),
'natural_gas_storage': pd.BooleanDtype(),
'natural_gas_transport_code': pd.StringDtype(),
'nerc_region': pd.CategoricalDtype(categories=RECOGNIZED_NERC_REGIONS),
'nerc_regions_of_operation': pd.CategoricalDtype(categories=RECOGNIZED_NERC_REGIONS),
'net_generation_mwh': float,
'net_metering': pd.BooleanDtype(),
'net_power_exchanged_mwh': float,
'net_wheeled_power_mwh': float,
'new_parent': pd.StringDtype(),
'non_amr_ami': pd.Int64Dtype(),
'nuclear_unit_id': pd.Int64Dtype(),
'operates_generating_plant': pd.BooleanDtype(),
'operating_date': 'datetime64[ns]',
'operating_switch': pd.StringDtype(),
# TODO: double check this for early 860 years
'operational_status': pd.StringDtype(),
'operational_status_code': pd.StringDtype(),
'original_planned_operating_date': 'datetime64[ns]',
'other': float,
'other_combustion_tech': pd.BooleanDtype(),
'other_costs': float,
'other_costs_incremental_cost': float,
'other_modifications_date': 'datetime64[ns]',
'other_planned_modifications': pd.BooleanDtype(),
'outages_recorded_automatically': pd.BooleanDtype(),
'owned_by_non_utility': pd.BooleanDtype(),
'owner_city': pd.StringDtype(),
'owner_name': pd.StringDtype(),
'owner_state': pd.StringDtype(),
'owner_street_address': pd.StringDtype(),
'owner_utility_id_eia': pd.Int64Dtype(),
'owner_zip_code': pd.StringDtype(),
        # We should transition these into readable codes rather than single-letter values.
'ownership_code': pd.StringDtype(),
        'phone_extension_1': pd.StringDtype(),
import os
import unittest
import random
import sys
import site # so that ai4water directory is in path
ai4_dir = os.path.dirname(os.path.dirname(os.path.abspath(sys.argv[0])))
site.addsitedir(ai4_dir)
import scipy
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from ai4water import Model
from ai4water.preprocessing import DataHandler, SiteDistributedDataHandler
from ai4water.preprocessing.datahandler import MultiLocDataHandler
from ai4water.datasets import load_u1, arg_beach
os.environ['PYTHONHASHSEED'] = '313'
random.seed(313)
np.random.seed(313)
# todo, check last dimension of x,y
# todo test with 3d y
def _check_xy_equal_len(x, prev_y, y, lookback, num_ins, num_outs, num_examples, data_type='training'):
feat_dim = 1
if lookback > 1:
assert x.shape[1] == lookback
feat_dim = 2
assert x.shape[
feat_dim] == num_ins, f"for {data_type} x's shape is {x.shape} while num_ins of dataloader are {num_ins}"
if y is not None:
assert y.shape[1] == num_outs, f"for {data_type} y's shape is {y.shape} while num_outs of dataloader are {num_outs}"
else:
assert num_outs == 0
y = x # just for next statement to run
if prev_y is None:
prev_y = x # just for next statement to run
assert x.shape[0] == y.shape[0] == prev_y.shape[
0], f"for {data_type} xshape: {x.shape}, yshape: {y.shape}, prevyshape: {prev_y.shape}"
if num_examples:
assert x.shape[
0] == num_examples, f'for {data_type} x contains {x.shape[0]} samples while expected samples are {num_examples}'
return
def assert_xy_equal_len(x, prev_y, y, data_loader, num_examples=None, data_type='training'):
if isinstance(x, np.ndarray):
_check_xy_equal_len(x, prev_y, y, data_loader.lookback, data_loader.num_ins, data_loader.num_outs, num_examples,
data_type=data_type)
elif isinstance(x, list):
while len(y)<len(x):
y.append(None)
for idx, i in enumerate(x):
_check_xy_equal_len(i, prev_y[idx], y[idx], data_loader.lookback[idx], data_loader.num_ins[idx],
data_loader.num_outs[idx], num_examples, data_type=data_type
)
elif isinstance(x, dict):
for key, i in x.items():
_check_xy_equal_len(i, prev_y.get(key, None), y.get(key, None), data_loader.lookback[key], data_loader.num_ins[key],
data_loader.num_outs[key], num_examples, data_type=data_type
)
elif x is None: # all should be None
assert all(v is None for v in [x, prev_y, y])
else:
raise ValueError
def _check_num_examples(train_x, val_x, test_x, val_ex, test_ex, tot_obs):
val_examples = 0
if val_ex:
val_examples = val_x.shape[0]
test_examples = 0
if test_ex:
test_examples = test_x.shape[0]
xyz_samples = train_x.shape[0] + val_examples + test_examples
    # todo, should be equal
assert xyz_samples == tot_obs, f"""
data_loader has {tot_obs} examples while sum of train/val/test examples are {xyz_samples}."""
def check_num_examples(train_x, val_x, test_x, val_ex, test_ex, data_loader):
if isinstance(train_x, np.ndarray):
_check_num_examples(train_x, val_x, test_x, val_ex, test_ex, data_loader.tot_obs_for_one_df())
elif isinstance(train_x, list):
for idx in range(len(train_x)):
_check_num_examples(train_x[idx], val_x[idx], test_x[idx], val_ex, test_ex,
data_loader.tot_obs_for_one_df()[idx])
return
def check_inverse_transformation(data, data_loader, y, cols, key):
if cols is None:
# not output columns, so not checking
return
# check that after inverse transformation, we get correct y.
if data_loader.source_is_df:
train_y_ = data_loader.inverse_transform(data=pd.DataFrame(y.reshape(-1, len(cols)), columns=cols), key=key)
train_y_, index = data_loader.deindexify(train_y_, key=key)
compare_individual_item(data, key, cols, train_y_, data_loader)
elif data_loader.source_is_list:
#for idx in range(data_loader.num_sources):
# y_ = y[idx].reshape(-1, len(cols[idx]))
train_y_ = data_loader.inverse_transform(data=y, key=key)
train_y_, _ = data_loader.deindexify(train_y_, key=key)
for idx, y in enumerate(train_y_):
compare_individual_item(data[idx], f'{key}_{idx}', cols[idx], y, data_loader)
elif data_loader.source_is_dict:
train_y_ = data_loader.inverse_transform(data=y, key=key)
train_y_, _ = data_loader.deindexify(train_y_, key=key)
for src_name, val in train_y_.items():
compare_individual_item(data[src_name], f'{key}_{src_name}', cols[src_name], val, data_loader)
def compare_individual_item(data, key, cols, y, data_loader):
if y is None:
return
train_index = data_loader.indexes[key]
if y.__class__.__name__ in ['DataFrame']:
y = y.values
for i, v in zip(train_index, y):
if len(cols) == 1:
if isinstance(train_index, pd.DatetimeIndex):
# if true value in data is None, y's value should also be None
if np.isnan(data[cols].loc[i]).item():
assert np.isnan(v).item()
else:
_t = round(data[cols].loc[i].item(), 0)
_p = round(v.item(), 0)
if not np.allclose(data[cols].loc[i].item(), v.item()):
print(f'true: {_t}, : pred: {_p}, index: {i}, col: {cols}')
else:
if isinstance(v, np.ndarray):
v = round(v.item(), 3)
_true = round(data[cols].loc[i], 3).item()
_p = round(v, 3)
if _true != _p:
print(f'true: {_true}, : pred: {_p}, index: {i}, col: {cols}')
else:
if isinstance(train_index, pd.DatetimeIndex):
assert abs(data[cols].loc[i].sum() - np.nansum(v)) <= 0.00001, f'{data[cols].loc[i].sum()},: {v}'
else:
assert abs(data[cols].iloc[i].sum() - v.sum()) <= 0.00001
def check_kfold_splits(data_handler):
if data_handler.source_is_df:
splits = data_handler.KFold_splits()
for (train_x, train_y), (test_x, test_y) in splits:
... # print(train_x.shape, train_y.shape, test_x.shape, test_y.shape)
return
def assert_uniquenes(train_y, val_y, test_y, out_cols, data_loader):
if isinstance(train_y, list):
assert isinstance(val_y, list)
assert isinstance(test_y, list)
train_y = train_y[0]
val_y = val_y[0]
test_y = test_y[0]
if isinstance(train_y, dict):
train_y = list(train_y.values())[0]
assert isinstance(val_y, dict)
isinstance(test_y, dict)
val_y = list(val_y.values())[0]
test_y = list(test_y.values())[0]
if out_cols is not None:
b = train_y.reshape(-1, )
if val_y is None:
a = test_y.reshape(-1, )
else:
a = val_y.reshape(-1, )
if not len(np.intersect1d(a, b)) == 0:
raise ValueError(f'train and val have overlapping values')
if data_loader.val_data != 'same' and out_cols is not None and val_y is not None and test_y is not None:
a = test_y.reshape(-1,)
b = val_y.reshape(-1,)
assert len(np.intersect1d(a, b)) == 0, 'test and val have overlapping values'
return
def build_and_test_loader(data, config, out_cols, train_ex=None, val_ex=None, test_ex=None, save=True,
assert_uniqueness=True, check_examples=True,
true_train_y=None, true_val_y=None, true_test_y=None):
config['teacher_forcing'] = True # todo
if 'val_fraction' not in config:
config['val_fraction'] = 0.3
if 'test_fraction' not in config:
config['test_fraction'] = 0.3
data_loader = DataHandler(data=data, save=save, verbosity=0, **config)
#dl = DataLoader.from_h5('data.h5')
train_x, prev_y, train_y = data_loader.training_data(key='train')
assert_xy_equal_len(train_x, prev_y, train_y, data_loader, train_ex)
val_x, prev_y, val_y = data_loader.validation_data(key='val')
assert_xy_equal_len(val_x, prev_y, val_y, data_loader, val_ex, data_type='validation')
test_x, prev_y, test_y = data_loader.test_data(key='test')
assert_xy_equal_len(test_x, prev_y, test_y, data_loader, test_ex, data_type='test')
if check_examples:
check_num_examples(train_x, val_x, test_x, val_ex, test_ex, data_loader)
if isinstance(data, str):
data = data_loader.data
check_inverse_transformation(data, data_loader, train_y, out_cols, 'train')
if val_ex:
check_inverse_transformation(data, data_loader, val_y, out_cols, 'val')
if test_ex:
check_inverse_transformation(data, data_loader, test_y, out_cols, 'test')
check_kfold_splits(data_loader)
if assert_uniqueness:
assert_uniquenes(train_y, val_y, test_y, out_cols, data_loader)
if true_train_y is not None:
assert np.allclose(train_y, true_train_y)
if true_val_y is not None:
assert np.allclose(val_y, true_val_y)
if true_test_y is not None:
assert np.allclose(test_y, true_test_y)
return data_loader
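
# Minimal usage sketch (added for clarity; the helper name, column names, sizes
# and fractions are arbitrary examples, not taken from any specific test below):
# this is the basic pattern that build_and_test_loader exercises -- construct a
# DataHandler with teacher forcing enabled and pull the three (x, prev_y, y)
# splits from it, exactly as the helper above does.
def _example_datahandler_usage():
    df = pd.DataFrame(np.arange(300, dtype=np.int32).reshape(100, 3),
                      columns=['a', 'b', 'c'])
    dh = DataHandler(data=df, input_features=['a', 'b'], output_features=['c'],
                     lookback=3, val_fraction=0.3, test_fraction=0.3,
                     teacher_forcing=True, verbosity=0, save=False)
    train_x, train_prev_y, train_y = dh.training_data(key='train')
    val_x, val_prev_y, val_y = dh.validation_data(key='val')
    test_x, test_prev_y, test_y = dh.test_data(key='test')
    return train_x.shape, val_x.shape, test_x.shape
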
class TestAllCases(object):
def __init__(self, input_features, output_features, lookback=3, allow_nan_labels=0, save=True):
self.input_features = input_features
self.output_features = output_features
self.lookback = lookback
self.allow_nan_labels=allow_nan_labels
self.save=save
self.run_all()
def run_all(self):
all_methods = [m for m in dir(self) if callable(getattr(self, m)) and not m.startswith('_') and m not in ['run_all']]
for m in all_methods:
getattr(self, m)()
return
def test_basic(self):
examples = 100
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'])
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback}
tr_examples = 49 - (self.lookback - 2) if self.lookback>1 else 49
val_examples = 22 - (self.lookback - 2) if self.lookback>1 else 22
test_examples = 30 - (self.lookback - 2) if self.lookback>1 else 30
if self.output_features == ['c']:
tty = np.arange(202, 250).reshape(-1, 1, 1)
tvy = np.arange(250, 271).reshape(-1, 1, 1)
ttesty = np.arange(271, 300).reshape(-1, 1, 1)
else:
tty, tvy, ttesty = None, None, None
loader = build_and_test_loader(data, config, self.output_features,
tr_examples, val_examples, test_examples,
save=self.save,
true_train_y=tty,
true_val_y=tvy,
true_test_y=ttesty,
check_examples=True,
)
assert loader.source_is_df
return
def test_with_random(self):
examples = 100
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'])
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'train_data': 'random'}
tr_examples = 49 - (self.lookback - 2) if self.lookback>1 else 49
loader = build_and_test_loader(data, config, self.output_features,
tr_examples, 20, 30,
save=self.save,
)
assert loader.source_is_df
return
def test_drop_remainder(self):
examples = 100
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'])
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'batch_size': 8,
'drop_remainder': True,
'train_data': 'random'}
loader = build_and_test_loader(data, config, self.output_features,
48, 16, 24,
check_examples=False,
save=self.save,
)
assert loader.source_is_df
return
def test_with_same_val_data(self):
        # val_data is "same" and train_data is made based upon fractions.
examples = 100
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'])
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'val_data': 'same'}
if self.output_features == ['c']:
tty = np.arange(202, 271).reshape(-1, 1, 1)
tvy = np.arange(271, 300).reshape(-1, 1, 1)
ttesty = np.arange(271, 300).reshape(-1, 1, 1)
else:
tty, tvy, ttesty = None, None, None
tr_examples = 71 - (self.lookback - 1) if self.lookback > 1 else 71
loader = build_and_test_loader(data, config, self.output_features,
tr_examples, 29, 29,
true_train_y=tty,
true_val_y=tvy,
true_test_y=ttesty,
save=self.save,
check_examples=False
)
assert loader.source_is_df
return
def test_with_same_val_data_and_random(self):
examples = 100
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'])
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'train_data': 'random',
'val_data': 'same'}
tr_examples = 70 - (self.lookback - 1) if self.lookback > 1 else 70
loader = build_and_test_loader(data, config, self.output_features,
tr_examples, 30, 30,
check_examples=False,
save=self.save
)
assert loader.source_is_df
return
def test_with_no_val_data(self):
        # we don't want to have any validation_data
examples = 100
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'])
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'val_fraction': 0.0}
if self.output_features == ['c']:
tty = np.arange(202, 271).reshape(-1, 1, 1)
ttesty = np.arange(271, 300).reshape(-1, 1, 1)
else:
tty, tvy, ttesty = None, None, None
tr_examples = 71 - (self.lookback - 1) if self.lookback > 1 else 71
loader = build_and_test_loader(data, config, self.output_features,
tr_examples, 0, 29,
true_train_y=tty,
true_test_y=ttesty,
save=self.save)
assert loader.source_is_df
return
def test_with_no_val_data_with_random(self):
        # we don't want to have any validation_data
examples = 100
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'])
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'train_data': 'random',
'val_fraction': 0.0}
tr_examples = 70 - (self.lookback - 1) if self.lookback > 1 else 70
loader = build_and_test_loader(data, config, self.output_features,
tr_examples, 0, 30,
save=self.save
)
assert loader.source_is_df
return
def test_with_no_test_data(self):
# we don't want any test_data
examples = 100
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'])
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'test_fraction': 0.0}
if self.output_features == ['c']:
tty = np.arange(202, 271).reshape(-1, 1, 1)
tvy = np.arange(271, 300).reshape(-1, 1, 1)
else:
tty, tvy, ttesty = None, None, None
tr_examples = 71 - (self.lookback - 1) if self.lookback > 1 else 71
loader = build_and_test_loader(data, config, self.output_features, tr_examples, 29, 0,
true_train_y=tty,
true_val_y=tvy,
save=self.save
)
assert loader.source_is_df
return
def test_with_no_test_data_with_random(self):
# we don't want any test_data
examples = 20
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'])
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'train_data': 'random',
'test_fraction': 0.0,
'transformation': 'minmax'}
tr_examples = 15- (self.lookback - 1) if self.lookback > 1 else 15
loader = build_and_test_loader(data, config, self.output_features, tr_examples, 5, 0,
save=self.save)
assert loader.source_is_df
return
def test_with_dt_index(self):
        # data has a datetime index; we don't want any test_data
#print('testing test_with_dt_index', self.lookback)
examples = 20
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'],
index=pd.date_range('20110101', periods=20, freq='D'))
config = {'input_features': self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'train_data': 'random',
'test_fraction': 0.0,
'transformation': 'minmax'}
tr_examples = 15 - (self.lookback - 1) if self.lookback > 1 else 15
loader = build_and_test_loader(data, config, self.output_features, tr_examples, 5, 0,
save=self.save)
assert loader.source_is_df
return
def test_with_intervals(self):
#print('testing test_with_intervals', self.lookback)
examples = 35
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'],
index=pd.date_range('20110101', periods=35, freq='D'))
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'train_data': 'random',
'transformation': 'minmax',
'intervals': [(0, 10), (20, 35)]
}
tr_examples = 12 - (self.lookback - 1) if self.lookback > 1 else 12
loader = build_and_test_loader(data, config, self.output_features, tr_examples, 4, 7,
save=self.save
)
assert loader.source_is_df
return
def test_with_dt_intervals(self):
# check whether indices of intervals can be datetime?
examples = 35
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'],
index=pd.date_range('20110101', periods=35, freq='D'))
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'train_data': 'random',
'transformation': 'minmax',
'intervals': [('20110101', '20110110'), ('20110121', '20110204')]
}
tr_examples = 12 - (self.lookback - 1) if self.lookback > 1 else 12
val_examples = 7 - (self.lookback - 2) if self.lookback > 1 else 7
test_examples = 7 - (self.lookback - 2) if self.lookback > 1 else 7
loader = build_and_test_loader(data, config, self.output_features, tr_examples, 4, 7,
save=self.save)
assert loader.source_is_df
return
def test_with_custom_train_indices(self):
#print('testing test_with_custom_train_indices')
examples = 20
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'],
index=pd.date_range('20110101', periods=20, freq='D'))
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'train_data': [1,2,3,4,5,6,7,8,9,10,11,12],
'transformation': 'minmax',
}
tr_examples = 9 - (self.lookback - 2) if self.lookback > 1 else 9
val_examples = 6 - (self.lookback - 1) if self.lookback > 1 else 6
test_examples = 8 - (self.lookback - 1) if self.lookback > 1 else 8
loader = build_and_test_loader(data, config, self.output_features, tr_examples, val_examples, test_examples,
save=self.save)
assert loader.source_is_df
return
def test_with_custom_train_indices_no_val_data(self):
examples = 20
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'],
index=pd.date_range('20110101', periods=20, freq='D'))
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'train_data': [1,2,3,4,5,6,7,8,9,10,11,12],
'transformation': 'minmax',
'val_fraction': 0.0,
}
test_examples = 8 - (self.lookback - 1) if self.lookback > 1 else 8
loader = build_and_test_loader(data, config, self.output_features, 12, 0, test_examples,
save=self.save)
assert loader.source_is_df
return
def test_with_custom_train_indices_same_val_data(self):
#print('testing test_with_custom_train_indices_same_val_data')
examples = 20
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'],
index=pd.date_range('20110101', periods=20, freq='D'))
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'train_data': [1,2,3,4,5,6,7,8,9,10,11,12],
'transformation': 'minmax',
'val_data': 'same',
}
test_examples = 8 - (self.lookback - 1) if self.lookback > 1 else 8
loader = build_and_test_loader(data, config, self.output_features, 12, 0, test_examples,
save=self.save)
assert loader.source_is_df
return
def test_with_custom_train_and_val_indices(self):
examples = 20
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'],
index=pd.date_range('20110101', periods=20, freq='D'))
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'train_data': [1,2,3,4,5,6,7,8,9,10,11,12],
'transformation': 'minmax',
'val_data': [0, 12, 14, 16, 5],
'val_fraction': 0.0,
}
test_examples = 8 - (self.lookback - 1) if self.lookback > 1 else 8
loader = build_and_test_loader(data, config, self.output_features, 12, 5, test_examples,
assert_uniqueness=False,
save=self.save,
check_examples=False
)
assert loader.source_is_df
return
# def test_with_train_and_val_and_test_indices(self):
# # todo, does it make sense to define test_data by indices
# return
def test_with_custom_train_indices_and_intervals(self):
#print('testing test_with_custom_train_indices_and_intervals', self.lookback)
examples = 30
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'],
index=pd.date_range('20110101', periods=30, freq='D'))
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'train_data': [1,2,3,4,5,6,7,8,9,10,11,12],
#'transformation': 'minmax',
'intervals': [(0, 10), (20, 30)]
}
if self.output_features == ['c']:
tty = np.array([63., 64., 65., 66., 67., 68., 69., 82.]).reshape(-1, 1, 1)
tvy = np.arange(83, 87).reshape(-1, 1, 1)
ttesty = np.array([62., 87., 88., 89.]).reshape(-1, 1, 1)
else:
tty, tvy, ttesty = None, None, None
tr_examples = 10 - (self.lookback - 1) if self.lookback > 1 else 10
val_examples = 6 - (self.lookback - 1) if self.lookback > 1 else 6
test_examples = 6 - (self.lookback - 1) if self.lookback > 1 else 6
loader = build_and_test_loader(data, config, self.output_features, tr_examples, val_examples, test_examples,
true_train_y=tty,
true_val_y=tvy,
true_test_y=ttesty,
save=self.save)
assert loader.source_is_df
return
def test_with_one_feature_transformation(self):
examples = 20
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'])
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'transformation': [{'method': 'minmax', 'features': ['a']}],
}
if self.output_features == ['c']:
tty = np.arange(42, 51).reshape(-1, 1, 1)
tvy = np.arange(51, 55).reshape(-1, 1, 1)
ttesty = np.arange(55, 60).reshape(-1, 1, 1)
else:
tty, tvy, ttesty = None, None, None
tr_examples = 11 - (self.lookback - 1) if self.lookback > 1 else 11
val_examples = 6 - (self.lookback - 1) if self.lookback > 1 else 6
loader = build_and_test_loader(data, config, self.output_features, tr_examples, val_examples, 5,
true_train_y=tty,
true_val_y=tvy,
true_test_y=ttesty,
save=self.save)
assert loader.source_is_df
return
def test_with_one_feature_multi_transformation(self):
examples = 20
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'])
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'transformation': [{'method': 'minmax', 'features': ['a']}, {'method': 'zscore', 'features': ['a']}],
}
if self.output_features == ['c']:
tty = np.arange(42, 51).reshape(-1, 1, 1)
tvy = np.arange(51, 55).reshape(-1, 1, 1)
ttesty = np.arange(55, 60).reshape(-1, 1, 1)
else:
tty, tvy, ttesty = None, None, None
tr_examples = 11 - (self.lookback - 1) if self.lookback > 1 else 11
val_examples = 6 - (self.lookback - 1) if self.lookback > 1 else 6
loader = build_and_test_loader(data, config, self.output_features, tr_examples, val_examples, 5,
true_train_y=tty,
true_val_y=tvy,
true_test_y=ttesty,
save=self.save)
assert loader.source_is_df
return
def test_with_one_feature_multi_transformation_on_diff_features(self):
examples = 20
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'])
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'transformation': [{'method': 'minmax', 'features': ['a', 'b', 'c']}, {'method': 'zscore', 'features': ['c']}],
}
tr_examples = 11 - (self.lookback - 1) if self.lookback > 1 else 11
val_examples = 6 - (self.lookback - 1) if self.lookback > 1 else 6
loader = build_and_test_loader(data, config, self.output_features, tr_examples, val_examples, 5,
save=self.save)
assert loader.source_is_df
return
def test_with_input_transformation(self):
examples = 20
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'])
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'transformation': [{'method': 'minmax', 'features': ['a', 'b']}],
}
if self.output_features == ['c']:
tty = np.arange(42, 51).reshape(-1, 1, 1)
tvy = np.arange(51, 55).reshape(-1, 1, 1)
ttesty = np.arange(55, 60).reshape(-1, 1, 1)
else:
tty, tvy, ttesty = None, None, None
tr_examples = 11 - (self.lookback - 1) if self.lookback > 1 else 11
val_examples = 6 - (self.lookback - 1) if self.lookback > 1 else 6
loader = build_and_test_loader(data, config, self.output_features, tr_examples, val_examples, 5,
true_train_y=tty,
true_val_y=tvy,
true_test_y=ttesty,
save=self.save)
assert loader.source_is_df
return
def test_with_input_transformation_as_dict(self):
examples = 20
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'])
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'transformation': {'method': 'minmax', 'features': ['a', 'b']},
}
if self.output_features == ['c']:
tty = np.arange(42, 51).reshape(-1, 1, 1)
tvy = np.arange(51, 55).reshape(-1, 1, 1)
ttesty = np.arange(55, 60).reshape(-1, 1, 1)
else:
tty, tvy, ttesty = None, None, None
tr_examples = 11 - (self.lookback - 1) if self.lookback > 1 else 11
val_examples = 6 - (self.lookback - 1) if self.lookback > 1 else 6
loader = build_and_test_loader(data, config, self.output_features, tr_examples, val_examples, 5,
true_train_y=tty,
true_val_y=tvy,
true_test_y=ttesty,
save=self.save)
assert loader.source_is_df
return
def test_with_output_transformation(self):
examples = 20
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'])
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'transformation': {'method': 'minmax', 'features': ['c']},
}
tr_examples = 11 - (self.lookback - 1) if self.lookback > 1 else 11
val_examples = 6 - (self.lookback - 1) if self.lookback > 1 else 6
loader = build_and_test_loader(data, config, self.output_features, tr_examples, val_examples, 5,
save=self.save)
assert loader.source_is_df
return
def test_with_indices_and_intervals(self):
examples = 30
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'],
index=pd.date_range('20110101', periods=30, freq='D'))
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback, 'train_data': 'random',
'transformation': 'minmax',
'intervals': [(0, 10), (20, 30)]
}
tr_examples = 10 - (self.lookback - 1) if self.lookback > 1 else 10
val_examples = 5 - (self.lookback - 1) if self.lookback > 1 else 5
loader = build_and_test_loader(data, config, self.output_features, tr_examples, val_examples, 5,
save=self.save)
assert loader.source_is_df
return
def test_with_indices_and_intervals_same_val_data(self):
examples = 30
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'],
index=pd.date_range('20110101', periods=30, freq='D'))
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback, 'train_data': 'random', 'val_data': 'same',
'transformation': 'minmax',
'intervals': [(0, 10), (20, 30)]
}
tr_examples = 13 - (self.lookback - 1) if self.lookback > 1 else 13
loader = build_and_test_loader(data, config, self.output_features, tr_examples, 5, 5,
check_examples=False,
save=self.save)
assert loader.source_is_df
return
def test_with_indices_and_intervals_no_val_data(self):
examples = 30
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'],
index=pd.date_range('20110101', periods=30, freq='D'))
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'train_data': 'random', 'val_fraction': 0.0,
'transformation': 'minmax',
'intervals': [(0, 10), (20, 30)]
}
tr_examples = 13 - (self.lookback - 1) if self.lookback > 1 else 13
loader = build_and_test_loader(data, config, self.output_features, tr_examples, 0, 5,
save=self.save)
assert loader.source_is_df
return
def test_with_indices_and_nans(self):
examples = 30
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'],
index=pd.date_range('20110101', periods=30, freq='D'))
if self.output_features is not None:
data['c'].iloc[10:20] = np.nan
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'train_data': 'random',
}
tr_examples = 10 - (self.lookback - 1) if self.lookback > 1 else 10
loader = build_and_test_loader(data, config, self.output_features, tr_examples, 4, 6,
save=self.save)
assert loader.source_is_df
config['allow_nan_labels'] = 2 if len(self.output_features) == 1 else 1
tr_examples = 15 - (self.lookback - 1) if self.lookback > 1 else 15
loader = build_and_test_loader(data, config, self.output_features, tr_examples, 6, 9,
save=self.save)
assert loader.source_is_df
return
def test_with_indices_and_nans_interpolate(self):
examples = 30
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'],
index=pd.date_range('20110101', periods=30, freq='D'))
if self.output_features is not None:
data['b'].iloc[10:20] = np.nan
config = {'input_features': self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'nan_filler': {'method': 'KNNImputer', 'features': self.input_features},
'train_data': 'random',
}
if self.input_features == ['a']:
tr_examples = 10 - (self.lookback - 1) if self.lookback > 1 else 10
val_examples = 6 - (self.lookback - 1) if self.lookback > 1 else 6
test_examples = 6
else:
tr_examples = 15 - (self.lookback - 1) if self.lookback > 1 else 15
val_examples = 6
test_examples = 9
build_and_test_loader(data, config, self.output_features,
tr_examples, val_examples, test_examples,
save=self.save)
data['c'].iloc[10:20] = np.nan
if 'b' not in self.output_features:
config = {'input_features': self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'nan_filler': {'method': 'KNNImputer', 'features': ['b']},
'train_data': 'random',
}
tr_examples = 10 - (self.lookback - 1) if self.lookback > 1 else 10
build_and_test_loader(data, config, self.output_features, tr_examples, 4, 6,
save=self.save)
config = {'input_features': self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'nan_filler': {'method': 'KNNImputer', 'features': ['b'], 'imputer_args': {'n_neighbors': 4}},
'train_data': 'random',
}
tr_examples = 10 - (self.lookback - 1) if self.lookback > 1 else 10
build_and_test_loader(data, config, self.output_features, tr_examples, 4, 6,
save=self.save)
return
def test_with_indices_and_nans_at_irregular_intervals(self):
if self.output_features is not None and len(self.output_features)>1:
examples = 40
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'],
index=pd.date_range('20110101', periods=40, freq='D'))
data['b'].iloc[20:30] = np.nan
data['c'].iloc[10:20] = np.nan
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'train_data': 'random',
}
tr_examples = 10 - (self.lookback - 1) if self.lookback > 1 else 10
loader = build_and_test_loader(data, config, self.output_features, tr_examples, 4, 6,
save=self.save)
assert loader.source_is_df
config['allow_nan_labels'] = self.allow_nan_labels
loader = build_and_test_loader(data, config, self.output_features, 18, 8, 12,
save=self.save)
assert loader.source_is_df
return
def test_with_intervals_and_nans(self):
# if data contains nans and we also have intervals
if self.output_features is not None:
examples = 40
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'],
index=pd.date_range('20110101', periods=40, freq='D'))
data['c'].iloc[20:30] = np.nan
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'intervals': [(0, 10), (20, 40)]
}
tr_examples = 11 - (self.lookback - 1) if self.lookback > 1 else 11
val_examples = 6 - (self.lookback - 1) if self.lookback > 1 else 6
loader = build_and_test_loader(data, config, self.output_features, tr_examples, val_examples, 5,
check_examples=False, # todo
save=self.save)
assert loader.source_is_df
config['allow_nan_labels'] = 2 if len(self.output_features) == 1 else 1
tr_examples = 15 - (self.lookback - 1) if self.lookback > 1 else 15
val_examples = 7 - (self.lookback - 1) if self.lookback > 1 else 7
loader = build_and_test_loader(data, config, self.output_features, tr_examples, val_examples, 8,
save=self.save)
assert loader.source_is_df
return
def test_with_intervals_and_nans_at_irregular_intervals(self):
# if data contains nans and we also have intervals
if self.output_features is not None and len(self.output_features) > 1:
examples = 50
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'],
index=pd.date_range('20110101', periods=50, freq='D'))
data['b'].iloc[20:30] = np.nan
data['c'].iloc[40:50] = np.nan
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'intervals': [(0, 10), (20, 50)]
}
loader = build_and_test_loader(data, config, self.output_features, 9, 4, 5,
check_examples=False,
save=self.save)
assert loader.source_is_df
config['allow_nan_labels'] = self.allow_nan_labels
loader = build_and_test_loader(data, config, self.output_features, 18, 7, 11,
save=self.save)
assert loader.source_is_df
return
def test_with_intervals_and_nans_same_val_data(self):
# if data contains nans and we also have intervals and val_data is same
if self.output_features is not None:
examples = 40
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'],
index=pd.date_range('20110101', periods=40, freq='D'))
data['c'].iloc[20:30] = np.nan
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'val_data': 'same',
'intervals': [(0, 10), (20, 40)]
}
tr_examples = 15 - (self.lookback - 1) if self.lookback > 1 else 15
loader = build_and_test_loader(data, config, self.output_features, tr_examples, 5, 5,
check_examples=False,
save=self.save)
assert loader.source_is_df
config['allow_nan_labels'] = self.allow_nan_labels
tr_examples = 20 - (self.lookback - 1) if self.lookback > 1 else 20
loader = build_and_test_loader(data, config, self.output_features, tr_examples, 8, 8,
check_examples=False,
save=self.save)
assert loader.source_is_df
return
def test_with_intervals_and_nans_at_irregular_intervals_and_same_val_data(self):
# if data contains nans and we also have intervals and val_data is same
if self.output_features is not None and len(self.output_features) > 1:
examples = 50
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'],
index=pd.date_range('20110101', periods=50, freq='D'))
data['b'].iloc[20:30] = np.nan
data['c'].iloc[40:50] = np.nan
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'val_data': 'same',
'intervals': [(0, 10), (20, 50)]
}
loader = build_and_test_loader(data, config, self.output_features, 13, 5, 5,
check_examples=False,
save=self.save)
assert loader.source_is_df
config['allow_nan_labels'] = self.allow_nan_labels
loader = build_and_test_loader(data, config, self.output_features, 25, 11, 11,
check_examples=False,
save=self.save)
assert loader.source_is_df
return
def test_with_intervals_and_nans_no_val_data(self):
# if data contains nans and we also have intervals and val_data is same
if self.output_features is not None:
examples = 40
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'],
index=pd.date_range('20110101', periods=40, freq='D'))
data['c'].iloc[20:30] = np.nan
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'val_fraction': 0.0,
'intervals': [(0, 10), (20, 40)]
}
tr_examples = 15 - (self.lookback - 1) if self.lookback > 1 else 15
loader = build_and_test_loader(data, config, self.output_features, tr_examples, 0, 5,
check_examples=False,
save=self.save)
assert loader.source_is_df
config['allow_nan_labels'] = self.allow_nan_labels
tr_examples = 20 - (self.lookback - 1) if self.lookback > 1 else 20
loader = build_and_test_loader(data, config, self.output_features, tr_examples, 0, 8,
save=self.save)
assert loader.source_is_df
return
def test_with_intervals_and_nans_at_irreg_intervals_and_no_val_data(self):
# if data contains nans and we also have intervals and val_data is same
if self.output_features is not None and len(self.output_features) > 1:
examples = 50
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'],
index=pd.date_range('20110101', periods=50, freq='D'))
data['b'].iloc[20:30] = np.nan
data['c'].iloc[40:50] = np.nan
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'val_fraction': 0.0,
'intervals': [(0, 10), (20, 50)]
}
loader = build_and_test_loader(data, config, self.output_features, 13, 0, 5,
check_examples=False,
save=self.save)
assert loader.source_is_df
config['allow_nan_labels'] = self.allow_nan_labels
loader = build_and_test_loader(data, config, self.output_features, 25, 0, 11,
save=self.save)
assert loader.source_is_df
return
def test_with_indices_intervals_and_nans(self):
# if data contains nans and we also have intervals
if self.output_features is not None:
examples = 40
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'],
index=pd.date_range('20110101', periods=40, freq='D'))
data['c'].iloc[20:30] = np.nan
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'train_data': 'random',
'intervals': [(0, 10), (20, 40)]
}
tr_examples = 10 - (self.lookback - 1) if self.lookback > 1 else 10
loader = build_and_test_loader(data, config, self.output_features, tr_examples, 3, 5,
save=self.save)
assert loader.source_is_df
config['allow_nan_labels'] = 2 if len(self.output_features) == 1 else 1
tr_examples = 15 - (self.lookback - 1) if self.lookback > 1 else 15
loader = build_and_test_loader(data, config, self.output_features, tr_examples, 5, 8, save=self.save)
assert loader.source_is_df
return
def test_with_indices_intervals_and_nans_with_same_val_data(self):
# if data contains nans and we also have intervals
if self.output_features is not None:
examples = 40
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'],
index=pd.date_range('20110101', periods=40, freq='D'))
data['c'].iloc[20:30] = np.nan
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'train_data': 'random',
'val_data': 'same',
'intervals': [(0, 10), (20, 40)]
}
tr_examples = 13 - (self.lookback - 1) if self.lookback > 1 else 13
loader = build_and_test_loader(data, config, self.output_features, tr_examples, 5, 5,
check_examples=False,
save=self.save)
assert loader.source_is_df
config['allow_nan_labels'] = 2 if len(self.output_features) == 1 else 1
tr_examples = 20 - (self.lookback - 1) if self.lookback > 1 else 20
loader = build_and_test_loader(data, config, self.output_features, tr_examples, 8, 8,
check_examples=False,
save=self.save)
assert loader.source_is_df
return
def test_with_indices_intervals_and_nans_with_no_val_data(self):
# if data contains nans and we also have intervals
if self.output_features is not None:
examples = 40
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'],
index=pd.date_range('20110101', periods=40, freq='D'))
data['c'].iloc[20:30] = np.nan
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'train_data': 'random',
'val_fraction': 0.0,
'intervals': [(0, 10), (20, 40)]
}
tr_examples = 13 - (self.lookback - 1) if self.lookback > 1 else 13
loader = build_and_test_loader(data, config, self.output_features, tr_examples, 0, 5,
save=self.save)
assert loader.source_is_df
config['allow_nan_labels'] = 2 if len(self.output_features) == 1 else 1
tr_examples = 20 - (self.lookback - 1) if self.lookback > 1 else 20
loader = build_and_test_loader(data, config, self.output_features, tr_examples, 0, 8,
save=self.save)
assert loader.source_is_df
return
def test_with_indices_intervals_and_nans_with_no_test_data(self):
# if data contains nans and we also have intervals
if self.output_features is not None:
examples = 40
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'],
index=pd.date_range('20110101', periods=40, freq='D'))
data['c'].iloc[20:30] = np.nan
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback, 'train_data': 'random', 'test_fraction': 0.0,
'intervals': [(0, 10), (20, 40)]
}
tr_examples = 13 - (self.lookback - 1) if self.lookback > 1 else 13
loader = build_and_test_loader(data, config, self.output_features, tr_examples, 5, 0,
save=self.save)
assert loader.source_is_df
config['allow_nan_labels'] = 2 if len(self.output_features) == 1 else 1
tr_examples = 20 - (self.lookback - 1) if self.lookback > 1 else 20
loader = build_and_test_loader(data, config, self.output_features, tr_examples, 8, 0,
save=self.save)
assert loader.source_is_df
return
def test_with_custom_indices_intervals_and_nans(self):
# if data contains nans and we also have intervals
if self.output_features is not None:
examples = 40
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'],
index=pd.date_range('20110101', periods=40, freq='D'))
data['c'].iloc[20:30] = np.nan
config = {'input_features':self.input_features,
'output_features': self.output_features,
'lookback': self.lookback,
'train_data': [1,2,3,4,5,6,7,8,9,10,11,12],
'intervals': [(0, 10), (20, 40)]
}
tr_examples = 10 - (self.lookback - 1) if self.lookback > 1 else 10
val_examples = 6 - (self.lookback - 1) if self.lookback > 1 else 6
test_examples = 6 - (self.lookback - 1) if self.lookback > 1 else 6
loader = build_and_test_loader(data, config, self.output_features, tr_examples, val_examples, test_examples,
save=self.save)
assert loader.source_is_df
config['allow_nan_labels'] = 2 if len(self.output_features) == 1 else 1
tr_examples = 10 - (self.lookback - 1) if self.lookback > 1 else 10
val_examples = 6 - (self.lookback - 1) if self.lookback > 1 else 6
test_examples = 16 - (self.lookback - 1) if self.lookback > 1 else 16
loader = build_and_test_loader(data, config, self.output_features, tr_examples, val_examples,
test_examples, save=self.save)
assert loader.source_is_df
return
def test_with_random_with_transformation_of_features():
examples = 100
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
data = pd.DataFrame(data, columns=['a', 'b', 'c'], index=pd.date_range('20110101', periods=len(data), freq='D'))
data['date'] = data.index
config = {'input_features':['b'],
'output_features': ['c'],
'lookback': 5,
'train_data': 'random'}
dh = DataHandler(data, verbosity=0, **config)
x,y = dh.training_data()
return
def test_random_with_intervals():
data = np.random.randint(0, 1000, (40560, 14))
input_features = [f'input_{i}' for i in range(13)]
output_features = ['NDX']
data = pd.DataFrame(data, columns=input_features+output_features)
out = data["NDX"]
    # put three chunks of missing intervals
intervals = [(100, 200), (1000, 8000), (10000, 31000)]
for interval in intervals:
st, en = interval[0], interval[1]
out[st:en] = np.nan
data["NDX"] = out
config = {
'input_features': input_features,
'output_features': output_features,
'lookback': 5,
'train_data': 'random',
'intervals': [(0, 99), (200, 999), (8000, 9999), (31000, 40560)],
}
build_and_test_loader(data, config, out_cols=output_features,
train_ex=6096, val_ex=2612, test_ex=3733,
assert_uniqueness=False,
save=False)
return
def make_cross_validator(cv, **kwargs):
model = Model(
model={'randomforestregressor': {}},
data=arg_beach(),
cross_validator=cv,
val_metric="mse",
verbosity=0,
**kwargs
)
return model
class TestCVs(object):
    def test_tscv(self):
model = make_cross_validator(cv={'TimeSeriesSplit': {'n_splits': 5}})
model.cross_val_score()
model.dh.plot_TimeSeriesSplit_splits(show=False)
return
    def test_kfold(self):
model = make_cross_validator(cv={'KFold': {'n_splits': 5}})
model.cross_val_score()
model.dh.plot_KFold_splits(show=False)
return
    def test_loocv(self):
model = make_cross_validator(cv={'LeaveOneOut': {}}, test_fraction=0.6)
model.cross_val_score()
model.dh.plot_LeaveOneOut_splits(show=False)
return
#
# class TestDataLoader(unittest.TestCase):
#
# def test_OndDF(self):
# TestAllCases(
# input_features = ['a', 'b'],
# output_features=['c'], allow_nan_labels=2)
# return
#
# def test_OneDFTwoOut(self):
# TestAllCases(input_features = ['a'],
# output_features=['b', 'c'])
# return
#
# def test_MultiSources(self):
# test_multisource_basic()
# return
#
# def test_MultiUnequalSources(self):
# return
def test_AI4WaterDataSets():
config = {'intervals': [("20000101", "20011231")],
'input_features': ['precipitation_AWAP',
'evap_pan_SILO'],
'output_features': ['streamflow_MLd_inclInfilled'],
'dataset_args': {'stations': 1}
}
build_and_test_loader('CAMELS_AUS', config=config,
out_cols=['streamflow_MLd_inclInfilled'],
train_ex=358, val_ex=154, test_ex=219,
assert_uniqueness=False,
save=False)
return
def test_multisource_basic():
examples = 40
data = np.arange(int(examples * 4), dtype=np.int32).reshape(-1, examples).transpose()
df1 = pd.DataFrame(data, columns=['a', 'b', 'c', 'd'],
index=pd.date_range('20110101', periods=40, freq='D'))
df2 = pd.DataFrame(np.array([5,6]).repeat(40, axis=0).reshape(40, -1), columns=['len', 'dep'],
index=pd.date_range('20110101', periods=40, freq='D'))
input_features = [['a', 'b'], ['len', 'dep']]
output_features = [['c', 'd'], []]
lookback = 4
config = {'input_features': input_features,
'output_features': output_features,
'lookback': lookback}
build_and_test_loader(data=[df1, df2], config=config, out_cols=output_features,
train_ex=18, val_ex=8, test_ex=11,
save=True)
    # testing data as a dictionary
config['input_features'] = {'cont_data': ['a', 'b'], 'static_data': ['len', 'dep']}
config['output_features'] = {'cont_data': ['c', 'd'], 'static_data': []}
build_and_test_loader(data={'cont_data': df1, 'static_data': df2},
config=config, out_cols=config['output_features'],
train_ex=18, val_ex=8, test_ex=11,
save=True)
    # testing when output_features for one data source is not provided
config['input_features'] = {'cont_data': ['a', 'b'], 'static_data': ['len', 'dep']}
config['output_features'] = {'cont_data': ['c', 'd']}
build_and_test_loader(data={'cont_data': df1, 'static_data': df2},
config=config, out_cols=config['output_features'],
train_ex=18, val_ex=8, test_ex=11,
save=False)
    # testing with transformation
config['input_features'] = {'cont_data': ['a', 'b'], 'static_data': ['len', 'dep']}
config['transformation'] = {'cont_data': 'minmax', 'static_data': 'zscore'}
config['output_features'] = {'cont_data': ['c', 'd'], 'static_data': []}
build_and_test_loader(data={'cont_data': df1, 'static_data': df2},
config=config, out_cols=config['output_features'],
train_ex=18, val_ex=8, test_ex=11,
save=True)
    # testing with 'same' val_data
config['val_data'] = 'same'
build_and_test_loader(data={'cont_data': df1, 'static_data': df2},
config=config, out_cols=config['output_features'],
train_ex=26, val_ex=11, test_ex=11,
save=True)
    # testing with random train indices
config['val_data'] = 'same'
config['train_data'] = random.sample(list(np.arange(37)), 25)
config['input_features'] = {'cont_data': ['a', 'b'], 'static_data': ['len', 'dep']}
config['output_features'] = {'cont_data': ['c', 'd'], 'static_data': []}
build_and_test_loader(data={'cont_data': df1, 'static_data': df2},
config=config, out_cols=config['output_features'],
train_ex=25, val_ex=12, test_ex=12,
save=True)
return
def test_multisource_basic2():
examples = 40
data = np.arange(int(examples * 4), dtype=np.int32).reshape(-1, examples).transpose()
df1 = pd.DataFrame(data, columns=['a', 'b', 'c', 'd'],
index=pd.date_range('20110101', periods=40, freq='D'))
df2 = pd.DataFrame(np.array([[5],[6], [7]]).repeat(40, axis=1).transpose(), columns=['len', 'dep', 'y'],
index=pd.date_range('20110101', periods=40, freq='D'))
input_features = [['a', 'b'], ['len', 'dep']]
output_features = [['c', 'd'], ['y']]
lookback = 4
config = {'input_features': input_features,
'output_features': output_features,
'lookback': lookback}
build_and_test_loader(data=[df1, df2], config=config, out_cols=output_features,
train_ex=18, val_ex=8, test_ex=11,
save=True)
config['input_features'] = {'cont_data': ['a', 'b'], 'static_data': ['len', 'dep']}
config['output_features'] = {'cont_data': ['c', 'd'], 'static_data': ['y']}
build_and_test_loader(data={'cont_data': df1, 'static_data': df2},
config=config,
out_cols=config['output_features'],
train_ex=18, val_ex=8, test_ex=11,
save=True)
return
def test_multisource_basic3():
examples = 40
data = np.arange(int(examples * 5), dtype=np.int32).reshape(-1, examples).transpose()
y_df = pd.DataFrame(data[:, -1], columns=['y'])
y_df.loc[y_df.sample(frac=0.5).index] = np.nan
cont_df = pd.DataFrame(data[:, 0:4], columns=['a', 'b', 'c', 'd'],
index=pd.date_range('20110101', periods=40, freq='D'))
static_df = pd.DataFrame(np.array([[5],[6], [7]]).repeat(40, axis=1).transpose(), columns=['len', 'dep', 'y'],
index=pd.date_range('20110101', periods=40, freq='D'))
disc_df = pd.DataFrame(np.random.randint(0, 10, (40, 4)), columns=['cl', 'o', 'do', 'bod'],
index=pd.date_range('20110101', periods=40, freq='D'))
cont_df['y'] = y_df.values
static_df['y'] = y_df.values
disc_df['y'] = y_df.values
input_features = [['len', 'dep'], ['a', 'b'], ['cl', 'o', 'do', 'bod']]
output_features = [['y'], ['c', 'y'], ['y']]
lookback = [1, 4, 1]
config = {'input_features': input_features,
'output_features': output_features,
'test_fraction': 0.3,
'val_fraction': 0.3,
'lookback': lookback}
# build_and_test_loader(data=[static_df, cont_df, disc_df], config=config, out_cols=output_features, train_ex=6,
# val_ex=4,
# test_ex=6, save=True)
data_handler = DataHandler(data=[static_df, cont_df, disc_df], verbosity=0, **config)
data_handler.training_data()
data_handler.validation_data()
data_handler.test_data()
return
def test_multisource_multi_loc():
examples = 40
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
training_data = pd.DataFrame(data, columns=['a', 'b', 'c'],
index=pd.date_range('20110101', periods=40, freq='D'))
val_data = pd.DataFrame(data+1000.0, columns=['a', 'b', 'c'],
index=pd.date_range('20110101', periods=40, freq='D'))
test_data = pd.DataFrame(data+2000, columns=['a', 'b', 'c'],
index=pd.date_range('20110101', periods=40, freq='D'))
dh = MultiLocDataHandler()
train_x, train_y = dh.training_data(data=training_data, input_features=['a', 'b'], output_features=['c'])
valx, val_y = dh.validation_data(data=val_data, input_features=['a', 'b'], output_features=['c'])
test_x, test_y = dh.test_data(data=test_data, input_features=['a', 'b'], output_features=['c'])
assert np.allclose(train_y.reshape(-1,), training_data['c'].values.reshape(-1, ))
assert np.allclose(val_y.reshape(-1, ), val_data['c'].values.reshape(-1, ))
assert np.allclose(test_y.reshape(-1, ), test_data['c'].values.reshape(-1, ))
return
def test_multisource_basic4():
examples = 40
data = np.arange(int(examples * 4), dtype=np.int32).reshape(-1, examples).transpose()
df1 = pd.DataFrame(data, columns=['a', 'b', 'c', 'd'],
index=pd.date_range('20110101', periods=40, freq='D'))
df2 = pd.DataFrame(np.array([5,6]).repeat(40, axis=0).reshape(40, -1), columns=['len', 'dep'],
index=pd.date_range('20110101', periods=40, freq='D'))
input_features = {'cont_data':['a', 'b'], 'static_data':['len', 'dep']}
output_features = {'cont_data': ['c', 'd']}
lookback = {'cont_data': 4, 'static_data': 1}
config = {'input_features': input_features,
'output_features': output_features,
'lookback': lookback}
build_and_test_loader(data={'cont_data': df1, 'static_data': df2},
config=config, out_cols=output_features,
train_ex=18, val_ex=8, test_ex=11,
save=False)
return
def site_distributed_basic():
examples = 50
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
df = pd.DataFrame(data, columns=['a', 'b', 'c'],
index=pd.date_range('20110101', periods=examples, freq='D'))
config = {'input_features': ['a', 'b'],
'output_features': ['c'],
'lookback': 4,
'val_fraction': 0.3,
'test_fraction': 0.3,
'verbosity': 0}
data = {'0': df, '1': df+1000, '2': df+2000, '3': df+3000}
configs = {'0': config, '1': config, '2': config, '3': config}
dh = SiteDistributedDataHandler(data, configs, verbosity=0)
train_x, train_y = dh.training_data()
val_x, val_y = dh.validation_data()
test_x, test_y = dh.test_data()
assert train_x.shape == (23, len(data), config['lookback'], len(config['input_features']))
assert val_x.shape == (10, len(data), config['lookback'], len(config['input_features']))
assert test_x.shape == (14, len(data), config['lookback'], len(config['input_features']))
dh = SiteDistributedDataHandler(data, configs, training_sites=['0', '1'], validation_sites=['2'],
test_sites=['3'], verbosity=0)
train_x, train_y = dh.training_data()
val_x, val_y = dh.validation_data()
test_x, test_y = dh.test_data()
assert train_x.shape == (len(df)-config['lookback']+1, 2, config['lookback'], len(config['input_features']))
assert val_x.shape == (len(df)-config['lookback']+1, 1, config['lookback'], len(config['input_features']))
assert test_x.shape == (len(df)-config['lookback']+1, 1, config['lookback'], len(config['input_features']))
def site_distributed_diff_lens():
examples = 50
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
df = pd.DataFrame(data, columns=['a', 'b', 'c'],
index=pd.date_range('20110101', periods=examples, freq='D'))
config = {'input_features': ['a', 'b'],
'output_features': ['c'],
'lookback': 4,
'verbosity': 0}
data = {'0': df,
'1': pd.concat([df, df], axis=0)+1000,
'2': pd.concat([df, df, df], axis=0)+2000,
'3': df +3000
}
configs = {'0': config, '1': config, '2': config, '3': config}
#dh = SiteDistributedDataHandler(data, configs) # This should raise NotImplementedError
dh = SiteDistributedDataHandler(data, configs, allow_variable_len=True, verbosity=0)
train_x, train_y = dh.training_data()
val_x, val_y = dh.validation_data()
test_x, test_y = dh.test_data()
assert isinstance(train_x, dict)
dh = SiteDistributedDataHandler(data, configs, training_sites=['0', '1'], validation_sites=['2'],
test_sites=['3'], allow_variable_len=True, verbosity=0)
train_x, train_y = dh.training_data()
val_x, val_y = dh.validation_data()
test_x, test_y = dh.test_data()
assert isinstance(train_x, dict)
def site_distributed_multiple_srcs():
examples = 40
data = np.arange(int(examples * 4), dtype=np.int32).reshape(-1, examples).transpose()
cont_df = pd.DataFrame(data, columns=['a', 'b', 'c', 'd'],
                           index=pd.date_range('20110101', periods=examples, freq='D'))
# -*- coding: utf-8 -*-
import json
import base64
import datetime
import requests
import pathlib
import math
import pandas as pd
import flask
import dash
import dash_core_components as dcc
import dash_html_components as html
import plotly.plotly as py
import plotly.graph_objs as go
from dash.dependencies import Input, Output, State
from plotly import tools
app = dash.Dash(
__name__, meta_tags=[{"name": "viewport", "content": "width=device-width"}]
)
server = app.server
PATH = pathlib.Path(__file__).parent
DATA_PATH = PATH.joinpath("data").resolve()
# Loading historical tick data
currency_pair_data = {
"EURUSD": pd.read_csv(
DATA_PATH.joinpath("EURUSD.csv"), index_col=1, parse_dates=["Date"]
),
"USDJPY": pd.read_csv(
DATA_PATH.joinpath("USDJPY.csv"), index_col=1, parse_dates=["Date"]
),
"GBPUSD": pd.read_csv(
DATA_PATH.joinpath("GBPUSD.csv"), index_col=1, parse_dates=["Date"]
),
"USDCHF": pd.read_csv(
DATA_PATH.joinpath("USDCHF.csv"), index_col=1, parse_dates=["Date"]
),
}
# Currency pairs
currencies = ["EURUSD", "USDCHF", "USDJPY", "GBPUSD"]
# API Requests for news div
news_requests = requests.get(
"https://newsapi.org/v2/top-headlines?sources=bbc-news&apiKey=da8e2e705b914f9f86ed2e9692e66012"
)
# API Call to update news
def update_news():
json_data = news_requests.json()["articles"]
df = pd.DataFrame(json_data)
df = pd.DataFrame(df[["title", "url"]])
max_rows = 10
return html.Div(
children=[
html.P(className="p-news", children="Headlines"),
html.P(
className="p-news float-right",
children="Last update : "
+ datetime.datetime.now().strftime("%H:%M:%S"),
),
html.Table(
className="table-news",
children=[
html.Tr(
children=[
html.Td(
children=[
html.A(
className="td-link",
children=df.iloc[i]["title"],
href=df.iloc[i]["url"],
target="_blank",
)
]
)
]
)
for i in range(min(len(df), max_rows))
],
),
]
)
# Returns dataset for currency pair with nearest datetime to current time
def first_ask_bid(currency_pair, t):
t = t.replace(year=2016, month=1, day=5)
items = currency_pair_data[currency_pair]
dates = items.index.to_pydatetime()
index = min(dates, key=lambda x: abs(x - t))
df_row = items.loc[index]
int_index = items.index.get_loc(index)
return [df_row, int_index] # returns dataset row and index of row
# Creates HTML Bid and Ask (Buy/Sell buttons)
def get_row(data):
index = data[1]
current_row = data[0]
return html.Div(
children=[
# Summary
html.Div(
id=current_row[0] + "summary",
className="row summary",
n_clicks=0,
children=[
html.Div(
id=current_row[0] + "row",
className="row",
children=[
html.P(
current_row[0], # currency pair name
id=current_row[0],
className="three-col",
),
html.P(
current_row[1].round(5), # Bid value
id=current_row[0] + "bid",
className="three-col",
),
html.P(
current_row[2].round(5), # Ask value
id=current_row[0] + "ask",
className="three-col",
),
html.Div(
index,
id=current_row[0]
+ "index", # we save index of row in hidden div
style={"display": "none"},
),
],
)
],
),
# Contents
html.Div(
id=current_row[0] + "contents",
className="row details",
children=[
# Button for buy/sell modal
html.Div(
className="button-buy-sell-chart",
children=[
html.Button(
id=current_row[0] + "Buy",
children="Buy/Sell",
n_clicks=0,
)
],
),
# Button to display currency pair chart
html.Div(
className="button-buy-sell-chart-right",
children=[
html.Button(
id=current_row[0] + "Button_chart",
children="Chart",
n_clicks=1
if current_row[0] in ["EURUSD", "USDCHF"]
else 0,
)
],
),
],
),
]
)
# color of Bid & Ask rates
def get_color(a, b):
if a == b:
return "white"
elif a > b:
return "#45df7e"
else:
return "#da5657"
# Replace ask_bid row for currency pair with colored values
def replace_row(currency_pair, index, bid, ask):
index = index + 1 # index of new data row
new_row = (
currency_pair_data[currency_pair].iloc[index]
if index != len(currency_pair_data[currency_pair])
else first_ask_bid(currency_pair, datetime.datetime.now())
) # if not the end of the dataset we retrieve next dataset row
return [
html.P(
currency_pair, id=currency_pair, className="three-col" # currency pair name
),
html.P(
new_row[1].round(5), # Bid value
id=new_row[0] + "bid",
className="three-col",
style={"color": get_color(new_row[1], bid)},
),
html.P(
new_row[2].round(5), # Ask value
className="three-col",
id=new_row[0] + "ask",
style={"color": get_color(new_row[2], ask)},
),
html.Div(
index, id=currency_pair + "index", style={"display": "none"}
), # save index in hidden div
]
# Display big numbers in readable format
def human_format(num):
try:
num = float(num)
# If value is 0
if num == 0:
return 0
# Else value is a number
if num < 1000000:
return num
magnitude = int(math.log(num, 1000))
mantissa = str(int(num / (1000 ** magnitude)))
return mantissa + ["", "K", "M", "G", "T", "P"][magnitude]
except:
return num
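# Example (sketch): human_format(950) -> 950.0, human_format(2500000) -> "2M",
# human_format(7.3e9) -> "7G"; suffixes follow the ["", "K", "M", "G", "T", "P"] list above.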
# Returns Top cell bar for header area
def get_top_bar_cell(cellTitle, cellValue):
return html.Div(
className="two-col",
children=[
html.P(className="p-top-bar", children=cellTitle),
html.P(id=cellTitle, className="display-none", children=cellValue),
html.P(children=human_format(cellValue)),
],
)
# Returns HTML Top Bar for app layout
def get_top_bar(
balance=50000, equity=50000, margin=0, fm=50000, m_level="%", open_pl=0
):
return [
get_top_bar_cell("Balance", balance),
get_top_bar_cell("Equity", equity),
get_top_bar_cell("Margin", margin),
get_top_bar_cell("Free Margin", fm),
get_top_bar_cell("Margin Level", m_level),
get_top_bar_cell("Open P/L", open_pl),
]
####### STUDIES TRACES ######
# Moving average
def moving_average_trace(df, fig):
df2 = df.rolling(window=5).mean()
trace = go.Scatter(
x=df2.index, y=df2["close"], mode="lines", showlegend=False, name="MA"
)
fig.append_trace(trace, 1, 1) # plot in first row
return fig
# Exponential moving average
def e_moving_average_trace(df, fig):
    df2 = df.rolling(window=20).mean()  # NOTE: a simple 20-period rolling mean, despite the "EMA" name
trace = go.Scatter(
x=df2.index, y=df2["close"], mode="lines", showlegend=False, name="EMA"
)
fig.append_trace(trace, 1, 1) # plot in first row
return fig
# Bollinger Bands
def bollinger_trace(df, fig, window_size=10, num_of_std=5):
price = df["close"]
rolling_mean = price.rolling(window=window_size).mean()
rolling_std = price.rolling(window=window_size).std()
upper_band = rolling_mean + (rolling_std * num_of_std)
lower_band = rolling_mean - (rolling_std * num_of_std)
trace = go.Scatter(
x=df.index, y=upper_band, mode="lines", showlegend=False, name="BB_upper"
)
trace2 = go.Scatter(
x=df.index, y=rolling_mean, mode="lines", showlegend=False, name="BB_mean"
)
trace3 = go.Scatter(
x=df.index, y=lower_band, mode="lines", showlegend=False, name="BB_lower"
)
fig.append_trace(trace, 1, 1) # plot in first row
fig.append_trace(trace2, 1, 1) # plot in first row
fig.append_trace(trace3, 1, 1) # plot in first row
return fig
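# Example (sketch, hypothetical figure setup): the study helpers above expect a
# plotly subplot figure and draw into its first row, e.g.
#   fig = tools.make_subplots(rows=2, cols=1, shared_xaxes=True)
#   fig = moving_average_trace(df, fig)   # overlay the MA on the price row
#   fig = bollinger_trace(df, fig)        # overlay the three Bollinger bands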
# Accumulation Distribution
def accumulation_trace(df):
df["volume"] = ((df["close"] - df["low"]) - (df["high"] - df["close"])) / (
df["high"] - df["low"]
)
trace = go.Scatter(
x=df.index, y=df["volume"], mode="lines", showlegend=False, name="Accumulation"
)
return trace
# Commodity Channel Index
def cci_trace(df, ndays=5):
TP = (df["high"] + df["low"] + df["close"]) / 3
CCI = pd.Series(
(TP - TP.rolling(window=10, center=False).mean())
/ (0.015 * TP.rolling(window=10, center=False).std()),
name="cci",
)
trace = go.Scatter(x=df.index, y=CCI, mode="lines", showlegend=False, name="CCI")
return trace
# Price Rate of Change
def roc_trace(df, ndays=5):
N = df["close"].diff(ndays)
D = df["close"].shift(ndays)
ROC = pd.Series(N / D, name="roc")
trace = go.Scatter(x=df.index, y=ROC, mode="lines", showlegend=False, name="ROC")
return trace
# Stochastic oscillator %K
def stoc_trace(df):
SOk = pd.Series((df["close"] - df["low"]) / (df["high"] - df["low"]), name="SO%k")
trace = go.Scatter(x=df.index, y=SOk, mode="lines", showlegend=False, name="SO%k")
return trace
# Momentum
def mom_trace(df, n=5):
M = pd.Series(df["close"].diff(n), name="Momentum_" + str(n))
trace = go.Scatter(x=df.index, y=M, mode="lines", showlegend=False, name="MOM")
return trace
# Pivot points
def pp_trace(df, fig):
PP = pd.Series((df["high"] + df["low"] + df["close"]) / 3)
R1 = pd.Series(2 * PP - df["low"])
S1 = pd.Series(2 * PP - df["high"])
R2 = pd.Series(PP + df["high"] - df["low"])
S2 = pd.Series(PP - df["high"] + df["low"])
    R3 = pd.Series(df["high"] + 2 * (PP - df["low"]))
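    S3 = pd.Series(df["low"] - 2 * (df["high"] - PP))
    # NOTE: assumed completion of the truncated original: draw each pivot level
    # on the price chart (first subplot row), matching the other study traces.
    for level, name in zip(
        [PP, R1, S1, R2, S2, R3, S3], ["PP", "R1", "S1", "R2", "S2", "R3", "S3"]
    ):
        trace = go.Scatter(
            x=df.index, y=level, mode="lines", showlegend=False, name=name
        )
        fig.append_trace(trace, 1, 1)
    return fig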
#!/usr/bin/env python
from __future__ import print_function
import os
import sys
import nba_py
import sqlite3
import pandas as pd
def silverK(MOV, elo_diff):
"""Calculate K constant (Source:
https://www.ergosum.co/nate-silvers-nba-elo-algorithm/).
Args:
MOV - Margin of victory.
elo_diff - ELO difference of teams.
Returns:
0, 1 - K constant
"""
K_0 = 20
if MOV > 0:
multiplier = (MOV + 3) ** (0.8) / (7.5 + 0.006 * (elo_diff))
else:
multiplier = ( -MOV + 3) ** (0.8) / (7.5 + 0.006 * (-elo_diff))
return K_0 * multiplier, K_0 * multiplier
def silverS(home_score, away_score):
"""Calculate S for each team (Source:
https://www.ergosum.co/nate-silvers-nba-elo-algorithm/).
Args:
home_score - score of home team.
away_score - score of away team.
Returns:
0: - S for the home team.
1: - S for the away team.
"""
S_home, S_away = 0, 0
if home_score > away_score:
S_home = 1
elif away_score > home_score:
S_away = 1
else:
S_home, S_away = .5, .5
return S_home, S_away
def elo_prediction(home_rating, away_rating):
"""Calculate the expected win probability of the home team (Source:
https://www.ergosum.co/nate-silvers-nba-elo-algorithm/).
Args:
home_rating - initial home elo score.
away_rating - initial away elo score.
Returns:
E_home - expected win probability of the home team.
"""
E_home = 1. / (1 + 10 ** ((away_rating - home_rating) / (400.)))
return E_home
def silver_elo_update(home_score, away_score, home_rating, away_rating):
"""Calculate change in elo for home and away teams Source:
https://www.ergosum.co/nate-silvers-nba-elo-algorithm/).
Args:
home_score: score of home team.
away_score: score of away team.
home_rating: initial home elo score.
away_rating: initial away elo score.
Returns:
0: change in home elo.
1: change in away elo.
"""
HOME_AD = 100.
home_rating += HOME_AD
E_home = elo_prediction(home_rating, away_rating)
E_away = 1 - E_home
elo_diff = home_rating - away_rating
MOV = home_score - away_score
S_home,S_away = silverS(home_score,away_score)
    # Both branches of the original conditional were identical; silverK already
    # accounts for the sign of MOV, so a single call is enough.
    K_home, K_away = silverK(MOV, elo_diff)
return K_home * (S_home - E_home), K_away * (S_away - E_away)
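# Example (sketch): applying the update to a single game. If a 1600-rated home
# team beats a 1500-rated away team 102-96:
#   d_home, d_away = silver_elo_update(102, 96, 1600, 1500)
#   new_home, new_away = 1600 + d_home, 1500 + d_away
# d_home is positive and d_away has the same magnitude with opposite sign.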
def init_db(data=os.path.join("../", "data", "external", "nbaallelo.csv"),
db=os.path.join("..", "data", "outputs", "NBA_ELO.db")):
"""Recreate db from original 538 data.
Args:
data - path to 538 csv.
db - path to resulting db.
"""
# -- Read 538 ELO data.
cols0 = ["game_id", "date_game", "team_id", "pts", "elo_i", "elo_n"]
df = pd.read_csv(data, usecols=cols0)
df.date_game = pd.to_datetime(df.date_game)
# -- Reformat (1 game, 1 row), and write to db.
away, home = df[df.index % 2 == 0], df[df.index % 2 == 1]
with sqlite3.connect(db) as conn:
away.merge(home, on=["game_id", "date_game"],
suffixes=("_away", "_home")) \
.drop("game_id", axis=1) \
.replace({"team_id_away": {"BKN": u"BRK", "PHX": u"PHO"},
"team_id_home": {"BKN": u"BRK", "PHX": u"PHO"}}) \
.to_sql("nba_elo", conn, if_exists="replace")
def read_db(db=os.path.join("..", "data", "outputs", "NBA_ELO.db")):
"""Read data from db.
Args:
db - path to db.
Returns:
Pandas dataframe of nba_elo table.
"""
# -- Load data from db.
with sqlite3.connect(db) as conn:
return pd.read_sql("SELECT * FROM nba_elo", conn,
index_col="index", parse_dates=["date_game"])
def get_games(db=os.path.join("..", "data", "outputs", "NBA_ELO.db")):
"""Using nba_py get data for all games that are not in the DB.
NOTE: seasons variable must be updated for future seasons (this is dumb).
Returns:
new_games - pandas dataframe of all new games.
"""
# -- Exclude pre-season.
seasons = pd.concat([pd.date_range("2015-10-27", "2016-06-02").to_series(),
pd.date_range("2016-10-25", "2017-06-01").to_series(),
pd.date_range("2017-10-17", "2018-06-17").to_series()])
# -- Load data from db.
df = read_db(db)
new_games = pd.DataFrame() # Empty df to append new game data.
cols1 = ["GAME_ID", "GAME_DATE_EST", "TEAM_ABBREVIATION", "PTS"]
# -- For each day since last game, check for game data.
for day in pd.date_range(df.date_game.max(), pd.datetime.today()):
if day in seasons:
print("Collecting data for games on: {}".format(day.date()), end="\r")
sys.stdout.flush()
try:
sb = nba_py.Scoreboard(day.month, day.day, day.year)
days_games = sb.line_score()[cols1]
if len(days_games) > 0 :
away = days_games[days_games.index % 2 == 0]
home = days_games[days_games.index % 2 == 1]
days_games = away.merge(home,
on=["GAME_DATE_EST", "GAME_ID"],
suffixes=("_AWAY", "_HOME")) \
.drop("GAME_ID", axis=1)
new_games = pd.concat([new_games, days_games])
except:
print("Error at {}. Rerun, to continue from here.".format(day))
break
return new_games
def update_db(new_games,
db=os.path.join("..", "data", "outputs", "NBA_ELO.db")):
"""Puts new games into db.
Args:
new_games - pandas dataframe containing new game info.
db - path to db.
"""
cols = {"GAME_DATE_EST": "date_game", "TEAM_ABBREVIATION_AWAY": "team_id_away",
"PTS_AWAY": "pts_away", "TEAM_ABBREVIATION_HOME": "team_id_home",
"PTS_HOME": "pts_home"}
with sqlite3.connect(db) as conn:
tmp = pd.concat([read_db(db), new_games.rename(columns=cols)]) \
.reset_index(drop=True) \
.replace({"team_id_away": {"BKN": u"BRK", "PHX": u"PHO"},
"team_id_home": {"BKN": u"BRK", "PHX": u"PHO"}})
tmp.date_game = tmp.date_game.astype(str)
tmp.to_sql("nba_elo", conn, if_exists="replace")
def last_elo(df):
"""Calculate the last ELO for each team (past 2016).
Args:
df - pandas dataframe containing database table.
Returns:
last_elo (dict) - dictionary where tm: elo score.
max_date - last date in table.
"""
last_elo = {}
max_date = pd.datetime(2014, 1, 1)
for tm in df[df.date_game > pd.datetime(2016, 1, 1)].team_id_away.unique():
try:
# -- Subset table to get most recent record with an ELO rating.
tmp = df[((~df.elo_i_home.isnull()) & (df.team_id_away == tm)) |
((~df.elo_i_home.isnull()) & (df.team_id_home == tm))] \
.sort_values("date_game").iloc[-1]
max_date = max(max_date, tmp.date_game)
except:
print("Error with: {}".format(tm))
# -- Store ELO in dictionary.
if tmp.team_id_home == tm:
last_elo[tm] = tmp.elo_n_home
else:
last_elo[tm] = tmp.elo_n_away
return last_elo, max_date
def new_season(elo_dict):
"""Update ELO score when rolling over into a new season.
Args:
elo_dict - last ELO scores for each team.
Return:
elo_dict - updated ELO scores.
"""
for tm in elo_dict.keys():
elo_dict[tm] = elo_dict[tm] * 0.75 + 1505 * 0.25
return elo_dict
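# Example (sketch): a team ending a season at ELO 1700 starts the next one at
# 0.75 * 1700 + 0.25 * 1505 = 1651.25, i.e. ratings are pulled a quarter of the
# way back towards the 1505 baseline.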
def update_elo(df, db=os.path.join("..", "data", "outputs", "NBA_ELO.db"),
in_season=False):
"""Update ELO score for new records in nba_elo table and write to db.
Args:
df - pandas dataframe of nba_elo.
        db - path to db.
in_season - Do the ELO scores need to be rolled over between seasons?
"""
    seasons = [pd.date_range("2015-10-27", "2016-06-02"),
               pd.date_range("2016-10-25", "2017-06-01"),
               pd.date_range("2017-10-17", "2018-06-17")]  # same season ranges as in get_games()
# -*- coding: utf-8 -*-
# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)
"""Unit tests for (dunder) composition functionality attached to the base class."""
__author__ = ["fkiraly"]
__all__ = []
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sktime.transformations.compose import FeatureUnion, TransformerPipeline
from sktime.transformations.panel.padder import PaddingTransformer
from sktime.transformations.series.exponent import ExponentTransformer
from sktime.transformations.series.impute import Imputer
from sktime.utils._testing.deep_equals import deep_equals
from sktime.utils._testing.estimator_checks import _assert_array_almost_equal
def test_dunder_mul():
"""Test the mul dunder method."""
X = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
t1 = ExponentTransformer(power=2)
t2 = ExponentTransformer(power=5)
t3 = ExponentTransformer(power=0.1)
t4 = ExponentTransformer(power=1)
t12 = t1 * t2
t123 = t12 * t3
t312 = t3 * t12
t1234 = t123 * t4
t1234_2 = t12 * (t3 * t4)
assert isinstance(t12, TransformerPipeline)
assert isinstance(t123, TransformerPipeline)
assert isinstance(t312, TransformerPipeline)
assert isinstance(t1234, TransformerPipeline)
assert isinstance(t1234_2, TransformerPipeline)
assert [x.power for x in t12.steps] == [2, 5]
assert [x.power for x in t123.steps] == [2, 5, 0.1]
assert [x.power for x in t312.steps] == [0.1, 2, 5]
assert [x.power for x in t1234.steps] == [2, 5, 0.1, 1]
assert [x.power for x in t1234_2.steps] == [2, 5, 0.1, 1]
_assert_array_almost_equal(X, t123.fit_transform(X))
_assert_array_almost_equal(X, t312.fit_transform(X))
_assert_array_almost_equal(X, t1234.fit_transform(X))
_assert_array_almost_equal(X, t1234_2.fit_transform(X))
_assert_array_almost_equal(t12.fit_transform(X), t3.fit(X).inverse_transform(X))
def test_dunder_add():
"""Test the add dunder method."""
X = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
t1 = ExponentTransformer(power=2)
t2 = ExponentTransformer(power=5)
t3 = ExponentTransformer(power=3)
t12 = t1 + t2
t123 = t12 + t3
t123r = t1 + (t2 + t3)
assert isinstance(t12, FeatureUnion)
assert isinstance(t123, FeatureUnion)
assert isinstance(t123r, FeatureUnion)
assert [x.power for x in t12.transformer_list] == [2, 5]
assert [x.power for x in t123.transformer_list] == [2, 5, 3]
assert [x.power for x in t123r.transformer_list] == [2, 5, 3]
_assert_array_almost_equal(t123r.fit_transform(X), t123.fit_transform(X))
def test_mul_sklearn_autoadapt():
"""Test auto-adapter for sklearn in mul."""
    X = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
# -*- coding: utf-8 -*-
""" Project : PyCoA
Date : april 2020 - march 2021
Authors : <NAME>, <NAME>, <NAME>
Copyright ©pycoa.fr
License: See joint LICENSE file
Module : coa.geo
About :
-------
Geo classes within the PyCoA framework.
GeoManager class provides translations between naming normalisations
of countries. It's based on the pycountry module.
GeoInfo class allows adding new fields to a pandas DataFrame with
statistical information about countries.
GeoRegion class helps return the list of countries in a specified region.
GeoCountry manages information for a single country.
"""
import inspect # for debug purpose
import warnings
import pycountry as pc
import pycountry_convert as pcc
import pandas as pd
import geopandas as gpd
import shapely.geometry as sg
import shapely.affinity as sa
import shapely.ops as so
import bs4
from coa.tools import verb,kwargs_test,get_local_from_url,dotdict,tostdstring
from coa.error import *
# ---------------------------------------------------------------------
# --- GeoManager class ------------------------------------------------
# ---------------------------------------------------------------------
class GeoManager():
"""GeoManager class definition. No inheritance from any other class.
It should raise only CoaError and derived exceptions in case
of errors (see pycoa.error)
"""
_list_standard=['iso2', # Iso2 standard, default
'iso3', # Iso3 standard
'name', # Standard name ( != Official, caution )
'num'] # Numeric standard
_list_db=[None,'jhu','worldometers','owid','opencovid19national','spfnational'] # first is default
_list_output=['list','dict','pandas'] # first is default
_standard = None # currently used normalisation standard
def __init__(self,standard=_list_standard[0]):
""" __init__ member function, with default definition of
the used standard. To get the current default standard,
see get_list_standard()[0].
"""
verb("Init of GeoManager() from "+str(inspect.stack()[1]))
self.set_standard(standard)
self._gr=GeoRegion()
def get_GeoRegion(self):
""" return the GeoRegion local instance
"""
return self._gr
def get_region_list(self):
""" return the list of region via the GeoRegion instance
"""
return self._gr.get_region_list()
def get_list_standard(self):
""" return the list of supported standard name of countries.
First one is default for the class
"""
return self._list_standard
def get_list_output(self):
""" return supported list of output type. First one is default
for the class
"""
return self._list_output
def get_list_db(self):
""" return supported list of database name for translation of
country names to standard.
"""
return self._list_db
def get_standard(self):
""" return current standard use within the GeoManager class
"""
return self._standard
def set_standard(self,standard):
"""
set the working standard type within the GeoManager class.
The standard should meet the get_list_standard() requirement
"""
if not isinstance(standard,str):
raise CoaTypeError('GeoManager error, the standard argument'
' must be a string')
if standard not in self.get_list_standard():
raise CoaKeyError('GeoManager.set_standard error, "'+\
standard+' not managed. Please see '\
'get_list_standard() function')
self._standard=standard
return self.get_standard()
def to_standard(self, w, **kwargs):
"""Given a list of string of locations (countries), returns a
normalised list according to the used standard (defined
via the setStandard() or __init__ function. Current default is iso2.
Arguments
-----------------
first arg -- w, list of string of locations (or single string)
to convert to standard one
output -- 'list' (default), 'dict' or 'pandas'
db -- database name to help conversion.
Default : None, meaning best effort to convert.
                       Known databases : jhu, worldometers...
See get_list_db() for full list of known db for
standardization
interpret_region -- Boolean, default=False. If yes, the output should
be only 'list'.
"""
kwargs_test(kwargs,['output','db','interpret_region'],'Bad args used in the to_standard() function.')
output=kwargs.get('output',self.get_list_output()[0])
if output not in self.get_list_output():
raise CoaKeyError('Incorrect output type. See get_list_output()'
' or help.')
db=kwargs.get('db',self.get_list_db()[0])
if db not in self.get_list_db():
raise CoaDbError('Unknown database "'+db+'" for translation to '
'standardized location names. See get_list_db() or help.')
interpret_region=kwargs.get('interpret_region',False)
if not isinstance(interpret_region,bool):
raise CoaTypeError('The interpret_region argument is a boolean, '
'not a '+str(type(interpret_region)))
if interpret_region==True and output!='list':
raise CoaKeyError('The interpret_region True argument is incompatible '
'with non list output option.')
if isinstance(w,str):
w=[w]
elif not isinstance(w,list):
raise CoaTypeError('Waiting for str, list of str or pandas'
                ' as input of the to_standard() function member of GeoManager')
w=[v.title() for v in w] # capitalize first letter of each name
w0=w.copy()
if db:
w=self.first_db_translation(w,db)
n=[] # will contain standardized name of countries (if possible)
#for c in w:
while len(w)>0:
c=w.pop(0)
if type(c)==int:
c=str(c)
elif type(c)!=str:
raise CoaTypeError('Locations should be given as '
'strings or integers only')
if (c in self._gr.get_region_list()) and interpret_region == True:
w=self._gr.get_countries_from_region(c)+w
else:
if len(c)==0:
n1='' #None
else:
try:
n0=pc.countries.lookup(c)
except LookupError:
try:
if c.startswith('Owid_'):
nf=['owid_*']
n1='OWID_*'
else:
nf=pc.countries.search_fuzzy(c)
if len(nf)>1:
warnings.warn('Caution. More than one country match the key "'+\
c+'" : '+str([ (k.name+', ') for k in nf])+\
', using first one.\n')
n0=nf[0]
except LookupError:
raise CoaLookupError('No country match the key "'+c+'". Error.')
except Exception as e1:
                            raise CoaNotManagedError('Not managed error '+str(type(e1)))
except Exception as e2:
                        raise CoaNotManagedError('Not managed error '+str(type(e2)))
if n0 != 'owid_*':
if self._standard=='iso2':
n1=n0.alpha_2
elif self._standard=='iso3':
n1=n0.alpha_3
elif self._standard=='name':
n1=n0.name
elif self._standard=='num':
n1=n0.numeric
else:
raise CoaKeyError('Current standard is '+self._standard+\
' which is not managed. Error.')
n.append(n1)
if output=='list':
return n
elif output=='dict':
return dict(zip(w0, n))
elif output=='pandas':
return pd.DataFrame({'inputname':w0,self._standard:n})
else:
return None # should not be here
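    # Example (sketch): converting country names to the current standard
    # (default 'iso2'); with interpret_region=True a region name is expanded
    # into its member countries (output must then be 'list').
    #   g = GeoManager()
    #   g.to_standard(['France', 'Italy'])                        # -> ['FR', 'IT']
    #   g.to_standard(['European Union'], interpret_region=True)  # -> iso2 codes of EU members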
def first_db_translation(self,w,db):
""" This function helps to translate from country name to
standard for specific databases. It's the first step
before final translation.
One can easily add some database support adding some new rules
for specific databases
"""
translation_dict={}
# Caution : keys need to be in title mode, i.e. first letter capitalized
if db=='jhu':
translation_dict.update({\
"Congo (Brazzaville)":"Republic of the Congo",\
"Congo (Kinshasa)":"COD",\
"Korea, South":"KOR",\
"Taiwan*":"Taiwan",\
"Laos":"LAO",\
"West Bank And Gaza":"PSE",\
"Burma":"Myanmar",\
"Iran":"IRN",\
"<NAME>":"",\
"Ms Zaandam":"",\
"Summer Olympics 2020":"",\
"Micronesia":"FSM",\
}) # last two are names of boats
elif db=='worldometers':
translation_dict.update({\
"Dr Congo":"COD",\
"Congo":"COG",\
"Iran":"IRN",\
"South Korea":"KOR",\
"North Korea":"PRK",\
"Czech Republic (Czechia)":"CZE",\
"Laos":"LAO",\
"Sao Tome & Principe":"STP",\
"Channel Islands":"JEY",\
"St. Vincent & Grenadines":"VCT",\
"U.S. Virgin Islands":"VIR",\
"Saint Kitts & Nevis":"KNA",\
"Faeroe Islands":"FRO",\
"Caribbean Netherlands":"BES",\
"Wallis & Futuna":"WLF",\
"Saint Pierre & Miquelon":"SPM",\
"Sint Maarten":"SXM",\
} )
elif db=='owid':
translation_dict.update({\
"Bonaire Sint Eustatius And Saba":"BES",\
"Cape Verde":"CPV",\
"Democratic Republic Of Congo":"COD",\
"Faeroe Islands":"FRO",\
"Laos":"LAO",\
"South Korea":"KOR",\
"Swaziland":"SWZ",\
"United States Virgin Islands":"VIR",\
"Iran":"IRN",\
"Micronesia (Country)":"FSM",\
"Northern Cyprus":"CYP",\
"Curacao":"CUW",\
"Faeroe Islands":"FRO",\
"Vatican":"VAT"
})
return [translation_dict.get(k,k) for k in w]
# ---------------------------------------------------------------------
# --- GeoInfo class ---------------------------------------------------
# ---------------------------------------------------------------------
class GeoInfo():
"""GeoInfo class definition. No inheritance from any other class.
It should raise only CoaError and derived exceptions in case
of errors (see pycoa.error)
"""
_list_field={\
'continent_code':'pycountry_convert (https://pypi.org/project/pycountry-convert/)',\
'continent_name':'pycountry_convert (https://pypi.org/project/pycountry-convert/)' ,\
'country_name':'pycountry_convert (https://pypi.org/project/pycountry-convert/)' ,\
'population':'https://www.worldometers.info/world-population/population-by-country/',\
'area':'https://www.worldometers.info/world-population/population-by-country/',\
'fertility':'https://www.worldometers.info/world-population/population-by-country/',\
'median_age':'https://www.worldometers.info/world-population/population-by-country/',\
'urban_rate':'https://www.worldometers.info/world-population/population-by-country/',\
#'geometry':'https://github.com/johan/world.geo.json/',\
'geometry':'http://thematicmapping.org/downloads/world_borders.php and https://github.com/johan/world.geo.json/',\
'region_code_list':'https://en.wikipedia.org/w/index.php?title=List_of_countries_by_United_Nations_geoscheme&oldid=1008989486',\
#https://en.wikipedia.org/wiki/List_of_countries_by_United_Nations_geoscheme',\
'region_name_list':'https://en.wikipedia.org/w/index.php?title=List_of_countries_by_United_Nations_geoscheme&oldid=1008989486',\
#https://en.wikipedia.org/wiki/List_of_countries_by_United_Nations_geoscheme',\
'capital':'https://en.wikipedia.org/w/index.php?title=List_of_countries_by_United_Nations_geoscheme&oldid=1008989486',\
#https://en.wikipedia.org/wiki/List_of_countries_by_United_Nations_geoscheme',\
'flag':'https://github.com/linssen/country-flag-icons/blob/master/countries.json',\
}
_data_geometry = pd.DataFrame()
_data_population = pd.DataFrame()
_data_flag = pd.DataFrame()
def __init__(self,gm=0):
""" __init__ member function.
"""
verb("Init of GeoInfo() from "+str(inspect.stack()[1]))
if gm != 0:
self._gm=gm
else:
self._gm=GeoManager()
self._grp=self._gm._gr.get_pandas()
def get_GeoManager(self):
""" return the local instance of used GeoManager()
"""
return self._gm
def get_list_field(self):
""" return the list of supported additionnal fields available
"""
return sorted(list(self._list_field.keys()))
def get_source(self,field=None):
""" return the source of the information provided for a given
field.
"""
if field==None:
return self._list_field
elif field not in self.get_list_field():
raise CoaKeyError('The field "'+str(field)+'" is not '
'a supported field of GeoInfo(). Please see help or '
'the get_list_field() output.')
return field+' : '+self._list_field[field]
def add_field(self,**kwargs):
""" this is the main function of the GeoInfo class. It adds to
the input pandas dataframe some fields according to
the geofield field of input.
The return value is the pandas dataframe.
Arguments :
field -- should be given as a string of list of strings and
should be valid fields (see get_list_field() )
Mandatory.
input -- provide the input pandas dataframe. Mandatory.
geofield -- provide the field name in the pandas where the
location is stored. Default : 'location'
overload -- Allow to overload a field. Boolean value.
Default : False
"""
# --- kwargs analysis ---
kwargs_test(kwargs,['field','input','geofield','overload'],
'Bad args used in the add_field() function.')
p=kwargs.get('input',None) # the panda
if not isinstance(p,pd.DataFrame):
raise CoaTypeError('You should provide a valid input pandas'
' DataFrame as input. See help.')
p=p.copy()
overload=kwargs.get('overload',False)
if not isinstance(overload,bool):
raise CoaTypeError('The overload option should be a boolean.')
fl=kwargs.get('field',None) # field list
if fl == None:
raise CoaKeyError('No field given. See help.')
if not isinstance(fl,list):
fl=[fl]
if not all(f in self.get_list_field() for f in fl):
raise CoaKeyError('All fields are not valid or supported '
'ones. Please see help of get_list_field()')
if not overload and not all(f not in p.columns.tolist() for f in fl):
raise CoaKeyError('Some fields already exist in you panda '
'dataframe columns. You may set overload to True.')
geofield=kwargs.get('geofield','location')
if not isinstance(geofield,str):
raise CoaTypeError('The geofield should be given as a '
'string.')
if geofield not in p.columns.tolist():
raise CoaKeyError('The geofield "'+geofield+'" given is '
'not a valid column name of the input pandas dataframe.')
self._gm.set_standard('iso2')
countries_iso2=self._gm.to_standard(p[geofield].tolist())
self._gm.set_standard('iso3')
countries_iso3=self._gm.to_standard(p[geofield].tolist())
p['iso2_tmp']=countries_iso2
p['iso3_tmp']=countries_iso3
# --- loop over all needed fields ---
for f in fl:
if f in p.columns.tolist():
p=p.drop(f,axis=1)
# ----------------------------------------------------------
if f == 'continent_code':
p[f] = [pcc.country_alpha2_to_continent_code(k) for k in countries_iso2]
# ----------------------------------------------------------
elif f == 'continent_name':
p[f] = [pcc.convert_continent_code_to_continent_name( \
pcc.country_alpha2_to_continent_code(k) ) for k in countries_iso2 ]
# ----------------------------------------------------------
elif f == 'country_name':
p[f] = [pcc.country_alpha2_to_country_name(k) for k in countries_iso2]
# ----------------------------------------------------------
elif f in ['population','area','fertility','median_age','urban_rate']:
if self._data_population.empty:
field_descr=( (0,'','idx'),
(1,'Country','country'),
(2,'Population','population'),
(6,'Land Area','area'),
(8,'Fert','fertility'),
(9,'Med','median_age'),
(10,'Urban','urban_rate'),
                ) # contains tuples with position in table, name of column, new name of field
# get data with cache ok for about 1 month
self._data_population = pd.read_html(get_local_from_url('https://www.worldometers.info/world-population/population-by-country/',30e5) ) [0].iloc[:,[x[0] for x in field_descr]]
# test that field order hasn't changed in the db
if not all (col.startswith(field_descr[i][1]) for i,col in enumerate(self._data_population.columns) ):
raise CoaDbError('The worldometers database changed its field names. '
'The GeoInfo should be updated. Please contact developers.')
# change field name
self._data_population.columns = [x[2] for x in field_descr]
# standardization of country name
self._data_population['iso3_tmp2']=\
self._gm.to_standard(self._data_population['country'].tolist(),\
db='worldometers')
p=p.merge(self._data_population[["iso3_tmp2",f]],how='left',\
left_on='iso3_tmp',right_on='iso3_tmp2',\
suffixes=('','_tmp')).drop(['iso3_tmp2'],axis=1)
# ----------------------------------------------------------
elif f in ['region_code_list','region_name_list']:
if f == 'region_code_list':
ff = 'region'
elif f == 'region_name_list':
ff = 'region_name'
p[f]=p.merge(self._grp[['iso3',ff]],how='left',\
left_on='iso3_tmp',right_on='iso3',\
suffixes=('','_tmp')) \
.groupby('iso3_tmp')[ff].apply(list).to_list()
# ----------------------------------------------------------
elif f in ['capital']:
p[f]=p.merge(self._grp[['iso3',f]].drop_duplicates(), \
how='left',left_on='iso3_tmp',right_on='iso3',\
suffixes=('','_tmp'))[f]
# ----------------------------------------------------------
elif f == 'geometry':
if self._data_geometry.empty:
#geojsondatafile = 'https://raw.githubusercontent.com/johan/world.geo.json/master/countries.geo.json'
#self._data_geometry = gpd.read_file(get_local_from_url(geojsondatafile,0,'.json'))[["id","geometry"]]
world_geometry_url_zipfile='http://thematicmapping.org/downloads/TM_WORLD_BORDERS_SIMPL-0.3.zip' # too much simplified version ?
# world_geometry_url_zipfile='http://thematicmapping.org/downloads/TM_WORLD_BORDERS-0.3.zip' # too precize version ?
self._data_geometry = gpd.read_file('zip://'+get_local_from_url(world_geometry_url_zipfile,0,'.zip'))[['ISO3','geometry']]
self._data_geometry.columns=["id_tmp","geometry"]
# About some countries not properly managed by this database (south and north soudan)
self._data_geometry=self._data_geometry.append({'id_tmp':'SSD','geometry':None},ignore_index=True) # adding the SSD row
for newc in ['SSD','SDN']:
newgeo=gpd.read_file(get_local_from_url('https://github.com/johan/world.geo.json/raw/master/countries/'+newc+'.geo.json'))
poly=newgeo[newgeo.id==newc].geometry.values[0]
self._data_geometry.loc[self._data_geometry.id_tmp==newc,'geometry']=gpd.GeoSeries(poly).values
# About countries that we artificially put on the east of the map
for newc in ['RUS','FJI','NZL','WSM']:
poly=self._data_geometry[self._data_geometry.id_tmp==newc].geometry.values[0]
poly=so.unary_union(sg.MultiPolygon([sg.Polygon([(x,y) if x>=0 else (x+360,y) for x,y in p.exterior.coords]) for p in poly]))
self._data_geometry.loc[self._data_geometry.id_tmp==newc,'geometry']=gpd.GeoSeries(poly).values
# About countries that we artificially put on the west of the map
for newc in ['USA']:
poly=self._data_geometry[self._data_geometry.id_tmp==newc].geometry.values[0]
poly=so.unary_union(sg.MultiPolygon([sg.Polygon([(x-360,y) if x>=0 else (x,y) for x,y in p.exterior.coords]) for p in poly]))
self._data_geometry.loc[self._data_geometry.id_tmp==newc,'geometry']=gpd.GeoSeries(poly).values
p=p.merge(self._data_geometry,how='left',\
left_on='iso3_tmp',right_on='id_tmp',\
suffixes=('','_tmp')).drop(['id_tmp'],axis=1)
# -----------------------------------------------------------
elif f == 'flag':
if self._data_flag.empty:
self._data_flag = pd.read_json(get_local_from_url('https://github.com/linssen/country-flag-icons/raw/master/countries.json',0))
self._data_flag['flag_url']='http:'+self._data_flag['file_url']
p=p.merge(self._data_flag[['alpha3','flag_url']],how='left',\
left_on='iso3_tmp',right_on='alpha3').drop(['alpha3'],axis=1)
return p.drop(['iso2_tmp','iso3_tmp'],axis=1,errors='ignore')
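    # Example (sketch, hypothetical dataframe): enriching a pandas DataFrame whose
    # 'location' column holds country names.
    #   gi = GeoInfo()
    #   df = pd.DataFrame({'location': ['France', 'Italy', 'Japan']})
    #   df = gi.add_field(input=df, field=['population', 'continent_name'])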
# ---------------------------------------------------------------------
# --- GeoRegion class -------------------------------------------------
# ---------------------------------------------------------------------
class GeoRegion():
"""GeoRegion class definition. Does not inheritate from any other
class.
It should raise only CoaError and derived exceptions in case
of errors (see pycoa.error)
"""
_source_dict={"UN_M49":"https://en.wikipedia.org/w/index.php?title=UN_M49&oldid=986603718", # pointing the previous correct ref . https://en.wikipedia.org/wiki/UN_M49",\
"GeoScheme":"https://en.wikipedia.org/w/index.php?title=List_of_countries_by_United_Nations_geoscheme&oldid=1008989486", #pointing the previous correct ref. https://en.wikipedia.org/wiki/List_of_countries_by_United_Nations_geoscheme",
"European Union":"https://europa.eu/european-union/about-eu/countries/member-countries_en",
"G7":"https://en.wikipedia.org/wiki/Group_of_Seven",
"G8":"https://en.wikipedia.org/wiki/Group_of_Eight",
"G20":"https://en.wikipedia.org/wiki/G20",
"G77":"https://www.g77.org/doc/members.html",
"OECD":"https://en.wikipedia.org/wiki/OECD",
"Commonwealth":"https://en.wikipedia.org/wiki/Member_states_of_the_Commonwealth_of_Nations",
}
_region_dict={}
_p_gs = pd.DataFrame()
def __init__(self,):
""" __init__ member function.
"""
#if 'XK' in self._country_list:
# del self._country_list['XK'] # creates bugs in pycountry and is currently a contested country as country
# --- get the UN M49 information and organize the data in the _region_dict
verb("Init of GeoRegion() from "+str(inspect.stack()[1]))
p_m49=pd.read_html(get_local_from_url(self._source_dict["UN_M49"],0))[1]
p_m49.columns=['code','region_name']
p_m49['region_name']=[r.split('(')[0].rstrip().title() for r in p_m49.region_name] # suppress information in parenthesis in region name
p_m49.set_index('code')
self._region_dict.update(p_m49.to_dict('split')['data'])
self._region_dict.update({ "UE":"European Union",
"G7":"G7",
"G8":"G8",
"G20":"G20",
"OECD":"Oecd",
"G77":"G77",
"CW":"Commonwealth"
}) # add UE for other analysis
# --- filling cw information
p_cw=pd.read_html(get_local_from_url('https://en.wikipedia.org/wiki/Member_states_of_the_Commonwealth_of_Nations'))
self._cw=[w.split('[')[0] for w in p_cw[0]['Country'].to_list()] # removing wikipedia notes
# --- get the UnitedNation GeoScheme and organize the data
p_gs=pd.read_html(get_local_from_url(self._source_dict["GeoScheme"],0))[0]
p_gs.columns=['country','capital','iso2','iso3','num','m49']
idx=[]
reg=[]
cap=[]
for index, row in p_gs.iterrows():
if row.iso3 != '–' : # meaning a non standard iso in wikipedia UN GeoScheme
for r in row.m49.replace(" ","").split('<'):
idx.append(row.iso3)
reg.append(int(r))
cap.append(row.capital)
self._p_gs=pd.DataFrame({'iso3':idx,'capital':cap,'region':reg})
self._p_gs=self._p_gs.merge(p_m49,how='left',left_on='region',\
right_on='code').drop(["code"],axis=1)
def get_source(self):
return self._source_dict
def get_region_list(self):
return list(self._region_dict.values())
def is_region(self,region):
""" it returns either False or the correctly named region name
"""
if type(region) != str:
raise CoaKeyError("The given region is not a str type.")
region=region.title() # if not properly capitalized
if region not in self.get_region_list():
return False
else :
return region
def get_countries_from_region(self,region):
""" it returns a list of countries for the given region name.
The standard used is iso3. To convert to another standard,
use the GeoManager class.
"""
r = self.is_region(region)
if not r:
raise CoaKeyError('The given region "'+str(region)+'" is unknown.')
region=r
clist=[]
if region=='European Union':
clist=['AUT','BEL','BGR','CYP','CZE','DEU','DNK','EST',\
'ESP','FIN','FRA','GRC','HRV','HUN','IRL','ITA',\
'LTU','LUX','LVA','MLT','NLD','POL','PRT','ROU',\
'SWE','SVN','SVK']
elif region=='G7':
            clist=['DEU','CAN','USA','FRA','ITA','JPN','GBR']
elif region=='G8':
            clist=['DEU','CAN','USA','FRA','ITA','JPN','GBR','RUS']
elif region=='G20':
clist=['ZAF','SAU','ARG','AUS','BRA','CAN','CHN','KOR','USA',\
                'IND','IDN','JPN','MEX','GBR','RUS','TUR',\
'AUT','BEL','BGR','CYP','CZE','DEU','DNK','EST',\
'ESP','FIN','FRA','GRC','HRV','HUN','IRL','ITA',\
'LTU','LUX','LVA','MLT','NLD','POL','PRT','ROU',\
'SWE','SVN','SVK']
elif region=='Oecd': # OCDE in french
clist=['DEU','AUS','AUT','BEL','CAN','CHL','COL','KOR','DNK',\
'ESP','EST','USA','FIN','FRA','GRC','HUN','IRL','ISL','ISR',\
                'ITA','JPN','LVA','LTU','LUX','MEX','NOR','NZL','NLD','POL',\
'PRT','SVK','SVN','SWE','CHE','GBR','CZE','TUR']
elif region=='G77':
clist=['AFG','DZA','AGO','ATG','ARG','AZE','BHS','BHR','BGD','BRB','BLZ',
'BEN','BTN','BOL','BWA','BRA','BRN','BFA','BDI','CPV','KHM','CMR',
'CAF','TCD','CHL','CHN','COL','COM','COG','CRI','CIV','CUB','PRK',
'COD','DJI','DMA','DOM','ECU','EGY','SLV','GNQ','ERI','SWZ','ETH',
'FJI','GAB','GMB','GHA','GRD','GTM','GIN','GNB','GUY','HTI','HND',
'IND','IDN','IRN','IRQ','JAM','JOR','KEN','KIR','KWT','LAO','LBN',
'LSO','LBR','LBY','MDG','MWI','MYS','MDV','MLI','MHL','MRT','MUS',
'FSM','MNG','MAR','MOZ','MMR','NAM','NRU','NPL','NIC','NER','NGA',
'OMN','PAK','PAN','PNG','PRY','PER','PHL','QAT','RWA','KNA','LCA',
'VCT','WSM','STP','SAU','SEN','SYC','SLE','SGP','SLB','SOM','ZAF',
'SSD','LKA','PSE','SDN','SUR','SYR','TJK','THA','TLS','TGO','TON',
'TTO','TUN','TKM','UGA','ARE','TZA','URY','VUT','VEN','VNM','YEM',
'ZMB','ZWE']
elif region=='Commonwealth':
clist=self._cw
else:
clist=self._p_gs[self._p_gs['region_name']==region]['iso3'].to_list()
return sorted(clist)
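    # Example (sketch): listing the iso3 codes of the countries of a region.
    #   gr = GeoRegion()
    #   gr.get_countries_from_region('Western Europe')  # -> sorted list of iso3 codes
    #   gr.get_countries_from_region('G7')              # -> iso3 codes of the G7 members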
def get_pandas(self):
return self._p_gs
# ---------------------------------------------------------------------
# --- GeoCountryclass -------------------------------------------------
# ---------------------------------------------------------------------
class GeoCountry():
"""GeoCountry class definition.
This class provides functions for specific countries and their states / departments / regions,
and their geo properties (geometry, population if available, etc.)
The list of supported countries is given by get_list_countries() function. """
# Assuming zip file here
_country_info_dict = {'FRA':'https://github.com/coa-project/coadata/raw/main/coastore/public.opendatasoft.com_912711563.zip',\
'USA':'https://alicia.data.socrata.com/api/geospatial/jhnu-yfrj?method=export&format=Original',\
'ITA':'https://raw.githubusercontent.com/openpolis/geojson-italy/master/geojson/limits_IT_provinces.geojson',\
'IND':'https://raw.githubusercontent.com/deldersveld/topojson/master/countries/india/india-states.json',\
'DEU':'https://github.com/jgehrcke/covid-19-germany-gae/raw/master/geodata/DE-counties.geojson',\
'ESP':'https://public.opendatasoft.com/explore/dataset/provincias-espanolas/download/?format=shp&timezone=Europe/Berlin&lang=en',\
# missing some counties 'GBR':'https://opendata.arcgis.com/datasets/69dc11c7386943b4ad8893c45648b1e1_0.zip?geometry=%7B%22xmin%22%3A-44.36%2C%22ymin%22%3A51.099%2C%22xmax%22%3A39.487%2C%22ymax%22%3A59.78%2C%22type%22%3A%22extent%22%2C%22spatialReference%22%3A%7B%22wkid%22%3A4326%7D%7D&outSR=%7B%22latestWkid%22%3A27700%2C%22wkid%22%3A27700%7D',\
'GBR':'https://github.com/coa-project/coadata/raw/main/coastore/opendata.arcgis.com_3256063640',\
# previously (but broken) : https://opendata.arcgis.com/datasets/3a4fa2ce68f642e399b4de07643eeed3_0.geojson',\
'BEL':'https://public.opendatasoft.com/explore/dataset/arrondissements-belges-2019/download/?format=shp&timezone=Europe/Berlin&lang=en',\
'PRT':'https://github.com/coa-project/coadata/raw/main/coastore/concelhos.zip',\
# (simplification of https://dados.gov.pt/en/datasets/r/59368d37-cbdb-426a-9472-5a04cf30fbe4 )
'MYS':'https://stacks.stanford.edu/file/druid:zd362bc5680/data.zip',\
'CHL':'http://geonode.meteochile.gob.cl/geoserver/wfs?format_options=charset%3AUTF-8&typename=geonode%3Adivision_comunal_geo_ide_1&outputFormat=SHAPE-ZIP&version=1.0.0&service=WFS&request=GetFeature',\
}
_source_dict = {'FRA':{'Basics':_country_info_dict['FRA'],\
'Subregion Flags':'http://sticker-departement.com/',\
'Region Flags':'https://fr.wikipedia.org/w/index.php?title=R%C3%A9gion_fran%C3%A7aise&oldid=177269957'},\
'USA':{'Basics':_country_info_dict['USA'],\
'Subregion informations':'https://en.wikipedia.org/wiki/List_of_states_and_territories_of_the_United_States'},\
'ITA':{'Basics':_country_info_dict['ITA']},\
'IND':{'Basics':_country_info_dict['IND']},\
'DEU':{'Basics':_country_info_dict['DEU']},\
'ESP':{'Basics':_country_info_dict['ESP']},\
'GBR':{'Basics':_country_info_dict['GBR'],'Regions':'http://geoportal1-ons.opendata.arcgis.com/datasets/0c3a9643cc7c4015bb80751aad1d2594_0.csv'},\
'BEL':{'Basics':_country_info_dict['BEL']},\
'PRT':{'Basics':_country_info_dict['PRT']},\
#,'District':'https://raw.githubusercontent.com/JoaoFOliveira/portuguese-municipalities/master/municipalities.json'},\
'MYS':{'Basics':_country_info_dict['MYS']},\
'CHL':{'Basics':_country_info_dict['CHL']},\
}
def __init__(self,country=None,**kwargs):
""" __init__ member function.
Must give as arg the country to deal with, as a valid ISO3 string.
Various args :
- dense_geometry (boolean). If True , the geometry of subregions and
region is changed in order to have dense overall geometry.
Default False.
- main_area (boolean). If True, only the geometry of the main area of
the country is taken into account.
"""
self._country=country
if country == None:
return None
if not country in self.get_list_countries():
raise CoaKeyError("Country "+str(country)+" not supported. Please see get_list_countries() and help. ")
kwargs_test(kwargs,['dense_geometry','main_area'],'Bad args used in this init of GeoCountry object. See help.')
dense_geometry=kwargs.get("dense_geometry",False)
main_area=kwargs.get("main_area",False)
if not isinstance(dense_geometry,bool) or not isinstance(main_area,bool):
raise CoaKeyError("GeoCountry kwargs are boolean. See help.")
self._country_data_region=None
self._country_data_subregion=None
self._municipality_region=None
url=self._country_info_dict[country]
# country by country, adapt the read file informations
# --- 'FRA' case ---------------------------------------------------------------------------------------
if self._country=='FRA':
self._country_data = gpd.read_file('zip://'+get_local_from_url(url,0,'.zip'))
# adding a flag for subregion (departements)
self._country_data['flag_subregion']=self._source_dict['FRA']['Subregion Flags']+'img/dept/sticker_plaque_immat_'+\
self._country_data['code_dept']+'_'+\
[n.lower() for n in self._country_data['nom_dept']]+'_moto.png' # picture of a sticker for motorbikes, not so bad...
# Reading information to get region flags and correct names of regions
f_reg_flag=open(get_local_from_url(self._source_dict['FRA']['Region Flags'],0), 'r', encoding="utf8")
content_reg_flag = f_reg_flag.read()
f_reg_flag.close()
soup_reg_flag = bs4.BeautifulSoup(content_reg_flag,'lxml')
for img in soup_reg_flag.find_all('img'): # need to convert <img tags to src content for pandas_read
src=img.get('src')
if src[0] == '/':
src='http:'+src
img.replace_with(src)
tabs_reg_flag=pd.read_html(str(soup_reg_flag)) # pandas read the modified html
metropole=tabs_reg_flag[5][["Logo","Dénomination","Code INSEE[5]"]] # getting 5th table, and only useful columns
ultramarin=tabs_reg_flag[6][["Logo","Dénomination","Code INSEE[5]"]] # getting 6th table
p_reg_flag=pd.concat([metropole,ultramarin]).rename(columns={"Code INSEE[5]":"code_region",\
"Logo":"flag_region",\
"Dénomination":"name_region"})
p_reg_flag=p_reg_flag[pd.notnull(p_reg_flag["code_region"])]
# pylint: disable-msg=W0612,E1101,W0141
import nose
from numpy.random import randn
import numpy as np
from pandas.core.index import Index, MultiIndex
from pandas import Panel, DataFrame, Series, notnull, isnull
from pandas.util.testing import (assert_almost_equal,
assert_series_equal,
assert_frame_equal,
assertRaisesRegexp)
import pandas.core.common as com
import pandas.util.testing as tm
from pandas.compat import (range, lrange, StringIO, lzip, u, cPickle,
product as cart_product, zip)
import pandas as pd
import pandas.index as _index
class TestMultiLevel(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
self.frame = DataFrame(np.random.randn(10, 3), index=index,
columns=Index(['A', 'B', 'C'], name='exp'))
self.single_level = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]],
names=['first'])
# create test series object
arrays = [['bar', 'bar', 'baz', 'baz', 'qux', 'qux', 'foo', 'foo'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
tuples = lzip(*arrays)
index = MultiIndex.from_tuples(tuples)
s = Series(randn(8), index=index)
s[3] = np.NaN
self.series = s
tm.N = 100
self.tdf = tm.makeTimeDataFrame()
self.ymd = self.tdf.groupby([lambda x: x.year, lambda x: x.month,
lambda x: x.day]).sum()
# use Int64Index, to make sure things work
self.ymd.index.set_levels([lev.astype('i8')
for lev in self.ymd.index.levels],
inplace=True)
self.ymd.index.set_names(['year', 'month', 'day'],
inplace=True)
def test_append(self):
a, b = self.frame[:5], self.frame[5:]
result = a.append(b)
tm.assert_frame_equal(result, self.frame)
result = a['A'].append(b['A'])
tm.assert_series_equal(result, self.frame['A'])
def test_dataframe_constructor(self):
multi = DataFrame(np.random.randn(4, 4),
index=[np.array(['a', 'a', 'b', 'b']),
np.array(['x', 'y', 'x', 'y'])])
tm.assert_isinstance(multi.index, MultiIndex)
self.assertNotIsInstance(multi.columns, MultiIndex)
multi = DataFrame(np.random.randn(4, 4),
columns=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
tm.assert_isinstance(multi.columns, MultiIndex)
def test_series_constructor(self):
multi = Series(1., index=[np.array(['a', 'a', 'b', 'b']),
np.array(['x', 'y', 'x', 'y'])])
tm.assert_isinstance(multi.index, MultiIndex)
multi = Series(1., index=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
tm.assert_isinstance(multi.index, MultiIndex)
multi = Series(lrange(4), index=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
tm.assert_isinstance(multi.index, MultiIndex)
def test_reindex_level(self):
# axis=0
month_sums = self.ymd.sum(level='month')
result = month_sums.reindex(self.ymd.index, level=1)
expected = self.ymd.groupby(level='month').transform(np.sum)
assert_frame_equal(result, expected)
# Series
result = month_sums['A'].reindex(self.ymd.index, level=1)
expected = self.ymd['A'].groupby(level='month').transform(np.sum)
assert_series_equal(result, expected)
# axis=1
month_sums = self.ymd.T.sum(axis=1, level='month')
result = month_sums.reindex(columns=self.ymd.index, level=1)
expected = self.ymd.groupby(level='month').transform(np.sum).T
assert_frame_equal(result, expected)
def test_binops_level(self):
def _check_op(opname):
op = getattr(DataFrame, opname)
month_sums = self.ymd.sum(level='month')
result = op(self.ymd, month_sums, level='month')
broadcasted = self.ymd.groupby(level='month').transform(np.sum)
expected = op(self.ymd, broadcasted)
assert_frame_equal(result, expected)
# Series
op = getattr(Series, opname)
result = op(self.ymd['A'], month_sums['A'], level='month')
broadcasted = self.ymd['A'].groupby(
level='month').transform(np.sum)
expected = op(self.ymd['A'], broadcasted)
assert_series_equal(result, expected)
_check_op('sub')
_check_op('add')
_check_op('mul')
_check_op('div')
def test_pickle(self):
def _test_roundtrip(frame):
pickled = cPickle.dumps(frame)
unpickled = cPickle.loads(pickled)
assert_frame_equal(frame, unpickled)
_test_roundtrip(self.frame)
_test_roundtrip(self.frame.T)
_test_roundtrip(self.ymd)
_test_roundtrip(self.ymd.T)
def test_reindex(self):
reindexed = self.frame.ix[[('foo', 'one'), ('bar', 'one')]]
expected = self.frame.ix[[0, 3]]
assert_frame_equal(reindexed, expected)
def test_reindex_preserve_levels(self):
new_index = self.ymd.index[::10]
chunk = self.ymd.reindex(new_index)
self.assertIs(chunk.index, new_index)
chunk = self.ymd.ix[new_index]
self.assertIs(chunk.index, new_index)
ymdT = self.ymd.T
chunk = ymdT.reindex(columns=new_index)
self.assertIs(chunk.columns, new_index)
chunk = ymdT.ix[:, new_index]
self.assertIs(chunk.columns, new_index)
def test_sort_index_preserve_levels(self):
result = self.frame.sort_index()
self.assertEquals(result.index.names, self.frame.index.names)
def test_repr_to_string(self):
repr(self.frame)
repr(self.ymd)
repr(self.frame.T)
repr(self.ymd.T)
buf = StringIO()
self.frame.to_string(buf=buf)
self.ymd.to_string(buf=buf)
self.frame.T.to_string(buf=buf)
self.ymd.T.to_string(buf=buf)
def test_repr_name_coincide(self):
index = MultiIndex.from_tuples([('a', 0, 'foo'), ('b', 1, 'bar')],
names=['a', 'b', 'c'])
df = DataFrame({'value': [0, 1]}, index=index)
lines = repr(df).split('\n')
self.assert_(lines[2].startswith('a 0 foo'))
def test_getitem_simple(self):
df = self.frame.T
col = df['foo', 'one']
assert_almost_equal(col.values, df.values[:, 0])
self.assertRaises(KeyError, df.__getitem__, ('foo', 'four'))
self.assertRaises(KeyError, df.__getitem__, 'foobar')
def test_series_getitem(self):
s = self.ymd['A']
result = s[2000, 3]
result2 = s.ix[2000, 3]
expected = s.reindex(s.index[42:65])
expected.index = expected.index.droplevel(0).droplevel(0)
assert_series_equal(result, expected)
result = s[2000, 3, 10]
expected = s[49]
self.assertEquals(result, expected)
# fancy
result = s.ix[[(2000, 3, 10), (2000, 3, 13)]]
expected = s.reindex(s.index[49:51])
assert_series_equal(result, expected)
# key error
self.assertRaises(KeyError, s.__getitem__, (2000, 3, 4))
def test_series_getitem_corner(self):
s = self.ymd['A']
# don't segfault, GH #495
# out of bounds access
self.assertRaises(IndexError, s.__getitem__, len(self.ymd))
# generator
result = s[(x > 0 for x in s)]
expected = s[s > 0]
assert_series_equal(result, expected)
def test_series_setitem(self):
s = self.ymd['A']
s[2000, 3] = np.nan
self.assert_(isnull(s.values[42:65]).all())
self.assert_(notnull(s.values[:42]).all())
self.assert_(notnull(s.values[65:]).all())
s[2000, 3, 10] = np.nan
self.assert_(isnull(s[49]))
def test_series_slice_partial(self):
pass
def test_frame_getitem_setitem_boolean(self):
df = self.frame.T.copy()
values = df.values
result = df[df > 0]
expected = df.where(df > 0)
assert_frame_equal(result, expected)
df[df > 0] = 5
values[values > 0] = 5
assert_almost_equal(df.values, values)
df[df == 5] = 0
values[values == 5] = 0
assert_almost_equal(df.values, values)
# a df that needs alignment first
df[df[:-1] < 0] = 2
np.putmask(values[:-1], values[:-1] < 0, 2)
assert_almost_equal(df.values, values)
with assertRaisesRegexp(TypeError, 'boolean values only'):
df[df * 0] = 2
def test_frame_getitem_setitem_slice(self):
# getitem
result = self.frame.ix[:4]
expected = self.frame[:4]
assert_frame_equal(result, expected)
# setitem
cp = self.frame.copy()
cp.ix[:4] = 0
self.assert_((cp.values[:4] == 0).all())
self.assert_((cp.values[4:] != 0).all())
def test_frame_getitem_setitem_multislice(self):
levels = [['t1', 't2'], ['a', 'b', 'c']]
labels = [[0, 0, 0, 1, 1], [0, 1, 2, 0, 1]]
midx = MultiIndex(labels=labels, levels=levels, names=[None, 'id'])
df = DataFrame({'value': [1, 2, 3, 7, 8]}, index=midx)
result = df.ix[:, 'value']
assert_series_equal(df['value'], result)
result = df.ix[1:3, 'value']
assert_series_equal(df['value'][1:3], result)
result = df.ix[:, :]
assert_frame_equal(df, result)
result = df
df.ix[:, 'value'] = 10
result['value'] = 10
assert_frame_equal(df, result)
df.ix[:, :] = 10
assert_frame_equal(df, result)
def test_frame_getitem_multicolumn_empty_level(self):
f = DataFrame({'a': ['1', '2', '3'],
'b': ['2', '3', '4']})
f.columns = [['level1 item1', 'level1 item2'],
['', 'level2 item2'],
['level3 item1', 'level3 item2']]
result = f['level1 item1']
expected = DataFrame([['1'], ['2'], ['3']], index=f.index,
columns=['level3 item1'])
assert_frame_equal(result, expected)
def test_frame_setitem_multi_column(self):
df = DataFrame(randn(10, 4), columns=[['a', 'a', 'b', 'b'],
[0, 1, 0, 1]])
cp = df.copy()
cp['a'] = cp['b']
assert_frame_equal(cp['a'], cp['b'])
# set with ndarray
cp = df.copy()
cp['a'] = cp['b'].values
assert_frame_equal(cp['a'], cp['b'])
#----------------------------------------
# #1803
columns = MultiIndex.from_tuples([('A', '1'), ('A', '2'), ('B', '1')])
df = DataFrame(index=[1, 3, 5], columns=columns)
# Works, but adds a column instead of updating the two existing ones
df['A'] = 0.0 # Doesn't work
self.assertTrue((df['A'].values == 0).all())
# it broadcasts
df['B', '1'] = [1, 2, 3]
df['A'] = df['B', '1']
assert_series_equal(df['A', '1'], df['B', '1'])
assert_series_equal(df['A', '2'], df['B', '1'])
def test_getitem_tuple_plus_slice(self):
# GH #671
df = DataFrame({'a': lrange(10),
'b': lrange(10),
'c': np.random.randn(10),
'd': np.random.randn(10)})
idf = df.set_index(['a', 'b'])
result = idf.ix[(0, 0), :]
expected = idf.ix[0, 0]
expected2 = idf.xs((0, 0))
assert_series_equal(result, expected)
assert_series_equal(result, expected2)
def test_getitem_setitem_tuple_plus_columns(self):
# GH #1013
df = self.ymd[:5]
result = df.ix[(2000, 1, 6), ['A', 'B', 'C']]
expected = df.ix[2000, 1, 6][['A', 'B', 'C']]
assert_series_equal(result, expected)
def test_getitem_multilevel_index_tuple_unsorted(self):
index_columns = list("abc")
df = DataFrame([[0, 1, 0, "x"], [0, 0, 1, "y"]],
columns=index_columns + ["data"])
df = df.set_index(index_columns)
query_index = df.index[:1]
rs = df.ix[query_index, "data"]
xp = Series(['x'], index=MultiIndex.from_tuples([(0, 1, 0)]))
assert_series_equal(rs, xp)
def test_xs(self):
xs = self.frame.xs(('bar', 'two'))
xs2 = self.frame.ix[('bar', 'two')]
assert_series_equal(xs, xs2)
assert_almost_equal(xs.values, self.frame.values[4])
# GH 6574
# missing values in returned index should be preserved
acc = [
('a','abcde',1),
('b','bbcde',2),
('y','yzcde',25),
('z','xbcde',24),
('z',None,26),
('z','zbcde',25),
('z','ybcde',26),
]
df = DataFrame(acc, columns=['a1','a2','cnt']).set_index(['a1','a2'])
expected = DataFrame({ 'cnt' : [24,26,25,26] }, index=Index(['xbcde',np.nan,'zbcde','ybcde'],name='a2'))
result = df.xs('z',level='a1')
assert_frame_equal(result, expected)
def test_xs_partial(self):
result = self.frame.xs('foo')
result2 = self.frame.ix['foo']
expected = self.frame.T['foo'].T
assert_frame_equal(result, expected)
assert_frame_equal(result, result2)
result = self.ymd.xs((2000, 4))
expected = self.ymd.ix[2000, 4]
assert_frame_equal(result, expected)
# ex from #1796
index = MultiIndex(levels=[['foo', 'bar'], ['one', 'two'], [-1, 1]],
labels=[[0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 0, 0, 1, 1],
[0, 1, 0, 1, 0, 1, 0, 1]])
df = DataFrame(np.random.randn(8, 4), index=index,
columns=list('abcd'))
result = df.xs(['foo', 'one'])
expected = df.ix['foo', 'one']
assert_frame_equal(result, expected)
def test_xs_level(self):
result = self.frame.xs('two', level='second')
expected = self.frame[self.frame.index.get_level_values(1) == 'two']
expected.index = expected.index.droplevel(1)
assert_frame_equal(result, expected)
index = MultiIndex.from_tuples([('x', 'y', 'z'), ('a', 'b', 'c'),
('p', 'q', 'r')])
df = DataFrame(np.random.randn(3, 5), index=index)
result = df.xs('c', level=2)
expected = df[1:2]
expected.index = expected.index.droplevel(2)
assert_frame_equal(result, expected)
# this is a copy in 0.14
result = self.frame.xs('two', level='second')
# setting this will give a SettingWithCopyError
# as we are trying to write a view
def f(x):
x[:] = 10
self.assertRaises(com.SettingWithCopyError, f, result)
def test_xs_level_multiple(self):
from pandas import read_table
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
df = read_table(StringIO(text), sep='\s+', engine='python')
result = df.xs(('a', 4), level=['one', 'four'])
expected = df.xs('a').xs(4, level='four')
assert_frame_equal(result, expected)
# this is a copy in 0.14
result = df.xs(('a', 4), level=['one', 'four'])
# setting this will give a SettingWithCopyError
# as we are trying to write a view
def f(x):
x[:] = 10
self.assertRaises(com.SettingWithCopyError, f, result)
# GH2107
dates = lrange(20111201, 20111205)
ids = 'abcde'
idx = MultiIndex.from_tuples([x for x in cart_product(dates, ids)])
idx.names = ['date', 'secid']
df = DataFrame(np.random.randn(len(idx), 3), idx, ['X', 'Y', 'Z'])
rs = df.xs(20111201, level='date')
xp = df.ix[20111201, :]
assert_frame_equal(rs, xp)
def test_xs_level0(self):
from pandas import read_table
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
df = read_table(StringIO(text), sep='\s+', engine='python')
result = df.xs('a', level=0)
expected = df.xs('a')
self.assertEqual(len(result), 2)
assert_frame_equal(result, expected)
def test_xs_level_series(self):
s = self.frame['A']
result = s[:, 'two']
expected = self.frame.xs('two', level=1)['A']
assert_series_equal(result, expected)
s = self.ymd['A']
result = s[2000, 5]
expected = self.ymd.ix[2000, 5]['A']
assert_series_equal(result, expected)
# not implementing this for now
self.assertRaises(TypeError, s.__getitem__, (2000, slice(3, 4)))
# result = s[2000, 3:4]
# lv =s.index.get_level_values(1)
# expected = s[(lv == 3) | (lv == 4)]
# expected.index = expected.index.droplevel(0)
# assert_series_equal(result, expected)
# can do this though
def test_get_loc_single_level(self):
s = Series(np.random.randn(len(self.single_level)),
index=self.single_level)
for k in self.single_level.values:
s[k]
def test_getitem_toplevel(self):
df = self.frame.T
result = df['foo']
expected = df.reindex(columns=df.columns[:3])
expected.columns = expected.columns.droplevel(0)
assert_frame_equal(result, expected)
result = df['bar']
result2 = df.ix[:, 'bar']
expected = df.reindex(columns=df.columns[3:5])
expected.columns = expected.columns.droplevel(0)
assert_frame_equal(result, expected)
assert_frame_equal(result, result2)
def test_getitem_setitem_slice_integers(self):
index = MultiIndex(levels=[[0, 1, 2], [0, 2]],
labels=[[0, 0, 1, 1, 2, 2],
[0, 1, 0, 1, 0, 1]])
frame = DataFrame(np.random.randn(len(index), 4), index=index,
columns=['a', 'b', 'c', 'd'])
res = frame.ix[1:2]
exp = frame.reindex(frame.index[2:])
assert_frame_equal(res, exp)
frame.ix[1:2] = 7
self.assert_((frame.ix[1:2] == 7).values.all())
series = Series(np.random.randn(len(index)), index=index)
res = series.ix[1:2]
exp = series.reindex(series.index[2:])
assert_series_equal(res, exp)
series.ix[1:2] = 7
self.assert_((series.ix[1:2] == 7).values.all())
def test_getitem_int(self):
levels = [[0, 1], [0, 1, 2]]
labels = [[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]]
index = MultiIndex(levels=levels, labels=labels)
frame = DataFrame(np.random.randn(6, 2), index=index)
result = frame.ix[1]
expected = frame[-3:]
expected.index = expected.index.droplevel(0)
assert_frame_equal(result, expected)
# raises exception
self.assertRaises(KeyError, frame.ix.__getitem__, 3)
# however this will work
result = self.frame.ix[2]
expected = self.frame.xs(self.frame.index[2])
assert_series_equal(result, expected)
def test_getitem_partial(self):
ymd = self.ymd.T
result = ymd[2000, 2]
expected = ymd.reindex(columns=ymd.columns[ymd.columns.labels[1] == 1])
expected.columns = expected.columns.droplevel(0).droplevel(0)
assert_frame_equal(result, expected)
def test_getitem_slice_not_sorted(self):
df = self.frame.sortlevel(1).T
# buglet with int typechecking
result = df.ix[:, :np.int32(3)]
expected = df.reindex(columns=df.columns[:3])
assert_frame_equal(result, expected)
def test_setitem_change_dtype(self):
dft = self.frame.T
s = dft['foo', 'two']
dft['foo', 'two'] = s > s.median()
assert_series_equal(dft['foo', 'two'], s > s.median())
# tm.assert_isinstance(dft._data.blocks[1].items, MultiIndex)
reindexed = dft.reindex(columns=[('foo', 'two')])
assert_series_equal(reindexed['foo', 'two'], s > s.median())
def test_frame_setitem_ix(self):
self.frame.ix[('bar', 'two'), 'B'] = 5
self.assertEquals(self.frame.ix[('bar', 'two'), 'B'], 5)
# with integer labels
df = self.frame.copy()
df.columns = lrange(3)
df.ix[('bar', 'two'), 1] = 7
self.assertEquals(df.ix[('bar', 'two'), 1], 7)
def test_fancy_slice_partial(self):
result = self.frame.ix['bar':'baz']
expected = self.frame[3:7]
assert_frame_equal(result, expected)
result = self.ymd.ix[(2000, 2):(2000, 4)]
lev = self.ymd.index.labels[1]
expected = self.ymd[(lev >= 1) & (lev <= 3)]
assert_frame_equal(result, expected)
def test_getitem_partial_column_select(self):
idx = MultiIndex(labels=[[0, 0, 0], [0, 1, 1], [1, 0, 1]],
levels=[['a', 'b'], ['x', 'y'], ['p', 'q']])
df = DataFrame(np.random.rand(3, 2), index=idx)
result = df.ix[('a', 'y'), :]
expected = df.ix[('a', 'y')]
assert_frame_equal(result, expected)
result = df.ix[('a', 'y'), [1, 0]]
expected = df.ix[('a', 'y')][[1, 0]]
assert_frame_equal(result, expected)
self.assertRaises(KeyError, df.ix.__getitem__,
(('a', 'foo'), slice(None, None)))
def test_sortlevel(self):
df = self.frame.copy()
df.index = np.arange(len(df))
assertRaisesRegexp(TypeError, 'hierarchical index', df.sortlevel, 0)
# axis=1
# series
a_sorted = self.frame['A'].sortlevel(0)
with assertRaisesRegexp(TypeError, 'hierarchical index'):
self.frame.reset_index()['A'].sortlevel()
# preserve names
self.assertEquals(a_sorted.index.names, self.frame.index.names)
# inplace
rs = self.frame.copy()
rs.sortlevel(0, inplace=True)
assert_frame_equal(rs, self.frame.sortlevel(0))
def test_sortlevel_large_cardinality(self):
# #2684 (int64)
index = MultiIndex.from_arrays([np.arange(4000)]*3)
df = DataFrame(np.random.randn(4000), index=index, dtype = np.int64)
# it works!
result = df.sortlevel(0)
self.assertTrue(result.index.lexsort_depth == 3)
# #2684 (int32)
index = MultiIndex.from_arrays([np.arange(4000)]*3)
df = DataFrame(np.random.randn(4000), index=index, dtype = np.int32)
# it works!
result = df.sortlevel(0)
self.assert_((result.dtypes.values == df.dtypes.values).all() == True)
self.assertTrue(result.index.lexsort_depth == 3)
def test_delevel_infer_dtype(self):
tuples = [tuple for tuple in cart_product(['foo', 'bar'],
[10, 20], [1.0, 1.1])]
index = MultiIndex.from_tuples(tuples,
names=['prm0', 'prm1', 'prm2'])
df = DataFrame(np.random.randn(8, 3), columns=['A', 'B', 'C'],
index=index)
deleveled = df.reset_index()
self.assert_(com.is_integer_dtype(deleveled['prm1']))
self.assert_(com.is_float_dtype(deleveled['prm2']))
def test_reset_index_with_drop(self):
deleveled = self.ymd.reset_index(drop=True)
self.assertEquals(len(deleveled.columns), len(self.ymd.columns))
deleveled = self.series.reset_index()
tm.assert_isinstance(deleveled, DataFrame)
self.assertEqual(len(deleveled.columns),
len(self.series.index.levels) + 1)
deleveled = self.series.reset_index(drop=True)
tm.assert_isinstance(deleveled, Series)
def test_sortlevel_by_name(self):
self.frame.index.names = ['first', 'second']
result = self.frame.sortlevel(level='second')
expected = self.frame.sortlevel(level=1)
assert_frame_equal(result, expected)
def test_sortlevel_mixed(self):
sorted_before = self.frame.sortlevel(1)
df = self.frame.copy()
df['foo'] = 'bar'
sorted_after = df.sortlevel(1)
assert_frame_equal(sorted_before, sorted_after.drop(['foo'], axis=1))
dft = self.frame.T
sorted_before = dft.sortlevel(1, axis=1)
dft['foo', 'three'] = 'bar'
sorted_after = dft.sortlevel(1, axis=1)
assert_frame_equal(sorted_before.drop([('foo', 'three')], axis=1),
sorted_after.drop([('foo', 'three')], axis=1))
def test_count_level(self):
def _check_counts(frame, axis=0):
index = frame._get_axis(axis)
for i in range(index.nlevels):
result = frame.count(axis=axis, level=i)
expected = frame.groupby(axis=axis, level=i).count(axis=axis)
expected = expected.reindex_like(result).astype('i8')
assert_frame_equal(result, expected)
self.frame.ix[1, [1, 2]] = np.nan
self.frame.ix[7, [0, 1]] = np.nan
self.ymd.ix[1, [1, 2]] = np.nan
self.ymd.ix[7, [0, 1]] = np.nan
_check_counts(self.frame)
_check_counts(self.ymd)
_check_counts(self.frame.T, axis=1)
_check_counts(self.ymd.T, axis=1)
# can't call with level on regular DataFrame
df = tm.makeTimeDataFrame()
assertRaisesRegexp(TypeError, 'hierarchical', df.count, level=0)
self.frame['D'] = 'foo'
result = self.frame.count(level=0, numeric_only=True)
assert_almost_equal(result.columns, ['A', 'B', 'C'])
def test_count_level_series(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz'],
['one', 'two', 'three', 'four']],
labels=[[0, 0, 0, 2, 2],
[2, 0, 1, 1, 2]])
s = Series(np.random.randn(len(index)), index=index)
result = s.count(level=0)
expected = s.groupby(level=0).count()
assert_series_equal(result.astype('f8'),
expected.reindex(result.index).fillna(0))
result = s.count(level=1)
expected = s.groupby(level=1).count()
assert_series_equal(result.astype('f8'),
expected.reindex(result.index).fillna(0))
def test_count_level_corner(self):
s = self.frame['A'][:0]
result = s.count(level=0)
expected = Series(0, index=s.index.levels[0])
assert_series_equal(result, expected)
df = self.frame[:0]
result = df.count(level=0)
expected = DataFrame({}, index=s.index.levels[0],
columns=df.columns).fillna(0).astype(np.int64)
assert_frame_equal(result, expected)
def test_unstack(self):
# just check that it works for now
unstacked = self.ymd.unstack()
unstacked2 = unstacked.unstack()
# test that ints work
unstacked = self.ymd.astype(int).unstack()
# test that int32 work
unstacked = self.ymd.astype(np.int32).unstack()
def test_unstack_multiple_no_empty_columns(self):
index = MultiIndex.from_tuples([(0, 'foo', 0), (0, 'bar', 0),
(1, 'baz', 1), (1, 'qux', 1)])
s = Series(np.random.randn(4), index=index)
unstacked = s.unstack([1, 2])
expected = unstacked.dropna(axis=1, how='all')
assert_frame_equal(unstacked, expected)
def test_stack(self):
# regular roundtrip
unstacked = self.ymd.unstack()
restacked = unstacked.stack()
assert_frame_equal(restacked, self.ymd)
unlexsorted = self.ymd.sortlevel(2)
unstacked = unlexsorted.unstack(2)
restacked = unstacked.stack()
assert_frame_equal(restacked.sortlevel(0), self.ymd)
unlexsorted = unlexsorted[::-1]
unstacked = unlexsorted.unstack(1)
restacked = unstacked.stack().swaplevel(1, 2)
assert_frame_equal(restacked.sortlevel(0), self.ymd)
unlexsorted = unlexsorted.swaplevel(0, 1)
unstacked = unlexsorted.unstack(0).swaplevel(0, 1, axis=1)
restacked = unstacked.stack(0).swaplevel(1, 2)
assert_frame_equal(restacked.sortlevel(0), self.ymd)
# columns unsorted
unstacked = self.ymd.unstack()
unstacked = unstacked.sort(axis=1, ascending=False)
restacked = unstacked.stack()
assert_frame_equal(restacked, self.ymd)
# more than 2 levels in the columns
unstacked = self.ymd.unstack(1).unstack(1)
result = unstacked.stack(1)
expected = self.ymd.unstack()
assert_frame_equal(result, expected)
result = unstacked.stack(2)
expected = self.ymd.unstack(1)
assert_frame_equal(result, expected)
result = unstacked.stack(0)
expected = self.ymd.stack().unstack(1).unstack(1)
assert_frame_equal(result, expected)
# not all levels present in each echelon
unstacked = self.ymd.unstack(2).ix[:, ::3]
stacked = unstacked.stack().stack()
ymd_stacked = self.ymd.stack()
assert_series_equal(stacked, ymd_stacked.reindex(stacked.index))
# stack with negative number
result = self.ymd.unstack(0).stack(-2)
expected = self.ymd.unstack(0).stack(0)
def test_unstack_odd_failure(self):
data = """day,time,smoker,sum,len
Fri,Dinner,No,8.25,3.
Fri,Dinner,Yes,27.03,9
Fri,Lunch,No,3.0,1
Fri,Lunch,Yes,13.68,6
Sat,Dinner,No,139.63,45
Sat,Dinner,Yes,120.77,42
Sun,Dinner,No,180.57,57
Sun,Dinner,Yes,66.82,19
Thur,Dinner,No,3.0,1
Thur,Lunch,No,117.32,44
Thur,Lunch,Yes,51.51,17"""
df = pd.read_csv(StringIO(data)).set_index(['day', 'time', 'smoker'])
# it works, #2100
result = df.unstack(2)
recons = result.stack()
assert_frame_equal(recons, df)
import numpy as np
import pandas as pd
import sys
import pickle
import matplotlib.pyplot as plt
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
import pyqtgraph
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtTest import *
from Model_module import Model_module
from Data_module import Data_module
# from Sub_widget import another_result_explain
class Worker(QObject):
# Create the signal objects to be emitted # #############
train_value = pyqtSignal(object)
# nor_ab_value = pyqtSignal(object)
procedure_value = pyqtSignal(object)
verif_value = pyqtSignal(object)
timer = pyqtSignal(object)
symptom_db = pyqtSignal(object)
shap = pyqtSignal(object)
plot_db = pyqtSignal(object)
display_ex = pyqtSignal(object, object, object)
another_shap = pyqtSignal(object, object, object)
another_shap_table = pyqtSignal(object)
##########################################
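# Note: each pyqtSignal declared above is emitted from generate_db() below and is connected
# to a GUI slot in Mainwindow.__init__ (e.g. self.worker.train_value.connect(self.Determine_train)),
# so the model evaluation runs off the GUI thread and results reach the widgets through
# queued signal delivery.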
@pyqtSlot(object)
def generate_db(self):
test_db = input('Enter the scenario to run : ')
print(f'Running the selected scenario : {test_db}.')
Model_module() # initialize the empty matrices inside the model module
data_module = Data_module()
db, check_db = data_module.load_data(file_name=test_db) # load test_db
data_module.data_processing() # Min-Max o, 2 Dimension
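# Note (assumption): "Min-Max o, 2 Dimension" is read here as min-max scaling applied and the
# data reshaped to 2-D; the actual preprocessing is implemented inside Data_module.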
liner = []
plot_data = []
normal_data = []
compare_data = {'Normal':[], 'Ab21-01':[], 'Ab21-02':[], 'Ab20-04':[], 'Ab15-07':[], 'Ab15-08':[], 'Ab63-04':[], 'Ab63-02':[], 'Ab21-12':[], 'Ab19-02':[], 'Ab21-11':[], 'Ab23-03':[], 'Ab60-02':[], 'Ab59-02':[], 'Ab23-01':[], 'Ab23-06':[]}
for line in range(np.shape(db)[0]):
QTest.qWait(0.01)
print(np.shape(db)[0], line)
data = np.array([data_module.load_real_data(row=line)])
liner.append(line)
check_data, check_parameter = data_module.load_real_check_data(row=line)
plot_data.append(check_data[0])
try: normal_data.append(normal_db.iloc[line])
except: pass
try: compare_data['Normal'].append(normal_db.iloc[line])
except: pass
try: compare_data['Ab21-01'].append(ab21_01.iloc[line])
except: pass
try: compare_data['Ab21-02'].append(ab21_02.iloc[line])
except: pass
try: compare_data['Ab20-04'].append(ab20_04.iloc[line])
except: pass
try: compare_data['Ab15-07'].append(ab15_07.iloc[line])
except: pass
try: compare_data['Ab15-08'].append(ab15_08.iloc[line])
except: pass
try: compare_data['Ab63-04'].append(ab63_04.iloc[line])
except: pass
try: compare_data['Ab63-02'].append(ab63_02.iloc[line])
except: pass
try: compare_data['Ab21-12'].append(ab21_12.iloc[line])
except: pass
try: compare_data['Ab19-02'].append(ab19_02.iloc[line])
except: pass
try: compare_data['Ab21-11'].append(ab21_11.iloc[line])
except: pass
try: compare_data['Ab23-03'].append(ab23_03.iloc[line])
except: pass
try: compare_data['Ab60-02'].append(ab60_02.iloc[line])
except: pass
try: compare_data['Ab59-02'].append(ab59_02.iloc[line])
except: pass
try: compare_data['Ab23-01'].append(ab23_01.iloc[line])
except: pass
try: compare_data['Ab23-06'].append(ab23_06.iloc[line])
except: pass
if np.shape(data) == (1, 10, 46):
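# Sliding-window input: once 10 consecutive time steps of the 46 monitored parameters have
# accumulated (shape (1, 10, 46)), the classifiers below are evaluated for this time step.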
dim2 = np.array(data_module.load_scaled_data(row=line - 9)) # 2-D scaled input
# check_data, check_parameter = data_module.load_real_check_data(row=line - 8)
# plot_data.append(check_data[0])
train_untrain_reconstruction_error, train_untrain_error = model_module.train_untrain_classifier(data=data)
# normal_abnormal_reconstruction_error = model_module.normal_abnormal_classifier(data=data)
abnormal_procedure_result, abnormal_procedure_prediction, shap_add_des, shap_value = model_module.abnormal_procedure_classifier(data=dim2)
abnormal_verif_reconstruction_error, verif_threshold, abnormal_verif_error = model_module.abnormal_procedure_verification(data=data)
self.train_value.emit(train_untrain_error)
# self.nor_ab_value.emit(np.argmax(abnormal_procedure_result[line-9], axis=1)[0])
self.procedure_value.emit(np.argmax(abnormal_procedure_prediction, axis=1)[0])
self.verif_value.emit([abnormal_verif_error, verif_threshold])
self.timer.emit([line, check_parameter])
self.symptom_db.emit([np.argmax(abnormal_procedure_prediction, axis=1)[0], check_parameter])
self.shap.emit(shap_add_des)
self.plot_db.emit([liner, plot_data])
self.display_ex.emit(shap_add_des, [liner, plot_data], normal_data)
self.another_shap.emit(shap_value, [liner, plot_data], compare_data)
self.another_shap_table.emit(shap_value)
class AlignDelegate(QStyledItemDelegate):
def initStyleOption(self, option, index):
super(AlignDelegate, self).initStyleOption(option, index)
option.displayAlignment = Qt.AlignCenter
class Mainwindow(QWidget):
def __init__(self):
super().__init__()
self.setWindowTitle("Real-Time Abnormal Diagnosis for NPP")
self.setGeometry(150, 50, 1700, 800)
# Initial plot configuration
pyqtgraph.setConfigOption("background", "w")
pyqtgraph.setConfigOption("foreground", "k")
#############################################
self.selected_para = pd.read_csv('./DataBase/Final_parameter.csv')
# GUI part 1 layout (integrated diagnosis section)
layout_left = QVBoxLayout()
# Group 0 setup (Time and Power)
gb_0 = QGroupBox("Time and Power") # set the group 0 title
layout_left.addWidget(gb_0) # add group 0 to the overall frame
gb_0_layout = QBoxLayout(QBoxLayout.LeftToRight) # layout holding the group 0 contents
# Group 1 setup
gb_1 = QGroupBox("Training Status") # set the group 1 title
layout_left.addWidget(gb_1) # add group 1 to the overall frame
gb_1_layout = QBoxLayout(QBoxLayout.LeftToRight) # layout holding the group 1 contents
# Group 2 setup
gb_2 = QGroupBox('NPP Status')
layout_left.addWidget(gb_2)
gb_2_layout = QBoxLayout(QBoxLayout.LeftToRight)
# Group 3 setup
gb_3 = QGroupBox(self)
layout_left.addWidget(gb_3)
gb_3_layout = QBoxLayout(QBoxLayout.LeftToRight)
# Group 4 setup
gb_4 = QGroupBox('Predicted Result Verification')
layout_left.addWidget(gb_4)
gb_4_layout = QBoxLayout(QBoxLayout.LeftToRight)
# Group 5 setup
gb_5 = QGroupBox('Symptom check in scenario')
layout_left.addWidget(gb_5)
gb_5_layout = QBoxLayout(QBoxLayout.TopToBottom)
# Add a spacer
# layout_part1.addItem(QSpacerItem(20, 40, QSizePolicy.Minimum, QSizePolicy.Expanding))
# Group 0 contents
self.time_label = QLabel(self)
self.power_label = QPushButton(self)
# Group 1 contents
# Trained / Untrained condition label
self.trained_label = QPushButton('Trained')
self.Untrained_label = QPushButton('Untrained')
# Group 2 contents
self.normal_label = QPushButton('Normal')
self.abnormal_label = QPushButton('Abnormal')
# Group 3 contents
self.name_procedure = QLabel('Number of Procedure: ')
self.num_procedure = QLineEdit(self)
self.num_procedure.setAlignment(Qt.AlignCenter)
self.name_scnario = QLabel('Name of Procedure: ')
self.num_scnario = QLineEdit(self)
self.num_scnario.setAlignment(Qt.AlignCenter)
# Group 4 contents
self.success_label = QPushButton('Diagnosis Success')
self.failure_label = QPushButton('Diagnosis Failure')
# Group 5 contents
self.symptom_name = QLabel(self)
self.symptom1 = QCheckBox(self)
self.symptom2 = QCheckBox(self)
self.symptom3 = QCheckBox(self)
self.symptom4 = QCheckBox(self)
self.symptom5 = QCheckBox(self)
self.symptom6 = QCheckBox(self)
# Populate group 0
gb_0_layout.addWidget(self.time_label)
gb_0_layout.addWidget(self.power_label)
gb_0.setLayout(gb_0_layout)
# Populate group 1
gb_1_layout.addWidget(self.trained_label)
gb_1_layout.addWidget(self.Untrained_label)
gb_1.setLayout(gb_1_layout) # put the group 1 layout into the group 1 frame
# Populate group 2
gb_2_layout.addWidget(self.normal_label)
gb_2_layout.addWidget(self.abnormal_label)
gb_2.setLayout(gb_2_layout)
# Populate group 3
gb_3_layout.addWidget(self.name_procedure)
gb_3_layout.addWidget(self.num_procedure)
gb_3_layout.addWidget(self.name_scnario)
gb_3_layout.addWidget(self.num_scnario)
gb_3.setLayout(gb_3_layout)
# Populate group 4
gb_4_layout.addWidget(self.success_label)
gb_4_layout.addWidget(self.failure_label)
gb_4.setLayout(gb_4_layout)
# Populate group 5
gb_5_layout.addWidget(self.symptom_name)
gb_5_layout.addWidget(self.symptom1)
gb_5_layout.addWidget(self.symptom2)
gb_5_layout.addWidget(self.symptom3)
gb_5_layout.addWidget(self.symptom4)
gb_5_layout.addWidget(self.symptom5)
gb_5_layout.addWidget(self.symptom6)
gb_5.setLayout(gb_5_layout)
# Place the Start button at the very bottom
self.start_btn = QPushButton('Start')
# layout_part1.addWidget(self.start_btn)
self.tableWidget = QTableWidget(0, 0)
self.tableWidget.setFixedHeight(500)
self.tableWidget.setFixedWidth(800)
# Plot implementation
self.plot_1 = pyqtgraph.PlotWidget(title=self)
self.plot_2 = pyqtgraph.PlotWidget(title=self)
self.plot_3 = pyqtgraph.PlotWidget(title=self)
self.plot_4 = pyqtgraph.PlotWidget(title=self)
# Explanation Alarm implementation
red_alarm = QGroupBox('Main basis for diagnosis')
red_alarm_layout = QGridLayout()
orange_alarm = QGroupBox('Sub basis for diagnosis')
orange_alarm_layout = QGridLayout()
# Create the display buttons
self.red1 = QPushButton(self)
self.red2 = QPushButton(self)
self.red3 = QPushButton(self)
self.red4 = QPushButton(self)
self.orange1 = QPushButton(self)
self.orange2 = QPushButton(self)
self.orange3 = QPushButton(self)
self.orange4 = QPushButton(self)
self.orange5 = QPushButton(self)
self.orange6 = QPushButton(self)
self.orange7 = QPushButton(self)
self.orange8 = QPushButton(self)
self.orange9 = QPushButton(self)
self.orange10 = QPushButton(self)
self.orange11 = QPushButton(self)
self.orange12 = QPushButton(self)
# Insert the widgets into the layouts
red_alarm_layout.addWidget(self.red1, 0, 0)
red_alarm_layout.addWidget(self.red2, 0, 1)
red_alarm_layout.addWidget(self.red3, 1, 0)
red_alarm_layout.addWidget(self.red4, 1, 1)
orange_alarm_layout.addWidget(self.orange1, 0, 0)
orange_alarm_layout.addWidget(self.orange2, 0, 1)
orange_alarm_layout.addWidget(self.orange3, 1, 0)
orange_alarm_layout.addWidget(self.orange4, 1, 1)
orange_alarm_layout.addWidget(self.orange5, 2, 0)
orange_alarm_layout.addWidget(self.orange6, 2, 1)
orange_alarm_layout.addWidget(self.orange7, 3, 0)
orange_alarm_layout.addWidget(self.orange8, 3, 1)
orange_alarm_layout.addWidget(self.orange9, 4, 0)
orange_alarm_layout.addWidget(self.orange10, 4, 1)
orange_alarm_layout.addWidget(self.orange11, 5, 0)
orange_alarm_layout.addWidget(self.orange12, 5, 1)
# Set the layouts on the group boxes
red_alarm.setLayout(red_alarm_layout)
orange_alarm.setLayout(orange_alarm_layout)
# Insert each group box into the parent layout
layout_part1 = QVBoxLayout()
detail_part = QHBoxLayout()
detailed_table = QPushButton('Detail Explanation [Table]')
self.another_classification = QPushButton('Why other scenarios were not chosen')
detail_part.addWidget(detailed_table)
detail_part.addWidget(self.another_classification)
alarm_main = QVBoxLayout()
alarm_main.addWidget(red_alarm)
alarm_main.addWidget(orange_alarm)
layout_part1.addLayout(layout_left)
layout_part1.addLayout(alarm_main)
layout_part1.addLayout(detail_part)
layout_part1.addItem(QSpacerItem(20, 40, QSizePolicy.Minimum, QSizePolicy.Expanding))
# GUI part 2 layout (XAI implementation)
layout_part2 = QVBoxLayout()
layout_part2.addWidget(self.plot_1)
layout_part2.addWidget(self.plot_2)
layout_part2.addWidget(self.plot_3)
layout_part2.addWidget(self.plot_4)
# layout_part2.addItem(QSpacerItem(20, 40, QSizePolicy.Minimum, QSizePolicy.Expanding))
# layout_part2.addWidget(self.tableWidget)
# Combine GUI part 1 and part 2
layout_base = QHBoxLayout()
layout_base.addLayout(layout_part1)
layout_base.addLayout(layout_part2)
# Final GUI integration (to place the start button at the bottom)
total_layout = QVBoxLayout()
total_layout.addLayout(layout_base)
total_layout.addWidget(self.start_btn)
self.setLayout(total_layout) # setLayout : determines the final GUI screen that is displayed
# Threading Part##############################################################################################################
# Run the data-computation part in a separate thread
self.worker = Worker()
self.worker_thread = QThread()
# Connect the worker signals to slots in the main thread
self.worker.train_value.connect(self.Determine_train)
self.worker.procedure_value.connect(self.Determine_abnormal)
self.worker.procedure_value.connect(self.Determine_procedure)
self.worker.verif_value.connect(self.verifit_result)
self.worker.timer.connect(self.time_display)
self.worker.symptom_db.connect(self.procedure_satisfaction)
# self.worker.shap.connect(self.explain_result)
self.worker.plot_db.connect(self.plotting)
self.worker.display_ex.connect(self.display_explain)
self.worker.moveToThread(self.worker_thread) # move the Worker instance to the thread
# self.worker_thread.started.connect(lambda: self.worker.generate_db())
self.start_btn.clicked.connect(lambda: self.worker.generate_db()) # run the for-loop when clicked
self.worker_thread.start()
# Threading Part##############################################################################################################
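# Minimal sketch of the worker-thread wiring used above (names below are illustrative only,
# not part of this application):
#   worker = Worker(); thread = QThread()
#   worker.result_signal.connect(gui_slot)        # cross-thread, queued connection
#   worker.moveToThread(thread)
#   thread.start()
#   start_button.clicked.connect(worker.long_job) # long_job then runs in the worker thread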
# Event handling ----------------------------------------------------------------------------------------------------
detailed_table.clicked.connect(self.show_table)
self.another_classification.clicked.connect(self.show_another_result)
# Button-click event wiring
convert_red_btn = {0: self.red1, 1: self.red2, 2: self.red3, 3: self.red4} # Red Button
convert_red_plot = {0: self.red1_plot, 1: self.red2_plot, 2: self.red3_plot, 3: self.red4_plot} #
convert_orange_btn = {0: self.orange1, 1: self.orange2, 2: self.orange3, 3: self.orange4, 4: self.orange5,
5: self.orange6, 6: self.orange7, 7: self.orange8, 8: self.orange9, 9: self.orange10,
10: self.orange11, 11: self.orange12} # Orange Button
convert_orange_plot = {0: self.orange1_plot, 1: self.orange2_plot, 2: self.orange3_plot, 3: self.orange4_plot, 4: self.orange5_plot,
5: self.orange6_plot, 6: self.orange7_plot, 7: self.orange8_plot, 8: self.orange9_plot, 9: self.orange10_plot,
10: self.orange11_plot, 11: self.orange12_plot}
# Declare the plot widgets up front -> they must be created at init time so they persist without being interrupted.
# Red Button
[convert_red_btn[i].clicked.connect(convert_red_plot[i]) for i in range(4)]
self.red_plot_1 = pyqtgraph.PlotWidget(title=self)
self.red_plot_2 = pyqtgraph.PlotWidget(title=self)
self.red_plot_3 = pyqtgraph.PlotWidget(title=self)
self.red_plot_4 = pyqtgraph.PlotWidget(title=self)
# Grid setting
self.red_plot_1.showGrid(x=True, y=True, alpha=0.3)
self.red_plot_2.showGrid(x=True, y=True, alpha=0.3)
self.red_plot_3.showGrid(x=True, y=True, alpha=0.3)
self.red_plot_4.showGrid(x=True, y=True, alpha=0.3)
# Orange Button
[convert_orange_btn[i].clicked.connect(convert_orange_plot[i]) for i in range(12)]
self.orange_plot_1 = pyqtgraph.PlotWidget(title=self)
self.orange_plot_2 = pyqtgraph.PlotWidget(title=self)
self.orange_plot_3 = pyqtgraph.PlotWidget(title=self)
self.orange_plot_4 = pyqtgraph.PlotWidget(title=self)
self.orange_plot_5 = pyqtgraph.PlotWidget(title=self)
self.orange_plot_6 = pyqtgraph.PlotWidget(title=self)
self.orange_plot_7 = pyqtgraph.PlotWidget(title=self)
self.orange_plot_8 = pyqtgraph.PlotWidget(title=self)
self.orange_plot_9 = pyqtgraph.PlotWidget(title=self)
self.orange_plot_10 = pyqtgraph.PlotWidget(title=self)
self.orange_plot_11 = pyqtgraph.PlotWidget(title=self)
self.orange_plot_12 = pyqtgraph.PlotWidget(title=self)
# Grid setting
self.orange_plot_1.showGrid(x=True, y=True, alpha=0.3)
self.orange_plot_2.showGrid(x=True, y=True, alpha=0.3)
self.orange_plot_3.showGrid(x=True, y=True, alpha=0.3)
self.orange_plot_4.showGrid(x=True, y=True, alpha=0.3)
self.orange_plot_5.showGrid(x=True, y=True, alpha=0.3)
self.orange_plot_6.showGrid(x=True, y=True, alpha=0.3)
self.orange_plot_7.showGrid(x=True, y=True, alpha=0.3)
self.orange_plot_8.showGrid(x=True, y=True, alpha=0.3)
self.orange_plot_9.showGrid(x=True, y=True, alpha=0.3)
self.orange_plot_10.showGrid(x=True, y=True, alpha=0.3)
self.orange_plot_11.showGrid(x=True, y=True, alpha=0.3)
self.orange_plot_12.showGrid(x=True, y=True, alpha=0.3)
self.show() # UI show command
def time_display(self, display_variable):
# display_variable[0] : time, display_variable[1].iloc[1]
self.time_label.setText(f'<b>Time :<b/> {display_variable[0]} sec')
self.time_label.setFont(QFont('Times new roman', 15))
self.time_label.setAlignment(Qt.AlignCenter)
self.power_label.setText(f'Power : {round(display_variable[1].iloc[1]["QPROREL"]*100, 2)}%')
if round(display_variable[1].iloc[1]["QPROREL"]*100, 2) < 95:
self.power_label.setStyleSheet('color : white;' 'font-weight: bold;' 'background-color: red;')
else:
self.power_label.setStyleSheet('color : black;' 'background-color: light gray;')
def Determine_train(self, train_untrain_reconstruction_error):
if train_untrain_reconstruction_error[0] <= 0.00225299: # Trained Data
self.trained_label.setStyleSheet('color : white;' 'font-weight: bold;' 'background-color: green;')
self.Untrained_label.setStyleSheet('color : black;' 'background-color: light gray;')
else: # Untrianed Data
self.Untrained_label.setStyleSheet('color : white;' 'font-weight: bold;' 'background-color: red;')
self.trained_label.setStyleSheet('color : black;' 'background-color: light gray;')
def Determine_abnormal(self, abnormal_diagnosis):
if abnormal_diagnosis == 0: # 정상상태
self.normal_label.setStyleSheet('color : white;' 'font-weight: bold;' 'background-color: green;')
self.abnormal_label.setStyleSheet('color : black;' 'background-color: light gray;')
else: # 비정상상태
self.abnormal_label.setStyleSheet('color : white;' 'font-weight: bold;' 'background-color: red;')
self.normal_label.setStyleSheet('color : black;' 'background-color: light gray;')
def Determine_procedure(self, abnormal_procedure_result):
if abnormal_procedure_result == 0:
self.num_procedure.setText('Normal')
self.num_scnario.setText('Normal')
elif abnormal_procedure_result == 1:
self.num_procedure.setText('Ab21-01')
self.num_scnario.setText('Pressurizer pressure channel failure "High"')
elif abnormal_procedure_result == 2:
self.num_procedure.setText('Ab21-02')
self.num_scnario.setText('Pressurizer pressure channel failure "Low"')
elif abnormal_procedure_result == 3:
self.num_procedure.setText('Ab20-04')
self.num_scnario.setText('Pressurizer level channel failure "Low"')
elif abnormal_procedure_result == 4:
self.num_procedure.setText('Ab15-07')
self.num_scnario.setText('Steam generator level channel failure "Low"')
elif abnormal_procedure_result == 5:
self.num_procedure.setText('Ab15-08')
self.num_scnario.setText('Steam generator level channel failure "High"')
elif abnormal_procedure_result == 6:
self.num_procedure.setText('Ab63-04')
self.num_scnario.setText('Control rod drop')
elif abnormal_procedure_result == 7:
self.num_procedure.setText('Ab63-02')
self.num_scnario.setText('Continuous control rod insertion')
elif abnormal_procedure_result == 8:
self.num_procedure.setText('Ab21-12')
# self.num_scnario.setText('가압기 PORV 열림')
self.num_scnario.setText('Pressurizer PORV opening')
elif abnormal_procedure_result == 9:
self.num_procedure.setText('Ab19-02')
self.num_scnario.setText('Pressurizer safety valve failure')
elif abnormal_procedure_result == 10:
self.num_procedure.setText('Ab21-11')
self.num_scnario.setText('Pressurizer spray valve failure "Open"')
elif abnormal_procedure_result == 11:
self.num_procedure.setText('Ab23-03')
self.num_scnario.setText('Leakage into the component cooling water system "CVCS->CCW"')
elif abnormal_procedure_result == 12:
self.num_procedure.setText('Ab60-02')
self.num_scnario.setText('Rupture upstream of the regenerative heat exchanger')
elif abnormal_procedure_result == 13:
self.num_procedure.setText('Ab59-02')
self.num_scnario.setText('Leakage downstream of the charging flow control valve')
elif abnormal_procedure_result == 14:
self.num_procedure.setText('Ab23-01')
self.num_scnario.setText('Leakage into the component cooling water system "RCS->CCW"')
elif abnormal_procedure_result == 15:
self.num_procedure.setText('Ab23-06')
self.num_scnario.setText('Steam generator tube leakage')
def verifit_result(self, verif_value):
if verif_value[0] <= verif_value[1]: # 진단 성공
self.success_label.setStyleSheet('color : white;' 'font-weight: bold;' 'background-color: green;')
self.failure_label.setStyleSheet('color : black;' 'background-color: light gray;')
else: # 진단 실패
self.failure_label.setStyleSheet('color : white;' 'font-weight: bold;' 'background-color: red;')
self.success_label.setStyleSheet('color : black;' 'background-color: light gray;')
def procedure_satisfaction(self, symptom_db):
# symptom_db[0] : classification result [0~15]
# symptom_db[1] : check_db [2,2222] -> used to compare the current time step with the previous one.
# symptom_db[1].iloc[0] : previous time step # symptom_db[1].iloc[1] : current time step
if symptom_db[0] == 0: # normal state
self.symptom_name.setText('Diagnosis Result : Normal → Symptoms : 0')
self.symptom1.setText('')
self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom2.setText('')
self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom3.setText('')
self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom4.setText('')
self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom5.setText('')
self.symptom5.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom6.setText('')
self.symptom6.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
elif symptom_db[0] == 1:
self.symptom_name.setText('Diagnosis Result : Ab21-01 Pressurizer pressure channel failure "High" → Symptoms : 6')
self.symptom1.setText("Pressurizer 'high' pressure indication due to channel failure")
if symptom_db[1].iloc[1]['PPRZN'] > symptom_db[1].iloc[1]['CPPRZH']:
self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom2.setText("Pressurizer spray valve 'open' indication")
if symptom_db[1].iloc[1]['BPRZSP'] > 0:
self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom3.setText("Pressurizer proportional heater off")
if symptom_db[1].iloc[1]['QPRZP'] == 0:
self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom4.setText("Pressurizer aux. heater off")
if symptom_db[1].iloc[1]['QPRZB'] == 0:
self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom5.setText("Actual pressurizer 'low' pressure indication")
if symptom_db[1].iloc[1]['PPRZ'] < symptom_db[1].iloc[1]['CPPRZL']:
self.symptom5.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom5.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom6.setText("Pressurizer PORV block valve closed")
if symptom_db[1].iloc[1]['BHV6'] == 0:
self.symptom6.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom6.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
elif symptom_db[0] == 2:
self.symptom_name.setText('Diagnosis Result : Ab21-02 Pressurizer pressure channel failure "Low" → Symptoms : 5')
self.symptom1.setText("Pressurizer 'low' pressure indication due to channel failure")
if symptom_db[1].iloc[1]['PPRZN'] < symptom_db[1].iloc[1]['CPPRZL']:
self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom2.setText('Aux. heater on indication and alarm due to pressurizer low pressure')
if (symptom_db[1].iloc[1]['PPRZN'] < symptom_db[1].iloc[1]['CQPRZB']) and (symptom_db[1].iloc[1]['KBHON'] == 1):
self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom3.setText("Actual pressurizer 'high' pressure indication")
if symptom_db[1].iloc[1]['PPRZ'] > symptom_db[1].iloc[1]['CPPRZH']:
self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom4.setText('Pressurizer PORV open indication and alarm')
if symptom_db[1].iloc[1]['BPORV'] > 0:
self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom5.setText('Pressurizer PORV closes as actual pressurizer pressure decreases') # the pressure-decrease check still needs to be addressed.
if symptom_db[1].iloc[1]['BPORV'] == 0 and (symptom_db[1].iloc[0]['PPRZ'] > symptom_db[1].iloc[1]['PPRZ']):
self.symptom5.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom5.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
elif symptom_db[0] == 3:
self.symptom_name.setText('Diagnosis Result : Ab20-04 Pressurizer level channel failure "Low" → Symptoms : 5')
self.symptom1.setText("Pressurizer 'low' level indication due to channel failure")
if symptom_db[1].iloc[1]['ZINST63'] < 17: # needs to be re-checked later.
self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
# else:
# self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom2.setText('"LETDN HX OUTLET FLOW LOW" alarm occurs')
if symptom_db[1].iloc[1]['UNRHXUT'] > symptom_db[1].iloc[1]['CULDHX']:
self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
# else:
# self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom3.setText('"CHARGING LINE FLOW HI/LO" alarm occurs')
if (symptom_db[1].iloc[1]['WCHGNO'] < symptom_db[1].iloc[1]['CWCHGL']) or (symptom_db[1].iloc[1]['WCHGNO'] > symptom_db[1].iloc[1]['CWCHGH']):
self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
# else:
# self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom4.setText('Charging flow increase')
if symptom_db[1].iloc[0]['WCHGNO'] < symptom_db[1].iloc[1]['WCHGNO']:
self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
# else:
# self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom5.setText('Level indication increase on the healthy level channel')
if symptom_db[1].iloc[0]['ZPRZNO'] < symptom_db[1].iloc[1]['ZPRZNO']:
self.symptom5.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
# else:
# self.symptom5.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
elif symptom_db[0] == 4:
self.symptom_name.setText('Diagnosis Result : Ab15-07 Steam generator level channel failure "Low" → Symptoms : ')
self.symptom1.setText('Steam generator level "low" alarm occurs')
if symptom_db[1].iloc[1]['ZINST78']*0.01 < symptom_db[1].iloc[1]['CZSGW'] or symptom_db[1].iloc[1]['ZINST77']*0.01 < symptom_db[1].iloc[1]['CZSGW'] or symptom_db[1].iloc[1]['ZINST76']*0.01 < symptom_db[1].iloc[1]['CZSGW']:
self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom2.setText('Affected SG MFCV moves toward open and actual feedwater flow to that SG increases')
elif symptom_db[0] == 8:
# self.symptom_name.setText('진단 : Ab21-12 가압기 PORV 열림 → 증상 : 5')
self.symptom_name.setText('Diagnosis result : Ab21-12 Pressurizer PORV opening → Symptoms : 5')
# self.symptom1.setText('가압기 PORV 열림 지시 및 경보 발생')
self.symptom1.setText('Pressurizer PORV open indication and alarm')
if symptom_db[1].iloc[1]['BPORV'] > 0:
self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
# self.symptom2.setText('가압기 저압력으로 인한 보조 전열기 켜짐 지시 및 경보 발생')
self.symptom2.setText('Aux. heater turn on instruction and alarm due to pressurizer low pressure')
if (symptom_db[1].iloc[1]['PPRZN'] < symptom_db[1].iloc[1]['CQPRZB']) and (symptom_db[1].iloc[1]['KBHON'] == 1):
self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
# self.symptom3.setText("가압기 '저' 압력 지시 및 경보 발생")
self.symptom3.setText("pressurizer 'low' pressure indication and alarm")
if symptom_db[1].iloc[1]['PPRZ'] < symptom_db[1].iloc[1]['CPPRZL'] :
self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
# self.symptom4.setText("PRT 고온 지시 및 경보 발생")
self.symptom4.setText("PRT high temperature indication and alarm")
if symptom_db[1].iloc[1]['UPRT'] > symptom_db[1].iloc[1]['CUPRT'] :
self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
# self.symptom5.setText("PRT 고압 지시 및 경보 발생")
self.symptom5.setText("PRT high pressure indication and alarm")
if (symptom_db[1].iloc[1]['PPRT'] - 0.98E5) > symptom_db[1].iloc[1]['CPPRT']:
self.symptom5.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom5.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom6.setText("Blank")
self.symptom6.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
elif symptom_db[0] == 10:
self.symptom_name.setText("진단 : Ab21-11 가압기 살수밸브 고장 '열림' → 증상 : 4")
self.symptom1.setText("가압기 살수밸브 '열림' 지시 및 상태 표시등 점등")
if symptom_db[1].iloc[1]['BPRZSP'] > 0:
self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom2.setText("가압기 보조전열기 켜짐 지시 및 경보 발생")
if (symptom_db[1].iloc[1]['PPRZN'] < symptom_db[1].iloc[1]['CQPRZB']) and (symptom_db[1].iloc[1]['KBHON'] == 1):
self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom3.setText("가압기 '저' 압력 지시 및 경보 발생")
if symptom_db[1].iloc[1]['PPRZ'] < symptom_db[1].iloc[1]['CPPRZL']:
self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
self.symptom4.setText("가압기 수위 급격한 증가") # 급격한 증가에 대한 수정은 필요함 -> 추후 수정
if symptom_db[1].iloc[0]['ZINST63'] < symptom_db[1].iloc[1]['ZINST63']:
self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}")
else:
self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}")
def explain_result(self, shap_add_des):
'''
        # shap_add_des['index'] : variable name / shap_add_des[0] : SHAP value
        # shap_add_des['describe'] : description of the variable / shap_add_des['probability'] : SHAP value converted to a probability (%)
'''
self.tableWidget.setRowCount(len(shap_add_des))
self.tableWidget.setColumnCount(4)
self.tableWidget.setHorizontalHeaderLabels(["value_name", 'probability', 'describe', 'system'])
header = self.tableWidget.horizontalHeader()
header.setSectionResizeMode(QHeaderView.ResizeToContents)
header.setSectionResizeMode(0, QHeaderView.Stretch)
header.setSectionResizeMode(1, QHeaderView.Stretch)
header.setSectionResizeMode(2, QHeaderView.ResizeToContents)
header.setSectionResizeMode(3, QHeaderView.Stretch)
[self.tableWidget.setItem(i, 0, QTableWidgetItem(f"{shap_add_des['index'][i]}")) for i in range(len(shap_add_des['index']))]
[self.tableWidget.setItem(i, 1, QTableWidgetItem(f"{round(shap_add_des['probability'][i],2)}%")) for i in range(len(shap_add_des['probability']))]
[self.tableWidget.setItem(i, 2, QTableWidgetItem(f"{shap_add_des['describe'][i]}")) for i in range(len(shap_add_des['describe']))]
[self.tableWidget.setItem(i, 3, QTableWidgetItem(f"{shap_add_des['system'][i]}")) for i in range(len(shap_add_des['system']))]
delegate = AlignDelegate(self.tableWidget)
self.tableWidget.setItemDelegate(delegate)
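    # Editor's note: the sketch below is illustrative only (the values are invented)
    # and shows the DataFrame layout that explain_result() above expects, based on
    # its docstring and the columns it accesses ('index', 0, 'probability',
    # 'describe', 'system'):
    #
    #   shap_add_des = pd.DataFrame({
    #       'index': ['ZINST63', 'PPRZ'],                # variable names
    #       0: [0.12, 0.08],                             # raw SHAP values
    #       'probability': [35.2, 23.5],                 # SHAP converted to %
    #       'describe': ['Pressurizer level', 'Pressurizer pressure'],
    #       'system': ['RCS', 'RCS'],
    #   })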
def show_table(self):
self.worker.shap.connect(self.explain_result)
        # Because the signal is delivered through the worker thread when clicked, a buffering delay of roughly 2 seconds occurs. Consider later whether to load this at start-up instead.
self.tableWidget.show()
def plotting(self, symptom_db):
# symptom_db[0] : liner : appended time (axis-x) / symptom_db[1].iloc[1] : check_db (:line,2222)[1]
# -- scatter --
# time = []
# value1, value2, value3 = [], [], []
# time.append(symptom_db[0])
# value1.append(round(symptom_db[1].iloc[1]['ZVCT'],2))
# value2.append(round(symptom_db[1].iloc[1]['BPORV'],2))
# value3.append(round(symptom_db[1].iloc[1]['UPRZ'],2))
# self.plotting_1 = self.plot_1.plot(pen=None, symbol='o', symbolBrush='w', symbolPen='w', symbolSize=5)
# self.plotting_2 = self.plot_2.plot(pen=None, symbol='o', symbolBrush='w', symbolPen='w', symbolSize=5)
# self.plotting_3 = self.plot_3.plot(pen=None, symbol='o', symbolBrush='w', symbolPen='w', symbolSize=5)
# -- Line plotting --
# self.plotting_1 = self.plot_1.plot(pen='w')
# self.plotting_2 = self.plot_2.plot(pen='w')
# self.plotting_3 = self.plot_3.plot(pen='w')
# self.plotting_4 = self.plot_4.plot(pen='w')
self.plot_1.showGrid(x=True, y=True, alpha=0.3)
self.plot_2.showGrid(x=True, y=True, alpha=0.3)
self.plot_3.showGrid(x=True, y=True, alpha=0.3)
self.plot_4.showGrid(x=True, y=True, alpha=0.3)
self.plotting_1 = self.plot_1.plot(pen=pyqtgraph.mkPen('k',width=3))
self.plotting_2 = self.plot_2.plot(pen=pyqtgraph.mkPen('k',width=3))
self.plotting_3 = self.plot_3.plot(pen=pyqtgraph.mkPen('k',width=3))
self.plotting_4 = self.plot_4.plot(pen=pyqtgraph.mkPen('k',width=3))
self.plotting_1.setData(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])['BPORV'])
self.plot_1.setTitle('PORV open state')
self.plotting_2.setData(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])['PPRZN'])
self.plot_2.setTitle('Pressurizer pressure')
self.plotting_3.setData(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])['UPRT'])
self.plot_3.setTitle('PRT temperature')
self.plotting_4.setData(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])['PPRT'])
self.plot_4.setTitle('PRT pressure')
        # red_range = display_db[display_db['probability'] >= 10]  # variables with probability >= 10%
#
# print(bool(red_range["describe"].iloc[3]))
# try :
# self.plotting_1.setData(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[red_range['index'].iloc[0]])
# if red_range["describe"].iloc[0] == None:
# self.plot_1.setTitle(self)
# else:
# self.plot_1.setTitle(f'{red_range["describe"].iloc[0]}')
# # self.plot_1.clear()
# except:
# print('plot1 fail')
# try:
# self.plotting_2.setData(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[red_range['index'].iloc[1]])
# if red_range["describe"].iloc[1] == None:
# self.plot_2.setTitle(self)
# else:
# self.plot_2.setTitle(f'{red_range["describe"].iloc[1]}')
# # self.plot_2.clear()
# except:
# print('plot2 fail')
# try:
# self.plotting_3.setData(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[red_range['index'].iloc[2]])
# if red_range["describe"].iloc[2] == None:
# self.plot_3.setTitle(self)
# else:
# self.plot_3.setTitle(f'{red_range["describe"].iloc[2]}')
# # self.plot_3.clear()
# except:
# print('plot3 fail')
# try:
# self.plotting_4.setData(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[red_range['index'].iloc[3]])
# if red_range["describe"].iloc[3] == None:
# self.plot_4.setTitle(self)
# else:
# self.plot_4.setTitle(f'{red_range["describe"].iloc[3]}')
# # self.plot_4.clear()
# except:
# print('plot4 fail')
def display_explain(self, display_db, symptom_db, normal_db):
'''
        # display_db['index'] : variable name / display_db[0] : SHAP value
        # display_db['describe'] : description of the variable / display_db['probability'] : SHAP value converted to a probability (%)
# symptom_db[0] : liner : appended time (axis-x) / symptom_db[1].iloc[1] : check_db (:line,2222)[1]
'''
red_range = display_db[display_db['probability'] >=10]
orange_range = display_db[[display_db['probability'].iloc[i]<10 and display_db['probability'].iloc[i]>1 for i in range(len(display_db['probability']))]]
convert_red = {0: self.red1, 1: self.red2, 2: self.red3, 3: self.red4}
convert_orange = {0: self.orange1, 1: self.orange2, 2: self.orange3, 3: self.orange4, 4: self.orange5, 5: self.orange6, 6: self.orange7, 7: self.orange8, 8: self.orange9, 9: self.orange10, 10: self.orange11, 11: self.orange12}
        # Button slots beyond the number of rows found are blanked out below.
        red_del = list(range(len(red_range), 4))
        orange_del = list(range(len(orange_range), 12))
[convert_red[i].setText(f'{red_range["describe"].iloc[i]} \n[{round(red_range["probability"].iloc[i],2)}%]') for i in range(len(red_range))]
[convert_red[i].setText('None\nParameter') for i in red_del]
[convert_red[i].setStyleSheet('color : white;' 'font-weight: bold;' 'background-color: blue;') for i in range(len(red_range))]
[convert_red[i].setStyleSheet('color : black;' 'background-color: light gray;') for i in red_del]
[convert_orange[i].setText(f'{orange_range["describe"].iloc[i]} \n[{round(orange_range["probability"].iloc[i],2)}%]') for i in range(len(orange_range))]
[convert_orange[i].setText('None\nParameter') for i in orange_del]
# [convert_orange[i].setStyleSheet('color : white;' 'font-weight: bold;' 'background-color: orange;') for i in range(len(orange_range))]
# [convert_orange[i].setStyleSheet('color : black;' 'background-color: light gray;') for i in orange_del]
        # Build the plotting data associated with each button
# Red1 Button
if self.red1.text().split()[0] != 'None':
self.red_plot_1.clear()
self.red_plot_1.setTitle(red_range['describe'].iloc[0])
self.red_plot_1.addLegend(offset=(-30,20))
self.red_plot_1.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[red_range['index'].iloc[0]], pen=pyqtgraph.mkPen('b', width=3), name = 'Real Data')
self.red_plot_1.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[red_range['index'].iloc[0]], pen=pyqtgraph.mkPen('k', width=3), name = 'Normal Data')
# Red2 Button
if self.red2.text().split()[0] != 'None':
self.red_plot_2.clear()
self.red_plot_2.setTitle(red_range['describe'].iloc[1])
self.red_plot_2.addLegend(offset=(-30, 20))
self.red_plot_2.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[red_range['index'].iloc[1]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.red_plot_2.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[red_range['index'].iloc[1]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
# Red3 Button
if self.red3.text().split()[0] != 'None':
self.red_plot_3.clear()
self.red_plot_3.setTitle(red_range['describe'].iloc[2])
self.red_plot_3.addLegend(offset=(-30, 20))
self.red_plot_3.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[red_range['index'].iloc[2]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.red_plot_3.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[red_range['index'].iloc[2]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
# Red4 Button
if self.red4.text().split()[0] != 'None':
self.red_plot_4.clear()
self.red_plot_4.setTitle(red_range['describe'].iloc[3])
self.red_plot_4.addLegend(offset=(-30, 20))
self.red_plot_4.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[red_range['index'].iloc[3]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.red_plot_4.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[red_range['index'].iloc[3]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
# Orange1 Button
if self.orange1.text().split()[0] != 'None':
self.orange_plot_1.clear()
self.orange_plot_1.setTitle(orange_range['describe'].iloc[0])
self.orange_plot_1.addLegend(offset=(-30, 20))
self.orange_plot_1.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[0]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.orange_plot_1.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[orange_range['index'].iloc[0]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
# Orange2 Button
if self.orange2.text().split()[0] != 'None':
self.orange_plot_2.clear()
self.orange_plot_2.setTitle(orange_range['describe'].iloc[1])
self.orange_plot_2.addLegend(offset=(-30, 20))
self.orange_plot_2.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[1]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.orange_plot_2.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[orange_range['index'].iloc[1]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
# Orange3 Button
if self.orange3.text().split()[0] != 'None':
self.orange_plot_3.clear()
self.orange_plot_3.setTitle(orange_range['describe'].iloc[2])
self.orange_plot_3.addLegend(offset=(-30, 20))
self.orange_plot_3.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[2]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.orange_plot_3.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[orange_range['index'].iloc[2]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
# Orange4 Button
if self.orange4.text().split()[0] != 'None':
self.orange_plot_4.clear()
self.orange_plot_4.setTitle(orange_range['describe'].iloc[3])
self.orange_plot_4.addLegend(offset=(-30, 20))
self.orange_plot_4.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[3]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.orange_plot_4.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[orange_range['index'].iloc[3]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
# Orange5 Button
if self.orange5.text().split()[0] != 'None':
self.orange_plot_5.clear()
self.orange_plot_5.setTitle(orange_range['describe'].iloc[4])
self.orange_plot_5.addLegend(offset=(-30, 20))
self.orange_plot_5.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[4]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.orange_plot_5.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[orange_range['index'].iloc[4]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
# Orange6 Button
if self.orange6.text().split()[0] != 'None':
self.orange_plot_6.clear()
self.orange_plot_6.setTitle(orange_range['describe'].iloc[5])
self.orange_plot_6.addLegend(offset=(-30, 20))
self.orange_plot_6.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[5]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.orange_plot_6.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[orange_range['index'].iloc[5]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
# Orange7 Button
if self.orange7.text().split()[0] != 'None':
self.orange_plot_7.clear()
self.orange_plot_7.setTitle(orange_range['describe'].iloc[6])
self.orange_plot_7.addLegend(offset=(-30, 20))
self.orange_plot_7.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[6]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.orange_plot_7.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[orange_range['index'].iloc[6]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
# Orange8 Button
if self.orange8.text().split()[0] != 'None':
self.orange_plot_8.clear()
self.orange_plot_8.setTitle(orange_range['describe'].iloc[7])
self.orange_plot_8.addLegend(offset=(-30, 20))
self.orange_plot_8.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[7]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.orange_plot_8.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[orange_range['index'].iloc[7]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
# Orange9 Button
if self.orange9.text().split()[0] != 'None':
self.orange_plot_9.clear()
self.orange_plot_9.setTitle(orange_range['describe'].iloc[8])
self.orange_plot_9.addLegend(offset=(-30, 20))
self.orange_plot_9.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[8]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.orange_plot_9.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[orange_range['index'].iloc[8]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
# Orange10 Button
if self.orange10.text().split()[0] != 'None':
self.orange_plot_10.clear()
self.orange_plot_10.setTitle(orange_range['describe'].iloc[9])
self.orange_plot_10.addLegend(offset=(-30, 20))
self.orange_plot_10.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[9]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.orange_plot_10.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[orange_range['index'].iloc[9]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
# Orange11 Button
if self.orange11.text().split()[0] != 'None':
self.orange_plot_11.clear()
self.orange_plot_11.setTitle(orange_range['describe'].iloc[10])
self.orange_plot_11.addLegend(offset=(-30, 20))
self.orange_plot_11.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[10]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.orange_plot_11.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[orange_range['index'].iloc[10]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
# Orange12 Button
if self.orange12.text().split()[0] != 'None':
self.orange_plot_12.clear()
self.orange_plot_12.setTitle(orange_range['describe'].iloc[11])
self.orange_plot_12.addLegend(offset=(-30, 20))
self.orange_plot_12.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[orange_range['index'].iloc[11]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.orange_plot_12.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[orange_range['index'].iloc[11]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data')
[convert_red[i].setCheckable(True) for i in range(4)]
[convert_orange[i].setCheckable(True) for i in range(12)]
def red1_plot(self):
if self.red1.isChecked():
if self.red1.text().split()[0] != 'None':
self.red_plot_1.show()
self.red1.setCheckable(False)
def red2_plot(self):
if self.red2.isChecked():
if self.red2.text().split()[0] != 'None':
self.red_plot_2.show()
self.red2.setCheckable(False)
def red3_plot(self):
if self.red3.isChecked():
if self.red3.text().split()[0] != 'None':
self.red_plot_3.show()
self.red3.setCheckable(False)
def red4_plot(self):
if self.red4.isChecked():
if self.red4.text().split()[0] != 'None':
self.red_plot_4.show()
self.red4.setCheckable(False)
def orange1_plot(self):
if self.orange1.isChecked():
if self.orange1.text().split()[0] != 'None':
self.orange_plot_1.show()
self.orange1.setCheckable(False)
def orange2_plot(self):
if self.orange2.isChecked():
if self.orange2.text().split()[0] != 'None':
self.orange_plot_2.show()
self.orange2.setCheckable(False)
def orange3_plot(self):
if self.orange3.isChecked():
if self.orange3.text().split()[0] != 'None':
self.orange_plot_3.show()
self.orange3.setCheckable(False)
def orange4_plot(self):
if self.orange4.isChecked():
if self.orange4.text().split()[0] != 'None':
self.orange_plot_4.show()
self.orange4.setCheckable(False)
def orange5_plot(self):
if self.orange5.isChecked():
if self.orange5.text().split()[0] != 'None':
self.orange_plot_5.show()
self.orange5.setCheckable(False)
def orange6_plot(self):
if self.orange6.isChecked():
if self.orange6.text().split()[0] != 'None':
self.orange_plot_6.show()
self.orange6.setCheckable(False)
def orange7_plot(self):
if self.orange7.isChecked():
if self.orange7.text().split()[0] != 'None':
self.orange_plot_7.show()
self.orange7.setCheckable(False)
def orange8_plot(self):
if self.orange8.isChecked():
if self.orange8.text().split()[0] != 'None':
self.orange_plot_8.show()
self.orange8.setCheckable(False)
def orange9_plot(self):
if self.orange9.isChecked():
if self.orange9.text().split()[0] != 'None':
self.orange_plot_9.show()
self.orange9.setCheckable(False)
def orange10_plot(self):
if self.orange10.isChecked():
if self.orange10.text().split()[0] != 'None':
self.orange_plot_10.show()
self.orange10.setCheckable(False)
def orange11_plot(self):
if self.orange11.isChecked():
if self.orange11.text().split()[0] != 'None':
self.orange_plot_11.show()
self.orange11.setCheckable(False)
def orange12_plot(self):
if self.orange12.isChecked():
if self.orange12.text().split()[0] != 'None':
self.orange_plot_12.show()
self.orange12.setCheckable(False)
def show_another_result(self):
self.other = another_result_explain()
self.worker.another_shap_table.connect(self.other.show_another_result_table)
self.worker.another_shap.connect(self.other.show_shap)
self.other.show()
class another_result_explain(QWidget):
def __init__(self):
super().__init__()
        # Initial setup of the sub-interface
self.setWindowTitle('Another Result Explanation')
self.setGeometry(300, 300, 800, 500)
self.selected_para = pd.read_csv('./DataBase/Final_parameter_200825.csv')
        # Compose the layout
combo_layout = QVBoxLayout()
        self.title_label = QLabel("<b>Result interpretation for the scenarios that were not selected</b>")
self.title_label.setAlignment(Qt.AlignCenter)
        self.blank = QLabel(self)  # blank label used as a spacer (line break)
self.show_table = QPushButton("Show Table")
self.cb = QComboBox(self)
self.cb.addItem('Normal')
self.cb.addItem('Ab21-01: Pressurizer pressure channel failure (High)')
self.cb.addItem('Ab21-02: Pressurizer pressure channel failure (Low)')
self.cb.addItem('Ab20-04: Pressurizer level channel failure (Low)')
self.cb.addItem('Ab15-07: Steam generator level channel failure (High)')
self.cb.addItem('Ab15-08: Steam generator level channel failure (Low)')
self.cb.addItem('Ab63-04: Control rod fall')
self.cb.addItem('Ab63-02: Continuous insertion of control rod')
self.cb.addItem('Ab21-12: Pressurizer PORV opening')
self.cb.addItem('Ab19-02: Pressurizer safety valve failure')
self.cb.addItem('Ab21-11: Pressurizer spray valve failed opening')
self.cb.addItem('Ab23-03: Leakage from CVCS to RCS')
self.cb.addItem('Ab60-02: Rupture of the front end of the regenerative heat exchanger')
self.cb.addItem('Ab59-02: Leakage at the rear end of the charging flow control valve')
self.cb.addItem('Ab23-01: Leakage from CVCS to CCW')
self.cb.addItem('Ab23-06: Steam generator u-tube leakage')
        # Explanation alarm implementation
cb_red_alarm = QGroupBox('Main basis for diagnosis')
cb_red_alarm_layout = QGridLayout()
cb_orange_alarm = QGroupBox('Sub basis for diagnosis')
cb_orange_alarm_layout = QGridLayout()
        # Create the display buttons
self.cb_red1 = QPushButton(self)
self.cb_red2 = QPushButton(self)
self.cb_red3 = QPushButton(self)
self.cb_red4 = QPushButton(self)
self.cb_orange1 = QPushButton(self)
self.cb_orange2 = QPushButton(self)
self.cb_orange3 = QPushButton(self)
self.cb_orange4 = QPushButton(self)
self.cb_orange5 = QPushButton(self)
self.cb_orange6 = QPushButton(self)
self.cb_orange7 = QPushButton(self)
self.cb_orange8 = QPushButton(self)
self.cb_orange9 = QPushButton(self)
self.cb_orange10 = QPushButton(self)
self.cb_orange11 = QPushButton(self)
self.cb_orange12 = QPushButton(self)
        # Insert the widgets into the layouts
cb_red_alarm_layout.addWidget(self.cb_red1, 0, 0)
cb_red_alarm_layout.addWidget(self.cb_red2, 0, 1)
cb_red_alarm_layout.addWidget(self.cb_red3, 1, 0)
cb_red_alarm_layout.addWidget(self.cb_red4, 1, 1)
cb_orange_alarm_layout.addWidget(self.cb_orange1, 0, 0)
cb_orange_alarm_layout.addWidget(self.cb_orange2, 0, 1)
cb_orange_alarm_layout.addWidget(self.cb_orange3, 1, 0)
cb_orange_alarm_layout.addWidget(self.cb_orange4, 1, 1)
cb_orange_alarm_layout.addWidget(self.cb_orange5, 2, 0)
cb_orange_alarm_layout.addWidget(self.cb_orange6, 2, 1)
cb_orange_alarm_layout.addWidget(self.cb_orange7, 3, 0)
cb_orange_alarm_layout.addWidget(self.cb_orange8, 3, 1)
cb_orange_alarm_layout.addWidget(self.cb_orange9, 4, 0)
cb_orange_alarm_layout.addWidget(self.cb_orange10, 4, 1)
cb_orange_alarm_layout.addWidget(self.cb_orange11, 5, 0)
cb_orange_alarm_layout.addWidget(self.cb_orange12, 5, 1)
cb_red_alarm.setLayout(cb_red_alarm_layout)
cb_orange_alarm.setLayout(cb_orange_alarm_layout)
combo_layout.addWidget(self.title_label)
combo_layout.addWidget(self.blank)
combo_layout.addWidget(self.cb)
combo_layout.addWidget(self.blank)
# combo_layout.addItem(QSpacerItem(20, 40, QSizePolicy.Minimum, QSizePolicy.Expanding))
combo_layout.addWidget(cb_red_alarm)
combo_layout.addWidget(cb_orange_alarm)
combo_layout.addWidget(self.blank)
combo_layout.addWidget(self.show_table)
combo_layout.addItem(QSpacerItem(20, 40, QSizePolicy.Minimum, QSizePolicy.Expanding))
self.setLayout(combo_layout)
self.combo_tableWidget = QTableWidget(0, 0)
self.combo_tableWidget.setFixedHeight(500)
self.combo_tableWidget.setFixedWidth(800)
# self.combo_tableWidget = QTableWidget(0, 0)
        # Event handling section ########################################################
self.show_table.clicked.connect(self.show_anoter_table)
self.cb.activated[str].connect(self.show_another_result_table)
self.cb.activated[str].connect(self.show_shap)
##########################################################################
        # Event handling linked to button clicks
convert_cb_red_btn = {0: self.cb_red1, 1: self.cb_red2, 2: self.cb_red3, 3: self.cb_red4} # Red Button
convert_cb_red_plot = {0: self.cb_red1_plot, 1: self.cb_red2_plot, 2: self.cb_red3_plot, 3: self.cb_red4_plot}
convert_cb_orange_btn = {0: self.cb_orange1, 1: self.cb_orange2, 2: self.cb_orange3, 3: self.cb_orange4, 4: self.cb_orange5,
5: self.cb_orange6, 6: self.cb_orange7, 7: self.cb_orange8, 8: self.cb_orange9, 9: self.cb_orange10,
10: self.cb_orange11, 11: self.cb_orange12} # Orange Button
convert_cb_orange_plot = {0: self.cb_orange1_plot, 1: self.cb_orange2_plot, 2: self.cb_orange3_plot, 3: self.cb_orange4_plot,
4: self.cb_orange5_plot, 5: self.cb_orange6_plot, 6: self.cb_orange7_plot, 7: self.cb_orange8_plot,
8: self.cb_orange9_plot, 9: self.cb_orange10_plot, 10: self.cb_orange11_plot, 11: self.cb_orange12_plot}
################################################################################################################
        # Declare the plot widgets for the buttons up front -> they must be created initially so they persist without being recreated.
# Red Button
[convert_cb_red_btn[i].clicked.connect(convert_cb_red_plot[i]) for i in range(4)]
self.cb_red_plot_1 = pyqtgraph.PlotWidget(title=self)
self.cb_red_plot_2 = pyqtgraph.PlotWidget(title=self)
self.cb_red_plot_3 = pyqtgraph.PlotWidget(title=self)
self.cb_red_plot_4 = pyqtgraph.PlotWidget(title=self)
# Grid setting
self.cb_red_plot_1.showGrid(x=True, y=True, alpha=0.3)
self.cb_red_plot_2.showGrid(x=True, y=True, alpha=0.3)
self.cb_red_plot_3.showGrid(x=True, y=True, alpha=0.3)
self.cb_red_plot_4.showGrid(x=True, y=True, alpha=0.3)
# Orange Button
[convert_cb_orange_btn[i].clicked.connect(convert_cb_orange_plot[i]) for i in range(12)]
self.cb_orange_plot_1 = pyqtgraph.PlotWidget(title=self)
self.cb_orange_plot_2 = pyqtgraph.PlotWidget(title=self)
self.cb_orange_plot_3 = pyqtgraph.PlotWidget(title=self)
self.cb_orange_plot_4 = pyqtgraph.PlotWidget(title=self)
self.cb_orange_plot_5 = pyqtgraph.PlotWidget(title=self)
self.cb_orange_plot_6 = pyqtgraph.PlotWidget(title=self)
self.cb_orange_plot_7 = pyqtgraph.PlotWidget(title=self)
self.cb_orange_plot_8 = pyqtgraph.PlotWidget(title=self)
self.cb_orange_plot_9 = pyqtgraph.PlotWidget(title=self)
self.cb_orange_plot_10 = pyqtgraph.PlotWidget(title=self)
self.cb_orange_plot_11 = pyqtgraph.PlotWidget(title=self)
self.cb_orange_plot_12 = pyqtgraph.PlotWidget(title=self)
# Grid setting
self.cb_orange_plot_1.showGrid(x=True, y=True, alpha=0.3)
self.cb_orange_plot_2.showGrid(x=True, y=True, alpha=0.3)
self.cb_orange_plot_3.showGrid(x=True, y=True, alpha=0.3)
self.cb_orange_plot_4.showGrid(x=True, y=True, alpha=0.3)
self.cb_orange_plot_5.showGrid(x=True, y=True, alpha=0.3)
self.cb_orange_plot_6.showGrid(x=True, y=True, alpha=0.3)
self.cb_orange_plot_7.showGrid(x=True, y=True, alpha=0.3)
self.cb_orange_plot_8.showGrid(x=True, y=True, alpha=0.3)
self.cb_orange_plot_9.showGrid(x=True, y=True, alpha=0.3)
self.cb_orange_plot_10.showGrid(x=True, y=True, alpha=0.3)
self.cb_orange_plot_11.showGrid(x=True, y=True, alpha=0.3)
self.cb_orange_plot_12.showGrid(x=True, y=True, alpha=0.3)
################################################################################################################
self.show() # Sub UI show command
def show_shap(self, all_shap, symptom_db, compare_data):
        # all_shap : holds the SHAP values for every scenario.
# symptom_db[0] : liner : appended time (axis-x) / symptom_db[1].iloc[1] : check_db (:line,2222)[1]
        # The combo-box items were added in the same order as the entries of
        # all_shap, so the current index selects the matching SHAP table directly.
        step1 = pd.DataFrame(all_shap[self.cb.currentIndex()], columns=self.selected_para['0'].tolist())
        # compare_data is keyed by the abnormal-case code (the first seven characters
        # of the combo text, e.g. 'Ab21-01'); for 'Normal' that slice is the full key.
        compared_db = compare_data[self.cb.currentText()[:7]]
step2 = step1.sort_values(by=0, ascending=True, axis=1)
step3 = step2[step2.iloc[:] < 0].dropna(axis=1).T
self.step4 = step3.reset_index()
col = self.step4['index']
var = [self.selected_para['0'][self.selected_para['0'] == col_].index for col_ in col]
val_col = [self.selected_para['1'][var_].iloc[0] for var_ in var]
proba = [(self.step4[0][val_num] / sum(self.step4[0])) * 100 for val_num in range(len(self.step4[0]))]
val_system = [self.selected_para['2'][var_].iloc[0] for var_ in var]
self.step4['describe'] = val_col
self.step4['probability'] = proba
self.step4['system'] = val_system
red_range = self.step4[self.step4['probability'] >= 10]
orange_range = self.step4[
[self.step4['probability'].iloc[i] < 10 and self.step4['probability'].iloc[i] > 1 for i in
range(len(self.step4['probability']))]]
convert_red = {0: self.cb_red1, 1: self.cb_red2, 2: self.cb_red3, 3: self.cb_red4}
convert_orange = {0: self.cb_orange1, 1: self.cb_orange2, 2: self.cb_orange3, 3: self.cb_orange4, 4: self.cb_orange5,
5: self.cb_orange6, 6: self.cb_orange7, 7: self.cb_orange8, 8: self.cb_orange9, 9: self.cb_orange10,
10: self.cb_orange11, 11: self.cb_orange12}
        # Button slots beyond the number of rows found are blanked out below.
        red_del = list(range(len(red_range), 4))
        orange_del = list(range(len(orange_range), 12))
[convert_red[i].setText(f'{red_range["describe"].iloc[i]} \n[{round(red_range["probability"].iloc[i], 2)}%]') for i in range(len(red_range))]
[convert_red[i].setText('None\nParameter') for i in red_del]
[convert_red[i].setStyleSheet('color : white;' 'font-weight: bold;' 'background-color: blue;') for i in range(len(red_range))]
[convert_red[i].setStyleSheet('color : black;' 'background-color: light gray;') for i in red_del]
[convert_orange[i].setText(f'{orange_range["describe"].iloc[i]} \n[{round(orange_range["probability"].iloc[i], 2)}%]') for i in range(len(orange_range))]
[convert_orange[i].setText('None\nParameter') for i in orange_del]
#####################################################################################################################################
        # Build the plotting data associated with each button
# Red1 Button
if self.cb_red1.text().split()[0] != 'None':
self.cb_red_plot_1.clear()
self.cb_red_plot_1.setTitle(red_range['describe'].iloc[0])
self.cb_red_plot_1.addLegend(offset=(-30,20))
self.cb_red_plot_1.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[red_range['index'].iloc[0]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.cb_red_plot_1.plot(x=symptom_db[0], y=pd.DataFrame(compared_db)[red_range['index'].iloc[0]], pen=pyqtgraph.mkPen('k', width=3), name=self.cb.currentText()[:7])
# Red2 Button
if self.cb_red2.text().split()[0] != 'None':
self.cb_red_plot_2.clear()
self.cb_red_plot_2.setTitle(red_range['describe'].iloc[1])
self.cb_red_plot_2.addLegend(offset=(-30, 20))
self.cb_red_plot_2.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[red_range['index'].iloc[1]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.cb_red_plot_2.plot(x=symptom_db[0], y=pd.DataFrame(compared_db)[red_range['index'].iloc[1]], pen=pyqtgraph.mkPen('k', width=3), name=self.cb.currentText()[:7])
# Red3 Button
if self.cb_red3.text().split()[0] != 'None':
self.cb_red_plot_3.clear()
self.cb_red_plot_3.setTitle(red_range['describe'].iloc[2])
self.cb_red_plot_3.addLegend(offset=(-30, 20))
self.cb_red_plot_3.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[red_range['index'].iloc[2]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.cb_red_plot_3.plot(x=symptom_db[0], y=pd.DataFrame(compared_db)[red_range['index'].iloc[2]], pen=pyqtgraph.mkPen('k', width=3), name=self.cb.currentText()[:7])
# Red4 Button
if self.cb_red4.text().split()[0] != 'None':
self.cb_red_plot_4.clear()
self.cb_red_plot_4.setTitle(red_range['describe'].iloc[3])
self.cb_red_plot_4.addLegend(offset=(-30, 20))
self.cb_red_plot_4.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[red_range['index'].iloc[3]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data')
self.cb_red_plot_4.plot(x=symptom_db[0], y= | pd.DataFrame(compared_db) | pandas.DataFrame |
# coding=utf-8
# Copyright 2018-2020 EVA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
from facenet_pytorch import MTCNN
import numpy as np
import pandas as pd
import torch
from torch import Tensor
import torch.nn.functional as F
from src.models.catalog.frame_info import FrameInfo
from src.models.catalog.properties import ColorSpace
from src.udfs.pytorch_abstract_udf import PytorchAbstractUDF
from skimage.transform import resize
from torch.autograd import Variable
from PIL import Image
from src.udfs.emotion.transforms import transforms as transforms
from src.udfs.emotion.vgg import VGG
class EmotionDetector(PytorchAbstractUDF):
"""
Arguments:
threshold (float): Threshold for classifier confidence score
"""
@property
def name(self) -> str:
return "Emotion_Detector"
def __init__(self):
super().__init__()
        device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
        device = torch.device('cpu')  # force CPU for now; the CUDA selection above is kept for reference
self.model = MTCNN(keep_all=True, device=device)
self.net = VGG('VGG19')
checkpoint = torch.load('src/udfs/emotion/PrivateTest_model.t7')
self.net.load_state_dict(checkpoint['net'])
if device == torch.device('cuda:0'):
self.net.cuda()
self.net.eval()
self.cut_size = 44
self.transform_test = transforms.Compose([
transforms.TenCrop(self.cut_size),
transforms.Lambda(lambda crops: torch.stack([transforms.ToTensor()(crop) for crop in crops]))])
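    # Editor's note (assumption, since the scoring code is not shown in full here):
    # TenCrop yields a stack of 10 crops per detected face, so a typical evaluation
    # step reshapes to (ncrops, C, H, W), runs the VGG once, and averages the 10
    # outputs before taking the arg-max label, e.g.:
    #
    #   ncrops, c, h, w = inputs.shape
    #   outputs = self.net(inputs.view(-1, c, h, w))
    #   score = outputs.view(ncrops, -1).mean(0)
    #   emotion = self.labels[int(score.argmax())]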
@property
def input_format(self) -> FrameInfo:
return FrameInfo(-1, -1, 3, ColorSpace.RGB)
@property
def labels(self) -> List[str]:
return ['Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral']
def _get_predictions(self, frames: Tensor) -> pd.DataFrame:
"""
Performs predictions on input frames
Arguments:
            frames (Tensor): frames on which predictions need
            to be performed
        Returns:
            pd.DataFrame with the predicted emotion labels and
            associated scores for the faces detected in each frame
"""
boxes, b_score = self.model.detect(frames.permute(2,3,1,0)[:,:,:,-1]*255)
outcome = | pd.DataFrame() | pandas.DataFrame |
import ia_batch_utils as batch
import pandas as pd
def get_data(procid, name=""):
df = pd.DataFrame()
for i in procid:
new = batch.collect_data(i, '')
df = pd.concat([df,new])
df.to_csv(f"s3://eisai-basalforebrainsuperres2/test_stack_{name}.csv")
dupe_fields = [
'key',
'originalimage',
'label',
'process',
'version',
'name',
'resolution',
'batchid',
]
df.drop_duplicates(dupe_fields, inplace=True)
df = batch.pivot_data(
df,
index_fields=[
'project',
'subject',
'date',
'modality',
'repeat',
'originalimage',
],
column_name_fields=[
'key',
'label',
'resolution',
'process',
'version',
'name',
],
exclude_fields=['hashfields', 'batchid','extension', 'hashid'],
)
df.to_csv(f"s3://eisai-basalforebrainsuperres2/test_pivot_{name}.csv")
return df
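# Editor's note: illustrative usage only; the process ids and name below are
# placeholders, not real batch ids, and the call writes to the hard-coded S3 bucket.
def _example_get_data():
    stacked = get_data(['processid-1', 'processid-2'], name='run1')
    print(stacked.shape)
    return stacked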
def join_all(dfs, how):
df = dfs[0]
for d in dfs[1:]:
merge = | pd.merge(df, d, on='originalimage', how=how, suffixes=('', "_y")) | pandas.merge |
# Copyright 2018 <NAME> <EMAIL>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pandas as pd
import numpy as np
import sys
import urllib3
import json
import requests
import webbrowser
import warnings
from cptac.exceptions import HttpResponseError, InvalidParameterError, ParameterWarning
def get_interacting_proteins_string(protein, num_results=25):
"""
@Param protein:
The name of the protein that you want to generate a list of interacting proteins for.
@Param num_results (default=25):
The number of interacting proteins that you want to get.
@Return:
A pandas.Series of proteins known by the String api to be interacting partners with the specified protein.
This list will always also contain the protein you were looking for interactors for.
This method takes as a parameter the name of a protein. It then accesses the STRING database, through
a call to their public API, and generates a list of proteins known to be interacting partners with the specified
protein. Optional second parameter is num_results (which by default is 25), which specifies in the API call how many
interacting partners to retrieve from the database. The list of interacting proteins is returned to the caller
as a pandas.Series.
"""
# Send query to the STRING API
query_url = "https://string-db.org/api/json/network"
params = {
"identifiers": protein,
"species": "9606", # Homo sapiens
"limit": num_results,
}
query_resp = requests.get(query_url, params=params)
# Check that the response came back good
if query_resp.status_code != requests.codes.ok:
raise HttpResponseError(f"Submitting your query to the STRING API returned an HTTP status {query_resp.status_code}. The content returned from the request may be helpful:\n{query_resp.content.decode('utf-8')}")
# Put the response data in a dataframe
resp_df = pd.DataFrame(query_resp.json())
# Get the unique values of columns we want, as a list
interactors = resp_df["preferredName_A"].\
append(resp_df["preferredName_B"]).\
unique()
# Make sure the protein they searched is in the output list
if protein not in interactors:
interactors = np.insert(interactors, 0, protein)
# Sort and convert to series
interactors = np.sort(interactors)
interactors = pd.Series(interactors)
return interactors
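# Editor's note: illustrative usage only (not part of the original module); the
# gene symbol is an arbitrary example and the call needs network access to the
# STRING REST API.
def _example_string_usage():
    partners = get_interacting_proteins_string("EGFR", num_results=10)
    print(partners)  # pandas.Series of interactor symbols, including "EGFR" itself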
def get_interacting_proteins_biogrid(protein):
"""Queries the BioGRID API to get interacting proteins for a given protein, based on curated literature references.
Parameters:
protein: The name of the protein that you want to generate a list of interacting proteins for.
Returns:
pandas.DataFrame: The interacting proteins, ranked by the number of literature references supporting them.
"""
# Post query to the BioGRID API
query_url = "https://webservice.thebiogrid.org/interactions/"
params = {
"searchNames": "true",
"geneList": protein,
"includeInteractors": "true",
"includeInteractorInteractions": "false",
"interSpeciesExcluded": "true",
"format": "json",
"taxId": "9606",
"start": "0",
"accesskey": "<KEY>"
}
query_resp = requests.get(query_url, params=params)
# Check that the response came back good
if query_resp.status_code != requests.codes.ok:
raise HttpResponseError(f"Submitting your query to the STRING API returned an HTTP status {query_resp.status_code}. The content returned from the request may be helpful:\n{query_resp.content.decode('utf-8')}")
elif len(query_resp.json()) == 0:
raise InvalidParameterError(f"No interactors found for '{protein}'. Are you sure you entered the identifier correctly?")
# Put the response data in a dataframe
resp_df = pd.DataFrame(query_resp.json()).transpose()
# Get a list of all the interactors, and rank them by how many references each has
interactors = resp_df["OFFICIAL_SYMBOL_A"].\
where(resp_df["OFFICIAL_SYMBOL_A"] != protein, other=resp_df["OFFICIAL_SYMBOL_B"]).\
value_counts().\
to_frame("num_references")
interactors.index.name = "protein"
return interactors
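# Editor's note: illustrative usage only; 'TP53' is an arbitrary example and the
# call queries the live BioGRID web service.
def _example_biogrid_usage():
    refs = get_interacting_proteins_biogrid("TP53")
    print(refs.head())  # interactors ranked by number of literature references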
def get_interacting_proteins_bioplex(protein, secondary_interactions=False):
"""
@Param protein:
The name of the protein that you want to generate a list of interacting proteins for.
@Return:
A list of proteins which are interacting partners with the specified protein, according to the bioplex data table.
Returns None if specified protein isn't found, or no interacting partners are found.
This method takes as a parameter the name of a protein. It then accesses the bioplex data table and returns a list of any protein found to be an interacting partner to the given protein.
The Bioplex data table is the "BioPlex 3.0 Interactions (293T Cells)" file for the HEK293T cell line, downloaded from <https://bioplex.hms.harvard.edu/interactions.php>. The direct download link is <https://bioplex.hms.harvard.edu/data/BioPlex_293T_Network_10K_Dec_2019.tsv>. IMPORTANT: After downloading the file, you need to compress it using gzip.
"""
path_here = os.path.abspath(os.path.dirname(__file__))
file_name = os.path.join("data", "BioPlex_293T_Network_10K_Dec_2019.tsv.gz")
file_path = os.path.join(path_here, file_name)
# Read in the file, then sort to prioritize the interactions with the
# highest pInt (probability of interacting) and lowest pNI (probability of
# no interaction). Then, reset the index so that index number will
# correspond to rank under this sorting scheme.
bioplex_interactions = pd.read_csv(file_path, sep='\t').\
sort_values(by=["pInt", "pNI"], ascending=[False, True]).\
reset_index()
# Get all interactions with the protein of interest
A_df = bioplex_interactions.loc[bioplex_interactions['SymbolA'] == protein]
B_df = bioplex_interactions.loc[bioplex_interactions['SymbolB'] == protein]
A_interactions = list(A_df['SymbolB'])
B_interactions = list(B_df['SymbolA'])
all_interactions = list(set(A_interactions + B_interactions))
if secondary_interactions:
secondary_interactions_list = []
for interaction in all_interactions:
secondary = get_interacting_proteins_bioplex(interaction, False)
for si in secondary:
secondary_interactions_list.append(si)
for asi in secondary_interactions_list:
if asi not in all_interactions:
all_interactions.append(asi)
if len(all_interactions) > 0:
return all_interactions
else:
return None
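# Editor's note: illustrative usage only; the protein name is arbitrary and the
# call reads the bundled BioPlex 3.0 table shipped with this package.
def _example_bioplex_usage():
    partners = get_interacting_proteins_bioplex("PIK3CA", secondary_interactions=False)
    if partners is not None:
        print(len(partners), "primary interactors found")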
def get_interacting_proteins_wikipathways(protein):
"""
@param protein:
String. The name of the protein
@Return:
A list of proteins known by the most recent WikiPathways download to be interacting parters with the specified protein.
Returns None if specified protein is not found in the WikiPathways dataframe (which was intersected with Uniprot).
This function takes a path to WikiPathways Dataframe file and protein name and returns a list of all the proteins that interact with it, using the pathways from the WikiPathways relsease file.
This function loads the WikiPathways dataframe, and iterates through the row labelled with that protein name, return every protein in a pathway that also contains that protein.
"""
path_here = os.path.abspath(os.path.dirname(__file__))
data_dir_name = "data"
file_name = "WikiPathwaysDataframe.tsv.gz"
file_path = os.path.join(path_here, data_dir_name, file_name)
proteinName = protein
df = pd.read_csv(file_path, sep="\t", index_col=0)
if (proteinName in df.index):
row = df.loc[proteinName]
filtered_df = df.loc[:, row.values.tolist()]
def has_true(values):
for val in values:
if val == True:
return True
return False
filtered_df_final = filtered_df.loc[filtered_df.apply(lambda row: has_true(row.values.tolist()), axis=1), :]
return filtered_df_final.index.tolist()
return list() # The protein was not found.
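# Editor's note: illustrative usage only; the protein symbol is arbitrary.
def _example_wikipathways_interactors():
    members = get_interacting_proteins_wikipathways("TP53")
    print(members[:10])  # co-pathway proteins, or an empty list if the protein is not found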
def list_pathways_wikipathways():
"""
@ Return:
A list of all the WikiPathways pathways
Uses the WikipathwaysDataFrame to return a list of all the possible pathways found.
"""
path_here = os.path.abspath(os.path.dirname(__file__))
data_dir_name = "data"
file_name = "WikiPathwaysDataframe.tsv.gz"
file_path = os.path.join(path_here, data_dir_name, file_name)
df = pd.read_csv(file_path, sep="\t", index_col=0)
return list(df.columns)
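# Editor's note: illustrative usage only.
def _example_list_wikipathways():
    pathways = list_pathways_wikipathways()
    print(len(pathways), "WikiPathways pathways available")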
def get_pathways_with_proteins(proteins, database, reactome_resource="UniProt", quiet=False):
"""Query either the Reactome REST API or the WikiPathways downloaded dataframe to find pathways containing a particular gene or protein.
Parameters:
proteins (str or list of str): The protein(s) to look for matches to.
database (str): The database to use; either 'reactome' or 'wikipathways'.
reactome_resource (str, optional): If using Reactome, this is the resource the identifier(s) come from. Default is UniProt. Other options include HGNC, Ensembl, and GO. For more options, consult <https://reactome.org/content/schema/objects/ReferenceDatabase>. This parameter is meaningless if using WikiPathways.
quiet (bool, optional): Whether to suppress warnings issued when identifiers are not found. Default False.
Returns:
pandas.DataFrame: A table of pathways containing the given genes or proteins, with pathway names and, if using Reactome, their Reactome identifiers (which are needed for the pathway_overlay function).
"""
# Process string input
if isinstance(proteins, str):
proteins = [proteins]
if database.lower() == "reactome":
# Set headers and params
headers = {"accept": "application/json"}
params = {"species": "Homo sapiens"}
# Loop over proteins and get the interacting pathways
all_pathway_df = pd.DataFrame()
for id in proteins:
url = f"https://reactome.org/ContentService/data/mapping/{reactome_resource}/{id}/pathways"
resp = requests.get(url, headers=headers, params=params)
# Check that the response came back good
if resp.status_code == 404:
try:
msg = resp.json()["messages"]
except (json.JSONDecodeError, KeyError):
raise HttpResponseError(f"Your query returned an HTTP status {resp.status_code}. The content returned from the request may be helpful:\n{resp.content.decode('utf-8')}") from None
else:
if not quiet:
warnings.warn(f"The query for '{id}' returned HTTP 404 (not found). You may have mistyped the gene/protein ID or the reactome_resource name. The server gave the following message: {msg}", ParameterWarning, stacklevel=2)
continue
elif resp.status_code != requests.codes.ok:
raise HttpResponseError(f"Your query returned an HTTP status {resp.status_code}. The content returned from the request may be helpful:\n{resp.content.decode('utf-8')}")
# Parse out pathway IDs and names
pathway_dict = resp.json()
names = []
pathway_ids = []
for pathway in pathway_dict:
names.append(pathway["displayName"])
pathway_ids.append(pathway["stId"])
pathway_df = pd.DataFrame({"id": id, "pathway": names, "pathway_id": pathway_ids})
pathway_df = pathway_df.sort_values(by="pathway_id")
all_pathway_df = all_pathway_df.append(pathway_df)
elif database.lower() == "wikipathways":
path_here = os.path.abspath(os.path.dirname(__file__))
data_dir_name = "data"
file_name = "WikiPathwaysDataframe.tsv.gz"
file_path = os.path.join(path_here, data_dir_name, file_name)
df = pd.read_csv(file_path, sep="\t", index_col=0)
all_pathway_df = pd.DataFrame()
for protein in proteins:
if protein in df.index:
# Column headers are pathways; select pathways where the row for the protein has a
# True for that pathway's column, indicating membership
pathways = df.columns[df.loc[protein, :]].values
prot_df = pd.DataFrame({"id": protein, "pathway": pathways})
all_pathway_df = all_pathway_df.append(prot_df)
else:
if not quiet:
warnings.warn(f"The protein '{protein}' was not found in the WikiPathways data.", ParameterWarning, stacklevel=2)
else:
raise InvalidParameterError(f"Database '{database}' not recognized. Valid options: 'reactome', 'wikipathways'")
all_pathway_df = all_pathway_df.reset_index(drop=True)
return all_pathway_df
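# Editor's note: illustrative usage only; the identifiers are arbitrary examples.
# Reactome queries need network access, while WikiPathways queries only read the
# bundled dataframe.
def _example_pathway_lookup():
    reactome_hits = get_pathways_with_proteins("P04637", "reactome")        # UniProt accession
    wiki_hits = get_pathways_with_proteins(["TP53", "EGFR"], "wikipathways")
    print(reactome_hits.head())
    print(wiki_hits.head())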
def get_proteins_in_pathways(pathways, database, quiet=False):
"""Query either the Reactome REST API or the downloaded WikiPathways dataframe to get a list of proteins contained in a particular pathway.
Parameters:
pathways (str or list of str): The pathway(s) to get the contained proteins for. If using Reactome, these must be pathway IDs (e.g. "R-HSA-140877").
database (str): The database to use; either 'reactome' or 'wikipathways'.
quiet (bool, optional): Whether to suppress warnings issued when identifiers are not found. Default False.
Returns:
pandas.DataFrame: The proteins contained in the pathways.
"""
# Process string input
if isinstance(pathways, str):
pathways = [pathways]
if database.lower() == "reactome":
# Set headers and url
headers = {"accept": "application/json"}
# Loop over ids and get the interacting pathways
all_protein_df = pd.DataFrame()
for pathway_id in pathways:
# Send the request
url = f"https://reactome.org/ContentService/data/participants/{pathway_id}"
resp = requests.get(url, headers=headers)
if resp.status_code == 404 or (resp.status_code == requests.codes.ok and (len(resp.content.decode("utf-8")) == 0 or len(resp.json()) == 0)):
if not quiet:
warnings.warn(f"The query for '{pathway_id}' found no results. You may have mistyped the pathway ID.", ParameterWarning, stacklevel=2)
continue
elif resp.status_code != requests.codes.ok:
raise HttpResponseError(f"Your query returned an HTTP status {resp.status_code}. The content returned from the request may be helpful:\n{resp.content.decode('utf-8')}")
# Parse all the proteins/genes out of the response
members_df = pd.json_normalize(resp.json(), record_path=["refEntities"])
prot_df = members_df[members_df["displayName"].str.startswith("UniProt:")]
prot_names = prot_df["displayName"].str.rsplit(" ", n=1, expand=True)[1].\
drop_duplicates(keep="first").\
sort_values().\
reset_index(drop=True)
pathway_df = pd.DataFrame({"pathway": pathway_id, "member": prot_names})
all_protein_df = all_protein_df.append(pathway_df)
all_protein_df = all_protein_df.drop_duplicates(keep="first")
elif database.lower() == "wikipathways":
path_here = os.path.abspath(os.path.dirname(__file__))
data_dir_name = "data"
file_name = "WikiPathwaysDataframe.tsv.gz"
file_path = os.path.join(path_here, data_dir_name, file_name)
df = pd.read_csv(file_path, sep="\t", index_col=0)
all_protein_df = | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python3
import os, sys, argparse, warnings, csv
warnings.filterwarnings('ignore')
import subprocess
import numpy as np
import pandas as pd
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from matplotlib import colors
import matplotlib_venn
import math
from itertools import chain
from collections import Counter
from collections.abc import Iterable  # Iterable was removed from `collections` in Python 3.10
def get_args():
parser = argparse.ArgumentParser(description='dkato. Feb. 2022')
parser.add_argument('-AA' , dest ='AA', nargs='*',
                        help = 'paths to your amino acid files of genes (a Venn diagram is not output if there are 6 or more files)')
parser.add_argument('-rps' , dest ='rps', nargs='*',
help = 'path to your results of rpsblast')
parser.add_argument('-e' , dest ='evalue', nargs='*',
default= ['1e-4'], help = 'evalue in rpsblast(default:1e-4)')
parser.add_argument('-bar' , dest ='bar_size',
default= 5, type = int, help = 'specify a integer value: graph size of bar plot(default:5)')
parser.add_argument('-b', dest='n_black',
default=1,type = int, help = 'Number of bars dyed in black in a bar graph(default:1)')
parser.add_argument('-PCA' , dest ='PCA_size',
default= 5, type = int, help = 'specify a integer value: graph size of PCA plot(default:5)')
parser.add_argument('-g', dest='n_green',
default=0,type = int, help = 'Number of points dyed in green in a PCA plot(default:0)')
parser.add_argument('-venn' , dest ='venn_size',
default= 7, type = int, help = 'specify a integer value: graph size of venn diagrams(default:7)')
parser.add_argument('-u' , dest ='num_unique',
default= 1, type = int,
help = 'Number of files to search for unique genes (number of files from the top)(default:1)')
parser.add_argument('-t', dest='num_threads',
default=48,type = int, help = 'num_threads(default:48)')
parser.add_argument('-cogdb' , dest ='cogdb',
default= '/home/tmp/db/COG/Cog',
help = 'path to your cogdb to run rpsblast(default:/home/tmp/db/COG/Cog)')
parser.add_argument('-cddid' , dest ='cddid',
default= '/home/tmp/db/COG/cdd2cog/cddid_COG.tbl',
help = 'path to your cddid_COG.tbl(default:/home/tmp/db/COG/cdd2cog/cddid_COG.tbl)')
parser.add_argument('-cog', dest='cog',
default='/home/tmp/db/COG/cdd2cog/cog-20.def.tsv',
help = 'path to your cog-20.def.tsv(default:/home/tmp/db/COG/cdd2cog/cog-20.def.tsv)')
return parser.parse_args()
#'/Users/daiki/Python/M2/rpsblast/data/cddid_COG.tbl',
#'/home/tmp/db/COG/cdd2cog/cddid_COG.tbl'
#'/Users/daiki/Python/M2/rpsblast/data/cog-20.def.tsv',
#'/home/tmp/db/COG/cdd2cog/cog-20.def.tsv'
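# Editor's note: example invocation (the file names and script name are
# placeholders, since the original script name is not shown here):
#
#   python cog_summary.py -AA genomeA.faa genomeB.faa -e 1e-5 -u 1 -t 16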
default_colors = [
# r, g, b, a
[92, 192, 98, 0.5],
[90, 155, 212, 0.5],
[246, 236, 86, 0.6],
[241, 90, 96, 0.4],
[255, 117, 0, 0.3],
[82, 82, 190, 0.2],
]
default_colors = [
[i[0] / 255.0, i[1] / 255.0, i[2] / 255.0, i[3]]
for i in default_colors
]
def draw_ellipse(fig, ax, x, y, w, h, a, fillcolor):
e = patches.Ellipse(
xy=(x, y),
width=w,
height=h,
angle=a,
color=fillcolor)
ax.add_patch(e)
def draw_triangle(fig, ax, x1, y1, x2, y2, x3, y3, fillcolor):
xy = [
(x1, y1),
(x2, y2),
(x3, y3),
]
polygon = patches.Polygon(
xy=xy,
closed=True,
color=fillcolor)
ax.add_patch(polygon)
def draw_text(fig, ax, x, y, text, color=[0, 0, 0, 1], fontsize=14, ha="center", va="center"):
ax.text(
x, y, text,
horizontalalignment=ha,
verticalalignment=va,
fontsize=fontsize,
color="black")
def draw_annotate(fig, ax, x, y, textx, texty, text, color=[0, 0, 0, 1], arrowcolor=[0, 0, 0, 0.3]):
plt.annotate(
text,
xy=(x, y),
xytext=(textx, texty),
arrowprops=dict(color=arrowcolor, shrink=0, width=0.5, headwidth=8),
fontsize=14,
color=color,
xycoords="data",
textcoords="data",
horizontalalignment='center',
verticalalignment='center'
)
def get_labels(data, fill=["number"]):
"""
get a dict of labels for groups in data
@type data: list[Iterable]
@rtype: dict[str, str]
input
data: data to get label for
fill: ["number"|"logic"|"percent"]
return
labels: a dict of labels for different sets
example:
In [12]: get_labels([range(10), range(5,15), range(3,8)], fill=["number"])
Out[12]:
{'001': '0',
'010': '5',
'011': '0',
'100': '3',
'101': '2',
'110': '2',
'111': '3'}
"""
N = len(data)
sets_data = [set(data[i]) for i in range(N)] # sets for separate groups
s_all = set(chain(*data)) # union of all sets
# bin(3) --> '0b11', so bin(3).split('0b')[-1] will remove "0b"
set_collections = {}
for n in range(1, 2**N):
key = bin(n).split('0b')[-1].zfill(N)
value = s_all
sets_for_intersection = [sets_data[i] for i in range(N) if key[i] == '1']
sets_for_difference = [sets_data[i] for i in range(N) if key[i] == '0']
for s in sets_for_intersection:
value = value & s
for s in sets_for_difference:
value = value - s
set_collections[key] = value
labels = {k: "" for k in set_collections}
if "logic" in fill:
for k in set_collections:
labels[k] = k + ": "
if "number" in fill:
for k in set_collections:
labels[k] += str(len(set_collections[k]))
if "percent" in fill:
data_size = len(s_all)
for k in set_collections:
labels[k] += "(%.1f%%)" % (100.0 * len(set_collections[k]) / data_size)
return labels
def venn4(labels, ax, names=['A', 'B', 'C', 'D'], **options):
"""
plots a 4-set Venn diagram
@type labels: dict[str, str]
@type names: list[str]
@rtype: (Figure, AxesSubplot)
input
labels: a label dict where keys are identified via binary codes ('0001', '0010', '0100', ...),
hence a valid set could look like: {'0001': 'text 1', '0010': 'text 2', '0100': 'text 3', ...}.
unmentioned codes are considered as ''.
names: group names
more: colors, figsize, dpi, fontsize
return
pyplot Figure and AxesSubplot object
"""
colors = options.get('colors', [default_colors[i] for i in range(4)])
figsize = options.get('figsize', (12, 12))
dpi = options.get('dpi', 96)
fontsize = options.get('fontsize', 14)
fig = plt.figure(0, figsize=figsize, dpi=dpi)
#ax = fig.add_subplot(111, aspect='equal')
ax.set_axis_off()
ax.set_ylim(bottom=0.0, top=1.0)
ax.set_xlim(left=0.0, right=1.0)
# body
draw_ellipse(fig, ax, 0.350, 0.400, 0.72, 0.45, 140.0, colors[0])
draw_ellipse(fig, ax, 0.450, 0.500, 0.72, 0.45, 140.0, colors[1])
draw_ellipse(fig, ax, 0.544, 0.500, 0.72, 0.45, 40.0, colors[2])
draw_ellipse(fig, ax, 0.644, 0.400, 0.72, 0.45, 40.0, colors[3])
draw_text(fig, ax, 0.85, 0.42, labels.get('0001', ''), fontsize=fontsize)
draw_text(fig, ax, 0.68, 0.72, labels.get('0010', ''), fontsize=fontsize)
draw_text(fig, ax, 0.77, 0.59, labels.get('0011', ''), fontsize=fontsize)
draw_text(fig, ax, 0.32, 0.72, labels.get('0100', ''), fontsize=fontsize)
draw_text(fig, ax, 0.71, 0.30, labels.get('0101', ''), fontsize=fontsize)
draw_text(fig, ax, 0.50, 0.66, labels.get('0110', ''), fontsize=fontsize)
draw_text(fig, ax, 0.65, 0.50, labels.get('0111', ''), fontsize=fontsize)
draw_text(fig, ax, 0.14, 0.42, labels.get('1000', ''), fontsize=fontsize)
draw_text(fig, ax, 0.50, 0.17, labels.get('1001', ''), fontsize=fontsize)
draw_text(fig, ax, 0.29, 0.30, labels.get('1010', ''), fontsize=fontsize)
draw_text(fig, ax, 0.39, 0.24, labels.get('1011', ''), fontsize=fontsize)
draw_text(fig, ax, 0.23, 0.59, labels.get('1100', ''), fontsize=fontsize)
draw_text(fig, ax, 0.61, 0.24, labels.get('1101', ''), fontsize=fontsize)
draw_text(fig, ax, 0.35, 0.50, labels.get('1110', ''), fontsize=fontsize)
draw_text(fig, ax, 0.50, 0.38, labels.get('1111', ''), fontsize=fontsize)
# legend
draw_text(fig, ax, 0.13, 0.18, names[0], colors[0], fontsize=fontsize, ha="right")
draw_text(fig, ax, 0.18, 0.83, names[1], colors[1], fontsize=fontsize, ha="right", va="bottom")
draw_text(fig, ax, 0.82, 0.83, names[2], colors[2], fontsize=fontsize, ha="left", va="bottom")
draw_text(fig, ax, 0.87, 0.18, names[3], colors[3], fontsize=fontsize, ha="left", va="top")
#leg = ax.legend(names, loc='center left', bbox_to_anchor=(1.0, 0.5), fancybox=True)
#leg.get_frame().set_alpha(0.5)
return fig#, ax
def venn5(labels, ax, names=['A', 'B', 'C', 'D', 'E'], **options):
"""
plots a 5-set Venn diagram
@type labels: dict[str, str]
@type names: list[str]
@rtype: (Figure, AxesSubplot)
input
labels: a label dict where keys are identified via binary codes ('00001', '00010', '00100', ...),
hence a valid set could look like: {'00001': 'text 1', '00010': 'text 2', '00100': 'text 3', ...}.
unmentioned codes are considered as ''.
names: group names
more: colors, figsize, dpi, fontsize
return
pyplot Figure and AxesSubplot object
"""
colors = options.get('colors', [default_colors[i] for i in range(5)])
figsize = options.get('figsize', (13, 13))
dpi = options.get('dpi', 96)
fontsize = options.get('fontsize', 14)
fig = plt.figure(0, figsize=figsize, dpi=dpi)
#ax = fig.add_subplot(111, aspect='equal')
ax.set_axis_off()
ax.set_ylim(bottom=0.0, top=1.0)
ax.set_xlim(left=0.0, right=1.0)
# body
draw_ellipse(fig, ax, 0.428, 0.449, 0.87, 0.50, 155.0, colors[0])
draw_ellipse(fig, ax, 0.469, 0.543, 0.87, 0.50, 82.0, colors[1])
draw_ellipse(fig, ax, 0.558, 0.523, 0.87, 0.50, 10.0, colors[2])
draw_ellipse(fig, ax, 0.578, 0.432, 0.87, 0.50, 118.0, colors[3])
draw_ellipse(fig, ax, 0.489, 0.383, 0.87, 0.50, 46.0, colors[4])
draw_text(fig, ax, 0.27, 0.11, labels.get('00001', ''), fontsize=fontsize)
draw_text(fig, ax, 0.72, 0.11, labels.get('00010', ''), fontsize=fontsize)
draw_text(fig, ax, 0.55, 0.13, labels.get('00011', ''), fontsize=fontsize)
draw_text(fig, ax, 0.91, 0.58, labels.get('00100', ''), fontsize=fontsize)
draw_text(fig, ax, 0.78, 0.64, labels.get('00101', ''), fontsize=fontsize)
draw_text(fig, ax, 0.84, 0.41, labels.get('00110', ''), fontsize=fontsize)
draw_text(fig, ax, 0.76, 0.55, labels.get('00111', ''), fontsize=fontsize)
draw_text(fig, ax, 0.51, 0.90, labels.get('01000', ''), fontsize=fontsize)
draw_text(fig, ax, 0.39, 0.15, labels.get('01001', ''), fontsize=fontsize)
draw_text(fig, ax, 0.42, 0.78, labels.get('01010', ''), fontsize=fontsize)
draw_text(fig, ax, 0.50, 0.15, labels.get('01011', ''), fontsize=fontsize)
draw_text(fig, ax, 0.67, 0.76, labels.get('01100', ''), fontsize=fontsize)
draw_text(fig, ax, 0.70, 0.71, labels.get('01101', ''), fontsize=fontsize)
draw_text(fig, ax, 0.51, 0.74, labels.get('01110', ''), fontsize=fontsize)
draw_text(fig, ax, 0.64, 0.67, labels.get('01111', ''), fontsize=fontsize)
draw_text(fig, ax, 0.10, 0.61, labels.get('10000', ''), fontsize=fontsize)
draw_text(fig, ax, 0.20, 0.31, labels.get('10001', ''), fontsize=fontsize)
draw_text(fig, ax, 0.76, 0.25, labels.get('10010', ''), fontsize=fontsize)
draw_text(fig, ax, 0.65, 0.23, labels.get('10011', ''), fontsize=fontsize)
draw_text(fig, ax, 0.18, 0.50, labels.get('10100', ''), fontsize=fontsize)
draw_text(fig, ax, 0.21, 0.37, labels.get('10101', ''), fontsize=fontsize)
draw_text(fig, ax, 0.81, 0.37, labels.get('10110', ''), fontsize=fontsize)
draw_text(fig, ax, 0.74, 0.40, labels.get('10111', ''), fontsize=fontsize)
draw_text(fig, ax, 0.27, 0.70, labels.get('11000', ''), fontsize=fontsize)
draw_text(fig, ax, 0.34, 0.25, labels.get('11001', ''), fontsize=fontsize)
draw_text(fig, ax, 0.33, 0.72, labels.get('11010', ''), fontsize=fontsize)
draw_text(fig, ax, 0.51, 0.22, labels.get('11011', ''), fontsize=fontsize)
draw_text(fig, ax, 0.25, 0.58, labels.get('11100', ''), fontsize=fontsize)
draw_text(fig, ax, 0.28, 0.39, labels.get('11101', ''), fontsize=fontsize)
draw_text(fig, ax, 0.36, 0.66, labels.get('11110', ''), fontsize=fontsize)
draw_text(fig, ax, 0.51, 0.47, labels.get('11111', ''), fontsize=fontsize)
# legend
draw_text(fig, ax, 0.02, 0.72, names[0], colors[0], fontsize=fontsize, ha="right")
draw_text(fig, ax, 0.72, 0.94, names[1], colors[1], fontsize=fontsize, va="bottom")
draw_text(fig, ax, 0.97, 0.74, names[2], colors[2], fontsize=fontsize, ha="left")
draw_text(fig, ax, 0.88, 0.05, names[3], colors[3], fontsize=fontsize, ha="left")
draw_text(fig, ax, 0.12, 0.05, names[4], colors[4], fontsize=fontsize, ha="right")
#leg = ax.legend(names, loc='center left', bbox_to_anchor=(1.0, 0.5), fancybox=True)
#leg.get_frame().set_alpha(0.5)
return fig#, ax
def venn6(labels, ax, names=['A', 'B', 'C', 'D', 'E', 'F'], **options):
"""
plots a 6-set Venn diagram
@type labels: dict[str, str]
@type names: list[str]
@rtype: (Figure, AxesSubplot)
input
labels: a label dict where keys are identified via binary codes ('000001', '000010', '000100', ...),
hence a valid set could look like: {'000001': 'text 1', '000010': 'text 2', '000100': 'text 3', ...}.
unmentioned codes are considered as ''.
names: group names
more: colors, figsize, dpi, fontsize
return
pyplot Figure and AxesSubplot object
"""
colors = options.get('colors', [default_colors[i] for i in range(6)])
figsize = options.get('figsize', (20, 20))
dpi = options.get('dpi', 96)
fontsize = options.get('fontsize', 14)
fig = plt.figure(0, figsize=figsize, dpi=dpi)
#ax = fig.add_subplot(111, aspect='equal')
ax.set_axis_off()
ax.set_ylim(bottom=0.230, top=0.845)
ax.set_xlim(left=0.173, right=0.788)
# body
# See https://web.archive.org/web/20040819232503/http://www.hpl.hp.com/techreports/2000/HPL-2000-73.pdf
draw_triangle(fig, ax, 0.637, 0.921, 0.649, 0.274, 0.188, 0.667, colors[0])
draw_triangle(fig, ax, 0.981, 0.769, 0.335, 0.191, 0.393, 0.671, colors[1])
draw_triangle(fig, ax, 0.941, 0.397, 0.292, 0.475, 0.456, 0.747, colors[2])
draw_triangle(fig, ax, 0.662, 0.119, 0.316, 0.548, 0.662, 0.700, colors[3])
draw_triangle(fig, ax, 0.309, 0.081, 0.374, 0.718, 0.681, 0.488, colors[4])
draw_triangle(fig, ax, 0.016, 0.626, 0.726, 0.687, 0.522, 0.327, colors[5])
draw_text(fig, ax, 0.212, 0.562, labels.get('000001', ''), fontsize=fontsize)
draw_text(fig, ax, 0.430, 0.249, labels.get('000010', ''), fontsize=fontsize)
draw_text(fig, ax, 0.356, 0.444, labels.get('000011', ''), fontsize=fontsize)
draw_text(fig, ax, 0.609, 0.255, labels.get('000100', ''), fontsize=fontsize)
draw_text(fig, ax, 0.323, 0.546, labels.get('000101', ''), fontsize=fontsize)
draw_text(fig, ax, 0.513, 0.316, labels.get('000110', ''), fontsize=fontsize)
draw_text(fig, ax, 0.523, 0.348, labels.get('000111', ''), fontsize=fontsize)
draw_text(fig, ax, 0.747, 0.458, labels.get('001000', ''), fontsize=fontsize)
draw_text(fig, ax, 0.325, 0.492, labels.get('001001', ''), fontsize=fontsize)
draw_text(fig, ax, 0.670, 0.481, labels.get('001010', ''), fontsize=fontsize)
draw_text(fig, ax, 0.359, 0.478, labels.get('001011', ''), fontsize=fontsize)
draw_text(fig, ax, 0.653, 0.444, labels.get('001100', ''), fontsize=fontsize)
draw_text(fig, ax, 0.344, 0.526, labels.get('001101', ''), fontsize=fontsize)
draw_text(fig, ax, 0.653, 0.466, labels.get('001110', ''), fontsize=fontsize)
draw_text(fig, ax, 0.363, 0.503, labels.get('001111', ''), fontsize=fontsize)
draw_text(fig, ax, 0.750, 0.616, labels.get('010000', ''), fontsize=fontsize)
draw_text(fig, ax, 0.682, 0.654, labels.get('010001', ''), fontsize=fontsize)
draw_text(fig, ax, 0.402, 0.310, labels.get('010010', ''), fontsize=fontsize)
draw_text(fig, ax, 0.392, 0.421, labels.get('010011', ''), fontsize=fontsize)
draw_text(fig, ax, 0.653, 0.691, labels.get('010100', ''), fontsize=fontsize)
draw_text(fig, ax, 0.651, 0.644, labels.get('010101', ''), fontsize=fontsize)
draw_text(fig, ax, 0.490, 0.340, labels.get('010110', ''), fontsize=fontsize)
draw_text(fig, ax, 0.468, 0.399, labels.get('010111', ''), fontsize=fontsize)
draw_text(fig, ax, 0.692, 0.545, labels.get('011000', ''), fontsize=fontsize)
draw_text(fig, ax, 0.666, 0.592, labels.get('011001', ''), fontsize=fontsize)
draw_text(fig, ax, 0.665, 0.496, labels.get('011010', ''), fontsize=fontsize)
draw_text(fig, ax, 0.374, 0.470, labels.get('011011', ''), fontsize=fontsize)
draw_text(fig, ax, 0.653, 0.537, labels.get('011100', ''), fontsize=fontsize)
draw_text(fig, ax, 0.652, 0.579, labels.get('011101', ''), fontsize=fontsize)
draw_text(fig, ax, 0.653, 0.488, labels.get('011110', ''), fontsize=fontsize)
draw_text(fig, ax, 0.389, 0.486, labels.get('011111', ''), fontsize=fontsize)
draw_text(fig, ax, 0.553, 0.806, labels.get('100000', ''), fontsize=fontsize)
draw_text(fig, ax, 0.313, 0.604, labels.get('100001', ''), fontsize=fontsize)
draw_text(fig, ax, 0.388, 0.694, labels.get('100010', ''), fontsize=fontsize)
draw_text(fig, ax, 0.375, 0.633, labels.get('100011', ''), fontsize=fontsize)
draw_text(fig, ax, 0.605, 0.359, labels.get('100100', ''), fontsize=fontsize)
draw_text(fig, ax, 0.334, 0.555, labels.get('100101', ''), fontsize=fontsize)
draw_text(fig, ax, 0.582, 0.397, labels.get('100110', ''), fontsize=fontsize)
draw_text(fig, ax, 0.542, 0.372, labels.get('100111', ''), fontsize=fontsize)
draw_text(fig, ax, 0.468, 0.708, labels.get('101000', ''), fontsize=fontsize)
draw_text(fig, ax, 0.355, 0.572, labels.get('101001', ''), fontsize=fontsize)
draw_text(fig, ax, 0.420, 0.679, labels.get('101010', ''), fontsize=fontsize)
draw_text(fig, ax, 0.375, 0.597, labels.get('101011', ''), fontsize=fontsize)
draw_text(fig, ax, 0.641, 0.436, labels.get('101100', ''), fontsize=fontsize)
draw_text(fig, ax, 0.348, 0.538, labels.get('101101', ''), fontsize=fontsize)
draw_text(fig, ax, 0.635, 0.453, labels.get('101110', ''), fontsize=fontsize)
draw_text(fig, ax, 0.370, 0.548, labels.get('101111', ''), fontsize=fontsize)
draw_text(fig, ax, 0.594, 0.689, labels.get('110000', ''), fontsize=fontsize)
draw_text(fig, ax, 0.579, 0.670, labels.get('110001', ''), fontsize=fontsize)
draw_text(fig, ax, 0.398, 0.670, labels.get('110010', ''), fontsize=fontsize)
draw_text(fig, ax, 0.395, 0.653, labels.get('110011', ''), fontsize=fontsize)
draw_text(fig, ax, 0.633, 0.682, labels.get('110100', ''), fontsize=fontsize)
draw_text(fig, ax, 0.616, 0.656, labels.get('110101', ''), fontsize=fontsize)
draw_text(fig, ax, 0.587, 0.427, labels.get('110110', ''), fontsize=fontsize)
draw_text(fig, ax, 0.526, 0.415, labels.get('110111', ''), fontsize=fontsize)
draw_text(fig, ax, 0.495, 0.677, labels.get('111000', ''), fontsize=fontsize)
draw_text(fig, ax, 0.505, 0.648, labels.get('111001', ''), fontsize=fontsize)
draw_text(fig, ax, 0.428, 0.663, labels.get('111010', ''), fontsize=fontsize)
draw_text(fig, ax, 0.430, 0.631, labels.get('111011', ''), fontsize=fontsize)
draw_text(fig, ax, 0.639, 0.524, labels.get('111100', ''), fontsize=fontsize)
draw_text(fig, ax, 0.591, 0.604, labels.get('111101', ''), fontsize=fontsize)
draw_text(fig, ax, 0.622, 0.477, labels.get('111110', ''), fontsize=fontsize)
draw_text(fig, ax, 0.501, 0.523, labels.get('111111', ''), fontsize=fontsize)
# legend
draw_text(fig, ax, 0.674, 0.824, names[0], colors[0], fontsize=fontsize)
draw_text(fig, ax, 0.747, 0.751, names[1], colors[1], fontsize=fontsize)
draw_text(fig, ax, 0.739, 0.396, names[2], colors[2], fontsize=fontsize)
draw_text(fig, ax, 0.700, 0.247, names[3], colors[3], fontsize=fontsize)
draw_text(fig, ax, 0.291, 0.255, names[4], colors[4], fontsize=fontsize)
draw_text(fig, ax, 0.203, 0.484, names[5], colors[5], fontsize=fontsize)
#leg = ax.legend(names, loc='center left', bbox_to_anchor=(1.0, 0.5), fancybox=True)
#leg.get_frame().set_alpha(0.5)
return fig#, ax
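# Hedged usage sketch (kept as comments so nothing runs on import; the gene IDs
# and file names are placeholders): the venn* helpers expect a labels dict built
# by get_labels() plus an Axes created by the caller.
#   fig, ax = plt.subplots(figsize=(7, 7))
#   gene_sets = [['g1', 'g2', 'g3'], ['g2', 'g3', 'g4'], ['g3', 'g5'], ['g1', 'g5']]
#   labels = get_labels(gene_sets, fill=['number'])
#   venn4(labels, ax, names=['fileA', 'fileB', 'fileC', 'fileD'])
#   fig.savefig('venn4_example.png')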
def run_rpsblast(paths_to_proteins = None,
path_to_cogdb = None,
evalue = None, num_threads = None):
from subprocess import Popen
error1 = "specify the path to your Cog database with cogdb option. (default:/home/tmp/db/COG/Cog)"
#assert os.path.exists('/home/tmp/db/COG/Cog/'), error1
if f'rps_{evalue}' not in os.listdir(path='./'):
os.system(f'mkdir rps_{evalue}')
def split_list(lst, n):
for i in range(0, len(lst), n):
yield lst[i:i + n]
path_to_rpsRes = []
block_paths = list(split_list(paths_to_proteins, num_threads))
for block_path in block_paths:
procs = []
for path in block_path:
name = os.path.splitext(os.path.basename(path))[0]
procs += [Popen(f"rpsblast -query {path} -db {path_to_cogdb} -out ./rps_{evalue}/{name}.txt -evalue {evalue} -outfmt 6"
, shell=True)]
path_to_rpsRes.append(f"./rps_{evalue}/{name}.txt")
[p.wait() for p in procs]
return path_to_rpsRes
def preprocess(rps = None,
cddid = None,
cog = None):
cddid["CDD"] = "CDD:" + cddid["CDD"].astype(str)
_ = pd.merge(rps, cddid, on = ["CDD"]).iloc[:, [0, 1, 12]]
_df = pd.merge(_, cog, on = ["COG"]).iloc[:, [0, 1, 2, 3, 4, 5]]
return _df, dict(Counter("".join(_df['Group'])))
def sorter(df_i = None,
A2Z = None):
tmp = []
for i in range(len(A2Z)):
if A2Z[i] in list(df_i.keys()):
tmp += [df_i[A2Z[i]]]
else:
tmp += [0]
return tmp
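# Illustrative example for sorter() (the counts are made up): it spreads a
# Counter-style dict of COG category letters onto the fixed A..Z vector,
# filling absent categories with 0 so every file yields a column of length 26.
#   A2Z = [chr(i) for i in range(65, 65 + 26)]
#   sorter(df_i={'A': 3, 'C': 1}, A2Z=A2Z)   # -> [3, 0, 1, 0, 0, ..., 0]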
def get_main_dataset(path_to_rpsRes = None,
path_to_cddid = None,
path_to_cog = None, evalue = None):
if f'out_{evalue}' not in os.listdir(path='./'):
os.system(f'mkdir ./out_{evalue}/')
if 'COGdata' not in os.listdir(path=f"./out_{evalue}/"):
os.system(f'mkdir ./out_{evalue}/COGdata/')
df_i = {}
out_COG_i = {}
out = pd.DataFrame()
out_COG = pd.DataFrame()
A2Z = [chr(i) for i in range(65, 65+26)]
for i, path in enumerate(path_to_rpsRes):
#load data
rps_i = pd.read_table(path_to_rpsRes[i], names=["cdd_id", "CDD", "a", "b","c","d",
"e","f","g","h","eValue","j"]).drop_duplicates(['cdd_id'])
cddid = pd.read_table(path_to_cddid, names=["CDD", "COG", "a", "b", "c"])
cog = pd.read_table(path_to_cog, names=["COG", "Group", "gene_name",
"gene", "E3", "F3", "G3"], encoding='cp1252')
col_name = os.path.splitext(os.path.basename(path))[0]#[:-len('_rpsblastout')]
#processing
out_COG_i[col_name], df_i[col_name] = preprocess(rps = rps_i,
cddid = cddid,
cog = cog)
COG_i = out_COG_i[col_name].rename(columns={'cdd_id': f"{col_name}"}).iloc[:, [0,1,2,3,4,5]]
out_i = pd.DataFrame(sorter(df_i = df_i[col_name], A2Z = A2Z), columns=[f"{col_name}"])
out = pd.concat([out, out_i], axis = 1)
out_COG = pd.concat([out_COG, COG_i], axis = 1)
        count_data = pd.concat([pd.DataFrame(A2Z, columns=['COG'])
#!/usr/bin/env python3
import json
import argparse
import pickle
from collections import defaultdict
import numpy as np
import pandas
from loom_reader import LoomReader
from cluster import cluster as get_clusters
from delaunay import DictDelaunay2d
from sklearn import decomposition
def octagon(poly):
'''
Returns a bounding octagon.
>>> square = np.array([[0,0], [0,1], [1,1], [1,0]])
>>> octagon(square)
[[0, 0], [0, 1], [0, 1], [1, 1], [1, 1], [1, 0], [1, 0], [0, 0]]
>>> triangle = np.array([[1,0], [0,2], [2,3]])
>>> octagon(triangle)
[[0, 1], [0, 2], [1, 3], [2, 3], [2, 3], [2, 1], [1, 0], [1, 0]]
>>> type(octagon(triangle)[0][0])
<class 'int'>
'''
# SciPy has ConvexHull, but it segfaulted on me: perhaps
# https://github.com/scipy/scipy/issues/9751
# ... and even if I fixed it locally,
# not sure we want that fragility.
#
# Also: The goal is really just to get a simplified shape...
# A convex hull is too precise in some ways,
# while in others it falls short, ie, concavities.
#
# I kind-of like the obvious artificiality of an octagon.
    # Was unsigned, and subtraction causes underflow.
poly_as_int = poly.astype('int')
min_x = int(np.min(poly_as_int[:, [0]]))
max_x = int(np.max(poly_as_int[:, [0]]))
min_y = int(np.min(poly_as_int[:, [1]]))
max_y = int(np.max(poly_as_int[:, [1]]))
summed = np.sum(poly_as_int, axis=1)
diffed = np.diff(poly_as_int, axis=1)
min_sum = int(np.min(summed))
max_sum = int(np.max(summed))
min_diff = int(np.min(diffed))
max_diff = int(np.max(diffed))
return [
[min_x, min_sum - min_x],
[min_x, max_diff + min_x], # ^ Left
[max_y - max_diff, max_y],
        [max_sum - max_y, max_y],  # ^ Bottom
[max_x, max_sum - max_x],
[max_x, min_diff + max_x], # ^ Right
[min_y - min_diff, min_y],
[min_sum - min_y, min_y] # ^ Top
]
def mean_coord(coords):
'''
The xy values in the Linnarsson data are not good:
They take a different corner as the origin.
So... we find the center of our polygon instead.
>>> mean_coord([[1,2], [3,4], [5,6]])
[3, 4]
'''
return [int(x) for x in np.mean(coords, axis=0).tolist()]
# Taken from http://linnarssonlab.org/osmFISH/clusters/
LOOKUP = {
"Astrocyte Gfap": "Astrocyte",
"Astrocyte Mfge8": "Astrocyte",
"C. Plexus": "Ventricle",
"Endothelial 1": "Vasculature",
"Endothelial": "Vasculature",
"Ependymal": "Ventricle",
"Hippocampus": "Excitatory neurons",
"Inhibitory CP": "Inhibitory neurons",
"Inhibitory Cnr1": "Inhibitory neurons",
"Inhibitory Crhbp": "Inhibitory neurons",
"Inhibitory IC": "Inhibitory neurons",
"Inhibitory Kcnip2": "Inhibitory neurons",
"Inhibitory Pthlh": "Inhibitory neurons",
"Inhibitory Vip": "Inhibitory neurons",
"Microglia": "Brain immune",
"Oligodendrocyte COP": "Oligodendrocytes",
"Oligodendrocyte MF": "Oligodendrocytes",
"Oligodendrocyte Mature": "Oligodendrocytes",
"Oligodendrocyte NF": "Oligodendrocytes",
"Oligodendrocyte Precursor cells": "Oligodendrocytes",
"Pericytes": "Vasculature",
"Perivascular Macrophages": "Brain immune",
"Pyramidal Cpne5": "Excitatory neurons",
"Pyramidal Kcnip2": "Excitatory neurons",
"Pyramidal L2-3 L5": "Excitatory neurons",
"Pyramidal L2-3": "Excitatory neurons",
"Pyramidal L3-4": "Excitatory neurons",
"Pyramidal L5": "Excitatory neurons",
"Pyramidal L6": "Excitatory neurons",
"Vascular Smooth Muscle": "Vasculature",
"pyramidal L4": "Excitatory neurons"
}
def get_neighborhoods(metadata):
'''
>>> cells = {
... 'O': { 'xy': [0,0], 'extra': 'field'},
... 'N': { 'xy': [0,1], 'extra': 'field'},
... 'E': { 'xy': [1,0], 'extra': 'field'},
... 'S': { 'xy': [0,-1], 'extra': 'field'},
... 'W': { 'xy': [-1,0], 'extra': 'field'}
... }
>>> neighborhoods = get_neighborhoods(cells)
>>> neighborhoods.keys()
dict_keys(['O::E::N', 'O::N::W', 'O::S::E', 'O::W::S'])
>>> neighborhoods['O::E::N']
{'poly': [[0, 0], [1, 0], [0, 1]]}
'''
coords = {}
for (k, v) in metadata.items():
coords[k] = v['xy']
triangles = DictDelaunay2d(coords).getTriangles()
neighborhoods = {}
for triangle in triangles:
key = '::'.join(triangle)
value = {
'poly': [coords[point] for point in triangle]
}
neighborhoods[key] = value
return neighborhoods
def get_genes(metadata):
'''
>>> metadata = {
... 'cell-1': {'genes': {'a': 1, 'b': 20}},
... 'cell-2': {'genes': {'a': 2, 'b': 10}}
... }
>>> genes = get_genes(metadata)
>>> genes['a']
{'max': 2, 'cells': {'cell-1': 1, 'cell-2': 2}}
>>> genes['b']
{'max': 20, 'cells': {'cell-1': 20, 'cell-2': 10}}
'''
genes = defaultdict(lambda: {'max': 0, 'cells': {}})
for cell_id, cell_data in metadata.items():
for gene_id, expression_level in cell_data['genes'].items():
gene_data = genes[gene_id]
gene_data['cells'][cell_id] = expression_level
if gene_data['max'] < expression_level:
gene_data['max'] = expression_level
return genes
def get_factors(metadata):
'''
>>> metadata = {
... "Santa's Little Helper": {'factors':{'eng': 'dog', 'sci': 'canine'}},
... "Snowball II": {'factors':{'eng': 'cat', 'sci': 'feline'}}
... }
>>> factors = get_factors(metadata)
>>> list(factors['eng'].keys())
['map', 'cells']
>>> factors['eng']['map']
['dog', 'cat']
>>> factors['eng']['cells']
{"Santa's Little Helper": 0, 'Snowball II': 1}
'''
factors = defaultdict(lambda: {'map': [], 'cells': {}})
for cell_id, cell_data in metadata.items():
for factor_id, factor_value in cell_data['factors'].items():
factor_data = factors[factor_id]
if factor_value not in factor_data['map']:
factor_data['map'].append(factor_value)
factor_index = factor_data['map'].index(factor_value)
factor_data['cells'][cell_id] = factor_index
return factors
def get_cell_sets(clusters, lookup):
'''
>>> from collections import namedtuple
>>> Cluster = namedtuple('Cluster', ['name', 'cell_ids'])
>>> clusters = {
... 1: Cluster('pyramidal L4', ['104', '110', '111']),
... 3: Cluster('vascular smooth muscle', ['1', '2', '3'])
... }
>>> lookup = {
... 'vascular smooth muscle': 'vasculature',
... 'pyramidal L4': 'excitatory neurons'
... }
>>> cell_sets = get_cell_sets(clusters, lookup)
>>> list(cell_sets.keys())
['version', 'datatype', 'tree']
>>> cell_sets['datatype']
'cell'
>>> list(cell_sets['tree'][0].keys())
['name', 'children']
>>> cell_sets['tree'][0]['name']
'Cell Type Annotations'
>>> sorted([ n['name'] for n in cell_sets['tree'][0]['children'] ])
['excitatory neurons', 'vasculature']
'''
# The parameter `lookup` is a dict mapping
# subclusters to clusters: `{ Subcluster Name: Cluster Name }`
# This `lookup` mapping can be used to fill in an intermediate
# dict `hierarchy`, closer to the data structure we want to output.
# ```
# {
# Cluster A: {
# Subcluster A: [1, 2],
# Subcluster B: [3, 4]
# },
# Cluster B: {...}
# }
# ```
hierarchy = {cluster_name: {} for cluster_name in lookup.values()}
for c in clusters.values():
subcluster = {c.name: c.cell_ids}
cluster_name = lookup.get(c.name)
cluster_dict = hierarchy.get(cluster_name)
cluster_dict.update(subcluster)
# Use the `hierarchy` dict to fill in an object
# conforming to the `cell-sets.json` schema.
cluster_nodes = []
for cluster_name in sorted(hierarchy.keys()):
cluster_dict = hierarchy[cluster_name]
subcluster_nodes = []
for subcluster_name in sorted(cluster_dict.keys()):
subcluster = cluster_dict[subcluster_name]
subcluster_nodes.append({
'name': subcluster_name,
'set': subcluster
})
cluster_nodes.append({
'name': cluster_name,
'children': subcluster_nodes,
})
# Construct the tree, according to the following schema:
# https://github.com/hubmapconsortium/vitessce/blob/d5f63aa1d08aa61f6b20f6ad6bbfba5fceb6b5ef/src/schemas/cell_sets.schema.json
cell_sets = {
'version': '0.1.2',
'datatype': 'cell',
'tree': [{
'name': 'Cell Type Annotations',
'children': cluster_nodes
}]
}
return cell_sets
def genes_to_samples_by_features(metadata):
'''
>>> metadata = {
... '0': {
... 'genes': {'A': 0, 'B': 0, 'A2': 0, 'B2': 0}
... },
... '1': {
... 'genes': {'A': 0, 'B': 1, 'A2': 0, 'B2': 1}
... },
... '2': {
... 'genes': {'A': 0, 'B': 4, 'A2': 0, 'B2': 4}
... }
... }
>>> s_by_f = genes_to_samples_by_features(metadata)
>>> s_by_f.shape
(3, 4)
'''
records = dict([(k, v['genes']) for k, v in metadata.items()])
    return pandas.DataFrame.from_dict(records, orient='index')
import os
import requests
import pandas as pd
from lxml import html, etree
from frankie import frankiefun, _htmlParse, transformations
@frankiefun("XPathRemove")
def _XPathRemove(doc, **kwargs):
xpath = kwargs['xpath']
parsedDoc = _htmlParse(doc)
element = parsedDoc.find(xpath)
if element is None:
return doc
element.clear()
doc = etree.tostring(parsedDoc, pretty_print=True, method="html")
return doc.decode("utf-8")
@frankiefun("XPathReplace")
def _XPathReplace(doc, **kwargs):
XPathFind = kwargs['find']
replace = kwargs['replace']
parsedDoc = _htmlParse(doc)
element = parsedDoc.find(XPathFind)
if element is not None:
element.clear()
element.append(etree.fromstring(replace))
doc = etree.tostring(parsedDoc, pretty_print=True, method="html")
return doc.decode("utf-8")
else:
return doc
@frankiefun("XPathCopyFromRemote")
def _XPathCopyFromRemote(doc, **kwargs):
origin = kwargs['origin']
XPathSource = kwargs['XPathSource']
XPathDest = kwargs['XPathDest']
parsedDoc = _htmlParse(doc)
sourceDoc = _htmlParse(requests.get(origin).text)
element = sourceDoc.find(XPathSource)
elementDest = parsedDoc.find(XPathDest)
elementDest.clear()
elementDest.append(element)
doc = etree.tostring(parsedDoc, pretty_print=True, method="html")
return doc.decode("utf-8")
@frankiefun("XPathCopyFromLocal")
def _XPathCopyFromLocal(doc, **kwargs):
origin = kwargs['origin']
XPathSource = kwargs['XPathSource']
XPathDest = kwargs['XPathDest']
if 'position' in kwargs:
position = kwargs['position']
else:
position = "AddBefore"
path = os.getcwd() + '/fragments/' + origin
sourceDoc = _htmlParse(open(path).read(), parseAsHtml=False)
parsedDoc = _htmlParse(doc)
element = sourceDoc.xpath(XPathSource) #[0].getchildren()
elementDest = parsedDoc.xpath(XPathDest)
if elementDest is not None and elementDest != []:
elementDest = elementDest[0]
else:
return doc
if element != None and element != []:
if position == 'AddBefore':
i = 1
for e in element:
elementDest.getparent().insert(elementDest.getparent().index(elementDest), e)
i = i + 1
elif position == 'AddLast':
for e in element:
elementDest.append(e)
doc = etree.tostring(parsedDoc, pretty_print=True, method="html")
return doc.decode("utf-8")
@frankiefun("XPathSetText")
def _XPathSetText(doc, **kwargs):
XPathFind = kwargs['XPathFind']
text = kwargs['text']
parsedDoc = _htmlParse(doc)
element = parsedDoc.find(XPathFind)
if element is not None:
element.clear()
fragments = html.fragments_fromstring(text)
last = None
for frag in fragments:
            if isinstance(frag, etree._Element):
element.append(frag)
last = frag
else:
if last:
last.tail = frag
else:
element.text = frag
doc = etree.tostring(parsedDoc, pretty_print=True, method="html")
return doc.decode("utf-8")
else:
return doc
@frankiefun("AppendLast")
def _XPathAppendLast(doc, **kwargs):
kwargs['position'] = 'AddLast'
print(kwargs)
return transformations['XPathCopyFromLocal'](doc, **kwargs)
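# Hedged usage sketch (placeholder HTML and xpath, not from the original site):
# frankiefun() appears to register each handler in the shared `transformations`
# mapping (see the call just above), so a transform can also be applied by name:
#   doc = "<html><body><p id='x'>old</p></body></html>"
#   doc = transformations['XPathReplace'](doc, find=".//p[@id='x']",
#                                         replace="<p id='x'>new</p>")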
@frankiefun("XPathToDataFrame")
def _XPathToDataFrame(doc, **kwargs):
name = kwargs["name"]
dataframeDescriptor = kwargs["DataFrame"]
sourceDoc = _htmlParse(doc)
data = {}
schema = dataframeDescriptor['schema']
for field in schema.keys():
data[field] = sourceDoc.xpath(schema[field])
    kwargs['ctx']['df'] = pd.DataFrame(data)
import itertools
import pandas as pd
from pandas.testing import assert_series_equal
import pytest
from solarforecastarbiter.reference_forecasts import forecast
def assert_none_or_series(out, expected):
assert len(out) == len(expected)
for o, e in zip(out, expected):
if e is None:
assert o is None
else:
assert_series_equal(o, e)
def test_resample():
index = pd.date_range(start='20190101', freq='15min', periods=5)
arg = pd.Series([1, 0, 0, 0, 2], index=index)
idx_exp = pd.date_range(start='20190101', freq='1h', periods=2)
expected = pd.Series([0.25, 2.], index=idx_exp)
out = forecast.resample(arg)
assert_series_equal(out, expected)
assert forecast.resample(None) is None
@pytest.fixture
def rfs_series():
return pd.Series([1, 2],
index=pd.DatetimeIndex(['20190101 01', '20190101 02']))
@pytest.mark.parametrize(
'start,end,start_slice,end_slice,fill_method,exp_val,exp_idx', [
(None, None, None, None, 'interpolate', [1, 1.5, 2],
['20190101 01', '20190101 0130', '20190101 02']),
('20190101', '20190101 0230', None, None, 'interpolate',
[1, 1, 1, 1.5, 2, 2],
['20190101', '20190101 0030', '20190101 01', '20190101 0130',
'20190101 02', '20190101 0230']),
('20190101', '20190101 02', '20190101 0030', '20190101 0130', 'bfill',
[1., 1, 2], ['20190101 0030', '20190101 01', '20190101 0130'])
]
)
def test_reindex_fill_slice(rfs_series, start, end, start_slice, end_slice,
fill_method, exp_val, exp_idx):
exp = pd.Series(exp_val, index=pd.DatetimeIndex(exp_idx))
out = forecast.reindex_fill_slice(
rfs_series, freq='30min', start=start, end=end,
start_slice=start_slice, end_slice=end_slice, fill_method=fill_method)
assert_series_equal(out, exp)
def test_reindex_fill_slice_some_nan():
rfs_series = pd.Series([1, 2, None, 4], index=pd.DatetimeIndex([
'20190101 01', '20190101 02', '20190101 03', '20190101 04',
]))
start, end, start_slice, end_slice, fill_method = \
None, None, None, None, 'interpolate'
exp_val = [1, 1.5, 2, 2.5, 3, 3.5, 4]
exp_idx = [
'20190101 01', '20190101 0130', '20190101 02', '20190101 0230',
'20190101 03', '20190101 0330', '20190101 04']
exp = pd.Series(exp_val, index=pd.DatetimeIndex(exp_idx))
out = forecast.reindex_fill_slice(
rfs_series, freq='30min', start=start, end=end,
start_slice=start_slice, end_slice=end_slice, fill_method=fill_method)
assert_series_equal(out, exp)
def test_reindex_fill_slice_all_nan():
arg = pd.Series([None]*3, index=pd.DatetimeIndex(
['20190101 01', '20190101 02', '20190101 03']))
out = forecast.reindex_fill_slice(arg, freq='30min')
exp = pd.Series([None]*5, index=pd.DatetimeIndex(
['20190101 01', '20190101 0130', '20190101 02', '20190101 0230',
'20190101 03']))
assert_series_equal(out, exp)
def test_reindex_fill_slice_empty():
out = forecast.reindex_fill_slice(pd.Series(dtype=float), freq='30min')
assert_series_equal(out, pd.Series(dtype=float))
def test_reindex_fill_slice_none():
out = forecast.reindex_fill_slice(None, freq='30min')
assert out is None
def test_cloud_cover_to_ghi_linear():
cloud_cover = pd.Series([0, 50, 100.])
ghi_clear = pd.Series([1000, 1000, 1000.])
out = forecast.cloud_cover_to_ghi_linear(cloud_cover, ghi_clear)
expected = pd.Series([1000, 675, 350.])
assert_series_equal(out, expected)
out = forecast.cloud_cover_to_ghi_linear(cloud_cover, ghi_clear, offset=20)
expected = pd.Series([1000, 600, 200.])
assert_series_equal(out, expected)
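# Note (an inference from the expected values above, not a claim about the
# library's internals): the numbers are consistent with a linear mixing model
#   ghi = ghi_clear * (offset + (100 - offset) * (1 - cloud_cover / 100)) / 100
# with a default offset of 35, e.g. cloud_cover=50 -> (35 + 65 * 0.5)% of 1000 = 675.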
@pytest.mark.xfail(raises=AssertionError, strict=True)
def test_cloud_cover_to_irradiance_ghi_clear():
index = pd.date_range(start='20190101', periods=3, freq='1h')
cloud_cover = pd.Series([0, 50, 100.], index=index)
ghi_clear = pd.Series([10, 10, 1000.], index=index)
zenith = pd.Series([90.0, 89.9, 45], index=index)
out = forecast.cloud_cover_to_irradiance_ghi_clear(
cloud_cover, ghi_clear, zenith
)
# https://github.com/pvlib/pvlib-python/issues/681
ghi_exp = pd.Series([10., 6.75, 350.])
dni_exp = pd.Series([0., 0., 4.74198165e+01])
dhi_exp = pd.Series([10., 6.75, 316.46912616])
assert_series_equal(out[0], ghi_exp)
assert_series_equal(out[1], dni_exp)
    assert_series_equal(out[2], dhi_exp)
# built-in
import os
import pickle
# third-party
import pandas as pd
import numpy as np
import pyedflib as pyedf
# local
import utils
def get_baseline_seizure_data(patients_info_dir, saving_dir):
list_patients = [patient_id for patient_id in os.listdir(patients_info_dir) if 'MSEL' in patient_id]
for patient_id in list_patients:
pat = pickle.load(open(os.path.join(patients_info_dir, patient_id), 'rb'))
print(f'\n--- Checking patient {pat.id} ---')
if not os.path.isdir(os.path.join(saving_dir, pat.id)):
os.makedirs(os.path.join(saving_dir, pat.id))
get_baseline_data(patients_info_dir, os.path.join(saving_dir, pat.id), pat)
get_seizures_data(patients_info_dir, os.path.join(saving_dir, pat.id), pat)
# remove patient folder if empty
if os.listdir(os.path.join(saving_dir, pat.id)) == []:
os.rmdir(os.path.join(saving_dir, pat.id))
# ---------- AUXILIARY FUNCTIONS ---------- #
def get_baseline_data(patients_info_dir, saving_dir, pat):
# get the baseline files
baseline_files = [file for file in os.listdir(os.path.join(patients_info_dir)) if (file not in utils.get_seizure_files(pat) and file.endswith('.edf') and 'Empatica' in file)]
if baseline_files == []:
print(' patient has no baseline Empatica files')
return None
#get the modalities present in the baseline files
target_mod = set([base.split(' - ')[-1][:-4] for base in baseline_files])
if all([os.path.exists(os.path.join(saving_dir, f'baseline_data_{modality}')) for modality in target_mod]):
print(' patient already has baseline data')
return None
# run each date to join all corresponding modalities in a single dataframe
for modality in target_mod:
if os.path.exists(os.path.join(saving_dir, f'baseline_data_{modality}')):
print(f' patient already has modality {modality}')
continue
print(f' --- Checking modality {modality} ---')
#create a new dataframe for modality
df = pd.DataFrame()
# get the dates associated with the modality
baseline_dates = set([base.split(' - ')[1] for base in baseline_files if modality in base])
for date in sorted(baseline_dates):
name = f'{pat.id} - {date} - {modality}.edf'
print(f' file {name}')
try:
edf = pyedf.EdfReader(os.path.join(patients_info_dir, pat.id, name))
except Exception as e:
print(e)
# concatenate the new dataframe with df
df = pd.concat((df, utils.edf_to_df(edf, modality)), axis=0)
df.to_pickle(os.path.join(saving_dir, f'baseline_data_{modality}'))
def get_seizures_data(patients_info_dir, saving_dir, pat):
seizure_files = [file for file in utils.get_seizure_files(pat) if 'Empatica' in file]
if seizure_files == []:
print(' patient has no seizures recorded in Empatica files')
return None
target_mod = list(set([base.split(' - ')[-1][:-4] for base in seizure_files]))
    if all([os.path.exists(os.path.join(saving_dir, f'seizures_data_{modality}')) for modality in target_mod]):
print(' patient already has seizures data')
return None
for modality in target_mod:
if os.path.exists(os.path.join(saving_dir, f'seizures_data_{modality}')):
print(f' patient already has modality {modality}')
continue
print(f' --- Checking modality {modality} ---')
#create a new dataframe for each modality
        df = pd.DataFrame()
# pylint: disable=E1101,E1103,W0232
from datetime import datetime, timedelta
import numpy as np
import warnings
from pandas.core import common as com
from pandas.types.common import (is_integer,
is_float,
is_object_dtype,
is_integer_dtype,
is_float_dtype,
is_scalar,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_timedelta64_dtype,
is_period_dtype,
is_bool_dtype,
pandas_dtype,
_ensure_int64,
_ensure_object)
from pandas.types.dtypes import PeriodDtype
from pandas.types.generic import ABCSeries
import pandas.tseries.frequencies as frequencies
from pandas.tseries.frequencies import get_freq_code as _gfc
from pandas.tseries.index import DatetimeIndex, Int64Index, Index
from pandas.tseries.tdi import TimedeltaIndex
from pandas.tseries.base import DatelikeOps, DatetimeIndexOpsMixin
from pandas.tseries.tools import parse_time_string
import pandas.tseries.offsets as offsets
import pandas._period as period
from pandas._period import (Period, IncompatibleFrequency,
get_period_field_arr, _validate_end_alias,
_quarter_to_myear)
from pandas.core.base import _shared_docs
from pandas.indexes.base import _index_shared_docs, _ensure_index
from pandas import compat
from pandas.util.decorators import (Appender, Substitution, cache_readonly,
deprecate_kwarg)
from pandas.lib import infer_dtype
import pandas.tslib as tslib
from pandas.compat import zip, u
import pandas.indexes.base as ibase
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
_index_doc_kwargs.update(
dict(target_klass='PeriodIndex or list of Periods'))
def _field_accessor(name, alias, docstring=None):
def f(self):
base, mult = _gfc(self.freq)
return get_period_field_arr(alias, self._values, base)
f.__name__ = name
f.__doc__ = docstring
return property(f)
def dt64arr_to_periodarr(data, freq, tz):
if data.dtype != np.dtype('M8[ns]'):
raise ValueError('Wrong dtype: %s' % data.dtype)
freq = Period._maybe_convert_freq(freq)
base, mult = _gfc(freq)
return period.dt64arr_to_periodarr(data.view('i8'), base, tz)
# --- Period index sketch
_DIFFERENT_FREQ_INDEX = period._DIFFERENT_FREQ_INDEX
def _period_index_cmp(opname, nat_result=False):
"""
Wrap comparison operations to convert datetime-like to datetime64
"""
def wrapper(self, other):
if isinstance(other, Period):
func = getattr(self._values, opname)
other_base, _ = _gfc(other.freq)
if other.freq != self.freq:
msg = _DIFFERENT_FREQ_INDEX.format(self.freqstr, other.freqstr)
raise IncompatibleFrequency(msg)
result = func(other.ordinal)
elif isinstance(other, PeriodIndex):
if other.freq != self.freq:
msg = _DIFFERENT_FREQ_INDEX.format(self.freqstr, other.freqstr)
raise IncompatibleFrequency(msg)
result = getattr(self._values, opname)(other._values)
mask = self._isnan | other._isnan
if mask.any():
result[mask] = nat_result
return result
elif other is tslib.NaT:
result = np.empty(len(self._values), dtype=bool)
result.fill(nat_result)
else:
other = Period(other, freq=self.freq)
func = getattr(self._values, opname)
result = func(other.ordinal)
if self.hasnans:
result[self._isnan] = nat_result
return result
return wrapper
class PeriodIndex(DatelikeOps, DatetimeIndexOpsMixin, Int64Index):
"""
Immutable ndarray holding ordinal values indicating regular periods in
time such as particular years, quarters, months, etc. A value of 1 is the
period containing the Gregorian proleptic datetime Jan 1, 0001 00:00:00.
This ordinal representation is from the scikits.timeseries project.
For instance,
# construct period for day 1/1/1 and get the first second
i = Period(year=1,month=1,day=1,freq='D').asfreq('S', 'S')
i.ordinal
===> 1
    Index keys are boxed to Period objects which carry the metadata (e.g.,
    frequency information).
Parameters
----------
data : array-like (1-dimensional), optional
Optional period-like data to construct index with
copy : bool
Make a copy of input ndarray
freq : string or period object, optional
One of pandas period strings or corresponding objects
start : starting value, period-like, optional
If data is None, used as the start point in generating regular
period data.
periods : int, optional, > 0
Number of periods to generate, if generating index. Takes precedence
over end argument
end : end value, period-like, optional
If periods is none, generated index will extend to first conforming
period on or just past end argument
year : int, array, or Series, default None
month : int, array, or Series, default None
quarter : int, array, or Series, default None
day : int, array, or Series, default None
hour : int, array, or Series, default None
minute : int, array, or Series, default None
second : int, array, or Series, default None
tz : object, default None
Timezone for converting datetime64 data to Periods
dtype : str or PeriodDtype, default None
Examples
--------
>>> idx = PeriodIndex(year=year_arr, quarter=q_arr)
>>> idx2 = PeriodIndex(start='2000', end='2010', freq='A')
"""
_box_scalars = True
_typ = 'periodindex'
_attributes = ['name', 'freq']
_datetimelike_ops = ['year', 'month', 'day', 'hour', 'minute', 'second',
'weekofyear', 'week', 'dayofweek', 'weekday',
'dayofyear', 'quarter', 'qyear', 'freq',
'days_in_month', 'daysinmonth',
'to_timestamp', 'asfreq', 'start_time', 'end_time',
'is_leap_year']
_is_numeric_dtype = False
_infer_as_myclass = True
freq = None
__eq__ = _period_index_cmp('__eq__')
__ne__ = _period_index_cmp('__ne__', nat_result=True)
__lt__ = _period_index_cmp('__lt__')
__gt__ = _period_index_cmp('__gt__')
__le__ = _period_index_cmp('__le__')
__ge__ = _period_index_cmp('__ge__')
def __new__(cls, data=None, ordinal=None, freq=None, start=None, end=None,
periods=None, copy=False, name=None, tz=None, dtype=None,
**kwargs):
if periods is not None:
if is_float(periods):
periods = int(periods)
elif not is_integer(periods):
raise ValueError('Periods must be a number, got %s' %
str(periods))
if name is None and hasattr(data, 'name'):
name = data.name
if dtype is not None:
dtype = pandas_dtype(dtype)
if not is_period_dtype(dtype):
raise ValueError('dtype must be PeriodDtype')
if freq is None:
freq = dtype.freq
elif freq != dtype.freq:
msg = 'specified freq and dtype are different'
raise IncompatibleFrequency(msg)
if data is None:
if ordinal is not None:
data = np.asarray(ordinal, dtype=np.int64)
else:
data, freq = cls._generate_range(start, end, periods,
freq, kwargs)
else:
ordinal, freq = cls._from_arraylike(data, freq, tz)
data = np.array(ordinal, dtype=np.int64, copy=copy)
return cls._simple_new(data, name=name, freq=freq)
@classmethod
def _generate_range(cls, start, end, periods, freq, fields):
if freq is not None:
freq = Period._maybe_convert_freq(freq)
field_count = len(fields)
if com._count_not_none(start, end) > 0:
if field_count > 0:
raise ValueError('Can either instantiate from fields '
'or endpoints, but not both')
subarr, freq = _get_ordinal_range(start, end, periods, freq)
elif field_count > 0:
subarr, freq = _range_from_fields(freq=freq, **fields)
else:
raise ValueError('Not enough parameters to construct '
'Period range')
return subarr, freq
@classmethod
def _from_arraylike(cls, data, freq, tz):
if freq is not None:
freq = Period._maybe_convert_freq(freq)
if not isinstance(data, (np.ndarray, PeriodIndex,
DatetimeIndex, Int64Index)):
if is_scalar(data) or isinstance(data, Period):
raise ValueError('PeriodIndex() must be called with a '
'collection of some kind, %s was passed'
% repr(data))
# other iterable of some kind
if not isinstance(data, (list, tuple)):
data = list(data)
try:
data = _ensure_int64(data)
if freq is None:
raise ValueError('freq not specified')
data = np.array([Period(x, freq=freq) for x in data],
dtype=np.int64)
except (TypeError, ValueError):
data = _ensure_object(data)
if freq is None:
freq = period.extract_freq(data)
data = period.extract_ordinals(data, freq)
else:
if isinstance(data, PeriodIndex):
if freq is None or freq == data.freq:
freq = data.freq
data = data._values
else:
base1, _ = _gfc(data.freq)
base2, _ = _gfc(freq)
data = period.period_asfreq_arr(data._values,
base1, base2, 1)
else:
if is_object_dtype(data):
inferred = infer_dtype(data)
if inferred == 'integer':
data = data.astype(np.int64)
if freq is None and is_object_dtype(data):
# must contain Period instance and thus extract ordinals
freq = period.extract_freq(data)
data = period.extract_ordinals(data, freq)
if freq is None:
msg = 'freq not specified and cannot be inferred'
raise ValueError(msg)
if data.dtype != np.int64:
if np.issubdtype(data.dtype, np.datetime64):
data = dt64arr_to_periodarr(data, freq, tz)
else:
data = _ensure_object(data)
data = period.extract_ordinals(data, freq)
return data, freq
@classmethod
def _simple_new(cls, values, name=None, freq=None, **kwargs):
if not is_integer_dtype(values):
values = np.array(values, copy=False)
if (len(values) > 0 and is_float_dtype(values)):
raise TypeError("PeriodIndex can't take floats")
else:
return cls(values, name=name, freq=freq, **kwargs)
values = np.array(values, dtype='int64', copy=False)
result = object.__new__(cls)
result._data = values
result.name = name
if freq is None:
raise ValueError('freq is not specified')
result.freq = Period._maybe_convert_freq(freq)
result._reset_identity()
return result
def _shallow_copy_with_infer(self, values=None, **kwargs):
""" we always want to return a PeriodIndex """
return self._shallow_copy(values=values, **kwargs)
def _shallow_copy(self, values=None, **kwargs):
if kwargs.get('freq') is None:
# freq must be provided
kwargs['freq'] = self.freq
if values is None:
values = self._values
return super(PeriodIndex, self)._shallow_copy(values=values, **kwargs)
def _coerce_scalar_to_index(self, item):
"""
we need to coerce a scalar to a compat for our index type
Parameters
----------
item : scalar item to coerce
"""
return PeriodIndex([item], **self._get_attributes_dict())
def __contains__(self, key):
if isinstance(key, Period):
if key.freq != self.freq:
return False
else:
return key.ordinal in self._engine
else:
try:
self.get_loc(key)
return True
except Exception:
return False
return False
@property
def asi8(self):
return self._values.view('i8')
@cache_readonly
def _int64index(self):
return Int64Index(self.asi8, name=self.name, fastpath=True)
@property
def values(self):
return self.asobject.values
@property
def _values(self):
return self._data
def __array__(self, dtype=None):
if is_integer_dtype(dtype):
return self.asi8
else:
return self.asobject.values
def __array_wrap__(self, result, context=None):
"""
Gets called after a ufunc. Needs additional handling as
PeriodIndex stores internal data as int dtype
Replace this to __numpy_ufunc__ in future version
"""
if isinstance(context, tuple) and len(context) > 0:
func = context[0]
if (func is np.add):
pass
elif (func is np.subtract):
name = self.name
left = context[1][0]
right = context[1][1]
if (isinstance(left, PeriodIndex) and
isinstance(right, PeriodIndex)):
name = left.name if left.name == right.name else None
return Index(result, name=name)
elif isinstance(left, Period) or isinstance(right, Period):
return Index(result, name=name)
elif isinstance(func, np.ufunc):
if 'M->M' not in func.types:
msg = "ufunc '{0}' not supported for the PeriodIndex"
# This should be TypeError, but TypeError cannot be raised
# from here because numpy catches.
raise ValueError(msg.format(func.__name__))
if is_bool_dtype(result):
return result
# the result is object dtype array of Period
# cannot pass _simple_new as it is
return PeriodIndex(result, freq=self.freq, name=self.name)
@property
def _box_func(self):
return lambda x: Period._from_ordinal(ordinal=x, freq=self.freq)
def _to_embed(self, keep_tz=False):
"""
return an array repr of this object, potentially casting to object
"""
return self.asobject.values
@property
def _formatter_func(self):
return lambda x: "'%s'" % x
def asof_locs(self, where, mask):
"""
where : array of timestamps
mask : array of booleans where data is not NA
"""
where_idx = where
if isinstance(where_idx, DatetimeIndex):
where_idx = PeriodIndex(where_idx.values, freq=self.freq)
locs = self._values[mask].searchsorted(where_idx._values, side='right')
locs = np.where(locs > 0, locs - 1, 0)
result = np.arange(len(self))[mask].take(locs)
first = mask.argmax()
result[(locs == 0) & (where_idx._values < self._values[first])] = -1
return result
@Appender(_index_shared_docs['astype'])
def astype(self, dtype, copy=True, how='start'):
dtype = pandas_dtype(dtype)
if is_object_dtype(dtype):
return self.asobject
elif is_integer_dtype(dtype):
if copy:
return self._int64index.copy()
else:
return self._int64index
elif is_datetime64_dtype(dtype):
return self.to_timestamp(how=how)
elif is_datetime64tz_dtype(dtype):
return self.to_timestamp(how=how).tz_localize(dtype.tz)
elif is_period_dtype(dtype):
return self.asfreq(freq=dtype.freq)
raise ValueError('Cannot cast PeriodIndex to dtype %s' % dtype)
@Substitution(klass='PeriodIndex')
@Appender(_shared_docs['searchsorted'])
@deprecate_kwarg(old_arg_name='key', new_arg_name='value')
def searchsorted(self, value, side='left', sorter=None):
if isinstance(value, Period):
if value.freq != self.freq:
msg = _DIFFERENT_FREQ_INDEX.format(self.freqstr, value.freqstr)
raise IncompatibleFrequency(msg)
value = value.ordinal
elif isinstance(value, compat.string_types):
value = Period(value, freq=self.freq).ordinal
return self._values.searchsorted(value, side=side, sorter=sorter)
@property
def is_all_dates(self):
return True
@property
def is_full(self):
"""
Returns True if there are any missing periods from start to end
"""
if len(self) == 0:
return True
if not self.is_monotonic:
raise ValueError('Index is not monotonic')
values = self.values
return ((values[1:] - values[:-1]) < 2).all()
def asfreq(self, freq=None, how='E'):
"""
Convert the PeriodIndex to the specified frequency `freq`.
Parameters
----------
freq : str
a frequency
how : str {'E', 'S'}
'E', 'END', or 'FINISH' for end,
'S', 'START', or 'BEGIN' for start.
Whether the elements should be aligned to the end
        or start within a period. January 31st ('END') vs.
        January 1st ('START') for example.
Returns
-------
new : PeriodIndex with the new frequency
Examples
--------
>>> pidx = pd.period_range('2010-01-01', '2015-01-01', freq='A')
>>> pidx
<class 'pandas.tseries.period.PeriodIndex'>
[2010, ..., 2015]
Length: 6, Freq: A-DEC
>>> pidx.asfreq('M')
<class 'pandas.tseries.period.PeriodIndex'>
[2010-12, ..., 2015-12]
Length: 6, Freq: M
>>> pidx.asfreq('M', how='S')
<class 'pandas.tseries.period.PeriodIndex'>
[2010-01, ..., 2015-01]
Length: 6, Freq: M
"""
how = _validate_end_alias(how)
freq = Period._maybe_convert_freq(freq)
base1, mult1 = _gfc(self.freq)
base2, mult2 = _gfc(freq)
asi8 = self.asi8
# mult1 can't be negative or 0
end = how == 'E'
if end:
ordinal = asi8 + mult1 - 1
else:
ordinal = asi8
new_data = period.period_asfreq_arr(ordinal, base1, base2, end)
if self.hasnans:
new_data[self._isnan] = tslib.iNaT
return self._simple_new(new_data, self.name, freq=freq)
def to_datetime(self, dayfirst=False):
"""
DEPRECATED: use :meth:`to_timestamp` instead.
Cast to DatetimeIndex.
"""
warnings.warn("to_datetime is deprecated. Use self.to_timestamp(...)",
FutureWarning, stacklevel=2)
return self.to_timestamp()
year = _field_accessor('year', 0, "The year of the period")
month = _field_accessor('month', 3, "The month as January=1, December=12")
day = _field_accessor('day', 4, "The days of the period")
hour = _field_accessor('hour', 5, "The hour of the period")
minute = _field_accessor('minute', 6, "The minute of the period")
second = _field_accessor('second', 7, "The second of the period")
weekofyear = _field_accessor('week', 8, "The week ordinal of the year")
week = weekofyear
dayofweek = _field_accessor('dayofweek', 10,
"The day of the week with Monday=0, Sunday=6")
weekday = dayofweek
dayofyear = day_of_year = _field_accessor('dayofyear', 9,
"The ordinal day of the year")
quarter = _field_accessor('quarter', 2, "The quarter of the date")
qyear = _field_accessor('qyear', 1)
days_in_month = _field_accessor('days_in_month', 11,
"The number of days in the month")
daysinmonth = days_in_month
@property
def is_leap_year(self):
""" Logical indicating if the date belongs to a leap year """
return tslib._isleapyear_arr(self.year)
@property
def start_time(self):
return self.to_timestamp(how='start')
@property
def end_time(self):
return self.to_timestamp(how='end')
def _mpl_repr(self):
# how to represent ourselves to matplotlib
return self.asobject.values
def to_timestamp(self, freq=None, how='start'):
"""
Cast to DatetimeIndex
Parameters
----------
freq : string or DateOffset, default 'D' for week or longer, 'S'
otherwise
Target frequency
how : {'s', 'e', 'start', 'end'}
Returns
-------
DatetimeIndex
"""
how = _validate_end_alias(how)
if freq is None:
base, mult = _gfc(self.freq)
freq = frequencies.get_to_timestamp_base(base)
else:
freq = Period._maybe_convert_freq(freq)
base, mult = _gfc(freq)
new_data = self.asfreq(freq, how)
new_data = period.periodarr_to_dt64arr(new_data._values, base)
return DatetimeIndex(new_data, freq='infer', name=self.name)
def _maybe_convert_timedelta(self, other):
if isinstance(other, (timedelta, np.timedelta64, offsets.Tick)):
offset = frequencies.to_offset(self.freq.rule_code)
if isinstance(offset, offsets.Tick):
nanos = tslib._delta_to_nanoseconds(other)
offset_nanos = tslib._delta_to_nanoseconds(offset)
if nanos % offset_nanos == 0:
return nanos // offset_nanos
elif isinstance(other, offsets.DateOffset):
freqstr = other.rule_code
base = frequencies.get_base_alias(freqstr)
if base == self.freq.rule_code:
return other.n
msg = _DIFFERENT_FREQ_INDEX.format(self.freqstr, other.freqstr)
raise IncompatibleFrequency(msg)
elif isinstance(other, np.ndarray):
if is_integer_dtype(other):
return other
elif is_timedelta64_dtype(other):
offset = frequencies.to_offset(self.freq)
if isinstance(offset, offsets.Tick):
nanos = tslib._delta_to_nanoseconds(other)
offset_nanos = tslib._delta_to_nanoseconds(offset)
if (nanos % offset_nanos).all() == 0:
return nanos // offset_nanos
elif is_integer(other):
# integer is passed to .shift via
# _add_datetimelike_methods basically
# but ufunc may pass integer to _add_delta
return other
# raise when input doesn't have freq
msg = "Input has different freq from PeriodIndex(freq={0})"
raise IncompatibleFrequency(msg.format(self.freqstr))
def _add_delta(self, other):
ordinal_delta = self._maybe_convert_timedelta(other)
return self.shift(ordinal_delta)
def _sub_datelike(self, other):
if other is tslib.NaT:
new_data = np.empty(len(self), dtype=np.int64)
new_data.fill(tslib.iNaT)
return TimedeltaIndex(new_data, name=self.name)
return NotImplemented
def _sub_period(self, other):
if self.freq != other.freq:
msg = _DIFFERENT_FREQ_INDEX.format(self.freqstr, other.freqstr)
raise IncompatibleFrequency(msg)
asi8 = self.asi8
new_data = asi8 - other.ordinal
if self.hasnans:
new_data = new_data.astype(np.float64)
new_data[self._isnan] = np.nan
# result must be Int64Index or Float64Index
return Index(new_data, name=self.name)
def shift(self, n):
"""
Specialized shift which produces an PeriodIndex
Parameters
----------
n : int
Periods to shift by
Returns
-------
shifted : PeriodIndex
"""
values = self._values + n * self.freq.n
if self.hasnans:
values[self._isnan] = tslib.iNaT
return PeriodIndex(data=values, name=self.name, freq=self.freq)
@cache_readonly
def dtype(self):
return PeriodDtype.construct_from_string(self.freq)
@property
def inferred_type(self):
# b/c data is represented as ints make sure we can't have ambiguous
# indexing
return 'period'
def get_value(self, series, key):
"""
Fast lookup of value from 1-dimensional ndarray. Only use this if you
know what you're doing
"""
s = com._values_from_object(series)
try:
return com._maybe_box(self,
super(PeriodIndex, self).get_value(s, key),
series, key)
except (KeyError, IndexError):
try:
asdt, parsed, reso = parse_time_string(key, self.freq)
grp = frequencies.Resolution.get_freq_group(reso)
freqn = frequencies.get_freq_group(self.freq)
vals = self._values
# if our data is higher resolution than requested key, slice
if grp < freqn:
iv = Period(asdt, freq=(grp, 1))
ord1 = iv.asfreq(self.freq, how='S').ordinal
ord2 = iv.asfreq(self.freq, how='E').ordinal
if ord2 < vals[0] or ord1 > vals[-1]:
raise KeyError(key)
pos = np.searchsorted(self._values, [ord1, ord2])
key = slice(pos[0], pos[1] + 1)
return series[key]
elif grp == freqn:
key = Period(asdt, freq=self.freq).ordinal
return com._maybe_box(self, self._engine.get_value(s, key),
series, key)
else:
raise KeyError(key)
except TypeError:
pass
key = Period(key, self.freq).ordinal
return com._maybe_box(self, self._engine.get_value(s, key),
series, key)
@Appender(_index_shared_docs['get_indexer'] % _index_doc_kwargs)
def get_indexer(self, target, method=None, limit=None, tolerance=None):
target = _ensure_index(target)
if hasattr(target, 'freq') and target.freq != self.freq:
msg = _DIFFERENT_FREQ_INDEX.format(self.freqstr, target.freqstr)
raise IncompatibleFrequency(msg)
if isinstance(target, PeriodIndex):
target = target.asi8
if tolerance is not None:
tolerance = self._convert_tolerance(tolerance)
return Index.get_indexer(self._int64index, target, method,
limit, tolerance)
def _get_unique_index(self, dropna=False):
"""
wrap Index._get_unique_index to handle NaT
"""
res = super(PeriodIndex, self)._get_unique_index(dropna=dropna)
if dropna:
res = res.dropna()
return res
def get_loc(self, key, method=None, tolerance=None):
"""
Get integer location for requested label
Returns
-------
loc : int
"""
try:
return self._engine.get_loc(key)
except KeyError:
if is_integer(key):
raise
try:
asdt, parsed, reso = parse_time_string(key, self.freq)
key = asdt
except TypeError:
pass
try:
key = Period(key, freq=self.freq)
except ValueError:
# we cannot construct the Period
# as we have an invalid type
raise KeyError(key)
try:
ordinal = tslib.iNaT if key is tslib.NaT else key.ordinal
if tolerance is not None:
tolerance = self._convert_tolerance(tolerance)
return self._int64index.get_loc(ordinal, method, tolerance)
except KeyError:
raise KeyError(key)
def _maybe_cast_slice_bound(self, label, side, kind):
"""
If label is a string or a datetime, cast it to Period.ordinal according
to resolution.
Parameters
----------
label : object
side : {'left', 'right'}
kind : {'ix', 'loc', 'getitem'}
Returns
-------
bound : Period or object
Notes
-----
Value of `side` parameter should be validated in caller.
"""
assert kind in ['ix', 'loc', 'getitem']
if isinstance(label, datetime):
return Period(label, freq=self.freq)
elif isinstance(label, compat.string_types):
try:
_, parsed, reso = parse_time_string(label, self.freq)
bounds = self._parsed_string_to_bounds(reso, parsed)
return bounds[0 if side == 'left' else 1]
except Exception:
raise KeyError(label)
elif is_integer(label) or is_float(label):
self._invalid_indexer('slice', label)
return label
def _parsed_string_to_bounds(self, reso, parsed):
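        # Map a datetime string parsed at resolution ``reso`` to the (start, end)
        # Period pair spanning it, expressed at this index's own frequency.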
if reso == 'year':
t1 = Period(year=parsed.year, freq='A')
elif reso == 'month':
t1 = Period(year=parsed.year, month=parsed.month, freq='M')
elif reso == 'quarter':
q = (parsed.month - 1) // 3 + 1
t1 = Period(year=parsed.year, quarter=q, freq='Q-DEC')
elif reso == 'day':
t1 = Period(year=parsed.year, month=parsed.month, day=parsed.day,
freq='D')
elif reso == 'hour':
t1 = Period(year=parsed.year, month=parsed.month, day=parsed.day,
hour=parsed.hour, freq='H')
elif reso == 'minute':
t1 = Period(year=parsed.year, month=parsed.month, day=parsed.day,
hour=parsed.hour, minute=parsed.minute, freq='T')
elif reso == 'second':
t1 = Period(year=parsed.year, month=parsed.month, day=parsed.day,
hour=parsed.hour, minute=parsed.minute,
second=parsed.second, freq='S')
else:
raise KeyError(reso)
return (t1.asfreq(self.freq, how='start'),
t1.asfreq(self.freq, how='end'))
def _get_string_slice(self, key):
if not self.is_monotonic:
raise ValueError('Partial indexing only valid for '
'ordered time series')
key, parsed, reso = parse_time_string(key, self.freq)
grp = frequencies.Resolution.get_freq_group(reso)
freqn = frequencies.get_freq_group(self.freq)
if reso in ['day', 'hour', 'minute', 'second'] and not grp < freqn:
raise KeyError(key)
t1, t2 = self._parsed_string_to_bounds(reso, parsed)
return slice(self.searchsorted(t1.ordinal, side='left'),
self.searchsorted(t2.ordinal, side='right'))
def _convert_tolerance(self, tolerance):
tolerance = DatetimeIndexOpsMixin._convert_tolerance(self, tolerance)
return self._maybe_convert_timedelta(tolerance)
def insert(self, loc, item):
if not isinstance(item, Period) or self.freq != item.freq:
return self.asobject.insert(loc, item)
idx = np.concatenate((self[:loc].asi8, np.array([item.ordinal]),
self[loc:].asi8))
return self._shallow_copy(idx)
def join(self, other, how='left', level=None, return_indexers=False):
"""
See Index.join
"""
self._assert_can_do_setop(other)
result = Int64Index.join(self, other, how=how, level=level,
return_indexers=return_indexers)
if return_indexers:
result, lidx, ridx = result
return self._apply_meta(result), lidx, ridx
return self._apply_meta(result)
def _assert_can_do_setop(self, other):
super(PeriodIndex, self)._assert_can_do_setop(other)
if not isinstance(other, PeriodIndex):
raise ValueError('can only call with other PeriodIndex-ed objects')
if self.freq != other.freq:
msg = _DIFFERENT_FREQ_INDEX.format(self.freqstr, other.freqstr)
raise IncompatibleFrequency(msg)
def _wrap_union_result(self, other, result):
name = self.name if self.name == other.name else None
result = self._apply_meta(result)
result.name = name
return result
def _apply_meta(self, rawarr):
if not isinstance(rawarr, PeriodIndex):
rawarr = PeriodIndex(rawarr, freq=self.freq)
return rawarr
def _format_native_types(self, na_rep=u('NaT'), date_format=None,
**kwargs):
values = self.asobject.values
if date_format:
formatter = lambda dt: dt.strftime(date_format)
else:
formatter = lambda dt: u('%s') % dt
if self.hasnans:
mask = self._isnan
values[mask] = na_rep
imask = ~mask
values[imask] = np.array([formatter(dt) for dt
in values[imask]])
else:
values = np.array([formatter(dt) for dt in values])
return values
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if isinstance(state, dict):
super(PeriodIndex, self).__setstate__(state)
elif isinstance(state, tuple):
# < 0.15 compat
if len(state) == 2:
nd_state, own_state = state
data = np.empty(nd_state[1], dtype=nd_state[2])
np.ndarray.__setstate__(data, nd_state)
# backcompat
self.freq = Period._maybe_convert_freq(own_state[1])
else: # pragma: no cover
data = np.empty(state)
np.ndarray.__setstate__(self, state)
self._data = data
else:
raise Exception("invalid pickle state")
_unpickle_compat = __setstate__
def tz_convert(self, tz):
"""
Convert tz-aware DatetimeIndex from one time zone to another (using
pytz/dateutil)
Parameters
----------
tz : string, pytz.timezone, dateutil.tz.tzfile or None
Time zone for time. Corresponding timestamps would be converted to
time zone of the TimeSeries.
None will remove timezone holding UTC time.
Returns
-------
normalized : DatetimeIndex
Note
----
Not currently implemented for PeriodIndex
"""
raise NotImplementedError("Not yet implemented for PeriodIndex")
def tz_localize(self, tz, infer_dst=False):
"""
Localize tz-naive DatetimeIndex to given time zone (using
pytz/dateutil), or remove timezone from tz-aware DatetimeIndex
Parameters
----------
tz : string, pytz.timezone, dateutil.tz.tzfile or None
Time zone for time. Corresponding timestamps would be converted to
time zone of the TimeSeries.
None will remove timezone holding local time.
infer_dst : boolean, default False
Attempt to infer fall dst-transition hours based on order
Returns
-------
localized : DatetimeIndex
Note
----
Not currently implemented for PeriodIndex
"""
raise NotImplementedError("Not yet implemented for PeriodIndex")
PeriodIndex._add_numeric_methods_disabled()
PeriodIndex._add_logical_methods_disabled()
PeriodIndex._add_datetimelike_methods()
def _get_ordinal_range(start, end, periods, freq, mult=1):
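    # Compute the int64 ordinals of a period range from exactly two of
    # start/end/periods, inferring the frequency from start/end when not given.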
if com._count_not_none(start, end, periods) < 2:
raise ValueError('Must specify 2 of start, end, periods')
if freq is not None:
_, mult = _gfc(freq)
if start is not None:
start = Period(start, freq)
if end is not None:
end = Period(end, freq)
is_start_per = isinstance(start, Period)
is_end_per = isinstance(end, Period)
if is_start_per and is_end_per and start.freq != end.freq:
raise ValueError('Start and end must have same freq')
if (start is tslib.NaT or end is tslib.NaT):
raise ValueError('Start and end must not be NaT')
if freq is None:
if is_start_per:
freq = start.freq
elif is_end_per:
freq = end.freq
else: # pragma: no cover
raise ValueError('Could not infer freq from start/end')
if periods is not None:
periods = periods * mult
if start is None:
data = np.arange(end.ordinal - periods + mult,
end.ordinal + 1, mult,
dtype=np.int64)
else:
data = np.arange(start.ordinal, start.ordinal + periods, mult,
dtype=np.int64)
else:
data = np.arange(start.ordinal, end.ordinal + 1, mult, dtype=np.int64)
return data, freq
def _range_from_fields(year=None, month=None, quarter=None, day=None,
hour=None, minute=None, second=None, freq=None):
if hour is None:
hour = 0
if minute is None:
minute = 0
if second is None:
second = 0
if day is None:
day = 1
ordinals = []
if quarter is not None:
if freq is None:
freq = 'Q'
base = frequencies.FreqGroup.FR_QTR
else:
base, mult = _gfc(freq)
if base != frequencies.FreqGroup.FR_QTR:
raise AssertionError("base must equal FR_QTR")
year, quarter = _make_field_arrays(year, quarter)
for y, q in zip(year, quarter):
y, m = _quarter_to_myear(y, q, freq)
val = period.period_ordinal(y, m, 1, 1, 1, 1, 0, 0, base)
ordinals.append(val)
else:
base, mult = _gfc(freq)
arrays = _make_field_arrays(year, month, day, hour, minute, second)
        for y, mth, d, h, mn, s in zip(*arrays):
            ordinals.append(period.period_ordinal(y, mth, d, h, mn, s, 0, 0, base))
# -*- coding: utf-8 -*-
from copy import deepcopy
import warnings
from itertools import chain, combinations
from collections import Counter
from typing import Dict, Iterable, Iterator, List, Optional, Tuple, Union
import numpy as np
import pandas as pd
from scipy.stats import (pearsonr as pearsonR,
spearmanr as spearmanR,
kendalltau as kendallTau)
from tqdm.auto import tqdm
import xgboost
from sklearn.base import RegressorMixin, ClassifierMixin, ClusterMixin, TransformerMixin
from sklearn.model_selection import train_test_split, BaseCrossValidator, KFold, StratifiedKFold
from sklearn.preprocessing import StandardScaler, LabelEncoder
from sklearn.metrics import (r2_score as R2,
mean_squared_error as MSE,
roc_auc_score as ROCAUC,
confusion_matrix,
multilabel_confusion_matrix,
matthews_corrcoef as MCC,
explained_variance_score as eVar,
max_error as maxE,
mean_absolute_error as MAE,
mean_squared_log_error as MSLE,
mean_poisson_deviance as MPD,
mean_gamma_deviance as MGD,
)
from prodec.Descriptor import Descriptor
from prodec.Transform import Transform
from .reader import read_molecular_descriptors, read_protein_descriptors
from .preprocess import yscrambling
from .neuralnet import (BaseNN,
SingleTaskNNClassifier,
SingleTaskNNRegressor,
MultiTaskNNRegressor,
MultiTaskNNClassifier
)
pd.set_option('mode.chained_assignment', None)
def filter_molecular_descriptors(data: Union[pd.DataFrame, Iterator],
column_name: str,
keep_values: Iterable,
progress: bool = True,
total: Optional[int] = None) -> pd.DataFrame:
"""Filter the data so that the desired column contains only the desired data.
:param data: data to be filtered, either a dataframe or an iterator of chunks
:param column_name: name of the column to apply the filter on
    :param keep_values: allowed values
    :param progress: whether to display a progress bar while iterating over chunks
    :param total: total number of chunks, used to size the progress bar
    :return: a pandas dataframe
"""
if isinstance(data, pd.DataFrame):
return data[data[column_name].isin(keep_values)]
elif progress:
return pd.concat([chunk[chunk[column_name].isin(keep_values)]
for chunk in tqdm(data, total=total, desc='Loading molecular descriptors')],
axis=0)
else:
return pd.concat([chunk[chunk[column_name].isin(keep_values)]
for chunk in data],
axis=0)
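# Usage sketch (hypothetical file path and identifiers): keep only descriptor rows
# whose InChIKey appears in the activity table, reading the file chunk by chunk.
#
#   descriptors = pd.read_csv('molecular_descriptors.csv', chunksize=100_000)
#   descriptors = filter_molecular_descriptors(descriptors, 'InChIKey',
#                                              set(activities['InChIKey']), total=120)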
def model_metrics(model, y_true, x_test) -> dict:
"""Determine performance metrics of a model
Beware R2 = 1 - (Residual sum of squares) / (Total sum of squares) != (Pearson r)²
R2_0, R2_0_prime, K and k_prime are derived from
<NAME>., & <NAME>. (2010).
Predictive Quantitative Structure–Activity Relationships Modeling.
In <NAME> & <NAME> (Eds.),
Handbook of Chemoinformatics Algorithms.
Chapman and Hall/CRC.
https://www.taylorfrancis.com/books/9781420082999
:param model: model to check the performance of
:param y_true: true labels
:param x_test: testing set of features
:return: a dictionary of metrics
"""
y_pred = model.predict(x_test)
# Regression metrics
if isinstance(model, (RegressorMixin, SingleTaskNNRegressor, MultiTaskNNRegressor)):
# Slope of predicted vs observed
k = sum(xi * yi for xi, yi in zip(y_true, y_pred)) / sum(xi ** 2 for xi in y_true)
# Slope of observed vs predicted
k_prime = sum(xi * yi for xi, yi in zip(y_true, y_pred)) / sum(yi ** 2 for yi in y_pred)
# Mean averages
y_true_mean = y_true.mean()
y_pred_mean = y_pred.mean()
return {'number' : y_true.size,
'R2' : R2(y_true, y_pred) if len(y_pred) >= 2 else 0,
'MSE' : MSE(y_true, y_pred, squared=True) if len(y_pred) >= 2 else 0,
'RMSE' : MSE(y_true, y_pred, squared=False) if len(y_pred) >= 2 else 0,
'MSLE' : MSLE(y_true, y_pred) if len(y_pred) >= 2 else 0,
'RMSLE' : np.sqrt(MSLE(y_true, y_pred)) if len(y_pred) >= 2 else 0,
'MAE' : MAE(y_true, y_pred) if len(y_pred) >= 2 else 0,
'Explained Variance' : eVar(y_true, y_pred) if len(y_pred) >= 2 else 0,
'Max Error' : maxE(y_true, y_pred) if len(y_pred) >= 2 else 0,
'Mean Poisson Distrib' : MPD(y_true, y_pred) if len(y_pred) >= 2 else 0,
'Mean Gamma Distrib' : MGD(y_true, y_pred) if len(y_pred) >= 2 else 0,
'Pearson r': pearsonR(y_true, y_pred)[0] if len(y_pred) >= 2 else 0,
'Spearman r' : spearmanR(y_true, y_pred)[0] if len(y_pred) >= 2 else 0,
'Kendall tau': kendallTau(y_true, y_pred)[0] if len(y_pred) >= 2 else 0,
'R2_0 (pred. vs. obs.)' : 1 - (sum((xi - k_prime * yi) **2 for xi, yi in zip(y_true, y_pred)) / sum((xi - y_true_mean) ** 2 for xi in y_true)) if len(y_pred) >= 2 else 0,
'R\'2_0 (obs. vs. pred.)' : 1 - (sum((yi - k * xi) **2 for xi, yi in zip(y_true, y_pred)) / sum((yi - y_pred_mean) ** 2 for yi in y_pred)) if len(y_pred) >= 2 else 0,
'k slope (pred. vs obs.)' : k,
'k\' slope (obs. vs pred.)' : k_prime,
}
# Classification
elif isinstance(model, (ClassifierMixin, SingleTaskNNClassifier, MultiTaskNNClassifier)):
# Binary classification
if len(model.classes_) == 2:
tn, fp, fn, tp = confusion_matrix(y_true, y_pred, labels=model.classes_).ravel()
values = {}
try:
mcc = MCC(y_true, y_pred)
values['MCC'] = mcc
except RuntimeWarning:
pass
values[':'.join(str(x) for x in model.classes_)] = ':'.join([str(int(sum(y_true == class_))) for class_ in model.classes_])
values['ACC'] = (tp + tn) / (tp + tn + fp + fn) if (tp + tn + fp + fn) != 0 else 0
values['BACC'] = (tp / (tp + fn) + tn / (tn + fp)) / 2
values['Sensitivity'] = tp / (tp + fn) if tp + fn != 0 else 0
values['Specificity'] = tn / (tn + fp) if tn + fp != 0 else 0
values['PPV'] = tp / (tp + fp) if tp + fp != 0 else 0
values['NPV'] = tn / (tn + fn) if tn + fn != 0 else 0
values['F1'] = 2 * values['Sensitivity'] * values['PPV'] / (values['Sensitivity'] + values['PPV']) if (values['Sensitivity'] + values['PPV']) != 0 else 0
if hasattr(model, "predict_proba"): # able to predict probability
y_probas = model.predict_proba(x_test)
if y_probas.shape[1] == 1:
y_proba = y_probas.ravel()
                    values['AUC 1'] = ROCAUC(y_true, y_proba)
else:
for i in range(len(model.classes_)):
y_proba = y_probas[:, i].ravel()
try:
values['AUC %s' % model.classes_[i]] = ROCAUC(y_true, y_proba)
except ValueError:
warnings.warn('Only one class present in y_true. ROC AUC score is not defined in that case. '
'Stratify your folds to avoid such warning.')
values['AUC %s' % model.classes_[i]] = np.nan
# Multiclasses
else:
i = 0
values = {}
for contingency_matrix in multilabel_confusion_matrix(y_true, y_pred):
tn, fp, fn, tp = contingency_matrix.ravel()
try:
mcc = MCC(y_true, y_pred)
values['%s|MCC' % model.classes_[i]] = mcc
except RuntimeWarning:
pass
values['%s|number' % model.classes_[i]] = int(sum(y_true == model.classes_[i]))
values['%s|ACC' % model.classes_[i]] = (tp + tn) / (tp + tn + fp + fn) if (
tp + tn + fp + fn) != 0 else 0
values['%s|BACC' % model.classes_[i]] = (tp / (tp + fn) + tn / (tn + fp)) / 2
values['%s|Sensitivity' % model.classes_[i]] = tp / (tp + fn) if tp + fn != 0 else 0
values['%s|Specificity' % model.classes_[i]] = tn / (tn + fp) if tn + fp != 0 else 0
values['%s|PPV' % model.classes_[i]] = tp / (tp + fp) if tp + fp != 0 else 0
values['%s|NPV' % model.classes_[i]] = tn / (tn + fn) if tn + fn != 0 else 0
values['%s|F1' % model.classes_[i]] = 2 * values['%s|Sensitivity' % model.classes_[i]] * values[
'%s|PPV' % model.classes_[i]] / (values['%s|Sensitivity' % model.classes_[i]] + values[
'%s|PPV' % model.classes_[i]]) if (values['%s|Sensitivity' % model.classes_[i]] + values[
'%s|PPV' % model.classes_[i]]) != 0 else 0
i += 1
if hasattr(model, "predict_proba"): # able to predict probability
y_probas = model.predict_proba(x_test)
try:
values['AUC 1 vs 1'] = ROCAUC(y_true, y_probas, average="macro", multi_class="ovo")
values['AUC 1 vs All'] = ROCAUC(y_true, y_probas, average="macro", multi_class="ovr")
except ValueError:
warnings.warn('Only one class present in y_true. ROC AUC score is not defined in that case. '
'Stratify your folds to avoid such warning.')
values['AUC 1 vs 1'] = np.nan
values['AUC 1 vs All'] = np.nan
return values
else:
raise ValueError('model can only be classifier or regressor.')
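# Usage sketch (assumes ``fitted_model`` is a scikit-learn compliant estimator already
# fitted on the training split, with ``x_test``/``y_test`` held out):
#
#   scores = model_metrics(fitted_model, y_test, x_test)   # dict of metric -> value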
def crossvalidate_model(data: pd.DataFrame,
model: Union[RegressorMixin, ClassifierMixin],
folds: BaseCrossValidator,
groups: List[int] = None,
verbose: bool = False
) -> Tuple[pd.DataFrame, Dict[str, Union[RegressorMixin, ClassifierMixin]]]:
"""Create a machine learning model predicting values in the first column
    :param data: data containing the dependent variable (in the first column) and other features
:param model: estimator (may be classifier or regressor) to use for model building
:param folds: cross-validator
:param groups: groups to split the labels according to
:param verbose: whether to show fold progression
:return: cross-validated performance and model trained on the entire dataset
"""
X, y = data.iloc[:, 1:], data.iloc[:, 0].values.ravel()
performance = []
if verbose:
pbar = tqdm(desc='Fitting model', total=folds.n_splits + 1)
models = {}
# Perform cross-validation
for i, (train, test) in enumerate(folds.split(X, y, groups)):
if verbose:
pbar.set_description(f'Fitting model on fold {i + 1}', refresh=True)
model.fit(X.iloc[train, :], y[train])
models[f'Fold {i + 1}'] = deepcopy(model)
performance.append(model_metrics(model, y[test], X.iloc[test, :]))
if verbose:
pbar.update()
# Organize result in a dataframe
performance = pd.DataFrame(performance)
performance.index = [f'Fold {i + 1}' for i in range(folds.n_splits)]
# Add average and sd of performance
performance.loc['Mean'] = [np.mean(performance[col]) if ':' not in col else '-' for col in performance]
performance.loc['SD'] = [np.std(performance[col]) if ':' not in col else '-' for col in performance]
# Fit model on the entire dataset
if verbose:
pbar.set_description('Fitting model on entire training set', refresh=True)
model.fit(X, y)
models['Full model'] = deepcopy(model)
if verbose:
pbar.update()
return performance, models
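# Usage sketch (hypothetical dataframe ``df`` whose first column is the endpoint and
# all remaining columns are features):
#
#   performance, models = crossvalidate_model(
#       df, xgboost.XGBRegressor(verbosity=0),
#       KFold(n_splits=5, shuffle=True, random_state=1234))
#   print(performance.loc['Mean'])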
def train_test_proportional_group_split(data: pd.DataFrame,
groups: List[int],
test_size: float = 0.30,
verbose: bool = False
) -> Tuple[pd.DataFrame, pd.DataFrame, List[int], List[int]]:
"""Split the data into training and test sets according to the groups that respect most test_size
:param data: the data to be split up into training and test sets
:param groups: groups to split the data according to
:param test_size: approximate proportion of the input dataset to determine the test set
:param verbose: whether to log to stdout or not
:return: training and test sets and training and test groups
"""
counts = Counter(groups)
size = sum(counts.values())
# Get ordered permutations of groups without repetitions
permutations = list(chain.from_iterable(combinations(counts.keys(), r) for r in range(len(counts))))
# Get proportion of each permutation
proportions = [sum(counts[x] for x in p) / size for p in permutations]
# Get permutation minimizing difference to test_size
best, proportion = min(zip(permutations, proportions), key=lambda x: (x[1] - test_size) ** 2)
del counts, permutations, proportions
if verbose:
print(f'Best group permutation corresponds to {proportion:.2%} of the data')
# Get test set assignment
    assignment = np.array([group in best for group in groups], dtype=bool)
opposite = np.logical_not(assignment)
# Get training groups
t_groups = [x for x in groups if x not in best]
return data[opposite], data[assignment], t_groups, best
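# Usage sketch (hypothetical per-row cluster labels): returns the training frame,
# the test frame, and the group labels falling on each side of the split.
#
#   train, test, train_groups, test_groups = train_test_proportional_group_split(
#       df, cluster_labels, test_size=0.30, verbose=True)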
def qsar(data: pd.DataFrame,
endpoint: str = 'pchembl_value_Mean',
num_points: int = 30,
delta_activity: float = 2,
version: str = 'latest',
descriptors: str = 'mold2',
descriptor_path: Optional[str] = None,
descriptor_chunksize: Optional[int] = 50000,
activity_threshold: float = 6.5,
model: Union[RegressorMixin, ClassifierMixin] = xgboost.XGBRegressor(verbosity=0),
folds: int = 5,
stratify: bool = False,
split_by: str = 'Year',
split_year: int = 2013,
test_set_size: float = 0.30,
cluster_method: ClusterMixin = None,
custom_groups: pd.DataFrame = None,
scale: bool = False,
scale_method: TransformerMixin = StandardScaler(),
yscramble: bool = False,
random_state: int = 1234,
verbose: bool = True
) -> Tuple[pd.DataFrame,
Dict[str,
Optional[Union[TransformerMixin,
LabelEncoder,
BaseCrossValidator,
Dict[str,
Union[RegressorMixin,
ClassifierMixin]]]]]]:
"""Create QSAR models for as many targets with selected data source(s),
data quality, minimum number of datapoints and minimum activity amplitude.
:param data: Papyrus activity data
:param endpoint: value to be predicted or to derive classes from
:param num_points: minimum number of points for the activity of a target to be modelled
:param delta_activity: minimum difference between most and least active compounds for a target to be modelled
    :param descriptors: type of descriptors to be used for model training
:param descriptor_path: path to Papyrus descriptors (default: pystow's default path)
:param descriptor_chunksize: chunk size of molecular descriptors to be iteratively loaded (None disables chunking)
    :param activity_threshold: threshold activity between active and inactive compounds (ignored if using a regressor)
:param model: machine learning model to be used for QSAR modelling
:param folds: number of cross-validation folds to be performed
:param stratify: whether to stratify folds for cross validation, ignored if model is RegressorMixin
:param split_by: how should folds be determined {'random', 'Year', 'cluster', 'custom'}
If 'random', exactly test_set_size is extracted for test set.
If 'Year', the size of the test and training set are not looked at
If 'cluster' or 'custom', the groups giving proportion closest to test_set_size will be used to defined the test set
:param split_year: Year from which on the test set is extracted (ignored if split_by is not 'Year')
:param test_set_size: proportion of the dataset to be used as test set
:param cluster_method: clustering method to use to extract test set and cross-validation folds (ignored if split_by is not 'cluster')
:param custom_groups: custom groups to use to extract test set and cross-validation fold (ignored if split_by is not 'custom').
Groups must be a pandas DataFrame with only two Series. The first Series is either InChIKey or connectivity
(depending on whether stereochemistry data are being use or not). The second Series must be the group assignment
of each compound.
:param scale: should the features be scaled using the custom scaling_method
:param scale_method: scaling method to be applied to features (ignored if scale is False)
:param yscramble: should the endpoint be shuffled to compare performance to the unshuffled endpoint
:param random_state: seed to use for train/test splitting and KFold shuffling
:param verbose: log details to stdout
:return: both:
- a dataframe of the cross-validation results where each line is a fold of QSAR modelling of an accession
- a dictionary of the feature scaler (if used), label encoder (if mode is a classifier),
the data splitter for cross-validation, and for each accession in the data:
the fitted models on each cross-validation fold and the model fitted on the complete training set.
"""
if split_by.lower() not in ['year', 'random', 'cluster', 'custom']:
raise ValueError("split not supported, must be one of {'Year', 'random', 'cluster', 'custom'}")
if not isinstance(model, (RegressorMixin, ClassifierMixin)):
raise ValueError('model type can only be a Scikit-Learn compliant regressor or classifier')
warnings.filterwarnings("ignore", category=RuntimeWarning)
if isinstance(model, (xgboost.XGBRegressor, xgboost.XGBClassifier)):
warnings.filterwarnings("ignore", category=UserWarning)
model_type = 'regressor' if isinstance(model, RegressorMixin) else 'classifier'
# Keep only required fields
merge_on = 'connectivity' if 'connectivity' in data.columns else 'InChIKey'
if model_type == 'regressor':
features_to_ignore = [merge_on, 'target_id', endpoint, 'Year']
data = data[data['relation'] == '='][features_to_ignore]
else:
features_to_ignore = [merge_on, 'target_id', 'Activity_class', 'Year']
preserved = data[~data['Activity_class'].isna()]
preserved = preserved.drop(
columns=[col for col in preserved if col not in [merge_on, 'target_id', 'Activity_class', 'Year']])
active = data[data['Activity_class'].isna() & (data[endpoint] > activity_threshold)]
active = active[~active['relation'].str.contains('<')][features_to_ignore]
active.loc[:, 'Activity_class'] = 'A'
inactive = data[data['Activity_class'].isna() & (data[endpoint] <= activity_threshold)]
inactive = inactive[~inactive['relation'].str.contains('>')][features_to_ignore]
inactive.loc[:, 'Activity_class'] = 'N'
        data = pd.concat([preserved, active, inactive])
from collections import deque
from datetime import datetime
import operator
import re
import numpy as np
import pytest
import pytz
import pandas as pd
from pandas import DataFrame, MultiIndex, Series
import pandas._testing as tm
import pandas.core.common as com
from pandas.core.computation.expressions import _MIN_ELEMENTS, _NUMEXPR_INSTALLED
from pandas.tests.frame.common import _check_mixed_float, _check_mixed_int
# -------------------------------------------------------------------
# Comparisons
class TestFrameComparisons:
# Specifically _not_ flex-comparisons
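    # i.e. the ==, !=, <, <=, >, >= operators themselves rather than the
    # .eq/.ne/.lt/.le/.gt/.ge flex methods exercised further below.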
def test_frame_in_list(self):
# GH#12689 this should raise at the DataFrame level, not blocks
df = pd.DataFrame(np.random.randn(6, 4), columns=list("ABCD"))
msg = "The truth value of a DataFrame is ambiguous"
with pytest.raises(ValueError, match=msg):
df in [None]
def test_comparison_invalid(self):
def check(df, df2):
for (x, y) in [(df, df2), (df2, df)]:
# we expect the result to match Series comparisons for
# == and !=, inequalities should raise
result = x == y
expected = pd.DataFrame(
{col: x[col] == y[col] for col in x.columns},
index=x.index,
columns=x.columns,
)
tm.assert_frame_equal(result, expected)
result = x != y
expected = pd.DataFrame(
{col: x[col] != y[col] for col in x.columns},
index=x.index,
columns=x.columns,
)
tm.assert_frame_equal(result, expected)
msgs = [
r"Invalid comparison between dtype=datetime64\[ns\] and ndarray",
"invalid type promotion",
(
# npdev 1.20.0
r"The DTypes <class 'numpy.dtype\[.*\]'> and "
r"<class 'numpy.dtype\[.*\]'> do not have a common DType."
),
]
msg = "|".join(msgs)
with pytest.raises(TypeError, match=msg):
x >= y
with pytest.raises(TypeError, match=msg):
x > y
with pytest.raises(TypeError, match=msg):
x < y
with pytest.raises(TypeError, match=msg):
x <= y
# GH4968
# invalid date/int comparisons
df = pd.DataFrame(np.random.randint(10, size=(10, 1)), columns=["a"])
df["dates"] = pd.date_range("20010101", periods=len(df))
df2 = df.copy()
df2["dates"] = df["a"]
check(df, df2)
df = pd.DataFrame(np.random.randint(10, size=(10, 2)), columns=["a", "b"])
df2 = pd.DataFrame(
{
"a": pd.date_range("20010101", periods=len(df)),
"b": pd.date_range("20100101", periods=len(df)),
}
)
check(df, df2)
def test_timestamp_compare(self):
# make sure we can compare Timestamps on the right AND left hand side
# GH#4982
df = pd.DataFrame(
{
"dates1": pd.date_range("20010101", periods=10),
"dates2": pd.date_range("20010102", periods=10),
"intcol": np.random.randint(1000000000, size=10),
"floatcol": np.random.randn(10),
"stringcol": list(tm.rands(10)),
}
)
df.loc[np.random.rand(len(df)) > 0.5, "dates2"] = pd.NaT
ops = {"gt": "lt", "lt": "gt", "ge": "le", "le": "ge", "eq": "eq", "ne": "ne"}
for left, right in ops.items():
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# no nats
if left in ["eq", "ne"]:
expected = left_f(df, pd.Timestamp("20010109"))
result = right_f(pd.Timestamp("20010109"), df)
tm.assert_frame_equal(result, expected)
else:
msg = (
"'(<|>)=?' not supported between "
"instances of 'numpy.ndarray' and 'Timestamp'"
)
with pytest.raises(TypeError, match=msg):
left_f(df, pd.Timestamp("20010109"))
with pytest.raises(TypeError, match=msg):
right_f(pd.Timestamp("20010109"), df)
# nats
expected = left_f(df, pd.Timestamp("nat"))
result = right_f(pd.Timestamp("nat"), df)
tm.assert_frame_equal(result, expected)
def test_mixed_comparison(self):
# GH#13128, GH#22163 != datetime64 vs non-dt64 should be False,
# not raise TypeError
# (this appears to be fixed before GH#22163, not sure when)
df = pd.DataFrame([["1989-08-01", 1], ["1989-08-01", 2]])
other = pd.DataFrame([["a", "b"], ["c", "d"]])
result = df == other
assert not result.any().any()
result = df != other
assert result.all().all()
def test_df_boolean_comparison_error(self):
# GH#4576, GH#22880
# comparing DataFrame against list/tuple with len(obj) matching
# len(df.columns) is supported as of GH#22800
df = pd.DataFrame(np.arange(6).reshape((3, 2)))
expected = pd.DataFrame([[False, False], [True, False], [False, False]])
result = df == (2, 2)
tm.assert_frame_equal(result, expected)
result = df == [2, 2]
tm.assert_frame_equal(result, expected)
def test_df_float_none_comparison(self):
df = pd.DataFrame(
np.random.randn(8, 3), index=range(8), columns=["A", "B", "C"]
)
result = df.__eq__(None)
assert not result.any().any()
def test_df_string_comparison(self):
df = pd.DataFrame([{"a": 1, "b": "foo"}, {"a": 2, "b": "bar"}])
mask_a = df.a > 1
tm.assert_frame_equal(df[mask_a], df.loc[1:1, :])
tm.assert_frame_equal(df[-mask_a], df.loc[0:0, :])
mask_b = df.b == "foo"
tm.assert_frame_equal(df[mask_b], df.loc[0:0, :])
tm.assert_frame_equal(df[-mask_b], df.loc[1:1, :])
class TestFrameFlexComparisons:
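    # Flex comparison methods (.eq/.ne/.gt/.lt/.ge/.le), which unlike the bare
    # operators accept an ``axis`` argument and align Series/array operands.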
# TODO: test_bool_flex_frame needs a better name
def test_bool_flex_frame(self):
data = np.random.randn(5, 3)
other_data = np.random.randn(5, 3)
df = pd.DataFrame(data)
other = pd.DataFrame(other_data)
ndim_5 = np.ones(df.shape + (1, 3))
# Unaligned
def _check_unaligned_frame(meth, op, df, other):
part_o = other.loc[3:, 1:].copy()
rs = meth(part_o)
xp = op(df, part_o.reindex(index=df.index, columns=df.columns))
tm.assert_frame_equal(rs, xp)
# DataFrame
assert df.eq(df).values.all()
assert not df.ne(df).values.any()
for op in ["eq", "ne", "gt", "lt", "ge", "le"]:
f = getattr(df, op)
o = getattr(operator, op)
# No NAs
tm.assert_frame_equal(f(other), o(df, other))
_check_unaligned_frame(f, o, df, other)
# ndarray
tm.assert_frame_equal(f(other.values), o(df, other.values))
# scalar
tm.assert_frame_equal(f(0), o(df, 0))
# NAs
msg = "Unable to coerce to Series/DataFrame"
tm.assert_frame_equal(f(np.nan), o(df, np.nan))
with pytest.raises(ValueError, match=msg):
f(ndim_5)
# Series
def _test_seq(df, idx_ser, col_ser):
idx_eq = df.eq(idx_ser, axis=0)
col_eq = df.eq(col_ser)
idx_ne = df.ne(idx_ser, axis=0)
col_ne = df.ne(col_ser)
tm.assert_frame_equal(col_eq, df == pd.Series(col_ser))
tm.assert_frame_equal(col_eq, -col_ne)
tm.assert_frame_equal(idx_eq, -idx_ne)
tm.assert_frame_equal(idx_eq, df.T.eq(idx_ser).T)
tm.assert_frame_equal(col_eq, df.eq(list(col_ser)))
tm.assert_frame_equal(idx_eq, df.eq(pd.Series(idx_ser), axis=0))
tm.assert_frame_equal(idx_eq, df.eq(list(idx_ser), axis=0))
idx_gt = df.gt(idx_ser, axis=0)
col_gt = df.gt(col_ser)
idx_le = df.le(idx_ser, axis=0)
col_le = df.le(col_ser)
tm.assert_frame_equal(col_gt, df > pd.Series(col_ser))
tm.assert_frame_equal(col_gt, -col_le)
tm.assert_frame_equal(idx_gt, -idx_le)
tm.assert_frame_equal(idx_gt, df.T.gt(idx_ser).T)
idx_ge = df.ge(idx_ser, axis=0)
col_ge = df.ge(col_ser)
idx_lt = df.lt(idx_ser, axis=0)
col_lt = df.lt(col_ser)
tm.assert_frame_equal(col_ge, df >= pd.Series(col_ser))
tm.assert_frame_equal(col_ge, -col_lt)
tm.assert_frame_equal(idx_ge, -idx_lt)
tm.assert_frame_equal(idx_ge, df.T.ge(idx_ser).T)
idx_ser = pd.Series(np.random.randn(5))
col_ser = pd.Series(np.random.randn(3))
_test_seq(df, idx_ser, col_ser)
# list/tuple
_test_seq(df, idx_ser.values, col_ser.values)
# NA
df.loc[0, 0] = np.nan
rs = df.eq(df)
assert not rs.loc[0, 0]
rs = df.ne(df)
assert rs.loc[0, 0]
rs = df.gt(df)
assert not rs.loc[0, 0]
rs = df.lt(df)
assert not rs.loc[0, 0]
rs = df.ge(df)
assert not rs.loc[0, 0]
rs = df.le(df)
assert not rs.loc[0, 0]
def test_bool_flex_frame_complex_dtype(self):
# complex
arr = np.array([np.nan, 1, 6, np.nan])
arr2 = np.array([2j, np.nan, 7, None])
df = pd.DataFrame({"a": arr})
df2 = pd.DataFrame({"a": arr2})
msg = "|".join(
[
"'>' not supported between instances of '.*' and 'complex'",
r"unorderable types: .*complex\(\)", # PY35
]
)
with pytest.raises(TypeError, match=msg):
# inequalities are not well-defined for complex numbers
df.gt(df2)
with pytest.raises(TypeError, match=msg):
# regression test that we get the same behavior for Series
df["a"].gt(df2["a"])
with pytest.raises(TypeError, match=msg):
# Check that we match numpy behavior here
df.values > df2.values
rs = df.ne(df2)
assert rs.values.all()
arr3 = np.array([2j, np.nan, None])
df3 = pd.DataFrame({"a": arr3})
with pytest.raises(TypeError, match=msg):
# inequalities are not well-defined for complex numbers
df3.gt(2j)
with pytest.raises(TypeError, match=msg):
# regression test that we get the same behavior for Series
df3["a"].gt(2j)
with pytest.raises(TypeError, match=msg):
# Check that we match numpy behavior here
df3.values > 2j
def test_bool_flex_frame_object_dtype(self):
# corner, dtype=object
df1 = pd.DataFrame({"col": ["foo", np.nan, "bar"]})
df2 = pd.DataFrame({"col": ["foo", datetime.now(), "bar"]})
result = df1.ne(df2)
exp = pd.DataFrame({"col": [False, True, False]})
tm.assert_frame_equal(result, exp)
def test_flex_comparison_nat(self):
# GH 15697, GH 22163 df.eq(pd.NaT) should behave like df == pd.NaT,
# and _definitely_ not be NaN
df = pd.DataFrame([pd.NaT])
result = df == pd.NaT
# result.iloc[0, 0] is a np.bool_ object
assert result.iloc[0, 0].item() is False
result = df.eq(pd.NaT)
assert result.iloc[0, 0].item() is False
result = df != pd.NaT
assert result.iloc[0, 0].item() is True
result = df.ne(pd.NaT)
assert result.iloc[0, 0].item() is True
@pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"])
def test_df_flex_cmp_constant_return_types(self, opname):
# GH 15077, non-empty DataFrame
df = pd.DataFrame({"x": [1, 2, 3], "y": [1.0, 2.0, 3.0]})
const = 2
result = getattr(df, opname)(const).dtypes.value_counts()
tm.assert_series_equal(result, pd.Series([2], index=[np.dtype(bool)]))
@pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"])
def test_df_flex_cmp_constant_return_types_empty(self, opname):
# GH 15077 empty DataFrame
df = pd.DataFrame({"x": [1, 2, 3], "y": [1.0, 2.0, 3.0]})
const = 2
empty = df.iloc[:0]
result = getattr(empty, opname)(const).dtypes.value_counts()
tm.assert_series_equal(result, pd.Series([2], index=[np.dtype(bool)]))
def test_df_flex_cmp_ea_dtype_with_ndarray_series(self):
ii = pd.IntervalIndex.from_breaks([1, 2, 3])
df = pd.DataFrame({"A": ii, "B": ii})
ser = pd.Series([0, 0])
res = df.eq(ser, axis=0)
expected = pd.DataFrame({"A": [False, False], "B": [False, False]})
tm.assert_frame_equal(res, expected)
ser2 = pd.Series([1, 2], index=["A", "B"])
res2 = df.eq(ser2, axis=1)
tm.assert_frame_equal(res2, expected)
# -------------------------------------------------------------------
# Arithmetic
class TestFrameFlexArithmetic:
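    # Flex arithmetic methods (.add/.sub/.mul/.div/.floordiv/.pow, ...) with their
    # ``axis``, ``level`` and ``fill_value`` arguments.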
def test_floordiv_axis0(self):
        # make sure df.floordiv(ser, axis=0) matches the column-wise result
arr = np.arange(3)
ser = pd.Series(arr)
df = pd.DataFrame({"A": ser, "B": ser})
result = df.floordiv(ser, axis=0)
expected = pd.DataFrame({col: df[col] // ser for col in df.columns})
tm.assert_frame_equal(result, expected)
result2 = df.floordiv(ser.values, axis=0)
tm.assert_frame_equal(result2, expected)
@pytest.mark.skipif(not _NUMEXPR_INSTALLED, reason="numexpr not installed")
@pytest.mark.parametrize("opname", ["floordiv", "pow"])
def test_floordiv_axis0_numexpr_path(self, opname):
# case that goes through numexpr and has to fall back to masked_arith_op
op = getattr(operator, opname)
arr = np.arange(_MIN_ELEMENTS + 100).reshape(_MIN_ELEMENTS // 100 + 1, -1) * 100
df = pd.DataFrame(arr)
df["C"] = 1.0
ser = df[0]
result = getattr(df, opname)(ser, axis=0)
expected = pd.DataFrame({col: op(df[col], ser) for col in df.columns})
tm.assert_frame_equal(result, expected)
result2 = getattr(df, opname)(ser.values, axis=0)
tm.assert_frame_equal(result2, expected)
def test_df_add_td64_columnwise(self):
# GH 22534 Check that column-wise addition broadcasts correctly
dti = pd.date_range("2016-01-01", periods=10)
tdi = pd.timedelta_range("1", periods=10)
tser = pd.Series(tdi)
df = pd.DataFrame({0: dti, 1: tdi})
result = df.add(tser, axis=0)
expected = pd.DataFrame({0: dti + tdi, 1: tdi + tdi})
tm.assert_frame_equal(result, expected)
def test_df_add_flex_filled_mixed_dtypes(self):
# GH 19611
dti = pd.date_range("2016-01-01", periods=3)
ser = pd.Series(["1 Day", "NaT", "2 Days"], dtype="timedelta64[ns]")
df = pd.DataFrame({"A": dti, "B": ser})
other = pd.DataFrame({"A": ser, "B": ser})
fill = pd.Timedelta(days=1).to_timedelta64()
result = df.add(other, fill_value=fill)
expected = pd.DataFrame(
{
"A": pd.Series(
["2016-01-02", "2016-01-03", "2016-01-05"], dtype="datetime64[ns]"
),
"B": ser * 2,
}
)
tm.assert_frame_equal(result, expected)
def test_arith_flex_frame(
self, all_arithmetic_operators, float_frame, mixed_float_frame
):
# one instance of parametrized fixture
op = all_arithmetic_operators
def f(x, y):
# r-versions not in operator-stdlib; get op without "r" and invert
if op.startswith("__r"):
return getattr(operator, op.replace("__r", "__"))(y, x)
return getattr(operator, op)(x, y)
result = getattr(float_frame, op)(2 * float_frame)
expected = f(float_frame, 2 * float_frame)
tm.assert_frame_equal(result, expected)
# vs mix float
result = getattr(mixed_float_frame, op)(2 * mixed_float_frame)
expected = f(mixed_float_frame, 2 * mixed_float_frame)
tm.assert_frame_equal(result, expected)
_check_mixed_float(result, dtype=dict(C=None))
@pytest.mark.parametrize("op", ["__add__", "__sub__", "__mul__"])
def test_arith_flex_frame_mixed(
self, op, int_frame, mixed_int_frame, mixed_float_frame
):
f = getattr(operator, op)
# vs mix int
result = getattr(mixed_int_frame, op)(2 + mixed_int_frame)
expected = f(mixed_int_frame, 2 + mixed_int_frame)
# no overflow in the uint
dtype = None
if op in ["__sub__"]:
dtype = dict(B="uint64", C=None)
elif op in ["__add__", "__mul__"]:
dtype = dict(C=None)
tm.assert_frame_equal(result, expected)
_check_mixed_int(result, dtype=dtype)
# vs mix float
result = getattr(mixed_float_frame, op)(2 * mixed_float_frame)
expected = f(mixed_float_frame, 2 * mixed_float_frame)
tm.assert_frame_equal(result, expected)
_check_mixed_float(result, dtype=dict(C=None))
# vs plain int
result = getattr(int_frame, op)(2 * int_frame)
expected = f(int_frame, 2 * int_frame)
tm.assert_frame_equal(result, expected)
def test_arith_flex_frame_raise(self, all_arithmetic_operators, float_frame):
# one instance of parametrized fixture
op = all_arithmetic_operators
# Check that arrays with dim >= 3 raise
for dim in range(3, 6):
arr = np.ones((1,) * dim)
msg = "Unable to coerce to Series/DataFrame"
with pytest.raises(ValueError, match=msg):
getattr(float_frame, op)(arr)
def test_arith_flex_frame_corner(self, float_frame):
const_add = float_frame.add(1)
tm.assert_frame_equal(const_add, float_frame + 1)
# corner cases
result = float_frame.add(float_frame[:0])
tm.assert_frame_equal(result, float_frame * np.nan)
result = float_frame[:0].add(float_frame)
tm.assert_frame_equal(result, float_frame * np.nan)
with pytest.raises(NotImplementedError, match="fill_value"):
float_frame.add(float_frame.iloc[0], fill_value=3)
with pytest.raises(NotImplementedError, match="fill_value"):
float_frame.add(float_frame.iloc[0], axis="index", fill_value=3)
def test_arith_flex_series(self, simple_frame):
df = simple_frame
row = df.xs("a")
col = df["two"]
# after arithmetic refactor, add truediv here
ops = ["add", "sub", "mul", "mod"]
for op in ops:
f = getattr(df, op)
op = getattr(operator, op)
tm.assert_frame_equal(f(row), op(df, row))
tm.assert_frame_equal(f(col, axis=0), op(df.T, col).T)
# special case for some reason
tm.assert_frame_equal(df.add(row, axis=None), df + row)
# cases which will be refactored after big arithmetic refactor
tm.assert_frame_equal(df.div(row), df / row)
tm.assert_frame_equal(df.div(col, axis=0), (df.T / col).T)
# broadcasting issue in GH 7325
df = pd.DataFrame(np.arange(3 * 2).reshape((3, 2)), dtype="int64")
expected = pd.DataFrame([[np.nan, np.inf], [1.0, 1.5], [1.0, 1.25]])
result = df.div(df[0], axis="index")
tm.assert_frame_equal(result, expected)
df = pd.DataFrame(np.arange(3 * 2).reshape((3, 2)), dtype="float64")
expected = pd.DataFrame([[np.nan, np.inf], [1.0, 1.5], [1.0, 1.25]])
result = df.div(df[0], axis="index")
tm.assert_frame_equal(result, expected)
def test_arith_flex_zero_len_raises(self):
# GH 19522 passing fill_value to frame flex arith methods should
# raise even in the zero-length special cases
ser_len0 = pd.Series([], dtype=object)
df_len0 = pd.DataFrame(columns=["A", "B"])
df = pd.DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
with pytest.raises(NotImplementedError, match="fill_value"):
df.add(ser_len0, fill_value="E")
with pytest.raises(NotImplementedError, match="fill_value"):
df_len0.sub(df["A"], axis=None, fill_value=3)
def test_flex_add_scalar_fill_value(self):
# GH#12723
dat = np.array([0, 1, np.nan, 3, 4, 5], dtype="float")
df = pd.DataFrame({"foo": dat}, index=range(6))
exp = df.fillna(0).add(2)
res = df.add(2, fill_value=0)
tm.assert_frame_equal(res, exp)
class TestFrameArithmetic:
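    # Arithmetic through the operators themselves (+, -, *, ...), including
    # broadcasting of row-like and column-like 2-D arrays and dtype handling.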
def test_td64_op_nat_casting(self):
# Make sure we don't accidentally treat timedelta64(NaT) as datetime64
# when calling dispatch_to_series in DataFrame arithmetic
ser = pd.Series(["NaT", "NaT"], dtype="timedelta64[ns]")
df = pd.DataFrame([[1, 2], [3, 4]])
result = df * ser
expected = pd.DataFrame({0: ser, 1: ser})
tm.assert_frame_equal(result, expected)
def test_df_add_2d_array_rowlike_broadcasts(self):
# GH#23000
arr = np.arange(6).reshape(3, 2)
df = pd.DataFrame(arr, columns=[True, False], index=["A", "B", "C"])
rowlike = arr[[1], :] # shape --> (1, ncols)
assert rowlike.shape == (1, df.shape[1])
expected = pd.DataFrame(
[[2, 4], [4, 6], [6, 8]],
columns=df.columns,
index=df.index,
# specify dtype explicitly to avoid failing
# on 32bit builds
dtype=arr.dtype,
)
result = df + rowlike
tm.assert_frame_equal(result, expected)
result = rowlike + df
tm.assert_frame_equal(result, expected)
def test_df_add_2d_array_collike_broadcasts(self):
# GH#23000
arr = np.arange(6).reshape(3, 2)
df = pd.DataFrame(arr, columns=[True, False], index=["A", "B", "C"])
collike = arr[:, [1]] # shape --> (nrows, 1)
assert collike.shape == (df.shape[0], 1)
expected = pd.DataFrame(
[[1, 2], [5, 6], [9, 10]],
columns=df.columns,
index=df.index,
# specify dtype explicitly to avoid failing
# on 32bit builds
dtype=arr.dtype,
)
result = df + collike
tm.assert_frame_equal(result, expected)
result = collike + df
tm.assert_frame_equal(result, expected)
def test_df_arith_2d_array_rowlike_broadcasts(self, all_arithmetic_operators):
# GH#23000
opname = all_arithmetic_operators
arr = np.arange(6).reshape(3, 2)
df = pd.DataFrame(arr, columns=[True, False], index=["A", "B", "C"])
rowlike = arr[[1], :] # shape --> (1, ncols)
assert rowlike.shape == (1, df.shape[1])
exvals = [
getattr(df.loc["A"], opname)(rowlike.squeeze()),
getattr(df.loc["B"], opname)(rowlike.squeeze()),
getattr(df.loc["C"], opname)(rowlike.squeeze()),
]
expected = pd.DataFrame(exvals, columns=df.columns, index=df.index)
result = getattr(df, opname)(rowlike)
tm.assert_frame_equal(result, expected)
def test_df_arith_2d_array_collike_broadcasts(self, all_arithmetic_operators):
# GH#23000
opname = all_arithmetic_operators
arr = np.arange(6).reshape(3, 2)
df = pd.DataFrame(arr, columns=[True, False], index=["A", "B", "C"])
collike = arr[:, [1]] # shape --> (nrows, 1)
assert collike.shape == (df.shape[0], 1)
exvals = {
True: getattr(df[True], opname)(collike.squeeze()),
False: getattr(df[False], opname)(collike.squeeze()),
}
dtype = None
if opname in ["__rmod__", "__rfloordiv__"]:
# Series ops may return mixed int/float dtypes in cases where
# DataFrame op will return all-float. So we upcast `expected`
dtype = np.common_type(*[x.values for x in exvals.values()])
expected = pd.DataFrame(exvals, columns=df.columns, index=df.index, dtype=dtype)
result = getattr(df, opname)(collike)
tm.assert_frame_equal(result, expected)
def test_df_bool_mul_int(self):
# GH 22047, GH 22163 multiplication by 1 should result in int dtype,
# not object dtype
df = pd.DataFrame([[False, True], [False, False]])
result = df * 1
# On appveyor this comes back as np.int32 instead of np.int64,
# so we check dtype.kind instead of just dtype
kinds = result.dtypes.apply(lambda x: x.kind)
assert (kinds == "i").all()
result = 1 * df
kinds = result.dtypes.apply(lambda x: x.kind)
assert (kinds == "i").all()
def test_arith_mixed(self):
left = pd.DataFrame({"A": ["a", "b", "c"], "B": [1, 2, 3]})
result = left + left
expected = pd.DataFrame({"A": ["aa", "bb", "cc"], "B": [2, 4, 6]})
tm.assert_frame_equal(result, expected)
def test_arith_getitem_commute(self):
df = pd.DataFrame({"A": [1.1, 3.3], "B": [2.5, -3.9]})
def _test_op(df, op):
result = op(df, 1)
if not df.columns.is_unique:
raise ValueError("Only unique columns supported by this test")
for col in result.columns:
tm.assert_series_equal(result[col], op(df[col], 1))
_test_op(df, operator.add)
_test_op(df, operator.sub)
_test_op(df, operator.mul)
_test_op(df, operator.truediv)
_test_op(df, operator.floordiv)
_test_op(df, operator.pow)
_test_op(df, lambda x, y: y + x)
_test_op(df, lambda x, y: y - x)
_test_op(df, lambda x, y: y * x)
_test_op(df, lambda x, y: y / x)
_test_op(df, lambda x, y: y ** x)
_test_op(df, lambda x, y: x + y)
_test_op(df, lambda x, y: x - y)
_test_op(df, lambda x, y: x * y)
_test_op(df, lambda x, y: x / y)
_test_op(df, lambda x, y: x ** y)
@pytest.mark.parametrize(
"values", [[1, 2], (1, 2), np.array([1, 2]), range(1, 3), deque([1, 2])]
)
def test_arith_alignment_non_pandas_object(self, values):
# GH#17901
df = pd.DataFrame({"A": [1, 1], "B": [1, 1]})
expected = pd.DataFrame({"A": [2, 2], "B": [3, 3]})
result = df + values
tm.assert_frame_equal(result, expected)
def test_arith_non_pandas_object(self):
df = pd.DataFrame(
np.arange(1, 10, dtype="f8").reshape(3, 3),
columns=["one", "two", "three"],
index=["a", "b", "c"],
)
val1 = df.xs("a").values
added = pd.DataFrame(df.values + val1, index=df.index, columns=df.columns)
tm.assert_frame_equal(df + val1, added)
added = pd.DataFrame((df.values.T + val1).T, index=df.index, columns=df.columns)
tm.assert_frame_equal(df.add(val1, axis=0), added)
val2 = list(df["two"])
added = pd.DataFrame(df.values + val2, index=df.index, columns=df.columns)
tm.assert_frame_equal(df + val2, added)
added = pd.DataFrame((df.values.T + val2).T, index=df.index, columns=df.columns)
tm.assert_frame_equal(df.add(val2, axis="index"), added)
val3 = np.random.rand(*df.shape)
added = pd.DataFrame(df.values + val3, index=df.index, columns=df.columns)
tm.assert_frame_equal(df.add(val3), added)
def test_operations_with_interval_categories_index(self, all_arithmetic_operators):
# GH#27415
op = all_arithmetic_operators
ind = pd.CategoricalIndex(pd.interval_range(start=0.0, end=2.0))
data = [1, 2]
df = pd.DataFrame([data], columns=ind)
num = 10
result = getattr(df, op)(num)
expected = pd.DataFrame([[getattr(n, op)(num) for n in data]], columns=ind)
tm.assert_frame_equal(result, expected)
def test_frame_with_frame_reindex(self):
# GH#31623
df = pd.DataFrame(
{
"foo": [pd.Timestamp("2019"), pd.Timestamp("2020")],
"bar": [pd.Timestamp("2018"), pd.Timestamp("2021")],
},
columns=["foo", "bar"],
)
df2 = df[["foo"]]
result = df - df2
expected = pd.DataFrame(
{"foo": [pd.Timedelta(0), pd.Timedelta(0)], "bar": [np.nan, np.nan]},
columns=["bar", "foo"],
)
tm.assert_frame_equal(result, expected)
def test_frame_with_zero_len_series_corner_cases():
# GH#28600
# easy all-float case
df = pd.DataFrame(np.random.randn(6).reshape(3, 2), columns=["A", "B"])
ser = pd.Series(dtype=np.float64)
result = df + ser
expected = pd.DataFrame(df.values * np.nan, columns=df.columns)
tm.assert_frame_equal(result, expected)
result = df == ser
expected = pd.DataFrame(False, index=df.index, columns=df.columns)
tm.assert_frame_equal(result, expected)
# non-float case should not raise on comparison
df2 = pd.DataFrame(df.values.view("M8[ns]"), columns=df.columns)
result = df2 == ser
expected = pd.DataFrame(False, index=df.index, columns=df.columns)
tm.assert_frame_equal(result, expected)
def test_zero_len_frame_with_series_corner_cases():
# GH#28600
df = pd.DataFrame(columns=["A", "B"], dtype=np.float64)
ser = pd.Series([1, 2], index=["A", "B"])
result = df + ser
expected = df
tm.assert_frame_equal(result, expected)
def test_frame_single_columns_object_sum_axis_1():
# GH 13758
data = {
"One": pd.Series(["A", 1.2, np.nan]),
}
df = pd.DataFrame(data)
result = df.sum(axis=1)
expected = pd.Series(["A", 1.2, 0])
tm.assert_series_equal(result, expected)
# -------------------------------------------------------------------
# Unsorted
# These arithmetic tests were previously in other files, eventually
# should be parametrized and put into tests.arithmetic
class TestFrameArithmeticUnsorted:
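    # Alignment, timezone handling and mixed-dtype behaviour of arithmetic and
    # comparison ops; see the note above about moving these to tests.arithmetic.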
def test_frame_add_tz_mismatch_converts_to_utc(self):
rng = pd.date_range("1/1/2011", periods=10, freq="H", tz="US/Eastern")
df = pd.DataFrame(np.random.randn(len(rng)), index=rng, columns=["a"])
df_moscow = df.tz_convert("Europe/Moscow")
result = df + df_moscow
assert result.index.tz is pytz.utc
result = df_moscow + df
assert result.index.tz is pytz.utc
def test_align_frame(self):
rng = pd.period_range("1/1/2000", "1/1/2010", freq="A")
ts = pd.DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts + ts[::2]
expected = ts + ts
expected.values[1::2] = np.nan
tm.assert_frame_equal(result, expected)
half = ts[::2]
result = ts + half.take(np.random.permutation(len(half)))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"op", [operator.add, operator.sub, operator.mul, operator.truediv]
)
def test_operators_none_as_na(self, op):
df = DataFrame(
{"col1": [2, 5.0, 123, None], "col2": [1, 2, 3, 4]}, dtype=object
)
# since filling converts dtypes from object, changed expected to be
# object
filled = df.fillna(np.nan)
result = op(df, 3)
expected = op(filled, 3).astype(object)
expected[com.isna(expected)] = None
tm.assert_frame_equal(result, expected)
result = op(df, df)
expected = op(filled, filled).astype(object)
expected[com.isna(expected)] = None
tm.assert_frame_equal(result, expected)
result = op(df, df.fillna(7))
tm.assert_frame_equal(result, expected)
result = op(df.fillna(7), df)
tm.assert_frame_equal(result, expected, check_dtype=False)
@pytest.mark.parametrize("op,res", [("__eq__", False), ("__ne__", True)])
# TODO: not sure what's correct here.
@pytest.mark.filterwarnings("ignore:elementwise:FutureWarning")
def test_logical_typeerror_with_non_valid(self, op, res, float_frame):
# we are comparing floats vs a string
result = getattr(float_frame, op)("foo")
assert bool(result.all().all()) is res
def test_binary_ops_align(self):
# test aligning binary ops
# GH 6681
index = MultiIndex.from_product(
[list("abc"), ["one", "two", "three"], [1, 2, 3]],
names=["first", "second", "third"],
)
df = DataFrame(
np.arange(27 * 3).reshape(27, 3),
index=index,
columns=["value1", "value2", "value3"],
).sort_index()
idx = pd.IndexSlice
for op in ["add", "sub", "mul", "div", "truediv"]:
opa = getattr(operator, op, None)
if opa is None:
continue
x = Series([1.0, 10.0, 100.0], [1, 2, 3])
result = getattr(df, op)(x, level="third", axis=0)
expected = pd.concat(
[opa(df.loc[idx[:, :, i], :], v) for i, v in x.items()]
).sort_index()
tm.assert_frame_equal(result, expected)
x = Series([1.0, 10.0], ["two", "three"])
result = getattr(df, op)(x, level="second", axis=0)
expected = (
pd.concat([opa(df.loc[idx[:, i], :], v) for i, v in x.items()])
.reindex_like(df)
.sort_index()
)
tm.assert_frame_equal(result, expected)
# GH9463 (alignment level of dataframe with series)
midx = MultiIndex.from_product([["A", "B"], ["a", "b"]])
df = DataFrame(np.ones((2, 4), dtype="int64"), columns=midx)
s = pd.Series({"a": 1, "b": 2})
df2 = df.copy()
df2.columns.names = ["lvl0", "lvl1"]
s2 = s.copy()
s2.index.name = "lvl1"
# different cases of integer/string level names:
res1 = df.mul(s, axis=1, level=1)
res2 = df.mul(s2, axis=1, level=1)
res3 = df2.mul(s, axis=1, level=1)
res4 = df2.mul(s2, axis=1, level=1)
res5 = df2.mul(s, axis=1, level="lvl1")
res6 = df2.mul(s2, axis=1, level="lvl1")
exp = DataFrame(
np.array([[1, 2, 1, 2], [1, 2, 1, 2]], dtype="int64"), columns=midx
)
for res in [res1, res2]:
tm.assert_frame_equal(res, exp)
exp.columns.names = ["lvl0", "lvl1"]
for res in [res3, res4, res5, res6]:
tm.assert_frame_equal(res, exp)
def test_add_with_dti_mismatched_tzs(self):
base = pd.DatetimeIndex(["2011-01-01", "2011-01-02", "2011-01-03"], tz="UTC")
idx1 = base.tz_convert("Asia/Tokyo")[:2]
idx2 = base.tz_convert("US/Eastern")[1:]
df1 = DataFrame({"A": [1, 2]}, index=idx1)
df2 = DataFrame({"A": [1, 1]}, index=idx2)
exp = DataFrame({"A": [np.nan, 3, np.nan]}, index=base)
tm.assert_frame_equal(df1 + df2, exp)
def test_combineFrame(self, float_frame, mixed_float_frame, mixed_int_frame):
frame_copy = float_frame.reindex(float_frame.index[::2])
del frame_copy["D"]
frame_copy["C"][:5] = np.nan
added = float_frame + frame_copy
indexer = added["A"].dropna().index
exp = (float_frame["A"] * 2).copy()
tm.assert_series_equal(added["A"].dropna(), exp.loc[indexer])
exp.loc[~exp.index.isin(indexer)] = np.nan
tm.assert_series_equal(added["A"], exp.loc[added["A"].index])
assert np.isnan(added["C"].reindex(frame_copy.index)[:5]).all()
# assert(False)
assert np.isnan(added["D"]).all()
self_added = float_frame + float_frame
tm.assert_index_equal(self_added.index, float_frame.index)
added_rev = frame_copy + float_frame
assert np.isnan(added["D"]).all()
assert np.isnan(added_rev["D"]).all()
# corner cases
# empty
plus_empty = float_frame + DataFrame()
assert np.isnan(plus_empty.values).all()
empty_plus = DataFrame() + float_frame
assert np.isnan(empty_plus.values).all()
empty_empty = DataFrame() + DataFrame()
assert empty_empty.empty
# out of order
reverse = float_frame.reindex(columns=float_frame.columns[::-1])
tm.assert_frame_equal(reverse + float_frame, float_frame * 2)
# mix vs float64, upcast
added = float_frame + mixed_float_frame
_check_mixed_float(added, dtype="float64")
added = mixed_float_frame + float_frame
_check_mixed_float(added, dtype="float64")
# mix vs mix
added = mixed_float_frame + mixed_float_frame
_check_mixed_float(added, dtype=dict(C=None))
# with int
added = float_frame + mixed_int_frame
_check_mixed_float(added, dtype="float64")
def test_combine_series(
self, float_frame, mixed_float_frame, mixed_int_frame, datetime_frame
):
# Series
series = float_frame.xs(float_frame.index[0])
added = float_frame + series
for key, s in added.items():
tm.assert_series_equal(s, float_frame[key] + series[key])
larger_series = series.to_dict()
larger_series["E"] = 1
larger_series = Series(larger_series)
larger_added = float_frame + larger_series
for key, s in float_frame.items():
tm.assert_series_equal(larger_added[key], s + series[key])
assert "E" in larger_added
assert np.isnan(larger_added["E"]).all()
# no upcast needed
added = mixed_float_frame + series
assert np.all(added.dtypes == series.dtype)
# vs mix (upcast) as needed
added = mixed_float_frame + series.astype("float32")
_check_mixed_float(added, dtype=dict(C=None))
added = mixed_float_frame + series.astype("float16")
_check_mixed_float(added, dtype=dict(C=None))
# FIXME: don't leave commented-out
# these raise with numexpr.....as we are adding an int64 to an
# uint64....weird vs int
# added = mixed_int_frame + (100*series).astype('int64')
# _check_mixed_int(added, dtype = dict(A = 'int64', B = 'float64', C =
# 'int64', D = 'int64'))
# added = mixed_int_frame + (100*series).astype('int32')
# _check_mixed_int(added, dtype = dict(A = 'int32', B = 'float64', C =
# 'int32', D = 'int64'))
# TimeSeries
ts = datetime_frame["A"]
# 10890
# we no longer allow auto timeseries broadcasting
# and require explicit broadcasting
added = datetime_frame.add(ts, axis="index")
for key, col in datetime_frame.items():
result = col + ts
tm.assert_series_equal(added[key], result, check_names=False)
assert added[key].name == key
if col.name == ts.name:
assert result.name == "A"
else:
assert result.name is None
smaller_frame = datetime_frame[:-5]
smaller_added = smaller_frame.add(ts, axis="index")
tm.assert_index_equal(smaller_added.index, datetime_frame.index)
smaller_ts = ts[:-5]
smaller_added2 = datetime_frame.add(smaller_ts, axis="index")
tm.assert_frame_equal(smaller_added, smaller_added2)
# length 0, result is all-nan
result = datetime_frame.add(ts[:0], axis="index")
expected = DataFrame(
np.nan, index=datetime_frame.index, columns=datetime_frame.columns
)
tm.assert_frame_equal(result, expected)
# Frame is all-nan
result = datetime_frame[:0].add(ts, axis="index")
expected = DataFrame(
np.nan, index=datetime_frame.index, columns=datetime_frame.columns
)
tm.assert_frame_equal(result, expected)
# empty but with non-empty index
frame = datetime_frame[:1].reindex(columns=[])
result = frame.mul(ts, axis="index")
assert len(result) == len(ts)
def test_combineFunc(self, float_frame, mixed_float_frame):
result = float_frame * 2
tm.assert_numpy_array_equal(result.values, float_frame.values * 2)
# vs mix
result = mixed_float_frame * 2
for c, s in result.items():
tm.assert_numpy_array_equal(s.values, mixed_float_frame[c].values * 2)
_check_mixed_float(result, dtype=dict(C=None))
result = DataFrame() * 2
assert result.index.equals(DataFrame().index)
assert len(result.columns) == 0
def test_comparisons(self, simple_frame, float_frame):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame()
row = simple_frame.xs("a")
ndim_5 = np.ones(df1.shape + (1, 1, 1))
def test_comp(func):
result = func(df1, df2)
tm.assert_numpy_array_equal(result.values, func(df1.values, df2.values))
msg = (
"Unable to coerce to Series/DataFrame, "
"dimension must be <= 2: (30, 4, 1, 1, 1)"
)
with pytest.raises(ValueError, match=re.escape(msg)):
func(df1, ndim_5)
result2 = func(simple_frame, row)
tm.assert_numpy_array_equal(
result2.values, func(simple_frame.values, row.values)
)
result3 = func(float_frame, 0)
tm.assert_numpy_array_equal(result3.values, func(float_frame.values, 0))
msg = "Can only compare identically-labeled DataFrame"
with pytest.raises(ValueError, match=msg):
func(simple_frame, simple_frame[:2])
test_comp(operator.eq)
test_comp(operator.ne)
test_comp(operator.lt)
test_comp(operator.gt)
test_comp(operator.ge)
test_comp(operator.le)
def test_strings_to_numbers_comparisons_raises(self, compare_operators_no_eq_ne):
# GH 11565
df = DataFrame(
{x: {"x": "foo", "y": "bar", "z": "baz"} for x in ["a", "b", "c"]}
)
f = getattr(operator, compare_operators_no_eq_ne)
msg = "'[<>]=?' not supported between instances of 'str' and 'int'"
with pytest.raises(TypeError, match=msg):
f(df, 0)
def test_comparison_protected_from_errstate(self):
missing_df = tm.makeDataFrame()
missing_df.iloc[0]["A"] = np.nan
with np.errstate(invalid="ignore"):
expected = missing_df.values < 0
with np.errstate(invalid="raise"):
result = (missing_df < 0).values
tm.assert_numpy_array_equal(result, expected)
def test_boolean_comparison(self):
# GH 4576
# boolean comparisons with a tuple/list give unexpected results
df = DataFrame(np.arange(6).reshape((3, 2)))
b = np.array([2, 2])
b_r = np.atleast_2d([2, 2])
b_c = b_r.T
lst = [2, 2, 2]
tup = tuple(lst)
# gt
expected = DataFrame([[False, False], [False, True], [True, True]])
result = df > b
tm.assert_frame_equal(result, expected)
result = df.values > b
tm.assert_numpy_array_equal(result, expected.values)
msg1d = "Unable to coerce to Series, length must be 2: given 3"
msg2d = "Unable to coerce to DataFrame, shape must be"
msg2db = "operands could not be broadcast together with shapes"
with pytest.raises(ValueError, match=msg1d):
# wrong shape
df > lst
with pytest.raises(ValueError, match=msg1d):
# wrong shape
result = df > tup
# broadcasts like ndarray (GH#23000)
result = df > b_r
tm.assert_frame_equal(result, expected)
result = df.values > b_r
tm.assert_numpy_array_equal(result, expected.values)
with pytest.raises(ValueError, match=msg2d):
df > b_c
with pytest.raises(ValueError, match=msg2db):
df.values > b_c
# ==
expected = DataFrame([[False, False], [True, False], [False, False]])
result = df == b
tm.assert_frame_equal(result, expected)
with pytest.raises(ValueError, match=msg1d):
result = df == lst
with pytest.raises(ValueError, match=msg1d):
result = df == tup
# broadcasts like ndarray (GH#23000)
result = df == b_r
tm.assert_frame_equal(result, expected)
result = df.values == b_r
tm.assert_numpy_array_equal(result, expected.values)
with pytest.raises(ValueError, match=msg2d):
df == b_c
assert df.values.shape != b_c.shape
# with alignment
df = DataFrame(
np.arange(6).reshape((3, 2)), columns=list("AB"), index=list("abc")
)
expected.index = df.index
expected.columns = df.columns
with pytest.raises(ValueError, match=msg1d):
result = df == lst
with pytest.raises(ValueError, match=msg1d):
result = df == tup
def test_inplace_ops_alignment(self):
# inplace ops / ops alignment
# GH 8511
columns = list("abcdefg")
X_orig = DataFrame(
np.arange(10 * len(columns)).reshape(-1, len(columns)),
columns=columns,
index=range(10),
)
Z = 100 * X_orig.iloc[:, 1:-1].copy()
block1 = list("bedcf")
subs = list("bcdef")
# add
X = X_orig.copy()
result1 = (X[block1] + Z).reindex(columns=subs)
X[block1] += Z
result2 = X.reindex(columns=subs)
X = X_orig.copy()
result3 = (X[block1] + Z[block1]).reindex(columns=subs)
X[block1] += Z[block1]
result4 = X.reindex(columns=subs)
tm.assert_frame_equal(result1, result2)
tm.assert_frame_equal(result1, result3)
tm.assert_frame_equal(result1, result4)
# sub
X = X_orig.copy()
result1 = (X[block1] - Z).reindex(columns=subs)
X[block1] -= Z
result2 = X.reindex(columns=subs)
X = X_orig.copy()
result3 = (X[block1] - Z[block1]).reindex(columns=subs)
X[block1] -= Z[block1]
result4 = X.reindex(columns=subs)
tm.assert_frame_equal(result1, result2)
tm.assert_frame_equal(result1, result3)
tm.assert_frame_equal(result1, result4)
def test_inplace_ops_identity(self):
# GH 5104
# make sure that we are actually changing the object
s_orig = Series([1, 2, 3])
df_orig = DataFrame(np.random.randint(0, 5, size=10).reshape(-1, 5))
# no dtype change
s = s_orig.copy()
s2 = s
s += 1
tm.assert_series_equal(s, s2)
tm.assert_series_equal(s_orig + 1, s)
assert s is s2
assert s._mgr is s2._mgr
df = df_orig.copy()
df2 = df
df += 1
tm.assert_frame_equal(df, df2)
tm.assert_frame_equal(df_orig + 1, df)
assert df is df2
assert df._mgr is df2._mgr
# dtype change
s = s_orig.copy()
s2 = s
s += 1.5
tm.assert_series_equal(s, s2)
tm.assert_series_equal(s_orig + 1.5, s)
df = df_orig.copy()
df2 = df
df += 1.5
tm.assert_frame_equal(df, df2)
tm.assert_frame_equal(df_orig + 1.5, df)
assert df is df2
assert df._mgr is df2._mgr
# mixed dtype
arr = np.random.randint(0, 10, size=5)
df_orig = DataFrame({"A": arr.copy(), "B": "foo"})
df = df_orig.copy()
df2 = df
df["A"] += 1
expected = DataFrame({"A": arr.copy() + 1, "B": "foo"})
tm.assert_frame_equal(df, expected)
tm.assert_frame_equal(df2, expected)
assert df._mgr is df2._mgr
df = df_orig.copy()
df2 = df
df["A"] += 1.5
expected = DataFrame({"A": arr.copy() + 1.5, "B": "foo"})
tm.assert_frame_equal(df, expected)
tm.assert_frame_equal(df2, expected)
assert df._mgr is df2._mgr
@pytest.mark.parametrize(
"op",
[
"add",
"and",
"div",
"floordiv",
"mod",
"mul",
"or",
"pow",
"sub",
"truediv",
"xor",
],
)
def test_inplace_ops_identity2(self, op):
if op == "div":
return
df = DataFrame({"a": [1.0, 2.0, 3.0], "b": [1, 2, 3]})
operand = 2
if op in ("and", "or", "xor"):
# cannot use floats for boolean ops
df["a"] = [True, False, True]
df_copy = df.copy()
iop = f"__i{op}__"
op = f"__{op}__"
# no id change and value is correct
original_id = id(df)
getattr(df, iop)(operand)
expected = getattr(df_copy, op)(operand)
tm.assert_frame_equal(df, expected)
assert id(df) == original_id
def test_alignment_non_pandas(self):
index = ["A", "B", "C"]
columns = ["X", "Y", "Z"]
df = pd.DataFrame(np.random.randn(3, 3), index=index, columns=columns)
align = pd.core.ops._align_method_FRAME
for val in [
[1, 2, 3],
(1, 2, 3),
np.array([1, 2, 3], dtype=np.int64),
range(1, 4),
]:
expected = DataFrame({"X": val, "Y": val, "Z": val}, index=df.index)
#!/usr/bin/env python3
import pandas as pd
import numpy as np
#Set parameters
seed_number = 1234
#Import data
infile = "./Resources/TransitionMatrix.xlsx"
transition_matrix = pd.read_excel(infile, index_col=0)
transition_matrix.columns = range(transition_matrix.shape[1])
#Set Seed
np.random.seed(seed_number)
#Simulate Random Numbers
transitions_AAA=pd.DataFrame(np.random.rand(20,10000))
transitions_AA=pd.DataFrame(np.random.rand(20,10000))
transitions_A=pd.DataFrame(np.random.rand(20,10000))
transitions_BBB=pd.DataFrame(np.random.rand(20,10000))
transitions_BB=pd.DataFrame(np.random.rand(20,10000))
transitions_B=pd.DataFrame(np.random.rand(20,10000))
transitions_CCC=pd.DataFrame(np.random.rand(20,10000))
#Create Lookup Table
bond_rating_lookup_table = transition_matrix.cumsum()
#Create a dictionary that will be mapped to the dataframe of transition outcomes
bond_dict={
0:'AAA',1:'AA',2:'A',3:'BBB',4:'BB',5:'B',6:'CCC',7:'D'
}
#Calculate the transition outcome and map the corresponding bond rating
bond_final_rating_AAA = pd.DataFrame(np.searchsorted(bond_rating_lookup_table[0].values, transitions_AAA.values)).applymap(bond_dict.get)
bond_final_rating_AA = pd.DataFrame(np.searchsorted(bond_rating_lookup_table[1].values, transitions_AA.values)).applymap(bond_dict.get)
bond_final_rating_A = pd.DataFrame(np.searchsorted(bond_rating_lookup_table[2].values, transitions_A.values)).applymap(bond_dict.get)
bond_final_rating_BBB = pd.DataFrame(np.searchsorted(bond_rating_lookup_table[3].values, transitions_BBB.values)).applymap(bond_dict.get)
bond_final_rating_BB = pd.DataFrame(np.searchsorted(bond_rating_lookup_table[4].values, transitions_BB.values)).applymap(bond_dict.get)
bond_final_rating_B = pd.DataFrame(np.searchsorted(bond_rating_lookup_table[5].values, transitions_B.values)).applymap(bond_dict.get)
bond_final_rating_CCC = pd.DataFrame(np.searchsorted(bond_rating_lookup_table[6].values, transitions_CCC.values)).applymap(bond_dict.get)
#concatenate transitions of each bond into one dataframe
bond_final_rating = pd.concat([bond_final_rating_AAA, bond_final_rating_AA, bond_final_rating_A, bond_final_rating_BBB, bond_final_rating_BB, bond_final_rating_B, bond_final_rating_CCC])
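#Illustrative follow-up (not part of the original script): quick summaries of
#the simulated outcomes — the fraction of paths ending in default ("D") per
#row, and the overall distribution of final ratings. Names are arbitrary.
default_fraction = (bond_final_rating == "D").mean(axis=1)
final_rating_counts = bond_final_rating.stack().value_counts()
print(default_fraction.describe())
print(final_rating_counts)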
import datetime
from datetime import timedelta
from distutils.version import LooseVersion
from io import BytesIO
import os
import re
from warnings import catch_warnings, simplefilter
import numpy as np
import pytest
from pandas.compat import is_platform_little_endian, is_platform_windows
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_categorical_dtype
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
Int64Index,
MultiIndex,
RangeIndex,
Series,
Timestamp,
bdate_range,
concat,
date_range,
isna,
timedelta_range,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
create_tempfile,
ensure_clean_path,
ensure_clean_store,
safe_close,
safe_remove,
tables,
)
import pandas.util.testing as tm
from pandas.io.pytables import (
ClosedFileError,
HDFStore,
PossibleDataLossError,
Term,
read_hdf,
)
from pandas.io import pytables as pytables # noqa: E402 isort:skip
from pandas.io.pytables import TableIterator # noqa: E402 isort:skip
_default_compressor = "blosc"
ignore_natural_naming_warning = pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
@pytest.mark.single
class TestHDFStore:
def test_format_kwarg_in_constructor(self, setup_path):
# GH 13291
with ensure_clean_path(setup_path) as path:
with pytest.raises(ValueError):
HDFStore(path, format="table")
def test_context(self, setup_path):
path = create_tempfile(setup_path)
try:
with HDFStore(path) as tbl:
raise ValueError("blah")
except ValueError:
pass
finally:
safe_remove(path)
try:
with HDFStore(path) as tbl:
tbl["a"] = tm.makeDataFrame()
with HDFStore(path) as tbl:
assert len(tbl) == 1
assert type(tbl["a"]) == DataFrame
finally:
safe_remove(path)
def test_conv_read_write(self, setup_path):
path = create_tempfile(setup_path)
try:
def roundtrip(key, obj, **kwargs):
obj.to_hdf(path, key, **kwargs)
return read_hdf(path, key)
o = tm.makeTimeSeries()
tm.assert_series_equal(o, roundtrip("series", o))
o = tm.makeStringSeries()
tm.assert_series_equal(o, roundtrip("string_series", o))
o = tm.makeDataFrame()
tm.assert_frame_equal(o, roundtrip("frame", o))
# table
df = DataFrame(dict(A=range(5), B=range(5)))
df.to_hdf(path, "table", append=True)
result = read_hdf(path, "table", where=["index>2"])
tm.assert_frame_equal(df[df.index > 2], result)
finally:
safe_remove(path)
def test_long_strings(self, setup_path):
# GH6166
df = DataFrame(
{"a": tm.rands_array(100, size=10)}, index=tm.rands_array(100, size=10)
)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=["a"])
result = store.select("df")
tm.assert_frame_equal(df, result)
def test_api(self, setup_path):
# GH4584
# API issue when to_hdf doesn't accept append AND format args
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True)
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True)
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, "df", append=False, format="fixed")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False, format="f")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False)
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_store(setup_path) as store:
path = store._path
df = tm.makeDataFrame()
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=True, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# append to False
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# formats
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format=None)
tm.assert_frame_equal(store.select("df"), df)
with ensure_clean_path(setup_path) as path:
# Invalid.
df = tm.makeDataFrame()
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="f")
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="fixed")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=True, format="foo")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=False, format="bar")
# File path doesn't exist
path = ""
with pytest.raises(FileNotFoundError):
read_hdf(path, "df")
def test_api_default_format(self, setup_path):
# default_format option
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
_maybe_remove(store, "df")
store.put("df", df)
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
store.append("df2", df)
pd.set_option("io.hdf.default_format", "table")
_maybe_remove(store, "df")
store.put("df", df)
assert store.get_storer("df").is_table
_maybe_remove(store, "df2")
store.append("df2", df)
assert store.get_storer("df").is_table
pd.set_option("io.hdf.default_format", None)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
df.to_hdf(path, "df")
with HDFStore(path) as store:
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
df.to_hdf(path, "df2", append=True)
pd.set_option("io.hdf.default_format", "table")
df.to_hdf(path, "df3")
with HDFStore(path) as store:
assert store.get_storer("df3").is_table
df.to_hdf(path, "df4", append=True)
with HDFStore(path) as store:
assert store.get_storer("df4").is_table
pd.set_option("io.hdf.default_format", None)
def test_keys(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
assert len(store) == 3
expected = {"/a", "/b", "/c"}
assert set(store.keys()) == expected
assert set(store) == expected
def test_keys_ignore_hdf_softlink(self, setup_path):
# GH 20523
# Puts a softlink into HDF file and rereads
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A=range(5), B=range(5)))
store.put("df", df)
assert store.keys() == ["/df"]
store._handle.create_soft_link(store._handle.root, "symlink", "df")
# Should ignore the softlink
assert store.keys() == ["/df"]
def test_iter_empty(self, setup_path):
with ensure_clean_store(setup_path) as store:
# GH 12221
assert list(store) == []
def test_repr(self, setup_path):
with ensure_clean_store(setup_path) as store:
repr(store)
store.info()
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store["df"] = df
# make a random group in hdf space
store._handle.create_group(store._handle.root, "bah")
assert store.filename in repr(store)
assert store.filename in str(store)
store.info()
# storers
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df)
s = store.get_storer("df")
repr(s)
str(s)
@ignore_natural_naming_warning
def test_contains(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
store["foo/bar"] = tm.makeDataFrame()
assert "a" in store
assert "b" in store
assert "c" not in store
assert "foo/bar" in store
assert "/foo/bar" in store
assert "/foo/b" not in store
assert "bar" not in store
# gh-2694: tables.NaturalNameWarning
with catch_warnings(record=True):
store["node())"] = tm.makeDataFrame()
assert "node())" in store
def test_versioning(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
assert store.root.a._v_attrs.pandas_version == "0.15.2"
assert store.root.b._v_attrs.pandas_version == "0.15.2"
assert store.root.df1._v_attrs.pandas_version == "0.15.2"
# write a file and wipe its versioning
_maybe_remove(store, "df2")
store.append("df2", df)
# this is an error because its table_type is appendable, but no
# version info
store.get_node("df2")._v_attrs.pandas_version = None
with pytest.raises(Exception):
store.select("df2")
def test_mode(self, setup_path):
df = tm.makeTimeDataFrame()
def check(mode):
with ensure_clean_path(setup_path) as path:
# constructor
if mode in ["r", "r+"]:
with pytest.raises(IOError):
HDFStore(path, mode=mode)
else:
store = HDFStore(path, mode=mode)
assert store._handle.mode == mode
store.close()
with ensure_clean_path(setup_path) as path:
# context
if mode in ["r", "r+"]:
with pytest.raises(IOError):
with HDFStore(path, mode=mode) as store: # noqa
pass
else:
with HDFStore(path, mode=mode) as store:
assert store._handle.mode == mode
with ensure_clean_path(setup_path) as path:
# conv write
if mode in ["r", "r+"]:
with pytest.raises(IOError):
df.to_hdf(path, "df", mode=mode)
df.to_hdf(path, "df", mode="w")
else:
df.to_hdf(path, "df", mode=mode)
# conv read
if mode in ["w"]:
with pytest.raises(ValueError):
read_hdf(path, "df", mode=mode)
else:
result = read_hdf(path, "df", mode=mode)
tm.assert_frame_equal(result, df)
def check_default_mode():
# read_hdf uses default mode
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", mode="w")
result = read_hdf(path, "df")
tm.assert_frame_equal(result, df)
check("r")
check("r+")
check("a")
check("w")
check_default_mode()
def test_reopen_handle(self, setup_path):
with ensure_clean_path(setup_path) as path:
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# invalid mode change
with pytest.raises(PossibleDataLossError):
store.open("w")
store.close()
assert not store.is_open
# truncation ok here
store.open("w")
assert store.is_open
assert len(store) == 0
store.close()
assert not store.is_open
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# reopen as read
store.open("r")
assert store.is_open
assert len(store) == 1
assert store._mode == "r"
store.close()
assert not store.is_open
# reopen as append
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
# reopen as append (again)
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
def test_open_args(self, setup_path):
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
# create an in memory store
store = HDFStore(
path, mode="a", driver="H5FD_CORE", driver_core_backing_store=0
)
store["df"] = df
store.append("df2", df)
tm.assert_frame_equal(store["df"], df)
tm.assert_frame_equal(store["df2"], df)
store.close()
# the file should not have actually been written
assert not os.path.exists(path)
def test_flush(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store.flush()
store.flush(fsync=True)
def test_get(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
left = store.get("a")
right = store["a"]
tm.assert_series_equal(left, right)
left = store.get("/a")
right = store["/a"]
tm.assert_series_equal(left, right)
with pytest.raises(KeyError, match="'No object named b in the file'"):
store.get("b")
@pytest.mark.parametrize(
"where, expected",
[
(
"/",
{
"": ({"first_group", "second_group"}, set()),
"/first_group": (set(), {"df1", "df2"}),
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
(
"/second_group",
{
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
],
)
def test_walk(self, where, expected, setup_path):
# GH10143
objs = {
"df1": pd.DataFrame([1, 2, 3]),
"df2": pd.DataFrame([4, 5, 6]),
"df3": pd.DataFrame([6, 7, 8]),
"df4": pd.DataFrame([9, 10, 11]),
"s1": pd.Series([10, 9, 8]),
# Next 3 items aren't pandas objects and should be ignored
"a1": np.array([[1, 2, 3], [4, 5, 6]]),
"tb1": np.array([(1, 2, 3), (4, 5, 6)], dtype="i,i,i"),
"tb2": np.array([(7, 8, 9), (10, 11, 12)], dtype="i,i,i"),
}
with ensure_clean_store("walk_groups.hdf", mode="w") as store:
store.put("/first_group/df1", objs["df1"])
store.put("/first_group/df2", objs["df2"])
store.put("/second_group/df3", objs["df3"])
store.put("/second_group/s1", objs["s1"])
store.put("/second_group/third_group/df4", objs["df4"])
# Create non-pandas objects
store._handle.create_array("/first_group", "a1", objs["a1"])
store._handle.create_table("/first_group", "tb1", obj=objs["tb1"])
store._handle.create_table("/second_group", "tb2", obj=objs["tb2"])
assert len(list(store.walk(where=where))) == len(expected)
for path, groups, leaves in store.walk(where=where):
assert path in expected
expected_groups, expected_frames = expected[path]
assert expected_groups == set(groups)
assert expected_frames == set(leaves)
for leaf in leaves:
frame_path = "/".join([path, leaf])
obj = store.get(frame_path)
if "df" in leaf:
tm.assert_frame_equal(obj, objs[leaf])
else:
tm.assert_series_equal(obj, objs[leaf])
def test_getattr(self, setup_path):
with ensure_clean_store(setup_path) as store:
s = tm.makeTimeSeries()
store["a"] = s
# test attribute access
result = store.a
tm.assert_series_equal(result, s)
result = getattr(store, "a")
tm.assert_series_equal(result, s)
df = tm.makeTimeDataFrame()
store["df"] = df
result = store.df
tm.assert_frame_equal(result, df)
# errors
for x in ["d", "mode", "path", "handle", "complib"]:
with pytest.raises(AttributeError):
getattr(store, x)
# not stores
for x in ["mode", "path", "handle", "complib"]:
getattr(store, "_{x}".format(x=x))
def test_put(self, setup_path):
with ensure_clean_store(setup_path) as store:
ts = tm.makeTimeSeries()
df = tm.makeTimeDataFrame()
store["a"] = ts
store["b"] = df[:10]
store["foo/bar/bah"] = df[:10]
store["foo"] = df[:10]
store["/foo"] = df[:10]
store.put("c", df[:10], format="table")
# not OK, not a table
with pytest.raises(ValueError):
store.put("b", df[10:], append=True)
# node does not currently exist, test _is_table_type returns False
# in this case
_maybe_remove(store, "f")
with pytest.raises(ValueError):
store.put("f", df[10:], append=True)
# can't put to a table (use append instead)
with pytest.raises(ValueError):
store.put("c", df[10:], append=True)
# overwrite table
store.put("c", df[:10], format="table", append=False)
tm.assert_frame_equal(df[:10], store["c"])
def test_put_string_index(self, setup_path):
with ensure_clean_store(setup_path) as store:
index = Index(
["I am a very long string index: {i}".format(i=i) for i in range(20)]
)
s = Series(np.arange(20), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
# mixed length
index = Index(
["abcdefghijklmnopqrstuvwxyz1234567890"]
+ ["I am a very long string index: {i}".format(i=i) for i in range(20)]
)
s = Series(np.arange(21), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
def test_put_compression(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
store.put("c", df, format="table", complib="zlib")
tm.assert_frame_equal(store["c"], df)
# can't compress if format='fixed'
with pytest.raises(ValueError):
store.put("b", df, format="fixed", complib="zlib")
@td.skip_if_windows_python_3
def test_put_compression_blosc(self, setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
# can't compress if format='fixed'
with pytest.raises(ValueError):
store.put("b", df, format="fixed", complib="blosc")
store.put("c", df, format="table", complib="blosc")
tm.assert_frame_equal(store["c"], df)
def test_complibs_default_settings(self, setup_path):
# GH15943
df = tm.makeDataFrame()
# Set complevel and check if complib is automatically set to
# default value
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complevel=9)
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 9
assert node.filters.complib == "zlib"
# Set complib and check to see if compression is disabled
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complib="zlib")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if not setting complib or complevel results in no compression
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if file-defaults can be overridden on a per table basis
with ensure_clean_path(setup_path) as tmpfile:
store = pd.HDFStore(tmpfile)
store.append("dfc", df, complevel=9, complib="blosc")
store.append("df", df)
store.close()
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
for node in h5file.walk_nodes(where="/dfc", classname="Leaf"):
assert node.filters.complevel == 9
assert node.filters.complib == "blosc"
def test_complibs(self, setup_path):
# GH14478
df = tm.makeDataFrame()
# Building list of all complibs and complevels tuples
all_complibs = tables.filters.all_complibs
# Remove lzo if it's not available on this platform
if not tables.which_lib_version("lzo"):
all_complibs.remove("lzo")
# Remove bzip2 if it's not available on this platform
if not tables.which_lib_version("bzip2"):
all_complibs.remove("bzip2")
all_levels = range(0, 10)
all_tests = [(lib, lvl) for lib in all_complibs for lvl in all_levels]
for (lib, lvl) in all_tests:
with ensure_clean_path(setup_path) as tmpfile:
gname = "foo"
# Write and read file to see if data is consistent
df.to_hdf(tmpfile, gname, complib=lib, complevel=lvl)
result = pd.read_hdf(tmpfile, gname)
tm.assert_frame_equal(result, df)
# Open file and check metadata
# for correct amount of compression
h5table = tables.open_file(tmpfile, mode="r")
for node in h5table.walk_nodes(where="/" + gname, classname="Leaf"):
assert node.filters.complevel == lvl
if lvl == 0:
assert node.filters.complib is None
else:
assert node.filters.complib == lib
h5table.close()
def test_put_integer(self, setup_path):
# non-date, non-string index
df = DataFrame(np.random.randn(50, 100))
self._check_roundtrip(df, tm.assert_frame_equal, setup_path)
@td.xfail_non_writeable
def test_put_mixed_type(self, setup_path):
df = tm.makeTimeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
# PerformanceWarning
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store.put("df", df)
expected = store.get("df")
tm.assert_frame_equal(expected, df)
@pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
def test_append(self, setup_path):
with ensure_clean_store(setup_path) as store:
# this is allowed, but you almost always don't want to do it
# tables.NaturalNameWarning):
with catch_warnings(record=True):
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
tm.assert_frame_equal(store["df1"], df)
_maybe_remove(store, "df2")
store.put("df2", df[:10], format="table")
store.append("df2", df[10:])
tm.assert_frame_equal(store["df2"], df)
_maybe_remove(store, "df3")
store.append("/df3", df[:10])
store.append("/df3", df[10:])
tm.assert_frame_equal(store["df3"], df)
# this is allowed, but you almost always don't want to do it
# tables.NaturalNameWarning
_maybe_remove(store, "/df3 foo")
store.append("/df3 foo", df[:10])
store.append("/df3 foo", df[10:])
tm.assert_frame_equal(store["df3 foo"], df)
# dtype issues - mixed type in a single object column
df = DataFrame(data=[[1, 2], [0, 1], [1, 2], [0, 0]])
df["mixed_column"] = "testing"
df.loc[2, "mixed_column"] = np.nan
_maybe_remove(store, "df")
store.append("df", df)
tm.assert_frame_equal(store["df"], df)
# uints - test storage of uints
uint_data = DataFrame(
{
"u08": Series(
np.random.randint(0, high=255, size=5), dtype=np.uint8
),
"u16": Series(
np.random.randint(0, high=65535, size=5), dtype=np.uint16
),
"u32": Series(
np.random.randint(0, high=2 ** 30, size=5), dtype=np.uint32
),
"u64": Series(
[2 ** 58, 2 ** 59, 2 ** 60, 2 ** 61, 2 ** 62],
dtype=np.uint64,
),
},
index=np.arange(5),
)
_maybe_remove(store, "uints")
store.append("uints", uint_data)
tm.assert_frame_equal(store["uints"], uint_data)
# uints - test storage of uints in indexable columns
_maybe_remove(store, "uints")
# 64-bit indices not yet supported
store.append("uints", uint_data, data_columns=["u08", "u16", "u32"])
tm.assert_frame_equal(store["uints"], uint_data)
def test_append_series(self, setup_path):
with ensure_clean_store(setup_path) as store:
# basic
ss = tm.makeStringSeries()
ts = tm.makeTimeSeries()
ns = Series(np.arange(100))
store.append("ss", ss)
result = store["ss"]
tm.assert_series_equal(result, ss)
assert result.name is None
store.append("ts", ts)
result = store["ts"]
tm.assert_series_equal(result, ts)
assert result.name is None
ns.name = "foo"
store.append("ns", ns)
result = store["ns"]
tm.assert_series_equal(result, ns)
assert result.name == ns.name
# select on the values
expected = ns[ns > 60]
result = store.select("ns", "foo>60")
tm.assert_series_equal(result, expected)
# select on the index and values
expected = ns[(ns > 70) & (ns.index < 90)]
result = store.select("ns", "foo>70 and index<90")
tm.assert_series_equal(result, expected)
# multi-index
mi = DataFrame(np.random.randn(5, 1), columns=["A"])
mi["B"] = np.arange(len(mi))
mi["C"] = "foo"
mi.loc[3:5, "C"] = "bar"
mi.set_index(["C", "B"], inplace=True)
s = mi.stack()
s.index = s.index.droplevel(2)
store.append("mi", s)
tm.assert_series_equal(store["mi"], s)
def test_store_index_types(self, setup_path):
# GH5386
# test storing various index types
with ensure_clean_store(setup_path) as store:
def check(format, index):
df = DataFrame(np.random.randn(10, 2), columns=list("AB"))
df.index = index(len(df))
_maybe_remove(store, "df")
store.put("df", df, format=format)
tm.assert_frame_equal(df, store["df"])
for index in [
tm.makeFloatIndex,
tm.makeStringIndex,
tm.makeIntIndex,
tm.makeDateIndex,
]:
check("table", index)
check("fixed", index)
# period index currently broken for table
# see GH7796 FIXME
check("fixed", tm.makePeriodIndex)
# check('table',tm.makePeriodIndex)
# unicode
index = tm.makeUnicodeIndex
check("table", index)
check("fixed", index)
@pytest.mark.skipif(
not is_platform_little_endian(), reason="platform is not little endian"
)
def test_encoding(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A="foo", B="bar"), index=range(5))
df.loc[2, "A"] = np.nan
df.loc[3, "B"] = np.nan
_maybe_remove(store, "df")
store.append("df", df, encoding="ascii")
tm.assert_frame_equal(store["df"], df)
expected = df.reindex(columns=["A"])
result = store.select("df", Term("columns=A", encoding="ascii"))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"val",
[
[b"E\xc9, 17", b"", b"a", b"b", b"c"],
[b"E\xc9, 17", b"a", b"b", b"c"],
[b"EE, 17", b"", b"a", b"b", b"c"],
[b"E\xc9, 17", b"\xf8\xfc", b"a", b"b", b"c"],
[b"", b"a", b"b", b"c"],
[b"\xf8\xfc", b"a", b"b", b"c"],
[b"A\xf8\xfc", b"", b"a", b"b", b"c"],
[np.nan, b"", b"b", b"c"],
[b"A\xf8\xfc", np.nan, b"", b"b", b"c"],
],
)
@pytest.mark.parametrize("dtype", ["category", object])
def test_latin_encoding(self, setup_path, dtype, val):
enc = "latin-1"
nan_rep = ""
key = "data"
val = [x.decode(enc) if isinstance(x, bytes) else x for x in val]
ser = pd.Series(val, dtype=dtype)
with ensure_clean_path(setup_path) as store:
ser.to_hdf(store, key, format="table", encoding=enc, nan_rep=nan_rep)
retr = read_hdf(store, key)
s_nan = ser.replace(nan_rep, np.nan)
if is_categorical_dtype(s_nan):
assert is_categorical_dtype(retr)
tm.assert_series_equal(
s_nan, retr, check_dtype=False, check_categorical=False
)
else:
tm.assert_series_equal(s_nan, retr)
# FIXME: don't leave commented-out
# fails:
# for x in examples:
# roundtrip(s, nan_rep=b'\xf8\xfc')
def test_append_some_nans(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
{
"A": Series(np.random.randn(20)).astype("int32"),
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
"D": Timestamp("20010101"),
"E": datetime.datetime(2001, 1, 2, 0, 0),
},
index=np.arange(20),
)
# some nans
_maybe_remove(store, "df1")
df.loc[0:15, ["A1", "B", "D", "E"]] = np.nan
store.append("df1", df[:10])
store.append("df1", df[10:])
tm.assert_frame_equal(store["df1"], df)
# first column
df1 = df.copy()
df1.loc[:, "A1"] = np.nan
_maybe_remove(store, "df1")
store.append("df1", df1[:10])
store.append("df1", df1[10:])
tm.assert_frame_equal(store["df1"], df1)
# 2nd column
df2 = df.copy()
df2.loc[:, "A2"] = np.nan
_maybe_remove(store, "df2")
store.append("df2", df2[:10])
store.append("df2", df2[10:])
tm.assert_frame_equal(store["df2"], df2)
# datetimes
df3 = df.copy()
df3.loc[:, "E"] = np.nan
_maybe_remove(store, "df3")
store.append("df3", df3[:10])
store.append("df3", df3[10:])
tm.assert_frame_equal(store["df3"], df3)
def test_append_all_nans(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
{"A1": np.random.randn(20), "A2": np.random.randn(20)},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
# nan some entire rows (dropna=True)
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df[-4:])
# nan some entire rows (dropna=False)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# tests the option io.hdf.dropna_table
pd.set_option("io.hdf.dropna_table", False)
_maybe_remove(store, "df3")
store.append("df3", df[:10])
store.append("df3", df[10:])
tm.assert_frame_equal(store["df3"], df)
pd.set_option("io.hdf.dropna_table", True)
_maybe_remove(store, "df4")
store.append("df4", df[:10])
store.append("df4", df[10:])
tm.assert_frame_equal(store["df4"], df[-4:])
# nan some entire rows (strings are still written!)
df = DataFrame(
{
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# nan some entire rows (but since we have dates they are still
# written!)
df = DataFrame(
{
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
"D": Timestamp("20010101"),
"E": datetime.datetime(2001, 1, 2, 0, 0),
},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# Test to make sure defaults are to not drop.
# Corresponding to Issue 9382
df_with_missing = DataFrame(
{"col1": [0, np.nan, 2], "col2": [1, np.nan, np.nan]}
)
with ensure_clean_path(setup_path) as path:
df_with_missing.to_hdf(path, "df_with_missing", format="table")
reloaded = read_hdf(path, "df_with_missing")
tm.assert_frame_equal(df_with_missing, reloaded)
def test_read_missing_key_close_store(self, setup_path):
# GH 25766
with ensure_clean_path(setup_path) as path:
df = pd.DataFrame({"a": range(2), "b": range(2)})
df.to_hdf(path, "k1")
with pytest.raises(KeyError, match="'No object named k2 in the file'"):
pd.read_hdf(path, "k2")
# smoke test to test that file is properly closed after
# read with KeyError before another write
df.to_hdf(path, "k2")
def test_read_missing_key_opened_store(self, setup_path):
# GH 28699
with ensure_clean_path(setup_path) as path:
df = pd.DataFrame({"a": range(2), "b": range(2)})
df.to_hdf(path, "k1")
store = pd.HDFStore(path, "r")
with pytest.raises(KeyError, match="'No object named k2 in the file'"):
pd.read_hdf(store, "k2")
# Test that the file is still open after a KeyError and that we can
# still read from it.
pd.read_hdf(store, "k1")
def test_append_frame_column_oriented(self, setup_path):
with ensure_clean_store(setup_path) as store:
# column oriented
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df.iloc[:, :2], axes=["columns"])
store.append("df1", df.iloc[:, 2:])
tm.assert_frame_equal(store["df1"], df)
result = store.select("df1", "columns=A")
expected = df.reindex(columns=["A"])
tm.assert_frame_equal(expected, result)
# selection on the non-indexable
result = store.select("df1", ("columns=A", "index=df.index[0:4]"))
expected = df.reindex(columns=["A"], index=df.index[0:4])
tm.assert_frame_equal(expected, result)
# this isn't supported
with pytest.raises(TypeError):
store.select("df1", "columns=A and index>df.index[4]")
def test_append_with_different_block_ordering(self, setup_path):
# GH 4096; using same frames, but different block orderings
with ensure_clean_store(setup_path) as store:
for i in range(10):
df = DataFrame(np.random.randn(10, 2), columns=list("AB"))
df["index"] = range(10)
df["index"] += i * 10
df["int64"] = Series([1] * len(df), dtype="int64")
df["int16"] = Series([1] * len(df), dtype="int16")
if i % 2 == 0:
del df["int64"]
df["int64"] = Series([1] * len(df), dtype="int64")
if i % 3 == 0:
a = df.pop("A")
df["A"] = a
df.set_index("index", inplace=True)
store.append("df", df)
# test a different ordering but with more fields (like invalid
# combination)
with ensure_clean_store(setup_path) as store:
df = DataFrame(np.random.randn(10, 2), columns=list("AB"), dtype="float64")
df["int64"] = Series([1] * len(df), dtype="int64")
df["int16"] = Series([1] * len(df), dtype="int16")
store.append("df", df)
# store additional fields in different blocks
df["int16_2"] = Series([1] * len(df), dtype="int16")
with pytest.raises(ValueError):
store.append("df", df)
# store multiple additional fields in different blocks
df["float_3"] = Series([1.0] * len(df), dtype="float64")
with pytest.raises(ValueError):
store.append("df", df)
def test_append_with_strings(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
def check_col(key, name, size):
assert (
getattr(store.get_storer(key).table.description, name).itemsize
== size
)
# avoid truncation on elements
df = DataFrame([[123, "asdqwerty"], [345, "dggnhebbsdfbdfb"]])
store.append("df_big", df)
tm.assert_frame_equal(store.select("df_big"), df)
check_col("df_big", "values_block_1", 15)
# appending smaller string ok
df2 = DataFrame([[124, "asdqy"], [346, "dggnhefbdfb"]])
store.append("df_big", df2)
expected = concat([df, df2])
tm.assert_frame_equal(store.select("df_big"), expected)
check_col("df_big", "values_block_1", 15)
# avoid truncation on elements
df = DataFrame([[123, "asdqwerty"], [345, "dggnhebbsdfbdfb"]])
store.append("df_big2", df, min_itemsize={"values": 50})
tm.assert_frame_equal(store.select("df_big2"), df)
check_col("df_big2", "values_block_1", 50)
# bigger string on next append
store.append("df_new", df)
df_new = DataFrame(
[[124, "abcdefqhij"], [346, "abcdefghijklmnopqrtsuvwxyz"]]
)
with pytest.raises(ValueError):
store.append("df_new", df_new)
# min_itemsize on Series index (GH 11412)
df = tm.makeMixedDataFrame().set_index("C")
store.append("ss", df["B"], min_itemsize={"index": 4})
tm.assert_series_equal(store.select("ss"), df["B"])
# same as above, with data_columns=True
store.append(
"ss2", df["B"], data_columns=True, min_itemsize={"index": 4}
)
tm.assert_series_equal(store.select("ss2"), df["B"])
# min_itemsize in index without appending (GH 10381)
store.put("ss3", df, format="table", min_itemsize={"index": 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C="longer").set_index("C")
store.append("ss3", df2)
tm.assert_frame_equal(store.select("ss3"), pd.concat([df, df2]))
# same as above, with a Series
store.put("ss4", df["B"], format="table", min_itemsize={"index": 6})
store.append("ss4", df2["B"])
tm.assert_series_equal(
store.select("ss4"), pd.concat([df["B"], df2["B"]])
)
# with nans
_maybe_remove(store, "df")
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df.loc[1:4, "string"] = np.nan
df["string2"] = "bar"
df.loc[4:8, "string2"] = np.nan
df["string3"] = "bah"
df.loc[1:, "string3"] = np.nan
store.append("df", df)
result = store.select("df")
tm.assert_frame_equal(result, df)
with ensure_clean_store(setup_path) as store:
def check_col(key, name, size):
assert (
getattr(store.get_storer(key).table.description, name).itemsize
== size
)
df = DataFrame(dict(A="foo", B="bar"), index=range(10))
# a min_itemsize that creates a data_column
_maybe_remove(store, "df")
store.append("df", df, min_itemsize={"A": 200})
check_col("df", "A", 200)
assert store.get_storer("df").data_columns == ["A"]
# a min_itemsize that creates a data_column2
_maybe_remove(store, "df")
store.append("df", df, data_columns=["B"], min_itemsize={"A": 200})
check_col("df", "A", 200)
assert store.get_storer("df").data_columns == ["B", "A"]
# a min_itemsize that creates a data_column2
_maybe_remove(store, "df")
store.append("df", df, data_columns=["B"], min_itemsize={"values": 200})
check_col("df", "B", 200)
check_col("df", "values_block_0", 200)
assert store.get_storer("df").data_columns == ["B"]
# infer the .typ on subsequent appends
_maybe_remove(store, "df")
store.append("df", df[:5], min_itemsize=200)
store.append("df", df[5:], min_itemsize=200)
tm.assert_frame_equal(store["df"], df)
# invalid min_itemsize keys
df = DataFrame(["foo", "foo", "foo", "barh", "barh", "barh"], columns=["A"])
_maybe_remove(store, "df")
with pytest.raises(ValueError):
store.append("df", df, min_itemsize={"foo": 20, "foobar": 20})
def test_append_with_empty_string(self, setup_path):
with ensure_clean_store(setup_path) as store:
# with all empty strings (GH 12242)
df = DataFrame({"x": ["a", "b", "c", "d", "e", "f", ""]})
store.append("df", df[:-1], min_itemsize={"x": 1})
store.append("df", df[-1:], min_itemsize={"x": 1})
tm.assert_frame_equal(store.select("df"), df)
def test_to_hdf_with_min_itemsize(self, setup_path):
with ensure_clean_path(setup_path) as path:
# min_itemsize in index with to_hdf (GH 10381)
df = tm.makeMixedDataFrame().set_index("C")
df.to_hdf(path, "ss3", format="table", min_itemsize={"index": 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C="longer").set_index("C")
df2.to_hdf(path, "ss3", append=True, format="table")
tm.assert_frame_equal(pd.read_hdf(path, "ss3"), pd.concat([df, df2]))
# same as above, with a Series
df["B"].to_hdf(path, "ss4", format="table", min_itemsize={"index": 6})
df2["B"].to_hdf(path, "ss4", append=True, format="table")
tm.assert_series_equal(
pd.read_hdf(path, "ss4"), pd.concat([df["B"], df2["B"]])
)
@pytest.mark.parametrize(
"format", [pytest.param("fixed", marks=td.xfail_non_writeable), "table"]
)
def test_to_hdf_errors(self, format, setup_path):
data = ["\ud800foo"]
ser = pd.Series(data, index=pd.Index(data))
with ensure_clean_path(setup_path) as path:
# GH 20835
ser.to_hdf(path, "table", format=format, errors="surrogatepass")
result = pd.read_hdf(path, "table", errors="surrogatepass")
tm.assert_series_equal(result, ser)
def test_append_with_data_columns(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
df.iloc[0, df.columns.get_loc("B")] = 1.0
_maybe_remove(store, "df")
store.append("df", df[:2], data_columns=["B"])
store.append("df", df[2:])
tm.assert_frame_equal(store["df"], df)
# check that we have indices created
assert store._handle.root.df.table.cols.index.is_indexed is True
assert store._handle.root.df.table.cols.B.is_indexed is True
# data column searching
result = store.select("df", "B>0")
expected = df[df.B > 0]
tm.assert_frame_equal(result, expected)
# data column searching (with an indexable and a data_columns)
result = store.select("df", "B>0 and index>df.index[3]")
df_new = df.reindex(index=df.index[4:])
expected = df_new[df_new.B > 0]
tm.assert_frame_equal(result, expected)
# data column selection with a string data_column
df_new = df.copy()
df_new["string"] = "foo"
df_new.loc[1:4, "string"] = np.nan
df_new.loc[5:6, "string"] = "bar"
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["string"])
result = store.select("df", "string='foo'")
expected = df_new[df_new.string == "foo"]
tm.assert_frame_equal(result, expected)
# using min_itemsize and a data column
def check_col(key, name, size):
assert (
getattr(store.get_storer(key).table.description, name).itemsize
== size
)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
store.append(
"df", df_new, data_columns=["string"], min_itemsize={"string": 30}
)
check_col("df", "string", 30)
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["string"], min_itemsize=30)
check_col("df", "string", 30)
_maybe_remove(store, "df")
store.append(
"df", df_new, data_columns=["string"], min_itemsize={"values": 30}
)
check_col("df", "string", 30)
with ensure_clean_store(setup_path) as store:
df_new["string2"] = "foobarbah"
df_new["string_block1"] = "foobarbah1"
df_new["string_block2"] = "foobarbah2"
_maybe_remove(store, "df")
store.append(
"df",
df_new,
data_columns=["string", "string2"],
min_itemsize={"string": 30, "string2": 40, "values": 50},
)
check_col("df", "string", 30)
check_col("df", "string2", 40)
check_col("df", "values_block_1", 50)
with ensure_clean_store(setup_path) as store:
# multiple data columns
df_new = df.copy()
df_new.iloc[0, df_new.columns.get_loc("A")] = 1.0
df_new.iloc[0, df_new.columns.get_loc("B")] = -1.0
df_new["string"] = "foo"
sl = df_new.columns.get_loc("string")
df_new.iloc[1:4, sl] = np.nan
df_new.iloc[5:6, sl] = "bar"
df_new["string2"] = "foo"
sl = df_new.columns.get_loc("string2")
df_new.iloc[2:5, sl] = np.nan
df_new.iloc[7:8, sl] = "bar"
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["A", "B", "string", "string2"])
result = store.select(
"df", "string='foo' and string2='foo' and A>0 and B<0"
)
expected = df_new[
(df_new.string == "foo")
& (df_new.string2 == "foo")
& (df_new.A > 0)
& (df_new.B < 0)
]
tm.assert_frame_equal(result, expected, check_index_type=False)
# yield an empty frame
result = store.select("df", "string='foo' and string2='cool'")
expected = df_new[(df_new.string == "foo") & (df_new.string2 == "cool")]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(setup_path) as store:
# doc example
df_dc = df.copy()
df_dc["string"] = "foo"
df_dc.loc[4:6, "string"] = np.nan
df_dc.loc[7:9, "string"] = "bar"
df_dc["string2"] = "cool"
df_dc["datetime"] = Timestamp("20010102")
df_dc = df_dc._convert(datetime=True)
df_dc.loc[3:5, ["A", "B", "datetime"]] = np.nan
_maybe_remove(store, "df_dc")
store.append(
"df_dc", df_dc, data_columns=["B", "C", "string", "string2", "datetime"]
)
result = store.select("df_dc", "B>0")
expected = df_dc[df_dc.B > 0]
tm.assert_frame_equal(result, expected, check_index_type=False)
result = store.select("df_dc", ["B > 0", "C > 0", "string == foo"])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == "foo")]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(setup_path) as store:
# doc example part 2
np.random.seed(1234)
index = date_range("1/1/2000", periods=8)
df_dc = DataFrame(
np.random.randn(8, 3), index=index, columns=["A", "B", "C"]
)
df_dc["string"] = "foo"
df_dc.loc[4:6, "string"] = np.nan
df_dc.loc[7:9, "string"] = "bar"
df_dc.loc[:, ["B", "C"]] = df_dc.loc[:, ["B", "C"]].abs()
df_dc["string2"] = "cool"
# on-disk operations
store.append("df_dc", df_dc, data_columns=["B", "C", "string", "string2"])
result = store.select("df_dc", "B>0")
expected = df_dc[df_dc.B > 0]
tm.assert_frame_equal(result, expected)
result = store.select("df_dc", ["B > 0", "C > 0", 'string == "foo"'])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == "foo")]
tm.assert_frame_equal(result, expected)
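# Minimal usage sketch (not one of the original tests): the data_columns plus
# where-clause query pattern exercised above, using only documented API. The
# key name "dc_demo" is illustrative.
def _data_columns_sketch(self, setup_path):
    with ensure_clean_store(setup_path) as store:
        df = DataFrame({"A": np.arange(5.0), "B": ["x", "x", "y", "y", "y"]})
        store.append("dc_demo", df, data_columns=["A", "B"])
        result = store.select("dc_demo", "A > 2 and B == 'y'")
        expected = df[(df.A > 2) & (df.B == "y")]
        tm.assert_frame_equal(result, expected)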
def test_create_table_index(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
def col(t, column):
return getattr(store.get_storer(t).table.cols, column)
# data columns
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df["string2"] = "bar"
store.append("f", df, data_columns=["string", "string2"])
assert col("f", "index").is_indexed is True
assert col("f", "string").is_indexed is True
assert col("f", "string2").is_indexed is True
# specify index=columns
store.append(
"f2", df, index=["string"], data_columns=["string", "string2"]
)
assert col("f2", "index").is_indexed is False
assert col("f2", "string").is_indexed is True
assert col("f2", "string2").is_indexed is False
# try to index a non-table
_maybe_remove(store, "f2")
store.put("f2", df)
with pytest.raises(TypeError):
store.create_table_index("f2")
def test_append_hierarchical(self, setup_path):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo", "bar"],
)
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
with ensure_clean_store(setup_path) as store:
store.append("mi", df)
result = store.select("mi")
tm.assert_frame_equal(result, df)
# GH 3748
result = store.select("mi", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
with ensure_clean_path("test.hdf") as path:
df.to_hdf(path, "df", format="table")
result = read_hdf(path, "df", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_column_multiindex(self, setup_path):
# GH 4710
# recreate multi-indexes properly
index = MultiIndex.from_tuples(
[("A", "a"), ("A", "b"), ("B", "a"), ("B", "b")], names=["first", "second"]
)
df = DataFrame(np.arange(12).reshape(3, 4), columns=index)
expected = df.copy()
if isinstance(expected.index, RangeIndex):
expected.index = Int64Index(expected.index)
with ensure_clean_store(setup_path) as store:
store.put("df", df)
tm.assert_frame_equal(
store["df"], expected, check_index_type=True, check_column_type=True
)
store.put("df1", df, format="table")
tm.assert_frame_equal(
store["df1"], expected, check_index_type=True, check_column_type=True
)
with pytest.raises(ValueError):
store.put("df2", df, format="table", data_columns=["A"])
with pytest.raises(ValueError):
store.put("df3", df, format="table", data_columns=True)
# appending multi-column on existing table (see GH 6167)
with ensure_clean_store(setup_path) as store:
store.append("df2", df)
store.append("df2", df)
tm.assert_frame_equal(store["df2"], concat((df, df)))
# non_index_axes name
df = DataFrame(
np.arange(12).reshape(3, 4), columns=Index(list("ABCD"), name="foo")
)
expected = df.copy()
if isinstance(expected.index, RangeIndex):
expected.index = Int64Index(expected.index)
with ensure_clean_store(setup_path) as store:
store.put("df1", df, format="table")
tm.assert_frame_equal(
store["df1"], expected, check_index_type=True, check_column_type=True
)
def test_store_multiindex(self, setup_path):
# validate multi-index names
# GH 5527
with ensure_clean_store(setup_path) as store:
def make_index(names=None):
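                # 12-row (date, s, t) MultiIndex; level names are optional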
return MultiIndex.from_tuples(
[
(datetime.datetime(2013, 12, d), s, t)
for d in range(1, 3)
for s in range(2)
for t in range(3)
],
names=names,
)
# no names
_maybe_remove(store, "df")
df = DataFrame(np.zeros((12, 2)), columns=["a", "b"], index=make_index())
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
# partial names
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", None, None]),
)
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
# series
_maybe_remove(store, "s")
s = Series(np.zeros(12), index=make_index(["date", None, None]))
store.append("s", s)
xp = Series(np.zeros(12), index=make_index(["date", "level_1", "level_2"]))
tm.assert_series_equal(store.select("s"), xp)
# dup with column
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "a", "t"]),
)
with pytest.raises(ValueError):
store.append("df", df)
# dup within level
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "date", "date"]),
)
with pytest.raises(ValueError):
store.append("df", df)
            # fully named
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "s", "t"]),
)
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
def test_select_columns_in_where(self, setup_path):
# GH 6169
# recreate multi-indexes when columns is passed
# in the `where` argument
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo_name", "bar_name"],
)
# With a DataFrame
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table")
expected = df[["A"]]
tm.assert_frame_equal(store.select("df", columns=["A"]), expected)
tm.assert_frame_equal(store.select("df", where="columns=['A']"), expected)
# With a Series
s = Series(np.random.randn(10), index=index, name="A")
with ensure_clean_store(setup_path) as store:
store.put("s", s, format="table")
tm.assert_series_equal(store.select("s", where="columns=['A']"), s)
def test_mi_data_columns(self, setup_path):
# GH 14435
idx = pd.MultiIndex.from_arrays(
[date_range("2000-01-01", periods=5), range(5)], names=["date", "id"]
)
df = pd.DataFrame({"a": [1.1, 1.2, 1.3, 1.4, 1.5]}, index=idx)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=True)
actual = store.select("df", where="id == 1")
expected = df.iloc[[1], :]
tm.assert_frame_equal(actual, expected)
def test_pass_spec_to_storer(self, setup_path):
df = tm.makeDataFrame()
with ensure_clean_store(setup_path) as store:
store.put("df", df)
with pytest.raises(TypeError):
store.select("df", columns=["A"])
with pytest.raises(TypeError):
store.select("df", where=[("columns=A")])
@td.xfail_non_writeable
def test_append_misc(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df, chunksize=1)
result = store.select("df")
tm.assert_frame_equal(result, df)
store.append("df1", df, expectedrows=10)
result = store.select("df1")
tm.assert_frame_equal(result, df)
# more chunksize in append tests
def check(obj, comparator):
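            # append obj with several chunk sizes and compare the full round trip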
for c in [10, 200, 1000]:
with ensure_clean_store(setup_path, mode="w") as store:
store.append("obj", obj, chunksize=c)
result = store.select("obj")
comparator(result, obj)
df = tm.makeDataFrame()
df["string"] = "foo"
df["float322"] = 1.0
df["float322"] = df["float322"].astype("float32")
df["bool"] = df["float322"] > 0
df["time1"] = Timestamp("20130101")
df["time2"] = Timestamp("20130102")
check(df, tm.assert_frame_equal)
# empty frame, GH4273
with ensure_clean_store(setup_path) as store:
# 0 len
df_empty = DataFrame(columns=list("ABC"))
store.append("df", df_empty)
with pytest.raises(KeyError, match="'No object named df in the file'"):
store.select("df")
# repeated append of 0/non-zero frames
df = DataFrame(np.random.rand(10, 3), columns=list("ABC"))
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
store.append("df", df_empty)
tm.assert_frame_equal(store.select("df"), df)
# store
df = DataFrame(columns=list("ABC"))
store.put("df2", df)
tm.assert_frame_equal(store.select("df2"), df)
def test_append_raise(self, setup_path):
with ensure_clean_store(setup_path) as store:
# test append with invalid input to get good error messages
# list in column
df = tm.makeDataFrame()
df["invalid"] = [["a"]] * len(df)
assert df.dtypes["invalid"] == np.object_
with pytest.raises(TypeError):
store.append("df", df)
# multiple invalid columns
df["invalid2"] = [["a"]] * len(df)
df["invalid3"] = [["a"]] * len(df)
with pytest.raises(TypeError):
store.append("df", df)
# datetime with embedded nans as object
df = tm.makeDataFrame()
s = Series(datetime.datetime(2001, 1, 2), index=df.index)
s = s.astype(object)
s[0:5] = np.nan
df["invalid"] = s
assert df.dtypes["invalid"] == np.object_
with pytest.raises(TypeError):
store.append("df", df)
# directly ndarray
with pytest.raises(TypeError):
store.append("df", np.arange(10))
# series directly
with pytest.raises(TypeError):
store.append("df", Series(np.arange(10)))
# appending an incompatible table
df = tm.makeDataFrame()
store.append("df", df)
df["foo"] = "foo"
with pytest.raises(ValueError):
store.append("df", df)
def test_table_index_incompatible_dtypes(self, setup_path):
df1 = DataFrame({"a": [1, 2, 3]})
df2 = DataFrame({"a": [4, 5, 6]}, index=date_range("1/1/2000", periods=3))
with ensure_clean_store(setup_path) as store:
store.put("frame", df1, format="table")
with pytest.raises(TypeError):
store.put("frame", df2, format="table", append=True)
def test_table_values_dtypes_roundtrip(self, setup_path):
with ensure_clean_store(setup_path) as store:
df1 = DataFrame({"a": [1, 2, 3]}, dtype="f8")
store.append("df_f8", df1)
tm.assert_series_equal(df1.dtypes, store["df_f8"].dtypes)
df2 = DataFrame({"a": [1, 2, 3]}, dtype="i8")
store.append("df_i8", df2)
tm.assert_series_equal(df2.dtypes, store["df_i8"].dtypes)
# incompatible dtype
with pytest.raises(ValueError):
store.append("df_i8", df1)
# check creation/storage/retrieval of float32 (a bit hacky to
            # actually create them though)
df1 = DataFrame(np.array([[1], [2], [3]], dtype="f4"), columns=["A"])
store.append("df_f4", df1)
tm.assert_series_equal(df1.dtypes, store["df_f4"].dtypes)
assert df1.dtypes[0] == "float32"
# check with mixed dtypes
df1 = DataFrame(
{
c: Series(np.random.randint(5), dtype=c)
for c in ["float32", "float64", "int32", "int64", "int16", "int8"]
}
)
df1["string"] = "foo"
df1["float322"] = 1.0
df1["float322"] = df1["float322"].astype("float32")
df1["bool"] = df1["float32"] > 0
df1["time1"] = Timestamp("20130101")
df1["time2"] = Timestamp("20130102")
store.append("df_mixed_dtypes1", df1)
result = store.select("df_mixed_dtypes1").dtypes.value_counts()
result.index = [str(i) for i in result.index]
expected = Series(
{
"float32": 2,
"float64": 1,
"int32": 1,
"bool": 1,
"int16": 1,
"int8": 1,
"int64": 1,
"object": 1,
"datetime64[ns]": 2,
}
)
result = result.sort_index()
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
def test_table_mixed_dtypes(self, setup_path):
# frame
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
store.append("df1_mixed", df)
tm.assert_frame_equal(store.select("df1_mixed"), df)
def test_unimplemented_dtypes_table_columns(self, setup_path):
with ensure_clean_store(setup_path) as store:
dtypes = [("date", datetime.date(2001, 1, 2))]
# currently not supported dtypes ####
for n, f in dtypes:
df = tm.makeDataFrame()
df[n] = f
with pytest.raises(TypeError):
store.append("df1_{n}".format(n=n), df)
# frame
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["datetime1"] = datetime.date(2001, 1, 2)
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
            # this fails because we have a date in the object block
with pytest.raises(TypeError):
store.append("df_unimplemented", df)
@td.xfail_non_writeable
@pytest.mark.skipif(
LooseVersion(np.__version__) == LooseVersion("1.15.0"),
reason=(
"Skipping pytables test when numpy version is "
"exactly equal to 1.15.0: gh-22098"
),
)
def test_calendar_roundtrip_issue(self, setup_path):
# 8591
# doc example from tseries holiday section
weekmask_egypt = "Sun Mon Tue Wed Thu"
holidays = [
"2012-05-01",
datetime.datetime(2013, 5, 1),
np.datetime64("2014-05-01"),
]
bday_egypt = pd.offsets.CustomBusinessDay(
holidays=holidays, weekmask=weekmask_egypt
)
dt = datetime.datetime(2013, 4, 30)
dts = date_range(dt, periods=5, freq=bday_egypt)
s = Series(dts.weekday, dts).map(Series("Mon Tue Wed Thu Fri Sat Sun".split()))
with ensure_clean_store(setup_path) as store:
store.put("fixed", s)
result = store.select("fixed")
tm.assert_series_equal(result, s)
store.append("table", s)
result = store.select("table")
tm.assert_series_equal(result, s)
def test_roundtrip_tz_aware_index(self, setup_path):
# GH 17618
time = pd.Timestamp("2000-01-01 01:00:00", tz="US/Eastern")
df = pd.DataFrame(data=[0], index=[time])
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="fixed")
recons = store["frame"]
tm.assert_frame_equal(recons, df)
assert recons.index[0].value == 946706400000000000
def test_append_with_timedelta(self, setup_path):
# GH 3577
# append timedelta
df = DataFrame(
dict(
A=Timestamp("20130101"),
B=[
Timestamp("20130101") + timedelta(days=i, seconds=10)
for i in range(10)
],
)
)
df["C"] = df["A"] - df["B"]
df.loc[3:5, "C"] = np.nan
with ensure_clean_store(setup_path) as store:
# table
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
result = store.select("df")
tm.assert_frame_equal(result, df)
result = store.select("df", where="C<100000")
tm.assert_frame_equal(result, df)
result = store.select("df", where="C<pd.Timedelta('-3D')")
tm.assert_frame_equal(result, df.iloc[3:])
result = store.select("df", "C<'-3D'")
tm.assert_frame_equal(result, df.iloc[3:])
# a bit hacky here as we don't really deal with the NaT properly
result = store.select("df", "C<'-500000s'")
result = result.dropna(subset=["C"])
tm.assert_frame_equal(result, df.iloc[6:])
result = store.select("df", "C<'-3.5D'")
result = result.iloc[1:]
tm.assert_frame_equal(result, df.iloc[4:])
# fixed
_maybe_remove(store, "df2")
store.put("df2", df)
result = store.select("df2")
tm.assert_frame_equal(result, df)
def test_remove(self, setup_path):
with ensure_clean_store(setup_path) as store:
ts = tm.makeTimeSeries()
df = tm.makeDataFrame()
store["a"] = ts
store["b"] = df
_maybe_remove(store, "a")
assert len(store) == 1
tm.assert_frame_equal(df, store["b"])
_maybe_remove(store, "b")
assert len(store) == 0
# nonexistence
with pytest.raises(
KeyError, match="'No object named a_nonexistent_store in the file'"
):
store.remove("a_nonexistent_store")
# pathing
store["a"] = ts
store["b/foo"] = df
_maybe_remove(store, "foo")
_maybe_remove(store, "b/foo")
assert len(store) == 1
store["a"] = ts
store["b/foo"] = df
_maybe_remove(store, "b")
assert len(store) == 1
# __delitem__
store["a"] = ts
store["b"] = df
del store["a"]
del store["b"]
assert len(store) == 0
def test_invalid_terms(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df.loc[0:4, "string"] = "bar"
store.put("df", df, format="table")
# some invalid terms
with pytest.raises(TypeError):
Term()
# more invalid
with pytest.raises(ValueError):
store.select("df", "df.index[3]")
with pytest.raises(SyntaxError):
store.select("df", "index>")
# from the docs
with ensure_clean_path(setup_path) as path:
dfq = DataFrame(
np.random.randn(10, 4),
columns=list("ABCD"),
index=date_range("20130101", periods=10),
)
dfq.to_hdf(path, "dfq", format="table", data_columns=True)
# check ok
read_hdf(
path, "dfq", where="index>Timestamp('20130104') & columns=['A', 'B']"
)
read_hdf(path, "dfq", where="A>0 or C>0")
# catch the invalid reference
with ensure_clean_path(setup_path) as path:
dfq = DataFrame(
np.random.randn(10, 4),
columns=list("ABCD"),
index=date_range("20130101", periods=10),
)
dfq.to_hdf(path, "dfq", format="table")
with pytest.raises(ValueError):
read_hdf(path, "dfq", where="A>0 or C>0")
def test_same_name_scoping(self, setup_path):
with ensure_clean_store(setup_path) as store:
import pandas as pd
df = DataFrame(
np.random.randn(20, 2), index=pd.date_range("20130101", periods=20)
)
store.put("df", df, format="table")
expected = df[df.index > pd.Timestamp("20130105")]
import datetime # noqa
result = store.select("df", "index>datetime.datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
from datetime import datetime # noqa
# technically an error, but allow it
result = store.select("df", "index>datetime.datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
result = store.select("df", "index>datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
def test_series(self, setup_path):
s = tm.makeStringSeries()
self._check_roundtrip(s, tm.assert_series_equal, path=setup_path)
ts = tm.makeTimeSeries()
self._check_roundtrip(ts, tm.assert_series_equal, path=setup_path)
ts2 = Series(ts.index, Index(ts.index, dtype=object))
self._check_roundtrip(ts2, tm.assert_series_equal, path=setup_path)
ts3 = Series(ts.values, Index(np.asarray(ts.index, dtype=object), dtype=object))
self._check_roundtrip(
ts3, tm.assert_series_equal, path=setup_path, check_index_type=False
)
def test_float_index(self, setup_path):
# GH #454
index = np.random.randn(10)
s = Series(np.random.randn(10), index=index)
self._check_roundtrip(s, tm.assert_series_equal, path=setup_path)
@td.xfail_non_writeable
def test_tuple_index(self, setup_path):
# GH #492
col = np.arange(10)
idx = [(0.0, 1.0), (2.0, 3.0), (4.0, 5.0)]
data = np.random.randn(30).reshape((3, 10))
DF = DataFrame(data, index=idx, columns=col)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
self._check_roundtrip(DF, tm.assert_frame_equal, path=setup_path)
@td.xfail_non_writeable
@pytest.mark.filterwarnings("ignore::pandas.errors.PerformanceWarning")
def test_index_types(self, setup_path):
with catch_warnings(record=True):
values = np.random.randn(2)
func = lambda l, r: tm.assert_series_equal(
l, r, check_dtype=True, check_index_type=True, check_series_type=True
)
with catch_warnings(record=True):
ser = Series(values, [0, "y"])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, [datetime.datetime.today(), 0])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, ["y", 0])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, [datetime.date.today(), "a"])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, [0, "y"])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [datetime.datetime.today(), 0])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, ["y", 0])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [datetime.date.today(), "a"])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [1.23, "b"])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [1, 1.53])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [1, 5])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(
values, [datetime.datetime(2012, 1, 1), datetime.datetime(2012, 1, 2)]
)
self._check_roundtrip(ser, func, path=setup_path)
def test_timeseries_preepoch(self, setup_path):
dr = bdate_range("1/1/1940", "1/1/1960")
ts = Series(np.random.randn(len(dr)), index=dr)
try:
self._check_roundtrip(ts, tm.assert_series_equal, path=setup_path)
except OverflowError:
pytest.skip("known failer on some windows platforms")
@td.xfail_non_writeable
@pytest.mark.parametrize(
"compression", [False, pytest.param(True, marks=td.skip_if_windows_python_3)]
)
def test_frame(self, compression, setup_path):
df = tm.makeDataFrame()
# put in some random NAs
df.values[0, 0] = np.nan
df.values[5, 3] = np.nan
self._check_roundtrip_table(
df, tm.assert_frame_equal, path=setup_path, compression=compression
)
self._check_roundtrip(
df, tm.assert_frame_equal, path=setup_path, compression=compression
)
tdf = tm.makeTimeDataFrame()
self._check_roundtrip(
tdf, tm.assert_frame_equal, path=setup_path, compression=compression
)
with ensure_clean_store(setup_path) as store:
            # write a not-consolidated frame; it should come back consolidated
df["foo"] = np.random.randn(len(df))
store["df"] = df
recons = store["df"]
assert recons._data.is_consolidated()
# empty
self._check_roundtrip(df[:0], tm.assert_frame_equal, path=setup_path)
@td.xfail_non_writeable
def test_empty_series_frame(self, setup_path):
s0 = Series(dtype=object)
s1 = Series(name="myseries", dtype=object)
df0 = DataFrame()
df1 = DataFrame(index=["a", "b", "c"])
df2 = DataFrame(columns=["d", "e", "f"])
self._check_roundtrip(s0, tm.assert_series_equal, path=setup_path)
self._check_roundtrip(s1, tm.assert_series_equal, path=setup_path)
self._check_roundtrip(df0, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(df1, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(df2, tm.assert_frame_equal, path=setup_path)
@td.xfail_non_writeable
@pytest.mark.parametrize(
"dtype", [np.int64, np.float64, np.object, "m8[ns]", "M8[ns]"]
)
def test_empty_series(self, dtype, setup_path):
s = Series(dtype=dtype)
self._check_roundtrip(s, tm.assert_series_equal, path=setup_path)
def test_can_serialize_dates(self, setup_path):
rng = [x.date() for x in bdate_range("1/1/2000", "1/30/2000")]
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
self._check_roundtrip(frame, tm.assert_frame_equal, path=setup_path)
def test_store_hierarchical(self, setup_path):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo", "bar"],
)
frame = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
self._check_roundtrip(frame, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(frame.T, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(frame["A"], tm.assert_series_equal, path=setup_path)
# check that the names are stored
with ensure_clean_store(setup_path) as store:
store["frame"] = frame
recons = store["frame"]
tm.assert_frame_equal(recons, frame)
def test_store_index_name(self, setup_path):
df = tm.makeDataFrame()
df.index.name = "foo"
with ensure_clean_store(setup_path) as store:
store["frame"] = df
recons = store["frame"]
tm.assert_frame_equal(recons, df)
def test_store_index_name_with_tz(self, setup_path):
# GH 13884
df = pd.DataFrame({"A": [1, 2]})
df.index = pd.DatetimeIndex([1234567890123456787, 1234567890123456788])
df.index = df.index.tz_localize("UTC")
df.index.name = "foo"
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="table")
recons = store["frame"]
tm.assert_frame_equal(recons, df)
@pytest.mark.parametrize("table_format", ["table", "fixed"])
def test_store_index_name_numpy_str(self, table_format, setup_path):
# GH #13492
idx = pd.Index(
pd.to_datetime([datetime.date(2000, 1, 1), datetime.date(2000, 1, 2)]),
name="cols\u05d2",
)
idx1 = pd.Index(
pd.to_datetime([datetime.date(2010, 1, 1), datetime.date(2010, 1, 2)]),
name="rows\u05d0",
)
df = pd.DataFrame(np.arange(4).reshape(2, 2), columns=idx, index=idx1)
# This used to fail, returning numpy strings instead of python strings.
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", format=table_format)
df2 = read_hdf(path, "df")
tm.assert_frame_equal(df, df2, check_names=True)
assert type(df2.index.name) == str
assert type(df2.columns.name) == str
def test_store_series_name(self, setup_path):
df = tm.makeDataFrame()
series = df["A"]
with ensure_clean_store(setup_path) as store:
store["series"] = series
recons = store["series"]
tm.assert_series_equal(recons, series)
@td.xfail_non_writeable
@pytest.mark.parametrize(
"compression", [False, pytest.param(True, marks=td.skip_if_windows_python_3)]
)
def test_store_mixed(self, compression, setup_path):
def _make_one():
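            # mixed-dtype frame (strings, bools, ints), consolidated into blocks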
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["int1"] = 1
df["int2"] = 2
return df._consolidate()
df1 = _make_one()
df2 = _make_one()
self._check_roundtrip(df1, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(df2, tm.assert_frame_equal, path=setup_path)
with ensure_clean_store(setup_path) as store:
store["obj"] = df1
tm.assert_frame_equal(store["obj"], df1)
store["obj"] = df2
tm.assert_frame_equal(store["obj"], df2)
# check that can store Series of all of these types
self._check_roundtrip(
df1["obj1"],
tm.assert_series_equal,
path=setup_path,
compression=compression,
)
self._check_roundtrip(
df1["bool1"],
tm.assert_series_equal,
path=setup_path,
compression=compression,
)
self._check_roundtrip(
df1["int1"],
tm.assert_series_equal,
path=setup_path,
compression=compression,
)
@pytest.mark.filterwarnings(
"ignore:\\nduplicate:pandas.io.pytables.DuplicateWarning"
)
def test_select_with_dups(self, setup_path):
# single dtypes
df = DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"])
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=["A"])
expected = df.loc[:, ["A"]]
tm.assert_frame_equal(result, expected)
# dups across dtypes
df = concat(
[
DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"]),
DataFrame(
np.random.randint(0, 10, size=20).reshape(10, 2), columns=["A", "C"]
),
],
axis=1,
)
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["A"]]
result = store.select("df", columns=["A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["B", "A"]]
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
# duplicates on both index and columns
with ensure_clean_store(setup_path) as store:
store.append("df", df)
store.append("df", df)
expected = df.loc[:, ["B", "A"]]
expected = concat([expected, expected])
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
def test_overwrite_node(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeDataFrame()
ts = tm.makeTimeSeries()
store["a"] = ts
tm.assert_series_equal(store["a"], ts)
def test_select(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
# select with columns=
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df")
store.append("df", df)
result = store.select("df", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# equivalently
result = store.select("df", [("columns=['A', 'B']")])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
                # all columns as data columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column, but different columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["C", "D"])
expected = df[df.A > 0].reindex(columns=["C", "D"])
tm.assert_frame_equal(expected, result)
def test_select_dtypes(self, setup_path):
with ensure_clean_store(setup_path) as store:
# with a Timestamp data column (GH #2637)
df = DataFrame(
dict(ts=bdate_range("2012-01-01", periods=300), A=np.random.randn(300))
)
_maybe_remove(store, "df")
store.append("df", df, data_columns=["ts", "A"])
result = store.select("df", "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp("2012-02-01")]
tm.assert_frame_equal(expected, result)
# bool columns (GH #2849)
df = DataFrame(np.random.randn(5, 2), columns=["A", "B"])
df["object"] = "foo"
df.loc[4:5, "object"] = "bar"
df["boolv"] = df["A"] > 0
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
expected = df[df.boolv == True].reindex(columns=["A", "boolv"]) # noqa
for v in [True, "true", 1]:
result = store.select(
"df", "boolv == {v!s}".format(v=v), columns=["A", "boolv"]
)
tm.assert_frame_equal(expected, result)
expected = df[df.boolv == False].reindex(columns=["A", "boolv"]) # noqa
for v in [False, "false", 0]:
result = store.select(
"df", "boolv == {v!s}".format(v=v), columns=["A", "boolv"]
)
tm.assert_frame_equal(expected, result)
# integer index
df = DataFrame(dict(A=np.random.rand(20), B=np.random.rand(20)))
_maybe_remove(store, "df_int")
store.append("df_int", df)
result = store.select("df_int", "index<10 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=["A"])
tm.assert_frame_equal(expected, result)
# float index
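            # note: the 'index' key below becomes a data column named 'index', not the frame index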
df = DataFrame(
dict(
A=np.random.rand(20),
B=np.random.rand(20),
index=np.arange(20, dtype="f8"),
)
)
_maybe_remove(store, "df_float")
store.append("df_float", df)
result = store.select("df_float", "index<10.0 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=["A"])
tm.assert_frame_equal(expected, result)
with ensure_clean_store(setup_path) as store:
# floats w/o NaN
df = DataFrame(dict(cols=range(11), values=range(11)), dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
store.append("df1", df, data_columns=True)
result = store.select("df1", where="values>2.0")
expected = df[df["values"] > 2.0]
tm.assert_frame_equal(expected, result)
# floats with NaN
df.iloc[0] = np.nan
expected = df[df["values"] > 2.0]
store.append("df2", df, data_columns=True, index=False)
result = store.select("df2", where="values>2.0")
tm.assert_frame_equal(expected, result)
# https://github.com/PyTables/PyTables/issues/282
# bug in selection when 0th row has a np.nan and an index
# store.append('df3',df,data_columns=True)
# result = store.select(
# 'df3', where='values>2.0')
# tm.assert_frame_equal(expected, result)
# not in first position float with NaN ok too
df = DataFrame(dict(cols=range(11), values=range(11)), dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
df.iloc[1] = np.nan
expected = df[df["values"] > 2.0]
store.append("df4", df, data_columns=True)
result = store.select("df4", where="values>2.0")
tm.assert_frame_equal(expected, result)
# test selection with comparison against numpy scalar
# GH 11283
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
expected = df[df["A"] > 0]
store.append("df", df, data_columns=True)
np_zero = np.float64(0) # noqa
result = store.select("df", where=["A>np_zero"])
tm.assert_frame_equal(expected, result)
def test_select_with_many_inputs(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
dict(
ts=bdate_range("2012-01-01", periods=300),
A=np.random.randn(300),
B=range(300),
users=["a"] * 50
+ ["b"] * 50
+ ["c"] * 100
+ ["a{i:03d}".format(i=i) for i in range(100)],
)
)
_maybe_remove(store, "df")
store.append("df", df, data_columns=["ts", "A", "B", "users"])
# regular select
result = store.select("df", "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp("2012-02-01")]
tm.assert_frame_equal(expected, result)
# small selector
result = store.select(
"df", "ts>=Timestamp('2012-02-01') & users=['a','b','c']"
)
expected = df[
(df.ts >= Timestamp("2012-02-01")) & df.users.isin(["a", "b", "c"])
]
tm.assert_frame_equal(expected, result)
            # big selector along a data column (users)
selector = ["a", "b", "c"] + ["a{i:03d}".format(i=i) for i in range(60)]
result = store.select(
"df", "ts>=Timestamp('2012-02-01') and users=selector"
)
expected = df[(df.ts >= Timestamp("2012-02-01")) & df.users.isin(selector)]
tm.assert_frame_equal(expected, result)
selector = range(100, 200)
result = store.select("df", "B=selector")
expected = df[df.B.isin(selector)]
tm.assert_frame_equal(expected, result)
assert len(result) == 100
# big selector along the index
selector = Index(df.ts[0:100].values)
result = store.select("df", "ts=selector")
expected = df[df.ts.isin(selector.values)]
tm.assert_frame_equal(expected, result)
assert len(result) == 100
def test_select_iterator(self, setup_path):
# single table
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame(500)
_maybe_remove(store, "df")
store.append("df", df)
expected = store.select("df")
results = list(store.select("df", iterator=True))
result = concat(results)
tm.assert_frame_equal(expected, result)
results = list(store.select("df", chunksize=100))
assert len(results) == 5
result = concat(results)
tm.assert_frame_equal(expected, result)
results = list(store.select("df", chunksize=150))
result = concat(results)
tm.assert_frame_equal(result, expected)
with ensure_clean_path(setup_path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, "df_non_table")
with pytest.raises(TypeError):
read_hdf(path, "df_non_table", chunksize=100)
with pytest.raises(TypeError):
read_hdf(path, "df_non_table", iterator=True)
with ensure_clean_path(setup_path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, "df", format="table")
results = list(read_hdf(path, "df", chunksize=100))
result = concat(results)
assert len(results) == 5
tm.assert_frame_equal(result, df)
tm.assert_frame_equal(result, read_hdf(path, "df"))
# multiple
with ensure_clean_store(setup_path) as store:
df1 = tm.makeTimeDataFrame(500)
store.append("df1", df1, data_columns=True)
df2 = tm.makeTimeDataFrame(500).rename(columns="{}_2".format)
df2["foo"] = "bar"
store.append("df2", df2)
df = concat([df1, df2], axis=1)
# full selection
expected = store.select_as_multiple(["df1", "df2"], selector="df1")
results = list(
store.select_as_multiple(["df1", "df2"], selector="df1", chunksize=150)
)
result = concat(results)
tm.assert_frame_equal(expected, result)
def test_select_iterator_complete_8014(self, setup_path):
# GH 8014
# using iterator and where clause
chunksize = 1e4
# no iterator
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[-1]
# select w/o iteration and no where clause works
result = store.select("df")
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, single term, begin
# of range, works
where = "index >= '{beg_dt}'".format(beg_dt=beg_dt)
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, single term, end
# of range, works
where = "index <= '{end_dt}'".format(end_dt=end_dt)
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, inclusive range,
# works
where = "index >= '{beg_dt}' & index <= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# with iterator, full range
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[-1]
# select w/iterator and no where clause works
results = list(store.select("df", chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, single term, begin of range
where = "index >= '{beg_dt}'".format(beg_dt=beg_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, single term, end of range
where = "index <= '{end_dt}'".format(end_dt=end_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, inclusive range
where = "index >= '{beg_dt}' & index <= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
def test_select_iterator_non_complete_8014(self, setup_path):
# GH 8014
# using iterator and where clause
chunksize = 1e4
# with iterator, non complete range
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[1]
end_dt = expected.index[-2]
# select w/iterator and where clause, single term, begin of range
where = "index >= '{beg_dt}'".format(beg_dt=beg_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index >= beg_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, single term, end of range
where = "index <= '{end_dt}'".format(end_dt=end_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index <= end_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, inclusive range
where = "index >= '{beg_dt}' & index <= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[
(expected.index >= beg_dt) & (expected.index <= end_dt)
]
tm.assert_frame_equal(rexpected, result)
# with iterator, empty where
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
end_dt = expected.index[-1]
# select w/iterator and where clause, single term, begin of range
where = "index > '{end_dt}'".format(end_dt=end_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
assert 0 == len(results)
def test_select_iterator_many_empty_frames(self, setup_path):
# GH 8014
# using iterator and where clause can return many empty
# frames.
chunksize = int(1e4)
# with iterator, range limited to the first chunk
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100000, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[chunksize - 1]
# select w/iterator and where clause, single term, begin of range
where = "index >= '{beg_dt}'".format(beg_dt=beg_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index >= beg_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, single term, end of range
where = "index <= '{end_dt}'".format(end_dt=end_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
assert len(results) == 1
result = concat(results)
rexpected = expected[expected.index <= end_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, inclusive range
where = "index >= '{beg_dt}' & index <= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
results = list(store.select("df", where=where, chunksize=chunksize))
            # the inclusive range fits in a single chunk, so exactly one frame is returned
assert len(results) == 1
result = concat(results)
rexpected = expected[
(expected.index >= beg_dt) & (expected.index <= end_dt)
]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause which selects
# *nothing*.
#
# To be consistent with Python idiom I suggest this should
# return [] e.g. `for e in []: print True` never prints
# True.
where = "index <= '{beg_dt}' & index >= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
results = list(store.select("df", where=where, chunksize=chunksize))
# should be []
assert len(results) == 0
@pytest.mark.filterwarnings(
"ignore:\\nthe :pandas.io.pytables.AttributeConflictWarning"
)
def test_retain_index_attributes(self, setup_path):
# GH 3499, losing frequency info on index recreation
df = DataFrame(
dict(A=Series(range(3), index=date_range("2000-1-1", periods=3, freq="H")))
)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "data")
store.put("data", df, format="table")
result = store.get("data")
tm.assert_frame_equal(df, result)
for attr in ["freq", "tz", "name"]:
for idx in ["index", "columns"]:
assert getattr(getattr(df, idx), attr, None) == getattr(
getattr(result, idx), attr, None
)
# try to append a table with a different frequency
with catch_warnings(record=True):
df2 = DataFrame(
dict(
A=Series(
range(3), index=date_range("2002-1-1", periods=3, freq="D")
)
)
)
store.append("data", df2)
assert store.get_storer("data").info["index"]["freq"] is None
# this is ok
_maybe_remove(store, "df2")
df2 = DataFrame(
dict(
A=Series(
range(3),
index=[
Timestamp("20010101"),
Timestamp("20010102"),
Timestamp("20020101"),
],
)
)
)
store.append("df2", df2)
df3 = DataFrame(
dict(
A=Series(
range(3), index=date_range("2002-1-1", periods=3, freq="D")
)
)
)
store.append("df2", df3)
@pytest.mark.filterwarnings(
"ignore:\\nthe :pandas.io.pytables.AttributeConflictWarning"
)
def test_retain_index_attributes2(self, setup_path):
with ensure_clean_path(setup_path) as path:
with catch_warnings(record=True):
df = DataFrame(
dict(
A=Series(
range(3), index=date_range("2000-1-1", periods=3, freq="H")
)
)
)
df.to_hdf(path, "data", mode="w", append=True)
df2 = DataFrame(
dict(
A=Series(
range(3), index=date_range("2002-1-1", periods=3, freq="D")
)
)
)
df2.to_hdf(path, "data", append=True)
idx = date_range("2000-1-1", periods=3, freq="H")
idx.name = "foo"
df = DataFrame(dict(A=Series(range(3), index=idx)))
df.to_hdf(path, "data", mode="w", append=True)
assert read_hdf(path, "data").index.name == "foo"
with catch_warnings(record=True):
idx2 = date_range("2001-1-1", periods=3, freq="H")
idx2.name = "bar"
df2 = DataFrame(dict(A=Series(range(3), index=idx2)))
df2.to_hdf(path, "data", append=True)
assert read_hdf(path, "data").index.name is None
def test_frame_select(self, setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="table")
date = df.index[len(df) // 2]
crit1 = Term("index>=date")
assert crit1.env.scope["date"] == date
crit2 = "columns=['A', 'D']"
crit3 = "columns=A"
result = store.select("frame", [crit1, crit2])
expected = df.loc[date:, ["A", "D"]]
tm.assert_frame_equal(result, expected)
result = store.select("frame", [crit3])
expected = df.loc[:, ["A"]]
tm.assert_frame_equal(result, expected)
# invalid terms
df = tm.makeTimeDataFrame()
store.append("df_time", df)
with pytest.raises(ValueError):
store.select("df_time", "index>0")
# can't select if not written as table
# store['frame'] = df
# with pytest.raises(ValueError):
# store.select('frame', [crit1, crit2])
def test_frame_select_complex(self, setup_path):
# select via complex criteria
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df.loc[df.index[0:4], "string"] = "bar"
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table", data_columns=["string"])
# empty
result = store.select("df", 'index>df.index[3] & string="bar"')
expected = df.loc[(df.index > df.index[3]) & (df.string == "bar")]
tm.assert_frame_equal(result, expected)
result = store.select("df", 'index>df.index[3] & string="foo"')
expected = df.loc[(df.index > df.index[3]) & (df.string == "foo")]
tm.assert_frame_equal(result, expected)
# or
result = store.select("df", 'index>df.index[3] | string="bar"')
expected = df.loc[(df.index > df.index[3]) | (df.string == "bar")]
tm.assert_frame_equal(result, expected)
result = store.select(
"df", "(index>df.index[3] & " 'index<=df.index[6]) | string="bar"'
)
expected = df.loc[
((df.index > df.index[3]) & (df.index <= df.index[6]))
| (df.string == "bar")
]
tm.assert_frame_equal(result, expected)
# invert
result = store.select("df", 'string!="bar"')
expected = df.loc[df.string != "bar"]
tm.assert_frame_equal(result, expected)
# invert not implemented in numexpr :(
with pytest.raises(NotImplementedError):
store.select("df", '~(string="bar")')
# invert ok for filters
result = store.select("df", "~(columns=['A','B'])")
expected = df.loc[:, df.columns.difference(["A", "B"])]
tm.assert_frame_equal(result, expected)
# in
result = store.select("df", "index>df.index[3] & columns in ['A','B']")
expected = df.loc[df.index > df.index[3]].reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_frame_select_complex2(self, setup_path):
with ensure_clean_path(["parms.hdf", "hist.hdf"]) as paths:
pp, hh = paths
# use non-trivial selection criteria
parms = DataFrame({"A": [1, 1, 2, 2, 3]})
parms.to_hdf(pp, "df", mode="w", format="table", data_columns=["A"])
selection = read_hdf(pp, "df", where="A=[2,3]")
hist = DataFrame(
np.random.randn(25, 1),
columns=["data"],
index=MultiIndex.from_tuples(
[(i, j) for i in range(5) for j in range(5)], names=["l1", "l2"]
),
)
hist.to_hdf(hh, "df", mode="w", format="table")
expected = read_hdf(hh, "df", where="l1=[2, 3, 4]")
# scope with list like
l = selection.index.tolist() # noqa
store = HDFStore(hh)
result = store.select("df", where="l1=l")
tm.assert_frame_equal(result, expected)
store.close()
result = read_hdf(hh, "df", where="l1=l")
tm.assert_frame_equal(result, expected)
# index
index = selection.index # noqa
result = read_hdf(hh, "df", where="l1=index")
tm.assert_frame_equal(result, expected)
result = read_hdf(hh, "df", where="l1=selection.index")
tm.assert_frame_equal(result, expected)
result = read_hdf(hh, "df", where="l1=selection.index.tolist()")
tm.assert_frame_equal(result, expected)
result = read_hdf(hh, "df", where="l1=list(selection.index)")
tm.assert_frame_equal(result, expected)
# scope with index
store = HDFStore(hh)
result = store.select("df", where="l1=index")
tm.assert_frame_equal(result, expected)
result = store.select("df", where="l1=selection.index")
tm.assert_frame_equal(result, expected)
result = store.select("df", where="l1=selection.index.tolist()")
tm.assert_frame_equal(result, expected)
result = store.select("df", where="l1=list(selection.index)")
tm.assert_frame_equal(result, expected)
store.close()
def test_invalid_filtering(self, setup_path):
# can't use more than one filter (atm)
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table")
# not implemented
with pytest.raises(NotImplementedError):
store.select("df", "columns=['A'] | columns=['B']")
# in theory we could deal with this
with pytest.raises(NotImplementedError):
store.select("df", "columns=['A','B'] & columns=['C']")
def test_string_select(self, setup_path):
# GH 2973
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
# test string ==/!=
df["x"] = "none"
df.loc[2:7, "x"] = ""
store.append("df", df, data_columns=["x"])
result = store.select("df", "x=none")
expected = df[df.x == "none"]
tm.assert_frame_equal(result, expected)
result = store.select("df", "x!=none")
expected = df[df.x != "none"]
tm.assert_frame_equal(result, expected)
df2 = df.copy()
df2.loc[df2.x == "", "x"] = np.nan
store.append("df2", df2, data_columns=["x"])
result = store.select("df2", "x!=none")
expected = df2[isna(df2.x)]
tm.assert_frame_equal(result, expected)
# int ==/!=
df["int"] = 1
df.loc[2:7, "int"] = 2
store.append("df3", df, data_columns=["int"])
result = store.select("df3", "int=2")
expected = df[df.int == 2]
tm.assert_frame_equal(result, expected)
result = store.select("df3", "int!=2")
expected = df[df.int != 2]
tm.assert_frame_equal(result, expected)
def test_read_column(self, setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
# GH 17912
# HDFStore.select_column should raise a KeyError
# exception if the key is not a valid store
with pytest.raises(KeyError, match="No object named df in the file"):
store.select_column("df", "index")
store.append("df", df)
# error
with pytest.raises(
KeyError, match=re.escape("'column [foo] not found in the table'")
):
store.select_column("df", "foo")
with pytest.raises(Exception):
store.select_column("df", "index", where=["index>5"])
# valid
result = store.select_column("df", "index")
tm.assert_almost_equal(result.values, Series(df.index).values)
assert isinstance(result, Series)
# not a data indexable column
with pytest.raises(ValueError):
store.select_column("df", "values_block_0")
# a data column
df2 = df.copy()
df2["string"] = "foo"
store.append("df2", df2, data_columns=["string"])
result = store.select_column("df2", "string")
tm.assert_almost_equal(result.values, df2["string"].values)
# a data column with NaNs, result excludes the NaNs
df3 = df.copy()
df3["string"] = "foo"
df3.loc[4:6, "string"] = np.nan
store.append("df3", df3, data_columns=["string"])
result = store.select_column("df3", "string")
tm.assert_almost_equal(result.values, df3["string"].values)
# start/stop
result = store.select_column("df3", "string", start=2)
tm.assert_almost_equal(result.values, df3["string"].values[2:])
result = store.select_column("df3", "string", start=-2)
tm.assert_almost_equal(result.values, df3["string"].values[-2:])
result = store.select_column("df3", "string", stop=2)
tm.assert_almost_equal(result.values, df3["string"].values[:2])
result = store.select_column("df3", "string", stop=-2)
tm.assert_almost_equal(result.values, df3["string"].values[:-2])
result = store.select_column("df3", "string", start=2, stop=-2)
tm.assert_almost_equal(result.values, df3["string"].values[2:-2])
result = store.select_column("df3", "string", start=-2, stop=2)
tm.assert_almost_equal(result.values, df3["string"].values[-2:2])
# GH 10392 - make sure column name is preserved
df4 = DataFrame({"A": np.random.randn(10), "B": "foo"})
store.append("df4", df4, data_columns=True)
expected = df4["B"]
result = store.select_column("df4", "B")
tm.assert_series_equal(result, expected)
def test_coordinates(self, setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
store.append("df", df)
# all
c = store.select_as_coordinates("df")
assert (c.values == np.arange(len(df.index))).all()
# get coordinates back & test vs frame
_maybe_remove(store, "df")
df = DataFrame(dict(A=range(5), B=range(5)))
store.append("df", df)
c = store.select_as_coordinates("df", ["index<3"])
assert (c.values == np.arange(3)).all()
result = store.select("df", where=c)
expected = df.loc[0:2, :]
tm.assert_frame_equal(result, expected)
c = store.select_as_coordinates("df", ["index>=3", "index<=4"])
assert (c.values == np.arange(2) + 3).all()
result = store.select("df", where=c)
expected = df.loc[3:4, :]
tm.assert_frame_equal(result, expected)
assert isinstance(c, Index)
# multiple tables
_maybe_remove(store, "df1")
_maybe_remove(store, "df2")
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns="{}_2".format)
store.append("df1", df1, data_columns=["A", "B"])
store.append("df2", df2)
c = store.select_as_coordinates("df1", ["A>0", "B>0"])
df1_result = store.select("df1", c)
df2_result = store.select("df2", c)
result = concat([df1_result, df2_result], axis=1)
expected = concat([df1, df2], axis=1)
expected = expected[(expected.A > 0) & (expected.B > 0)]
tm.assert_frame_equal(result, expected)
# pass array/mask as the coordinates
with ensure_clean_store(setup_path) as store:
df = DataFrame(
np.random.randn(1000, 2), index=date_range("20000101", periods=1000)
)
store.append("df", df)
c = store.select_column("df", "index")
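            # c's integer index gives row positions; keep those whose timestamp falls in May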
where = c[DatetimeIndex(c).month == 5].index
expected = df.iloc[where]
# locations
result = store.select("df", where=where)
tm.assert_frame_equal(result, expected)
# boolean
result = store.select("df", where=where)
tm.assert_frame_equal(result, expected)
# invalid
with pytest.raises(ValueError):
store.select("df", where=np.arange(len(df), dtype="float64"))
with pytest.raises(ValueError):
store.select("df", where=np.arange(len(df) + 1))
with pytest.raises(ValueError):
store.select("df", where=np.arange(len(df)), start=5)
with pytest.raises(ValueError):
store.select("df", where=np.arange(len(df)), start=5, stop=10)
# selection with filter
selection = date_range("20000101", periods=500)
result = store.select("df", where="index in selection")
expected = df[df.index.isin(selection)]
tm.assert_frame_equal(result, expected)
# list
df = DataFrame(np.random.randn(10, 2))
store.append("df2", df)
result = store.select("df2", where=[0, 3, 5])
expected = df.iloc[[0, 3, 5]]
tm.assert_frame_equal(result, expected)
# boolean
where = [True] * 10
where[-2] = False
result = store.select("df2", where=where)
expected = df.loc[where]
tm.assert_frame_equal(result, expected)
# start/stop
result = store.select("df2", start=5, stop=10)
expected = df[5:10]
tm.assert_frame_equal(result, expected)
def test_append_to_multiple(self, setup_path):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns="{}_2".format)
df2["foo"] = "bar"
df = concat([df1, df2], axis=1)
with ensure_clean_store(setup_path) as store:
# exceptions
with pytest.raises(ValueError):
store.append_to_multiple(
{"df1": ["A", "B"], "df2": None}, df, selector="df3"
)
with pytest.raises(ValueError):
store.append_to_multiple({"df1": None, "df2": None}, df, selector="df3")
with pytest.raises(ValueError):
store.append_to_multiple("df1", df, "df1")
# regular operation
store.append_to_multiple(
{"df1": ["A", "B"], "df2": None}, df, selector="df1"
)
result = store.select_as_multiple(
["df1", "df2"], where=["A>0", "B>0"], selector="df1"
)
expected = df[(df.A > 0) & (df.B > 0)]
tm.assert_frame_equal(result, expected)
def test_append_to_multiple_dropna(self, setup_path):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns="{}_2".format)
df1.iloc[1, df1.columns.get_indexer(["A", "B"])] = np.nan
df = concat([df1, df2], axis=1)
with ensure_clean_store(setup_path) as store:
# dropna=True should guarantee rows are synchronized
store.append_to_multiple(
{"df1": ["A", "B"], "df2": None}, df, selector="df1", dropna=True
)
result = store.select_as_multiple(["df1", "df2"])
expected = df.dropna()
tm.assert_frame_equal(result, expected)
tm.assert_index_equal(store.select("df1").index, store.select("df2").index)
@pytest.mark.xfail(
        run=False, reason="append_to_multiple with dropna=False is not raising as expected"
)
def test_append_to_multiple_dropna_false(self, setup_path):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns="{}_2".format)
df1.iloc[1, df1.columns.get_indexer(["A", "B"])] = np.nan
df = concat([df1, df2], axis=1)
with ensure_clean_store(setup_path) as store:
# dropna=False shouldn't synchronize row indexes
store.append_to_multiple(
{"df1a": ["A", "B"], "df2a": None}, df, selector="df1a", dropna=False
)
with pytest.raises(ValueError):
store.select_as_multiple(["df1a", "df2a"])
assert not store.select("df1a").index.equals(store.select("df2a").index)
def test_select_as_multiple(self, setup_path):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns="{}_2".format)
df2["foo"] = "bar"
with ensure_clean_store(setup_path) as store:
# no tables stored
with pytest.raises(Exception):
store.select_as_multiple(None, where=["A>0", "B>0"], selector="df1")
store.append("df1", df1, data_columns=["A", "B"])
store.append("df2", df2)
# exceptions
with pytest.raises(Exception):
store.select_as_multiple(None, where=["A>0", "B>0"], selector="df1")
with pytest.raises(Exception):
store.select_as_multiple([None], where=["A>0", "B>0"], selector="df1")
msg = "'No object named df3 in the file'"
with pytest.raises(KeyError, match=msg):
store.select_as_multiple(
["df1", "df3"], where=["A>0", "B>0"], selector="df1"
)
with pytest.raises(KeyError, match=msg):
store.select_as_multiple(["df3"], where=["A>0", "B>0"], selector="df1")
with pytest.raises(KeyError, match="'No object named df4 in the file'"):
store.select_as_multiple(
["df1", "df2"], where=["A>0", "B>0"], selector="df4"
)
# default select
result = store.select("df1", ["A>0", "B>0"])
expected = store.select_as_multiple(
["df1"], where=["A>0", "B>0"], selector="df1"
)
tm.assert_frame_equal(result, expected)
expected = store.select_as_multiple(
"df1", where=["A>0", "B>0"], selector="df1"
)
tm.assert_frame_equal(result, expected)
# multiple
result = store.select_as_multiple(
["df1", "df2"], where=["A>0", "B>0"], selector="df1"
)
expected = concat([df1, df2], axis=1)
expected = expected[(expected.A > 0) & (expected.B > 0)]
tm.assert_frame_equal(result, expected)
# multiple (diff selector)
result = store.select_as_multiple(
["df1", "df2"], where="index>df2.index[4]", selector="df2"
)
expected = concat([df1, df2], axis=1)
expected = expected[5:]
tm.assert_frame_equal(result, expected)
# test exception for diff rows
store.append("df3", tm.makeTimeDataFrame(nper=50))
with pytest.raises(ValueError):
store.select_as_multiple(
["df1", "df3"], where=["A>0", "B>0"], selector="df1"
)
@pytest.mark.skipif(
LooseVersion(tables.__version__) < LooseVersion("3.1.0"),
reason=("tables version does not support fix for nan selection bug: GH 4858"),
)
def test_nan_selection_bug_4858(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(cols=range(6), values=range(6)), dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
df.iloc[0] = np.nan
expected = DataFrame(
dict(cols=["13.0", "14.0", "15.0"], values=[3.0, 4.0, 5.0]),
index=[3, 4, 5],
)
# write w/o the index on that particular column
store.append("df", df, data_columns=True, index=["cols"])
result = store.select("df", where="values>2.0")
tm.assert_frame_equal(result, expected)
def test_start_stop_table(self, setup_path):
with ensure_clean_store(setup_path) as store:
# table
df = DataFrame(dict(A=np.random.rand(20), B=np.random.rand(20)))
store.append("df", df)
result = store.select("df", "columns=['A']", start=0, stop=5)
expected = df.loc[0:4, ["A"]]
tm.assert_frame_equal(result, expected)
# out of range
result = store.select("df", "columns=['A']", start=30, stop=40)
assert len(result) == 0
expected = df.loc[30:40, ["A"]]
tm.assert_frame_equal(result, expected)
def test_start_stop_multiple(self, setup_path):
# GH 16209
with ensure_clean_store(setup_path) as store:
df = DataFrame({"foo": [1, 2], "bar": [1, 2]})
store.append_to_multiple(
{"selector": ["foo"], "data": None}, df, selector="selector"
)
result = store.select_as_multiple(
["selector", "data"], selector="selector", start=0, stop=1
)
expected = df.loc[[0], ["foo", "bar"]]
tm.assert_frame_equal(result, expected)
def test_start_stop_fixed(self, setup_path):
with ensure_clean_store(setup_path) as store:
# fixed, GH 8287
df = DataFrame(
dict(A=np.random.rand(20), B=np.random.rand(20)),
index=pd.date_range("20130101", periods=20),
)
store.put("df", df)
result = store.select("df", start=0, stop=5)
expected = df.iloc[0:5, :]
tm.assert_frame_equal(result, expected)
result = store.select("df", start=5, stop=10)
expected = df.iloc[5:10, :]
tm.assert_frame_equal(result, expected)
# out of range
result = store.select("df", start=30, stop=40)
expected = df.iloc[30:40, :]
tm.assert_frame_equal(result, expected)
# series
s = df.A
store.put("s", s)
result = store.select("s", start=0, stop=5)
expected = s.iloc[0:5]
tm.assert_series_equal(result, expected)
result = store.select("s", start=5, stop=10)
expected = s.iloc[5:10]
tm.assert_series_equal(result, expected)
# sparse; not implemented
df = tm.makeDataFrame()
df.iloc[3:5, 1:3] = np.nan
df.iloc[8:10, -2] = np.nan
def test_select_filter_corner(self, setup_path):
df = DataFrame(np.random.randn(50, 100))
df.index = ["{c:3d}".format(c=c) for c in df.index]
df.columns = ["{c:3d}".format(c=c) for c in df.columns]
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="table")
crit = "columns=df.columns[:75]"
result = store.select("frame", [crit])
tm.assert_frame_equal(result, df.loc[:, df.columns[:75]])
crit = "columns=df.columns[:75:2]"
result = store.select("frame", [crit])
tm.assert_frame_equal(result, df.loc[:, df.columns[:75:2]])
def test_path_pathlib(self, setup_path):
df = tm.makeDataFrame()
result = tm.round_trip_pathlib(
lambda p: df.to_hdf(p, "df"), lambda p: pd.read_hdf(p, "df")
)
tm.assert_frame_equal(df, result)
@pytest.mark.parametrize("start, stop", [(0, 2), (1, 2), (None, None)])
def test_contiguous_mixed_data_table(self, start, stop, setup_path):
# GH 17021
# ValueError when reading a contiguous mixed-data table ft. VLArray
df = DataFrame(
{
"a": Series([20111010, 20111011, 20111012]),
"b": Series(["ab", "cd", "ab"]),
}
)
with ensure_clean_store(setup_path) as store:
store.append("test_dataset", df)
result = store.select("test_dataset", start=start, stop=stop)
tm.assert_frame_equal(df[start:stop], result)
def test_path_pathlib_hdfstore(self, setup_path):
df = tm.makeDataFrame()
def writer(path):
with pd.HDFStore(path) as store:
df.to_hdf(store, "df")
def reader(path):
with pd.HDFStore(path) as store:
return pd.read_hdf(store, "df")
result = tm.round_trip_pathlib(writer, reader)
tm.assert_frame_equal(df, result)
def test_pickle_path_localpath(self, setup_path):
df = tm.makeDataFrame()
result = tm.round_trip_pathlib(
lambda p: df.to_hdf(p, "df"), lambda p: pd.read_hdf(p, "df")
)
tm.assert_frame_equal(df, result)
def test_path_localpath_hdfstore(self, setup_path):
df = tm.makeDataFrame()
def writer(path):
with pd.HDFStore(path) as store:
df.to_hdf(store, "df")
def reader(path):
with pd.HDFStore(path) as store:
return pd.read_hdf(store, "df")
result = tm.round_trip_localpath(writer, reader)
tm.assert_frame_equal(df, result)
def _check_roundtrip(self, obj, comparator, path, compression=False, **kwargs):
options = {}
if compression:
options["complib"] = _default_compressor
with ensure_clean_store(path, "w", **options) as store:
store["obj"] = obj
retrieved = store["obj"]
comparator(retrieved, obj, **kwargs)
def _check_double_roundtrip(
self, obj, comparator, path, compression=False, **kwargs
):
options = {}
if compression:
options["complib"] = compression or _default_compressor
with ensure_clean_store(path, "w", **options) as store:
store["obj"] = obj
retrieved = store["obj"]
comparator(retrieved, obj, **kwargs)
store["obj"] = retrieved
again = store["obj"]
comparator(again, obj, **kwargs)
def _check_roundtrip_table(self, obj, comparator, path, compression=False):
options = {}
if compression:
options["complib"] = _default_compressor
with ensure_clean_store(path, "w", **options) as store:
store.put("obj", obj, format="table")
retrieved = store["obj"]
comparator(retrieved, obj)
def test_multiple_open_close(self, setup_path):
# gh-4409: open & close multiple times
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, "df", mode="w", format="table")
# single
store = HDFStore(path)
assert "CLOSED" not in store.info()
assert store.is_open
store.close()
assert "CLOSED" in store.info()
assert not store.is_open
with ensure_clean_path(setup_path) as path:
if pytables._table_file_open_policy_is_strict:
# multiples
store1 = HDFStore(path)
with pytest.raises(ValueError):
HDFStore(path)
store1.close()
else:
# multiples
store1 = HDFStore(path)
store2 = HDFStore(path)
assert "CLOSED" not in store1.info()
assert "CLOSED" not in store2.info()
assert store1.is_open
assert store2.is_open
store1.close()
assert "CLOSED" in store1.info()
assert not store1.is_open
assert "CLOSED" not in store2.info()
assert store2.is_open
store2.close()
assert "CLOSED" in store1.info()
assert "CLOSED" in store2.info()
assert not store1.is_open
assert not store2.is_open
# nested close
store = HDFStore(path, mode="w")
store.append("df", df)
store2 = HDFStore(path)
store2.append("df2", df)
store2.close()
assert "CLOSED" in store2.info()
assert not store2.is_open
store.close()
assert "CLOSED" in store.info()
assert not store.is_open
# double closing
store = HDFStore(path, mode="w")
store.append("df", df)
store2 = HDFStore(path)
store.close()
assert "CLOSED" in store.info()
assert not store.is_open
store2.close()
assert "CLOSED" in store2.info()
assert not store2.is_open
# ops on a closed store
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, "df", mode="w", format="table")
store = HDFStore(path)
store.close()
with pytest.raises(ClosedFileError):
store.keys()
with pytest.raises(ClosedFileError):
"df" in store
with pytest.raises(ClosedFileError):
len(store)
with pytest.raises(ClosedFileError):
store["df"]
with pytest.raises(AttributeError):
store.df
with pytest.raises(ClosedFileError):
store.select("df")
with pytest.raises(ClosedFileError):
store.get("df")
with pytest.raises(ClosedFileError):
store.append("df2", df)
with pytest.raises(ClosedFileError):
store.put("df3", df)
with pytest.raises(ClosedFileError):
store.get_storer("df2")
with pytest.raises(ClosedFileError):
store.remove("df2")
with pytest.raises(ClosedFileError, match="file is not open"):
store.select("df")
def test_pytables_native_read(self, datapath, setup_path):
with ensure_clean_store(
datapath("io", "data", "legacy_hdf/pytables_native.h5"), mode="r"
) as store:
d2 = store["detector/readout"]
assert isinstance(d2, DataFrame)
@pytest.mark.skipif(
is_platform_windows(), reason="native2 read fails oddly on windows"
)
def test_pytables_native2_read(self, datapath, setup_path):
with ensure_clean_store(
datapath("io", "data", "legacy_hdf", "pytables_native2.h5"), mode="r"
) as store:
str(store)
d1 = store["detector"]
assert isinstance(d1, DataFrame)
@td.xfail_non_writeable
def test_legacy_table_fixed_format_read_py2(self, datapath, setup_path):
# GH 24510
# legacy table with fixed format written in Python 2
with ensure_clean_store(
datapath("io", "data", "legacy_hdf", "legacy_table_fixed_py2.h5"), mode="r"
) as store:
result = store.select("df")
expected = pd.DataFrame(
[[1, 2, 3, "D"]],
columns=["A", "B", "C", "D"],
index=pd.Index(["ABC"], name="INDEX_NAME"),
)
tm.assert_frame_equal(expected, result)
def test_legacy_table_read_py2(self, datapath, setup_path):
# issue: 24925
# legacy table written in Python 2
with ensure_clean_store(
datapath("io", "data", "legacy_hdf", "legacy_table_py2.h5"), mode="r"
) as store:
result = store.select("table")
expected = pd.DataFrame({"a": ["a", "b"], "b": [2, 3]})
tm.assert_frame_equal(expected, result)
def test_copy(self, setup_path):
with catch_warnings(record=True):
def do_copy(f, new_f=None, keys=None, propindexes=True, **kwargs):
try:
store = HDFStore(f, "r")
if new_f is None:
import tempfile
fd, new_f = tempfile.mkstemp()
tstore = store.copy(
new_f, keys=keys, propindexes=propindexes, **kwargs
)
# check keys
if keys is None:
keys = store.keys()
assert set(keys) == set(tstore.keys())
# check indices & nrows
for k in tstore.keys():
if tstore.get_storer(k).is_table:
new_t = tstore.get_storer(k)
orig_t = store.get_storer(k)
assert orig_t.nrows == new_t.nrows
                            # check propindexes
if propindexes:
for a in orig_t.axes:
if a.is_indexed:
assert new_t[a.name].is_indexed
finally:
safe_close(store)
safe_close(tstore)
try:
os.close(fd)
except (OSError, ValueError):
pass
safe_remove(new_f)
# new table
df = tm.makeDataFrame()
try:
path = create_tempfile(setup_path)
st = HDFStore(path)
st.append("df", df, data_columns=["A"])
st.close()
do_copy(f=path)
do_copy(f=path, propindexes=False)
finally:
safe_remove(path)
def test_store_datetime_fractional_secs(self, setup_path):
with ensure_clean_store(setup_path) as store:
dt = datetime.datetime(2012, 1, 2, 3, 4, 5, 123456)
series = Series([0], [dt])
store["a"] = series
assert store["a"].index[0] == dt
def test_tseries_indices_series(self, setup_path):
with ensure_clean_store(setup_path) as store:
idx = tm.makeDateIndex(10)
ser = Series(np.random.randn(len(idx)), idx)
store["a"] = ser
result = store["a"]
            tm.assert_series_equal(result, ser)
# coding: utf-8
#
# The MIT License (MIT)
#
# Copyright (c) 2016-2018 yutiansut/QUANTAXIS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import json
import pandas as pd
import tushare as ts
from QUANTAXIS.QAUtil import (DATABASE, QASETTING, QA_util_date_stamp,
QA_util_date_valid, QA_util_dict_remove_key,
QA_util_log_info, QA_util_code_tolist, QA_util_date_str2int, QA_util_date_int2str,
QA_util_sql_mongo_sort_DESCENDING,
QA_util_time_stamp, QA_util_to_json_from_pandas,
trade_date_sse)
def set_token(token=None):
try:
if token is None:
token = QASETTING.get_config('TSPRO', 'token', None)
else:
QASETTING.set_config('TSPRO', 'token', token)
ts.set_token(token)
except:
        print('Please upgrade tushare to the latest version: pip install tushare -U')
def get_pro():
try:
set_token()
pro = ts.pro_api()
except Exception as e:
if isinstance(e, NameError):
            print('Please set your tushare pro token credential')
else:
            print('Please upgrade tushare to the latest version: pip install tushare -U')
print(e)
pro = None
return pro
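# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Shows how the token helpers above are typically wired together. The token string and
# the stock code are placeholder assumptions, not values taken from this project.
def _example_set_token_and_query():
    set_token('your-tushare-pro-token')  # persisted via QASETTING under TSPRO/token
    pro = get_pro()                      # returns None when the token is missing or tushare is outdated
    if pro is not None:
        # pro.daily is a standard tushare-pro endpoint for daily bars
        return pro.daily(ts_code='000001.SZ', start_date='20180101', end_date='20180131')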
def QA_fetch_get_finindicator(start, end,code=None,collections=DATABASE.stock_report_finindicator_tushare):
query = { "end_date": {
"$lte": end,
"$gte": start}}
if code:
query['ts_code'] = {'$in': code}
cursor = collections.find(query, {"_id": 0}, batch_size=10000)#.sort([("ts_code",1),("end_date",1)])
data = []
i = 0
for post in cursor:
i = i+1
#print(post)
data.append(post)
return pd.DataFrame(data).sort_values(['ts_code','end_date'], ascending = True)
def QA_fetch_get_assetAliability(start, end,code=None,collections=DATABASE.stock_report_assetliability_tushare):
query = {"end_date": {
"$lte": end,
"$gte": start}}
if code:
query['ts_code'] = {'$in': code}
cursor = collections.find(query, {"_id": 0}, batch_size=10000)#.sort([("ts_code",1),("end_date",1)])
return pd.DataFrame([item for item in cursor]).sort_values(['ts_code','end_date'], ascending = True)
def QA_fetch_get_cashflow(start, end,code=None,collections=DATABASE.stock_report_cashflow_tushare):
query = {"end_date": {
"$lte": end,
"$gte": start}}
if code:
query['ts_code'] = {'$in': code}
cursor = collections.find(query, {"_id": 0}, batch_size=10000)#.sort([("ts_code",1),("end_date",1)])
#df = pd.DataFrame([item for item in cursor])
#print(df.head())
return pd.DataFrame([item for item in cursor]).sort_values(['ts_code','end_date'], ascending = True)
def QA_fetch_get_income(start, end,code=None,collections=DATABASE.stock_report_income_tushare):
query = {"end_date": {
"$lte": end,
"$gte": start}}
if code:
query['ts_code'] = {'$in': code}
cursor = collections.find(query, {"_id": 0}, batch_size=10000)#.sort([("ts_code",1),("end_date",1)])
return pd.DataFrame([item for item in cursor]).sort_values(['ts_code','end_date'], ascending = True)
def QA_SU_stock_info():
pro = get_pro()
return pro.stock_basic()
def QA_fetch_get_daily_adj(start, end,code=None,collections=DATABASE.stock_daily_adj_tushare):
query = {"trade_date": {
"$lte": end,
"$gte": start}}
if code:
query['ts_code'] = {'$in': code}
cursor = collections.find(query, {"_id": 0}, batch_size=10000)#.sort([("ts_code",1),("trade_date",1)])
return pd.DataFrame([item for item in cursor]).sort_values(['ts_code','trade_date'], ascending = True)
def QA_fetch_get_money_flow(start, end,code=None,collections=DATABASE.stock_daily_moneyflow_tushare):
query = {"trade_date": {
"$lte": end,
"$gte": start}}
if code:
query['ts_code'] = {'$in': code}
cursor = collections.find(query, {"_id": 0}, batch_size=10000)#.sort([("ts_code",1),("trade_date",1)])
return pd.DataFrame([item for item in cursor]).sort_values(['ts_code','trade_date'], ascending = True)
def QA_fetch_get_dailyindicator(start, end,code=None,collections=DATABASE.stock_daily_basic_tushare):
query = {"trade_date": {
"$lte": end,
"$gte": start}}
if code:
query['ts_code'] = {'$in': code}
cursor = collections.find(query, {"_id": 0}, batch_size=10000)#.sort([("ts_code",1),("trade_date",1)])
    return pd.DataFrame([item for item in cursor])
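# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# All QA_fetch_get_* helpers above share the same query shape: a date-range filter plus
# an optional list of ts_code values. The dates are tushare-style YYYYMMDD strings and
# the codes below are placeholders.
def _example_fetch_daily_indicator():
    codes = ['000001.SZ', '600000.SH']  # assumed ts_code format used by tushare pro
    return QA_fetch_get_dailyindicator('20180101', '20181231', code=codes)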
import os
from pathlib import Path
import subprocess
import threading
import time
# Auxiliar packages
import json
import pandas as pd
# Widgets
import ipywidgets as widgets
import IPython
# Plotly
import plotly.graph_objs as go
from plotly import tools
# Jupy4Syn
from .Configuration import Configuration
from .ScanParser import ScanParser
from .utils import logprint
class ScanGUI(widgets.Button):
def __init__(self, config=Configuration(), *args, **kwargs):
widgets.Button.__init__(self, *args, **kwargs)
# Config
self.config = config
self.plots_list = config.plots_list
# class Button values for ScanGUI
self.description='Start Scanning Plot'
self.disabled=False
self.button_style='success'
self.tooltip='Click me'
self.icon=''
self.layout = widgets.Layout(width='300px')
# Scan save file and directory
self.scan_save_dir = '/tmp/'
self.scan_save_file = 'scan_gui.temp'
self.stop_file = 'stop'
self.scan_path = Path(self.scan_save_dir + self.scan_save_file)
self.stop_path = Path(self.scan_save_dir + self.stop_file)
# Create scans, scanlogs and exports directories
p = Path("./scanlogs")
if not p.is_dir():
p.mkdir()
p = Path("./scans")
if not p.is_dir():
p.mkdir()
p = Path("./exports")
if not p.is_dir():
p.mkdir()
# Logging
self.output = widgets.Output()
# Scan-gui process
self.scan_gui_process = subprocess.Popen(["echo"])
# Threading
self.monitor = False
self.thread = threading.Thread()
self.refresh_thread = threading.Thread()
self.interrupted_scan = False
self.fig_thread = threading.Thread()
# Set callback function for click event
self.on_click(self._start_button)
# Widgets displays
self.start_button = widgets.VBox([self])
# Clean previous temp config file
try:
os.remove(str(self.scan_path))
os.remove(str(self.stop_path))
except:
pass
self.checkbox_live_plot = widgets.Checkbox(
value=False,
description="Live plot in Jupyter: ",
disabled=False,
style={'description_width': 'initial'}
)
self.checkbox_final_plot_jupy = widgets.Checkbox(
value=False,
description="Plot with Plotly after scan ends: ",
disabled=False,
style={'description_width': 'initial'}
)
self.checkbox_final_plot_pyqt = widgets.Checkbox(
value=True,
description="Plot with PyQtGraph after scan ends: ",
disabled=False,
style={'description_width': 'initial'}
)
self.select_plot_option = widgets.Dropdown(
options=['Plot after ends with PyQt', 'Plot after ends with Plotly', 'Live Plot'],
value='Plot after ends with PyQt',
# rows=10,
description='',
disabled=False,
style={'description_width': 'initial'}
)
self.fig = go.FigureWidget()
self.fig_box = widgets.Box()
self.refresh_icon_box = widgets.Box(layout=widgets.Layout(width='40px', height='40px'))
self.export = False
self.clear_threads = False
@staticmethod
def _start_button(b):
# Clear previous logs outputs
b.output.clear_output()
# with statement to output logs in stdou (if this option is enabled)
with b.output:
if b.monitor:
# Enable checkboxes
b.checkbox_live_plot.disabled = False
b.checkbox_final_plot_jupy.disabled = False
b.select_plot_option.disabled = False
# Change button monitor status
b.monitor = not b.monitor
# Change button to a "clicked status"
b.disabled = True
b.button_style = ''
b.description='Stopping...'
# We should sleep for some time to give some responsiveness to the user
time.sleep(0.5)
# Stop thread to monitor the save file
try:
logprint("Stopping threads", config=b.config)
b.interrupted_scan = True
b.started_scan = False
b.thread.join()
b.fig_thread.join()
b.refresh_thread.join()
b.clear_threads = False
time.sleep(1.5)
b.interrupted_scan = False
except Exception as e:
# If any error occurs, log that but dont stop code exection
logprint("Error in stopping threads", "[ERROR]", config=b.config)
logprint(str(e), "[ERROR]", config=b.config)
# Change button layout monitoring
b.disabled = False
b.button_style = 'success'
b.description='Start Scanning Plot'
else:
# Disable checkboxes
b.checkbox_live_plot.disabled = True
b.checkbox_final_plot_jupy.disabled = True
b.select_plot_option.disabled = True
if b.scan_gui_process.poll() is not None:
b.scan_gui_process = subprocess.Popen(["pydm --hide-nav-bar --hide-menu-bar /usr/local/SOL/GUI/scan-gui/scan_gui.py"],
shell=True)
# Change button monitor status
b.monitor = not b.monitor
# Change button to a "clicked status"
b.disabled = True
b.button_style = ''
b.description='Starting...'
# We should sleep for some time to give some responsiveness to the user
time.sleep(0.5)
# Clean previous scans config
try:
os.remove(str(b.scan_path))
os.remove(str(b.stop_path))
except:
pass
# Start thread to monitor the save file
try:
logprint("Starting thread", config=b.config)
b.thread = threading.Thread(target=b.monitor_save_file)
b.thread.start()
except Exception as e:
# If any error occurs, log that but dont stop code exection
logprint("Error in starting thread", "[ERROR]", config=b.config)
logprint(str(e), "[ERROR]", config=b.config)
# Change button layout monitoring
b.disabled = False
b.button_style = 'danger'
b.description='Stop Scanning Plot'
def monitor_save_file(self):
with self.output:
while self.monitor:
if self.scan_path.is_file():
# Started scan
self.started_scan = True
with open(str(self.scan_path)) as file:
try:
save_file = json.load(file)
except ValueError:
pass
os.remove(str(self.scan_path))
try:
os.remove(str(self.stop_path))
except:
pass
command = save_file["command"]["value"]
parser = self.scan_parser()
self.synchronous = save_file["checkSync"]["value"]
self.number_repeats = save_file["spinRepeat"]["value"]
# Waits for file creation by scan-gui
time.sleep(1.0)
# self.scan_names = self.get_scan_name_command(command, parser, self.number_repeats)
self.scan_names = self.get_scan_name_js(save_file, self.number_repeats)
config_name = self.get_config_name(command, parser)
self.plot_name = self.scan_names[-1] + "-jupy.png"
ts = time.gmtime()
year_month_day = time.strftime("%Y-%m-%d", ts)
time_stamp = time.strftime("%Y-%m-%d %H:%M:%S", ts) + " UTC-0"
self.log_str = time_stamp + "| [SCAN]:\n" + \
"Scan with command: '" + command + "' repeated " + str(self.number_repeats) + " times\n" + \
"Scan configuration: '" + config_name + "'\n" + \
"Scan data saved in: '" + self.scan_names[-1] + "'\n" + \
"Jupyter Scan plot saved in: '" + self.plot_name + "'\n" + \
"PyQtGraph Scan plot saved in: '" + self.scan_names[-1] + ".png" + "'\n"
log_file_name = Path('./scanlogs/' + year_month_day + '-scanlog.txt')
with open(str(log_file_name), "a") as f:
f.write(self.log_str + '\n')
IPython.display.update_display(IPython.display.Pretty(self.log_str), display_id='text')
if self not in self.plots_list:
self.plots_list.append(self)
# Call live graph
self.list_motors = save_file["listMotors"]["value"]
self.fig_thread = threading.Thread(target=self.thread_plot)
self.fig_thread.start()
# Scan status icon
self.refresh_thread = threading.Thread(target=self.thread_refresh_icon)
self.refresh_thread.start()
elif self.stop_path.is_file():
self.started_scan = False
self.interrupted_scan = True
self.clear_threads = True
os.remove(str(self.stop_path))
else:
pass
if self.clear_threads:
try:
self.fig_thread.join()
self.refresh_thread.join()
except:
pass
self.clear_threads = False
time.sleep(0.5)
def get_filename_js(self, js_file):
file_name = js_file["editFilename"]["value"]
file_path = js_file["editFilepath"]["value"]
if file_name == "" and file_path == "":
raise Exception("Can't load files from scan with empty Filepath and Filename.")
elif file_path == "":
file_path = "/tmp"
file_name = file_path + "/" + file_name
return file_name
def get_filename_command(self, command, parser):
args = parser.parse_known_args(command.split(' '))
file_name = args[0].output
if file_name == "":
raise Exception("Can't load files from scan with empty Filepath and Filename.")
return file_name
def get_scan_name_command(self, command, parser, number_repeats):
# Waits for file to be written by scan writter
time.sleep(1.0)
fileName = self.get_filename_command(command, parser)
scan_names = []
leadingZeros = 4
newName = ""
cont = 0
while(True):
cont += 1
newName = fileName + "_" + str(cont).zfill(leadingZeros)
if(os.path.isfile(newName)):
continue
else:
for i in range(number_repeats):
scan_names.append(fileName + "_" + str(cont - 1 + i).zfill(leadingZeros))
break
return scan_names
def get_scan_name_js(self, js_file, number_repeats):
# Waits for file to be written by scan writter
time.sleep(1.0)
fileName = self.get_filename_js(js_file)
scan_names = []
leadingZeros = 4
newName = ""
cont = 0
while(True):
cont += 1
newName = fileName + "_" + str(cont).zfill(leadingZeros)
if(os.path.isfile(newName)):
continue
else:
for i in range(number_repeats):
scan_names.append(fileName + "_" + str(cont - 1 + i).zfill(leadingZeros))
break
return scan_names
def get_config_name(self, command, parser):
args = parser.parse_known_args(command.split(' '))
config_name = args[0].configuration
return config_name
def scan_parser(self):
parser = ScanParser()
return parser.parser
def update_pd(self, default_names, label):
dfs = []
number_non_empty = len(default_names)
for default_name in default_names:
try:
dfs.append(pd.read_csv(default_name, sep=' ', comment='#', header=None))
except:
                dfs.append(pd.DataFrame())
#!/usr/bin/env python
from __future__ import division, print_function
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from pxl.styleplot import set_sns
import os
import argparse
from itertools import islice
R = 0.5375
D = R*2
nu = 1e-6
c = (0.04 + 0.06667)/2
H = 0.807
def clean_column_names(df):
"""Rename CSV column names so they are easier to work with."""
df.columns = [n.replace("(-)", "").lower().replace(".", "").strip()\
.replace(" ", "_").replace("(", "").replace(")", "") \
for n in df.columns]
return df
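# Hedged example (added for illustration): the renaming rule above just lower-cases,
# strips units/punctuation, and snake_cases the CSV headers. The two column names below
# are hypothetical, chosen only to show the transformation.
def _example_clean_column_names():
    df = pd.DataFrame(columns=["Power Coeff. (-)", "Theta (rad)"])
    return clean_column_names(df).columns.tolist()  # -> ['power_coeff', 'theta_rad']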
def load_timedata():
df = pd.read_csv("output/RM2_TimeData.csv")
df = clean_column_names(df)
df["theta_deg"] = np.rad2deg(df.theta_rad)
return df
def load_raw_xfoil_data(Re=1.5e6, alpha_name="alpha_deg"):
"""Load raw XFOIL data as DataFrame."""
fdir = "config/foildata/xfoil-raw"
fname = "NACA 0021_T1_Re{:.3f}_M0.00_N9.0.dat".format(Re/1e6)
fpath = os.path.join(fdir, fname)
alpha_deg = []
cl = []
cd = []
with open(fpath) as f:
for n, line in enumerate(f.readlines()):
if n >= 14:
ls = line.split()
alpha_deg.append(float(ls[0]))
cl.append(float(ls[1]))
cd.append(float(ls[2]))
df = pd.DataFrame()
df[alpha_name] = alpha_deg
df["cl"] = cl
df["cd"] = cd
return df
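# Hedged sketch (added for illustration): a quick way to eyeball the XFOIL polar loaded
# above. The figure styling is an assumption, not taken from the original plotting code.
def _example_plot_xfoil_polar(Re=1.5e6):
    df = load_raw_xfoil_data(Re=Re)
    fig, ax = plt.subplots()
    ax.plot(df.alpha_deg, df.cl, label="$C_l$")
    ax.plot(df.alpha_deg, df.cd, label="$C_d$")
    ax.set_xlabel(r"$\alpha$ (deg)")
    ax.legend()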
def load_probe_data(t1_fraction=0.5):
"""Load velocity probe data to dictionary of NumPy arrays.
Parameters
----------
t1_fraction : float
Fraction of simulation time after which statistics are computed.
"""
# First, obtain a list of all probe file names
probe_dir = "./output/probe"
fnames = sorted(os.listdir(probe_dir))
# For all probe files, read coordinates and average velocities
x_R = []
y_R = []
z_R = []
mean_u = []
mean_v = []
mean_w = []
for fname in fnames:
with open(os.path.join(probe_dir, fname)) as f:
for line in islice(f, 1, 2):
line = line.split(",")
x_R.append(float(line[0]))
y_R.append(float(line[1]))
z_R.append(float(line[2]))
t, u, v, w, _, _, _ = np.loadtxt(f, skiprows=2, delimiter=",",
unpack=True)
i1 = int(len(t)*t1_fraction)
mean_u.append(u[i1:].mean())
mean_w.append(v[i1:].mean()) # Swap v and w since y-up coord sys
mean_v.append(-w[i1:].mean())
x_R = np.array(x_R)
y_R_org = np.array(y_R)
z_R_org = np.array(z_R)
# Swap y and z since this is a y-up coord sys
z_R = y_R_org.copy()
y_R = -z_R_org.copy()
z_H = z_R*R/H
nz = len(np.unique(z_H))
ny = len(np.unique(y_R))
mean_u, mean_v, mean_w = (np.array(mean_u), np.array(mean_v),
np.array(mean_w))
# Reshape arrays so y_R indicates columns and z_R rows
y_R = y_R.reshape(nz, ny)
z_H = z_H.reshape(nz, ny)
mean_u = mean_u.reshape(nz, ny)
mean_v = mean_v.reshape(nz, ny)
mean_w = mean_w.reshape(nz, ny)
return {"y_R": y_R, "z_H": z_H, "mean_u": mean_u, "mean_v": mean_v,
"mean_w": mean_w}
def plot_perf(print_perf=True, save=False):
"""Plot power coefficient versus azimuthal angle."""
df = load_timedata()
if print_perf:
df_last = df.iloc[len(df)//2:]
print("From {:.1f}--{:.1f} degrees, mean_cp: {:.2f}".format(
df_last.theta_deg.min(), df_last.theta_deg.max(),
df_last.power_coeff.mean()))
fig, ax = plt.subplots()
ax.plot(df.theta_deg, df.power_coeff, marker="o")
ax.set_xlabel(r"$\theta$ (degrees)")
ax.set_ylabel(r"$C_P$")
fig.tight_layout()
if save:
fig.savefig("figures/perf.pdf")
fig.savefig("figures/perf.png", dpi=300)
def plot_perf_curves(exp=False, single_ds=False, alm=True, save=False):
"""Plot performance curves.
Parameters
----------
single_ds : bool
Whether or not to plot results from multiple dynamic stall models.
"""
fig, ax = plt.subplots(figsize=(7.5, 3), nrows=1, ncols=2)
    df = pd.read_csv("processed/tsr_sweep.csv")
import math
import numpy as np
import pandas as pd
import sklearn.datasets
import os
import urllib.request
# X: input variables (Pandas Dataframe)
# Y: output variable (Numpy Array)
def boston_housing():
d = sklearn.datasets.load_boston()
    df = pd.DataFrame(data=d.data, columns=d.feature_names)
#!/usr/bin/env python3
import pandas as pd
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from mpl_toolkits.axes_grid1 import make_axes_locatable
from pandas import set_option
set_option("display.max_rows", 10)
pd.options.mode.chained_assignment = None
from sklearn import preprocessing
# Functions
def make_facies_log_plot(logs, facies_colors):
#make sure logs are sorted by depth
logs = logs.sort_values(by='Depth')
cmap_facies = colors.ListedColormap(
facies_colors[0:len(facies_colors)], 'indexed')
ztop=logs.Depth.min(); zbot=logs.Depth.max()
cluster=np.repeat(np.expand_dims(logs['Facies'].values,1), 100, 1)
f, ax = plt.subplots(nrows=1, ncols=6, figsize=(8, 12))
ax[0].plot(logs.GR, logs.Depth, '-g')
ax[1].plot(logs.ILD_log10, logs.Depth, '-')
ax[2].plot(logs.DeltaPHI, logs.Depth, '-', color='0.5')
ax[3].plot(logs.PHIND, logs.Depth, '-', color='r')
ax[4].plot(logs.PE, logs.Depth, '-', color='black')
im=ax[5].imshow(cluster, interpolation='none', aspect='auto',
cmap=cmap_facies,vmin=1,vmax=9)
divider = make_axes_locatable(ax[5])
cax = divider.append_axes("right", size="20%", pad=0.05)
cbar=plt.colorbar(im, cax=cax)
cbar.set_label((17*' ').join([' SS ', 'CSiS', 'FSiS',
'SiSh', ' MS ', ' WS ', ' D ',
' PS ', ' BS ']))
cbar.set_ticks(range(0,1)); cbar.set_ticklabels('')
for i in range(len(ax)-1):
ax[i].set_ylim(ztop,zbot)
ax[i].invert_yaxis()
ax[i].grid()
ax[i].locator_params(axis='x', nbins=3)
ax[0].set_xlabel("GR")
ax[0].set_xlim(logs.GR.min(),logs.GR.max())
ax[1].set_xlabel("ILD_log10")
ax[1].set_xlim(logs.ILD_log10.min(),logs.ILD_log10.max())
ax[2].set_xlabel("DeltaPHI")
ax[2].set_xlim(logs.DeltaPHI.min(),logs.DeltaPHI.max())
ax[3].set_xlabel("PHIND")
ax[3].set_xlim(logs.PHIND.min(),logs.PHIND.max())
ax[4].set_xlabel("PE")
ax[4].set_xlim(logs.PE.min(),logs.PE.max())
ax[5].set_xlabel('Facies')
ax[1].set_yticklabels([]); ax[2].set_yticklabels([]); ax[3].set_yticklabels([])
ax[4].set_yticklabels([]); ax[5].set_yticklabels([])
ax[5].set_xticklabels([])
f.suptitle('Well: %s'%logs.iloc[0]['Well Name'], fontsize=14,y=0.94)
def accuracy(conf):
total_correct = 0.
nb_classes = conf.shape[0]
for i in np.arange(0,nb_classes):
total_correct += conf[i][i]
acc = total_correct/sum(sum(conf))
return acc
adjacent_facies = np.array([[1], [0,2], [1], [4], [3,5], [4,6,7], [5,7], [5,6,8], [6,7]])
def accuracy_adjacent(conf, adjacent_facies):
nb_classes = conf.shape[0]
total_correct = 0.
for i in np.arange(0,nb_classes):
total_correct += conf[i][i]
for j in adjacent_facies[i]:
total_correct += conf[i][j]
return total_correct / sum(sum(conf))
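# Hedged mini-example (added for illustration): with a 2x2 confusion matrix and the
# convention that classes 0 and 1 are "adjacent", accuracy() counts only the diagonal
# (6/10 = 0.6) while accuracy_adjacent() also credits the off-diagonal neighbours
# (10/10 = 1.0). The tiny matrix below is made up purely to show the arithmetic.
def _example_adjacent_accuracy():
    toy_conf = np.array([[4, 2], [2, 2]])
    toy_adjacent = np.array([[1], [0]])
    return accuracy(toy_conf), accuracy_adjacent(toy_conf, toy_adjacent)  # (0.6, 1.0)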
# Loading Data
validationFull = pd.read_csv('../validation_data_nofacies.csv')
training_data = pd.read_csv('../facies_vectors.csv')
# Treat Data
training_data.fillna(training_data.mean(),inplace=True)
training_data['Well Name'] = training_data['Well Name'].astype('category')
training_data['Formation'] = training_data['Formation'].astype('category')
training_data['Well Name'].unique()
training_data.describe()
# Color Data
# 1=sandstone 2=c_siltstone 3=f_siltstone
# 4=marine_silt_shale 5=mudstone 6=wackestone 7=dolomite
# 8=packstone 9=bafflestone
facies_colors = ['#F4D03F', '#F5B041','#DC7633','#6E2C00',
'#1B4F72','#2E86C1', '#AED6F1', '#A569BD', '#196F3D']
facies_labels = ['SS', 'CSiS', 'FSiS', 'SiSh', 'MS',
'WS', 'D','PS', 'BS']
#facies_color_map is a dictionary that maps facies labels
#to their respective colors
facies_color_map = {}
for ind, label in enumerate(facies_labels):
facies_color_map[label] = facies_colors[ind]
def label_facies(row, labels):
return labels[ row['Facies'] -1]
training_data.loc[:,'FaciesLabels'] = training_data.apply(lambda row: label_facies(row, facies_labels), axis=1)
# Condition dataset
correct_facies_labels = training_data['Facies'].values
feature_vectors = training_data.drop(['Formation', 'Well Name', 'Depth','Facies','FaciesLabels'], axis=1)
feature_vectors.describe()
scaler = preprocessing.StandardScaler().fit(feature_vectors)
scaled_features = scaler.transform(feature_vectors)
from sklearn.model_selection import train_test_split  # sklearn.cross_validation was removed in newer releases
X_train, X_cv_test, y_train, y_cv_test = train_test_split(scaled_features,
correct_facies_labels, test_size=0.4, random_state=42)
X_cv, X_test, y_cv, y_test = train_test_split(X_cv_test, y_cv_test,
test_size=0.5, random_state=42)
# Train data
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import confusion_matrix
from scipy.stats import truncnorm
# Select model size
lower, upper = 1, 500
mu, sigma = (upper-lower)/2, (upper-lower)/2
sizes_rv = truncnorm((lower - mu) / sigma, (upper - mu) / sigma,
loc=mu, scale=sigma)
samples = 30
sizes_L1 = [ int(d) for d in sizes_rv.rvs(samples) ]
sizes_L2 = []
sizes_L3 = []
for sL1 in sizes_L1:
lower, upper = 1, sL1
mu, sigma = (upper-lower)/2, (upper-lower)/2
sizes_rv = truncnorm((lower - mu) / sigma, (upper - mu) / sigma,
loc=mu, scale=sigma)
sL2 = int(sizes_rv.rvs(1)[0])
sizes_L2.append(sL2)
lower, upper = 1, sL2
mu, sigma = (upper-lower)/2, (upper-lower)/2
sizes_rv = truncnorm((lower - mu) / sigma, (upper - mu) / sigma,
loc=mu, scale=sigma)
sL3 = int(sizes_rv.rvs(1)[0])
sizes_L3.append(sL3)
sizes = sorted(set(zip(sizes_L1, sizes_L2, sizes_L3)),
key=lambda s: sum(s))
train_error = np.array([])
cv_error = np.array([])
train_adj_error = np.array([])
cv_adj_error = np.array([])
minerr = 1
for i, s in enumerate(sizes):
clf = MLPClassifier(solver='lbfgs', alpha=0,
hidden_layer_sizes=s)
clf.fit(X_train,y_train)
# Compute errors
conf_cv = confusion_matrix(y_cv, clf.predict(X_cv))
conf_tr = confusion_matrix(y_train, clf.predict(X_train))
train_error = np.append(train_error, 1-accuracy(conf_tr))
cv_error = np.append(cv_error, 1-accuracy(conf_cv))
train_adj_error = np.append(train_adj_error,
1-accuracy_adjacent(conf_tr, adjacent_facies))
cv_adj_error = np.append(cv_adj_error,
1-accuracy_adjacent(conf_cv, adjacent_facies))
print('[ %3d%% done ] ' % (100*(i+1)/len(sizes),), end="")
if cv_error[-1] < minerr:
minerr = cv_error[-1]
print('CV error = %d%% with' % (100*minerr,), s)
else:
print()
sizes_sum = [ np.sum(s) for s in sizes ]
p = np.poly1d(np.polyfit(sizes_sum, cv_error, 2))
f, ax = plt.subplots()
ax.scatter(sizes_sum, cv_error)
ax.plot(range(1, max(sizes_sum)+1), p(range(1, max(sizes_sum)+1)))
ax.set_ylim([min(cv_error), max(cv_error)])
minsum = range(1, max(sizes_sum)+1)[np.argmin(p(range(1, max(sizes_sum)+1)))]
minsize = (int(minsum*4/7),int(minsum*2/7),int(minsum*1/7))
# Select regularization
alphas = np.append([0], np.sqrt(10)**np.arange(-10, 4.0, 1))
train_error = np.array([])
cv_error = np.array([])
train_adj_error = np.array([])
cv_adj_error = np.array([])
minerr = 1
for i, a in enumerate(alphas):
clf = MLPClassifier(solver='lbfgs', alpha=a,
hidden_layer_sizes=minsize)
clf.fit(X_train,y_train)
# Compute errors
conf_cv = confusion_matrix(y_cv, clf.predict(X_cv))
conf_tr = confusion_matrix(y_train, clf.predict(X_train))
train_error = np.append(train_error, 1-accuracy(conf_tr))
cv_error = np.append(cv_error, 1-accuracy(conf_cv))
train_adj_error = np.append(train_adj_error,
1-accuracy_adjacent(conf_tr, adjacent_facies))
cv_adj_error = np.append(cv_adj_error,
1-accuracy_adjacent(conf_cv, adjacent_facies))
print('[ %3d%% done ] ' % (100*(i+1)/len(alphas),), end="")
if cv_error[-1] < minerr:
minerr = cv_error[-1]
print('CV error = %d%% with %g' % (100*minerr, a))
else:
print()
p = np.poly1d(np.polyfit(np.log(alphas[1:]), cv_error[1:], 2))
f, ax = plt.subplots()
ax.scatter(np.log(alphas[1:]), cv_error[1:])
ax.plot(np.arange(-12, 4.0, .1), p(np.arange(-12, 4.0, .1)))
minalpha = np.arange(-12, 4.0, .1)[np.argmin(p(np.arange(-12, 4.0, .1)))]
minalpha = np.sqrt(10)**minalpha
clf = MLPClassifier(solver='lbfgs', alpha=minalpha,
hidden_layer_sizes=minsize)
clf.fit(X_train,y_train)
conf_te = confusion_matrix(y_test, clf.predict(X_test))
print('Predicted accuracy %d%% on the held-out test set' % (100*accuracy(conf_te),))
# Retest with all data
clf_final = MLPClassifier(solver='lbfgs', alpha=minalpha,
hidden_layer_sizes=minsize)
clf_final.fit(scaled_features,correct_facies_labels)
# Apply the trained model to the validation (test) data
validation_features = validationFull.drop(['Formation', 'Well Name', 'Depth'], axis=1)
scaled_validation = scaler.transform(validation_features)
validation_output = clf_final.predict(scaled_validation)
validationFull['Facies']=validation_output
validationFull.to_csv('well_data_with_facies_DH.csv')
pd.DataFrame({'alpha': minalpha, 'layer1': minsize[0], 'layer2': minsize[1],
              'layer3': minsize[2]}, index=[0])
import pandas as pd
import datetime as dt
from functools import wraps
def log_step(func):
@wraps(func)
def wrapper(*args, **kwargs):
tic = dt.datetime.now()
result = func(*args, **kwargs)
time_taken = str(dt.datetime.now() - tic)
# print(f"Ran step {func.__name__} shape={result.shape} took {time_taken}s")
return result
return wrapper
@log_step
def start_pipeline(dataf):
return dataf.copy()
@log_step
def rename_columns(dataf):
return (dataf
.rename(columns={dataf.columns[-1]: 'DateTime'})
.rename(columns={colname: colname.strip() for colname in dataf.columns})
)
@log_step
def remove_rows_columns(dataf):
return (dataf
.drop(columns=[dataf.columns[0]]) # first column is station name
.drop([0]) # first row is units of measurement
.iloc[:-8] # last 8 rows are summary statistics
)
@log_step
def set_dtypes(dataf):
dataf['DateTime'] = dataf['DateTime'].apply(lambda x: x.strip().replace('24:00', '00:00'))
    dataf['DateTime'] = pd.to_datetime(dataf['DateTime'], infer_datetime_format=True)
    return dataf
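# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# The @log_step functions above each take and return a DataFrame, so they chain naturally
# with DataFrame.pipe. A full cleaning pass would look roughly like the sketch below; the
# CSV path is a placeholder assumption.
def _example_run_pipeline(path="measurements.csv"):
    raw = pd.read_csv(path)
    return (raw
            .pipe(start_pipeline)
            .pipe(rename_columns)
            .pipe(remove_rows_columns)
            .pipe(set_dtypes))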
import time
time_start = time.time()
import os
import argparse as ap
import pandas as pd
import functions
parser = ap.ArgumentParser()
parser.add_argument('-n', "--stockName", help="Name of Stock")
parser.add_argument('-v', "--visualize", help="Visualizer on/off")
args = vars(parser.parse_args())
if args["stockName"] is None:
stock_name = '카카오'
else:
stock_name = args["stockName"]
if args["visualize"] is None:
is_visualize = False
else:
is_visualize = True
if not os.path.isfile('./code_df.csv'):
functions.save_as_csv()
print('save complete!')
print('Time required(s): ' + str(time.time() - time_start))
code_df = pd.read_csv('./code_df.csv')
# print(code_df)
# print(type(code_df))
url = functions.get_url(stock_name, code_df)
df = pd.DataFrame()
# -*- coding: utf-8 -*-
import re
import warnings
from datetime import timedelta
from itertools import product
import pytest
import numpy as np
import pandas as pd
from pandas import (CategoricalIndex, DataFrame, Index, MultiIndex,
compat, date_range, period_range)
from pandas.compat import PY3, long, lrange, lzip, range, u, PYPY
from pandas.errors import PerformanceWarning, UnsortedIndexError
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.indexes.base import InvalidIndexError
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas._libs.tslib import Timestamp
import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal, assert_copy
from .common import Base
class TestMultiIndex(Base):
_holder = MultiIndex
_compat_props = ['shape', 'ndim', 'size']
def setup_method(self, method):
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
self.index_names = ['first', 'second']
self.indices = dict(index=MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels
], names=self.index_names,
verify_integrity=False))
self.setup_indices()
def create_index(self):
return self.index
def test_can_hold_identifiers(self):
idx = self.create_index()
key = idx[0]
assert idx._can_hold_identifiers_and_holds_name(key) is True
def test_boolean_context_compat2(self):
# boolean context compat
# GH7897
i1 = MultiIndex.from_tuples([('A', 1), ('A', 2)])
i2 = MultiIndex.from_tuples([('A', 1), ('A', 3)])
common = i1.intersection(i2)
def f():
if common:
pass
tm.assert_raises_regex(ValueError, 'The truth value of a', f)
def test_labels_dtypes(self):
# GH 8456
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
assert i.labels[0].dtype == 'int8'
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(40)])
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(400)])
assert i.labels[1].dtype == 'int16'
i = MultiIndex.from_product([['a'], range(40000)])
assert i.labels[1].dtype == 'int32'
i = pd.MultiIndex.from_product([['a'], range(1000)])
assert (i.labels[0] >= 0).all()
assert (i.labels[1] >= 0).all()
def test_where(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
def f():
i.where(True)
pytest.raises(NotImplementedError, f)
def test_where_array_like(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
klasses = [list, tuple, np.array, pd.Series]
cond = [False, True]
for klass in klasses:
def f():
return i.where(klass(cond))
pytest.raises(NotImplementedError, f)
def test_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(m.repeat(reps), expected)
with tm.assert_produces_warning(FutureWarning):
result = m.repeat(n=reps)
tm.assert_index_equal(result, expected)
def test_numpy_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(np.repeat(m, reps), expected)
msg = "the 'axis' parameter is not supported"
tm.assert_raises_regex(
ValueError, msg, np.repeat, m, reps, axis=1)
def test_set_name_methods(self):
# so long as these are synonyms, we don't need to test set_names
assert self.index.rename == self.index.set_names
new_names = [name + "SUFFIX" for name in self.index_names]
ind = self.index.set_names(new_names)
assert self.index.names == self.index_names
assert ind.names == new_names
with tm.assert_raises_regex(ValueError, "^Length"):
ind.set_names(new_names + new_names)
new_names2 = [name + "SUFFIX2" for name in new_names]
res = ind.set_names(new_names2, inplace=True)
assert res is None
assert ind.names == new_names2
# set names for specific level (# GH7792)
ind = self.index.set_names(new_names[0], level=0)
assert self.index.names == self.index_names
assert ind.names == [new_names[0], self.index_names[1]]
res = ind.set_names(new_names2[0], level=0, inplace=True)
assert res is None
assert ind.names == [new_names2[0], self.index_names[1]]
# set names for multiple levels
ind = self.index.set_names(new_names, level=[0, 1])
assert self.index.names == self.index_names
assert ind.names == new_names
res = ind.set_names(new_names2, level=[0, 1], inplace=True)
assert res is None
assert ind.names == new_names2
@pytest.mark.parametrize('inplace', [True, False])
def test_set_names_with_nlevel_1(self, inplace):
# GH 21149
# Ensure that .set_names for MultiIndex with
# nlevels == 1 does not raise any errors
expected = pd.MultiIndex(levels=[[0, 1]],
labels=[[0, 1]],
names=['first'])
m = pd.MultiIndex.from_product([[0, 1]])
result = m.set_names('first', level=0, inplace=inplace)
if inplace:
result = m
tm.assert_index_equal(result, expected)
def test_set_levels_labels_directly(self):
# setting levels/labels directly raises AttributeError
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
with pytest.raises(AttributeError):
self.index.levels = new_levels
with pytest.raises(AttributeError):
self.index.labels = new_labels
def test_set_levels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
def assert_matching(actual, expected, check_dtype=False):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp)
tm.assert_numpy_array_equal(act, exp, check_dtype=check_dtype)
# level changing [w/o mutation]
ind2 = self.index.set_levels(new_levels)
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
# level changing specific level [w/o mutation]
ind2 = self.index.set_levels(new_levels[0], level=0)
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.set_levels(new_levels[1], level=1)
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/o mutation]
ind2 = self.index.set_levels(new_levels, level=[0, 1])
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# illegal level changing should not change levels
# GH 13754
original_index = self.index.copy()
for inplace in [True, False]:
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_levels(['c'], level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_labels([0, 1, 2, 3, 4, 5], level=0,
inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Levels"):
self.index.set_levels('c', level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Labels"):
self.index.set_labels(1, level=0, inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
def test_set_labels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
def assert_matching(actual, expected):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp, dtype=np.int8)
tm.assert_numpy_array_equal(act, exp)
# label changing [w/o mutation]
ind2 = self.index.set_labels(new_labels)
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
# label changing specific level [w/o mutation]
ind2 = self.index.set_labels(new_labels[0], level=0)
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.set_labels(new_labels[1], level=1)
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/o mutation]
ind2 = self.index.set_labels(new_labels, level=[0, 1])
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing for levels of different magnitude of categories
ind = pd.MultiIndex.from_tuples([(0, i) for i in range(130)])
new_labels = range(129, -1, -1)
expected = pd.MultiIndex.from_tuples(
[(0, i) for i in new_labels])
# [w/o mutation]
result = ind.set_labels(labels=new_labels, level=1)
assert result.equals(expected)
# [w/ mutation]
result = ind.copy()
result.set_labels(labels=new_labels, level=1, inplace=True)
assert result.equals(expected)
def test_set_levels_labels_names_bad_input(self):
levels, labels = self.index.levels, self.index.labels
names = self.index.names
with tm.assert_raises_regex(ValueError, 'Length of levels'):
self.index.set_levels([levels[0]])
with tm.assert_raises_regex(ValueError, 'Length of labels'):
self.index.set_labels([labels[0]])
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names([names[0]])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_names(names[0])
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_levels(levels, level=0)
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_labels(labels, level=0)
# should have equal lengths
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names(names[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'string'):
self.index.set_names(names, level=0)
def test_set_levels_categorical(self):
# GH13854
index = MultiIndex.from_arrays([list("xyzx"), [0, 1, 2, 3]])
for ordered in [False, True]:
cidx = CategoricalIndex(list("bac"), ordered=ordered)
result = index.set_levels(cidx, 0)
expected = MultiIndex(levels=[cidx, [0, 1, 2, 3]],
labels=index.labels)
tm.assert_index_equal(result, expected)
result_lvl = result.get_level_values(0)
expected_lvl = CategoricalIndex(list("bacb"),
categories=cidx.categories,
ordered=cidx.ordered)
tm.assert_index_equal(result_lvl, expected_lvl)
def test_metadata_immutable(self):
levels, labels = self.index.levels, self.index.labels
# shouldn't be able to set at either the top level or base level
mutable_regex = re.compile('does not support mutable operations')
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0] = levels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0][0] = levels[0][0]
# ditto for labels
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0] = labels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0][0] = labels[0][0]
# and for names
names = self.index.names
with tm.assert_raises_regex(TypeError, mutable_regex):
names[0] = names[0]
def test_inplace_mutation_resets_values(self):
levels = [['a', 'b', 'c'], [4]]
levels2 = [[1, 2, 3], ['a']]
labels = [[0, 1, 0, 2, 2, 0], [0, 0, 0, 0, 0, 0]]
mi1 = MultiIndex(levels=levels, labels=labels)
mi2 = MultiIndex(levels=levels2, labels=labels)
vals = mi1.values.copy()
vals2 = mi2.values.copy()
assert mi1._tuples is not None
# Make sure level setting works
new_vals = mi1.set_levels(levels2).values
tm.assert_almost_equal(vals2, new_vals)
# Non-inplace doesn't kill _tuples [implementation detail]
tm.assert_almost_equal(mi1._tuples, vals)
# ...and values is still same too
tm.assert_almost_equal(mi1.values, vals)
# Inplace should kill _tuples
mi1.set_levels(levels2, inplace=True)
tm.assert_almost_equal(mi1.values, vals2)
# Make sure label setting works too
labels2 = [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]
exp_values = np.empty((6,), dtype=object)
exp_values[:] = [(long(1), 'a')] * 6
# Must be 1d array of tuples
assert exp_values.shape == (6,)
new_values = mi2.set_labels(labels2).values
# Not inplace shouldn't change
tm.assert_almost_equal(mi2._tuples, vals2)
# Should have correct values
tm.assert_almost_equal(exp_values, new_values)
# ...and again setting inplace should kill _tuples, etc
mi2.set_labels(labels2, inplace=True)
tm.assert_almost_equal(mi2.values, new_values)
def test_copy_in_constructor(self):
levels = np.array(["a", "b", "c"])
labels = np.array([1, 1, 2, 0, 0, 1, 1])
val = labels[0]
mi = MultiIndex(levels=[levels, levels], labels=[labels, labels],
copy=True)
assert mi.labels[0][0] == val
labels[0] = 15
assert mi.labels[0][0] == val
val = levels[0]
levels[0] = "PANDA"
assert mi.levels[0][0] == val
def test_set_value_keeps_names(self):
# motivating example from #3742
lev1 = ['hans', 'hans', 'hans', 'grethe', 'grethe', 'grethe']
lev2 = ['1', '2', '3'] * 2
idx = pd.MultiIndex.from_arrays([lev1, lev2], names=['Name', 'Number'])
df = pd.DataFrame(
np.random.randn(6, 4),
columns=['one', 'two', 'three', 'four'],
index=idx)
df = df.sort_index()
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
df.at[('grethe', '4'), 'one'] = 99.34
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
def test_copy_names(self):
# Check that adding a "names" parameter to the copy is honored
# GH14302
multi_idx = pd.Index([(1, 2), (3, 4)], names=['MyName1', 'MyName2'])
multi_idx1 = multi_idx.copy()
assert multi_idx.equals(multi_idx1)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx1.names == ['MyName1', 'MyName2']
multi_idx2 = multi_idx.copy(names=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx2)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx2.names == ['NewName1', 'NewName2']
multi_idx3 = multi_idx.copy(name=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx3)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx3.names == ['NewName1', 'NewName2']
def test_names(self):
# names are assigned in setup
names = self.index_names
level_names = [level.name for level in self.index.levels]
assert names == level_names
# setting bad names on existing
index = self.index
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names",
list(index.names) + ["third"])
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names", [])
# initializing with bad names (should always be equivalent)
major_axis, minor_axis = self.index.levels
major_labels, minor_labels = self.index.labels
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first'])
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first', 'second', 'third'])
# names are assigned
index.names = ["a", "b"]
ind_names = list(index.names)
level_names = [level.name for level in index.levels]
assert ind_names == level_names
def test_astype(self):
expected = self.index.copy()
actual = self.index.astype('O')
assert_copy(actual.levels, expected.levels)
assert_copy(actual.labels, expected.labels)
self.check_level_names(actual, expected.names)
with tm.assert_raises_regex(TypeError, "^Setting.*dtype.*object"):
self.index.astype(np.dtype(int))
@pytest.mark.parametrize('ordered', [True, False])
def test_astype_category(self, ordered):
# GH 18630
msg = '> 1 ndim Categorical are not supported at this time'
with tm.assert_raises_regex(NotImplementedError, msg):
self.index.astype(CategoricalDtype(ordered=ordered))
if ordered is False:
# dtype='category' defaults to ordered=False, so only test once
with tm.assert_raises_regex(NotImplementedError, msg):
self.index.astype('category')
def test_constructor_single_level(self):
result = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]], names=['first'])
assert isinstance(result, MultiIndex)
expected = Index(['foo', 'bar', 'baz', 'qux'], name='first')
tm.assert_index_equal(result.levels[0], expected)
assert result.names == ['first']
def test_constructor_no_levels(self):
tm.assert_raises_regex(ValueError, "non-zero number "
"of levels/labels",
MultiIndex, levels=[], labels=[])
both_re = re.compile('Must pass both levels and labels')
with tm.assert_raises_regex(TypeError, both_re):
MultiIndex(levels=[])
with tm.assert_raises_regex(TypeError, both_re):
MultiIndex(labels=[])
def test_constructor_mismatched_label_levels(self):
labels = [np.array([1]), np.array([2]), np.array([3])]
levels = ["a"]
tm.assert_raises_regex(ValueError, "Length of levels and labels "
"must be the same", MultiIndex,
levels=levels, labels=labels)
length_error = re.compile('>= length of level')
label_error = re.compile(r'Unequal label lengths: \[4, 2\]')
# important to check that it's looking at the right thing.
with tm.assert_raises_regex(ValueError, length_error):
MultiIndex(levels=[['a'], ['b']],
labels=[[0, 1, 2, 3], [0, 3, 4, 1]])
with tm.assert_raises_regex(ValueError, label_error):
MultiIndex(levels=[['a'], ['b']], labels=[[0, 0, 0, 0], [0, 0]])
# external API
with tm.assert_raises_regex(ValueError, length_error):
self.index.copy().set_levels([['a'], ['b']])
with tm.assert_raises_regex(ValueError, label_error):
self.index.copy().set_labels([[0, 0, 0, 0], [0, 0]])
def test_constructor_nonhashable_names(self):
# GH 20527
levels = [[1, 2], [u'one', u'two']]
labels = [[0, 0, 1, 1], [0, 1, 0, 1]]
names = ((['foo'], ['bar']))
message = "MultiIndex.name must be a hashable type"
tm.assert_raises_regex(TypeError, message,
MultiIndex, levels=levels,
labels=labels, names=names)
# With .rename()
mi = MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=('foo', 'bar'))
renamed = [['foor'], ['barr']]
tm.assert_raises_regex(TypeError, message, mi.rename, names=renamed)
# With .set_names()
tm.assert_raises_regex(TypeError, message, mi.set_names, names=renamed)
@pytest.mark.parametrize('names', [['a', 'b', 'a'], ['1', '1', '2'],
['1', 'a', '1']])
def test_duplicate_level_names(self, names):
# GH18872
pytest.raises(ValueError, pd.MultiIndex.from_product,
[[0, 1]] * 3, names=names)
# With .rename()
mi = pd.MultiIndex.from_product([[0, 1]] * 3)
tm.assert_raises_regex(ValueError, "Duplicated level name:",
mi.rename, names)
# With .rename(., level=)
mi.rename(names[0], level=1, inplace=True)
tm.assert_raises_regex(ValueError, "Duplicated level name:",
mi.rename, names[:2], level=[0, 2])
def assert_multiindex_copied(self, copy, original):
# Levels should be (at least, shallow copied)
tm.assert_copy(copy.levels, original.levels)
tm.assert_almost_equal(copy.labels, original.labels)
        # It doesn't matter which way the labels were copied
tm.assert_almost_equal(copy.labels, original.labels)
assert copy.labels is not original.labels
        # It doesn't matter which way the names were copied
assert copy.names == original.names
assert copy.names is not original.names
# Sort order should be copied
assert copy.sortorder == original.sortorder
def test_copy(self):
i_copy = self.index.copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_shallow_copy(self):
i_copy = self.index._shallow_copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_view(self):
i_view = self.index.view()
self.assert_multiindex_copied(i_view, self.index)
def check_level_names(self, index, names):
assert [level.name for level in index.levels] == list(names)
def test_changing_names(self):
# names should be applied to levels
level_names = [level.name for level in self.index.levels]
self.check_level_names(self.index, self.index.names)
view = self.index.view()
copy = self.index.copy()
shallow_copy = self.index._shallow_copy()
# changing names should change level names on object
new_names = [name + "a" for name in self.index.names]
self.index.names = new_names
self.check_level_names(self.index, new_names)
# but not on copies
self.check_level_names(view, level_names)
self.check_level_names(copy, level_names)
self.check_level_names(shallow_copy, level_names)
# and copies shouldn't change original
shallow_copy.names = [name + "c" for name in shallow_copy.names]
self.check_level_names(self.index, new_names)
def test_get_level_number_integer(self):
self.index.names = [1, 0]
assert self.index._get_level_number(1) == 0
assert self.index._get_level_number(0) == 1
pytest.raises(IndexError, self.index._get_level_number, 2)
tm.assert_raises_regex(KeyError, 'Level fourth not found',
self.index._get_level_number, 'fourth')
def test_from_arrays(self):
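        # rebuilding the index from its materialized level values
        # should round-trip to an equal MultiIndex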
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
# list of arrays as input
result = MultiIndex.from_arrays(arrays, names=self.index.names)
tm.assert_index_equal(result, self.index)
# infer correctly
result = MultiIndex.from_arrays([[pd.NaT, Timestamp('20130101')],
['a', 'b']])
assert result.levels[0].equals(Index([Timestamp('20130101')]))
assert result.levels[1].equals(Index(['a', 'b']))
def test_from_arrays_iterator(self):
# GH 18434
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
# iterator as input
result = MultiIndex.from_arrays(iter(arrays), names=self.index.names)
tm.assert_index_equal(result, self.index)
# invalid iterator input
with tm.assert_raises_regex(
TypeError, "Input must be a list / sequence of array-likes."):
MultiIndex.from_arrays(0)
def test_from_arrays_index_series_datetimetz(self):
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3,
tz='Asia/Tokyo')
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_timedelta(self):
idx1 = pd.timedelta_range('1 days', freq='D', periods=3)
idx2 = pd.timedelta_range('2 hours', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_period(self):
idx1 = pd.period_range('2011-01-01', freq='D', periods=3)
idx2 = pd.period_range('2015-01-01', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_datetimelike_mixed(self):
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3)
idx3 = pd.timedelta_range('1 days', freq='D', periods=3)
idx4 = pd.period_range('2011-01-01', freq='D', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2, idx3, idx4])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
tm.assert_index_equal(result.get_level_values(2), idx3)
tm.assert_index_equal(result.get_level_values(3), idx4)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1),
pd.Series(idx2),
pd.Series(idx3),
pd.Series(idx4)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result2.get_level_values(2), idx3)
tm.assert_index_equal(result2.get_level_values(3), idx4)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_categorical(self):
# GH13743
idx1 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=False)
idx2 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=True)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
result3 = pd.MultiIndex.from_arrays([idx1.values, idx2.values])
tm.assert_index_equal(result3.get_level_values(0), idx1)
tm.assert_index_equal(result3.get_level_values(1), idx2)
def test_from_arrays_empty(self):
# 0 levels
with tm.assert_raises_regex(
ValueError, "Must pass non-zero number of levels/labels"):
MultiIndex.from_arrays(arrays=[])
# 1 level
result = MultiIndex.from_arrays(arrays=[[]], names=['A'])
assert isinstance(result, MultiIndex)
expected = Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
# N levels
for N in [2, 3]:
arrays = [[]] * N
names = list('ABC')[:N]
result = MultiIndex.from_arrays(arrays=arrays, names=names)
expected = MultiIndex(levels=[[]] * N, labels=[[]] * N,
names=names)
tm.assert_index_equal(result, expected)
def test_from_arrays_invalid_input(self):
invalid_inputs = [1, [1], [1, 2], [[1], 2],
'a', ['a'], ['a', 'b'], [['a'], 'b']]
for i in invalid_inputs:
pytest.raises(TypeError, MultiIndex.from_arrays, arrays=i)
def test_from_arrays_different_lengths(self):
# see gh-13599
idx1 = [1, 2, 3]
idx2 = ['a', 'b']
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
idx1 = []
idx2 = ['a', 'b']
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
idx1 = [1, 2, 3]
idx2 = []
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
def test_from_product(self):
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
result = MultiIndex.from_product([first, second], names=names)
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
tm.assert_index_equal(result, expected)
def test_from_product_iterator(self):
# GH 18434
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
# iterator as input
result = MultiIndex.from_product(iter([first, second]), names=names)
tm.assert_index_equal(result, expected)
# Invalid non-iterable input
with tm.assert_raises_regex(
TypeError, "Input must be a list / sequence of iterables."):
MultiIndex.from_product(0)
def test_from_product_empty(self):
# 0 levels
with tm.assert_raises_regex(
ValueError, "Must pass non-zero number of levels/labels"):
MultiIndex.from_product([])
# 1 level
result = MultiIndex.from_product([[]], names=['A'])
expected = pd.Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
# 2 levels
l1 = [[], ['foo', 'bar', 'baz'], []]
l2 = [[], [], ['a', 'b', 'c']]
names = ['A', 'B']
for first, second in zip(l1, l2):
result = MultiIndex.from_product([first, second], names=names)
expected = MultiIndex(levels=[first, second],
labels=[[], []], names=names)
tm.assert_index_equal(result, expected)
# GH12258
names = ['A', 'B', 'C']
for N in range(4):
lvl2 = lrange(N)
result = MultiIndex.from_product([[], lvl2, []], names=names)
expected = MultiIndex(levels=[[], lvl2, []],
labels=[[], [], []], names=names)
tm.assert_index_equal(result, expected)
def test_from_product_invalid_input(self):
invalid_inputs = [1, [1], [1, 2], [[1], 2],
'a', ['a'], ['a', 'b'], [['a'], 'b']]
for i in invalid_inputs:
pytest.raises(TypeError, MultiIndex.from_product, iterables=i)
def test_from_product_datetimeindex(self):
dt_index = date_range('2000-01-01', periods=2)
mi = pd.MultiIndex.from_product([[1, 2], dt_index])
etalon = construct_1d_object_array_from_listlike([(1, pd.Timestamp(
'2000-01-01')), (1, pd.Timestamp('2000-01-02')), (2, pd.Timestamp(
'2000-01-01')), (2, pd.Timestamp('2000-01-02'))])
tm.assert_numpy_array_equal(mi.values, etalon)
def test_from_product_index_series_categorical(self):
# GH13743
first = ['foo', 'bar']
for ordered in [False, True]:
idx = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=ordered)
expected = pd.CategoricalIndex(list("abcaab") + list("abcaab"),
categories=list("bac"),
ordered=ordered)
for arr in [idx, pd.Series(idx), idx.values]:
result = pd.MultiIndex.from_product([first, arr])
tm.assert_index_equal(result.get_level_values(1), expected)
def test_values_boxed(self):
tuples = [(1, pd.Timestamp('2000-01-01')), (2, pd.NaT),
(3, pd.Timestamp('2000-01-03')),
(1, pd.Timestamp('2000-01-04')),
(2, pd.Timestamp('2000-01-02')),
(3, pd.Timestamp('2000-01-03'))]
result = pd.MultiIndex.from_tuples(tuples)
expected = construct_1d_object_array_from_listlike(tuples)
tm.assert_numpy_array_equal(result.values, expected)
# Check that code branches for boxed values produce identical results
tm.assert_numpy_array_equal(result.values[:4], result[:4].values)
def test_values_multiindex_datetimeindex(self):
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(10 ** 18, 10 ** 18 + 5)
naive = pd.DatetimeIndex(ints)
aware = pd.DatetimeIndex(ints, tz='US/Central')
idx = pd.MultiIndex.from_arrays([naive, aware])
result = idx.values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive)
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware)
# n_lev > n_lab
result = idx[:2].values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive[:2])
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware[:2])
def test_values_multiindex_periodindex(self):
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(2007, 2012)
pidx = pd.PeriodIndex(ints, freq='D')
idx = pd.MultiIndex.from_arrays([ints, pidx])
result = idx.values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx)
# n_lev > n_lab
result = idx[:2].values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints[:2]))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx[:2])
def test_append(self):
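        # appending complementary slices (and an empty list) should
        # reproduce the original index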
result = self.index[:3].append(self.index[3:])
assert result.equals(self.index)
foos = [self.index[:1], self.index[1:3], self.index[3:]]
result = foos[0].append(foos[1:])
assert result.equals(self.index)
# empty
result = self.index.append([])
assert result.equals(self.index)
def test_append_mixed_dtypes(self):
# GH 13660
dti = date_range('2011-01-01', freq='M', periods=3, )
dti_tz = date_range('2011-01-01', freq='M', periods=3, tz='US/Eastern')
pi = period_range('2011-01', freq='M', periods=3)
mi = MultiIndex.from_arrays([[1, 2, 3],
[1.1, np.nan, 3.3],
['a', 'b', 'c'],
dti, dti_tz, pi])
assert mi.nlevels == 6
res = mi.append(mi)
exp = MultiIndex.from_arrays([[1, 2, 3, 1, 2, 3],
[1.1, np.nan, 3.3, 1.1, np.nan, 3.3],
['a', 'b', 'c', 'a', 'b', 'c'],
dti.append(dti),
dti_tz.append(dti_tz),
pi.append(pi)])
tm.assert_index_equal(res, exp)
other = MultiIndex.from_arrays([['x', 'y', 'z'], ['x', 'y', 'z'],
['x', 'y', 'z'], ['x', 'y', 'z'],
['x', 'y', 'z'], ['x', 'y', 'z']])
res = mi.append(other)
exp = MultiIndex.from_arrays([[1, 2, 3, 'x', 'y', 'z'],
[1.1, np.nan, 3.3, 'x', 'y', 'z'],
['a', 'b', 'c', 'x', 'y', 'z'],
dti.append(pd.Index(['x', 'y', 'z'])),
dti_tz.append(pd.Index(['x', 'y', 'z'])),
pi.append(pd.Index(['x', 'y', 'z']))])
tm.assert_index_equal(res, exp)
def test_get_level_values(self):
result = self.index.get_level_values(0)
expected = Index(['foo', 'foo', 'bar', 'baz', 'qux', 'qux'],
name='first')
tm.assert_index_equal(result, expected)
assert result.name == 'first'
result = self.index.get_level_values('first')
expected = self.index.get_level_values(0)
tm.assert_index_equal(result, expected)
# GH 10460
index = MultiIndex(
levels=[CategoricalIndex(['A', 'B']),
CategoricalIndex([1, 2, 3])],
labels=[np.array([0, 0, 0, 1, 1, 1]),
np.array([0, 1, 2, 0, 1, 2])])
exp = CategoricalIndex(['A', 'A', 'A', 'B', 'B', 'B'])
tm.assert_index_equal(index.get_level_values(0), exp)
exp = CategoricalIndex([1, 2, 3, 1, 2, 3])
tm.assert_index_equal(index.get_level_values(1), exp)
def test_get_level_values_int_with_na(self):
# GH 17924
arrays = [['a', 'b', 'b'], [1, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = Index([1, np.nan, 2])
tm.assert_index_equal(result, expected)
arrays = [['a', 'b', 'b'], [np.nan, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = Index([np.nan, np.nan, 2])
tm.assert_index_equal(result, expected)
def test_get_level_values_na(self):
arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([np.nan, np.nan, np.nan])
tm.assert_index_equal(result, expected)
result = index.get_level_values(1)
expected = pd.Index(['a', np.nan, 1])
tm.assert_index_equal(result, expected)
arrays = [['a', 'b', 'b'], pd.DatetimeIndex([0, 1, pd.NaT])]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = pd.DatetimeIndex([0, 1, pd.NaT])
tm.assert_index_equal(result, expected)
arrays = [[], []]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([], dtype=object)
tm.assert_index_equal(result, expected)
def test_get_level_values_all_na(self):
# GH 17924 when level entirely consists of nan
arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([np.nan, np.nan, np.nan], dtype=np.float64)
tm.assert_index_equal(result, expected)
result = index.get_level_values(1)
expected = pd.Index(['a', np.nan, 1], dtype=object)
tm.assert_index_equal(result, expected)
def test_reorder_levels(self):
# this blows up
tm.assert_raises_regex(IndexError, '^Too many levels',
self.index.reorder_levels, [2, 1, 0])
def test_nlevels(self):
assert self.index.nlevels == 2
def test_iter(self):
result = list(self.index)
expected = [('foo', 'one'), ('foo', 'two'), ('bar', 'one'),
('baz', 'two'), ('qux', 'one'), ('qux', 'two')]
assert result == expected
def test_legacy_pickle(self):
if PY3:
pytest.skip("testing for legacy pickles not "
"support on py3")
path = tm.get_data_path('multiindex_v1.pickle')
obj = pd.read_pickle(path)
obj2 = MultiIndex.from_tuples(obj.values)
assert obj.equals(obj2)
res = obj.get_indexer(obj)
exp = np.arange(len(obj), dtype=np.intp)
assert_almost_equal(res, exp)
res = obj.get_indexer(obj2[::-1])
exp = obj.get_indexer(obj[::-1])
exp2 = obj2.get_indexer(obj2[::-1])
assert_almost_equal(res, exp)
assert_almost_equal(exp, exp2)
def test_legacy_v2_unpickle(self):
        # 0.7.3 -> 0.8.0 format change
path = tm.get_data_path('mindex_073.pickle')
obj = pd.read_pickle(path)
obj2 = MultiIndex.from_tuples(obj.values)
assert obj.equals(obj2)
res = obj.get_indexer(obj)
exp = np.arange(len(obj), dtype=np.intp)
assert_almost_equal(res, exp)
res = obj.get_indexer(obj2[::-1])
exp = obj.get_indexer(obj[::-1])
exp2 = obj2.get_indexer(obj2[::-1])
assert_almost_equal(res, exp)
assert_almost_equal(exp, exp2)
def test_roundtrip_pickle_with_tz(self):
# GH 8367
# round-trip of timezone
index = MultiIndex.from_product(
[[1, 2], ['a', 'b'], date_range('20130101', periods=3,
tz='US/Eastern')
], names=['one', 'two', 'three'])
unpickled = tm.round_trip_pickle(index)
assert index.equal_levels(unpickled)
def test_from_tuples_index_values(self):
result = MultiIndex.from_tuples(self.index)
assert (result.values == self.index.values).all()
def test_contains(self):
assert ('foo', 'two') in self.index
assert ('bar', 'two') not in self.index
assert None not in self.index
def test_contains_top_level(self):
midx = MultiIndex.from_product([['A', 'B'], [1, 2]])
assert 'A' in midx
assert 'A' not in midx._engine
def test_contains_with_nat(self):
# MI with a NaT
mi = MultiIndex(levels=[['C'],
pd.date_range('2012-01-01', periods=5)],
labels=[[0, 0, 0, 0, 0, 0], [-1, 0, 1, 2, 3, 4]],
names=[None, 'B'])
assert ('C', pd.Timestamp('2012-01-01')) in mi
for val in mi.values:
assert val in mi
def test_is_all_dates(self):
assert not self.index.is_all_dates
def test_is_numeric(self):
# MultiIndex is never numeric
assert not self.index.is_numeric()
def test_getitem(self):
# scalar
assert self.index[2] == ('bar', 'one')
# slice
result = self.index[2:5]
expected = self.index[[2, 3, 4]]
assert result.equals(expected)
# boolean
result = self.index[[True, False, True, False, True, True]]
result2 = self.index[np.array([True, False, True, False, True, True])]
expected = self.index[[0, 2, 4, 5]]
assert result.equals(expected)
assert result2.equals(expected)
def test_getitem_group_select(self):
sorted_idx, _ = self.index.sortlevel(0)
assert sorted_idx.get_loc('baz') == slice(3, 4)
assert sorted_idx.get_loc('foo') == slice(0, 2)
def test_get_loc(self):
assert self.index.get_loc(('foo', 'two')) == 1
assert self.index.get_loc(('baz', 'two')) == 3
pytest.raises(KeyError, self.index.get_loc, ('bar', 'two'))
pytest.raises(KeyError, self.index.get_loc, 'quux')
pytest.raises(NotImplementedError, self.index.get_loc, 'foo',
method='nearest')
# 3 levels
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
pytest.raises(KeyError, index.get_loc, (1, 1))
assert index.get_loc((2, 0)) == slice(3, 5)
def test_get_loc_duplicates(self):
index = Index([2, 2, 2, 2])
result = index.get_loc(2)
expected = slice(0, 4)
assert result == expected
# pytest.raises(Exception, index.get_loc, 2)
index = Index(['c', 'a', 'a', 'b', 'b'])
rs = index.get_loc('c')
xp = 0
assert rs == xp
def test_get_value_duplicates(self):
index = MultiIndex(levels=[['D', 'B', 'C'],
[0, 26, 27, 37, 57, 67, 75, 82]],
labels=[[0, 0, 0, 1, 2, 2, 2, 2, 2, 2],
[1, 3, 4, 6, 0, 2, 2, 3, 5, 7]],
names=['tag', 'day'])
assert index.get_loc('D') == slice(0, 3)
with pytest.raises(KeyError):
index._engine.get_value(np.array([]), 'D')
def test_get_loc_level(self):
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
loc, new_index = index.get_loc_level((0, 1))
expected = slice(1, 2)
exp_index = index[expected].droplevel(0).droplevel(0)
assert loc == expected
assert new_index.equals(exp_index)
loc, new_index = index.get_loc_level((0, 1, 0))
expected = 1
assert loc == expected
assert new_index is None
pytest.raises(KeyError, index.get_loc_level, (2, 2))
index = MultiIndex(levels=[[2000], lrange(4)], labels=[np.array(
[0, 0, 0, 0]), np.array([0, 1, 2, 3])])
result, new_index = index.get_loc_level((2000, slice(None, None)))
expected = slice(None, None)
assert result == expected
assert new_index.equals(index.droplevel(0))
@pytest.mark.parametrize('level', [0, 1])
@pytest.mark.parametrize('null_val', [np.nan, pd.NaT, None])
def test_get_loc_nan(self, level, null_val):
# GH 18485 : NaN in MultiIndex
levels = [['a', 'b'], ['c', 'd']]
key = ['b', 'd']
levels[level] = np.array([0, null_val], dtype=type(null_val))
key[level] = null_val
idx = MultiIndex.from_product(levels)
assert idx.get_loc(tuple(key)) == 3
def test_get_loc_missing_nan(self):
# GH 8569
idx = MultiIndex.from_arrays([[1.0, 2.0], [3.0, 4.0]])
assert isinstance(idx.get_loc(1), slice)
pytest.raises(KeyError, idx.get_loc, 3)
pytest.raises(KeyError, idx.get_loc, np.nan)
pytest.raises(KeyError, idx.get_loc, [np.nan])
@pytest.mark.parametrize('dtype1', [int, float, bool, str])
@pytest.mark.parametrize('dtype2', [int, float, bool, str])
def test_get_loc_multiple_dtypes(self, dtype1, dtype2):
# GH 18520
levels = [np.array([0, 1]).astype(dtype1),
np.array([0, 1]).astype(dtype2)]
idx = pd.MultiIndex.from_product(levels)
assert idx.get_loc(idx[2]) == 2
@pytest.mark.parametrize('level', [0, 1])
@pytest.mark.parametrize('dtypes', [[int, float], [float, int]])
def test_get_loc_implicit_cast(self, level, dtypes):
# GH 18818, GH 15994 : as flat index, cast int to float and vice-versa
levels = [['a', 'b'], ['c', 'd']]
key = ['b', 'd']
lev_dtype, key_dtype = dtypes
levels[level] = np.array([0, 1], dtype=lev_dtype)
key[level] = key_dtype(1)
idx = MultiIndex.from_product(levels)
assert idx.get_loc(tuple(key)) == 3
def test_get_loc_cast_bool(self):
# GH 19086 : int is casted to bool, but not vice-versa
levels = [[False, True], np.arange(2, dtype='int64')]
idx = MultiIndex.from_product(levels)
assert idx.get_loc((0, 1)) == 1
assert idx.get_loc((1, 0)) == 2
pytest.raises(KeyError, idx.get_loc, (False, True))
pytest.raises(KeyError, idx.get_loc, (True, False))
def test_slice_locs(self):
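        # slice_locs on a stacked frame's MultiIndex should line up
        # with positional slicing of the original frame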
df = tm.makeTimeDataFrame()
stacked = df.stack()
idx = stacked.index
slob = slice(*idx.slice_locs(df.index[5], df.index[15]))
sliced = stacked[slob]
expected = df[5:16].stack()
tm.assert_almost_equal(sliced.values, expected.values)
slob = slice(*idx.slice_locs(df.index[5] + timedelta(seconds=30),
df.index[15] - timedelta(seconds=30)))
sliced = stacked[slob]
expected = df[6:15].stack()
tm.assert_almost_equal(sliced.values, expected.values)
def test_slice_locs_with_type_mismatch(self):
df = tm.makeTimeDataFrame()
stacked = df.stack()
idx = stacked.index
tm.assert_raises_regex(TypeError, '^Level type mismatch',
idx.slice_locs, (1, 3))
tm.assert_raises_regex(TypeError, '^Level type mismatch',
idx.slice_locs,
df.index[5] + timedelta(
seconds=30), (5, 2))
df = tm.makeCustomDataframe(5, 5)
stacked = df.stack()
idx = stacked.index
with tm.assert_raises_regex(TypeError, '^Level type mismatch'):
idx.slice_locs(timedelta(seconds=30))
# TODO: Try creating a UnicodeDecodeError in exception message
with tm.assert_raises_regex(TypeError, '^Level type mismatch'):
idx.slice_locs(df.index[1], (16, "a"))
def test_slice_locs_not_sorted(self):
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
tm.assert_raises_regex(KeyError, "[Kk]ey length.*greater than "
"MultiIndex lexsort depth",
index.slice_locs, (1, 0, 1), (2, 1, 0))
# works
sorted_index, _ = index.sortlevel(0)
# should there be a test case here???
sorted_index.slice_locs((1, 0, 1), (2, 1, 0))
def test_slice_locs_partial(self):
sorted_idx, _ = self.index.sortlevel(0)
result = sorted_idx.slice_locs(('foo', 'two'), ('qux', 'one'))
assert result == (1, 5)
result = sorted_idx.slice_locs(None, ('qux', 'one'))
assert result == (0, 5)
result = sorted_idx.slice_locs(('foo', 'two'), None)
assert result == (1, len(sorted_idx))
result = sorted_idx.slice_locs('bar', 'baz')
assert result == (2, 4)
def test_slice_locs_not_contained(self):
# some searchsorted action
index = MultiIndex(levels=[[0, 2, 4, 6], [0, 2, 4]],
labels=[[0, 0, 0, 1, 1, 2, 3, 3, 3],
[0, 1, 2, 1, 2, 2, 0, 1, 2]], sortorder=0)
result = index.slice_locs((1, 0), (5, 2))
assert result == (3, 6)
result = index.slice_locs(1, 5)
assert result == (3, 6)
result = index.slice_locs((2, 2), (5, 2))
assert result == (3, 6)
result = index.slice_locs(2, 5)
assert result == (3, 6)
result = index.slice_locs((1, 0), (6, 3))
assert result == (3, 8)
result = index.slice_locs(-1, 10)
assert result == (0, len(index))
def test_consistency(self):
# need to construct an overflow
major_axis = lrange(70000)
minor_axis = lrange(10)
major_labels = np.arange(70000)
minor_labels = np.repeat(lrange(10), 7000)
        # the fact that it works means it's consistent
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
# inconsistent
major_labels = np.array([0, 0, 1, 1, 1, 2, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
assert not index.is_unique
def test_truncate(self):
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
result = index.truncate(before=1)
assert 'foo' not in result.levels[0]
assert 1 in result.levels[0]
result = index.truncate(after=1)
assert 2 not in result.levels[0]
assert 1 in result.levels[0]
result = index.truncate(before=1, after=2)
assert len(result.levels[0]) == 2
# after < before
pytest.raises(ValueError, index.truncate, 3, 1)
def test_get_indexer(self):
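        # indexer alignment between overlapping slices, including
        # pad/ffill, backfill/bfill and non-MultiIndex targets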
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 2, 3, 3], dtype=np.intp)
minor_labels = np.array([0, 1, 0, 0, 1, 0, 1], dtype=np.intp)
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
idx1 = index[:5]
idx2 = index[[1, 3, 5]]
r1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, np.array([1, 3, -1], dtype=np.intp))
r1 = idx2.get_indexer(idx1, method='pad')
e1 = np.array([-1, 0, 0, 1, 1], dtype=np.intp)
assert_almost_equal(r1, e1)
r2 = idx2.get_indexer(idx1[::-1], method='pad')
assert_almost_equal(r2, e1[::-1])
rffill1 = idx2.get_indexer(idx1, method='ffill')
assert_almost_equal(r1, rffill1)
r1 = idx2.get_indexer(idx1, method='backfill')
e1 = np.array([0, 0, 1, 1, 2], dtype=np.intp)
assert_almost_equal(r1, e1)
r2 = idx2.get_indexer(idx1[::-1], method='backfill')
assert_almost_equal(r2, e1[::-1])
rbfill1 = idx2.get_indexer(idx1, method='bfill')
assert_almost_equal(r1, rbfill1)
# pass non-MultiIndex
r1 = idx1.get_indexer(idx2.values)
rexp1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, rexp1)
r1 = idx1.get_indexer([1, 2, 3])
assert (r1 == [-1, -1, -1]).all()
# create index with duplicates
idx1 = Index(lrange(10) + lrange(10))
idx2 = Index(lrange(20))
msg = "Reindexing only valid with uniquely valued Index objects"
with tm.assert_raises_regex(InvalidIndexError, msg):
idx1.get_indexer(idx2)
def test_get_indexer_nearest(self):
midx = MultiIndex.from_tuples([('a', 1), ('b', 2)])
with pytest.raises(NotImplementedError):
midx.get_indexer(['a'], method='nearest')
with pytest.raises(NotImplementedError):
midx.get_indexer(['a'], method='pad', tolerance=2)
def test_hash_collisions(self):
# non-smoke test that we don't get hash collisions
index = MultiIndex.from_product([np.arange(1000), np.arange(1000)],
names=['one', 'two'])
result = index.get_indexer(index.values)
tm.assert_numpy_array_equal(result, np.arange(
len(index), dtype='intp'))
for i in [0, 1, len(index) - 2, len(index) - 1]:
result = index.get_loc(index[i])
assert result == i
def test_format(self):
self.index.format()
self.index[:0].format()
def test_format_integer_names(self):
index = MultiIndex(levels=[[0, 1], [0, 1]],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]], names=[0, 1])
index.format(names=True)
def test_format_sparse_display(self):
index = MultiIndex(levels=[[0, 1], [0, 1], [0, 1], [0]],
labels=[[0, 0, 0, 1, 1, 1], [0, 0, 1, 0, 0, 1],
[0, 1, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0]])
result = index.format()
assert result[3] == '1 0 0 0'
def test_format_sparse_config(self):
warn_filters = warnings.filters
warnings.filterwarnings('ignore', category=FutureWarning,
module=".*format")
# GH1538
pd.set_option('display.multi_sparse', False)
result = self.index.format()
assert result[1] == 'foo two'
tm.reset_display_options()
warnings.filters = warn_filters
def test_to_frame(self):
tuples = [(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')]
index = MultiIndex.from_tuples(tuples)
result = index.to_frame(index=False)
expected = DataFrame(tuples)
tm.assert_frame_equal(result, expected)
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
tuples = [(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')]
index = MultiIndex.from_tuples(tuples, names=['first', 'second'])
result = index.to_frame(index=False)
expected = DataFrame(tuples)
expected.columns = ['first', 'second']
tm.assert_frame_equal(result, expected)
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
index = MultiIndex.from_product([range(5),
pd.date_range('20130101', periods=3)])
result = index.to_frame(index=False)
expected = DataFrame(
{0: np.repeat(np.arange(5, dtype='int64'), 3),
1: np.tile(pd.date_range('20130101', periods=3), 5)})
tm.assert_frame_equal(result, expected)
index = MultiIndex.from_product([range(5),
pd.date_range('20130101', periods=3)])
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
def test_to_hierarchical(self):
index = MultiIndex.from_tuples([(1, 'one'), (1, 'two'), (2, 'one'), (
2, 'two')])
result = index.to_hierarchical(3)
expected = MultiIndex(levels=[[1, 2], ['one', 'two']],
labels=[[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1]])
tm.assert_index_equal(result, expected)
assert result.names == index.names
# K > 1
result = index.to_hierarchical(3, 2)
expected = MultiIndex(levels=[[1, 2], ['one', 'two']],
labels=[[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]])
tm.assert_index_equal(result, expected)
assert result.names == index.names
# non-sorted
index = MultiIndex.from_tuples([(2, 'c'), (1, 'b'),
(2, 'a'), (2, 'b')],
names=['N1', 'N2'])
result = index.to_hierarchical(2)
expected = MultiIndex.from_tuples([(2, 'c'), (2, 'c'), (1, 'b'),
(1, 'b'),
(2, 'a'), (2, 'a'),
(2, 'b'), (2, 'b')],
names=['N1', 'N2'])
tm.assert_index_equal(result, expected)
assert result.names == index.names
def test_bounds(self):
self.index._bounds
def test_equals_multi(self):
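        # equality against itself and against indexes that differ in
        # number of levels, level values or labels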
assert self.index.equals(self.index)
assert not self.index.equals(self.index.values)
assert self.index.equals(Index(self.index.values))
assert self.index.equal_levels(self.index)
assert not self.index.equals(self.index[:-1])
assert not self.index.equals(self.index[-1])
# different number of levels
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
index2 = MultiIndex(levels=index.levels[:-1], labels=index.labels[:-1])
assert not index.equals(index2)
assert not index.equal_levels(index2)
# levels are different
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 2, 3])
minor_labels = np.array([0, 1, 0, 0, 1, 0])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
assert not self.index.equals(index)
assert not self.index.equal_levels(index)
# some of the labels are different
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 2, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
assert not self.index.equals(index)
def test_equals_missing_values(self):
# make sure take is not using -1
i = pd.MultiIndex.from_tuples([(0, pd.NaT),
(0, pd.Timestamp('20130101'))])
result = i[0:1].equals(i[0])
assert not result
result = i[1:2].equals(i[1])
assert not result
def test_identical(self):
mi = self.index.copy()
mi2 = self.index.copy()
assert mi.identical(mi2)
mi = mi.set_names(['new1', 'new2'])
assert mi.equals(mi2)
assert not mi.identical(mi2)
mi2 = mi2.set_names(['new1', 'new2'])
assert mi.identical(mi2)
mi3 = Index(mi.tolist(), names=mi.names)
mi4 = Index(mi.tolist(), names=mi.names, tupleize_cols=False)
assert mi.identical(mi3)
assert not mi.identical(mi4)
assert mi.equals(mi4)
def test_is_(self):
mi = MultiIndex.from_tuples(lzip(range(10), range(10)))
assert mi.is_(mi)
assert mi.is_(mi.view())
assert mi.is_(mi.view().view().view().view())
mi2 = mi.view()
# names are metadata, they don't change id
mi2.names = ["A", "B"]
assert mi2.is_(mi)
assert mi.is_(mi2)
assert mi.is_(mi.set_names(["C", "D"]))
mi2 = mi.view()
mi2.set_names(["E", "F"], inplace=True)
assert mi.is_(mi2)
# levels are inherent properties, they change identity
mi3 = mi2.set_levels([lrange(10), lrange(10)])
assert not mi3.is_(mi2)
# shouldn't change
assert mi2.is_(mi)
mi4 = mi3.view()
# GH 17464 - Remove duplicate MultiIndex levels
mi4.set_levels([lrange(10), lrange(10)], inplace=True)
assert not mi4.is_(mi3)
mi5 = mi.view()
mi5.set_levels(mi5.levels, inplace=True)
assert not mi5.is_(mi)
def test_union(self):
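        # union of two overlapping slices should reproduce the full
        # sorted set of tuples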
piece1 = self.index[:5][::-1]
piece2 = self.index[3:]
the_union = piece1 | piece2
tups = sorted(self.index.values)
expected = MultiIndex.from_tuples(tups)
assert the_union.equals(expected)
# corner case, pass self or empty thing:
the_union = self.index.union(self.index)
assert the_union is self.index
the_union = self.index.union(self.index[:0])
assert the_union is self.index
# won't work in python 3
# tuples = self.index.values
# result = self.index[:4] | tuples[4:]
# assert result.equals(tuples)
# not valid for python 3
# def test_union_with_regular_index(self):
# other = Index(['A', 'B', 'C'])
# result = other.union(self.index)
# assert ('foo', 'one') in result
# assert 'B' in result
# result2 = self.index.union(other)
# assert result.equals(result2)
def test_intersection(self):
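        # intersection of two overlapping slices keeps only the
        # shared tuples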
piece1 = self.index[:5][::-1]
piece2 = self.index[3:]
the_int = piece1 & piece2
tups = sorted(self.index[3:5].values)
expected = MultiIndex.from_tuples(tups)
assert the_int.equals(expected)
# corner case, pass self
the_int = self.index.intersection(self.index)
assert the_int is self.index
# empty intersection: disjoint
empty = self.index[:2] & self.index[2:]
expected = self.index[:0]
assert empty.equals(expected)
# can't do in python 3
# tuples = self.index.values
# result = self.index & tuples
# assert result.equals(tuples)
def test_sub(self):
first = self.index
# - now raises (previously was set op difference)
with pytest.raises(TypeError):
first - self.index[-3:]
with pytest.raises(TypeError):
self.index[-3:] - first
with pytest.raises(TypeError):
self.index[-3:] - first.tolist()
with pytest.raises(TypeError):
first.tolist() - self.index[-3:]
def test_difference(self):
first = self.index
result = first.difference(self.index[-3:])
expected = MultiIndex.from_tuples(sorted(self.index[:-3].values),
sortorder=0,
names=self.index.names)
assert isinstance(result, MultiIndex)
assert result.equals(expected)
assert result.names == self.index.names
# empty difference: reflexive
result = self.index.difference(self.index)
expected = self.index[:0]
assert result.equals(expected)
assert result.names == self.index.names
# empty difference: superset
result = self.index[-3:].difference(self.index)
expected = self.index[:0]
assert result.equals(expected)
assert result.names == self.index.names
# empty difference: degenerate
result = self.index[:0].difference(self.index)
expected = self.index[:0]
assert result.equals(expected)
assert result.names == self.index.names
# names not the same
chunklet = self.index[-3:]
chunklet.names = ['foo', 'baz']
result = first.difference(chunklet)
assert result.names == (None, None)
# empty, but non-equal
result = self.index.difference(self.index.sortlevel(1)[0])
assert len(result) == 0
        # non-MultiIndex input (array of tuples) works and gives an
        # empty result
result = first.difference(first.values)
assert result.equals(first[:0])
# name from empty array
result = first.difference([])
assert first.equals(result)
assert first.names == result.names
# name from non-empty array
result = first.difference([('foo', 'one')])
expected = pd.MultiIndex.from_tuples([('bar', 'one'), ('baz', 'two'), (
'foo', 'two'), ('qux', 'one'), ('qux', 'two')])
expected.names = first.names
        tm.assert_index_equal(result, expected)
        assert first.names == result.names
tm.assert_raises_regex(TypeError, "other must be a MultiIndex "
"or a list of tuples",
first.difference, [1, 2, 3, 4, 5])
def test_from_tuples(self):
tm.assert_raises_regex(TypeError, 'Cannot infer number of levels '
'from empty list',
MultiIndex.from_tuples, [])
expected = MultiIndex(levels=[[1, 3], [2, 4]],
labels=[[0, 1], [0, 1]],
names=['a', 'b'])
# input tuples
result = MultiIndex.from_tuples(((1, 2), (3, 4)), names=['a', 'b'])
tm.assert_index_equal(result, expected)
def test_from_tuples_iterator(self):
# GH 18434
# input iterator for tuples
expected = MultiIndex(levels=[[1, 3], [2, 4]],
labels=[[0, 1], [0, 1]],
names=['a', 'b'])
result = MultiIndex.from_tuples(zip([1, 3], [2, 4]), names=['a', 'b'])
tm.assert_index_equal(result, expected)
# input non-iterables
with tm.assert_raises_regex(
TypeError, 'Input must be a list / sequence of tuple-likes.'):
MultiIndex.from_tuples(0)
def test_from_tuples_empty(self):
# GH 16777
result = MultiIndex.from_tuples([], names=['a', 'b'])
expected = MultiIndex.from_arrays(arrays=[[], []],
names=['a', 'b'])
tm.assert_index_equal(result, expected)
def test_argsort(self):
result = self.index.argsort()
expected = self.index.values.argsort()
tm.assert_numpy_array_equal(result, expected)
def test_sortlevel(self):
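        # shuffled tuples should come back lexicographically sorted by
        # the requested level (then the remaining levels)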
import random
tuples = list(self.index)
random.shuffle(tuples)
index = MultiIndex.from_tuples(tuples)
sorted_idx, _ = index.sortlevel(0)
expected = MultiIndex.from_tuples(sorted(tuples))
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(0, ascending=False)
assert sorted_idx.equals(expected[::-1])
sorted_idx, _ = index.sortlevel(1)
by1 = sorted(tuples, key=lambda x: (x[1], x[0]))
expected = MultiIndex.from_tuples(by1)
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(1, ascending=False)
assert sorted_idx.equals(expected[::-1])
def test_sortlevel_not_sort_remaining(self):
mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC'))
sorted_idx, _ = mi.sortlevel('A', sort_remaining=False)
assert sorted_idx.equals(mi)
def test_sortlevel_deterministic(self):
tuples = [('bar', 'one'), ('foo', 'two'), ('qux', 'two'),
('foo', 'one'), ('baz', 'two'), ('qux', 'one')]
index = MultiIndex.from_tuples(tuples)
sorted_idx, _ = index.sortlevel(0)
expected = MultiIndex.from_tuples(sorted(tuples))
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(0, ascending=False)
assert sorted_idx.equals(expected[::-1])
sorted_idx, _ = index.sortlevel(1)
by1 = sorted(tuples, key=lambda x: (x[1], x[0]))
expected = MultiIndex.from_tuples(by1)
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(1, ascending=False)
assert sorted_idx.equals(expected[::-1])
def test_dims(self):
pass
def test_drop(self):
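        # drop by full tuples, by a partial (level-0) key, and with
        # errors='ignore'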
dropped = self.index.drop([('foo', 'two'), ('qux', 'one')])
index = MultiIndex.from_tuples([('foo', 'two'), ('qux', 'one')])
dropped2 = self.index.drop(index)
expected = self.index[[0, 2, 3, 5]]
tm.assert_index_equal(dropped, expected)
tm.assert_index_equal(dropped2, expected)
dropped = self.index.drop(['bar'])
expected = self.index[[0, 1, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
dropped = self.index.drop('foo')
expected = self.index[[2, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
index = MultiIndex.from_tuples([('bar', 'two')])
pytest.raises(KeyError, self.index.drop, [('bar', 'two')])
pytest.raises(KeyError, self.index.drop, index)
pytest.raises(KeyError, self.index.drop, ['foo', 'two'])
# partially correct argument
mixed_index = MultiIndex.from_tuples([('qux', 'one'), ('bar', 'two')])
pytest.raises(KeyError, self.index.drop, mixed_index)
# error='ignore'
dropped = self.index.drop(index, errors='ignore')
expected = self.index[[0, 1, 2, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
dropped = self.index.drop(mixed_index, errors='ignore')
expected = self.index[[0, 1, 2, 3, 5]]
tm.assert_index_equal(dropped, expected)
dropped = self.index.drop(['foo', 'two'], errors='ignore')
expected = self.index[[2, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
# mixed partial / full drop
dropped = self.index.drop(['foo', ('qux', 'one')])
expected = self.index[[2, 3, 5]]
tm.assert_index_equal(dropped, expected)
# mixed partial / full drop / error='ignore'
mixed_index = ['foo', ('qux', 'one'), 'two']
pytest.raises(KeyError, self.index.drop, mixed_index)
dropped = self.index.drop(mixed_index, errors='ignore')
expected = self.index[[2, 3, 5]]
tm.assert_index_equal(dropped, expected)
def test_droplevel_with_names(self):
index = self.index[self.index.get_loc('foo')]
dropped = index.droplevel(0)
assert dropped.name == 'second'
index = MultiIndex(
levels=[Index(lrange(4)), Index(lrange(4)), Index(lrange(4))],
labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])],
names=['one', 'two', 'three'])
dropped = index.droplevel(0)
assert dropped.names == ('two', 'three')
dropped = index.droplevel('two')
expected = index.droplevel(1)
assert dropped.equals(expected)
def test_droplevel_list(self):
index = MultiIndex(
levels=[Index(lrange(4)), Index(lrange(4)), Index(lrange(4))],
labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])],
names=['one', 'two', 'three'])
dropped = index[:2].droplevel(['three', 'one'])
expected = index[:2].droplevel(2).droplevel(0)
assert dropped.equals(expected)
dropped = index[:2].droplevel([])
expected = index[:2]
assert dropped.equals(expected)
with pytest.raises(ValueError):
index[:2].droplevel(['one', 'two', 'three'])
with pytest.raises(KeyError):
index[:2].droplevel(['one', 'four'])
def test_drop_not_lexsorted(self):
# GH 12078
# define the lexsorted version of the multi-index
tuples = [('a', ''), ('b1', 'c1'), ('b2', 'c2')]
lexsorted_mi = MultiIndex.from_tuples(tuples, names=['b', 'c'])
assert lexsorted_mi.is_lexsorted()
# and the not-lexsorted version
df = pd.DataFrame(columns=['a', 'b', 'c', 'd'],
data=[[1, 'b1', 'c1', 3], [1, 'b2', 'c2', 4]])
df = df.pivot_table(index='a', columns=['b', 'c'], values='d')
df = df.reset_index()
not_lexsorted_mi = df.columns
assert not not_lexsorted_mi.is_lexsorted()
# compare the results
tm.assert_index_equal(lexsorted_mi, not_lexsorted_mi)
with tm.assert_produces_warning(PerformanceWarning):
tm.assert_index_equal(lexsorted_mi.drop('a'),
not_lexsorted_mi.drop('a'))
def test_insert(self):
# key contained in all levels
new_index = self.index.insert(0, ('bar', 'two'))
assert new_index.equal_levels(self.index)
assert new_index[0] == ('bar', 'two')
# key not contained in all levels
new_index = self.index.insert(0, ('abc', 'three'))
exp0 = Index(list(self.index.levels[0]) + ['abc'], name='first')
tm.assert_index_equal(new_index.levels[0], exp0)
exp1 = Index(list(self.index.levels[1]) + ['three'], name='second')
tm.assert_index_equal(new_index.levels[1], exp1)
assert new_index[0] == ('abc', 'three')
# key wrong length
msg = "Item must have length equal to number of levels"
with tm.assert_raises_regex(ValueError, msg):
self.index.insert(0, ('foo2',))
left = pd.DataFrame([['a', 'b', 0], ['b', 'd', 1]],
columns=['1st', '2nd', '3rd'])
left.set_index(['1st', '2nd'], inplace=True)
ts = left['3rd'].copy(deep=True)
left.loc[('b', 'x'), '3rd'] = 2
left.loc[('b', 'a'), '3rd'] = -1
left.loc[('b', 'b'), '3rd'] = 3
left.loc[('a', 'x'), '3rd'] = 4
left.loc[('a', 'w'), '3rd'] = 5
left.loc[('a', 'a'), '3rd'] = 6
ts.loc[('b', 'x')] = 2
ts.loc['b', 'a'] = -1
ts.loc[('b', 'b')] = 3
ts.loc['a', 'x'] = 4
ts.loc[('a', 'w')] = 5
ts.loc['a', 'a'] = 6
right = pd.DataFrame([['a', 'b', 0], ['b', 'd', 1], ['b', 'x', 2],
['b', 'a', -1], ['b', 'b', 3], ['a', 'x', 4],
['a', 'w', 5], ['a', 'a', 6]],
columns=['1st', '2nd', '3rd'])
right.set_index(['1st', '2nd'], inplace=True)
        # FIXME: data types change to float because of intermediate
        # NaN insertion;
tm.assert_frame_equal(left, right, check_dtype=False)
tm.assert_series_equal(ts, right['3rd'])
# GH9250
idx = [('test1', i) for i in range(5)] + \
[('test2', i) for i in range(6)] + \
[('test', 17), ('test', 18)]
left = pd.Series(np.linspace(0, 10, 11),
pd.MultiIndex.from_tuples(idx[:-2]))
left.loc[('test', 17)] = 11
left.loc[('test', 18)] = 12
right = pd.Series(np.linspace(0, 12, 13),
pd.MultiIndex.from_tuples(idx))
tm.assert_series_equal(left, right)
def test_take_preserve_name(self):
taken = self.index.take([3, 0, 1])
assert taken.names == self.index.names
def test_take_fill_value(self):
# GH 12631
vals = [['A', 'B'],
[pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02')]]
idx = pd.MultiIndex.from_product(vals, names=['str', 'dt'])
result = idx.take(np.array([1, 0, -1]))
exp_vals = [('A', pd.Timestamp('2011-01-02')),
('A', pd.Timestamp('2011-01-01')),
('B', pd.Timestamp('2011-01-02'))]
expected = pd.MultiIndex.from_tuples(exp_vals, names=['str', 'dt'])
tm.assert_index_equal(result, expected)
# fill_value
result = idx.take(np.array([1, 0, -1]), fill_value=True)
exp_vals = [('A', pd.Timestamp('2011-01-02')),
('A', pd.Timestamp('2011-01-01')),
(np.nan, pd.NaT)]
expected = pd.MultiIndex.from_tuples(exp_vals, names=['str', 'dt'])
tm.assert_index_equal(result, expected)
# allow_fill=False
result = idx.take(np.array([1, 0, -1]), allow_fill=False,
fill_value=True)
exp_vals = [('A', pd.Timestamp('2011-01-02')),
('A', pd.Timestamp('2011-01-01')),
('B', pd.Timestamp('2011-01-02'))]
expected = pd.MultiIndex.from_tuples(exp_vals, names=['str', 'dt'])
tm.assert_index_equal(result, expected)
msg = ('When allow_fill=True and fill_value is not None, '
'all indices must be >= -1')
with tm.assert_raises_regex(ValueError, msg):
idx.take(np.array([1, 0, -2]), fill_value=True)
with tm.assert_raises_regex(ValueError, msg):
idx.take(np.array([1, 0, -5]), fill_value=True)
with pytest.raises(IndexError):
idx.take(np.array([1, -5]))
    def test_take_invalid_kwargs(self):
vals = [['A', 'B'],
[pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02')]]
idx = pd.MultiIndex.from_product(vals, names=['str', 'dt'])
indices = [1, 2]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assert_raises_regex(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, idx.take,
indices, mode='clip')
@pytest.mark.parametrize('other',
[Index(['three', 'one', 'two']),
Index(['one']),
Index(['one', 'three'])])
def test_join_level(self, other, join_type):
join_index, lidx, ridx = other.join(self.index, how=join_type,
level='second',
return_indexers=True)
exp_level = other.join(self.index.levels[1], how=join_type)
assert join_index.levels[0].equals(self.index.levels[0])
assert join_index.levels[1].equals(exp_level)
# pare down levels
mask = np.array(
[x[1] in exp_level for x in self.index], dtype=bool)
exp_values = self.index.values[mask]
tm.assert_numpy_array_equal(join_index.values, exp_values)
if join_type in ('outer', 'inner'):
join_index2, ridx2, lidx2 = \
self.index.join(other, how=join_type, level='second',
return_indexers=True)
assert join_index.equals(join_index2)
tm.assert_numpy_array_equal(lidx, lidx2)
tm.assert_numpy_array_equal(ridx, ridx2)
tm.assert_numpy_array_equal(join_index2.values, exp_values)
def test_join_level_corner_case(self):
# some corner cases
idx = Index(['three', 'one', 'two'])
result = idx.join(self.index, level='second')
assert isinstance(result, MultiIndex)
tm.assert_raises_regex(TypeError, "Join.*MultiIndex.*ambiguous",
self.index.join, self.index, level=1)
def test_join_self(self, join_type):
res = self.index
joined = res.join(res, how=join_type)
assert res is joined
def test_join_multi(self):
# GH 10665
midx = pd.MultiIndex.from_product(
[np.arange(4), np.arange(4)], names=['a', 'b'])
idx = pd.Index([1, 2, 5], name='b')
# inner
jidx, lidx, ridx = midx.join(idx, how='inner', return_indexers=True)
exp_idx = pd.MultiIndex.from_product(
[np.arange(4), [1, 2]], names=['a', 'b'])
exp_lidx = np.array([1, 2, 5, 6, 9, 10, 13, 14], dtype=np.intp)
exp_ridx = np.array([0, 1, 0, 1, 0, 1, 0, 1], dtype=np.intp)
tm.assert_index_equal(jidx, exp_idx)
tm.assert_numpy_array_equal(lidx, exp_lidx)
tm.assert_numpy_array_equal(ridx, exp_ridx)
# flip
jidx, ridx, lidx = idx.join(midx, how='inner', return_indexers=True)
tm.assert_index_equal(jidx, exp_idx)
tm.assert_numpy_array_equal(lidx, exp_lidx)
tm.assert_numpy_array_equal(ridx, exp_ridx)
# keep MultiIndex
jidx, lidx, ridx = midx.join(idx, how='left', return_indexers=True)
exp_ridx = np.array([-1, 0, 1, -1, -1, 0, 1, -1, -1, 0, 1, -1, -1, 0,
1, -1], dtype=np.intp)
tm.assert_index_equal(jidx, midx)
assert lidx is None
tm.assert_numpy_array_equal(ridx, exp_ridx)
# flip
jidx, ridx, lidx = idx.join(midx, how='right', return_indexers=True)
tm.assert_index_equal(jidx, midx)
assert lidx is None
tm.assert_numpy_array_equal(ridx, exp_ridx)
def test_reindex(self):
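        # reindexing with a target list should return a MultiIndex and
        # preserve level names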
result, indexer = self.index.reindex(list(self.index[:4]))
assert isinstance(result, MultiIndex)
self.check_level_names(result, self.index[:4].names)
result, indexer = self.index.reindex(list(self.index))
assert isinstance(result, MultiIndex)
assert indexer is None
self.check_level_names(result, self.index.names)
def test_reindex_level(self):
idx = Index(['one'])
target, indexer = self.index.reindex(idx, level='second')
target2, indexer2 = idx.reindex(self.index, level='second')
exp_index = self.index.join(idx, level='second', how='right')
exp_index2 = self.index.join(idx, level='second', how='left')
assert target.equals(exp_index)
exp_indexer = np.array([0, 2, 4])
tm.assert_numpy_array_equal(indexer, exp_indexer, check_dtype=False)
assert target2.equals(exp_index2)
exp_indexer2 = np.array([0, -1, 0, -1, 0, -1])
tm.assert_numpy_array_equal(indexer2, exp_indexer2, check_dtype=False)
tm.assert_raises_regex(TypeError, "Fill method not supported",
self.index.reindex, self.index,
method='pad', level='second')
tm.assert_raises_regex(TypeError, "Fill method not supported",
idx.reindex, idx, method='bfill',
level='first')
def test_duplicates(self):
assert not self.index.has_duplicates
assert self.index.append(self.index).has_duplicates
index = MultiIndex(levels=[[0, 1], [0, 1, 2]], labels=[
[0, 0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 0, 1, 2]])
assert index.has_duplicates
# GH 9075
t = [(u('x'), u('out'), u('z'), 5, u('y'), u('in'), u('z'), 169),
(u('x'), u('out'), u('z'), 7, u('y'), u('in'), u('z'), 119),
(u('x'), u('out'), u('z'), 9, u('y'), u('in'), u('z'), 135),
(u('x'), u('out'), u('z'), 13, u('y'), u('in'), u('z'), 145),
(u('x'), u('out'), u('z'), 14, u('y'), u('in'), u('z'), 158),
(u('x'), u('out'), u('z'), 16, u('y'), u('in'), u('z'), 122),
(u('x'), u('out'), u('z'), 17, u('y'), u('in'), u('z'), 160),
(u('x'), u('out'), u('z'), 18, u('y'), u('in'), u('z'), 180),
(u('x'), u('out'), u('z'), 20, u('y'), u('in'), u('z'), 143),
(u('x'), u('out'), u('z'), 21, u('y'), u('in'), u('z'), 128),
(u('x'), u('out'), u('z'), 22, u('y'), u('in'), u('z'), 129),
(u('x'), u('out'), u('z'), 25, u('y'), u('in'), u('z'), 111),
(u('x'), u('out'), u('z'), 28, u('y'), u('in'), u('z'), 114),
(u('x'), u('out'), u('z'), 29, u('y'), u('in'), u('z'), 121),
(u('x'), u('out'), u('z'), 31, u('y'), u('in'), u('z'), 126),
(u('x'), u('out'), u('z'), 32, u('y'), u('in'), u('z'), 155),
(u('x'), u('out'), u('z'), 33, u('y'), u('in'), u('z'), 123),
(u('x'), u('out'), u('z'), 12, u('y'), u('in'), u('z'), 144)]
index = pd.MultiIndex.from_tuples(t)
assert not index.has_duplicates
# handle int64 overflow if possible
def check(nlevels, with_nulls):
labels = np.tile(np.arange(500), 2)
level = np.arange(500)
if with_nulls: # inject some null values
labels[500] = -1 # common nan value
labels = [labels.copy() for i in range(nlevels)]
for i in range(nlevels):
labels[i][500 + i - nlevels // 2] = -1
labels += [np.array([-1, 1]).repeat(500)]
else:
labels = [labels] * nlevels + [np.arange(2).repeat(500)]
levels = [level] * nlevels + [[0, 1]]
# no dups
index = MultiIndex(levels=levels, labels=labels)
assert not index.has_duplicates
# with a dup
if with_nulls:
def f(a):
return np.insert(a, 1000, a[0])
labels = list(map(f, labels))
index = MultiIndex(levels=levels, labels=labels)
else:
values = index.values.tolist()
index = MultiIndex.from_tuples(values + [values[0]])
assert index.has_duplicates
# no overflow
check(4, False)
check(4, True)
# overflow possible
check(8, False)
check(8, True)
# GH 9125
n, k = 200, 5000
levels = [np.arange(n), tm.makeStringIndex(n), 1000 + np.arange(n)]
labels = [np.random.choice(n, k * n) for lev in levels]
mi = MultiIndex(levels=levels, labels=labels)
for keep in ['first', 'last', False]:
left = mi.duplicated(keep=keep)
right = pd._libs.hashtable.duplicated_object(mi.values, keep=keep)
tm.assert_numpy_array_equal(left, right)
# GH5873
for a in [101, 102]:
mi = MultiIndex.from_arrays([[101, a], [3.5, np.nan]])
assert not mi.has_duplicates
with warnings.catch_warnings(record=True):
# Deprecated - see GH20239
assert mi.get_duplicates().equals(MultiIndex.from_arrays(
[[], []]))
tm.assert_numpy_array_equal(mi.duplicated(), np.zeros(
2, dtype='bool'))
for n in range(1, 6): # 1st level shape
for m in range(1, 5): # 2nd level shape
# all possible unique combinations, including nan
lab = product(range(-1, n), range(-1, m))
mi = MultiIndex(levels=[list('abcde')[:n], list('WXYZ')[:m]],
labels=np.random.permutation(list(lab)).T)
assert len(mi) == (n + 1) * (m + 1)
assert not mi.has_duplicates
with warnings.catch_warnings(record=True):
# Deprecated - see GH20239
assert mi.get_duplicates().equals(MultiIndex.from_arrays(
[[], []]))
tm.assert_numpy_array_equal(mi.duplicated(), np.zeros(
len(mi), dtype='bool'))
def test_duplicate_meta_data(self):
# GH 10115
index = MultiIndex(
levels=[[0, 1], [0, 1, 2]],
labels=[[0, 0, 0, 0, 1, 1, 1],
[0, 1, 2, 0, 0, 1, 2]])
for idx in [index,
index.set_names([None, None]),
index.set_names([None, 'Num']),
index.set_names(['Upper', 'Num']), ]:
assert idx.has_duplicates
assert idx.drop_duplicates().names == idx.names
def test_get_unique_index(self):
idx = self.index[[0, 1, 0, 1, 1, 0, 0]]
expected = self.index._shallow_copy(idx[[0, 1]])
for dropna in [False, True]:
result = idx._get_unique_index(dropna=dropna)
assert result.unique
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('names', [None, ['first', 'second']])
def test_unique(self, names):
mi = pd.MultiIndex.from_arrays([[1, 2, 1, 2], [1, 1, 1, 2]],
names=names)
res = mi.unique()
exp = pd.MultiIndex.from_arrays([[1, 2, 2], [1, 1, 2]], names=mi.names)
tm.assert_index_equal(res, exp)
mi = pd.MultiIndex.from_arrays([list('aaaa'), list('abab')],
names=names)
res = mi.unique()
exp = pd.MultiIndex.from_arrays([list('aa'), list('ab')],
names=mi.names)
tm.assert_index_equal(res, exp)
mi = pd.MultiIndex.from_arrays([list('aaaa'), list('aaaa')],
names=names)
res = mi.unique()
exp = pd.MultiIndex.from_arrays([['a'], ['a']], names=mi.names)
tm.assert_index_equal(res, exp)
# GH #20568 - empty MI
mi = pd.MultiIndex.from_arrays([[], []], names=names)
res = mi.unique()
tm.assert_index_equal(mi, res)
@pytest.mark.parametrize('level', [0, 'first', 1, 'second'])
def test_unique_level(self, level):
# GH #17896 - with level= argument
result = self.index.unique(level=level)
expected = self.index.get_level_values(level).unique()
tm.assert_index_equal(result, expected)
# With already unique level
mi = pd.MultiIndex.from_arrays([[1, 3, 2, 4], [1, 3, 2, 5]],
names=['first', 'second'])
result = mi.unique(level=level)
expected = mi.get_level_values(level)
tm.assert_index_equal(result, expected)
# With empty MI
mi = pd.MultiIndex.from_arrays([[], []], names=['first', 'second'])
result = mi.unique(level=level)
        expected = mi.get_level_values(level)
        tm.assert_index_equal(result, expected)
def test_unique_datetimelike(self):
idx1 = pd.DatetimeIndex(['2015-01-01', '2015-01-01', '2015-01-01',
'2015-01-01', 'NaT', 'NaT'])
idx2 = pd.DatetimeIndex(['2015-01-01', '2015-01-01', '2015-01-02',
'2015-01-02', 'NaT', '2015-01-01'],
tz='Asia/Tokyo')
result = pd.MultiIndex.from_arrays([idx1, idx2]).unique()
eidx1 = pd.DatetimeIndex(['2015-01-01', '2015-01-01', 'NaT', 'NaT'])
eidx2 = pd.DatetimeIndex(['2015-01-01', '2015-01-02',
'NaT', '2015-01-01'],
tz='Asia/Tokyo')
exp = pd.MultiIndex.from_arrays([eidx1, eidx2])
tm.assert_index_equal(result, exp)
def test_tolist(self):
result = self.index.tolist()
exp = list(self.index.values)
assert result == exp
def test_repr_with_unicode_data(self):
with pd.core.config.option_context("display.encoding", 'UTF-8'):
d = {"a": [u("\u05d0"), 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]}
index = pd.DataFrame(d).set_index(["a", "b"]).index
assert "\\u" not in repr(index) # we don't want unicode-escaped
def test_repr_roundtrip(self):
mi = MultiIndex.from_product([list('ab'), range(3)],
names=['first', 'second'])
str(mi)
if PY3:
tm.assert_index_equal(eval(repr(mi)), mi, exact=True)
else:
result = eval(repr(mi))
# string coerces to unicode
tm.assert_index_equal(result, mi, exact=False)
assert mi.get_level_values('first').inferred_type == 'string'
assert result.get_level_values('first').inferred_type == 'unicode'
mi_u = MultiIndex.from_product(
[list(u'ab'), range(3)], names=['first', 'second'])
result = eval(repr(mi_u))
tm.assert_index_equal(result, mi_u, exact=True)
# formatting
if PY3:
str(mi)
else:
compat.text_type(mi)
# long format
mi = MultiIndex.from_product([list('abcdefg'), range(10)],
names=['first', 'second'])
if PY3:
tm.assert_index_equal(eval(repr(mi)), mi, exact=True)
else:
result = eval(repr(mi))
# string coerces to unicode
tm.assert_index_equal(result, mi, exact=False)
assert mi.get_level_values('first').inferred_type == 'string'
assert result.get_level_values('first').inferred_type == 'unicode'
result = eval(repr(mi_u))
tm.assert_index_equal(result, mi_u, exact=True)
def test_str(self):
# tested elsewhere
pass
def test_unicode_string_with_unicode(self):
d = {"a": [u("\u05d0"), 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]}
idx = pd.DataFrame(d).set_index(["a", "b"]).index
if PY3:
str(idx)
else:
compat.text_type(idx)
def test_bytestring_with_unicode(self):
d = {"a": [u("\u05d0"), 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]}
idx = pd.DataFrame(d).set_index(["a", "b"]).index
if PY3:
bytes(idx)
else:
str(idx)
def test_slice_keep_name(self):
x = MultiIndex.from_tuples([('a', 'b'), (1, 2), ('c', 'd')],
names=['x', 'y'])
assert x[1:].names == x.names
def test_isna_behavior(self):
# should not segfault GH5123
# NOTE: if MI representation changes, may make sense to allow
# isna(MI)
with pytest.raises(NotImplementedError):
pd.isna(self.index)
def test_level_setting_resets_attributes(self):
ind = pd.MultiIndex.from_arrays([
['A', 'A', 'B', 'B', 'B'], [1, 2, 1, 2, 3]
])
assert ind.is_monotonic
ind.set_levels([['A', 'B'], [1, 3, 2]], inplace=True)
# if this fails, probably didn't reset the cache correctly.
assert not ind.is_monotonic
def test_is_monotonic_increasing(self):
i = MultiIndex.from_product([np.arange(10),
np.arange(10)], names=['one', 'two'])
assert i.is_monotonic
assert i._is_strictly_monotonic_increasing
assert Index(i.values).is_monotonic
assert i._is_strictly_monotonic_increasing
i = MultiIndex.from_product([np.arange(10, 0, -1),
np.arange(10)], names=['one', 'two'])
assert not i.is_monotonic
assert not i._is_strictly_monotonic_increasing
assert not Index(i.values).is_monotonic
assert not Index(i.values)._is_strictly_monotonic_increasing
i = MultiIndex.from_product([np.arange(10),
np.arange(10, 0, -1)],
names=['one', 'two'])
assert not i.is_monotonic
assert not i._is_strictly_monotonic_increasing
assert not Index(i.values).is_monotonic
assert not Index(i.values)._is_strictly_monotonic_increasing
i = MultiIndex.from_product([[1.0, np.nan, 2.0], ['a', 'b', 'c']])
assert not i.is_monotonic
assert not i._is_strictly_monotonic_increasing
assert not Index(i.values).is_monotonic
assert not Index(i.values)._is_strictly_monotonic_increasing
# string ordering
i = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
assert not i.is_monotonic
assert not Index(i.values).is_monotonic
assert not i._is_strictly_monotonic_increasing
assert not Index(i.values)._is_strictly_monotonic_increasing
i = MultiIndex(levels=[['bar', 'baz', 'foo', 'qux'],
['mom', 'next', 'zenith']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
assert i.is_monotonic
assert Index(i.values).is_monotonic
assert i._is_strictly_monotonic_increasing
assert Index(i.values)._is_strictly_monotonic_increasing
# mixed levels, hits the TypeError
i = MultiIndex(
levels=[[1, 2, 3, 4], ['gb00b03mlx29', 'lu0197800237',
'nl0000289783',
'nl0000289965', 'nl0000301109']],
labels=[[0, 1, 1, 2, 2, 2, 3], [4, 2, 0, 0, 1, 3, -1]],
names=['household_id', 'asset_id'])
assert not i.is_monotonic
assert not i._is_strictly_monotonic_increasing
# empty
i = MultiIndex.from_arrays([[], []])
assert i.is_monotonic
assert Index(i.values).is_monotonic
assert i._is_strictly_monotonic_increasing
assert Index(i.values)._is_strictly_monotonic_increasing
def test_is_monotonic_decreasing(self):
i = MultiIndex.from_product([np.arange(9, -1, -1),
np.arange(9, -1, -1)],
names=['one', 'two'])
assert i.is_monotonic_decreasing
assert i._is_strictly_monotonic_decreasing
assert Index(i.values).is_monotonic_decreasing
assert i._is_strictly_monotonic_decreasing
i = MultiIndex.from_product([np.arange(10),
np.arange(10, 0, -1)],
names=['one', 'two'])
assert not i.is_monotonic_decreasing
assert not i._is_strictly_monotonic_decreasing
assert not Index(i.values).is_monotonic_decreasing
assert not Index(i.values)._is_strictly_monotonic_decreasing
i = MultiIndex.from_product([np.arange(10, 0, -1),
np.arange(10)], names=['one', 'two'])
assert not i.is_monotonic_decreasing
assert not i._is_strictly_monotonic_decreasing
assert not Index(i.values).is_monotonic_decreasing
assert not Index(i.values)._is_strictly_monotonic_decreasing
i = MultiIndex.from_product([[2.0, np.nan, 1.0], ['c', 'b', 'a']])
assert not i.is_monotonic_decreasing
assert not i._is_strictly_monotonic_decreasing
assert not Index(i.values).is_monotonic_decreasing
assert not Index(i.values)._is_strictly_monotonic_decreasing
# string ordering
i = MultiIndex(levels=[['qux', 'foo', 'baz', 'bar'],
['three', 'two', 'one']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
assert not i.is_monotonic_decreasing
assert not Index(i.values).is_monotonic_decreasing
assert not i._is_strictly_monotonic_decreasing
assert not Index(i.values)._is_strictly_monotonic_decreasing
i = MultiIndex(levels=[['qux', 'foo', 'baz', 'bar'],
['zenith', 'next', 'mom']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
assert i.is_monotonic_decreasing
assert Index(i.values).is_monotonic_decreasing
assert i._is_strictly_monotonic_decreasing
assert Index(i.values)._is_strictly_monotonic_decreasing
# mixed levels, hits the TypeError
i = MultiIndex(
levels=[[4, 3, 2, 1], ['nl0000301109', 'nl0000289965',
'nl0000289783', 'lu0197800237',
'gb00b03mlx29']],
labels=[[0, 1, 1, 2, 2, 2, 3], [4, 2, 0, 0, 1, 3, -1]],
names=['household_id', 'asset_id'])
assert not i.is_monotonic_decreasing
assert not i._is_strictly_monotonic_decreasing
# empty
i = MultiIndex.from_arrays([[], []])
assert i.is_monotonic_decreasing
assert Index(i.values).is_monotonic_decreasing
assert i._is_strictly_monotonic_decreasing
assert Index(i.values)._is_strictly_monotonic_decreasing
def test_is_strictly_monotonic_increasing(self):
idx = pd.MultiIndex(levels=[['bar', 'baz'], ['mom', 'next']],
labels=[[0, 0, 1, 1], [0, 0, 0, 1]])
assert idx.is_monotonic_increasing
assert not idx._is_strictly_monotonic_increasing
def test_is_strictly_monotonic_decreasing(self):
idx = pd.MultiIndex(levels=[['baz', 'bar'], ['next', 'mom']],
labels=[[0, 0, 1, 1], [0, 0, 0, 1]])
assert idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
def test_reconstruct_sort(self):
# starts off lexsorted & monotonic
mi = MultiIndex.from_arrays([
['A', 'A', 'B', 'B', 'B'], [1, 2, 1, 2, 3]
])
assert mi.is_lexsorted()
assert mi.is_monotonic
recons = mi._sort_levels_monotonic()
assert recons.is_lexsorted()
assert recons.is_monotonic
assert mi is recons
assert mi.equals(recons)
assert Index(mi.values).equals(Index(recons.values))
# cannot convert to lexsorted
mi = pd.MultiIndex.from_tuples([('z', 'a'), ('x', 'a'), ('y', 'b'),
('x', 'b'), ('y', 'a'), ('z', 'b')],
names=['one', 'two'])
assert not mi.is_lexsorted()
assert not mi.is_monotonic
recons = mi._sort_levels_monotonic()
assert not recons.is_lexsorted()
assert not recons.is_monotonic
assert mi.equals(recons)
assert Index(mi.values).equals(Index(recons.values))
# cannot convert to lexsorted
mi = MultiIndex(levels=[['b', 'd', 'a'], [1, 2, 3]],
labels=[[0, 1, 0, 2], [2, 0, 0, 1]],
names=['col1', 'col2'])
assert not mi.is_lexsorted()
assert not mi.is_monotonic
recons = mi._sort_levels_monotonic()
assert not recons.is_lexsorted()
assert not recons.is_monotonic
assert mi.equals(recons)
assert Index(mi.values).equals(Index(recons.values))
def test_reconstruct_remove_unused(self):
# xref to GH 2770
df = DataFrame([['deleteMe', 1, 9],
['keepMe', 2, 9],
['keepMeToo', 3, 9]],
columns=['first', 'second', 'third'])
df2 = df.set_index(['first', 'second'], drop=False)
df2 = df2[df2['first'] != 'deleteMe']
# removed levels are there
expected = MultiIndex(levels=[['deleteMe', 'keepMe', 'keepMeToo'],
[1, 2, 3]],
labels=[[1, 2], [1, 2]],
names=['first', 'second'])
result = df2.index
tm.assert_index_equal(result, expected)
expected = MultiIndex(levels=[['keepMe', 'keepMeToo'],
[2, 3]],
labels=[[0, 1], [0, 1]],
names=['first', 'second'])
result = df2.index.remove_unused_levels()
tm.assert_index_equal(result, expected)
# idempotent
result2 = result.remove_unused_levels()
tm.assert_index_equal(result2, expected)
assert result2.is_(result)
@pytest.mark.parametrize('level0', [['a', 'd', 'b'],
['a', 'd', 'b', 'unused']])
@pytest.mark.parametrize('level1', [['w', 'x', 'y', 'z'],
['w', 'x', 'y', 'z', 'unused']])
def test_remove_unused_nan(self, level0, level1):
# GH 18417
mi = pd.MultiIndex(levels=[level0, level1],
labels=[[0, 2, -1, 1, -1], [0, 1, 2, 3, 2]])
result = mi.remove_unused_levels()
tm.assert_index_equal(result, mi)
for level in 0, 1:
assert('unused' not in result.levels[level])
@pytest.mark.parametrize('first_type,second_type', [
('int64', 'int64'),
('datetime64[D]', 'str')])
def test_remove_unused_levels_large(self, first_type, second_type):
# GH16556
# because tests should be deterministic (and this test in particular
# checks that levels are removed, which is not the case for every
# random input):
rng = np.random.RandomState(4) # seed is arbitrary value that works
size = 1 << 16
df = DataFrame(dict(
first=rng.randint(0, 1 << 13, size).astype(first_type),
second=rng.randint(0, 1 << 10, size).astype(second_type),
third=rng.rand(size)))
df = df.groupby(['first', 'second']).sum()
df = df[df.third < 0.1]
result = df.index.remove_unused_levels()
assert len(result.levels[0]) < len(df.index.levels[0])
assert len(result.levels[1]) < len(df.index.levels[1])
assert result.equals(df.index)
expected = df.reset_index().set_index(['first', 'second']).index
tm.assert_index_equal(result, expected)
def test_isin(self):
values = [('foo', 2), ('bar', 3), ('quux', 4)]
idx = MultiIndex.from_arrays([['qux', 'baz', 'foo', 'bar'], np.arange(
4)])
result = idx.isin(values)
expected = np.array([False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
# empty, return dtype bool
idx = MultiIndex.from_arrays([[], []])
result = idx.isin(values)
assert len(result) == 0
assert result.dtype == np.bool_
@pytest.mark.skipif(PYPY, reason="tuples cmp recursively on PyPy")
def test_isin_nan_not_pypy(self):
idx = MultiIndex.from_arrays([['foo', 'bar'], [1.0, np.nan]])
tm.assert_numpy_array_equal(idx.isin([('bar', np.nan)]),
np.array([False, False]))
tm.assert_numpy_array_equal(idx.isin([('bar', float('nan'))]),
np.array([False, False]))
@pytest.mark.skipif(not PYPY, reason="tuples cmp recursively on PyPy")
def test_isin_nan_pypy(self):
idx = MultiIndex.from_arrays([['foo', 'bar'], [1.0, np.nan]])
tm.assert_numpy_array_equal(idx.isin([('bar', np.nan)]),
np.array([False, True]))
tm.assert_numpy_array_equal(idx.isin([('bar', float('nan'))]),
np.array([False, True]))
def test_isin_level_kwarg(self):
idx = MultiIndex.from_arrays([['qux', 'baz', 'foo', 'bar'], np.arange(
4)])
vals_0 = ['foo', 'bar', 'quux']
vals_1 = [2, 3, 10]
expected = np.array([False, False, True, True])
tm.assert_numpy_array_equal(expected, idx.isin(vals_0, level=0))
tm.assert_numpy_array_equal(expected, idx.isin(vals_0, level=-2))
tm.assert_numpy_array_equal(expected, idx.isin(vals_1, level=1))
tm.assert_numpy_array_equal(expected, idx.isin(vals_1, level=-1))
pytest.raises(IndexError, idx.isin, vals_0, level=5)
pytest.raises(IndexError, idx.isin, vals_0, level=-5)
pytest.raises(KeyError, idx.isin, vals_0, level=1.0)
pytest.raises(KeyError, idx.isin, vals_1, level=-1.0)
pytest.raises(KeyError, idx.isin, vals_1, level='A')
idx.names = ['A', 'B']
tm.assert_numpy_array_equal(expected, idx.isin(vals_0, level='A'))
tm.assert_numpy_array_equal(expected, idx.isin(vals_1, level='B'))
pytest.raises(KeyError, idx.isin, vals_1, level='C')
def test_reindex_preserves_names_when_target_is_list_or_ndarray(self):
# GH6552
idx = self.index.copy()
target = idx.copy()
idx.names = target.names = [None, None]
other_dtype = pd.MultiIndex.from_product([[1, 2], [3, 4]])
# list & ndarray cases
assert idx.reindex([])[0].names == [None, None]
assert idx.reindex(np.array([]))[0].names == [None, None]
assert idx.reindex(target.tolist())[0].names == [None, None]
assert idx.reindex(target.values)[0].names == [None, None]
assert idx.reindex(other_dtype.tolist())[0].names == [None, None]
assert idx.reindex(other_dtype.values)[0].names == [None, None]
idx.names = ['foo', 'bar']
assert idx.reindex([])[0].names == ['foo', 'bar']
assert idx.reindex(np.array([]))[0].names == ['foo', 'bar']
assert idx.reindex(target.tolist())[0].names == ['foo', 'bar']
assert idx.reindex(target.values)[0].names == ['foo', 'bar']
assert idx.reindex(other_dtype.tolist())[0].names == ['foo', 'bar']
assert idx.reindex(other_dtype.values)[0].names == ['foo', 'bar']
def test_reindex_lvl_preserves_names_when_target_is_list_or_array(self):
# GH7774
idx = pd.MultiIndex.from_product([[0, 1], ['a', 'b']],
names=['foo', 'bar'])
assert idx.reindex([], level=0)[0].names == ['foo', 'bar']
assert idx.reindex([], level=1)[0].names == ['foo', 'bar']
def test_reindex_lvl_preserves_type_if_target_is_empty_list_or_array(self):
# GH7774
idx = pd.MultiIndex.from_product([[0, 1], ['a', 'b']])
assert idx.reindex([], level=0)[0].levels[0].dtype.type == np.int64
assert idx.reindex([], level=1)[0].levels[1].dtype.type == np.object_
def test_groupby(self):
groups = self.index.groupby(np.array([1, 1, 1, 2, 2, 2]))
labels = self.index.get_values().tolist()
exp = {1: labels[:3], 2: labels[3:]}
tm.assert_dict_equal(groups, exp)
# GH5620
groups = self.index.groupby(self.index)
exp = {key: [key] for key in self.index}
| tm.assert_dict_equal(groups, exp) | pandas.util.testing.assert_dict_equal |
# coding: utf-8
# ## General information
#
# This kernel is dedicated to EDA of the PetFinder.my Adoption Prediction challenge, as well as feature engineering and modelling.
#
# 
# (a screenshot of the PetFinder.my site)
#
# In this dataset we have lots of information: tabular data, texts and even images! This gives a lot of possibilities for feature engineering and modelling. The only limiting factor is that the competition is kernel-only. On the other hand, this ensures everyone has the same computational resources.
#
# In this kernel I want to pay attention to several things:
# * comparing distribution of features in train and test data;
# * exploring features and their interactions;
# * trying various types of feature engineering;
# * trying various models without neural nets (for now);
#
# It is important to remember that this competition has stage 2, so our models will run against unseen data.
#
# *Work still in progress*
# In[1]:
#libraries
import numpy as np
import pandas as pd
import os
import json
import seaborn as sns
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
plt.style.use('ggplot')
import lightgbm as lgb
import xgboost as xgb
import time
import datetime
from PIL import Image
from wordcloud import WordCloud
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import StratifiedKFold, KFold
from sklearn.metrics import mean_squared_error, roc_auc_score
from sklearn.linear_model import LogisticRegression, LogisticRegressionCV
import gc
from catboost import CatBoostClassifier
from tqdm import tqdm_notebook
import plotly.offline as py
py.init_notebook_mode(connected=True)
import plotly.graph_objs as go
import plotly.tools as tls
import random
import warnings
warnings.filterwarnings("ignore")
from functools import partial
pd.set_option('max_colwidth', 500)
pd.set_option('max_columns', 500)
pd.set_option('max_rows', 100)
import os
import scipy as sp
from math import sqrt
from collections import Counter
from sklearn.metrics import confusion_matrix as sk_cmatrix
from sklearn.feature_extraction.text import TfidfVectorizer
from nltk.tokenize import TweetTokenizer
from sklearn.ensemble import RandomForestClassifier
import langdetect
import eli5
from IPython.display import display
from sklearn.metrics import cohen_kappa_score
def kappa(y_true, y_pred):
return cohen_kappa_score(y_true, y_pred, weights='quadratic')
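# A couple of illustrative (commented-out) sanity checks for the quadratic weighted kappa;
# the labels below are made up and are not taken from the competition data:
# kappa([0, 1, 2, 3], [0, 1, 2, 3])  # perfect agreement -> 1.0
# kappa([0, 1, 2, 3], [0, 1, 2, 2])  # a near miss on one sample costs much less than a large error,
#                                    # because disagreements are weighted by squared class distance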
# In[2]:
breeds = pd.read_csv('../input/breed_labels.csv')
colors = pd.read_csv('../input/color_labels.csv')
states = | pd.read_csv('../input/state_labels.csv') | pandas.read_csv |
import os
from sqlalchemy.types import Integer, Text, String, DateTime, Float
from sqlalchemy import create_engine
import pandas as pd
from configparser import ConfigParser # For configparser compatible formatting see: https://docs.python.org/3/library/configparser.html
import numpy as np
import logging
class DataHandler:
def __init__(self, config=None, configFilePath=None, separator=','):
self.data = None
self.config = config
self.dbString = None if not self.config else self.config['Database']['DB_CONNECTION_STRING']
self.filePath = None
self.configFilePath = configFilePath
self.separator = separator
def loadConfig(self, configFilePath):
'''Load and parse config data'''
if self.config:
print('Config already exists at %s. Nothing to load.' % self.configFilePath)
return
        print('Loading config file %s' % configFilePath)
self.configFilePath = configFilePath
config = ConfigParser()
config.read(configFilePath)
self.config = config
self.dbString = config['Database']['DB_CONNECTION_STRING']
def loadData(self, fileName="311data"):
'''Load dataset into pandas object'''
if self.separator == ',':
dataFile = fileName + ".csv"
else:
dataFile = fileName + ".tsv"
self.filePath = os.path.join(self.config['Database']['DATA_DIRECTORY'], dataFile )
print('Loading dataset %s' % self.filePath)
self.data = pd.read_table(self.filePath,
sep=self.separator,
na_values=['nan'],
dtype={
'SRNumber':str,
'CreatedDate':str,
'UpdatedDate':str,
'ActionTaken':str,
'Owner':str,
'RequestType':str,
'Status':str,
'RequestSource':str,
'MobileOS':str,
'Anonymous':str,
'AssignTo':str,
'ServiceDate':str,
'ClosedDate':str,
'AddressVerified':str,
'ApproximateAddress':str,
'Address':str,
'HouseNumber':str,
'Direction':str,
'StreetName':str,
'Suffix':str,
'ZipCode':str,
'Latitude':str,
'Longitude':str,
'Location':str,
'TBMPage':str,
'TBMColumn':str,
'TBMRow':str,
'APC':str,
'CD':str,
'CDMember':str,
'NC':str,
'NCName':str,
'PolicePrecinct':str
})
def cleanData(self):
'''Perform general data filtering'''
print('Cleaning 311 dataset...')
data = self.data
zipIndex = (data['ZipCode'].str.isdigit()) | (data['ZipCode'].isna())
        data.loc[~zipIndex, 'ZipCode'] = np.nan  # avoid chained assignment so the write applies to the frame itself
# Format dates as datetime (Time intensive)
data['CreatedDate'] = pd.to_datetime(data['CreatedDate'])
data['ClosedDate'] = pd.to_datetime(data['ClosedDate'])
data['ServiceDate'] = pd.to_datetime(data['ServiceDate'])
# Compute service time
# New columns: closed_created, service_created
# xNOTE: SQLAlchemy/Postgres will convert these time deltas to integer values
# May wish to change these to a different format
data['closed_created'] = data.ClosedDate-data.CreatedDate
data['service_created'] = data.ServiceDate-data.CreatedDate
# drop NA values and reformat closed_created in units of hours
data = data[~data.closed_created.isna()]
# New column: closed_created in units of days
data['closed_createdD'] = data.closed_created / | pd.Timedelta(days=1) | pandas.Timedelta |
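        # Dividing a timedelta column by pd.Timedelta(days=1) converts it to a float number of days,
        # e.g. pd.Timedelta('36 hours') / pd.Timedelta(days=1) == 1.5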
import random
import pandas as pd
from scipy.spatial.distance import cosine
from tqdm import tqdm
from preprocessing.duration_matrix import DurationSparseMatrix
DATA = 'data/'
POSTPROCESSING = 'postprocessing/'
def get_history_by_user(user_id: int) -> list:
df = pd.read_csv(f'{DATA}{POSTPROCESSING}watch_history.csv', index_col=0)
content = | pd.read_csv(f'{DATA}{POSTPROCESSING}content.csv', index_col='content_uid') | pandas.read_csv |
import cv2
import os
import numpy as np
import math
import time
from abc import abstractmethod, ABC
from pandas import DataFrame
def timeit(func):
'''
A decorator which computes the time cost.
'''
def wrapper(*args, **kw):
start = time.time()
print('%s starts...' % (func.__name__))
res = func(*args, **kw)
print('%s completed: %.3f s' % (func.__name__, time.time() - start))
return res
return wrapper
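# Example usage of the decorator (hypothetical function name, shown for illustration only):
#
# @timeit
# def build_vocabulary(descriptors):
#     ...
#
# Calling build_vocabulary(...) then prints its name when it starts and the elapsed
# wall-clock time in seconds when it finishes.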
class FileHelper:
'''
Operations related to files.
'''
def __init__(self):
pass
def get_all_files(self, directory, flag):
'''
Get all files from a directory.
:param str directory: the directory that will be processed
:param str flag: the directory type, 'train' or 'test'
'''
if flag == 'train':
# return a dictionary which contains
# all the images and their labels
ret = dict()
for f in os.listdir(directory):
sub_dir = os.path.join(directory, f)
if os.path.isdir(sub_dir):
img_path_list = [os.path.join(sub_dir, i) for i in os.listdir(sub_dir) if i.endswith('.jpg')]
# read every image with grayscale mode
img_list = [cv2.imread(i, cv2.IMREAD_GRAYSCALE) for i in img_path_list]
ret[f.lower()] = img_list
return ret
if flag == 'test':
# return a tuple which contains all the images and their names
# sort the list in numeric order using the filenames
sorted_list = sorted(os.listdir(directory), key=lambda s: int(s.split('.')[0]))
name_list = [i for i in sorted_list if i.endswith('.jpg')]
img_path_list = [os.path.join(directory, i) for i in sorted_list if i.endswith('.jpg')]
img_list = [cv2.imread(i, cv2.IMREAD_GRAYSCALE) for i in img_path_list]
return img_list, name_list
class ImageHelper:
'''
Operations related to images.
'''
def __init__(self):
# initialize a sift instance
self.sift = cv2.xfeatures2d.SIFT_create()
def crop_square(self, image):
'''
Crop an image to a square about the centre.
'''
rows, cols = image.shape
if rows == cols:
return image
else:
diff = abs(rows - cols)
tmp = math.floor(diff / 2)
if rows > cols:
# The number of rows is bigger than the number of columns,
# so the number of columns will be reserved.
return image[tmp:tmp + cols, :]
if rows < cols:
# The number of columns is bigger than the number of rows,
# so the number of rows will be reserved.
return image[:, tmp:tmp + rows]
def tiny_image(self, image, normalize):
'''Create tiny image feature.
Basically follow three steps:
cropping, resizing, (normalizing)
:param array image: The array representation of an image.
:param boolean normalize: If set to True, the image will be made to have zero mean and unit length.
:return: The feature vector of the image.
:rtype: array
'''
crop_img = self.crop_square(image)
resized_img = cv2.resize(crop_img, (16, 16))
vector = resized_img.flatten() # flatten the array
if normalize:
zero_mean_vec = vector - np.mean(vector)
vector = zero_mean_vec / np.linalg.norm(zero_mean_vec)
return vector
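    # Note on the normalisation above: subtracting the mean and dividing by the Euclidean norm gives
    # a zero-mean vector of unit length, i.e. v' = (v - mean(v)) / ||v - mean(v)||, which makes the
    # tiny-image feature less sensitive to overall brightness and contrast changes.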
def extract_patches(self, image, patch_size, stride):
'''Extract 2-D patches from an image.
Cut out the edges that cannot make up a patch.
:param array image: the array presentation of an image.
:param list patch_size: the dimensions of the patch.
:param int stride: the length of the gap between the start of one patch and the start of the next consecutive patch
:return: all the patches in the image
:rtype: array
'''
rows, cols = image.shape
p_row, p_col = patch_size
patch_list = []
for i in range(0, rows, stride):
if i + p_row > rows:
break # cut out the edges of rows
for j in range(0, cols, stride):
if j + p_col > cols:
break # cut out the edges of columns
patch = image[i:i+p_row, j:j+p_col]
patch_list.append(patch.flatten()) # flatten every 2-D patch into a vector
return np.array(patch_list)
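    # Worked example (illustrative numbers only): a 256x256 image with patch_size=[8, 8] and stride=4
    # gives floor((256 - 8) / 4) + 1 = 63 patch positions per axis, i.e. 63 * 63 = 3969 patches,
    # each flattened into a 64-dimensional vector.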
def gen_sift_des(self, image, dense, step_size=5):
'''
Generate sift descriptors of the given image.
:param boolean dense: If dense is True, dense sift descriptors will be used.
:param int step_size: the length of the gap between two sift descriptors
:return: all the sift descriptors of an image
'''
if dense:
kp = [cv2.KeyPoint(x, y, step_size) for y in range(0, image.shape[0], step_size)
for x in range(0, image.shape[1], step_size)]
descriptors = self.sift.compute(image, kp)[1]
else:
descriptors = self.sift.detectAndCompute(image, None)[1]
return descriptors
class BagOfWords(ABC):
'''
Operations related to Bag of Visual Words.
'''
def __init__(self, train_dir, test_dir):
'''
:param str train_dir: the directory of the training set
:param str test_dir: the directory of the testing set
'''
self.file_h = FileHelper()
self.train_dir = train_dir
self.test_dir = test_dir
@timeit
def BOVW_training_set(self):
        '''Convert the training set to the representation of Bag of Visual Words.
Use a dataframe to save the features and labels of images
which will be used in the classifier.
:return: a dataframe containing the features and image labels
:rtype: DataFrame
'''
dataset = []
# get the images and their labels from the training set
img_dict = self.file_h.get_all_files(self.train_dir, 'train')
for label, img_list in img_dict.items():
for img in img_list:
                # generate the Bag of Visual Words representation for an image
# using the trained kmeans model
bovw = self.generate_BOVW(img)
one_row = np.append(bovw, label)
dataset.append(one_row)
return | DataFrame(dataset) | pandas.DataFrame |
import time
import requests
import datetime as dt
import pandas as pd
import numpy as np
import os
import re
import zipfile
import pandas as pd
import itertools
from selenium import webdriver
from selenium.webdriver.firefox.options import Options
from bs4 import BeautifulSoup
from financePy import scraper as scr
from financePy import general_tools as gt
try:
from io import StringIO
from io import BytesIO
except ImportError:
from StringIO import StringIO
import json
import matplotlib.pyplot as plt
headers = {
'ApiKey': '<KEY>',
# 'Host': 'api-global.morningstar.com',
# 'Origin': 'http://www.morningstar.com',
# 'Pragma': 'no-cache',
# 'Referer': 'http://www.morningstar.com/stocks/xnas/aapl/quote.html',
# 'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:59.0) Gecko/20100101 Firefox/59.0',
'X-API-REALTIME-E': 'eyJlbmMiOiJBMTI4R0NNIiwiYWxnIjoiUlNBLU9BRVAifQ.wX_lECTDnzuDLYRJwI-0hxr-afvqZ-GqrevQMauXMm6kdzx1h6nHmIz5laPMcFjk9g123qS6FRbv2rJIUI4Dsrg2b306xFJ1g2H8h4qiExdYaOd3ag7BA4bXNJiwcu640HMcRHbyN5DCkJvY9ckqEBV1gOwf5vGn0AnFawqJ1rI.hf31mkmpQI_UVGgq.9vtGBjMiejWToIH-ZbZByB7gVgaCCyAy2_SbAcWZVKeHiumgBN8eP-4UlJ2Hc1oFMTRWIJvPsc-4tS8UP_GYTUPL041xxEE_EKP7M1iYPPbSt7YgJgxeC5_ROiIY8TF-Il9Qnpx2x3U3mLjEDp4PBSKFgj1NGq-Fg_53oTNxWaRnxMC1fsJejL70UM827pKxrRnK3at-yGdfHHku6WjBqdw3Wg.gw8hKKyUYdqwwRVqRGUa1w',
'X-SAL-ContentType': 'nNsGdN3REOnPMlKDShOYjlk6VYiEVLSdpfpXAm7o2Tk='}
def tickers(country):
country = 'united states' if country.lower().replace(' ','') in ['usa','unitedstates'] else country
url = 'https://www.countrycode.org/'
r = requests.get(url)
content = r.content.decode('utf-8')
soup = BeautifulSoup(content,'lxml')
countries = gt.parse_html_table(soup)
countries.columns = list(map(lambda x: x.replace(' ','').replace('\n','').lower() ,countries.columns))
countries.country = countries.country.str.lower()
api = '286286941689219'
iso_code = countries[countries.country == country].isocodes.tolist()[0][-3:].lower()
url = 'https://res.cloudinary.com/torquato/raw/upload/v1524066556/financePy/official_%s_MS.csv' % iso_code
r = requests.get(url, headers = {'api' : api})
content = r.content.decode('utf-8')
res = pd.read_csv(StringIO(content))
res.set_index(res.columns[0], inplace = True)
return res
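# Example usage (commented out because it needs network access, and it assumes countrycode.org
# still exposes the ISO codes in the same table layout):
# us_tickers = tickers('usa') # resolved to 'united states', downloads official_usa_MS.csv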
def trailing_returns(ticker,instrum):
fr = instrum['traili_ret_freq'].lower()
frequency = 'd' if fr in ['d','daily'] else (
'm' if fr in ['m','monthly'] else(
'q' if fr in ['q','quarterly'] else None))
if frequency == 'd':
format__ = ['1Day', '1Week', '1Month', '3Month', 'YearToDate', '1Year', '3Year', '5Year', '10Year', '15Year']
format_ = ['trailing'+x+'Return' for x in format__]
elif frequency == 'm':
format__ = ['1Month', '3Month', '6Month', 'YearToDate', '1Year', '3Year', '5Year', '10Year', '15Year']
format_ = ['trailing'+x+'Return' for x in format__]
elif frequency == 'q':
format__ = ['1Month', '3Month', '6Month', 'YearToDate', '1Year', '3Year', '5Year', '10Year', '15Year']
format_ = ['trailing'+x+'Return' for x in format__]
else:
raise ValueError(fr + ' is not a correct form for frequency')
url = 'https://api-global.morningstar.com/sal-service/v1/stock/trailingTotalReturns/%s/data?dataType=%s&locale=en-US' % (ticker,frequency)
r = requests.get(url, headers=headers)
content = r.content.decode('utf-8')
json_res = json.load(StringIO(content))
columns = ['name'] + format_
rows = [i for i in json_res['trailingTotalReturnsList']]
array = []
for col in columns:
array += [[x[col] for x in rows]]
data = pd.DataFrame(np.array(array).T)
data.columns = ['name'] + format_
data.set_index('name', inplace = True)
data.dropna(how = 'all', inplace = True)
data = data.apply(lambda x: x.astype(float))
return data
def dividens(ticker,instrum):
url = 'https://api-global.morningstar.com/sal-service/v1/stock/dividends/v2/%s/data?locale=en-US' % ticker
r = requests.get(url, headers=headers)
content = r.content.decode('utf-8')
json_res = json.load(StringIO(content))
columns = [ x if len(x) <= 5 else x.split('.')[-1] for x in json_res['columnDefs_labels'][1:]]
rows = [x['label'] for x in json_res['rows']]
array = np.array([x['datum'] if x['datum'] != [] else [None]*len(columns) for x in json_res['rows']])
data = pd.DataFrame(array, index = rows, columns = columns)
data = data.apply(lambda x: x.astype(float))
return data
def splits(ticker,instrum):
url = 'https://api-global.morningstar.com/sal-service/v1/stock/split/%s/data?locale=en-US' % ticker
r = requests.get(url, headers=headers)
content = r.content.decode('utf-8')
json_res = json.load(StringIO(content))
data1 = pd.DataFrame(json_res['splitHistory'])
data2 = pd.DataFrame(json_res['splitOffHistory'])
return {'splitOffHistory' : data2 , 'splitHistory' : data1}
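# The dict returned by splits() can be unpacked as, for example (illustrative call; instrum is
# accepted for interface consistency but not used by this function):
# split_tables = splits(ticker, instrum)
# split_tables['splitHistory'], split_tables['splitOffHistory']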
def ownership(ticker,instrum):
# opt1 = ['OwnershipData','ConcentratedOwners','Buyers','Sellers']
# opt2 = ['mutualfund','institution']
results = {}
for i in itertools.product(instrum['own_opt'][0],instrum['own_opt'][1]):
url = 'https://api-global.morningstar.com/sal-service/v1/stock/ownership/v1/%s/%s/%s/5/data?locale=en-US' % (ticker,i[0],i[1])
r = requests.get(url, headers=headers)
content = r.content.decode('utf-8')
json_res = json.load(StringIO(content))
columns = [ x['columnId'] for x in json_res['columnDefs']][1:]
rows = [x['name'] if x['name'] != None else 'Total' for x in json_res['rows']]
array = []
for col in columns:
array += [[x[col] for x in json_res['rows']]]
array = np.array(array).T
data = pd.DataFrame(array, index = rows, columns = columns)
data.replace('_PO_',np.nan,inplace = True)
data.dropna(1,how='all',inplace = True)
results[i] = data
return results
def executives(ticker,instrum):
url = 'https://api-global.morningstar.com/sal-service/v1/stock/insiders/%s/%s/data?locale=en-US' % (instrum['exe_opt'][0],ticker)
r = requests.get(url, headers=headers)
content = r.content.decode('utf-8')
if len(content) == 0:
return ticker
json_res = json.load(StringIO(content))
if json_res['rows'] == []:
return ticker
columns = json_res['datesDef']
outer_rows = [x['name']+':'+x['title'] if x['type'] == 'person' else x['personId'] for x in json_res['rows']]
inner_rows = [[x['name'] for x in json_res['rows'][i]['compensation']] for i in range(len(json_res['rows']))]
array = []
for i in range(len(json_res['rows'])):
inner_dic = []
for inn in json_res['rows'][i]['compensation']:
for row in inner_rows[i]:
if inn['name'] == row:
inner_dic += [inn['datum']]
array += inner_dic
try:
array = np.array(array)[:-len(inner_rows[0])]
except:
print('\n\n\n'+content+'\n'+url+'\n\n')
idx = pd.MultiIndex.from_product( [outer_rows[:-1],inner_rows[0]] , names = ['Person','Compensations'])
data = pd.DataFrame(array, idx, columns)
data.dropna(how = 'all', inplace = True)
return data
def company_profile(ticker,instrum):
url = 'https://api-global.morningstar.com/sal-service/v1/stock/companyProfile/%s?locale=en-US' % ticker
r = requests.get(url, headers=headers)
content = r.content.decode('utf-8')
json_res = json.load(StringIO(content))
json_res['sections'].pop('contact')
data = pd.DataFrame(json_res['sections'])
data.columns = data.iloc[0]
data = data[1:]
data = pd.DataFrame(data.values.T,index = data.columns, columns = ['Profile'])
return data
def real_time_info(ticker,instrum):
url = 'https://api-global.morningstar.com/sal-service/v1/stock/realTime/v3/%s/data?locale=en-US' % ticker
r = requests.get(url, headers=headers)
content = r.content.decode('utf-8')
json_res = json.load(StringIO(content))
data = pd.Series(json_res)
data.name = 'Realt Time info'
return data
def complete_valuation(ticker,instrum):
url = 'https://api-global.morningstar.com/sal-service/v1/stock/valuation/v2/%s?locale=en-US' % ticker
r = requests.get(url, headers=headers)
content = r.content.decode('utf-8')
json_res = json.load(StringIO(content))
first = json_res['Collapsed']
second = json_res['Expanded']
columns = first['columnDefs'][1:]
rows = [x['label'] for x in first['rows']] + [x['label'] for x in second['rows']]
array = np.array([x['datum'] for x in first['rows']+second['rows']])
data = pd.DataFrame(array, index = rows, columns = columns)
data = data.apply(lambda x: x.astype(float))
return data
def current_valuation(ticker,instrum):
url = 'http://financials.morningstar.com/valuate/current-valuation-list.action?&t=%s®ion=usa&culture=en-US&cur=&adsFlag=true&_=1490717022553' % ticker
r = requests.get(url)
content = r.content.decode('utf-8')
if len(content) == 0:
return ticker
soup = BeautifulSoup(content, 'lxml')
table = soup.find_all('table')[0]
column_names = []
for row in table.find_all('tr'):
th_tags = row.find_all('th')
if len(th_tags) > 0 and len(column_names) == 0:
for th in th_tags:
column_names.append(th.get_text())
while '' in column_names:
column_names.remove('')
df = pd.DataFrame(columns = ['ind'] + column_names)
row_marker = 0
for row in table.find_all('tr'):
if row.find('td') == None or row.find('th') == None:
pass
else:
if row.find('th').get_text() == 'Price/Fair Value':
pass
else:
row_in = []
row_in += [row.find('th').get_text()]
columns = row.find_all('td')
for column in columns:
row_in += [column.get_text()]
df.loc[row_marker] = row_in
if len(columns) > 0:
row_marker += 1
df.set_index('ind', inplace = True)
df.replace('—',np.nan,inplace = True)
df.dropna(how = 'all', inplace=True)
df = df.apply(lambda x: x.astype(float))
return df
def forward_valuation(ticker,instrum):
url = 'http://financials.morningstar.com/valuate/forward-valuation-list.action?&t=%s®ion=usa&culture=en-US&cur=&adsFlag=true&_=1490717022554' % ticker
r = requests.get(url)
content = r.content.decode('utf-8')
if len(content) == 0:
return ticker
soup = BeautifulSoup(content, 'lxml')
table = soup.find_all('table')[0]
column_names = []
for row in table.find_all('tr'):
th_tags = row.find_all('th')
if len(th_tags) > 0 and len(column_names) == 0:
for th in th_tags:
column_names.append(th.get_text())
while '' in column_names:
column_names.remove('')
df = pd.DataFrame(columns = ['ind'] + column_names)
row_marker = 0
for row in table.find_all('tr'):
row_in = []
columns = row.find_all('td')
for column in columns:
row_in += [column.get_text()]
if len(row_in) < len(column_names) or '' in row_in:
pass
else:
df.loc[row_marker] = row_in
if len(columns) > 0:
row_marker += 1
df.set_index('ind', inplace = True)
df.replace('—',np.nan,inplace = True)
df.dropna(1,how = 'all',inplace=True)
df = df.apply(lambda x: x.astype(float))
return df
def history_valuation(ticker,instrum):
url = 'http://financials.morningstar.com/valuate/valuation-history.action?&t=%s®ion=usa&culture=en-US&cur=&type=price-earnings&_=1490717022555' % ticker
r = requests.get(url)
content = r.content.decode('utf-8')
if len(content) == 0:
return ticker
soup = BeautifulSoup(content, 'lxml')
ths = soup.find_all('th')
second_index = []
for th in ths:
text = th.get_text()
if text not in second_index:
second_index.append(text)
trs = soup.find_all('tr')
first_index = []
rows = []
for tr in trs:
row = []
tds = tr.find_all('td')
for td in tds:
row.append(td.get_text())
if '\xa0' in row:
first_index.append(row[0])
else:
rows += [row]
column_names = [i for i in range(2018-len(rows[0]),2018)]
idx = pd.MultiIndex.from_product([first_index, second_index],
names = ['Category','Ticker'])
df = pd.DataFrame(np.array(rows), idx, column_names)
df.replace('—',np.nan,inplace = True)
df.dropna(1,how = 'all',inplace=True)
df = df.apply(lambda x: x.astype(float))
return df
def financials(ticker,instrum):
categories = list(map(lambda x: 'bs' if x.lower().replace(' ','') == 'balancesheet' or x == 'bs' \
else ( 'is' if x.lower().replace(' ','') == 'incomestatement' or x == 'is'\
else ( 'cf' if x.lower().replace(' ','') == 'cashflow' or x == 'cf'\
else print(x +' is not a meaningful code'))), instrum['fin_cat']))
if None in categories:
raise ValueError('Categories code error')
frequency = '12' if instrum['fin_frequen'].lower().replace(' ','') in ['a','annual']\
else ( '3' if instrum['fin_frequen'].lower().replace(' ','') in ['q','quarterly']\
else print(instrum['fin_frequen'] +' is not a meaningful code'))
if frequency == None:
raise ValueError('Frequency code error')
counter = 0
tables = {}
for typ in categories:
url = 'http://financials.morningstar.com/ajax/ReportProcess4CSV.html?t=%s&reportType=%s&period=%s&dataType=A&order=asc&columnYear=5&number=3' % (ticker, typ, frequency )
r = requests.get(url)
content1 = r.content.decode("utf-8")
while len(content1) == 0 :
r = requests.get(url)
content1 = r.content.decode("utf-8")
counter += 1
if counter >= 5:
return ticker
content = StringIO( content1[content1.find('. ')+1:])
data = pd.read_csv(content, sep=',')
data.set_index(data.columns[0], inplace = True)
data.dropna(how = 'all', inplace = True)
tables[typ] = data
time.sleep(0.4)
return tables
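# Example usage (commented out; it downloads CSVs from financials.morningstar.com, so it needs
# network access and a ticker that Morningstar recognises):
# statements = financials('aapl', {'fin_cat': ['is', 'bs', 'cf'], 'fin_frequen': 'annual'})
# statements['is'] # income statement DataFrame (the bare ticker string is returned if the download keeps failing)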
def key_ratio(ticker,instrum):
url= 'http://financials.morningstar.com/ajax/exportKR2CSV.html?t='+ ticker
r = requests.get(url)
content = r.content.decode("utf-8")
if len(content) == 0 or content == 'We’re sorry. There is no available information in our database to display.':
return ticker
content = StringIO( content[content.find('ls\n')+3:])
data = pd.read_csv(content, sep=',')
data[data.columns[0]].fillna(method = 'ffill',inplace = True)
data.set_index(data.columns[0], inplace = True)
data.index.name = 'Financials'
data.dropna(how = 'all', inplace = True)
data.replace({',':''}, regex = True, inplace = True)
new_dataframe = data[data[data.columns[-2]] == data.columns[-2]].index.tolist()
result = {}
for i in range(len(new_dataframe)+1):
if i == 0:
result[data.index.name] = data.loc[:new_dataframe[i],:][:-1].apply(lambda x: x.astype(float))
elif i == len(new_dataframe):
temp = data.loc[new_dataframe[-1]:,:]
temp.index.name = temp.index.tolist()[0]
temp = temp[1:]
result[new_dataframe[i-1]] = temp.apply(lambda x: x.astype(float))
else:
temp = data.loc[new_dataframe[i-1]:new_dataframe[i],:][:-1]
temp.index.name = temp.index.tolist()[0]
temp = temp[1:]
result[new_dataframe[i-1]] = temp.apply(lambda x: x.astype(float))
return result
def get_yield(ticker,instrum):
url = 'http://financials.morningstar.com/valuate/valuation-yield.action?&t=%s®ion=usa&culture=en-US&cur=&_=1490717022555' % ticker
r = requests.get(url)
content = r.content.decode('utf-8')
if len(content) == 0:
return ticker
soup = BeautifulSoup(content, 'lxml')
res = str(soup)
ticks = res[res.find('ticks')+7:res.find('tickFormatter')-11].split('"')[1:12:2]
grezzo = res[res.find('var data =')+30:res.find('</script>')-23].replace(' ','').split(',')
values = [float(x) if '.' in x or len(x) <= 3 else '' for x in grezzo]
while '' in values:
values.remove('')
data = pd.DataFrame(values, ticks, columns = ['Dividend Yield'])
return data
def priceValue(ticker,instrument):
url = 'https://api-global.morningstar.com/sal-service/v1/stock/priceFairValue/v1/%s/chart' % ticker
r = requests.get(url, headers=headers)
content = r.content.decode('utf-8')
    json_res = json.load(StringIO(content))
    # no further parsing is implemented for this endpoint yet, so hand back the raw JSON payload
    return json_res
#### Same with selenium, almost 1000 times slower
###### TOTAL RETURN AND INDEX
#
#url = 'http://www.morningstar.com/stocks/xnas/aapl/quote.html'
#
#o = Options()
#o.add_argument('-headless')
#driver = webdriver.Firefox( executable_path = '/usr/local/bin/geckodriver', firefox_options=o)
#driver.get(url)
#result = driver.find_element_by_xpath('//table[@class="total-table"]')
#data = pd.read_csv(StringIO(result.text.replace(' ',',')))
#
#### TRAILING RETURNS
#
#url = 'http://www.morningstar.com/stocks/xnas/aapl/quote.html'
#
#o = Options()
#o.add_argument('-headless')
#driver = webdriver.Firefox( executable_path = '/usr/local/bin/geckodriver', firefox_options=o)
#driver.get(url)
#result_dad = driver.find_element_by_xpath('//div[@class="sal-trailing-returns__table daily"]')
#result = result_dad.find_elements_by_xpath('table')[0]
#
#soup = BeautifulSoup(result.get_attribute('innerHTML'), 'lxml')
#data = st.parse_html_table(soup)
#data.columns = ( x.replace(' ','').replace('\n','') for x in data.columns)
#data
#
#
#r = requests.get(url)
#content = r.content.decode('utf-8')
#soup = BeautifulSoup(content, 'lxml')
#
#
#prova = st.parse_html_table(soup)
#
#
#soup = BeautifulSoup(result.get_attribute('innerHTML'), 'lxml')
#data1 = st.parse_html_table(soup)
#data1.replace({' ':''}, regex = True, inplace = True)
#data1.replace('', np.nan , inplace = True)
#data1.columns = [x.replace(' ','').replace('\n','') for x in data1.columns]
#data1
#
#
##### DIVIDENDS
#
#url1 = 'http://www.morningstar.com/stocks/xnas/aapl/quote.html'
#url2 = 'http://www.morningstar.com/stocks/xmil/atl/quote.html'
#
#o = Options()
#o.add_argument('-headless')
#driver = webdriver.Firefox( executable_path = '/usr/local/bin/geckodriver', firefox_options=o)
#driver.get(url2)
#result_dad = driver.find_element_by_xpath('//div[@class="dividends-table-bottom dividends-bottom"]')
#result = result_dad.find_elements_by_xpath('table')[0]
#
#
#soup = BeautifulSoup(result.get_attribute('innerHTML'), 'lxml')
#data = st.parse_html_table(soup)
#data.replace({'\n':''}, regex = True, inplace = True)
#data.replace('', np.nan , inplace = True)
#
#data.columns = data.iloc[0,:]
#data.set_index('Calendar', inplace = True)
#data = data[1:]
#data.dropna(how = 'all', inplace = True)
#data
#
def historical(ticker,instrum ,interval="1d"):
start_date,end_date = gt.dates_checker(instrum['start_date'],instrum['end_date'],'-')
url = 'http://globalquote.morningstar.com/globalcomponent/RealtimeHistoricalStockData.ashx?ticker=%s&showVol=true&dtype=his&f=d&curry=USD&range=%s|%s&isD=true&isS=true&hasF=true&ProdCode=DIRECT' % (ticker,start_date,end_date)
r = requests.get(url)
content = r.content.decode('utf-8')
json_res = json.loads(content)
if json_res == None:
return ticker
pricedata = json_res['PriceDataList'][0]
datapoint = pricedata['Datapoints']
o = pd.Series([ x[0] for x in datapoint])
o.name = 'Open'
h = pd.Series([ x[1] for x in datapoint])
h.name = 'High'
l = pd.Series([ x[2] for x in datapoint])
l.name = 'Low'
c = pd.Series([ x[3] for x in datapoint])
c.name = 'Close'
ohlc = pd.concat([o,h,l,c],1)
volume = pd.Series(json_res['VolumeList']['Datapoints'])
volume.name = 'Volume'
ohlc = pd.concat([ohlc,volume],1)
dates = np.array(pricedata['DateIndexs'])
dates = dates - dates[-1] + len(pd.date_range(start_date,end_date)) -1
date = | pd.date_range(start_date,end_date) | pandas.date_range |
import glob
import os
import sys
# these path insertions and imports need to stay in this order
sys.path.insert(0, "../")
sys.path.insert(0, "TP_model")
sys.path.insert(0, "TP_model/fit_and_forecast")
from Reff_functions import *
from Reff_constants import *
from sys import argv
from datetime import timedelta, datetime
from scipy.special import expit
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import matplotlib
matplotlib.use("Agg")
def forecast_TP(data_date):
from scenarios import scenarios, scenario_dates
from params import (
num_forecast_days,
alpha_start_date,
delta_start_date,
omicron_start_date,
truncation_days,
start_date,
sim_start_date,
third_start_date,
mob_samples,
)
data_date = pd.to_datetime(data_date)
# Define inputs
sim_start_date = pd.to_datetime(sim_start_date)
# Add 3 days buffer to mobility forecast
num_forecast_days = num_forecast_days + 3
# data_date = pd.to_datetime('2022-01-25')
print("============")
print("Generating forecasts using data from", data_date)
print("============")
# convert third start date to the correct format
third_start_date = pd.to_datetime(third_start_date)
third_end_date = data_date - timedelta(truncation_days)
# a different end date to deal with issues in fitting
third_end_date_diff = data_date - timedelta(18 + 7 + 7)
third_states = sorted(["NSW", "VIC", "ACT", "QLD", "SA", "TAS", "NT", "WA"])
# third_states = sorted(['NSW', 'VIC', 'ACT', 'QLD', 'SA', 'NT'])
# choose dates for each state for third wave
# NOTE: These need to be in date sorted order
third_date_range = {
"ACT": pd.date_range(start="2021-08-15", end=third_end_date).values,
"NSW": pd.date_range(start="2021-06-25", end=third_end_date).values,
"NT": pd.date_range(start="2021-12-20", end=third_end_date).values,
"QLD": pd.date_range(start="2021-07-30", end=third_end_date).values,
"SA": pd.date_range(start="2021-12-10", end=third_end_date).values,
"TAS": pd.date_range(start="2021-12-20", end=third_end_date).values,
"VIC": pd.date_range(start="2021-07-10", end=third_end_date).values,
"WA": pd.date_range(start="2022-01-01", end=third_end_date).values,
}
# Get Google Data - Don't use the smoothed data?
df_google_all = read_in_google(Aus_only=True, moving=True, local=True)
third_end_date = pd.to_datetime(data_date) - pd.Timedelta(days=truncation_days)
results_dir = (
"results/"
+ data_date.strftime("%Y-%m-%d")
+ "/"
)
    # Load in vaccination data by state and date, which should have the same dates as the
    # NNDSS/linelist data; we use the inferred VE
vaccination_by_state_delta = pd.read_csv(
results_dir + "adjusted_vaccine_ts_delta" + data_date.strftime("%Y-%m-%d") + ".csv",
parse_dates=["date"],
)
vaccination_by_state_delta = vaccination_by_state_delta[["state", "date", "effect"]]
vaccination_by_state_delta = vaccination_by_state_delta.pivot(
index="state", columns="date", values="effect"
) # Convert to matrix form
# Convert to simple array for indexing
vaccination_by_state_delta_array = vaccination_by_state_delta.to_numpy()
vaccination_by_state_omicron = pd.read_csv(
results_dir + "adjusted_vaccine_ts_omicron" + data_date.strftime("%Y-%m-%d") + ".csv",
parse_dates=["date"],
)
vaccination_by_state_omicron = vaccination_by_state_omicron[["state", "date", "effect"]]
vaccination_by_state_omicron = vaccination_by_state_omicron.pivot(
index="state", columns="date", values="effect"
) # Convert to matrix form
# Convert to simple array for indexing
vaccination_by_state_omicron_array = vaccination_by_state_omicron.to_numpy()
# Get survey data
surveys = pd.DataFrame()
path = "data/md/Barometer wave*.csv"
for file in glob.glob(path):
surveys = surveys.append(pd.read_csv(file, parse_dates=["date"]))
surveys = surveys.sort_values(by="date")
print("Latest microdistancing survey is {}".format(surveys.date.values[-1]))
surveys.loc[surveys.state != "ACT", "state"] = (
surveys.loc[surveys.state != "ACT", "state"]
.map(states_initials)
.fillna(surveys.loc[surveys.state != "ACT", "state"])
)
surveys["proportion"] = surveys["count"] / surveys.respondents
surveys.date = pd.to_datetime(surveys.date)
always = surveys.loc[surveys.response == "Always"].set_index(["state", "date"])
always = always.unstack(["state"])
# fill in date range
idx = pd.date_range("2020-03-01", pd.to_datetime("today"))
always = always.reindex(idx, fill_value=np.nan)
always.index.name = "date"
always = always.fillna(method="bfill")
always = always.stack(["state"])
# Zero out before first survey 20th March
always = always.reset_index().set_index("date")
always.loc[:"2020-03-20", "count"] = 0
always.loc[:"2020-03-20", "respondents"] = 0
always.loc[:"2020-03-20", "proportion"] = 0
always = always.reset_index().set_index(["state", "date"])
survey_X = pd.pivot_table(
data=always, index="date", columns="state", values="proportion"
)
prop_all = survey_X
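    # At this point prop_all is a date-by-state table of the proportion of respondents answering
    # "Always" to the microdistancing survey, zeroed out before the first survey on 2020-03-20 and
    # back-filled between survey waves.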
## read in and process mask wearing data
mask_wearing = pd.DataFrame()
path = "data/face_coverings/face_covering_*_.csv"
for file in glob.glob(path):
mask_wearing = mask_wearing.append(pd.read_csv(file, parse_dates=["date"]))
mask_wearing = mask_wearing.sort_values(by="date")
print("Latest mask wearing survey is {}".format(mask_wearing.date.values[-1]))
# mask_wearing['state'] = mask_wearing['state'].map(states_initials).fillna(mask_wearing['state'])
mask_wearing.loc[mask_wearing.state != "ACT", "state"] = (
mask_wearing.loc[mask_wearing.state != "ACT", "state"]
.map(states_initials)
.fillna(mask_wearing.loc[mask_wearing.state != "ACT", "state"])
)
mask_wearing["proportion"] = mask_wearing["count"] / mask_wearing.respondents
mask_wearing.date = pd.to_datetime(mask_wearing.date)
mask_wearing_always = mask_wearing.loc[
mask_wearing.face_covering == "Always"
].set_index(["state", "date"])
mask_wearing_always = mask_wearing_always.unstack(["state"])
idx = pd.date_range("2020-03-01", pd.to_datetime("today"))
mask_wearing_always = mask_wearing_always.reindex(idx, fill_value=np.nan)
mask_wearing_always.index.name = "date"
# fill back to earlier and between weeks.
# Assume survey on day x applies for all days up to x - 6
mask_wearing_always = mask_wearing_always.fillna(method="bfill")
mask_wearing_always = mask_wearing_always.stack(["state"])
# Zero out before first survey 20th March
mask_wearing_always = mask_wearing_always.reset_index().set_index("date")
mask_wearing_always.loc[:"2020-03-20", "count"] = 0
mask_wearing_always.loc[:"2020-03-20", "respondents"] = 0
mask_wearing_always.loc[:"2020-03-20", "proportion"] = 0
mask_wearing_X = pd.pivot_table(
data=mask_wearing_always, index="date", columns="state", values="proportion"
)
mask_wearing_all = mask_wearing_X
# Get posterior
df_samples = read_in_posterior(
date=data_date.strftime("%Y-%m-%d"),
)
states = sorted(["NSW", "QLD", "SA", "VIC", "TAS", "WA", "ACT", "NT"])
plot_states = states.copy()
one_month = data_date + timedelta(days=num_forecast_days)
days_from_March = (one_month - pd.to_datetime(start_date)).days
# filter out future info
prop = prop_all.loc[:data_date]
masks = mask_wearing_all.loc[:data_date]
df_google = df_google_all.loc[df_google_all.date <= data_date]
# use this trick of saving the google data and then reloading it to kill
# the date time values
df_google.to_csv("results/test_google_data.csv")
df_google = pd.read_csv("results/test_google_data.csv")
# remove the temporary file
# os.remove("results/test_google_data.csv")
    # Simple interpolation for missing values in Google data
df_google = df_google.interpolate(method="linear", axis=0)
df_google.date = pd.to_datetime(df_google.date)
# forecast time parameters
today = data_date.strftime("%Y-%m-%d")
# add days to forecast if we are missing data
if df_google.date.values[-1] < data_date:
n_forecast = num_forecast_days + (data_date - df_google.date.values[-1]).days
else:
n_forecast = num_forecast_days
training_start_date = datetime(2020, 3, 1, 0, 0)
print(
"Forecast ends at {} days after 1st March".format(
(pd.to_datetime(today) - pd.to_datetime(training_start_date)).days
+ num_forecast_days
)
)
print(
"Final date is {}".format(pd.to_datetime(today) + timedelta(days=num_forecast_days))
)
df_google = df_google.loc[df_google.date >= training_start_date]
outdata = {"date": [], "type": [], "state": [], "mean": [], "std": []}
predictors = mov_values.copy()
# predictors.remove("residential_7days")
# Setup Figures
axes = []
figs = []
for var in predictors:
fig, ax_states = plt.subplots(figsize=(7, 8), nrows=4, ncols=2, sharex=True)
axes.append(ax_states)
# fig.suptitle(var)
figs.append(fig)
# extra fig for microdistancing
var = "Proportion people always microdistancing"
fig, ax_states = plt.subplots(figsize=(7, 8), nrows=4, ncols=2, sharex=True)
axes.append(ax_states)
figs.append(fig)
# # extra fig for mask wearing
var = "Proportion people always wearing masks"
fig, ax_states = plt.subplots(figsize=(7, 8), nrows=4, ncols=2, sharex=True)
axes.append(ax_states)
figs.append(fig)
var = "Reduction in Reff due to vaccination"
fig, ax_states = plt.subplots(figsize=(7, 8), nrows=4, ncols=2, sharex=True)
axes.append(ax_states)
figs.append(fig)
var = "Reduction in Reff due to vaccination"
fig, ax_states = plt.subplots(figsize=(7, 8), nrows=4, ncols=2, sharex=True)
axes.append(ax_states)
figs.append(fig)
# Forecasting Params
n_training = 21 # Period to examine trend
n_baseline = 150 # Period to create baseline
n_training_vaccination = 30 # period to create trend for vaccination
# since this can be useful, predictor ordering is:
# [
# 'retail_and_recreation_7days',
# 'grocery_and_pharmacy_7days',
# 'parks_7days',
# 'transit_stations_7days',
# 'workplaces_7days'
# ]
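    # In the default (no scenario) branch of the loop below, each mobility predictor is stepped
    # forward one day at a time as
    #   new = current + p * trend_force + (1 - p) * regression_to_baseline_force
    # with p = (days remaining in the horizon) / (total horizon), so the forecast follows the recent
    # trend early on and is pulled back towards the pre-forecast baseline as the horizon runs out.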
# Loop through states and run forecasting.
print("============")
print("Forecasting macro, micro and vaccination")
print("============")
state_Rmed = {}
state_sims = {}
for i, state in enumerate(states):
rownum = int(i / 2)
colnum = np.mod(i, 2)
rows = df_google.loc[df_google.state == state].shape[0]
# Rmed currently a list, needs to be a matrix
Rmed_array = np.zeros(shape=(rows, len(predictors), mob_samples))
for j, var in enumerate(predictors):
for n in range(mob_samples):
# historically we want a little more noise. In the actual forecasting of trends
# we don't want this to be quite that prominent.
Rmed_array[:, j, n] = df_google[df_google["state"] == state][
var
].values.T + np.random.normal(
loc=0, scale=df_google[df_google["state"] == state][var + "_std"]
)
dates = df_google[df_google["state"] == state]["date"]
    # cap the forecast range per predictor: lower bound min(-50, historical min), upper bound max(+10, historical max)
# 1 by predictors by mob_samples size
minRmed_array = np.minimum(-50, np.amin(Rmed_array, axis=0))
maxRmed_array = np.maximum(10, np.amax(Rmed_array, axis=0))
# days by predictors by samples
sims = np.zeros(shape=(n_forecast, len(predictors), mob_samples))
for n in range(mob_samples): # Loop through simulations
Rmed = Rmed_array[:, :, n]
minRmed = minRmed_array[:, n]
maxRmed = maxRmed_array[:, n]
if maxRmed[1] < 20:
maxRmed[1] = 50
R_baseline_mean = np.mean(Rmed[-n_baseline:, :], axis=0)
if state not in {"WA"}:
R_baseline_mean[-1] = 0
R_diffs = np.diff(Rmed[-n_training:, :], axis=0)
mu = np.mean(R_diffs, axis=0)
cov = np.cov(R_diffs, rowvar=False) # columns are vars, rows are obs
# Forecast mobility forward sequentially by day.
# current = np.mean(Rmed[-9:-2, :], axis=0) # Start from last valid days
# current = np.mean(Rmed[-1, :], axis=0) # Start from last valid days
current = Rmed[-1, :] # Start from last valid days
for i in range(n_forecast):
# ## SCENARIO MODELLING
            # This code chunk lets you manually set the distancing params for a state for scenario modelling.
if scenarios[state] == "":
# Proportion of trend_force to regression_to_baseline_force
p_force = (n_forecast - i) / (n_forecast)
# Generate a single forward realisation of trend
trend_force = np.random.multivariate_normal(mu, cov)
# Generate a single forward realisation of baseline regression
# regression to baseline force stronger in standard forecasting
regression_to_baseline_force = np.random.multivariate_normal(
0.05 * (R_baseline_mean - current), cov
)
new_forcast_points = (
current + p_force * trend_force + (1 - p_force) * regression_to_baseline_force
) # Find overall simulation step
# Apply minimum and maximum
new_forcast_points = np.maximum(minRmed, new_forcast_points)
new_forcast_points = np.minimum(maxRmed, new_forcast_points)
current = new_forcast_points
elif scenarios[state] != "":
# Make baseline cov for generating points
cov_baseline = np.cov(Rmed[-42:-28, :], rowvar=False)
mu_current = Rmed[-1, :]
mu_victoria = np.array(
[
-55.35057887,
-22.80891056,
-46.59531636,
-75.99942378,
-44.71119293,
]
)
mu_baseline = np.mean(Rmed[-42:-28, :], axis=0)
# mu_baseline = 0*np.mean(Rmed[-42:-28, :], axis=0)
if scenario_dates[state] != "":
scenario_change_point = (
pd.to_datetime(scenario_dates[state]) - data_date
).days + (n_forecast - 42)
# Constant Lockdown
if (
scenarios[state] == "no_reversion"
or scenarios[state] == "school_opening_2022"
):
# take a continuous median to account for noise in recent observations (such as sunny days)
# mu_current = np.mean(Rmed[-7:, :], axis=0)
# cov_baseline = np.cov(Rmed[-28:, :], rowvar=False)
new_forcast_points = np.random.multivariate_normal(
mu_current, cov_baseline
)
elif scenarios[state] == "no_reversion_continuous_lockdown":
# add the new scenario here
new_forcast_points = np.random.multivariate_normal(
mu_current, cov_baseline
)
# No Lockdown
elif scenarios[state] == "full_reversion":
# a full reversion scenario changes the social mobility and microdistancing
# behaviours at the scenario change date and then applies a return to baseline force
if i < scenario_change_point:
new_forcast_points = np.random.multivariate_normal(
mu_current, cov_baseline
)
else:
# baseline is within lockdown period so take a new baseline of 0's and trend towards this
R_baseline_0 = np.zeros_like(R_baseline_mean)
R_baseline_0 = mu_baseline
                        # set adjusted baselines by eye for now; this needs to be automated
# R_baseline_0[1] = 10 # baseline of +10% for Grocery based on other jurisdictions
# # apply specific baselines to the jurisdictions progressing towards normal restrictions
# if state == 'NSW':
# R_baseline_0[3] = -25 # baseline of -25% for Transit based on 2021-April to 2021-July (pre-third-wave lockdowns)
# elif state == 'ACT':
# R_baseline_0[1] = 20 # baseline of +20% for Grocery based on other jurisdictions
# R_baseline_0[3] = -25 # baseline of -25% for Transit based on 2021-April to 2021-July (pre-third-wave lockdowns)
# elif state == 'VIC':
# R_baseline_0[0] = -15 # baseline of -15% for R&R based on 2021-April to 2021-July (pre-third-wave lockdowns)
# R_baseline_0[3] = -30 # baseline of -30% for Transit based on 2021-April to 2021-July (pre-third-wave lockdowns)
# R_baseline_0[4] = -15 # baseline of -15% for workplaces based on 2021-April to 2021-July (pre-third-wave lockdowns)
# the force we trend towards the baseline above with
p_force = (n_forecast - i) / (n_forecast)
trend_force = np.random.multivariate_normal(
mu, cov
) # Generate a single forward realisation of trend
# baseline scalar is smaller for this as we want slow returns
adjusted_baseline_drift_mean = R_baseline_0 - current
# we purposely scale the transit measure so that we increase a little more quickly
# tmp = 0.05 * adjusted_baseline_drift_mean[3]
adjusted_baseline_drift_mean *= 0.005
# adjusted_baseline_drift_mean[3] = tmp
regression_to_baseline_force = np.random.multivariate_normal(
adjusted_baseline_drift_mean, cov
) # Generate a single forward realisation of baseline regression
new_forcast_points = (
current
+ p_force * trend_force
+ (1 - p_force) * regression_to_baseline_force
) # Find overall simulation step
# new_forcast_points = current + regression_to_baseline_force # Find overall simulation step
# Apply minimum and maximum
new_forcast_points = np.maximum(minRmed, new_forcast_points)
new_forcast_points = np.minimum(maxRmed, new_forcast_points)
current = new_forcast_points
elif scenarios[state] == "immediately_baseline":
# this scenario is used to return instantly to the baseline levels
if i < scenario_change_point:
new_forcast_points = np.random.multivariate_normal(
mu_current, cov_baseline
)
else:
# baseline is within lockdown period so take a new baseline of 0's and trend towards this
R_baseline_0 = np.zeros_like(R_baseline_mean)
# jump immediately to baseline
new_forcast_points = np.random.multivariate_normal(
R_baseline_0, cov_baseline
)
# Temporary Lockdown
elif scenarios[state] == "half_reversion":
if i < scenario_change_point:
new_forcast_points = np.random.multivariate_normal(
mu_current, cov_baseline
)
else:
new_forcast_points = np.random.multivariate_normal(
(mu_current + mu_baseline) / 2, cov_baseline
)
# Stage 4
elif scenarios[state] == "stage4":
if i < scenario_change_point:
new_forcast_points = np.random.multivariate_normal(
mu_current, cov_baseline
)
else:
new_forcast_points = np.random.multivariate_normal(
mu_victoria, cov_baseline
)
# Set this day in this simulation to the forecast realisation
sims[i, :, n] = new_forcast_points
dd = [dates.tolist()[-1] + timedelta(days=x) for x in range(1, n_forecast + 1)]
sims_med = np.median(sims, axis=2) # N by predictors
sims_q25 = np.percentile(sims, 25, axis=2)
sims_q75 = np.percentile(sims, 75, axis=2)
    # forecast microdistancing
# Get a baseline value of microdistancing
mu_overall = np.mean(prop[state].values[-n_baseline:])
md_diffs = np.diff(prop[state].values[-n_training:])
mu_diffs = np.mean(md_diffs)
std_diffs = np.std(md_diffs)
extra_days_md = (
pd.to_datetime(df_google.date.values[-1])
- pd.to_datetime(prop[state].index.values[-1])
).days
# Set all values to current value.
current = [prop[state].values[-1]] * mob_samples
new_md_forecast = []
# Forecast mobility forward sequentially by day.
for i in range(n_forecast + extra_days_md):
# SCENARIO MODELLING
        # This code chunk lets you manually set the distancing params for a state for scenario modelling.
if scenarios[state] == "":
# Proportion of trend_force to regression_to_baseline_force
p_force = (n_forecast + extra_days_md - i) / (n_forecast + extra_days_md)
# Generate step realisations in training trend direction
trend_force = np.random.normal(mu_diffs, std_diffs, size=mob_samples)
# Generate realisations that draw closer to baseline
regression_to_baseline_force = np.random.normal(
0.05 * (mu_overall - current), std_diffs
)
current = (
current
+ p_force * trend_force
+ (1 - p_force) * regression_to_baseline_force
) # Balance forces
# current = current+p_force*trend_force # Balance forces
elif scenarios[state] != "":
current = np.array(current)
# Make baseline cov for generating points
std_baseline = np.std(prop[state].values[-42:-28])
mu_baseline = np.mean(prop[state].values[-42:-28], axis=0)
mu_current = prop[state].values[-1]
if scenario_dates[state] != "":
scenario_change_point = (
pd.to_datetime(scenario_dates[state]) - data_date
).days + extra_days_md
# Constant Lockdown
if (
scenarios[state] == "no_reversion"
or scenarios[state] == "school_opening_2022"
):
# use only more recent data to forecast under a no-reversion scenario
# std_lockdown = np.std(prop[state].values[-24:-4])
# current = np.random.normal(mu_current, std_lockdown)
current = np.random.normal(mu_current, std_baseline)
# No Lockdown
elif scenarios[state] == "full_reversion":
if i < scenario_change_point:
current = np.random.normal(mu_current, std_baseline)
else:
mu_baseline_0 = 0.2
# Proportion of trend_force to regression_to_baseline_force
p_force = (n_forecast + extra_days_md - i) / (
n_forecast + extra_days_md
)
# take a mean of the differences over the last 2 weeks
mu_diffs = np.mean(np.diff(prop[state].values[-14:]))
# Generate step realisations in training trend direction
trend_force = np.random.normal(mu_diffs, std_baseline)
# Generate realisations that draw closer to baseline
regression_to_baseline_force = np.random.normal(
0.005 * (mu_baseline_0 - current), std_baseline
)
current = current + regression_to_baseline_force # Balance forces
elif scenarios[state] == "immediately_baseline":
# this scenario is an immediate return to baseline values
if i < scenario_change_point:
current = np.random.normal(mu_current, std_baseline)
else:
mu_baseline_0 = 0.2
# jump immediately to baseline
current = np.random.normal(mu_baseline_0, std_baseline)
# Temporary Lockdown
elif scenarios[state] == "half_reversion": # No Lockdown
if i < scenario_change_point:
current = np.random.normal(mu_current, std_baseline)
else:
# Revert to values halfway between the before and after
current = np.random.normal(
(mu_current + mu_baseline) / 2, std_baseline
)
new_md_forecast.append(current)
md_sims = np.vstack(new_md_forecast) # Put forecast days together
md_sims = np.minimum(1, md_sims)
md_sims = np.maximum(0, md_sims)
dd_md = [
prop[state].index[-1] + timedelta(days=x)
for x in range(1, n_forecast + extra_days_md + 1)
]
## currently not forecasting masks — may return in the future but will need to assess.
# forecast mask wearing compliance
    # Get a baseline value of mask wearing
mu_overall = np.mean(masks[state].values[-n_baseline:])
md_diffs = np.diff(masks[state].values[-n_training:])
mu_diffs = np.mean(md_diffs)
std_diffs = np.std(md_diffs)
extra_days_masks = (
pd.to_datetime(df_google.date.values[-1])
- pd.to_datetime(masks[state].index.values[-1])
).days
# Set all values to current value.
current = [masks[state].values[-1]] * mob_samples
new_masks_forecast = []
# Forecast mobility forward sequentially by day.
for i in range(n_forecast + extra_days_masks):
# SCENARIO MODELLING
        # This code chunk lets you manually set the distancing params for a state for scenario modelling.
if scenarios[state] == "":
            # Proportion of trend_force to regression_to_baseline_force
p_force = (n_forecast + extra_days_masks - i) / (
n_forecast + extra_days_masks
)
# Generate step realisations in training trend direction
trend_force = np.random.normal(mu_diffs, std_diffs, size=mob_samples)
# Generate realisations that draw closer to baseline
# regression_to_baseline_force = np.random.normal(0.05*(mu_overall - current), std_diffs)
# current = current + p_force*trend_force + (1-p_force)*regression_to_baseline_force # Balance forces
current = current + trend_force
elif scenarios[state] != "":
current = np.array(current)
# Make baseline cov for generating points
std_baseline = np.std(masks[state].values[-42:-28])
mu_baseline = np.mean(masks[state].values[-42:-28], axis=0)
mu_current = masks[state].values[-1]
if scenario_dates[state] != "":
scenario_change_point = (
pd.to_datetime(scenario_dates[state]) - data_date
).days + extra_days_masks
# Constant Lockdown
if (
scenarios[state] == "no_reversion"
or scenarios[state] == "school_opening_2022"
):
# use only more recent data to forecast under a no-reversion scenario
# std_lockdown = np.std(masks[state].values[-24:-4])
# current = np.random.normal(mu_current, std_lockdown)
current = np.random.normal(mu_current, std_baseline)
# No Lockdown
elif scenarios[state] == "full_reversion":
if i < scenario_change_point:
current = np.random.normal(mu_current, std_baseline)
else:
mu_baseline_0 = 0.2
                    # Proportion of trend_force to regression_to_baseline_force
p_force = (n_forecast + extra_days_masks - i) / (
n_forecast + extra_days_masks
)
# take a mean of the differences over the last 2 weeks
mu_diffs = np.mean(np.diff(masks[state].values[-14:]))
# Generate step realisations in training trend direction
trend_force = np.random.normal(mu_diffs, std_baseline)
# Generate realisations that draw closer to baseline
regression_to_baseline_force = np.random.normal(
0.005 * (mu_baseline_0 - current), std_baseline
)
current = current + regression_to_baseline_force # Balance forces
elif scenarios[state] == "immediately_baseline":
# this scenario is an immediate return to baseline values
if i < scenario_change_point:
current = np.random.normal(mu_current, std_baseline)
else:
mu_baseline_0 = 0.2
# jump immediately to baseline
current = np.random.normal(mu_baseline_0, std_baseline)
# Temporary Lockdown
elif scenarios[state] == "half_reversion": # No Lockdown
if i < scenario_change_point:
current = np.random.normal(mu_current, std_baseline)
else:
# Revert to values halfway between the before and after
current = np.random.normal(
(mu_current + mu_baseline) / 2, std_baseline
)
new_masks_forecast.append(current)
masks_sims = np.vstack(new_masks_forecast) # Put forecast days together
masks_sims = np.minimum(1, masks_sims)
masks_sims = np.maximum(0, masks_sims)
dd_masks = [
masks[state].index[-1] + timedelta(days=x)
for x in range(1, n_forecast + extra_days_masks + 1)
]
# Forecasting vaccine effect
# if state == "WA":
# last_fit_date = pd.to_datetime(third_end_date)
# else:
last_fit_date = pd.to_datetime(third_date_range[state][-1])
extra_days_vacc = (pd.to_datetime(df_google.date.values[-1]) - last_fit_date).days
total_forecasting_days = n_forecast + extra_days_vacc
# get the VE on the last day
mean_delta = vaccination_by_state_delta.loc[state][last_fit_date + timedelta(1)]
mean_omicron = vaccination_by_state_omicron.loc[state][last_fit_date + timedelta(1)]
current = np.zeros_like(mob_samples)
new_delta = []
new_omicron = []
# variance on the vaccine forecasts is equivalent to what we use in the fitting
var_vax = 0.00005
a_vax = np.zeros_like(mob_samples)
b_vax = np.zeros_like(mob_samples)
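    # The loop below draws each day's VE from a Beta distribution parameterised by
    # moment matching: for mean m and variance v (var_vax above),
    #     a = m * (m * (1 - m) / v - 1),    b = (1 - m) * (m * (1 - m) / v - 1),
    # which gives Beta(a, b) mean m and variance v, so the sampled effects stay
    # tightly concentrated around the adjusted VE time series.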
for d in pd.date_range(
last_fit_date + timedelta(1),
pd.to_datetime(today) + timedelta(days=num_forecast_days),
):
mean_delta = vaccination_by_state_delta.loc[state][d]
a_vax = mean_delta * (mean_delta * (1 - mean_delta) / var_vax - 1)
b_vax = (1 - mean_delta) * (mean_delta * (1 - mean_delta) / var_vax - 1)
current = np.random.beta(a_vax, b_vax, mob_samples)
new_delta.append(current.tolist())
mean_omicron = vaccination_by_state_omicron.loc[state][d]
a_vax = mean_omicron * (mean_omicron * (1 - mean_omicron) / var_vax - 1)
b_vax = (1 - mean_omicron) * (mean_omicron * (1 - mean_omicron) / var_vax - 1)
current = np.random.beta(a_vax, b_vax, mob_samples)
new_omicron.append(current.tolist())
vacc_sims_delta = np.vstack(new_delta)
vacc_sims_omicron = np.vstack(new_omicron)
dd_vacc = [
last_fit_date + timedelta(days=x)
for x in range(1, n_forecast + extra_days_vacc + 1)
]
for j, var in enumerate(
predictors
+ ["md_prop"]
+ ["masks_prop"]
+ ["vaccination_delta"]
+ ["vaccination_omicron"]
):
# Record data
axs = axes[j]
if (state == "AUS") and (var == "md_prop"):
continue
if var == "md_prop":
outdata["type"].extend([var] * len(dd_md))
outdata["state"].extend([state] * len(dd_md))
outdata["date"].extend([d.strftime("%Y-%m-%d") for d in dd_md])
outdata["mean"].extend(np.mean(md_sims, axis=1))
outdata["std"].extend(np.std(md_sims, axis=1))
elif var == "masks_prop":
outdata["type"].extend([var] * len(dd_masks))
outdata["state"].extend([state] * len(dd_masks))
outdata["date"].extend([d.strftime("%Y-%m-%d") for d in dd_masks])
outdata["mean"].extend(np.mean(masks_sims, axis=1))
outdata["std"].extend(np.std(masks_sims, axis=1))
elif var == "vaccination_delta":
outdata["type"].extend([var] * len(dd_vacc))
outdata["state"].extend([state] * len(dd_vacc))
outdata["date"].extend([d.strftime("%Y-%m-%d") for d in dd_vacc])
outdata["mean"].extend(np.mean(vacc_sims_delta, axis=1))
outdata["std"].extend(np.std(vacc_sims_delta, axis=1))
elif var == "vaccination_omicron":
outdata["type"].extend([var] * len(dd_vacc))
outdata["state"].extend([state] * len(dd_vacc))
outdata["date"].extend([d.strftime("%Y-%m-%d") for d in dd_vacc])
outdata["mean"].extend(np.mean(vacc_sims_omicron, axis=1))
outdata["std"].extend(np.std(vacc_sims_omicron, axis=1))
else:
outdata["type"].extend([var] * len(dd))
outdata["state"].extend([state] * len(dd))
outdata["date"].extend([d.strftime("%Y-%m-%d") for d in dd])
outdata["mean"].extend(np.mean(sims[:, j, :], axis=1))
outdata["std"].extend(np.std(sims[:, j, :], axis=1))
if state in plot_states:
if var == "md_prop":
# md plot
axs[rownum, colnum].plot(prop[state].index, prop[state].values, lw=1)
axs[rownum, colnum].plot(dd_md, np.median(md_sims, axis=1), "k", lw=1)
axs[rownum, colnum].fill_between(
dd_md,
np.quantile(md_sims, 0.25, axis=1),
np.quantile(md_sims, 0.75, axis=1),
color="k",
alpha=0.1,
)
elif var == "masks_prop":
# masks plot
axs[rownum, colnum].plot(masks[state].index, masks[state].values, lw=1)
axs[rownum, colnum].plot(
dd_masks, np.median(masks_sims, axis=1), "k", lw=1
)
axs[rownum, colnum].fill_between(
dd_masks,
np.quantile(masks_sims, 0.25, axis=1),
np.quantile(masks_sims, 0.75, axis=1),
color="k",
alpha=0.1,
)
elif var == "vaccination_delta":
# vaccination plot
axs[rownum, colnum].plot(
vaccination_by_state_delta.loc[
state, ~vaccination_by_state_delta.loc[state].isna()
].index,
vaccination_by_state_delta.loc[
state, ~vaccination_by_state_delta.loc[state].isna()
].values,
lw=1,
)
axs[rownum, colnum].plot(
dd_vacc, np.median(vacc_sims_delta, axis=1), color="C1", lw=1
)
axs[rownum, colnum].fill_between(
dd_vacc,
np.quantile(vacc_sims_delta, 0.25, axis=1),
np.quantile(vacc_sims_delta, 0.75, axis=1),
color="C1",
alpha=0.1,
)
elif var == "vaccination_omicron":
# vaccination plot
axs[rownum, colnum].plot(
vaccination_by_state_omicron.loc[
state, ~vaccination_by_state_omicron.loc[state].isna()
].index,
vaccination_by_state_omicron.loc[
state, ~vaccination_by_state_omicron.loc[state].isna()
].values,
lw=1,
)
axs[rownum, colnum].plot(
dd_vacc, np.median(vacc_sims_omicron, axis=1), color="C1", lw=1
)
axs[rownum, colnum].fill_between(
dd_vacc,
np.quantile(vacc_sims_omicron, 0.25, axis=1),
np.quantile(vacc_sims_omicron, 0.75, axis=1),
color="C1",
alpha=0.1,
)
else:
# all other predictors
axs[rownum, colnum].plot(
dates, df_google[df_google["state"] == state][var].values, lw=1
)
axs[rownum, colnum].fill_between(
dates,
np.percentile(Rmed_array[:, j, :], 25, axis=1),
np.percentile(Rmed_array[:, j, :], 75, axis=1),
alpha=0.5,
)
axs[rownum, colnum].plot(dd, sims_med[:, j], color="C1", lw=1)
axs[rownum, colnum].fill_between(
dd, sims_q25[:, j], sims_q75[:, j], color="C1", alpha=0.1
)
# axs[rownum,colnum].axvline(dd[-num_forecast_days], ls = '--', color = 'black', lw=1) # plotting a vertical line at the end of the data date
# axs[rownum,colnum].axvline(dd[-(num_forecast_days+truncation_days)], ls = '-.', color='grey', lw=1) # plotting a vertical line at the forecast date
axs[rownum, colnum].set_title(state)
# plotting horizontal line at 1
axs[rownum, colnum].axhline(1, ls="--", c="k", lw=1)
axs[rownum, colnum].set_title(state)
axs[rownum, colnum].tick_params("x", rotation=90)
axs[rownum, colnum].tick_params("both", labelsize=8)
# plot the start date of the data and indicators of the data we are actually fitting to (in grey)
axs[rownum, colnum].axvline(data_date, ls="-.", color="black", lw=1)
if j < len(predictors):
axs[rownum, colnum].set_ylabel(
predictors[j].replace("_", " ")[:-5], fontsize=7
)
elif var == "md_prop":
axs[rownum, colnum].set_ylabel(
"Proportion of respondents\n micro-distancing", fontsize=7
)
elif var == "masks_prop":
axs[rownum, colnum].set_ylabel(
"Proportion of respondents\n wearing masks", fontsize=7
)
elif var == "vaccination_delta" or var == "vaccination_omicron":
axs[rownum, colnum].set_ylabel(
"Reduction in TP \n from vaccination", fontsize=7
)
# historically we want to store the higher variance mobilities
state_Rmed[state] = Rmed_array
state_sims[state] = sims
os.makedirs(
"figs/mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts",
exist_ok=True,
)
for i, fig in enumerate(figs):
fig.text(0.5, 0.02, "Date", ha="center", va="center", fontsize=15)
if i < len(predictors): # this plots the google mobility forecasts
fig.tight_layout()
fig.savefig(
"figs/mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts/"
+ str(predictors[i])
+ ".png",
dpi=400,
)
elif i == len(predictors): # this plots the microdistancing forecasts
fig.tight_layout()
fig.savefig(
"figs/mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts/micro_dist.png",
dpi=400,
)
    elif i == len(predictors) + 1: # this plots the mask wearing forecasts
fig.tight_layout()
fig.savefig(
"figs/mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts/mask_wearing.png",
dpi=400,
)
elif i == len(predictors) + 2: # finally this plots the delta VE forecasts
fig.tight_layout()
fig.savefig(
"figs/mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts/delta_vaccination.png",
dpi=400,
)
else: # finally this plots the omicron VE forecasts
fig.tight_layout()
fig.savefig(
"figs/mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts/omicron_vaccination.png",
dpi=400,
)
df_out = pd.DataFrame.from_dict(outdata)
df_md = df_out.loc[df_out.type == "md_prop"]
df_masks = df_out.loc[df_out.type == "masks_prop"]
df_out = df_out.loc[df_out.type != "vaccination_delta"]
df_out = df_out.loc[df_out.type != "vaccination_omicron"]
df_out = df_out.loc[df_out.type != "md_prop"]
df_out = df_out.loc[df_out.type != "masks_prop"]
df_forecast = pd.pivot_table(
df_out, columns=["type"], index=["date", "state"], values=["mean"]
)
df_std = pd.pivot_table(
df_out, columns=["type"], index=["date", "state"], values=["std"]
)
df_forecast_md = pd.pivot_table(
df_md, columns=["state"], index=["date"], values=["mean"]
)
df_forecast_md_std = pd.pivot_table(
df_md, columns=["state"], index=["date"], values=["std"]
)
df_forecast_masks = pd.pivot_table(
df_masks, columns=["state"], index=["date"], values=["mean"]
)
df_forecast_masks_std = pd.pivot_table(
df_masks, columns=["state"], index=["date"], values=["std"]
)
# align with google order in columns
df_forecast = df_forecast.reindex([("mean", val) for val in predictors], axis=1)
df_std = df_std.reindex([("std", val) for val in predictors], axis=1)
df_forecast.columns = predictors # remove the tuple name of columns
df_std.columns = predictors
df_forecast = df_forecast.reset_index()
df_std = df_std.reset_index()
df_forecast.date = pd.to_datetime(df_forecast.date)
df_std.date = pd.to_datetime(df_std.date)
df_forecast_md = df_forecast_md.reindex([("mean", state) for state in states], axis=1)
df_forecast_md_std = df_forecast_md_std.reindex(
[("std", state) for state in states], axis=1
)
df_forecast_md.columns = states
df_forecast_md_std.columns = states
df_forecast_md = df_forecast_md.reset_index()
df_forecast_md_std = df_forecast_md_std.reset_index()
df_forecast_md.date = pd.to_datetime(df_forecast_md.date)
df_forecast_md_std.date = pd.to_datetime(df_forecast_md_std.date)
df_forecast_masks = df_forecast_masks.reindex(
[("mean", state) for state in states], axis=1
)
df_forecast_masks_std = df_forecast_masks_std.reindex(
[("std", state) for state in states], axis=1
)
df_forecast_masks.columns = states
df_forecast_masks_std.columns = states
df_forecast_masks = df_forecast_masks.reset_index()
df_forecast_masks_std = df_forecast_masks_std.reset_index()
df_forecast_masks.date = pd.to_datetime(df_forecast_masks.date)
df_forecast_masks_std.date = pd.to_datetime(df_forecast_masks_std.date)
df_R = df_google[["date", "state"] + mov_values + [val + "_std" for val in mov_values]]
df_R = pd.concat([df_R, df_forecast], ignore_index=True, sort=False)
df_R["policy"] = (df_R.date >= "2020-03-20").astype("int8")
df_md = pd.concat([prop, df_forecast_md.set_index("date")])
df_masks = pd.concat([masks, df_forecast_masks.set_index("date")])
# now we read in the ve time series and create an adjusted timeseries from March 1st
# that includes no effect prior
vaccination_by_state = pd.read_csv(
results_dir + "adjusted_vaccine_ts_delta" + data_date.strftime("%Y-%m-%d") + ".csv",
parse_dates=["date"],
)
# there are a couple of NA's early on in the time series, but this is likely due to slightly different start dates
vaccination_by_state.fillna(1, inplace=True)
vaccination_by_state = vaccination_by_state[["state", "date", "effect"]]
vaccination_by_state = vaccination_by_state.pivot(
index="state", columns="date", values="effect"
) # Convert to matrix form
# initialise a complete dataframe which will be the full VE timeseries plus the forecasted VE
df_ve_delta = pd.DataFrame()
# loop over states and get the offset components of the full VE
before_vacc_dates = pd.date_range(
start_date, vaccination_by_state.columns[0] - timedelta(days=1), freq="d"
)
# a single row of ones covering the dates before any vaccination effect
# (re-indexed below for each of the 8 jurisdictions)
before_vacc_Reff_reduction = pd.DataFrame(np.ones(((1, len(before_vacc_dates)))))
before_vacc_Reff_reduction.columns = before_vacc_dates
for state in states:
    before_vacc_Reff_reduction.index = [state]
# merge the vaccine data and the 1's dataframes
df_ve_delta[state] = pd.concat(
[before_vacc_Reff_reduction.loc[state].T, vaccination_by_state.loc[state].T]
)
# clip off extra days
df_ve_delta = df_ve_delta[
df_ve_delta.index <= pd.to_datetime(today) + timedelta(days=num_forecast_days)
]
# save the forecasted vaccination line
df_ve_delta.to_csv(
results_dir
+ "forecasted_vaccination_delta"
+ data_date.strftime("%Y-%m-%d")
+ ".csv"
)
vaccination_by_state = pd.read_csv(
results_dir
+ "adjusted_vaccine_ts_omicron"
+ data_date.strftime("%Y-%m-%d")
+ ".csv",
parse_dates=["date"],
)
# there are a couple of NA's early on in the time series, but this is likely due to slightly different start dates
vaccination_by_state.fillna(1, inplace=True)
vaccination_by_state = vaccination_by_state[["state", "date", "effect"]]
vaccination_by_state = vaccination_by_state.pivot(
index="state", columns="date", values="effect"
) # Convert to matrix form
# initialise a complete dataframe which will be the full VE timeseries plus the forecasted VE
df_ve_omicron = pd.DataFrame()
# loop over states and get the offset components of the full VE
before_vacc_dates = pd.date_range(
start_date, pd.to_datetime(omicron_start_date) - timedelta(days=1), freq="d"
)
# a single row of ones covering the dates before any vaccination effect
# (re-indexed below for each of the 8 jurisdictions)
before_vacc_Reff_reduction = pd.DataFrame(np.ones(((1, len(before_vacc_dates)))))
before_vacc_Reff_reduction.columns = before_vacc_dates
for state in states:
    before_vacc_Reff_reduction.index = [state]
# merge the vaccine data and the 1's dataframes
df_ve_omicron[state] = pd.concat(
[
before_vacc_Reff_reduction.loc[state].T,
vaccination_by_state.loc[state][
vaccination_by_state.loc[state].index
>= pd.to_datetime(omicron_start_date)
],
]
)
df_ve_omicron = df_ve_omicron[
df_ve_omicron.index <= pd.to_datetime(today) + timedelta(days=num_forecast_days)
]
# save the forecasted vaccination line
df_ve_omicron.to_csv(
results_dir
+ "forecasted_vaccination_omicron"
+ data_date.strftime("%Y-%m-%d")
+ ".csv"
)
print("============")
print("Plotting forecasted estimates")
print("============")
expo_decay = True
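# Microdistancing enters the TP multiplicatively. With expo_decay=True the factor
# computed below is
#     M_d(t) = (1 + theta_md) ** (-p(t)),
# where p(t) is the forecast proportion always microdistancing, so M_d = 1 when
# p = 0 and shrinks towards 1 / (1 + theta_md) as p -> 1 (e.g. theta_md = 0.5 and
# p = 0.8 give roughly 1.5 ** -0.8, i.e. about 0.72). The logistic alternative
# (expo_decay=False) uses 2 * expit(-theta_md * p(t)) instead.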
theta_md = np.tile(df_samples["theta_md"].values, (df_md["NSW"].shape[0], 1))
fig, ax = plt.subplots(figsize=(12, 9), nrows=4, ncols=2, sharex=True, sharey=True)
for i, state in enumerate(plot_states):
# np.random.normal(df_md[state].values, df_md_std.values)
prop_sim = df_md[state].values
if expo_decay:
md = ((1 + theta_md).T ** (-1 * prop_sim)).T
else:
md = 2 * expit(-1 * theta_md * prop_sim[:, np.newaxis])
row = i // 2
col = i % 2
ax[row, col].plot(
df_md[state].index, np.median(md, axis=1), label="Microdistancing"
)
ax[row, col].fill_between(
df_md[state].index,
np.quantile(md, 0.25, axis=1),
np.quantile(md, 0.75, axis=1),
label="Microdistancing",
alpha=0.4,
color="C0",
)
ax[row, col].fill_between(
df_md[state].index,
np.quantile(md, 0.05, axis=1),
np.quantile(md, 0.95, axis=1),
label="Microdistancing",
alpha=0.4,
color="C0",
)
ax[row, col].set_title(state)
ax[row, col].tick_params("x", rotation=45)
ax[row, col].set_xticks(
[df_md[state].index.values[-n_forecast - extra_days_md]],
minor=True,
)
ax[row, col].xaxis.grid(which="minor", linestyle="-.", color="grey", linewidth=1)
fig.text(
0.03,
0.5,
"Multiplicative effect \n of micro-distancing $M_d$",
ha="center",
va="center",
rotation="vertical",
fontsize=20,
)
fig.text(0.5, 0.04, "Date", ha="center", va="center", fontsize=20)
plt.tight_layout(rect=[0.05, 0.04, 1, 1])
fig.savefig(
"figs/"
+ "mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts/md_factor.png",
dpi=144,
)
theta_masks = np.tile(df_samples["theta_masks"].values, (df_masks["NSW"].shape[0], 1))
fig, ax = plt.subplots(figsize=(12, 9), nrows=4, ncols=2, sharex=True, sharey=True)
for i, state in enumerate(plot_states):
# np.random.normal(df_md[state].values, df_md_std.values)
masks_prop_sim = df_masks[state].values
if expo_decay:
mask_wearing_factor = ((1 + theta_masks).T ** (-1 * masks_prop_sim)).T
else:
mask_wearing_factor = 2 * expit(
-1 * theta_masks * masks_prop_sim[:, np.newaxis]
)
row = i // 2
col = i % 2
ax[row, col].plot(
df_masks[state].index,
np.median(mask_wearing_factor, axis=1),
                label="Mask wearing",
)
ax[row, col].fill_between(
df_masks[state].index,
np.quantile(mask_wearing_factor, 0.25, axis=1),
np.quantile(mask_wearing_factor, 0.75, axis=1),
                label="Mask wearing",
alpha=0.4,
color="C0",
)
ax[row, col].fill_between(
df_masks[state].index,
np.quantile(mask_wearing_factor, 0.05, axis=1),
np.quantile(mask_wearing_factor, 0.95, axis=1),
                label="Mask wearing",
alpha=0.4,
color="C0",
)
ax[row, col].set_title(state)
ax[row, col].tick_params("x", rotation=45)
ax[row, col].set_xticks(
[df_masks[state].index.values[-n_forecast - extra_days_masks]], minor=True
)
ax[row, col].xaxis.grid(which="minor", linestyle="-.", color="grey", linewidth=1)
fig.text(
0.03,
0.5,
"Multiplicative effect \n of mask-wearing $M_d$",
ha="center",
va="center",
rotation="vertical",
fontsize=20,
)
fig.text(0.5, 0.04, "Date", ha="center", va="center", fontsize=20)
plt.tight_layout(rect=[0.05, 0.04, 1, 1])
fig.savefig(
"figs/"
+ "mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts/mask_wearing_factor.png",
dpi=144,
)
df_R = df_R.sort_values("date")
# samples = df_samples.sample(n_samples) # test on sample of 2
# keep all samples
samples = df_samples.iloc[:mob_samples, :]
# for strain in ("Delta", "Omicron"):
# samples = df_samples
# flags for advanced scenario modelling
advanced_scenario_modelling = False
save_for_SA = False
# since this can be useful, predictor ordering is:
# ['retail_and_recreation_7days', 'grocery_and_pharmacy_7days', 'parks_7days', 'transit_stations_7days', 'workplaces_7days']
typ = "R_L"
forecast_type = ["R_L"]
for strain in ("Delta", "Omicron"):
print("============")
print("Calculating", strain, "TP")
print("============")
state_Rs = {
"state": [],
"date": [],
"type": [],
"median": [],
"lower": [],
"upper": [],
"bottom": [],
"top": [],
"mean": [],
"std": [],
}
ban = "2020-03-20"
# VIC and NSW allow gatherings of up to 20 people, other jurisdictions allow for
new_pol = "2020-06-01"
expo_decay = True
# start and end date for the third wave
    # Subtract truncation_days to avoid right truncation
third_end_date = data_date - pd.Timedelta(days=truncation_days)
typ_state_R = {}
mob_forecast_date = df_forecast.date.min()
state_key = {
"ACT": "1",
"NSW": "2",
"NT": "3",
"QLD": "4",
"SA": "5",
"TAS": "6",
"VIC": "7",
"WA": "8",
}
total_N_p_third_omicron = 0
for v in third_date_range.values():
tmp = sum(v >= pd.to_datetime(omicron_start_date))
        # only count days on/after the Omicron start date (the else 0 handles jurisdictions with no Omicron overlap, e.g. QLD)
total_N_p_third_omicron += tmp if tmp > 0 else 0
state_R = {}
for (kk, state) in enumerate(states):
        # df_R is already sorted by date above; rows are dates, columns are predictors
df_state = df_R.loc[df_R.state == state]
dd = df_state.date
post_values = samples[predictors].values.T
prop_sim = df_md[state].values
# grab vaccination data
vacc_ts_delta = df_ve_delta[state]
vacc_ts_omicron = df_ve_omicron[state]
        # tile the posterior samples so the theta parameters are (number of dates x mob_samples) arrays
theta_md = np.tile(samples["theta_md"].values, (df_state.shape[0], 1))
theta_masks = np.tile(samples["theta_masks"].values, (df_state.shape[0], 1))
r = samples["r[" + str(kk + 1) + "]"].values
tau = samples["tau[" + str(kk + 1) + "]"].values
m0 = samples["m0[" + str(kk + 1) + "]"].values
m1 = samples["m1[" + str(kk + 1) + "]"].values
# m1 = 1.0
md = ((1 + theta_md).T ** (-1 * prop_sim)).T
        # use this state's mask-wearing proportions (mirrors prop_sim for microdistancing above)
        masks_prop_sim = df_masks[state].values
        masks = ((1 + theta_masks).T ** (-1 * masks_prop_sim)).T
third_states_indices = {
state: index + 1 for (index, state) in enumerate(third_states)
}
third_days = {k: v.shape[0] for (k, v) in third_date_range.items()}
third_days_cumulative = np.append(
[0], np.cumsum([v for v in third_days.values()])
)
vax_idx_ranges = {
k: range(third_days_cumulative[i], third_days_cumulative[i + 1])
for (i, k) in enumerate(third_days.keys())
}
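        # The fitted VE samples (ve_delta[j]) are stored as one flat vector covering
        # every state's third-wave days in sequence, so the cumulative day counts above
        # give the slice of that vector belonging to each state.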
third_days_tot = sum(v for v in third_days.values())
# get the sampled vaccination effect (this will be incomplete as it's only over the fitting period)
sampled_vax_effects_all = samples[
["ve_delta[" + str(j + 1) + "]" for j in range(third_days_tot)]
].T
vacc_tmp = sampled_vax_effects_all.iloc[vax_idx_ranges[state], :]
        # now we layer in the posterior vaccine multiplier effect which will be a (T, mob_samples) array
# get before and after fitting and tile them
vacc_ts_data_before = pd.concat(
[vacc_ts_delta.loc[vacc_ts_delta.index < third_date_range[state][0]]]
* mob_samples,
axis=1,
).to_numpy()
vacc_ts_data_after = pd.concat(
[vacc_ts_delta.loc[vacc_ts_delta.index > third_date_range[state][-1]]]
* mob_samples,
axis=1,
).to_numpy()
# merge in order
vacc_ts_delta = np.vstack(
[vacc_ts_data_before, vacc_tmp, vacc_ts_data_after]
)
        # construct a range of dates for Omicron which starts at the later of the state's third-wave start date and the Omicron start date
third_omicron_date_range = {
k: pd.date_range(
start=max(v[0], pd.to_datetime(omicron_start_date)), end=v[-1]
).values
for (k, v) in third_date_range.items()
}
third_omicron_days = {
k: v.shape[0] for (k, v) in third_omicron_date_range.items()
}
third_omicron_days_cumulative = np.append(
[0], np.cumsum([v for v in third_omicron_days.values()])
)
omicron_ve_idx_ranges = {
k: range(
third_omicron_days_cumulative[i],
third_omicron_days_cumulative[i + 1],
)
for (i, k) in enumerate(third_omicron_days.keys())
}
third_omicron_days_tot = sum(v for v in third_omicron_days.values())
# get the sampled vaccination effect (this will be incomplete as it's only over the fitting period)
sampled_vax_effects_all = (
samples[
["ve_omicron[" + str(j + 1) + "]" for j in range(third_omicron_days_tot)]
].T
)
vacc_tmp = sampled_vax_effects_all.iloc[omicron_ve_idx_ranges[state], :]
        # now we layer in the posterior vaccine multiplier effect which will be a (T, mob_samples) array
# get before and after fitting and tile them
vacc_ts_data_before = pd.concat(
[
vacc_ts_omicron.loc[
vacc_ts_omicron.index < third_omicron_date_range[state][0]
]
]
* mob_samples,
axis=1,
).to_numpy()
vacc_ts_data_after = pd.concat(
[
vacc_ts_omicron.loc[
vacc_ts_omicron.index > third_date_range[state][-1]
]
]
* mob_samples,
axis=1,
).to_numpy()
# merge in order
vacc_ts_omicron = np.vstack(
[vacc_ts_data_before, vacc_tmp, vacc_ts_data_after]
)
# setup some variables for handling the omicron starts
third_states_indices = {
state: index + 1 for (index, state) in enumerate(third_states)
}
omicron_start_day = (
pd.to_datetime(omicron_start_date) - pd.to_datetime(start_date)
).days
days_into_omicron = np.cumsum(
np.append(
[0],
[
(v >= pd.to_datetime(omicron_start_date)).sum()
for v in third_date_range.values()
],
)
)
idx = {}
kk = 0
for k in third_date_range.keys():
idx[k] = range(days_into_omicron[kk], days_into_omicron[kk + 1])
kk += 1
# tile the reduction in vaccination effect for omicron (i.e. VE is (1+r)*VE)
voc_vacc_product = np.zeros_like(vacc_ts_delta)
# calculate the voc effects
voc_multiplier_delta = samples["voc_effect_delta"].values
voc_multiplier_omicron = samples["voc_effect_omicron"].values
# sample the right R_L
sim_R = samples["R_Li[" + state_key[state] + "]"].values
for n in range(mob_samples):
# add gaussian noise to predictors before forecast
# df_state.loc[
df_state.loc[df_state.date < mob_forecast_date, predictors] = (
state_Rmed[state][:, :, n] / 100
)
# add gaussian noise to predictors after forecast
df_state.loc[df_state.date >= mob_forecast_date, predictors] = (
state_sims[state][:, :, n] / 100
)
## ADVANCED SCENARIO MODELLING - USE ONLY FOR POINT ESTIMATES
# set non-grocery values to 0
if advanced_scenario_modelling:
df_state.loc[:, predictors[0]] = 0
df_state.loc[:, predictors[2]] = 0
df_state.loc[:, predictors[3]] = 0
df_state.loc[:, predictors[4]] = 0
df1 = df_state.loc[df_state.date <= ban]
X1 = df1[predictors] # N by K
md[: X1.shape[0], :] = 1
if n == 0:
# initialise arrays (loggodds)
# N by K times (Nsamples by K )^T = Ndate by Nsamples
logodds = X1 @ post_values[:, n]
df2 = df_state.loc[
(df_state.date > ban) & (df_state.date < new_pol)
]
df3 = df_state.loc[df_state.date >= new_pol]
X2 = df2[predictors]
X3 = df3[predictors]
logodds = np.append(logodds, X2 @ post_values[:, n], axis=0)
logodds = np.append(logodds, X3 @ post_values[:, n], axis=0)
else:
                # concatenate to pre-existing logodds matrix
logodds1 = X1 @ post_values[:, n]
df2 = df_state.loc[
(df_state.date > ban) & (df_state.date < new_pol)
]
df3 = df_state.loc[df_state.date >= new_pol]
X2 = df2[predictors]
X3 = df3[predictors]
prop2 = df_md.loc[ban:new_pol, state].values
prop3 = df_md.loc[new_pol:, state].values
logodds2 = X2 @ post_values[:, n]
logodds3 = X3 @ post_values[:, n]
logodds_sample = np.append(logodds1, logodds2, axis=0)
logodds_sample = np.append(logodds_sample, logodds3, axis=0)
# concatenate to previous
logodds = np.vstack((logodds, logodds_sample))
        # create a matrix of mob_samples realisations which is an indicator of the voc (delta right now)
# which will be 1 up until the voc_start_date and then it will be values from the posterior sample
voc_multiplier_alpha = samples["voc_effect_alpha"].values
voc_multiplier_delta = samples["voc_effect_delta"].values
voc_multiplier_omicron = samples["voc_effect_omicron"].values
# number of days into omicron forecast
tt = 0
# loop over days in third wave and apply the appropriate form (i.e. decay or not)
# note that in here we apply the entire sample to the vaccination data to create a days by samples array
tmp_date = pd.to_datetime("2020-03-01")
# get the correct Omicron start date
# omicron_start_date_tmp = np.maximum(
# pd.to_datetime(omicron_start_date),
# pd.to_datetime(third_date_range[state][0]),
# )
omicron_start_date_tmp = pd.to_datetime(omicron_start_date)
omicron_start_day_tmp = (
pd.to_datetime(omicron_start_date_tmp) - pd.to_datetime(start_date)
).days
for ii in range(mob_samples):
            # before Omicron is introduced in a jurisdiction, we consider which period we're in:
# 1. Wildtype
# 2. Alpha
# 3. Delta
voc_vacc_product[:, ii] = vacc_ts_delta[:, ii]
idx_start = df_state.loc[df_state.date < alpha_start_date].shape[0]
idx_end = df_state.loc[df_state.date < delta_start_date].shape[0]
voc_vacc_product[idx_start:idx_end, ii] *= voc_multiplier_alpha[ii]
idx_start = idx_end
idx_end = df_state.loc[df_state.date < omicron_start_date_tmp].shape[0]
voc_vacc_product[idx_start:idx_end, ii] *= voc_multiplier_delta[ii]
idx_start = idx_end
idx_end = np.shape(voc_vacc_product)[0]
if strain == "Delta":
voc_vacc_product[idx_start:idx_end, ii] *= voc_multiplier_delta[ii]
elif strain == "Omicron":
# if omicron we need to account for the Omicron VE prior to the introduction of
# omicron in mid November
voc_vacc_product[idx_start:idx_end, ii] = (
vacc_ts_omicron[idx_start:idx_end, ii] * voc_multiplier_omicron[ii]
)
# save the components of the TP
pd.DataFrame(sim_R).to_csv(results_dir + "baseline_R_L_" + strain + ".csv")
pd.DataFrame(md).to_csv(results_dir + "md_" + strain + ".csv")
pd.DataFrame(masks).to_csv(results_dir + "masks_" + strain + ".csv")
macro = 2 * expit(logodds.T)
pd.DataFrame(macro).to_csv(results_dir + "macro_" + strain + ".csv")
pd.DataFrame(voc_vacc_product).to_csv(results_dir + "voc_vacc_product_" + strain + ".csv")
# calculate TP
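        # The local transmission potential combines the fitted components multiplicatively:
        #     TP(t) = 2 * expit(X(t) @ beta) * M_d(t) * M_m(t) * R_Li * V(t),
        # where X(t) holds the (noise-perturbed) mobility predictors, M_d and M_m are the
        # microdistancing and mask-wearing factors, R_Li is the state-level baseline and
        # V(t) is the vaccination effect scaled by the relevant variant-of-concern multiplier.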
R_L = (
2 * expit(logodds.T)
* md
* masks
* sim_R
* voc_vacc_product
)
        # now we increase TP by 15% for the school-reopening scenario (this effect could in
        # principle be inferred from data, but that would be difficult given the lockdowns
        # and various interruptions since March 2020)
if scenarios[state] == "school_opening_2022":
R_L[dd.values >= pd.to_datetime(scenario_dates[state]), :] = (
1.15 * R_L[dd.values >= pd.to_datetime(scenario_dates[state]), :]
)
# calculate summary stats
R_L_med = np.median(R_L, axis=1)
R_L_lower = np.percentile(R_L, 25, axis=1)
R_L_upper = np.percentile(R_L, 75, axis=1)
R_L_bottom = np.percentile(R_L, 5, axis=1)
R_L_top = np.percentile(R_L, 95, axis=1)
# R_L
state_Rs["state"].extend([state] * df_state.shape[0])
state_Rs["type"].extend([typ] * df_state.shape[0])
state_Rs["date"].extend(dd.values) # repeat mob_samples times?
state_Rs["lower"].extend(R_L_lower)
state_Rs["median"].extend(R_L_med)
state_Rs["upper"].extend(R_L_upper)
state_Rs["top"].extend(R_L_top)
state_Rs["bottom"].extend(R_L_bottom)
state_Rs["mean"].extend(np.mean(R_L, axis=1))
state_Rs["std"].extend(np.std(R_L, axis=1))
state_R[state] = R_L
# generate a summary for the R_I
for state in states:
# R_I
if strain == "Delta":
R_I = samples["R_I"].values[:df_state.shape[0]]
elif strain == "Omicron":
# if Omicron period, then we need to multiply in the VoC effect as there's a period
# in the fitting where Delta and Omicron overlap (i.e. R_I = R_I * P(t) where P(t) is
# a product term).
R_I = samples["R_I_omicron"].values[:df_state.shape[0]]
state_Rs["state"].extend([state] * df_state.shape[0])
state_Rs["type"].extend(["R_I"] * df_state.shape[0])
state_Rs["date"].extend(dd.values)
state_Rs["lower"].extend(np.repeat(np.percentile(R_I, 25), df_state.shape[0]))
state_Rs["median"].extend(np.repeat(np.median(R_I), df_state.shape[0]))
state_Rs["upper"].extend(np.repeat(np.percentile(R_I, 75), df_state.shape[0]))
state_Rs["top"].extend(np.repeat(np.percentile(R_I, 95), df_state.shape[0]))
state_Rs["bottom"].extend(np.repeat(np.percentile(R_I, 5), df_state.shape[0]))
state_Rs["mean"].extend(np.repeat(np.mean(R_I), df_state.shape[0]))
state_Rs["std"].extend(np.repeat(np.std(R_I), df_state.shape[0]))
df_Rhats = pd.DataFrame().from_dict(state_Rs)
df_Rhats = df_Rhats.set_index(["state", "date", "type"])
d = pd.DataFrame()
for state in states:
for i, typ in enumerate(forecast_type):
if i == 0:
t = pd.DataFrame.from_dict(state_R[state])
t["date"] = dd.values
t["state"] = state
t["type"] = typ
else:
temp = pd.DataFrame.from_dict(state_R[state])
temp["date"] = dd.values
temp["state"] = state
temp["type"] = typ
t = t.append(temp)
# R_I
if strain == "Delta":
# use the Delta import reproduction number before Omicron starts
i = pd.DataFrame(np.tile(samples["R_I"].values, (len(dd.values), 1)))
elif strain == "Omicron":
# use the Omicron import reproduction number after Omicron starts
i = pd.DataFrame(np.tile(samples["R_I_omicron"].values, (len(dd.values), 1)))
i["date"] = dd.values
i["type"] = "R_I"
i["state"] = state
t = t.append(i)
d = d.append(t)
d = d.set_index(["state", "date", "type"])
df_Rhats = df_Rhats.join(d)
df_Rhats = df_Rhats.reset_index()
df_Rhats.state = df_Rhats.state.astype(str)
df_Rhats.type = df_Rhats.type.astype(str)
fig, ax = plt.subplots(figsize=(12, 9), nrows=4, ncols=2, sharex=True, sharey=True)
for i, state in enumerate(plot_states):
row = i // 2
col = i % 2
plot_df = df_Rhats.loc[(df_Rhats.state == state) & (df_Rhats.type == "R_L")].copy()
# split the TP into pre data date and after
plot_df_backcast = plot_df.loc[plot_df["date"] <= data_date].copy()
plot_df_forecast = plot_df.loc[plot_df["date"] > data_date].copy()
# plot the backcast TP
ax[row, col].plot(plot_df_backcast.date, plot_df_backcast["median"], color="C0")
ax[row, col].fill_between(
plot_df_backcast.date,
plot_df_backcast["lower"],
plot_df_backcast["upper"],
alpha=0.4,
color="C0",
)
ax[row, col].fill_between(
plot_df_backcast.date,
plot_df_backcast["bottom"],
plot_df_backcast["top"],
alpha=0.4,
color="C0",
)
# plot the forecast TP
ax[row, col].plot(plot_df_forecast.date, plot_df_forecast["median"], color="C1")
ax[row, col].fill_between(
plot_df_forecast.date,
plot_df_forecast["lower"],
plot_df_forecast["upper"],
alpha=0.4,
color="C1",
)
ax[row, col].fill_between(
plot_df_forecast.date,
plot_df_forecast["bottom"],
plot_df_forecast["top"],
alpha=0.4,
color="C1",
)
ax[row, col].tick_params("x", rotation=90)
ax[row, col].set_title(state)
ax[row, col].set_yticks(
[1],
minor=True,
)
ax[row, col].set_yticks([0, 2, 4, 6], minor=False)
ax[row, col].set_yticklabels([0, 2, 4, 6], minor=False)
ax[row, col].yaxis.grid(which="minor", linestyle="--", color="black", linewidth=2)
ax[row, col].set_ylim((0, 6))
# ax[row, col].set_xticks([plot_df.date.values[-n_forecast]], minor=True)
ax[row, col].axvline(data_date, ls="-.", color="black", lw=1)
# plot window start date
plot_window_start_date = min(
pd.to_datetime(today) - timedelta(days=6 * 30),
sim_start_date - timedelta(days=truncation_days),
)
# create a plot window over the last six months
ax[row, col].set_xlim(
plot_window_start_date,
| pd.to_datetime(today) | pandas.to_datetime |
import pandas as pd
import numpy as np
from copy import *
from bisect import *
from scipy.optimize import curve_fit
from sklearn.metrics import *
from collections import defaultdict as defd
import datetime,pickle
from DemandHelper import *
import warnings
warnings.filterwarnings("ignore")
#################################################################
#################################################################
#################################################################
class DemandForecastModel:
def __init__(self,rank_model='',forecast='',rmodel_beta=1.0,final_beta=1.0):
if rank_model != '':
self.ingest(rank_model,forecast,rmodel_beta,final_beta)
def ingest(self,rank_model,forecast,rmodel_beta=1.0,final_beta=1.0):
self.rank_model = rank_model
self.rmodel_beta = rmodel_beta
self.forecast = forecast
self.final_beta = final_beta
self.alldates = sorted(forecast.index)
def predict(self,rank=10000,date='2018-07-04',buybox=100):
if 'str' not in str(type(date)): date = str(date)[:10]
pred1 = self.rank_model.predict([rank])[0]
pred2 = pred1*self.rmodel_beta
d = self.forecast.loc[date]
mid,lo,hi = d['yhat'],d['yhat_lower'],d['yhat_upper']
rdr_preds = np.array([lo,mid,hi])
pred3 = pred2*rdr_preds
pred4 = pred3*self.final_beta
pred5 = global2local(pred4,buybox)
return pred5
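# The predict() pipeline above: the sales-rank model gives a base demand estimate,
# which is scaled by rmodel_beta, multiplied by the (lower, mid, upper) band of the
# date-level forecast, scaled by final_beta, and finally passed through global2local
# (imported from DemandHelper), presumably to adjust market-wide demand for the
# buy-box percentage. Hypothetical usage, with illustrative argument values:
# dfm = DemandForecastModel(rank_model, forecast, rmodel_beta=0.9, final_beta=1.1)
# preds = dfm.predict(rank=5000, date='2018-07-04', buybox=85)  # (lower, mid, upper)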
#################################################################
#################################################################
# Export a fitted model to text file:
# These filenames normally end in '.pkl'
def ExportModel(filename,model_object):
pickle.dump(model_object, open(filename, 'wb'))
print('Model Saved TO: '+filename)
# Import a fitted model from text file:
# These filenames normally end in '.pkl'
def ImportModel(filename):
model_object = pickle.load(open(filename, 'rb'))
print('Model Imported FROM: '+filename)
return model_object
def GetToday():
today = datetime.datetime.today()
return str(today)[:10]
#################################################################
#################################################################
#################################################################
short2long = {
'H&G' : 'Home & Garden',
'L&G' : 'Lawn & Garden',
'SPORTS' : 'Sports & Outdoors',
'HI' : 'Home Improvement',
'TOY' : 'Toys & Games',
'KIT' : 'Home & Kitchen',
}
long2short = {}
for short in sorted(short2long):
long2short[short2long[short]] = short
Shorts = sorted(short2long)
Longs = sorted(long2short)
def ConvertToShort(thing):
if thing in long2short: return long2short[thing]
return thing
Models2 = {}
for SH in Shorts:
fn = 'MODELS/'+SH+'/DFM2.pkl'
model = ImportModel(fn)
Models2[SH] = model
AllDates = sorted(set([str(a)[:10] for a in Models2['H&G'].alldates]))
#################################################################
#################################################################
# Returns a list of valid category names:
def GetCategories2():
return sorted(long2short)
# SPREETAIL DEMAND PREDICTION:
# cat : Category (String or List)
# rank : Sales Rank (Integer, 2-List, Long-List)
# date1 : First Date of Forecast ("2018-09-03")
# date2 : Final Date of Forecast OR # Days Forward ("2018-10-03" or 30)
# bb_ratio : BuyBox Percent (100.0)
# md_ratio : Marketplace Distribution Percent
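# Example call, with illustrative argument values (see the parameter notes above):
# SpreetailPredict('TOY', 15000, date1='2018-09-03', date2=30, bb_ratio=0.95, md_ratio=0.62)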
def SpreetailPredict(cat,rank,date1='today',date2=30,bb_ratio=1.0,md_ratio=0.62):
if (not date1) or (str(date1).lower()=='today'): date1 = GetToday()
index1 = bisect_left(AllDates,date1)
if len(str(date2)) >10: date2 = str(date2)[:10]
if len(str(date2))==10: index2 = bisect_left(AllDates,date2)
else: index2 = index1+int(date2)
index_dif = abs(index2-index1)
index1 = min([index1,index2])
index2 = index1+index_dif
DateRange = AllDates[index1:index2+1]
LEN = len(DateRange)
#--------------------------------------
tdf = | pd.DataFrame() | pandas.DataFrame |
import random
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
DataFrame,
NaT,
Timestamp,
date_range,
)
import pandas._testing as tm
class TestDataFrameSortValues:
def test_sort_values(self):
frame = DataFrame(
[[1, 1, 2], [3, 1, 0], [4, 5, 6]], index=[1, 2, 3], columns=list("ABC")
)
# by column (axis=0)
sorted_df = frame.sort_values(by="A")
indexer = frame["A"].argsort().values
expected = frame.loc[frame.index[indexer]]
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by="A", ascending=False)
indexer = indexer[::-1]
expected = frame.loc[frame.index[indexer]]
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by="A", ascending=False)
tm.assert_frame_equal(sorted_df, expected)
# GH4839
sorted_df = frame.sort_values(by=["A"], ascending=[False])
tm.assert_frame_equal(sorted_df, expected)
# multiple bys
sorted_df = frame.sort_values(by=["B", "C"])
expected = frame.loc[[2, 1, 3]]
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=["B", "C"], ascending=False)
tm.assert_frame_equal(sorted_df, expected[::-1])
sorted_df = frame.sort_values(by=["B", "A"], ascending=[True, False])
tm.assert_frame_equal(sorted_df, expected)
msg = "No axis named 2 for object type DataFrame"
with pytest.raises(ValueError, match=msg):
frame.sort_values(by=["A", "B"], axis=2, inplace=True)
# by row (axis=1): GH#10806
sorted_df = frame.sort_values(by=3, axis=1)
expected = frame
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=3, axis=1, ascending=False)
expected = frame.reindex(columns=["C", "B", "A"])
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=[1, 2], axis="columns")
expected = frame.reindex(columns=["B", "A", "C"])
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=[1, 3], axis=1, ascending=[True, False])
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=[1, 3], axis=1, ascending=False)
expected = frame.reindex(columns=["C", "B", "A"])
tm.assert_frame_equal(sorted_df, expected)
msg = r"Length of ascending \(5\) != length of by \(2\)"
with pytest.raises(ValueError, match=msg):
frame.sort_values(by=["A", "B"], axis=0, ascending=[True] * 5)
def test_sort_values_by_empty_list(self):
# https://github.com/pandas-dev/pandas/issues/40258
expected = DataFrame({"a": [1, 4, 2, 5, 3, 6]})
result = expected.sort_values(by=[])
tm.assert_frame_equal(result, expected)
assert result is not expected
def test_sort_values_inplace(self):
frame = DataFrame(
np.random.randn(4, 4), index=[1, 2, 3, 4], columns=["A", "B", "C", "D"]
)
sorted_df = frame.copy()
return_value = sorted_df.sort_values(by="A", inplace=True)
assert return_value is None
expected = frame.sort_values(by="A")
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
return_value = sorted_df.sort_values(by=1, axis=1, inplace=True)
assert return_value is None
expected = frame.sort_values(by=1, axis=1)
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
return_value = sorted_df.sort_values(by="A", ascending=False, inplace=True)
assert return_value is None
expected = frame.sort_values(by="A", ascending=False)
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
return_value = sorted_df.sort_values(
by=["A", "B"], ascending=False, inplace=True
)
assert return_value is None
expected = frame.sort_values(by=["A", "B"], ascending=False)
tm.assert_frame_equal(sorted_df, expected)
def test_sort_values_multicolumn(self):
A = np.arange(5).repeat(20)
B = np.tile(np.arange(5), 20)
random.shuffle(A)
random.shuffle(B)
frame = DataFrame({"A": A, "B": B, "C": np.random.randn(100)})
result = frame.sort_values(by=["A", "B"])
indexer = np.lexsort((frame["B"], frame["A"]))
expected = frame.take(indexer)
tm.assert_frame_equal(result, expected)
result = frame.sort_values(by=["A", "B"], ascending=False)
indexer = np.lexsort(
(frame["B"].rank(ascending=False), frame["A"].rank(ascending=False))
)
expected = frame.take(indexer)
tm.assert_frame_equal(result, expected)
result = frame.sort_values(by=["B", "A"])
indexer = np.lexsort((frame["A"], frame["B"]))
expected = frame.take(indexer)
tm.assert_frame_equal(result, expected)
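    # Note: np.lexsort uses its *last* key as the primary sort key, so
    # np.lexsort((frame["B"], frame["A"])) orders by "A" first and then "B",
    # which is why it reproduces sort_values(by=["A", "B"]) above.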
def test_sort_values_multicolumn_uint64(self):
# GH#9918
# uint64 multicolumn sort
df = DataFrame(
{
"a": pd.Series([18446637057563306014, 1162265347240853609]),
"b": pd.Series([1, 2]),
}
)
df["a"] = df["a"].astype(np.uint64)
result = df.sort_values(["a", "b"])
expected = DataFrame(
{
"a": pd.Series([18446637057563306014, 1162265347240853609]),
"b": pd.Series([1, 2]),
},
index=pd.Index([1, 0]),
)
tm.assert_frame_equal(result, expected)
def test_sort_values_nan(self):
# GH#3917
df = DataFrame(
{"A": [1, 2, np.nan, 1, 6, 8, 4], "B": [9, np.nan, 5, 2, 5, 4, 5]}
)
# sort one column only
expected = DataFrame(
{"A": [np.nan, 1, 1, 2, 4, 6, 8], "B": [5, 9, 2, np.nan, 5, 5, 4]},
index=[2, 0, 3, 1, 6, 4, 5],
)
sorted_df = df.sort_values(["A"], na_position="first")
tm.assert_frame_equal(sorted_df, expected)
expected = DataFrame(
{"A": [np.nan, 8, 6, 4, 2, 1, 1], "B": [5, 4, 5, 5, np.nan, 9, 2]},
index=[2, 5, 4, 6, 1, 0, 3],
)
sorted_df = df.sort_values(["A"], na_position="first", ascending=False)
tm.assert_frame_equal(sorted_df, expected)
expected = df.reindex(columns=["B", "A"])
sorted_df = df.sort_values(by=1, axis=1, na_position="first")
tm.assert_frame_equal(sorted_df, expected)
# na_position='last', order
expected = DataFrame(
{"A": [1, 1, 2, 4, 6, 8, np.nan], "B": [2, 9, np.nan, 5, 5, 4, 5]},
index=[3, 0, 1, 6, 4, 5, 2],
)
sorted_df = df.sort_values(["A", "B"])
tm.assert_frame_equal(sorted_df, expected)
# na_position='first', order
expected = DataFrame(
{"A": [np.nan, 1, 1, 2, 4, 6, 8], "B": [5, 2, 9, np.nan, 5, 5, 4]},
index=[2, 3, 0, 1, 6, 4, 5],
)
sorted_df = df.sort_values(["A", "B"], na_position="first")
tm.assert_frame_equal(sorted_df, expected)
# na_position='first', not order
expected = DataFrame(
{"A": [np.nan, 1, 1, 2, 4, 6, 8], "B": [5, 9, 2, np.nan, 5, 5, 4]},
index=[2, 0, 3, 1, 6, 4, 5],
)
sorted_df = df.sort_values(["A", "B"], ascending=[1, 0], na_position="first")
tm.assert_frame_equal(sorted_df, expected)
# na_position='last', not order
expected = DataFrame(
{"A": [8, 6, 4, 2, 1, 1, np.nan], "B": [4, 5, 5, np.nan, 2, 9, 5]},
index=[5, 4, 6, 1, 3, 0, 2],
)
sorted_df = df.sort_values(["A", "B"], ascending=[0, 1], na_position="last")
tm.assert_frame_equal(sorted_df, expected)
def test_sort_values_stable_descending_sort(self):
# GH#6399
df = DataFrame(
[[2, "first"], [2, "second"], [1, "a"], [1, "b"]],
columns=["sort_col", "order"],
)
sorted_df = df.sort_values(by="sort_col", kind="mergesort", ascending=False)
tm.assert_frame_equal(df, sorted_df)
@pytest.mark.parametrize(
"expected_idx_non_na, ascending",
[
[
[3, 4, 5, 0, 1, 8, 6, 9, 7, 10, 13, 14],
[True, True],
],
[
[0, 3, 4, 5, 1, 8, 6, 7, 10, 13, 14, 9],
[True, False],
],
[
[9, 7, 10, 13, 14, 6, 8, 1, 3, 4, 5, 0],
[False, True],
],
[
[7, 10, 13, 14, 9, 6, 8, 1, 0, 3, 4, 5],
[False, False],
],
],
)
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_sort_values_stable_multicolumn_sort(
self, expected_idx_non_na, ascending, na_position
):
# GH#38426 Clarify sort_values with mult. columns / labels is stable
df = DataFrame(
{
"A": [1, 2, np.nan, 1, 1, 1, 6, 8, 4, 8, 8, np.nan, np.nan, 8, 8],
"B": [9, np.nan, 5, 2, 2, 2, 5, 4, 5, 3, 4, np.nan, np.nan, 4, 4],
}
)
# All rows with NaN in col "B" only have unique values in "A", therefore,
# only the rows with NaNs in "A" have to be treated individually:
expected_idx = (
[11, 12, 2] + expected_idx_non_na
if na_position == "first"
else expected_idx_non_na + [2, 11, 12]
)
expected = df.take(expected_idx)
sorted_df = df.sort_values(
["A", "B"], ascending=ascending, na_position=na_position
)
tm.assert_frame_equal(sorted_df, expected)
def test_sort_values_stable_categorial(self):
# GH#16793
df = DataFrame({"x": Categorical(np.repeat([1, 2, 3, 4], 5), ordered=True)})
expected = df.copy()
sorted_df = df.sort_values("x", kind="mergesort")
tm.assert_frame_equal(sorted_df, expected)
def test_sort_values_datetimes(self):
# GH#3461, argsort / lexsort differences for a datetime column
df = DataFrame(
["a", "a", "a", "b", "c", "d", "e", "f", "g"],
columns=["A"],
index=date_range("20130101", periods=9),
)
dts = [
Timestamp(x)
for x in [
"2004-02-11",
"2004-01-21",
"2004-01-26",
"2005-09-20",
"2010-10-04",
"2009-05-12",
"2008-11-12",
"2010-09-28",
"2010-09-28",
]
]
df["B"] = dts[::2] + dts[1::2]
df["C"] = 2.0
df["A1"] = 3.0
df1 = df.sort_values(by="A")
df2 = df.sort_values(by=["A"])
tm.assert_frame_equal(df1, df2)
df1 = df.sort_values(by="B")
df2 = df.sort_values(by=["B"])
tm.assert_frame_equal(df1, df2)
df1 = df.sort_values(by="B")
df2 = df.sort_values(by=["C", "B"])
tm.assert_frame_equal(df1, df2)
def test_sort_values_frame_column_inplace_sort_exception(self, float_frame):
s = float_frame["A"]
with pytest.raises(ValueError, match="This Series is a view"):
s.sort_values(inplace=True)
cp = s.copy()
cp.sort_values() # it works!
def test_sort_values_nat_values_in_int_column(self):
# GH#14922: "sorting with large float and multiple columns incorrect"
# cause was that the int64 value NaT was considered as "na". Which is
# only correct for datetime64 columns.
int_values = (2, int(NaT.value))
float_values = (2.0, -1.797693e308)
df = DataFrame(
{"int": int_values, "float": float_values}, columns=["int", "float"]
)
df_reversed = DataFrame(
{"int": int_values[::-1], "float": float_values[::-1]},
columns=["int", "float"],
index=[1, 0],
)
# NaT is not a "na" for int64 columns, so na_position must not
# influence the result:
df_sorted = df.sort_values(["int", "float"], na_position="last")
tm.assert_frame_equal(df_sorted, df_reversed)
df_sorted = df.sort_values(["int", "float"], na_position="first")
tm.assert_frame_equal(df_sorted, df_reversed)
# reverse sorting order
df_sorted = df.sort_values(["int", "float"], ascending=False)
tm.assert_frame_equal(df_sorted, df)
# and now check if NaT is still considered as "na" for datetime64
# columns:
df = DataFrame(
{"datetime": [Timestamp("2016-01-01"), NaT], "float": float_values},
columns=["datetime", "float"],
)
df_reversed = DataFrame(
{"datetime": [NaT, Timestamp("2016-01-01")], "float": float_values[::-1]},
columns=["datetime", "float"],
index=[1, 0],
)
df_sorted = df.sort_values(["datetime", "float"], na_position="first")
tm.assert_frame_equal(df_sorted, df_reversed)
df_sorted = df.sort_values(["datetime", "float"], na_position="last")
tm.assert_frame_equal(df_sorted, df)
# Ascending should not affect the results.
df_sorted = df.sort_values(["datetime", "float"], ascending=False)
tm.assert_frame_equal(df_sorted, df)
def test_sort_nat(self):
# GH 16836
d1 = [Timestamp(x) for x in ["2016-01-01", "2015-01-01", np.nan, "2016-01-01"]]
d2 = [
Timestamp(x)
for x in ["2017-01-01", "2014-01-01", "2016-01-01", "2015-01-01"]
]
        df = DataFrame({"a": d1, "b": d2}, index=[0, 1, 2, 3])
'''GDELTeda.py
Project: WGU Data Management/Analytics Undergraduate Capstone
<NAME>
August 2021
Class for collecting Pymongo and Pandas operations to automate EDA on
subsets of GDELT records (Events/Mentions, GKG, or joins).
Basic use should be by import and implementation within an IDE, or by editing
section # C00 and running this script directly.
Primary member functions include descriptive docstrings for their intent and
use.
WARNING: project file operations are based on relative pathing from the
'scripts' directory this Python script is located in, given the creation of
directories 'GDELTdata' and 'EDAlogs' parallel to 'scripts' upon first
GDELTbase and GDELTeda class initializations. If those directories are not
already present, a fallback method for string-literal directory reorientation
may be found in '__init__()' at this tag: # A02b - Project directory path.
Specification for any given user's main project directory should be made for
that os.chdir() call.
See also GDELTbase.py, tag # A01a - backup path specification, as any given
user's project directory must be specified there, also.
Contents:
A00 - GDELTeda
A01 - shared class data
A02 - __init__ with instanced data
A02a - Project directory maintenance
A02b - Project directory path specification
Note: Specification at A02b should be changed to suit a user's desired
directory structure, given their local filesystem.
B00 - class methods
B01 - batchEDA()
B02 - eventsBatchEDA()
B03 - mentionsBatchEDA()
B04 - gkgBatchEDA()
Note: see GDELTedaGKGhelpers.py for helper function code & docs
B05 - realtimeEDA()
B06 - loopEDA()
C00 - main w/ testing
C01 - previously-run GDELT realtime EDA testing
'''
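# Example use from an IDE or interactive session (a minimal sketch, assuming the
# MongoDB collections have already been populated via GDELTbase and the project
# directory layout described above is in place; nothing here is executed by this
# module):
#
#   from GDELTeda import GDELTeda
#   eda = GDELTeda(tableList=['events', 'mentions', 'gkg'])
#   eda.batchEDA(tableList=['events', 'mentions', 'gkg'])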
import json
import multiprocessing
import numpy as np
import os
import pandas as pd
import pymongo
import shutil
import wget
from datetime import datetime, timedelta, timezone
from GDELTbase import GDELTbase
from GDELTedaGKGhelpers import GDELTedaGKGhelpers
from pandas_profiling import ProfileReport
from pprint import pprint as pp
from time import time, sleep
from urllib.error import HTTPError
from zipfile import ZipFile as zf
# A00
class GDELTeda:
'''Collects Pymongo and Pandas operations for querying GDELT records
subsets and performing semi-automated EDA.
Shared class data:
-----------------
logPath - dict
Various os.path objects for EDA log storage.
configFilePaths - dict
Various os.path objects for pandas_profiling.ProfileReport
configuration files, copied to EDA log storage directories upon
__init__, for use in report generation.
Instanced class data:
--------------------
gBase - GDELTbase instance
Used for class member functions, essential for realtimeEDA().
Class methods:
-------------
batchEDA()
eventsBatchEDA()
mentionsBatchEDA()
gkgBatchEDA()
realtimeEDA()
loopEDA()
Helper functions from GDELTedaGKGhelpers.py used in gkgBatchEDA():
pullMainGKGcolumns()
applyDtypes()
convertDatetimes()
convertGKGV15Tone()
mainReport()
locationsReport()
countsReport()
themesReport()
personsReport()
organizationsReport()
'''
# A01 - shared class data
# These paths are set relative to the location of this script, one directory
# up and in 'EDAlogs' parallel to the script directory, which can be named
# arbitrarily.
logPath = {}
logPath['base'] = os.path.join(os.path.abspath(__file__),
os.path.realpath('..'),
'EDAlogs')
logPath['events'] = {}
logPath['events'] = {
'table' : os.path.join(logPath['base'], 'events'),
'batch' : os.path.join(logPath['base'], 'events', 'batch'),
'realtime' : os.path.join(logPath['base'], 'events', 'realtime'),
}
logPath['mentions'] = {
'table' : os.path.join(logPath['base'], 'mentions'),
'batch' : os.path.join(logPath['base'], 'mentions', 'batch'),
'realtime' : os.path.join(logPath['base'], 'mentions', 'realtime'),
}
logPath['gkg'] = {
'table' : os.path.join(logPath['base'], 'gkg'),
'batch' : os.path.join(logPath['base'], 'gkg', 'batch'),
'realtime' : os.path.join(logPath['base'], 'gkg', 'realtime'),
}
# Turns out, the following isn't the greatest way of keeping track
# of each configuration file. It's easiest to just leave them in the
# exact directories where ProfileReport.to_html() is aimed (via
  # os.chdir()), since it's awkward to pass extra parameters into
  # multiprocessing Pool.map() calls.
# Still, these can and are used in realtimeEDA(), since the size of
# just the most recent datafiles should permit handling them without
# regard for Pandas DataFrame RAM impact (it's greedy, easiest method
# for mitigation is multiprocessing threads, that shouldn't be
# necessary for realtimeEDA()).
# Regardless, all these entries are for copying ProfileReport config
# files to their appropriate directories for use, given base-copies
# present in the 'scripts' directory. Those base copies may be edited
# in 'scripts', since each file will be copied from there.
configFilePaths = {}
configFilePaths['events'] = {
'batch' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTeventsEDAconfig_batch.yaml"),
'realtime' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTeventsEDAconfig_realtime.yaml"),
}
configFilePaths['mentions'] = {
'batch' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTmentionsEDAconfig_batch.yaml"),
'realtime' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTmentionsEDAconfig_realtime.yaml"),
}
configFilePaths['gkg'] = {}
configFilePaths['gkg']['batch'] = {
'main' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTgkgMainEDAconfig_batch.yaml"),
'locations' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTgkgLocationsEDAconfig_batch.yaml"),
'counts' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTgkgCountsEDAconfig_batch.yaml"),
'themes' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTgkgThemesEDAconfig_batch.yaml"),
'persons' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTgkgPersonsEDAconfig_batch.yaml"),
'organizations' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTgkgOrganizationsEDAconfig_batch.yaml"),
}
configFilePaths['gkg']['realtime'] = {
'main' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTgkgMainEDAconfig_realtime.yaml"),
'locations' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTgkgLocationsEDAconfig_realtime.yaml"),
'counts' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTgkgCountsEDAconfig_realtime.yaml"),
'themes' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTgkgThemesEDAconfig_realtime.yaml"),
'persons' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTgkgPersonsEDAconfig_realtime.yaml"),
'organizations' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTgkgOrganizationsEDAconfig_realtime.yaml"),
}
# A02
def __init__(self, tableList = ['events', 'mentions', 'gkg']):
'''GDELTeda class initialization, takes a list of GDELT tables to
perform EDA on. Instantiates a GDELTbase() instance for use by class
methods and checks for presence of EDAlogs directories, creating them if
they aren't present, and copying all ProfileReport-required config files
to their applicable directories.
Parameters:
----------
tableList - list of strings, default ['events','mentions','gkg']
Controls detection and creation of .../EDALogs/... subdirectories for
collection of Pandas Profiling ProfileReport HTML EDA document output.
Also controls permission for class member functions to perform
operations on tables specified by those functions' tableList parameters
as a failsafe against a lack of project directories required for those
operations, specifically output of HTML EDA documents.
output:
------
Produces exhaustive EDA for GDELT record subsets for specified tables
through Pandas Profiling ProfileReport-output HTML documents.
All procedurally automated steps towards report generation are shown
in console output during script execution.
'''
# instancing tables for operations to be passed to member functions
self.tableList = tableList
print("Instantiating GDELTeda...\n")
self.gBase = GDELTbase()
if 'events' not in tableList and \
'mentions' not in tableList and \
'gkg' not in tableList:
print("Error! 'tableList' values do not include a valid GDELT table.",
"\nPlease use one or more of 'events', 'mentions', and/or 'gkg'.")
# instancing trackers for realtimeEDA() and loopEDA()
self.realtimeStarted = False
self.realtimeLooping = False
self.realtimeWindow = 0
self.lastRealDatetime = ''
self.nextRealDatetime = ''
# A02a - Project EDA log directories confirmation and/or creation, and
# Pandas Profiling ProfileReport configuration file copying from 'scripts'
# directory.
print(" Checking log directory...")
if not os.path.isdir(self.logPath['base']):
print(" Doesn't exist! Making...")
# A02b - Project directory path
# For obvious reasons, any user of this script should change this
# string to suit their needs. The directory described with this string
# should be one directory above the location of the 'scripts' directory
# this file should be in. If this file is not in 'scripts', unpredictable
# behavior may occur, and no guarantees of functionality are intended for
# such a state.
os.chdir('C:\\Users\\urf\\Projects\\WGU capstone')
os.mkdir(self.logPath['base'])
for table in tableList:
# switch to EDAlogs directory
os.chdir(self.logPath['base'])
# Branch: table subdirectories not found, create all
if not os.path.isdir(self.logPath[table]['table']):
print("Did not find .../EDAlogs/", table, "...")
print(" Creating .../EDAlogs/", table, "...")
os.mkdir(self.logPath[table]['table'])
os.chdir(self.logPath[table]['table'])
print(" Creating .../EDAlogs/", table, "/batch")
os.mkdir(self.logPath[table]['batch'])
print(" Creating .../EDAlogs/", table, "/realtime")
os.mkdir(self.logPath[table]['realtime'])
os.chdir(self.logPath[table]['realtime'])
# Branch: table subdirectories found, create batch/realtime directories
# if not present.
else:
print(" Found .../EDAlogs/", table,"...")
os.chdir(self.logPath[table]['table'])
if not os.path.isdir(self.logPath[table]['batch']):
print(" Did not find .../EDAlogs/", table, "/batch , creating...")
os.mkdir(self.logPath[table]['batch'])
if not os.path.isdir(self.logPath[table]['realtime']):
print(" Did not find .../EDAlogs/", table, "/realtime , creating...")
os.mkdir(self.logPath[table]['realtime'])
os.chdir(self.logPath[table]['realtime'])
# Copying pandas_profiling.ProfileReport configuration files
print(" Copying configuration files...\n")
if table == 'gkg':
# There's a lot of these, but full normalization of GKG is
# prohibitively RAM-expensive, so reports need to be generated for
# both the main columns and the main columns normalized for each
# variable-length subfield.
shutil.copy(self.configFilePaths[table]['realtime']['main'],
self.logPath[table]['realtime'])
shutil.copy(self.configFilePaths[table]['realtime']['locations'],
self.logPath[table]['realtime'])
shutil.copy(self.configFilePaths[table]['realtime']['counts'],
self.logPath[table]['realtime'])
shutil.copy(self.configFilePaths[table]['realtime']['themes'],
self.logPath[table]['realtime'])
shutil.copy(self.configFilePaths[table]['realtime']['persons'],
self.logPath[table]['realtime'])
shutil.copy(self.configFilePaths[table]['realtime']['organizations'],
self.logPath[table]['realtime'])
os.chdir(self.logPath[table]['batch'])
shutil.copy(self.configFilePaths[table]['batch']['main'],
self.logPath[table]['batch'])
shutil.copy(self.configFilePaths[table]['batch']['locations'],
self.logPath[table]['batch'])
shutil.copy(self.configFilePaths[table]['batch']['counts'],
self.logPath[table]['batch'])
shutil.copy(self.configFilePaths[table]['batch']['themes'],
self.logPath[table]['batch'])
shutil.copy(self.configFilePaths[table]['batch']['persons'],
self.logPath[table]['batch'])
shutil.copy(self.configFilePaths[table]['batch']['organizations'],
self.logPath[table]['batch'])
else:
shutil.copy(self.configFilePaths[table]['realtime'],
self.logPath[table]['realtime'])
os.chdir(self.logPath[table]['batch'])
shutil.copy(self.configFilePaths[table]['batch'],
self.logPath[table]['batch'])
# B00 - class methods
# B01
def batchEDA(self, tableList = ['events','mentions','gkg']):
'''Reshapes and re-types GDELT records for generating Pandas
Profiling ProfileReport()-automated, simple EDA reports from Pandas
DataFrames, from MongoDB-query-cursors.
WARNING: extremely RAM, disk I/O, and processing intensive. Be aware of
what resources are available for these operations at runtime.
Relies on Python multiprocessing.Pool.map() calls against class member
functions eventsBatchEDA() and mentionsBatchEDA(), and a regular call on
gkgBatchEDA(), which uses multiprocessing.Pool.map() calls within it.
Parameters:
----------
tableList - list of strings, default ['events','mentions','gkg']
Permits limiting analysis to one or more tables.
Output:
------
Displays progress through the function's operations via console output
while producing Pandas Profiling ProfileReport.to_file() html documents
    for each table specified in tableList.
'''
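    # Each per-table branch below follows the same isolation pattern: build a
    # single-worker multiprocessing.Pool(1), run the RAM-heavy *BatchEDA call
    # through pool.map(), then pool.close() and pool.join() so the worker exits
    # and its DataFrame allocations are released before the next table is handled.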
if tableList != self.tableList:
print("\n Error: this GDELTeda object may have been initialized\n",
" without checking for the presence of directories\n",
" required for this function's operations.\n",
" Please check GDELTeda parameters and try again.")
for table in tableList:
print("\n------------------------------------------------------------\n")
print("Executing batch EDA on GDELT table", table, "records...")
# WARNING: RAM, PROCESSING, and DISK I/O INTENSIVE
# Events and Mentions are both much easier to handle than GKG, so
# they're called in their own collective function threads with
# multiprocessing.Pool(1).map().
if table == 'events':
os.chdir(self.logPath['events']['batch'])
pool = multiprocessing.Pool(1)
eventsReported = pool.map(self.eventsBatchEDA(), ['batch'])
pool.close()
pool.join()
if table == 'mentions':
os.chdir(self.logPath['mentions']['batch'])
pool = multiprocessing.Pool(1)
mentionsReported = pool.map(self.mentionsBatchEDA(), ['batch'])
pool.close()
pool.join()
if table == 'gkg':
# Here's the GKG bottleneck! Future investigation of parallelization
# improvements may yield gains here, as normalization of all subfield
# and variable-length measures is very RAM expensive, given the
# expansion in records required.
# So, current handling of GKG subfield and variable-length measures
# is isolating most operations in their own process threads within
# gkgBatchEDA() execution, forcing deallocation of those resources upon
# each Pool.close(), as with Events and Mentions table operations above
# which themselves do not require any additional subfield handling.
os.chdir(self.logPath['gkg']['batch'])
self.gkgBatchEDA()
# B02
def eventsBatchEDA(mode):
'''Performs automatic EDA on GDELT Events record subsets. See
function batchEDA() for "if table == 'events':" case handling and how
this function is invoked as a multiprocessing.Pool.map() call, intended
to isolate its RAM requirements for deallocation upon Pool.close().
In its current state, this function can handle collections of GDELT
Events records up to at least the size of the batch EDA test subset used
in this capstone project, the 30 day period from 05/24/2020 to
06/22/2020.
Parameters:
----------
mode - arbitrary
This parameter is included to meet Python multiprocessing.Pool.map()
function requirements. As such, it is present only to receive a
parameter determined by map(), e.g. one iteration of the function will
execute.
Output:
------
Console displays progress through steps with time taken throughout,
and function generates EDA profile html documents in appropriate project
directories.
'''
columnNames = [
'GLOBALEVENTID',
'Actor1Code',
'Actor1Name',
'Actor1CountryCode',
'Actor1Type1Code',
'Actor1Type2Code',
'Actor1Type3Code',
'Actor2Code',
'Actor2Name',
'Actor2CountryCode',
'Actor2Type1Code',
'Actor2Type2Code',
'Actor2Type3Code',
'IsRootEvent',
'EventCode',
'EventBaseCode',
'EventRootCode',
'QuadClass',
'AvgTone',
'Actor1Geo_Type',
'Actor1Geo_FullName',
'Actor1Geo_Lat',
'Actor1Geo_Long',
'Actor2Geo_Type',
'Actor2Geo_FullName',
'Actor2Geo_Lat',
'Actor2Geo_Long',
'ActionGeo_Type',
'ActionGeo_FullName',
'ActionGeo_Lat',
'ActionGeo_Long',
'DATEADDED',
'SOURCEURL',
]
columnTypes = {
'GLOBALEVENTID' : type(1),
'Actor1Code': pd.StringDtype(),
'Actor1Name': pd.StringDtype(),
'Actor1CountryCode': pd.StringDtype(),
'Actor1Type1Code' : pd.StringDtype(),
'Actor1Type2Code' : pd.StringDtype(),
'Actor1Type3Code' : pd.StringDtype(),
'Actor2Code': pd.StringDtype(),
'Actor2Name': pd.StringDtype(),
'Actor2CountryCode': pd.StringDtype(),
'Actor2Type1Code' : pd.StringDtype(),
'Actor2Type2Code' : pd.StringDtype(),
'Actor2Type3Code' : pd.StringDtype(),
'IsRootEvent': type(True),
'EventCode': pd.StringDtype(),
'EventBaseCode': pd.StringDtype(),
'EventRootCode': pd.StringDtype(),
'QuadClass': type(1),
'AvgTone': type(1.1),
'Actor1Geo_Type': type(1),
'Actor1Geo_FullName': pd.StringDtype(),
'Actor1Geo_Lat': pd.StringDtype(),
'Actor1Geo_Long': pd.StringDtype(),
'Actor2Geo_Type': type(1),
      'Actor2Geo_FullName': pd.StringDtype(),
import glob
import os
import sys
# these imports and sys.path insertions need to stay in this order
sys.path.insert(0, "../")
sys.path.insert(0, "TP_model")
sys.path.insert(0, "TP_model/fit_and_forecast")
from Reff_functions import *
from Reff_constants import *
from sys import argv
from datetime import timedelta, datetime
from scipy.special import expit
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import matplotlib
matplotlib.use("Agg")
def forecast_TP(data_date):
from scenarios import scenarios, scenario_dates
from params import (
num_forecast_days,
alpha_start_date,
delta_start_date,
omicron_start_date,
truncation_days,
start_date,
sim_start_date,
third_start_date,
mob_samples,
)
data_date = pd.to_datetime(data_date)
# Define inputs
sim_start_date = pd.to_datetime(sim_start_date)
# Add 3 days buffer to mobility forecast
num_forecast_days = num_forecast_days + 3
# data_date = pd.to_datetime('2022-01-25')
print("============")
print("Generating forecasts using data from", data_date)
print("============")
# convert third start date to the correct format
third_start_date = pd.to_datetime(third_start_date)
third_end_date = data_date - timedelta(truncation_days)
# a different end date to deal with issues in fitting
third_end_date_diff = data_date - timedelta(18 + 7 + 7)
third_states = sorted(["NSW", "VIC", "ACT", "QLD", "SA", "TAS", "NT", "WA"])
# third_states = sorted(['NSW', 'VIC', 'ACT', 'QLD', 'SA', 'NT'])
# choose dates for each state for third wave
# NOTE: These need to be in date sorted order
third_date_range = {
"ACT": pd.date_range(start="2021-08-15", end=third_end_date).values,
"NSW": pd.date_range(start="2021-06-25", end=third_end_date).values,
"NT": pd.date_range(start="2021-12-20", end=third_end_date).values,
"QLD": pd.date_range(start="2021-07-30", end=third_end_date).values,
"SA": pd.date_range(start="2021-12-10", end=third_end_date).values,
"TAS": pd.date_range(start="2021-12-20", end=third_end_date).values,
"VIC": pd.date_range(start="2021-07-10", end=third_end_date).values,
"WA": pd.date_range(start="2022-01-01", end=third_end_date).values,
}
# Get Google Data - Don't use the smoothed data?
df_google_all = read_in_google(Aus_only=True, moving=True, local=True)
third_end_date = pd.to_datetime(data_date) - pd.Timedelta(days=truncation_days)
results_dir = (
"results/"
+ data_date.strftime("%Y-%m-%d")
+ "/"
)
# Load in vaccination data by state and date which should have the same date as the
# NNDSS/linelist data use the inferred VE
vaccination_by_state_delta = pd.read_csv(
results_dir + "adjusted_vaccine_ts_delta" + data_date.strftime("%Y-%m-%d") + ".csv",
parse_dates=["date"],
)
vaccination_by_state_delta = vaccination_by_state_delta[["state", "date", "effect"]]
vaccination_by_state_delta = vaccination_by_state_delta.pivot(
index="state", columns="date", values="effect"
) # Convert to matrix form
# Convert to simple array for indexing
vaccination_by_state_delta_array = vaccination_by_state_delta.to_numpy()
vaccination_by_state_omicron = pd.read_csv(
results_dir + "adjusted_vaccine_ts_omicron" + data_date.strftime("%Y-%m-%d") + ".csv",
parse_dates=["date"],
)
vaccination_by_state_omicron = vaccination_by_state_omicron[["state", "date", "effect"]]
vaccination_by_state_omicron = vaccination_by_state_omicron.pivot(
index="state", columns="date", values="effect"
) # Convert to matrix form
# Convert to simple array for indexing
vaccination_by_state_omicron_array = vaccination_by_state_omicron.to_numpy()
# Get survey data
surveys = pd.DataFrame()
path = "data/md/Barometer wave*.csv"
for file in glob.glob(path):
surveys = surveys.append(pd.read_csv(file, parse_dates=["date"]))
surveys = surveys.sort_values(by="date")
print("Latest microdistancing survey is {}".format(surveys.date.values[-1]))
surveys.loc[surveys.state != "ACT", "state"] = (
surveys.loc[surveys.state != "ACT", "state"]
.map(states_initials)
.fillna(surveys.loc[surveys.state != "ACT", "state"])
)
surveys["proportion"] = surveys["count"] / surveys.respondents
surveys.date = pd.to_datetime(surveys.date)
always = surveys.loc[surveys.response == "Always"].set_index(["state", "date"])
always = always.unstack(["state"])
# fill in date range
idx = pd.date_range("2020-03-01", pd.to_datetime("today"))
always = always.reindex(idx, fill_value=np.nan)
always.index.name = "date"
always = always.fillna(method="bfill")
always = always.stack(["state"])
# Zero out before first survey 20th March
always = always.reset_index().set_index("date")
always.loc[:"2020-03-20", "count"] = 0
always.loc[:"2020-03-20", "respondents"] = 0
always.loc[:"2020-03-20", "proportion"] = 0
always = always.reset_index().set_index(["state", "date"])
survey_X = pd.pivot_table(
data=always, index="date", columns="state", values="proportion"
)
prop_all = survey_X
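    # prop_all: date-indexed DataFrame with one column per state, giving the
    # proportion of survey respondents who report "Always" micro-distancing.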
## read in and process mask wearing data
mask_wearing = pd.DataFrame()
path = "data/face_coverings/face_covering_*_.csv"
for file in glob.glob(path):
mask_wearing = mask_wearing.append(pd.read_csv(file, parse_dates=["date"]))
mask_wearing = mask_wearing.sort_values(by="date")
print("Latest mask wearing survey is {}".format(mask_wearing.date.values[-1]))
# mask_wearing['state'] = mask_wearing['state'].map(states_initials).fillna(mask_wearing['state'])
mask_wearing.loc[mask_wearing.state != "ACT", "state"] = (
mask_wearing.loc[mask_wearing.state != "ACT", "state"]
.map(states_initials)
.fillna(mask_wearing.loc[mask_wearing.state != "ACT", "state"])
)
mask_wearing["proportion"] = mask_wearing["count"] / mask_wearing.respondents
mask_wearing.date = pd.to_datetime(mask_wearing.date)
mask_wearing_always = mask_wearing.loc[
mask_wearing.face_covering == "Always"
].set_index(["state", "date"])
mask_wearing_always = mask_wearing_always.unstack(["state"])
idx = pd.date_range("2020-03-01", pd.to_datetime("today"))
mask_wearing_always = mask_wearing_always.reindex(idx, fill_value=np.nan)
mask_wearing_always.index.name = "date"
# fill back to earlier and between weeks.
# Assume survey on day x applies for all days up to x - 6
mask_wearing_always = mask_wearing_always.fillna(method="bfill")
mask_wearing_always = mask_wearing_always.stack(["state"])
# Zero out before first survey 20th March
mask_wearing_always = mask_wearing_always.reset_index().set_index("date")
mask_wearing_always.loc[:"2020-03-20", "count"] = 0
mask_wearing_always.loc[:"2020-03-20", "respondents"] = 0
mask_wearing_always.loc[:"2020-03-20", "proportion"] = 0
mask_wearing_X = pd.pivot_table(
data=mask_wearing_always, index="date", columns="state", values="proportion"
)
mask_wearing_all = mask_wearing_X
# Get posterior
df_samples = read_in_posterior(
date=data_date.strftime("%Y-%m-%d"),
)
states = sorted(["NSW", "QLD", "SA", "VIC", "TAS", "WA", "ACT", "NT"])
plot_states = states.copy()
one_month = data_date + timedelta(days=num_forecast_days)
days_from_March = (one_month - pd.to_datetime(start_date)).days
# filter out future info
prop = prop_all.loc[:data_date]
masks = mask_wearing_all.loc[:data_date]
df_google = df_google_all.loc[df_google_all.date <= data_date]
# use this trick of saving the google data and then reloading it to kill
# the date time values
df_google.to_csv("results/test_google_data.csv")
df_google = pd.read_csv("results/test_google_data.csv")
# remove the temporary file
# os.remove("results/test_google_data.csv")
    # Simple interpolation for missing values in Google data
df_google = df_google.interpolate(method="linear", axis=0)
df_google.date = pd.to_datetime(df_google.date)
# forecast time parameters
today = data_date.strftime("%Y-%m-%d")
# add days to forecast if we are missing data
if df_google.date.values[-1] < data_date:
n_forecast = num_forecast_days + (data_date - df_google.date.values[-1]).days
else:
n_forecast = num_forecast_days
training_start_date = datetime(2020, 3, 1, 0, 0)
print(
"Forecast ends at {} days after 1st March".format(
(pd.to_datetime(today) - pd.to_datetime(training_start_date)).days
+ num_forecast_days
)
)
print(
"Final date is {}".format(pd.to_datetime(today) + timedelta(days=num_forecast_days))
)
df_google = df_google.loc[df_google.date >= training_start_date]
outdata = {"date": [], "type": [], "state": [], "mean": [], "std": []}
predictors = mov_values.copy()
# predictors.remove("residential_7days")
# Setup Figures
axes = []
figs = []
for var in predictors:
fig, ax_states = plt.subplots(figsize=(7, 8), nrows=4, ncols=2, sharex=True)
axes.append(ax_states)
# fig.suptitle(var)
figs.append(fig)
# extra fig for microdistancing
var = "Proportion people always microdistancing"
fig, ax_states = plt.subplots(figsize=(7, 8), nrows=4, ncols=2, sharex=True)
axes.append(ax_states)
figs.append(fig)
# # extra fig for mask wearing
var = "Proportion people always wearing masks"
fig, ax_states = plt.subplots(figsize=(7, 8), nrows=4, ncols=2, sharex=True)
axes.append(ax_states)
figs.append(fig)
var = "Reduction in Reff due to vaccination"
fig, ax_states = plt.subplots(figsize=(7, 8), nrows=4, ncols=2, sharex=True)
axes.append(ax_states)
figs.append(fig)
var = "Reduction in Reff due to vaccination"
fig, ax_states = plt.subplots(figsize=(7, 8), nrows=4, ncols=2, sharex=True)
axes.append(ax_states)
figs.append(fig)
# Forecasting Params
n_training = 21 # Period to examine trend
n_baseline = 150 # Period to create baseline
n_training_vaccination = 30 # period to create trend for vaccination
# since this can be useful, predictor ordering is:
# [
# 'retail_and_recreation_7days',
# 'grocery_and_pharmacy_7days',
# 'parks_7days',
# 'transit_stations_7days',
# 'workplaces_7days'
# ]
# Loop through states and run forecasting.
print("============")
print("Forecasting macro, micro and vaccination")
print("============")
state_Rmed = {}
state_sims = {}
for i, state in enumerate(states):
rownum = int(i / 2)
colnum = np.mod(i, 2)
rows = df_google.loc[df_google.state == state].shape[0]
# Rmed currently a list, needs to be a matrix
Rmed_array = np.zeros(shape=(rows, len(predictors), mob_samples))
for j, var in enumerate(predictors):
for n in range(mob_samples):
# historically we want a little more noise. In the actual forecasting of trends
# we don't want this to be quite that prominent.
Rmed_array[:, j, n] = df_google[df_google["state"] == state][
var
].values.T + np.random.normal(
loc=0, scale=df_google[df_google["state"] == state][var + "_std"]
)
dates = df_google[df_google["state"] == state]["date"]
# cap min and max at historical or (-50,0)
# 1 by predictors by mob_samples size
minRmed_array = np.minimum(-50, np.amin(Rmed_array, axis=0))
maxRmed_array = np.maximum(10, np.amax(Rmed_array, axis=0))
# days by predictors by samples
sims = np.zeros(shape=(n_forecast, len(predictors), mob_samples))
for n in range(mob_samples): # Loop through simulations
Rmed = Rmed_array[:, :, n]
minRmed = minRmed_array[:, n]
maxRmed = maxRmed_array[:, n]
if maxRmed[1] < 20:
maxRmed[1] = 50
R_baseline_mean = np.mean(Rmed[-n_baseline:, :], axis=0)
if state not in {"WA"}:
R_baseline_mean[-1] = 0
R_diffs = np.diff(Rmed[-n_training:, :], axis=0)
mu = np.mean(R_diffs, axis=0)
cov = np.cov(R_diffs, rowvar=False) # columns are vars, rows are obs
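            # The forecast below treats day-to-day changes in the mobility predictors
            # as a multivariate normal random walk: mu and cov are the drift and
            # covariance of daily differences over the last n_training days, and in
            # the default (no-scenario) branch each forecast step blends a draw from
            # N(mu, cov) with a pull back towards R_baseline_mean.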
# Forecast mobility forward sequentially by day.
# current = np.mean(Rmed[-9:-2, :], axis=0) # Start from last valid days
# current = np.mean(Rmed[-1, :], axis=0) # Start from last valid days
current = Rmed[-1, :] # Start from last valid days
for i in range(n_forecast):
# ## SCENARIO MODELLING
# This code chunk will allow you manually set the distancing params for a state to allow for modelling.
if scenarios[state] == "":
# Proportion of trend_force to regression_to_baseline_force
p_force = (n_forecast - i) / (n_forecast)
# Generate a single forward realisation of trend
trend_force = np.random.multivariate_normal(mu, cov)
# Generate a single forward realisation of baseline regression
# regression to baseline force stronger in standard forecasting
regression_to_baseline_force = np.random.multivariate_normal(
0.05 * (R_baseline_mean - current), cov
)
new_forcast_points = (
current + p_force * trend_force + (1 - p_force) * regression_to_baseline_force
) # Find overall simulation step
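                    # Worked example of the blend above (illustrative numbers only):
                    # with n_forecast = 30, the first step (i = 0) has p_force = 1, so it
                    # is pure trend_force; the last step (i = 29) has p_force = 1/30, so
                    # it is almost entirely the regression-to-baseline force.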
# Apply minimum and maximum
new_forcast_points = np.maximum(minRmed, new_forcast_points)
new_forcast_points = np.minimum(maxRmed, new_forcast_points)
current = new_forcast_points
elif scenarios[state] != "":
# Make baseline cov for generating points
cov_baseline = np.cov(Rmed[-42:-28, :], rowvar=False)
mu_current = Rmed[-1, :]
mu_victoria = np.array(
[
-55.35057887,
-22.80891056,
-46.59531636,
-75.99942378,
-44.71119293,
]
)
mu_baseline = np.mean(Rmed[-42:-28, :], axis=0)
# mu_baseline = 0*np.mean(Rmed[-42:-28, :], axis=0)
if scenario_dates[state] != "":
scenario_change_point = (
pd.to_datetime(scenario_dates[state]) - data_date
).days + (n_forecast - 42)
# Constant Lockdown
if (
scenarios[state] == "no_reversion"
or scenarios[state] == "school_opening_2022"
):
# take a continuous median to account for noise in recent observations (such as sunny days)
# mu_current = np.mean(Rmed[-7:, :], axis=0)
# cov_baseline = np.cov(Rmed[-28:, :], rowvar=False)
new_forcast_points = np.random.multivariate_normal(
mu_current, cov_baseline
)
elif scenarios[state] == "no_reversion_continuous_lockdown":
# add the new scenario here
new_forcast_points = np.random.multivariate_normal(
mu_current, cov_baseline
)
# No Lockdown
elif scenarios[state] == "full_reversion":
# a full reversion scenario changes the social mobility and microdistancing
# behaviours at the scenario change date and then applies a return to baseline force
if i < scenario_change_point:
new_forcast_points = np.random.multivariate_normal(
mu_current, cov_baseline
)
else:
# baseline is within lockdown period so take a new baseline of 0's and trend towards this
R_baseline_0 = np.zeros_like(R_baseline_mean)
R_baseline_0 = mu_baseline
                            # set adjusted baselines by eye for now, need to get this automated
# R_baseline_0[1] = 10 # baseline of +10% for Grocery based on other jurisdictions
# # apply specific baselines to the jurisdictions progressing towards normal restrictions
# if state == 'NSW':
# R_baseline_0[3] = -25 # baseline of -25% for Transit based on 2021-April to 2021-July (pre-third-wave lockdowns)
# elif state == 'ACT':
# R_baseline_0[1] = 20 # baseline of +20% for Grocery based on other jurisdictions
# R_baseline_0[3] = -25 # baseline of -25% for Transit based on 2021-April to 2021-July (pre-third-wave lockdowns)
# elif state == 'VIC':
# R_baseline_0[0] = -15 # baseline of -15% for R&R based on 2021-April to 2021-July (pre-third-wave lockdowns)
# R_baseline_0[3] = -30 # baseline of -30% for Transit based on 2021-April to 2021-July (pre-third-wave lockdowns)
# R_baseline_0[4] = -15 # baseline of -15% for workplaces based on 2021-April to 2021-July (pre-third-wave lockdowns)
# the force we trend towards the baseline above with
p_force = (n_forecast - i) / (n_forecast)
trend_force = np.random.multivariate_normal(
mu, cov
) # Generate a single forward realisation of trend
# baseline scalar is smaller for this as we want slow returns
adjusted_baseline_drift_mean = R_baseline_0 - current
# we purposely scale the transit measure so that we increase a little more quickly
# tmp = 0.05 * adjusted_baseline_drift_mean[3]
adjusted_baseline_drift_mean *= 0.005
# adjusted_baseline_drift_mean[3] = tmp
regression_to_baseline_force = np.random.multivariate_normal(
adjusted_baseline_drift_mean, cov
) # Generate a single forward realisation of baseline regression
new_forcast_points = (
current
+ p_force * trend_force
+ (1 - p_force) * regression_to_baseline_force
) # Find overall simulation step
# new_forcast_points = current + regression_to_baseline_force # Find overall simulation step
# Apply minimum and maximum
new_forcast_points = np.maximum(minRmed, new_forcast_points)
new_forcast_points = np.minimum(maxRmed, new_forcast_points)
current = new_forcast_points
elif scenarios[state] == "immediately_baseline":
# this scenario is used to return instantly to the baseline levels
if i < scenario_change_point:
new_forcast_points = np.random.multivariate_normal(
mu_current, cov_baseline
)
else:
# baseline is within lockdown period so take a new baseline of 0's and trend towards this
R_baseline_0 = np.zeros_like(R_baseline_mean)
# jump immediately to baseline
new_forcast_points = np.random.multivariate_normal(
R_baseline_0, cov_baseline
)
# Temporary Lockdown
elif scenarios[state] == "half_reversion":
if i < scenario_change_point:
new_forcast_points = np.random.multivariate_normal(
mu_current, cov_baseline
)
else:
new_forcast_points = np.random.multivariate_normal(
(mu_current + mu_baseline) / 2, cov_baseline
)
# Stage 4
elif scenarios[state] == "stage4":
if i < scenario_change_point:
new_forcast_points = np.random.multivariate_normal(
mu_current, cov_baseline
)
else:
new_forcast_points = np.random.multivariate_normal(
mu_victoria, cov_baseline
)
# Set this day in this simulation to the forecast realisation
sims[i, :, n] = new_forcast_points
dd = [dates.tolist()[-1] + timedelta(days=x) for x in range(1, n_forecast + 1)]
sims_med = np.median(sims, axis=2) # N by predictors
sims_q25 = np.percentile(sims, 25, axis=2)
sims_q75 = np.percentile(sims, 75, axis=2)
        # forecast microdistancing
# Get a baseline value of microdistancing
mu_overall = np.mean(prop[state].values[-n_baseline:])
md_diffs = np.diff(prop[state].values[-n_training:])
mu_diffs = np.mean(md_diffs)
std_diffs = np.std(md_diffs)
extra_days_md = (
pd.to_datetime(df_google.date.values[-1])
- pd.to_datetime(prop[state].index.values[-1])
).days
# Set all values to current value.
current = [prop[state].values[-1]] * mob_samples
new_md_forecast = []
# Forecast mobility forward sequentially by day.
for i in range(n_forecast + extra_days_md):
# SCENARIO MODELLING
# This code chunk will allow you manually set the distancing params for a state to allow for modelling.
if scenarios[state] == "":
# Proportion of trend_force to regression_to_baseline_force
p_force = (n_forecast + extra_days_md - i) / (n_forecast + extra_days_md)
# Generate step realisations in training trend direction
trend_force = np.random.normal(mu_diffs, std_diffs, size=mob_samples)
# Generate realisations that draw closer to baseline
regression_to_baseline_force = np.random.normal(
0.05 * (mu_overall - current), std_diffs
)
current = (
current
+ p_force * trend_force
+ (1 - p_force) * regression_to_baseline_force
) # Balance forces
# current = current+p_force*trend_force # Balance forces
elif scenarios[state] != "":
current = np.array(current)
# Make baseline cov for generating points
std_baseline = np.std(prop[state].values[-42:-28])
mu_baseline = np.mean(prop[state].values[-42:-28], axis=0)
mu_current = prop[state].values[-1]
if scenario_dates[state] != "":
scenario_change_point = (
pd.to_datetime(scenario_dates[state]) - data_date
).days + extra_days_md
# Constant Lockdown
if (
scenarios[state] == "no_reversion"
or scenarios[state] == "school_opening_2022"
):
# use only more recent data to forecast under a no-reversion scenario
# std_lockdown = np.std(prop[state].values[-24:-4])
# current = np.random.normal(mu_current, std_lockdown)
current = np.random.normal(mu_current, std_baseline)
# No Lockdown
elif scenarios[state] == "full_reversion":
if i < scenario_change_point:
current = np.random.normal(mu_current, std_baseline)
else:
mu_baseline_0 = 0.2
# Proportion of trend_force to regression_to_baseline_force
p_force = (n_forecast + extra_days_md - i) / (
n_forecast + extra_days_md
)
# take a mean of the differences over the last 2 weeks
mu_diffs = np.mean(np.diff(prop[state].values[-14:]))
# Generate step realisations in training trend direction
trend_force = np.random.normal(mu_diffs, std_baseline)
# Generate realisations that draw closer to baseline
regression_to_baseline_force = np.random.normal(
0.005 * (mu_baseline_0 - current), std_baseline
)
current = current + regression_to_baseline_force # Balance forces
elif scenarios[state] == "immediately_baseline":
# this scenario is an immediate return to baseline values
if i < scenario_change_point:
current = np.random.normal(mu_current, std_baseline)
else:
mu_baseline_0 = 0.2
# jump immediately to baseline
current = np.random.normal(mu_baseline_0, std_baseline)
# Temporary Lockdown
elif scenarios[state] == "half_reversion": # No Lockdown
if i < scenario_change_point:
current = np.random.normal(mu_current, std_baseline)
else:
# Revert to values halfway between the before and after
current = np.random.normal(
(mu_current + mu_baseline) / 2, std_baseline
)
new_md_forecast.append(current)
md_sims = np.vstack(new_md_forecast) # Put forecast days together
md_sims = np.minimum(1, md_sims)
md_sims = np.maximum(0, md_sims)
dd_md = [
prop[state].index[-1] + timedelta(days=x)
for x in range(1, n_forecast + extra_days_md + 1)
]
## currently not forecasting masks — may return in the future but will need to assess.
# forecast mask wearing compliance
# Get a baseline value of microdistancing
mu_overall = np.mean(masks[state].values[-n_baseline:])
md_diffs = np.diff(masks[state].values[-n_training:])
mu_diffs = np.mean(md_diffs)
std_diffs = np.std(md_diffs)
extra_days_masks = (
pd.to_datetime(df_google.date.values[-1])
- pd.to_datetime(masks[state].index.values[-1])
).days
# Set all values to current value.
current = [masks[state].values[-1]] * mob_samples
new_masks_forecast = []
# Forecast mobility forward sequentially by day.
for i in range(n_forecast + extra_days_masks):
# SCENARIO MODELLING
# This code chunk will allow you manually set the distancing params for a state to allow for modelling.
if scenarios[state] == "":
                # Proportion of trend_force to regression_to_baseline_force
p_force = (n_forecast + extra_days_masks - i) / (
n_forecast + extra_days_masks
)
# Generate step realisations in training trend direction
trend_force = np.random.normal(mu_diffs, std_diffs, size=mob_samples)
# Generate realisations that draw closer to baseline
# regression_to_baseline_force = np.random.normal(0.05*(mu_overall - current), std_diffs)
# current = current + p_force*trend_force + (1-p_force)*regression_to_baseline_force # Balance forces
current = current + trend_force
elif scenarios[state] != "":
current = np.array(current)
# Make baseline cov for generating points
std_baseline = np.std(masks[state].values[-42:-28])
mu_baseline = np.mean(masks[state].values[-42:-28], axis=0)
mu_current = masks[state].values[-1]
if scenario_dates[state] != "":
scenario_change_point = (
pd.to_datetime(scenario_dates[state]) - data_date
).days + extra_days_masks
# Constant Lockdown
if (
scenarios[state] == "no_reversion"
or scenarios[state] == "school_opening_2022"
):
# use only more recent data to forecast under a no-reversion scenario
# std_lockdown = np.std(masks[state].values[-24:-4])
# current = np.random.normal(mu_current, std_lockdown)
current = np.random.normal(mu_current, std_baseline)
# No Lockdown
elif scenarios[state] == "full_reversion":
if i < scenario_change_point:
current = np.random.normal(mu_current, std_baseline)
else:
mu_baseline_0 = 0.2
                        # Proportion of trend_force to regression_to_baseline_force
p_force = (n_forecast + extra_days_masks - i) / (
n_forecast + extra_days_masks
)
# take a mean of the differences over the last 2 weeks
mu_diffs = np.mean(np.diff(masks[state].values[-14:]))
# Generate step realisations in training trend direction
trend_force = np.random.normal(mu_diffs, std_baseline)
# Generate realisations that draw closer to baseline
regression_to_baseline_force = np.random.normal(
0.005 * (mu_baseline_0 - current), std_baseline
)
current = current + regression_to_baseline_force # Balance forces
elif scenarios[state] == "immediately_baseline":
# this scenario is an immediate return to baseline values
if i < scenario_change_point:
current = np.random.normal(mu_current, std_baseline)
else:
mu_baseline_0 = 0.2
# jump immediately to baseline
current = np.random.normal(mu_baseline_0, std_baseline)
# Temporary Lockdown
elif scenarios[state] == "half_reversion": # No Lockdown
if i < scenario_change_point:
current = np.random.normal(mu_current, std_baseline)
else:
# Revert to values halfway between the before and after
current = np.random.normal(
(mu_current + mu_baseline) / 2, std_baseline
)
new_masks_forecast.append(current)
masks_sims = np.vstack(new_masks_forecast) # Put forecast days together
masks_sims = np.minimum(1, masks_sims)
masks_sims = np.maximum(0, masks_sims)
dd_masks = [
masks[state].index[-1] + timedelta(days=x)
for x in range(1, n_forecast + extra_days_masks + 1)
]
# Forecasting vaccine effect
# if state == "WA":
# last_fit_date = pd.to_datetime(third_end_date)
# else:
last_fit_date = pd.to_datetime(third_date_range[state][-1])
extra_days_vacc = (pd.to_datetime(df_google.date.values[-1]) - last_fit_date).days
total_forecasting_days = n_forecast + extra_days_vacc
# get the VE on the last day
mean_delta = vaccination_by_state_delta.loc[state][last_fit_date + timedelta(1)]
mean_omicron = vaccination_by_state_omicron.loc[state][last_fit_date + timedelta(1)]
current = np.zeros_like(mob_samples)
new_delta = []
new_omicron = []
# variance on the vaccine forecasts is equivalent to what we use in the fitting
var_vax = 0.00005
a_vax = np.zeros_like(mob_samples)
b_vax = np.zeros_like(mob_samples)
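        # The per-day draws below moment-match a Beta distribution: for target mean m
        # and variance v (with v < m * (1 - m)), Beta(a, b) has mean a / (a + b) and
        # variance a * b / ((a + b)**2 * (a + b + 1)), which solves to
        #   a = m * (m * (1 - m) / v - 1)
        #   b = (1 - m) * (m * (1 - m) / v - 1)
        # matching the a_vax / b_vax expressions inside the loop.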
for d in pd.date_range(
last_fit_date + timedelta(1),
pd.to_datetime(today) + timedelta(days=num_forecast_days),
):
mean_delta = vaccination_by_state_delta.loc[state][d]
a_vax = mean_delta * (mean_delta * (1 - mean_delta) / var_vax - 1)
b_vax = (1 - mean_delta) * (mean_delta * (1 - mean_delta) / var_vax - 1)
current = np.random.beta(a_vax, b_vax, mob_samples)
new_delta.append(current.tolist())
mean_omicron = vaccination_by_state_omicron.loc[state][d]
a_vax = mean_omicron * (mean_omicron * (1 - mean_omicron) / var_vax - 1)
b_vax = (1 - mean_omicron) * (mean_omicron * (1 - mean_omicron) / var_vax - 1)
current = np.random.beta(a_vax, b_vax, mob_samples)
new_omicron.append(current.tolist())
vacc_sims_delta = np.vstack(new_delta)
vacc_sims_omicron = np.vstack(new_omicron)
dd_vacc = [
last_fit_date + timedelta(days=x)
for x in range(1, n_forecast + extra_days_vacc + 1)
]
for j, var in enumerate(
predictors
+ ["md_prop"]
+ ["masks_prop"]
+ ["vaccination_delta"]
+ ["vaccination_omicron"]
):
# Record data
axs = axes[j]
if (state == "AUS") and (var == "md_prop"):
continue
if var == "md_prop":
outdata["type"].extend([var] * len(dd_md))
outdata["state"].extend([state] * len(dd_md))
outdata["date"].extend([d.strftime("%Y-%m-%d") for d in dd_md])
outdata["mean"].extend(np.mean(md_sims, axis=1))
outdata["std"].extend(np.std(md_sims, axis=1))
elif var == "masks_prop":
outdata["type"].extend([var] * len(dd_masks))
outdata["state"].extend([state] * len(dd_masks))
outdata["date"].extend([d.strftime("%Y-%m-%d") for d in dd_masks])
outdata["mean"].extend(np.mean(masks_sims, axis=1))
outdata["std"].extend(np.std(masks_sims, axis=1))
elif var == "vaccination_delta":
outdata["type"].extend([var] * len(dd_vacc))
outdata["state"].extend([state] * len(dd_vacc))
outdata["date"].extend([d.strftime("%Y-%m-%d") for d in dd_vacc])
outdata["mean"].extend(np.mean(vacc_sims_delta, axis=1))
outdata["std"].extend(np.std(vacc_sims_delta, axis=1))
elif var == "vaccination_omicron":
outdata["type"].extend([var] * len(dd_vacc))
outdata["state"].extend([state] * len(dd_vacc))
outdata["date"].extend([d.strftime("%Y-%m-%d") for d in dd_vacc])
outdata["mean"].extend(np.mean(vacc_sims_omicron, axis=1))
outdata["std"].extend(np.std(vacc_sims_omicron, axis=1))
else:
outdata["type"].extend([var] * len(dd))
outdata["state"].extend([state] * len(dd))
outdata["date"].extend([d.strftime("%Y-%m-%d") for d in dd])
outdata["mean"].extend(np.mean(sims[:, j, :], axis=1))
outdata["std"].extend(np.std(sims[:, j, :], axis=1))
if state in plot_states:
if var == "md_prop":
# md plot
axs[rownum, colnum].plot(prop[state].index, prop[state].values, lw=1)
axs[rownum, colnum].plot(dd_md, np.median(md_sims, axis=1), "k", lw=1)
axs[rownum, colnum].fill_between(
dd_md,
np.quantile(md_sims, 0.25, axis=1),
np.quantile(md_sims, 0.75, axis=1),
color="k",
alpha=0.1,
)
elif var == "masks_prop":
# masks plot
axs[rownum, colnum].plot(masks[state].index, masks[state].values, lw=1)
axs[rownum, colnum].plot(
dd_masks, np.median(masks_sims, axis=1), "k", lw=1
)
axs[rownum, colnum].fill_between(
dd_masks,
np.quantile(masks_sims, 0.25, axis=1),
np.quantile(masks_sims, 0.75, axis=1),
color="k",
alpha=0.1,
)
elif var == "vaccination_delta":
# vaccination plot
axs[rownum, colnum].plot(
vaccination_by_state_delta.loc[
state, ~vaccination_by_state_delta.loc[state].isna()
].index,
vaccination_by_state_delta.loc[
state, ~vaccination_by_state_delta.loc[state].isna()
].values,
lw=1,
)
axs[rownum, colnum].plot(
dd_vacc, np.median(vacc_sims_delta, axis=1), color="C1", lw=1
)
axs[rownum, colnum].fill_between(
dd_vacc,
np.quantile(vacc_sims_delta, 0.25, axis=1),
np.quantile(vacc_sims_delta, 0.75, axis=1),
color="C1",
alpha=0.1,
)
elif var == "vaccination_omicron":
# vaccination plot
axs[rownum, colnum].plot(
vaccination_by_state_omicron.loc[
state, ~vaccination_by_state_omicron.loc[state].isna()
].index,
vaccination_by_state_omicron.loc[
state, ~vaccination_by_state_omicron.loc[state].isna()
].values,
lw=1,
)
axs[rownum, colnum].plot(
dd_vacc, np.median(vacc_sims_omicron, axis=1), color="C1", lw=1
)
axs[rownum, colnum].fill_between(
dd_vacc,
np.quantile(vacc_sims_omicron, 0.25, axis=1),
np.quantile(vacc_sims_omicron, 0.75, axis=1),
color="C1",
alpha=0.1,
)
else:
# all other predictors
axs[rownum, colnum].plot(
dates, df_google[df_google["state"] == state][var].values, lw=1
)
axs[rownum, colnum].fill_between(
dates,
np.percentile(Rmed_array[:, j, :], 25, axis=1),
np.percentile(Rmed_array[:, j, :], 75, axis=1),
alpha=0.5,
)
axs[rownum, colnum].plot(dd, sims_med[:, j], color="C1", lw=1)
axs[rownum, colnum].fill_between(
dd, sims_q25[:, j], sims_q75[:, j], color="C1", alpha=0.1
)
# axs[rownum,colnum].axvline(dd[-num_forecast_days], ls = '--', color = 'black', lw=1) # plotting a vertical line at the end of the data date
# axs[rownum,colnum].axvline(dd[-(num_forecast_days+truncation_days)], ls = '-.', color='grey', lw=1) # plotting a vertical line at the forecast date
axs[rownum, colnum].set_title(state)
# plotting horizontal line at 1
axs[rownum, colnum].axhline(1, ls="--", c="k", lw=1)
axs[rownum, colnum].set_title(state)
axs[rownum, colnum].tick_params("x", rotation=90)
axs[rownum, colnum].tick_params("both", labelsize=8)
# plot the start date of the data and indicators of the data we are actually fitting to (in grey)
axs[rownum, colnum].axvline(data_date, ls="-.", color="black", lw=1)
if j < len(predictors):
axs[rownum, colnum].set_ylabel(
predictors[j].replace("_", " ")[:-5], fontsize=7
)
elif var == "md_prop":
axs[rownum, colnum].set_ylabel(
"Proportion of respondents\n micro-distancing", fontsize=7
)
elif var == "masks_prop":
axs[rownum, colnum].set_ylabel(
"Proportion of respondents\n wearing masks", fontsize=7
)
elif var == "vaccination_delta" or var == "vaccination_omicron":
axs[rownum, colnum].set_ylabel(
"Reduction in TP \n from vaccination", fontsize=7
)
# historically we want to store the higher variance mobilities
state_Rmed[state] = Rmed_array
state_sims[state] = sims
os.makedirs(
"figs/mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts",
exist_ok=True,
)
for i, fig in enumerate(figs):
fig.text(0.5, 0.02, "Date", ha="center", va="center", fontsize=15)
if i < len(predictors): # this plots the google mobility forecasts
fig.tight_layout()
fig.savefig(
"figs/mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts/"
+ str(predictors[i])
+ ".png",
dpi=400,
)
elif i == len(predictors): # this plots the microdistancing forecasts
fig.tight_layout()
fig.savefig(
"figs/mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts/micro_dist.png",
dpi=400,
)
elif i == len(predictors) + 1: # this plots the microdistancing forecasts
fig.tight_layout()
fig.savefig(
"figs/mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts/mask_wearing.png",
dpi=400,
)
elif i == len(predictors) + 2: # finally this plots the delta VE forecasts
fig.tight_layout()
fig.savefig(
"figs/mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts/delta_vaccination.png",
dpi=400,
)
else: # finally this plots the omicron VE forecasts
fig.tight_layout()
fig.savefig(
"figs/mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts/omicron_vaccination.png",
dpi=400,
)
df_out = pd.DataFrame.from_dict(outdata)
df_md = df_out.loc[df_out.type == "md_prop"]
df_masks = df_out.loc[df_out.type == "masks_prop"]
df_out = df_out.loc[df_out.type != "vaccination_delta"]
df_out = df_out.loc[df_out.type != "vaccination_omicron"]
df_out = df_out.loc[df_out.type != "md_prop"]
df_out = df_out.loc[df_out.type != "masks_prop"]
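    # Split the forecast output by type: the Google mobility predictors remain in
    # df_out for the pivots below, micro-distancing and mask-wearing proportions get
    # their own date-by-state tables, and the VE rows are dropped from df_out here.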
df_forecast = pd.pivot_table(
df_out, columns=["type"], index=["date", "state"], values=["mean"]
)
df_std = pd.pivot_table(
df_out, columns=["type"], index=["date", "state"], values=["std"]
)
df_forecast_md = pd.pivot_table(
df_md, columns=["state"], index=["date"], values=["mean"]
)
df_forecast_md_std = pd.pivot_table(
df_md, columns=["state"], index=["date"], values=["std"]
)
df_forecast_masks = pd.pivot_table(
df_masks, columns=["state"], index=["date"], values=["mean"]
)
df_forecast_masks_std = pd.pivot_table(
df_masks, columns=["state"], index=["date"], values=["std"]
)
# align with google order in columns
df_forecast = df_forecast.reindex([("mean", val) for val in predictors], axis=1)
df_std = df_std.reindex([("std", val) for val in predictors], axis=1)
df_forecast.columns = predictors # remove the tuple name of columns
df_std.columns = predictors
df_forecast = df_forecast.reset_index()
df_std = df_std.reset_index()
df_forecast.date = pd.to_datetime(df_forecast.date)
df_std.date = pd.to_datetime(df_std.date)
df_forecast_md = df_forecast_md.reindex([("mean", state) for state in states], axis=1)
df_forecast_md_std = df_forecast_md_std.reindex(
[("std", state) for state in states], axis=1
)
df_forecast_md.columns = states
df_forecast_md_std.columns = states
df_forecast_md = df_forecast_md.reset_index()
df_forecast_md_std = df_forecast_md_std.reset_index()
df_forecast_md.date = pd.to_datetime(df_forecast_md.date)
df_forecast_md_std.date = pd.to_datetime(df_forecast_md_std.date)
df_forecast_masks = df_forecast_masks.reindex(
[("mean", state) for state in states], axis=1
)
df_forecast_masks_std = df_forecast_masks_std.reindex(
[("std", state) for state in states], axis=1
)
df_forecast_masks.columns = states
df_forecast_masks_std.columns = states
df_forecast_masks = df_forecast_masks.reset_index()
df_forecast_masks_std = df_forecast_masks_std.reset_index()
df_forecast_masks.date = pd.to_datetime(df_forecast_masks.date)
df_forecast_masks_std.date = pd.to_datetime(df_forecast_masks_std.date)
df_R = df_google[["date", "state"] + mov_values + [val + "_std" for val in mov_values]]
df_R = pd.concat([df_R, df_forecast], ignore_index=True, sort=False)
df_R["policy"] = (df_R.date >= "2020-03-20").astype("int8")
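    # The policy indicator switches on from 2020-03-20, matching the date used above
    # as the start of the micro-distancing and mask-wearing survey series.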
df_md = pd.concat([prop, df_forecast_md.set_index("date")])
df_masks = pd.concat([masks, df_forecast_masks.set_index("date")])
# now we read in the ve time series and create an adjusted timeseries from March 1st
# that includes no effect prior
vaccination_by_state = pd.read_csv(
results_dir + "adjusted_vaccine_ts_delta" + data_date.strftime("%Y-%m-%d") + ".csv",
parse_dates=["date"],
)
# there are a couple of NA's early on in the time series, but this is likely due to slightly different start dates
vaccination_by_state.fillna(1, inplace=True)
vaccination_by_state = vaccination_by_state[["state", "date", "effect"]]
vaccination_by_state = vaccination_by_state.pivot(
index="state", columns="date", values="effect"
) # Convert to matrix form
# initialise a complete dataframe which will be the full VE timeseries plus the forecasted VE
df_ve_delta = pd.DataFrame()
# loop over states and get the offset components of the full VE
before_vacc_dates = pd.date_range(
start_date, vaccination_by_state.columns[0] - timedelta(days=1), freq="d"
)
# this is just a df of ones with all the missing dates as indices (8 comes from 8 jurisdictions)
before_vacc_Reff_reduction = pd.DataFrame(np.ones(((1, len(before_vacc_dates)))))
before_vacc_Reff_reduction.columns = before_vacc_dates
for state in states:
before_vacc_Reff_reduction.index = {state}
# merge the vaccine data and the 1's dataframes
df_ve_delta[state] = pd.concat(
[before_vacc_Reff_reduction.loc[state].T, vaccination_by_state.loc[state].T]
)
# clip off extra days
df_ve_delta = df_ve_delta[
df_ve_delta.index <= pd.to_datetime(today) + timedelta(days=num_forecast_days)
]
# save the forecasted vaccination line
df_ve_delta.to_csv(
results_dir
+ "forecasted_vaccination_delta"
+ data_date.strftime("%Y-%m-%d")
+ ".csv"
)
vaccination_by_state = pd.read_csv(
results_dir
+ "adjusted_vaccine_ts_omicron"
+ data_date.strftime("%Y-%m-%d")
+ ".csv",
parse_dates=["date"],
)
# there are a couple of NA's early on in the time series, but this is likely due to slightly different start dates
vaccination_by_state.fillna(1, inplace=True)
vaccination_by_state = vaccination_by_state[["state", "date", "effect"]]
vaccination_by_state = vaccination_by_state.pivot(
index="state", columns="date", values="effect"
) # Convert to matrix form
# initialise a complete dataframe which will be the full VE timeseries plus the forecasted VE
df_ve_omicron = pd.DataFrame()
# loop over states and get the offset components of the full VE
before_vacc_dates = pd.date_range(
start_date, pd.to_datetime(omicron_start_date) - timedelta(days=1), freq="d"
)
# this is just a df of ones with all the missing dates as indices (8 comes from 8 jurisdictions)
before_vacc_Reff_reduction = pd.DataFrame(np.ones(((1, len(before_vacc_dates)))))
before_vacc_Reff_reduction.columns = before_vacc_dates
for state in states:
before_vacc_Reff_reduction.index = {state}
# merge the vaccine data and the 1's dataframes
df_ve_omicron[state] = pd.concat(
[
before_vacc_Reff_reduction.loc[state].T,
vaccination_by_state.loc[state][
vaccination_by_state.loc[state].index
>= pd.to_datetime(omicron_start_date)
],
]
)
df_ve_omicron = df_ve_omicron[
df_ve_omicron.index <= pd.to_datetime(today) + timedelta(days=num_forecast_days)
]
# save the forecasted vaccination line
df_ve_omicron.to_csv(
results_dir
+ "forecasted_vaccination_omicron"
+ data_date.strftime("%Y-%m-%d")
+ ".csv"
)
print("============")
print("Plotting forecasted estimates")
print("============")
expo_decay = True
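# with expo_decay the micro-distancing multiplier is (1 + theta_md) ** (-prop),
# i.e. a power-law decay in the self-reported distancing proportion; otherwise
# a logistic form 2 * expit(-theta_md * prop) is used (see the branch below)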
theta_md = np.tile(df_samples["theta_md"].values, (df_md["NSW"].shape[0], 1))
fig, ax = plt.subplots(figsize=(12, 9), nrows=4, ncols=2, sharex=True, sharey=True)
for i, state in enumerate(plot_states):
# np.random.normal(df_md[state].values, df_md_std.values)
prop_sim = df_md[state].values
if expo_decay:
md = ((1 + theta_md).T ** (-1 * prop_sim)).T
else:
md = 2 * expit(-1 * theta_md * prop_sim[:, np.newaxis])
row = i // 2
col = i % 2
ax[row, col].plot(
df_md[state].index, np.median(md, axis=1), label="Microdistancing"
)
ax[row, col].fill_between(
df_md[state].index,
np.quantile(md, 0.25, axis=1),
np.quantile(md, 0.75, axis=1),
label="Microdistancing",
alpha=0.4,
color="C0",
)
ax[row, col].fill_between(
df_md[state].index,
np.quantile(md, 0.05, axis=1),
np.quantile(md, 0.95, axis=1),
label="Microdistancing",
alpha=0.4,
color="C0",
)
ax[row, col].set_title(state)
ax[row, col].tick_params("x", rotation=45)
ax[row, col].set_xticks(
[df_md[state].index.values[-n_forecast - extra_days_md]],
minor=True,
)
ax[row, col].xaxis.grid(which="minor", linestyle="-.", color="grey", linewidth=1)
fig.text(
0.03,
0.5,
"Multiplicative effect \n of micro-distancing $M_d$",
ha="center",
va="center",
rotation="vertical",
fontsize=20,
)
fig.text(0.5, 0.04, "Date", ha="center", va="center", fontsize=20)
plt.tight_layout(rect=[0.05, 0.04, 1, 1])
fig.savefig(
"figs/"
+ "mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts/md_factor.png",
dpi=144,
)
theta_masks = np.tile(df_samples["theta_masks"].values, (df_masks["NSW"].shape[0], 1))
fig, ax = plt.subplots(figsize=(12, 9), nrows=4, ncols=2, sharex=True, sharey=True)
for i, state in enumerate(plot_states):
# np.random.normal(df_md[state].values, df_md_std.values)
masks_prop_sim = df_masks[state].values
if expo_decay:
mask_wearing_factor = ((1 + theta_masks).T ** (-1 * masks_prop_sim)).T
else:
mask_wearing_factor = 2 * expit(
-1 * theta_masks * masks_prop_sim[:, np.newaxis]
)
row = i // 2
col = i % 2
ax[row, col].plot(
df_masks[state].index,
np.median(mask_wearing_factor, axis=1),
label="Microdistancing",
)
ax[row, col].fill_between(
df_masks[state].index,
np.quantile(mask_wearing_factor, 0.25, axis=1),
np.quantile(mask_wearing_factor, 0.75, axis=1),
label="Microdistancing",
alpha=0.4,
color="C0",
)
ax[row, col].fill_between(
df_masks[state].index,
np.quantile(mask_wearing_factor, 0.05, axis=1),
np.quantile(mask_wearing_factor, 0.95, axis=1),
label="Microdistancing",
alpha=0.4,
color="C0",
)
ax[row, col].set_title(state)
ax[row, col].tick_params("x", rotation=45)
ax[row, col].set_xticks(
[df_masks[state].index.values[-n_forecast - extra_days_masks]], minor=True
)
ax[row, col].xaxis.grid(which="minor", linestyle="-.", color="grey", linewidth=1)
fig.text(
0.03,
0.5,
"Multiplicative effect \n of mask-wearing $M_d$",
ha="center",
va="center",
rotation="vertical",
fontsize=20,
)
fig.text(0.5, 0.04, "Date", ha="center", va="center", fontsize=20)
plt.tight_layout(rect=[0.05, 0.04, 1, 1])
fig.savefig(
"figs/"
+ "mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts/mask_wearing_factor.png",
dpi=144,
)
df_R = df_R.sort_values("date")
# samples = df_samples.sample(n_samples) # test on sample of 2
# keep all samples
samples = df_samples.iloc[:mob_samples, :]
# for strain in ("Delta", "Omicron"):
# samples = df_samples
# flags for advanced scenario modelling
advanced_scenario_modelling = False
save_for_SA = False
# since this can be useful, predictor ordering is:
# ['retail_and_recreation_7days', 'grocery_and_pharmacy_7days', 'parks_7days', 'transit_stations_7days', 'workplaces_7days']
typ = "R_L"
forecast_type = ["R_L"]
for strain in ("Delta", "Omicron"):
print("============")
print("Calculating", strain, "TP")
print("============")
state_Rs = {
"state": [],
"date": [],
"type": [],
"median": [],
"lower": [],
"upper": [],
"bottom": [],
"top": [],
"mean": [],
"std": [],
}
ban = "2020-03-20"
    # VIC and NSW allowed gatherings of up to 20 people; other jurisdictions had different limits
new_pol = "2020-06-01"
expo_decay = True
# start and end date for the third wave
# Subtract 10 days to avoid right truncation
third_end_date = data_date - pd.Timedelta(days=truncation_days)
typ_state_R = {}
mob_forecast_date = df_forecast.date.min()
state_key = {
"ACT": "1",
"NSW": "2",
"NT": "3",
"QLD": "4",
"SA": "5",
"TAS": "6",
"VIC": "7",
"WA": "8",
}
total_N_p_third_omicron = 0
for v in third_date_range.values():
tmp = sum(v >= pd.to_datetime(omicron_start_date))
# add a plus one for inclusion of end date (the else 0 is due to QLD having no Omicron potential)
total_N_p_third_omicron += tmp if tmp > 0 else 0
state_R = {}
for (kk, state) in enumerate(states):
# sort df_R by date so that rows are dates. rows are dates, columns are predictors
df_state = df_R.loc[df_R.state == state]
dd = df_state.date
post_values = samples[predictors].values.T
prop_sim = df_md[state].values
# grab vaccination data
vacc_ts_delta = df_ve_delta[state]
vacc_ts_omicron = df_ve_omicron[state]
        # tile the sampled theta values so they match the (T, mob_samples) shape used below
theta_md = np.tile(samples["theta_md"].values, (df_state.shape[0], 1))
theta_masks = np.tile(samples["theta_masks"].values, (df_state.shape[0], 1))
r = samples["r[" + str(kk + 1) + "]"].values
tau = samples["tau[" + str(kk + 1) + "]"].values
m0 = samples["m0[" + str(kk + 1) + "]"].values
m1 = samples["m1[" + str(kk + 1) + "]"].values
# m1 = 1.0
        md = ((1 + theta_md).T ** (-1 * prop_sim)).T
        masks_prop_sim = df_masks[state].values
        masks = ((1 + theta_masks).T ** (-1 * masks_prop_sim)).T
third_states_indices = {
state: index + 1 for (index, state) in enumerate(third_states)
}
third_days = {k: v.shape[0] for (k, v) in third_date_range.items()}
third_days_cumulative = np.append(
[0], np.cumsum([v for v in third_days.values()])
)
vax_idx_ranges = {
k: range(third_days_cumulative[i], third_days_cumulative[i + 1])
for (i, k) in enumerate(third_days.keys())
}
third_days_tot = sum(v for v in third_days.values())
# get the sampled vaccination effect (this will be incomplete as it's only over the fitting period)
sampled_vax_effects_all = samples[
["ve_delta[" + str(j + 1) + "]" for j in range(third_days_tot)]
].T
vacc_tmp = sampled_vax_effects_all.iloc[vax_idx_ranges[state], :]
        # now we layer in the posterior vaccine multiplier effect which will be a (T, mob_samples) array
# get before and after fitting and tile them
vacc_ts_data_before = pd.concat(
[vacc_ts_delta.loc[vacc_ts_delta.index < third_date_range[state][0]]]
* mob_samples,
axis=1,
).to_numpy()
vacc_ts_data_after = pd.concat(
[vacc_ts_delta.loc[vacc_ts_delta.index > third_date_range[state][-1]]]
* mob_samples,
axis=1,
).to_numpy()
# merge in order
vacc_ts_delta = np.vstack(
[vacc_ts_data_before, vacc_tmp, vacc_ts_data_after]
)
# construct a range of dates for omicron which starts at the maximum of the start date for that state or the Omicron start date
third_omicron_date_range = {
k: pd.date_range(
start=max(v[0], pd.to_datetime(omicron_start_date)), end=v[-1]
).values
for (k, v) in third_date_range.items()
}
third_omicron_days = {
k: v.shape[0] for (k, v) in third_omicron_date_range.items()
}
third_omicron_days_cumulative = np.append(
[0], np.cumsum([v for v in third_omicron_days.values()])
)
omicron_ve_idx_ranges = {
k: range(
third_omicron_days_cumulative[i],
third_omicron_days_cumulative[i + 1],
)
for (i, k) in enumerate(third_omicron_days.keys())
}
third_omicron_days_tot = sum(v for v in third_omicron_days.values())
# get the sampled vaccination effect (this will be incomplete as it's only over the fitting period)
sampled_vax_effects_all = (
samples[
["ve_omicron[" + str(j + 1) + "]" for j in range(third_omicron_days_tot)]
].T
)
vacc_tmp = sampled_vax_effects_all.iloc[omicron_ve_idx_ranges[state], :]
        # now we layer in the posterior vaccine multiplier effect which will be a (T, mob_samples) array
# get before and after fitting and tile them
vacc_ts_data_before = pd.concat(
[
vacc_ts_omicron.loc[
vacc_ts_omicron.index < third_omicron_date_range[state][0]
]
]
* mob_samples,
axis=1,
).to_numpy()
vacc_ts_data_after = pd.concat(
[
vacc_ts_omicron.loc[
vacc_ts_omicron.index > third_date_range[state][-1]
]
]
* mob_samples,
axis=1,
).to_numpy()
# merge in order
vacc_ts_omicron = np.vstack(
[vacc_ts_data_before, vacc_tmp, vacc_ts_data_after]
)
# setup some variables for handling the omicron starts
third_states_indices = {
state: index + 1 for (index, state) in enumerate(third_states)
}
omicron_start_day = (
pd.to_datetime(omicron_start_date) - pd.to_datetime(start_date)
).days
days_into_omicron = np.cumsum(
np.append(
[0],
[
                        (v >= pd.to_datetime(omicron_start_date)).sum()
                        for v in third_date_range.values()
                    ],
                )
            )
import streamlit as st
import warnings # hides warning messages
warnings.filterwarnings('ignore')
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
#from imblearn.over_sampling import SMOTE
#import itertools
import math
from sklearn import metrics
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
from sklearn.linear_model import LogisticRegression
st.set_option('deprecation.showPyplotGlobalUse', False)
SMALL_SIZE = 8
MEDIUM_SIZE = 10
BIGGER_SIZE = 12
#########
#st.title("------------ Model Background ----------------")
st.title("Sovereign Risk Model: Calculating the Probability that a Country will Default")
st.title("-------------------------------------------")
st.title("Model Background")
st.write("""
## Scroll down for web app functionality
""")
st.title("-------------------------------------------")
from PIL import Image
img = Image.open("world_map.jpg")
st.image(img, width = 700, caption = "Map of all countries")
st.write(""" # """)
st.write("""
# Countries have defaulted over the years
""")
country_name = st.sidebar.selectbox("Select Country", ('Afghanistan', 'Albania', 'Algeria', 'American Samoa', 'Andorra',
'Angola', 'Antigua and Barbuda', 'Argentina', 'Armenia', 'Aruba',
'Australia', 'Austria', 'Azerbaijan', 'Bahamas The', 'Bahrain',
'Bangladesh', 'Barbados', 'Belarus', 'Belgium', 'Belize', 'Benin',
'Bermuda', 'Bhutan', 'Bolivia', 'Bosnia and Herzegovina',
'Botswana', 'Brazil', 'Brunei Darussalam', 'Bulgaria',
'Burkina Faso', 'Burundi', 'Cambodia', 'Cameroon', 'Canada',
'Cape Verde', 'Cayman Islands', 'Central African Republic', 'Chad',
'Chile', 'China', 'Colombia', 'Comoros', 'Congo Dem.Rep.',
'Congo Rep.', 'Costa Rica', "Cote d'Ivoire", 'Croatia', 'Cuba',
'Curacao', 'Cyprus', 'Czech Republic', 'Denmark', 'Djibouti',
'Dominica', 'Dominican Republic', 'Ecuador', 'Egypt Arab Rep.',
'El Salvador', 'Equatorial Guinea', 'Eritrea', 'Estonia',
'Ethiopia', 'Fiji', 'Finland', 'France', 'French Polynesia',
'Gabon', 'Gambia The', 'Georgia', 'Germany', 'Ghana', 'Greece',
'Greenland', 'Grenada', 'Guam', 'Guatemala', 'Guinea',
'Guinea-Bissau', 'Guyana', 'Haiti', 'Honduras',
'Hong Kong SARChina', 'Hungary', 'Iceland', 'India', 'Indonesia',
'Iran Islamic Rep.', 'Iraq', 'Ireland', 'Isle of Man', 'Israel',
'Italy', 'Jamaica', 'Japan', 'Jordan', 'Kazakhstan', 'Kenya',
'Kiribati', 'Korea Dem.Rep.', 'Korea Rep.', 'Kosovo', 'Kuwait',
'Kyrgyz Republic', 'Lao PDR', 'Latvia', 'Lebanon', 'Lesotho',
'Liberia', 'Libya', 'Liechtenstein', 'Lithuania', 'Luxembourg',
'Macao SAR China', 'Macedonia FYR', 'Madagascar', 'Malawi',
'Malaysia', 'Maldives', 'Mali', 'Malta', 'Marshall Islands',
'Mauritania', 'Mauritius', 'Mexico', 'Micronesia Fed.Sts.',
'Moldova', 'Monaco', 'Mongolia', 'Montenegro', 'Morocco',
'Mozambique', 'Myanmar', 'Namibia', 'Nepal', 'Netherlands',
'New Caledonia', 'New Zealand', 'Nicaragua', 'Niger', 'Nigeria',
'Northern Mariana Islands', 'Norway', 'Oman', 'Pakistan', 'Palau',
'Panama', 'Papua New Guinea', 'Paraguay', 'Peru', 'Philippines',
'Poland', 'Portugal', 'PuertoRico', 'Qatar', 'Romania',
'Russian Federation', 'Rwanda', 'Samoa', 'SanMarino',
'Sao Tome and Principe', 'SaudiArabia', 'Senegal', 'Serbia',
'Seychelles', 'SierraLeone', 'Singapore',
'Sint Maarten (Dutchpart)', 'Slovak Republic', 'Slovenia',
'Solomon Islands', 'Somalia', 'South Africa', 'South Sudan',
'Spain', 'Sri Lanka', 'St.Kitts and Nevis', 'St.Lucia',
'St.Martin (Frenchpart)', 'St.Vincent and the Grenadines', 'Sudan',
'Suriname', 'Swaziland', 'Sweden', 'Switzerland',
'Syrian Arab Republic', 'Tajikistan', 'Tanzania', 'Thailand',
'Timor-Leste', 'Togo', 'Tonga', 'Trinidad and Tobago', 'Tunisia',
'Turkey', 'Turkmenistan', 'Turks and Caicos Islands', 'Tuvalu',
'Uganda', 'Ukraine', 'United Arab Emirates', 'United Kingdom',
'United States', 'Uruguay', 'Uzbekistan', 'Vanuatu',
'Venezuela RB', 'Vietnam', 'West Bank and Gaza', 'Yemen Rep.',
'Zambia', 'Zimbabwe', 'Taiwan'
))
#st.write(country_name)
#classifier_name = st.sidebar.selectbox("Select Classifier",("XGBoost", "Random Forest", "Logistic Regression"))
classifier_name = "XGBoost"
#classifier_name = st.sidebar.selectbox("Select Classifier",("Logistic Regression", "Random Forest", "XGBoost", "Naive Bayes", "KNN"))
#classifier_name = st.sidebar.selectbox("Select Classifier",("Logistic Regression", "Random Forest", "XGBoost"))
#classifier_name = "Logistic Regression"
#classifier_name = "Random Forest"
#Slider
#future_year = st.sidebar.slider("Year", 2021, 2025)
##########
# number of defaults per year
h = pd.read_csv("Data_number_of_defaults.csv")
#In 1
from __future__ import print_function
from __future__ import division
import pandas as pd
import numpy as np
# from matplotlib import pyplot as plt
# import seaborn as sns
# from sklearn.model_selection import train_test_split
import statsmodels.api as sm
# just for the sake of this blog post!
from warnings import filterwarnings
filterwarnings('ignore')
#In 2
# load the provided data
train_features = pd.read_csv('data/dengue_features_train.csv',
index_col=[0,1,2])
train_labels = pd.read_csv('data/dengue_labels_train.csv',
index_col=[0,1,2])
#In 3
# Separate data for San Juan
sj_train_features = train_features.loc['sj']
sj_train_labels = train_labels.loc['sj']
#In 6
# Remove `week_start_date` string.
sj_train_features.drop('week_start_date', axis=1, inplace=True)
#In 7
# Null check
pd.isnull(sj_train_features).any()
#In 9
sj_train_features.fillna(method='ffill', inplace=True)
#In 13
sj_train_features['total_cases'] = sj_train_labels.total_cases
#In 14
# compute the correlations
sj_correlations = sj_train_features.corr()
#In 19
def preprocess_data(data_path, labels_path=None):
# load data and set index to city, year, weekofyear
    df = pd.read_csv(data_path, index_col=[0, 1, 2])
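
    # NOTE: the source is truncated at this point. The remainder below is a
    # hedged sketch that completes the helper using the same steps shown
    # earlier in this file (forward-fill of missing values, optional label
    # join, per-city selection); the author's exact body may differ.
    df.fillna(method='ffill', inplace=True)
    if labels_path:
        labels = pd.read_csv(labels_path, index_col=[0, 1, 2])
        df = df.join(labels)
    # select a single city as above, e.g. df.loc['sj'] for San Juan
    return df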
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 28 22:42:57 2017
@author: 坤
"""
import os
import numpy as np
import pandas as pa
import math as Math
import sys
import pymysql
import dateutil
'''
Filter small earthquakes that occur near large earthquakes.
Parameters: data directory, time window (days), and radius (km).
'''
def findEqM(df,minz,maxz):
find=df[df['magnitude']>=int(minz)]
return find[find['magnitude']<int(maxz)]
def getRadian(degree):
return degree * Math.pi / 180.0
def distance(lat1,lon1,lat2,lon2):
EARTH_RADIUS = 6378.137
radLat1 = getRadian(lat1)
radLat2 = getRadian(lat2)
    a = radLat1 - radLat2  # latitude difference between the two points
    b = getRadian(lon1) - getRadian(lon2)  # longitude difference between the two points
a=np.array(a)
b=np.array(b)
s = 2 * np.arcsin(np.sqrt(np.power(np.sin(a / 2), 2) + np.cos(radLat1)* np.cos(radLat2) * np.power(np.sin(b / 2), 2)));
s = s * EARTH_RADIUS
    return s
def flter(data,Bdata,time,radii):
try:
MinTime=data.eqtime-dateutil.relativedelta.relativedelta(days=time)
MaxTime=data.eqtime#+dateutil.relativedelta.relativedelta(days=time)
BdataCopy=Bdata.copy()
BdataCopy=BdataCopy[(BdataCopy.eqtime>=MinTime)&(BdataCopy.eqtime<=MaxTime)]
BdataCopy.depth=0
BdataCopy.depth=distance(data.latitude,data.longitude,BdataCopy.loc[:,'latitude'],BdataCopy.loc[:,'longitude'])
BdataCopy=BdataCopy[BdataCopy.depth<=radii]
Bdata.drop(BdataCopy.index.tolist(),axis=0,inplace=True)
except BaseException:
        BdataCopy = pa.DataFrame()
from __future__ import division, print_function
import time
from IPython.display import display
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn import metrics
from sklearn.externals import joblib
from sklearn.covariance import EllipticEnvelope
from sklearn.ensemble import IsolationForest
from sklearn.neighbors import LocalOutlierFactor
from sklearn.svm import OneClassSVM
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
from matplotlib_venn import venn3
from mpl_toolkits.mplot3d import Axes3D
# -----------------------------BASICS-----------------------------------
def readCSV(filename):
df = pd.read_csv(filename)
df = df.set_index(pd.DatetimeIndex(df['timestamp']))
df = df.drop(['timestamp'], axis=1)
return df
def error(y_test, y_pred):
mse = metrics.mean_squared_error(y_test, y_pred)
rmse = np.sqrt(mse)
print('MSE: ', mse, '\nRMSE: ', rmse)
# -------------------------DATA ENRICHMENT------------------------------
def createValidationSet(df):
rand_values = np.random.random(df.shape)
cond = (rand_values < .1)
df_validation = df.mask(cond)
df_validation['value set to NaN'] = cond
return df_validation
def interpolate(df,method,l,d):
if d != None:
interpolations = pd.DataFrame(df.interpolate(method='polynomial',
limit=l,
order = int(d),
limit_area='inside'))
else:
interpolations = pd.DataFrame(df.interpolate(method=method,
limit=l,
limit_area='inside'))
interpolations.columns = [method]
return interpolations
def dropNotInterpolatedBlancs(df):
'''
    reload interpolated_sensors.csv and drop the blanks that the
    interpolation could not fill
    '''
    df = readCSV('interpolated_sensors.csv')
print('data after interpolation')
print('# of records: '+ str(df.shape[0]) +
'\t # of variables: ' + str(df.shape[1]))
print(df.isnull().sum())
df = df.dropna()
print('data after drop blancs that interpolation can not solve')
print('# of records: '+ str(df.shape[0]) +
'\t # of variables: ' + str(df.shape[1]))
print(df.isnull().sum())
return df
#------------------------------PREPARE FOR ML---------------------------
def splitData(df,test_volume):
df_train, df_test = train_test_split(df, test_size=test_volume)
df_train.is_copy = False
df_test.is_copy = False
return df_train,df_test
def fitStandardScaler(df):
scaler = StandardScaler()
scaler.fit(df.copy())
joblib.dump(scaler, 'StandardScaler/StandardScaler.sav')
print('\n StandardScaler is ready to normalize your data,')
print('find the model at StandardScaler/StandardScaler.sav')
def standarizing(df):
scaler = joblib.load('StandardScaler/StandardScaler.sav')
std = scaler.transform(df)
df_std = pd.DataFrame(data = std,
index = df.index,
columns = df.columns)
return df_std
def inverseStandarizing(df_std):
scaler = joblib.load('StandardScaler/StandardScaler.sav')
original = scaler.inverse_transform(df_std)
df_original = pd.DataFrame(data = original,
index = df_std.index,
columns = df_std.columns)
return df_original
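# Typical usage (sketch): fitStandardScaler(df_train) once, then standarizing(df)
# for each split, and inverseStandarizing(df_std) to map values back to the
# original sensor units.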
# ---------------------DIMENSIONALITY REDUCTION-------------------------
def pca(df_train, df_test):
pca = PCA(n_components=2)
pca.fit(df_train)
principalComponents = pca.transform(df_train)
df_pca_train = pd.DataFrame(data = principalComponents,
columns = ['pc1', 'pc2'],
index = df_train.index)
principalComponents = pca.transform(df_test)
df_pca_test = pd.DataFrame(data = principalComponents,
columns = ['pc1', 'pc2'],
index = df_test.index)
return df_pca_train, df_pca_test
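# Note: PCA is fit on the training split only and then applied to the test
# split, so no test information leaks into the learned projection.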
def tsne(df_train, df_test):
print('start TSNE')
time_start = time.time()
df = pd.concat([df_train, df_test])
tsne = TSNE(n_components=2, verbose=1, n_iter=1000, perplexity=35)
principalComponents = tsne.fit_transform(df)
print ('t-SNE done! Time elapsed: {} seconds'.format(
time.time()-time_start))
df_tsne = pd.DataFrame(data = principalComponents,
columns = ['c1', 'c2'],
index = df.index)
df_tsne_train = df_tsne[:len(df_train)]
df_tsne_test = df_tsne[len(df_test):]
return df_tsne_train, df_tsne_test
# -------------------------MACHINE LEARNING-----------------------------
def instanceModel(df_train, folder, model_file, c):
if folder == 'IsolationForest':
model = IsolationForest(n_estimators=100, verbose=0,
contamination=c, max_samples =0.03,
max_features = 1.0)
elif folder == 'EllipticEnvelope':
model = EllipticEnvelope(contamination=c,
store_precision=False)
elif folder == 'LocalOutlierFactor':
k = int(len(df_train)*c)
model = LocalOutlierFactor(n_neighbors=k, algorithm='auto',
leaf_size=30, metric='euclidean',
metric_params=None,
contamination=c)
elif folder == 'OneClassSVM':
model = OneClassSVM(kernel='rbf', degree=3, nu=c,
cache_size=500, verbose=0)
model.fit(df_train)
joblib.dump(model, folder + '/' + model_file)
    print('\n'+ folder + ' trained successfully, saved as ' + model_file)
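# Example call (sketch; the file name and contamination value are placeholders):
#   instanceModel(df_train, 'IsolationForest', 'IsolationForest.sav', c=0.01)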
def predict(df, folder, pred_file, clf):
if folder == 'LocalOutlierFactor':
predictions = clf._predict(df)
else:
predictions = clf.predict(df)
predictions = pd.DataFrame(data = predictions, columns = ['outliers'],
index = df.index)
predictions.to_csv(folder + '/' + pred_file)
    print('\nPredictions done successfully, saved as '
+ folder + '/' + pred_file)
def ensemble(algorithms,c,dataset):
print('\n############## ENSEMBLE CALCULATIONS ##################')
    df_ensemble = pd.DataFrame()
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 8 19:52:14 2018
@author: benji
"""
# Demo file for Spyder Tutorial
# <NAME>, University of Southampton, UK
import pandas as pd
def hello():
"""Print "Hello World" and return None"""
print("Hello World")
# main program starts here
def bye():
"Print goodbye, return NOne"
print('goodbye')
print('idk')
hello()
print('hi')
print('this is weird')
city_names = pd.Series(['San Francisco', 'San Jose', 'Sacramento'])
import pandas as pd
import numpy as np
import os
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.metrics import precision_recall_fscore_support
import xgboost as xgb
from tqdm import tqdm
import argparse
from pk_classifier.bootstrap import simple_tokenizer, TextSelector, f1_eval, update, read_in_bow, make_ids_per_test, \
split_train_val_test
from pk_classifier.stats import plot_df
def process_them(input_tuple, rounds, test_prop, out_path_results, out_path_figure, out_path_bootstrap, use_idf):
all_features, all_labs = read_in_bow(input_tuple[0], input_tuple[1])
ids_per_test = make_ids_per_test(inp_df=all_labs)
all_metrics_test = []
optimal_epochs = []
median_optimal_epochs = []
median_f1s = []
a = 0
for round_i in tqdm(np.arange(rounds)):
rd_seed = 10042006 + round_i
per = test_prop
# =====================================================================================================
# Make splits: 60% train, 20% validation, 20% temp test
# ======================================================================================================
x_train, x_val, x_test, y_train, y_val, y_test, pmids_train, pmids_val, pmids_test = \
split_train_val_test(features=all_features, labels=all_labs, test_size=per, seed=rd_seed)
# =====================================================================================================
# Decide max number of iterations using early stopping criteria on the validation set
# ======================================================================================================
balancing_factor = y_train.value_counts()["Not Relevant"] / y_train.value_counts()["Relevant"]
if round_i == 0:
print("Training with--- ", y_train.value_counts()["Relevant"], " ---Relevant instances")
encoder = CountVectorizer(tokenizer=simple_tokenizer, ngram_range=(1, 1), lowercase=False, preprocessor=None,
min_df=2)
normalizer = TfidfTransformer(norm="l1", use_idf=use_idf)
decoder = xgb.XGBClassifier(random_state=rd_seed, n_jobs=-1, n_estimators=2000, objective='binary:logistic',
max_depth=4, learning_rate=0.1, colsample_bytree=1.,
scale_pos_weight=balancing_factor, nthread=-1)
# Define encoding pipeline
enc_pip = Pipeline([
('encoder', FeatureUnion(transformer_list=[
('bow', Pipeline([
('selector', TextSelector('BoW_Ready', emb=False)),
('vect', encoder),
('norm', normalizer)
])
)
]))
])
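        # The pipeline above builds a unigram bag-of-words from the 'BoW_Ready'
        # column (terms must appear in at least 2 documents) and applies L1
        # term-frequency normalisation (IDF weighting only when use_idf=True).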
x_train_features = enc_pip.fit_transform(x_train)
x_val_features = enc_pip.transform(x_val)
if a == 0:
print("Using: ", x_train_features.shape[1], "features")
a = 1
eval_set = [(x_train_features, y_train), (x_val_features, y_val)]
decoder.fit(x_train_features, y_train, eval_set=eval_set, verbose=False,
early_stopping_rounds=200, eval_metric=f1_eval)
optimal_epochs.append(decoder.best_ntree_limit)
median_epochs = np.median(optimal_epochs)
median_optimal_epochs.append(median_epochs)
if round_i in np.arange(0, rounds, 20):
print("Median number of epochs:", median_epochs)
# =====================================================================================================
# Apply predictions to the temp test set
# ======================================================================================================
x_test_encoded = enc_pip.transform(x_test)
pred_test = decoder.predict(x_test_encoded)
        test_results = pd.DataFrame(pred_test == y_test.values, columns=['Result'])
import datetime
import logging
import click
import pandas as pd
import tushare as ts
from tusharedb import config, db, util
api = ts.pro_api(token=config.TS_TOKEN)
logger = logging.getLogger(__name__)
def sync_daily():
sate = db.StateDb()
dates = [date for date in sate.list_daily()]
apis = ['daily', 'adj_factor', 'daily_basic']
# apis = ['daily_basic', ]
# methods = {'daily': db.dbs[config.DT_DAILY_BFQ],
# 'adj_factor': db.dbs[config.DT_DAILY_ADJFACTOR], 'daily_basic': db.dbs[config.DT_DAILY_BASIC]}
with click.progressbar(pd.date_range(config.SYNC_START, datetime.datetime.now())) as bar:
for date in bar:
_date = date.strftime('%Y%m%d')
# 重新收集近2天数据
if _date in dates and date < (datetime.datetime.now() - datetime.timedelta(days=2)):
continue
for api_name in apis:
logger.debug(
'fetch data from tushare, api_name: %s, trade_date: %s', api_name, _date)
util.speed_it()
df = api.query(api_name, trade_date=_date)
if df.empty:
continue
else:
api_type = config.API_TYPE_TRADE_DATE
dbcls_config = config.get_write_api_db(api_name, api_type)
dbcls = db.dbs[dbcls_config]
dbcls(_date).save(df)
sate.append_daily(_date)
def _sync_code(start, end, apis, recent, ok_codes, callback, ):
codes = api.stock_basic(list_status='L',
fields='ts_code')
codes = codes.ts_code
sate = db.StateDb()
# codes = ['601600.SH', '601601.SH']
with click.progressbar(codes) as bar:
for code in bar:
if code in ok_codes:
logger.debug('pass %s', code)
continue
for api_name in apis:
dfs = []
start_date = start
config_end_date = end
for end_date in pd.date_range(start=start_date, end=config_end_date, freq='5Y'):
util.speed_it()
logger.debug('fetch data from tushare, api_name: %s, ts_code:%s, start_date: %s, end_date:%s',
api_name, code, start_date.strftime('%Y%m%d'), end_date.strftime('%Y%m%d'))
dfs.append(api.query(api_name, ts_code=code,
start_date=start_date.strftime('%Y%m%d'), end_date=end_date.strftime('%Y%m%d')))
start_date = end_date + datetime.timedelta(days=1)
logger.debug('fetch data from tushare, api_name: %s, ts_code:%s, start_date: %s, end_date:%s',
api_name, code, start_date.strftime('%Y%m%d'), config_end_date.strftime('%Y%m%d'))
util.speed_it()
dfs.append(api.query(api_name, ts_code=code,
start_date=start_date.strftime('%Y%m%d'), end_date=config_end_date.strftime('%Y%m%d')))
df = pd.concat(dfs)
if df.empty:
continue
else:
db_config = config.get_write_api_db(
api_name, config.API_TYPE_TS_CODE, recent=recent)
dbcls = db.dbs[db_config]
dbcls(code).delete()
dbcls(code).save(df)
callback(code)
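# _sync_code pulls each ts_code in roughly 5-year chunks and concatenates the
# results before saving; the chunking is presumably to stay under the per-call
# row limit of the tushare API.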
def sync_code_history():
sate = db.StateDb()
start = datetime.datetime.strptime(
config.SYNC_START, '%Y-%m-%d')
end = datetime.datetime.strptime(
sate.sync_code_history_end, '%Y-%m-%d')
apis = ['daily', 'adj_factor', 'daily_basic']
ok_codes = [date for date in sate.list_code()]
_sync_code(start, end, apis, False, ok_codes, sate.append_code)
def sync_code_recent(nocache=True):
sate = db.StateDb()
start = datetime.datetime.strptime(
sate.sync_code_history_end, '%Y-%m-%d') + datetime.timedelta(days=1)
end = datetime.datetime.now()
apis = ['daily', 'adj_factor', 'daily_basic']
if nocache:
logging.info('try to delete')
sate.delete_recentcode()
ok_codes = [date for date in sate.list_recentcode()]
_sync_code(start, end, apis, True, ok_codes, sate.append_recentcode)
# 正常结束,去掉cache记录
sate.delete_recentcode()
def sync_index():
api_names = ['index_daily', ]
codes = ['399001.SZ', '399005.SZ', '399006.SZ', '000001.SH']
# codes = ['601600.SH', '601601.SH']
start = datetime.datetime.strptime(config.SYNC_START, '%Y-%m-%d')
end = datetime.datetime.now()
with click.progressbar(codes) as bar:
for code in bar:
for api_name in api_names:
dfs = []
start_date = start
config_end_date = end
for end_date in pd.date_range(start=start_date, end=config_end_date, freq='5Y'):
logger.debug('%s,%s,%s,%s', api_name,
code, start_date, end_date)
util.speed_20_per_min()
dfs.append(api.query(api_name, ts_code=code,
start_date=start_date.strftime('%Y%m%d'), end_date=end_date.strftime('%Y%m%d')))
start_date = end_date + datetime.timedelta(days=1)
logger.debug('%s,%s,%s,%s', api_name,
code, start_date, config_end_date)
util.speed_20_per_min()
dfs.append(api.query(api_name, ts_code=code,
start_date=start_date.strftime('%Y%m%d'), end_date=config_end_date.strftime('%Y%m%d')))
df = pd.concat(dfs)
df = df.sort_values('trade_date')
                df.index = pd.DatetimeIndex(df.trade_date)
import pytest
import numpy as np
import pandas as pd
from pandas import Categorical, Series, CategoricalIndex
from pandas.core.dtypes.concat import union_categoricals
from pandas.util import testing as tm
class TestUnionCategoricals(object):
def test_union_categorical(self):
# GH 13361
data = [
(list('abc'), list('abd'), list('abcabd')),
([0, 1, 2], [2, 3, 4], [0, 1, 2, 2, 3, 4]),
([0, 1.2, 2], [2, 3.4, 4], [0, 1.2, 2, 2, 3.4, 4]),
(['b', 'b', np.nan, 'a'], ['a', np.nan, 'c'],
['b', 'b', np.nan, 'a', 'a', np.nan, 'c']),
(pd.date_range('2014-01-01', '2014-01-05'),
pd.date_range('2014-01-06', '2014-01-07'),
pd.date_range('2014-01-01', '2014-01-07')),
(pd.date_range('2014-01-01', '2014-01-05', tz='US/Central'),
pd.date_range('2014-01-06', '2014-01-07', tz='US/Central'),
pd.date_range('2014-01-01', '2014-01-07', tz='US/Central')),
(pd.period_range('2014-01-01', '2014-01-05'),
pd.period_range('2014-01-06', '2014-01-07'),
pd.period_range('2014-01-01', '2014-01-07')),
]
for a, b, combined in data:
for box in [Categorical, CategoricalIndex, Series]:
result = union_categoricals([box(Categorical(a)),
box(Categorical(b))])
expected = Categorical(combined)
tm.assert_categorical_equal(result, expected,
check_category_order=True)
# new categories ordered by appearance
s = Categorical(['x', 'y', 'z'])
s2 = Categorical(['a', 'b', 'c'])
result = union_categoricals([s, s2])
expected = Categorical(['x', 'y', 'z', 'a', 'b', 'c'],
categories=['x', 'y', 'z', 'a', 'b', 'c'])
tm.assert_categorical_equal(result, expected)
s = Categorical([0, 1.2, 2], ordered=True)
s2 = Categorical([0, 1.2, 2], ordered=True)
result = union_categoricals([s, s2])
expected = Categorical([0, 1.2, 2, 0, 1.2, 2], ordered=True)
tm.assert_categorical_equal(result, expected)
# must exactly match types
s = Categorical([0, 1.2, 2])
s2 = Categorical([2, 3, 4])
msg = 'dtype of categories must be the same'
with tm.assert_raises_regex(TypeError, msg):
union_categoricals([s, s2])
msg = 'No Categoricals to union'
        with tm.assert_raises_regex(ValueError, msg):
            union_categoricals([])
import pandas as pd
import bs4 as bs
# import urllib.request
# !pip install yfinance
import yfinance as yf
def get_etf_holdings(etf_symbol, shares):
    # read the top 25 holdings into a data frame
dfs = pd.read_html("https://ycharts.com/companies/{}/holdings".format(etf_symbol),header=0)
holdings = dfs[0]
holdings.drop(["% Chg", "Price"], inplace=True, axis=1)
#get etf price from most recent close
etf_info = yf.Ticker(etf_symbol)
etf_price = etf_info.info["previousClose"]
#convert weight to float
holdings["% Weight"] = holdings["% Weight"].str.rstrip("%")
holdings["% Weight"] = holdings["% Weight"].astype('float')
#calculate $ owned
holdings["Amount Owned"] = shares * etf_price * (holdings["% Weight"] / 100)
holdings.drop(["% Weight"], inplace=True, axis=1)
return(holdings)
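# Example (sketch; "SPY" and the share count are placeholder inputs):
#   holdings = get_etf_holdings("SPY", shares=10)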
def get_overall_holdings(etf_param_dict):
print(etf_param_dict)
    ret = pd.DataFrame(columns=["Symbol", "Name", "Amount Owned"])
import datetime
import os
from typing import List, Dict, Optional
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
import pandas as pd
from pydantic import BaseModel
API_URL = os.environ.get("API_URL", None)
if API_URL is None:
raise ValueError("API_URL not known")
app = FastAPI()
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["GET"],
allow_headers=["*"],
)
class CountyReport(BaseModel):
name: str
fips: Optional[int]
sources: Dict[str, List[str]] # {url: [var1, var2]}
class StateReport(CountyReport):
counties: List[CountyReport]
class Reports(BaseModel):
week_end: datetime.datetime
week_start: datetime.datetime
updates: List[StateReport]
def aggregate_week_fips_updates(x):
return pd.Series(
{
"state": x["state"].iloc[0],
"name": x["fullname"].iloc[0],
"sources": (
x.groupby("source").apply(lambda y: list(y["variable_name"])).to_dict()
),
}
)
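# aggregate_week_fips_updates collapses all rows of one (week, fips) group into
# a single record whose "sources" field maps each source URL to the list of
# variable names it updated that week.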
def aggregate_week_updates(week_state):
out = {}
out["name"] = state = week_state.name[1]
state_only = week_state.query("name == @state")
out["sources"] = {}
for ss in list(state_only["sources"]):
out["sources"].update(ss)
non_state: pd.DataFrame = week_state.query("name != @state")
out["counties"] = []
def group_county_sources(c_df: pd.DataFrame):
c_out = {
"name": c_df["name"].iloc[0],
"fips": c_df["fips"].iloc[0],
"sources": {},
}
for c_src in list(c_df["sources"]):
c_out["sources"].update(c_src)
return c_out
out["counties"] = list(non_state.groupby("fips").apply(group_county_sources))
return pd.Series(out)
def flatten_by_date(x):
return x.drop(["start_date", "state"], axis=1).to_dict(orient="records")
def get_reports():
    df = pd.read_json(f"{API_URL}/us_covid_variable_start_date")
# -*- coding: utf-8 -*-
"""
Script to determine the optimal regression to use for pre-1998 69607.
"""
from core.ts.sw import malf7d, flow_stats, flow_reg, stream_nat
from core.ecan_io import flow_import, rd_henry, rd_hydstra_db, rd_ts
from pandas import concat
from numpy import nan, log, in1d
import seaborn as sns
from core.ts.plot import hydrograph_plot, reg_plot
#################################
#### Parameters
g_sites = [69607, 69650]
r_sites = [69618]
rec_path = r'C:\ecan\shared\base_data\flow\all_flow_rec_data.csv'
catch_sites_csv = 'C:/ecan/local/Projects/otop/GIS/vector/min_flow/results/catch_sites.csv'
catch_shp = 'C:/ecan/local/Projects/otop/GIS/vector/min_flow/catch1.shp'
end_corr_date = '1996-12-31'
regplt_export = r'C:\ecan\local\Projects\requests\opihi_SH1_predam\reg1.png'
stats_export = r'C:\ecan\local\Projects\requests\opihi_SH1_predam\69607_predam_stats.csv'
ts_export = r'C:\ecan\local\Projects\requests\opihi_SH1_predam\69607_predam_ts.csv'
regplt_export2 = r'C:\ecan\local\Projects\requests\opihi_SH1_predam\69650_reg1.png'
stats_export2 = r'C:\ecan\local\Projects\requests\opihi_SH1_predam\69650_predam_stats.csv'
ts_export2 = r'C:\ecan\local\Projects\requests\opihi_SH1_predam\69650_predam_ts.csv'
#################################
#### Read in data
g_flow = rd_henry(g_sites, end=end_corr_date, sites_by_col=True)
g_flow.columns = g_flow.columns.astype('int32')
r_flow_all = rd_ts(rec_path)
r_flow_all.columns = r_flow_all.columns.astype('int32')
r_flow = r_flow_all[r_sites][:end_corr_date]
### Alt with naturalisation
flow, gaugings, nat_flow, nat_gauge = stream_nat(catch_shp, catch_sites_csv, flow_csv=r_flow, export=False)
g_flow = nat_gauge[g_sites]
t1 = nat_flow[r_sites].dropna()
x1 = r_flow[~in1d(r_flow.index, t1.index)].dropna()
r_flow = concat([x1, t1], axis=0)
#### Clean up data for regression
g_flow[g_flow <= 0 ] = nan
g_flow[g_flow > 2000 ] = nan
r_flow[r_flow <= 0 ] = nan
#### Regression
reg1, new_ts = flow_reg(r_flow, g_flow, make_ts=True, logs=False)
###############################
#### Plot
data = concat([g_flow[69650], r_flow], axis=1).dropna()
data.columns = data.columns.astype('str')
sns.regplot(x=log(data[r_sites]), y=log(data[g_sites]))
lm = sns.regplot(x='69618', y='69650', data=data, truncate=True)
lm.axes.set_ylim(0, 20)
lm.axes.set_xlim(0, 20)
###############################
#### Export results
predam1 = new_ts.loc[new_ts[69650].first_valid_index():, 69650]
predam2 = predam1.copy()
predam2[predam2 > (reg1.loc[69650].max_y * 1.5)] = nan
stats1 = flow_stats(predam1)
stats2 = flow_stats(predam2)
malf1 = malf7d(predam1, intervals=[10, 20, 30])
stats_both = concat([concat([stats1, malf1], axis=1), concat([stats2, malf1], axis=1)])
stats_both.index = ['all_flows', 'constrained_flows']
stats_both.to_csv(stats_export2, header=True)
predam1.to_csv(ts_export2, header=True)
#plt1 = hydrograph_plot(predam1)
plt2 = reg_plot(r_flow[69618], g_flow[69650], freq='day', export=True, export_path=regplt_export2)
##############################
#### Testing
t1 = nat_flow.dropna()
t2 = (t1 - r_flow).dropna()
t2.plot()
x1 = r_flow[~in1d(r_flow.index, t1.index)].dropna()
r_flow = concat([x1, t1], axis=0)
import os
import random
import argparse
from typing import Any, Dict, List
import pandas as pd
from generators import FakeGenerators
fake = FakeGenerators()
feed = pd.DataFrame()
import re
import socket
from datetime import datetime
from urlextract import URLExtract
import urllib.parse as urlparse
from urllib.parse import parse_qs
import click
import argparse
import csv
import os
from dateutil.parser import parse
import pandas as pd
from urllib.parse import unquote
import hashlib
# When you need to connect to a database
#from pandas.io import sql
#import mysql.connector
#from sqlalchemy import create_engine
#import mysql.connector
#Global Variables
data = []
map_refer = {}
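# data accumulates one row per wayback request (filled in by
# getParametersFromRequestWayback); map_refer counts how often each external
# (non arquivo.pt) referrer appears.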
# Missing an ArgumentParser (left commented out below)
#parser = argparse.ArgumentParser(description='Description of your program')
#parser.add_argument('-p','--path', help='Localization of the patching files', default= "./Files/")
#args = vars(parser.parse_args())
def extract(request):
"""
Extract url domain from wayback request.
"""
extractor = URLExtract()
try:
urls = extractor.find_urls('/'.join(request.split('/')[3:]))
if urls:
return urls[0]
else:
return None
except:
import pdb;pdb.set_trace()
def getParametersFromRequestWayback(request, df, i):
"""
Extract parameters from wayback request.
"""
# Just a sanity check.
if not pd.isnull(df.at[i, 'DATE']):
try:
# Generate timestamp using the parameter DATE
date_simple = df.at[i, 'DATE'].replace("[", "").replace("]", "")
date = datetime.strptime(date_simple, "%d/%b/%Y:%H:%M:%S")
# Just a sanity check.
if re.match(r"GET /wayback/[0-9]+", request):
#Extract url domain
url = extract(request)
if urlparse.urlparse(url).netloc != "":
final_url = urlparse.urlparse(url).netloc
else:
final_url = url
#Put into a list to later generate a dataframe
data.append([df.at[i, "IP_ADDRESS"], df.at[i, "USER_AGENT"], date.timestamp(), df.at[i, "REQUEST"], df.at[i, "STATUS_CODE"], df.at[i, "PREVIOUS_REQUEST"], final_url])
except:
raise ValueError("Error - getParametersFromRequestWayback function")
def getParametersFromRequest(request, df, i, boolRequest):
"""
Extract and process the parameters from query request.
Function only used for Apache logs.
"""
# Check whether we are processing the request or the previous_request
if boolRequest:
#This request will not be analyzed in the first analysis, however it is done for later analysis.
#Image Search JSP and Page Search JSP will be treated as equals.
if request.startswith("GET /search.jsp?") or request.startswith("GET /images.jsp?"):
# Set the parameter BOOL_QUERY (i.e., =1 means the line is a query)
df.at[i, 'BOOL_QUERY'] = 1
# Set the parameter TYPE_SEARCH
if request.startswith("GET /search.jsp?"):
df.at[i, 'TYPE_SEARCH'] = "search_jsp"
else:
df.at[i, 'TYPE_SEARCH'] = "images_jsp"
# Parse the REQUEST and Set the parameters TRACKINGID, USER_TRACKING_ID, SEARCH_TRACKING_ID, QUERY, LANG_REQUEST, FROM_REQUEST, TO_REQUEST
parsed = urlparse.urlparse(request)
try:
df.at[i, 'TRACKINGID'] = parse_qs(parsed.query)['trackingId'][0]
df.at[i, 'USER_TRACKING_ID'] = parse_qs(parsed.query)['trackingId'][0].split("_")[0]
df.at[i, 'SEARCH_TRACKING_ID'] = parse_qs(parsed.query)['trackingId'][0].split("_")[1]
except:
df.at[i, 'TRACKINGID'] = ""
try:
df.at[i, 'QUERY'] = unquote(parse_qs(parsed.query)['query'][0])
df.at[i, 'LANG_REQUEST'] = parse_qs(parsed.query)['l'][0]
except:
df.at[i, 'BOT'] = 1
try:
df.at[i, 'FROM_REQUEST'] = parse_qs(parsed.query)['dateStart'][0]
df.at[i, 'TO_REQUEST'] = parse_qs(parsed.query)['dateEnd'][0]
except:
df.at[i, 'FROM_REQUEST'] = None
df.at[i, 'TO_REQUEST'] = None
#Image Search API and Page Search API calls will be treated as equals.
elif "textsearch?" in request or "imagesearch?" in request:
# Set the parameter BOOL_QUERY (i.e., =1 means the line is a query)
df.at[i, 'BOOL_QUERY'] = 1
# Set the parameter TYPE_SEARCH
if request.startswith("GET /imagesearch?"):
df.at[i, 'TYPE_SEARCH'] = "imagesearch"
else:
df.at[i, 'TYPE_SEARCH'] = "textsearch"
# Parse the REQUEST and Set the parameters TRACKINGID, USER_TRACKING_ID, SEARCH_TRACKING_ID, QUERY, MAXITEMS, PAGE, FROM_REQUEST, TO_REQUEST
parsed = urlparse.urlparse(request)
try:
df.at[i, 'TRACKINGID'] = parse_qs(parsed.query)['trackingId'][0]
df.at[i, 'USER_TRACKING_ID'] = parse_qs(parsed.query)['trackingId'][0].split("_")[0]
df.at[i, 'SEARCH_TRACKING_ID'] = parse_qs(parsed.query)['trackingId'][0].split("_")[1]
except:
df.at[i, 'TRACKINGID'] = ""
try:
#import pdb;pdb.set_trace()
df.at[i, 'QUERY'] = unquote(parse_qs(parsed.query)['q'][0])
offset = int(parse_qs(parsed.query)['offset'][0])
df.at[i, 'MAXITEMS'] = int(parse_qs(parsed.query)['maxItems'][0])
df.at[i, 'PAGE'] = int(offset/df.at[i, 'MAXITEMS'])
except:
df.at[i, 'BOT'] = 1
try:
df.at[i, 'FROM_REQUEST'] = parse_qs(parsed.query)['from'][0]
df.at[i, 'TO_REQUEST'] = parse_qs(parsed.query)['to'][0]
except:
df.at[i, 'FROM_REQUEST'] = None
df.at[i, 'TO_REQUEST'] = None
#Process the parameter REQUEST and set the parameter PREVIOUS_REQUEST
else:
if request.startswith("GET /search.jsp?") or request.startswith("GET /images.jsp?"):
parsed = urlparse.urlparse(request)
df.at[i, 'PREVIOUS_QUERY'] = parse_qs(parsed.query)['query'][0]
elif request.startswith("GET /imagesearch?") or request.startswith("GET /textsearch?"):
parsed = urlparse.urlparse(request)
df.at[i, 'PREVIOUS_QUERY'] = parse_qs(parsed.query)['q'][0]
def processDataframe(request, previous_request, file_name, df, i, all_info_date):
"""
Function to process each log depending on the format (Apache vs Log4j)
"""
# Check if we are processing the Apache Log
if "logfile" in file_name:
getParametersFromRequest(request.replace(" HTTP/1.1", ""), df, i, True)
if pd.isnull(previous_request):
getParametersFromRequest(previous_request.replace(" HTTP/1.1", ""), df, i, False)
# if we are not processing the Apache Log
else:
#Only thing needed from request
parsed = urlparse.urlparse(request)
try:
df.at[i, 'TRACKINGID'] = parse_qs(parsed.query)['trackingId'][0]
df.at[i, 'USER_TRACKING_ID'] = parse_qs(parsed.query)['trackingId'][0].split("_")[0]
df.at[i, 'SEARCH_TRACKING_ID'] = parse_qs(parsed.query)['trackingId'][0].split("_")[1]
except:
df.at[i, 'TRACKINGID'] = ""
# Just a sanity check.
if not pd.isnull(df.at[i, 'DATE']):
try:
# Generate TIMESTAMP using the parameter DATE and Set the parameters YEAR, MONTH, DAY, HOUR, MINUTE
date_simple = df.at[i, 'DATE'].replace("[", "").replace("]", "")
date = datetime.strptime(date_simple, "%d/%b/%Y:%H:%M:%S")
df.at[i, 'TIMESTAMP'] = date.timestamp()
if all_info_date:
df.at[i, 'YEAR'] = date.year
df.at[i, 'MONTH'] = date.month
df.at[i, 'DAY'] = date.day
df.at[i, 'HOUR'] = date.hour
df.at[i, 'MINUTE'] = date.minute
except:
df.at[i, 'BOT'] = 1
else:
df.at[i, 'BOT'] = 1
return date
def mergeFiles():
"""
Function that will process each log and merge them (The core of this file).
"""
click.secho("Start Process...", fg='green')
#Location\path of the Logs.
mypath = "./data/"
#Create Dataframes for each (Apache Log, Image Search API Log4j, Page Search API Log4j, Webapp API Log4j).
df_merge_apache_file = None
df_merge_image_file = None
df_merge_page_file = None
df_merge_arquivo_webapp_file = None
# Just to initialize variables that we are going to use (can be removed).
df_log = None
df_image = None
df_page = None
df_arquivo = None
## For each log file:
for subdir, dirs, files in os.walk(mypath):
#If list is not empty.
if files:
## Progress bar with the number of log files.
with click.progressbar(length=len(files), show_pos=True) as progress_bar_total:
for file in files:
progress_bar_total.update(1)
#Get Filename
file_name = os.path.join(subdir, file)
# Process Apache Logs
if file_name.startswith("./data/logs/arquivo.pt_apache/logfile"):
#Read file into Dataframe
names_apache = ["IP_ADDRESS", "CLIENT_ID", "USER_ID", "DATE", "ZONE", "REQUEST", "STATUS_CODE", "SIZE_RESPONSE", "PREVIOUS_REQUEST", "USER_AGENT", "RESPONSE_TIME"]
df_log = pd.read_csv(file_name, sep='\s+', names=names_apache)
#Init new collumns
df_log["UNIQUE_USER"] = ""
df_log["SPELLCHECKED"] = 0
df_log["REFER"] = ""
#Tracking
df_log["TRACKINGID"] = ""
df_log["USER_TRACKING_ID"] = ""
df_log["SEARCH_TRACKING_ID"] = ""
#Date
df_log["TIMESTAMP"] = 0
df_log["YEAR"] = 0
df_log["MONTH"] = 0
df_log["DAY"] = 0
df_log["HOUR"] = 0
df_log["MINUTE"] = 0
#Search and Query
df_log["TYPE_SEARCH"] = ""
df_log["QUERY"] = ""
df_log["LANG_REQUEST"] = ""
df_log["FROM_REQUEST"] = ""
df_log["TO_REQUEST"] = ""
df_log["PREVIOUS_QUERY"] = ""
df_log["MAXITEMS"] = 0
df_log["PAGE"] = 0
#Query from robots or internal requests (default is 0, "Not a Bot")
df_log["BOT"] = 0
## Progress Bar of the number of lines processed (Apache Log File).
with click.progressbar(length=df_log.shape[0], show_pos=True) as progress_bar:
for i in df_log.index:
progress_bar.update(1)
#Get Request
request = df_log.at[i, 'REQUEST']
#Get Previous Request
previous_request = df_log.at[i, 'PREVIOUS_REQUEST']
                            #Problem with some requests
if isinstance(request, str) and isinstance(previous_request, str):
#We will create different files (Query Log file and Wayback Log file)
# Check if the request is not from wayback
if "wayback" not in request:
# Only process requests from textsearch, imagesearch, search.jsp, and images.jsp.
if request.startswith("GET /textsearch?") or request.startswith("GET /imagesearch?") or request.startswith("GET /search.jsp?") or request.startswith("GET /images.jsp?"):
processDataframe(request, previous_request, file_name, df_log, i, True)
#Generate a unique identifier for each user, making it an anonymized user.
string_user = str(df_log.at[i, 'IP_ADDRESS']) + str(df_log.at[i, 'USER_AGENT'])
df_log.at[i, 'UNIQUE_USER'] = int(hashlib.sha1(string_user.encode("utf-8")).hexdigest(), 16) % (10 ** 8)
#Check if the entry was generated because the user clicked on the query suggestion.
if "spellchecked=true" in previous_request:
df_log.at[i, 'SPELLCHECKED'] = 1
#Get a dictionary with the refers
if "arquivo.pt" not in previous_request:
df_log.at[i, 'REFER'] = previous_request
if previous_request not in map_refer:
map_refer[previous_request] = 1
else:
map_refer[previous_request] += 1
else:
#This condition removes lines such as "GET /js/jquery-1.3.2.min.js HTTP/1.1"
df_log.at[i, 'BOT'] = 1
else:
"""
Process the wayback requests
"""
#Set the entrie as "Bot" to not appear in the queries dataset.
df_log.at[i, 'BOT'] = 1
getParametersFromRequestWayback(request, df_log, i)
else:
df_log.at[i, 'BOT'] = 1
#Remove entries from "BOTs"
df_log = df_log[df_log['BOT']==0]
#Concatenate the file with previous files
df_log = df_log[['IP_ADDRESS', 'STATUS_CODE', 'REQUEST', 'USER_AGENT', 'TRACKINGID', 'USER_TRACKING_ID', 'SEARCH_TRACKING_ID', 'TIMESTAMP', 'YEAR', 'MONTH', 'DAY', 'HOUR', 'MINUTE', 'TYPE_SEARCH', 'QUERY', 'PAGE', 'MAXITEMS', 'LANG_REQUEST', 'FROM_REQUEST', 'TO_REQUEST', 'REFER', 'SPELLCHECKED', 'UNIQUE_USER']]
frames = [df_merge_apache_file, df_log]
df_merge_apache_file = pd.concat(frames)
## Logs Image Search API
if file_name.startswith("./data/logs/arquivo.pt_image_search/imagesearch"):
#Read file into DataFrame
names_image_search = ["DATE", "LOG_TYPE", "APPLICATION", "-", "IP_ADDRESS", "USER_AGENT", "URL_REQUEST", "IMAGE_SEARCH_RESPONSE(ms)", "IMAGE_SEARCH_PARAMETERS", "IMAGE_SEARCH_RESULTS"]
df_image = pd.read_csv(file_name, sep='\t', error_bad_lines=False, names=names_image_search)
#Init New Collumns
df_image["TRACKINGID"] = ""
df_image["BOT"] = 0
df_image["TIMESTAMP"] = 0
## Progress Bar of the number of lines processed (Image Search API Log4j).
with click.progressbar(length=df_image.shape[0], show_pos=True) as progress_bar:
for i in df_image.index:
progress_bar.update(1)
# Just a sanity check.
if not pd.isnull(df_image.at[i, 'IP_ADDRESS']):
request = df_image.at[i, 'URL_REQUEST']
# Just a sanity check.
if not pd.isnull(request):
                                # FIXME: the URL should be processed more thoroughly here
processDataframe(request, "", file_name, df_image, i, False)
#Remove "ms" from the string
df_image.at[i, 'IMAGE_SEARCH_RESPONSE(ms)'] = df_image.at[i, 'IMAGE_SEARCH_RESPONSE(ms)'].replace("ms", "")
else:
df_image.at[i, 'BOT'] = 1
else:
df_image.at[i, 'BOT'] = 1
#Remove entries from "BOTs" and entries with empty TRACKINGID
df_image = df_image[df_image['BOT']==0]
df_image = df_image[df_image["TRACKINGID"] != ""]
#Concatenate the file with previous files
df_image = df_image[["TIMESTAMP", "IP_ADDRESS", "USER_AGENT", "URL_REQUEST", "IMAGE_SEARCH_RESPONSE(ms)", "IMAGE_SEARCH_PARAMETERS", "IMAGE_SEARCH_RESULTS", "TRACKINGID"]]
frames = [df_merge_image_file, df_image]
df_merge_image_file = pd.concat(frames)
if file_name.startswith("./data/logs/arquivo.pt_pagesearch/pagesearchwebapp"):
#Read file into DataFrame
names_page_search = ["DATE", "LOG_TYPE", "APPLICATION", "-", "IP_ADDRESS", "USER_AGENT", "URL_REQUEST", "PAGE_SEARCH_RESPONSE(ms)", "PAGE_SEARCH_PARAMETERS", "PAGE_SEARCH_SEARCH_RESULTS"]
df_page = pd.read_csv(file_name, sep='\t', error_bad_lines=False, names=names_page_search, encoding='utf-8')
#We only need entrie from the keyword "PageSearchController"
df_page = df_page[df_page['APPLICATION']=="PageSearchController"]
#Init New Collumns
df_page["TRACKINGID"] = ""
df_page["BOT"] = 0
df_page["TIMESTAMP"] = 0
## Progress Bar of the number of lines processed (Page Search API Log4j).
with click.progressbar(length=df_page.shape[0], show_pos=True) as progress_bar:
for i in df_page.index:
progress_bar.update(1)
# Just a sanity check.
if not pd.isnull(df_page.at[i, 'IP_ADDRESS']) and "(versionHistory)" not in df_page.at[i, 'IP_ADDRESS']:
request = df_page.at[i, 'URL_REQUEST']
# Just a sanity check.
if not pd.isnull(request):
                                # FIXME: the URL should be processed more thoroughly here
processDataframe(request, "", file_name, df_page, i, False)
#Remove "ms" from the string
df_page.at[i, 'PAGE_SEARCH_RESPONSE(ms)'] = df_page.at[i, 'PAGE_SEARCH_RESPONSE(ms)'].replace("ms", "")
else:
df_page.at[i, 'BOT'] = 1
else:
df_page.at[i, 'BOT'] = 1
#Remove entries from "BOTs" and empty TRACKINGID
df_page = df_page[df_page['BOT']==0]
df_page = df_page[df_page["TRACKINGID"] != ""]
#Concatenate the file with previous files
df_page = df_page[["TIMESTAMP", "IP_ADDRESS", "USER_AGENT", "URL_REQUEST", "PAGE_SEARCH_RESPONSE(ms)", "PAGE_SEARCH_PARAMETERS", "PAGE_SEARCH_SEARCH_RESULTS", "TRACKINGID"]]
frames = [df_merge_page_file, df_page]
df_merge_page_file = pd.concat(frames)
if file_name.startswith("./data/logs/arquivo.pt_arquivo_webapp/arquivo-webapp.log"):
#Read file into DataFrame
names_arquivo = ["DATE", "LOG_TYPE", "USER", "NADA", "IP_ADDRESS", "USER_AGENT", "REQUEST", "TRACKINGID", "SESSION_ID", "TIMESTAMP_URL", "URL"]
                        df_arquivo = pd.read_csv(file_name, sep='\t', error_bad_lines=False, names=names_arquivo)
# Import Libraries
import statistics
import numpy as np
import pandas as pd
import streamlit as st
# PREDICTION FUNCTION
def predict_AQI(city, week, year, multi_week, month):
if city == 'Chicago':
data = pd.read_csv("pages/data/chi_actual_pred.csv")
if multi_week:
result = []
actual = []
for i in week.values():
result_val = pd.DataFrame(data[(data["week"] == (i)) & (data["year"] == int(year))])
result_val = result_val.iloc[:, 1].values
actual_val = pd.DataFrame(data[(data["week"] == (i)) & (data["year"] == int(year))])
actual_val = actual_val.iloc[:, 6].values
result.append(np.array_repr(result_val))
actual.append(np.array_repr(actual_val))
f_r = []
f_a = []
for i in result:
i = i.replace('array([', '')
f_r.append(i.replace('])', ''))
for i in actual:
i = i.replace('array([', '')
f_a.append(i.replace('])', ''))
return f_r, f_a
elif month != '0':
result = pd.DataFrame(data[(data["month"] == int(month)) & (data["year"] == int(year))])
result = statistics.mean(result.iloc[:, 1].values)
actual = pd.DataFrame(data[(data["month"] == int(month)) & (data["year"] == int(year))])
actual = statistics.mean(actual.iloc[:, 6].values)
return result, actual
else:
result = pd.DataFrame(data[(data["week"] == int(week)) & (data["year"] == int(year))])
result = result.iloc[:, 1].values
actual = pd.DataFrame(data[(data["week"] == int(week)) & (data["year"] == int(year))])
actual = actual.iloc[:, 6].values
return result, actual
if city == 'Philadelphia':
data = pd.read_csv("pages/data/phl_actual_pred.csv")
if multi_week:
result = []
actual = []
for i in week.values():
result_val = pd.DataFrame(data[(data["week"] == (i)) & (data["year"] == int(year))])
result_val = result_val.iloc[:, 1].values
actual_val = pd.DataFrame(data[(data["week"] == (i)) & (data["year"] == int(year))])
actual_val = actual_val.iloc[:, 7].values
result.append(np.array_repr(result_val))
actual.append(np.array_repr(actual_val))
f_r = []
f_a = []
for i in result:
i = i.replace('array([', '')
f_r.append(i.replace('])', ''))
for i in actual:
i = i.replace('array([', '')
f_a.append(i.replace('])', ''))
return f_r, f_a
elif month != '0':
result = pd.DataFrame(data[(data["month"] == int(month)) & (data["year"] == int(year))])
result = statistics.mean(result.iloc[:, 1].values)
actual = pd.DataFrame(data[(data["month"] == int(month)) & (data["year"] == int(year))])
actual = statistics.mean(actual.iloc[:, 7].values)
return result, actual
else:
result = pd.DataFrame(data[(data["week"] == int(week)) & (data["year"] == int(year))])
result = result.iloc[:, 1].values
actual = pd.DataFrame(data[(data["week"] == int(week)) & (data["year"] == int(year))])
actual = actual.iloc[:, 7].values
return result, actual
# APPLICATION FUNCTION
def app():
# description
st.write("This application has been designed using transfer learning"
" to predict the AQI values for specific cities. New York City is "
"the source city of the model and the weights of this city were transferred to "
"similar cities of Chicago, IL and Philadelphia, PA to predict the AQI. "
"Below the user will get the chance to chose a city, week, and year and find out the "
"AQI for the inputs. ")
st.markdown("***This application has been developed as part of "
"Pennsylvania State University DS440 Capstone Project.***")
# user input variables
city = st.selectbox("Please enter the city you would like to predict:",
('Chicago', 'Philadelphia'))
week = st.number_input("Please enter the week of the year you would like to predict:",
min_value=1, max_value=53)
year = st.selectbox("Please enter the year you would like to predict:",
('2018', '2019', '2020', '2021'))
funct = st.selectbox("If you would rather choose multiple weeks of information or a monthly "
"average for a given year, please select one of the following, else keep blank",
('-', 'Multiple weeks for given year', 'Average AQI for given month and year'))
if funct == 'Multiple weeks for given year':
week_dict = {}
num_weeks = st.number_input("Please enter how many weeks you would like:", min_value=1, max_value=53)
count = int(num_weeks)
while count != 0:
week_dict[count] = ""
count -= 1
for count, value in week_dict.items():
week_dict[count] = int(st.number_input("Please enter the week of the year you"
" would like to predict:", min_value=1, max_value=53, key=count))
if funct == 'Average AQI for given month and year':
month = st.selectbox("Please enter the month you would like to predict:", ('1', '2', '3', '4', '5', '6',
'7', '8',
'9', '10', '11', '12'))
if st.button('Predict'):
if city == 'Chicago':
if funct == 'Multiple weeks for given year':
result = predict_AQI(city, week_dict, year, True, '0')
d = {'lat': [41.965193], 'lon': [-87.876265]}
df = pd.DataFrame(d)
st.write("Below is the coordinate the AQI was measured at")
st.map(df)
elif funct == 'Average AQI for given month and year':
result = predict_AQI(city, week, year, False, month)
d = {'lat': [41.965193], 'lon': [-87.876265]}
df = pd.DataFrame(d)
st.write("Below is the coordinates the AQI was measured at")
st.map(df)
else:
result = predict_AQI(city, week, year, False, '0')
d = {'lat': [41.965193], 'lon': [-87.876265]}
df = pd.DataFrame(d)
st.write("Below is the coordinate the AQI was measured at")
st.map(df)
if city == 'Philadelphia':
if funct == 'Multiple weeks for given year':
result = predict_AQI(city, week_dict, year, True, '0')
d = {'lat': [39.988842], 'lon': [-75.207205]}
df = pd.DataFrame(d)
st.write("Below is the coordinate the AQI was measured at")
st.map(df)
elif funct == 'Average AQI for given month and year':
result = predict_AQI(city, week, year, False, month)
d = {'lat': [39.988842], 'lon': [-75.207205]}
df = | pd.DataFrame(d) | pandas.DataFrame |
# -*- coding: utf-8 -*- %reset -f
"""
@author: <NAME>
"""
# Demonstration of generating samples under restrictions
# settings
file_name = 'virtual_resin_x.csv'
number_of_samples_generated = 10000
x_max_rate = 1.1 # multiplied by the maximum value in the dataset to set the upper limit for generated samples
x_min_rate = 0.9 # multiplied by the minimum value in the dataset to set the lower limit for generated samples
#zero_variable_numbers = [0, 2] # numbers of x-variables whose values are 0. Empty (zero_variable_numbers = []) is ok
zero_variable_numbers = []
list_of_component_numbers = [0, 1, 2] # numbers of x-variables whose sum is 'desired_sum_of_components' below. Empty (list_of_component_numbers = []) is ok
#list_of_component_numbers = []
desired_sum_of_components = 1 # sum of x-variables whose numbers are 'list_of_component_numbers'
decimals = [2, 2, 2, 0, -1] # numbers of decimals for x-variables. The length must be the same as the number of x-variables. If empty (decimals = []), not rounded off
#decimals = []
import numpy as np
import pandas as pd
# load dataset
x = | pd.read_csv(file_name, index_col=0, header=0) | pandas.read_csv |
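# A hedged sketch (not from the original script) of the generation step the
# settings above configure: uniform sampling inside the x_min_rate/x_max_rate
# box is an assumption, applied before the component-sum and rounding constraints.
def _example_generate_candidates(x, n_samples, x_max_rate, x_min_rate):
    upper = x.max() * x_max_rate  # per-variable upper limits
    lower = x.min() * x_min_rate  # per-variable lower limits
    raw = np.random.rand(n_samples, x.shape[1]) * (upper - lower).values + lower.values
    return pd.DataFrame(raw, columns=x.columns)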
import re
from datetime import datetime
import nose
import pytz
import platform
from time import sleep
import os
import logging
import numpy as np
from distutils.version import StrictVersion
from pandas import compat
from pandas import NaT
from pandas.compat import u, range
from pandas.core.frame import DataFrame
import pandas.io.gbq as gbq
import pandas.util.testing as tm
from pandas.compat.numpy import np_datetime64_compat
PROJECT_ID = None
PRIVATE_KEY_JSON_PATH = None
PRIVATE_KEY_JSON_CONTENTS = None
if compat.PY3:
DATASET_ID = 'pydata_pandas_bq_testing_py3'
else:
DATASET_ID = 'pydata_pandas_bq_testing_py2'
TABLE_ID = 'new_test'
DESTINATION_TABLE = "{0}.{1}".format(DATASET_ID + "1", TABLE_ID)
VERSION = platform.python_version()
_IMPORTS = False
_GOOGLE_API_CLIENT_INSTALLED = False
_GOOGLE_API_CLIENT_VALID_VERSION = False
_HTTPLIB2_INSTALLED = False
_SETUPTOOLS_INSTALLED = False
def _skip_if_no_project_id():
if not _get_project_id():
raise nose.SkipTest(
"Cannot run integration tests without a project id")
def _skip_if_no_private_key_path():
if not _get_private_key_path():
raise nose.SkipTest("Cannot run integration tests without a "
"private key json file path")
def _skip_if_no_private_key_contents():
if not _get_private_key_contents():
raise nose.SkipTest("Cannot run integration tests without a "
"private key json contents")
def _in_travis_environment():
return 'TRAVIS_BUILD_DIR' in os.environ and \
'GBQ_PROJECT_ID' in os.environ
def _get_project_id():
if _in_travis_environment():
return os.environ.get('GBQ_PROJECT_ID')
else:
return PROJECT_ID
def _get_private_key_path():
if _in_travis_environment():
return os.path.join(*[os.environ.get('TRAVIS_BUILD_DIR'), 'ci',
'travis_gbq.json'])
else:
return PRIVATE_KEY_JSON_PATH
def _get_private_key_contents():
if _in_travis_environment():
with open(os.path.join(*[os.environ.get('TRAVIS_BUILD_DIR'), 'ci',
'travis_gbq.json'])) as f:
return f.read()
else:
return PRIVATE_KEY_JSON_CONTENTS
def _test_imports():
global _GOOGLE_API_CLIENT_INSTALLED, _GOOGLE_API_CLIENT_VALID_VERSION, \
_HTTPLIB2_INSTALLED, _SETUPTOOLS_INSTALLED
try:
import pkg_resources
_SETUPTOOLS_INSTALLED = True
except ImportError:
_SETUPTOOLS_INSTALLED = False
if compat.PY3:
google_api_minimum_version = '1.4.1'
else:
google_api_minimum_version = '1.2.0'
if _SETUPTOOLS_INSTALLED:
try:
try:
from googleapiclient.discovery import build # noqa
from googleapiclient.errors import HttpError # noqa
except:
from apiclient.discovery import build # noqa
from apiclient.errors import HttpError # noqa
from oauth2client.client import OAuth2WebServerFlow # noqa
from oauth2client.client import AccessTokenRefreshError # noqa
from oauth2client.file import Storage # noqa
from oauth2client.tools import run_flow # noqa
_GOOGLE_API_CLIENT_INSTALLED = True
_GOOGLE_API_CLIENT_VERSION = pkg_resources.get_distribution(
'google-api-python-client').version
if (StrictVersion(_GOOGLE_API_CLIENT_VERSION) >=
StrictVersion(google_api_minimum_version)):
_GOOGLE_API_CLIENT_VALID_VERSION = True
except ImportError:
_GOOGLE_API_CLIENT_INSTALLED = False
try:
import httplib2 # noqa
_HTTPLIB2_INSTALLED = True
except ImportError:
_HTTPLIB2_INSTALLED = False
if not _SETUPTOOLS_INSTALLED:
raise ImportError('Could not import pkg_resources (setuptools).')
if not _GOOGLE_API_CLIENT_INSTALLED:
raise ImportError('Could not import Google API Client.')
if not _GOOGLE_API_CLIENT_VALID_VERSION:
raise ImportError("pandas requires google-api-python-client >= {0} "
"for Google BigQuery support, "
"current version {1}"
.format(google_api_minimum_version,
_GOOGLE_API_CLIENT_VERSION))
if not _HTTPLIB2_INSTALLED:
raise ImportError(
"pandas requires httplib2 for Google BigQuery support")
# Bug fix for https://github.com/pandas-dev/pandas/issues/12572
# We need to know that a supported version of oauth2client is installed
# Test that either of the following is installed:
# - SignedJwtAssertionCredentials from oauth2client.client
# - ServiceAccountCredentials from oauth2client.service_account
# SignedJwtAssertionCredentials is available in oauthclient < 2.0.0
# ServiceAccountCredentials is available in oauthclient >= 2.0.0
oauth2client_v1 = True
oauth2client_v2 = True
try:
from oauth2client.client import SignedJwtAssertionCredentials # noqa
except ImportError:
oauth2client_v1 = False
try:
from oauth2client.service_account import ServiceAccountCredentials # noqa
except ImportError:
oauth2client_v2 = False
if not oauth2client_v1 and not oauth2client_v2:
raise ImportError("Missing oauth2client required for BigQuery "
"service account support")
def _setup_common():
try:
_test_imports()
except (ImportError, NotImplementedError) as import_exception:
raise nose.SkipTest(import_exception)
if _in_travis_environment():
logging.getLogger('oauth2client').setLevel(logging.ERROR)
logging.getLogger('apiclient').setLevel(logging.ERROR)
def _check_if_can_get_correct_default_credentials():
# Checks if "Application Default Credentials" can be fetched
# from the environment the tests are running in.
# See Issue #13577
import httplib2
try:
from googleapiclient.discovery import build
except ImportError:
from apiclient.discovery import build
try:
from oauth2client.client import GoogleCredentials
credentials = GoogleCredentials.get_application_default()
http = httplib2.Http()
http = credentials.authorize(http)
bigquery_service = build('bigquery', 'v2', http=http)
jobs = bigquery_service.jobs()
job_data = {'configuration': {'query': {'query': 'SELECT 1'}}}
jobs.insert(projectId=_get_project_id(), body=job_data).execute()
return True
except:
return False
def clean_gbq_environment(private_key=None):
dataset = gbq._Dataset(_get_project_id(), private_key=private_key)
for i in range(1, 10):
if DATASET_ID + str(i) in dataset.datasets():
dataset_id = DATASET_ID + str(i)
table = gbq._Table(_get_project_id(), dataset_id,
private_key=private_key)
for j in range(1, 20):
if TABLE_ID + str(j) in dataset.tables(dataset_id):
table.delete(TABLE_ID + str(j))
dataset.delete(dataset_id)
def make_mixed_dataframe_v2(test_size):
# create df to test for all BQ datatypes except RECORD
bools = np.random.randint(2, size=(1, test_size)).astype(bool)
flts = np.random.randn(1, test_size)
ints = np.random.randint(1, 10, size=(1, test_size))
strs = np.random.randint(1, 10, size=(1, test_size)).astype(str)
times = [datetime.now(pytz.timezone('US/Arizona'))
for t in range(test_size)]
return DataFrame({'bools': bools[0],
'flts': flts[0],
'ints': ints[0],
'strs': strs[0],
'times': times[0]},
index=range(test_size))
def test_generate_bq_schema_deprecated():
# 11121 Deprecation of generate_bq_schema
with tm.assert_produces_warning(FutureWarning):
df = make_mixed_dataframe_v2(10)
gbq.generate_bq_schema(df)
class TestGBQConnectorIntegration(tm.TestCase):
def setUp(self):
_setup_common()
_skip_if_no_project_id()
self.sut = gbq.GbqConnector(_get_project_id(),
private_key=_get_private_key_path())
def test_should_be_able_to_make_a_connector(self):
self.assertTrue(self.sut is not None,
'Could not create a GbqConnector')
def test_should_be_able_to_get_valid_credentials(self):
credentials = self.sut.get_credentials()
self.assertFalse(credentials.invalid, 'Returned credentials invalid')
def test_should_be_able_to_get_a_bigquery_service(self):
bigquery_service = self.sut.get_service()
self.assertTrue(bigquery_service is not None, 'No service returned')
def test_should_be_able_to_get_schema_from_query(self):
schema, pages = self.sut.run_query('SELECT 1')
self.assertTrue(schema is not None)
def test_should_be_able_to_get_results_from_query(self):
schema, pages = self.sut.run_query('SELECT 1')
self.assertTrue(pages is not None)
def test_get_application_default_credentials_does_not_throw_error(self):
if _check_if_can_get_correct_default_credentials():
raise nose.SkipTest("Can get default_credentials "
"from the environment!")
credentials = self.sut.get_application_default_credentials()
self.assertIsNone(credentials)
def test_get_application_default_credentials_returns_credentials(self):
if not _check_if_can_get_correct_default_credentials():
raise nose.SkipTest("Cannot get default_credentials "
"from the environment!")
from oauth2client.client import GoogleCredentials
credentials = self.sut.get_application_default_credentials()
self.assertTrue(isinstance(credentials, GoogleCredentials))
class TestGBQConnectorServiceAccountKeyPathIntegration(tm.TestCase):
def setUp(self):
_setup_common()
_skip_if_no_project_id()
_skip_if_no_private_key_path()
self.sut = gbq.GbqConnector(_get_project_id(),
private_key=_get_private_key_path())
def test_should_be_able_to_make_a_connector(self):
self.assertTrue(self.sut is not None,
'Could not create a GbqConnector')
def test_should_be_able_to_get_valid_credentials(self):
credentials = self.sut.get_credentials()
self.assertFalse(credentials.invalid, 'Returned credentials invalid')
def test_should_be_able_to_get_a_bigquery_service(self):
bigquery_service = self.sut.get_service()
self.assertTrue(bigquery_service is not None, 'No service returned')
def test_should_be_able_to_get_schema_from_query(self):
schema, pages = self.sut.run_query('SELECT 1')
self.assertTrue(schema is not None)
def test_should_be_able_to_get_results_from_query(self):
schema, pages = self.sut.run_query('SELECT 1')
self.assertTrue(pages is not None)
class TestGBQConnectorServiceAccountKeyContentsIntegration(tm.TestCase):
def setUp(self):
_setup_common()
_skip_if_no_project_id()
_skip_if_no_private_key_path()
        self.sut = gbq.GbqConnector(_get_project_id(),
                                    private_key=_get_private_key_contents())
def test_should_be_able_to_make_a_connector(self):
self.assertTrue(self.sut is not None,
'Could not create a GbqConnector')
def test_should_be_able_to_get_valid_credentials(self):
credentials = self.sut.get_credentials()
self.assertFalse(credentials.invalid, 'Returned credentials invalid')
def test_should_be_able_to_get_a_bigquery_service(self):
bigquery_service = self.sut.get_service()
self.assertTrue(bigquery_service is not None, 'No service returned')
def test_should_be_able_to_get_schema_from_query(self):
schema, pages = self.sut.run_query('SELECT 1')
self.assertTrue(schema is not None)
def test_should_be_able_to_get_results_from_query(self):
schema, pages = self.sut.run_query('SELECT 1')
self.assertTrue(pages is not None)
class GBQUnitTests(tm.TestCase):
def setUp(self):
_setup_common()
def test_import_google_api_python_client(self):
if compat.PY2:
with tm.assertRaises(ImportError):
from googleapiclient.discovery import build # noqa
from googleapiclient.errors import HttpError # noqa
from apiclient.discovery import build # noqa
from apiclient.errors import HttpError # noqa
else:
from googleapiclient.discovery import build # noqa
from googleapiclient.errors import HttpError # noqa
def test_should_return_bigquery_integers_as_python_floats(self):
result = gbq._parse_entry(1, 'INTEGER')
tm.assert_equal(result, float(1))
def test_should_return_bigquery_floats_as_python_floats(self):
result = gbq._parse_entry(1, 'FLOAT')
tm.assert_equal(result, float(1))
def test_should_return_bigquery_timestamps_as_numpy_datetime(self):
result = gbq._parse_entry('0e9', 'TIMESTAMP')
tm.assert_equal(result, np_datetime64_compat('1970-01-01T00:00:00Z'))
def test_should_return_bigquery_booleans_as_python_booleans(self):
result = gbq._parse_entry('false', 'BOOLEAN')
tm.assert_equal(result, False)
def test_should_return_bigquery_strings_as_python_strings(self):
result = gbq._parse_entry('STRING', 'STRING')
tm.assert_equal(result, 'STRING')
def test_to_gbq_should_fail_if_invalid_table_name_passed(self):
with tm.assertRaises(gbq.NotFoundException):
gbq.to_gbq(DataFrame(), 'invalid_table_name', project_id="1234")
def test_to_gbq_with_no_project_id_given_should_fail(self):
with tm.assertRaises(TypeError):
gbq.to_gbq(DataFrame(), 'dataset.tablename')
def test_read_gbq_with_no_project_id_given_should_fail(self):
with tm.assertRaises(TypeError):
gbq.read_gbq('SELECT "1" as NUMBER_1')
def test_that_parse_data_works_properly(self):
test_schema = {'fields': [
{'mode': 'NULLABLE', 'name': 'VALID_STRING', 'type': 'STRING'}]}
test_page = [{'f': [{'v': 'PI'}]}]
test_output = gbq._parse_data(test_schema, test_page)
correct_output = DataFrame({'VALID_STRING': ['PI']})
tm.assert_frame_equal(test_output, correct_output)
def test_read_gbq_with_invalid_private_key_json_should_fail(self):
with tm.assertRaises(gbq.InvalidPrivateKeyFormat):
gbq.read_gbq('SELECT 1', project_id='x', private_key='y')
def test_read_gbq_with_empty_private_key_json_should_fail(self):
with tm.assertRaises(gbq.InvalidPrivateKeyFormat):
gbq.read_gbq('SELECT 1', project_id='x', private_key='{}')
def test_read_gbq_with_private_key_json_wrong_types_should_fail(self):
with tm.assertRaises(gbq.InvalidPrivateKeyFormat):
gbq.read_gbq(
'SELECT 1', project_id='x',
private_key='{ "client_email" : 1, "private_key" : True }')
def test_read_gbq_with_empty_private_key_file_should_fail(self):
with tm.ensure_clean() as empty_file_path:
with tm.assertRaises(gbq.InvalidPrivateKeyFormat):
gbq.read_gbq('SELECT 1', project_id='x',
private_key=empty_file_path)
def test_read_gbq_with_corrupted_private_key_json_should_fail(self):
_skip_if_no_private_key_path()
with tm.assertRaises(gbq.InvalidPrivateKeyFormat):
gbq.read_gbq(
'SELECT 1', project_id='x',
private_key=re.sub('[a-z]', '9', _get_private_key_path()))
class TestReadGBQIntegration(tm.TestCase):
@classmethod
def setUpClass(cls):
# - GLOBAL CLASS FIXTURES -
# put here any instruction you want to execute only *ONCE* *BEFORE*
# executing *ALL* tests described below.
_skip_if_no_project_id()
_setup_common()
def setUp(self):
# - PER-TEST FIXTURES -
# put here any instruction you want to be run *BEFORE* *EVERY* test is
# executed.
pass
@classmethod
def tearDownClass(cls):
# - GLOBAL CLASS FIXTURES -
# put here any instruction you want to execute only *ONCE* *AFTER*
# executing all tests.
pass
def tearDown(self):
# - PER-TEST FIXTURES -
# put here any instructions you want to be run *AFTER* *EVERY* test is
# executed.
pass
def test_should_read_as_user_account(self):
if _in_travis_environment():
raise nose.SkipTest("Cannot run local auth in travis environment")
query = 'SELECT "PI" as VALID_STRING'
df = gbq.read_gbq(query, project_id=_get_project_id())
tm.assert_frame_equal(df, DataFrame({'VALID_STRING': ['PI']}))
def test_should_read_as_service_account_with_key_path(self):
_skip_if_no_private_key_path()
query = 'SELECT "PI" as VALID_STRING'
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=_get_private_key_path())
tm.assert_frame_equal(df, DataFrame({'VALID_STRING': ['PI']}))
def test_should_read_as_service_account_with_key_contents(self):
_skip_if_no_private_key_contents()
query = 'SELECT "PI" as VALID_STRING'
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=_get_private_key_contents())
tm.assert_frame_equal(df, DataFrame({'VALID_STRING': ['PI']}))
def test_should_properly_handle_valid_strings(self):
query = 'SELECT "PI" as VALID_STRING'
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=_get_private_key_path())
tm.assert_frame_equal(df, DataFrame({'VALID_STRING': ['PI']}))
def test_should_properly_handle_empty_strings(self):
query = 'SELECT "" as EMPTY_STRING'
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=_get_private_key_path())
tm.assert_frame_equal(df, DataFrame({'EMPTY_STRING': [""]}))
def test_should_properly_handle_null_strings(self):
query = 'SELECT STRING(NULL) as NULL_STRING'
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=_get_private_key_path())
tm.assert_frame_equal(df, DataFrame({'NULL_STRING': [None]}))
def test_should_properly_handle_valid_integers(self):
query = 'SELECT INTEGER(3) as VALID_INTEGER'
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=_get_private_key_path())
tm.assert_frame_equal(df, DataFrame({'VALID_INTEGER': [3]}))
def test_should_properly_handle_null_integers(self):
query = 'SELECT INTEGER(NULL) as NULL_INTEGER'
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=_get_private_key_path())
tm.assert_frame_equal(df, DataFrame({'NULL_INTEGER': [np.nan]}))
def test_should_properly_handle_valid_floats(self):
query = 'SELECT PI() as VALID_FLOAT'
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=_get_private_key_path())
tm.assert_frame_equal(df, DataFrame(
{'VALID_FLOAT': [3.141592653589793]}))
def test_should_properly_handle_null_floats(self):
query = 'SELECT FLOAT(NULL) as NULL_FLOAT'
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=_get_private_key_path())
tm.assert_frame_equal(df, DataFrame({'NULL_FLOAT': [np.nan]}))
def test_should_properly_handle_timestamp_unix_epoch(self):
query = 'SELECT TIMESTAMP("1970-01-01 00:00:00") as UNIX_EPOCH'
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=_get_private_key_path())
tm.assert_frame_equal(df, DataFrame(
{'UNIX_EPOCH': [np.datetime64('1970-01-01T00:00:00.000000Z')]}))
def test_should_properly_handle_arbitrary_timestamp(self):
query = 'SELECT TIMESTAMP("2004-09-15 05:00:00") as VALID_TIMESTAMP'
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=_get_private_key_path())
tm.assert_frame_equal(df, DataFrame({
'VALID_TIMESTAMP': [np.datetime64('2004-09-15T05:00:00.000000Z')]
}))
def test_should_properly_handle_null_timestamp(self):
query = 'SELECT TIMESTAMP(NULL) as NULL_TIMESTAMP'
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=_get_private_key_path())
tm.assert_frame_equal(df, DataFrame({'NULL_TIMESTAMP': [NaT]}))
def test_should_properly_handle_true_boolean(self):
query = 'SELECT BOOLEAN(TRUE) as TRUE_BOOLEAN'
df = gbq.read_gbq(query, project_id=_get_project_id(),
private_key=_get_private_key_path())
tm.assert_frame_equal(df, | DataFrame({'TRUE_BOOLEAN': [True]}) | pandas.core.frame.DataFrame |
# Copyright 2022 <NAME>, <NAME>, <NAME>.
# Licensed under the BSD 2-Clause License (https://opensource.org/licenses/BSD-2-Clause)
# This file may not be copied, modified, or distributed
# except according to those terms.
import os
from datetime import date, datetime
from os import getcwd, listdir, mkdir, path
from shutil import copy2, move
from Bio import SeqIO
import pandas as pd
from ruamel import yaml # conda install -c conda-forge ruamel.yaml
# define location of sample sheet and workflow config
def update_sample_sheet():
print("""
This function updates the samples.csv file in your config with all .fasta files
included in data/query and data/reference. Please make sure all query files are
    single fastas per sample. Reference fasta files can be multi-sequence fasta
    files. All sequences have to be named properly (≤ ten letters) as some applications
    cut off sequence names.
Please make sure to define a tag after the sample sheet is generated
""")
config = snakemake.config
QUERY_PATH = str(config["data-handling"]["data-query"])
REFERENCE_PATH = str(config["data-handling"]["data-reference"])
incoming_files = [f for f in listdir(QUERY_PATH)]
if not incoming_files:
print("No files in data/query")
new_files_reference = pd.DataFrame(columns=["sample_name", "file", "type", "tag"])
else:
print("Updating sample sheet")
# create dataframe
new_files_query = pd.DataFrame(incoming_files, columns=["file"])
# get id of sample by splitting the file handle
new_files_query["sample_name"] = new_files_query["file"].apply(
lambda x: (x.rsplit(".", 1)[0])
)
new_files_query["type"] = "query"
new_files_query["tag"] = "your_tag"
# add path of file
new_files_query["file"] = QUERY_PATH + "/" + new_files_query["file"]
print(new_files_query)
print("\t{} query samples added".format(len(new_files_query)))
incoming_files = [f for f in listdir(REFERENCE_PATH)]
if not incoming_files:
print("No files in data/reference")
new_files_reference = | pd.DataFrame(columns=["sample_name", "file", "type"]) | pandas.DataFrame |
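        # A hedged sketch (an assumption about the original workflow) of how the
        # query and reference frames are later combined into the sample sheet:
        # samples = pd.concat([new_files_query, new_files_reference], ignore_index=True)
        # samples.to_csv("config/samples.csv", index=False)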
#
# Copyright (C) 2021 The Delta Lake Project Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from datetime import date
from typing import Optional, Sequence
import pandas as pd
from delta_sharing.protocol import AddFile, Metadata, Table
from delta_sharing.reader import DeltaSharingReader
from delta_sharing.rest_client import ListFilesInTableResponse, DataSharingRestClient
from delta_sharing.tests.conftest import ENABLE_INTEGRATION, SKIP_MESSAGE
def test_to_pandas_non_partitioned(tmp_path):
pdf1 = pd.DataFrame({"a": [1, 2, 3], "b": ["a", "b", "c"]})
pdf2 = pd.DataFrame({"a": [4, 5, 6], "b": ["d", "e", "f"]})
pdf1.to_parquet(tmp_path / "pdf1.parquet")
pdf2.to_parquet(tmp_path / "pdf2.parquet")
class RestClientMock:
def list_files_in_table(
self,
table: Table,
*,
predicateHints: Optional[Sequence[str]] = None,
limitHint: Optional[int] = None,
) -> ListFilesInTableResponse:
assert table == Table("table_name", "share_name", "schema_name")
metadata = Metadata(
schema_string=(
'{"fields":['
'{"metadata":{},"name":"a","nullable":true,"type":"long"},'
'{"metadata":{},"name":"b","nullable":true,"type":"string"}'
'],"type":"struct"}'
)
)
add_files = [
AddFile(
url=str(tmp_path / "pdf1.parquet"),
id="pdf1",
partition_values={},
size=0,
stats="",
),
AddFile(
url=str(tmp_path / "pdf2.parquet"),
id="pdf2",
partition_values={},
size=0,
stats="",
),
]
return ListFilesInTableResponse(
table=table, protocol=None, metadata=metadata, add_files=add_files
)
reader = DeltaSharingReader(Table("table_name", "share_name", "schema_name"), RestClientMock())
pdf = reader.to_pandas()
expected = pd.concat([pdf1, pdf2]).reset_index(drop=True)
| pd.testing.assert_frame_equal(pdf, expected) | pandas.testing.assert_frame_equal |
"""Merge two or more tables as data frames.
"""
import argparse
from functools import reduce
import pandas as pd
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--tables", nargs="+", help="tables to concatenate")
parser.add_argument("--separator", default="\t", help="separator between columns in the given tables")
parser.add_argument("--suffixes", nargs=2, help="what to add when two columns have the same value")
parser.add_argument("--output", help="concatenated table")
args = parser.parse_args()
# Read tables.
tables = []
for i in range(0, len(args.tables)):
tables.append( | pd.read_csv(args.tables[i], sep=args.separator) | pandas.read_csv |
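    # A hedged sketch of the merge step the imports above build toward; the outer
    # join and default keys are assumptions, only reduce and --suffixes come from the script:
    # merged = reduce(lambda left, right: pd.merge(left, right, how="outer",
    #                                              suffixes=args.suffixes), tables)
    # merged.to_csv(args.output, sep=args.separator, index=False)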
import pandas as pd
from OCAES import ocaes
import time
from joblib import Parallel, delayed, parallel_backend
# =====================
# Function to enable parameter sweep
# =====================
def parameterSweep(sweep_inputs, index):
# Record time to solve
t0 = time.time()
# create and run model
data = pd.read_csv('model_inputs.csv')
model_inputs = ocaes.get_default_inputs(storage_type=sweep_inputs.loc[index, 'storage_type'])
if sweep_inputs.loc[index, 'storage_type'] == 'wind_only':
model_inputs['X_well'] = 0.0
model_inputs['X_cmp'] = 0.0
model_inputs['X_exp'] = 0.0
else:
model_inputs['X_well'] = sweep_inputs.loc[index, 'capacity']
model_inputs['X_cmp'] = sweep_inputs.loc[index, 'capacity']
model_inputs['X_exp'] = sweep_inputs.loc[index, 'capacity']
if sweep_inputs.loc[index, 'storage_type'] == 'OCAES-10':
model_inputs['pwr2energy'] = 10
if sweep_inputs.loc[index, 'storage_type'] == 'OCAES-24':
model_inputs['pwr2energy'] = 24
model = ocaes(data, model_inputs)
df, s = model.get_full_results()
s['revenue'], s['LCOE'], s['COVE'], s['emissions_avoided'] = model.post_process(s)
# Display Elapsed Time
t1 = time.time()
print("Time Elapsed: " + str(round(t1 - t0, 2)) + " s")
# Combine inputs and results into output and then return
return pd.concat([sweep_inputs.loc[index, :], s], axis=0)
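# A hedged sketch (an assumption, not the original dispatch code) of how the
# parameter sweep above is typically run in parallel with joblib:
def _example_dispatch(sweep_inputs, ncpus=-1):
    with parallel_backend('loky', n_jobs=ncpus):
        results = Parallel(verbose=5)(
            delayed(parameterSweep)(sweep_inputs, index) for index in sweep_inputs.index)
    return pd.DataFrame(results)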
# =====================
# Main Program
# =====================
if __name__ == '__main__':
# ==============
# User Inputs
# ==============
studyname = 'sweep_capacity'
# storage technologies to investigate
storage_types = ['OCAES-10', 'OCAES-24', 'battery', 'wind_only']
# storage capacities to investigate
capacities = [0, 100, 200, 300, 400, 500]
# Number of cores to use
ncpus = -1 # int(os.getenv('NUM_PROCS'))
# ==============
# Run Simulations
# ==============
# prepare inputs
entries = ['storage_type', 'capacity']
sweep_inputs = pd.DataFrame(columns=entries)
for storage_type in storage_types:
for capacity in capacities:
s = | pd.Series(index=entries) | pandas.Series |
import sys
import warnings
from itertools import combinations
import matplotlib.pyplot as plt
import pandas as pd
import holoviews as hv
hv.extension('bokeh')
hv.output(size=200)
warnings.filterwarnings("ignore", "Only Polygon objects", UserWarning)
warnings.simplefilter(action='ignore', category=FutureWarning)
pd.options.display.float_format = '{:,.2f}'.format
""" Saving code to plot families house prices changes and chord of migration flows among municipalities
"""
def organize(f):
f.columns = ['months', 'id', 'long', 'lat', 'size', 'house_value', 'rent', 'quality', 'qli', 'on_market',
'family_id', 'region_id', 'mun_id']
return f
def simplify(f):
return f[['months', 'long', 'lat', 'size', 'house_value', 'quality', 'qli', 'on_market', 'family_id']]
def plot(f):
# f = organize(f)
f = f[f['family_id'] != 'None']
g = (f.sort_values('months').groupby('family_id').filter(lambda g: g.house_value.iat[-1] != g.house_value.iat[0]))
fplot = g.pivot_table(index='months', columns='family_id', values='house_value')
fplot.plot(legend=False)
plt.show()
def basics(f, names, by_what='region_id', name=None):
g = organize(f)
# g = g.reset_index(drop=True)
non = g[g.family_id != 'None']
vacant = (1 - len(non)/len(g)) * 100
# vacant_region = g[g['family_id'] == 'None'].groupby(by_what).id.count() / \
# g.groupby(by_what).id.count() * 100
m239 = g[g.months == '2010-01-01'].house_value.mean()
perc = (g[g.months == '2019-12-01'].house_value.mean() - m239) / m239 * 100
move = (non.sort_values('months').groupby('family_id')
.filter(lambda h: h.house_value.iat[-1] != h.house_value.iat[0]))
up = (non.sort_values('months').groupby('family_id')
.filter(lambda h: h.house_value.iat[-1] > h.house_value.iat[0]))
down = (non.sort_values('months').groupby('family_id')
.filter(lambda h: h.house_value.iat[-1] < h.house_value.iat[0]))
num_families = len(set(move.family_id))
    p = 'Families which moved up/downwards weighted by the number of families which moved from a given municipality.'
up_down = pd.DataFrame()
up_down['up'] = up[up.months == '2019-12-01'].groupby(by_what).family_id.count() / \
move[move.months == '2010-01-01'].groupby(by_what).family_id.count()
up_down['down'] = down[down.months == '2019-12-01'].groupby(by_what).family_id.count() / \
move[move.months == '2010-01-01'].groupby(by_what).family_id.count()
# stack bar plot
fig = plt.figure(figsize=(20, 20))
ax = fig.gca()
up_down = up_down.merge(names, left_index=True, how='inner', right_on='cod_mun')
up_down = up_down.set_index('cod_name')
up_down = up_down[['up', 'down']]
up_down.sort_values('up').plot(kind='barh', stacked=False, ax=ax)
ax.set_ylabel("Municipalities")
plt.yticks(fontsize=24)
plt.legend(frameon=False)
plt.savefig(f'output/maps/hist/{name}_hist.png', bbox_inches='tight')
plt.show()
print('Vacant houses: {:.2f}%'.format(vacant))
# print('Vacant houses by municipalities: {}%'.format(vacant_region.to_string()))
print('Median house values: full base ${:.2f}'.format(g.house_value.median()))
print('Median house values: occupied ${:.2f}'.format(non.house_value.median()))
print('Median house values: vacant base ${:.2f}'.format(g[g.family_id=='None'].house_value.median()))
print('Percentage of increase house prices for given period: {:.2f} %'.format(perc))
print('Number of families that have moved: {:.0f}. '
'Percentage of total families {:.2f} %'.format(num_families, num_families / len(set(g.family_id)) * 100))
print('Upwards {:.2f}% and Downwards {:.2f} %'.format(float(len(set(up.family_id)) / num_families) * 100,
float(len(set(down.family_id)) / num_families) * 100))
return g
def prepare_chord(df, names):
# Cleaning up received DataFrame
# df = organize(df)
df = df[['family_id', 'mun_id']]
tf = df.drop_duplicates(keep='first')
tl = df.drop_duplicates(keep='last')
df = pd.concat([tf, tl])
# Generating nodes and links
cnxns = []
for k, g in df.groupby('family_id'):
[cnxns.extend((n1, n2, len(g)) for n1, n2 in combinations(g['mun_id'], 2))]
df = | pd.DataFrame(cnxns, columns=['region1', 'region2', 'total']) | pandas.DataFrame |
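    # A hedged sketch of the chord construction this function builds toward
    # (aggregating duplicate region pairs first is an assumption):
    # links = df.groupby(['region1', 'region2'], as_index=False)['total'].sum()
    # chord = hv.Chord(links)
    # hv.save(chord, 'output/chord_migration.html')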
"""
High-level utilities and wrappers on top of high-level APIs of other libraries.
"""
import numpy as np
import pandas as pd
import sklearn.metrics
import sklearn.preprocessing
import tensorflow as tf
import lidbox.metrics
import lidbox.data.steps
TF_AUTOTUNE = tf.data.experimental.AUTOTUNE
def predictions_to_dataframe(ids, predictions):
return ( | pd.DataFrame.from_dict({"id": ids, "prediction": predictions}) | pandas.DataFrame.from_dict |
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 14 13:08:38 2020
@author: Lajari
"""
import pandas as pd
from sklearn.base import TransformerMixin, BaseEstimator
from re import search
import nltk
nltk.download('stopwords')
nltk.download('wordnet')
from nltk import RegexpTokenizer
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
lemmatizer = WordNetLemmatizer()
stop_words = set(stopwords.words('english'))
class CleanDescriptionFile(TransformerMixin, BaseEstimator):
    '''This subclass is used to clean the data in the description file'''
def __init__(self, check_ASIN = True, add_category = True):
self.check_ASIN = check_ASIN
self.add_category = add_category
def fit(self, X, y=None):
return self
def check_ASIN_validity(self,X):
""" This fuction checks valid ASIN.
ASIN : ASIN is unique identifier defined by amazon
for each productI"""
if self.check_ASIN == True:
col = X['ASIN'].copy()
uniq_col = pd.Series(col.unique())
mask = (uniq_col.str.match(r'\b[B\d][\dA-Z]{9}\b')) & (uniq_col.str.len()==10)
inval_ASIN = uniq_col[~mask]
print(inval_ASIN)
return inval_ASIN
def transform(self,X):
""" Transformation includes cleaning inappropriate column and casting to appropriate format"""
X =X[~X.duplicated(keep='first')].copy()
X['Keyword'] = X['Keyword'].astype(str).str.replace('+',' ').str.replace('%27',"'").copy()
X['MatchTerm'] = X['MatchTerm'].astype(str).str.replace('%27',"''").copy()
X = X.fillna('Not Available').copy()
X['RetrievedTime'] = pd.to_datetime(X['RetrievedTime']).copy()
X = X[~(X['ProductName'] == 'No_Name')]
def classify(row):
if search(r"[tT][eE][aA]|Traditional Medicinals Nighty Night Valerian,",row):
return 'tea'
elif search(r"[cC][oO][fF][fF][eE][eE]", row):
return 'coffee'
elif search(r"[cC][aA][pP][sS][uU][lL]|[Tt][aA][bB][lL][eE][tT]",row):
return 'tablet'
elif search(r"[cC][hH][oO][cC][oO][lL][aA][tT]",row):
return 'chocolate'
elif search(r"[oO][iI][lL]",row):
return 'oil'
elif search(r"[cC][oO][oO][kK][iI]",row):
return 'cookies'
elif search(r"[hH][oO][nN][eE][yY]",row):
return 'honey'
elif search(r"[Mm][iI][lL][kK]",row):
return 'milk'
elif search(r"[jJ][aA][mM]|[jJ][eE][lL][lL][yY]",row):
return 'jam'
elif search(r"[Bb][eE][Vv][Ee][rR][aA][gG][eE]",row):
return 'beverage'
elif search(r"[Cc][aA][kK][eE]",row):
return 'cake mix'
elif search(r"[Ee][xX][tT][rR][Aa][cC][tT]",row):
return 'extract'
elif search(r"[sS][uU][pP][pP][lL][eE][mM][eE][nN][tT]",row):
return 'supplement'
elif search(r"[rR][oO][oO][tT]",row):
return 'root'
elif search(r"[lL][eE][aA][fFvV][eE]?",row):
return 'leaf'
elif search(r"[pP][oO][wW][dD][eE][rR]",row):
return 'powder'
else:
return 'other'
if self.add_category:
X['Category'] = X['ProductName'].map(classify)
return X
class CleanReviewFile(TransformerMixin, BaseEstimator):
    '''This subclass is used to clean the data in the review file'''
def __init__(self, check_ASIN = True, add_ProcessedText = True):
self.check_ASIN = check_ASIN
self.add_ProcessedText = add_ProcessedText
def fit(self, X, y=None):
return self
def check_ASIN_validity(self,X,y=None):
""" This fuction checks valid ASIN.
ASIN : ASIN is unique identifier defined by amazon
for each productI"""
if self.check_ASIN == True:
col = X['ASIN'].copy()
uniq_col = pd.Series(col.unique())
mask = (uniq_col.str.match(r'\b[B\d][\dA-Z]{9}\b')) & (uniq_col.str.len()==10)
inval_ASIN = uniq_col[~mask]
print(inval_ASIN)
return inval_ASIN
def transform(self,X,y=None):
""" Transformation includes cleaning inappropriate column and casting to appropriate format"""
X =X[~X.duplicated(keep='first')].copy()
X['ProductNumReviews'] = X['ProductNumReviews'].astype(str).str.replace(',','').astype('int64')
X['RetrievedTime'] = | pd.to_datetime(X['RetrievedTime']) | pandas.to_datetime |
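        # A hedged usage sketch of the two cleaners above (the input frames are
        # assumptions; column names follow the checks performed in the transformers):
        # clean_desc = CleanDescriptionFile(add_category=True).fit_transform(raw_description_df)
        # clean_rev = CleanReviewFile().fit_transform(raw_review_df)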
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
"""
we consider a list of integers going from 2^0 to 2^159, and we use sys.getsizeof to inspect how many bytes are actually used to store the integer
"""
import sys
int_sizes = {}
for i in range(160):
int_sizes[i] = sys.getsizeof(2 ** i)
int_sizes = | pd.Series(int_sizes) | pandas.Series |
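# On a typical 64-bit CPython build (an assumption), the resulting series shows
# 28 bytes for small ints, growing by 4 bytes for each extra 30-bit internal digit:
# int_sizes.plot(drawstyle='steps-post')
# plt.xlabel('i in 2**i'); plt.ylabel('bytes'); plt.show()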
import pandas as pd
import math
import sqlite3 as sql
def read_tables_info(con):
data = pd.read_sql_query('select * from tables_info',con,index_col='index')
return data
def is_table_exists(cursor,table_name):
cursor.execute('select count(*) from sqlite_master where type="table" and name="'+table_name+'"')
values = cursor.fetchall()
#print(values[0][0])
return values[0][0] == 1
def table_info(cursor,table_name):
cursor.execute('pragma table_info("'+table_name+'")')
values = cursor.fetchall()
print(values)
def read_trade_cal(con):
data = pd.read_sql_query('select * from trade_cal',con,index_col='index')
return data
def read_daily_by_date(con,sdate,edate):
sql_str = 'select * from daily where trade_date >= "'+sdate+'" and trade_date <= "'+edate+'"'
data = pd.read_sql_query(sql_str,con,index_col='index')
return data
def read_fut_daily_by_tscode(con,tscode):
sql_str = 'select * from fut_daily where ts_code = "'+tscode+'"'
data = pd.read_sql_query(sql_str,con,index_col='index')
return data
def read_fut_daily_by_symbol(con,symbol):
sql_str = 'select * from fut_daily where symbol = "'+symbol+'"'
data = pd.read_sql_query(sql_str,con,index_col='index')
return data
def read_fut_holding_by_symbol(con,symbol):
sql_str = 'select * from fut_holding where symbol = "'+symbol+'"'
data = pd.read_sql_query(sql_str,con,index_col='index')
return data
def read_concept_info(con):
sql_str = 'select * from concept_info'
data = pd.read_sql_query(sql_str,con,index_col='index')
return data
def read_concept_detail(con):
sql_str = 'select * from concept_detail'
data = pd.read_sql_query(sql_str,con,index_col='index')
return data
def read_daily_by_tscode(con,tscode):
sql_str = 'select * from daily where ts_code="'+tscode+'"'
data = pd.read_sql_query(sql_str,con)
return data
def read_daily_basic_by_date(con,sdate,edate):
sql_str = 'select * from daily_basic where trade_date >= "'+sdate+'" and trade_date <= "'+edate+'"'
data = pd.read_sql_query(sql_str,con,index_col='index')
return data
def read_margin_detail_by_date(con,sdate,edate):
sql_str = 'select * from margin_detail where trade_date >= "'+sdate+'" and trade_date <= "'+edate+'"'
data = pd.read_sql_query(sql_str,con,index_col='index')
return data
def read_hk_hold_by_date(con,sdate,edate):
sql_str = 'select * from hk_hold where trade_date >= "'+sdate+'" and trade_date <= "'+edate+'"'
data = pd.read_sql_query(sql_str,con,index_col='index')
return data
def read_daily_by_date_and_tscode(con,tscode,sdate,edate):
sql_str = 'select * from daily where ts_code="'+tscode+'" and trade_date >= "'+sdate+'" and trade_date <= "'+edate+'"'
data = pd.read_sql_query(sql_str,con,index_col='trade_date')
return data
def read_daily_basic_by_tscode(con,tscode):
sql_str = 'select * from daily_basic where ts_code="'+tscode+'"'
data = pd.read_sql_query(sql_str,con)
return data
def find_date_need_update(con,sdate,edate):
sql_str='select cal_date from trade_cal where is_open = 1 and cal_date >="'+sdate+'" and cal_date <="'+edate+'" and (cal_date not in (select trade_date from daily) or cal_date not in (select trade_date from daily_basic))'
data = pd.read_sql_query(sql_str,con)
return data
def find_date_need_update_daily(con,sdate,edate):
sql_str='select cal_date from trade_cal where is_open = 1 and cal_date >="'+sdate+'" and cal_date <="'+edate+'" and cal_date not in (select trade_date from daily)'
data = pd.read_sql_query(sql_str,con)
return data
def find_date_need_update_daily_basic(con,sdate,edate):
sql_str='select cal_date from trade_cal where is_open = 1 and cal_date >="'+sdate+'" and cal_date <="'+edate+'" and cal_date not in (select trade_date from daily_basic)'
data = pd.read_sql_query(sql_str,con)
return data
def find_date_need_update_adj_factor(con,sdate,edate):
sql_str='select cal_date from trade_cal where is_open = 1 and cal_date >="'+sdate+'" and cal_date <="'+edate+'" and cal_date not in (select trade_date from adj_factor)'
data = pd.read_sql_query(sql_str,con)
return data
def find_date_need_update_block_trade(con,sdate,edate):
try:
sql_str='select cal_date from trade_cal where is_open = 1 and cal_date >="'+sdate+'" and cal_date <="'+edate+'" and cal_date not in (select trade_date from block_trade)'
data = pd.read_sql_query(sql_str,con)
except Exception as e:
print("ex:"+str(e))
return None
return data
def find_date_need_update_stock_suspend(con,sdate,edate):
try:
sql_str='select cal_date from trade_cal where is_open = 1 and cal_date >="'+sdate+'" and cal_date <="'+edate+'" and cal_date not in (select suspend_date from stock_suspend)'
data = pd.read_sql_query(sql_str,con)
except Exception as e:
print("ex:"+str(e))
return None
return data
def find_date_need_update_longhubang_list(con,sdate,edate):
try:
sql_str='select cal_date from trade_cal where is_open = 1 and cal_date >="'+sdate+'" and cal_date <="'+edate+'" and cal_date not in (select trade_date from longhubang_list)'
data = pd.read_sql_query(sql_str,con)
except Exception as e:
print("ex:"+str(e))
return None
return data
def find_date_need_update_money_flow(con,sdate,edate):
try:
sql_str='select cal_date from trade_cal where is_open = 1 and cal_date >="'+sdate+'" and cal_date <="'+edate+'" and cal_date not in (select trade_date from money_flow)'
data = pd.read_sql_query(sql_str,con)
except Exception as e:
print("ex:"+str(e))
return None
return data
def find_date_need_update_stock_limit_price(con,sdate,edate):
try:
sql_str='select cal_date from trade_cal where is_open = 1 and cal_date >="'+sdate+'" and cal_date <="'+edate+'" and cal_date not in (select trade_date from stock_price_limit)'
data = pd.read_sql_query(sql_str,con)
except Exception as e:
print("ex:"+str(e))
return None
return data
def find_date_need_update_stk_holdernumber(con,sdate,edate):
try:
sql_str='select cal_date from trade_cal where cal_date >="'+'20190101'+'" and cal_date <="'+edate+'" and cal_date not in (select end_date from stk_holder_num)'
data = pd.read_sql_query(sql_str,con)
except Exception as e:
print("ex:"+str(e))
return None
return data
def find_date_need_update_hk_hold(con,sdate,edate):
try:
sql_str='select cal_date from trade_cal where cal_date >="'+'20190101'+'" and cal_date <="'+edate+'" and cal_date not in (select trade_date from hk_hold)'
data = pd.read_sql_query(sql_str,con)
except Exception as e:
print("ex:"+str(e))
return None
return data
def find_date_need_update_fut_daily(con,sdate,edate):
try:
sql_str='select cal_date from trade_cal where is_open = 1 and cal_date >="'+'20190101'+'" and cal_date <="'+edate+'" and cal_date not in (select trade_date from fut_daily)'
data = pd.read_sql_query(sql_str,con)
except Exception as e:
print("ex:"+str(e))
return None
return data
def find_date_need_update_fut_holding(con,sdate,edate):
try:
sql_str='select cal_date from trade_cal where is_open = 1 and cal_date >="'+'20190101'+'" and cal_date <="'+edate+'" and cal_date not in (select trade_date from fut_holding)'
data = pd.read_sql_query(sql_str,con)
except Exception as e:
print("ex:"+str(e))
return None
return data
def find_date_need_update_fut_wsr(con,sdate,edate):
try:
sql_str='select cal_date from trade_cal where is_open = 1 and cal_date >="'+'20190101'+'" and cal_date <="'+edate+'" and cal_date not in (select trade_date from fut_wsr)'
data = pd.read_sql_query(sql_str,con)
except Exception as e:
print("ex:"+str(e))
return None
return data
def find_date_need_update_margin_detail(con,sdate,edate):
try:
sql_str='select cal_date from trade_cal where cal_date >="'+'20190101'+'" and cal_date <="'+edate+'" and cal_date not in (select trade_date from margin_detail)'
data = pd.read_sql_query(sql_str,con)
except Exception as e:
print("ex:"+str(e))
return None
return data
def read_money_flow(con,tscode):
sql_str='select * from money_flow where ts_code="'+tscode+'"'
data = pd.read_sql_query(sql_str,con)
return data
def read_stock_basic(con):
sql_str='select * from stock_basic'
data = pd.read_sql_query(sql_str,con,index_col='index')
return data
def read_fut_basic(con,name_pix = ''):
sql_str='select * from fut_basic'
data = pd.read_sql_query(sql_str,con,index_col='index')
if type(data) == pd.DataFrame and name_pix != '':
def subfun(item):
return item.find(name_pix) >= 0;
return data[data.name.apply(subfun)]
else:
return data
def read_stock_basic_by_name(con,name):
sql_str='select * from stock_basic where name="'+name+'"'
data = pd.read_sql_query(sql_str,con)
return data
def read_ts_codes(con):
sql_str='select ts_code from stock_basic'
data = | pd.read_sql_query(sql_str,con) | pandas.read_sql_query |
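    # A minimal usage sketch (assumes a local SQLite file that already contains
    # the tables referenced above, e.g. one built from tushare downloads):
    # con = sql.connect('stock.db')
    # daily = read_daily_by_date_and_tscode(con, '000001.SZ', '20190101', '20191231')
    # con.close()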
# Analysis of *rXiv clusters
# %%
import logging
import re
from datetime import datetime
import altair as alt
import pandas as pd
import statsmodels.api as sm
from numpy.random import choice
from scipy.spatial.distance import cityblock
from statsmodels.api import OLS, Poisson, ZeroInflatedPoisson
from eurito_indicators import PROJECT_DIR
from eurito_indicators.getters.arxiv_getters import (
get_arxiv_articles,
get_cluster_names,
get_arxiv_tokenised,
get_arxiv_topic_model,
get_covid_papers,
query_arxiv_institute,
)
from eurito_indicators.pipeline.clustering_naming import (
k_check_clusters,
make_distance_to_clusters,
make_doc_comm_lookup,
)
from eurito_indicators.pipeline.processing_utils import make_lq
from eurito_indicators.pipeline.topic_modelling import post_process_model_clusters
from eurito_indicators.pipeline.topic_utils import make_topic_mix, train_topic_model
from eurito_indicators.utils.altair_save_utils import (
ch_resize,
google_chrome_driver_setup,
save_altair,
)
from eurito_indicators.utils.other_utils import clean_table
VAL_PATH = f"{PROJECT_DIR}/outputs/reports/val_figures"
FIG_PATH = f"{PROJECT_DIR}/outputs/reports/final_report_deck"
def plot_k_check_outputs(val_results, cluster_names):
"""
Plots the results of a validation of clustering results using kmeans
"""
logging.info("Cluster overlaps")
cluster_check = (
        pd.concat([x[0] for x in val_results], axis=1)
.mean(axis=1)
.reset_index(name="co_occ")
.assign(c1_name=lambda df: df["c1"].map(cluster_names))
.assign(c2_name=lambda df: df["c2"].map(cluster_names))
)
hm = (
alt.Chart(cluster_check)
.mark_point(filled=True, stroke="black", strokeWidth=1)
.encode(
x=alt.X("c1_name:N", sort=alt.EncodingSortField("c1")),
y=alt.Y("c2_name:N", sort=alt.EncodingSortField("c2")),
size=alt.Size("co_occ", title="Number of co-occurrences"),
color=alt.Color("co_occ", scale=alt.Scale(scheme="oranges")),
tooltip=["c1_name", "c2_name"],
)
)
logging.info("Correlations between assignment shares")
dists_df = (
        pd.concat([x[1] for x in val_results], axis=1)
.mean(axis=1)
.reset_index(name="share_corr")
.assign(c1_name=lambda df: df["c1"].map(cluster_names))
.assign(c2_name=lambda df: df["c2"].map(cluster_names))
)
pl = (
alt.Chart(dists_df)
.mark_rect()
.encode(
x=alt.X("c1_name:N", sort=alt.EncodingSortField("c1")),
y=alt.Y("c2_name:N", sort=alt.EncodingSortField("c2")),
color="share_corr",
tooltip=["c1_name", "c2_name", "share_corr"],
)
)
logging.info("Distribution of correlations with other clusters")
melted = (
dists_df[["c1", "c2", "share_corr"]]
.melt(id_vars=["share_corr"])
.assign(cl_name=lambda df: df["value"].map(cluster_names))
)
sort_clusters = (
melted.groupby("cl_name")["share_corr"]
.mean()
.sort_values(ascending=False)
.index.tolist()
)
boxp = (
alt.Chart(melted)
.mark_boxplot()
.encode(y=alt.Y("cl_name:N", sort=sort_clusters), x="share_corr")
)
return hm, pl, boxp
def make_clean_cluster_names(cluster_names):
clean_clusters = {
k: " ".join([x.capitalize() for x in re.sub("_", " ", v).split(" ")])
for k,v in cluster_names.items()
}
return clean_clusters
def tag_covid_cluster(table, cluster_lookup, cluster_name):
t = table.assign(
cluster=lambda df: df["article_id"].map(cluster_lookup).map(cluster_name)
).assign(is_covid=lambda df: ~df["cluster"].isna())
return t
def tag_month_year(table, art):
return table["article_id"].map(art.set_index("article_id")["month_year"].to_dict())
def make_temp_reg_table(inst_cov, all_arts, focus_countries):
"""Creates a regression table to analyse the link
between research nationality, topic and timeliness of Covid-19 response
"""
inst_cov["created"] = inst_cov["article_id"].map(
all_arts.set_index("article_id")["created"]
)
number_collabs = inst_cov.groupby("article_id")["country"].apply(
lambda x: len(set(x))
)
inst_cov["n_collabs"] = inst_cov["article_id"].map(number_collabs)
reg_data = (
inst_cov.query("is_covid==True")
.query("month_year < '2021-01-01'")
.query("month_year >= '2020-01-01'")
.copy()[["country", "cluster", "month_year", "created", "n_collabs"]]
.reset_index(drop=True)
)
reg_data["y"] = [x.month for x in reg_data["month_year"]]
reg_data["time_since_cov"] = [
(x - datetime(2019, 12, 30)).days for x in reg_data["created"]
]
reg_data = reg_data.loc[reg_data["country"].isin(focus_countries)].reset_index(
drop=True
)
return reg_data
def time_reg_comparison(reg_data):
X_count = sm.add_constant(pd.get_dummies(reg_data["country"]))
X_clust = sm.add_constant(
pd.concat(
[
pd.get_dummies(reg_data["country"]),
| pd.get_dummies(reg_data["cluster"]) | pandas.get_dummies |
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 22 21:44:29 2016
@author: Carrie
"""
#Program will:
# Parse json file with tweets from Seattle for the last week,
# 10 million public geotagged tweets every day, which is about 120 per second
import json, time, pandas as pd
from datetime import datetime, timedelta
from email.utils import parsedate_tz
from memory_profiler import profile
import json, sys
#Set up: track time of program and set print options
start = time.time()
#@profile
#Parse json objects from text file
def parseJson(inputTweetTextFile):
tweets_data = []
tweets_file = open(inputTweetTextFile, "r")
for line in tweets_file:
try:
tweet = json.loads(line)
tweets_data.append(tweet)
except:
continue
print(( "Number of Tweets to be Processed: {0}".format( len(tweets_data) )))
return tweets_data
#Convert Twitter Date Time
def StrToDatetime(datestring):
time_tuple = parsedate_tz(datestring.strip())
dt = datetime(*time_tuple[:6])
return dt - timedelta(seconds=time_tuple[-1])
#@profile
#Format the data and pull only the fields of interest into the dataframe
def PullJsonintoDataFrame(tweets_data):
tweets = | pd.DataFrame() | pandas.DataFrame |
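    # A hedged sketch of the field extraction this function builds toward
    # (which tweet fields are kept is an assumption):
    # tweets['text'] = [t.get('text') for t in tweets_data]
    # tweets['created_at'] = [StrToDatetime(t['created_at']) for t in tweets_data]
    # tweets['coordinates'] = [t.get('coordinates') for t in tweets_data]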
from datetime import datetime
from math import sqrt, nan
import pandas as pd
def lowVol(market, last_nyears=1, num_pf=30, interval='d'):
''' Portfolio selection based on low volatility (annualized)
args:
last_nyears: int
num_pf: number of stocks included in the portfolio
interval: time unit the volatility is calculated, ['d', 'w', 'm', 'y']
'''
if type(last_nyears) == int:
num_days = str(last_nyears * 365)+'D' # 365 includes holidays
start = market.prices.last(num_days).index[0]
else:
start = market.prices.index[0]
returns = market.calculate_returns(interval=interval, start=start)
if interval == 'd':
n_units = 252
elif interval == 'w':
n_units = 52
elif interval == 'm':
n_units = 12
else: # 'y'
n_units = 1
std = returns.std(axis=0, skipna=True) * sqrt(n_units) # annualize
std[std == 0] = nan # Get rid of non-traded stocks
# Ranking: the smaller, the better
ranked = std[std.rank(ascending=True)<= num_pf]
tickers = ranked.index
return tickers
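# A hedged usage sketch: `market` is assumed to expose `prices` and
# `calculate_returns` exactly as the selection functions in this module use them.
def _example_equal_weight_low_vol(market, num_pf=30):
    tickers = lowVol(market, last_nyears=1, num_pf=num_pf, interval='w')
    return pd.Series(1.0 / len(tickers), index=tickers)  # equal weights summing to 1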
def momentum(market, last_nyears=1, num_pf=30):
''' Portfolio selection based on momentum
args:
last_nyears: int
num_pf: number of stocks included in the portfolio
'''
if type(last_nyears) == int:
num_days = str(last_nyears * 365)+'D' # 365 includes holidays
start = market.prices.last(num_days).index[0]
else:
start = market.prices.index[0]
returns = market.calculate_returns(interval='d', start=start)
accumulated_returns = (returns+1).prod(axis=0, skipna=True)
# Ranking: the bigger, the better
ranked = accumulated_returns[accumulated_returns.rank(ascending=False)<= num_pf]
tickers = ranked.index
return tickers
def riskAdj(market, last_nyears=1, num_pf=30, interval='d'):
''' Portfolio selection based on risk-adjusted return (annualized volatility)
args:
last_nyears: int
num_pf: number of stocks included in the portfolio
interval: time unit the volatility is calculated, ['d', 'w', 'm', 'y']
'''
if type(last_nyears) == int:
num_days = str(last_nyears * 365)+'D' # 365 includes holidays
start = market.prices.last(num_days).index[0]
else:
start = market.prices.index[0]
# Numerator: accumulated return
daily_returns = market.calculate_returns(interval='d', start=start)
accumulated_returns = (daily_returns+1).prod(axis=0)
# Denominator: risk (annualized volatility)
returns = market.calculate_returns(interval=interval, start=start)
if interval == 'd':
n_units = 252
elif interval == 'w':
n_units = 52
elif interval == 'm':
n_units = 12
else: # 'y'
n_units = 1
std = returns.std(axis=0, skipna=True) * sqrt(n_units) # annualize
std[std == 0] = nan # Get rid of non-traded stocks
# risk-adjusted return
risk_adj = accumulated_returns / std
# Ranking: the bigger, the better
ranked = risk_adj[risk_adj.rank(ascending=False)<= num_pf]
tickers = ranked.index
return tickers
def indicator(market, ind, low_or_high, num_pf=30):
''' Portfolio selection based on indicator (value investing)
args:
ind: name of indicator, such as ['per', 'pbr', 'pcr', 'psr']
low_or_high: the lower(higher), the better, ['low', 'high']
num_pf: number of stocks included in the portfolio
'''
if low_or_high not in ['low', 'high']:
raise ValueError('low_or_high should be one of ["low", "high"]')
ind_value = market.indicators.loc[ind.upper()]
# Ranking: the bigger, the better
ascending = low_or_high == 'low'
ranked = ind_value[ind_value.rank(ascending=ascending)<= num_pf]
tickers = ranked.index
return tickers
def fscore_kr(market, scores=[9]):
''' Portfolio selection based on f-score (Piotroski et al., 2000) (quality investing)
args:
scores: stocks with the given scores are returned
'''
print('Attention: you have to keep financial statements up-to-date!')
# Financial statement
fs = market.fss
# Probability
roa = fs['지배주주순이익'] / fs['자산']
cfo = fs['영업활동으로인한현금흐름'] / fs['자산']
accurual = cfo - roa
# Financial performance
lev = fs['장기차입금'] / fs['자산']
liq = fs['유동자산'] / fs['유동부채']
offer = fs['유상증자'] # estimated
# Operating efficiency
margin = fs['매출총이익'] / fs['매출액']
turn = fs['매출액'] / fs['자산']
if datetime.now().month not in [1,2,3,4]:
col_idx = -1
else:
col_idx = -2
f_1 = (roa.iloc[:, col_idx] > 0).astype(int)
f_2 = (cfo.iloc[:, col_idx] > 0).astype(int)
f_3 = ((roa.iloc[:, col_idx] - roa.iloc[:, col_idx-1]) > 0).astype(int)
    f_4 = (accrual.iloc[:, col_idx] > 0).astype(int)
f_5 = ((lev.iloc[:, col_idx] - lev.iloc[:, col_idx-1]) <= 0).astype(int)
f_6 = ((liq.iloc[:, col_idx] - liq.iloc[:, col_idx-1]) > 0).astype(int)
f_7 = (offer.iloc[:, col_idx].isna() | (offer.iloc[:, col_idx] <= 0)).astype(int)
f_8 = ((margin.iloc[:, col_idx] - margin.iloc[:, col_idx-1]) > 0).astype(int)
f_9 = ((turn.iloc[:, col_idx] - turn.iloc[:, col_idx-1]) > 0).astype(int)
f_table = pd.concat([f_1, f_2, f_3, f_4, f_5, f_6, f_7, f_8, f_9], axis=1)
f_score = f_table.sum(axis=1)
    tickers = pd.Series()
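    # Hedged completion: the source is truncated at this point. Given the
    # docstring ("stocks with the given scores are returned"), a plausible
    # final step is to collect tickers whose F-score is in `scores`.
    for s in scores:
        tickers = pd.concat([tickers, f_score[f_score == s]])
    return tickers.index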
from os.path import join
import numpy as np
import pandas as pd
import geopandas as gpd
import matplotlib.pyplot as plt
from src import utils as cutil
def convert_non_monotonic_to_nan(array):
"""Converts a numpy array to a monotonically increasing one.
Args:
array (numpy.ndarray [N,]): input array
Returns:
numpy.ndarray [N,]: some values marked as missing, all non-missing
values should be monotonically increasing
Usage:
>>> convert_non_monotonic_to_nan(np.array([0, 0, 5, 3, 4, 6, 3, 7, 6, 7, 8]))
np.array([ 0., 0., np.nan, 3., np.nan, np.nan, 3., np.nan, 6., 7., 8.])
"""
keep = np.arange(0, len(array))
is_monotonic = False
while not is_monotonic:
is_monotonic_array = np.hstack(
(array[keep][1:] >= array[keep][:-1], np.array(True))
)
is_monotonic = is_monotonic_array.all()
keep = keep[is_monotonic_array]
out_array = np.full_like(array.astype(np.float), np.nan)
out_array[keep] = array[keep]
return out_array
def log_interpolate(array):
"""Interpolates assuming log growth.
Args:
array (numpy.ndarray [N,]): input array with missing values
Returns:
numpy.ndarray [N,]: all missing values will be filled
Usage:
>>> log_interpolate(np.array([0, np.nan, 2, np.nan, 4, 6, np.nan, 7, 8]))
np.array([0, 0, 2, 3, 4, 6, 7, 7, 8])
"""
idx = np.arange(0, len(array))
log_array = np.log(array.astype(np.float32) + 1e-1)
interp_array = np.interp(
x=idx, xp=idx[~np.isnan(array)], fp=log_array[~np.isnan(array)]
)
return np.round(np.exp(interp_array)).astype(int)
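# A small sketch of how the two helpers above are meant to be chained on a
# cumulative case series: values breaking monotonicity are masked first, then
# back-filled assuming log-linear growth (illustrative, not from the source).
def clean_cumulative_series(raw_counts):
    masked = convert_non_monotonic_to_nan(np.asarray(raw_counts))
    return log_interpolate(masked)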
DATA_CHINA = cutil.DATA_RAW / "china"
health_dxy_file = join(DATA_CHINA, "DXYArea.csv")
health_jan_file = join(DATA_CHINA, "china_city_health_jan.xlsx")
policy_file = join(DATA_CHINA, "CHN_policy_data_sources.csv")
pop_file = join(DATA_CHINA, "china_city_pop.csv")
output_file = cutil.DATA_PROCESSED / "adm2" / "CHN_processed.csv"
match_file = join(DATA_CHINA, "match_china_city_name_w_adm2.csv")
shp_file = cutil.DATA_INTERIM / "adm" / "adm2" / "adm2.shp"
end_date_file = cutil.CODE / "data" / "cutoff_dates.csv"
end_date = pd.read_csv(end_date_file)
(end_date,) = end_date.loc[end_date["tag"] == "default", "end_date"].values
end_date = str(end_date)
print("End Date: ", end_date)
## Load and clean pre 01/24 data
# load pre 01/24 data
df_jan = pd.read_excel(health_jan_file, sheet_name=None)
from datetime import datetime
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
Series,
_testing as tm,
)
def test_split(any_string_dtype):
values = Series(["a_b_c", "c_d_e", np.nan, "f_g_h"], dtype=any_string_dtype)
result = values.str.split("_")
exp = Series([["a", "b", "c"], ["c", "d", "e"], np.nan, ["f", "g", "h"]])
tm.assert_series_equal(result, exp)
# more than one char
values = Series(["a__b__c", "c__d__e", np.nan, "f__g__h"], dtype=any_string_dtype)
result = values.str.split("__")
tm.assert_series_equal(result, exp)
result = values.str.split("__", expand=False)
tm.assert_series_equal(result, exp)
# regex split
values = Series(["a,b_c", "c_d,e", np.nan, "f,g,h"], dtype=any_string_dtype)
result = values.str.split("[,_]")
exp = Series([["a", "b", "c"], ["c", "d", "e"], np.nan, ["f", "g", "h"]])
tm.assert_series_equal(result, exp)
def test_split_object_mixed():
mixed = Series(["a_b_c", np.nan, "d_e_f", True, datetime.today(), None, 1, 2.0])
result = mixed.str.split("_")
exp = Series(
[
["a", "b", "c"],
np.nan,
["d", "e", "f"],
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
]
)
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
result = mixed.str.split("_", expand=False)
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
@pytest.mark.parametrize("method", ["split", "rsplit"])
def test_split_n(any_string_dtype, method):
s = Series(["a b", pd.NA, "b c"], dtype=any_string_dtype)
expected = Series([["a", "b"], pd.NA, ["b", "c"]])
result = getattr(s.str, method)(" ", n=None)
tm.assert_series_equal(result, expected)
result = getattr(s.str, method)(" ", n=0)
tm.assert_series_equal(result, expected)
def test_rsplit(any_string_dtype):
values = Series(["a_b_c", "c_d_e", np.nan, "f_g_h"], dtype=any_string_dtype)
result = values.str.rsplit("_")
exp = Series([["a", "b", "c"], ["c", "d", "e"], np.nan, ["f", "g", "h"]])
tm.assert_series_equal(result, exp)
# more than one char
values = Series(["a__b__c", "c__d__e", np.nan, "f__g__h"], dtype=any_string_dtype)
result = values.str.rsplit("__")
tm.assert_series_equal(result, exp)
result = values.str.rsplit("__", expand=False)
tm.assert_series_equal(result, exp)
# regex split is not supported by rsplit
values = Series(["a,b_c", "c_d,e", np.nan, "f,g,h"], dtype=any_string_dtype)
result = values.str.rsplit("[,_]")
exp = Series([["a,b_c"], ["c_d,e"], np.nan, ["f,g,h"]])
tm.assert_series_equal(result, exp)
# setting max number of splits, make sure it's from reverse
values = Series(["a_b_c", "c_d_e", np.nan, "f_g_h"], dtype=any_string_dtype)
result = values.str.rsplit("_", n=1)
exp = Series([["a_b", "c"], ["c_d", "e"], np.nan, ["f_g", "h"]])
tm.assert_series_equal(result, exp)
def test_rsplit_object_mixed():
# mixed
mixed = Series(["a_b_c", np.nan, "d_e_f", True, datetime.today(), None, 1, 2.0])
result = mixed.str.rsplit("_")
exp = Series(
[
["a", "b", "c"],
np.nan,
["d", "e", "f"],
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
]
)
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
result = mixed.str.rsplit("_", expand=False)
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
def test_split_blank_string(any_string_dtype):
# expand blank split GH 20067
values = Series([""], name="test", dtype=any_string_dtype)
result = values.str.split(expand=True)
exp = DataFrame([[]], dtype=any_string_dtype) # NOTE: this is NOT an empty df
tm.assert_frame_equal(result, exp)
values = Series(["a b c", "a b", "", " "], name="test", dtype=any_string_dtype)
result = values.str.split(expand=True)
exp = DataFrame(
[
["a", "b", "c"],
["a", "b", np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
],
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, exp)
def test_split_noargs(any_string_dtype):
# #1859
s = Series(["<NAME>", "Travis Oliphant"], dtype=any_string_dtype)
result = s.str.split()
expected = ["Travis", "Oliphant"]
assert result[1] == expected
result = s.str.rsplit()
assert result[1] == expected
@pytest.mark.parametrize(
"data, pat",
[
(["bd asdf jfg", "kjasdflqw asdfnfk"], None),
(["bd asdf jfg", "kjasdflqw asdfnfk"], "asdf"),
(["bd_asdf_jfg", "kjasdflqw_asdfnfk"], "_"),
],
)
def test_split_maxsplit(data, pat, any_string_dtype):
# re.split 0, str.split -1
s = Series(data, dtype=any_string_dtype)
result = s.str.split(pat=pat, n=-1)
xp = s.str.split(pat=pat)
tm.assert_series_equal(result, xp)
result = s.str.split(pat=pat, n=0)
tm.assert_series_equal(result, xp)
@pytest.mark.parametrize(
"data, pat, expected",
[
(
["split once", "split once too!"],
None,
Series({0: ["split", "once"], 1: ["split", "once too!"]}),
),
(
["split_once", "split_once_too!"],
"_",
            Series({0: ["split", "once"], 1: ["split", "once_too!"]}),
import pandas as pd
import pytest
import torch
import greattunes.utils
from greattunes.data_format_mappings import tensor2pretty_covariate
@pytest.mark.parametrize("method, tmp_val",
[
["functions", 1.0],
["iterative", 2.0]
])
def test_observe_get_and_verify_response_input_unit(tmp_observe_class, method, tmp_val, monkeypatch):
"""
    test that _get_and_verify_response_input works for self.sampling["method"] = "iterative" or "functions". Leverage
    monkeypatching and a fake class to mock how greattunes._observe is called inside the
    TuneSession class in greattunes.__init__. Rely on manual input for the "iterative" option
"""
# # define class
cls = tmp_observe_class
cls.sampling["method"] = method
# monkeypatch the "support" functions _get_response_function_input, _read_response_manual_input
def mock_get_response_function_input():
return torch.tensor([[tmp_val]], dtype=torch.double,
device=torch.device("cuda" if torch.cuda.is_available() else "cpu"))
monkeypatch.setattr(
cls, "_get_response_function_input", mock_get_response_function_input
)
manual_tmp_val = tmp_val + 1.0
def mock_read_response_manual_input(additional_text):
return torch.tensor([[manual_tmp_val]], dtype=torch.double,
device=torch.device("cuda" if torch.cuda.is_available() else "cpu"))
monkeypatch.setattr(
cls, "_read_response_manual_input", mock_read_response_manual_input
)
# set kwarg response to None (so manually provided input is used)
kwarg_response = None
# run test
output = cls._get_and_verify_response_input(response=kwarg_response)
if method == "functions":
assert output[0].item() == tmp_val
elif method == "iterative":
assert output[0].item() == manual_tmp_val
@pytest.mark.parametrize("method", ["WRONG", None])
def test_observe_get_and_verify_response_input_fail_unit(tmp_observe_class, method):
"""
test that _get_and_verify_response_input fails for self.sampling["method"] not equal to "iterative" or "functions".
"""
# # define class
cls = tmp_observe_class
cls.sampling["method"] = method
# set kwarg response to None (so manually provided input is used)
kwarg_response = None
with pytest.raises(Exception) as e:
assert cls._get_and_verify_response_input(response=kwarg_response)
assert str(e.value) == "greattunes._observe._get_and_verify_response_input: class attribute " \
"self.sampling['method'] has non-permissable value " + str(method) + ", must be in " \
"['iterative', 'functions']."
@pytest.mark.parametrize(
"kwarg_response",
[
[1.2],
torch.tensor([[1.2]], dtype=torch.double)
]
)
def test_get_and_verify_response_input_kwarg_input_works(tmp_observe_class, kwarg_response, monkeypatch):
"""
test that _get_and_verify_response_input works for self.sampling["method"] = "iterative" with programmatically
provided input. Leverage monkeypatching for utils.__get_covars_from_kwargs and create false class to mock that
greattunes._observe will be called inside TuneSession class in greattunes.__init__
"""
# set device for torch
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# # define class
cls = tmp_observe_class
cls.sampling["method"] = "iterative"
# monkeypatch "__get_covars_from_kwargs"
def mock__get_covars_from_kwargs(x):
if isinstance(kwarg_response, list):
return torch.tensor([kwarg_response], dtype=torch.double, device=device)
else:
return kwarg_response
monkeypatch.setattr(greattunes.utils, "__get_covars_from_kwargs", mock__get_covars_from_kwargs)
# run test
output = cls._get_and_verify_response_input(response=kwarg_response)
# assert
if isinstance(kwarg_response, list):
assert output[0].item() == kwarg_response[0]
elif isinstance(kwarg_response, torch.DoubleTensor):
assert output[0].item() == kwarg_response[0].item()
@pytest.mark.parametrize("FLAG_TRAINING_DATA", [True, False])
def test_observe_get_response_function_input_unit(tmp_observe_class, training_data_covar_complex, FLAG_TRAINING_DATA):
"""
test _get_response_function_input for pass and fail
"""
# temp class for test
cls = tmp_observe_class
# data
train_X = training_data_covar_complex[1]
covar_details = training_data_covar_complex[3]
# set attributes on class, required for test
cls.train_X = None
cls.covar_details = covar_details
if FLAG_TRAINING_DATA:
cls.train_X = train_X
# add simple response function
tmp_val = 2.2
def mock_response_function(covar):
"""
test response function
:param covar: torch.tensor (num_obs X num_covariates)
:return:
"""
return tmp_val
cls.sampling["response_func"] = mock_response_function
# assert
if FLAG_TRAINING_DATA:
# run test
output = cls._get_response_function_input()
assert output[0].item() == tmp_val
else:
with pytest.raises(Exception) as e:
assert output == cls._get_response_function_input()
assert str(e.value) == "'NoneType' object has no attribute 'shape'"
@pytest.mark.parametrize(
"response, kwarg_response, error_msg",
[
[torch.tensor([[2]], dtype=torch.double), ['a'], "too many dimensions 'str'"],
[torch.tensor([[2]], dtype=torch.double), [1, 2], "greattunes._observe._get_and_verify_response_input: incorrect number of variables provided. Was expecting input of size (1,1) but received torch.Size([1, 2])"],
[torch.tensor([[2]], dtype=torch.double), [1, 'a'], "must be real number, not str"],
[torch.tensor([[2, 3]], dtype=torch.double), None, "greattunes._observe._get_and_verify_response_input: incorrect number of variables provided. Was expecting input of size (1,1) but received torch.Size([1, 2])"],
[torch.tensor([[2]], dtype=torch.double), torch.tensor([[1, 2]], dtype=torch.double), "greattunes.utils.__get_response_from_kwargs: dimension mismatch in provided 'response'. Was expecting torch tensor of size (1,1) but received one of size (1, 2)."],
]
)
def test_get_and_verify_response_input_fails_wrong_input(tmp_observe_class, response, kwarg_response, error_msg,
monkeypatch):
"""
test that _get_and_verify_response_input fails for wrong inputs. Use only the "iterative" sampling option for this
test
"""
# set device for torch
mydevice = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# # define class
cls = tmp_observe_class
cls.sampling["method"] = "iterative"
cls.device = mydevice
# monkeypatch "__get_response_from_kwargs"
def mock__get_response_from_kwargs(x, device):
if isinstance(kwarg_response, list):
return torch.tensor([kwarg_response], dtype=torch.double, device=mydevice)
else:
return kwarg_response
monkeypatch.setattr(greattunes.utils, "__get_response_from_kwargs", mock__get_response_from_kwargs)
# monkeypatch _read_response_manual_input
def mock_read_response_manual_input(additional_text):
return response
monkeypatch.setattr(
cls, "_read_response_manual_input", mock_read_response_manual_input
)
# run test
with pytest.raises(Exception) as e:
output = cls._get_and_verify_response_input(response=kwarg_response)
assert str(e.value) == error_msg
@pytest.mark.parametrize("additional_text, input_data",
[
["temp", [1.1, 2.2]],
["try again", [3.1, -12.2]],
["simple try", [4.5]],
]
)
def test_observe_read_response_manual_input_unit(tmp_observe_class, additional_text, input_data, monkeypatch):
"""
test _read_response_manual_input, monkeypatching the "input" function call in the method
"""
# temp class for test
cls = tmp_observe_class
# set attribute
cls.model = {"covars_proposed_iter": 0}
# monkeypatching "input"
monkeypatch_output = ", ".join([str(x) for x in input_data]) # match data from "input" function
monkeypatch.setattr("builtins.input", lambda _: monkeypatch_output)
# run function
output = cls._read_response_manual_input(additional_text)
# assert
for it in range(len(input_data)):
assert output[0, it].item() == input_data[it]
@pytest.mark.parametrize(
"candidate",
[
torch.tensor([[2.2]], dtype=torch.double),
torch.tensor([[2.2, 3.3, -1]], dtype=torch.double),
]
)
def test_observe_print_candidate_to_prompt_works_unit(tmp_observe_class, candidate):
"""
test that given a candidate, the right string is written by the method _print_candidate_to_prompt
:param candidate (torch tensor): one-row tensor of new datapoint to be investigated
"""
# temporary class to run the test
cls = tmp_observe_class
# extend with required attributes
tmp_covars_proposed_iter = 2
cls.model = {"covars_proposed_iter": tmp_covars_proposed_iter}
# add covariate details to tmp_observe_class
covar_details = {}
for i in range(candidate.size()[1]):
key = "covar" + str(i)
val = candidate[0,i].item()
covar_details[key] = {"guess": val, "min": val-1.0, "max": val+1.0, "type": float, "columns": i}
cls.covar_details = covar_details
# run the method: generate the string to be printed
input_request = cls._print_candidate_to_prompt(candidate=candidate)
# build expected output
cand_pretty = tensor2pretty_covariate(train_X_sample=candidate, covar_details=covar_details)
new_cand_names = [i + " (" + str(covar_details[i]["type"]) + ")" for i in list(cand_pretty.columns)]
cand_pretty.columns = new_cand_names
outtext = "\tNEW datapoint to sample:\n\t" + cand_pretty.to_string(index=False).replace("\n", "\n\t")
# assert
assert input_request == outtext
@pytest.mark.parametrize(
"candidate, error_msg",
[
[torch.tensor([], dtype=torch.double), "greattunes.greattunes._observe._print_candidate_to_prompt: provided input 'candidate' is empty. Expecting torch tensor of size 1 X num_covariates"],
[None, "greattunes.greattunes._observe._print_candidate_to_prompt: provided input 'candidate' is incorrect datatype. Expecting to be of type torch.Tensor"]
]
)
def test_observe_print_candidate_to_prompt_fails_unit(tmp_observe_class, candidate, error_msg):
"""
test that _print_candidate_to_prompt throws the right error for the two cases
    :param candidate: normally a one-row torch tensor with the new datapoint to be investigated; here hijacked with invalid values to trigger the error cases
"""
# temporary class to run the test
cls = tmp_observe_class
# run _print_candidate_to_prompt method and ensure correct error returned
with pytest.raises(Exception) as e:
# run the method: generate the string to be printed
input_request = cls._print_candidate_to_prompt(candidate=candidate)
assert str(e.value) == error_msg
@pytest.mark.parametrize(
"additional_text", ["testing function", "12345_ygh", None, 22.0, [1.0, 4.4], torch.tensor([[2.2]], dtype=torch.double)]
)
def test_read_covars_manual_input(tmp_observe_class,
covar_details_mapped_covar_mapped_names_tmp_observe_class,
additional_text, monkeypatch):
"""
test reading of covars from manual input by user. Monkeypatches reliance on function 'input'
"""
covariates = [1.1, 2.2, 200, -1.7]
# temp class to execute the test
cls = tmp_observe_class
# add attribute 'initial_guess' required for '_read_covars_manual'
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
covar_tensor = torch.tensor([covariates], dtype=torch.double, device=device)
cls.initial_guess = covar_tensor
# add proposed_X attributed required for '_read_covars_manual'
cls.proposed_X = covar_tensor
# add attributes defining the covariate expectation
cls.covar_details = covar_details_mapped_covar_mapped_names_tmp_observe_class[0]
cls.covar_mapped_names = covar_details_mapped_covar_mapped_names_tmp_observe_class[1]
cls.sorted_pandas_columns = covar_details_mapped_covar_mapped_names_tmp_observe_class[2]
# monkeypatch
def mock_input(x): # mock function to replace 'input' for unit testing purposes
return ", ".join([str(x) for x in covariates])
monkeypatch.setattr("builtins.input", mock_input)
# run the test
# different tests for cases where it's supposed to pass vs fail
if isinstance(additional_text, str):
covars_candidate_float_tensor = cls._read_covars_manual_input(additional_text)
print(covars_candidate_float_tensor)
# assert that the right elements are returned in 'covars_candidate_float_tensor'
for i in range(covars_candidate_float_tensor.size()[1]):
assert covars_candidate_float_tensor[0, i].item() == covariates[i]
# cases where type of additonal_text should make test fail
else:
with pytest.raises(AssertionError) as e:
covars_candidate_float_tensor = cls._read_covars_manual_input(additional_text)
assert str(e.value) == "greattunes._observe._read_covars_manual_input: wrong datatype of parameter 'additional_text'. Was expecting 'str' but received " + str(type(additional_text))
def test_get_and_verify_covars_input_works(tmp_observe_class, monkeypatch):
"""
test that _get_and_verify_covars_input works when providing the correct data. Monkeypatching methods
"_read_covars_manual_input" and "__validate_num_covars"
"""
# covariates to sample
covariates = [1.1, 2.2, 200, -1.7]
# device for torch tensor definitions
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# temp class to execute the test
cls = tmp_observe_class
# monkeypatch "_read_covars_manual_input"
def mock_read_covars_manual_input(x):
return torch.tensor([covariates], dtype=torch.double, device=device)
monkeypatch.setattr(cls, "_read_covars_manual_input", mock_read_covars_manual_input)
# monkeypatch "_Validators__validate_num_covars"
def mock_Validators__validate_num_covars(x):
return True
monkeypatch.setattr(cls, "_Validators__validate_num_covars", mock_Validators__validate_num_covars)
# covariate kwargs is set to None so input-based method is used
kwarg_covariates = None
# run method
covars_candidate_float_tensor = cls._get_and_verify_covars_input(covars=kwarg_covariates)
# assert the output
# assert that the right elements are returned in 'covars_candidate_float_tensor'
for i in range(covars_candidate_float_tensor.size()[1]):
assert covars_candidate_float_tensor[0, i].item() == covariates[i]
@pytest.mark.parametrize(
"covars",
[
[1.1, 2.2, 200, -1.7],
torch.tensor([[1.1, 2.2, 200, -1.7]], dtype=torch.double, device=torch.device("cuda" if torch.cuda.is_available() else "cpu")),
]
)
def test_get_and_verify_covars_programmatic_works(tmp_observe_class,
covar_details_mapped_covar_mapped_names_tmp_observe_class,
covars, monkeypatch):
"""
test that _get_and_verify_covars_input works when providing the correct data programmatically. Monkeypatching
method "__validate_num_covars" and helper function "utils.__get_covars_from_kwargs"
"""
# device for torch tensor definitions
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# temp class to execute the test
cls = tmp_observe_class
# attributes
cls.covar_details = covar_details_mapped_covar_mapped_names_tmp_observe_class[0]
cls.covar_mapped_names = covar_details_mapped_covar_mapped_names_tmp_observe_class[1]
# monkeypatch "__get_covars_from_kwargs"
def mock__get_covars_from_kwargs(x):
if isinstance(covars, list):
return torch.tensor([covars], dtype=torch.double, device=device)
else:
return covars
monkeypatch.setattr(greattunes.utils, "__get_covars_from_kwargs", mock__get_covars_from_kwargs)
# monkeypatch "_Validators__validate_num_covars"
def mock_Validators__validate_num_covars(x):
return True
monkeypatch.setattr(cls, "_Validators__validate_num_covars", mock_Validators__validate_num_covars)
# run method
covars_candidate_float_tensor = cls._get_and_verify_covars_input(covars=covars)
# assert the output
# assert that the right elements are returned in 'covars_candidate_float_tensor'
for i in range(covars_candidate_float_tensor.size()[1]):
if isinstance(covars, list):
assert covars_candidate_float_tensor[0, i].item() == covars[i]
else:
assert covars_candidate_float_tensor[0, i].item() == covars[0, i].item()
@pytest.mark.parametrize(
"proposed_X",
[torch.tensor([[1.1, 2.2]], dtype=torch.double, device=torch.device("cuda" if torch.cuda.is_available() else "cpu")),
None]
)
def test_get_and_verify_covars_input_fails(tmp_observe_class, proposed_X, monkeypatch):
"""
test that _get_and_verify_covars_input fails for both providing incorrect data. Monkeypatching methods
"_read_covars_manual_input" and "__validate_num_covars"
"""
# device for torch tensor definitions
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# covariates to sample
covariates = [1.1, 2.2, 200, -1.7]
covars_tensor = torch.tensor([covariates], dtype=torch.double, device=device)
# temp class to execute the test
cls = tmp_observe_class
# set proposed_X attribute (required for method to work)
cls.proposed_X = proposed_X
# monkeypatch "_read_covars_manual_input"
def mock_read_covars_manual_input(x):
return covars_tensor
monkeypatch.setattr(cls, "_read_covars_manual_input", mock_read_covars_manual_input)
# monkeypatch "_Validators__validate_num_covars"
def mock_Validators__validate_num_covars(x):
return False
monkeypatch.setattr(cls, "_Validators__validate_num_covars", mock_Validators__validate_num_covars)
# expected error message returned
add_text = ""
if cls.proposed_X is not None:
add_text = " Was expecting something like '" + str(cls.proposed_X[-1]) + "', but got '" + str(covars_tensor) + "'"
error_msg = "greattunes._observe._get_and_verify_covars_input: unable to get acceptable covariate input in 3 iterations." + add_text
# covariate kwargs is set to None so input-based method is used
kwarg_covariates = None
# run method
with pytest.raises(Exception) as e:
covars_candidate_float_tensor = cls._get_and_verify_covars_input(covars=kwarg_covariates)
assert str(e.value) == error_msg
# negative tests for _get_and_verify_covars for kwargs input
@pytest.mark.parametrize(
"covars, error_msg",
[
[[1.1, 2.2, 200, -1.7], "greattunes._observe._get_and_verify_covars_input: unable to get acceptable covariate input in 3 iterations."],
[torch.tensor([[1.1, 2.2, 200, -1.7]], dtype=torch.double, device=torch.device("cuda" if torch.cuda.is_available() else "cpu")), "greattunes._observe._get_and_verify_covars_input: unable to get acceptable covariate input in 3 iterations."],
[torch.tensor([1.1, 2.2, 200, -1.7], dtype=torch.double, device=torch.device("cuda" if torch.cuda.is_available() else "cpu")), "greattunes.utils.__get_covars_from_kwargs: dimension mismatch in provided 'covars'. Was expecting torch tensor of size (1,<num_covariates>) but received one of size (4)."], # this one fails in utils.__get_covars_from_kwargs because of wrong size of input tensor
]
)
def test_get_and_verify_covars_programmatic_fails(tmp_observe_class,
covar_details_mapped_covar_mapped_names_tmp_observe_class,
covars, error_msg, monkeypatch):
"""
test that _get_and_verify_covars_input fails when providing incorrect data programmatically. Monkeypatching
method "__validate_num_covars". Expected error is related to wrong number of elements returned
"""
# device for torch tensor definitions
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# temp class to execute the test
cls = tmp_observe_class
cls.covar_details = covar_details_mapped_covar_mapped_names_tmp_observe_class[0]
cls.covar_mapped_names = covar_details_mapped_covar_mapped_names_tmp_observe_class[1]
# monkeypatch "__get_covars_from_kwargs"
def mock__get_covars_from_kwargs(x):
if isinstance(covars, list):
return torch.tensor([covars], dtype=torch.double, device=device)
else:
return covars
monkeypatch.setattr(greattunes.utils, "__get_covars_from_kwargs", mock__get_covars_from_kwargs)
# monkeypatch "_Validators__validate_num_covars"
def mock_Validators__validate_num_covars(x):
return False
monkeypatch.setattr(cls, "_Validators__validate_num_covars", mock_Validators__validate_num_covars)
# run method
with pytest.raises(Exception) as e:
covars_candidate_float_tensor = cls._get_and_verify_covars_input(covars=covars)
assert str(e.value) == error_msg
@pytest.mark.parametrize(
"train_X, x_data, covars_proposed_iter, covars_sampled_iter, kwarg_covariates",
[
[torch.tensor([[0.1, 2.5, 12, 0.22]], dtype=torch.double, device=torch.device("cuda" if torch.cuda.is_available() else "cpu")), pd.DataFrame({"covar0": [0.1], "covar1": [2.5], "covar2": [12], "covar3": [0.22]}), 2, 1, None],
[torch.tensor([[0.1, 2.5, 12, 0.22]], dtype=torch.double, device=torch.device("cuda" if torch.cuda.is_available() else "cpu")), pd.DataFrame({"covar0": [0.1], "covar1": [2.5], "covar2": [12], "covar3": [0.22]}), 2, 1, torch.tensor([[0.1, 2.5, 12, 0.22]], dtype=torch.double, device=torch.device("cuda" if torch.cuda.is_available() else "cpu"))],
        [torch.tensor([[0.1, 2.5, 12, 0.22]], dtype=torch.double, device=torch.device("cuda" if torch.cuda.is_available() else "cpu")), pd.DataFrame({"covar0": [0.1], "covar1": [2.5], "covar2": [12], "covar3": [0.22]}),
# ----------------------------------------------------------------------------
# Copyright (c) 2020, <NAME>.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import os
import re
import sys
import yaml
import random
import itertools
import pandas as pd
from os.path import dirname, isfile, splitext
from prep_songbird.analyses_prep import AnalysisPrep
from prep_songbird._io_utils import read_meta_pd, get_analysis_folder
from prep_songbird._cmds import (
get_case, get_new_meta_pd, run_import, songbird_cmd)
from prep_songbird._metadata import (
rename_duplicate_columns, get_train_perc_from_numeric,
get_cat_vars_and_vc, make_train_test_from_cat)
from prep_songbird._filter import (
filter_table, write_filtered_tsv, get_unique_filterings)
class DiffModels(object):
def __init__(self, config, project) -> None:
self.cmds = {}
self.config = config
self.project = project
if config.diff_models:
songbird_dicts = self.get_songbird_dicts()
self.songbird_models = songbird_dicts[0]
self.songbird_filtering = songbird_dicts[1]
self.unique_filtering = get_unique_filterings(songbird_dicts[1])
self.params = songbird_dicts[2]
self.models_baselines = songbird_dicts[3]
self.models = {}
self.models_issues = {}
self.songbird_subsets = songbird_dicts[4]
self.songbirds = pd.DataFrame(columns=[
'dataset', 'filter', 'prevalence', 'abundance'])
self.params_list = [
'train', 'batches', 'learns', 'epochs', 'diff_priors',
'thresh_feats', 'thresh_samples', 'summary_interval']
self.q2s_pd = pd.DataFrame()
self.songbird_pd = pd.DataFrame()
def get_songbird_models(self, diff_dict):
if 'models' not in diff_dict:
print('No models in %s' % self.config.diff_models)
sys.exit(0)
return diff_dict['models']
def get_songbird_baselines(self, diff_dict):
baselines = {}
if 'baselines' in diff_dict:
return diff_dict['baselines']
return baselines
def get_songbird_params(self, diff_dict):
params = {
'train': ['0.7'],
'batches': ['2'],
'learns': ['1e-4'],
'epochs': ['5000'],
'thresh_feats': ['0'],
'thresh_samples': ['0'],
'diff_priors': ['0.5'],
'summary_interval': ['1']
}
if 'params' not in diff_dict:
print('No parameters set in %s:\nUsing defaults: %s' % (
diff_dict,
', '.join(['%s: %s' % (k, v) for k, v in params.items()])))
else:
for param in diff_dict['params']:
cur_param = diff_dict['params'][param]
if not isinstance(cur_param, list):
print('Parameter %s should be a list (correct in %s)\n' % (
param, self.config.diff_models))
sys.exit(0)
params[param] = cur_param
return params
def get_filtering(self, diff_dict, models):
dats = []
filtering = {'': {'0_0': {}}}
for dat in models.keys():
dats.append(dat)
filtering['']['0_0'][dat] = ['0', '0']
if 'filtering' not in diff_dict:
            print('No filtering thresholds in %s\n' % self.config.diff_models)
else:
if 'global' in diff_dict['filtering']:
for fname, p_a in diff_dict['filtering']['global'].items():
for dat in models.keys():
if fname not in filtering['']:
filtering[''][fname] = {}
filtering[''][fname][dat] = p_a
for dat, filts in diff_dict['filtering'].items():
if dat == 'global':
continue
for fname, p_a in filts.items():
if fname not in filtering['']:
filtering[''][fname] = {}
if dat in dats:
filtering[''][fname][dat] = p_a
return filtering
def get_songbird_dicts(self):
with open(self.config.diff_models) as handle:
try:
diff_dict = yaml.load(handle, Loader=yaml.FullLoader)
except AttributeError:
diff_dict = yaml.load(handle)
main_cases_dict = {'ALL': [[]]}
if 'subsets' in diff_dict:
main_cases_dict.update(diff_dict['subsets'])
models = self.get_songbird_models(diff_dict)
params = self.get_songbird_params(diff_dict)
baselines = self.get_songbird_baselines(diff_dict)
filtering = self.get_filtering(diff_dict, models)
return models, filtering, params, baselines, main_cases_dict
def merge_subsets_apply(self):
subsets_fp = [
[dataset, var, subset, get_case(subset, var), '']
for var, subsets in self.songbird_subsets.items()
for subset in subsets
for dataset in self.songbirds.dataset.unique()]
if subsets_fp:
subsets = pd.DataFrame(
subsets_fp, columns=['dataset', 'variable', 'factors',
'subset', 'pair'])
self.songbirds = self.songbirds.merge(
subsets, on=['dataset'], how='outer')
def get_songbirds_filts(self, project):
filts_df = []
for dat, filts_dats in self.unique_filtering.items():
if dat not in project.datasets:
continue
for (filt, prev, abund) in filts_dats:
filts_df.append([dat, filt, prev, abund])
if filts_df:
self.songbirds = pd.DataFrame(filts_df, columns=[
'dataset', 'filter', 'prevalence', 'abundance'])
def prep_songbirds(self, project):
self.get_songbirds_filts(project)
self.merge_subsets_apply()
self.make_datasets_paths()
def make_datasets_paths(self):
cmds = {}
self.get_datasets_paths()
if self.songbirds.shape[0]:
for (dataset, filter, subset), row in self.songbirds.groupby(
['dataset', 'filter', 'subset']):
row_d = row.iloc[0, :].to_dict()
tsv, qza, meta = row_d['tsv'], row_d['qza'], row_d['meta']
data = self.project.datasets[dataset]
variable, factors = row_d['variable'], row_d['factors']
meta_pd = get_new_meta_pd(
data.metadata[0], subset, variable, factors)
meta_pd.to_csv(meta, index=False, sep='\t')
if not self.config.force and isfile(tsv) and isfile(qza):
continue
tsv_pd = data.data[0][meta_pd.sample_name.tolist()]
preval, abund = row_d['prevalence'], row_d['abundance']
tsv_pd = filter_table(preval, abund, tsv_pd)
if self.config.force or not isfile(tsv):
write_filtered_tsv(tsv, tsv_pd)
if self.config.force or not isfile(qza):
cmd = run_import(tsv, qza, 'FeatureTable[Frequency]')
cmds.setdefault(dataset, []).append(cmd)
self.register_command('songbird_imports', cmds)
def get_datasets_paths(self):
paths = []
if self.songbirds.shape[0]:
for r, row in self.songbirds.iterrows():
dataset = row['dataset']
filter = row['filter']
subset = row['subset']
odir = get_analysis_folder(
self.config.i_datasets_folder,
'songbird/datasets/%s/%s' % (dataset, subset))
rad = '%s_%s' % (dataset, filter)
tsv = '%s/tab_%s.tsv' % (odir, rad)
qza = '%s.qza' % splitext(tsv)[0]
meta = '%s/meta_%s.tsv' % (odir, rad)
paths.append([tsv, qza, meta])
if paths:
self.songbirds = pd.concat([
self.songbirds, pd.DataFrame(
paths, columns=['tsv', 'qza', 'meta'])], axis=1)
@staticmethod
def get_traintests(meta_fp, new_meta_pd, vars, train, train_col):
if train.isdigit() or train.replace('.', '').isdigit():
train_perc = get_train_perc_from_numeric(train, new_meta_pd)
vars_pd = new_meta_pd[vars].copy()
cat_vars, cat_pd, vc, rep_d = get_cat_vars_and_vc(vars, vars_pd)
if cat_vars and vc.size < cat_pd.shape[0] * 0.5:
train_samples = make_train_test_from_cat(
cat_pd, vc, train_perc, meta_fp, cat_vars, train_col, rep_d)
else:
train_samples = random.sample(
new_meta_pd.index.tolist(),
k=int(train_perc * new_meta_pd.shape[0]))
return train_samples
return None
def make_train_test_column(self, meta_fp, train_test_d,
meta_pd, dat) -> dict:
train_tests = {}
train = train_test_d['train']
meta_tt_pd = meta_pd.set_index('sample_name').copy()
if 'datasets' in train_test_d and dat in train_test_d['datasets']:
for tt, vars in train_test_d['datasets'][dat].items():
vars_pd = meta_tt_pd[vars].copy()
vars_pd = vars_pd.loc[~vars_pd.isna().any(1)]
vars_pd = rename_duplicate_columns(vars_pd)
trains = self.get_traintests(
meta_fp, vars_pd, vars, str(train), tt)
if trains:
train_tests[tt] = trains
return train_tests
def make_train_test(self):
if self.songbirds.shape[0]:
for _, sb in self.songbirds.groupby(
['dataset', 'filter', 'subset']):
d = sb.iloc[0, :].to_dict()
fps = ['dataset', 'tsv', 'qza', 'meta']
dat, tsv, qza, meta_fp = [d[x] for x in fps]
meta_subset = read_meta_pd(meta_fp)
train_tests = self.make_train_test_column(
meta_fp, self.config.train_test_dict, meta_subset, dat)
rewrite = False
meta_subset_cols = set(meta_subset.columns)
for train_col, train_samples in train_tests.items():
if train_col not in meta_subset_cols:
rewrite = True
meta_subset[train_col] = [
'Train' if x in set(train_samples) else
'Test' for x in meta_subset.sample_name.tolist()]
if self.config.force or rewrite:
meta_subset.to_csv(meta_fp, index=False, sep='\t')
def get_params_combinations(self):
"""Make a pandas data frame from the combinations
of songbird run/hyper-parameters. It includes the
handling of user-specified 'train_column', which
        always takes precedence over the default 'n_examples'.
Returns
-------
params_pd : pd.DataFrame
            Combinations of parameters as rows, and
individual parameters as columns.
"""
params = []
to_combine = [self.params[param] for param in self.params_list]
for params_combination in itertools.product(*to_combine):
params.append(params_combination)
params_pd = pd.DataFrame(params, columns=self.params_list).astype(str)
return params_pd
@staticmethod
def print_message_or_not(mess, m):
if m not in mess:
mess.add(m)
def process_params_combinations(
self,
dataset: str,
meta_pd: pd.DataFrame,
params_pd: pd.DataFrame,
mess: set):
"""Filter the combinations of parameters too remove
those involving unusable train/test splits, e.g. not
having the specified or too few samples therein.
Parameters
----------
dataset : str
Dataset
meta_pd : pd.DataFrame
Dataset metadata table.
params_pd : pd.DataFrame
Combinations of parameters (rows)
mess : set
Messages to print
"""
examples = []
valid_params = []
nsams = meta_pd.shape[0]
meta_cols = meta_pd.columns
for p, params in params_pd.iterrows():
train = params['train']
if train.replace('.', '').isdigit():
if float(train) < 0.1:
valid_params.append(p)
m = '\t[skip] "%s": train %s too low (%s)' % (
dataset, '%', train)
self.print_message_or_not(mess, m)
elif float(train) > 0.95:
valid_params.append(p)
m = '\t[skip] "%s": train %s too high (%s)' % (
dataset, '%', train)
self.print_message_or_not(mess, m)
else:
examples.append(int(nsams * (1 - float(train))))
else:
if train not in set(meta_cols):
valid_params.append(p)
m = '\t[skip] Training column "%s" not in metadata' % (
train)
self.print_message_or_not(mess, m)
else:
train_vc = meta_pd[train].value_counts()
if {'Train', 'Test'}.issubset(set(train_vc.index)):
ntrain = train_vc['Train']
if nsams < (1.2 * ntrain):
valid_params.append(p)
m = '\t[skip] "%s": %s samples for %s training ' \
'samples:' % (dataset, nsams, ntrain)
self.print_message_or_not(mess, m)
else:
valid_params.append(p)
m = '\t[skip] "%s": no TrainTest in column "%s"' % (
dataset, train)
self.print_message_or_not(mess, m)
if valid_params:
params_pd.drop(index=valid_params, inplace=True)
if examples:
params_pd['examples'] = examples
@staticmethod
def get_filt_params(params):
"""
Parameters
----------
params : pd.Series
Returns
-------
filt_list : list
params_list : list
"""
filt_list = [
('--min-feature-count', str(params['thresh_feats'])),
('--min-sample-count', str(params['thresh_samples']))]
params_list = [
('--p-batch-size', str(params['batches'])),
('--p-learning-rate', str(params['learns'])),
('--p-epochs', str(params['epochs'])),
('--differential-prior: %s' % str(params['diff_priors']),
str(params['diff_priors']).replace('.', '')),
('--p-training-column: %s' % str(params['train']),
str(params['train']).replace('.', '')),
('--p-summary-interval: %s' % str(params['summary_interval']),
str(params['summary_interval']).replace('.', ''))]
return filt_list, params_list
def get_main_dirs(
self, dat, filt, subset, filt_list, params_list, model) -> tuple:
datdir = '%s/unpaired' % dat
pdir = ''
for (name, level) in [
('filter', filt),
('subset', subset),
('songbird_filt', filt_list),
('params', params_list),
('model', model)
]:
text = ''
if name == 'filter':
add_dir = level
text += 'Preliminary feature filtering:\n\n'
text += '%s: <prevalence>_<abundance> thresholds\n' % filt
text += '\nValue between 0 and <1 indicates a fraction:\n'
text += '- prevalence: min. fraction of sample presences\n'
text += '- abundance: min. fraction of samples reads\n'
text += ' * e.g. "0.1" corresponds to 10 percent\n'
text += '\nValue >=1 indicates an absolute number:\n'
text += '- prevalence: min number of sample presences\n'
text += '- abundance: min number of reads (per sample)\n'
text += 'In both cases, filtering on prevalence occurs on '
text += 'per-sample, abundance-filtered features, i.e.:\n'
text += '\n`tab.loc[(tab_perc > abund).sum(1) > preval]`\n'
elif name == 'subset':
add_dir = level
text += 'Sample subset:\n\n'
if subset == 'ALL':
text += '%s: No sample subset\n' % subset
else:
text += '%s: <variable>_<factor(s)>:\n' % subset
text += '\n(see your config for formula of this model)\n'
elif name == 'songbird_filt':
text += 'Feature and sample filtering in songbird:\n\n'
for f in filt_list:
text += ' %s\n' % ' '.join(f)
text += '\n(see songbird command line usage)\n'
add_dir = 'filt_f%s_s%s' % tuple([x[1] for x in filt_list])
pdir += add_dir
elif name == 'params':
add_dir = '%s_%s_%s_%s_%s_%s' % tuple(
[x[1] for x in params_list])
pdir += '/' + add_dir
text += 'Songbird run parameters:\n\n'
for param_list in params_list:
text += ' %s\n' % ' = '.join(list(param_list))
text += '\n(see songbird command line usage)\n'
elif name == 'model':
add_dir = level
text += 'Model: %s\n' % model
text += '\n(see your config for formula of this model)\n'
datdir = '%s/%s' % (datdir, add_dir)
odir = get_analysis_folder(
self.config.i_datasets_folder, 'songbird/%s' % datdir)
readme = '%s/readme.txt' % odir
with open(readme, 'w') as o:
o.write(text)
new_qza = '%s/tab.qza' % odir
new_meta = '%s/metadata.tsv' % odir
return datdir, pdir, odir, new_qza, new_meta
@staticmethod
def get_out_paths(odir, bodir, model_baseline, baselines) -> dict:
if model_baseline in baselines:
bdiff_qza = ''
bstat = baselines[model_baseline]
bplot = ''
else:
bdiff_qza = '%s/differentials-baseline.qza' % bodir
bstat = '%s/differentials-stats-baseline.qza' % bodir
bplot = '%s/differentials-biplot-baseline.qza' % bodir
baselines[model_baseline] = bstat
out_paths = {
'diff': '%s/differentials.tsv' % odir,
'diff_qza': '%s/differentials.qza' % odir,
'stat': '%s/differentials-stats.qza' % odir,
'plot': '%s/differentials-biplot.qza' % odir,
'tens': '%s/tensorboard.qzv' % bodir,
'html': '%s/tensorboard.html' % bodir,
'bdiff_qza': bdiff_qza,
'bstat': bstat,
'bplot': bplot
}
return out_paths
@staticmethod
def write_new_meta(meta_pd, new_meta, meta_vars, drop, params):
meta_cols = set(meta_pd.columns)
if params['train'] in meta_cols:
meta_vars.add(params['train'])
new_meta_pd = meta_pd[
(['sample_name'] + [x for x in meta_vars if x in meta_cols])
].copy()
new_meta_pd = new_meta_pd.loc[~new_meta_pd.isna().any(1)]
new_meta_pd = rename_duplicate_columns(new_meta_pd)
if drop:
to_remove = pd.concat([
new_meta_pd[meta_var].isin(var_drop)
for meta_var, var_drop in drop.items()
], axis=1).any(axis=1)
new_meta_pd = new_meta_pd.loc[~to_remove]
new_meta_pd.to_csv(new_meta, index=False, sep='\t')
return new_meta_pd.shape[0]
def summarize_songbirds(self):
q2s = []
songbird = get_analysis_folder(
self.config.i_datasets_folder, 'songbird')
for root, dirs, files in os.walk(songbird):
for fil in files:
if fil == 'tensorboard.html':
path = root + '/' + fil
diff = '%s/differentials.tsv' % dirname(root)
root_split = root.split('%s/' % songbird)[-1].split('/')
d, pr, fr, sb, sr, ps, ml, be = root_split
with open(path) as f:
for line in f:
if 'Pseudo Q-squared' in line:
ls = line.split(
'Pseudo Q-squared:</a></strong> ')
q2s.append([
pr, d, fr, sb, ml, sr, ps, be, diff,
float(ls[-1].split('<')[0])])
if q2s:
self.q2s_pd = pd.DataFrame(q2s, columns=[
'pair', 'dataset', 'filter', 'subset', 'model',
'songbird_filter', 'parameters', 'baseline', 'differentials',
'Pseudo_Q_squared'])
q2s_fp = '%s/songbird_q2.tsv' % songbird
self.q2s_pd.to_csv(q2s_fp, index=False, sep='\t')
print('\t==> Written: %s' % q2s_fp)
def create_songbird_feature_metadata(self):
if self.q2s_pd.shape[0]:
q2_pd = self.q2s_pd.loc[(self.q2s_pd.pair == 'no_pair') &
(self.q2s_pd.Pseudo_Q_squared > 0)]
for dat, dataset_pd in q2_pd.groupby('dataset'):
dataset_sbs = []
for r, row in dataset_pd.iterrows():
pr = 'pair=%s' % row['pair']
fr = 'filter=%s' % row['filter']
sb = 'subset=%s' % row['subset']
ml = 'model=%s' % row['model']
st = 'sb_filt=%s' % row['songbird_filter']
ps = 'params=%s' % row['parameters']
be = 'baseline=%s' % row['baseline']
q2 = '[Q2=%s]' % row['Pseudo_Q_squared']
diffs = row['differentials']
                    sb_pd = pd.read_csv(diffs, index_col=0, sep='\t')
import pandas as pd
# Dataframe
data = pd.DataFrame({
'kelas': 6*['A'] + 6*['B'],
'murid': 2*['A1'] + 2*['A2'] + 2*['A3'] + 2*['B1'] + 2*['B2'] + 2*['B3'],
'pelajaran': 6*['math','english'],
'nilai': [90,60,70,85,50,60,100,40,95,80,60,45]
}, columns=['kelas','murid','pelajaran','nilai'])
# Pivoting dataframe
data_pivot = data.pivot_table(index='kelas',columns='pelajaran',values='nilai', aggfunc='mean').reset_index()
print('Pivoting dataframe:\n', data_pivot)
# [3.a] Melting the data_pivot dataframe with value_vars
data_melt_3a = pd.melt(data_pivot, value_vars=['math'])
print('Melting dataframe with value_vars:\n', data_melt_3a)
# [3.b] Melting the data_pivot dataframe with id_vars and value_vars
data_melt_3b = pd.melt(data_pivot, id_vars='kelas', value_vars=['math'])
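# Mirroring step [3.a] above (this print is added here; the source is truncated
# at this point). With the sample data the math means are A = 70.0 and B = 85.0,
# so data_melt_3b has columns [kelas, pelajaran, value] with those two rows.
print('Melting dataframe with id_vars and value_vars:\n', data_melt_3b)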
'''recurring_spend.py docstring
Author: <NAME>
'''
import os
import glob
import datetime as dt
import pickle
import numpy as np
import pandas as pd
from google.cloud import bigquery
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
import config_google
import config_recurringspend
from gcp_utility import download_table_from_gbq, upload_table_to_gbq
# df = download_table_from_gbq(project_name, dataset_name, table_name)
# upload_table_to_gbq(ndf, dataset_name, table_name)
def return_dataframe_from_sheet(spreadsheet_id, sample_range):
    '''Read a range from a Google Sheet and return it as a DataFrame,
    using the first row of the range as the column header.
    Note: if modifying the scopes below, delete the file token.pickle.
    token.pickle stores the user's access and refresh tokens and is created
    automatically when the authorization flow completes for the first time.
    '''
SCOPES = ['https://www.googleapis.com/auth/spreadsheets.readonly']
creds = None
if os.path.exists('token.pickle'):
with open('token.pickle', 'rb') as token:
creds = pickle.load(token)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
'credentials.json', SCOPES)
creds = flow.run_local_server(port=0)
# Save the credentials for the next run
with open('token.pickle', 'wb') as token:
pickle.dump(creds, token)
service = build('sheets', 'v4', credentials=creds)
# Call the Sheets API
sheet = service.spreadsheets()
result = sheet.values().get(spreadsheetId=spreadsheet_id,
range=sample_range).execute()
values = result.get('values', [])
df = pd.DataFrame(values)
new_header = df.iloc[0] #grab the first row for the header
df = df[1:] #take the data less the header row
df.columns = new_header #set the header row as the df header
return df
def blank_to_nan(df):
    '''Replace blank string elements (length zero) with numpy NaN.
    Returns one list of cleaned values per column, in column order.
    '''
data = []
for i in list(df):
temp = []
for j in df[i]:
if len(j) == 0:
temp.append(np.nan)
else:
temp.append(j)
data.append(temp)
return data
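def sheet_to_clean_dataframe(spreadsheet_id, sample_range):
    '''Hedged usage sketch chaining the two helpers above: pull a sheet range
    and rebuild it as a DataFrame with blank cells converted to NaN. Both
    arguments are caller-supplied placeholders, not values from this project.
    '''
    raw = return_dataframe_from_sheet(spreadsheet_id, sample_range)
    cleaned = blank_to_nan(raw)  # one list of values per column, in order
    return pd.DataFrame({col: vals for col, vals in zip(list(raw), cleaned)})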
def generate_time_series_v1(df, start_col, cost_col):
    '''Expand each record into a daily time series per service_name,
    spreading the cost in cost_col (assumed weekly) evenly as cost/7 per day
    over the dates between start_col and end_date.
    '''
ndf = df
df_list = []
for name in ndf['service_name'].unique():
temp = ndf.loc[ndf['service_name'] == name]
dfs = []
for index, row in temp.iterrows():
df = pd.DataFrame(
list(
pd.date_range(row[start_col].date(),
row['end_date'].date())))
df.columns = ['dates']
df['service'] = name
df['cost'] = float(row[cost_col]) / 7
dfs.append(df)
df = pd.concat(dfs)
df = df.sort_values('dates', ascending=True)
df_list.append(df)
    df = pd.concat(df_list)
# %matplotlib inline
import os
import numpy as np
import pandas as pd
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestClassifier
from keras.models import Sequential, Model
from keras.layers import Dense, Input
from keras import optimizers
import keras.backend as K
# data directory
DATA_DIR = os.path.join('../..', 'pover-t', 'data')
A_TRAIN_HHOLD = os.path.join(DATA_DIR, 'train', 'A_hhold_train.csv')
B_TRAIN_HHOLD = os.path.join(DATA_DIR, 'train', 'B_hhold_train.csv')
C_TRAIN_HHOLD = os.path.join(DATA_DIR, 'train', 'C_hhold_train.csv')
A_TRAIN_IND = os.path.join(DATA_DIR, 'train', 'A_indiv_train.csv')
B_TRAIN_IND = os.path.join(DATA_DIR, 'train', 'B_indiv_train.csv')
C_TRAIN_IND = os.path.join(DATA_DIR, 'train', 'C_indiv_train.csv')
A_TEST_HHOLD = os.path.join(DATA_DIR, 'test', 'A_hhold_test.csv')
B_TEST_HHOLD = os.path.join(DATA_DIR, 'test', 'B_hhold_test.csv')
C_TEST_HHOLD = os.path.join(DATA_DIR, 'test', 'C_hhold_test.csv')
A_TEST_IND = os.path.join(DATA_DIR, 'test', 'A_indiv_test.csv')
B_TEST_IND = os.path.join(DATA_DIR, 'test', 'B_indiv_test.csv')
C_TEST_IND = os.path.join(DATA_DIR, 'test', 'C_indiv_test.csv')
data_paths = {'A': {'train': A_TRAIN_HHOLD, 'test': A_TEST_HHOLD},
'B': {'train': B_TRAIN_HHOLD, 'test': B_TEST_HHOLD},
'C': {'train': C_TRAIN_HHOLD, 'test': C_TEST_HHOLD}}
ind_data_paths = {'A': {'train': A_TRAIN_IND, 'test': A_TEST_IND},
'B': {'train': B_TRAIN_IND, 'test': B_TEST_IND},
'C': {'train': C_TRAIN_IND, 'test': C_TEST_IND}}
def main():
a_train_hhold, b_train_hhold, c_train_hhold, a_train_ind, b_train_ind,\
c_train_ind = read_train_data()
print("Country A")
aX_train_hhold = preprocess_data(a_train_hhold.drop('poor', axis=1))
aY_train = np.ravel(a_train_hhold.poor)
aX_train_ind = preprocess_data(a_train_ind.drop('poor', axis=1))
aY_train_ind = np.ravel(a_train_ind.poor)
print("\nCountry B")
bX_train_hhold = preprocess_data(b_train_hhold.drop('poor', axis=1))
bY_train = np.ravel(b_train_hhold.poor)
bX_train_ind = preprocess_data(b_train_ind.drop('poor', axis=1))
bY_train_ind = np.ravel(b_train_ind.poor)
print("\nCountry C")
cX_train_hhold = preprocess_data(c_train_hhold.drop('poor', axis=1))
cY_train = np.ravel(c_train_hhold.poor)
cX_train_ind = preprocess_data(c_train_ind.drop('poor', axis=1))
cY_train_ind = np.ravel(c_train_ind.poor)
print("\nTest Data")
a_test_hhold, b_test_hhold, c_test_hhold, a_test_ind, b_test_ind,\
c_test_ind = read_test_data(aX_train_hhold, aX_train_ind,\
bX_train_hhold, bX_train_ind, cX_train_hhold, cX_train_ind)
# Train and predict over the data sets
a_preds = train_and_predict(aX_train_hhold, aY_train, a_test_hhold)
a_sub = make_country_sub(a_preds, a_test_hhold, 'A')
b_preds = train_and_predict(bX_train_hhold, bY_train, b_test_hhold)
b_sub = make_country_sub(b_preds, b_test_hhold, 'B')
c_preds = train_and_predict(cX_train_hhold, cY_train, c_test_hhold)
c_sub = make_country_sub(c_preds, c_test_hhold, 'C')
# combine predictions and save for submission
submission = pd.concat([a_sub, b_sub, c_sub])
print("Submission head:")
print(submission.head())
print("\nSubmission tail:")
print(submission.tail())
print("Converting to csv for submission...")
submission.to_csv('submission_3_nn.csv')
print("All done")
def train_and_predict(train, ids, test):
model = Sequential()
# Add an input layer
model.add(Dense(72, activation='relu', input_shape=(train.shape[1],)))
# Add some hidden layers
model.add(Dense(36, activation='relu'))
#model.add(Dense(36, activation='relu'))
#model.add(Dense(36, activation='sigmoid'))
model.add(Dense(36, activation='sigmoid'))
# Add an output layer
model.add(Dense(1, activation='sigmoid'))
model.output_shape
model.summary()
model.get_config()
model.get_weights()
# Compile the model and fit the model to the data
model.compile(loss='mean_squared_error',
optimizer='sgd',
metrics=['accuracy', precision, recall, fmeasure])
model.fit(train, ids, epochs=50, batch_size=36, verbose=1)
score = model.evaluate(train, ids, verbose=1)
print(score)
preds = model.predict(test)
return preds
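# `precision`, `recall` and `fmeasure` are referenced in model.compile() above
# but are not defined in the visible part of this file; the definitions below
# are the standard batch-wise metrics that shipped with Keras before 2.0 and
# are included here as an assumption about what the original file contained.
def precision(y_true, y_pred):
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    return true_positives / (predicted_positives + K.epsilon())
def recall(y_true, y_pred):
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    return true_positives / (possible_positives + K.epsilon())
def fmeasure(y_true, y_pred):
    p = precision(y_true, y_pred)
    r = recall(y_true, y_pred)
    return 2 * p * r / (p + r + K.epsilon())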
def read_train_data():
# load training data
a_train = pd.read_csv(data_paths['A']['train'], index_col='id')
b_train = pd.read_csv(data_paths['B']['train'], index_col='id')
c_train = pd.read_csv(data_paths['C']['train'], index_col='id')
    a_indiv_train = pd.read_csv(ind_data_paths['A']['train'], index_col='id')
"""plotting utilities that are used to visualize the curl, divergence."""
import numpy as np, pandas as pd
import anndata
from anndata import AnnData
from typing import List, Union, Optional
from .scatters import scatters
from .scatters import docstrings
from .utils import (
_matplotlib_points,
save_fig,
arrowed_spines,
deaxis_all,
despline_all,
is_gene_name,
is_cell_anno_column,
is_layer_keys,
)
from ..tools.utils import (
update_dict,
flatten,
)
from ..vectorfield.utils import intersect_sources_targets
docstrings.delete_params("scatters.parameters", "adata", "color", "cmap", "frontier", "sym_c")
docstrings.delete_params("scatters.parameters", "adata", "color", "cmap", "frontier")
@docstrings.with_indent(4)
def speed(
adata: AnnData,
basis: str = "pca",
color: Union[str, list, None] = None,
frontier: bool = True,
*args,
**kwargs,
):
"""\
Scatter plot with cells colored by the estimated velocity speed (and other information if provided).
Parameters
----------
adata: :class:`~anndata.AnnData`
        an AnnData object with speed estimated.
basis: `str` or None (default: `pca`)
The embedding data in which the vector field was reconstructed and RNA speed was estimated.
color: `str`, `list` or None:
        Any column names or gene names, etc. in addition to the `speed` to be used for coloring cells.
    frontier: `bool` (default: `True`)
Whether to add the frontier. Scatter plots can be enhanced by using transparency (alpha) in order to show area
of high density and multiple scatter plots can be used to delineate a frontier. See matplotlib tips & tricks
cheatsheet (https://github.com/matplotlib/cheatsheets). Originally inspired by figures from scEU-seq paper:
https://science.sciencemag.org/content/367/6482/1151.
%(scatters.parameters.no_adata|color|cmap|frontier)s
Returns
-------
Nothing but plots scatterplots with cells colored by the estimated speed (and other information if provided).
Examples
--------
>>> import dynamo as dyn
>>> adata = dyn.sample_data.hgForebrainGlutamatergic()
>>> adata = dyn.pp.recipe_monocle(adata)
>>> dyn.tl.dynamics(adata)
>>> dyn.tl.reduceDimension(adata)
>>> dyn.tl.cell_velocities(adata, basis='pca')
>>> dyn.vf.VectorField(adata, basis='pca')
>>> dyn.vf.speed(adata)
>>> dyn.pl.speed(adata)
See also:: :func:`..external.ddhodge.curl` for calculating curl with a diffusion graph built from reconstructed vector
field.
"""
speed_key = "speed" if basis is None else "speed_" + basis
color_ = [speed_key]
if not np.any(adata.obs.columns.isin(color_)):
raise Exception(f"{speed_key} is not existed in .obs, try run dyn.tl.speed(adata, basis='{basis}') first.")
if color is not None:
color = [color] if type(color) == str else color
color_.extend(color)
return scatters(adata, color=color_, frontier=frontier, *args, **kwargs)
@docstrings.with_indent(4)
def curl(
adata: AnnData,
basis: str = "umap",
color: Union[str, list, None] = None,
cmap: str = "bwr",
frontier: bool = True,
sym_c: bool = True,
*args,
**kwargs,
):
"""\
Scatter plot with cells colored by the estimated curl (and other information if provided).
    Cells with negative or positive curl correspond to cells with clockwise or counter-clockwise rotation vectors,
    respectively. Currently only 2D vector fields are supported, but in principle this could be generalized to
    higher-dimensional spaces.
Parameters
----------
adata: :class:`~anndata.AnnData`
        an AnnData object with curl estimated.
basis: `str` or None (default: `umap`)
The embedding data in which the vector field was reconstructed and RNA curl was estimated.
color: `str`, `list` or None:
Any column names or gene names, etc. in addition to the `curl` to be used for coloring cells.
    frontier: `bool` (default: `True`)
Whether to add the frontier. Scatter plots can be enhanced by using transparency (alpha) in order to show area
of high density and multiple scatter plots can be used to delineate a frontier. See matplotlib tips & tricks
cheatsheet (https://github.com/matplotlib/cheatsheets). Originally inspired by figures from scEU-seq paper:
https://science.sciencemag.org/content/367/6482/1151.
    sym_c: `bool` (default: `True`)
        Whether to make the limits of the continuous color scale symmetric; this should normally be used when
        plotting velocity, curl, divergence or other types of data with both positive and negative values.
%(scatters.parameters.no_adata|color|cmap|frontier|sym_c)s
Returns
-------
Nothing but plots scatterplots with cells colored by the estimated curl (and other information if provided).
Examples
--------
>>> import dynamo as dyn
>>> adata = dyn.sample_data.hgForebrainGlutamatergic()
>>> adata = dyn.pp.recipe_monocle(adata)
>>> dyn.tl.dynamics(adata)
>>> dyn.tl.reduceDimension(adata)
>>> dyn.tl.cell_velocities(adata, basis='umap')
>>> dyn.vf.VectorField(adata, basis='umap')
>>> dyn.vf.curl(adata, basis='umap')
>>> dyn.pl.curl(adata, basis='umap')
See also:: :func:`..external.ddhodge.curl` for calculating curl with a diffusion graph built from reconstructed vector
field.
"""
curl_key = "curl" if basis is None else "curl_" + basis
color_ = [curl_key]
if not np.any(adata.obs.columns.isin(color_)):
        raise Exception(f"{curl_key} does not exist in .obs. Try running dyn.tl.curl(adata, basis='{basis}') first.")
if color is not None:
color = [color] if type(color) == str else color
color_.extend(color)
# adata.obs[curl_key] = adata.obs[curl_key].astype('float')
# adata_ = adata[~ adata.obs[curl_key].isna(), :]
return scatters(
adata,
color=color_,
cmap=cmap,
frontier=frontier,
sym_c=sym_c,
*args,
**kwargs,
)
@docstrings.with_indent(4)
def divergence(
adata: AnnData,
basis: str = "pca",
color: Union[str, list, None] = None,
cmap: str = "bwr",
frontier: bool = True,
sym_c: bool = True,
*args,
**kwargs,
):
"""\
Scatter plot with cells colored by the estimated divergence (and other information if provided).
    Cells with negative or positive divergence correspond to possible sinks (stable cell types) or possible sources
    (unstable metastable states or progenitors).
Parameters
----------
adata: :class:`~anndata.AnnData`
an Annodata object with divergence estimated.
basis: `str` or None (default: `pca`)
The embedding data in which the vector field was reconstructed and RNA divergence was estimated.
color: `str`, `list` or None:
Any column names or gene names, etc. in addition to the `divergence` to be used for coloring cells.
    frontier: `bool` (default: `True`)
Whether to add the frontier. Scatter plots can be enhanced by using transparency (alpha) in order to show area
of high density and multiple scatter plots can be used to delineate a frontier. See matplotlib tips & tricks
cheatsheet (https://github.com/matplotlib/cheatsheets). Originally inspired by figures from scEU-seq paper:
https://science.sciencemag.org/content/367/6482/1151.
    sym_c: `bool` (default: `True`)
Whether do you want to make the limits of continuous color to be symmetric, normally this should be used for
plotting velocity, divergence or other types of data with both positive or negative values.
%(scatters.parameters.no_adata|color|cmap|frontier|sym_c)s
Returns
-------
Nothing but plots scatterplots with cells colored by the estimated divergence (and other information if provided).
Examples
--------
>>> import dynamo as dyn
>>> adata = dyn.sample_data.hgForebrainGlutamatergic()
>>> dyn.pp.recipe_monocle(adata)
>>> dyn.tl.dynamics(adata)
>>> dyn.tl.cell_velocities(adata, basis='pca')
>>> dyn.vf.VectorField(adata, basis='pca')
>>> dyn.vf.divergence(adata)
>>> dyn.pl.divergence(adata)
See also:: :func:`..external.ddhodge.divergence` for calculating divergence with a diffusion graph built from reconstructed
vector field.
"""
div_key = "divergence" if basis is None else "divergence_" + basis
color_ = [div_key]
if not np.any(adata.obs.columns.isin(color_)):
        raise Exception(f"{div_key} does not exist in .obs. Try running dyn.tl.divergence(adata, basis='{basis}') first.")
# adata.obs[div_key] = adata.obs[div_key].astype('float')
# adata_ = adata[~ adata.obs[div_key].isna(), :]
if color is not None:
color = [color] if type(color) == str else color
color_.extend(color)
return scatters(
adata,
color=color_,
cmap=cmap,
frontier=frontier,
sym_c=sym_c,
*args,
**kwargs,
)
@docstrings.with_indent(4)
def acceleration(
adata: AnnData,
basis: str = "pca",
color: Union[str, list, None] = None,
frontier: bool = True,
*args,
**kwargs,
):
"""\
Scatter plot with cells colored by the estimated acceleration (and other information if provided).
Parameters
----------
adata: :class:`~anndata.AnnData`
        an AnnData object with acceleration estimated.
basis: `str` or None (default: `pca`)
        The embedding data in which the vector field was reconstructed and RNA acceleration was estimated.
color: `str`, `list` or None:
Any column names or gene names, etc. in addition to the `acceleration` to be used for coloring cells.
    frontier: `bool` (default: `True`)
Whether to add the frontier. Scatter plots can be enhanced by using transparency (alpha) in order to show area
of high density and multiple scatter plots can be used to delineate a frontier. See matplotlib tips & tricks
cheatsheet (https://github.com/matplotlib/cheatsheets). Originally inspired by figures from scEU-seq paper:
https://science.sciencemag.org/content/367/6482/1151.
%(scatters.parameters.no_adata|color|cmap|frontier)s
Returns
-------
    Nothing but plots scatterplots with cells colored by the estimated acceleration (and other information if provided).
Examples
--------
>>> import dynamo as dyn
>>> adata = dyn.sample_data.hgForebrainGlutamatergic()
>>> dyn.pp.recipe_monocle(adata)
>>> dyn.tl.dynamics(adata)
>>> dyn.tl.cell_velocities(adata, basis='pca')
>>> dyn.vf.VectorField(adata, basis='pca')
>>> dyn.vf.acceleration(adata)
>>> dyn.pl.acceleration(adata)
"""
acc_key = "acceleration" if basis is None else "acceleration_" + basis
color_ = [acc_key]
if not np.any(adata.obs.columns.isin(color_)):
raise Exception(
            f"{acc_key} does not exist in .obs. Try running dyn.tl.acceleration(adata, basis='{basis}') first."
)
adata.obs[acc_key] = adata.obs[acc_key].astype("float")
adata_ = adata[~adata.obs[acc_key].isna(), :]
if color is not None:
color = [color] if type(color) == str else color
color_.extend(color)
return scatters(adata_, color=color_, frontier=frontier, *args, **kwargs)
@docstrings.with_indent(4)
def curvature(
adata: AnnData,
basis: str = "pca",
color: Union[str, list, None] = None,
frontier: bool = True,
*args,
**kwargs,
):
"""\
Scatter plot with cells colored by the estimated curvature (and other information if provided).
Parameters
----------
adata: :class:`~anndata.AnnData`
        an AnnData object with curvature estimated.
basis: `str` or None (default: `pca`)
The embedding data in which the vector field was reconstructed and RNA curvature was estimated.
color: `str`, `list` or None:
Any column names or gene names, etc. in addition to the `curvature` to be used for coloring cells.
    frontier: `bool` (default: `True`)
Whether to add the frontier. Scatter plots can be enhanced by using transparency (alpha) in order to show area
of high density and multiple scatter plots can be used to delineate a frontier. See matplotlib tips & tricks
cheatsheet (https://github.com/matplotlib/cheatsheets). Originally inspired by figures from scEU-seq paper:
https://science.sciencemag.org/content/367/6482/1151.
%(scatters.parameters.no_adata|color|cmap|frontier)s
Returns
-------
Nothing but plots scatterplots with cells colored by the estimated curvature (and other information if provided).
Examples
--------
>>> import dynamo as dyn
>>> adata = dyn.sample_data.hgForebrainGlutamatergic()
>>> dyn.pp.recipe_monocle(adata)
>>> dyn.tl.dynamics(adata)
>>> dyn.tl.cell_velocities(adata, basis='pca')
>>> dyn.vf.VectorField(adata, basis='pca')
>>> dyn.vf.curvature(adata)
>>> dyn.pl.curvature(adata)
"""
curv_key = "curvature" if basis is None else "curvature_" + basis
color_ = [curv_key]
if not np.any(adata.obs.columns.isin(color_)):
raise Exception(
            f"{curv_key} does not exist in .obs. Try running dyn.tl.curvature(adata, basis='{basis}') first."
)
adata.obs[curv_key] = adata.obs[curv_key].astype("float")
adata_ = adata[~adata.obs[curv_key].isna(), :]
if color is not None:
color = [color] if type(color) == str else color
color_.extend(color)
return scatters(adata_, color=color_, frontier=frontier, *args, **kwargs)
@docstrings.with_indent(4)
def jacobian(
adata: AnnData,
regulators: Optional[List] = None,
effectors: Optional[List] = None,
basis: str = "umap",
    jkey: str = "jacobian",
j_basis: str = "pca",
x: int = 0,
y: int = 1,
layer: str = "M_s",
highlights: list = None,
cmap: str = "bwr",
background: Optional[str] = None,
pointsize: Union[None, float] = None,
figsize: tuple = (6, 4),
show_legend: bool = True,
frontier: bool = True,
sym_c: bool = True,
sort: str = "abs",
show_arrowed_spines: bool = False,
stacked_fraction: bool = False,
save_show_or_return: str = "show",
save_kwargs: dict = {},
**kwargs,
):
"""\
Scatter plot of Jacobian values across cells.
Parameters
----------
adata: :class:`~anndata.AnnData`
        an AnnData object with Jacobian matrix estimated.
    regulators: `list` or `None` (default: `None`)
        The list of genes that will be used as regulators for plotting the Jacobian scatter plots, limited to genes
        for which Jacobian analysis has already been performed.
    effectors: `List` or `None` (default: `None`)
        The list of genes that will be used as targets for plotting the Jacobian scatter plots, limited to genes
        for which Jacobian analysis has already been performed.
basis: `str` (default: `umap`)
The reduced dimension basis.
jkey: `str` (default: `jacobian`)
The key to the jacobian dictionary in .uns.
j_basis: `str` (default: `pca`)
The reduced dimension space that will be used to calculate the jacobian matrix.
x: `int` (default: `0`)
The column index of the low dimensional embedding for the x-axis.
y: `int` (default: `1`)
The column index of the low dimensional embedding for the y-axis.
    highlights: `list` (default: None)
        Which color group will be highlighted. If highlights is a list of lists, each list relates to a color element.
    cmap: string (optional, default 'bwr')
The name of a matplotlib colormap to use for coloring
or shading points. If no labels or values are passed
this will be used for shading points according to
density (largely only of relevance for very large
datasets). If values are passed this will be used for
shading according the value. Note that if theme
is passed then this value will be overridden by the
corresponding option of the theme.
    background: string or None (optional, default None)
The color of the background. Usually this will be either
'white' or 'black', but any color name will work. Ideally
one wants to match this appropriately to the colors being
used for points etc. This is one of the things that themes
handle for you. Note that if theme
is passed then this value will be overridden by the
corresponding option of the theme.
figsize: `None` or `[float, float]` (default: (6, 4))
The width and height of each panel in the figure.
show_legend: bool (optional, default True)
Whether to display a legend of the labels
    frontier: `bool` (default: `True`)
Whether to add the frontier. Scatter plots can be enhanced by using transparency (alpha) in order to show area
of high density and multiple scatter plots can be used to delineate a frontier. See matplotlib tips & tricks
cheatsheet (https://github.com/matplotlib/cheatsheets). Originally inspired by figures from scEU-seq paper:
https://science.sciencemag.org/content/367/6482/1151.
sym_c: `bool` (default: `True`)
Whether do you want to make the limits of continuous color to be symmetric, normally this should be used for
plotting velocity, jacobian, curl, divergence or other types of data with both positive or negative values.
sort: `str` (optional, default `abs`)
The method to reorder data so that high values points will be on top of background points. Can be one of
{'raw', 'abs', 'neg'}, i.e. sorted by raw data, sort by absolute values or sort by negative values.
show_arrowed_spines: bool (optional, default False)
Whether to show a pair of arrowed spines representing the basis of the scatter is currently using.
stacked_fraction: bool (default: False)
If True the jacobian will be represented as a stacked fraction in the title, otherwise a linear fraction
style is used.
save_show_or_return: `str` {'save', 'show', 'return'} (default: `show`)
Whether to save, show or return the figure.
save_kwargs: `dict` (default: `{}`)
A dictionary that will passed to the save_fig function. By default it is an empty dictionary and the save_fig
function will use the {"path": None, "prefix": 'scatter', "dpi": None, "ext": 'pdf', "transparent": True,
"close": True, "verbose": True} as its parameters. Otherwise you can provide a dictionary that properly
modify those keys according to your needs.
kwargs:
Additional arguments passed to plt._matplotlib_points.
Returns
-------
Nothing but plots the n_source x n_targets scatter plots of low dimensional embedding of the adata object, each
corresponds to one element in the Jacobian matrix for all sampled cells.
Examples
--------
>>> import dynamo as dyn
>>> adata = dyn.sample_data.hgForebrainGlutamatergic()
>>> dyn.pp.recipe_monocle(adata)
>>> dyn.tl.dynamics(adata)
>>> dyn.tl.cell_velocities(adata, basis='pca')
>>> dyn.vf.VectorField(adata, basis='pca')
>>> valid_gene_list = adata[:, adata.var.use_for_transition].var.index[:2]
>>> dyn.vf.jacobian(adata, regulators=valid_gene_list[0], effectors=valid_gene_list[1])
>>> dyn.pl.jacobian(adata)
"""
regulators, effectors = (
list(np.unique(regulators)) if regulators is not None else None,
list(np.unique(effectors)) if effectors is not None else None,
)
import matplotlib.pyplot as plt
from matplotlib import rcParams
from matplotlib.colors import to_hex
if background is None:
_background = rcParams.get("figure.facecolor")
_background = to_hex(_background) if type(_background) is tuple else _background
else:
_background = background
Jacobian_ = jkey if j_basis is None else jkey + "_" + j_basis
Der, cell_indx, jacobian_gene, regulators_, effectors_ = (
adata.uns[Jacobian_].get(jkey.split("_")[-1]),
adata.uns[Jacobian_].get("cell_idx"),
adata.uns[Jacobian_].get(jkey.split("_")[-1] + "_gene"),
adata.uns[Jacobian_].get("regulators"),
adata.uns[Jacobian_].get("effectors"),
)
adata_ = adata[cell_indx, :]
if regulators is None and effectors is not None:
regulators = effectors
elif effectors is None and regulators is not None:
effectors = regulators
# test the simulation data here
if regulators_ is None or effectors_ is None:
if Der.shape[0] != adata_.n_vars:
source_genes = [j_basis + "_" + str(i) for i in range(Der.shape[0])]
target_genes = [j_basis + "_" + str(i) for i in range(Der.shape[1])]
else:
source_genes, target_genes = adata_.var_names, adata_.var_names
else:
Der, source_genes, target_genes = intersect_sources_targets(
regulators,
regulators_,
effectors,
effectors_,
Der if jacobian_gene is None else jacobian_gene,
)
## integrate this with the code in scatter ##
if type(x) is int and type(y) is int:
prefix = "X_"
cur_pd = pd.DataFrame(
{
basis + "_" + str(x): adata_.obsm[prefix + basis][:, x],
basis + "_" + str(y): adata_.obsm[prefix + basis][:, y],
}
)
elif is_gene_name(adata_, x) and is_gene_name(adata_, y):
cur_pd = pd.DataFrame(
{
x: adata_.obs_vector(k=x, layer=None) if layer == "X" else adata_.obs_vector(k=x, layer=layer),
y: adata_.obs_vector(k=y, layer=None) if layer == "X" else adata_.obs_vector(k=y, layer=layer),
}
)
# cur_pd = cur_pd.loc[(cur_pd > 0).sum(1) > 1, :]
cur_pd.columns = [
x + " (" + layer + ")",
y + " (" + layer + ")",
]
elif is_cell_anno_column(adata_, x) and is_cell_anno_column(adata_, y):
cur_pd = pd.DataFrame(
{
x: adata_.obs_vector(x),
y: adata_.obs_vector(y),
}
)
cur_pd.columns = [x, y]
elif is_cell_anno_column(adata_, x) and is_gene_name(adata_, y):
cur_pd = pd.DataFrame(
{
x: adata_.obs_vector(x),
y: adata_.obs_vector(k=y, layer=None) if layer == "X" else adata_.obs_vector(k=y, layer=layer),
}
)
cur_pd.columns = [x, y + " (" + layer + ")"]
elif is_gene_name(adata_, x) and is_cell_anno_column(adata_, y):
cur_pd = pd.DataFrame(
{
x: adata_.obs_vector(k=x, layer=None) if layer == "X" else adata_.obs_vector(k=x, layer=layer),
y: adata_.obs_vector(y),
}
)
# cur_pd = cur_pd.loc[cur_pd.iloc[:, 0] > 0, :]
cur_pd.columns = [x + " (" + layer + ")", y]
elif is_layer_keys(adata_, x) and is_layer_keys(adata_, y):
x_, y_ = adata_[:, basis].layers[x], adata_[:, basis].layers[y]
cur_pd = pd.DataFrame({x: flatten(x_), y: flatten(y_)})
# cur_pd = cur_pd.loc[cur_pd.iloc[:, 0] > 0, :]
cur_pd.columns = [x, y]
elif type(x) in [anndata._core.views.ArrayView, np.ndarray] and type(y) in [
anndata._core.views.ArrayView,
np.ndarray,
]:
cur_pd = pd.DataFrame({"x": flatten(x), "y": flatten(y)})
cur_pd.columns = ["x", "y"]
point_size = 500.0 / np.sqrt(adata_.shape[0]) if pointsize is None else 500.0 / np.sqrt(adata_.shape[0]) * pointsize
point_size = 4 * point_size
scatter_kwargs = dict(
alpha=0.2,
s=point_size,
edgecolor=None,
linewidth=0,
) # (0, 0, 0, 1)
if kwargs is not None:
scatter_kwargs.update(kwargs)
nrow, ncol = len(source_genes), len(target_genes)
if figsize is None:
g = plt.figure(None, (3 * ncol, 3 * nrow), facecolor=_background) # , dpi=160
else:
g = plt.figure(None, (figsize[0] * ncol, figsize[1] * nrow), facecolor=_background) # , dpi=160
gs = plt.GridSpec(nrow, ncol, wspace=0.12)
for i, source in enumerate(source_genes):
for j, target in enumerate(target_genes):
ax = plt.subplot(gs[i * ncol + j])
J = Der[j, i, :] # dim 0: target; dim 1: source
cur_pd["jacobian"] = J
# cur_pd.loc[:, "jacobian"] = np.array([scinot(i) for i in cur_pd.loc[:, "jacobian"].values])
v_max = np.max(np.abs(J))
scatter_kwargs.update({"vmin": -v_max, "vmax": v_max})
ax, color = _matplotlib_points(
cur_pd.iloc[:, [0, 1]].values,
ax=ax,
labels=None,
values=J,
highlights=highlights,
cmap=cmap,
color_key=None,
color_key_cmap=None,
background=_background,
width=figsize[0],
height=figsize[1],
show_legend=show_legend,
frontier=frontier,
sort=sort,
sym_c=sym_c,
**scatter_kwargs,
)
if stacked_fraction:
ax.set_title(r"$\frac{\partial f_{%s}}{\partial x_{%s}}$" % (target, source))
else:
ax.set_title(r"$\partial f_{%s} / \partial x_{%s}$" % (target, source))
if i + j == 0 and show_arrowed_spines:
arrowed_spines(ax, basis, background)
else:
despline_all(ax)
deaxis_all(ax)
if save_show_or_return == "save":
s_kwargs = {
"path": None,
"prefix": jkey,
"dpi": None,
"ext": "pdf",
"transparent": True,
"close": True,
"verbose": True,
}
s_kwargs = update_dict(s_kwargs, save_kwargs)
save_fig(**s_kwargs)
elif save_show_or_return == "show":
plt.tight_layout()
plt.show()
elif save_show_or_return == "return":
return gs
def jacobian_heatmap(
adata: AnnData,
cell_idx: Union[int, List],
jkey: str = "jacobian",
basis: str = "umap",
regulators: Optional[List] = None,
effectors: Optional[List] = None,
figsize: tuple = (7, 5),
ncols: int = 1,
cmap: str = "bwr",
save_show_or_return: str = "show",
save_kwargs: dict = {},
**kwargs,
):
"""\
Plot the Jacobian matrix for each cell as a heatmap.
    Note that the Jacobian matrix can be understood as a regulatory activity matrix between genes, directly computed
    from the reconstructed vector fields.
Parameters
----------
adata: :class:`~anndata.AnnData`
        an AnnData object with Jacobian matrix estimated.
cell_idx: `int` or `list`
The numeric indices of the cells that you want to draw the jacobian matrix to reveal the regulatory activity.
jkey: `str` (default: `jacobian`)
The key to the jacobian dictionary in .uns.
basis: `str`
The reduced dimension basis.
regulators: `list` or `None` (default: `None`)
The list of genes that will be used as regulators for plotting the Jacobian heatmap, only limited to genes
that have already performed Jacobian analysis.
effectors: `List` or `None` (default: `None`)
The list of genes that will be used as targets for plotting the Jacobian heatmap, only limited to genes
that have already performed Jacobian analysis.
    figsize: `None` or `[float, float]` (default: (7, 5))
The width and height of each panel in the figure.
ncols: `int` (default: `1`)
The number of columns for drawing the heatmaps.
cmap: `str` (default: `bwr`)
The mapping from data values to color space. If not provided, the default will depend on whether center is set.
save_show_or_return: `str` {'save', 'show', 'return'} (default: `show`)
Whether to save, show or return the figure.
save_kwargs: `dict` (default: `{}`)
A dictionary that will passed to the save_fig function. By default it is an empty dictionary and the save_fig function
will use the {"path": None, "prefix": 'scatter', "dpi": None, "ext": 'pdf', "transparent": True, "close":
True, "verbose": True} as its parameters. Otherwise you can provide a dictionary that properly modify those keys
according to your needs.
kwargs:
Additional arguments passed to sns.heatmap.
Returns
-------
Nothing but plots the n_cell_idx heatmaps of the corresponding Jacobian matrix for each selected cell.
Examples
--------
>>> import dynamo as dyn
>>> adata = dyn.sample_data.hgForebrainGlutamatergic()
>>> dyn.pp.recipe_monocle(adata)
>>> dyn.tl.dynamics(adata)
>>> dyn.tl.cell_velocities(adata, basis='pca')
>>> dyn.vf.VectorField(adata, basis='pca')
>>> valid_gene_list = adata[:, adata.var.use_for_transition].var.index[:2]
>>> dyn.vf.jacobian(adata, regulators=valid_gene_list[0], effectors=valid_gene_list[1])
>>> dyn.pl.jacobian_heatmap(adata)
"""
regulators, effectors = (
list(np.unique(regulators)) if regulators is not None else None,
list(np.unique(effectors)) if effectors is not None else None,
)
import matplotlib.pyplot as plt
import seaborn as sns
Jacobian_ = jkey if basis is None else jkey + "_" + basis
if type(cell_idx) == int:
cell_idx = [cell_idx]
Der, cell_indx, jacobian_gene, regulators_, effectors_ = (
adata.uns[Jacobian_].get(jkey.split("_")[-1]),
adata.uns[Jacobian_].get("cell_idx"),
adata.uns[Jacobian_].get(jkey.split("_")[-1] + "_gene"),
adata.uns[Jacobian_].get("regulators"),
adata.uns[Jacobian_].get("effectors"),
)
Der, regulators, effectors = intersect_sources_targets(regulators, regulators_, effectors, effectors_, Der)
adata_ = adata[cell_indx, :]
valid_cell_idx = list(set(cell_idx).intersection(cell_indx))
if len(valid_cell_idx) == 0:
raise ValueError(
            f"Jacobian matrix was not calculated for the cells you provided {cell_idx}. "
            f"Check adata.uns[{Jacobian_}].values() for available cells that have the Jacobian matrix calculated. "
            f"Note that limiting the calculation of the Jacobian matrix to a subset of cells is required for "
            f"speeding up calculations."
)
else:
cell_names = adata.obs_names[valid_cell_idx]
total_panels, ncols = len(valid_cell_idx), ncols
nrow, ncol = int(np.ceil(total_panels / ncols)), ncols
if figsize is None:
g = plt.figure(None, (3 * ncol, 3 * nrow)) # , dpi=160
else:
g = plt.figure(None, (figsize[0] * ncol, figsize[1] * nrow)) # , dpi=160
gs = plt.GridSpec(nrow, ncol)
heatmap_kwargs = dict(xticklabels=1, yticklabels=1)
heatmap_kwargs = update_dict(heatmap_kwargs, kwargs)
for i, name in enumerate(cell_names):
ind = np.where(adata_.obs_names == name)[0]
J = Der[:, :, ind][:, :, 0].T # dim 0: target; dim 1: source
J = pd.DataFrame(J, index=regulators, columns=effectors)
ax = plt.subplot(gs[i])
sns.heatmap(
J,
annot=True,
ax=ax,
cmap=cmap,
cbar=False,
center=0,
**heatmap_kwargs,
)
plt.title(name)
if save_show_or_return == "save":
s_kwargs = {
"path": None,
"prefix": jkey + "_heatmap",
"dpi": None,
"ext": "pdf",
"transparent": True,
"close": True,
"verbose": True,
}
s_kwargs = update_dict(s_kwargs, save_kwargs)
save_fig(**s_kwargs)
elif save_show_or_return == "show":
plt.tight_layout()
plt.show()
elif save_show_or_return == "return":
return gs
@docstrings.with_indent(4)
def sensitivity(
adata,
regulators=None,
effectors=None,
basis="umap",
skey="sensitivity",
s_basis="pca",
x=0,
y=1,
layer="M_s",
highlights=None,
cmap="bwr",
background=None,
pointsize=None,
figsize=(6, 4),
show_legend=True,
frontier=True,
sym_c=True,
sort="abs",
show_arrowed_spines=False,
stacked_fraction=False,
save_show_or_return="show",
save_kwargs={},
**kwargs,
):
"""\
    Scatter plot of sensitivity values across cells.
Parameters
----------
adata: :class:`~anndata.AnnData`
        an AnnData object with sensitivity matrix estimated.
    regulators: `list` or `None` (default: `None`)
        The list of genes that will be used as regulators for plotting the sensitivity scatter plots, limited to genes
        for which sensitivity analysis has already been performed.
    effectors: `List` or `None` (default: `None`)
        The list of genes that will be used as targets for plotting the sensitivity scatter plots, limited to genes
        for which sensitivity analysis has already been performed.
basis: `str` (default: `umap`)
The reduced dimension basis.
skey: `str` (default: `sensitivity`)
The key to the sensitivity dictionary in .uns.
s_basis: `str` (default: `pca`)
        The reduced dimension space that will be used to calculate the sensitivity matrix.
x: `int` (default: `0`)
The column index of the low dimensional embedding for the x-axis.
y: `int` (default: `1`)
The column index of the low dimensional embedding for the y-axis.
    highlights: `list` (default: None)
        Which color group will be highlighted. If highlights is a list of lists, each list relates to a color element.
    cmap: string (optional, default 'bwr')
The name of a matplotlib colormap to use for coloring
or shading points. If no labels or values are passed
this will be used for shading points according to
density (largely only of relevance for very large
datasets). If values are passed this will be used for
shading according the value. Note that if theme
is passed then this value will be overridden by the
corresponding option of the theme.
    background: string or None (optional, default None)
The color of the background. Usually this will be either
'white' or 'black', but any color name will work. Ideally
one wants to match this appropriately to the colors being
used for points etc. This is one of the things that themes
handle for you. Note that if theme
is passed then this value will be overridden by the
corresponding option of the theme.
figsize: `None` or `[float, float]` (default: (6, 4))
The width and height of each panel in the figure.
show_legend: bool (optional, default True)
Whether to display a legend of the labels
    frontier: `bool` (default: `True`)
Whether to add the frontier. Scatter plots can be enhanced by using transparency (alpha) in order to show area
of high density and multiple scatter plots can be used to delineate a frontier. See matplotlib tips & tricks
cheatsheet (https://github.com/matplotlib/cheatsheets). Originally inspired by figures from scEU-seq paper:
https://science.sciencemag.org/content/367/6482/1151.
sym_c: `bool` (default: `True`)
Whether do you want to make the limits of continuous color to be symmetric, normally this should be used for
plotting velocity, jacobian, curl, divergence or other types of data with both positive or negative values.
sort: `str` (optional, default `abs`)
The method to reorder data so that high values points will be on top of background points. Can be one of
{'raw', 'abs', 'neg'}, i.e. sorted by raw data, sort by absolute values or sort by negative values.
show_arrowed_spines: bool (optional, default False)
Whether to show a pair of arrowed spines representing the basis of the scatter is currently using.
stacked_fraction: bool (default: False)
        If True the sensitivity will be represented as a stacked fraction in the title, otherwise a linear fraction
style is used.
save_show_or_return: `str` {'save', 'show', 'return'} (default: `show`)
Whether to save, show or return the figure.
save_kwargs: `dict` (default: `{}`)
A dictionary that will passed to the save_fig function. By default it is an empty dictionary and the save_fig
function will use the {"path": None, "prefix": 'scatter', "dpi": None, "ext": 'pdf', "transparent": True,
"close": True, "verbose": True} as its parameters. Otherwise you can provide a dictionary that properly
modify those keys according to your needs.
kwargs:
Additional arguments passed to plt._matplotlib_points.
Returns
-------
Nothing but plots the n_source x n_targets scatter plots of low dimensional embedding of the adata object, each
    corresponds to one element in the sensitivity matrix for all sampled cells.
Examples
--------
>>> import dynamo as dyn
>>> adata = dyn.sample_data.hgForebrainGlutamatergic()
>>> dyn.pp.recipe_monocle(adata)
>>> dyn.tl.dynamics(adata)
>>> dyn.tl.cell_velocities(adata, basis='pca')
>>> dyn.vf.VectorField(adata, basis='pca')
>>> valid_gene_list = adata[:, adata.var.use_for_transition].var.index[:2]
>>> dyn.vf.sensitivity(adata, regulators=valid_gene_list[0], effectors=valid_gene_list[1])
>>> dyn.pl.sensitivity(adata)
"""
regulators, effectors = (
list(np.unique(regulators)) if regulators is not None else None,
list(np.unique(effectors)) if effectors is not None else None,
)
import matplotlib.pyplot as plt
from matplotlib import rcParams
from matplotlib.colors import to_hex
if background is None:
_background = rcParams.get("figure.facecolor")
_background = to_hex(_background) if type(_background) is tuple else _background
else:
_background = background
Sensitivity_ = skey if s_basis is None else skey + "_" + s_basis
Der, cell_indx, sensitivity_gene, regulators_, effectors_ = (
adata.uns[Sensitivity_].get(skey.split("_")[-1]),
adata.uns[Sensitivity_].get("cell_idx"),
adata.uns[Sensitivity_].get(skey.split("_")[-1] + "_gene"),
adata.uns[Sensitivity_].get("regulators"),
adata.uns[Sensitivity_].get("effectors"),
)
adata_ = adata[cell_indx, :]
# test the simulation data here
if regulators_ is None or effectors_ is None:
if Der.shape[0] != adata_.n_vars:
source_genes = [s_basis + "_" + str(i) for i in range(Der.shape[0])]
target_genes = [s_basis + "_" + str(i) for i in range(Der.shape[1])]
else:
source_genes, target_genes = adata_.var_names, adata_.var_names
else:
Der, source_genes, target_genes = intersect_sources_targets(
regulators,
regulators_,
effectors,
effectors_,
Der if sensitivity_gene is None else sensitivity_gene,
)
## integrate this with the code in scatter ##
if type(x) is int and type(y) is int:
prefix = "X_"
cur_pd = pd.DataFrame(
{
basis + "_" + str(x): adata_.obsm[prefix + basis][:, x],
basis + "_" + str(y): adata_.obsm[prefix + basis][:, y],
}
)
elif is_gene_name(adata_, x) and is_gene_name(adata_, y):
cur_pd = pd.DataFrame(
{
x: adata_.obs_vector(k=x, layer=None) if layer == "X" else adata_.obs_vector(k=x, layer=layer),
y: adata_.obs_vector(k=y, layer=None) if layer == "X" else adata_.obs_vector(k=y, layer=layer),
}
)
# cur_pd = cur_pd.loc[(cur_pd > 0).sum(1) > 1, :]
cur_pd.columns = [
x + " (" + layer + ")",
y + " (" + layer + ")",
]
elif is_cell_anno_column(adata_, x) and is_cell_anno_column(adata_, y):
cur_pd = pd.DataFrame(
{
x: adata_.obs_vector(x),
y: adata_.obs_vector(y),
}
)
cur_pd.columns = [x, y]
elif is_cell_anno_column(adata_, x) and is_gene_name(adata_, y):
cur_pd = pd.DataFrame(
{
x: adata_.obs_vector(x),
y: adata_.obs_vector(k=y, layer=None) if layer == "X" else adata_.obs_vector(k=y, layer=layer),
}
)
cur_pd.columns = [x, y + " (" + layer + ")"]
elif is_gene_name(adata_, x) and is_cell_anno_column(adata_, y):
cur_pd = pd.DataFrame(
{
x: adata_.obs_vector(k=x, layer=None) if layer == "X" else adata_.obs_vector(k=x, layer=layer),
y: adata_.obs_vector(y),
}
)
# cur_pd = cur_pd.loc[cur_pd.iloc[:, 0] > 0, :]
cur_pd.columns = [x + " (" + layer + ")", y]
elif is_layer_keys(adata_, x) and is_layer_keys(adata_, y):
x_, y_ = adata_[:, basis].layers[x], adata_[:, basis].layers[y]
cur_pd = pd.DataFrame({x: flatten(x_), y: flatten(y_)})
# cur_pd = cur_pd.loc[cur_pd.iloc[:, 0] > 0, :]
cur_pd.columns = [x, y]
elif type(x) in [anndata._core.views.ArrayView, np.ndarray] and type(y) in [
anndata._core.views.ArrayView,
np.ndarray,
]:
cur_pd = pd.DataFrame({"x": flatten(x), "y": flatten(y)})
cur_pd.columns = ["x", "y"]
point_size = 500.0 / np.sqrt(adata_.shape[0]) if pointsize is None else 500.0 / np.sqrt(adata_.shape[0]) * pointsize
point_size = 4 * point_size
scatter_kwargs = dict(
alpha=0.2,
s=point_size,
edgecolor=None,
linewidth=0,
) # (0, 0, 0, 1)
if kwargs is not None:
scatter_kwargs.update(kwargs)
nrow, ncol = len(source_genes), len(target_genes)
if figsize is None:
g = plt.figure(None, (3 * ncol, 3 * nrow), facecolor=_background) # , dpi=160
else:
g = plt.figure(None, (figsize[0] * ncol, figsize[1] * nrow), facecolor=_background) # , dpi=160
gs = plt.GridSpec(nrow, ncol, wspace=0.12)
for i, source in enumerate(source_genes):
for j, target in enumerate(target_genes):
ax = plt.subplot(gs[i * ncol + j])
S = Der[j, i, :] # dim 0: target; dim 1: source
cur_pd["sensitivity"] = S
# cur_pd.loc[:, "sensitivity"] = np.array([scinot(i) for i in cur_pd.loc[:, "jacobian"].values])
v_max = np.max(np.abs(S))
scatter_kwargs.update({"vmin": -v_max, "vmax": v_max})
ax, color = _matplotlib_points(
cur_pd.iloc[:, [0, 1]].values,
ax=ax,
labels=None,
values=S,
highlights=highlights,
cmap=cmap,
color_key=None,
color_key_cmap=None,
background=_background,
width=figsize[0],
height=figsize[1],
show_legend=show_legend,
frontier=frontier,
sort=sort,
sym_c=sym_c,
**scatter_kwargs,
)
if stacked_fraction:
ax.set_title(r"$\frac{d x_{%s}}{d x_{%s}}$" % (target, source))
else:
ax.set_title(r"$d x_{%s} / d x_{%s}$" % (target, source))
if i + j == 0 and show_arrowed_spines:
arrowed_spines(ax, basis, background)
else:
despline_all(ax)
deaxis_all(ax)
if save_show_or_return == "save":
s_kwargs = {
"path": None,
"prefix": skey,
"dpi": None,
"ext": "pdf",
"transparent": True,
"close": True,
"verbose": True,
}
s_kwargs = update_dict(s_kwargs, save_kwargs)
save_fig(**s_kwargs)
elif save_show_or_return == "show":
plt.tight_layout()
plt.show()
elif save_show_or_return == "return":
return gs
def sensitivity_heatmap(
adata,
cell_idx,
skey="sensitivity",
basis="pca",
regulators=None,
effectors=None,
figsize=(7, 5),
ncols=1,
cmap="bwr",
save_show_or_return="show",
save_kwargs={},
**kwargs,
):
"""\
    Plot the sensitivity matrix for each cell as a heatmap.
    Note that the sensitivity matrix can be understood as a regulatory activity matrix between genes, directly computed
    from the reconstructed vector fields.
Parameters
----------
adata: :class:`~anndata.AnnData`
        an AnnData object with sensitivity matrix estimated.
cell_idx: `int` or `list`
The numeric indices of the cells that you want to draw the sensitivity matrix to reveal the regulatory activity.
skey: `str` (default: `sensitivity`)
The key to the sensitivity dictionary in .uns.
basis: `str`
The reduced dimension basis.
    regulators: `list` or `None` (default: `None`)
        The list of genes that will be used as regulators for plotting the sensitivity heatmap, limited to genes
        for which sensitivity analysis has already been performed.
    effectors: `List` or `None` (default: `None`)
        The list of genes that will be used as targets for plotting the sensitivity heatmap, limited to genes
        for which sensitivity analysis has already been performed.
    figsize: `None` or `[float, float]` (default: (7, 5))
The width and height of each panel in the figure.
ncols: `int` (default: `1`)
The number of columns for drawing the heatmaps.
cmap: `str` (default: `bwr`)
The mapping from data values to color space. If not provided, the default will depend on whether center is set.
save_show_or_return: `str` {'save', 'show', 'return'} (default: `show`)
Whether to save, show or return the figure.
save_kwargs: `dict` (default: `{}`)
A dictionary that will passed to the save_fig function. By default it is an empty dictionary and the save_fig function
will use the {"path": None, "prefix": 'scatter', "dpi": None, "ext": 'pdf', "transparent": True, "close":
True, "verbose": True} as its parameters. Otherwise you can provide a dictionary that properly modify those keys
according to your needs.
kwargs:
Additional arguments passed to sns.heatmap.
Returns
-------
    Nothing but plots the n_cell_idx heatmaps of the corresponding sensitivity matrix for each selected cell.
Examples
--------
>>> import dynamo as dyn
>>> adata = dyn.sample_data.hgForebrainGlutamatergic()
>>> dyn.pp.recipe_monocle(adata)
>>> dyn.tl.dynamics(adata)
>>> dyn.tl.reduceDimension(adata)
>>> dyn.tl.cell_velocities(adata, basis='pca')
>>> dyn.vf.VectorField(adata, basis='pca')
>>> valid_gene_list = adata[:, adata.var.use_for_transition].var.index[:2]
>>> dyn.vf.sensitivity(adata, regulators=valid_gene_list[0], effectors=valid_gene_list[1])
>>> dyn.pl.sensitivity_heatmap(adata)
"""
regulators, effectors = (
list(np.unique(regulators)) if regulators is not None else None,
list(np.unique(effectors)) if effectors is not None else None,
)
import matplotlib.pyplot as plt
import seaborn as sns
Sensitivity_ = skey if basis is None else skey + "_" + basis
if type(cell_idx) == int:
cell_idx = [cell_idx]
Der, cell_indx, sensitivity_gene, regulators_, effectors_ = (
adata.uns[Sensitivity_].get(skey.split("_")[-1]),
adata.uns[Sensitivity_].get("cell_idx"),
adata.uns[Sensitivity_].get(skey.split("_")[-1] + "_gene"),
adata.uns[Sensitivity_].get("regulators"),
adata.uns[Sensitivity_].get("effectors"),
)
Der, regulators, effectors = intersect_sources_targets(regulators, regulators_, effectors, effectors_, Der)
adata_ = adata[cell_indx, :]
valid_cell_idx = list(set(cell_idx).intersection(cell_indx))
if len(valid_cell_idx) == 0:
raise ValueError(
            f"Sensitivity matrix was not calculated for the cells you provided {cell_idx}. "
            f"Check adata.uns[{Sensitivity_}].values() for available cells that have the sensitivity matrix calculated. "
            f"Note that limiting the calculation of the sensitivity matrix to a subset of cells is required for "
            f"speeding up calculations."
)
else:
cell_names = adata.obs_names[valid_cell_idx]
total_panels, ncols = len(valid_cell_idx), ncols
nrow, ncol = int(np.ceil(total_panels / ncols)), ncols
if figsize is None:
g = plt.figure(None, (3 * ncol, 3 * nrow)) # , dpi=160
else:
g = plt.figure(None, (figsize[0] * ncol, figsize[1] * nrow)) # , dpi=160
gs = plt.GridSpec(nrow, ncol)
heatmap_kwargs = dict(xticklabels=1, yticklabels=1)
heatmap_kwargs = update_dict(heatmap_kwargs, kwargs)
for i, name in enumerate(cell_names):
ind = np.where(adata_.obs_names == name)[0]
J = Der[:, :, ind][:, :, 0].T # dim 0: target; dim 1: source
J = | pd.DataFrame(J, index=regulators, columns=effectors) | pandas.DataFrame |
"""
Web scrape Alcoholics Anonymous website for meeting details
========================================
This file web scrapes information about Alcoholics Anonymous
meetings in Great Britain, including location, time and
duration
Requirements
------------
:requires: bs4
:requires: urllib
:requires: selenium
:requires: time
:requires: pandas
Author
------
:author: Gaskyk
Version
-------
:version: 0.1
:date: 10-Sep-2018
"""
from bs4 import BeautifulSoup
from urllib.request import urlopen
from selenium import webdriver
from selenium.webdriver.support.ui import Select
import time
import pandas as pd
def get_area_list():
"""
AA website requires knowing the area of meetings before meetings
are displayed. This function gets a list of area codes for feeding
into web scraping of meetings in areas later
:return: area_list
:rtype: list of strings of numbers
"""
r = urlopen('https://www.alcoholics-anonymous.org.uk/aa-meetings/Find-a-Meeting').read()
soup = BeautifulSoup(r, 'lxml')
select_area_url = soup.find_all("select",{"id":"map-igroup"})
options = select_area_url[0].find_all("option")
area_list = []
for i in options:
area_list.append(i['value'])
# There are no values for the options for larger areas eg. South West
# So filter these out to keep only values in the list
area_list = list(filter(lambda x: x != '', area_list))
return area_list
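# Example usage (hypothetical sketch; the exact area codes returned depend on the live
# AA website at scrape time):
# >>> areas = get_area_list()
# >>> print(len(areas), areas[:3])   # e.g. 140 ['31', '23', '29']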
def aa_scraping(area_value):
"""
    Function to extract meeting locations and times from the AA website
    for a specific area value. Selenium is required because elements must
    be selected from a drop-down menu and the 'Search' button must be
    clicked.
:param area_value: string value of an area such as Bournemouth District
:type area_value: str
:return: headers, main_text of location and times of meetings
:rtype: Two Selenium WebElements for each of headers and main_text
"""
# Launch web driver and go to website
driver = webdriver.Chrome('C:/Program Files/ChromeDriver/chromedriver.exe')
driver.get('https://www.alcoholics-anonymous.org.uk/aa-meetings/Find-a-Meeting')
# Select area from drop-down menu
select = Select(driver.find_element_by_id('map-igroup'))
select.select_by_value(area_value)
# Click 'search' button
driver.find_element_by_class_name('cSubmit').click()
# Wait for javascript to fully load
driver.implicitly_wait(10)
# Get required elements from page
headers = driver.find_elements_by_tag_name('h3')
main_text = driver.find_elements_by_tag_name('p')
# Decode headers and main_text as lists not Selenium WebElements
headers_decoded = [i.text for i in headers]
main_text_decoded = [i.text for i in main_text]
main_text_decoded = main_text_decoded[5:] # First 5 elements talk about cookies, general stuff
# Quit driver
driver.quit()
return headers_decoded, main_text_decoded
# Get all area codes
area_list = get_area_list()
# Get meetings for a subset of area codes (loop over area_list instead to scrape every area)
meeting_name = []
meeting_info = []
for i in ('31', '23', '29'):
temp = aa_scraping(i)
meeting_name.append(temp[0])
meeting_info.append(temp[1])
time.sleep(10)
# We have a list of lists. Convert this to one long list
all_names = [item for sublist in meeting_name for item in sublist]
all_info = [item for sublist in meeting_info for item in sublist]
def format_scrape_info(my_list):
"""
    Reformat output of web scraping into a pandas DataFrame
    :param my_list: meeting info output from web scraping the AA website
    :type my_list: list
:return: Pandas DataFrame of web scraped data
:rtype: Pandas DataFrame
"""
# Split into addresses, times and postcodes
addresses = [i.splitlines()[0] for i in my_list]
times = [i.splitlines()[1] for i in my_list]
postcodes = [i.splitlines()[2] for i in my_list]
# Format of postcodes[0] is 'Postcode: POSTCODE'
postcodes = [i.replace('Postcode: ', '') for i in postcodes]
    # Format of times[0] is 'Time: 18.00 - duration 1hr 15 mins'
times = [i.replace('Time: ', '') for i in times]
meetings = pd.DataFrame({'addresses': addresses,
'postcodes': postcodes,
'times': times})
return meetings
meetings_info_df = format_scrape_info(all_info)
def create_final_df(my_list, df):
"""
    Add meeting names to meeting info for the final pandas DataFrame
    :param my_list: meeting names output from web scraping the AA website
    :type my_list: list
    :param df: DataFrame of meeting info (addresses, postcodes, times)
    :type df: pandas.DataFrame
:return: Pandas DataFrame of web scraped data
:rtype: Pandas DataFrame
"""
meeting_names = pd.DataFrame({'meeting_names': my_list})
meetings = pd.concat([meeting_names, df], axis=1)
return meetings
meetings_df = create_final_df(all_names, meetings_info_df)
# Import postcode to local authority lookup and merge
postcode_lookup = pd.read_csv('xx/NSPL_AUG_2018_UK.csv', usecols=['pcds', 'laua', 'lat', 'long'])
meetings = | pd.merge(meetings_df, postcode_lookup, left_on='postcodes', right_on='pcds', how='left') | pandas.merge |
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import numpy as np
from collections import OrderedDict
from utils.canvas import canvas
def odict2prop_list(odict):
store = []
    for key, value in odict.items():
max_prop = np.max(value.prop, axis=-1)
uid = np.unique(max_prop)
for ui in uid:
store.append([key, ui, value[(max_prop == ui)]])
return store
def iterate_axes(func):
def wrapper(arr, **args):
if isinstance(arr, OrderedDict):
store = odict2prop_list(arr)
fig, axes = canvas.make_axes(len(store))
for ax, (key, pid, value) in zip(axes, store):
func(value, ax=ax, **args)
ax.set_title(value.condition + ", pid={0}".format(pid))
else:
fig, axes = canvas.make_axes(1)
func(arr, ax=axes[0], **args)
return fig, axes
return wrapper
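# Usage sketch for the decorated plotting functions below (assumes `traces` is either a
# single labelled array or an OrderedDict of such arrays, matching what odict2prop_list
# expects; the variable name and keyword arguments here are illustrative only):
# >>> fig, axes = plot_all(traces, color='k')          # one axis per (key, prop id) pair
# >>> fig, axes = plot_heatmap(traces, cmap='viridis')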
@iterate_axes
def plot_all(arr, ax=None, **kwargs):
pd.DataFrame(arr.T, index=arr.time).plot(legend=False, ax=ax, **kwargs)
@iterate_axes
def plot_heatmap(arr, ax=None, **kwargs):
sns.heatmap(arr, ax=ax, **kwargs)
@iterate_axes
def plot_tsplot(arr, ax=None, **kwargs):
"""
Use seaborn tsplot function.
"""
sns.tsplot(arr, time=arr.time, estimator=np.nanmean, ax=ax, **kwargs)
@iterate_axes
def plot_histogram_pdstats(arr, ax, pd_func_name='mean', **keys):
func = getattr(pd.DataFrame, pd_func_name)
df_stats = func( | pd.DataFrame(arr) | pandas.DataFrame |
import pandas as pd
import sys
from pathlib import Path
fileName = sys.argv[1]
cvf = Path("./ChildResults/FinalCV_"+str(fileName))
gwf = Path("./ChildResults/FinalGW_"+str(fileName))
phf = Path("./ChildResults/FinalPhG_" + str(fileName))
dif = Path("./ChildResults/FinalDi_"+str(fileName))
mcapf = Path("./ChildResults/FinalMCAP_"+str(fileName))
print (cvf)
print (gwf)
print (phf)
print (dif)
if cvf.is_file():
cv = pd.read_csv("./ChildResults/FinalCV_"+str(fileName),sep="\t")
else:
cv = pd.DataFrame(columns=['#CHROM','POS','ID','REF','ALT','QUAL','FILTER','INFO','FORMAT','GT','Likelihood'])
if gwf.is_file():
gwas = pd.read_csv("./ChildResults/FinalGW_"+str(fileName),sep="\t")
else:
gwas = pd.DataFrame(columns=['#CHROM','POS','ID','REF','ALT','QUAL','FILTER','INFO','FORMAT','GT','Likelihood'])
if phf.is_file():
pharm = pd.read_csv("./ChildResults/FinalPhG_" + str(fileName), sep="\t")
else:
pharm = | pd.DataFrame(columns=['#CHROM','POS','ID','REF','ALT','QUAL','FILTER','INFO','FORMAT','GT','Likelihood']) | pandas.DataFrame |
import numpy as np
import pandas as pd
from typing import Mapping, List, Tuple
from collections import defaultdict, OrderedDict
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn.linear_model import LinearRegression, Lasso
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.datasets import load_boston, load_iris, load_wine, load_digits, \
load_breast_cancer, load_diabetes, fetch_mldata
from matplotlib.collections import LineCollection
import time
from pandas.api.types import is_string_dtype, is_object_dtype, is_categorical_dtype, \
is_bool_dtype
from sklearn.ensemble.partial_dependence import partial_dependence, \
plot_partial_dependence
from sklearn import svm
from sklearn.neighbors import KNeighborsRegressor
from pdpbox import pdp
from rfpimp import *
from scipy.integrate import cumtrapz
from stratx.partdep import *
from stratx.ice import *
import inspect
import statsmodels.api as sm
from sklearn.datasets import load_boston
from stratx.partdep import *
def df_string_to_cat(df: pd.DataFrame) -> dict:
catencoders = {}
for colname in df.columns:
if is_string_dtype(df[colname]) or is_object_dtype(df[colname]):
df[colname] = df[colname].astype('category').cat.as_ordered()
catencoders[colname] = df[colname].cat.categories
return catencoders
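# Minimal sketch of df_string_to_cat on a toy frame (column names and values are
# illustrative only):
# >>> toy = pd.DataFrame({'state': ['CA', 'CO', 'CA'], 'temp': [70.1, 40.2, 71.3]})
# >>> encoders = df_string_to_cat(toy)
# >>> toy['state'].dtype        # ordered categorical
# >>> list(encoders['state'])   # ['CA', 'CO']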
def toy_weather_data():
def temp(x): return np.sin((x + 365 / 2) * (2 * np.pi) / 365)
def noise(state): return np.random.normal(-5, 5, sum(df['state'] == state))
df = pd.DataFrame()
df['dayofyear'] = range(1, 365 + 1)
df['state'] = np.random.choice(['CA', 'CO', 'AZ', 'WA'], len(df))
df['temperature'] = temp(df['dayofyear'])
df.loc[df['state'] == 'CA', 'temperature'] = 70 + df.loc[
df['state'] == 'CA', 'temperature'] * noise('CA')
df.loc[df['state'] == 'CO', 'temperature'] = 40 + df.loc[
df['state'] == 'CO', 'temperature'] * noise('CO')
df.loc[df['state'] == 'AZ', 'temperature'] = 90 + df.loc[
df['state'] == 'AZ', 'temperature'] * noise('AZ')
df.loc[df['state'] == 'WA', 'temperature'] = 60 + df.loc[
df['state'] == 'WA', 'temperature'] * noise('WA')
return df
def weather():
df_yr1 = toy_weather_data()
df_yr1['year'] = 1980
df_yr2 = toy_weather_data()
df_yr2['year'] = 1981
df_yr3 = toy_weather_data()
df_yr3['year'] = 1982
df_raw = pd.concat([df_yr1, df_yr2, df_yr3], axis=0)
df = df_raw.copy()
catencoders = df_string_to_cat(df_raw.copy())
# states = catencoders['state']
# print(states)
#
# df_cat_to_catcode(df)
names = {'CO': 5, 'CA': 10, 'AZ': 15, 'WA': 20}
df['state'] = df['state'].map(names)
catnames = OrderedDict()
for k,v in names.items():
catnames[v] = k
X = df.drop('temperature', axis=1)
y = df['temperature']
# leaf_xranges, leaf_slopes, slope_counts_at_x, dx, slope_at_x, pdpx, pdpy, ignored_ = \
# partial_dependence(X=X, y=y, colname='dayofyear',
# verbose=True)
# print(pdpx)
# print(pdpy)
plot_catstratpd(X, y, 'state', 'temperature', catnames=catnames,
# min_samples_leaf=30,
n_trials=10,
min_y_shifted_to_zero=True,
show_x_counts=False,
bootstrap=True,
yrange=(-2, 60),
figsize=(2.1,2.5)
)
plt.show()
def bigX_data(n):
x1 = np.random.uniform(-1, 1, size=n)
x2 = np.random.uniform(-1, 1, size=n)
x3 = np.random.uniform(-1, 1, size=n)
y = 0.2 * x1 - 5 * x2 + 10 * x2 * np.where(x3 >= 0, 1, 0) + np.random.normal(0, 1,
size=n)
df = | pd.DataFrame() | pandas.DataFrame |
#! /usr/bin/env python
import sys, os
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, create_engine, MetaData
from sqlalchemy import Table, ForeignKey
from sqlalchemy.orm import sessionmaker, relationship
from sqlalchemy.dialects.postgresql import *
from sqlalchemy.sql.expression import update
from psycopg2.extensions import register_adapter, AsIs
import numpy
import datetime as dt
import logging
import pandas as pd
import re
# Adapter for numpy datatypes
def adapt_numpy_int64(numpy_int64):
''' Enable postgres to recognize numpy's int64 data type'''
return AsIs(numpy_int64)
register_adapter(numpy.int64, adapt_numpy_int64)
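# With the adapter registered, psycopg2 can bind numpy.int64 values directly, e.g.
# (hypothetical sketch; the cursor, table and column names are illustrative only):
# >>> cur.execute("INSERT INTO count_table (count_observation) VALUES (%s)",
# ...             (numpy.int64(42),))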
engine = create_engine(
'postgresql+psycopg2://---/popler_3',
echo=True)
conn = engine.connect()
# Mapping metadata
metadata = MetaData(bind=engine)
# Creating base
base = declarative_base()
# creating classes for tables to query things
class lter_table(base):
__table__ = Table('lter_table', metadata, autoload=True)
class study_site_table(base):
__table__ = Table('study_site_table', metadata, autoload=True)
class project_table(base):
__table__ = Table('project_table', metadata, autoload=True)
site_in_proj = relationship(
'site_in_project_table', cascade="delete, delete-orphan")
class site_in_project_table(base):
__table__ = Table('site_in_project_table', metadata, autoload=True)
taxa = relationship(
'taxa_table', cascade="delete, delete-orphan")
class taxa_table(base):
__table__ = Table('taxa_table', metadata, autoload=True)
count = relationship(
'count_table', cascade="delete, delete-orphan")
density = relationship(
'density_table', cascade="delete, delete-orphan")
biomass = relationship(
'biomass_table', cascade="delete, delete-orphan")
percent_cover = relationship(
'percent_cover_table', cascade="delete, delete-orphan")
individual = relationship(
'individual_table', cascade="delete, delete-orphan")
class taxa_accepted_table(base):
__table__ = Table('taxa_accepted_table', metadata, autoload=True)
class count_table(base):
__table__ = Table('count_table', metadata, autoload=True)
class biomass_table(base):
__table__ = Table('biomass_table', metadata, autoload=True)
class density_table(base):
__table__ = Table('density_table', metadata, autoload=True)
class percent_cover_table(base):
__table__ = Table('percent_cover_table', metadata, autoload=True)
class individual_table(base):
__table__ = Table('individual_table', metadata, autoload=True)
Session = sessionmaker(bind=engine, autoflush=False)
# Helper Functions
def find_types(tbl, name):
''' Method to get data types from Tbls'''
dictname = {}
for i, item in enumerate(tbl.__table__.c):
name = (str(item).split('.')[1])
dictname[name] = str(
tbl.__table__.c[name].type)
return dictname
study_site_types = find_types(study_site_table, 'study_site')
project_types = find_types(project_table, 'project')
taxa_types = find_types(taxa_table, 'taxa')
taxa_accepted_types = find_types(taxa_accepted_table, 'taxa_accepted')
count_types = find_types(count_table, 'count')
biomass_types = find_types(biomass_table, 'biomass')
density_types = find_types(density_table, 'density')
percent_cover_types = find_types(percent_cover_table, 'percent_cover')
individual_types = find_types(individual_table, 'individual')
def convert_types(dataframe, types):
'''
Method to convert data types in dataframe to match
column types in database
'''
for i in dataframe.columns:
if types[i] in ['FLOAT', 'Float', 'NUMERIC']:
dataframe.loc[:, i] = pd.to_numeric(dataframe[i], errors='ignore')
if types[i] in ['INTEGER', 'Integer']:
dataframe.loc[:, i] = dataframe[i].astype(int)
if types[i] in ['VARCHAR', 'TEXT', 'VARCHAR(50)', 'VARCHAR(200)']:
try:
dataframe.loc[:, i] = dataframe[i].astype(str)
                print('In CONVERT:', i, dataframe.loc[:, i].dtypes)
except Exception as e:
print('string conversion did not work:', i, str(e))
dataframe.loc[:, i] = dataframe[i].astype(object)
if i in ['year', 'month', 'day']:
dataframe.loc[:, i] = | pd.to_numeric(dataframe[i], errors='coerce') | pandas.to_numeric |
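# Minimal sketch of convert_types (the types dict mirrors find_types output; the values
# shown are illustrative only):
# >>> df = pd.DataFrame({'year': ['2001', '2002'], 'site': ['A', 'B'], 'density': ['1.5', '2.0']})
# >>> types = {'year': 'INTEGER', 'site': 'VARCHAR', 'density': 'NUMERIC'}
# >>> convert_types(df, types)
# >>> df.dtypes   # year coerced to numeric, site kept as str/object, density to float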
#!/usr/bin/env python
# coding: utf-8
# **[Machine Learning Home Page](https://www.kaggle.com/learn/intro-to-machine-learning)**
#
# ---
#
# # Introduction
# Machine learning competitions are a great way to improve your data science skills and measure your progress.
#
# In this exercise, you will create and submit predictions for a Kaggle competition. You can then improve your model (e.g. by adding features) and see how you stack up against others taking this micro-course.
#
# The steps in this notebook are:
# 1. Build a Random Forest model with all of your data (**X** and **y**)
# 2. Read in the "test" data, which doesn't include values for the target. Predict home values in the test data with your Random Forest model.
# 3. Submit those predictions to the competition and see your score.
# 4. Optionally, come back to see if you can improve your model by adding features or changing your model. Then you can resubmit to see how that stacks up on the competition leaderboard.
# ## Recap
# Here's the code you've written so far. Start by running it again.
# In[ ]:
# Code you have previously used to load data
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor
# Set up code checking
import os
if not os.path.exists("../input/train.csv"):
os.symlink("../input/home-data-for-ml-course/train.csv", "../input/train.csv")
os.symlink("../input/home-data-for-ml-course/test.csv", "../input/test.csv")
from learntools.core import binder
binder.bind(globals())
from learntools.machine_learning.ex7 import *
# Path of the file to read. We changed the directory structure to simplify submitting to a competition
iowa_file_path = '../input/train.csv'
home_data = | pd.read_csv(iowa_file_path) | pandas.read_csv |
name = 'nfl_data_py'
import pandas
import numpy
import datetime
def import_pbp_data(years, columns=None, downcast=True):
"""Imports play-by-play data
Args:
years (List[int]): years to get PBP data for
columns (List[str]): only return these columns
downcast (bool): convert float64 to float32, default True
Returns:
DataFrame
"""
if not isinstance(years, (list, range)):
raise ValueError('Input must be list or range.')
if min(years) < 1999:
raise ValueError('Data not available before 1999.')
if columns is None:
columns = []
plays = | pandas.DataFrame() | pandas.DataFrame |
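# Example call (sketch; the seasons and columns available depend on the nflfastR data
# releases that nfl_data_py downloads, so the values below are illustrative only):
# >>> pbp = import_pbp_data([2020, 2021], columns=['play_id', 'epa'])
# >>> pbp.shape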
import statistics
import numpy as np
import pandas as pd
import scipy.stats as scs
from numpy import ndarray
from sklearn import preprocessing
from sklearn.preprocessing import MinMaxScaler
from sklearn.svm import SVR
from Common.Measures.Time.TimeSpan import TimeSpan
from Common.Readers.Engine.PandaEngine import PandaEngine
from Common.Readers.Engine.YahooFinanceEngine import YahooFinanceEngine
from Common.StockOptions.AbstractStockOption import AbstractStockOption
from Common.WebScrappers.Yahoo.YahooSummaryScrapper import YahooSummaryScrapper
class YahooStockOption(AbstractStockOption):
__snp_ratio: float = 0.0
ForecastSpan: int = 30
_train_size: int = -1
_test_size: int = -1
_length: int = -1
_training_percent: int = 0.8
_min_max_scaler: MinMaxScaler
_column_series_loc: pd.Series
_column_series: pd.Series
_date_series: pd.Series
_column_array: ndarray
_column_train_array: ndarray
_column_test_array: ndarray
DataSimpleReturns: pd.DataFrame
DataLogReturns: pd.DataFrame
SimpleAnnually: pd.DataFrame
SimpleAnnuallyCum: pd.Series
SimpleDaily: pd.DataFrame
SimplyDailyCum: pd.Series
SimpleMonthly: pd.DataFrame
SimpleMonthlyCum: pd.Series
SimpleQuarterly: pd.DataFrame
SimpleQuarterlyCum: pd.Series
SimpleWeekly: pd.DataFrame
SimpleWeeklyCum: pd.Series
_x_scaled_array: np.ndarray
_x_train_array: np.ndarray
_x_test_array: np.ndarray
SimpleDailyReturnAvg: float = -1.1
SimpleWeeklyReturnAvg: float = -1.1
SimpleMonthlyReturnAvg: float = -1.1
SimpleQuarterlyReturnAvg: float = -1.1
SimpleAnnuallyReturnAvg: float = -1.1
RMSE: float = -1.1
YeUrl: str = 'NA'
YeLogoUrl: str = 'NA'
YeAddress: str = 'NA'
YeCity: str = 'NA'
YePostalCode: str = 'NA'
YeState: str = 'NA'
YeCountry: str = 'NA'
YeBeta: float = -1.1
YeMarket: str = 'NA'
YeCurrency: str = 'NA'
YeExchange: str = 'NA'
YeHigh52: float = -1.1
YeLow52: float = -1.1
YeAverage50: float = -1.1
YeAverage200: float = -1.1
YeMarketCap: float = -1.1
YePayoutRatio: float = -1.1
YePeForward: float = -1.1
YePeTrailing: float = -1.1
YePegRatio: float = -1.1
YeShortRatio: float = -1.1
YeBookValue: float = -1.1
YePriceToBook: float = -1.1
YssBeta: str = ''
YssEarningsDate: str = ''
YssLink: str = ''
YssMarketCap: str = ''
YssPeRatio: str = ''
    def __init__(self, y_fin_engine: YahooFinanceEngine, y_sum_scrapper: YahooSummaryScrapper, a_ticker: str = 'CNI', a_src: str = 'yahoo', a_col: str = 'Adj Close'):
self._source = a_src
self._column = a_col
self._ticker = a_ticker
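        # Load the full price history and derive basic descriptive statistics (mean, std, median)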
self._historical = self._setData()
self._t_s = self._updateTimeSpan(self._t_s, self._historical)
self._data = self._historical[self._column].to_frame()
self._data_range = self._getDataRange(1000, self._data[self._column])
self._mu = round(self._historical[self._column].mean(), 2)
self._sigma = round(self._historical[self._column].std(), 2)
self._median = round(self._historical[self._column].median(), 2)
self._norm_pdf = self._getProbabilityDensityFunction(self.DataRange, self._mu, self._sigma)
self._data['Norm'] = self._setNormalizer(self._historical)
self._data['NormL1'] = self._setNormalizerL1(self._historical)
self._data['Binary'] = self._setBinarizer(self._historical)
self._data['Sparse'] = self._setSparser(self._historical)
self._data['Scaled'] = self._setScaler(self._historical)
self._setPreProcessing(self._historical)
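        # Simple and log returns, plus cumulative and average returns from daily up to annual frequency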
self.DataSimpleReturns = self._setSimpleReturns('', self._historical)
self.DataSimpleReturns = self._setSimpleReturnsPlus(self.DataSimpleReturns)
self._data['IsOutlier'] = self.DataSimpleReturns.IsOutlier.astype(bool)
self.DataLogReturns = self._setLogReturns(self._historical)
self.DataLogReturns = self._setLogReturnsPlus(self.DataLogReturns)
self.SimpleDaily = self._setSimpleReturns('', self._historical)
self.SimplyDailyCum = self._setSimpleCumulative(self.SimpleDaily)
self.SimpleDailyReturnAvg = self._setSimpleReturnAverage(self.SimplyDailyCum)
self.SimpleWeekly = self._setSimpleReturns('W', self._historical)
self.SimpleWeeklyCum = self._setSimpleCumulative(self.SimpleWeekly)
self.SimpleWeeklyReturnAvg = self._setSimpleReturnAverage(self.SimpleWeeklyCum)
self.SimpleMonthly = self._setSimpleReturns('M', self._historical)
self.SimpleMonthlyCum = self._setSimpleCumulative(self.SimpleMonthly)
self.SimpleMonthlyReturnAvg = self._setSimpleReturnAverage(self.SimpleMonthlyCum)
self.SimpleQuarterly = self._setSimpleReturns('Q', self._historical)
self.SimpleQuarterlyCum = self._setSimpleCumulative(self.SimpleQuarterly)
self.SimpleQuarterlyReturnAvg = self._setSimpleReturnAverage(self.SimpleQuarterlyCum)
self.SimpleAnnually = self._setSimpleReturns('A', self._historical)
self.SimpleAnnuallyCum = self._setSimpleCumulative(self.SimpleAnnually)
self.SimpleAnnuallyReturnAvg = self._setSimpleReturnAverage(self.SimpleAnnuallyCum)
(self.IsDaily, self.IsWeekly, self.IsMonthly, self.IsQuarterly, self.IsAnnually) = \
self._setIsTimely(self.SimpleDailyReturnAvg, self.SimpleWeeklyReturnAvg,
self.SimpleMonthlyReturnAvg, self.SimpleQuarterlyReturnAvg, self.SimpleAnnuallyReturnAvg)
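        # Populate the Ye*/Yss* fields from Yahoo Finance and the scraped summary page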
self._setYahooFinance(y_fin_engine)
        self._setYahooSummary(y_sum_scrapper)
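    # Read-only accessors for the cached arrays, series, split sizes and scaler built during preprocessing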
@property
def SnpRatio(self):
return self.__snp_ratio
@property
def ArrayScaledX(self):
return self._x_scaled_array
@property
def ArrayTestX(self):
return self._x_test_array
@property
def ArrayTrainX(self):
return self._x_train_array
@property
def ColumnLocSeries(self):
return self._column_series_loc
@property
def ColumnSeries(self):
return self._column_series
@property
def ColumnArray(self):
return self._column_array
@property
def ColumnTestArray(self):
return self._column_test_array
@property
def ColumnTrainArray(self):
return self._column_train_array
@property
def DateSeries(self):
return self._date_series
@property
def TrainPercent(self):
return self._training_percent
@property
def TestSize(self):
return self._test_size
@property
def TrainSize(self):
return self._train_size
@property
def Length(self):
return self._length
@property
def MinMaxScale(self):
return self._min_max_scaler
def SetSnpRatio(self, a_df: pd.DataFrame, a_col: str):
a_series = round(self.Data['Norm'].divide(a_df[a_col].replace(0, 1)), 3)
        self.__snp_ratio = a_series.iloc[-1]
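    # Flag rows that lie more than n_sigmas standard deviations away from the rolling 'mean'/'std' columns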
def _getOutliers(self, a_df: pd.DataFrame, n_sigmas: int = 3):
a_df['IsOutlier'] = pd.Series(dtype=int)
a_df['Outliers'] = pd.Series(dtype=float)
for ind in a_df.index:
            x = a_df.loc[ind, self.Column]
            mu = a_df.loc[ind, 'mean']
            sigma = a_df.loc[ind, 'std']
            is_outlier = (x > mu + n_sigmas * sigma) or (x < mu - n_sigmas * sigma)
            a_df.loc[ind, 'IsOutlier'] = 1 if is_outlier else 0
            if is_outlier:
                a_df.loc[ind, 'Outliers'] = x
return a_df
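    # Download the raw price history, fill gaps in both directions and cache the 52-week and latest-price statistics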
def _setData(self) -> pd.DataFrame:
a_df: pd.DataFrame = PandaEngine(self.Source, self._t_s, self._ticker).DataFrame
a_df.fillna(method='ffill', inplace=True)
a_df.fillna(method='bfill', inplace=True)
# self.HistoricalData.columns = self.Ticker + self.HistoricalData.columns
self._high52 = self.__setHigh52(a_df)
self._low52 = self.__setLow52(a_df)
self._range52 = [self._low52, self._high52]
self._price = self.__setPrice(a_df)
return a_df
def __setHigh52(self, a_df) -> float:
return round(self.YeHigh52, 6)
def __setLow52(self, a_df) -> float:
return round(self.YeLow52, 6)
def __setPrice(self, a_df) -> float:
i = a_df['Adj Close'].iloc[-1]
return round(i, 6)
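    # Helpers that build the normalised, binarised, sparse and scaled columns added to self._data in __init__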
    def _setNormalizer(self, a_df: pd.DataFrame = pd.DataFrame()) -> pd.Series:
return (a_df / a_df.iloc[0])[self.Column]
    def _setNormalizerL1(self, a_df: pd.DataFrame = pd.DataFrame()) -> pd.Series:
return \
pd.DataFrame(preprocessing.normalize(a_df, norm='l1'), columns=a_df.columns, index=a_df.index)[self.Column]
def _setBinarizer(self, a_df: pd.DataFrame = | pd.DataFrame() | pandas.DataFrame |